Columns: message (string, 13-484 chars) · diff (string, 38-4.63k chars)
Added legendre_triple_product: analytical solution of the triple product of Legendre polynomials
@@ -45,3 +45,14 @@ class Legendre(Polynomials): l = np.sqrt(2) * l / st_lege_norm return l + + @staticmethod + def legendre_triple_product (k,l,m): + + normk=1/((2*k)+1) + norml=1/((2*l)+1) + normm=1/((2*m)+1) + norm=np.sqrt(normm/(normk*norml)) + + + return norm*(2*m+1)*Legendre.wigner_3j_PCE(k,l,m)**2
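A hedged note on the identity behind this method, assuming `wigner_3j_PCE(k, l, m)` evaluates the Wigner 3j symbol with zero magnetic indices (background only, not part of the commit):

```latex
\int_{-1}^{1} P_k(x)\,P_l(x)\,P_m(x)\,dx
  = 2 \begin{pmatrix} k & l & m \\ 0 & 0 & 0 \end{pmatrix}^{2},
\qquad
\mathbb{E}\!\left[\tilde P_k \tilde P_l \tilde P_m\right]
  = \sqrt{(2k+1)(2l+1)(2m+1)}
    \begin{pmatrix} k & l & m \\ 0 & 0 & 0 \end{pmatrix}^{2}
```

Here $\tilde P_i = \sqrt{2i+1}\,P_i$ are the polynomials normalized against the uniform density on $[-1, 1]$ (so that $\mathbb{E}[P_i^2] = 1/(2i+1)$); the returned expression `norm*(2*m+1)*wigner_3j_PCE(k,l,m)**2` matches the second quantity.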
Update lang.py account for nonetype being passed to language functions
@@ -3,8 +3,11 @@ from babelfish import Language def getAlpha3TCode(code, default=None): - code = code.strip().lower().replace('.', '') lang = default or 'und' + if not code: + return lang + + code = code.strip().lower().replace('.', '') if len(code) == 3: try: @@ -26,8 +29,11 @@ def getAlpha3TCode(code, default=None): def getAlpha2BCode(code, default=None): - code = code.strip().lower().replace('.', '') lang = default or 'un' + if not code: + return lang + + code = code.strip().lower().replace('.', '') if len(code) == 3: try:
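A minimal sketch of the new guard behaviour, assuming the patched lang.py is importable (function name and defaults taken from the diff; the non-empty branch still goes through babelfish as before):

```python
# Hypothetical calls illustrating the None/empty-string guard added above:
getAlpha3TCode(None)         # -> 'und' instead of crashing on None.strip()
getAlpha3TCode("", "eng")    # -> 'eng' (an explicit default wins over 'und')
```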
Update CODEOWNERS for distributed and rpc modules Summary: Pull Request resolved: Test Plan: Imported from OSS
/docs/cpp @goldsborough @ebetica @yf225 /torch/csrc/api/ @ebetica @goldsborough @yf225 /test/cpp/api/ @ebetica @goldsborough @yf225 -/torch/lib/c10d/ @pietern @mrshenli -/torch/csrc/distributed/ @pietern @mrshenli -/torch/distributed/ @apaszke @pietern @mrshenli -/test/test_c10d.py @pietern @mrshenli +/torch/lib/c10d/ @pietern @mrshenli @zhaojuanmao +/torch/csrc/distributed/ @pietern @mrshenli @zhaojuanmao +/torch/distributed/ @apaszke @pietern @mrshenli @zhaojuanmao +/test/test_c10d.py @pietern @mrshenli @zhaojuanmao /torch/utils/cpp_extension.py @goldsborough @fmassa @soumith @ezyang # Not there to stricly require the approval, but to be tagged as a reviewer /torch/utils/data/ @apaszke # Distributed RPC Framework. -/torch/csrc/distributed/rpc @mrshenli @pritamdamania87 -/torch/csrc/distributed/autograd @mrshenli @pritamdamania87 -/torch/distributed/rpc @mrshenli @pritamdamania87 -/torch/distributed/autograd @mrshenli @pritamdamania87 +/torch/csrc/distributed/rpc @mrshenli @pritamdamania87 @zhaojuanmao +/torch/csrc/distributed/autograd @mrshenli @pritamdamania87 @zhaojuanmao +/torch/distributed/rpc @mrshenli @pritamdamania87 @zhaojuanmao +/torch/distributed/autograd @mrshenli @pritamdamania87 @zhaojuanmao
[PersonalRoles] optimize code "async with" not required for just reading data
@@ -121,7 +121,7 @@ class PersonalRoles(commands.Cog): @checks.admin_or_permissions(manage_roles=True) async def bl_list(self, ctx): """List of blacklisted role names""" - async with self.config.guild(ctx.guild).blacklist() as blacklist: + blacklist = await self.config.guild(ctx.guild).blacklist() pages = [chat.box(page) for page in chat.pagify('\n'.join(blacklist))] if pages: await menu(ctx, pages, DEFAULT_CONTROLS) @@ -154,8 +154,7 @@ class PersonalRoles(commands.Cog): role = await self.config.member(ctx.author).role() role = ctx.guild.get_role(role) name = name[:100] - async with self.config.guild(ctx.guild).blacklist() as blacklist: - if name.casefold() in blacklist: + if name.casefold() in await self.config.guild(ctx.guild).blacklist(): await ctx.send(chat.error("NONONO!!! This rolename is blacklisted.")) return try:
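A hedged sketch of the distinction the commit relies on (Red-DiscordBot's Config API as it appears in the diff): awaiting the value group just reads, while the async context manager exists to write changes back when the block exits.

```python
# Read-only access: a plain awaited call returns the stored value.
blacklist = await self.config.guild(ctx.guild).blacklist()

# Mutation: the context manager saves the (possibly modified) list on exit,
# so it is only worth the extra round trip when the value actually changes.
async with self.config.guild(ctx.guild).blacklist() as blacklist:
    blacklist.append(name.casefold())  # hypothetical mutation, e.g. in a blacklist-add command
```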
fix profiler and self.inputs in process_results.py profiler was not measuring all of the time consumed in process_results.py (especially not the outage simulator run as part of calc_avoided_outage_costs) inputs passed to Results class was not attached to self
@@ -83,6 +83,7 @@ def process_results(self, dfm_list, data, meta, saveToDB=True): :param saveToDB: boolean for saving postgres models :return: None """ + profiler = Profiler() class Results: @@ -108,13 +109,13 @@ def process_results(self, dfm_list, data, meta, saveToDB=True): "gen_year_one_variable_om_costs", ] - def __init__(self, results_dict, results_dict_bau, dfm, inputs=[]): + def __init__(self, results_dict, results_dict_bau, dfm, inputs): """ Convenience (and legacy) class for handling REopt results :param results_dict: flat dict of results from reopt.jl :param results_dict_bau: flat dict of results from reopt.jl for bau case """ - self.profiler = Profiler() + self.inputs = inputs self.dfm = dfm # remove invalid sizes due to optimization error margins for r in [results_dict, results_dict_bau]: @@ -423,8 +424,6 @@ def process_results(self, dfm_list, data, meta, saveToDB=True): self.nested_outputs["Scenario"]["Site"][name][ "existing_gen_year_one_fuel_cost_us_dollars"] = self.results_dict.get( "gen_year_one_fuel_cost_bau") - self.profiler.profileEnd() - self.nested_outputs["Scenario"]["Profile"]["parse_run_outputs_seconds"] = self.profiler.getDuration() def compute_total_power(self, tech): power_lists = list() @@ -448,7 +447,7 @@ def process_results(self, dfm_list, data, meta, saveToDB=True): try: results_object = Results(results_dict=dfm_list[0]['results'], results_dict_bau=dfm_list[1]['results_bau'], - dfm=dfm_list[0]) + dfm=dfm_list[0], inputs=data['inputs']['Scenario']['Site']) results = results_object.get_output() data['outputs'].update(results) data['outputs']['Scenario'].update(meta) # run_uuid and api_version @@ -468,6 +467,9 @@ def process_results(self, dfm_list, data, meta, saveToDB=True): # Calculate avoided outage costs calc_avoided_outage_costs(data, present_worth_factor=dfm_list[0]['pwf_e']) + profiler.profileEnd() + data['outputs']["Scenario"]["Profile"]["parse_run_outputs_seconds"] = profiler.getDuration() + if len(data['outputs']['Scenario']['Site']['PV'])==1: data['outputs']['Scenario']['Site']['PV'] = data['outputs']['Scenario']['Site']['PV'][0] if saveToDB:
Testsuite: reset StructMetaclass.entity_info TN:
@@ -144,6 +144,7 @@ def reset_langkit(): StructMetaclass.astnode_types = [] StructMetaclass.struct_types = [] StructMetaclass.env_metadata = None + StructMetaclass.entity_info = None Self.__dict__['_frozen'] = False T._type_dict = {}
Update README.md Point to new documentation on readthedocs.io
@@ -11,28 +11,24 @@ However, powerful business logic works in the background to ensure that stock tr ## Getting Started -Refer to the [getting started guide](https://inventree.github.io/docs/start/install) for installation and setup instructions. +Refer to the [getting started guide](https://inventree.readthedocs.io/en/latest/start/install/) for installation and setup instructions. ## Documentation -For InvenTree documentation, refer to the [InvenTree documentation website](https://inventree.github.io). +For InvenTree documentation, refer to the [InvenTree documentation website](https://inventree.readthedocs.io/en/latest/). ## Integration InvenTree is designed to be extensible, and provides multiple options for integration with external applications or addition of custom plugins: -* [InvenTree API](https://inventree.github.io/docs/extend/api) -* [Python module](https://inventree.github.io/docs/extend/python) -* [Plugin interface](https://inventree.github.io/docs/extend/plugins) -* [Third party](https://inventree.github.io/docs/extend/integrate) - -## Developer Documentation - -For code documentation, refer to the [developer documentation](http://inventree.readthedocs.io/en/latest/). +* [InvenTree API](https://inventree.readthedocs.io/en/latest/extend/api/) +* [Python module](https://inventree.readthedocs.io/en/latest/extend/python) +* [Plugin interface](https://inventree.readthedocs.io/en/latest/extend/plugins) +* [Third party](https://inventree.readthedocs.io/en/latest/extend/integrate) ## Contributing -Contributions are welcomed and encouraged. Please help to make this project even better! Refer to the [contribution page](https://inventree.github.io/pages/contribute). +Contributions are welcomed and encouraged. Please help to make this project even better! Refer to the [contribution page](https://inventree.readthedocs.io/en/latest/contribute/). ## Donate
Encode filename with sys.getfilesystemencoding() On Linux, it remains utf-8. On Windows, broken filepaths with non-ASCII characters are fixed with this patch.
@@ -1793,7 +1793,7 @@ references to the parent Dataset or Group. if diskless and __netcdf4libversion__ < '4.2.1': #diskless = False # don't raise error, instead silently ignore raise ValueError('diskless mode requires netcdf lib >= 4.2.1, you have %s' % __netcdf4libversion__) - bytestr = _strencode(str(filename)) + bytestr = _strencode(str(filename), encoding=sys.getfilesystemencoding()) path = bytestr if memory is not None and (mode != 'r' or type(memory) != bytes):
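A small sketch of what the patched call does, with a hypothetical filename (the real one comes from the Dataset constructor):

```python
import sys

filename = "données.nc"                                # hypothetical non-ASCII path
bytestr = filename.encode(sys.getfilesystemencoding())
# Linux: usually 'utf-8', so behaviour is unchanged; on older Windows Pythons this is
# the ANSI code page ('mbcs'), so non-ASCII paths round-trip instead of being mangled.
```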
Update README * Update README.md * Typo fix for README * Revert "Typo fix for README" This reverts commit
BaseAxisPartition -> PandasOnRayAxisPartition -> {PandasOnRayColumnPartition, PandasOnRayRowPartition} ``` - `BaseAxisPartition` is a high level view onto BaseBlockPartitions' data. It is more - convient to operate on `BaseAxisPartition` sometimes. \ No newline at end of file + convenient to operate on `BaseAxisPartition` sometimes.
defaults: fix CI issue with ceph_uid fact The CI complains because the `ceph_uid` fact doesn't exist, since the docker image tag used in the CI doesn't match this condition.
ceph_uid: 64045 when: - containerized_deployment - - ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("ubuntu") + - ceph_docker_image_tag | search("latest") or ceph_docker_image_tag | search("ubuntu") - name: set_fact ceph_uid for Red Hat based system set_fact:
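Ansible's `match` test is anchored at the start of the string, while `search` matches anywhere; the same distinction as Python's own `re.match` vs `re.search`, sketched here with a hypothetical image tag:

```python
import re

tag = "centos-latest"             # hypothetical image tag
print(re.match("latest", tag))    # None: the tag does not *start* with "latest"
print(re.search("latest", tag))   # a match object: "latest" is found anywhere in the string
```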
[tests] Revert "Temporary deactivate wikidata default site tests" This reverts commit
@@ -155,9 +155,8 @@ matrix: env: LANGUAGE=test FAMILY=wikidata PYWIKIBOT_SITE_ONLY=1 - python: '3.4' env: LANGUAGE=ar FAMILY=wiktionary PYWIKIBOT_TEST_NO_RC=1 - # T220999 - # - python: '3.6' - # env: LANGUAGE=wikidata FAMILY=wikidata PYWIKIBOT_SITE_ONLY=1 + - python: '3.6' + env: LANGUAGE=wikidata FAMILY=wikidata PYWIKIBOT_SITE_ONLY=1 - python: '3.7' env: LANGUAGE=de FAMILY=wikipedia dist: xenial
Playground should have red errors/tooltips when required config is missing Summary: the 'missing' state should be shown in the same style as 'invalid'; use an explicit tooltip instead of the div's title Test Plan: {F159351} Reviewers: nate, alangenfeld, max, bengotow
import * as React from "react"; import gql from "graphql-tag"; import styled from "styled-components/macro"; -import { Colors, Icon, Checkbox } from "@blueprintjs/core"; +import { + Colors, + Icon, + Checkbox, + Tooltip, + Intent, + Position +} from "@blueprintjs/core"; import PythonErrorInfo from "../PythonErrorInfo"; import { showCustomAlert } from "../CustomAlertProvider"; @@ -25,6 +32,22 @@ function isValidationError(e: ValidationErrorOrNode): e is ValidationError { return e && typeof e === "object" && "__typename" in e ? true : false; } +const stateToHint = { + invalid: { + title: `You need to fix this configuration section.`, + intent: Intent.DANGER + }, + missing: { + title: `You need to add this configuration section.`, + intent: Intent.DANGER + }, + present: { + title: `This section is present and valid.`, + intent: Intent.SUCCESS + }, + none: { title: `This section is empty and valid.`, intent: Intent.PRIMARY } +}; + interface RunPreviewProps { validation: RunPreviewValidationFragment | null; document: object | null; @@ -208,17 +231,15 @@ export class RunPreview extends React.Component< : "none"; return ( + <Tooltip + position={Position.BOTTOM} + content={stateToHint[state].title} + intent={stateToHint[state].intent} + key={name} + > <Item key={name} state={state} - title={ - { - invalid: `You need to fix this configuration section.`, - missing: `You need to add this configuration section.`, - present: `This section is present and valid.`, - none: `This section is empty and valid.` - }[state] - } onClick={() => { const first = pathErrors.find(isValidationError); onHighlightPath( @@ -228,6 +249,7 @@ export class RunPreview extends React.Component< > {name} </Item> + </Tooltip> ); }) .filter(Boolean); @@ -339,21 +361,21 @@ const ItemsEmptyNotice = styled.div` const ItemBorder = { invalid: `1px solid #CE1126`, - missing: `1px solid #D9822B`, + missing: `1px solid #CE1126`, present: `1px solid #AFCCE1`, none: `1px solid ${Colors.LIGHT_GRAY2}` }; const ItemBackground = { invalid: Colors.RED5, - missing: "#F2A85C", + missing: Colors.RED5, present: "#C8E1F4", none: Colors.LIGHT_GRAY4 }; const ItemBackgroundHover = { invalid: "#E15858", - missing: "#F2A85C", + missing: "#E15858", present: "#AFCCE1", none: Colors.LIGHT_GRAY4 };
[tests] Improvements for DeprecationTestCase remove the '.pyo' extension, which was used by Python < 3.5 with the -O option; avoid deeply nested flow control in _build_message
@@ -1408,7 +1408,7 @@ class DeprecationTestCase(DebugOnlyTestCase, TestCase): self.warning_log = [] self.expect_warning_filename = inspect.getfile(self.__class__) - if self.expect_warning_filename.endswith(('.pyc', '.pyo')): + if self.expect_warning_filename.endswith('.pyc'): self.expect_warning_filename = self.expect_warning_filename[:-1] self._do_test_warning_filename = True @@ -1430,18 +1430,17 @@ class DeprecationTestCase(DebugOnlyTestCase, TestCase): @classmethod def _build_message(cls, deprecated, instead): - if deprecated is None: - if instead is None: + if deprecated is not None: + msg = '{0} is deprecated'.format(deprecated) + if instead: + msg += '; use {0} instead.'.format(instead) + elif instead is None: msg = None elif instead is True: msg = cls.INSTEAD else: assert instead is False msg = cls.NO_INSTEAD - else: - msg = '{0} is deprecated'.format(deprecated) - if instead: - msg += '; use {0} instead.'.format(instead) return msg def assertDeprecationParts(self, deprecated=None, instead=None):
Convert footnote to note Github doesn't render footnotes correctly.
@@ -106,7 +106,10 @@ Once your PR has been merged, a GitHub action will automatically create the rele After a couple minutes, check for the new release's appearance at https://pypi.org/project/cumulusci/ -Next, head to the Release object that was autocreated in the GitHub repository, edit it, paste in the changelog notes and hit publish.[#pandoc]_ +Next, head to the Release object that was autocreated in the GitHub repository, edit it, paste in the changelog notes and hit publish. + +.. note:: +If pandoc is installed on macOS, you can run ``pbpaste | pandoc -f rst -t gfm | pbcopy`` to convert from RST to Github Flavored Markdown. You can then create a pull request to update the `Homebrew Tap`_ by running this locally (note, it's important to do this as soon as possible after the release is published on PyPI, because PyPI is the source CumulusCI checks to see if a new version is available):: @@ -127,7 +130,6 @@ Finally, post the release notes to our usual channels: .. _Homebrew Tap: https://github.com/SFDO-Tooling/homebrew-sfdo .. _jq: https://stedolan.github.io/jq/ -.. [#pandoc] If pandoc is installed on macOS, you can run ``pbpaste | pandoc -f rst -t gfm | pbcopy`` Org-reliant Integration tests ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Update daylight-osm.yaml added a few tags
@@ -5,11 +5,17 @@ Documentation: "[Project Website](https://daylightmap.org)" Contact: [email protected] ManagedBy: "[Meta](https://dataforgood.fb.com/)" UpdateFrequency: Quarterly +Collabs: + ASDI: + Tags: + - disaster response # Collabs: Tags: - geospatial - osm - mapping + - disaster response + - sustainability License: | [Open Database License (ODbL)](https://opendatacommons.org/licenses/odbl/1-0/) Resources:
Fix lint in test_utils.py Summary: Pull Request resolved: ghimport-source-id: Stack: * **#17944 Fix lint in test_utils.py**
@@ -201,7 +201,7 @@ class TestCheckpoint(TestCase): ) def test_checkpoint_rng_cpu(self): - for i in range(5): + for _ in range(5): inp = torch.randn(20000, device='cpu').requires_grad_() phase1 = torch.nn.Dropout() phase2 = torch.nn.Dropout() @@ -229,7 +229,7 @@ class TestCheckpoint(TestCase): @unittest.skipIf(not HAS_CUDA, 'No CUDA') def test_checkpoint_rng_cuda(self): - for i in range(5): + for _ in range(5): inp = torch.randn(20000, device='cuda').requires_grad_() phase1 = torch.nn.Dropout() phase2 = torch.nn.Dropout() @@ -325,7 +325,7 @@ class TestBottleneck(TestCase): import subprocess from common_utils import PY3 - p = subprocess.Popen(command, stdout=subprocess.PIPE, + p = subprocess.Popen(command, stdout=subprocess.PIPE, # noqa stderr=subprocess.PIPE, shell=True) output, err = p.communicate() rc = p.returncode
TST: Add tests for datetime byteswaps and unicode byteswap casts It seems the unicode didn't actually help coverage, but here we go.
@@ -690,10 +690,28 @@ def test_datetime_string_conversion(self): def test_time_byteswapping(self, time_dtype): times = np.array(["2017", "NaT"], dtype=time_dtype) times_swapped = times.astype(times.dtype.newbyteorder()) + assert_array_equal(times, times_swapped) unswapped = times_swapped.view(np.int64).newbyteorder() assert_array_equal(unswapped, times.view(np.int64)) + @pytest.mark.parametrize(["time1", "time2"], + [("M8[s]", "M8[D]"), ("m8[s]", "m8[ns]")]) + def test_time_byteswapped_cast(self, time1, time2): + dtype1 = np.dtype(time1) + dtype2 = np.dtype(time2) + times = np.array(["2017", "NaT"], dtype=dtype1) + expected = times.astype(dtype2) + + # Test that every byte-swapping combination also returns the same + # results (previous tests check that this comparison works fine). + res1 = times.astype(dtype1.newbyteorder()).astype(dtype2) + assert_array_equal(res1, expected) + res2 = times.astype(dtype2.newbyteorder()) + assert_array_equal(res2, expected) + res3 = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder()) + assert_array_equal(res3, expected) + @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"]) @pytest.mark.parametrize("str_dtype", ["U", "S"]) def test_datetime_conversions_byteorders(self, str_dtype, time_dtype):
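A standalone version of what the new assertions check; a minimal sketch, not the NumPy test itself:

```python
import numpy as np
from numpy.testing import assert_array_equal

times = np.array(["2017", "NaT"], dtype="M8[s]")
swapped = times.astype(times.dtype.newbyteorder())
assert_array_equal(times, swapped)    # same values, only the byte order differs

# Casting to another unit should be independent of the byte order on either side.
assert_array_equal(times.astype("M8[D]"),
                   swapped.astype(np.dtype("M8[D]").newbyteorder()))
```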
Fixes Linksys.SPS2xx.get_mac_address_table HG-- branch : feature/microservices
@@ -30,13 +30,12 @@ class Script(BaseScript): vlan_oid = [] if mac is not None: mac = mac.lower() - for v in self.snmp.get_tables(["1.3.6.1.2.1.17.7.1.2.2.1.2"], - bulk=True): + for v in self.snmp.get_tables(["1.3.6.1.2.1.17.7.1.2.2.1.2"]): vlan_oid.append(v[0]) # mac iface type - for v in self.snmp.get_tables( - ["1.3.6.1.2.1.17.4.3.1.1", "1.3.6.1.2.1.17.4.3.1.2", - "1.3.6.1.2.1.17.4.3.1.3"], bulk=True): + for v in self.snmp.get_tables(["1.3.6.1.2.1.17.4.3.1.1", + "1.3.6.1.2.1.17.4.3.1.2", + "1.3.6.1.2.1.17.4.3.1.3"]): if v[1]: chassis = ":".join(["%02x" % ord(c) for c in v[1]]) if mac is not None: @@ -48,8 +47,7 @@ class Script(BaseScript): continue if int(v[3]) > 3 or int(v[3]) < 1: continue - iface = self.snmp.get("1.3.6.1.2.1.31.1.1.1.1." + v[2], - cached=True) # IF-MIB + iface = self.snmp.get("1.3.6.1.2.1.31.1.1.1.1." + str(v[2])) # IF-MIB if interface is not None: if iface == interface: pass
docs: Update Webmasters API sample * docs: Update Webmasters API sample webmasters@v3 has been merged into searchconsole@v1, so the sample should be updated accordingly. * docs: Update webmasters to searchconsole in method doc
@@ -55,7 +55,7 @@ argparser.add_argument('end_date', type=str, def main(argv): service, flags = sample_tools.init( - argv, 'webmasters', 'v3', __doc__, __file__, parents=[argparser], + argv, 'searchconsole', 'v1', __doc__, __file__, parents=[argparser], scope='https://www.googleapis.com/auth/webmasters.readonly') # First run a query to learn which dates we have data for. You should always @@ -157,7 +157,7 @@ def execute_request(service, property_uri, request): """Executes a searchAnalytics.query request. Args: - service: The webmasters service to use when executing the query. + service: The searchconsole service to use when executing the query. property_uri: The site or app URI to request data for. request: The request to be executed.
Plugins: Fix automatic heading style detection Function was confused by headings of style # MathJax <img src="url">
@@ -73,10 +73,10 @@ class MdeMatchHeadingHashesDetector(MdeViewEventListener): for h1, h2 in zip( view.find_by_selector("markup.heading")[:10], - view.find_by_selector("markup.heading - punctuation")[:10], + view.find_by_selector("markup.heading - punctuation.definition.heading")[:10], ): num_leading += 1 - if h1.end() != h2.end(): + if h1.end() > h2.end(): num_trailing += 1 if num_leading:
call superclass __init__ hopefully this doesn't break anything. it includes all the previous lines of this class's __init__ plus a few lines about `excluded_states` that hopefully we either wanted or are at least harmless.
@@ -21,10 +21,7 @@ class BeneficiaryExport(ExportableMixin, IcdsSqlData): config.update({ '5_years': 60, }) - self.config = config - self.loc_level = loc_level - self.show_test = show_test - self.beta = beta + super(BeneficiaryExport, self).__init__(config, loc_level, show_test, beta) @property def group_by(self):
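A hedged sketch of the pattern being applied; the parent's signature and its `excluded_states` handling are assumptions based on the commit message, not the real ICDS code:

```python
class IcdsSqlData:  # hypothetical stand-in for the real parent class
    def __init__(self, config, loc_level, show_test, beta):
        self.config = config
        self.loc_level = loc_level
        self.show_test = show_test
        self.beta = beta
        self.excluded_states = config.get("excluded_states", [])  # the "few extra lines" now inherited


class BeneficiaryExport(IcdsSqlData):
    def __init__(self, config, loc_level=0, show_test=False, beta=False):
        config.update({"5_years": 60})
        super(BeneficiaryExport, self).__init__(config, loc_level, show_test, beta)
```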
minor print formatting issue; used f-string formatting instead
@@ -321,23 +321,21 @@ def revoke_grants(privs_to_revoke, dry_run=False, verbose=False, roles_by_slug=N grants_to_revoke = [] for grantee_slug, priv_slugs in privs_to_revoke: if grantee_slug not in roles_by_slug: - logger.info('grantee %s does not exist.', grantee_slug) + logger.info(f'grantee {grantee_slug} does not exist.') continue for priv_slug in priv_slugs: if priv_slug not in roles_by_slug: - logger.info('privilege %s does not exist.', priv_slug) + logger.info(f'privilege {priv_slug} does not exist.') continue if priv_slug not in granted[grantee_slug]: if verbose or dry_run: - logger.info('%sPrivilege already revoked: %s => %s', - dry_run_tag, grantee_slug, priv_slug) + logger.info(f'{dry_run_tag}Privilege already revoked: {grantee_slug} => f{priv_slug}') else: granted[grantee_slug].discard(priv_slug) if verbose or dry_run: - logger.info('%Revoking privilege: %s => %s', - dry_run_tag, grantee_slug, priv_slug) + logger.info(f'{dry_run_tag}Revoking privilege: {grantee_slug} => {priv_slug}') if not dry_run: grants_to_revoke = Grant.objects.filter( from_role=roles_by_slug[grantee_slug],
invert logic I think this makes it easier to reason about
@@ -469,7 +469,7 @@ def rebuild_export(export_instance, last_access_cutoff=None, filters=None): """ Rebuild the given daily saved ExportInstance """ - if _should_not_rebuild_export(export_instance, last_access_cutoff): + if not _should_rebuild_export(export_instance, last_access_cutoff): return filters = filters or export_instance.get_filters() export_file = get_export_file([export_instance], filters or []) @@ -477,18 +477,18 @@ def rebuild_export(export_instance, last_access_cutoff=None, filters=None): save_export_payload(export_instance, payload) -def _should_not_rebuild_export(export, last_access_cutoff=None): +def _should_rebuild_export(export, last_access_cutoff=None): """ :param last_access_cutoff: Any exports not accessed since this date will not be rebuilt. - :return: False if export should be rebuilt + :return: True if export should be rebuilt """ # Don't rebuild exports that haven't been accessed since last_access_cutoff or aren't enabled is_auto_rebuild = last_access_cutoff is not None - return is_auto_rebuild and ( - not export.auto_rebuild_enabled - or ( - export.last_accessed - and export.last_accessed < last_access_cutoff + return not is_auto_rebuild or ( + export.auto_rebuild_enabled + and ( + export.last_accessed is None + or export.last_accessed > last_access_cutoff ) )
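The inversion is essentially De Morgan's laws applied to the nested condition; a quick brute-force check of the identities (a sketch, not the export-specific predicates):

```python
from itertools import product

for a, b in product([True, False], repeat=2):
    assert (not (a and b)) == ((not a) or (not b))
    assert (not (a or b)) == ((not a) and (not b))
```

Strictly speaking, the rewrite also swaps the `<` comparison for `>` (rather than `>=`) and a truthiness test for an explicit `is None`, so it is a readability-driven inversion rather than an exact logical negation of the old predicate.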
Fix urllib usage in install.py `getheader()` is no more in Python 3, use `get()` instead.
@@ -588,7 +588,7 @@ def _install_kraken_db(datadir, args): db = os.path.join(kraken, base) tooldir = args.tooldir or get_defaults()["tooldir"] requests.packages.urllib3.disable_warnings() - last_mod = urllib.request.urlopen(url).info().getheader('Last-Modified') + last_mod = urllib.request.urlopen(url).info().get('Last-Modified') last_mod = dateutil.parser.parse(last_mod).astimezone(dateutil.tz.tzutc()) if os.path.exists(os.path.join(tooldir, "bin", "kraken")): if not os.path.exists(db):
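A minimal sketch of the Python 3 behaviour the fix relies on (hypothetical URL; the real one points at the kraken DB):

```python
from urllib import request

url = "https://example.com/"
info = request.urlopen(url).info()    # http.client.HTTPMessage (an email.message.Message) in Python 3
last_mod = info.get("Last-Modified")  # mapping-style access; returns None if the header is absent
# .getheader() only existed on the Python 2 response objects, hence the error this fixes.
```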
Report IP in autoscaler DEBUG logging message Fixes
@@ -616,7 +616,7 @@ class ScalerThread(ExceptionalThread): for node, ip in ((node, node.privateIP) for node in provisionerNodes): info = None if ip not in recentMesosNodes: - logger.debug("Worker node at %s is not reporting executor information") + logger.debug("Worker node at %s is not reporting executor information", ip) # we don't have up to date information about the node info = _getInfo(allMesosNodes, ip) else:
qt cpfp: (trivial) make some strings translatable gettext was not picking these up
@@ -3306,17 +3306,17 @@ class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger): return d = WindowModalDialog(self, _('Child Pays for Parent')) vbox = QVBoxLayout(d) - msg = ( + msg = _( "A CPFP is a transaction that sends an unconfirmed output back to " "yourself, with a high fee. The goal is to have miners confirm " "the parent transaction in order to get the fee attached to the " "child transaction.") - vbox.addWidget(WWLabel(_(msg))) - msg2 = ("The proposed fee is computed using your " + vbox.addWidget(WWLabel(msg)) + msg2 = _("The proposed fee is computed using your " "fee/kB settings, applied to the total size of both child and " "parent transactions. After you broadcast a CPFP transaction, " "it is normal to see a new unconfirmed transaction in your history.") - vbox.addWidget(WWLabel(_(msg2))) + vbox.addWidget(WWLabel(msg2)) grid = QGridLayout() grid.addWidget(QLabel(_('Total size') + ':'), 0, 0) grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
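Why wrapping the literal matters, as a minimal sketch (Electrum has its own `_`, but the extraction rule is the standard gettext one):

```python
from gettext import gettext as _

msg = _("A CPFP is a transaction that sends an unconfirmed output back to yourself ...")
# A literal inside _() is visible to xgettext/pygettext, so it lands in the .po catalogue.

raw = "some text defined elsewhere"
label = _(raw)
# A variable inside _() is invisible to the extractor; it only translates at runtime
# if the same literal happened to be extracted somewhere else.
```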
Update mouse.py Tap has been stubbed in, so the zoom mouse workaround is no longer needed. This used to crash on Windows.
@@ -100,13 +100,7 @@ class Actions: def mouse_toggle_zoom_mouse(): """Toggles zoom mouse""" - if eye_zoom_mouse.zoom_mouse.enabled: - try: - eye_zoom_mouse.zoom_mouse.disable() - except: - eye_zoom_mouse.zoom_mouse.enabled = False - else: - eye_zoom_mouse.zoom_mouse.enable() + eye_zoom_mouse.toggle_zoom_mouse(not eye_zoom_mouse.zoom_mouse.enabled) def mouse_cancel_zoom_mouse(): """Cancel zoom mouse if pending"""
Install typing for Mac Summary: Breaking this out of When BUILD_CAFFE2 and BUILD_ATEN are removed, we need to install typing on Mac. cc orionr Pull Request resolved:
@@ -37,6 +37,12 @@ else pip install --user pyyaml fi + # Make sure that typing is installed for the codegen of building Aten to work + if [[ -n "$(python -c 'import typing' 2>&1)" ]]; then + echo "Installing typing with pip at $(which pip)" + pip install --user typing + fi + # Build protobuf compiler from third_party if configured to do so if [ -n "${USE_HOST_PROTOC:-}" ]; then echo "USE_HOST_PROTOC is set; building protoc before building Caffe2..."
Update ug015_storm_ref_pivot.rst Added documentation for join()
@@ -106,7 +106,46 @@ Optional parameters: join() ------ -Todo +Returns the current (working) set of nodes **and** the set of nodes that share a specified property of the same type / valu as the original set of nodes. + +``join()`` can be thought of as a ``pivot()`` that retains the original set of nodes and combines (joins) them with the set of nodes that are pivoted to. + +Optional parameters: + +* **Return limit**: specify the maximum number of nodes returned by the ``join()`` query. + + * ``limit=`` (operator syntax) + +**Operator syntax:** + +.. parsed-literal:: + + **join(** *<dstprop>* **,** *<srcprop>* [ **,** limit=** *<num>* ] **)** + +**Macro syntax:** + +N/A + +**Examples:** + +* Given a set of domains (``inet:fqdn``) in the working set, return the domains and their set of immediate subdomains: + :: + join( inet:fqdn:domain, inet:fqdn ) + +* Given a set of email addresses (``inet:email``) in the working set, return the set of domain / registrant email (``inet:whois:regmail``) records associated with those email addresses: + :: + join( inet:whois:regmail:email, inet:email ) + +**Usage notes:** + +* ``join()`` takes its arguments in the order *<dstprop>*, *<srcprop>*, which is the opposite of ``pivot()``, which takes its arguments as *<srcprop>*, *<dstprop>*. ``join()`` may be modified in a future release so its syntax matches that of ``pivot()``. +* Both *<dstprop>* and *<srcprop>* must be specified. +* ``join()`` does not consume nodes by design. +* The ``limit=`` parameter can be provided as input to the ``join()`` operator itself when using Operator syntax. Alternately the ``limit()`` operator_ can be used after the ``join()`` operator (in either Operator or Macro syntax) to specify a limit on the number of nodes returned. +* Because ``join()`` does not consume nodes, this impacts the results returned by either the ``limit=`` parameter or the ``limit()`` operator. + + * The ``limit=`` parameter will return **all** of the original nodes, **plus** the specified number of results (if ``limit=10`` and the number of working nodes was eight, this will return 18 nodes). + * The ``limit()`` operator will return a **total** number of nodes equal to the specified limit, first including the original working nodes and then including resulting nodes (if ``limit=10`` and the number of working nodes was eight, this will return 10 nodes: the original eight, plus two results). refs() ------
Ensure that quickstart examples actually work. Closes
@@ -66,12 +66,12 @@ messages to it from google.cloud import pubsub publisher = pubsub.PublisherClient() - topic = 'projects/{project_id}/topics/{topic}'.format( + topic_name = 'projects/{project_id}/topics/{topic}'.format( project_id=os.getenv('GOOGLE_CLOUD_PROJECT'), topic='MY_TOPIC_NAME', # Set this to something appropriate. ) - publisher.create_topic() - publisher.publish(topic, b'My first message!', spam='eggs') + publisher.create_topic(topic_name) + publisher.publish(topic_name, b'My first message!', spam='eggs') To learn more, consult the `publishing documentation`_. @@ -90,7 +90,7 @@ the topic, and subscribe to that. from google.cloud import pubsub subscriber = pubsub.SubscriberClient() - topic = 'projects/{project_id}/topics/{topic}'.format( + topic_name = 'projects/{project_id}/topics/{topic}'.format( project_id=os.getenv('GOOGLE_CLOUD_PROJECT'), topic='MY_TOPIC_NAME', # Set this to something appropriate. ) @@ -98,7 +98,8 @@ the topic, and subscribe to that. project_id=os.getenv('GOOGLE_CLOUD_PROJECT'), sub='MY_SUBSCRIPTION_NAME', # Set this to something appropriate. ) - subscription = subscriber.create_subscription(topic, subscription) + subscription = subscriber.create_subscription( + name=subscription_name, topic=topic_name) The subscription is opened asychronously, and messages are processed by use of a callback.
Update component.yaml to kfp v2 sdk Update component.yaml to be kfp v2 compatible. In v2, you need to declare the data type for all of the input/output arguments.
@@ -24,23 +24,23 @@ description: | metadata: annotations: {platform: 'OpenSource'} inputs: - - {name: model_id, description: 'Required. Training model ID', default: 'training-dummy'} - - {name: epsilon, description: 'Required. Epsilon value for the FGSM attack', default: '0.2'} - - {name: model_class_file, description: 'Required. pytorch model class file'} - - {name: model_class_name, description: 'Required. pytorch model class name', default: 'model'} - - {name: feature_testset_path, description: 'Required. Feature test dataset path in the data bucket'} - - {name: label_testset_path, description: 'Required. Label test dataset path in the data bucket'} - - {name: loss_fn, description: 'Required. PyTorch model loss function'} - - {name: optimizer, description: 'Required. PyTorch model optimizer'} - - {name: clip_values, description: 'Required. PyTorch model clip_values allowed for features (min, max)'} - - {name: nb_classes, description: 'Required. The number of classes of the model'} - - {name: input_shape, description: 'Required. The shape of one input instance for the pytorch model'} - - {name: data_bucket_name, description: 'Bucket that has the processed data', default: 'training-data'} - - {name: result_bucket_name, description: 'Bucket that has the training results', default: 'training-result'} - - {name: adversarial_accuracy_threshold, description: 'Model accuracy threshold on adversarial samples', default: '0.2'} + - {name: model_id, type: String, description: 'Required. Training model ID', default: 'training-dummy'} + - {name: epsilon, type: String, description: 'Required. Epsilon value for the FGSM attack', default: '0.2'} + - {name: model_class_file, type: String, description: 'Required. pytorch model class file'} + - {name: model_class_name, type: String, description: 'Required. pytorch model class name', default: 'model'} + - {name: feature_testset_path, type: String, description: 'Required. Feature test dataset path in the data bucket'} + - {name: label_testset_path, type: String, description: 'Required. Label test dataset path in the data bucket'} + - {name: loss_fn, type: String, description: 'Required. PyTorch model loss function'} + - {name: optimizer, type: String, description: 'Required. PyTorch model optimizer'} + - {name: clip_values, type: String, description: 'Required. PyTorch model clip_values allowed for features (min, max)'} + - {name: nb_classes, type: String, description: 'Required. The number of classes of the model'} + - {name: input_shape, type: String, description: 'Required. The shape of one input instance for the pytorch model'} + - {name: data_bucket_name, type: String, description: 'Bucket that has the processed data', default: 'training-data'} + - {name: result_bucket_name, type: String, description: 'Bucket that has the training results', default: 'training-result'} + - {name: adversarial_accuracy_threshold, type: String, description: 'Model accuracy threshold on adversarial samples', default: '0.2'} outputs: - - {name: metric_path, description: 'Path for robustness check output'} - - {name: robust_status, description: 'Path for robustness status output'} + - {name: metric_path, type: String, description: 'Path for robustness check output'} + - {name: robust_status, type: String, description: 'Path for robustness status output'} implementation: container: image: aipipeline/robustness-evaluation:pytorch
Apply PR feedback: add validation failed test to `push_msg` test
@@ -192,13 +192,13 @@ async def test_validate_msg(pubsubs_fsub, is_topic_1_val_passed, is_topic_2_val_ return True def failed_sync_validator(peer_id, msg): - raise ValidationError() + return False async def passed_async_validator(peer_id, msg): return True async def failed_async_validator(peer_id, msg): - raise ValidationError() + return False topic_1 = "TEST_SYNC_VALIDATOR" topic_2 = "TEST_ASYNC_VALIDATOR" @@ -462,3 +462,23 @@ async def test_push_msg(pubsubs_fsub, monkeypatch): await asyncio.wait_for(event.wait(), timeout=0.1) # Test: Subscribers are notified when `push_msg` new messages. assert (await sub.get()) == msg_1 + + # Test: add a topic validator and `push_msg` the message that + # does not pass the validation. + # `router_publish` is not called then. + def failed_sync_validator(peer_id, msg): + return False + + pubsubs_fsub[0].set_topic_validator(TESTING_TOPIC, failed_sync_validator, False) + + msg_2 = make_pubsub_msg( + origin_id=pubsubs_fsub[0].my_id, + topic_ids=[TESTING_TOPIC], + data=TESTING_DATA, + seqno=b"\x22" * 8, + ) + + event.clear() + await pubsubs_fsub[0].push_msg(pubsubs_fsub[0].my_id, msg_2) + await asyncio.sleep(0.01) + assert not event.is_set()
doc(tutorial): Fix typo in waitress instructions This should be a colon
@@ -129,7 +129,7 @@ since the latter doesn't work under Windows: .. code:: bash $ pip install waitress - $ waitress-serve --port=8000 look.app + $ waitress-serve --port=8000 look:app Now, in a different terminal, try querying the running app with curl:
Have a NodePort service for ambassador-admin, so that things in the cluster can talk to it. Fixes
@@ -3,6 +3,22 @@ eval $(sh $HERE/../scripts/get_registries.sh) cat <<EOF --- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + service: ambassador-admin + name: ambassador-admin +spec: + type: NodePort + ports: + - name: ambassador-admin + port: 8888 + targetPort: 8888 + selector: + service: ambassador +--- apiVersion: extensions/v1beta1 kind: Deployment metadata:
Event Mgr Invalid function Not sure what this affects, but there is no get_user_connection(user), so the return was always False.
@@ -91,8 +91,8 @@ class EventManager(object): ''' Returns bool if the given user has an open notify socket ''' - connections = self.get_user_connection(user) - return False if connections is None or len(connections) else True + connections = self.get_user_connections(user.team.id, user.id) + return False if connections is None or len(connections) == 0 else True @property def all_connections(self):
Content: update help channel claiming system This commit changes the following inside the help channel guide: * Only one help channel can be claimed at the same time * You can use the search to find your channel * The channel will close after 10 minutes if someone else sends a message
@@ -42,14 +42,14 @@ There are always 3 available help channels waiting to be claimed in the **Python In order to claim one, simply start typing your question into one of these channels. Once your question has been posted, you have claimed this channel, and the channel will be moved down to the **Python Help: Occupied** category. -If you're unable to type into these channels, this means you're currently **on cooldown**. In order to prevent someone from claiming all the channels for themselves, **we only allow someone to claim a new help channel every 15 minutes**. However, if you close your help channel using the `!dormant` command, this cooldown is reset early. +If you're unable to type into these channels, this means you're currently **on cooldown**. In order to prevent someone from claiming all the channels for themselves, **we only allow someone to claim a single help channel at the same time**. You can search in the top right corner of your Discord client `from:yourusername#xxxx` to find back your channel. ![Channel available message](/static/images/content/help_channels/available_message.png) *This message is always posted when a channel becomes available for use.* ## Q: For how long is the channel mine? -The channel is yours until it has been inactive for **30 minutes**. When this happens, we move the channel down to the **Python Help: Dormant** category, and make the channel read-only. After a while, the channel will be rotated back into **Python Help: Available** for the next question. Please try to resist the urge to continue bumping the channel so that it never gets marked as inactive. If nobody is answering your question, you should try to reformulate the question to increase your chances of getting help. +The channel is yours until it has been inactive for **10 minutes**, or 30 minutes until someone participate in the channel. When this happens, we move the channel down to the **Python Help: Dormant** category, and make the channel read-only. After a while, the channel will be rotated back into **Python Help: Available** for the next question. Please try to resist the urge to continue bumping the channel so that it never gets marked as inactive. If nobody is answering your question, you should try to reformulate the question to increase your chances of getting help. ![Channel dormant message](/static/images/content/help_channels/dormant_message.png) *You'll see this message in your channel when the channel is marked as inactive.*
SUPP: More informative IntegrityError on duplicate columns Closes Author: Olof Asbrink Closes from oasbrink/warning and squashes the following commits: [Olof Asbrink] SUPP: More informative IntegrityError on duplicate columns
@@ -33,7 +33,11 @@ class Schema: self._name_locs = dict((v, i) for i, v in enumerate(self.names)) if len(self._name_locs) < len(self.names): - raise com.IntegrityError('Duplicate column names') + duplicate_names = list(self.names) + for v in self._name_locs.keys(): + duplicate_names.remove(v) + raise com.IntegrityError( + 'Duplicate column name(s): {}'.format(duplicate_names)) def __repr__(self): space = 2 + max(map(len, self.names), default=0)
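The duplicate detection itself, extracted as a standalone sketch with hypothetical column names:

```python
names = ["a", "b", "a", "c", "b"]
name_locs = {v: i for i, v in enumerate(names)}   # unique names -> first position seen
duplicate_names = list(names)
for v in name_locs:
    duplicate_names.remove(v)                     # drop one occurrence of every unique name
print(duplicate_names)                            # ['a', 'b'] -- one entry per extra occurrence
```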
Update avcodecs.py set maxrate with bitrate
@@ -859,6 +859,8 @@ class H265VAAPI(H265Codec): # optlist.extend(['-vf', 'scale_vaapi=format=p010']) # optlist.extend(['-hwaccel_output_format', 'vaapi']) optlist.extend(['-vf', 'format=nv12,hwupload']) + if 'bitrate' in safe: + optlist.extend(['-maxrate:v', str(safe['bitrate']) + 'k']) return optlist
Single source version per Fixes
# limitations under the License. ############################################################################## +import os +import re + from setuptools import setup, find_packages -from pyquil import __version__ + +HERE = os.path.abspath(os.path.dirname(__file__)) + + +def read(*parts): + with open(os.path.join(HERE, *parts), 'r') as fp: + return fp.read() + + +def find_version(*file_paths): + version_file = read(*file_paths) + version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M) + if version_match: + return version_match.group(1) + raise RuntimeError("Unable to find version string.") + setup( name="pyquil", - version=__version__, + version=find_version('pyquil', '__init__.py'), author="Rigetti Computing", author_email="[email protected]", description="A Python library to generate Quantum Instruction Language (Quil) Programs.",
Update log.py fix sysloghandler breaking windows
@@ -91,6 +91,14 @@ def checkLoggingConfig(configfile): for k in defaults[s]: if not config.has_option(s, k): config.set(s, k, str(defaults[s][k])) + + # Remove sysLogHandler if you're on Windows + if os.name == 'nt' and 'sysLogHandler' in config.get('handlers', 'keys'): + config.set('handlers', 'keys', config.get('handlers', 'keys').replace('sysLogHandler', '')) + write = True + while config.get('handlers', 'keys').endswith(",") or config.get('handlers', 'keys').endswith(" "): + config.set('handlers', 'keys', config.get('handlers', 'keys')[:-1]) + write = True if write: fp = open(configfile, "w") config.write(fp)
Add event_broker to ExampleTrackerStore Custom tracker accepts optional event_broker parameter which needs to be reflected in ExampleTrackerStore
@@ -109,9 +109,15 @@ def test_find_tracker_store(default_domain): class ExampleTrackerStore(RedisTrackerStore): - def __init__(self, domain, url, port, db, password, record_exp): + def __init__(self, domain, url, port, db, password, record_exp, event_broker=None): super(ExampleTrackerStore, self).__init__( - domain, host=url, port=port, db=db, password=password, record_exp=record_exp + domain, + event_broker=event_broker, + host=url, + port=port, + db=db, + password=password, + record_exp=record_exp, )
[2018.3] Update the latest release information for docs 2018.3.0 is new stable release 2017.7.5 is new previous stable release 2018.3 branch is the "latest" release branch
@@ -250,9 +250,9 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ project = 'Salt' version = salt.version.__version__ -latest_release = '2017.7.4' # latest release -previous_release = '2016.11.9' # latest release from previous branch -previous_release_dir = '2016.11' # path on web server for previous branch +latest_release = '2018.3.0' # latest release +previous_release = '2017.7.5' # latest release from previous branch +previous_release_dir = '2017.7' # path on web server for previous branch next_release = '' # next release next_release_dir = '' # path on web server for next release branch @@ -263,8 +263,8 @@ if on_saltstack: copyright = time.strftime("%Y") # < --- START do not merge these settings to other branches START ---> # -build_type = 'develop' # latest, previous, develop, next -release = version # version, latest_release, previous_release +build_type = 'latest' # latest, previous, develop, next +release = latest_release # version, latest_release, previous_release # < --- END do not merge these settings to other branches END ---> # # Set google custom search engine
Fix script name Huawei.VRP get_fqdn HG-- branch : feature/microservices
@@ -14,7 +14,7 @@ from noc.sa.interfaces.igetfqdn import IGetFQDN class Script(BaseScript): - name = "Cisco.IOS.get_fqdn" + name = "Huawei.VRP.get_fqdn" interface = IGetFQDN rx_hostname = re.compile(r"^sysname\s+(?P<hostname>\S+)", re.MULTILINE) rx_hostname_lldp = re.compile(r"^System name\s+:\s*(?P<hostname>\S+)", re.MULTILINE)
Update logical display regexp Add predicted rotation regexp
# coding=utf-8 ''' -Copyright (C) 2012-2018 Diego Torres Milano +Copyright (C) 2012-2022 Diego Torres Milano Created on Dec 1, 2012 Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,7 +27,7 @@ from typing import Optional from com.dtmilano.android.adb.dumpsys import Dumpsys -__version__ = '21.6.0' +__version__ = '21.7.0' import sys import warnings @@ -576,16 +576,19 @@ class AdbClient: raise RuntimeError("Couldn't find display info in 'wm size', 'dumpsys display' or 'dumpsys window'") def getLogicalDisplayInfo(self): - ''' + """ Gets C{mDefaultViewport} and then C{deviceWidth} and C{deviceHeight} values from dumpsys. - This is a method to obtain display logical dimensions and density - ''' + This is a method to obtain display logical dimensions and density. + Obtains the rotation from dumpsys. + """ self.__checkTransport() logicalDisplayRE = re.compile( - '.*DisplayViewport\{valid=true, .*orientation=(?P<orientation>\d+), .*deviceWidth=(?P<width>\d+), deviceHeight=(?P<height>\d+).*') - for line in self.shell('dumpsys display').splitlines(): - m = logicalDisplayRE.search(line, 0) + r'.*DisplayViewport\{.*valid=true, .*orientation=(?P<orientation>\d+), .*deviceWidth=(?P<width>\d+), ' + r'deviceHeight=(?P<height>\d+).*') + predictedRotationRE = re.compile(r'.*mPredictedRotation=(?P<rotation>\d).*') + for _line in self.shell('dumpsys display').splitlines(): + m = logicalDisplayRE.search(_line, pos=0) if m: self.__displayInfo = {} for prop in ['width', 'height', 'orientation']: @@ -597,6 +600,13 @@ class AdbClient: else: # No available density information self.__displayInfo[prop] = -1.0 + + for _line in self.shell('dumpsys window displays').splitlines(): + m = predictedRotationRE.search(_line, pos=0) + if m: + self.__displayInfo['rotation'] = int(m.group('rotation')) + + if self.__displayInfo: return self.__displayInfo return None
SQLAlchemy transport: Use Query.with_for_update() instead of deprecated Query.with_lockmode(). Based on SQLAlchemy documentation: * method sqlalchemy.orm.query.Query.with_lockmode(mode) with mode='update' - translates to FOR UPDATE (Deprecated since version 0.9) * method sqlalchemy.orm.query.Query.with_for_update() When called with no arguments, the resulting SELECT statement will have a FOR UPDATE clause appended.
@@ -99,7 +99,7 @@ class Channel(virtual.Channel): self.session.execute('BEGIN IMMEDIATE TRANSACTION') try: msg = self.session.query(self.message_cls) \ - .with_lockmode('update') \ + .with_for_update() \ .filter(self.message_cls.queue_id == obj.id) \ .filter(self.message_cls.visible != False) \ .order_by(self.message_cls.sent_at) \
Pin graphene to <3.2 Summary & Motivation: It started breaking a ton of our tests. Doing this to unbork trunk Test Plan: BK Reviewers: alangenfeld, gibsondan, jmsanders Pull Request:
@@ -34,7 +34,7 @@ def get_version() -> str: packages=find_packages(exclude=["dagster_graphql_tests*"]), install_requires=[ f"dagster{pin}", - "graphene>=3", + "graphene>=3,<3.2", "gql[requests]", "requests", "starlette", # used for run_in_threadpool utility fn
Update contributing.rst new Discussion link
@@ -13,7 +13,7 @@ First off, thanks for taking the time to contribute! and `Pandas <http://pandas.pydata.org/pandas*docs/stable/contributing.html>`_ contributing guides. If you seek **support** for your argopy usage or if you don't want to read -this whole thing and just have a question: `visit the chat room at gitter <https://gitter.im/Argo-floats/argopy>`_. +this whole thing and just have a question: `visit our Discussion forum <https://github.com/euroargodev/argopy/discussions>`_. Where to start? =============== @@ -21,18 +21,18 @@ Where to start? All contributions, bug reports, bug fixes, documentation improvements, enhancements, and ideas are welcome. -We will complete this document for guidelines with regard to each of these contributions over time. - If you are brand new to *argopy* or open source development, we recommend going through the `GitHub "issues" tab <https://github.com/euroargodev/argopy/issues>`_ to find issues that interest you. There are a number of issues listed under `Documentation <https://github.com/euroargodev/argopy/issues?q=is%3Aissue+is%3Aopen+label%3Adocumentation>`_ -and `good first issue +and `Good first issue <https://github.com/euroargodev/argopy/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22>`_ where you could start out. Once you've found an interesting issue, you can return here to get your development environment setup. -Please don't file an issue to ask a question, instead `visit the chat room at gitter <https://gitter.im/Argo-floats/argopy>`_. +Please don't file an issue to ask a question, instead `visit our Discussion forum <https://github.com/euroargodev/argopy/discussions>`_. +where a number of items are listed under `Documentation <https://github.com/euroargodev/argopy/discussions?discussions_q=label%3Adocumentation+>`_ +and `Good first issue <https://github.com/euroargodev/argopy/discussions?discussions_q=label%3A%22good+first+issue%22++>`_ .. _contributing.bug_reports:
Moved assert to check out_path earlier. Preserve temporary output directory with -d option.
@@ -36,9 +36,12 @@ class openram_test(openram_test): os.chmod(out_path, 0o0750) # specify the same verbosity for the system call - verbosity = "" + opts = "" for i in range(OPTS.debug_level): - verbosity += " -v" + opts += " -v" + # keep the temp directory around + if not OPTS.purge_temp: + opts += " -d" OPENRAM_HOME = os.path.abspath(os.environ.get("OPENRAM_HOME")) @@ -46,12 +49,15 @@ class openram_test(openram_test): cmd = "python3 {0}/openram.py -n -o {1} -p {2} {3} config_20_{4}.py 2>&1 > {5}/output.log".format(OPENRAM_HOME, out_file, out_path, - verbosity, + opts, OPTS.tech_name, out_path) debug.info(1, cmd) os.system(cmd) + # check that the output path was created + self.assertEqual(os.path.exists(out_path),True) + # assert an error until we actually check a resul for extension in ["gds", "v", "lef", "sp"]: filename = "{0}/{1}.{2}".format(out_path,out_file,extension) @@ -64,7 +70,6 @@ class openram_test(openram_test): self.assertTrue(len(files)>0) # Make sure there is any .html file - if os.path.exists(out_path): datasheets = glob.glob('{0}/*html'.format(out_path)) self.assertTrue(len(datasheets)>0) @@ -76,8 +81,8 @@ class openram_test(openram_test): self.assertEqual(len(re.findall('WARNING',output)),0) - # now clean up the directory - if os.path.exists(out_path): + # now clean up the output directory (or preserve if specified to preserve temp dirs) + if os.path.exists(out_path) and OPTS.purge_temp: shutil.rmtree(out_path, ignore_errors=True) self.assertEqual(os.path.exists(out_path),False)
Fix static build on Windows Summary: Tested locally. It could be now be started by running `set EXTRA_CAFFE2_CMAKE_FLAGS= -DTORCH_STATIC=1` before build. If we want to make sure it works, then maybe we should add it into CI. Pull Request resolved:
@@ -258,8 +258,8 @@ endif() if (TORCH_STATIC) - target_compile_definitions(torch PUBLIC TORCH_BUILD_STATIC_LIBS) add_library(torch STATIC ${TORCH_SRCS}) + target_compile_definitions(torch PUBLIC TORCH_BUILD_STATIC_LIBS) else() add_library(torch SHARED ${TORCH_SRCS}) endif()
fix serialization with legacy segmenter addressed error encountered in
@@ -123,7 +123,9 @@ def serialize(records: Sequence[ocr_record], idx += 1 # build region and line type dict - page['types'] = list(regions.keys()) + list(set(line.script for line in records if line.script is not None)) + page['types'] = list(set(line.script for line in records if line.script is not None)) + if regions is not None: + page['types'].extend(list(regions.keys())) is_in_region = -1 for idx, record in enumerate(records):
Docs- Update python-libselinux to python3-libselinux EL8 Associated issue
@@ -21,7 +21,7 @@ CentOS 8 .. code-block:: bash - $ sudo yum install -y gcc python3-pip python3-devel openssl-devel libselinux-python + $ sudo yum install -y gcc python3-pip python3-devel openssl-devel python3-libselinux Ubuntu 16.x -----------
Init click option will generate: batch files, config files (with overwrite option), and a certificate/key pair (with overwrite option)
@@ -202,6 +202,37 @@ def sync(**kwargs): run_stats.log_end(logger) [email protected]() [email protected]_option('-h', '--help') [email protected]_context +def init(ctx): +#genrerate example configs + sync = os.path.abspath('user-sync-config.yml') + umapi = os.path.abspath('connector-umapi.yml') + ldap = os.path.abspath('connector-ldap.yml') + existing = "\n".join({f for f in (sync, umapi, ldap) if os.path.exists(f)}) + if existing: + if click.confirm('\nWarning: files already exist: \n{}\nOverwrite?'.format(existing)): + ctx.forward(example_config, root='user-sync-config.yml', umapi='connector-umapi.yml', + ldap='connector-ldap.yml') +#generate private.key and certificate_pub.crt + ctx.forward(certgen, randomize=True, key='private.key', certificate='certificate_pub.crt') + cwd = os.getcwd() +#generate batch files + with open(os.path.join(cwd, 'Run_UST_Test_Mode.bat'), 'w+') as OPATH: + OPATH.writelines(['mode 155,50', '\ncd /D "%~dp0"', '\nuser-sync.exe --process-groups --users mapped -t', + '\npause']) + with open(os.path.join(cwd, "Run_UST_Live.bat"), 'w+') as OPATH: + OPATH.writelines( + ['mode 155,50', '\ncd /D "%~dp0"', '\nuser-sync.exe --configure-filename user-sync-config.yml']) + + + + + + + + @main.command() @click.help_option('-h', '--help') @click.option('--root', help="Filename of root user sync config file",
fix CP timestep database field type there was no bug caused by it being a float field, I'm just changing for correctness
@@ -410,7 +410,7 @@ class ElectricTariffModel(models.Model): chp_standby_rate_us_dollars_per_kw_per_month = models.FloatField(blank=True, null=True) chp_does_not_reduce_demand_charges = models.BooleanField(null=True, blank=True) emissions_region = models.TextField(null=True, blank=True) - coincident_peak_load_active_timesteps = ArrayField(ArrayField(models.FloatField(null=True, blank=True), null=True, default=list), null=True, default=list) + coincident_peak_load_active_timesteps = ArrayField(ArrayField(models.IntegerField(null=True, blank=True), null=True, default=list), null=True, default=list) coincident_peak_load_charge_us_dollars_per_kw = ArrayField(models.FloatField(null=True, blank=True), null=True, default=list) emissions_factor_CO2_pct_decrease = models.FloatField(null=True, blank=True) emissions_factor_NOx_pct_decrease = models.FloatField(null=True, blank=True)
Fix a problem where receipts don't exist. It occurs when invoke results are not cached.
@@ -423,6 +423,7 @@ class BlockChain: receipts, next_prep = self.__invoke_results.get(block.header.hash, (None, None)) if receipts is None and need_to_score_invoke: self.get_invoke_func(block.header.height)(block, self.__last_block) + receipts, next_prep = self.__invoke_results.get(block.header.hash, (None, None)) if not need_to_write_tx_info: receipts = None
Warn when a session is closed quickly Fixes
@@ -5,8 +5,9 @@ Session class for interacting with the FiftyOne App. | `voxel51.com <https://voxel51.com/>`_ | """ -import logging from collections import defaultdict +import logging +import time import fiftyone.core.client as foc import fiftyone.core.service as fos @@ -135,6 +136,10 @@ class Session(foc.HasClient): def __init__(self, dataset=None, view=None, port=5151, remote=False): self._port = port self._remote = remote + # maintain a reference to prevent garbage collection + self._get_time = time.perf_counter + self._WAIT_INSTRUCTIONS = _WAIT_INSTRUCTIONS + global _server_services # pylint: disable=global-statement if port not in _server_services: _server_services[port] = fos.ServerService(port) @@ -157,6 +162,7 @@ class Session(foc.HasClient): _REMOTE_INSTRUCTIONS.strip() % (self.server_port, self.server_port, self.server_port) ) + self._start_time = self._get_time() def __del__(self): """Deletes the Session by removing it from the `_subscribed_sessions` @@ -165,6 +171,10 @@ class Session(foc.HasClient): subscribed. """ try: + if self._get_time() - self._start_time < 5: + # logger may already have been garbage-collected + print(self._WAIT_INSTRUCTIONS) + global _subscribed_sessions # pylint: disable=global-statement _subscribed_sessions[self._port].discard(self) @@ -294,3 +304,9 @@ ssh -N -L 5151:127.0.0.1:%d [<username>@]<hostname> and then connect to the app on that machine using either `fiftyone app connect` or from Python via `fiftyone.launch_app()`. """ + +_WAIT_INSTRUCTIONS = """ +A session appears to have terminated shortly after it was started. If you +intended to start an app instance or a remote session from a script, you +should call `session.wait()` to keep the session (and the script) alive. +"""
Remove the trailing "," in signaturesDB. With the trailing comma, the worst-case statement `if (False or False or False,)` simplifies to `if (False,):`; a one-element tuple is always truthy, so the code below is unreachable.
@@ -129,7 +129,7 @@ class SignatureDB(object): if ( not self.enable_online_lookup or byte_sig in self.online_lookup_miss - or time.time() < self.online_lookup_timeout, + or time.time() < self.online_lookup_timeout ): return []
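The pitfall fixed here - a trailing comma turning the whole condition into a one-element tuple - is easy to reproduce in isolation. A minimal sketch (generic Python, not the signature-lookup code itself):

    # A trailing comma makes the condition a one-element tuple.
    buggy = (False or False or False,)   # evaluates to (False,)
    fixed = (False or False or False)    # evaluates to False

    print(bool(buggy))  # True: any non-empty tuple is truthy, so the early return always fires
    print(bool(fixed))  # False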
fix store_credentials: store creds correctly when authorized_user_filename is passed.
@@ -163,7 +163,7 @@ def oauth( if not creds: creds = flow(scopes=scopes) - store_credentials(creds) + store_credentials(creds, filename=authorized_user_filename) client = Client(auth=creds) return client
Adds more complex RandomSearchTuner test using input LSTMModule; tests basic functionality + determinism
@@ -6,6 +6,7 @@ import numpy as np import torch from metal.end_model import EndModel +from metal.modules import LSTMModule from metal.tuners.random_tuner import RandomSearchTuner from metal.utils import LogWriter @@ -108,6 +109,76 @@ class RandomSearchModelTunerTest(unittest.TestCase): # Clean up rmtree(tuner.log_rootdir) + def test_tuner_with_lstm(self): + """Test basic functionality *and* determinism/seeding of the tuner + with a more complex EndModel having an input module""" + # From tests/metal/modules/test_lstm.py; TODO: Refactor this + n = 1000 + SEQ_LEN = 5 + MAX_INT = 8 + X = torch.randint(1, MAX_INT + 1, (n, SEQ_LEN)).long() + Y = torch.zeros(n).long() + needles = np.random.randint(1, SEQ_LEN - 1, n) + for i in range(n): + X[i, needles[i]] = MAX_INT + 1 + Y[i] = X[i, needles[i] + 1] + Xs = [X[:800], X[800:900], X[900:]] + Ys = [Y[:800], Y[800:900], Y[900:]] + + embed_size = 4 + hidden_size = 10 + vocab_size = MAX_INT + 2 + + # Initialize LSTM module here + # TODO: Note the issue is that we will need to re-init each time... + lstm_module = LSTMModule( + embed_size, + hidden_size, + vocab_size=vocab_size, + seed=123, + bidirectional=True, + verbose=False, + lstm_reduction="attention", + ) + + # Set up RandomSearchTuner + tuner = RandomSearchTuner(EndModel, log_writer_class=LogWriter) + + # EndModel init kwargs + init_kwargs = { + "seed": 123, + "batchnorm": True, + "k": MAX_INT, + "input_module": lstm_module, + "layer_out_dims": [hidden_size * 2, MAX_INT], + "input_batchnorm": True, + "verbose": False, + } + + # Set up search space + # NOTE: No middle layers here, so these should return the same scores! + search_space = {"middle_dropout": [0.0, 1.0]} + + # Run random grid search + tuner.search( + search_space, + (Xs[1], Ys[1]), + init_kwargs=init_kwargs, + train_args=[(Xs[0], Ys[0])], + train_kwargs={"n_epochs": 2}, + verbose=False, + ) + + # Load the log + with open(tuner.report_path, "r") as f: + tuner_report = json.load(f) + + # Confirm determinism + self.assertEqual(tuner_report[0]["score"], tuner_report[1]["score"]) + + # Clean up + rmtree(tuner.log_rootdir) + if __name__ == "__main__": unittest.main()
Remove use_count() == 1 in Tensor::Extend Summary: Pull Request resolved: As suggested by jerryzh168, a temporary fix for a new constraint that was added is to remove this assert. Long term, jerryzh168 is going to work out a better way of handling this.
@@ -275,9 +275,6 @@ class CAFFE2_API TensorImpl : public c10::intrusive_ptr_target { CAFFE_ENFORCE_GE_WITH_CALLER(dims_.size(), 1); CAFFE_ENFORCE_GE_WITH_CALLER( num, 0, "`num` must be non-negative for Extend"); - CAFFE_ENFORCE( - storage_.use_count() == 1, - "Can't call Extend on shared storage, please call Resize instead"); auto newDims = dims_; newDims[0] += num; if (!storage_->data()) {
fix test Summary: a test that wasn't on CI, but is tested internally. Pull Request resolved:
@@ -6,7 +6,6 @@ from __future__ import unicode_literals import torch.jit import torch.nn as nn import torch.nn.functional as F - from common_utils import TestCase # TODO : Quantizer tests to be integrated with CI once quantizer intf hardened @@ -219,6 +218,7 @@ class QuantizerTestCase(TestCase): eagerDict = eagerQuantObj.getQParamDict() activationDict = activationQuantObj.getQParamDict() - self.assertTrue('z' in eagerDict and 'z' in activationDict) - self.assertAlmostEqual(eagerDict["z"][0], activationDict["z"][0], places=15) - self.assertAlmostEqual(eagerDict["z"][1], activationDict["z"][1], places=15) + # TODO - fix @eellison + self.assertTrue('z' in eagerDict and 'z.1' in activationDict) + self.assertAlmostEqual(eagerDict["z"][0], activationDict["z.1"][0], places=15) + self.assertAlmostEqual(eagerDict["z"][1], activationDict["z.1"][1], places=15)
Added shell autocomplete config instructions: how to configure autocomplete for the GE CLI.
@@ -957,6 +957,34 @@ If you have built a suite called ``churn_model_assumptions`` and a postgres data This tap can then be run nightly before your model makes churn predictions! +Shell autocompletion for the CLI +====================== + +If you want to enable autocompletion for the Great Expectations CLI, you can execute following commands in your shell (or add them to your .bashrc/.zshrc or ~/.config/fish/completions/): + +.. code-block:: bash + + $ eval "$(_GREAT_EXPECTATIONS_COMPLETE=source_bash great_expectations)" + +for bash + +.. code-block:: zsh + + $ eval "$(_GREAT_EXPECTATIONS_COMPLETE=source_zsh great_expectations)" + +for zsh, and + +.. code-block:: fish + + $ eval (env _GREAT_EXPECTATIONS_COMPLETE=source_fish great_expectations) + +for fish (you'll have to create a ~/.config/fish/completions/great_expectations.fish file). + +Alternatively, if you don't want the eval command to slow down your shell startup time, you can instead add the commands as a script to your shell profile. For more info, see the official `Click documentation`_. + +.. _Click documentation: https://click.palletsprojects.com/en/7.x/bashcomplete/ + + Miscellaneous ======================
Adding APIs for listing/adding/removing notification ignores - Adding API endpoint for deleting notifications
@@ -15,7 +15,7 @@ from ..socketio import socketio from ..misc import ratelimit, POSTING_LIMIT, AUTH_LIMIT, captchas_required from ..models import Sub, User, SubPost, SubPostComment, SubMetadata, SubPostCommentVote, SubPostVote, SubSubscriber from ..models import SiteMetadata, UserMetadata, Message, SubRule, Notification, SubMod, InviteCode, UserStatus -from ..models import SubPostMetadata +from ..models import SubPostMetadata, UserIgnores from ..caching import cache from ..config import config from ..badges import badges @@ -1000,6 +1000,91 @@ def get_notifications(): return jsonify(notifications=notification_list) [email protected]('/notifications', methods=['DELETE']) +@jwt_required +def delete_notification(): + uid = get_jwt_identity() + if not request.is_json: + return jsonify(msg="Missing JSON in request"), 400 + + notification_id = request.json.get('notificationId', None) + if not notification_id: + return jsonify(error="The notificationId parameter is required"), 400 + + try: + notification = Notification.get((Notification.id == notification_id) & (Notification.target == uid)) + except Notification.DoesNotExist: + return jsonify(error="Notification does not exist"), 404 + + notification.delete_instance() + return jsonify(status="ok") + + [email protected]('/notifications/ignore', methods=['GET']) +@jwt_required +def get_ignored(): + """ Lists all the users the user has blocked. """ + uid = get_jwt_identity() + + ignores = UserIgnores.select(User.name).join(User, on=User.uid == UserIgnores.target) + ignores = ignores.where(UserIgnores.uid == uid).dicts() + + return jsonify(ignores=list(ignores)) + + [email protected]('/notifications/ignore', methods=['POST']) +@jwt_required +def ignore_notifications(): + """ Ignores all notifications coming from a certain user. """ + uid = get_jwt_identity() + if not request.is_json: + return jsonify(msg="Missing JSON in request"), 400 + + user = request.json.get('user', None) + if not user: + return jsonify(error="The user parameter is required"), 400 + + try: + user = User.get(User.name == user) + except User.DoesNotExist: + return jsonify(error="The user provided does not exist"), 400 + + try: + UserIgnores.get((UserIgnores.uid == uid) & (UserIgnores.target == user.uid)) + return jsonify(error="User is already blocked") + except UserIgnores.DoesNotExist: + pass + + UserIgnores.create(uid=uid, target=user.uid) + return jsonify(status='ok') + + [email protected]('/notifications/ignore', methods=['DELETE']) +@jwt_required +def unignore_notifications(): + """ Removes an ignore. """ + uid = get_jwt_identity() + if not request.is_json: + return jsonify(msg="Missing JSON in request"), 400 + + user = request.json.get('user', None) + if not user: + return jsonify(error="The user parameter is required"), 400 + + try: + user = User.get(User.name == user) + except User.DoesNotExist: + return jsonify(error="The user provided does not exist"), 400 + + try: + ignore = UserIgnores.get((UserIgnores.uid == uid) & (UserIgnores.target == user.uid)) + ignore.delete_instance() + except UserIgnores.DoesNotExist: + return jsonify(error="User is not blocked") + + return jsonify(status='ok') + + @API.route('/user/settings', methods=['GET']) @jwt_required def get_settings():
better form spacing and remove redundant comment. Restricting to project target languages is not useful anyway, since it prevents the user from creating new languages directly from HQ. Transifex adds any new languages pushed by HQ.
@@ -105,8 +105,6 @@ class AppTranslationsForm(forms.Form): ) transifex_project_slug = forms.ChoiceField(label=ugettext_lazy("Trasifex project"), choices=(), required=True) - # Unfortunately transifex api does not provide a way to pull all possible target languages and - # allow us to just add a checkbox instead of selecting a single/multiple target languages at once target_lang = forms.ChoiceField(label=ugettext_lazy("Translated Language"), choices=([(None, ugettext_lazy('Select Translated Language'))] + langcodes.get_all_langs_for_select()), @@ -124,8 +122,8 @@ class AppTranslationsForm(forms.Form): self.domain = domain self.helper = FormHelper() self.helper.form_tag = False - self.helper.label_class = 'col-sm-3 col-md-4 col-lg-2' - self.helper.field_class = 'col-sm-4 col-md-5 col-lg-3' + self.helper.label_class = 'col-sm-4 col-md-4 col-lg-3' + self.helper.field_class = 'col-sm-6 col-md-6 col-lg-5' self.fields['app_id'].choices = tuple((app.id, app.name) for app in get_brief_apps_in_domain(domain)) if settings.TRANSIFEX_DETAILS:
linemin was miscalculating the error bar (standard deviation was divided by N instead of sqrt(N)).
@@ -98,7 +98,7 @@ def line_minimization( data, coords = vmc(wf, coords, accumulators={"pgrad": pgrad_acc}, **vmcoptions) df = pd.DataFrame(data)[warmup:] en = np.mean(df["pgradtotal"]) - en_err = np.std(df["pgradtotal"]) / len(df) + en_err = np.std(df["pgradtotal"]) / np.sqrt(len(df)) dpH = np.mean(df["pgraddpH"], axis=0) dp = np.mean(df["pgraddppsi"], axis=0) dpdp = np.mean(df["pgraddpidpj"], axis=0)
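The corrected expression is the standard error of the mean, sigma / sqrt(N), rather than sigma / N. A small illustration with synthetic data (not the project's own data), showing how far off the old formula is:

    import numpy as np

    samples = np.random.normal(loc=0.0, scale=1.0, size=400)
    std = np.std(samples)

    old_err = std / len(samples)            # what the buggy code computed
    new_err = std / np.sqrt(len(samples))   # standard error of the mean

    print(old_err, new_err)  # the old value is sqrt(400) = 20 times too small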
ebuild.processor: inherit_handler(): handle empty string arg to inherit. Previously these would throw a traceback.
@@ -935,7 +935,7 @@ class EbuildProcessor(object): finally: self.unlock() -def inherit_handler(ecache, ebp, line, updates=None): +def inherit_handler(ecache, ebp, line=None, updates=None): """Callback for implementing inherit digging into eclass_cache. Not for normal consumption.
library: import ca_test_common in test_ceph_key since we added `ca_test_common.py` let's use it in `test_ceph_key.py`
import json import os -import sys import mock import pytest -from ansible.module_utils import basic -from ansible.module_utils._text import to_bytes +import ca_test_common import ceph_key -# From ceph-ansible documentation -def set_module_args(args): - if '_ansible_remote_tmp' not in args: - args['_ansible_remote_tmp'] = '/tmp' - if '_ansible_keep_remote_files' not in args: - args['_ansible_keep_remote_files'] = False - - args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) - basic._ANSIBLE_ARGS = to_bytes(args) - - -class AnsibleExitJson(Exception): - pass - - -class AnsibleFailJson(Exception): - pass - - -def exit_json(*args, **kwargs): - raise AnsibleExitJson(kwargs) - - -def fail_json(*args, **kwargs): - raise AnsibleFailJson(kwargs) - - @mock.patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'docker'}) class TestCephKeyModule(object): @@ -560,18 +531,17 @@ class TestCephKeyModule(object): @mock.patch('ceph_key.exec_commands') @pytest.mark.parametrize('output_format', ['json', 'plain', 'xml', 'yaml']) def test_state_info(self, m_exec_commands, m_exit_json, output_format): - set_module_args({"state": "info", + ca_test_common.set_module_args({"state": "info", "cluster": "ceph", "name": "client.admin", - "output_format": output_format} - ) - m_exit_json.side_effect = exit_json + "output_format": output_format}) + m_exit_json.side_effect = ca_test_common.exit_json m_exec_commands.return_value = (0, ['ceph', 'auth', 'get', 'client.admin', '-f', output_format], '[{"entity":"client.admin","key":"AQC1tw5fF156GhAAoJCvHGX/jl/k7/N4VZm8iQ==","caps":{"mds":"allow *","mgr":"allow *","mon":"allow *","osd":"allow *"}}]', # noqa: E501 'exported keyring for client.admin') - with pytest.raises(AnsibleExitJson) as result: + with pytest.raises(ca_test_common.AnsibleExitJson) as result: ceph_key.run_module() result = result.value.args[0] @@ -584,14 +554,13 @@ class TestCephKeyModule(object): @mock.patch('ansible.module_utils.basic.AnsibleModule.fail_json') def test_state_info_invalid_format(self, m_fail_json): invalid_format = 'txt' - set_module_args({"state": "info", + ca_test_common.set_module_args({"state": "info", "cluster": "ceph", "name": "client.admin", - "output_format": invalid_format} - ) - m_fail_json.side_effect = fail_json + "output_format": invalid_format}) + m_fail_json.side_effect = ca_test_common.fail_json - with pytest.raises(AnsibleFailJson) as result: + with pytest.raises(ca_test_common.AnsibleFailJson) as result: ceph_key.run_module() result = result.value.args[0]
Update apt_38.txt: root domains + sub-domains listed explicitly for detection.
# Reference: https://otx.alienvault.com/pulse/5bb4bdccd63eeb0a87994870 bitdefs.ignorelist.com + +# Reference: https://twitter.com/ccxsaber/status/1204007469053165570 + +gphi.site +gphi-gsaeyheq.top +gphi-adhaswe.xyz +updatesinfos.com +a.updatesinfos.com +b.updatesinfos.com +ip1.s.gphi.site +ip2.s.gphi.site +ip1.gphi-gsaeyheq.top +ip1.gphi-adhaswe.xyz
Updating details and adding a video link. Added: detail about elderly man (75 years old); video from Facebook showing alternate angle of event. Removed: duplicate Twitter link related to the police tackling the man being interviewed, same link as is listed on line 8.
@@ -9,14 +9,14 @@ Three police officers run over to and tackle man with hands raised giving an int ### Police shove elderly man, causing him to fall on the back of his head | June 4th -Two police officers shove an unarmed, elderly man, who falls backwards and strikes his head on the concrete sidewalk. He appears to be bleeding. +Two police officers shove an unarmed, 75 year old man, who falls backwards and strikes his head on the concrete sidewalk. He appears to be bleeding. Buffalo P.D. released an official statement saying that he "tripped and fell." They have now opened an investigation into the incident. Location: Niagara Square **Links** * https://twitter.com/WBFO/status/1268712530358292484?s=20 * https://news.wbfo.org/post/graphic-video-buffalo-police-officers-violently-shove-man-ground -* https://twitter.com/secretlaith/status/1268251322467450880 +* https://www.facebook.com/watch/?ref=external&v=2489206818056597 * https://www.wivb.com/news/five-people-arrested-one-person-injured-during-protest-activity-in-niagara-square/ ## Rochester
Fix a broken test for nomination reason. We no longer return 400 if a reason is missing.
@@ -80,7 +80,7 @@ class CreationTests(APISubdomainTestCase): 'actor': ['This field is required.'] }) - def test_returns_400_for_missing_reason(self): + def test_returns_201_for_missing_reason(self): url = reverse('bot:nomination-list', host='api') data = { 'user': self.user.id, @@ -88,10 +88,7 @@ class CreationTests(APISubdomainTestCase): } response = self.client.post(url, data=data) - self.assertEqual(response.status_code, 400) - self.assertEqual(response.json(), { - 'reason': ['This field is required.'] - }) + self.assertEqual(response.status_code, 201) def test_returns_400_for_bad_user(self): url = reverse('bot:nomination-list', host='api')
PlugLayout: Support for embedded PlugLayout. This enables us to embed the PlugLayout in a larger layout that already includes things like a scroll bar.
@@ -88,11 +88,15 @@ class PlugLayout( GafferUI.Widget ) : # We use this when we can't find a ScriptNode to provide the context. __fallbackContext = Gaffer.Context() - def __init__( self, parent, orientation = GafferUI.ListContainer.Orientation.Vertical, layoutName = "layout", rootSection = "", **kw ) : + def __init__( self, parent, orientation = GafferUI.ListContainer.Orientation.Vertical, layoutName = "layout", rootSection = "", embedded = False, **kw ) : assert( isinstance( parent, ( Gaffer.Node, Gaffer.Plug ) ) ) - self.__layout = _TabLayout( orientation ) if isinstance( parent, Gaffer.Node ) and not rootSection else _CollapsibleLayout( orientation ) + # embedded indicates that the PlugLayout is embedded in another layout + # which affects how the widget is built + self.__embedded = embedded + + self.__layout = _TabLayout( orientation, embedded = embedded ) if isinstance( parent, Gaffer.Node ) and not rootSection else _CollapsibleLayout( orientation ) GafferUI.Widget.__init__( self, self.__layout, **kw ) @@ -598,7 +602,9 @@ class _Layout( GafferUI.Widget ) : class _TabLayout( _Layout ) : - def __init__( self, orientation, **kw ) : + def __init__( self, orientation, embedded = False, **kw ) : + + self.__embedded = embedded self.__mainColumn = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical ) @@ -607,6 +613,12 @@ class _TabLayout( _Layout ) : with self.__mainColumn : self.__widgetsColumn = GafferUI.ListContainer( self.orientation(), spacing = 4, borderWidth = 4 ) self.__tabbedContainer = GafferUI.TabbedContainer() + # if the TabLayout is embedded, we want to restrict the maximum width/height depending on the orientation + if self.__embedded : + if self.orientation() == GafferUI.ListContainer.Orientation.Vertical : + self.__tabbedContainer._qtWidget().setSizePolicy( QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum ) ) + else : + self.__tabbedContainer._qtWidget().setSizePolicy( QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Expanding ) ) self.__currentTabChangedConnection = self.__tabbedContainer.currentChangedSignal().connect( Gaffer.WeakMethod( self.__currentTabChanged ) @@ -625,11 +637,16 @@ class _TabLayout( _Layout ) : for name, subsection in section.subsections.items() : tab = existingTabs.get( name ) if tab is None : + # Use scroll bars only when the TabLayout is not embedded + if self.__embedded : + tab = GafferUI.Frame( borderWidth = 0, borderStyle = GafferUI.Frame.BorderStyle.None ) + else : tab = GafferUI.ScrolledContainer( borderWidth = 8 ) if self.orientation() == GafferUI.ListContainer.Orientation.Vertical : tab.setHorizontalMode( GafferUI.ScrolledContainer.ScrollMode.Never ) else : tab.setVerticalMode( GafferUI.ScrolledContainer.ScrollMode.Never ) + tab.setChild( _CollapsibleLayout( self.orientation() ) ) tab.getChild().update( subsection ) updatedTabs[name] = tab
popover: Rename show_user_info_popover function. This commit renames the show_user_info_popover function to show_user_info_popover_for_message, as it is used to open the popover for users who are essentially related to a particular message, such as the message sender and mentioned users.
@@ -247,7 +247,7 @@ exports._test_calculate_info_popover_placement = calculate_info_popover_placemen // element is the target element to pop off of // user is the user whose profile to show // message is the message containing it, which should be selected -function show_user_info_popover(element, user, message) { +function show_user_info_popover_for_message(element, user, message) { const last_popover_elem = current_message_info_popover_elem; exports.hide_all(); if (last_popover_elem !== undefined && last_popover_elem.get()[0] === element) { @@ -701,7 +701,7 @@ exports.show_sender_info = function () { const message = current_msg_list.get(rows.id($message)); const user = people.get_by_user_id(message.sender_id); - show_user_info_popover($sender[0], user, message); + show_user_info_popover_for_message($sender[0], user, message); if (current_message_info_popover_elem) { focus_user_info_popover_item(); } @@ -733,8 +733,7 @@ exports.register_click_handlers = function () { e.stopPropagation(); const message = current_msg_list.get(rows.id(row)); const user = people.get_by_user_id(message.sender_id); - - show_user_info_popover(this, user, message); + show_user_info_popover_for_message(this, user, message); }, ); @@ -756,7 +755,7 @@ exports.register_click_handlers = function () { } else { user = people.get_by_email(email); } - show_user_info_popover(this, user, message); + show_user_info_popover_for_message(this, user, message); }); $("#main_div").on("click", ".user-group-mention", function (e) {
api/iodevices: simplify LUMPDevice API. All these devices can do is read or write values at a particular mode. There is also no need for separate mode setters.
@@ -12,61 +12,23 @@ class LUMPDevice(): """ pass - def read(self): - """Read latest values from the sensor. - - Returns: - ``tuple``: Values read from the sensor. - """ - pass - - def write(self, values): - """Write values to the sensor. + def read(self, mode): + """Read values from a given mode. Arguments: - data (``tuple``): Values to be written. - """ - pass - - def get_mode(self): - """Get the currently active mode or combi-mode. + mode (``int``): Device mode. Returns: ``tuple``: Values read from the sensor. """ pass - def set_mode(self, mode): - """Set the sensor to a given mode. - - Arguments: - mode (``int``): Mode index. - """ - pass - - def set_combi_mode(self, todo1, todo2): - """Set the sensor to a given combi-mode. - - TODO: Need to figure out how this works. + def write(self, mode, values): + """Write values to the sensor. Arguments: - TODO (``tuple``): TODO. - """ - pass - - def format(self): - """Get the data type for the currently active mode. - - Returns: - ``str``: Struct format specifier. - """ - pass - - def id(self): - """Get the unique type ID for this device. - - Returns: - ``int``: Device ID. + mode (``int``): Device mode. + data (``tuple``): Values to be written. """ pass
run_isolated: update isolated client revision This is to take crrev.com/4f137b333a6be75dd13c2fb375c97c444ea40979
@@ -113,7 +113,7 @@ ISOLATED_CLIENT_DIR = u'ic' # Take revision from # https://ci.chromium.org/p/infra-internal/g/infra-packagers/console ISOLATED_PACKAGE = 'infra/tools/luci/isolated/${platform}' -ISOLATED_REVISION = 'git_revision:2ee27ca739de90c29d46eb3af3371a42fec3ebff' +ISOLATED_REVISION = 'git_revision:1190afd45e12e2adbdde61b24a14365f0dc415e3' # Keep synced with task_request.py CACHE_NAME_RE = re.compile(r'^[a-z0-9_]{1,4096}$')
Move fio runtime to a data disk. The OS disk might not always have the required disk capacity to run fio.
@@ -12,6 +12,8 @@ from lisa import ( TestCaseMetadata, TestSuite, TestSuiteMetadata, + schema, + search_space, simple_requirement, ) from lisa.environment import Environment @@ -74,19 +76,27 @@ class CPUSuite(TestSuite): The cpu hotplug steps are same as `verify_cpu_hot_plug` test case. """, priority=4, + requirement=simple_requirement( + disk=schema.DiskOptionSettings( + data_disk_count=search_space.IntRange(min=1), + data_disk_size=search_space.IntRange(min=20), + ), + ), ) def verify_cpu_offline_storage_workload(self, log: Logger, node: Node) -> None: # run fio process asynchronously on the node + fio_data_size_in_gb = 1 try: + image_folder_path = node.find_partition_with_freespace(fio_data_size_in_gb) fio_process = node.tools[Fio].launch_async( name="workload", - filename="fiodata", + filename=f"{image_folder_path}/fiodata", mode="readwrite", iodepth=128, numjob=10, time=300, block_size="1M", - size_gb=1, + size_gb=fio_data_size_in_gb, group_reporting=False, overwrite=True, time_based=True,
builders: Make single_connection_client configurable. single_connection_client keeps a connection open to the pool on init, which may cause issues if the master changes.
@@ -32,7 +32,7 @@ class RedisBuildLogs(object): args = dict(self._redis_config) args.update( - {"socket_connect_timeout": 1, "socket_timeout": 2, "single_connection_client": True} + {"socket_connect_timeout": 1, "socket_timeout": 2} ) self._redis_client = redis.StrictRedis(**args) @@ -127,7 +127,7 @@ class RedisBuildLogs(object): try: args = dict(self._redis_config) args.update( - {"socket_connect_timeout": 1, "socket_timeout": 1, "single_connection_client": True} + {"socket_connect_timeout": 1, "socket_timeout": 1} ) with closing(redis.StrictRedis(**args)) as connection:
Remove default logging. All tracked by datadog and stackdriver now.
@@ -44,25 +44,6 @@ DATABASES = { } } -LOGGING = { - 'version': 1, - 'disable_existing_loggers': False, - 'handlers': { - 'file': { - 'level': 'DEBUG', - 'class': 'logging.FileHandler', - 'filename': '/tmp/django.log', - }, - }, - 'loggers': { - 'django': { - 'handlers': ['file'], - 'level': 'DEBUG', - 'propagate': True, - }, - }, -} - # email settings EMAIL_BACKEND = "postmark.backends.PostmarkBackend" POSTMARK_API_KEY = os.getenv("EMAIL_CREDENTIALS_POSTMARK_API_KEY")
Add number words to the todo list's integer parsing. Parse number words separated by whitespace or hyphens into integers.
@@ -40,13 +40,37 @@ def _print(data): def sort(data): return sorted(data, key = lambda k: (-k['priority'] if 'priority' in k else 0, k['complete'])) -def parseNumber(string): - ret = {'skip':1, 'value':1} +def parseNumber(string, numwords = {}): + if not numwords: + units = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen" ] + tens = ["", "", "twenty", "thirty", "fourty", "fifty", "sixty", "seventy", "eighty", "ninety"] + scales = ["hundred", "thousand", "million", "billion", "trillion"] + + numwords["and"] = (1, 0) + for idx, word in enumerate(units): numwords[word] = (1, idx) + for idx, word in enumerate(tens): numwords[word] = (1, idx * 10) + for idx, word in enumerate(scales): numwords[word] = (10 ** (idx * 3 or 2), 0) + + ret = {'skip':0, 'value':0} try: - ret['value'] = int(string) + ret['skip'] = 1 + ret['value'] = int(string.split()[0]) except ValueError: - #TODO Turn words into integers/floats - print("number words not yet supported") + elements = string.split() + current = 0 + for d in elements: + number = d.split("-") + for word in number: + if word not in numwords: + ret['value'] += current + return ret + scale, increment = numwords[word] + current = current * scale + increment + if scale > 100: + ret['value'] += current + current = 0 + ret['skip'] += 1 + ret['value'] += current return ret def parseDate(data): @@ -74,14 +98,12 @@ def parseDate(data): retDate += timedelta(days = 7) parseDay = False elif parseDeltaValue: - tmp = parseNumber(d) + tmp = parseNumber(" ".join(elements[index:])) deltaValue = tmp['value'] parseDeltaUnit = tmp['skip'] parseDeltaValue = False elif parseDeltaUnit: - if parseDeltaUnit > 1: - parseDeltaUnit += 1 - elif "year" in d: + if "year" in d: retDate += relativedelta(years = deltaValue) elif "month" in d: retDate += relativedelta(months = deltaValue) @@ -101,8 +123,9 @@ def parseDate(data): newTime = dt.now() + timedelta(seconds = deltaValue) retDate = newTime.date() retTime = newTime.time() - else: + elif parseDeltaUnit == 1: print("Missing time unit") + parseDeltaUnit -= 1 elif re.match("^[0-9]{2}-[0-1][0-9]-[0-3][0-9]", d): retDate = dt.strptime(d, "%y-%m-%d").date()
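The parser above maps each word to a (scale, increment) pair and folds the words left to right, flushing the running value whenever a large scale word ("thousand", "million", ...) completes a group. A condensed sketch of the same idea, with hypothetical names and a reduced vocabulary (not the todo-list code itself):

    units = ["zero", "one", "two", "three", "four", "five", "six",
             "seven", "eight", "nine", "ten", "eleven", "twelve"]
    tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty",
            "seventy", "eighty", "ninety"]
    scales = {"hundred": 100, "thousand": 1000, "million": 10 ** 6}

    numwords = {w: (1, i) for i, w in enumerate(units)}
    numwords.update({w: (1, i * 10) for i, w in enumerate(tens) if w})
    numwords.update({w: (s, 0) for w, s in scales.items()})

    def words_to_int(text):
        # "twenty-three" and "twenty three" are handled the same way
        current = total = 0
        for word in text.replace("-", " ").lower().split():
            scale, increment = numwords[word]
            current = current * scale + increment
            if scale > 100:          # a "thousand"/"million" group is complete
                total += current
                current = 0
        return total + current

    print(words_to_int("two hundred forty-five"))  # 245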
[cleanup] Drop unused YahooSearchPageGenerator deprecation Yahoo search wasn't functional and has been removed. Dropping its deprecation clause.
@@ -50,7 +50,6 @@ from pywikibot.tools import ( filter_unique, intersect_generators, itergroup, - ModuleDeprecationWrapper, redirect_func, ) @@ -3001,9 +3000,6 @@ FileGenerator = redirect_func( CategoryGenerator = redirect_func( PageClassGenerator, old_name='CategoryGenerator', since='20161017', future_warning=True) -wrapper = ModuleDeprecationWrapper(__name__) -wrapper._add_deprecated_attr('YahooSearchPageGenerator', replacement_name='', - since='20181128') if __name__ == '__main__': # pragma: no cover pywikibot.output('Pagegenerators cannot be run as script - are you '
[client] allow providing tokens for linking. This is mostly useful for testing, when the OAuth flow has already been completed previously / on another machine.
@@ -247,10 +247,10 @@ class DropboxClient: def get_auth_url(self) -> str: """ Returns a URL to authorize access to a Dropbox account. To link a Dropbox - account, retrieve an auth token from the URL and link Maestral by calling - :meth:`link` with the provided token. + account, retrieve an authorization code from the URL and link Maestral by + calling :meth:`link` with the provided code. - :returns: URL to retrieve an OAuth token. + :returns: URL to retrieve an authorization code. """ self._auth_flow = DropboxOAuth2FlowNoRedirect( consumer_key=DROPBOX_APP_KEY, @@ -259,34 +259,57 @@ class DropboxClient: ) return self._auth_flow.start() - def link(self, token: str) -> int: - """ - Links Maestral with a Dropbox account using the given access token. The token - will be stored for future usage in the provided credential store. - - :param token: OAuth token for Dropbox access. + def link( + self, + code: str | None = None, + refresh_token: str | None = None, + access_token: str | None = None, + ) -> int: + """ + Links Maestral with a Dropbox account using the given authorization code. The + code will be exchanged for an access token and a refresh token with Dropbox + servers. The refresh token will be stored for future usage in the provided + credential store. + + :param code: Authorization code. + :param refresh_token: Optionally, instead of an authorization code, directly + provide a refresh token. + :param access_token: Optionally, instead of an authorization code or a refresh + token, directly provide an access token. Note that access tokens are + short-lived. :returns: 0 on success, 1 for an invalid token and 2 for connection errors. """ + if code: if not self._auth_flow: raise RuntimeError("Please start auth flow with 'get_auth_url' first") try: - res = self._auth_flow.finish(token) + res = self._auth_flow.finish(code) except requests.exceptions.HTTPError: return 1 except CONNECTION_ERRORS: return 2 - self._init_sdk(res.refresh_token, TokenType.Offline) + token = res.refresh_token + token_type = TokenType.Offline + elif refresh_token: + token = refresh_token + token_type = TokenType.Offline + elif access_token: + token = access_token + token_type = TokenType.Legacy + else: + raise RuntimeError("No auth code, refresh token ior access token provided.") + + self._init_sdk(token, token_type) try: - self.update_path_root() + account_info = self.get_account_info() + self.update_path_root(account_info.root_info) except CONNECTION_ERRORS: return 2 - self._cred_storage.save_creds( - res.account_id, res.refresh_token, TokenType.Offline - ) + self._cred_storage.save_creds(account_info.account_id, token, token_type) self._auth_flow = None return 0
Rewriting: avoid using the dot notation for bare node primitives TN:
@@ -548,9 +548,9 @@ package body ${ada_lib_name}.Rewriting_Implementation is Unit_Handle : constant Unit_Rewriting_Handle := Handle (N.Unit); begin - if N.Is_Token_Node then + if Is_Token_Node (N) then Children := (Kind => Expanded_Token_Node, - Text => To_Unbounded_Wide_Wide_String (N.Text)); + Text => To_Unbounded_Wide_Wide_String (Text (N))); else Children := (Kind => Expanded_Regular, Vector => <>); @@ -561,7 +561,8 @@ package body ${ada_lib_name}.Rewriting_Implementation is (Ada.Containers.Count_Type (Count)); for I in 1 .. Count loop declare - Child : constant ${root_node_type_name} := N.Child (I); + Child : constant ${root_node_type_name} := + Implementation.Child (N, I); begin Children.Vector.Append ((if Child = null @@ -682,7 +683,7 @@ package body ${ada_lib_name}.Rewriting_Implementation is ${pre_check_nrw_handle('Handle')} return (case Handle.Children.Kind is - when Unexpanded => Handle.Node.Children_Count, + when Unexpanded => Children_Count (Handle.Node), when Expanded_Regular => Natural (Handle.Children.Vector.Length), when Expanded_Token_Node => 0); end Children_Count; @@ -748,7 +749,7 @@ package body ${ada_lib_name}.Rewriting_Implementation is case Handle.Children.Kind is when Unexpanded => if Is_Token_Node (Handle.Kind) then - return Handle.Node.Text; + return Text (Handle.Node); else raise Program_Error; end if; @@ -1131,7 +1132,7 @@ package body ${ada_lib_name}.Rewriting_Implementation is Result.Node := null; Nodes_Pools.Append (Handle.New_Nodes, Result); - if Node.Is_Token_Node then + if Is_Token_Node (Node) then declare Index : constant Natural := Natural (Node.Token_Start_Index); Data : constant Stored_Token_Data := @@ -1146,14 +1147,14 @@ package body ${ada_lib_name}.Rewriting_Implementation is else declare - Count : constant Natural := Node.Children_Count; + Count : constant Natural := Children_Count (Node); begin Result.Children := (Kind => Expanded_Regular, Vector => <>); Result.Children.Vector.Reserve_Capacity (Ada.Containers.Count_Type (Count)); for I in 1 .. Count loop Result.Children.Vector.Append - (Transform (Node.Child (I), Result)); + (Transform (Child (Node, I), Result)); end loop; end; end if;
Review of the `domain_to_idna()` notes. This patch fix
@@ -1146,7 +1146,7 @@ def domain_to_idna(line): Notes ----- - - This method/function encode only the domain to `idna` format because in + - This function encode only the domain to `idna` format because in most cases the encoding issue is due to a domain which looks like `b'\xc9\xa2oogle.com'.decode('idna')`. - About the splitting:
Laikad: minor refactor - extract code to get_est_pos func.
@@ -62,6 +62,16 @@ class Laikad: cls=CacheSerializer)) self.last_cached_t = t + def get_est_pos(self, t, processed_measurements): + if self.last_pos_fix_t is None or abs(self.last_pos_fix_t - t) >= 2: + min_measurements = 5 if any(p.constellation_id == ConstellationId.GLONASS for p in processed_measurements) else 4 + pos_fix, pos_fix_residual = calc_pos_fix_gauss_newton(processed_measurements, self.posfix_functions, min_measurements=min_measurements) + if len(pos_fix) > 0: + self.last_pos_fix = pos_fix[:3] + self.last_pos_residual = pos_fix_residual + self.last_pos_fix_t = t + return self.last_pos_fix + def process_ublox_msg(self, ublox_msg, ublox_mono_time: int, block=False): if ublox_msg.which == 'measurementReport': t = ublox_mono_time * 1e-9 @@ -73,17 +83,11 @@ class Laikad: new_meas = read_raw_ublox(report) processed_measurements = process_measurements(new_meas, self.astro_dog) - if self.last_pos_fix_t is None or abs(self.last_pos_fix_t - t) >= 2: - min_measurements = 5 if any(p.constellation_id == ConstellationId.GLONASS for p in processed_measurements) else 4 - pos_fix, pos_fix_residual = calc_pos_fix_gauss_newton(processed_measurements, self.posfix_functions, min_measurements=min_measurements) - if len(pos_fix) > 0: - self.last_pos_fix = pos_fix[:3] - self.last_pos_residual = pos_fix_residual - self.last_pos_fix_t = t + est_pos = self.get_est_pos(t, processed_measurements) - corrected_measurements = correct_measurements(processed_measurements, self.last_pos_fix, self.astro_dog) if self.last_pos_fix_t is not None else [] + corrected_measurements = correct_measurements(processed_measurements, est_pos, self.astro_dog) if len(est_pos) > 0 else [] - self.update_localizer(self.last_pos_fix, t, corrected_measurements) + self.update_localizer(est_pos, t, corrected_measurements) kf_valid = all(self.kf_valid(t)) ecef_pos = self.gnss_kf.x[GStates.ECEF_POS].tolist() ecef_vel = self.gnss_kf.x[GStates.ECEF_VELOCITY].tolist() @@ -116,7 +120,7 @@ class Laikad: valid = self.kf_valid(t) if not all(valid): if not valid[0]: - cloudlog.info("Init gnss kalman filter") + cloudlog.info("Kalman filter uninitialized") elif not valid[1]: cloudlog.error("Time gap of over 10s detected, gnss kalman reset") elif not valid[2]: @@ -133,7 +137,7 @@ class Laikad: # Ensure gnss filter is updated even with no new measurements self.gnss_kf.predict(t) - def kf_valid(self, t: float): + def kf_valid(self, t: float) -> List[bool]: filter_time = self.gnss_kf.filter.filter_time return [filter_time is not None, filter_time is not None and abs(t - filter_time) < MAX_TIME_GAP,
fix py27 test Summary: something changed with the `re` package in 2.7? I dunno, this works around it well enough. Test Plan: buildkite Reviewers: #ft, max
-import re - import pytest from dagster import ( @@ -37,10 +35,7 @@ def muptiple_outputs_pipeline(): with pytest.raises( DagsterInvariantViolationError, - match=re.escape( - 'Output \'not_defined\' not defined in solid \'multiple_outputs\': ' - 'found outputs [\'output_one\', \'output_two\']' - ), + match="Output 'not_defined' not defined in solid 'multiple_outputs'", ): solid_result.output_value('not_defined') @@ -117,10 +112,7 @@ def multiple_outputs_only_emit_one_pipeline(): with pytest.raises( DagsterInvariantViolationError, - match=re.escape( - 'Output \'not_defined\' not defined in solid \'multiple_outputs\': ' - 'found outputs [\'output_one\', \'output_two\']' - ), + match="Output 'not_defined' not defined in solid 'multiple_outputs'", ): solid_result.output_value('not_defined')
MAINT: Fix azure linter problems with pip 20.1 The default Python 3.8 pip version seems to have been upgraded, leading to a large number of harmless messages being printed during package installation. This PR fixes the linter script to ignore the messages.
@@ -29,9 +29,10 @@ stages: addToPath: true architecture: 'x64' - script: >- - python -m pip --disable-pip-version-check install -r linter_requirements.txt + python -m pip install -r linter_requirements.txt displayName: 'Install tools' - failOnStderr: true + # pip 21.1 emits a pile of garbage messages to annoy users :) + # failOnStderr: true - script: | python tools/linter.py --branch origin/$(System.PullRequest.TargetBranch) displayName: 'Run Lint Checks'
remove unused variable tmp as suggested by lgtm
@@ -237,7 +237,6 @@ def pslq(ctx, x, tol=None, maxcoeff=1000, maxsteps=100, verbose=False): szmax = sz # Step 2 y[m], y[m+1] = y[m+1], y[m] - tmp = {} for i in xrange(1,n+1): H[m,i], H[m+1,i] = H[m+1,i], H[m,i] for i in xrange(1,n+1): A[m,i], A[m+1,i] = A[m+1,i], A[m,i] for i in xrange(1,n+1): B[i,m], B[i,m+1] = B[i,m+1], B[i,m]
Update strategy.rst Add docs for the prediction score
@@ -112,6 +112,9 @@ A prediction sample is shown as follows. ``Forecast Model`` module can make predictions, please refer to `Forecast Model: Model Training & Prediction <model.html>`_. +Normally, the prediction score is the output of the models. But some models are learned from a label with a different scale. So the scale of the prediction score may be different from your expectation(e.g. the return of instruments). + +Qlib didn't add a step to scale the prediction score to a unified scale. Because not every trading strategy cares about the scale(e.g. TopkDropoutStrategy only cares about the order). So the strategy is responsible for rescaling the prediction score(e.g. some portfolio-optimization-based strategies may require a meaningful scale). Running backtest -----------------
Convert output from kind to str. Subprocess output is bytes; decode it to a string so it renders better in error messages.
@@ -324,10 +324,10 @@ class KindWrapper(object): output = subprocess.check_output(args, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: plog("vvvvvvvvvvvvvvvv Output from {} vvvvvvvvvvvvvvvv".format(args[0])) - plog(e.output) + plog(e.output.decode('utf-8')) plog("^^^^^^^^^^^^^^^^ Output from {} ^^^^^^^^^^^^^^^^".format(args[0])) raise e - return output.strip() + return output.decode('utf-8').strip() def _is_macos():
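The decode added here reflects standard subprocess behavior: check_output returns bytes unless told otherwise. A tiny standalone example (generic echo command, not the kind wrapper itself):

    import subprocess

    out = subprocess.check_output(["echo", "hello"])
    print(type(out))                    # <class 'bytes'>
    print(out.decode("utf-8").strip())  # 'hello' as a str, which formats cleanly in messages

    # On Python 3.7+, passing text=True makes check_output return str directly.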
[unit test] simple rework of the TestProfileToolBar unit test to make sure mean and sum are computed in testDiagomalLine. Also move the "Trigger tool button for diagonal profile mode" step down, because profile actions are disabled when the plot has no image.
@@ -110,27 +110,38 @@ class TestProfileToolBar(TestCaseQt, ParametricTestCase): for method in ('sum', 'mean'): with self.subTest(method=method): + self.toolBar.setProfileMethod(method) + # 2 positions to use for mouse events pos1 = widget.width() * 0.4, widget.height() * 0.4 pos2 = widget.width() * 0.6, widget.height() * 0.6 - # Trigger tool button for diagonal profile mode - toolButton = getQToolButtonFromAction(self.toolBar.lineAction) - self.assertIsNot(toolButton, None) - self.mouseMove(toolButton) - self.mouseClick(toolButton, qt.Qt.LeftButton) - for image in (False, True): with self.subTest(image=image): if image: self.plot.addImage( numpy.arange(100 * 100).reshape(100, -1)) + # Trigger tool button for diagonal profile mode + toolButton = getQToolButtonFromAction( + self.toolBar.lineAction) + self.assertIsNot(toolButton, None) + self.mouseMove(toolButton) + self.mouseClick(toolButton, qt.Qt.LeftButton) + self.toolBar.lineWidthSpinBox.setValue(3) + + # draw profile line self.mouseMove(widget, pos=pos1) self.mousePress(widget, qt.Qt.LeftButton, pos=pos1) self.mouseMove(widget, pos=pos2) self.mouseRelease(widget, qt.Qt.LeftButton, pos=pos2) + if image is True: + profileCurve = self.toolBar.getProfilePlot().getAllCurves()[0] + if method == 'sum': + self.assertTrue(profileCurve.getData()[1].max() > 10000) + elif method == 'mean': + self.assertTrue(profileCurve.getData()[1].max() < 10000) self.plot.clear()
Update index.rst. We decided not to include a description here. If it doesn't work, we can change it back.
Toil Documentation ================== -Everything you need to know about Toil. - -* Toil is a scalable, efficient, cross-platform pipeline management system. -* Run it on Amazon Web Services and Toil can automatically manage the number of workers. -* Write your workflows with an easy to use Python API (Turing complete!) or CWL. -* Share worker output and state in the cloud using the Toil jobStore. -* Toil will also run in existing high-performance computing environments like GridEngine, LSF, Slurm, Torque. - +Toil is an open-source pure-Python workflow engine that lets people write better pipelines. Check out our `website`_ for a comprehensive list of Toil's features, and read our `paper`_ to learn what Toil can do in the real world. Feel free to also join us on `GitHub`_ and `Gitter`_.
Man: mention $$ as a subst escape [skip appveyor]. Some rewordings elsewhere in the Variable Substitution section - mainly about a variable whose value is a function.
@@ -6414,18 +6414,25 @@ env.Command('marker', 'input_file', action=[MyBuildAction, Touch('$TARGET')]) &scons; performs variable substitution on the string that makes up the action part of the builder. -Variables to be interpolated are indicated in the -string with a leading -<literal>$</literal>, to distinguish them from plain text +Variables or other text to be substituted are indicated in the +string by a leading <emphasis role="bold">$</emphasis>, +to distinguish them from plain text which is not to be substituted. -The name may be surrounded by curly braces -(<literal>${}</literal>) -to separate the name from surrounding characters if necessary. -Curly braces are required when you use +The substitutable text may be surrounded by curly braces +to separate it from surrounding characters if necessary +(for example <literal>${FOO}BAR</literal>). +To avoid substituting a substring that looks like a variable name, +escape it with an additional <emphasis role="bold">$</emphasis>, +(for example, <literal>$$FOO</literal> will be left in the +final string as <literal>$FOO</literal>). +</para> +<para> +The curly brace notation is required when you use Python list subscripting/slicing notation on a variable to select one or more items from a list, or access a variable's special attributes, -or use Python expression substitution. +or when you use Python expression substitution +(see below for descriptions of these). </para> <para> @@ -6670,9 +6677,10 @@ echo Last build occurred . &gt; $TARGET <para>While &consvars; are normally directly substituted, if a variable refers to a &consvar; -whose value is a &Python; function, -that function is called during substitution. -Such a function must accept four arguments: +whose value is a callable &Python; object (a function +or a class with a <literal>__call__</literal> method), +that object is called during substitution. +The callable must accept four arguments: <parameter>target</parameter>, <parameter>source</parameter>, <parameter>env</parameter> and @@ -6681,19 +6689,21 @@ Such a function must accept four arguments: <parameter>target</parameter> is a list of target nodes, <parameter>env</parameter> is the &consenv; to use for context, and <parameter>for_signature</parameter> is -a Boolean value that tells the function +a boolean value that tells the callable if it is being called for the purpose of generating a build signature. Since the build signature is used for rebuild determination, -the function should omit variable elements -that do not affect whether a rebuild should be triggered -(see <emphasis role="bold">$(</emphasis> -and <emphasis role="bold">$)</emphasis> -above) if <parameter>for_signature</parameter> is true. +variable elements that do not affect whether +a rebuild should be triggered +should be omitted from the returned string +if <parameter>for_signature</parameter> is true. +See <emphasis role="bold">$(</emphasis> +and <emphasis role="bold">$)</emphasis> above +for the syntax. </para> <para> &SCons; will insert whatever -the called function returns +the callable returns into the expanded string: </para> @@ -6712,11 +6722,11 @@ will be exactly as it was set: <literal>"$FOO baz"</literal>. </para> <para>You can use this feature to pass arguments to a -Python function by creating a callable class -that stores one or more arguments in an object, -and then uses them when the -<methodname>__call__()</methodname> -method is called. 
+callable variable by creating a callable class +that stores passed arguments in the instance, +and then uses them +(in the <methodname>__call__</methodname> method) +when the instance is called. Note that in this case, the entire variable expansion must be enclosed by curly braces
Minor bugfix for database initialization in demcz: changing randompar to vector.
@@ -189,13 +189,13 @@ class demcz(_algorithm): for rep, vector, simulations in self.repeat(param_generator): - if firstcall == True: - self.initialize_database(randompar, self.parameter()['name'], simulations, likelist) - firstcall = False burnInpar[i][rep] = vector likelist = self.objectivefunction( evaluation=self.evaluation, simulation=simulations) + if firstcall == True: + self.initialize_database(vector, self.parameter()['name'], simulations, likelist) + firstcall = False simulationlist.append(simulations) self._logPs.append(likelist)
STY: pavement.py: don't use `type` as variable name [ci skip]
@@ -137,17 +137,17 @@ def pdf(): ref = os.path.join(bdir_latex, "scipy-ref.pdf") shutil.copy(ref, os.path.join(destdir_pdf, "reference.pdf")) -def tarball_name(type='gztar'): +def tarball_name(type_name='gztar'): root = 'scipy-%s' % FULLVERSION - if type == 'gztar': + if type_name == 'gztar': return root + '.tar.gz' - elif type == 'xztar': + elif type_name == 'xztar': return root + '.tar.xz' - elif type == 'tar': + elif type_name == 'tar': return root + '.tar' - elif type == 'zip': + elif type_name == 'zip': return root + '.zip' - raise ValueError("Unknown type %s" % type) + raise ValueError("Unknown type %s" % type_name) @task def sdist():
Dv3: add link for trained model. Add a trained model link for downloading.
@@ -119,7 +119,10 @@ For more details about `train.py`, see `python train.py --help`. #### load checkpoints -You can load saved checkpoint and resume training with `--checkpoint`, if you wan to reset optimizer states, pass `--reset-optimizer` in addition. +We provide a trained model ([dv3.single_frame](https://paddlespeech.bj.bcebos.com/Parakeet/dv3.single_frame.tar.gz)) for downloading, which is trained with the default preset. Unzip the downloaded file with `tar xzvf dv3.single_frame.tar.gz`, you will get `config.json`, `model.pdparams` and `README.md`. `config.json` is the preset json with which the model is trained, `model.pdparams` is the parameter file, and `README.md` is a brief introduction of the model. + +You can load saved checkpoint and resume training with `--checkpoint` (You only need to provide the base name of the parameter file, eg. if you want to load `model.pdparams`, just use `--checkpoint=model`). If there is also a file with the same basename and extension name `.pdopt` in the same folder with the model file (i.e. `model.pdopt`, which is the optimizer file), it is also loaded automatically. If you wan to reset optimizer states, pass `--reset-optimizer` in addition. + #### train a part of the model
fix(global): data.path without hashes. Removing hashes from data.path.
@@ -314,8 +314,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): index_frame_start = int(repre.get("frameStart")) dst_padding_exp = src_padding_exp + dst_start_frame = None for i in src_collection.indexes: src_padding = src_padding_exp % i + + # for adding first frame into db + if not dst_start_frame: + dst_start_frame = src_padding + src_file_name = "{0}{1}{2}".format( src_head, src_padding, src_tail) @@ -326,19 +332,22 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): dst_padding = dst_padding_exp % index_frame_start index_frame_start += 1 - dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail).replace("..", ".") + dst = "{0}{1}{2}".format( + dst_head, + dst_padding, + dst_tail).replace("..", ".") + self.log.debug("destination: `{}`".format(dst)) src = os.path.join(stagingdir, src_file_name) + self.log.debug("source: {}".format(src)) instance.data["transfers"].append([src, dst]) - repre['published_path'] = "{0}{1}{2}".format(dst_head, - dst_padding_exp, - dst_tail) - # for imagesequence version data - hashes = '#' * len(dst_padding) - dst = os.path.normpath("{0}{1}{2}".format( - dst_head, hashes, dst_tail)) + dst = "{0}{1}{2}".format( + dst_head, + dst_start_frame, + dst_tail).replace("..", ".") + repre['published_path'] = dst else: # Single file
Modified train_rl to account for experiment names. Removed sbatch folder.
@@ -64,6 +64,9 @@ parser.add_argument("--model-mem", action="store_true", default=False, help="use memory in the model") parser.add_argument("--arch", default='cnn1', help="Architecture of Actor") +parser.add_argument("--exp-name", default=None, + help="Name of the experiment to run.") + args = parser.parse_args() # Set seed for all randomness sources @@ -80,9 +83,13 @@ for i in range(args.procs): # Define model name -suffix = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S") +prefix = datetime.datetime.now().strftime("%y-%m-%d") +suffix = datetime.datetime.now().strftime("%H-%M-%S") + -default_model_name = "{}/{}_{}_{}_{}_seed{}_lr{:.1e}_{}".format( + +default_model_name = "{}/{}{}/{}_{}_{}_{}_seed{}_lr{:.1e}_{}".format( + prefix, args.exp_name + '/' if args.exp_name else '', args.env, args.algo, "instr" if args.model_instr else "noinstr", "mem" if args.model_mem else "nomem", @@ -119,7 +126,9 @@ else: # Define logger and tensorboard writer -log_name = model_name + ("_" + suffix if args.model is not None else "") +# log_name = model_name + ("_" + suffix if args.model is else "") + +log_name = model_name logger = utils.Logger(log_name) if args.tb:
Update README.md Update documentation for billing
@@ -29,7 +29,7 @@ CONTENTS &nbsp; &nbsp; &nbsp; &nbsp; [Starting/Stopping services](#Starting_Stopping_services) -&nbsp; &nbsp; &nbsp; &nbsp; [Billing report](#Billing-Report) +&nbsp; &nbsp; &nbsp; &nbsp; [Billing report](#Billing_Report) &nbsp; &nbsp; &nbsp; &nbsp; [Troubleshooting](#Troubleshooting) @@ -474,13 +474,13 @@ After you have configured the billing. You can running it as a process of Self-S sudo supervisorctl stop ui sudo supervisorctl start ui ``` -If you want to load report by manually or to use external scheduler use next command +If you want to load report by manually or to use external scheduler use next command: ``` java -jar /opt/dlab/webapp/lib/billing/billing-x.y.jar --conf /opt/dlab/conf/billing.yml or java -cp /opt/dlab/webapp/lib/billing/billing-x.y.jar com.epam.dlab.BillingTool --conf /opt/dlab/conf/billing.yml ``` -If you want the billing to work as a separate process from the Self-Service use next command +If you want the billing to work as a separate process from the Self-Service use next command: ``` java -cp /opt/dlab/webapp/lib/billing/billing-x.y.jar com.epam.dlab.BillingScheduler --conf /opt/dlab/conf/billing.yml ```
operator manifests: Test validation of new options * Container.yaml has 3 new options allowing opt-out for users (see schema in osbs-client), test validation here
@@ -287,13 +287,19 @@ class TestSourceConfigSchemaValidation(object): package_mappings: bar: baz spam: eggs + enable_digest_pinning: true + enable_repo_replacements: false + enable_registry_replacements: true """, {'operator_manifests': { 'manifests_dir': 'path/to/manifests', 'repo_replacements': [ {'registry': 'foo', 'package_mappings': {'bar': 'baz', 'spam': 'eggs'}} - ] + ], + "enable_digest_pinning": True, + "enable_repo_replacements": False, + "enable_registry_replacements": True, }} ), ]) @@ -427,6 +433,24 @@ class TestSourceConfigSchemaValidation(object): package_mappings: bar: 1 # not a string """, + + """ + operator_manifests: + manifests_dir: some/path + enable_digest_pinning: null # not a boolean + """, + + """ + operator_manifests: + manifests_dir: some/path + enable_repo_replacements: 1 # not a boolean + """, + + """ + operator_manifests: + manifests_dir: some/path + enable_registry_replacements: "true" # not a boolean + """, ]) def test_invalid_source_config_validation_error(self, tmpdir, yml_config): with pytest.raises(jsonschema.ValidationError):
Doc: Reminder to use configured format in examples fix
@@ -415,8 +415,8 @@ Options Examples """""""" -These may need to be adapted for your configuration and/or locale. See :command:`printformats`. -:: +These may need to be adapted for your configuration and/or locale (START and END +need to match the format configured). See :command:`printformats`. :: khal new 18:00 Awesome Event
More thorough check whether UIA can be used, in order to prevent failed DLL loads, e.g. under WINE.
@@ -42,7 +42,11 @@ try: log = logging.getLogger('comtypes') log.setLevel('WARNING') import comtypes # noqa: E402 + import comtypes.client + comtypes.client.GetModule('UIAutomationCore.dll') UIA_support = True +except OSError: + UIA_support = False except ImportError: UIA_support = False
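The extra except clause above is the usual guarded-import pattern for optional, platform-specific backends whose DLLs may fail to load even when the Python package is installed. A generic sketch with a made-up module name (not pywinauto's real code):

    try:
        # May raise ImportError (package missing) or OSError
        # (the underlying DLL cannot be loaded, e.g. under WINE).
        import some_optional_backend  # hypothetical module
        BACKEND_SUPPORTED = True
    except (ImportError, OSError):
        BACKEND_SUPPORTED = False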
IDM: use projected velocity difference rather than speed difference Fix
@@ -144,18 +144,20 @@ class IDMVehicle(ControlledVehicle): np.power(self.desired_gap(ego_vehicle, front_vehicle) / utils.not_zero(d), 2) return acceleration - def desired_gap(self, ego_vehicle: Vehicle, front_vehicle: Vehicle = None) -> float: + def desired_gap(self, ego_vehicle: Vehicle, front_vehicle: Vehicle = None, projected: bool = True) -> float: """ Compute the desired distance between a vehicle and its leading vehicle. :param ego_vehicle: the vehicle being controlled :param front_vehicle: its leading vehicle + :param projected: project 2D velocities in 1D space :return: the desired distance between the two [m] """ d0 = self.DISTANCE_WANTED tau = self.TIME_WANTED ab = -self.COMFORT_ACC_MAX * self.COMFORT_ACC_MIN - dv = ego_vehicle.speed - front_vehicle.speed + dv = np.dot(ego_vehicle.velocity - front_vehicle.velocity, ego_vehicle.direction) if projected \ + else ego_vehicle.speed - front_vehicle.speed d_star = d0 + ego_vehicle.speed * tau + ego_vehicle.speed * dv / (2 * np.sqrt(ab)) return d_star
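The projection introduced above is just a dot product of the 2D relative velocity with the ego vehicle's unit heading vector. A short numeric sketch with made-up values (not highway-env's actual vehicle objects):

    import numpy as np

    ego_velocity = np.array([10.0, 2.0])    # [m/s]
    front_velocity = np.array([8.0, -1.0])
    ego_direction = np.array([1.0, 0.0])    # unit vector along the ego heading

    # Approach rate along the travel direction, as used in the desired-gap term
    dv = np.dot(ego_velocity - front_velocity, ego_direction)
    print(dv)  # 2.0, whereas comparing scalar speeds would give ~2.14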