setup.py: add mock module as dependency mock is used to mock output from subprocess in tests. This is a standard module in py3 but not present in py27, which is why this dependency is needed.
@@ -54,6 +54,7 @@ setup(name='cwltool', 'schema-salad >= 2.4.20170308171942, < 3', 'typing >= 3.5.2, < 3.6', 'six >= 1.10.0', + 'mock >= 2.0.0', ], setup_requires=[] + pytest_runner,
[ci] Fix `python_package_basic` tests error: pep517.wrappers.BackendUnavailable This error is raised when we try to get the metadata from a package, and it happens because we forgot to first install the build requirements from the `pyproject.toml` file
@@ -436,10 +436,11 @@ def _extract_metainfo_files_from_package_unsafe( os.path.join(output_path, 'pyproject.toml') ) - # Get build backend from pyproject.toml: + # Get build backend and requirements from pyproject.toml: with open(os.path.join(path, 'pyproject.toml')) as f: build_sys = pytoml.load(f)['build-system'] backend = build_sys["build-backend"] + build_requires.extend(build_sys["requires"]) # Get a virtualenv with build requirements and get all metadata: env = BuildEnvironment()
Correct sphinx-rtd-theme deps Got sphinx==6 and theme==0.5.1 for some reason (pip resolver?)
@@ -41,7 +41,7 @@ gmpy = ['gmpy2>=2.1.0'] plot = ['matplotlib>=3.5.0'] interactive = ['ipykernel'] docs = ['sphinx>=4', 'sphinxcontrib-bibtex>=2.1', 'sphinxcontrib-autoprogram', - 'sphinx_rtd_theme>=0.2'] + 'sphinx-rtd-theme>=1'] tests = ['pytest>=6', 'hypothesis', 'pytest-timeout', 'pexpect'] develop = ['diofant[tests]', 'flake518', 'flake8-docstrings>=1.3', 'pep8-naming', 'flake8-comprehensions', 'flake8-isort>=4.1',
Update data.py Added type annotations to the editor config strings.
@@ -24,8 +24,8 @@ class IndentStyle(str, Enum): class EditorSettings: indent_style: IndentStyle = IndentStyle.Space indent_size: int = 4 - ace_editor_language: "python" - highlightjs_language: "python" + ace_editor_language: str = "python" + highlightjs_language: str = "python" def __post_init__(self):
add error message if Dropout gets no rng key closes (though there's more work to be done here)
@@ -217,6 +217,12 @@ def Dropout(rate, mode='train'): def init_fun(input_shape): return input_shape, () def apply_fun(params, inputs, rng): + if rng is None: + msg = ("Dropout layer requires apply_fun to be called with a PRNG key " + "argument. That is, instead of `apply_fun(params, inputs)`, call " + "it like `apply_fun(params, inputs, key)` where `key` is a " + "jax.random.PRNGKey value.") + raise ValueError(msg) if mode == 'train': keep = random.bernoulli(rng, rate, inputs.shape) return np.where(keep, inputs / rate, 0)
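A minimal usage sketch of the calling convention the new error message asks for. It assumes the `jax.experimental.stax` import path and the positional-`rng` `apply_fun` signature shown in this diff; the shapes and rate are made up.

```python
import jax.numpy as np              # the diff above uses `np` for jax.numpy
from jax import random
from jax.experimental import stax   # assumed import path for this version of stax

init_fun, apply_fun = stax.Dropout(rate=0.9, mode='train')
out_shape, params = init_fun((3, 4))
x = np.ones((3, 4))

key = random.PRNGKey(0)
y = apply_fun(params, x, key)       # OK: a PRNG key is supplied

apply_fun(params, x, None)          # now raises the ValueError added above
```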
remove platform tabs, as the launch options aren't platform-specific anymore; clarify niche launch options
@@ -32,10 +32,6 @@ TF2 may be looking for an outdated list of graphics cards to enable higher perfo You can fake your graphics card to one TF2 checks for, in order to unlock better graphics card usage using launch options. -=== "Windows" - * **Intel** (Broadwater or higher (past ~2005)): `-force_device_id 0x2500` - * Other cards are already set automatically. -=== "macOS/Linux" * **Intel** (Broadwater or higher (past ~2005)): `-force_device_id 0x2500` * Other cards are already set automatically. @@ -82,7 +78,10 @@ You can fake your graphics card to one TF2 checks for, in order to unlock better * **-gl_amd_pinned_memory** : use `AMD_pinned_memory` for efficient device memory handling. :warning: **Experimental** as its performance impact (negative or positive) is unknown. * **-gl_nv_bindless_texturing** : use `NV_bindless_texture` for reduced overhead for managing bindings. :warning: **Experimental** as its performance impact (negative or positive) is unknown and its Source implementation may not be complete. -## Niche Launch Options +## Uncommon Launch Options + +These launch options do not need to be used by the vast majority of users, but they are here for the few that need them. + * **-nouserclip** : uses software clipping instead of hardware user clip planes, FPS increase or decrease depends on your CPU+GPU and graphics API. * **-nosound** : disables sound, no performance boost unless your drivers are slow * **-small** : allow for resolutions smaller than 640x480
Add field to model Doing this as a standalone to split the work up more easily
@@ -2158,6 +2158,7 @@ class CaseSearch(DocumentSchema): data_registry = StringProperty() data_registry_workflow = StringProperty() # one of REGISTRY_WORKFLOW_* additional_registry_cases = StringListProperty() # list of xpath expressions + expand_id_property = StringProperty() # case property referencing another case's ID @property def case_session_var(self):
Update run.py Modified error #L527 test=develop
@@ -523,8 +523,8 @@ def evaluate(logger, args): inference_program = main_program.clone(for_test=True) eval_loss, bleu_rouge = validation( - inference_program, avg_cost, s_probs, e_probs, feed_order, - place, dev_count, vocab, brc_data, logger, args) + inference_program, avg_cost, s_probs, e_probs, match, + feed_order, place, dev_count, vocab, brc_data, logger, args) logger.info('Dev eval loss {}'.format(eval_loss)) logger.info('Dev eval result: {}'.format(bleu_rouge)) logger.info('Predicted answers are saved to {}'.format(
python-slugify: use explicit re-exports Fixes
from .__version__ import ( - __author__, - __author_email__, - __copyright__, - __description__, - __license__, - __title__, - __url__, - __version__, + __author__ as __author__, + __author_email__ as __author_email__, + __copyright__ as __copyright__, + __description__ as __description__, + __license__ as __license__, + __title__ as __title__, + __url__ as __url__, + __version__ as __version__, ) from .slugify import * from .special import *
Update netwire.txt Minor update.
@@ -2078,8 +2078,11 @@ kyelines.ddns.net # Reference: https://twitter.com/malware_traffic/status/1242966785462349824 # Reference: https://www.malware-traffic-analysis.net/2020/03/25/index.html +# Reference: https://unit42.paloaltonetworks.com/guloader-installing-netwire-rat/ 185.163.47.168:2020 +185.163.47.168:2121 +185.163.47.213:2020 185.163.47.213:2121 # Reference: https://www.virustotal.com/gui/file/f12113dfd58eebfc534a60d5b4d095f9bd6e1c4631fc2e15fa74e6b769dda6c0/detection
Don't fail during ValueRecord copy if src has more items We drop hinting by simply changing ValueFormat, without cleaning up the actual ValueRecords. This was causing a failure at this assert if a font was subsetted without hinting and then passed to mutator.
@@ -894,7 +894,8 @@ class ValueRecord(object): setattr(self, name, None if isDevice else 0) if src is not None: for key,val in src.__dict__.items(): - assert hasattr(self, key) + if not hasattr(self, key): + continue setattr(self, key, val) elif src is not None: self.__dict__ = src.__dict__.copy()
Change AOT from ExprVisitor to MixedModeVisitor This should allow better scalability for AOT when targeting larger networks.
@@ -53,7 +53,7 @@ using StorageMap = * This is an on demand allocator for AOT. A new temporary * (storage allocator identifier) is allocated for each operation. */ -class AOTOnDemandAllocator : public ExprVisitor { +class AOTOnDemandAllocator : public MixedModeVisitor { public: // run the visitor on a function. void Run(const Function& func) { @@ -84,10 +84,7 @@ class AOTOnDemandAllocator : public ExprVisitor { AssignReturnSid(GetRef<Expr>(op)); } - void VisitExpr_(const VarNode* op) final { - ExprVisitor::VisitExpr_(op); - AssignReturnSid(GetRef<Expr>(op)); - } + void VisitExpr_(const VarNode* op) final { AssignReturnSid(GetRef<Expr>(op)); } void VisitExpr_(const FunctionNode* op) final { // do not recurse into sub function. @@ -218,7 +215,7 @@ class AOTOnDemandAllocator : public ExprVisitor { }; /*! \brief Code generator for AOT executor */ -class AOTExecutorCodegen : public ExprVisitor { +class AOTExecutorCodegen : public MixedModeVisitor { protected: /*! * \brief Utility function to allocate a DLTensor or TVMValue @@ -437,7 +434,6 @@ class AOTExecutorCodegen : public ExprVisitor { void VisitExpr_(const OpNode* op) override { throw std::runtime_error("can not compile op in non-eta expanded form"); } - void VisitExpr_(const GlobalVarNode* op) override { throw std::runtime_error(""); } void VisitExpr_(const IfNode* op) override { throw std::invalid_argument("if not supported"); } void VisitExpr_(const FunctionNode* op) override { ICHECK(op->GetAttr<String>(attr::kCompiler).defined())
Adds chialpha function to chi2fns.py This allows us to test ChiAlphaFunction independently of a GST optimization.
@@ -125,6 +125,64 @@ def chi2_terms(model, dataset, circuits=None, return terms +def chialpha(alpha, model, dataset, circuits=None, + pfratio_stitchpt=1e-4, radius=1e-6, + check=False, memLimit=None, opLabelAliases=None, + evaltree_cache=None, comm=None): + """ + TODO: docstring + """ + from ..objects import objectivefns as _objfns + from ..objects.profiler import DummyProfiler as _DummyProfiler + + if circuits is None: + circuits = list(dataset.keys()) + + if evaltree_cache and 'evTree' in evaltree_cache: + evTree = evaltree_cache['evTree'] + lookup = evaltree_cache['lookup'] + outcomes_lookup = evaltree_cache['outcomes_lookup'] + else: + #OLD: evTree,lookup,outcomes_lookup = smart(model.bulk_evaltree,circuits) + evTree, wrtBlkSize, _, lookup, outcomes_lookup = model.bulk_evaltree_from_resources( + circuits, comm, dataset=dataset) + + #Fill cache dict if one was given + if evaltree_cache is not None: + evaltree_cache['evTree'] = evTree + evaltree_cache['lookup'] = lookup + evaltree_cache['outcomes_lookup'] = outcomes_lookup + + #Expand operation label aliases used in DataSet lookups + if opLabelAliases is not None: + dsCircuitsToUse = _tools.find_replace_tuple_list( + circuits, opLabelAliases) + else: + dsCircuitsToUse = circuits + + if evaltree_cache and 'cntVecMx' in evaltree_cache: + cntVecMx = evaltree_cache['cntVecMx'] + totalCntVec = evaltree_cache['totalCntVec'] + else: + KM = evTree.num_final_elements() # shorthand for combined spam+circuit dimension + cntVecMx = _np.empty(KM, 'd') + totalCntVec = _np.empty(KM, 'd') + for i, opStr in enumerate(dsCircuitsToUse): + cnts = dataset[opStr].counts + totalCntVec[lookup[i]] = sum(cnts.values()) # dataset[opStr].total + cntVecMx[lookup[i]] = [cnts.get(x, 0) for x in outcomes_lookup[i]] + + gthrMem = memLimit + probClipInterval = (-1000,1000) + profiler = _DummyProfiler() + + fn = _objfns.ChiAlphaFunction(alpha, model, evTree, lookup, circuits, opLabelAliases, cntVecMx, totalCntVec, + pfratio_stitchpt, probClipInterval, radius, wrtBlkSize, gthrMem, check, + check, comm, profiler, verbosity=0) + v = fn.fn(model.to_vector()) + return _np.sum(v**2) # I think... + + def chi2(model, dataset, circuits=None, returnGradient=False, returnHessian=False, minProbClipForWeighting=1e-4, clipTo=None,
python_api/astnode_types_py: emit private accessors for implicit args TN:
'()') %> - % for field in cls.fields_with_accessors(): - - <% - arg_list = ['self'] + [a.name.lower for a in field.explicit_arguments] - %> + ## First, emit public properties/methods for field accessors. Accessors + ## with no implicit argument will implement C calls themselves, but those + ## with some will just delegate to the private methods below. + % for field in cls.fields_with_accessors(): + <% arg_list = ['self'] + [a.name.lower + for a in field.explicit_arguments] %> % if not field.explicit_arguments: @property % endif def ${field.name.lower}(${', '.join(arg_list)}): ${py_doc(field, 8)} + % if field.exposed_implicit_arguments: + <% passed_args = arg_list[1:] + [ + arg.type.py_nullexpr() + for arg in field.exposed_implicit_arguments + ] %> + return self._${field.name.lower}(${', '.join(passed_args)}) + % else: ${accessor_body(field)} + % endif + % endfor + + ## Then, for properties with implicit arguments, emit private methods + + % for field in cls.fields_with_accessors(): + % if field.exposed_implicit_arguments: + <% arg_list = ['self'] + [a.name.lower for a in field.exposed_arguments] %> + def _${field.name.lower}(${', '.join(arg_list)}): + ${accessor_body(field)} + % endif % endfor _field_names = ${parent_fields} + (
Update uiautomatorhelper API calls Update tests
@@ -24,7 +24,7 @@ import json from culebratester_client import WindowHierarchyChild, WindowHierarchy -__version__ = '20.9.0' +__version__ = '20.9.1' import sys import warnings @@ -1204,7 +1204,7 @@ class View: # __str = str('', 'utf-8', 'replace') __str = '' if "class" in self.map: - __str += re.sub('.*\.', '', self.map['class']) + __str += re.sub('.*\\.', '', self.map['class']) _id = self.getId().replace('id/no_id/', '-') __str += _id ((L, T), (R, B)) = self.getCoords() @@ -3433,7 +3433,7 @@ class ViewClient: if self.useUiAutomator: if self.uiAutomatorHelper: - received = self.uiAutomatorHelper.dumpWindowHierarchy() + received = self.uiAutomatorHelper.ui_device.dump_window_hierarchy() else: api = self.getSdkVersion() if api >= 24: @@ -4246,7 +4246,7 @@ class ViewClient: if self.uiAutomatorHelper: if DEBUG_UI_AUTOMATOR_HELPER: print("Taking screenshot using UiAutomatorHelper", file=sys.stderr) - received = self.uiAutomatorHelper.takeScreenshot() + received = self.uiAutomatorHelper.ui_device.take_screenshot() stream = io.BytesIO(received.read()) try: from PIL import Image
fix: fix icon on do not disturb switch Update to new MDI entry for do not disturb switch per Home Assistant update 2021.10.0.
@@ -309,7 +309,7 @@ class DNDSwitch(AlexaMediaSwitch): @property def icon(self): """Return the icon of the switch.""" - return super()._icon("mdi:do-not-disturb", "mdi:do-not-disturb-off") + return super()._icon("mdi:minus-circle", "mdi:minus-circle-off") def _handle_event(self, event): """Handle events."""
libcloud: Match cloud names correctly upon deletion. We were previously (greedily) using count() instead of matching exactly.
@@ -621,6 +621,8 @@ class LibcloudCmds(CommonCloudFunctions) : _status = 100 _fmsg = "An error has occurred, but no error message was captured" + _search = "cb-" + obj_attr_list["username"] + "-" + obj_attr_list["cloud_name"] + for credentials_list in obj_attr_list["credentials"].split(";"): _status, _msg, _local_conn, _hostname = self.connect(credentials_list) @@ -637,7 +639,9 @@ class LibcloudCmds(CommonCloudFunctions) : _reservations = self.get_adapter(credentials_list).list_nodes(*self.get_list_node_args(obj_attr_list)) for _reservation in _reservations : - if _reservation.name.count("cb-" + obj_attr_list["username"] + "-" + obj_attr_list["cloud_name"]) : + _match = "-".join(_reservation.name.split("-")[:3]) + + if _match == _search : if _reservation.state in [ NodeState.PENDING, NodeState.STOPPED ] : cbdebug("Instance " + _reservation.name + " still has a pending event. waiting to destroy...") if _reservation.state == NodeState.STOPPED : @@ -682,7 +686,9 @@ class LibcloudCmds(CommonCloudFunctions) : _volumes = self.get_adapter(credentials_list).list_volumes() for _volume in _volumes : - if _volume.name.count("cb-" + obj_attr_list["username"] + "-" + obj_attr_list["cloud_name"]) : + _match = "-".join(_volume.name.split("-")[:3]) + + if _match == _search : try : cbdebug("Destroying: " + _volume.name + " (" + tenant + ")", True) _volume.destroy()
Enable cache This potentially fixes (I'm still waiting for user test results). The potential cause for this error was a missing cache table (no cache at all was set up), so the Django-select2 AutoResponseView was unable to find the correct widget by the `field_id` GET parameter.
@@ -247,6 +247,20 @@ AUTH_PASSWORD_VALIDATORS = [ }, ] +# CACHE +# ----------------------------------------------------------------------------- +# https://docs.djangoproject.com/en/2.2/topics/cache/#database-caching +CACHES = { + 'default': { + 'BACKEND': 'django.core.cache.backends.db.DatabaseCache', + 'LOCATION': 'cache_default', + }, + 'select2': { + 'BACKEND': 'django.core.cache.backends.db.DatabaseCache', + 'LOCATION': 'cache_select2', + } +} + # MIDDLEWARE # ----------------------------------------------------------------------------- # https://docs.djangoproject.com/en/dev/ref/settings/#middleware @@ -521,3 +535,4 @@ COMMENTS_APP = 'extcomments' SELECT2_JS = '' # don't load JS on it's own - we're loading it in `base.html` SELECT2_CSS = '' # the same for CSS SELECT2_I18N = 'select2/js/i18n' +SELECT2_CACHE_BACKEND = 'select2'
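The diff only declares the database-backed caches; the cache tables themselves still have to exist. A hedged sketch of the assumed follow-up step (not shown in this diff), using Django's standard `createcachetable` management command:

```python
# Database caches need their tables created once per LOCATION, e.g. from the shell:
#   python manage.py createcachetable
# or programmatically:
from django.core.management import call_command

call_command("createcachetable")  # creates cache_default / cache_select2 if missing
```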
Simplify MarkerTask's ctor Based on a comment by
@@ -157,12 +157,14 @@ class SignalTask(Task): class MarkerTask(Task): - def __init__(self, name, *args, **kwargs): - if (args and kwargs) or len(args) > 1 or len(kwargs) > 1 or (kwargs and 'details' not in kwargs): - raise ValueError('Expected only one argument or the kwarg "details"') + def __init__(self, name, details): + """ + :param name: Marker name + :param details: Serializable marker details + """ self._name = name - self.args = self.resolve_args(*args) - self.kwargs = self.resolve_kwargs(**kwargs) + self.args = self.resolve_args(details) + self.kwargs = {} @property def name(self): @@ -175,11 +177,7 @@ class MarkerTask(Task): @property def details(self): - if len(self.args): - details = self.args[0] - else: - details = self.kwargs.get('details') - return details + return self.args[0] def execute(self): pass
More updates to stale.yml: adding more labels (Bug, Feature, Test Failure) to the stale bot config
@@ -10,10 +10,13 @@ daysUntilClose: 7 # Issues with these labels will never be considered stale exemptLabels: - Confirmed - - Blocker + - Release Blocker - Critical - P1 - P2 + - Bug + - Feature + - Test Failure # Label to use when marking an issue as stale staleLabel: stale
Use the newer one of cmake and cmake3. Summary: On my devgpu, `cmake` is newer than `cmake3`. Using `cmake3` causes compilation to fail. Instead of blindly using `cmake3`, we pick the newer of the two. Pull Request resolved:
@@ -16,11 +16,23 @@ if [ -x "$(command -v rsync)" ]; then fi # We test the presence of cmake3 (for platforms like CentOS and Ubuntu 14.04) -# and use that if so. +# and use the newer of cmake and cmake3 if so. CMAKE_COMMAND="cmake" if [[ -x "$(command -v cmake3)" ]]; then + if [[ -x "$(command -v cmake)" ]]; then + # have both cmake and cmake3, compare versions + CMAKE_VERSION=$(cmake --version | grep 'cmake version' | awk '{print $NF}') + CMAKE3_VERSION=$(cmake3 --version | grep 'cmake version' | awk '{print $NF}') + CMAKE3_IS_NEWER=$($PYTORCH_PYTHON -c "from distutils.version import StrictVersion; print(1 if StrictVersion(\"${CMAKE3_VERSION}\") >= StrictVersion(\"${CMAKE_VERSION}\") else 0)") + else + # don't have cmake + CMAKE3_IS_NEWER=1 + fi + if [[ $CMAKE3_IS_NEWER == "1" ]]; then CMAKE_COMMAND="cmake3" fi + unset CMAKE_VERSION CMAKE3_VERSION CMAKE3_IS_NEWER +fi # Options for building only a subset of the libraries USE_CUDA=0
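The shell change defers the actual comparison to Python via `distutils.version.StrictVersion`. A stand-alone sketch of that comparison with hypothetical version strings (`distutils` is deprecated in newer Pythons, matching the era of this diff):

```python
from distutils.version import StrictVersion

cmake_version = "3.10.2"   # hypothetical output of `cmake --version`
cmake3_version = "3.12.1"  # hypothetical output of `cmake3 --version`

# Prints 1 when cmake3 is at least as new as cmake, mirroring CMAKE3_IS_NEWER above.
print(1 if StrictVersion(cmake3_version) >= StrictVersion(cmake_version) else 0)
```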
Fix problem when generating cross_coupling_array Linux machines do not support the same syntax as Windows when creating multiple vectors using `numpy.linspace()`.
@@ -747,7 +747,7 @@ class Report: ... RHO_ratio=[1.11, 1.14], ... RHOd=30.45, ... RHOs=37.65, - ... oper_speed=1000.0) # doctest: +ELLIPSIS + ... oper_speed=1000.0) >>> report.Qa 23022.32142857143 """ @@ -787,7 +787,8 @@ class Report: Qa_list[-1] = Qa # Defining cross-coupling range to 10*Qa - API 684 - SP6.8.5.8 - cross_coupled_array = np.linspace(0, 10 * Qa_list, steps) + zeros = np.zeros(len(Qa_list)) + cross_coupled_array = np.linspace(zeros, 10 * Qa_list, steps) log_dec = np.zeros(len(cross_coupled_array)) # remove disks and seals from the rotor model
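A minimal NumPy sketch (with made-up `Qa` values) of why the fix works: `numpy.linspace` accepts array-like start/stop values since NumPy 1.16 and broadcasts them, producing one column per `Qa` entry on every platform:

```python
import numpy as np

Qa_list = np.array([10.0, 20.0, 30.0])  # hypothetical Qa values
steps = 5

zeros = np.zeros(len(Qa_list))
cross_coupled_array = np.linspace(zeros, 10 * Qa_list, steps)

print(cross_coupled_array.shape)  # (5, 3): one row per step, one column per Qa
print(cross_coupled_array[:, 0])  # [  0.  25.  50.  75. 100.]
```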
Update mouse grid Use app.register("launch"... ) to correctly check setting after startup
@@ -9,7 +9,6 @@ import math, time import typing mod = Module() - shimmer_effect_enabled = mod.setting( "grid_shimmer_effect_enabled", type=bool, @@ -405,7 +404,6 @@ mg = MouseSnapNine() class GridActions: def grid_activate(): """Brings up a/the grid (mouse grid or otherwise)""" - if mg.start(): ctx.tags = ["user.mouse_grid_showing"] @@ -453,4 +451,4 @@ def check_shimmer_setting_at_startup(): mg.stop() -cron.after("100ms", check_shimmer_setting_at_startup) +app.register("launch", check_shimmer_setting_at_startup)
Protect core config preparation function against non-existing cache It is legal to call KCAS_IOCTL_INSERT_CORE against a non-existing cache (in try_add mode), however in that case core_id has to be provided. Return an error code in case the given cache id does not exist and core_id is set to OCF_CORE_MAX.
@@ -1178,6 +1178,10 @@ int cache_mngt_prepare_core_cfg(struct ocf_mngt_core_config *cfg, if (cmd_info->core_id == OCF_CORE_MAX) { struct cache_priv *cache_priv; + + if (!cache) + return -OCF_ERR_CACHE_NOT_EXIST; + cache_priv = ocf_cache_get_priv(cache); core_id = find_free_core_id(cache_priv->core_id_bitmap); if (core_id == OCF_CORE_MAX)
auth: Fix example code Continuation of The oauth2 version of authorize_redirect is no longer a coroutine, so don't use await in example code. The oauth1 version is still a coroutine, but one twitter example was incorrectly calling it with yield instead of await.
@@ -634,7 +634,7 @@ class OAuth2Mixin(object): if not new_entry: # Call failed; perhaps missing permission? - await self.authorize_redirect() + self.authorize_redirect() return self.finish("Posted a message!") @@ -772,7 +772,7 @@ class TwitterMixin(OAuthMixin): access_token=self.current_user["access_token"]) if not new_entry: # Call failed; perhaps missing permission? - yield self.authorize_redirect() + await self.authorize_redirect() return self.finish("Posted a message!") @@ -954,7 +954,7 @@ class FacebookGraphMixin(OAuth2Mixin): code=self.get_argument("code")) # Save the user with e.g. set_secure_cookie else: - await self.authorize_redirect( + self.authorize_redirect( redirect_uri='/auth/facebookgraph/', client_id=self.settings["facebook_api_key"], extra_params={"scope": "read_stream,offline_access"}) @@ -1072,7 +1072,7 @@ class FacebookGraphMixin(OAuth2Mixin): if not new_entry: # Call failed; perhaps missing permission? - yield self.authorize_redirect() + self.authorize_redirect() return self.finish("Posted a message!")
Add Spotify integration colour Added property `colour` and alias `color` which returns the Spotify integration colour (#1db954). Technically Discord uses both (#1cb050 and #1db954) but it appears the former is an official Spotify colour.
@@ -25,6 +25,7 @@ DEALINGS IN THE SOFTWARE. """ from .enums import ActivityType, try_enum +from .colour import Colour import datetime __all__ = ('Activity', 'Streaming', 'Game', 'Spotify') @@ -456,6 +457,20 @@ class Spotify: """ return ActivityType.listening + @property + def colour(self): + """Returns the Spotify integration colour, as a :class:`Colour`. + + There is an alias for this named :meth:`color`""" + return Colour(0x1db954) + + @property + def color(self): + """Returns the Spotify integration colour, as a :class:`Colour`. + + There is an alias for this named :meth:`colour`""" + return self.colour + def to_dict(self): return { 'flags': 48, # SYNC | PLAY
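A hypothetical usage sketch of the new property; the helper name and embed usage are illustrative, not part of the change:

```python
import discord

def spotify_embed(activity):
    # Use the Spotify brand green added above when the activity is a Spotify session.
    if isinstance(activity, discord.Spotify):
        return discord.Embed(title=activity.title, colour=activity.colour)  # 0x1db954
    return discord.Embed()
```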
Added Open990 to IRS 990 dataset page Pull request suggested by Jed Sundwall. Thanks!
@@ -33,3 +33,7 @@ DataAtWork: URL: https://projects.propublica.org/nonprofits/ AuthorName: ProPublica AuthorURL: https://propublica.org + - Title: Open990 + URL: https://www.open990.com/ + AuthorName: 990 Consulting, LLC + AuthorURL: https://www.990consulting.com/
Addressed issues raised by Nathan and Florian Added a comment to clarify supporting branches and cherry-picking; added a git snippet for merging the master; clarified language in the main branch descriptions; specified where links to the Travis and Codacy checks can be found; fixed a typo in formatting
@@ -24,10 +24,11 @@ We consider _origin/master_ to be the main branch where the source code of HEAD * contains only reviewed code #### The project branch -Every project/experiment has it's own project branch, prefixed with _Proj/_. We consider the project branch to be the main branch where the source code of HEAD always reflects a state with the latest changes used in the project. The project branches themselves should never get merged into the master. Rather, individual changes should be cherry-picked and merged in the master using discrete pull requests. See also *merging strategy* below. +Every project/experiment has it's own project branch, prefixed with _proj/_. The project branch serves as a working branch where new features can be developed and tested before they are merged into the master. +The project branches themselves should never get merged into the master. Rather, individual changes should be cherry-picked unto supporting branches and merged in the master using discrete pull requests. See also *merging strategy* below. -* prefixed with *Proj/* -* never get merged into master +* prefixed with *proj/* +* never gets merged into master * changes get cherry-picked into supporting branches to merge into master @@ -57,6 +58,8 @@ Whenever possible the pull request should: Tests are not mandatory as this is generally hard to make for instruments that interact with hardware. +Travis and codacy run automatically when a pull request is created. Links to the checks can be found when clicking the "show all checks" button that appears in the "conversation" of a pull request. + If you want to get changes from a project branches onto a supporting branch for a pull request consider using the following snippet. ``` git checkout master @@ -90,7 +93,7 @@ An issue should have only one type label assigned. A type label is not supposed An issue can have only one priority type label. A priority label can change over time. ### Issue category labels -Optional extra labels exist to further specify what category an issue belong to. These are repository dependent and prefixed with "Cat: +Optional extra labels exist to further specify what category an issue belong to. These are repository dependent and prefixed with "cat: An issue can have multiple category labels assigned. @@ -106,3 +109,9 @@ During the weekly code cleanup you should: * review pull requests you have been asked to review It is recommended to do this more than once a week. + +The following snippet can be used to update your local branch with the master: +``` +git fetch origin # updates your local copy of the 'origin' +git merge origin/master # merges the master into your branch +```
apply_ban() logic refined Refined the logic for `apply_ban()` even further to be cleaner. (Thanks,
@@ -236,26 +236,26 @@ class Infractions(InfractionScheduler, commands.Cog): Will also remove the banned user from the Big Brother watch list if applicable. """ # In the case of a permanent ban, we don't need get_active_infractions to tell us if one is active - send_msg = kwargs.get("expires_at") is None - active_infraction = await utils.get_active_infraction(ctx, user, "ban", send_msg) + is_temporary = kwargs.get("expires_at") is not None + active_infraction = await utils.get_active_infraction(ctx, user, "ban", is_temporary) if active_infraction: log.trace("Active infractions found.") - if kwargs.get('expires_at') is None: + if is_temporary: + log.trace("Active ban is a temp ban being called by a temp or a perma being called by a temp. Ignore.") + return + if active_infraction.get('expires_at') is not None: log.trace("Active ban is a temporary and being called by a perma. Removing temporary.") - await self.pardon_infraction(ctx, "ban", user, send_msg) + await self.pardon_infraction(ctx, "ban", user, is_temporary) - elif active_infraction.get('expires_at') is None: + else: log.trace("Active ban is a perma ban and being called by a perma. Send bounce back message.") await ctx.send( f":x: According to my records, this user is already permanently banned. " f"See infraction **#{active_infraction['id']}**." ) return - else: - log.trace("Active ban is a temp ban being called by a temp or a perma being called by a temp. Ignore.") - return infraction = await utils.post_infraction(ctx, user, "ban", reason, active=True, **kwargs) if infraction is None:
DOC: mitigate newton optimization not converging. [skip azp] [skip actions]
@@ -251,7 +251,8 @@ def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50, The above is the equivalent of solving for each value in ``(x, a)`` separately in a for-loop, just faster: - >>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,)) + >>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,), + ... maxiter=200) ... for x0, a0 in zip(x, a)] >>> np.allclose(vec_res, loop_res) True @@ -259,8 +260,7 @@ def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50, Plot the results found for all values of ``a``: >>> analytical_result = np.sign(a) * np.abs(a)**(1/3) - >>> fig = plt.figure() - >>> ax = fig.add_subplot(111) + >>> fig, ax = plt.subplots() >>> ax.plot(a, analytical_result, 'o') >>> ax.plot(a, vec_res, '.') >>> ax.set_xlabel('$a$')
docker venv fix * docker venv fix Run once with --upgrade-deps Required to ensure that setuptools is automatically upgraded * Fix comment
@@ -30,15 +30,18 @@ fi # This should be done on the *mounted* filesystem, # so that the installed modules persist! if [[ -n "$INVENTREE_PY_ENV" ]]; then + + if test -d "$INVENTREE_PY_ENV"; then + # venv already exists echo "Using Python virtual environment: ${INVENTREE_PY_ENV}" - # Setup a virtual environment (within the "dev" directory) - python3 -m venv ${INVENTREE_PY_ENV} --system-site-packages + else + # Setup a virtual environment (within the "data/env" directory) + echo "Running first time setup for python environment" + python3 -m venv ${INVENTREE_PY_ENV} --system-site-packages --upgrade-deps + fi - # Activate the virtual environment + # Now activate the venv source ${INVENTREE_PY_ENV}/bin/activate - - # Note: Python packages will have to be installed on first run - # e.g docker-compose run inventree-dev-server invoke update fi cd ${INVENTREE_HOME}
Fix datastore abnormal display with trove backup-show According to the bug description, the datastore displays abnormally when using trove backup-show. The cause of the problem is that the _print_object method does not format the datastore from the result. Fix by formatting the datastore where necessary. Closes-Bug:
@@ -143,6 +143,12 @@ def _print_object(obj): obj._info['id'] = obj.id del(obj._info['str_id']) + # Get datastore type and version, where necessary + if hasattr(obj, 'datastore'): + if 'type' in obj.datastore: + obj._info['datastore'] = obj.datastore['type'] + obj._info['datastore_version'] = obj.datastore['version'] + utils.print_dict(obj._info)
Update to IGV.js 2.2.11 Update the igv javascript from 1.0.9 to 2.2.11. Addresses issue
src="https://ajax.googleapis.com/ajax/libs/jqueryui/1.11.2/jquery-ui.min.js"></script> <!-- IGV JS--> - <script type="text/javascript" src="https://igv.org/web/release/1.0.9/igv-1.0.9.js"></script> + <script type="text/javascript" src="https://igv.org/web/release/2.2.11/dist/igv.min.js"></script> </head> <body> <div class="container-fluid" id="igvDiv" style="padding:5px; border:1px solid lightgray"></div>
fix: Don't unlink file blindly This made sense with the missing_ok. But now, a try-except seems unnecessary too. Also, possibly destructive. Best to stay away from these things.
@@ -507,7 +507,6 @@ def convert_archive_content(sql_file_path): sql_file_path = Path(sql_file_path) os.rename(sql_file_path, old_sql_file_path) - sql_file_path.unlink() sql_file_path.touch() with open(old_sql_file_path) as r, open(sql_file_path, "a") as w:
update dict access * update dict access ``` if 'header' not in options or 'Sec-WebSocket-Key' not in options['header']: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ TypeError: argument of type 'NoneType' is not iterable``` * Remove extra space for linter
@@ -81,7 +81,7 @@ def _get_handshake_headers(resource, url, host, port, options): hostport = _pack_hostname(host) else: hostport = "%s:%d" % (_pack_hostname(host), port) - if "host" in options and options["host"] is not None: + if options.get("host"): headers.append("Host: %s" % options["host"]) else: headers.append("Host: %s" % hostport) @@ -89,7 +89,7 @@ def _get_handshake_headers(resource, url, host, port, options): # scheme indicates whether http or https is used in Origin # The same approach is used in parse_url of _url.py to set default port scheme, url = url.split(":", 1) - if "suppress_origin" not in options or not options["suppress_origin"]: + if not options.get("suppress_origin"): if "origin" in options and options["origin"] is not None: headers.append("Origin: %s" % options["origin"]) elif scheme == "wss": @@ -100,16 +100,16 @@ def _get_handshake_headers(resource, url, host, port, options): key = _create_sec_websocket_key() # Append Sec-WebSocket-Key & Sec-WebSocket-Version if not manually specified - if 'header' not in options or 'Sec-WebSocket-Key' not in options['header']: + if not options.get('header') or 'Sec-WebSocket-Key' not in options['header']: key = _create_sec_websocket_key() headers.append("Sec-WebSocket-Key: %s" % key) else: key = options['header']['Sec-WebSocket-Key'] - if 'header' not in options or 'Sec-WebSocket-Version' not in options['header']: + if not options.get('header') or 'Sec-WebSocket-Version' not in options['header']: headers.append("Sec-WebSocket-Version: %s" % VERSION) - if 'connection' not in options or options['connection'] is None: + if not options.get('connection'): headers.append('Connection: Upgrade') else: headers.append(options['connection']) @@ -118,8 +118,8 @@ def _get_handshake_headers(resource, url, host, port, options): if subprotocols: headers.append("Sec-WebSocket-Protocol: %s" % ",".join(subprotocols)) - if "header" in options: - header = options["header"] + header = options.get("header") + if header: if isinstance(header, dict): header = [ ": ".join([k, v])
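A minimal sketch (hypothetical `options` value) of the failure mode the change fixes:

```python
# A caller passing header=None triggered the TypeError quoted in the message,
# because the old check went on to evaluate `'Sec-WebSocket-Key' not in None`.
options = {"header": None}

# Old:  if 'header' not in options or 'Sec-WebSocket-Key' not in options['header']:
#       -> TypeError: argument of type 'NoneType' is not iterable

# New: options.get('header') is falsy for both a missing key and None, so the
# `or` short-circuits before any membership test can touch None.
if not options.get("header") or "Sec-WebSocket-Key" not in options["header"]:
    print("generate a fresh Sec-WebSocket-Key")
```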
BUG: fixed bug in general unit test Fixed a bug in the `methods.general` unit tests caused by improper cycling through dict keys. Also removed use of 'inplace' to reduce Warnings.
@@ -151,23 +151,23 @@ def remove_leading_text(inst, target=None): for prepend_str in target: if isinstance(inst.data, pds.DataFrame): - inst.data.rename(columns=lambda x: x.split(prepend_str)[-1], - inplace=True) + inst.data = inst.data.rename( + columns=lambda x: x.split(prepend_str)[-1]) else: - map = {} + map_keys = {} for key in inst.data.variables.keys(): - map[key] = key.split(prepend_str)[-1] - inst.data = inst.data.rename(name_dict=map) + map_keys[key] = key.split(prepend_str)[-1] + inst.data = inst.data.rename(name_dict=map_keys) - inst.meta.data.rename(index=lambda x: x.split(prepend_str)[-1], - inplace=True) - orig_keys = inst.meta.keys_nD() + inst.meta.data = inst.meta.data.rename( + index=lambda x: x.split(prepend_str)[-1]) + orig_keys = [kk for kk in inst.meta.keys_nD()] for keynd in orig_keys: if keynd.find(prepend_str) >= 0: new_key = keynd.split(prepend_str)[-1] new_meta = inst.meta.pop(keynd) - new_meta.data.rename(index=lambda x: x.split(prepend_str)[-1], - inplace=True) + new_meta.data = new_meta.data.rename( + index=lambda x: x.split(prepend_str)[-1]) inst.meta[new_key] = new_meta return
SUPP: Disable BigQuery explicitly in all/test_join.py Strangely, `pytest.mark.only_on_backends` doesn't skip BigQuery, so I added an explicit skip as a workaround. Author: Li Jin Closes from icexelloss/disable-test-join-bigquery and squashes the following commits: [Li Jin] Fix comments [Li Jin] SUPP: Disable BigQuery explicitly in all/test_join.py
@@ -2,7 +2,7 @@ import pandas as pd import pytest from pytest import param -from ibis.tests.backends import Csv, Pandas, PySpark +from ibis.tests.backends import BigQuery, Csv, Pandas, PySpark # add here backends that passes join tests all_db_join_supported = [Pandas, PySpark] @@ -30,8 +30,9 @@ all_db_join_supported = [Pandas, PySpark] ], ) @pytest.mark.only_on_backends(all_db_join_supported) -# Csv is a subclass of Pandas so need to skip it explicited [email protected]_backends([Csv]) +# Csv is a subclass of Pandas so need to skip it explicitly. +# BigQuery is also not skipped for unknown reason. [email protected]_backends([Csv, BigQuery]) @pytest.mark.xfail_unsupported def test_join_project_left_table(backend, con, batting, awards_players, how):
Fix inverted confusion_matrix axis confusion_matrix accepts two arrays, but the first one should be the true values and the second one the predictions to compare against them. They were inverted right here.
@@ -48,8 +48,8 @@ class ConfusionMatrix: self.idx2label = {idx: str(label) for idx, label in enumerate(np.unique( [self.predictions, self.conditions]))} - self.cm = confusion_matrix(self.predictions, - self.conditions, + self.cm = confusion_matrix(self.conditions, + self.predictions, labels=labels, sample_weight=sample_weight)
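A minimal scikit-learn sketch of the argument order the fix restores (`y_true` first, `y_pred` second), with made-up labels:

```python
from sklearn.metrics import confusion_matrix

y_true = [0, 0, 1, 1]
y_pred = [0, 1, 1, 1]

# Rows index the true label, columns the predicted label.
print(confusion_matrix(y_true, y_pred))
# [[1 1]
#  [0 2]]
```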
Fix - added JSON support to all resources; the encode method was missing for WebpublishRestApiResource
@@ -38,18 +38,11 @@ class WebpublishApiEndpoint(ResourceRestApiEndpoint): return self.resource.dbcon -class RestApiResource: - """Resource carrying needed info and Avalon DB connection for publish.""" - def __init__(self, server_manager, executable, upload_dir, - studio_task_queue=None): - self.server_manager = server_manager - self.upload_dir = upload_dir - self.executable = executable - - if studio_task_queue is None: - studio_task_queue = collections.deque().dequeu - self.studio_task_queue = studio_task_queue +class JsonApiResource: + """Resource for json manipulation. + All resources handling sending output to REST should inherit from + """ @staticmethod def json_dump_handler(value): if isinstance(value, datetime.datetime): @@ -69,7 +62,20 @@ class RestApiResource: ).encode("utf-8") -class WebpublishRestApiResource: +class RestApiResource(JsonApiResource): + """Resource carrying needed info and Avalon DB connection for publish.""" + def __init__(self, server_manager, executable, upload_dir, + studio_task_queue=None): + self.server_manager = server_manager + self.upload_dir = upload_dir + self.executable = executable + + if studio_task_queue is None: + studio_task_queue = collections.deque().dequeu + self.studio_task_queue = studio_task_queue + + +class WebpublishRestApiResource(JsonApiResource): """Resource carrying OP DB connection for storing batch info into DB.""" def __init__(self):
[JIT] Optimize before inlining Summary: This speeds up the inlining pass of FairSeq model from 180s -> 13s. Pull Request resolved:
#include <torch/csrc/jit/api/function_impl.h> -#include <torch/csrc/jit/passes/inliner.h> - #include <torch/csrc/jit/frontend/error_report.h> +#include <torch/csrc/jit/passes/inliner.h> +#include <torch/csrc/jit/passes/peephole.h> +#include "torch/csrc/jit/passes/constant_propagation.h" namespace torch { namespace jit { @@ -62,8 +63,12 @@ const c10::FunctionSchema& GraphFunction::getSchema() const { } void preoptimizeGraph(std::shared_ptr<Graph>& graph) { - // TODO: Invoke cleanup passes before and after inlining to reduce amount of - // code we're copying. + // Peephole Optimize cleans up many "is None" checks and creates constant prop + // opportunities + PeepholeOptimize(graph); + // AliasDb construction can be slow, so run it just on immutable types + // to clean up constant Ifs & other easy wins + ConstantPropagationImmutableTypes(graph); Inline(*graph); }
IOS: add support for VRF for get_arp_table func Simply takes the vrf function arg into account and executes the expected IOS command.
@@ -2175,12 +2175,12 @@ class IOSDriver(NetworkDriver): ] """ if vrf: - msg = "VRF support has not been added for this getter on this platform." - raise NotImplementedError(msg) + command = 'show arp vrf {} | exclude Incomplete'.format(vrf) + else: + command = 'show arp | exclude Incomplete' arp_table = [] - command = "show arp | exclude Incomplete" output = self._send_command(command) # Skip the first line which is a header
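A quick sketch of the command string the getter now builds; the VRF name and helper function are hypothetical:

```python
def build_arp_command(vrf=""):
    # Mirrors the branching added in the diff above.
    if vrf:
        return 'show arp vrf {} | exclude Incomplete'.format(vrf)
    return 'show arp | exclude Incomplete'

print(build_arp_command("MGMT"))  # show arp vrf MGMT | exclude Incomplete
print(build_arp_command())        # show arp | exclude Incomplete
```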
Only install PyQt5 when it cannot be imported to prevent double installation under conda.
@@ -12,8 +12,23 @@ with open('README_PYPI.md', encoding='utf-8') as f: # version_nr contains ... well ... the version in the form __version__ = '0.1b10' version_nr = {} -with open("pyfda/version.py", encoding='utf-8') as fp: - exec(fp.read(), version_nr) +with open("pyfda/version.py", encoding='utf-8') as f_v: + exec(f_v.read(), version_nr) + +requirements_list = [] +with open("requirements.txt", encoding='utf-8') as f_r: + exec(f_r.read(), requirements_list) + +try: + import PyQt5 + requirements_list.remove('pyqt5') + print("PyQt5 {0} is already installed, skipping.\n{1}".format(PyQt5.QtCore.QT_VERSION_STR, requirements_list)) + # try to prevent installing library twice under conda where lib is listed + # as "pyqt" for backward compatibility with PyQt4 +except ImportError: + print("PyQt5 will be installed.\n{1}".format(requirements_list)) + + setup( name = 'pyfda', @@ -28,6 +43,7 @@ setup( author_email = '[email protected]', license = 'MIT', platforms = ['any'], + install_requires = requirements_list, # See https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[
Update phishing.txt ```.zip``` files contain PHP-based distros of phishing tools. Not malware either, so let it be detected by the ```phishing``` trail.
phish-education.apwg.org csd.link frog.wix.ru + +# Reference: https://twitter.com/malwrhunterteam/status/1031899551542591490 + +avataarhornefashion.com +chinaspacplus.com + +# Reference: https://twitter.com/malwrhunterteam/status/1031901896234033153 + +ytvertkn.tk
Adds advancedOptions argument to do_std_practice_gst(...) Allows more advanced usage of `do_std_practice_gst`, making it much more flexible with the addition of a single argument.
@@ -621,7 +621,7 @@ def do_long_sequence_gst_base(dataFilenameOrSet, targetGateFilenameOrSet, def do_stdpractice_gst(dataFilenameOrSet,targetGateFilenameOrSet, prepStrsListOrFilename, effectStrsListOrFilename, germsListOrFilename, maxLengths, modes="TP,CPTP,Target", - comm=None, memLimit=None, verbosity=2): + comm=None, memLimit=None, advancedOptions=None, verbosity=2): """ Perform end-to-end GST analysis using standard practices. @@ -683,6 +683,14 @@ def do_stdpractice_gst(dataFilenameOrSet,targetGateFilenameOrSet, A rough memory limit in bytes which restricts the amount of memory used (per core when run on multi-CPUs). + advancedOptions : dict, optional + Specifies advanced options most of which deal with numerical details of + the objective function or expert-level functionality. Keys of this + dictionary can be any of the modes being computed (see the `modes` + argument) or 'all', which applies to all modes. Values are + dictionaries of advanced arguements - see :func:`do_long_sequence_gst` + for a list of the allowed keys for each such dictionary. + verbosity : int, optional The 'verbosity' option is an integer specifying the level of detail printed to stdout during the calculation. @@ -728,7 +736,13 @@ def do_stdpractice_gst(dataFilenameOrSet,targetGateFilenameOrSet, est_label = parameterization = mode #for now, 1-1 correspondence tgt = gs_target.copy(); tgt.set_all_parameterizations(parameterization) - advanced = {'appendTo': ret, 'estimateLabel': est_label } + + #prepare advanced options dictionary + if advancedOptions is not None: + advanced = advancedOptions.get('all',{}) + advanced.update( advancedOptions.get(mode,{}) ) + else: advanced = {} + advanced.update( {'appendTo': ret, 'estimateLabel': est_label } ) ret = do_long_sequence_gst(ds, tgt, prepStrsListOrFilename, effectStrsListOrFilename, germsListOrFilename,
Window resizeable argument change The resizeable argument was having no impact on MainWindow whether set to True or False. This allows for a fixed window on MainWindow creation.
@@ -41,9 +41,12 @@ class Window: self.native = WinForms.Form(self) self.native.ClientSize = Size(*self.interface._size) self.native.interface = self.interface - self.native.Resize += self.winforms_resize self.toolbar_native = None self.toolbar_items = None + if not self.native.interface.resizeable: + self.native.FormBorderStyle = self.native.FormBorderStyle.FixedSingle; + elif self.native.interface.resizeable: + self.native.Resize += self.winforms_resize def create_toolbar(self): self.toolbar_native = WinForms.ToolStrip()
Infraction Date Humanization Changed to use the format `"%Y-%m-%d %H:%M"`, which renders like `2019-09-18 13:59`
@@ -1260,11 +1260,11 @@ class Moderation(Scheduler, Cog): active = infraction_object["active"] user_id = infraction_object["user"] hidden = infraction_object["hidden"] - created = datetime.fromisoformat(infraction_object["inserted_at"].strftime("%c")) + created = datetime.fromisoformat(infraction_object["inserted_at"].strftime("%Y-%m-%d %H:%M")) if not infraction_object["expires_at"]: expires = "*Permanent*" else: - expires = datetime.fromisoformat(infraction_object["expires_at"].strftime("%c")) + expires = datetime.fromisoformat(infraction_object["expires_at"].strftime("%Y-%m-%d %H:%M")) lines = textwrap.dedent(f""" {"**===============**" if active else "==============="}
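A one-liner showing the new format on a hypothetical timestamp, matching the example in the message:

```python
from datetime import datetime

created = datetime(2019, 9, 18, 13, 59)
print(created.strftime("%Y-%m-%d %H:%M"))  # 2019-09-18 13:59
# The old "%c" format would instead give something like "Wed Sep 18 13:59:00 2019".
```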
Fixes bug in cloud-noise model creation so "S" and "D" paramroots work correctly. Adds logic needed to make the ham_basis and/or other_basis None when the corresponding types of error generators are not in the model as described by `paramroot`.
@@ -940,8 +940,13 @@ def _get_lindblad_factory(simulator, parameterization, errcomp_type, sparse_lind if parameterization == "CPTP": p = "GLND" elif "S" in parameterization: p = parameterization.replace("S", "s") elif "D" in parameterization: p = parameterization.replace("D", "d") - _, evotype, nonham_mode, param_mode = _op.LindbladOp.decomp_paramtype(p) - return _op.LindbladErrorgen.from_error_generator(error_gen, proj_basis, proj_basis, + bTyp, evotype, nonham_mode, param_mode = _op.LindbladOp.decomp_paramtype(p) + + #Same logic as in LindbladOp.from_operation_obj -- TODO consolidate? + ham_basis = proj_basis if (("H" == bTyp) or ("H+" in bTyp) or bTyp in ("CPTP", "GLND")) else None + nonham_basis = None if bTyp == "H" else proj_basis + + return _op.LindbladErrorgen.from_error_generator(error_gen, ham_basis, nonham_basis, param_mode, nonham_mode, mx_basis, truncate=True, evotype=evotype) return _f
Fixed a bug where the last chunk was treated as a partial chunk and written out a second time to a different file at the end of the loop.
@@ -137,7 +137,6 @@ def process_gromacs_xtc(queue, processname, totframes, fchunksize, totalchunks, chunkcount = starting_chunk + 1 # Offset the chunkcount by 1 lastchunksize = totframes - (totalchunks * fchunksize) outAL = [] - addzero = "" for curframe in range(first_frame, last_frame): j = last_frame - curframe mdt[curframe] @@ -191,23 +190,22 @@ def process_gromacs_xtc(queue, processname, totframes, fchunksize, totalchunks, # Flush the frames to disk if (i == fchunksize - 1): - myfilename = mdsys.opts.outfile + str(chunkcount).zfill(2) + \ - "_outof_" + str(totalchunks) + ".npy" - print "Flushing chunk (%d records) %d out of %d to file %s" % (i + 1, chunkcount, totalchunks, myfilename) - #~ np.save(myfilename, convert_to_helgi_format(outA)) - np.save(myfilename, outA) + flush_chunk_to_file(processname, i, outA, mdsys.opts.outfile, chunkcount, totalchunks) i = -1 outAL = [] chunkcount = chunkcount + 1 - addzero = "" i = i + 1 + if (i != 0): # Saves to disk the eventually remaining frames after # the last full chunk of data has been written - myfilename = mdsys.opts.outfile + str(chunkcount).zfill(2) + \ - "_outof_" + str(totalchunks) + ".npy" - print "Flushing last chunk (%d records) %d out of %d to file %s" % (i + 1, chunkcount, totalchunks, myfilename) + flush_chunk_to_file(processname, i, outA, mdsys.opts.outfile, chunkcount, totalchunks) +def flush_chunk_to_file(processname, i, outA, outfile, chunkcount, totalchunks): + myfilename = outfile + str(chunkcount).zfill(2) + \ + "_outof_" + str(totalchunks) + ".npy" + print "[%d] Flushing chunk (%d records) %d out of %d to file %s" % (processname, i + 1, chunkcount, totalchunks, myfilename) + #~ np.save(myfilename, convert_to_helgi_format(outA)) np.save(myfilename, outA) def main():
[ROCm] Enable wrongly skipped tests on CPU on ROCm Summary: `skipIfRocm` skips the test on ROCm regardless of device type [CPU or GPU]. `skipCUDAIfRocm` skips only on GPU on ROCm and runs the test on CPU. ezyang iotamudelta Pull Request resolved:
@@ -9373,7 +9373,7 @@ class TestNNDeviceType(NNTestCase): grad_input, = torch.autograd.grad(output, input, create_graph=True) grad_input.sum().backward() - @skipIfRocm + @skipCUDAIfRocm @largeCUDATensorTest('12GB') def test_conv_large_nosplit(self, device): # Here we just test the convolution correctly route to the fallback implementation @@ -9484,7 +9484,7 @@ class TestNNDeviceType(NNTestCase): self.assertEqual(maxdiff2, 0) self.assertEqual(maxdiff3, 0) - @skipIfRocm + @skipCUDAIfRocm @largeCUDATensorTest('12GB') def test_conv_large(self, device): dtype = torch.half if self.device_type == 'cuda' else torch.float
send wakeup chars to wake sleeping devices before talking to them per
@@ -52,6 +52,7 @@ import google.protobuf.json_format import serial import threading import logging +import time import sys import traceback from . import mesh_pb2 @@ -347,9 +348,17 @@ class StreamInterface(MeshInterface): self.stream = serial.Serial( devPath, 921600, exclusive=True, timeout=0.5) self._rxThread = threading.Thread(target=self.__reader, args=()) - self._rxThread.start() + + # Send some bogus UART characters to force a sleeping device to wake + self.stream.write(bytes([START1, START1, START1, START1])) + self.stream.flush() + time.sleep(0.1) # wait 100ms to give device time to start running + MeshInterface.__init__(self, debugOut=debugOut, noProto=noProto) + # Start the reader thread after superclass constructor completes init + self._rxThread.start() + def _sendToRadio(self, toRadio): """Send a ToRadio protobuf to the device""" logging.debug(f"Sending: {toRadio}")
qEI bugfix to use objective for computing incumbent best Summary: The get_acquisition was using f to compute the current best rather than objective(f). This also fixes the seed as discussed.
@@ -59,7 +59,7 @@ def get_acquisition_function( if acquisition_function_name == "qEI": return qExpectedImprovement( model=model, - best_f=model.posterior(X_observed).mean.max().item(), + best_f=objective(model.posterior(X_observed).mean).max().item(), objective=objective, constraints=constraints, X_pending=X_pending, @@ -69,7 +69,7 @@ def get_acquisition_function( elif acquisition_function_name == "qPI": return qProbabilityOfImprovement( model=model, - best_f=model.posterior(X_observed).mean.max().item(), + best_f=objective(model.posterior(X_observed).mean).max().item(), objective=objective, constraints=constraints, X_pending=X_pending,
Sync tests: test _get_confirmation_result for small diffs Should always return True and the given message if the diff size is too small.
@@ -351,3 +351,22 @@ class SyncerSyncTests(unittest.TestCase): self.syncer._get_confirmation_result.assert_called_once() self.assertEqual(self.syncer._get_confirmation_result.call_args[0][1], author) self.assertEqual(self.syncer._get_confirmation_result.call_args[0][2], message) + + def test_confirmation_result_small_diff(self): + """Should always return True and the given message if the diff size is too small.""" + self.syncer.MAX_DIFF = 3 + author = helpers.MockMember() + expected_message = helpers.MockMessage() + + for size in (3, 2): + with self.subTest(size=size): + self.syncer._send_prompt = helpers.AsyncMock() + self.syncer._wait_for_confirmation = helpers.AsyncMock() + + coro = self.syncer._get_confirmation_result(size, author, expected_message) + result, actual_message = asyncio.run(coro) + + self.assertTrue(result) + self.assertEqual(actual_message, expected_message) + self.syncer._send_prompt.assert_not_called() + self.syncer._wait_for_confirmation.assert_not_called()
Adds the universe repository to the used sources This change is required to support Ubuntu Server 18.04.01, which by default doesn't ship with universe. Universe contains python3-venv, which is needed for tljh
@@ -66,6 +66,7 @@ def main(): else: logger.info('Setting up hub environment') initial_setup = True + subprocess.check_output(['add-apt-repository', 'universe'], stderr=subprocess.STDOUT) subprocess.check_output(['apt-get', 'update', '--yes'], stderr=subprocess.STDOUT) subprocess.check_output(['apt-get', 'install', '--yes', 'python3', 'python3-venv', 'git'], stderr=subprocess.STDOUT) logger.info('Installed python & virtual environment')
[p4a] Check if p4a.fork/p4a.branch changed... and if so, remove the old p4a installation so the new one can be installed
@@ -32,7 +32,7 @@ from buildozer.target import Target from os import environ from os.path import exists, join, realpath, expanduser, basename, relpath from platform import architecture -from shutil import copyfile +from shutil import copyfile, rmtree from glob import glob from buildozer.libs.version import parse @@ -634,6 +634,24 @@ class TargetAndroid(Target): self.buildozer.error('') raise BuildozerException() else: + # check that fork/branch has not been changed + if self.buildozer.file_exists(pa_dir): + cur_fork = cmd( + 'git config --get remote.origin.url', + get_stdout=True, + cwd=pa_dir, + )[0].split('/')[3] + cur_branch = cmd( + 'git branch -vv', get_stdout=True, cwd=pa_dir + )[0].split()[1] + if any([cur_fork != p4a_fork, cur_branch != p4a_branch]): + self.buildozer.info( + "Detected old fork/branch ({}/{}), deleting...".format( + cur_fork, cur_branch + ) + ) + rmtree(pa_dir) + if not self.buildozer.file_exists(pa_dir): cmd( (
instruments/energy_measurments: Improve instrument description Add note to users that all configuration for the backends should be added through this instrument rather than directly.
@@ -366,6 +366,9 @@ class EnergyMeasurement(Instrument): description = """ This instrument is designed to be used as an interface to the various energy measurement instruments located in devlib. + + This instrument should be used to provide configuration for any of the + Energy Instrument Backends rather than specifying configuration directly. """ parameters = [
Changed link for document.cookie blacklist Link was not working due to use of period in title.
@@ -54,7 +54,7 @@ Cross-site scripting (XSS) is a type of computer security vulnerability typicall - [Bypass space filter](#bypass-space-filter) - [Bypass email filter](#bypass-email-filter) - [Bypass document blacklist](#bypass-document-blacklist) - - [Bypass document.cookie blacklist](#bypass-document.cookie-blacklist) + - [Bypass document.cookie blacklist](#bypass-document-cookie-blacklist) - [Bypass using javascript inside a string](#bypass-using-javascript-inside-a-string) - [Bypass using an alternate way to redirect](#bypass-using-an-alternate-way-to-redirect) - [Bypass using an alternate way to execute an alert](#bypass-using-an-alternate-way-to-execute-an-alert)
Add design decision for resource property renaming Closes
@@ -16,5 +16,22 @@ It is intended as a reference. in the same zone. By requiring the zone across the board, it is less likely to lead to a miss match. (Related to 63_.) +- **Name property updates will result in cloud-dependent code.** + + Some providers (e.g., GCE, Azure) do not allow names of resources to be + changed after a resource has been created. Similarly, AWS does not allow VM + firewall (i.e., security group) names to be changed. Providers seem to be + gravitating toward use of tags (or labels) to support arbitrary naming and + name changes. Yet, OpenStack for example, does not have a concept of resource + tags so CloudBridge cannot rely solely on tags. Further, tags do not need to + be unique across multiple resources, while names do (at least for some + resources, such as vmfirewalls within a private network). Overall, consistency + is challenging to achieve with resource renaming. With that, CloudBridge will + support resource renaming to the best extent possible and balance between the + use of resource name property and resource tags. However, because of the + inconsistency of rename functionality across the providers, using the rename + capabilities within CloudBridge will lead to cloud-dependent code. (Related to + 131_.) .. _63: https://github.com/CloudVE/cloudbridge/issues/63 + .. _131: https://github.com/CloudVE/cloudbridge/issues/131
adding a line space adding a line space, should be double
@@ -6,6 +6,7 @@ import re from timesketch.lib.analyzers import interface from timesketch.lib.analyzers import manager + class WinCrashSketchPlugin(interface.BaseSketchAnalyzer): """Sketch analyzer for Windows application crashes.""" @@ -191,4 +192,5 @@ class WinCrashSketchPlugin(interface.BaseSketchAnalyzer): 's' if len(filenames) > 1 else '', ', '.join(filenames)) + manager.AnalysisManager.register_analyzer(WinCrashSketchPlugin)
doc: Update sections example. Modify documentation and example to better explain sections
@@ -437,20 +437,19 @@ Sections -------- It is a common requirement to be able to run the same set of workloads under -different device configurations. E.g. you may want to investigate impact of +different device configurations. E.g. you may want to investigate the impact of changing a particular setting to different values on the benchmark scores, or to quantify the impact of enabling a particular feature in the kernel. WA allows this by defining "sections" of configuration with an agenda. -For example, suppose what we really want, is to measure the impact of using -interactive cpufreq governor vs the performance governor on the three -benchmarks. We could create another three workload spec entries similar to the -ones we already have and change the sysfile value being set to "interactive". -However, this introduces a lot of duplication; and what if we want to change -spec configuration? We would have to change it in multiple places, running the -risk of forgetting one. +For example, suppose that we want to measure the impact of using 3 different +cpufreq governors on 2 benchmarks. We could create 6 separate workload specs +and set the governor runtime parameter for each entry. However, this +introduces a lot of duplication; and what if we want to change spec +configuration? We would have to change it in multiple places, running the risk +of forgetting one. -A better way is to keep the three workload specs and define a section for each +A better way is to keep the two workload specs and define a section for each governor: .. code-block:: yaml @@ -471,6 +470,9 @@ governor: - id: inter runtime_params: cpu0_governor: interactive + - id: sched + runtime_params: + cpu0_governor: sched workloads: - id: 01_dhry name: dhrystone
Update elf_mirai.txt Trivial update, because basically we have detected the so-called ```Echobot``` for a long, long time already. :)
@@ -3859,6 +3859,7 @@ senpai.site /tnx12015.sh # Reference: https://blog.trendmicro.com/trendlabs-security-intelligence/bashlite-iot-malware-updated-with-mining-and-backdoor-commands-targets-wemo-devices/ +# Reference: https://blogs.akamai.com/sitr/2019/06/latest-echobot-26-infection-vectors.html /ECHOBOT.arc /ECHOBOT.arm @@ -3878,9 +3879,11 @@ senpai.site /ECHOBOT.mips /ECHOBOT.mips64 /ECHOBOT.mpsl +/ECHOBOT.mipsel /ECHOBOT.ppc /ECHOBOT.ppc440 /ECHOBOT.root +/ECHOBOT.sh /ECHOBOT.sh4 /ECHOBOT.spc /ECHOBOT.sparc @@ -3889,6 +3892,7 @@ senpai.site /ECHOBOT.x86 /ECHOBOT.x86_32 /ECHOBOT.x86_64 +/ECHOBOT1.sh /UqHDZbqr9S.sh # Reference: https://twitter.com/_odisseus/status/1114055047221006336
Increase the timeout between sending take-question API calls to a minute. This was bombarding the backend with calls every second as a user was typing, which in turn would update the database and reindex the question in Elastic.
} } - $('#id_content').on('keyup', _.throttle(takeQuestion, 1000)); + $('#id_content').on('keyup', _.throttle(takeQuestion, 60000)); $(document).on('click', '#details-edit', function(ev) { ev.preventDefault();
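The actual fix above is JavaScript (Underscore's `_.throttle`); purely as an illustration of the throttling idea, here is a minimal Python sketch — the decorator and the `take_question` call are hypothetical stand-ins:

```python
import time

def throttle(wait_seconds):
    """Return a decorator that lets the wrapped function run at most once per wait_seconds."""
    def decorator(func):
        last_call = [float("-inf")]
        def wrapper(*args, **kwargs):
            now = time.monotonic()
            if now - last_call[0] >= wait_seconds:
                last_call[0] = now
                return func(*args, **kwargs)
        return wrapper
    return decorator

@throttle(60)
def take_question():
    print("POST /take-question")  # hypothetical endpoint; fires at most once a minute

for _ in range(5):
    take_question()  # only the first call in this burst actually goes through
```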
Fix random state generator Answers Authors: - Victor Lafargue (https://github.com/viclafargue) Approvers: - Divye Gala (https://github.com/divyegala) - John Zedlewski (https://github.com/JohnZed) URL:
-# Copyright (c) 2020, NVIDIA CORPORATION. +# Copyright (c) 2020-2021, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -25,16 +25,9 @@ def _create_rs_generator(random_state): The random_state from which the CuPy random state is generated """ - if hasattr(random_state, '__module__'): - rs_type = random_state.__module__ + '.' + type(random_state).__name__ - else: - rs_type = type(random_state).__name__ - - rs = None - if rs_type == "NoneType" or rs_type == "int": - rs = cp.random.RandomState(seed=random_state) - elif rs_type == "cupy.random.generator.RandomState": - rs = rs_type + if isinstance(random_state, (type(None), int)): + return cp.random.RandomState(seed=random_state) + elif isinstance(random_state, cp.random.RandomState): + return random_state else: raise ValueError('random_state type must be int or CuPy RandomState') - return rs
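The patch replaces string-based module/type-name inspection with plain `isinstance` checks. A minimal sketch of the same normalization pattern, using NumPy's `RandomState` as a stand-in for CuPy's (the cuML code uses `cp.random.RandomState`):

```python
import numpy as np

def create_rs_generator(random_state):
    """Normalize `random_state` (None, int seed, or RandomState) into a RandomState."""
    if isinstance(random_state, (type(None), int)):
        # None or an int seed: build a fresh generator
        return np.random.RandomState(seed=random_state)
    elif isinstance(random_state, np.random.RandomState):
        # Already a generator: pass it through unchanged
        return random_state
    raise ValueError('random_state type must be int or RandomState')

print(create_rs_generator(42).randint(0, 10, size=3))
print(create_rs_generator(None).rand())
```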
flip_update_test binary image must be covered when debug=plot
@@ -882,12 +882,14 @@ def test_plantcv_flip(): pcv.params.debug_outdir = cache_dir # Read in test data img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR)) + img_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY)) # Test with debug = "print" pcv.params.debug = "print" _ = pcv.flip(img=img, direction="horizontal") # Test with debug = "plot" pcv.params.debug = "plot" _ = pcv.flip(img=img, direction="vertical") + _ = pcv.flip(img=img_binary, direction="vertical") # Test with debug = None pcv.params.debug = None flipped_img = pcv.flip(img=img, direction="horizontal") @@ -896,11 +898,9 @@ def test_plantcv_flip(): def test_plantcv_flip_bad_input(): img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR)) - img_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY)) pcv.params.debug = None with pytest.raises(RuntimeError): _ = pcv.flip(img=img, direction="vert") - _ = pcv.flip(img=img_binary, direction="horiz") def test_plantcv_fluor_fvfm():
Update snakemake to handle a bug in groups processing. Reference:
@@ -13,7 +13,7 @@ config = default_config # minimum required snakemake version -min_version("5.1.3") +min_version("5.2.2") def get_conda_envs_dir(): if config.get("yaml_dir"): @@ -160,9 +160,6 @@ elif config.get("workflow", "complete") == "complete": if config.get("perform_genome_binning", True): # later update to include others as well as integrate concoct and metabat REPORTS.append("reports/bin_report_maxbin.html") - # include this? - # expand("{sample}/binning/{binner}/cluster_attribution.tsv", - # binner=config['binner'], sample =SAMPLES) include: "rules/binning.snakefile" localrules: all @@ -184,7 +181,6 @@ elif config.get("workflow", "complete") == "complete": if config.get("perform_genome_binning", True): - rule merge_sample_tables: input: prokka = "{sample}/annotation/prokka/{sample}_plus.tsv", @@ -206,7 +202,6 @@ elif config.get("workflow", "complete") == "complete": {input.refseq} \ {output}" - else: rule merge_sample_tables: input: @@ -223,7 +218,6 @@ elif config.get("workflow", "complete") == "complete": {output}" - elif config.get("workflow") == "binning": # define paired end or not @@ -232,7 +226,7 @@ elif config.get("workflow") == "binning": MULTIFILE_FRACTIONS = ['R1', 'R2', 'se'] if PAIRED_END else ['se'] RAW_INPUT_FRACTIONS = ['R1', 'R2'] if PAIRED_END else ['se'] - + localrules: all rule all: input: expand("{sample}/binning/{binner}/cluster_attribution.tsv",
docs(ruby): update the list of GCP environments in AUTHENTICATION.md pr
@@ -41,7 +41,7 @@ code. 1. Specify project ID in method arguments 2. Specify project ID in configuration 3. Discover project ID in environment variables -4. Discover GCE project ID +4. Discover GCP project ID 5. Discover project ID in credentials JSON **Credentials** are discovered in the following order: @@ -51,36 +51,14 @@ code. 3. Discover credentials path in environment variables 4. Discover credentials JSON in environment variables 5. Discover credentials file in the Cloud SDK's path -6. Discover GCE credentials +6. Discover GCP credentials ### Google Cloud Platform environments -While running on Google Cloud Platform environments such as Google Compute -Engine, Google App Engine and Google Kubernetes Engine, no extra work is needed. -The **Project ID** and **Credentials** and are discovered automatically. Code -should be written as if already authenticated. Just be sure when you [set up the -GCE instance][gce-how-to], you add the correct scopes for the APIs you want to -access. For example: - - * **All APIs** - * `https://www.googleapis.com/auth/cloud-platform` - * `https://www.googleapis.com/auth/cloud-platform.read-only` - * **BigQuery** - * `https://www.googleapis.com/auth/bigquery` - * `https://www.googleapis.com/auth/bigquery.insertdata` - * **Compute Engine** - * `https://www.googleapis.com/auth/compute` - * **Datastore** - * `https://www.googleapis.com/auth/datastore` - * `https://www.googleapis.com/auth/userinfo.email` - * **DNS** - * `https://www.googleapis.com/auth/ndev.clouddns.readwrite` - * **Pub/Sub** - * `https://www.googleapis.com/auth/pubsub` - * **Storage** - * `https://www.googleapis.com/auth/devstorage.full_control` - * `https://www.googleapis.com/auth/devstorage.read_only` - * `https://www.googleapis.com/auth/devstorage.read_write` +When running on Google Cloud Platform (GCP), including Google Compute Engine (GCE), +Google Kubernetes Engine (GKE), Google App Engine (GAE), Google Cloud Functions +(GCF) and Cloud Run, the **Project ID** and **Credentials** and are discovered +automatically. Code should be written as if already authenticated. ### Environment Variables
Update .readthedocs.yml Python 3.8 is still not supported by read the docs (https://github.com/readthedocs/readthedocs.org/issues/6324), so reverting the change to 3.7 as this also fails. We need to wait for rtd to add support for us.
@@ -9,8 +9,7 @@ sphinx: formats: all python: - # TODO: Move to 3.8 when supported by rtd https://github.com/readthedocs/readthedocs.org/issues/6324 - version: 3.7 + version: 3.8 install: - method: pip path: .
Update dace/codegen/targets/rtl.py Implemented suggestion to remove unnecessary comment by definelicht
@@ -488,7 +488,7 @@ model->s_axis_{name}_tdata = {name}[0];''' elif isinstance(arr, data.Stream): buses[edge.dst_conn] = (edge.data.data, False, total_size, vec_len, edge.data.volume) elif isinstance(arr, data.Scalar): - scalars[edge.dst_conn] = (False, total_size) #(edge.data.data, False, total_size, 1, edge.data.volume) + scalars[edge.dst_conn] = (False, total_size) for edge in state.out_edges(tasklet): arr = sdfg.arrays[edge.data.data]
flake8: run the workflow conditionally We don't need to run flake8 on ansible modules and their tests if we don't have any modifications.
name: flake8 -on: [pull_request] +on: + pull_request: + paths: + - 'library/**.py' + - 'tests/conftest.py' + - 'tests/library/**.py' + - 'tests/functional/tests/**.py' jobs: build: runs-on: ubuntu-latest
ref(RemoteContentFEhandling): added remote content check to Available channels page
import ChannelTokenModal from './ChannelTokenModal'; import ChannelUpdateModal from './ChannelUpdateModal'; import { getFreeSpaceOnServer } from './api'; + import plugin_data from 'plugin_data'; export default { name: 'AvailableChannelsPage', freeSpace: null, disableBottomBar: false, disableModal: false, + remoteContentEnabled: plugin_data.isRemoteContent, }; }, computed: { return this.channelsAreAvailable && (this.inRemoteImportMode || this.isStudioApplication); }, notEnoughFreeSpace() { + // if the REMOTE_CONTENT option is true, we should not be submitting disk space issues + if (this.remoteContentEnabled) { + return false; + } if (this.freeSpace === null) { return false; }
STY: reduce number of unused lines in unit tests Simplified unit tests to reduce number of unused lines.
@@ -285,12 +285,13 @@ class TestBasics(): """Test successful clearance of custom functions """ def custom1(inst, imult, out_units='hours'): - out = (inst.data.mlt * imult).values - return {'data': out, 'long_name': 'doubleMLTlong', + return {'data': (inst.data.mlt * imult).values, + 'long_name': 'doubleMLTlong', 'units': out_units, 'name': 'doubleMLT'} self.testInst.custom.attach(custom1, 'add', args=[2], kwargs={"out_units": "hours1"}) + # Test to see that the custom function was attached assert len(self.testInst.custom._functions) == 1 assert len(self.testInst.custom._kind) == 1 @@ -337,26 +338,21 @@ class TestBasics(): def test_add_multiple_functions_one_not_at_end(self): """Test for error if custom functions are run in the wrong order """ - def custom1(inst): - out = (inst.data.mlt * 2).values - return {'data': out, 'long_name': 'doubleMLTlong', - 'units': 'hours1', 'name': 'doubleMLT'} - - def custom2(inst): - out = (inst.data.mlt * 3).values - return {'data': out, 'long_name': 'tripleMLTlong', - 'units': 'hours1', 'name': 'tripleMLT'} + def custom1(inst, imult): + out = (inst.data.mlt * imult).values + return {'data': out, 'long_name': 'MLT x {:d}'.format(int(imult)), + 'units': 'hours', 'name': 'MLTx{:d}'.format(int(imult))} - def custom3(inst): - out = (inst.data.tripleMLT * 2).values - return {'data': out, 'long_name': 'quadMLTlong', - 'units': 'hours1', 'name': 'quadMLT'} + def custom2(inst, imult): + out = (inst.data.MLTx2 * imult).values + return {'data': out, 'long_name': 'MLT x {:d}'.format(int(imult)), + 'units': 'hours', 'name': 'MLTx{:d}'.format(int(imult))} - self.testInst.custom.attach(custom1, 'add') - self.testInst.custom.attach(custom2, 'add') + self.testInst.custom.attach(custom1, 'add', args=[4]) + self.testInst.custom.attach(custom1, 'add', args=[2]) # if this runs correctly, an error will be thrown # since the data required by custom3 won't be present yet - self.testInst.custom.attach(custom3, 'add', at_pos=1) + self.testInst.custom.attach(custom2, 'add', at_pos=1, args=[2]) with pytest.raises(AttributeError): self.testInst.load(2009, 1)
Add CephOSD service to roles/Standalone.yaml Closes-Bug:
- OS::TripleO::Services::CephMon - OS::TripleO::Services::CephRbdMirror - OS::TripleO::Services::CephRgw + - OS::TripleO::Services::CephOSD - OS::TripleO::Services::CertmongerUser - OS::TripleO::Services::CinderApi - OS::TripleO::Services::CinderBackendDellEMCUnity
update hivealerter Add the possibility to use rule and match fields in the description of TheHive alert
## Other changes - Upgrade stomp 8.0.0 to 8.0.1 - [#832](https://github.com/jertel/elastalert2/pull/832) - @jertel - Add support for Kibana 8.2 for Kibana Discover, Upgrade Pytest 7.1.1 to 7.1.2, Upgrade pylint 2.13.5 to 2.13.8, Upgrade Jinja2 3.1.1 to 3.1.2 - [#840](https://github.com/jertel/elastalert2/pull/840) - @nsano-rururu +- Add the possibility to use rule and match fileds in the description of TheHive alert # 2.5.0
fix: change reference to master branch In response to review on PR eth-brownie/brownie#917.
@@ -598,7 +598,9 @@ def from_brownie_mix( print(f"Downloading from {url}...") _stream_download(url, str(project_path.parent)) - project_path.parent.joinpath(project_name + "-mix-master").rename(project_path) + project_path.parent.joinpath(project_name + "-mix-{}".format(default_branch)).rename( + project_path + ) _create_folders(project_path) _create_gitfiles(project_path) _add_to_sys_path(project_path)
vscode command palette jupyter commands vscode command ids for jupyter have changed. This updates the jupyter commands in vscode.talon to the new commands. fixes
@@ -249,10 +249,10 @@ select word: user.vscode("editor.action.addSelectionToNextFindMatch") skip word: user.vscode("editor.action.moveSelectionToNextFindMatch") # jupyter -cell next: user.vscode("jupyter.gotoNextCellInFile") -cell last: user.vscode("jupyter.gotoPrevCellInFile") -cell run above: user.vscode("jupyter.runallcellsabove.palette") -cell run: user.vscode("jupyter.runcurrentcell") +cell next: user.vscode("notebook.focusNextEditor") +cell last: user.vscode("notebook.focusPreviousEditor") +cell run above: user.vscode("notebook.cell.executeCellsAbove") +cell run: user.vscode("notebook.cell.execute") install local: user.vscode("workbench.extensions.action.installVSIX") preview markdown: user.vscode("markdown.showPreview")
Fix implicit bug in _AudioLabelDataset Add type checking before calling .split(). The old code will break if the input is a list of strings, which happens when using multiple manifests.
@@ -214,7 +214,7 @@ target_label_0, "offset": offset_in_sec_0} {"audio_filepath": "/path/to/audio_wav_n.wav", "duration": time_in_sec_n, "label": \ target_label_n, "offset": offset_in_sec_n} Args: - manifest_filepath (str): Dataset parameter. Path to JSON containing data. + manifest_filepath (Union[str, List[str]]): Dataset parameter. Path to JSON containing data. labels (list): Dataset parameter. List of target classes that can be output by the speaker recognition model. featurizer min_duration (float): Dataset parameter. All training files which have a duration less than min_duration @@ -261,7 +261,7 @@ target_label_n, "offset": offset_in_sec_n} def __init__( self, *, - manifest_filepath: str, + manifest_filepath: Union[str, List[str]], labels: List[str], featurizer, min_duration: Optional[float] = 0.1, @@ -271,8 +271,10 @@ target_label_n, "offset": offset_in_sec_n} cal_labels_occurrence: Optional[bool] = False, ): super().__init__() + if isinstance(manifest_filepath, str): + manifest_filepath = manifest_filepath.split(',') self.collection = collections.ASRSpeechLabel( - manifests_files=manifest_filepath.split(','), + manifests_files=manifest_filepath, min_duration=min_duration, max_duration=max_duration, is_regression_task=is_regression_task, @@ -341,7 +343,7 @@ class AudioToClassificationLabelDataset(_AudioLabelDataset): {"audio_filepath": "/path/to/audio_wav_n.wav", "duration": time_in_sec_n, "label": \ target_label_n, "offset": offset_in_sec_n} Args: - manifest_filepath: Path to manifest json as described above. Can + manifest_filepath (Union[str, List[str]]): Path to manifest json as described above. Can be comma-separated paths. labels (Optional[list]): String containing all the possible labels to map to if None then automatically picks from ASRSpeechLabel collection. @@ -368,7 +370,7 @@ class AudioToSpeechLabelDataset(_AudioLabelDataset): {"audio_filepath": "/path/to/audio_wav_n.wav", "duration": time_in_sec_n, "label": \ target_label_n, "offset": offset_in_sec_n} Args: - manifest_filepath (str): Path to manifest json as described above. Can + manifest_filepath (Union[str, List[str]]): Path to manifest json as described above. Can be comma-separated paths. labels (Optional[list]): String containing all the possible labels to map to if None then automatically picks from ASRSpeechLabel collection. @@ -398,7 +400,7 @@ class AudioToSpeechLabelDataset(_AudioLabelDataset): def __init__( self, *, - manifest_filepath: str, + manifest_filepath: Union[str, List[str]], labels: List[str], featurizer, min_duration: Optional[float] = 0.1,
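The fix normalizes `manifest_filepath` before use so that both a comma-separated string and an explicit list of paths end up as a list. A standalone sketch of that normalization (the function name is illustrative):

```python
from typing import List, Union

def normalize_manifests(manifest_filepath: Union[str, List[str]]) -> List[str]:
    """Accept either a comma-separated string or an explicit list of manifest paths."""
    if isinstance(manifest_filepath, str):
        return manifest_filepath.split(',')
    return list(manifest_filepath)

print(normalize_manifests("train.json,dev.json"))       # ['train.json', 'dev.json']
print(normalize_manifests(["train.json", "dev.json"]))  # unchanged
```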
Fix documentation of KMeans Just a small fix of duplicates in KMeans doc. Authors: - Micka (@lowener) Approvers: - Dante Gama Dessavre (@dantegd) URL:
@@ -211,12 +211,7 @@ class KMeans(Base, Number of instances the k-means algorithm will be called with different seeds. The final results will be from the instance that produces lowest inertia out of n_init instances. - oversampling_factor : float64 - scalable k-means|| oversampling factor - max_samples_per_batch : int (default=1<<15) - maximum number of samples to use for each batch - of the pairwise distance computation. - oversampling_factor : int (default = 2) + oversampling_factor : float64 (default = 2.0) The amount of points to sample in scalable k-means++ initialization for potential centroids. Increasing this value can lead to better initial centroids at the
ebuild.ebd: pkg_pretend: use base build tempdir for $T and disable writing env to it To avoid creating temp pkg dirs in it or writing anything to it during pkg_pretend(), as defined in the spec. Decreases overall runtime for threaded sanity checks against large package sets by a significant amount.
@@ -889,22 +889,22 @@ class ebuild_operations(object): commands = None if not pkg.built: commands = {"request_inherit": partial(inherit_handler, self._eclass_cache)} + + # Use base build tempdir for $T instead of full pkg specific path to + # avoid having to create/remove directories -- pkg_pretend isn't + # allowed to write to the filesystem anyway. env = expected_ebuild_env(pkg) - builddir = pjoin(domain.pm_tmpdir, env["CATEGORY"], env["PF"]) - pkg_tmpdir = normpath(pjoin(builddir, "temp")) - ensure_dirs(pkg_tmpdir, mode=0o770, gid=portage_gid, minimal=True) + env["T"] = domain.pm_tmpdir env["ROOT"] = domain.root - env["T"] = pkg_tmpdir - try: start = time.time() with TemporaryFile() as f: try: # suppress bash output by default fd_pipes = {1: f.fileno(), 2: f.fileno()} ret = run_generic_phase( - pkg, "pretend", env, fd_pipes=fd_pipes, userpriv=True, - sandbox=True, extra_handlers=commands) + pkg, "pretend", env, tmpdir=None, fd_pipes=fd_pipes, + userpriv=True, sandbox=True, extra_handlers=commands) logger.debug( "pkg_pretend sanity check for %s took %2.2f seconds", pkg.cpvstr, time.time() - start) @@ -913,17 +913,6 @@ class ebuild_operations(object): f.seek(0) msg = f.read().decode().strip('\n') raise errors.PkgPretendError(pkg, msg) - finally: - shutil.rmtree(builddir) - # try to wipe the cat dir; if not empty, ignore it - try: - os.rmdir(os.path.dirname(builddir)) - except EnvironmentError as e: - # POSIX specifies either ENOTEMPTY or EEXIST for non-empty dir - # in particular, Solaris uses EEXIST in that case. - # https://github.com/pkgcore/pkgcore/pull/181 - if e.errno not in (errno.ENOTEMPTY, errno.EEXIST): - raise class src_operations(ebuild_operations, format.build_operations):
Update Michigan.md Closes Closes
@@ -240,8 +240,35 @@ id: mi-kalamazoo-2 **Links** -* WOODTV8 live crew: https://streamable.com/xvlky1 -* Kalamazoo Gazette via Facebook Live: https://streamable.com/0wfiu3 -* MLive article: https://www.mlive.com/news/kalamazoo/2020/06/my-heart-was-wrenched-with-pain-assistant-chief-says-of-ordering-tear-gas-on-protesters.html -* IBB image backup of Asst. Chief and City Manager kneeling: https://ibb.co/Fgrwqkj +* [WOODTV8 live crew](https://streamable.com/xvlky1) +* [Kalamazoo Gazette via Facebook Live](https://streamable.com/0wfiu3) +* [MLive article](https://www.mlive.com/news/kalamazoo/2020/06/my-heart-was-wrenched-with-pain-assistant-chief-says-of-ordering-tear-gas-on-protesters.html) +* [IBB image backup of Asst. Chief and City Manager kneeling](https://ibb.co/Fgrwqkj) + +### Reporter covering Proud Boys arrested | August 15th + +A journalist with MLive was covering a Proud Boys rally on August 15th. Footage shows him arrested while standing in the street. He states he was charged with impeding traffic. + +tags: journalist, arrest + +id: mi-kalamazoo-3 + +**Links** + +* https://twitter.com/Phil_Lewis_/status/1294732715221286913 +* https://twitter.com/samueljrob/status/1294741751425568771 +* https://twitter.com/PolarBarrett/status/1294742239499911170 + + +### Police charge, pepper spray, and tackle protesters | August 15th + +Police charge at protesters, pepper spraying indiscriminately. One protester is also tackled to the ground. + +tags: protester, tackle, pepper-spray, spray + +id: mi-kalamazoo-4 + +**Links** + +* https://www.reddit.com/r/2020PoliceBrutality/comments/iaki6j/police_charge_antifascist_protestors_today_in/
BUG: fixed instruments iteration test Fixed bug introduced by testing for any item that is iterable. Also simplified kwarg names and added more comments.
""" import importlib +import numpy as np class Constellation(object): @@ -14,7 +15,7 @@ class Constellation(object): Parameters ---------- - constellation_module : string + const_module : string Name of a pysat constellation module instruments : list-like A list of pysat Instruments to include in the Constellation @@ -33,14 +34,14 @@ class Constellation(object): # ----------------------------------------------------------------------- # Define the magic methods - def __init__(self, constellation_module=None, instruments=None): + def __init__(self, const_module=None, instruments=None): """ Constructs a Constellation given a list of instruments or the name of a file with a pre-defined constellation. Parameters ---------- - constellation_module : string + const_module : string Name of a pysat constellation module instruments : list-like @@ -50,16 +51,18 @@ class Constellation(object): """ - # Load Instruments from the constellation module, if it exists - if constellation_module is not None: - const = importlib.import_module(constellation_module) + # Include Instruments from the constellation module, if it exists + if const_module is not None: + const = importlib.import_module(const_module) self.instruments = const.instruments else: self.instruments = [] + # Add any Instruments provided in the list if instruments is not None: - if hasattr(instruments, '__getitem__'): - raise ValueError('instruments must be iterable') + test_instruments = np.asarray(instruments) + if test_instruments.shape == (): + raise ValueError('instruments argument must be list-like') self.instruments.extend(list(instruments))
deps: upgrade upstream requirements.txt package versions These outdated packages were preventing the upstream image from starting correctly.
@@ -82,7 +82,7 @@ Pillow==8.3.2 ply==3.11 prometheus-client==0.7.1 protobuf==3.12.2 -psutil==5.6.7 +psutil==5.9.0 psycopg2-binary==2.8.4 pyasn1==0.4.8 pyasn1-modules==0.2.8 @@ -94,7 +94,7 @@ PyMySQL==0.9.3 pyOpenSSL==19.1.0 pyparsing==2.4.6 PyPDF2==1.26.0 -pyrsistent==0.15.7 +pyrsistent==0.18.1 python-dateutil==2.8.1 python-editor==1.0.4 python-gitlab==2.0.0 @@ -120,7 +120,7 @@ s3transfer==0.3.2 semantic-version==2.8.4 six==1.14.0 soupsieve==1.9.5 -SQLAlchemy==1.3.13 +SQLAlchemy==1.4.31 stevedore==1.31.0 stringscore==0.1.0 stripe==2.42.0 @@ -136,7 +136,7 @@ webencodings==0.5.1 WebOb==1.8.6 websocket-client==0.57.0 Werkzeug==0.16.1 -wrapt==1.11.2 +wrapt==1.13.3 xhtml2pdf==0.2.4 yapf==0.29.0 zipp==2.1.0
Change "Couch" to "Couch/SQL" in doc_in_es I keep getting tripped up by the label "Couch Doc" and then remember it actually means Couch or SQL. Small change to the text should make this clearer and match reality.
</form> <br> <div class="alert alert-warning"> - Hey there! This page is primarily for comparing documents in elasticsearch and couch. + Hey there! This page is primarily for comparing documents in elasticsearch and couch/sql. Are you sure you don't want <a href="{% url "raw_couch" %}?id={{ doc_id }}">raw_couch</a>? </div> <pre>{{ es_doc }}</pre> </div> <div class="col-xs-6"> - <h3>Couch Doc:</h3> + <h3>Couch/SQL Doc:</h3> <pre>{{ couch_info.doc|default:"NOT FOUND" }}</pre> </div> </div> {% endfor %} {% if not found_indices %} <div class="col-xs-6"> - <h3>Couch Doc:</h3> + <h3>Couch/SQL Doc:</h3> <pre>{{ couch_info.doc|default:"NOT FOUND" }}</pre> </div> {% endif %}
[ml-release][no_ci] Do not output progress bar for air tf benchmark. Release test logs are highly polluted by pyramid-shaped progress bars from remote actors.
@@ -82,6 +82,7 @@ def train_func(use_ray: bool, config: dict): epochs=epochs, steps_per_epoch=steps_per_epoch, callbacks=callbacks, + verbose=2, # Disables progress bar in remote actors. ) results = history.history loss = results["loss"][-1]
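For context, Keras' `fit(verbose=2)` prints one summary line per epoch instead of an in-place progress bar, which keeps logs captured from non-interactive or remote workers readable. A toy sketch, assuming TensorFlow is installed (the model and data here are placeholders, not the benchmark's):

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer="sgd", loss="mse")

x = np.random.rand(64, 4).astype("float32")
y = np.random.rand(64, 1).astype("float32")

# verbose=2: one line per epoch, no carriage-return progress bar in captured logs.
model.fit(x, y, epochs=2, verbose=2)
```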
Re-arrange for clarity to make it clear that the comment about the log domain specificity applies to the 'defaults write' command and not the 'notifyutil' command.
@@ -52,14 +52,14 @@ To enable complete protocol logging, open Terminal and run the command: defaults write -g CalLogSimpleConfiguration -array com.apple.calendar.store.log.caldav.http +The debug logging domains are specified using a reverse-dns style hierarchy, so to enable all Calendar logging (includes logging of account discovery), use the value 'com.apple.calendar'. + To activate the logging configuration change, quit and relaunch Calendar. For versions of OS X older than 10.12, instead of quitting and relaunching, run this command: :: notifyutil -p com.apple.calendar.foundation.notification.logConfigUpdated -The debug logging domains are specified using a reverse-dns style hierarchy, so to enable all Calendar logging (includes logging of account discovery), use the value 'com.apple.calendar' - To disable Calendar debug logging, run the command: ::
More WIP dependabot changelog CI Fixes a typo in and allows the workflow to trigger on `reopen` for easier debugging.
@@ -3,6 +3,7 @@ on: pull_request: types: - opened + - reopened permissions: # Needed to be able to push the commit. See @@ -11,7 +12,7 @@ permissions: contents: write # The pull_requests "synchronize" event doesn't seem to fire with just `contents: write`, so # CI doesn't run with the new changelog. Maybe `pull_requests: write` will fix this? - pull_requests: write + pull-requests: write jobs: add-changelog:
Update OracleSQL Injection.md missing 'T' in the SELECT in the Oracle blind SQLI section
@@ -68,8 +68,8 @@ SELECT owner, table_name FROM all_tab_columns WHERE column_name LIKE '%PASS%'; | Version is 12.2 | SELECT COUNT(*) FROM v$version WHERE banner LIKE 'Oracle%12.2%'; | | Subselect is enabled | SELECT 1 FROM dual WHERE 1=(SELECT 1 FROM dual) | | Table log_table exists | SELECT 1 FROM dual WHERE 1=(SELECT 1 from log_table); | -| Column message exists in table log_table | SELEC COUNT(*) FROM user_tab_cols WHERE column_name = 'MESSAGE' AND table_name = 'LOG_TABLE'; | -| First letter of first message is t | SELEC message FROM log_table WHERE rownum=1 AND message LIKE 't%'; | +| Column message exists in table log_table | SELECT COUNT(*) FROM user_tab_cols WHERE column_name = 'MESSAGE' AND table_name = 'LOG_TABLE'; | +| First letter of first message is t | SELECT message FROM log_table WHERE rownum=1 AND message LIKE 't%'; | ## Oracle SQL Time based
Fix spelling HG-- branch : feature/microservices
@@ -322,7 +322,7 @@ class MetricsCheck(DiscoveryCheck): else: m["abs_value"] = m["value"] * m["scale"] self.logger.debug( - "[%s] Measured value: %s. Scale: %s. Resuling value: %s", + "[%s] Measured value: %s. Scale: %s. Resulting value: %s", key, m["value"], m["scale"], m["abs_value"] ) # Schedule batch
Update hosts.origin.example Changing sample config from:
#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics
To:
#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com
Reason: When I set my inventory with [openshift_metrics_hawkular_hostname=https://metrics.MYDOMAIN.com/hawkular/metrics/], the resulting metrics URL is broken, like this: [https://https//metrics.cirrus.alterdata.com.br/hawkular/metrics/hawkular/metrics]
@@ -539,7 +539,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics # Currently, you may only alter the hostname portion of the url, alterting the # `/hawkular/metrics` path will break installation of metrics. -#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics +#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com # Configure the prefix and version for the component images #openshift_metrics_image_prefix=docker.io/openshift/origin- #openshift_metrics_image_version=v3.7.0
[IMPR] Simplify report() method The method never iterates the while loop but always breaks out of it. Return from the method instead of exiting the loop.
@@ -591,7 +591,7 @@ class checkImagesBot(object): """Function to make the reports easier.""" self.image_to_report = image_to_report self.newtext = newtext - self.head = head or u'' + self.head = head or '' self.notification = notification self.notification2 = notification2 @@ -603,34 +603,24 @@ class checkImagesBot(object): notification2) self.commTalk = commTalk self.commImage = commImage or self.comment - - while True: + image_tagged = False try: - resPutMex = self.tag_image(unver) + image_tagged = self.tag_image(unver) except pywikibot.NoPage: - pywikibot.output(u"The page has been deleted! Skip!") - break + pywikibot.output('The page has been deleted! Skip!') except pywikibot.EditConflict: - pywikibot.output(u"Edit conflict! Skip!") - break - else: - if not resPutMex: - break - if self.notification: + pywikibot.output('Edit conflict! Skip!') + if image_tagged and self.notification: try: self.put_mex_in_talk() except pywikibot.EditConflict: - pywikibot.output(u"Edit Conflict! Retrying...") + pywikibot.output('Edit Conflict! Retrying...') try: self.put_mex_in_talk() - except: + except Exception: + pywikibot.exception() pywikibot.output( - u"Another error... skipping the user..") - break - else: - break - else: - break + 'Another error... skipping the user..') def uploadBotChangeFunction(self, reportPageText, upBotArray): """Detect the user that has uploaded the file through the upload bot.""" @@ -653,7 +643,7 @@ class checkImagesBot(object): reportPageText = reportPageObject.get() except pywikibot.NoPage: pywikibot.output(u'%s has been deleted...' % self.imageName) - return + return False # You can use this function also to find only the user that # has upload the image (FixME: Rewrite a bit this part) if put: @@ -665,7 +655,7 @@ class checkImagesBot(object): summary=self.commImage) except pywikibot.LockedPage: pywikibot.output(u'File is locked. Skipping.') - return + return False # paginetta it's the image page object. try: if reportPageObject == self.image and self.uploader: @@ -679,7 +669,7 @@ class checkImagesBot(object): repme = self.list_entry + "problems '''with the APIs'''" self.report_image(self.image_to_report, self.rep_page, self.com, repme) - return + return False upBots = i18n.translate(self.site, uploadBots) user = pywikibot.User(self.site, nick) luser = user.title(asUrl=True)
Update tests.py Add tests to cover new lines of code
@@ -4547,10 +4547,10 @@ def test_plantcv_hyperspectral_analyze_spectral(): mask = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1) array_data = pcv.hyperspectral.read_data(filename=spectral_filename) pcv.params.debug = "plot" - _ = pcv.hyperspectral.analyze_spectral(array=array_data, mask=mask, histplot=True) + _ = pcv.hyperspectral.analyze_spectral(array=array_data, mask=mask, histplot=True, label=None) pcv.params.debug = "print" - _ = pcv.hyperspectral.analyze_spectral(array=array_data, mask=mask, histplot=True) - assert len(pcv.outputs.observations['spectral_frequencies']['value']) == 978 + _ = pcv.hyperspectral.analyze_spectral(array=array_data, mask=mask, histplot=True, label="prefix") + assert len(pcv.outputs.observations['prefix_spectral_frequencies']['value']) == 978 def test_plantcv_hyperspectral_analyze_index(): @@ -4604,7 +4604,7 @@ def test_plantcv_hyperspectral_analyze_index_outside_range_warning(): mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255 f = io.StringIO() with redirect_stdout(f): - pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, min_bin=.5, max_bin=.55) + pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, min_bin=.5, max_bin=.55, label="i") out = f.getvalue() # assert os.listdir(cache_dir) is 0 assert out[0:10] == 'WARNING!!!'
Typo in README.md just a small typo fix
@@ -39,4 +39,4 @@ Here are some ideas I work or want to work on when I have time. If you want to c - Add statistics and visualisations as in [atlas_analyze](https://github.com/metagenome-atlas/atlas_analyze) - Implementation of most rules as snakemake wrapper - Cloud execution -- Update to new nakemake version and use cool reports. +- Update to new Snakemake version and use cool reports.
zulip_tools.py: Add `GENERIC_CACHE_SCRIPT_PARSER`. This parser will act as a parent parser for all the cache cleaning scripts.
#!/usr/bin/env python3 from __future__ import print_function +import argparse import datetime import errno import logging @@ -34,6 +35,19 @@ BLUE = '\x1b[34m' MAGENTA = '\x1b[35m' CYAN = '\x1b[36m' +# Parent parser for cache cleaning scripts. +GENERIC_CACHE_SCRIPT_PARSER = argparse.ArgumentParser(add_help=False) +GENERIC_CACHE_SCRIPT_PARSER.add_argument( + "--threshold", dest="threshold_days", type=int, default=14, + nargs="?", metavar="<days>", help="Any cache which is not in " + "use by a deployment not older than threshold days(current " + "installation in dev) and older than threshold days will be " + "deleted. (defaults to 14)") +GENERIC_CACHE_SCRIPT_PARSER.add_argument( + "--dry-run", dest="dry_run", action="store_true", + help="If specified then script will only print the caches " + "that it will delete/keep back. It will not delete any cache.") + def get_deployment_version(extract_path): # type: (str) -> str version = '0.0.0'
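A parent parser defined with `add_help=False` can be reused by any number of scripts via `parents=`. A minimal sketch of how such a shared parser is consumed — a trimmed-down parser and a hypothetical child script:

```python
import argparse

# Shared options defined once on a parent parser (add_help=False avoids a duplicate -h).
GENERIC_CACHE_SCRIPT_PARSER = argparse.ArgumentParser(add_help=False)
GENERIC_CACHE_SCRIPT_PARSER.add_argument("--threshold", dest="threshold_days",
                                         type=int, default=14)
GENERIC_CACHE_SCRIPT_PARSER.add_argument("--dry-run", dest="dry_run",
                                         action="store_true")

# A cache-cleaning script then inherits those options via `parents=`.
parser = argparse.ArgumentParser(description="Clean some cache",
                                 parents=[GENERIC_CACHE_SCRIPT_PARSER])
args = parser.parse_args(["--threshold", "7", "--dry-run"])
print(args.threshold_days, args.dry_run)  # 7 True
```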
Fix tracing docs and add more comprehensive examples Summary: Fixes Pull Request resolved:
@@ -1373,11 +1373,12 @@ if _enabled: **Tracing:** - Using ``torch.jit.trace``, you can turn an existing module or Python - function into a TorchScript program. You must provide example inputs, - and we run the function, recording the operations performed on all the tensors. We turn the resulting recording - into a TorchScript method that is installed as the ``forward`` method of a - ``ScriptModule``. This module also contains any parameters that the original + Using ``torch.jit.trace`` and ``torch.jit.trace_module``, you can turn an existing module or Python + function into a TorchScript ``torch._C.Function`` or ``ScriptModule``. You must provide example inputs, + and we run the function, recording the operations performed on all the tensors. + * The resulting recording of a standalone function produces ``torch._C.Function``. + * The resulting recording of ``forward`` function of ``nn.Module`` or ``nn.Module`` produces ``ScriptModule``. + This module also contains any parameters that the original module had as well. Example (tracing a function):: @@ -1388,19 +1389,44 @@ if _enabled: traced_foo = torch.jit.trace(foo, (torch.rand(3), torch.rand(3))) .. note:: - Tracing a function will construct a ``ScriptModule`` with a single - ``forward`` method that implements the function. The resulting - ``ScriptModule`` has no parameters or attributes. + Tracing a standalone function will construct a ``torch._C.Function`` + Tracing ``nn.Module``s ``forward`` will construct a ``ScriptModule`` Example (tracing an existing module):: import torch - import torchvision - traced_net = torch.jit.trace(torchvision.models.resnet18(), - torch.rand(1, 3, 224, 224)) + class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.conv = nn.Conv2d(1, 1, 3) + + def forward(self, x): + return self.conv(x) + + def weighted_kernel_sum(self, weight): + return weight * self.conv.weight + + + n = Net() + example_weight = torch.rand(1, 1, 3, 3) + example_forward_input = torch.rand(1, 1, 3, 3) + + # all three trace calls below are equivalent + # and construct `ScriptModule` with a single `forward` method + module = torch.jit.trace(n.forward, example_forward_input) # produces ScriptModule with `forward` + module = torch.jit.trace(n, example_forward_input) # produces ScriptModule with `forward` + module = torch.jit.trace_module(n, inputs) # produces ScriptModule with `forward` + + inputs = {'forward' : example_forward_input, 'weighted_kernel_sum' : example_weight} + # trace_module produces `ScriptModule` with two methods: + # `forward` and `weighted_kernel_sum` + module = torch.jit.trace_module(n, inputs, True, True) .. note:: + * The first three trace/trace_module calls are equivalent and return ``ScriptModule`` + with a single ``forward`` method. + * The last ``trace_module`` call produces a ``ScriptModule`` with two methods. Tracing only records operations done when the given function is run on the given tensors. Therefore, the returned ``ScriptModule`` will always run the same traced graph on any input. This has some important implications when your module is
Fix missing saltenv and pillarenv in pillar.item Fixes
@@ -373,10 +373,16 @@ def item(*args, **kwargs): ret = {} default = kwargs.get('default', '') delimiter = kwargs.get('delimiter', DEFAULT_TARGET_DELIM) + pillarenv = kwargs.get('pillarenv', None) + saltenv = kwargs.get('saltenv', None) + + pillar_dict = __pillar__ \ + if all(x is None for x in (saltenv, pillarenv)) \ + else items(saltenv=saltenv, pillarenv=pillarenv) try: for arg in args: - ret[arg] = salt.utils.traverse_dict_and_list(__pillar__, + ret[arg] = salt.utils.traverse_dict_and_list(pillar_dict, arg, default, delimiter)
Working fix for one of the bullet points. Thanks Pirate.
@@ -15,7 +15,7 @@ for Discord. - Modern Pythonic API using ``async``\/``await`` syntax - Sane rate limit handling that prevents 429s -- Implements the entirety of the Discord API +- Implements the entire Discord API - Command extension to aid with bot creation - Easy to use with an object oriented design - Optimised for both speed and memory
Fix chapter range selection Fix chapter range selection
@@ -392,7 +392,7 @@ class MessageHandler: def resolve_chapter(name): cid = 0 if name.isdigit(): - cid = int(str) + cid = int(name) else: cid = self.app.crawler.get_chapter_index_of(name) # end if
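The bug was calling `int()` on the builtin `str` type instead of on the `name` variable. A reduced sketch of the corrected resolution logic — `get_chapter_index_of` is a stand-in for the crawler lookup:

```python
def resolve_chapter(name, get_chapter_index_of=lambda title: 0):
    """Resolve a chapter selector that may be a number ("12") or a chapter title."""
    if name.isdigit():
        return int(name)   # the broken version called int(str), i.e. int() on the type itself
    return get_chapter_index_of(name)

print(resolve_chapter("12"))                         # 12
print(resolve_chapter("Prologue", lambda title: 1))  # 1
```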
Support dict of links in pagination detection Some responses, e.g. by keystone (http://git.openstack.org/cgit/openstack/keystone/tree/api-ref/source/v3/samples/admin/groups-list-response.json?h=stable/rocky#n2) contain a dict in the `links` key. We convert such a dict to a list of dicts, because that's the format we expect.
@@ -1369,6 +1369,9 @@ class Resource(dict): pagination_key = '{key}_links'.format(key=cls.resources_key) if pagination_key: links = data.get(pagination_key, {}) + # keystone might return a dict + if isinstance(links, dict): + links = ({k: v} for k, v in six.iteritems(links)) for item in links: if item.get('rel') == 'next' and 'href' in item: next_link = item['href']
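The conversion wraps each key/value pair of a keystone-style `links` dict into its own single-entry dict, so the existing loop over link items keeps working. A standalone sketch (URLs are made up):

```python
def normalize_links(links):
    """Make the pagination loop always see an iterable of dicts."""
    if isinstance(links, dict):  # keystone-style: a single mapping instead of a list
        links = [{k: v} for k, v in links.items()]
    return links

# A list of link dicts passes through untouched; a bare dict gets wrapped.
print(normalize_links([{"rel": "next", "href": "https://example.test/v3/groups?page=2"}]))
print(normalize_links({"self": "https://example.test/v3/groups", "next": None}))
```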
Update edf.py Make the ch_offsets variable int64 (instead of int32), which prevents integer overflow on Windows.
@@ -247,7 +247,7 @@ class RawEDF(BaseRaw): this_sel = orig_sel[idx] # We could read this one EDF block at a time, which would be this: - ch_offsets = np.cumsum(np.concatenate([[0], n_samps])) + ch_offsets = np.cumsum(np.concatenate([[0], n_samps]), dtype=np.int64) block_start_idx, r_lims, d_lims = _blk_read_lims(start, stop, buf_len) # But to speed it up, we really need to read multiple blocks at once, # Otherwise we can end up with e.g. 18,181 chunks for a 20 MB file!
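On platforms whose default NumPy integer is 32-bit (notably Windows), a cumulative sum of sample counts can silently wrap; passing `dtype=np.int64` avoids that. A small demonstration with made-up channel sizes:

```python
import numpy as np

n_samps = np.full(300_000, 10_000)  # hypothetical per-channel sample counts

# The true total is 3_000_000_000, which exceeds 2**31 - 1 and wraps in int32.
print(np.cumsum(n_samps, dtype=np.int32)[-1])  # wrapped, negative value
print(np.cumsum(n_samps, dtype=np.int64)[-1])  # 3000000000
```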
[Datasets] [Docs] Improve `.limit()` and `.take()` docstrings Improve docstrings for .limit() and .take(), making the distinction more clear.
@@ -1744,7 +1744,11 @@ class Dataset(Generic[T]): return Dataset(plan, self._epoch, self._lazy) def limit(self, limit: int) -> "Dataset[T]": - """Limit the dataset to the first number of records specified. + """Truncate the dataset to the first ``limit`` records. + + Contrary to :meth`.take`, this will not move any data to the caller's + machine. Instead, it will return a new ``Dataset`` pointing to the truncated + distributed data. Examples: >>> import ray @@ -1764,7 +1768,11 @@ class Dataset(Generic[T]): return left def take(self, limit: int = 20) -> List[T]: - """Take up to the given number of records from the dataset. + """Return up to ``limit`` records from the dataset. + + This will move up to ``limit`` records to the caller's machine; if + ``limit`` is very large, this can result in an OutOfMemory crash on + the caller. Time complexity: O(limit specified) @@ -1782,7 +1790,11 @@ class Dataset(Generic[T]): return output def take_all(self, limit: int = 100000) -> List[T]: - """Take all the records in the dataset. + """Return all of the records in the dataset. + + This will move the entire dataset to the caller's machine; if the + dataset is very large, this can result in an OutOfMemory crash on + the caller. Time complexity: O(dataset size)
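A hedged usage sketch of the distinction the docstrings describe — `limit()` returns another (distributed) Dataset, while `take()` materializes records on the caller. It assumes `ray[data]` is installed; the exact record format returned by `take()` varies across Ray versions:

```python
import ray

ds = ray.data.range(1_000_000)

truncated = ds.limit(100)  # still a Dataset; no rows are pulled to this process
rows = ds.take(5)          # a plain Python list of up to 5 records on the caller

print(truncated.count(), len(rows))  # 100 5
```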
Update script.py Handle non-UTF-8 characters during decoding for %%bash
@@ -210,7 +210,7 @@ def in_thread(coro): async def _handle_stream(stream, stream_arg, file_object): while True: - line = (await stream.readline()).decode("utf8") + line = (await stream.readline()).decode("utf8", errors="replace") if not line: break if stream_arg:
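`errors="replace"` swaps undecodable bytes for U+FFFD instead of raising, so streaming subprocess output can't crash on stray non-UTF-8 bytes. A small demonstration with a made-up byte string:

```python
raw = b"step 1/3 \xff done\n"  # contains a byte that is never valid UTF-8

try:
    raw.decode("utf8")
except UnicodeDecodeError as exc:
    print("strict decode raises:", exc)

print(raw.decode("utf8", errors="replace"))  # bad byte becomes U+FFFD; no exception
```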
Remove debug check Remove the debug check; it would fail if the image name happens to be "debug"
@@ -145,7 +145,7 @@ parser_run.add_argument("command", help="command to run within container", nargs def run(args): register_docker_subcommand("run") acifile = get_aci_fname(args.image) - if not acifile and args.image != "debug": + if not acifile: pull(parser_pull.parse_args([args.image])) acifile = get_aci_fname(args.image)