Columns: message (string, length 13-484), diff (string, length 38-4.63k)
Delay 'operation interrupted - waiting' message. To avoid showing a waiting message for fast-terminating operations, wait 2 seconds before showing the message.
@@ -36,6 +36,7 @@ OP_RUNFILE_PATHS = [ ] PROC_TERM_TIMEOUT_SECONDS = 30 +LOG_WAITING_DELAY_SECONDS = 2 ################################################################### # Exception classes @@ -343,10 +344,13 @@ def _proc_wait_minutes(proc, minutes): def _handle_proc_keyboard_interrupt(proc): - if os.getenv("NO_OP_INTERRUPTED_MSG") != "1": - log.info("Operation interrupted - waiting for process to exit") + log_waiting_after = time.time() + LOG_WAITING_DELAY_SECONDS kill_after = time.time() + PROC_TERM_TIMEOUT_SECONDS while time.time() < kill_after: + if log_waiting_after and time.time() > log_waiting_after: + if os.getenv("NO_OP_INTERRUPTED_MSG") != "1": + log.info("Operation interrupted - waiting for process to exit") + log_waiting_after = None if proc.poll() is not None: break time.sleep(1)
Update validators.py: update PV validation to happen only if the PV key is provided.
@@ -218,13 +218,7 @@ class InputValidator(object): if len(pvmodel.__getattribute__("prod_factor_series")) > 0: self.clean_time_series("PV", "prod_factor_series") - if "PV" in self.models.keys(): # single PV - cross_clean_pv(self.models["PV"]) - - if len(self.pvnames) > 0: # multiple PV - for pvname in self.pvnames: - cross_clean_pv(self.models[pvname]) - + def update_pv_defaults_offgrid(self): if self.models["PV"].__getattribute__("can_net_meter") == None: if self.models["Settings"].off_grid_flag==False: self.models["PV"].can_net_meter = True @@ -249,6 +243,15 @@ class InputValidator(object): else: self.models["PV"].operating_reserve_required_pct = 0.25 + if "PV" in self.models.keys(): # single PV + cross_clean_pv(self.models["PV"]) + update_pv_defaults_offgrid(self) + + if len(self.pvnames) > 0: # multiple PV + for pvname in self.pvnames: + cross_clean_pv(self.models[pvname]) + update_pv_defaults_offgrid(self) + """ Time series values are up or down sampled to align with Settings.time_steps_per_hour """ @@ -359,14 +362,6 @@ class InputValidator(object): # Clean fuel_cost_per_mmbtu to align with time series self.clean_time_series("ExistingBoiler", "fuel_cost_per_mmbtu") - """ - SpaceHeatingLoads - """ - # if self.models["SpaceHeatingLoad"].latitude is None: - # self.models["SpaceHeatingLoad"].latitude = self.models["Site"].latitude - # if self.models["SpaceHeatingLoad"].longitude is None: - # self.models["SpaceHeatingLoad"].longitude = self.models["Site"].longitude - """ ElectricLoad If user does not provide values, set defaults conditional on off-grid flag
Fix initialization of (original) name for root dispatched properties. This fixes the spurious "p_" prefix one can see in GDB for the root property in dispatching property trees. TN:
@@ -2155,7 +2155,7 @@ class CompileCtx(object): prop.reset_inheritance_info() if not prop.abstract or prop.abstract_runtime_check: root_static = PropertyDef( - expr=None, prefix=None, name=prop.name, + expr=None, prefix=None, name=None, type=prop.type, doc=prop.doc, public=False, @@ -2166,6 +2166,11 @@ class CompileCtx(object): ) static_props[0] = root_static + # Manually assign names to the new static property to avoid + # extra decorations. + root_static._original_name = prop._original_name + root_static._name = prop._name + # Transfer arguments from the dispatcher to the new static # property, then regenerate arguments in the dispatcher. root_static.arguments = prop.arguments
Update README.md: add more explanation to the demos README.
# Metaverse + +## Introduction +This demo is an implementation to let a celebrity in an image "speak". With the composition of PaddleSpeech and PaddleGAN, we integrate the installation and the specific modules in a single shell script. + +## How to use our code + You can make your favorite person say the specified content with the `TTS` mudule of `PaddleSpeech` and `PaddleGAN`, and construct your own virtual human. -Run the following command line to get started: +First, locate the directory you would like to run our code, and set the environment variants. +```shell +./path.sh ``` +Download the celebrity image in the current directory, in our case we use `Lamarr.png`. + +Second, run `run.sh` to complete all the essential procedures, including the installation. + +```shell ./run.sh ``` +If you would like to try your own image, please replace the image name in the shell script. + +The result has shown on our notebook[]().
Add a 1 second sleep time after chain import. Removing the parity cache slightly increases the time it takes for the parity import blocks command to release the db.
@@ -149,20 +149,26 @@ def passwordfile(): def parity_process( parity_import_blocks_process, parity_binary, - ipc_path, datadir, + ipc_path, + datadir, passwordfile, author): - - run_parity_command = ( + command_arguments = ( parity_binary, '--chain', os.path.join(datadir, 'chain_config.json'), '--ipc-path', ipc_path, - '--base-path', str(datadir), - '--unlock', str(author), - '--password', str(passwordfile), + '--base-path', datadir, + '--unlock', author, + '--password', passwordfile, ) + time.sleep(1) + yield from get_process(command_arguments) + + +def get_process(command_list): + proc = subprocess.Popen( - run_parity_command, + command_list, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, @@ -185,35 +191,15 @@ def parity_process( @pytest.fixture(scope="session") def parity_import_blocks_process(parity_binary, ipc_path, datadir, passwordfile): - run_parity_command = ( + command_arguments = ( parity_binary, 'import', os.path.join(datadir, 'blocks_export.rlp'), '--chain', os.path.join(datadir, 'chain_config.json'), '--ipc-path', ipc_path, - '--base-path', str(datadir), - '--unlock', str(author), - '--password', str(passwordfile), - ) - proc = subprocess.Popen( - run_parity_command, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - bufsize=1, - ) - try: - yield proc - finally: - kill_proc_gracefully(proc) - output, errors = proc.communicate() - print( - "Parity Process Exited:\n" - "stdout:{0}\n\n" - "stderr:{1}\n\n".format( - force_text(output), - force_text(errors), - ) + '--base-path', datadir, + '--password', passwordfile, ) + yield from get_process(command_arguments) @pytest.fixture(scope="session")
[BUG] fix erroneous direct passthrough in ColumnEnsembleForecaster. `ThetaForecaster` stopped working with `ColumnEnsembleForecaster` after its probabilistic upgrade; the forecasting horizon was not picked up correctly in the underlying forecasters inside the `ColumnEnsembleForecaster`. The argument of `_by_column` should have been the outer methods, not the inner methods. This PR fixes that.
@@ -196,7 +196,7 @@ class ColumnEnsembleForecaster(_HeterogenousEnsembleForecaster): y_pred : pd.Series Point predictions """ - return self._by_column("_predict", fh=fh, X=X) + return self._by_column("predict", fh=fh, X=X) def _predict_quantiles(self, fh=None, X=None, alpha=None): """Compute/return prediction quantiles for a forecast. @@ -230,7 +230,7 @@ class ColumnEnsembleForecaster(_HeterogenousEnsembleForecaster): Row index is fh. Entries are quantile forecasts, for var in col index, at quantile probability in second-level col index, for each row index. """ - return self._by_column("_predict_quantiles", fh=fh, X=X, alpha=alpha) + return self._by_column("predict_quantiles", fh=fh, X=X, alpha=alpha) def _predict_interval(self, fh=None, X=None, coverage=None): """Compute/return prediction quantiles for a forecast. @@ -268,7 +268,7 @@ class ColumnEnsembleForecaster(_HeterogenousEnsembleForecaster): Upper/lower interval end forecasts are equivalent to quantile forecasts at alpha = 0.5 - c/2, 0.5 + c/2 for c in coverage. """ - return self._by_column("_predict_interval", fh=fh, X=X, coverage=coverage) + return self._by_column("predict_interval", fh=fh, X=X, coverage=coverage) def _predict_var(self, fh, X=None, cov=False): """Forecast variance at future horizon. @@ -300,7 +300,7 @@ class ColumnEnsembleForecaster(_HeterogenousEnsembleForecaster): Entries are (co-)variance forecasts, for var in col index, and covariance between time index in row and col. """ - return self._by_column("_predict_var", fh=fh, X=X, cov=cov) + return self._by_column("predict_var", fh=fh, X=X, cov=cov) def get_params(self, deep=True): """Get parameters of estimator in `_forecasters`.
core: consistent _as_mpf_val() helpers for constants. Workaround for fredrik-johansson/mpmath#441.
@@ -2351,7 +2351,7 @@ def __int__(self): return 2 def _as_mpf_val(self, prec): - return mlib.mpf_e(prec) + return mpmath.e(prec)._mpf_ def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): @@ -2479,7 +2479,7 @@ def __int__(self): return 3 def _as_mpf_val(self, prec): - return mlib.mpf_pi(prec) + return mpmath.pi(prec)._mpf_ def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): @@ -2525,9 +2525,7 @@ def __int__(self): return 1 def _as_mpf_val(self, prec): - # XXX track down why this has to be increased - rv = mlib.from_man_exp(mlib.phi_fixed(prec + 10), -prec - 10) - return mpf_norm(rv, prec) + return mpmath.phi(prec)._mpf_ def _eval_expand_func(self, **hints): from ..functions import sqrt @@ -2573,10 +2571,7 @@ def __int__(self): return 0 def _as_mpf_val(self, prec): - # XXX track down why this has to be increased - v = mlib.libhyper.euler_fixed(prec + 10) - rv = mlib.from_man_exp(v, -prec - 10) - return mpf_norm(rv, prec) + return mpmath.euler(prec)._mpf_ def approximation_interval(self, number_cls): if issubclass(number_cls, Integer): @@ -2616,10 +2611,7 @@ def __int__(self): return 0 def _as_mpf_val(self, prec): - # XXX track down why this has to be increased - v = mlib.catalan_fixed(prec + 10) - rv = mlib.from_man_exp(v, -prec - 10) - return mpf_norm(rv, prec) + return mpmath.catalan(prec)._mpf_ def approximation_interval(self, number_cls): if issubclass(number_cls, Integer):
Update agenttesla.txt ```phishing and an agenttesla```
@@ -591,3 +591,89 @@ moneytrade.trade # Reference: https://twitter.com/pancak3lullz/status/1207398461068529664 radianthospitals.org + +# Reference: https://twitter.com/JayTHL/status/1214332738167287810 +# Reference: https://pastebin.com/raw/c2JsbUeh + +adoptfashions.tk +agatamodels.ml +ahphaeg.ml +ahphaeg.tk +aldohawater.tk +allinkenya.ml +allinkenya.tk +alojobs.ml +andreyhosting.com +archiself.tk +artateknik.tk +avjrggs.ml +bargainsnyc.ml +baristageek.ml +bedrocktire.tk +blazonjewelry.ml +blazonjewelry.tk +bodyfitny.ml +boisegmc.ml +boisegmc.tk +bokkhao.ml +bokkhao.tk +bounuspornos.ml +brazosvalleypts.ml +bunnyby.ml +buyshares.ga +buyshares.ml +carriven.tk +casualfiber.tk +chefport.tk +chenfqi.tk +citjunta.ml +clanliqr.ml +coffeeod.tk +conanandjasmine.ml +cpajwood.ml +cpajwood.tk +cpanel.sunlitcars.tk +demonm.tk +destaquefitness.tk +dlskoda.ml +dombasticknas.tk +drysupplies.tk +dwgdhfy.tk +ecuacentauro.ml +ecuacentauro.tk +eleganteclub.ml +eleganteclub.tk +endzoneswagger.ml +endzoneswagger.tk +ezmoneymyteam.ml +fanbcanton.ml +finddrives.ml +finddrives.tk +fllwme.ml +fourwheller.tk +gbbpestcontrol.tk +greatpurity.ml +greatpurity.tk +hemorroidehq.ml +hemorroidehq.tk +henriquepneus.tk +hostarctic.ml +ilovesweetie.ml +ilovesweetie.tk +imagoindia.ml +instantqual.ml +interoutesme.tk +itechcity.ga +itechcity.ml +jademodern.tk +kedaisuki.ml +kedaisuki.tk +kinofkenefret.ml +laluney.ml +layingday.tk +lebanonoil.ml +lebanonoil.tk +litse.ml +lscucusc.tk +lvmotorsports.ml +lvmotorsports.tk
Deleted a line which was added to support the SubMenu feature. The SubMenu feature is now a plugin in a separate repo and is no longer part of the main cmd2 package.
@@ -2464,7 +2464,6 @@ Usage: Usage: unalias [-a] name [name ...] def do_eof(self, _: str) -> bool: """Called when <Ctrl>-D is pressed.""" # End of script should not exit app, but <Ctrl>-D should. - print('') # Required for clearing line when exiting submenu return self._STOP_AND_EXIT def do_quit(self, _: str) -> bool:
Wrapper : Fix home directory expansion. This was broken because ~ is ignored when inside double quotes.
@@ -81,7 +81,7 @@ popd &> /dev/null # $1 is the value to include in the path # $2 is the name of the path to edit # -# e.g. includeInPath ~/bin PATH +# e.g. prependToPath "$HOME/bin" PATH function prependToPath { if [[ ":${!2}:" != *":$1:"* ]] ; then @@ -114,19 +114,19 @@ prependToPath "$GAFFER_ROOT/glsl" IECOREGL_SHADER_PATHS prependToPath "$GAFFER_ROOT/glsl" IECOREGL_SHADER_INCLUDE_PATHS prependToPath "$GAFFER_ROOT/fonts" IECORE_FONT_PATHS -prependToPath "~/gaffer/ops:$GAFFER_ROOT/ops" IECORE_OP_PATHS +prependToPath "$HOME/gaffer/ops:$GAFFER_ROOT/ops" IECORE_OP_PATHS -prependToPath "~/gaffer/opPresets:$GAFFER_ROOT/opPresets" IECORE_OP_PRESET_PATHS -prependToPath "~/gaffer/procedurals:$GAFFER_ROOT/procedurals" IECORE_PROCEDURAL_PATHS -prependToPath "~/gaffer/proceduralPresets:$GAFFER_ROOT/proceduralPresets" IECORE_PROCEDURAL_PRESET_PATHS +prependToPath "$HOME/gaffer/opPresets:$GAFFER_ROOT/opPresets" IECORE_OP_PRESET_PATHS +prependToPath "$HOME/gaffer/procedurals:$GAFFER_ROOT/procedurals" IECORE_PROCEDURAL_PATHS +prependToPath "$HOME/gaffer/proceduralPresets:$GAFFER_ROOT/proceduralPresets" IECORE_PROCEDURAL_PRESET_PATHS if [[ -z $CORTEX_POINTDISTRIBUTION_TILESET ]] ; then export CORTEX_POINTDISTRIBUTION_TILESET="$GAFFER_ROOT/resources/cortex/tileset_2048.dat" fi -prependToPath "~/gaffer/apps:$GAFFER_ROOT/apps" GAFFER_APP_PATHS +prependToPath "$HOME/gaffer/apps:$GAFFER_ROOT/apps" GAFFER_APP_PATHS -prependToPath "~/gaffer/startup" GAFFER_STARTUP_PATHS +prependToPath "$HOME/gaffer/startup" GAFFER_STARTUP_PATHS appendToPath "$GAFFER_ROOT/startup" GAFFER_STARTUP_PATHS prependToPath "$GAFFER_ROOT/graphics" GAFFERUI_IMAGE_PATHS
Naive fix for deprecation warning. `DeprecationWarning: Importing from numpy.testing.decorators is deprecated, import from numpy.testing instead.`
try: - from numpy.testing.decorators import * + from numpy.testing import * from numpy.testing.noseclasses import KnownFailure except ImportError: from ._decorators import *
Use the Python version for checking array flags. Thanks to Thomas for pointing out the bug.
@@ -14,9 +14,7 @@ from numpy cimport ndarray, import_array,\ NPY_UINT16, NPY_UINT32, NPY_UINT64, npy_intp,\ PyArray_SimpleNew, PyArray_ContiguousFromAny,\ PyArray_FROM_OTF, PyArray_DIM,\ - NPY_CONTIGUOUS, NPY_NOTSWAPPED, NPY_FORCECAST,\ - NPY_C_CONTIGUOUS, NPY_WRITEABLE - + NPY_CONTIGUOUS, NPY_NOTSWAPPED, NPY_FORCECAST # Initialization import_array() @@ -69,11 +67,11 @@ cdef int check_numpy(ndarray arr, hid_t space_id, int write): # Validate array flags if write: - if not (arr.flags & NPY_C_CONTIGUOUS and arr.flags & NPY_WRITEABLE): + if not (arr.flags["C_CONTIGUOUS"] and arr.flags["WRITEABLE"]): PyErr_SetString(TypeError, b"Array must be C-contiguous and writable") return -1 else: - if not (arr.flags & NPY_C_CONTIGUOUS): + if not arr.flags["C_CONTIGUOUS"]: PyErr_SetString(TypeError, b"Array must be C-contiguous") return -1
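For context (editorial note, not part of the commit above): a minimal standalone sketch of the Python-level flags check the patch switches to, using plain NumPy; the array and its shape here are made up for illustration.

```python
import numpy as np

a = np.arange(6, dtype=np.int64).reshape(2, 3)

# The flags mapping exposes contiguity and writability at the Python level.
print(a.flags["C_CONTIGUOUS"], a.flags["WRITEABLE"])  # True True

# A strided view is no longer C-contiguous, so such a check would reject it.
print(a[:, ::2].flags["C_CONTIGUOUS"])  # False
```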
Added utf-8 encoding to fix HTML encoding error. Trying this on Windows 10 to see if it fixes the error: 'charmap' codec can't decode byte 0x9d
@@ -68,7 +68,7 @@ class DialogInformation(QtWidgets.QDialog): scriptdir = os.path.dirname(os.path.abspath(__file__)) htmlFile = os.path.join(scriptdir, filename) try: - with open(htmlFile, 'r') as f: + with open(htmlFile, 'r', encoding='utf8') as f: self.text = f.read() self.ui.textEdit.setHtml(self.text) except Exception as e:
Catch exceptions for invalid archives and content types. I often run `insights-run <package>` instead of `insights-run -p <package>`. This results in an exception and traceback as insights tries to interpret the package as a directory or archive to analyze. This commit fixes that by catching the exceptions and logging friendlier messages.
@@ -28,7 +28,7 @@ from .core import FileListing, LegacyItemAccess, SysconfigOptions # noqa: F401 from .core import YAMLParser, JSONParser, XMLParser, CommandParser # noqa: F401 from .core import AttributeDict # noqa: F401 from .core import Syslog # noqa: F401 -from .core.archives import COMPRESSION_TYPES, extract # noqa: F401 +from .core.archives import COMPRESSION_TYPES, extract, InvalidArchive, InvalidContentType # noqa: F401 from .core import dr # noqa: F401 from .core.context import ClusterArchiveContext, HostContext, HostArchiveContext, SerializedArchiveContext # noqa: F401 from .core.dr import SkipComponent # noqa: F401 @@ -260,6 +260,7 @@ def run(component=None, root=None, print_summary=False, broker = dr.Broker() + try: if formatter: formatter.preprocess(broker) broker = _run(broker, graph, root, context=context, inventory=inventory) @@ -271,6 +272,13 @@ def run(component=None, root=None, print_summary=False, broker = _run(broker, graph, root, context=context, inventory=inventory) return broker + except (InvalidContentType, InvalidArchive): + if args and args.archive: + path = args.archive + msg = "Invalid directory or archive. Did you mean to pass -p {p}?" + log.error(msg.format(p=path)) + else: + raise def main():
test targets fast-forward attack recovery. This test simulates recovery from a targets fast-forward attack: it simulates that the targets keys were compromised and the attacker generated a new, high version of the targets. The repository generates a new key for snapshot to roll back the targets version to the initial version.
@@ -499,6 +499,41 @@ class TestRefresh(unittest.TestCase): self._assert_version_equals("timestamp", 3) self._assert_version_equals("snapshot", 3) + def test_new_targets_fast_forward_recovery(self) -> None: + """Test targets fast-forward recovery using key rotation. + + The targets recovery is made by issuing new Snapshot keys, by following + steps: + - Remove the snapshot key + - Create and add a new key for snapshot + - Bump and publish root + - Rollback the target version + """ + # attacker updates to a higher version + self.sim.targets.version = 99999 + self.sim.update_snapshot() + + # client refreshes the metadata and see the new targets version + self._run_refresh() + self._assert_version_equals(Targets.type, 99999) + + # repo add new snapshot keys and recovers the targets version + self.sim.root.roles[Snapshot.type].keyids.clear() + self.sim.signers[Snapshot.type].clear() + snapshot_key, snapshot_signer = self.sim.create_key() + self.sim.root.add_key(Snapshot.type, snapshot_key) + self.sim.add_signer(Snapshot.type, snapshot_signer) + + self.sim.root.version += 1 + self.sim.publish_root() + + self.sim.targets.version = 1 + self.sim.update_snapshot() + + # client refreshes the metadata version and see initial targets version + self._run_refresh() + self._assert_version_equals(Targets.type, 1) + if __name__ == "__main__": if "--dump" in sys.argv:
Turn on include_deployment_info for all. With the next release, this will no longer be an experimental feature.
@@ -1434,9 +1434,9 @@ class KubernetesMonitor( ScalyrMonitor ): self.__kubelet_api = None self.__gather_k8s_pod_info = self._config.get('gather_k8s_pod_info') - # This is currently an experimental feature. Including deployment information for every event uploaded about - # a pod (cluster name, deployment name, deployment labels) - self.__include_deployment_info = self._config.get('include_deployment_info', convert_to=bool, default=False) + # Including deployment information for every event uploaded about a pod (cluster name, deployment name, + # deployment labels) + self.__include_deployment_info = self._config.get('include_deployment_info', convert_to=bool, default=True) self.__container_checker = None if self._config.get('log_mode') != 'syslog':
Add private hook for changing the API version. This is mainly a temporary, undocumented stopgap for bots that are screwed over while waiting for Discord to grant them the message content intent.
@@ -262,6 +262,22 @@ def handle_message_parameters( return MultipartParameters(payload=payload, multipart=multipart, files=files) +INTERNAL_API_VERSION: int = 10 + + +def _set_api_version(value: int): + global INTERNAL_API_VERSION + + if not isinstance(value, int): + raise TypeError(f'expected int not {value.__class__!r}') + + if value not in (9, 10): + raise ValueError(f'expected either 9 or 10 not {value}') + + INTERNAL_API_VERSION = value + Route.BASE = f'https://discord.com/api/v{value}' + + class Route: BASE: ClassVar[str] = 'https://discord.com/api/v10' @@ -1988,10 +2004,10 @@ class HTTPClient: except HTTPException as exc: raise GatewayNotFound() from exc if zlib: - value = '{0}?encoding={1}&v=10&compress=zlib-stream' + value = '{0}?encoding={1}&v={2}&compress=zlib-stream' else: - value = '{0}?encoding={1}&v=10' - return value.format(data['url'], encoding) + value = '{0}?encoding={1}&v={2}' + return value.format(data['url'], encoding, INTERNAL_API_VERSION) async def get_bot_gateway(self, *, encoding: str = 'json', zlib: bool = True) -> Tuple[int, str]: try: @@ -2000,10 +2016,10 @@ class HTTPClient: raise GatewayNotFound() from exc if zlib: - value = '{0}?encoding={1}&v=10&compress=zlib-stream' + value = '{0}?encoding={1}&v={2}&compress=zlib-stream' else: - value = '{0}?encoding={1}&v=10' - return data['shards'], value.format(data['url'], encoding) + value = '{0}?encoding={1}&v={2}' + return data['shards'], value.format(data['url'], encoding, INTERNAL_API_VERSION) def get_user(self, user_id: Snowflake) -> Response[user.User]: return self.request(Route('GET', '/users/{user_id}', user_id=user_id))
make --test-mode work with uncompressed data. Closes Relates
@@ -544,7 +544,7 @@ def post_process_for_test_mode(t): document_set.document_archive = "%s-1k%s%s" % (path_2, ext_2, ext) document_set.document_file = "%s-1k%s" % (path_2, ext_2) elif document_set.has_uncompressed_corpus(): - path, ext = io.splitext(corpus.document_file) + path, ext = io.splitext(document_set.document_file) document_set.document_file = "%s-1k%s" % (path, ext) else: raise exceptions.RallyAssertionError("Document corpus [%s] has neither compressed nor uncompressed corpus." %
Add support for vmin, vmax in plot grid. When plotting images on a grid, it's sometimes useful to set all windows to the same min and max values for purposes of comparison. The simplest way to do that is to set the window limits using vmin and vmax.
@@ -158,6 +158,8 @@ def plot_grid( figsize=1.0, rpad=0, cpad=0, + vmin=None, + vmax=None, # title arguments title=None, tfontsize=20, @@ -417,7 +419,7 @@ def plot_grid( sliceidx = slices[rowidx][colidx] if not one_slice else slices tmpslice = slice_image(tmpimg, tmpaxis, sliceidx) tmpslice = reorient_slice(tmpslice, tmpaxis) - ax.imshow(tmpslice, cmap="Greys_r", aspect="auto") + ax.imshow(tmpslice, cmap="Greys_r", aspect="auto", vmin=vmin, vmax=vmax) ax.axis("off") if filename is not None:
Ensures the current working directory won't be deleted as part of the cleanup procedure.
@@ -16,6 +16,8 @@ rm -rf /root/.cache/pip/* apt-get autoremove -y # Delete downloaded archive files apt-get clean +# Ensures the current working directory won't be deleted +cd /usr/local/src/ # Delete source files used for building binaries rm -rf /usr/local/src/* # Delete conda downloaded tarballs
fix default flag value for get_searches: use saved by default, quick disabled
@@ -106,7 +106,8 @@ def saved_search(search_id, sort, pretty, limit): @cli.command('searches') @click.option('--quick', is_flag=True, help='Quick searches') [email protected]('--saved', is_flag=True, help='Saved searches (default)') [email protected]('--saved', default=True, is_flag=True, + help='Saved searches (default)') def get_searches(quick, saved): '''List searches''' cl = clientv1()
Token remover: escape dashes in regex. They need to be escaped when they're in a character set. By default, they are interpreted as part of the character range syntax.
@@ -34,7 +34,7 @@ TOKEN_EPOCH = 1_293_840_000 # The HMAC isn't parsed further, but it's in the regex to ensure it at least exists in the string. # Each part only matches base64 URL-safe characters. # Padding has never been observed, but the padding character '=' is matched just in case. -TOKEN_RE = re.compile(r"[\w-=]+\.[\w-=]+\.[\w-=]+", re.ASCII) +TOKEN_RE = re.compile(r"[\w\-=]+\.[\w\-=]+\.[\w\-=]+", re.ASCII) class TokenRemover(Cog):
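For context (editorial note, not part of the commit above): a minimal sketch of the corrected pattern in use; the token-like string below is fabricated for illustration only.

```python
import re

# Same pattern as the fix: '-' escaped inside each character set.
TOKEN_RE = re.compile(r"[\w\-=]+\.[\w\-=]+\.[\w\-=]+", re.ASCII)

sample = "NzY1MjM4MTY1NDc2NDE3MDI0.X2qhZQ.fake-hmac_value"
print(bool(TOKEN_RE.search(sample)))  # True: dashes and underscores match literally
```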
Fix duplicates on principalfield. Fixes
add: function add(people) { var self = this; - function comparePersons(first, second) { - if (first.id !== undefined && second.id !== undefined) { - return first.id == second.id; - } else { - return first.email && second.email && first.email.toLowerCase() == second.email.toLowerCase(); + function getUserId(obj) { + if (obj._type === 'Avatar') { + return obj.id; + } else if (obj._type === 'EventPerson') { + return obj.user_id; } } - function cleanDuplicates(people) { + function comparePersons(a, b) { + var aUserId = getUserId(a); + var bUserId = getUserId(b); + + + if (a._type === b._type + && a.id !== undefined && b.id !== undefined + && a.id === b.id) { + return true; + } + + if (aUserId !== undefined && bUserId !== undefined + && aUserId === bUserId) { + return true; + } + + return !!a.email && !!b.email && a.email.toLowerCase() === b.email.toLowerCase(); + } + + function deduplicate(people) { + var newPeople = []; + people.forEach(function(person) { + var found = _.find(newPeople, function(newPerson) { + return comparePersons(person, newPerson); + }); + if (!found) { + newPeople.push(person); + } + }); + return newPeople; + } + + function skipExisting(people) { _.each(self.people, function(person) { people = _.without(people, _.find(people, function(p) { return comparePersons(person, p); return people; } - self.people = self.options.overwriteChoice ? people : self.people.concat(cleanDuplicates(people)); + people = deduplicate(people); + self.people = self.options.overwriteChoice ? people : self.people.concat(skipExisting(people)); self.people = _.sortBy(self.people, 'name'); self.options.onAdd(self.people); self._update();
work around pypy ctypes incompatibility. The following works with cpython, but not with pypy:

    import ctypes
    class Test:
        def __init__(self, value):
            self._as_parameter_ = value
    ctypes.cast(Test(0), ctypes.POINTER(ctypes.c_int64))
@@ -854,12 +854,12 @@ class c_array(TestCase): def test_list(self): a = [1,2,3] a_ct = nutils.types.c_array[numpy.int64](a) - self.assertEqual(ctypes.cast(a_ct, ctypes.POINTER(ctypes.c_int64)).contents.value, 1) + self.assertEqual(a_ct.data_as(ctypes.POINTER(ctypes.c_int64)).contents.value, 1) def test_array(self): a = numpy.array([1,2,3], dtype=numpy.int64) a_ct = nutils.types.c_array[numpy.int64](a) - self.assertEqual(ctypes.cast(a_ct, ctypes.POINTER(ctypes.c_int64)).contents.value, 1) + self.assertEqual(a_ct.data_as(ctypes.POINTER(ctypes.c_int64)).contents.value, 1) def test_array_invalid_dtype(self): a = numpy.array([1,2,3], dtype=numpy.int32)
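For context (editorial note, not part of the commit above): a minimal sketch of the portable NumPy idiom the updated tests rely on, shown with a plain NumPy array rather than the nutils `c_array` wrapper.

```python
import ctypes

import numpy as np

a = np.array([1, 2, 3], dtype=np.int64)

# ndarray.ctypes.data_as() works on both CPython and PyPy, unlike
# ctypes.cast() on an arbitrary object that only defines _as_parameter_.
ptr = a.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
print(ptr.contents.value)  # 1
```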
Actually browse into the WeakProxy. This doesn't change the behavior observably, but it could have kept a widget alive when it wasn't meant to be, and I feel like it's bad to make the inspector change the behavior of the app under inspection.
@@ -533,7 +533,6 @@ class Inspector(FloatLayout): node = None if type(widget) is WeakProxy: wk_widget = widget.__ref__ - widget = wk_widget() else: wk_widget = weakref.ref(widget) for key in keys:
travis v3. Attempt 3 to get Travis CI to work...
@@ -4,10 +4,6 @@ python: - 2.7 - 3.6 -env: - global: - - CMD='./runtests.py' - before_install: - wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh - chmod +x miniconda.sh @@ -22,6 +18,6 @@ install: - pip install -U pip - python setup.py install -script: $CMD +script: python runtests.py after_success: coveralls
[kubernetes_state] correcting conf.example. The `tags:` entry should be in the `instances:` section, and not in the `init_config:` section.
init_config: - # Custom tags can be added to all metrics reported by this integration - # tags: - # - optional_tag1 - # - optional_tag2 instances: # To enable Kube State metrics you must specify the url exposing the API @@ -23,3 +19,8 @@ instances: # label_to_match: deployment # labels_to_get: # - label_addonmanager_kubernetes_io_mode + + # You can also specify here custom tags to be added to all metrics reported by this integration + # tags: + # - optional_tag1 + # - optional_tag2
Update options.md: add note that --atomic is not compatible with Cython.
@@ -468,7 +468,7 @@ Order imports by type, which is determined by case, in addition to alphabeticall ## Atomic -Ensures the output doesn't save if the resulting file contains syntax errors. +Ensures the output doesn't save if the resulting file contains syntax errors. This option is not compatible with Cython code. **Type:** Bool **Default:** `False`
ci: allow empty ref in main link. This commit enables an empty href replacement with the main jobs URL.
@@ -60,6 +60,6 @@ find ./$PIPELINE_ID -type f -name '*.html' -exec sed -i -e 's#>ara #>KubeInit #g find ./$PIPELINE_ID -type f -name '*.html' -exec sed -i -E -e "/href=\".+\">Playbooks/ s/href=\".+\"/href=\"https:\/\/storage.googleapis.com\/kubeinit-ci\/jobs\/${PIPELINE_ID}\/index.html\"/g" {} \; find ./$PIPELINE_ID -type f -name '*.html' -exec sed -i -E -e "/href=\".+\">Hosts/ s/href=\".+\"/href=\"https:\/\/storage.googleapis.com\/kubeinit-ci\/jobs\/${PIPELINE_ID}\/hosts\/index.html\"/g" {} \; -find ./$PIPELINE_ID -type f -name '*.html' -exec sed -i -E -e "/class=\"navbar-brand\" href=\".+\">/ s/href=\".+\"/href=\"https:\/\/storage.googleapis.com\/kubeinit-ci\/index.html\"/g" {} \; +find ./$PIPELINE_ID -type f -name '*.html' -exec sed -i -E -e "/class=\"navbar-brand\" href=\".*\">/ s/href=\".+\"/href=\"https:\/\/storage.googleapis.com\/kubeinit-ci\/index.html\"/g" {} \; echo "(launch_e2e_ara.sh) ==> Finishing the bash executor"
[Chore] Fix docker tests. Problem: to allow for lower version releases, we have changed the docker static build script to use 'make' instead of 'make build'. This now leads to the static profile being overridden and docker tests failing. Solution: change it back.
@@ -21,5 +21,5 @@ source "$HOME/.cargo/env" opam init --bare --disable-sandboxing make build-deps -eval "$(opam env)" && PROFILE="static" make && make build-sandbox +eval "$(opam env)" && PROFILE="static" make build && make build-sandbox chmod +w tezos-*
Update file_manager.py: change the default setting to 0.
@@ -22,7 +22,7 @@ mod.list('file_manager_directory_exclusions', desc='list of titles that are excl mod.setting('file_manager_auto_show_pickers', 'int') ctx = Context() -ctx.settings["self.file_manager_auto_show_pickers"] = 1 +ctx.settings["self.file_manager_auto_show_pickers"] = 0 user_path = os.path.expanduser('~')
fix: reverse tab navigation. Reverse tab navigation seems to have been broken for years; instead of going back to the previous field, it jumps to the primary button. Root cause: when all fields are exhausted it's supposed to go to the primary button; however, that shouldn't happen for shift-tab.
@@ -408,7 +408,8 @@ frappe.ui.form.Layout = class Layout { // next row grid_row.grid.grid_rows[grid_row.doc.idx].toggle_view(true); } - } else { + } else if (!shift) { + // End of tab navigation $(this.primary_button).focus(); } }
[client] Fix symlink bug that happens only on task output with a symlink.
@@ -1278,8 +1278,9 @@ def _enqueue_dir(dirpath, blacklist, tempdir, hash_algo, hash_algo_name): files = {} for item, relpath, meta in _directory_to_metadata( dirpath, hash_algo, blacklist): + # item is None for a symlink. files[relpath] = meta - if item.digest: + if item: yield item data = {
Fixing small typo in documentation. Fixing typo in documentation.
@@ -54,7 +54,7 @@ class BasePredictionWriter(Callback): def __init__(self, output_dir: str, write_interval: str): super().__init__(write_interval) - self.output_dir + self.output_dir = output_dir def write_on_batch_end( self, trainer, pl_module: 'LightningModule', prediction: Any, batch_indices: List[int], batch: Any,
More structures, very cool, nice.
@@ -186,7 +186,7 @@ class FunctionReference(Structure): self.name = branches[0] def transpile(self) -> str: - return "" + return "stack.append(FN_{})" class ListLiteral(Structure): @@ -195,7 +195,17 @@ class ListLiteral(Structure): self.items = branches def transpile(self) -> str: - return "" + # We have to manually build this because we don't know how + # many list items there will be. + + temp = "temporary_list = []" + temp += ( + "\ndef list_item(s, ctx):\n stack = s[::]\n " + "{}\n return pop(stack, ctx=ctx)\n" + "temp_list.append(list_item(stack))\n" + ) * len(self.items) + temp += "stack.append(temp_list[::])" + return temp class MonadicModifier(Structure):
Update README.md: added "sudo rosdep init" to the "Installing submodules and dependencies" section.
@@ -41,6 +41,7 @@ cd ~/catkin_ws ``` cd ~/catkin_ws/src/soccer_ws git submodule update --recursive --init +sudo rosdep init # If first time using ROS in your environment. rosdep update cd ~/catkin_ws/ rosdep install --from-paths src --ignore-src -r -y --rosdistro melodic # To install all dependencies (use correct ROS distro version), add --os ubuntu:xenial if your linux is based on it but has different distro name and version. Ubuntu 16.04 uses kinetic instead of melodic. For Jetson TX2 use kinetic.
[tests] Fix error message in need_right: show username instead of <bound method ...> (https://travis-ci.org/github/wikimedia/pywikibot/jobs/663162283#L3814)
@@ -1336,7 +1336,7 @@ def need_right(right=None): elif right is not None and not self.has_right(right): raise UserRightsError('User "{}" does not have required ' 'user right "{}"' - .format(self.user, right)) + .format(self.user(), right)) return fn(self, *args, **kwargs) if not __debug__:
New: Strokes are converted to path after svg node generation. Makes colorization and scaling of snippets easier in Inkscape. Resolves
@@ -387,6 +387,7 @@ class TexText(inkex.EffectExtension): converter = TexToPdfConverter(self.requirements_checker) converter.tex_to_pdf(tex_executable, text, preamble_file) converter.pdf_to_svg() + converter.stroke_to_path() tt_node = TexTextElement(converter.tmp("svg"), self.svg.unittouu("1mm")) # -- Store textext attributes @@ -580,6 +581,21 @@ class TexToPdfConverter: ] ) + def stroke_to_path(self): + """ + Convert stroke elements to path elements for easier colorization and scaling in Inkscape + + E.g. $\\overline x$ -> the line above x is converted from stroke to path + """ + exec_command([ + self.checker.inkscape_executable, + "-g", + "--batch-process", + "--actions=EditSelectAll;StrokeToPath;export-filename:{0};export-do;FileClose".format(self.tmp('svg')), + self.tmp('svg') + ] + ) + def pdf_to_png(self, white_bg): """Convert the PDF file to a SVG file""" cmd = [
added working function to check for dupes in database. This new function checks the database instead of checking whether the folder exists.
@@ -1395,3 +1395,36 @@ def database_updater(args, job, wait_time=90): else: logging.debug("successfully written to the database") return True + + +def job_dupe_check(job): + """ + function for checking the database to look for jobs that have completed + successfully with the same crc + + :param job: The job obj so we can use the crc/title etc + :return: True if we have found dupes with the same crc + False if we didnt find any with the same crc + """ + from arm.models.models import Job, Config, Track, User, Alembic_version # noqa: F401 + jobs = Job.query.filter_by(crc_id=job.crc_id, status="success") + logging.debug("search - posts=" + str(jobs)) + r = {} + i = 0 + for j in jobs: + logging.debug("job obj= " + str(j.get_d())) + x = j.get_d().items() + r[i] = {} + for key, value in iter(x): + r[i][str(key)] = str(value) + # logging.debug(str(key) + "= " + str(value)) + i += 1 + + logging.debug(r) + logging.debug("r len=" + str(len(r))) + if jobs is not None and len(r) > 0: + logging.debug("jobs is none or len(r) - we have jobs") + return True + else: + logging.debug("jobs is none or len(r) is 0 - we have no jobs") + return False
Publish packages only on lest/prometheus-rpm repo. Only run the `publish_packages` job on the `lest/prometheus-rpm` repo.
@@ -51,7 +51,7 @@ jobs: publish_packages: needs: build_packages - if: github.ref == 'refs/heads/master' + if: github.ref == 'refs/heads/master' && github.repository == 'lest/prometheus-rpm' strategy: matrix: version:
Update flan-t5 original model link. Update flan-t5.mdx.
@@ -46,4 +46,4 @@ Google has released the following variants: One can refer to [T5's documentation page](t5) for all tips, code examples and notebooks. As well as the FLAN-T5 model card for more details regarding training and evaluation of the model. -The original checkpoints can be found [here](https://github.com/google-research/t5x/blob/main/docs/models.md#mixture-of-experts-moe-checkpoints). \ No newline at end of file +The original checkpoints can be found [here](https://github.com/google-research/t5x/blob/main/docs/models.md#flan-t5-checkpoints).
Version/PEP440: move the git tag to the local version. Closes
@@ -56,13 +56,13 @@ Following Segment of this file was taken from the pandas project(https://github. MAJOR = 0 MINOR = 3 MICRO = 0 +DEV = 0 # For multiple dev pre-releases, please increment this value ISRELEASED = False VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) QUALIFIER = '' FULLVERSION = VERSION if not ISRELEASED: - FULLVERSION += '.dev' try: import subprocess try: @@ -78,9 +78,11 @@ if not ISRELEASED: if sys.version_info[0] >= 3: rev = rev.decode('ascii') - FULLVERSION += "-%s" % rev + # Use a local version tag to include the git revision + FULLVERSION += ".dev{}+git.{}".format(DEV, rev) except: - warnings.warn("WARNING: Couldn't get git revision") + FULLVERSION += ".dev{}".format(DEV) + warnings.warn('WARNING: Could not get the git revision, version will be "{}"'.format(FULLVERSION)) else: FULLVERSION += QUALIFIER
Remove task record retries_left because it is no longer used. A previous commit replaced its use with fail_count.
@@ -401,7 +401,6 @@ class DataFlowKernel(object): # executor or memoization hash function. logger.debug("Got an exception launching task", exc_info=True) - self.tasks[task_id]['retries_left'] = 0 exec_fu = Future() exec_fu.set_exception(e) else: @@ -415,7 +414,6 @@ class DataFlowKernel(object): task_log_info = self._create_task_log_info(task_record) self.monitoring.send(MessageType.TASK_INFO, task_log_info) - self.tasks[task_id]['retries_left'] = 0 exec_fu = Future() exec_fu.set_exception(DependencyError(exceptions, task_id)) @@ -484,8 +482,6 @@ class DataFlowKernel(object): task_log_info = self._create_task_log_info(self.tasks[task_id]) self.monitoring.send(MessageType.TASK_INFO, task_log_info) - self.tasks[task_id]['retries_left'] = self._config.retries - \ - self.tasks[task_id]['fail_count'] logger.info("Task {} launched on executor {}".format(task_id, executor.label)) return exec_fu
Rename "commune" to "municipality" in default postcode boost. Fix addok/addok#314.
@@ -107,7 +107,7 @@ FIELDS = [ {'key': 'name', 'boost': 4, 'null': False}, {'key': 'street'}, {'key': 'postcode', - 'boost': lambda doc: 1.2 if doc.get('type') == 'commune' else 1}, + 'boost': lambda doc: 1.2 if doc.get('type') == 'municipality' else 1}, {'key': 'city'}, {'key': 'housenumbers'}, {'key': 'context'},
Remove Indeed. Indeed has discontinued their API. See
@@ -566,7 +566,6 @@ API | Description | Auth | HTTPS | CORS | | [Careerjet](https://www.careerjet.com/partners/api/) | Job search engine | `apiKey` | No | Unknown | | [Github Jobs](https://jobs.github.com/api) | Jobs for software developers | No | Yes | Yes | | [GraphQL Jobs](https://graphql.jobs/docs/api/) | Jobs with GraphQL | No | Yes | Yes | -| [Indeed](https://www.indeed.com/publisher) | Job board aggregator | `apiKey` | Yes | Unknown | | [Jobs2Careers](http://api.jobs2careers.com/api/spec.pdf) | Job aggregator | `apiKey` | Yes | Unknown | | [Jooble](https://us.jooble.org/api/about) | Job search engine | `apiKey` | Yes | Unknown | | [Juju](http://www.juju.com/publisher/spec/) | Job search engine | `apiKey` | No | Unknown |
test_neutron_resources.py exception handler. The debug helper used in the neutron_resources exception handler was hiding the original TimeoutException behind a KeyError exception, which was not helpful behavior. This change logs the situation and re-raises the original exception.
@@ -84,12 +84,15 @@ class NeutronResourcesTestJSON(base.BaseOrchestrationTest): # to heat. body = cls.client.show_resource(cls.stack_identifier, 'Server') - server_id = body['physical_resource_id'] + server_id = body.get('physical_resource_id') + if server_id: LOG.debug('Console output for %s', server_id) output = cls.servers_client.get_console_output( server_id)['output'] LOG.debug(output) - raise + else: + LOG.debug('Server resource is %s', body) + raise # original exception cls.test_resources = {} for resource in resources:
Fix newmm long test test case. Deal with ValueError in thai_strftime. Add more soundex test cases.
@@ -318,8 +318,6 @@ def thaiword_to_time(text: str, padding: bool = True) -> str: if padding and len(text) == 1: text = "0" + text - if text == "0": - text = "00" text += ":" if minute != 0:
ctcdecode_numpy: fix an unused variable warning. I'm commenting it out, because it's "used" by other commented-out code below. It should probably all be cleaned up at some point, but I don't have time to deal with it right now.
@@ -177,7 +177,7 @@ std::vector<std::pair<float, Output>> ctc_beam_search_decoder( std::vector<int> output; std::vector<int> timesteps; prefixes[i]->get_path_vec(output, timesteps); - auto prefix_length = output.size(); + //auto prefix_length = output.size(); auto words = ext_scorer->split_labels(output); // remove word insert //approx_ctc = approx_ctc - prefix_length * ext_scorer->beta;
Translate superscripts to parenthesized numbers. This ensures that mixes of superscripts and normal characters fail.
@@ -586,3 +586,14 @@ def test_powers(power, expected): def test_unicode(string, unit): assert u_format.Generic.parse(string) == unit assert u.Unit(string) == unit + + [email protected]('string', [ + 'g\N{MICRO SIGN}', + 'g\N{MINUS SIGN}', + 'm\N{SUPERSCRIPT MINUS}1', + 'm+\N{SUPERSCRIPT ONE}', + 'm\N{MINUS SIGN}\N{SUPERSCRIPT ONE}']) +def test_unicode_failures(string): + with pytest.raises(ValueError): + u.Unit(string)
Update jenkins.sh: added google to the make extras.
@@ -12,7 +12,7 @@ virtualenv --system-site-packages --never-download venv make prepare # Install Toil and its runtime requirements -make develop extras=[aws,mesos,azure,encryption,cwl] +make develop extras=[aws,mesos,azure,google,encryption,cwl] # Required for running Mesos master and slave daemons as part of the tests export LIBPROCESS_IP=127.0.0.1
fix: Close <li> tag. Fix formatting.
@@ -417,17 +417,27 @@ frappe.ui.Page = Class.extend({ if (shortcut) { let shortcut_obj = this.prepare_shortcut_obj(shortcut, click, label); - $li = $(`<li><a class="grey-link dropdown-item" href="#" onClick="return false;"> + $li = $(` + <li> + <a class="grey-link dropdown-item" href="#" onClick="return false;"> ${$icon} <span class="menu-item-label">${label}</span> <kbd class="pull-right"> <span>${shortcut_obj.shortcut_label}</span> </kbd> - </a><li>`); + </a> + </li> + `); frappe.ui.keys.add_shortcut(shortcut_obj); } else { - $li = $(`<li><a class="grey-link dropdown-item" href="#" onClick="return false;"> - ${$icon}<span class="menu-item-label">${label}</span></a><li>`); + $li = $(` + <li> + <a class="grey-link dropdown-item" href="#" onClick="return false;"> + ${$icon} + <span class="menu-item-label">${label}</span> + </a> + </li> + `); } var $link = $li.find("a").on("click", click);
Move DataLoader instantiation further back in ketos. The new torchvision version runs len() on the dataset immediately, when it hasn't been encoded yet.
@@ -194,8 +194,6 @@ def train(ctx, pad, output, spec, append, load, savefreq, report, quit, epochs, except KrakenInputException as e: logger.warning(str(e)) - train_loader = DataLoader(gt_set, batch_size=1, shuffle=True, pin_memory=True) - test_set = GroundTruthDataset(normalization=normalization, reorder=reorder, im_transforms=transforms, preload=preload) with log.progressbar(te_im, label='Building test set') as bar: for im in bar: @@ -283,6 +281,8 @@ def train(ctx, pad, output, spec, append, load, savefreq, report, quit, epochs, # initialize codec message('\u2713', fg='green') + train_loader = DataLoader(gt_set, batch_size=1, shuffle=True, pin_memory=True) + # don't encode test set as the alphabets may not match causing encoding failures test_set.training_set = list(zip(test_set._images, test_set._gt))
update test sparse/cluster versions: torch-sparse==0.6.13, torch-cluster==1.6.0
@@ -39,8 +39,8 @@ jobs: pip install torch==1.11.0+cpu -f https://download.pytorch.org/whl/torch_stable.html pip install torchvision==0.12.0+cpu -f https://download.pytorch.org/whl/torch_stable.html pip install torch-scatter==2.0.9 -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html - pip install torch-sparse==0.6.12 -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html - pip install torch-cluster==1.5.9 -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html + pip install torch-sparse==0.6.13 -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html + pip install torch-cluster==1.6.0 -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html pip install torch-spline-conv==1.2.1 -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html pip install torch-geometric pip install -e .[dev]
Azure : Add `-stopOnFailure` to test flags. This is a temporary measure we hope will shed some light on current problems.
@@ -80,7 +80,7 @@ jobs: AZURE: 1 - script: | - ./install/$(Gaffer.Build.Name)/bin/gaffer test + ./install/$(Gaffer.Build.Name)/bin/gaffer test -stopOnFailure displayName: 'Test' env: DISPLAY: :99.0
Make HTML conversion more explicit and less error-prone. Made the code that displays reports when non-owner admins edit the page much clearer.
@@ -50,8 +50,8 @@ hqDefine('hqwebapp/js/multiselect_utils', function () { ) { if (!isOwner) { $('#' + multiselectId).hide().after( - $('#' + multiselectId).children().each(function (el) { - $(el).text(); + $('#' + multiselectId).children().map(function () { + return $("<div>").text($(this).text()).get(0); }) ); return;
Fixed bug where selection wasn't updating if the user scrolled. Fixed warning that was occurring when scale_x of selection was 0.
@@ -578,10 +578,14 @@ class TextField(Entity): if key in self.shortcuts['scroll_up'] and self.scroll_enabled and mouse.hovered_entity == self.bg: self.scroll_position = (self.scroll_position[0], max(self.scroll_position[1] - 1, 0)) self.cursor.y = max(self.cursor.y - 1, 0) + if self.selection: + self.draw_selection() if key in self.shortcuts['scroll_down'] and self.scroll_enabled and mouse.hovered_entity == self.bg: self.scroll_position = (self.scroll_position[0], min(self.scroll_position[1] + 1, max(0, len(lines) - self.scroll_size[1]))) self.cursor.y = min(self.cursor.y + 1, len(lines) - 1) + if self.selection: + self.draw_selection() if key in self.shortcuts['undo']: if not on_undo: @@ -787,7 +791,7 @@ class TextField(Entity): for c in self.selection_parent.children: c.y -= self.scroll_position[1] if c.y < 0 or c.y >= self.scroll_size[1]: - c.scale_x = 0 + destroy(c) else: c.x -= self.scroll_position[0] if c.x < 0:
Disable debug logging for streaming http requests. Consuming the response content breaks streaming responses in e.g. watch_list.
@@ -63,7 +63,7 @@ class Client(object): def _call(self, method, url, body=None, timeout=DEFAULT_TIMEOUT_SECONDS, **kwargs): self.init_session() resp = self._session.request(method, config.api_server + url, json=body, timeout=timeout, **kwargs) - if config.debug: + if config.debug and not kwargs.get('stream', False): message = ['{:d} for url: {:s}'.format(resp.status_code, resp.url)] Client._add_request(message, resp.request) Client._add_response(message, resp)
Reduce request count to avoid blocking. Fixes incorrect loading of chapters from mtlnation.com.
@@ -16,7 +16,7 @@ class MTLNation(Crawler): has_mtl = True def initialize(self): - pass + self.init_executor(3) def login(self, email: str, password: str) -> None: self.post_json(
Integ-tests: fix test_cluster_in_no_internet_subnet
1. Specify a deployment type of FSx that is available in cn-north-1, us-east-1, and us-gov-west-1
2. Only enable the Intel HPC platform on CentOS7 and x86
@@ -64,8 +64,9 @@ SharedStorage: Name: myfsx StorageType: FsxLustre FsxLustreSettings: - StorageCapacity: 3600 -{% if os == "centos7" %} + StorageCapacity: 1200 + DeploymentType: SCRATCH_2 +{% if os == "centos7" and architechture=="x86_64"%} AdditionalPackages: IntelSoftware: IntelHpcPlatform: true
parent: remove unused imports. The stray functools import must have been there forever! Instant 4kb knocked off wire footprint.
@@ -53,16 +53,6 @@ import zlib # Absolute imports for <2.5. select = __import__('select') -try: - from cStringIO import StringIO -except ImportError: - from io import StringIO - -try: - from functools import lru_cache -except ImportError: - from mitogen.compat.functools import lru_cache - import mitogen.core from mitogen.core import b from mitogen.core import LOG
Add docstrings for arange and linspace.
@@ -334,6 +334,59 @@ Raises: """) + _docs.set_doc( + chainerx.arange, + """arange(start, stop=None, step=1, dtype=None, device=None) +Returns an array with evenly spaced values within a given interval. + +Values are generated within the half-open interval [start, stop). The first +three arguments are mapped like the ``range`` built-in function, i.e. start +and step are optional. + +Args: + start: Start of the interval. + stop: End of the interval. + step: Step width between each pair of consecutive values. + dtype: Data type specifier. It is inferred from other arguments by + default. + device (~chainerx.Device): Device on which the array is allocated. + If omitted, :ref:`the default device <chainerx_device>` is chosen. + +Returns: + ~chainerx.ndarray: The 1-D array of range values. + +.. seealso:: :func:`numpy.arange` +""") + + _docs.set_doc( + chainerx.linspace, + """linspace(start, stop, num=50, endpoint=True, dtype=None, +device=None) +Returns an array with evenly-spaced values within a given interval. + +Instead of specifying the step width like :func:`chainerx.arange()`, +this function requires the total number of elements specified. + +Args: + start: Start of the interval. + stop: End of the interval. + num: Number of elements. + endpoint (bool): If ``True``, the stop value is included as the last + element. Otherwise, the stop value is omitted. + retstep (bool): If ``True``, this function returns (array, step). + Otherwise, it returns only the array. + dtype: Data type specifier. It is inferred from the start and stop + arguments by default. + device (~chainerx.Device): Device on which the array is allocated. + If omitted, :ref:`the default device <chainerx_device>` is chosen. + +Returns: + ~chainerx.ndarray: The 1-D array of ranged values. + +.. seealso:: :func:`numpy.linspace` +""") + + def _docs_indexing(): pass
unpin dask
Summary: seems like it might have been a transient bad version; resolves
Test Plan: bk
Reviewers: catherinewu, nate, sandyryza
@@ -31,7 +31,7 @@ def get_version() -> str: install_requires=[ "bokeh", "dagster", - "dask[dataframe]>=1.2.2,<=2.30.0", + "dask[dataframe]>=1.2.2", "distributed>=1.28.1,<=2.30.1", # resolve issue with aiohttp pin of chardet for aiohttp<=3.7.3, req'd by dask-kubernetes # https://github.com/dagster-io/dagster/issues/3539
Remove an obsolete PyCharm workaround. TN:
@@ -1281,7 +1281,6 @@ class FieldsDictProxy(DictProxy): # StructMetaclass will see these None values. Struct = None ASTNode = None -_ = Struct, ASTNode # Workaround PyCharm useless warnings class StructMetaclass(CompiledTypeMetaclass):
explicitly provide memory format when calling clone() in TensorShape.cpp
Summary: Pull Request resolved:
Test Plan: Imported from OSS
@@ -411,7 +411,7 @@ Tensor narrow_copy_sparse(const Tensor& self, int64_t dim, int64_t start, int64_ } Tensor narrow_copy_dense(const Tensor& self, int64_t dim, int64_t start, int64_t length){ - return self.narrow(dim, start, length).clone(); + return self.narrow(dim, start, length).clone(at::MemoryFormat::Contiguous); } Tensor narrow(const Tensor& self, int64_t dim, int64_t start, int64_t length) {
Deterministic dataset order fix
* Deterministic dataset order fix: in order for the order of the files to be deterministic, in `tf.data.Dataset.list_files(..., shuffle)`, shuffle needs to be True, otherwise different iterator inits will yield different file orders
* removed unnecessary shuffle of filenames
* Removed the `_FILE_SHUFFLE_BUFFER` definition
@@ -58,8 +58,6 @@ import tensorflow as tf from official.utils.misc import model_helpers -# Use the number of training files as the shuffle buffer. -_FILE_SHUFFLE_BUFFER = 100 # Buffer size for reading records from a TFRecord file. Each training file is # 7.2 MB, so 8 MB allows an entire file to be kept in memory. _READ_RECORD_BUFFER = 8 * 1000 * 1000 @@ -220,11 +218,7 @@ def _read_and_batch_from_files( Returns: tf.data.Dataset object containing examples loaded from the files. """ - dataset = tf.data.Dataset.list_files(file_pattern) - - if shuffle: - # Shuffle filenames - dataset = dataset.shuffle(buffer_size=_FILE_SHUFFLE_BUFFER) + dataset = tf.data.Dataset.list_files(file_pattern, shuffle=shuffle) # Read files and interleave results. When training, the order of the examples # will be non-deterministic.
Update installation doc with minor improvements. Make some minor improvements to the install from source doc about flags to enable, package managers, and virtual environments.
@@ -67,6 +67,7 @@ The minimal building requirements are - CMake 3.5 or higher - We highly recommend to build with LLVM to enable all the features. - If you want to use CUDA, CUDA toolkit version >= 8.0 is required. If you are upgrading from an older version, make sure you purge the older version and reboot after installation. +- On macOS, you may want to install `Homebrew <https://brew.sh>` to easily install and manage dependencies. We use cmake to build the library. @@ -85,8 +86,9 @@ The configuration of TVM can be modified by `config.cmake`. - Edit ``build/config.cmake`` to customize the compilation options - On macOS, for some versions of Xcode, you need to add ``-lc++abi`` in the LDFLAGS or you'll get link errors. - - Change ``set(USE_CUDA OFF)`` to ``set(USE_CUDA ON)`` to enable CUDA backend. So do other backends and libraries - (OpenCL, RCOM, METAL, VULKAN, ...). + - Change ``set(USE_CUDA OFF)`` to ``set(USE_CUDA ON)`` to enable CUDA backend. Do the same for other backends and libraries + you want to build for (OpenCL, RCOM, METAL, VULKAN, ...). + - To help with debugging, ensure the embedded graph runtime and debugging functions are enabled with ``set(USE_GRAPH_RUNTIME ON)`` and ``set(USE_GRAPH_RUNTIME_DEBUG ON)`` - TVM optionally depends on LLVM. LLVM is required for CPU codegen that needs LLVM. @@ -94,7 +96,6 @@ The configuration of TVM can be modified by `config.cmake`. - Since LLVM takes long time to build from source, you can download pre-built version of LLVM from `LLVM Download Page <http://releases.llvm.org/download.html>`_. - - Unzip to a certain location, modify ``build/config.cmake`` to add ``set(USE_LLVM /path/to/your/llvm/bin/llvm-config)`` - You can also directly set ``set(USE_LLVM ON)`` and let cmake search for a usable version of LLVM. @@ -154,6 +155,11 @@ Python Package Installation TVM package ~~~~~~~~~~~ +Depending on your development environment, you may want to use a virtual environment and package manager, such +as ``virtualenv`` or ``conda``, to manage your python packages and dependencies. + +to install and maintain your python development environment. + The python package is located at `tvm/python` There are two ways to install the package: @@ -187,6 +193,10 @@ Method 2 Python dependencies ~~~~~~~~~~~~~~~~~~~ + +Note that the ``--user`` flag is not necessary if you're installing to a managed local environment, +like ``virtualenv``. + * Necessary dependencies: .. code:: bash
mkdir adjustments. Wrap mkdir calls in try/except blocks. When output_dir is set, make sure to output to a subdir so radarr/sonarr can pick up the files.
@@ -123,8 +123,18 @@ if settings.uTorrent['convert']: log.info("Multi File Torrent") settings.output_dir = os.path.abspath(os.path.join(path, '..', ("%s-%s" % (name, suffix)))) if not os.path.exists(settings.output_dir): + try: os.mkdir(settings.output_dir) - delete_dir = settings.output_dir + except: + log.exception("Error creating output directory.") + else: + settings.output_dir = os.path.abspath(os.path.join(settings.output_dir, name)) + if not os.path.exists(settings.output_dir): + try: + os.makedirs(settings.output_dir) + except: + log.exception("Error creating output sub directory.") + converter = MkvtoMp4(settings) @@ -160,6 +170,7 @@ if settings.uTorrent['convert']: log.debug("Ignoring file %s." % inputfile) path = settings.output_dir + delete_dir = settings.output_dir else: suffix = "copy" # name = name[:260-len(suffix)] @@ -170,8 +181,11 @@ else: log.info("Multi File Torrent") newpath = os.path.abspath(os.path.join(path, '..', ("%s-%s" % (name, suffix)))) if not os.path.exists(newpath): + try: os.mkdir(newpath) log.debug("Creating temporary directory %s" % newpath) + except: + log.exception("Error creating temporary directory.") if kind == 'single': inputfile = os.path.join(path, filename) shutil.copy(inputfile, newpath)
test account autocreate listing format Related-Change:
@@ -7636,8 +7636,7 @@ class TestContainerController(unittest.TestCase): # return 404 (as account is not found) and don't cache container test_status_map((404, 404, 404), 404, None, 404) - # cache a 204 for the account because it's sort of like it - # exists + # cache a 200 for the account because it appears to be created self.app.account_autocreate = True test_status_map((404, 404, 404), 404, None, 200) @@ -8809,15 +8808,39 @@ class TestAccountController(unittest.TestCase): # ALL nodes are asked to create the account # If successful, the GET request is repeated. controller.app.account_autocreate = True - self.assert_status_map(controller.GET, - (404, 404, 404), 200) - self.assert_status_map(controller.GET, - (404, 503, 404), 200) - + expected = 200 + self.assert_status_map(controller.GET, (404, 404, 404), expected) + self.assert_status_map(controller.GET, (404, 503, 404), expected) # We always return 503 if no majority between 4xx, 3xx or 2xx found self.assert_status_map(controller.GET, (500, 500, 400), 503) + def _check_autocreate_listing_with_query_string(self, query_string): + controller = proxy_server.AccountController(self.app, 'a') + controller.app.account_autocreate = True + statuses = (404, 404, 404) + expected = 200 + # get the response to check it has json content + with save_globals(): + set_http_connect(*statuses) + req = Request.blank('/v1/a' + query_string) + self.app.update_request(req) + res = controller.GET(req) + headers = res.headers + self.assertEqual( + 'yes', headers.get('X-Backend-Fake-Account-Listing')) + self.assertEqual( + 'application/json; charset=utf-8', + headers.get('Content-Type')) + self.assertEqual([], json.loads(res.body)) + self.assertEqual(res.status_int, expected) + + def test_auto_create_account_listing_response_is_json(self): + self._check_autocreate_listing_with_query_string('') + self._check_autocreate_listing_with_query_string('?format=plain') + self._check_autocreate_listing_with_query_string('?format=json') + self._check_autocreate_listing_with_query_string('?format=xml') + def test_HEAD(self): # Same behaviour as GET with save_globals():
No need to utf-8 encode the response message Fixes AttributeErrors raised by other tests
@@ -450,7 +450,7 @@ class API(object): try: response_data = json.loads(response.text) if "feedback_required" in str( - response_data.get("message").encode('utf-8')): + response_data.get("message")): self.logger.error( "ATTENTION!: `feedback_required`" + str(response_data.get("feedback_message"
DOC: Use x1 and x2 in the heaviside docstring. Also remove a duplicate $PARAMS. Closes
@@ -928,25 +928,24 @@ def add_newdoc(place, name, doc): The Heaviside step function is defined as:: - 0 if x < 0 - heaviside(x, h0) = h0 if x == 0 - 1 if x > 0 + 0 if x1 < 0 + heaviside(x1, x2) = x2 if x1 == 0 + 1 if x1 > 0 - where `h0` is often taken to be 0.5, but 0 and 1 are also sometimes used. + where `x2` is often taken to be 0.5, but 0 and 1 are also sometimes used. Parameters ---------- - x : array_like + x1 : array_like Input values. - $PARAMS - h0 : array_like - The value of the function at x = 0. + x2 : array_like + The value of the function when x1 is 0. $PARAMS Returns ------- out : ndarray - The output array, element-wise Heaviside step function of `x`. + The output array, element-wise Heaviside step function of `x1`. Notes -----
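As a quick standalone illustration of the docstring's definition (not part of the commit itself), `np.heaviside(x1, x2)` returns 0, `x2`, or 1 depending on the sign of `x1`:

```python
import numpy as np

x1 = np.array([-1.5, 0.0, 2.0])
# x2 supplies the value used where x1 == 0; 0, 0.5 and 1 are common choices.
assert np.array_equal(np.heaviside(x1, 0.5), [0.0, 0.5, 1.0])
assert np.array_equal(np.heaviside(x1, 1.0), [0.0, 1.0, 1.0])
```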
[core/output] Cache widget output for selective redraw To make redraw work correctly and elegantly with supplementary elements (e.g. prefix, postfix, separators), always do a full redraw of the bar (to make the theme update correctly), but cache the actual *data* to display inside the output.
@@ -70,13 +70,14 @@ class i3(object): 'full_text': self._theme.separator(), 'color': self._theme.bg(widget), 'background': self._theme.bg('previous'), + '_decorator': True, }) return [attr] - def __main(self, module, widget): + def __main(self, module, widget, text): attr = self.__common_attributes(module, widget) attr.update({ - 'full_text': self.__decorate(module, widget, widget.full_text()), + 'full_text': self.__decorate(module, widget, text), 'color': self._theme.fg(widget), 'background': self._theme.bg(widget), }) @@ -86,7 +87,7 @@ class i3(object): widgets = [] for widget in module.widgets(): widgets += self.__separator(module, widget) - widgets += self.__main(module, widget) + widgets += self.__main(module, widget, self._status[widget]) core.event.trigger('next-widget') return widgets @@ -95,13 +96,13 @@ class i3(object): if affected_modules and not module.id() in affected_modules: continue module.update() - self._status[module] = self.widgets(module) + for widget in module.widgets(): + self._status[widget] = widget.full_text() def statusline(self): widgets = [] for module in self._modules: - if module in self._status: - widgets += self._status[module] + widgets += self.widgets(module) return { 'data': widgets, 'suffix': ','
Adds __reduce__ implementations to LabelTup and LabelStr, so labels can now be pickled properly (just copied the existing __pygsti_reduce__ functions used for JSON serialization, which already worked).
@@ -188,6 +188,9 @@ class LabelTup(Label,tuple): return tuple.__gt__(self,tuple(x)) def __pygsti_reduce__(self): + return self.__reduce__() + + def __reduce__(self): # Need to tell serialization logic how to create a new Label since it's derived # from the immutable tuple type (so cannot have its state set after creation) return (LabelTup, (self[0],self[1:]), None) @@ -263,6 +266,9 @@ class LabelStr(Label,str): return str.__gt__(self,str(x)) def __pygsti_reduce__(self): + return self.__reduce__() + + def __reduce__(self): # Need to tell serialization logic how to create a new Label since it's derived # from the immutable tuple type (so cannot have its state set after creation) return (LabelStr, (str(self),), None)
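For context, pickling an immutable tuple subclass whose constructor takes extra arguments fails with the default reduction, which is why a custom `__reduce__` is needed. A minimal standalone sketch of the same pattern (the `Pair` class here is illustrative, not the actual LabelTup):

```python
import pickle

class Pair(tuple):
    """Tuple subclass built from (head, rest) rather than one iterable."""
    def __new__(cls, head, rest):
        return tuple.__new__(cls, (head,) + tuple(rest))

    def __reduce__(self):
        # Tell pickle how to rebuild the object through __new__'s signature;
        # state cannot be set afterwards because tuples are immutable.
        return (Pair, (self[0], self[1:]), None)

p = Pair('Gx', (0, 1))
assert pickle.loads(pickle.dumps(p)) == p
```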
add support for new "save" case_references format depends on
@@ -745,8 +745,26 @@ def form_casexml(request, domain, form_unique_id): def _get_case_references(data): - def is_valid(value): + def is_valid_property_list(value): return isinstance(value, list) and all(isinstance(v, unicode) for v in value) + + def is_valid_save(save_block): + if save_block == {}: + # empty is valid + return True + + return ( + not (set(save_block) - {'case_type', 'properties'}) # only allowed properties + and is_valid_property_list(save_block.get('properties', [])) # properties must be valid + ) + + def is_valid_case_references(refs): + return ( + not (set(refs) - {"load", "save"}) + and all(is_valid_property_list(v) for v in refs["load"].values()) + and all(is_valid_save(v) for v in refs.get('save', {}).values()) + ) + if "references" in data: # old/deprecated format preload = json.loads(data['references'])["preload"] @@ -755,6 +773,7 @@ def _get_case_references(data): } else: refs = json.loads(data.get('case_references', '{}')) - if set(refs) - {"load"} or not all(is_valid(v) for v in refs["load"].values()): + + if not is_valid_case_references(refs): raise ValueError("bad case references data: {!r}".format(refs)) return refs
Fix TimelineTest in OSS Summary: Pull Request resolved: Newer versions of Spark have `approx_percentile`, which is functionally equivalent to `fb_approx_percentile`.
@@ -549,6 +549,9 @@ class TimelineTest extends PipelineTester { import sqlCtx.implicits._ val sparkContext = sqlCtx.sparkContext + val percentileFunc = + if (sparkContext.version >= "2.3.0") "approx_percentile" else "fb_approx_percentile" + // Setup configuration val config = TimelineConfiguration("2018-01-01", "2018-01-01", @@ -559,7 +562,7 @@ class TimelineTest extends PipelineTester { null, 1, Some(0.95), - "fb_approx_percentile") + percentileFunc) // destroy previous schema Timeline.validateOrDestroyTrainingTable(sqlContext,
Added pdf_to_image, a simple tool to convert multiple PDFs to PNG
-import os, sys -from PIL import Image +import os +import sys from PyPDF2 import PdfFileMerger -from fnmatch import fnmatch from pdf2image import convert_from_path -from pdf2image.exceptions import ( - PDFInfoNotInstalledError, - PDFPageCountError, - PDFSyntaxError -) -sys.path.insert(1, 'B:/Production_Programs/Github/Automation-scripts/Pdf_to_image/sample_files') + +sys.path.insert(1, '''B:/Production_Programs/Github/ + Automation-scripts/Pdf_to_image/sample_files''') fileName = "myfile.pdf" if os.path.exists(fileName): os.remove(fileName) -##merge pdfs into one +# merge pdfs into one x = [a for a in os.listdir() if a.endswith(".pdf")] merger = PdfFileMerger() for pdf in x: @@ -23,7 +19,7 @@ for pdf in x: with open(fileName, "wb") as fout: merger.write(fout) -##convert merged pdf to png files +# convert merged pdf to png files images = convert_from_path(fileName) for i, image in enumerate(images): @@ -31,5 +27,3 @@ for i, image in enumerate(images): image.save(fname, "PNG") print("all file converted") - -
Release notes for Botorch 0.6.6 Summary: ## Motivation Self-explanatory ### Have you read the [Contributing Guidelines on pull requests](https://github.com/pytorch/botorch/blob/main/CONTRIBUTING.md#pull-requests)? Yes Pull Request resolved:
The release log for BoTorch. +## [0.6.6] - Aug 12, 2022 + +#### Compatibility +* Require GPyTorch >= 1.8.1 (#1347). + +#### New Features +* Support batched models in `RandomFourierFeatures` (#1336). +* Add a `skip_expand` option to `AppendFeatures` (#1344). + +#### Other Changes +* Allow `qProbabilityOfImprovement` to use batch-shaped `best_f` (#1324). +* Make `optimize_acqf` re-attempt failed optimization runs and handle optimization +errors in `optimize_acqf` and `gen_candidates_scipy` better (#1325). +* Reduce memory overhead in `MARS.set_baseline_Y` (#1346). + +#### Bug Fixes +* Fix bug where `outcome_transform` was ignored for `ModelListGP.fantasize` (#1338). +* Fix bug causing `get_polytope_samples` to sample incorrectly when variables +live in multiple dimensions (#1341). + +#### Documentation +* Add more descriptive docstrings for models (#1327, #1328, #1329, #1330) and for other +classes (#1313). +* Expanded on the model documentation at [botorch.org/docs/models](https://botorch.org/docs/models) (#1337). + ## [0.6.5] - Jul 15, 2022 #### Compatibility
Correcting App filtering while sorting 1. setting the filter term to last_submission for app filtering
@@ -194,9 +194,15 @@ class ApplicationStatusReport(GetParamsMixin, PaginatedReportMixin, DeploymentsR .size(self.pagination.count) .start(self.pagination.start)) if self.selected_app_id: + # selecting reporting_metadata.last_sudmission as filter_path as this has the mapping + # for app_id. This will also exclude the chances of + # error when sorts happens by username. Because username doesn't have the + # app_id mapping. + filter_path = 'reporting_metadata.last_submissions' + filter_term = 'reporting_metadata.last_submissions.app_id' user_query = user_query.nested( - self.sort_base, - filters.term(self.sort_filter, self.selected_app_id) + filter_path, + filters.term(filter_term, self.selected_app_id) ) return user_query
dcos-image-deps: remove chardet dependency dcos-image-deps already pulls in python-requests which brings along chardet.
"url": "https://pypi.python.org/packages/40/66/06130724e8205fc8c105db7edb92871c7fff7d31324d7f4405c762624a43/certifi-2017.7.27.1-py2.py3-none-any.whl", "sha1": "aeb9761eb1231d8309a0b593747d0c95303467b5" }, - "chardet": { - "kind": "url_extract", - "url": "https://pypi.python.org/packages/source/c/chardet/chardet-2.3.0.tar.gz", - "sha1": "50af8f8771ecbeb7a22567129c6c281b8bec3b1c" - }, "checksumdir": { "kind": "url_extract", "url": "https://pypi.python.org/packages/84/8b/0e800a118ed64091245feddef0159d7979bb42915f65db5d2dafd6b12864/checksumdir-1.1.4.tar.gz",
Fix regex string for **Del and **DelVals The regex string that updates the Registry.pol file wasn't properly matching instances where the reg_key was prepended with **Del or **DelVals due to the utf-16-le encoding of the Registry.pol file. This adds the null byte characters to the regex portion that searches for those values.
@@ -6777,13 +6777,16 @@ def _regexSearchKeyValueCombo(policy_data, policy_regpath, policy_regkey): for a policy_regpath and policy_regkey combo ''' if policy_data: - specialValueRegex = salt.utils.stringutils.to_bytes(r'(\*\*Del\.|\*\*DelVals\.){0,1}') + regex_str = [r'(\*', r'\*', 'D', 'e', 'l', r'\.', r'|\*', r'\*', 'D', + 'e', 'l', 'V', 'a', 'l', 's', r'\.', '){0,1}'] + specialValueRegex = '\x00'.join(regex_str) + specialValueRegex = salt.utils.stringutils.to_bytes(specialValueRegex) _thisSearch = b''.join([salt.utils.stringutils.to_bytes(r'\['), re.escape(policy_regpath), - b'\00;', + b'\x00;\x00', specialValueRegex, - re.escape(policy_regkey), - b'\00;']) + re.escape(policy_regkey.lstrip(b'\x00')), + b'\x00;']) match = re.search(_thisSearch, policy_data, re.IGNORECASE) if match: # add 2 so we get the ']' and the \00
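To illustrate why the NUL bytes matter (a standalone sketch, not the code being patched): in a UTF-16-LE buffer every ASCII character is followed by a zero byte, so a plain ASCII byte pattern never matches until the NULs are interleaved into the pattern as well:

```python
import re

data = '**DelVals.SomeValue'.encode('utf-16-le')

# Plain ASCII bytes do not match the UTF-16-LE encoded data...
assert re.search(br'\*\*DelVals\.', data) is None

# ...but interleaving NUL bytes between the pattern's characters does.
parts = [r'\*', r'\*', 'D', 'e', 'l', 'V', 'a', 'l', 's', r'\.']
pattern = '\x00'.join(parts).encode('ascii')
assert re.search(pattern, data) is not None
```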
Updating the file watching update to work for referenced object caches. Fixes
@@ -124,6 +124,7 @@ class PodCache(object): meta['cache'].reset() def update(self, dep_cache=None, obj_cache=None): + """Update the values in the dependency cache and/or the object cache.""" if dep_cache: self._dependency_graph.add_all(dep_cache) @@ -131,6 +132,10 @@ class PodCache(object): for key, meta in obj_cache.iteritems(): if not key in self._object_caches: self.create_object_cache(key, **meta) + else: + if isinstance(meta, basestring): + # Ignore if the object cache is referenced to a different file. + pass else: self._object_caches[key]['cache'].add_all(meta['values']) self._object_caches[key][
Update schemas.py minor language
@@ -340,8 +340,8 @@ class CertificateOutputSchema(LemurOutputSchema): @post_dump def handle_subject_details(self, data): - # Remove subject details if authority is CAB compliant. The code will use default set of values in that case. - # If CAB compliance of an authority is unknown (None), it is safe to fallback to default values. Thus below + # Remove subject details if authority is CA/Browser Forum compliant. The code will use default set of values in that case. + # If CA/Browser Forum compliance of an authority is unknown (None), it is safe to fallback to default values. Thus below # condition checks for 'not False' ==> 'True or None' if data.get("authority"): is_cab_compliant = data.get("authority").get("isCabCompliant")
expressions/base.py: update __future__ imports TN:
+from __future__ import (absolute_import, division, print_function, + unicode_literals) + from contextlib import contextmanager from copy import copy from functools import partial @@ -659,7 +662,7 @@ def auto_attr_custom(name, *partial_args, **partial_kwargs): decorator = (attr_expr if nb_args == 1 else attr_call) decorator(attr_name)(type( - '{}'.format(attr_name), + b'{}'.format(attr_name), (AbstractExpression, ), { 'construct': construct, '__init__': __init__,
Update list_view_select.js make list view switcher translatable
@@ -10,7 +10,7 @@ frappe.views.ListViewSelect = class ListViewSelect { add_view_to_menu(view, action) { let $el = this.page.add_custom_menu_item( this.parent, - view, + __(view), action, true, null,
Prevent non-logged-in users from rerunning jobs As this can be abused by overloading the queue and potentially injecting dangerous CLI commands (haven't tested)
@@ -176,7 +176,7 @@ export class JobComponent extends React.Component<JobProps, { {job.nick} <span className="tinyGrayJobValue">submitted {date}</span> </div> {details} - { this.state.detailed ? <div className="jobButtons" ><Button bsSize="xsmall" onClick={this.onRerunClick.bind(this)}>Clone</Button></div> : null } + { this.state.detailed ? <div className="jobButtons" ><Button bsSize="xsmall" disabled={!appStore.isLoggedIn} onClick={this.onRerunClick.bind(this)}>Clone</Button></div> : null } </div> <div> {cancel}{' '}
Fix: ResourceWarning unclosed socket Close the socket if an error occurs while opening it
@@ -184,6 +184,8 @@ def _open_socket(addrinfo_list, sockopt, timeout): err = error continue else: + if sock: + sock.close() raise error else: break
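The general shape of the fix, sketched outside the patched codebase (names and structure are illustrative): if the connect attempt fails part-way, the half-opened socket must be closed before moving on or raising, otherwise it leaks and CPython reports a ResourceWarning when it is garbage collected.

```python
import socket

def open_first_working(addrinfo_list, timeout=None):
    err = None
    for family, socktype, proto, _, address in addrinfo_list:
        sock = None
        try:
            sock = socket.socket(family, socktype, proto)
            sock.settimeout(timeout)
            sock.connect(address)
            return sock
        except OSError as error:
            # Close the half-opened socket before trying the next address.
            if sock is not None:
                sock.close()
            err = error
    raise err or OSError('no addresses to try')
```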
remove uifaces service since they shut down Some quick Googling showed that UIfaces has pretty much wrapped up its life online.
@@ -177,7 +177,6 @@ For information on contributing to this project, please see the [contributing gu | RoboHash | Generate random robot/alien avatars | No | Yes | [Go!](https://robohash.org/) | | StackExchange | Q&A forum for developers | `OAuth` | Yes | [Go!](https://api.stackexchange.com/) | | Stormpath | User Authentication | `apiKey` | Yes | [Go!](https://stormpath.com/) | -| UI Faces | Find and generate sample avatars for user interfaces | No | No | [Go!](http://uifaces.com/) | | UI Names | Generate random fake names | No | Yes | [Go!](https://github.com/thm/uinames) | ### Documents & Productivity
FIX: always use origin='lower' in LiveRaster This fixes a bug where the ticks were wrong, but does not plot the data in the right 'sense' (e.g. positive numbers go down). closes
@@ -323,15 +323,19 @@ class LiveGrid(CallbackBase): raise RuntimeError("Can not re-use LiveGrid") self._Idata = np.ones(self.raster_shape) * np.nan # The user can control origin by specific 'extent'. - origin = None extent = self.extent - if extent is not None: - if extent[2] < extent[3]: - origin = 'lower' + # origin must be 'lower' for the plot to fill in correctly + # (the first voxel filled must be closest to what mpl thinks + # is the 'lower left' of the image) im = self.ax.imshow(self._Idata, norm=self._norm, cmap=self.cmap, interpolation='none', extent=extent, aspect=self.aspect, - origin=origin) + origin='lower') + + # make sure the 'sense' of the axes is 'y values increasing up' + ymin, ymax = ax.get_ylim() + if ymin > ymax: + ax.set_ylim(ymax, ymin) self.im = im self.ax.set_title('scan {uid} [{sid}]'.format(sid=doc['scan_id'],
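A minimal matplotlib sketch of what the change enforces (illustrative only, not the LiveGrid code above): draw with `origin='lower'` so the first filled voxel sits at the lower-left of the image, then flip the y-limits only if they ended up decreasing.

```python
import numpy as np
import matplotlib.pyplot as plt

data = np.arange(12, dtype=float).reshape(3, 4)
fig, ax = plt.subplots()
# origin='lower' places row 0 at the bottom of the image.
ax.imshow(data, origin='lower', interpolation='none')

# Keep the axis in the 'y values increasing up' sense.
ymin, ymax = ax.get_ylim()
if ymin > ymax:
    ax.set_ylim(ymax, ymin)
plt.show()
```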
Stop calling .disconnect() from .__del__() It was causing some strange behaviour with the synchronized Queue used by the UpdateState class. Calling .get() with any timeout would block forever. Perhaps something else got released when the script ended and then any call would block forever, thus the thread never joining.
@@ -255,9 +255,6 @@ class TelegramBareClient: self._first_request = True # On reconnect it will be first again self.session.close() - def __del__(self): - self.disconnect() - def _reconnect(self, new_dc=None): """If 'new_dc' is not set, only a call to .connect() will be made since it's assumed that the connection has been lost and the
PEP8/MyPy warning removal Fixes the removal of **kwargs that broke the tests. Tested-by: David Kelly
@@ -275,7 +275,7 @@ class Cluster(object): if not authenticator: raise ArgumentError("Authenticator is mandatory") cluster_opts.update( - bucket_class=lambda connstr, bname=None, **kwargs: Bucket(connstr, name=bname, admin=self.admin)) + bucket_class=lambda connstr, bname=None, **kwargs: Bucket(connstr, name=bname, admin=self.admin, **kwargs)) self._cluster = CoreCluster(connection_string, **cluster_opts) # type: CoreCluster self._authenticate(authenticator)
zulip_ops: Add nagios-plugins-contrib. This has a number of useful nagios plugins.
@@ -32,6 +32,7 @@ class zulip_ops::base { "strace", "host", "git", + "nagios-plugins-contrib", ] package { $org_base_packages: ensure => "installed" }
For Python 3.5: match.__getitem__ is only supported in Python >= 3.6
@@ -181,7 +181,7 @@ def guess_lang_ids_of_file(filename: str, code: bytes, language_dict, cxx_latest for lang_id in ids: m = re.search(r'c\+\+\w\w', language_dict[lang_id]['description'].lower()) if m: - version_of[lang_id] = m[0] + version_of[lang_id] = m.group(0) ids.sort(key=lambda lang_id: version_of.get(lang_id, '')) lang_ids += [ ids[-1] ] # since C++11 < C++1y < ... as strings lang_ids = list(set(lang_ids))
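A two-line check of the compatibility issue being worked around (standalone, not the original project code): subscripting a match object (`m[0]`) was only added in Python 3.6, while `m.group(0)` behaves identically and also works on Python 3.5.

```python
import re

m = re.search(r'c\+\+\w\w', 'g++ defaults to c++14 here')
assert m.group(0) == 'c++14'   # m[0] would be equivalent on Python >= 3.6
```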
Add package_data in setup.py Add package requirements data in setup.py
@@ -56,6 +56,7 @@ setuptools.setup( 'nni_gpu_tool': '../../tools/nni_gpu_tool', 'nni': '../../src/sdk/pynni/nni' }, + package_data = {'nni': ['**/requirements.txt']}, python_requires = '>=3.5', install_requires = [ 'schema',
Update grillo-openeew.yaml added publication on Nepal EEW
@@ -44,6 +44,9 @@ DataAtWork: AuthorName: Grillo AuthorURL: https://grillo.io/ Publications: + - Title: NepalEEW: Testing the feasibility of an Earthquake Early Warning System in Nepal + URL: https://library.seg.org/doi/epdfplus/10.1190/image2022-3751420.1 + AuthorName: Vaclav M. Kuna - Title: How Grillo Built a Low-Cost Earthquake Early Warning System on AWS URL: https://aws.amazon.com/blogs/aws/how-grillo-built-a-low-cost-earthquake-early-warning-system-on-aws/ AuthorName: Marcia Villalba
`runtests.py`: point pycln to `pyproject.toml` for config settings An oversight in that was noticed by post-merge (thanks!)
@@ -58,7 +58,7 @@ def main() -> None: # Run formatters first. Order matters. print("\nRunning pycln...") - subprocess.run([sys.executable, "-m", "pycln", path, "--all"]) + subprocess.run([sys.executable, "-m", "pycln", path, "--config=pyproject.toml"]) print("\nRunning isort...") subprocess.run([sys.executable, "-m", "isort", path]) print("\nRunning Black...")
Perf Analyzer C API documentation Documentation for and triton-inference-server/client#8
<!-- -# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions @@ -400,3 +400,31 @@ can achieve by using shared memory in your application. Use By default perf_analyzer uses HTTP to communicate with Triton. The GRPC protocol can be specificed with the -i option. If GRPC is selected the --streaming option can also be specified for GRPC streaming. + +## Benchmarking Triton directly via C API + +Besides using HTTP or gRPC server endpoints to communicate with Triton, perf_analyzer also allows user to benchmark Triton directly using C API. HTTP/gRPC endpoints introduce an additional latency in the pipeline which may not be of interest to the user who is using Triton via C API within their application. Specifically, this feature is useful to benchmark bare minimum Triton without additional overheads from HTTP/gRPC communication. + +### Prerequisite +Pull the Triton SDK and the Inference Server container images on target machine. +Since you will need access to the Tritonserver install, it might be easier if +you copy the perf_analyzer binary to the Inference Server container. + +### Required Parameters +Use the --help option to see complete list of supported command line arguments. +By default perf_analyzer expects the Triton instance to already be running. You can configure the C API mode using the `--service-kind` option. In additon, you will need to point +perf_analyzer to the Triton server library path using the `--triton-server-directory` option and the model +repository path using the `--model-repository` option. +If the server is run successfully, there is a prompt: "server is alive!" and perf_analyzer will print the stats, as normal. +An example run would look like: +``` +perf_analyzer -m graphdef_int32_int32_int32 --service-kind=triton_c_api --triton-server-directory=/opt/tritonserver --model-repository=/workspace/qa/L0_perf_analyzer_capi/models +``` + +### Non-supported functionalities +There are a few functionalities that are missing from the C API. They are: +1. Async mode (`-a`) +2. Using shared memory mode (`--shared-memory=cuda` or `--shared-memory=system`) +3. Request rate range mode +4. For additonal known non-working cases, please refer to + [qa/L0_perf_analyzer_capi/test.sh](https://github.com/triton-inference-server/server/blob/main/qa/L0_perf_analyzer_capi/test.sh#L239-L277)
Handle ServerError while getting difference Closes
@@ -278,6 +278,11 @@ class UpdateMethods: self._log[__name__].info('Getting difference for account updates') try: diff = await self(get_diff) + except (errors.ServerError, ValueError) as e: + # Telegram is having issues + self._log[__name__].info('Cannot get difference since Telegram is having issues: %s', type(e).__name__) + self._message_box.end_difference() + continue except (errors.UnauthorizedError, errors.AuthKeyError) as e: # Not logged in or broken authorization key, can't get difference self._log[__name__].info('Cannot get difference since the account is not logged in: %s', type(e).__name__) @@ -295,6 +300,7 @@ class UpdateMethods: except ( errors.PersistentTimestampOutdatedError, errors.PersistentTimestampInvalidError, + errors.ServerError, errors.UnauthorizedError, errors.AuthKeyError, ValueError
DOC: added repr to Files class Added __str__ and __repr__ functions to the Files class.
@@ -180,6 +180,23 @@ class Files(object): # couldn't find stored info, load file list and then store self.refresh() + def __repr__(self): + return self.__str__() + + def __str__(self): + num_files = len(self.files) + output_str += 'Local File Statistics\n' + output_str += '---------------------\n' + output_str += 'Number of files: {:d}\n'.format(num_files) + + if num_files > 0: + output_str += 'Date Range: ' + output_str += self.files.index[0].strftime('%d %B %Y') + output_str += ' --- ' + output_str += self.files.index[-1].strftime('%d %B %Y') + + return output_str + def _filter_empty_files(self): """Update the file list (files) with empty files ignored"""
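A self-contained sketch of the same `__str__`/`__repr__` pattern (assuming `self.files` is a pandas Series of filenames indexed by timestamps); note that `output_str` has to be initialized before the `+=` concatenations:

```python
import pandas as pds

class FilesSketch(object):
    def __init__(self, files):
        self.files = files  # pandas Series of filenames indexed by datetime

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        num_files = len(self.files)
        output_str = 'Local File Statistics\n'
        output_str += '---------------------\n'
        output_str += 'Number of files: {:d}\n'.format(num_files)
        if num_files > 0:
            output_str += 'Date Range: '
            output_str += self.files.index[0].strftime('%d %B %Y')
            output_str += ' --- '
            output_str += self.files.index[-1].strftime('%d %B %Y')
        return output_str

files = pds.Series(['a.dat', 'b.dat'],
                   index=pds.to_datetime(['2009-01-01', '2009-01-02']))
print(FilesSketch(files))
```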
Replace Policy.Type with Policy.EntryType in policy list This was missed when the attribute name was changed.
@@ -318,8 +318,8 @@ def _do_identity_policy_list(args): width = width if width > _MIN_PRINT_WIDTH else _MIN_PRINT_WIDTH value = "Entries:\n" for entry in policy.entries: - entry_string = (" " * 4) + Policy.Type.Name(entry.type) + " " \ - + entry.key + entry_string = (" " * 4) + Policy.EntryType.Name(entry.type) \ + + " " + entry.key value += (entry_string[:width] + '...' if len(entry_string) > width else entry_string) + "\n"
MAINT: add missing dunder methods to nditer type hints The presence of __getattr__ is not sufficient to enable all protocols, so explicitly add the dunder methods supported by nditer
Generic, IO, Iterable, + Iterator, List, Mapping, NoReturn, @@ -778,6 +779,21 @@ def __new__( buffersize: Any = ..., ) -> Any: ... def __getattr__(self, key: str) -> Any: ... + def __enter__(self) -> nditer: ... + def __exit__( + self, + exc_type: None | Type[BaseException], + exc_value: None | BaseException, + traceback: None | TracebackType, + ) -> None: ... + def __iter__(self) -> Iterator[Any]: ... + def __next__(self) -> Any: ... + def __len__(self) -> int: ... + def __copy__(self) -> nditer: ... + def __getitem__(self, index: SupportsIndex | slice) -> Any: ... + def __setitem__(self, index: SupportsIndex | slice, value: Any) -> None: ... + def __delitem__(self, key: SupportsIndex | slice) -> None: ... + class poly1d: def __init__(
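A quick runtime check of two of the protocols the stubs now describe, the context-manager and iterator protocols (illustrative usage only, unrelated to the stub file itself):

```python
import numpy as np

a = np.arange(6).reshape(2, 3)
with np.nditer(a) as it:              # __enter__ / __exit__
    total = sum(int(x) for x in it)   # __iter__ / __next__
assert total == 15
```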
Remove Py3compat from the save magic code. closes
from IPython.core.magic import Magics, magics_class, line_magic from IPython.core.oinspect import find_file, find_source_lines from IPython.testing.skipdoctest import skip_doctest -from IPython.utils import py3compat from IPython.utils.contexts import preserve_keys from IPython.utils.path import get_py_filename from warnings import warn @@ -214,9 +213,9 @@ def save(self, parameter_s=''): force = 'f' in opts append = 'a' in opts mode = 'a' if append else 'w' - ext = u'.ipy' if raw else u'.py' + ext = '.ipy' if raw else '.py' fname, codefrom = args[0], " ".join(args[1:]) - if not fname.endswith((u'.py',u'.ipy')): + if not fname.endswith(('.py','.ipy')): fname += ext file_exists = os.path.isfile(fname) if file_exists and not force and not append: @@ -233,14 +232,13 @@ def save(self, parameter_s=''): except (TypeError, ValueError) as e: print(e.args[0]) return - out = py3compat.cast_unicode(cmds) with io.open(fname, mode, encoding="utf-8") as f: if not file_exists or not append: - f.write(u"# coding: utf-8\n") - f.write(out) + f.write("# coding: utf-8\n") + f.write(cmds) # make sure we end on a newline - if not out.endswith(u'\n'): - f.write(u'\n') + if not cmds.endswith('\n'): + f.write('\n') print('The following commands were written to file `%s`:' % fname) print(cmds)