Enforce usage of sqlite3 for running tests. Simplifies tests by creating a database in memory. Does not affect the user setup at all.
@@ -159,7 +159,19 @@ WSGI_APPLICATION = 'InvenTree.wsgi.application'

 DATABASES = {}

+"""
+When running unit tests, enforce usage of sqlite3 database,
+so that the tests can be run in RAM without any setup requirements
+"""
+if 'test' in sys.argv:
+    eprint('Running tests - Using sqlite3 memory database')
+    DATABASES['default'] = {
+        'ENGINE': 'django.db.backends.sqlite3',
+        'NAME': 'test_db.sqlite3'
+    }
+
 # Database backend selection
+else:
 if 'database' in CONFIG:
     DATABASES['default'] = CONFIG['database']
 else:
settings_config: Add waiting_period_threshold_dropdown_values. This list will be used in further commits in settings_org.js code for waiting period threshold setting.
@@ -349,6 +349,21 @@ export const time_limit_dropdown_values = [
 export const msg_edit_limit_dropdown_values = time_limit_dropdown_values;
 export const msg_delete_limit_dropdown_values = time_limit_dropdown_values;

+export const waiting_period_threshold_dropdown_values = [
+    {
+        description: $t({defaultMessage: "None"}),
+        code: 0,
+    },
+    {
+        description: $t({defaultMessage: "3 days"}),
+        code: 3,
+    },
+    {
+        description: $t({defaultMessage: "Custom"}),
+        code: "custom_period",
+    },
+];
+
 export const retain_message_forever = -1;

 export const user_role_values = {
Change layout for the index page. Expanding one table doesn't force the neighbouring one to be expanded also.
@@ -8,41 +8,19 @@ InvenTree | Index
 <h3>InvenTree</h3>
 <hr>

-<div class="row">
-    <div class="col-sm-6">
+<div class='col-sm-6'>
     {% include "InvenTree/latest_parts.html" with collapse_id="latest_parts" %}
-    </div>
-    <div class="col-sm-6">
-        {% include "InvenTree/starred_parts.html" with collapse_id="starred" %}
-    </div>
-</div>
-
-
-<div class="row">
-    <div class="col-sm-6">
     {% include "InvenTree/bom_invalid.html" with collapse_id="bom_invalid" %}
-    </div>
-    <div class="col-sm-6">
-        {% include "InvenTree/build_pending.html" with collapse_id="build_pending" %}
-    </div>
-</div>
-
-<div class="row">
-    <div class="col-sm-6">
     {% include "InvenTree/low_stock.html" with collapse_id="order" %}
-    </div>
-    <div class="col-sm-6">
-        {% include "InvenTree/required_stock_build.html" with collapse_id="stock_to_build" %}
-    </div>
-</div>
-
-<div class="row">
-    <div class="col-sm-6">
     {% include "InvenTree/po_outstanding.html" with collapse_id="po_outstanding" %}
+    </div>

-    <div class="col-sm-6">
+<div class='col-sm-6'>
+    {% include "InvenTree/starred_parts.html" with collapse_id="starred" %}
+    {% include "InvenTree/build_pending.html" with collapse_id="build_pending" %}
+    {% include "InvenTree/required_stock_build.html" with collapse_id="stock_to_build" %}
     {% include "InvenTree/so_outstanding.html" with collapse_id="so_outstanding" %}
-    </div>
+    </div>

 {% endblock %}
Skip codecov uploads in scheduled daily tests. We can only upload a limited number of coverage reports for any commit anyway; this avoids turning these into test failures.
@@ -611,6 +611,7 @@ jobs:
       timeout-minutes: 6

     - name: Upload coverage
+      if: ${{ github.event_name != 'schedule' }}
       uses: codecov/[email protected]
       with:
         file: ./coverage.xml
Remove my name :( ok max
@@ -12,7 +12,7 @@ GitHub. From this directory,

 ```console
-jayden@NAGA:~/dev/holodeck/docs$ make clean && make html
+~/dev/holodeck/docs$ make clean && make html
 ```

 [This VSCode extension](https://marketplace.visualstudio.com/items?itemName=lextudio.restructuredtext)
Djongo: A connector for using Django with MongoDB
@@ -114,7 +114,7 @@ various Python frameworks and libraries. Django, an
   `example:
   <https://github.com/MongoEngine/django-mongoengine/tree/master/example/tumblelog>`_.
   For more information `<http://docs.mongoengine.org/en/latest/django.html>`_
-* `Djongo <https://nesdis.github.io/djongo/>`_ Djongo is a connector for using
+* `Djongo <https://nesdis.github.io/djongo/>`_ is a connector for using
   Django with MongoDB as the database backend. Use the Django Admin GUI to add
   and modify documents in MongoDB.
 * `mongodb_beaker <http://pypi.python.org/pypi/mongodb_beaker>`_ is a
Force VSTS to update status. For PR builds on branches we don't build anything, but we need to do something so that VSTS updates the status on GitHub for the PR builds.
@@ -123,3 +123,12 @@ phases:

   - task: mspremier.PostBuildCleanup.PostBuildCleanup-task.PostBuildCleanup@3
     condition: always()
+
+- phase: Skip_libchromiumcontent_PR_build
+  condition: and(eq(variables['System.PullRequest.IsFork'], 'False'), eq(variables['Build.Reason'], 'PullRequest'))
+  steps:
+    - bash: |
+        echo "Skipping PR build for PR requested from branch."
+
+    - task: mspremier.PostBuildCleanup.PostBuildCleanup-task.PostBuildCleanup@3
+      condition: always()
Refactor methods to use _get_text_and_embed. This changes the converters used by caesarcipher_encrypt and caesarcipher_decrypt in order to accommodate the manual conversion that _get_text_and_embed does, which allows for this feature to be easily disabled.
@@ -131,7 +131,7 @@ class Fun(Cog):
         await ctx.send(embed=embed)

     @staticmethod
-    async def _caesar_cipher(ctx: Context, offset: int, msg: Union[Message, str], left_shift: bool = False) -> None:
+    async def _caesar_cipher(ctx: Context, offset: int, msg: str, left_shift: bool = False) -> None:
         """
         Given a positive integer `offset`, translates and sends the given `msg`.

@@ -150,10 +150,7 @@ class Fun(Cog):
             """Encrypts the given string using the Caesar Cipher."""
             return "".join(caesar_cipher(text, offset))

-        is_message = isinstance(msg, Message)
-
-        text = msg.content if is_message else msg
-        embed = msg.embeds[0] if is_message and msg.embeds else None
+        text, embed = await Fun._get_text_and_embed(ctx, msg)

         if embed is not None:
             embed = Fun._convert_embed(conversion_func, embed)
@@ -166,7 +163,7 @@ class Fun(Cog):
         await ctx.send(content=converted_text, embed=embed)

     @caesarcipher_group.command(name="encrypt", aliases=("rightshift", "rshift", "enc",))
-    async def caesarcipher_encrypt(self, ctx: Context, offset: int, *, msg: Union[Message, str]) -> None:
+    async def caesarcipher_encrypt(self, ctx: Context, offset: int, *, msg: str) -> None:
         """
         Given a positive integer `offset`, encrypt the given `msg`.

@@ -177,7 +174,7 @@ class Fun(Cog):
         await self._caesar_cipher(ctx, offset, msg, left_shift=False)

     @caesarcipher_group.command(name="decrypt", aliases=("leftshift", "lshift", "dec",))
-    async def caesarcipher_decrypt(self, ctx: Context, offset: int, *, msg: Union[Message, str]) -> None:
+    async def caesarcipher_decrypt(self, ctx: Context, offset: int, *, msg: str) -> None:
         """
         Given a positive integer `offset`, decrypt the given `msg`.
Fix assertion for createami test. Revert
@@ -59,14 +59,23 @@ def test_createami(region, os, instance, request, pcluster_config_reader, vpc_st
         + networking_args
     )

-    pcluster_createami_result_stdout_list = [s.lower() for s in pcluster_createami_result.stdout.split("\n")]
-
     assert_that(
-        any("downloading https://{0}-aws-parallelcluster.s3".format(region) in pcluster_createami_result_stdout_list)
+        any(
+            "downloading https://{0}-aws-parallelcluster.s3".format(region).lower() in s.lower()
+            for s in pcluster_createami_result.stdout.split("\n")
+        )
+    ).is_true()
+    assert_that(
+        any("chef.io/chef/install.sh".lower() in s.lower() for s in pcluster_createami_result.stdout.split("\n"))
+    ).is_false()
+    assert_that(
+        any("packages.chef.io".lower() in s.lower() for s in pcluster_createami_result.stdout.split("\n"))
+    ).is_false()
+    assert_that(
+        any("Thank you for installing Chef".lower() in s.lower() for s in pcluster_createami_result.stdout.split("\n"))
+    ).is_true()
+    assert_that(
+        any("Starting Chef Client".lower() in s.lower() for s in pcluster_createami_result.stdout.split("\n"))
     ).is_true()
-    assert_that(any("chef.io/chef/install.sh" in pcluster_createami_result_stdout_list)).is_false()
-    assert_that(any("packages.chef.io" in pcluster_createami_result_stdout_list)).is_false()
-    assert_that(any("Thank you for installing Chef".lower() in pcluster_createami_result_stdout_list)).is_true()
-    assert_that(any("Starting Chef Client".lower() in pcluster_createami_result_stdout_list)).is_true()

     assert_that(pcluster_createami_result.stdout).does_not_contain("No custom AMI created")
Remove dead code for automatic Self promotion into Entity. TN:
@@ -489,20 +489,6 @@ class FieldAccess(AbstractExpression):
             self.receiver_expr, self.node_data, self.type
         )

-    def wrap_prefix_in_entity(self):
-        """
-        Mutate this expression so that it wraps the prefix into an entity.
-        """
-        from langkit.expressions.envs import make_as_entity
-
-        assert not self.implicit_deref
-        assert not self.simple_field_access
-        # The current receiver expression already contains a null check, so
-        # we don't have to go through the full complexity of .as_entity.
-        self.receiver_expr = make_as_entity(self.receiver_expr,
-                                            null_check=False)
-        self.implicit_deref = True
-
     @property
     def wrap_result_in_entity(self):
         """
Update tracking-arduino.py. Updated header comment to reflect as-is mrl development build number.
 # A script to test tracking on the Raspberry Pi driving servos with the AdaFruit16ServoDriver service
-# as at mrl development build version 2423
+# as at mrl development build version 2489
 # a mashup of code taken from Mats:
 # https://github.com/MyRobotLab/pyrobotlab/blob/master/home/Mats/Tracking.py
 # and also from Grog:
Move DispersiveQED tests into dispersive regime. Prevents a warning being emitted, and makes sure the test is testing the behaviour it's intended to.
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ###############################################################################

-import warnings
 import numpy as np
 import pytest
 import qutip
@@ -70,7 +69,7 @@ single_gate_tests = [

 device_lists = [
-    pytest.param(DispersiveCavityQED, {"g":0.1}, id = "DispersiveCavityQED"),
+    pytest.param(DispersiveCavityQED, {"g": 0.02}, id="DispersiveCavityQED"),
     pytest.param(LinearSpinChain, {}, id = "LinearSpinChain"),
     pytest.param(CircularSpinChain, {}, id = "CircularSpinChain"),
 ]
@@ -112,7 +111,6 @@ def test_numerical_evolution(
     circuit = qutip.qip.circuit.QubitCircuit(num_qubits)
     for gate in gates:
         circuit.add_gate(gate)
-    with warnings.catch_warnings(record=True):
     device = device_class(num_qubits, **kwargs)
     device.load_circuit(circuit)
@@ -152,14 +150,14 @@ circuit2.add_gate("SQRTISWAP", targets=[0, 2])  # supported only by SpinChain

 @pytest.mark.parametrize(("circuit", "device_class", "kwargs"), [
-    pytest.param(circuit, DispersiveCavityQED, {"g":0.1}, id = "DispersiveCavityQED"),
+    pytest.param(circuit, DispersiveCavityQED, {"g": 0.02},
+                 id="DispersiveCavityQED"),
     pytest.param(circuit2, LinearSpinChain, {}, id = "LinearSpinChain"),
     pytest.param(circuit2, CircularSpinChain, {}, id = "CircularSpinChain"),
 ])
 @pytest.mark.parametrize(("schedule_mode"), ["ASAP", "ALAP", None])
 def test_numerical_circuit(circuit, device_class, kwargs, schedule_mode):
     num_qubits = circuit.N
-    with warnings.catch_warnings(record=True):
     device = device_class(circuit.N, **kwargs)
     device.load_circuit(circuit, schedule_mode=schedule_mode)
Speed up verify_broadcast() and verify_unicast() which had unnecessarily high timers, and improve their diagnostic messages. This improves overall integration test run time.
@@ -1427,13 +1427,13 @@ dbs:
             'ether dst host ff:ff:ff:ff:ff:ff and ether src host %s' % host_a.MAC())
         partials = [partial(host_a.cmd, self.scapy_bcast(host_a))] * packets
         tcpdump_txt = self.tcpdump_helper(
-            host_b, tcpdump_filter, partials, packets=packets)
+            host_b, tcpdump_filter, partials, packets=(packets - 1), timeout=packets)
+        msg = '%s (%s) -> %s (%s): %s' % (
+            host_a, host_a.MAC(), host_b, host_b.MAC(), tcpdump_txt)
         self.assertEqual(
-            broadcast_expected,
-            host_a.MAC() in tcpdump_txt,
-            msg=tcpdump_txt)
+            broadcast_expected, host_a.MAC() in tcpdump_txt, msg=msg)

-    def verify_unicast(self, hosts, unicast_expected=True, packets=3, timeout=2):
+    def verify_unicast(self, hosts, unicast_expected=True, packets=3):
         host_a = self.net.hosts[0]
         host_b = self.net.hosts[-1]
         if hosts is not None:
@@ -1446,14 +1446,16 @@ dbs:
         # Wait for at least one packet.
         tcpdump_txt = self.tcpdump_helper(
             host_b, tcpdump_filter, [partial(host_a.cmd, scapy_cmd)],
-            timeout=(packets * timeout), vflags='-vv', packets=1)
+            timeout=(packets - 1), vflags='-vv', packets=1)
         received_no_packets = self.tcpdump_rx_packets(tcpdump_txt, packets=0)
+        msg = '%s (%s) -> %s (%s): %s' % (
+            host_a, host_a.MAC(), host_b, host_b.MAC(), tcpdump_txt)
         if unicast_expected:
             # We expect unicast connectivity, so we should have got at least one packet.
-            self.assertFalse(received_no_packets)
+            self.assertFalse(received_no_packets, msg=msg)
         else:
             # We expect no unicast connectivity, so we must get no packets.
-            self.assertTrue(received_no_packets)
+            self.assertTrue(received_no_packets, msg=msg)

     def verify_empty_caps(self, cap_files):
         cap_file_cmds = [
@@ -1477,7 +1479,7 @@ dbs:
             host.cmd(mininet_test_util.timeout_cmd(bcast_cmd, timeout))
         self.verify_empty_caps(bcast_cap_files)

-    def verify_unicast_not_looped(self):
+    def verify_unicast_not_looped(self, packets=3):
         unicast_mac1 = '0e:00:00:00:00:02'
         unicast_mac2 = '0e:00:00:00:00:03'
         hello_template = (
@@ -1501,8 +1503,8 @@ dbs:
                     self.scapy_template(
                         hello_template % (unicast_mac1, unicast_mac2),
                         host.defaultIntf(),
-                        count=3)))],
-            timeout=5, vflags='-vv', packets=1)
+                        count=packets)))],
+            timeout=(packets - 1), vflags='-vv', packets=1)
         self.verify_no_packets(tcpdump_txt)

     def verify_controller_fping(self, host, faucet_vip,
Bug fix for improper handling of packages with - (dash) in name
@@ -265,7 +265,7 @@ class ReqsBaseFinder(BaseFinder):
             Flask-RESTFul -> flask_restful
         """
         if self.mapping:
-            name = self.mapping.get(name, name)
+            name = self.mapping.get(name.replace("-","_"), name)
         return name.lower().replace("-", "_")

     def find(self, module_name: str) -> Optional[str]:
Fixed manifest details. Path to PNGs; removed tests from the distro.
@@ -4,12 +4,12 @@ include kicost/HISTORY.rst
 include LICENSE
 include README.rst
 include kicost/kicost.ico
-include block_diag.png
-include gui.png
+include docs/block_diag.png
+include docs/gui.png
 include kicost/kitspace.png

-recursive-include tests *
+#recursive-include tests *

 recursive-exclude * __pycache__
 recursive-exclude * *.py[co]
Set unit caches as filled even when ref-counting is not involved ... as results must be invalidated anyway, it's not just about ref-counting. TN:
@@ -100,11 +100,12 @@ begin

    % if property.memoized:
       Self.${property.memoization_state_field_name} := Computed;
+      Self.${property.memoization_value_field_name} := Property_Result;
+      Set_Filled_Caches (Self.Unit);
+
       % if property.type.is_refcounted:
          Inc_Ref (Property_Result);
-         Set_Filled_Caches (Self.Unit);
       % endif
-      Self.${property.memoization_value_field_name} := Property_Result;
    % endif

    return Property_Result;
llvm: Drop compiled function pointer update. This has been basically dead code since ("llvm: Remove function pointer management and create ctype function directly")
@@ -141,9 +141,6 @@ def _updateNativeBinaries(module, buffer):
         # one passed to getrefcount function
         if sys.getrefcount(v) == 4:
             to_delete.append(k)
-        else:
-            new_ptr = _cpu_engine._engine.get_function_address(k)
-            v.ptr = new_ptr

     for d in to_delete:
         del _binaries[d]
Recommended fix for Issue Fixes
@@ -22,7 +22,7 @@ class Command(BaseCommand):

     def handle(self, *args, **options):
         """Create Customer objects for Subscribers without Customer objects associated."""
-        for subscriber in get_subscriber_model().objects.filter(customer__isnull=True):
+        for subscriber in get_subscriber_model().objects.filter(djstripe_customers=None):
             # use get_or_create in case of race conditions on large subscriber bases
             Customer.get_or_create(subscriber=subscriber)
             print("Created subscriber for {0}".format(subscriber.email))
Add note: when the `step` argument is given, `suggest_float` falls back to `suggest_discrete_uniform`, which includes both `low` and `high`.
@@ -136,6 +136,12 @@ class Trial(BaseTrial):
             high:
                 Upper endpoint of the range of suggested values. ``high`` is excluded from the
                 range.
+
+                .. note::
+                    If ``step`` is specified, ``high`` is included as well as ``low`` because
+                    this method falls back to :func:`~optuna.trial.Trial.suggest_discrete_uniform`
+                    with ``step`` argument.
+
             step:
                 A step of discretization.
Update example from .summary(). Add scale_factor argument to the output.
@@ -170,6 +170,7 @@ class Element(ABC):
         Ip                        0.329564
         tag                           None
         color                      #b2182b
+        scale_factor                     1
         dof_global_index              None
         type                   DiskElement
         dtype: object
Remove DGL_LOADALL=true. DGL_LOADALL=true is not used in the code anymore.
@@ -19,7 +19,7 @@ mxnet:
	@echo "# Step 1: Building MXNet tutorials #"
	@echo "# #"
	@echo "##################################################################"
-	@DGLBACKEND=mxnet DGL_LOADALL=true $(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+	@DGLBACKEND=mxnet $(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

 pytorch:
	@echo "##################################################################"
@@ -27,7 +27,7 @@ pytorch:
	@echo "# Step 2: Building PyTorch tutorials #"
	@echo "# #"
	@echo "##################################################################"
-	@DGLBACKEND=pytorch DGL_LOADALL=true $(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+	@DGLBACKEND=pytorch $(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

 tensorflow:
	@echo "##################################################################"
@@ -35,10 +35,10 @@ tensorflow:
	@echo "# Step 3: Building Tensorflow tutorials #"
	@echo "# #"
	@echo "##################################################################"
-	@DGLBACKEND=tensorflow DGL_LOADALL=true $(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+	@DGLBACKEND=tensorflow $(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

 html-noexec:
-	DGL_LOADALL=true $(SPHINXBUILD) -D plot_gallery=0 -b html "$(SOURCEDIR)" "$(BUILDDIR)/html"
+	$(SPHINXBUILD) -D plot_gallery=0 -b html "$(SOURCEDIR)" "$(BUILDDIR)/html"
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
UI: Progress during optimization passes was off by one * Starting a module was counted as completing it, which is of course wrong.
@@ -241,7 +241,7 @@ def _restartProgress():
     )


-def _traceProgress(current_module):
+def _traceProgressModuleStart(current_module):
     optimization_logger.info_fileoutput(
         """\
 Optimizing module '{module_name}', {remaining:d} more modules to go \
@@ -258,6 +258,7 @@ after that.""".format(
         item=current_module.getFullName(),
         total=ModuleRegistry.getRemainingModulesCount()
         + ModuleRegistry.getDoneModulesCount(),
+        update=False,
     )

     if _progress and Options.isShowMemory():
@@ -268,6 +269,15 @@ after that.""".format(
         memory_logger.info(output)


+def _traceProgressModuleEnd(current_module):
+    reportProgressBar(
+        item=current_module.getFullName(),
+        total=ModuleRegistry.getRemainingModulesCount()
+        + ModuleRegistry.getDoneModulesCount(),
+        update=True,
+    )
+
+
 def _endProgress():
     closeProgressBar()

@@ -302,10 +312,12 @@ def makeOptimizationPass():
             # optimizeModule(getInternalModule())
             break

-        _traceProgress(current_module)
+        _traceProgressModuleStart(current_module)

         changed = optimizeModule(current_module)

+        _traceProgressModuleEnd(current_module)
+
         if changed:
             finished = False
Update brocade_fastiron_telnet.py. Formatting changes to be PEP8 compliant.
@@ -32,7 +32,6 @@ class BrocadeFastironTelnet(CiscoBaseConnection):
             delay_factor=delay_factor,
             max_loops=max_loops)

-
     def _test_channel_read(self, count=40, pattern=""):
         """Try to read the channel (generally post login) verify you receive data back."""
@@ -133,16 +132,6 @@ class BrocadeFastironTelnet(CiscoBaseConnection):
             print(output)
         return check_string in output

-    def check_enable_mode(self, check_string=''):
-        """Check if in enable mode. Return boolean."""
-        debug = False
-        """Must send \r\n for CER"""
-        self.write_channel('\r\n')
-        output = self.read_until_prompt()
-        if debug:
-            print(output)
-        return check_string in output
-
     def check_config_mode(self, check_string=')#', pattern=''):
         """Checks if the device is in configuration mode or not."""
         debug = False
tests/transfer_mechanism/uniform_to_normal_noise: Set seeds of noise function instead of RandomState. Results change since the 'seed' parameter is converted to list before use.
@@ -301,6 +301,8 @@ class TestDistributionFunctions:
     def test_transfer_mech_uniform_to_normal_noise(self):
         try:
             import scipy
+        except ModuleNotFoundError:
+            with pytest.raises(FunctionError) as error_text:
                 T = TransferMechanism(
                     name='T',
                     default_variable=[0, 0, 0, 0],
@@ -308,12 +310,8 @@ class TestDistributionFunctions:
                     noise=UniformToNormalDist(),
                     integration_rate=1.0
                 )
-            T.noise.base.parameters.random_state.get(None).seed(22)
-            val = T.execute([0, 0, 0, 0])
-            assert np.allclose(val, [[-0.81177443, -0.04593492, -0.20051725, 1.07665147]])
-
-        except:
-            with pytest.raises(FunctionError) as error_text:
+            assert "The UniformToNormalDist function requires the SciPy package." in str(error_text.value)
+        else:
             T = TransferMechanism(
                 name='T',
                 default_variable=[0, 0, 0, 0],
@@ -321,7 +319,12 @@ class TestDistributionFunctions:
                 noise=UniformToNormalDist(),
                 integration_rate=1.0
             )
-            assert "The UniformToNormalDist function requires the SciPy package." in str(error_text.value)
+            # This is equivalent to
+            # T.noise.base.parameters.random_state.get(None).seed([22])
+            T.noise.base.parameters.seed.set(22, None)
+            val = T.execute([0, 0, 0, 0])
+            assert np.allclose(val, [[1.73027452, -1.07866481, -1.98421126, 2.99564032]])
+

 @pytest.mark.mechanism
Change unclaimed username. As requested by
"url": "https://github.community/u/{}/summary", "urlMain": "https://github.community", "username_claimed": "jperl", - "username_unclaimed": "blue" + "username_unclaimed": "noonewouldusethis298" }, "GitLab": { "errorMsg": "[]",
portico: Update advance clicking on tour carousel. We shouldn't move the slide forward if the user is on the last slide. This commit adds an exception for this case.
@@ -243,8 +243,16 @@ var load = function () {

     // Move to the next slide on clicking inside the carousel container
     $(".carousel-inner .item-container").click(function (e) {
-        // We don't want to trigger this event if user clicks on a link
-        if (e.target.tagName.toLowerCase() !== "a" && e.target.tagName.toLowerCase() !== "button") {
+        var get_tag_name = e.target.tagName.toLowerCase();
+        var is_button = get_tag_name === "button";
+        var is_link = get_tag_name === "a";
+        var is_last_slide = $("#tour-carousel .carousel-inner .item:last-child").hasClass("active");
+
+        // Do not trigger this event if user clicks on a button, link
+        // or if it's the last slide
+        var move_slide_forward = !is_button && !is_link && !is_last_slide;
+
+        if (move_slide_forward) {
             $(this).closest('.carousel').carousel('next');
         }
     });
Update hue-dimmer-switch.yml. Wrong class listed.
 name: Hue Dimmer switch (Philips)
 device_support:
-  - Light (E1744LightController; 350ms delay)
+  - Light (HueDimmerController; 350ms delay)
 integrations:
   - name: Zigbee2mqtt
     codename: z2m
Eliminate race condition between the snapshot creation workflow and the scheduled execution by creating the snapshot before scheduling the execution.
@@ -982,16 +982,16 @@ class ExecutionsTest(AgentlessTestCase):
         dep_id = dep.id
         do_retries(verify_deployment_env_created, 30, deployment_id=dep_id)

+        # Create snapshot and keep it's status 'started'
+        snapshot = self._create_snapshot_and_modify_execution_status(
+            Execution.STARTED)
+
         scheduled_time = generate_scheduled_for_date()
         execution = self.client.executions.start(deployment_id=dep_id,
                                                  workflow_id='install',
                                                  schedule=scheduled_time)
         self._assert_execution_status(execution.id, Execution.SCHEDULED)

-        # Create snapshot and keep it's status 'started'
-        snapshot = self._create_snapshot_and_modify_execution_status(
-            Execution.STARTED)
-
         time.sleep(62)  # Wait for exec to 'wake up'
         self._assert_execution_status(execution.id, Execution.QUEUED)
         self.client.executions.update(snapshot.id, Execution.TERMINATED)
create_spoken_forms: correct file extension regex. The regex was previously matching any character, rather than just the period. This was resulting in some unsupported symbols in the relevant lists and other unintended behavior.
@@ -22,11 +22,16 @@ mod = Module()
 DEFAULT_MINIMUM_TERM_LENGTH = 3
 FANCY_REGULAR_EXPRESSION = r"[A-Z]?[a-z]+|[A-Z]+(?![a-z])|[0-9]+"
 FILE_EXTENSIONS_REGEX = "|".join(
-    file_extension.strip() + "$" for file_extension in file_extensions.values()
+    re.escape(file_extension.strip()) + "$" for file_extension in file_extensions.values()
 )
 SYMBOLS_REGEX = "|".join(re.escape(symbol) for symbol in set(symbol_key_words.values()))
 REGEX_NO_SYMBOLS = re.compile(
-    "|".join([FANCY_REGULAR_EXPRESSION, FILE_EXTENSIONS_REGEX,])
+    "|".join(
+        [
+            FANCY_REGULAR_EXPRESSION,
+            FILE_EXTENSIONS_REGEX,
+        ]
+    )
 )

 REGEX_WITH_SYMBOLS = re.compile(
improve tutorial notebook and test_piecewise_linear. Delete notebook (to be merged as a markdown file); pwl test name fixes.
@@ -8,7 +8,7 @@ from gluonts.distribution import PiecewiseLinear


 @pytest.mark.parametrize(
-    "distr, target, expected_cdf, expected_crps",
+    "distr, target, expected_target_cdf, expected_target_crps",
     [
         (
             PiecewiseLinear(
@@ -39,15 +39,19 @@ from gluonts.distribution import PiecewiseLinear
 def test_values(
     distr: PiecewiseLinear,
     target: List[float],
-    expected_cdf: List[float],
-    expected_crps: List[float],
+    expected_target_cdf: List[float],
+    expected_target_crps: List[float],
 ):
     target = mx.nd.array(target).reshape(shape=(len(target),))
-    expected_cdf = np.array(expected_cdf).reshape((len(expected_cdf),))
-    expected_crps = np.array(expected_crps).reshape((len(expected_crps),))
+    expected_target_cdf = np.array(expected_target_cdf).reshape(
+        (len(expected_target_cdf),)
+    )
+    expected_target_crps = np.array(expected_target_crps).reshape(
+        (len(expected_target_crps),)
+    )

-    assert all(np.isclose(distr._cdf(target).asnumpy(), expected_cdf))
-    assert all(np.isclose(distr.crps(target).asnumpy(), expected_crps))
+    assert all(np.isclose(distr._cdf(target).asnumpy(), expected_target_cdf))
+    assert all(np.isclose(distr.crps(target).asnumpy(), expected_target_crps))


 @pytest.mark.parametrize(
@@ -58,7 +62,7 @@ def test_shapes(batch_shape: Tuple, num_pieces: int, num_samples: int):
     gamma = mx.nd.ones(shape=(*batch_shape,))
     slopes = mx.nd.ones(shape=(*batch_shape, num_pieces))  # all positive
     knot_spacings = (
-        mx.nd.ones(shape=(*batch_shape, num_pieces)) / 10
+        mx.nd.ones(shape=(*batch_shape, num_pieces)) / num_pieces
     )  # positive and sum to 1
     target = mx.nd.ones(shape=batch_shape)  # shape of gamma
[ci] small fix to test_integration_wandb.py. trial should be a fixture, instead of a function.
@@ -152,7 +152,7 @@ def wandb_env():
 class TestWandbLogger:
     def test_wandb_logger_project_group(self, monkeypatch):
         monkeypatch.setenv(WANDB_PROJECT_ENV_VAR, "test_project_from_env_var")
-        monkeypatch.setenv(WANDB_GROUP_ENV_VAR, "test_group_env_var")
+        monkeypatch.setenv(WANDB_GROUP_ENV_VAR, "test_group_from_env_var")
         # Read project and group name from environment variable
         logger = WandbTestExperimentLogger(api_key="1234")
         logger.setup()
@@ -191,7 +191,7 @@ class TestWandbLogger:
         logger.setup()
         assert os.environ[WANDB_ENV_VAR] == "abcd"

-    def test_wandb_logger_start(self, monkeypatch):
+    def test_wandb_logger_start(self, monkeypatch, trial):
         monkeypatch.setenv(WANDB_ENV_VAR, "9012")
         # API Key in env
         logger = WandbTestExperimentLogger(project="test_project")
@@ -225,7 +225,7 @@ class TestWandbLogger:

         del logger

-    def test_wandb_logger_reporting(trial):
+    def test_wandb_logger_reporting(self, trial):
         logger = WandbTestExperimentLogger(
             project="test_project", api_key="1234", excludes=["metric2"]
         )
Update exporting_models.md. Conflicts with the comment on the following line; the comment is correct.
@@ -8,7 +8,7 @@ graph proto. A checkpoint will typically consist of three files:
 *   model.ckpt-${CHECKPOINT_NUMBER}.meta

 After you've identified a candidate checkpoint to export, run the following
-command from tensorflow/models/research/object_detection:
+command from tensorflow/models/research/:

 ``` bash
 # From tensorflow/models/research/
Fix for grains with list of objects. This commit will make it so that grains that are lists of complex objects will be flattened, ensuring that comparisons can be made. Fixes
@@ -44,6 +44,13 @@ def exists(name, delimiter=DEFAULT_TARGET_DELIM):
         ret['comment'] = 'Grain does not exist'
     return ret

+def flatten(li, flattened = list()):
+    for subli in li:
+        if type(subli) == list:
+            flatten(subli, flattened)
+        else:
+            flattened.append(frozenset(subli))
+    return set(flattened)

 def present(name, value, delimiter=DEFAULT_TARGET_DELIM, force=False):
     '''
@@ -174,7 +181,7 @@ def list_present(name, value, delimiter=DEFAULT_TARGET_DELIM):
             ret['comment'] = 'Grain {0} is not a valid list'.format(name)
             return ret
         if isinstance(value, list):
-            if set(value).issubset(set(__salt__['grains.get'](name))):
+            if flatten(value).issubset(flatten(__salt__['grains.get'](name))):
                 ret['comment'] = 'Value {1} is already in grain {0}'.format(name, value)
                 return ret
         elif name in __context__.get('pending_grains', {}):
[tests] Remove site_detect_tests.APIDisabledTestCase. API is available for
@@ -176,15 +176,6 @@ class FailingSiteTestCase(SiteDetectionTestCase):
         self.assertNoSite('http://wiki.animutationportal.com/index.php/$1')


-class APIDisabledTestCase(SiteDetectionTestCase):
-
-    """Test MediaWiki sites without an enabled API."""
-
-    def test_linuxquestions(self):
-        """Test detection of MediaWiki sites for wiki.linuxquestions.org."""
-        self.assertNoSite('http://wiki.linuxquestions.org/wiki/$1')
-
-
 class NoSiteTestCase(SiteDetectionTestCase):

     """Test detection of non-wiki sites."""
Fix passing env vars to CmdAction. This fixes issue
@@ -189,8 +189,13 @@ class CmdAction(BaseAction):
                     "CmdAction Error creating command string", exc)

         # set environ to change output buffering
+        subprocess_pkwargs = self.pkwargs.copy()
         env = None
+        if 'env' in subprocess_pkwargs:
+            env = subprocess_pkwargs['env']
+            del subprocess_pkwargs['env']
         if self.buffering:
+            if not env:
                 env = os.environ.copy()
             env['PYTHONUNBUFFERED'] = '1'
@@ -201,7 +206,7 @@ class CmdAction(BaseAction):
             #bufsize=2, # ??? no effect use PYTHONUNBUFFERED instead
             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
             env=env,
-            **self.pkwargs)
+            **subprocess_pkwargs)

         output = StringIO()
         errput = StringIO()
fix breadcrumbs for root page on page listing. When breadcrumbs show on the root page, it is the only item; ensure the content is inset so that it shows correctly.
 {% load wagtailadmin_tags i18n %}

 {% block header_content %}
-    {% breadcrumbs parent_page 'wagtailadmin_explore' url_root_name='wagtailadmin_explore_root' %}
-
+    {# Accessible page title #}
     <h1 class="w-sr-only">
         {{ title }}
     </h1>

+    {# breadcrumbs #}
+    {% if parent_page.is_root %}
+        <div class="w-pl-3">{% breadcrumbs parent_page 'wagtailadmin_explore' url_root_name='wagtailadmin_explore_root' is_expanded=True %}</div>
+    {% else %}
+        {% breadcrumbs parent_page 'wagtailadmin_explore' url_root_name='wagtailadmin_explore_root' %}
+    {% endif %}

     {# Actions divider #}
     <div class="w-w-px w-h-[30px] w-ml-auto sm:w-ml-0 w-bg-grey-100"></div>

     {# Page actions dropdown #}
Always lowercase role name. Due to [1] ansible always accesses servers lowercase. Also, with respect to [2], this patch lowercases the name which is used in fqdn, hostname, ssh_known_hosts and other places. [1] [2] Resolves: rhbz#1619556
@@ -411,7 +411,7 @@ resources:
       user_data: {get_resource: UserData}
       name:
         yaql:
-          expression: $.data.hostname_map.get($.data.hostname, $.data.hostname)
+          expression: $.data.hostname_map.get($.data.hostname, $.data.hostname).toLower()
           data:
             hostname: {get_param: Hostname}
             hostname_map: {get_param: HostnameMap}
admin: fix checkboxes widget. This change from a function to an arrow function allows us to access the containing class with `this` in the line below.
@@ -43,7 +43,7 @@ export class CheckboxesInputWidget extends React.PureComponent {
     let { className, value, placeholder, type, onChange, ...otherProps } = this.props
     className = (className || '') + ' checkbox'

-    function onChangeHandler (field, event) {
+    const onChangeHandler = (field, event) => {
       const newValue = flipSetValue(this.props.value, field, event.target.checked)
       onChange(newValue)
     }
invitations: Make stream labels click targets. Label tags can't be nested in each other. Fixes
             {{#if default_stream}}checked="checked"{{/if}} />
         <span></span>
         {{#if invite_only}}<i class="fa fa-lock" aria-hidden="true"></i>{{/if}}
-        <label class="inline-block">{{name}}</label>
+        {{name}}
     </label>
 {{/each}}
 </div>
Set max bar width for BarChart. Center bar chart when max width is reached. Close
@@ -366,6 +366,7 @@ class BarChart extends BaseChart {
     this.x1 = scaleBand();
     this.y = scaleLinear();
     this.selections = {};
+    this.maxColumnWidth = 100;

     this.xAxis = axisBottom(this.x0)
       .tickSizeOuter(0);
@@ -418,7 +419,11 @@ class BarChart extends BaseChart {
   }

   update() {
-    this.width = parseInt(container.style('width'), 10) - this.margin.left - this.margin.right;
+    const screenWidth = parseInt(container.style('width'), 10) - this.margin.left - this.margin.right;
+    const maxWidth = this.selections.groups.size() * this.maxColumnWidth;
+    const offset = this.margin.left + (Math.max(0, screenWidth - maxWidth) / 2);
+
+    this.width = Math.min(screenWidth, maxWidth);
     this.height = 250 - this.margin.top - this.margin.bottom;

     this.y.range([this.height, 0]);
@@ -426,9 +431,9 @@ class BarChart extends BaseChart {
     this.x1.range([0, this.x0.bandwidth()]);

     this.svg
-      .attr('width', this.width + this.margin.left + this.margin.right)
+      .attr('width', screenWidth + this.margin.left + this.margin.right)
       .attr('height', this.height + this.margin.top + this.margin.bottom);
-    this.canvas.attr('transform', `translate(${this.margin.left},${this.margin.top})`);
+    this.canvas.attr('transform', `translate(${offset},${this.margin.top})`);

     this.yAxis.tickSize(-this.width, 0);
     this.selections.xAxis.attr('transform', `translate(0,${this.height})`);
zapping the cmake files in package. From my understanding these are not even required because they are generated by conan at install.
@@ -39,20 +39,11 @@ class WebsocketPPConan(ConanFile):

     def package(self):
         self._patch_sources()
-        cmake = CMake(self)
-        cmake.configure()
-        cmake.install()
         self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
         # We have to copy the headers manually, since the current install() step in the 0.8.1 release doesn't do so.
         self.copy(pattern="*.hpp", dst="include/websocketpp", src=self._source_subfolder + '/websocketpp')

     def package_info(self):
-        # https://github.com/zaphoyd/websocketpp/blob/0.8.1/CMakeLists.txt#L42-L47
-        if self.settings.os == "Windows":
-            self.cpp_info.builddirs.append(os.path.join(self.package_folder, 'cmake'))
-        else:
-            self.cpp_info.builddirs.append(os.path.join(self.package_folder, 'lib', 'cmake', 'websocketpp'))
-
         if self.options.asio == 'standalone':
             self.cpp_info.defines.extend(['ASIO_STANDALONE', '_WEBSOCKETPP_CPP11_STL_'])
Update ug012_storm_ref_datamod.rst. Minor tweaks for wording.
@@ -12,7 +12,7 @@ The operators below can be used to modify the Synapse hypergraph by:

 All of these operators are defined in `storm.py`__.

-**IMPORTANT:** Synapse does not have an "are you sure?" prompt. Caution should be used with operators that can modify Synapse data, especially when used on the output of complex queries that may modify (or delete) large numbers of nodes. It is **strongly recommended** that you validate the output of a query (does the query return the expected results?) by first running the query on its own before applying any operator that will modify that data.
+**IMPORTANT:** Synapse does not have an "are you sure?" prompt. Caution should be used with operators that can modify Synapse data, especially when used on the output of complex queries that may modify (or delete) large numbers of nodes. It is **strongly recommended** that you validate the output of a query by first running the query on its own to ensure it returns the expected results before applying any operator that will modify that data.

 See the `Storm Syntax Conventions`__ for an explanation of the usage format used below.

@@ -80,7 +80,7 @@ Todo
 * When creating a ``<form>`` whose ``<valu>`` consists of multiple components, the components must be passed as a comma-separated list enclosed in parentheses.
 * ``addnode()`` will create non-deconflictable node types.
 * ``addnode()`` will check whether a deconflictable node type already exists and either create it or return information on the existing node.
-* Secondary properties must be specified by their relative property name (``:baz`` instead of ``foo:bar:baz``).
+* Secondary properties must be specified by their relative property name. For the form ``foo:bar`` and the property ``baz`` (e.g., ``foo:bar:baz``) the relative property name is specified as ``:baz``.
 * Specifying one or more secondary properties will set the ``<prop>=<pval>`` if it does not exist, or modify (overwrite) the ``<prop>=<pval>`` if it already exists.

 **Operator Syntax Notes:**
Improve part information display. Better terminology.
             <td><b>Units</b></td>
             <td>{{ part.units }}</td>
         </tr>
+        {% if part.minimum_stock > 0 %}
+        <tr>
+            <td><b>Minimum Stock</b></td>
+            <td>{{ part.minimum_stock }}</td>
+        </tr>
+        {% endif %}
     </table>
 </div>
 <div class='col-sm-6'>
     <table class='table table-striped'>
+        {% if part.buildable %}
         <tr>
-            <td><b>Buildable</b></td>
-            <td>{% include "yesnolabel.html" with value=part.buildable %}</td>
+            <td><b>Assembly</b></td>
+            <td><i>This part can be assembled from other parts</i></td>
         </tr>
+        {% endif %}
+        {% if part.consumable %}
         <tr>
-            <td><b>Consumable</b></td>
-            <td>{% include "yesnolabel.html" with value=part.consumable %}</td>
+            <td><b>Component</b></td>
+            <td><i>This part can be used in assemblies</i></td>
         </tr>
+        {% endif %}
+        {% if part.trackable %}
         <tr>
             <td><b>Trackable</b></td>
-            <td>{% include "yesnolabel.html" with value=part.trackable %}</td>
+            <td><i>Stock for this part will be tracked by (serial or batch)</i></td>
         </tr>
+        {% endif %}
+        {% if part.purchaseable %}
         <tr>
             <td><b>Purchaseable</b></td>
-            <td>{% include "yesnolabel.html" with value=part.purchaseable %}</td>
+            <td><i>This part can be purchased from external suppliers</i></td>
         </tr>
+        {% endif %}
+        {% if part.salable %}
         <tr>
             <td><b>Salable</b></td>
-            <td>{% include "yesnolabel.html" with value=part.salable %}</td>
-        </tr>
-        {% if part.minimum_stock > 0 %}
-        <tr>
-            <td><b>Minimum Stock</b></td>
-            <td>{{ part.minimum_stock }}</td>
+            <td><i>This part can be sold to customers</i></td>
         </tr>
         {% endif %}
     </table>
Conditions: validate for self-dependencies - when setting the owner of a Condition, a warning will be thrown if the owner is a dependency of the Condition
@@ -273,7 +273,9 @@ Class Reference

 """

+import collections
 import logging
+import warnings

 from psyneulink.core.globals.parameters import parse_execution_context
 from psyneulink.core.globals.utilities import call_with_pruned_args
@@ -457,6 +459,26 @@ class Condition(object):

         return call_with_pruned_args(self.func, *(self.args + args), **kwargs_to_pass)


+class _DependencyValidation:
+    @Condition.owner.setter
+    def owner(self, value):
+        # "dependency" or "dependencies" is always the first positional argument
+        if not isinstance(self.args[0], collections.abc.Iterable):
+            dependencies = [self.args[0]]
+        else:
+            dependencies = self.args[0]
+
+        if value in dependencies:
+            warnings.warn(
+                f'{self} is dependent on {value}, but you are assigning {value} as its owner.'
+                ' This may result in infinite loops or unknown behavior.',
+                stacklevel=5
+            )
+
+        self._owner = value
+
+
 #########################################################################################################
 # Included Conditions
 #########################################################################################################
@@ -1129,7 +1151,7 @@ class AfterNRuns(Condition):
 ######################################################################


-class BeforeNCalls(Condition):
+class BeforeNCalls(_DependencyValidation, Condition):
     """BeforeNCalls

     Parameters:
@@ -1165,7 +1187,7 @@
 # Since this condition is unlikely to be used, it's best to leave it for now


-class AtNCalls(Condition):
+class AtNCalls(_DependencyValidation, Condition):
     """AtNCalls

     Parameters:
@@ -1195,7 +1217,7 @@
         super().__init__(func, dependency, n)


-class AfterCall(Condition):
+class AfterCall(_DependencyValidation, Condition):
     """AfterCall

     Parameters:
@@ -1225,7 +1247,7 @@
         super().__init__(func, dependency, n)


-class AfterNCalls(Condition):
+class AfterNCalls(_DependencyValidation, Condition):
     """AfterNCalls

     Parameters:
@@ -1255,7 +1277,7 @@
         super().__init__(func, dependency, n)


-class AfterNCallsCombined(Condition):
+class AfterNCallsCombined(_DependencyValidation, Condition):
     """AfterNCallsCombined

     Parameters:
@@ -1294,7 +1316,7 @@
         super().__init__(func, *dependencies, n=n)


-class EveryNCalls(Condition):
+class EveryNCalls(_DependencyValidation, Condition):
     """EveryNCalls

     Parameters:
@@ -1342,7 +1364,7 @@
         super().__init__(func, dependency, n)


-class JustRan(Condition):
+class JustRan(_DependencyValidation, Condition):
     """JustRan

     Parameters:
@@ -1370,7 +1392,7 @@
         super().__init__(func, dependency)


-class AllHaveRun(Condition):
+class AllHaveRun(_DependencyValidation, Condition):
     """AllHaveRun

     Parameters:
@@ -1409,7 +1431,7 @@
         super().__init__(func, *dependencies)


-class WhenFinished(Condition):
+class WhenFinished(_DependencyValidation, Condition):
     """WhenFinished

     Parameters:
@@ -1438,7 +1460,7 @@
         super().__init__(func, dependency)


-class WhenFinishedAny(Condition):
+class WhenFinishedAny(_DependencyValidation, Condition):
     """WhenFinishedAny

     Parameters:
@@ -1475,7 +1497,7 @@
         super().__init__(func, *dependencies)


-class WhenFinishedAll(Condition):
+class WhenFinishedAll(_DependencyValidation, Condition):
     """WhenFinishedAll

     Parameters:
Add major axis calculation. To calculate the major axis, we apply the shape functions to the complex values from the vector and then calculate the orbit for each of those values.
@@ -381,11 +381,14 @@ class Shape(Results):
             zn[pos0:pos1] = (node_pos * onn + Le * zeta).reshape(nn)

             # major axes calculation
-            # select orbits
-            orbits = self.orbits[n : n + 2]
-            major[pos0:pos1] = Nx @ np.array(
-                [orbits[0].major_axes, 0, orbits[1].major_axes, 0]
-            )
+            xn_complex[pos0:pos1] = Nx @ evec[xx]
+            yn_complex[pos0:pos1] = Ny @ evec[yy]
+            for i in range(pos0, pos1):
+                orb = Orbit(node=0, node_pos=0, ru_e=xn_complex[i], rv_e=yn_complex[i])
+                major[i] = orb.major_axes
+
+        if self.normalize:
+            major /= max(major)

         self.xn = xn
         self.yn = yn
Add Imputer for missing values. Uses median strategy for numeric columns, and adds an alternate value for missing values in categorical columns.
@@ -3,6 +3,7 @@ import numpy as np
 import pandas as pd
 import tensorflow as tf
 from sklearn.compose import ColumnTransformer
+from sklearn.impute import SimpleImputer
 from sklearn.model_selection import train_test_split
 from sklearn.pipeline import Pipeline
 from sklearn.preprocessing import OneHotEncoder, StandardScaler
@@ -17,8 +18,6 @@ import cv2

 # Preprocesses the data appropriately for single reg data
 def structured_preprocesser(data):
-    data.fillna(0, inplace=True)
-
     # identifies the categorical and numerical columns
     categorical_columns = data.select_dtypes(exclude=["number"]).columns
     numeric_columns = data.columns[data.dtypes.apply(
@@ -33,6 +32,7 @@ def structured_preprocesser(data):
         categorical_cols = data.columns[categorical_feature_mask].tolist()

         labeled_df = data[categorical_cols]
+        labeled_df.fillna("", inplace=True)
         enc = OneHotEncoder()
         enc.fit(labeled_df)
         onehotlabels = enc.transform(labeled_df).toarray()
@@ -51,6 +51,10 @@ def structured_preprocesser(data):
             del data[x]

     if(len(numeric_columns) != 0):
+        # Imputes numeric columns with median
+        imputer = SimpleImputer(strategy="median")
+        data[numeric_columns] = imputer.fit_transform(data[numeric_columns])
+
         # Scales numeric data
         scaler = StandardScaler()
         data[numeric_columns] = scaler.fit_transform(data[numeric_columns])
TargetDescription: generate default config. Added a method to TargetDescription to generate a dict with the default config for that description.
@@ -82,6 +82,14 @@ class TargetDescription(object):
         self._set('platform_params', platform_params)
         self._set('conn_params', conn_params)

+    def get_default_config(self):
+        param_attrs = ['target_params', 'platform_params', 'conn_params']
+        config = {}
+        for pattr in param_attrs:
+            for n, p in getattr(self, pattr).itervalues():
+                config[n] = p.default
+        return config
+
     def _set(self, attr, vals):
         if vals is None:
             vals = {}
[dagit] Point to AssetMaterialization Summary: Resolves Test Plan: Buildkite. Reviewers: schrockn, sandyryza
@@ -72,10 +72,11 @@ export const AssetsCatalogTable: React.FunctionComponent<{prefixPath: string[]}>
           <p>
             There are no {prefixPath.length ? 'matching ' : 'known '}
             materialized assets with {prefixPath.length ? 'the ' : 'a '}
-            specified asset key. Any asset keys that have been specified with a{' '}
-            <code>Materialization</code> during a pipeline run will appear here. See the{' '}
+            specified asset key. Any asset keys that have been specified with an{' '}
+            <code>AssetMaterialization</code> during a pipeline run will appear here. See
+            the{' '}
             <a href="https://docs.dagster.io/_apidocs/solids#dagster.AssetMaterialization">
-              Materialization documentation
+              AssetMaterialization documentation
             </a>{' '}
             for more information.
           </p>
Adds ability to do inline translations. Allows the user to enter translation requests on one line rather than having to enter everything separately. Defaults to staged information input if the rest of the command after "translate" cannot be correctly parsed.
-from plugin import plugin, require
+from plugin import plugin, require, alias
 from googletrans import Translator
 from googletrans.constants import LANGCODES, LANGUAGES, SPECIAL_CASES


 @require(network=True)
+@alias('trans')
 @plugin('translate')
 def translate(jarvis, s):
     """
     translates from one language to another.
     """
+# Check whether user has entered translate by itself or with extra parameters
+    if s != "":
+        words = s.lower().split()
+        currentPos = 0
+        finalPos = 0
+# Search input string for source language
+        for i in range(len(words)):
+            word = words[i]
+            currentPos = i
+# Do not include LANGCODES in the tests when using full sentence command since words can conflict with them (Eg. hi -> Hindi)
+            if (word in LANGUAGES):
+                srcs = word
+                break
+            elif srcs in SPECIAL_CASES:
+                srcs = SPECIAL_CASES[word]
+                break
+# Search input string for destination language starting from the word after the source language
+        for i in range(currentPos + 1, len(words)):
+            word = words[i]
+            finalPos = i
+# Do not include LANGCODES in the tests when using full sentence command since words can conflict with them (Eg. hi -> Hindi)
+            if (word in LANGUAGES):
+                des = word
+                break
+            elif srcs in SPECIAL_CASES:
+                des = SPECIAL_CASES[word]
+                break
+
+# If both languages found, work out where the text to be translated is in the sentence and perform the translation
+        if (des and srcs):
+            if(currentPos < 2):
+                tex = " ".join(words[4:])
+            else:
+                tex = " ".join(words[:len(words) - (4 + (len(words) - finalPos - 1))])  # Discards extra words at the end of the sentence
+            performTranslation(srcs, des, tex)
+# Otherwise perform the default method for translation
+        else:
+            jarvis.say("\nSorry, I couldn't understand your translation request. Please enter the request in steps.")
+            default(jarvis)
+    else:
+        default(jarvis)
+
+
+def default(jarvis):
+    """
+    Default function that is called when translate is entered alone or
+    when input is not understood when translate is entered with additional parameters
+    """
+# Get source language
     jarvis.say('\nEnter source language ')
     srcs = jarvis.input().lower().strip()
+# Check source language
     while (
             srcs not in LANGUAGES) and (
             srcs not in SPECIAL_CASES) and (
@@ -23,8 +74,10 @@ def translate(jarvis, s):
         else:
             jarvis.say("\nInvalid source language\nEnter again")
             srcs = jarvis.input().lower()
+# Get destination language
     jarvis.say('\nEnter destination language ')
     des = jarvis.input().lower().strip()
+# Check destination language
     while (
             des not in LANGUAGES) and (
             des not in SPECIAL_CASES) and (
@@ -36,8 +89,17 @@ def translate(jarvis, s):
         else:
             jarvis.say("\nInvalid destination language\nEnter again")
             des = jarvis.input().lower()
+
     jarvis.say('\nEnter text ')
     tex = jarvis.input()
+
+    performTranslation(srcs, des, tex)
+
+
+def performTranslation(srcs, des, tex):
+    """
+    Function to actually perform the translation of text and print the result
+    """
     translator = Translator()
     result = translator.translate(tex, dest=des, src=srcs)
     result = u"""
@@ -47,4 +109,4 @@ def translate(jarvis, s):
     [pron.] {pronunciation}
     """.strip().format(src=result.src, dest=result.dest, original=result.origin,
                        text=result.text, pronunciation=result.pronunciation)
-    print(result)
+    print("\n" + result)
Document python_app target in Python readme. Support for `python_app` targets was added in - here we add documentation in the Python readme.
@@ -151,6 +151,39 @@ Use `test` to run the tests. This uses `pytest`:
                    SUCCESS
     $

+Python Apps for Deployment
+--------------------------
+
+For deploying your Python apps, Pants can create archives (e.g.: tar.gz, zip) that contain an
+executable pex along with other files it needs at runtime (e.g.: config files, data sets).
+These archives can be extracted and run on production machines as part of your deployment process.
+
+To create a Python app for deployment, define a `python_app` target. Notice how the `python_app`
+target combines an existing `python_binary` with `bundles` that describe the other files to
+include in the archive.
+
+!inc[start-at=python_binary](hello/main/BUILD)
+
+Use `./pants bundle` to create the archive.
+
+    $ ./pants bundle examples/src/python/example/hello/main/:hello-app --bundle-py-archive=tgz
+    <output omitted for brevity>
+    00:59:52 00:02   [bundle]
+    00:59:52 00:02     [py]
+                       created bundle copy dist/examples.src.python.example.hello.main.hello-app-bundle
+                       created archive copy dist/examples.src.python.example.hello.main.hello-app.tar.gz
+    00:59:53 00:03   [complete]
+
+The archive contains an executable pex file, along with a loose file matched by the bundle glob.
+
+    $ tar -tzvf dist/examples.src.python.example.hello.main.hello-app.tar.gz
+    drwxr-xr-x root/root         0 2018-05-02 02:16 ./
+    -rwxr-xr-x root/root    474997 2018-05-02 02:16 ./main.pex
+    -rw-rw-r-- root/root       562 2018-05-01 13:34 ./BUILD
+
+See <a pantsref="bdict_bundle">bundle</a> in the BUILD dictionary for additional details about
+defining the layout of files in your archive.
+
 Debugging Tests
 ---------------

 Pants scrubs the environment's `PYTHONPATH` when running tests, to ensure a hermetic, repeatable test run.
docs: Update README.md. * Update README.md: adding a SAM workshop * capitalize * using url shortener to track
 ![Install](https://img.shields.io/badge/brew-aws--sam--cli-orange) ![pip](https://img.shields.io/badge/pip-aws--sam--cli-9cf)

-[Installation](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) | [Blogs](https://serverlessland.com/blog?tag=AWS%20SAM) | [Videos](https://serverlessland.com/video?tag=AWS%20SAM) | [AWS Docs](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/what-is-sam.html) | [Roadmap](https://github.com/aws/aws-sam-cli/wiki/SAM-CLI-Roadmap)
+[Installation](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) | [Blogs](https://serverlessland.com/blog?tag=AWS%20SAM) | [Videos](https://serverlessland.com/video?tag=AWS%20SAM) | [AWS Docs](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/what-is-sam.html) | [Roadmap](https://github.com/aws/aws-sam-cli/wiki/SAM-CLI-Roadmap) | [Try It Out](https://s12d.com/Tq9ZE-Br)

 The AWS Serverless Application Model (SAM) CLI is an open-source CLI tool that helps you develop serverless applications containing [Lambda functions](https://aws.amazon.com/lambda/), [Step Functions](https://aws.amazon.com/step-functions/), [API Gateway](https://aws.amazon.com/api-gateway/), [EventBridge](https://aws.amazon.com/eventbridge/), [SQS](https://aws.amazon.com/sqs/), [SNS](https://aws.amazon.com/sns/) and more. Some of the features it provides are:

 - **Initialize serverless applications** in minutes with AWS provided infrastructure templates with `sam init`
Improve run_docker_test to handle multiple compose files. This feature is required for cross-repository testing.
@@ -66,6 +66,14 @@ def main():
             '-f', compose_file
         ]

+    # Search for extra compose files and add them to the compose command
+    if args.extra_file:
+        for file in args.extra_file:
+            extra_file = _get_compose_file(file)
+            compose = compose + [
+                '-f', extra_file
+            ]
+
     compose_up = compose + [
         'up', '--abort-on-container-exit'
     ]
@@ -249,6 +257,12 @@ def parse_args():
         "compose_file",
         help="docker-compose.yaml file that contains the test")

+    parser.add_argument(
+        "-e", "--extra-file",
+        help="extra compose file with additional components. can be specified \
+        more than once",
+        action='append')
+
     parser.add_argument(
         "-c", "--clean",
         help="don't run the test, just cleanup a previous run",
Update README.md. Forgot to remove old license reference.
@@ -216,5 +216,3 @@ We now have a [blogpost](https://medium.com/twentybn/towards-situated-visual-ai-

 The code is copyright (c) 2020 Twenty Billion Neurons GmbH under an MIT Licence. See the file LICENSE for details. Note that this license only covers the source code of this repo. Pretrained weights come with a separate license available [here](https://20bn.com/licensing/sdk/evaluation).
-
-This repo uses PyTorch, which is licensed under a 3-clause BSD License. See the file LICENSE_PYTORCH for details.
users: Pass email_address_visibility as parameter to can_access_delivery_email. This is a prep commit for adding user-level email visibility setting.
@@ -394,12 +394,11 @@ def validate_user_custom_profile_data(
             raise JsonableError(error.message)


-def can_access_delivery_email(user_profile: UserProfile) -> bool:
-    realm = user_profile.realm
-    if realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS:
+def can_access_delivery_email(user_profile: UserProfile, email_address_visibility: int) -> bool:
+    if email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS:
         return user_profile.is_realm_admin

-    if realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_MODERATORS:
+    if email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_MODERATORS:
         return user_profile.is_realm_admin or user_profile.is_moderator

     return False
@@ -477,7 +476,9 @@ def format_user_row(
         client_gravatar=client_gravatar,
     )

-    if acting_user is not None and can_access_delivery_email(acting_user):
+    if acting_user is not None and can_access_delivery_email(
+        acting_user, realm.email_address_visibility
+    ):
         result["delivery_email"] = row["delivery_email"]

     if is_bot:
TrivialFix: Move portbindings to neutron-lib. Now that neutron-lib houses the portbindings api, move the constants section from neutron to neutron-lib in networking-odl. TrivialFix. Partially-implements: blueprint neutron-lib-adoption
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-
-from neutron.extensions import portbindings
 from neutron.services.trunk import constants as t_consts
+from neutron_lib.api.definitions import portbindings
+

 SUPPORTED_INTERFACES = (
     portbindings.VIF_TYPE_OVS,
improved Embed integer check. Previous check was not accepting some valid integer types such as int8.
@@ -373,7 +373,7 @@ class Embed(Module):
       Output which is embedded input data.  The output shape follows the input,
       with an additional `features` dimension appended.
     """
-    if inputs.dtype not in [jnp.int32, jnp.int64, jnp.uint32, jnp.uint64]:
+    if not jnp.issubdtype(inputs.dtype, jnp.integer):
       raise ValueError('Input type must be an integer or unsigned integer.')
     return self.embedding[inputs]
Update FORWARD_TRAFFIC.yml. Support for fortinet (FortiOS 5.4).
-# Logs files identified as type=traffic and subtype=forward (FortiOs 5.4)
+# Logs files identified as type=traffic and subtype=forward (FortiOs 5.4).
 #
 # <189>date=2019-04-09 time=04:27:29 devname=fw01 devid=FG800D0123456789 logid=0000000013 type=traffic subtype=forward
 #   level=notice vd=root srcip=1.1.1.1 srcport=19982 srcintf="port1" dstip=10.10.10.10 dstport=179 dstintf="port3"
Remove complex statement from `if`, instead use let binding Clippy warns that the statement used in the `if` clause is complex and should be part of a let binding and then test the variable that is bound.
@@ -123,9 +123,11 @@ impl<'b, 't, B: BatchIndex + 'b, T: TransactionIndex + 't> ChainCommitState<'b, return Err(ChainCommitStateError::DuplicateBatch((*id).into())); } - if self.batch_index.contains(&id).map_err(|err| { + let batch_is_contained = self.batch_index.contains(&id).map_err(|err| { ChainCommitStateError::Error(format!("Reading contains on BatchIndex: {:?}", err)) - })? { + }); + + if batch_is_contained? { if let Some(ref block) = self .batch_index .get_block_by_id(&id)
WIP add an accept rule instead of modifying surt in place for seed redirects
@@ -203,10 +203,12 @@ class Site(doublethink.Document, ElapsedMixIn): def note_seed_redirect(self, url): new_scope_surt = brozzler.site_surt_canon(url).surt().decode("ascii") + if not "accepts" in self.scope: + self.scope["accepts"] = [] if not new_scope_surt.startswith(self.scope["surt"]): - self.logger.info("changing site scope surt from {} to {}".format( - self.scope["surt"], new_scope_surt)) - self.scope["surt"] = new_scope_surt + self.logger.info( + "adding surt %s to scope accept rules", new_scope_surt) + self.scope.accepts.append({"surt": new_scope_surt}) def extra_headers(self): hdrs = {}
[java-services] add new transient error See We encounter a `is.hail.relocated.com.google.cloud.storage.StorageException` which is caused by a `com.google.api.client.http.HttpResponseException`. The latter exception is not currently considered a transient error. This PR changes isTransientError to recognize `HttpResponseException` as a transient error.
@@ -14,6 +14,7 @@ import scala.util.Random import java.io._ import com.google.cloud.storage.StorageException import com.google.api.client.googleapis.json.GoogleJsonResponseException +import com.google.api.client.http.HttpResponseException package object services { lazy val log: Logger = LogManager.getLogger("is.hail.services") @@ -36,6 +37,8 @@ package object services { e match { case e: NoHttpResponseException => true + case e: HttpResponseException => + RETRYABLE_HTTP_STATUS_CODES.contains(e.getStatusCode()) case e: ClientResponseException => RETRYABLE_HTTP_STATUS_CODES.contains(e.status) case e: GoogleJsonResponseException =>
using new conan 1.24.0 tools.cppstd_flag in boost recipe fixes
from conans import ConanFile from conans import tools -from conans.client.build.cppstd_flags import cppstd_flag -from conans.tools import Version +from conans.tools import Version, cppstd_flag from conans.errors import ConanException from conans.errors import ConanInvalidConfiguration @@ -568,12 +567,7 @@ class BoostConan(ConanFile): flags.append("toolset=%s" % self._toolset) if self.settings.get_safe("compiler.cppstd"): - flags.append("cxxflags=%s" % cppstd_flag( - self.settings.get_safe("compiler"), - self.settings.get_safe("compiler.version"), - self.settings.get_safe("compiler.cppstd") - ) - ) + flags.append("cxxflags=%s" % cppstd_flag(self.settings)) # CXX FLAGS cxx_flags = []
Maybe I don't have to be that pedantic. I still think that some data can't be trusted.
@@ -324,6 +324,7 @@ class URLPlaylistEntry(BasePlaylistEntry): # Move the temporary file to it's final location. os.rename(unhashed_fname, self.filename) + if self.duration == None: # Get duration from the file after downloaded args = [ 'ffprobe',
Fixed CircleCI Errors Idea for the fix by
@@ -9,6 +9,11 @@ jobs: steps: - checkout + - run: + name: Level synthesis test + command: | + echo -e "y\n" | python3 ./make_level.py testskill testlevel1 + echo -e "y\n" | python3 ./make_level.py testskill testlevel2 - run: name: Set up environment command: | @@ -17,11 +22,6 @@ jobs: pip3 install --user flake8 git config --global user.name "CircleCI" git config --global user.email "[email protected]" - - run: - name: Level synthesis test - command: | - echo -e "y\n" | python3 ./make_level.py testskill testlevel1 - echo -e "y\n" | python3 ./make_level.py testskill testlevel2 - run: name: Running tests command: |
Update video.py Adding title and parentTitle to Episode variables (Lines 475 & 476)
@@ -438,6 +438,8 @@ class Episode(Video, Playable):
             parentKey (str): Key to this episodes :class:`~plexapi.video.Season`.
             parentRatingKey (int): Unique key for this episodes :class:`~plexapi.video.Season`.
             parentThumb (str): Key to this episodes thumbnail.
+            parentTitle (str): Name of this episode's season.
+            title (str): Name of this episode.
             rating (float): Movie rating (7.9; 9.8; 8.1).
             viewOffset (int): View offset in milliseconds.
             year (int): Year episode was released.
@@ -470,6 +472,8 @@ class Episode(Video, Playable):
         self.parentKey = data.attrib.get('parentKey')
         self.parentRatingKey = utils.cast(int, data.attrib.get('parentRatingKey'))
         self.parentThumb = data.attrib.get('parentThumb')
+        self.parentTitle = data.attrib.get('parentTitle')
+        self.title = data.attrib.get('title')
         self.rating = utils.cast(float, data.attrib.get('rating'))
         self.viewOffset = utils.cast(int, data.attrib.get('viewOffset', 0))
         self.year = utils.cast(int, data.attrib.get('year'))
fixed a bug in the serialisation of Exceptions: do not assume that all errors inherit from `MaestralApiError`, e.g., `DropboxDeletedError` inherits from `Exception`; list all parent classes in the serialisation
@@ -30,11 +30,10 @@ def dropbox_stone_to_dict(obj): def maestral_error_to_dict(err): - assert isinstance(err, MaestralApiError) dictionary = dict( type=err.__class__.__name__, - inherits=MaestralApiError.__name__, + inherits=[str(b) for b in err.__class__.__bases__], cause=err.__cause__, traceback=traceback.format_exception(err.__class__, err, err.__traceback__) )
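To make the `inherits` change concrete, a stand-in sketch (the real Maestral classes are not reproduced here): `__class__.__bases__` works for any exception, whereas the removed `assert isinstance(err, MaestralApiError)` broke on errors like `DropboxDeletedError` that subclass `Exception` directly.

```python
class MaestralApiError(Exception):       # stand-in for the real class
    pass

class DropboxDeletedError(Exception):    # note: not a MaestralApiError subclass
    pass

for err in (MaestralApiError("a"), DropboxDeletedError("b")):
    print(err.__class__.__name__, [str(b) for b in err.__class__.__bases__])
# MaestralApiError ["<class 'Exception'>"]
# DropboxDeletedError ["<class 'Exception'>"]
```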
Changes made in code to open a website as well. Checks if the internet is active; if active, the code will prompt whether to open a website (optional). If the internet is not active, the code will output "No internet connection!" and exit. *Download geckodriver from (browser to open the website) *Install required module "selenium"
-import urllib.request
-
+import urllib2
+import os
+from selenium import webdriver
+from selenium.webdriver.common.keys import Keys
+print "Testing Internet Connection"
+print
 try:
-    urllib.request.urlopen('http://google.com')
-    print ("working connection")
+    urllib2.urlopen("http://google.com", timeout=2)#Tests if connection is up and running
+    print "Internet is working fine!"
+    print
+    question = raw_input("Do you want to open a website? (Y/N): ")
+    if question == 'Y':
+        print
+        search = raw_input("Input website to open (http://website.com) : ")
+    else:
+        os._exit(0)
+
+except urllib2.URLError:
+    print ("No internet connection!")#Output if no connection
+    os._exit(0)#Exit here, otherwise browser.get(search) below raises NameError
 
-except urllib.error.URLError:
-    print ("No internet connection")
+browser = webdriver.Firefox()
+browser.get(search)
+os.system('cls')#os.system('clear') if Linux
+print "[+] Website "+search + " opened!"
+browser.close()
Update Changelog.md: README.md typo fix
@@ -5,6 +5,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
 and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
 
 ## [Unreleased]
+### Fixed
+- README.md typo
 
 ## [2.4] - 2019-07-31
 ### Added
 - Tversky index (TI)
Update README.md The extra backslash is unnecessary and breaks the script. PDFs and screenshots are not generated.
@@ -18,7 +18,7 @@ Those numbers are from running it single-threaded on my i5 machine with 50mbps d ```bash # On Mac: brew install Caskroom/versions/google-chrome-canary wget python3 -echo -e '#!/bin/bash\n/Applications/Google\ Chrome\ Canary.app/Contents/MacOS/Google\ Chrome\ Canary \"$@"' > /usr/local/bin/google-chrome +echo -e '#!/bin/bash\n/Applications/Google\ Chrome\ Canary.app/Contents/MacOS/Google\ Chrome\ Canary "$@"' > /usr/local/bin/google-chrome chmod +x /usr/local/bin/google-chrome # On Linux: wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
HotFix Helping out Luis to parallelize on a per-scenario basis. We found that CEA needs a temp file per scenario. This should not change main functionality. It will just make it easier to parallelize
@@ -1011,7 +1011,11 @@ class InputLocator(object):
     # OTHER
     def get_temporary_folder(self):
         """Temporary folder as returned by `tempfile`."""
-        return tempfile.gettempdir()
+        # every scenario should have its own temp folder, otherwise we will have problems with parallelization
+        temp_folder = os.path.join(tempfile.gettempdir(), self.scenario.split("\\")[-1])
+        if not os.path.exists(temp_folder):
+            os.makedirs(temp_folder)
+        return temp_folder
 
     def get_temporary_file(self, filename):
         """Returns the path to a file in the temporary folder with the name `filename`"""
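A side note on the `split("\\")[-1]` above: it assumes Windows-style paths. A platform-neutral variant of the same idea (purely illustrative; `scenario` stands in for `self.scenario`, and the sample paths are invented) could use `os.path.basename`:

```python
import os
import os.path
import tempfile

scenario = r"C:\projects\cea\reference-case" if os.name == "nt" else "/projects/cea/reference-case"
temp_folder = os.path.join(tempfile.gettempdir(), os.path.basename(scenario))
os.makedirs(temp_folder, exist_ok=True)  # folds the exists()/makedirs() pair into one call
print(temp_folder)
```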
Fix for iptables.build_rule does not give full rule even when full=True is provided. Changed as suggested by Daniel: "Yes, that should be changed to if full is True"
@@ -501,7 +501,7 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None, rule += after_jump - if full in ['True', 'true']: + if full is True: if not table: return 'Error: Table needs to be specified' if not chain:
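For the record, the behavioral difference between the old membership test and the new identity test (nothing Salt-specific assumed, just the two expressions side by side):

```python
for full in (True, 'True', 'true', False):
    old = full in ['True', 'true']   # matched only the *strings*
    new = full is True               # matches only the boolean
    print(repr(full).ljust(7), 'old:', old, ' new:', new)
# True    old: False  new: True    <- the reported bug: boolean True fell through
# 'True'  old: True   new: False   <- string inputs no longer count as "full"
# 'true'  old: True   new: False
# False   old: False  new: False
```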
Cleaning up some CEMS column descriptions. Based on Karl's and Greg's suggestions. Working on Issue
{ "name": "plant_id_eia", "type": "integer", - "description": "EIA Plant Identification number. One to five digit numeric.", + "description": "The unique six-digit facility identification number, also called an ORISPL, assigned by the Energy Information Administration.", "format": "default" }, { { "name": "operating_datetime_utc", "type": "datetime", - "description": "Date and time measurement began.", + "description": "Date and time measurement began (UTC).", "format": "default" }, { { "name": "gross_load_mw", "type": "number", - "description": "Power delivered during time interval measured.", + "description": "Average power in megawatts produced during time interval measured.", "format": "default" }, { { "name": "nox_mass_lbs", "type": "number", - "description": "Nitrogen oxide emissions in pounds.", + "description": "NOx emissions in pounds.", "format": "default" }, { { "name": "heat_content_mmbtu", "type": "number", - "description": "The measure of utilization that is calculated by multiplying the quantity of fuel by the fuel's heat content.", + "description": "The energy contained in fuel burned, measured in million BTU.", "format": "default" }, { "name": "facility_id", "type": "integer", - "description": "The unique six-digit facility identification number, also called an ORISPL, assigned by the Energy Information Administration.", + "description": "Plant ID, assigned by the Energy Information Administration.", "format": "default" }, { "name": "unit_id_epa", "type": "integer", - "description": "Unique EPA identifier for each unit at a facility.", + "description": "Unit ID for each unit at a facility, assigned by the Energy Information Administration.", "format": "default" } ],
ensure class state is set on refresh; this was broken previously
@@ -239,6 +239,7 @@ function _showContentList(store, options) { const promises = [ _setContentSummary(store, options.contentScopeId, reportPayload), _setContentReport(store, reportPayload), + setClassState(store, options.classId), ]; Promise.all(promises).then( () => { @@ -272,6 +273,7 @@ function _showLearnerList(store, options) { const promises = [ _setContentSummary(store, options.contentScopeId, reportPayload), _setLearnerReport(store, reportPayload), + setClassState(store, options.classId), ]; Promise.all(promises).then( () => {
Update REQUEST-942-APPLICATION-ATTACK-SQLI.conf removed unnecessary +
@@ -1433,7 +1433,7 @@ SecRule REQUEST_COOKIES|!REQUEST_COOKIES:/__utm/|REQUEST_COOKIES_NAMES|ARGS_NAME # to the Regexp::Assemble output: # (?:ASSEMBLE_OUTPUT) # -SecRule REQUEST_COOKIES|!REQUEST_COOKIES:/__utm/|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/* "@rx (?:[\"'`][\s\d]*?[^\w\s]{1,10}\W*?\d\W*?.*?[\"'`\d])" \ +SecRule REQUEST_COOKIES|!REQUEST_COOKIES:/__utm/|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/* "@rx (?:[\"'`][\s\d]*?[^\w\s]\W*?\d\W*?.*?[\"'`\d])" \ "id:942490,\ phase:2,\ block,\
Added instantiation tests for qubit objects, should catch the most obvious mess-ups
@@ -8,6 +8,9 @@ import pycqed.instrument_drivers.meta_instrument.qubit_objects.CCL_Transmon as c from pycqed.measurement import measurement_control from qcodes import station +from pycqed.instrument_drivers.meta_instrument.qubit_objects.QuDev_transmon import QuDev_transmon +from pycqed.instrument_drivers.meta_instrument.qubit_objects.Tektronix_driven_transmon import Tektronix_driven_transmon +from pycqed.instrument_drivers.meta_instrument.qubit_objects.CC_transmon import CBox_v3_driven_transmon, QWG_driven_transmon class Test_Qubit_Object(unittest.TestCase): @@ -41,6 +44,23 @@ class Test_Qubit_Object(unittest.TestCase): self.CCL_qubit.instr_cw_source(self.MW2.name) self.CCL_qubit.instr_td_source(self.MW3.name) + def test_instantiate_QuDevTransmon(self): + QDT = QuDev_transmon('QuDev_transmon', + MC=None, heterodyne_instr=None, cw_source=None) + QDT.close() + + def test_instantiate_TekTransmon(self): + TT = Tektronix_driven_transmon('TT') + TT.close() + + def test_instantiate_CBoxv3_transmon(self): + CT = CBox_v3_driven_transmon('CT') + CT.close() + + def test_instantiate_QWG_transmon(self): + QT = QWG_driven_transmon('QT') + QT.close() + def test_prepare_for_timedomain(self): self.CCL_qubit.prepare_for_timedomain()
[modules/redshift] No digits for transition anymore Having 2 digits *after* the decimal point for transitions seems excessive - truncate the value at the decimal point.
@@ -13,13 +13,9 @@ Parameters: * redshift.lon : longitude if location is set to 'manual' """ +import re import threading -import logging -log = logging.getLogger(__name__) -try: import requests -except ImportError: - log.warning('unable to import module "requests": Location via IP disabled') import core.module import core.widget @@ -64,7 +60,8 @@ def get_redshift_value(module): widget.set('state', 'night') else: widget.set('state', 'transition') - widget.set('transition', ' '.join(line.split(' ')[2:])) + match = re.search(r'(\d+)\.\d+% ([a-z]+)', line) + widget.set('transition', '({}% {})'.format(match.group(1), match.group(2))) core.event.trigger('update', [ widget.module().id ], redraw_only=True) class Module(core.module.Module): @@ -73,7 +70,7 @@ class Module(core.module.Module): widget = core.widget.Widget(self.text) super().__init__(config, widget) - self.__thread = threading.Thread(target=get_redshift_value, args=(self,)) + self.__thread = None if self.parameter('location', '') == 'ipinfo': # override lon/lat with ipinfo @@ -97,8 +94,9 @@ class Module(core.module.Module): return val def update(self): - if self.__thread.isAlive(): + if self.__thread is not None and self.__thread.isAlive(): return + self.__thread = threading.Thread(target=get_redshift_value, args=(self,)) self.__thread.start() def state(self, widget):
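What the new regex does to a transition line, in isolation (the sample status line is invented and redshift's real output may differ slightly; the pattern and format string are copied from the diff):

```python
import re

line = "Period: Transition (43.33% day)"
match = re.search(r'(\d+)\.\d+% ([a-z]+)', line)
print('({}% {})'.format(match.group(1), match.group(2)))  # (43% day)
```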
Updates run_default_protocols and adds write_empty_protocol_data. These two functions should work now.
@@ -787,11 +787,28 @@ class ProtocolDirectory(object): def run_default_protocols(data): - default_protocols = data.input.default_protocols - if len(default_protocols) > 1: - proto = MultiProtocol(default_protocols) - elif len(default_protocols) == 1: - proto = default_protocols[list(default_protocols.keys())[0]] + return DefaultRunner().run(data) + + +def write_empty_protocol_data(inpt, dirname, sparse="auto"): + dirname = _pathlib.Path(dirname) + data_dir = dirname / 'data' + circuits = inpt.all_circuits_needing_data + nQubits = len(inpt.qubit_labels) + if sparse == "auto": + sparse = bool(nQubits > 3) # HARDCODED + + if sparse: + header_str = "# Note: on each line, put comma-separated <outcome:count> items, i.e. 00110:23" + nZeroCols = 0 else: - raise ValueError("No default protocol(s) to run!") - return proto.run(data) + fstr = '{0:0%db} count' % nQubits + nZeroCols = 2**nQubits + header_str = "## Columns = " + ", ".join([fstr.format(i) for i in range(nZeroCols)]) + + pth = data_dir / 'dataset.txt' + if pth.exists(): + raise ValueError("Template data file would clobber %s, which already exists!" % pth) + data_dir.mkdir(parents=True, exist_ok=True) + inpt.write(dirname) + _io.write_empty_dataset(pth, circuits, header_str, nZeroCols)
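The header construction above is compact; here is what it expands to for a two-qubit case (self-contained, no pyGSTi imports needed — `'0%db' % nQubits` zero-pads each outcome bitstring to nQubits bits):

```python
nQubits = 2
fstr = '{0:0%db} count' % nQubits
nZeroCols = 2 ** nQubits
header_str = "## Columns = " + ", ".join(fstr.format(i) for i in range(nZeroCols))
print(header_str)
# ## Columns = 00 count, 01 count, 10 count, 11 count
```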
Possible fix for code smells
@@ -209,7 +209,7 @@ def skip_transcode_movie(files, job, raw_path): # move others into extras folder if file == largest_file_name: # largest movie - utils.move_files(raw_path, file, job, False) + utils.move_files(raw_path, file, job, True) else: # If mainfeature is enabled - skip to the next file if job.config.MAINFEATURE:
Log service ID for invalid inbound SMS This could help identify issues with inbound SMS for a service.
@@ -135,7 +135,7 @@ def create_inbound_sms_object(service, content, from_number, provider_ref, date_ user_number = try_validate_and_format_phone_number( from_number, international=True, - log_msg='Invalid from_number received' + log_msg=f'Invalid from_number received for service "{service.id}"' ) provider_date = date_received
fix regression test for showing buildspec content. We don't raise exception BuildTestError for an invalid entry; instead we just print a message
@@ -318,7 +318,7 @@ def test_buildspec_show(): # run buildtest buildspec <test> show --theme monokai show_buildspecs(test_name, configuration, theme="monokai") - with pytest.raises(BuildTestError): + # testing invalid buildspec name, it should not raise exception random_testname = "".join(random.choices(string.ascii_letters, k=10)) show_buildspecs(test_names=[random_testname], configuration=configuration) @@ -327,14 +327,12 @@ def test_buildspec_show(): def test_buildspec_show_fail(): # Query some random test name that doesn't exist - with pytest.raises(BuildTestError): + random_testname = "".join(random.choices(string.ascii_letters, k=10)) - show_failed_buildspecs( - configuration=configuration, test_names=[random_testname] - ) + show_failed_buildspecs(configuration=configuration, test_names=[random_testname]) # Query a test that is NOT in state=FAIL - with pytest.raises(BuildTestError): + results = Report() pass_test = random.sample(results.get_test_by_state(state="PASS"), 1) show_failed_buildspecs(configuration=configuration, test_names=[pass_test])
word-count: Remove unicode test case As discussed in Fixes
from collections import Counter -# to be backwards compatible with the old Python 2.X -def decode_if_needed(string): - try: - return string.decode('utf-8') - except AttributeError: - return string - - def word_count(text): def replace_nonalpha(char): return char.lower() if char.isalnum() else ' ' - text = ''.join(replace_nonalpha(c) for c in decode_if_needed(text)) + text = ''.join(replace_nonalpha(c) for c in text) return Counter(text.split())
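Usage sketch of the simplified function (Python 3 `str` input only, now that the Python 2 decode shim is gone; the sample sentence is invented):

```python
from collections import Counter

def word_count(text):
    def replace_nonalpha(char):
        return char.lower() if char.isalnum() else ' '
    text = ''.join(replace_nonalpha(c) for c in text)
    return Counter(text.split())

print(word_count("The quick brown fox, the lazy dog."))
# Counter({'the': 2, 'quick': 1, 'brown': 1, 'fox': 1, 'lazy': 1, 'dog': 1})
```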
version: Update API_FEATURE_LEVEL. This was missed in the original commit
@@ -33,7 +33,7 @@ DESKTOP_WARNING_VERSION = "5.4.3" # Changes should be accompanied by documentation explaining what the # new level means in templates/zerver/api/changelog.md, as well as # "**Changes**" entries in the endpoint's documentation in `zulip.yaml`. -API_FEATURE_LEVEL = 130 +API_FEATURE_LEVEL = 131 # Bump the minor PROVISION_VERSION to indicate that folks should provision # only when going from an old version of the code to a newer version. Bump
Moved Qualys test to skipped due to expired account issues; currently fails content build nightly
"integrations": "PostgreSQL", "playbookID": "PostgreSQL Test" }, - { - "integrations": "Qualys", - "playbookID": "Qualys-Test", - "nightly": true - }, { "integrations": { "name": "google", { "integrations": "AlphaSOC Wisdom", "playbookID": "AlphaSOC-Wisdom-Test" + }, + { + "integrations": "Qualys", + "playbookID": "Qualys-Test", + "nightly": true } ] }
Removing mention of "Core Access" program I'm not aware of one existing
@@ -25,7 +25,7 @@ Every month, the Mattermost community plans, builds, tests, documents, releases, 2. When a feature idea does not fit the scope of Team Edition, as [defined in the Mattermost Manifesto](http://www.mattermost.org/manifesto/#mattermost-teams), but benefits Enterprise Edition subscribers, a similar process as above is undertaken by the Enterprise Team. - 1. If change is considered high priority without prerequisites, a Fix Version may be applied to the Jira ticket for the Mattermost Enterprise Team to add for an upcoming monthly release. Priority decisions are influenced by discussion with Enterprise Edition subscribers, with special attention paid to members of the [Core Access](https://about.mattermost.com/core-access/) program. + 1. If change is considered high priority without prerequisites, a Fix Version may be applied to the Jira ticket for the Mattermost Enterprise Team to add for an upcoming monthly release. Priority decisions are influenced by discussion with Enterprise Edition subscribers. 2. If change is not seen as high priority or has prerequisites, it is assigned a Fix Version of `backlog` until this situation changes. 3. If an existing or potential Enterprise Edition subscriber needs a specific change that has not yet been assigned a Fix Version, they can contact the Mattermost Enterprise Team to discuss sponsoring the feature with Non-Recurring Engineering (NRE) funding to have it delivered before it would otherwise be added. Feature sponsorship can only be applied to features where Jira tickets exist.
Add note on trial data deletion window, plus link to submit feedback on additional Cloud data regions * Update cloud-subscriptions.rst * Updating sentence structure for consistency See feedback at
@@ -43,6 +43,13 @@ Monthly Cloud subscriptions renew automatically. Frequently Asked Questions --------------------------- +What happens when my 14-day trial period ends? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +At the end of the 14-day trial, you will lose access to your workspace until you have added your payment information to continue using your Mattermost Cloud workspace. + +If you do not add your payment information within 30 days, we will delete your Cloud workspace permanently and you will lose any associated data. + How am I billed for my Cloud monthly subscription? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -69,7 +76,9 @@ To cancel your subscription, please `contact us <https://customers.mattermost.co When will support for other regions be available? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Mattermost Cloud Enterprise will support data residency based on feedback from our customers. We appreciate feedback from our customers on regional support. +Mattermost Cloud Enterprise will support data residency based on feedback from our customers. + +If you require your data to reside in an area outside of the United States, please contact the product team via `[email protected] <[email protected]>`_, or consider `deploying one of our Self-Hosted options <https://mattermost.com/deploy>`_ that provides full control of your data. You may also work with `one of our European partners <https://mattermost.com/partners>`_ for deploying and hosting your Mattermost server. How is Mattermost Cloud secured? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Use logging instead of traceback Instead of just outputting to stderr the rtm connect failure it is more appropriate to use the python logging system and output a appropriate message (and pass in exc_info=True so that the traceback gets logged as well).
# mostly a proxy object to abstract how some of this works import json -import traceback +import logging from .server import Server from .exceptions import ParseResponseError +LOG = logging.getLogger(__name__) + class SlackClient(object): ''' @@ -52,7 +54,7 @@ class SlackClient(object): self.server.rtm_connect(use_rtm_start=with_team_state, **kwargs) return self.server.connected except Exception: - traceback.print_exc() + LOG.warn("Failed RTM connect", exc_info=True) return False def api_call(self, method, timeout=None, **kwargs):
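A self-contained sketch of the same logging pattern outside slackclient (the logger name and simulated failure are invented; note that `Logger.warn` is a deprecated alias, so this sketch uses `warning()`):

```python
import logging

logging.basicConfig(level=logging.WARNING)
LOG = logging.getLogger("rtm-demo")

try:
    raise ConnectionError("simulated RTM failure")
except Exception:
    LOG.warning("Failed RTM connect", exc_info=True)  # logs the message plus the full traceback
```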
Update test_wallet_commands Add test for CreateAddress
@@ -4,7 +4,7 @@ from neo.Implementations.Wallets.peewee.UserWallet import UserWallet from neo.Core.Blockchain import Blockchain from neocore.UInt160 import UInt160 from neocore.Fixed8 import Fixed8 -from neo.Prompt.Commands.Wallet import DeleteAddress, ImportToken, ImportWatchAddr, ShowUnspentCoins, SplitUnspentCoin +from neo.Prompt.Commands.Wallet import CreateAddress, DeleteAddress, ImportToken, ImportWatchAddr, ShowUnspentCoins, SplitUnspentCoin import shutil @@ -126,3 +126,23 @@ class UserWalletTestCase(WalletFixtureTestCase): wallet = self.GetWallet1(True) tx = SplitUnspentCoin(wallet, ['APRgMZHZubii29UXF9uFa6sohrsYupNAvx', 'gas', 0, 3], prompt_passwd=False) self.assertIsNotNone(tx) + + def test_7_create_address(self): + + wallet = self.GetWallet1(True) + + #not specifying a number of addresses + CreateAddress(None, wallet, None) + self.assertEqual(len(wallet.Addresses), 1) + + #trying to create too many addresses + CreateAddress(None, wallet, 5) + self.assertEqual(len(wallet.Addresses), 1) + + #should pass + success = CreateAddress(None, wallet, 1) + self.assertTrue(success) + + #check the number of addresses + self.assertEqual(len(wallet.Addresses), 2) +
feat(ldap): Validate additional required fields. If the user selects 'Custom' LDAP Directory, when they hit save, validate the additional required fields ('ldap_group_objectclass' and 'ldap_group_member_attribute') for this selection to function. Issue
@@ -44,6 +44,11 @@ class LDAPSettings(Document):
 				frappe.throw(_("Ensure the user and group search paths are correct."),
 					title=_("Misconfigured"))
 
+			if self.ldap_directory_server.lower() == 'custom':
+				if not self.ldap_group_member_attribute or not self.ldap_group_mappings_section:
+					frappe.throw(_("Custom LDAP Directory selected, please ensure 'LDAP Group Member attribute' and 'LDAP Group Mappings' are entered"),
+						title=_("Misconfigured"))
+
 		else:
 			frappe.throw(_("LDAP Search String must be enclosed in '()' and needs to contian the user placeholder {0}, eg sAMAccountName={0}"))
Fix error when no standards are found. If the year of the building does not fit in the date range of any standard, just return the first standard.
@@ -165,11 +165,19 @@ def zone_helper(locator, config): calculate_typology_file(locator, zone_df, year_construction, occupancy_type, typology_output_path) -def calc_category(standard_DB, year_array): +def calc_category(standard_db, year_array): def category_assignment(year): - return (standard_DB[(standard_DB['YEAR_START'] <= year) & (standard_DB['YEAR_END'] >= year)].STANDARD.values[0]) + within_year = (standard_db['YEAR_START'] <= year) & (standard_db['YEAR_END'] >= year) + standards = standard_db.STANDARD.values - category = np.vectorize(category_assignment)(year_array) + # Filter standards if found + if within_year.any(): + standards = standards[within_year] + + # Just return first value + return standards[0] + + category = np.array([category_assignment(y) for y in year_array]) return category
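A worked example of the fallback (the standards table is invented; only the pandas/numpy behavior from the diff is assumed): a year outside every range now returns the first standard instead of raising an IndexError.

```python
import numpy as np
import pandas as pd

standard_db = pd.DataFrame({
    'STANDARD':   ['STANDARD1', 'STANDARD2'],
    'YEAR_START': [1900, 1960],
    'YEAR_END':   [1959, 2020],
})

def category_assignment(year):
    within_year = (standard_db['YEAR_START'] <= year) & (standard_db['YEAR_END'] >= year)
    standards = standard_db.STANDARD.values
    if within_year.any():
        standards = standards[within_year]
    return standards[0]

print(np.array([category_assignment(y) for y in [1850, 1975]]))
# ['STANDARD1' 'STANDARD2']  <- 1850 predates every range, so the first standard is used
```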
Update README.rst Improved explanation of optional components. Hopefully avoids further user misunderstandings such as
@@ -45,6 +45,11 @@ You can do a minimal installation of ``MushroomRL`` with:
 Installing everything
 ---------------------
 
+``MushroomRL`` also contains some optional components, e.g., support for ``OpenAI Gym``
+environments, Atari 2600 games from the ``Arcade Learning Environment``, and support
+for physics simulators such as ``Pybullet`` and ``MuJoCo``.
+Support for these components is not enabled by default.
+
 To install the whole set of features, you will need additional packages installed.
 You can install everything by running:
Correct intro sentence re number of nodes The Docker and Kubernetes procedures create five nodes, but the Ubuntu procedure only creates two nodes.
@@ -23,8 +23,9 @@ environment on one of the following platforms: .. note:: - The guides in this chapter set up an environment with five Sawtooth validator - nodes. For a single-node environment, see :doc:`installing_sawtooth`. + The guides in this chapter set up an environment with multiple Sawtooth + validator nodes. For a single-node environment, see + :doc:`installing_sawtooth`. To get started, choose the guide for the platform of your choice:
Change special webcast filtering logic Check if event is ongoing
@@ -225,7 +225,7 @@ class MainCompetitionseasonHandler(CacheableHandler): for special_webcast in FirebasePusher.get_special_webcasts(): add = True for event in week_events: - if event.webcast: + if event.now and event.webcast: for event_webcast in event.webcast: if (special_webcast.get('type', '') == event_webcast.get('type', '') and special_webcast.get('channel', '') == event_webcast.get('channel', '') and
Update gridding_functions.py Changes interpolate function docstring to offer more specific explanation of 'hres' parameter
@@ -150,7 +150,8 @@ def interpolate(x, y, z, interp_type='linear', hres=50000, 2) "natural_neighbor", "barnes", or "cressman" from Metpy.mapping . Default "linear". hres: float - The horizontal resolution of the generated grid. Default 50000 meters. + The horizontal resolution of the generated grid, given in the same units as the + x and y parameters. Default 50000 meters. minimum_neighbors: int Minimum number of neighbors needed to perform barnes or cressman interpolation for a point. Default is 3.
Update Dockerfile Simplified Dockerfile to just include an xcube environment and CLI
# Image from https://hub.docker.com (syntax: repo/image:version) FROM continuumio/miniconda3:latest -# Person responsible -MAINTAINER [email protected] - +# Metadata +LABEL maintainer="[email protected]" LABEL name=xcube -LABEL version=0.7.1 +LABEL version=0.8.0.dev7 LABEL conda_env=xcube -# Ensure usage of bash (simplifies source activate calls) +# Ensure usage of bash (ensures conda calls succeed) SHELL ["/bin/bash", "-c"] # Update system and install dependencies RUN apt-get -y update && apt-get -y upgrade +# Allow editing files in container RUN apt-get -y install vim -# && apt-get -y install git build-essential libyaml-cpp-dev - # Install mamba as a much faster conda replacement. We specify an # explicit version number because (1) it makes installation of mamba # much faster and (2) mamba is still in beta, so it's best to stick # to a known-good version. -RUN conda install mamba=0.1.2 -c conda-forge +RUN conda install -c conda-forge mamba=0.7.14 # Setup conda environment # Copy yml config into image -ADD environment.yml /tmp/environment.yml +COPY environment.yml /tmp/environment.yml # Use mamba to create an environment based on the specifications in -# environment.yml. At present, evironments created by mamba can't be -# referenced by name from conda (presumably a bug), so we use --preix -# to specify an explicit path instead. +# environment.yml. RUN mamba env create --file /tmp/environment.yml -# Set work directory for xcube_server installation +# Set work directory for xcube installation RUN mkdir /xcube WORKDIR /xcube -# Copy local github repo into image (will be replaced by either git clone or as a conda dep) -RUN git clone https://github.com/dcs4cop/xcube-cds.git -RUN git clone https://github.com/dcs4cop/xcube-sh.git -RUN git clone https://github.com/dcs4cop/xcube-cci.git - -RUN echo 'HH2' - -RUN source activate xcube && cd xcube-sh && python setup.py develop && sed "s/- xcube/# - xcube/g" -i environment.yml && mamba env update -n xcube -RUN source activate xcube && cd xcube-cds && python setup.py develop && sed "s/- xcube/# - xcube/g" -i environment.yml && mamba env update -n xcube -RUN source activate xcube && cd xcube-cci && python setup.py develop && sed "s/- xcube/# - xcube/g" -i environment.yml && mamba env update -n xcube - -ADD . /xcube - -# Setup xcube_server package, specifying the environment by path rather -# than by name (see above). -RUN source activate xcube && python setup.py develop - - -ADD --chown=1000:1000 store_config.json store_config.json -ADD .cdsapirc /root/.cdsapirc +# Copy sources into xcube +COPY . /xcube -# Test xcube package -# ENV NUMBA_DISABLE_JIT 1 -# RUN source activate xcube && pytest +# Setup xcube package. +RUN source activate xcube && python setup.py install # Export web server port 8000 EXPOSE 8000 -# Start server +# Start bash, so we can invoke xcube CLI. ENTRYPOINT ["/bin/bash", "-c"] +# By default, activate xcube environment and print usage help. CMD ["source activate xcube && xcube --help"]
Inline enter/exit_call recursive guards GL issue libadalang#918
@@ -112,7 +112,8 @@ private package ${ada_lib_name}.Implementation is -- recursive calls. procedure Enter_Call - (Context : Internal_Context; Call_Depth : access Natural); + (Context : Internal_Context; Call_Depth : access Natural) + with Inline_Always; -- Increment the call depth in Context. If the depth exceeds Context's -- maximum, raise a Property_Error for "stack overflow". -- @@ -122,7 +123,8 @@ private package ${ada_lib_name}.Implementation is -- -- Put in Call_Depth the incremented call depth. - procedure Exit_Call (Context : Internal_Context; Call_Depth : Natural); + procedure Exit_Call (Context : Internal_Context; Call_Depth : Natural) + with Inline_Always; -- Decrement the call depth in Context. If Call_Depth does not match the -- current call depth, raise an Unexpected_Call_Depth.
Improve LocalRunner.initialize_tf_vars() Thanks to Fixes
@@ -132,11 +132,14 @@ class LocalRunner: def initialize_tf_vars(self): """Initialize all uninitialized variables in session.""" with tf.name_scope("initialize_tf_vars"): + uninited_set = [ + e.decode() + for e in self.sess.run(tf.report_uninitialized_variables()) + ] self.sess.run( tf.variables_initializer([ v for v in tf.global_variables() - if v.name.split(':')[0] in str( - self.sess.run(tf.report_uninitialized_variables())) + if v.name.split(':')[0] in uninited_set ])) def start_worker(self):
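The fix in isolation (TF1-style graph/session API; under TF2 these calls live in `tf.compat.v1`): decode the uninitialized names once into a Python list, instead of substring-matching each variable against a stringified byte array.

```python
import tensorflow as tf  # TensorFlow 1.x assumed

a = tf.Variable(1.0, name='a')
b = tf.Variable(2.0, name='b')

sess = tf.Session()
sess.run(a.initializer)  # leave b uninitialized on purpose

uninited_set = [e.decode() for e in sess.run(tf.report_uninitialized_variables())]
sess.run(tf.variables_initializer(
    [v for v in tf.global_variables() if v.name.split(':')[0] in uninited_set]))
print(sess.run(b))  # 2.0 -- only b was (re)initialized
```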
Remove broken test introduced here My guess is this broke when the year or month changed. Even checking out the original version now this test fails
@@ -62,13 +62,14 @@ describe('Kpi Directive', function () { assert.equal(result, expected); }); - it('tests shows percent info from month parameter', function () { - $location.search('month', new Date().getMonth()); - var expected = true; - - var result = controller.showPercentInfo(); - assert.equal(result, expected); - }); + // FIXME + // it('tests shows percent info from month parameter', function () { + // $location.search('month', new Date().getMonth()); + // var expected = true; + + // var result = controller.showPercentInfo(); + // assert.equal(result, expected); + // }); it('tests not shows percent info from wrong month parameter', function () { $location.search('month', new Date().getMonth() + 1);
Slack sanitization Multiple links in an incoming message will be converted back to their original content
@@ -21,6 +21,7 @@ Added - Add command line argument ``rasa x --config CONFIG``, to specify path to the policy and NLU pipeline configuration of your bot (default: ``config.yml``) + Changed ------- - Do not retrain the entire Core model if only the ``templates`` section of the domain is changed. @@ -37,6 +38,7 @@ Fixed - Fixed rasa init showing traceback error when user does Keyboard Interrupt before choosing a project path - ``CountVectorsFeaturizer`` featurizes intents only if its analyzer is set to ``word`` - fixed bug where facebooks generic template was not rendered when buttons were ``None`` +- Fixed issue in converting multi links in incoming message as part of Slack sanitization [1.4.5] - 2019-11-14 ^^^^^^^^^^^^^^^^^^^^
Upload validations to swift on undercloud install Implements: blueprint store-validations-in-swift Depends-On:
@@ -133,5 +133,8 @@ if [ "$(hiera mistral_api_enabled)" = "true" ]; then if [ "$(hiera enable_validations)" = "true" ]; then echo Execute copy_ssh_key validations openstack workflow execution create tripleo.validations.v1.copy_ssh_key + + echo Upload validations to Swift + openstack action execution run tripleo.validations.upload fi fi
Fix get_force_authn return value to be compatible with older pysaml2. Older pysaml2 cannot parse False as a value for force_authn.
@@ -58,7 +58,7 @@ def get_force_authn(context, config, sp_config): - the cookie, as it has been stored by the proxy on a redirect to the DS note: the frontend should have been set to mirror the force_authn value. - The value is either "true" or False + The value is either "true" or None """ mirror = config.get(SAMLBackend.KEY_MIRROR_FORCE_AUTHN) from_state = mirror and context.state.get(Context.KEY_FORCE_AUTHN) @@ -67,7 +67,7 @@ def get_force_authn(context, config, sp_config): ) from_config = sp_config.getattr("force_authn", "sp") is_set = str(from_state or from_context or from_config).lower() == "true" - value = is_set and "true" + value = "true" if is_set else None return value
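The return-value change, reduced to its essentials (a stand-in function, not SATOSA's API): the old `is_set and "true"` yielded the boolean `False` when unset, which older pysaml2 could not parse; the conditional expression yields `None` instead.

```python
def force_authn_value(from_state, from_context, from_config):
    is_set = str(from_state or from_context or from_config).lower() == "true"
    return "true" if is_set else None

print(force_authn_value(None, None, "true"))  # 'true'
print(force_authn_value(None, None, None))    # None (was False before the fix)
```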
input kpoints for bands mode can be a list of fractional coords or a list of Kpoint objects
@@ -122,9 +122,9 @@ class BoltztrapRunner(object): shape. This is useful to correct the often underestimated band gap in DFT. Default is 0.0 (no scissor) kpt_line: - list/array of kpoints in fractional coordinates for BANDS mode - calculation (standard path of high symmetry k-points is - automatically set as default) + list/array of kpoints in fractional coordinates or list of + Kpoint objects for BANDS mode calculation (standard path of + high symmetry k-points is automatically set as default) tmax: Maximum temperature (K) for calculation (default=1300) tgrid: @@ -457,9 +457,8 @@ class BoltztrapRunner(object): in kpath.get_kpoints(coords_are_cartesian=False)[ 0]] - self.kpt_line = np.array( - [kp.frac_coords for kp in self.kpt_line]) - else: + self.kpt_line = [kp.frac_coords for kp in self.kpt_line] + elif type(self.kpt_line[0]) == Kpoint: self.kpt_line = [kp.frac_coords for kp in self.kpt_line] with open(output_file, 'w') as fout: