message | diff
---|---|
Removing extra iter around items
Removing extra iter around items | @@ -2562,7 +2562,7 @@ def _hw_data(osdata):
'productname': 'hw.product',
'serialnumber': 'hw.serialno',
'uuid': 'hw.uuid'}
- for key, oid in iter(hwdata.items()):
+ for key, oid in hwdata.items():
value = __salt__['cmd.run']('{0} -n {1}'.format(sysctl, oid))
if not value.endswith(' value is not available'):
grains[key] = _clean_value(key, value)
|
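A note on the change above: in Python 3, `dict.items()` already returns an iterable view, so wrapping it in `iter()` is redundant. A minimal sketch with hypothetical values (not the actual Salt grains data):

```python
hwdata = {'productname': 'hw.product', 'serialnumber': 'hw.serialno'}

# dict.items() is directly iterable; iter() adds an extra call but changes nothing.
for key, oid in hwdata.items():
    print(key, oid)

# Wrapping in iter() yields exactly the same pairs.
assert list(iter(hwdata.items())) == list(hwdata.items())
```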
[IMPR] Fix html2unicode
Fix ignoring of convertIllegalHtmlEntities. Previously, ignoring any
entity inside range(128, 160) would not work.
Do not attempt to resolve protected entities (note that this didn't
work when '&' was protected in a different way).
Add a lot of tests. | @@ -5535,7 +5535,7 @@ def html2unicode(text, ignore=None):
# This regular expression will match any decimal and hexadecimal entity and
# also entities that might be named entities.
entityR = re.compile(
- r'&(?:amp;)?(#(?P<decimal>\d+)|#x(?P<hex>[0-9a-fA-F]+)|(?P<name>[A-Za-z]+));')
+ r'&(#(?P<decimal>\d+)|#x(?P<hex>[0-9a-fA-F]+)|(?P<name>[A-Za-z]+));')
# These characters are Html-illegal, but sadly you *can* find some of
# these and converting them to chr(decimal) is unsuitable
convertIllegalHtmlEntities = {
@@ -5569,7 +5569,8 @@ def html2unicode(text, ignore=None):
}
# ensuring that illegal &#129; &#141; and &#157;, which have no known values,
# don't get converted to chr(129), chr(141) or chr(157)
- ignore = set(ignore) | set([129, 141, 157])
+ ignore = (set(map(lambda x: convertIllegalHtmlEntities.get(x, x), ignore)) |
+ set([129, 141, 157]))
def handle_entity(match):
if match.group('decimal'):
@@ -5583,10 +5584,10 @@ def html2unicode(text, ignore=None):
unicodeCodepoint = htmlentitydefs.name2codepoint[name]
else:
unicodeCodepoint = False
- try:
+
+ if unicodeCodepoint in convertIllegalHtmlEntities:
unicodeCodepoint = convertIllegalHtmlEntities[unicodeCodepoint]
- except KeyError:
- pass
+
if unicodeCodepoint and unicodeCodepoint not in ignore:
if unicodeCodepoint > sys.maxunicode:
# solve narrow Python 2 build exception (UTF-16)
|
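The essence of the ignore fix above, isolated into a small sketch: the caller-supplied `ignore` codepoints are passed through the same illegal-entity mapping before the lookup, so ignoring either the raw or the converted codepoint now works. The two-entry mapping here is a hypothetical subset, not pywikibot's full table:

```python
# Hypothetical subset of convertIllegalHtmlEntities (codepoint -> replacement).
convert_illegal = {150: 8211, 151: 8212}  # &#150; -> en dash, &#151; -> em dash

def build_ignore(ignore):
    # Map user-supplied codepoints through the conversion table, then add the
    # three entities with no known value that must never be converted.
    return set(convert_illegal.get(x, x) for x in ignore) | {129, 141, 157}

# Asking to ignore 150 now also covers its converted form 8211.
assert 8211 in build_ignore([150])
```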
Update OledSsd1306.py
Updated to be able to test on virtualArduino | +# config
+port = "COM3"
+# Code to be able to use this script with virtalArduino
+if ('virtual' in globals() and virtual):
+ virtualArduino = Runtime.start("virtualArduino", "VirtualArduino")
+ virtualArduino.connect(port)
+#
# Initiate the Arduino
-arduino = Runtime.createAndStart("Arduino","Arduino")
-arduino.connect("COM3")
+arduino = Runtime.start("Arduino","Arduino")
+arduino.connect(port)
# Alternativley you can use the RasPi service to connect the OLED to the GPIO pins
# raspi = Runtime.createAndStart("RasPi","RasPi")
# Select the Arduino as controller for the OLED on bus 1 and i2c address 0x3C
-oled = Runtime.createAndStart("OLED","OledSsd1306")
+oled = Runtime.start("OLED","OledSsd1306")
# From version 1.0.2316 use attach instead of setController
# oled.setController(arduino,"1","0x3C")
oled.attach(arduino,"1","0x3C")
|
abc.GuildChannel.set_permissions can raise NotFound.
Fix | @@ -549,6 +549,8 @@ class GuildChannel:
You do not have permissions to edit channel specific permissions.
HTTPException
Editing channel specific permissions failed.
+ NotFound
+ The role or member being edited is not part of the guild.
InvalidArgument
The overwrite parameter invalid or the target type was not
:class:`Role` or :class:`Member`.
|
confirm_dialog: Fix loading spinner in confirm button in night mode.
We change the color of the loading indicator to black only if we are not
using night mode; in night mode the indicator remains white. | @@ -5,6 +5,7 @@ import render_confirm_dialog_heading from "../templates/confirm_dialog_heading.h
import * as blueslip from "./blueslip";
import * as overlays from "./overlays";
+import * as settings_data from "./settings_data";
/*
Look for confirm_dialog in settings_user_groups
@@ -45,12 +46,14 @@ export function show_confirm_dialog_spinner() {
$(".confirm_dialog_submit_button .loader").css("display", "inline-block");
$(".confirm_dialog_submit_button span").hide();
$(".confirm_dialog_submit_button").prop("disabled", true);
+ if (!settings_data.using_dark_theme()) {
$(".confirm_dialog_submit_button object").on("load", function () {
const doc = this.getSVGDocument();
const $svg = $(doc).find("svg");
$svg.find("rect").css("fill", "#000");
});
}
+}
export function launch(conf) {
const html = render_confirm_dialog({fade: conf.fade});
|
Fix non-existent MO in Report Pending Links
HG--
branch : feature/microservices | @@ -88,7 +88,10 @@ class ReportPendingLinks(object):
"remote_id": discovery["problems"]["lldp"][iface]}}
if "Pending link:" in discovery["problems"]["lldp"][iface]:
pend_str = rg.match(discovery["problems"]["lldp"][iface])
+ try:
rmo = ManagedObject.objects.get(name=pend_str.group("remote_mo"))
+ except ManagedObject.DoesNotExist:
+ continue
# mo = mos_id.get(mo_id, ManagedObject.get_by_id(mo_id))
problems[mo_id][iface] = {
"problem": "Not found iface on remote",
|
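For context on the fix above: in Django's ORM, `Model.objects.get()` raises the model's `DoesNotExist` exception when no row matches, so the report now skips the entry instead of crashing. A generic sketch of the pattern (the model class and names are placeholders, not NOC's actual code):

```python
def resolve_remote_objects(model, names):
    """Return the objects whose names exist; silently skip missing ones."""
    resolved = []
    for name in names:
        try:
            resolved.append(model.objects.get(name=name))
        except model.DoesNotExist:
            # The referenced remote MO is gone; skip it rather than abort.
            continue
    return resolved

# e.g. resolve_remote_objects(ManagedObject, ["sw-01", "deleted-sw"])
```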
Update README.md
comma | @@ -26,7 +26,7 @@ MPF is written in Python 3. It is compatible with Windows, Mac, and Linux using
MPF is MIT-licensed, developed by fun people, and supported by a vibrant pinball-loving community. It is a work in progress we are actively developing. We review commits weekly.
-See also the [MPF Media Controller](https://github.com/missionpinball/mpf-mc/) (based on [Kivy](http://kivy.org))
+See also the [MPF Media Controller](https://github.com/missionpinball/mpf-mc/) (based on [Kivy](http://kivy.org)),
which is used to control graphics and sounds, including high-res LCD displays, classic DMDs, and modern RGB LED DMDs.
Visit the MPF project homepage is here : http://missionpinball.org
|
windows.cmake fixes:
- on UWP, always set /INCREMENTAL:NO as the cmake default
  is YES, which clashes with other UWP linker options
- make stack size configurable via cmake option | # fips cmake settings file for Windows platform with MSVC.
#-------------------------------------------------------------------------------
+set(FIPS_WINDOWS_STACK_SIZE 4194304 CACHE STRING "Windows process stack size (/STACK:xxx)")
+
# detect 32-bit or 64-bit target platform
if (CMAKE_CL_64)
set(FIPS_PLATFORM WIN64)
@@ -65,7 +67,11 @@ set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${FIPS_VS_CRT_FLAGS}d")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${FIPS_VS_CRT_FLAGS}")
# define exe linker/librarian flags
-set(CMAKE_EXE_LINKER_FLAGS "/STACK:5000000 /machine:${FIPS_WINDOWS_PLATFORM_NAME}")
+set(CMAKE_EXE_LINKER_FLAGS "/STACK:${FIPS_WINDOWS_STACK_SIZE} /machine:${FIPS_WINDOWS_PLATFORM_NAME}")
+# cmake sets INCREMENTAL:YES, but UWP doesn't like that
+if (FIPS_UWP)
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /INCREMENTAL:NO")
+endif()
set(CMAKE_EXE_LINKER_FLAGS_DEBUG "/DEBUG")
# librarian flags
|
m1n1.constructutils: Add a global struct address tracer
Currently unconditional, kind of hacky. Good for HV use. | @@ -304,6 +304,7 @@ class ConstructClassBase(Reloadable, metaclass=ReloadableConstructMeta):
self._apply(obj)
+ g_struct_trace.add((self._addr, f"{cls.name} (end: {self._addr + size:#x})"))
return self
@classmethod
@@ -398,6 +399,23 @@ class ConstructClass(ConstructClassBase, Container):
if addr is not None:
setattr(obj, addr_field, addr)
+ @classmethod
+ def _parse(cls, stream, context, path):
+ self = ConstructClassBase._parse.__func__(cls, stream, context, path)
+
+ for key in self:
+ if key.startswith('_'):
+ continue
+ try:
+ val = int(self[key])
+ except:
+ continue
+ if (0x1000000000 <= val <= 0x1f00000000 or
+ 0xf8000000000 <= val <= 0xff000000000 or
+ 0xffffff8000000000 <= val <= 0xfffffff000000000):
+ g_struct_trace.add((val, f"{cls.name}.{key}"))
+ return self
+
def _apply(self, obj):
self.update(obj)
@@ -420,4 +438,8 @@ class ConstructValueClass(ConstructClassBase):
def _apply(self, obj):
self.value = obj
-__all__ = ["ConstructClass", "ConstructValueClass", "Dec"]
+def show_struct_trace(log=print):
+ for addr, desc in sorted(list(g_struct_trace)):
+ log(f"{addr:>#18x}: {desc}")
+
+__all__ = ["ConstructClass", "ConstructValueClass", "Dec", "ROPointer", "show_struct_trace"]
|
Suppressed error in filelock stubs
Added a # type: ignore comment to the `timeout` property setter in filelock to suppress errors about type mismatch between setter and getter. | @@ -23,7 +23,7 @@ class BaseFileLock:
@property
def timeout(self) -> float: ...
@timeout.setter
- def timeout(self, value: Union[int, str, float]) -> None: ...
+ def timeout(self, value: Union[int, str, float]) -> None: ... # type: ignore
@property
def is_locked(self) -> bool: ...
def acquire(self, timeout: Optional[float] = ..., poll_intervall: float = ...) -> _Acquire_ReturnProxy: ...
|
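For context, the mismatch being silenced above looks roughly like this: the getter returns `float` while the setter accepts a wider union, which strict type checkers flag unless told to ignore it. A minimal sketch of the pattern, not the actual filelock stub:

```python
from typing import Union

class ExampleLock:
    def __init__(self) -> None:
        self._timeout = 0.0

    @property
    def timeout(self) -> float:
        return self._timeout

    @timeout.setter
    def timeout(self, value: Union[int, str, float]) -> None:  # type: ignore
        # The setter deliberately accepts more types than the getter returns,
        # so the getter/setter mismatch error is suppressed.
        self._timeout = float(value)
```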
Accept InputFile/InputFileBig on .upload_file for
Now an input file thumbnail can also be specified, instead of
needing to reupload the file every time. | @@ -624,6 +624,9 @@ class TelegramBareClient:
part_size_kb = get_appropriated_part_size(file_size)
file_name = os.path.basename(file_path)
"""
+ if isinstance(file, (InputFile, InputFileBig)):
+ return file # Already uploaded
+
if isinstance(file, str):
file_size = os.path.getsize(file)
elif isinstance(file, bytes):
|
Update docs/modeling/example-fitting-model-sets.rst
Update docs/modeling/example-fitting-model-sets.rst | @@ -8,7 +8,7 @@ But getting the data into the right shape can be a bit tricky.
The time savings could be worth the effort. In the example below, if we change
the width*height of the data cube to 500*500 it takes 140 ms on a 2015 MacBook Pro
to fit the models using model sets. Doing the same fit by looping over the 500*500 models
-takes 1.5 minutes, more 600 times slower..
+takes 1.5 minutes, more than 600 times slower.
In the example below, we create a 3D data cube where the first dimension is a ramp --
for example as from non-destructive readouts of an IR detector. So each pixel has a
@@ -16,7 +16,9 @@ depth along a time axis, and flux that results a total number of counts that is
increasing with time. We will be fitting a 1D polynomial vs. time to estimate the
flux in counts/second (the slope of the fit).
-First, import the necessary libraries::
+First, import the necessary libraries:
+
+.. doctest-requires:: matplotlib
>>> import numpy as np
>>> import matplotlib.pyplot as plt
@@ -47,7 +49,7 @@ parametric model (model sets currently only work with linear models and fitters)
We need to get the data to be fit into the right shape. It's not possible to just feed
the 3D data cube. In this case, the time axis can be one dimensional.
-The fluxes have to be organized into an array that is of shape `width*height,depth` -- in
+The fluxes have to be organized into an array that is of shape ``width*height,depth`` -- in
other words, we are reshaping to flatten last two axes and transposing to put them first::
>>> pixels = image.reshape((depth, width*height))
@@ -57,7 +59,7 @@ other words, we are reshaping to flatten last two axes and transposing to put th
x axis is one dimensional: (10,)
y axis is two dimensional, N by len(x): (12, 10)
-Fit the model. It does the looping over the N models implicitly::
+Fit the model. It fits the N models simultaneously::
>>> new_model = fit(line, x=t, y=y)
>>> print("We fit %d models" % len(new_model))
@@ -102,7 +104,9 @@ Now inspect the model::
[ 0. 1.01197745 2.04089388 3.10007038 3.8945121 4.98888927
6.0580331 7.07146685 7.66714363 9.11671214 10.12808644 11.43471289]]
-Plot the fit along a couple of pixels::
+Plot the fit along a couple of pixels:
+
+.. doctest-requires:: matplotlib
>>> def plotramp(t, image, best_fit, row, col):
>>> plt.plot(t, image[:, row, col], '.', label='data pixel %d,%d' % (row, col))
|
Fix for session shenanigans with WebsocketDemultiplexer
* Fix for session shenanigans with WebsocketDemultiplexer
Session data was getting lost in the demux due to the session getting
saved after only the first connect/disconnect consumer was run.
* fix for flake8
* flake8 again
flake8 again | @@ -42,7 +42,13 @@ def channel_session(func):
def inner(message, *args, **kwargs):
# Make sure there's NOT a channel_session already
if hasattr(message, "channel_session"):
+ try:
return func(message, *args, **kwargs)
+ finally:
+ # Persist session if needed
+ if message.channel_session.modified:
+ message.channel_session.save()
+
# Make sure there's a reply_channel
if not message.reply_channel:
raise ValueError(
@@ -155,7 +161,13 @@ def http_session(func):
def inner(message, *args, **kwargs):
# Make sure there's NOT a http_session already
if hasattr(message, "http_session"):
+ try:
return func(message, *args, **kwargs)
+ finally:
+ # Persist session if needed (won't be saved if error happens)
+ if message.http_session is not None and message.http_session.modified:
+ message.http_session.save()
+
try:
# We want to parse the WebSocket (or similar HTTP-lite) message
# to get cookies and GET, but we need to add in a few things that
|
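The shape of the fix above, reduced to a generic sketch: when a session is already attached by an outer decorator, the inner call is wrapped in `try/finally` so modifications are persisted even though only the first consumer in the chain would normally save. The `message` object here is a placeholder, not Django Channels' real type:

```python
import functools

def channel_session(func):
    """Sketch of the decorator shape after the fix (session creation elided)."""
    @functools.wraps(func)
    def inner(message, *args, **kwargs):
        if hasattr(message, "channel_session"):
            try:
                return func(message, *args, **kwargs)
            finally:
                # Persist changes even when this consumer runs nested inside
                # another decorated consumer.
                if message.channel_session.modified:
                    message.channel_session.save()
        # Normal path: the real decorator would create the session here.
        return func(message, *args, **kwargs)
    return inner
```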
Fix policy for DNS section
All parameters under DNS section have update policy UNSUPPORTED, hence also the parent section should be UNSUPPORTED. | @@ -1197,7 +1197,7 @@ class SlurmSettingsSchema(BaseSchema):
"""Represent the schema of the Scheduling Settings."""
scaledown_idletime = fields.Int(metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP})
- dns = fields.Nested(DnsSchema, metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP})
+ dns = fields.Nested(DnsSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
queue_update_strategy = fields.Str(
validate=validate.OneOf([strategy.value for strategy in QueueUpdateStrategy]),
metadata={"update_policy": UpdatePolicy.IGNORED},
|
Update install.rst
Correcting typo Postge -> Postgre | @@ -9,7 +9,7 @@ The Data Cube is a set of python code with dependencies including:
* Python 3.5+ (3.6 recommended)
* GDAL
-* PostgeSQL database
+* PostgreSQL database
These dependencies along with the target operating system environment should be considered when deciding how to install Data Cube to meet your system requirements.
|
setup.py: fix installation
fix all remaining issues of | @@ -46,7 +46,7 @@ recursively_include(package_data, 'pythonforandroid/recipes',
recursively_include(package_data, 'pythonforandroid/bootstraps',
['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',
'*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',
- '*.gradle', ])
+ '*.gradle', '.gitkeep', 'gradlew*', '*.jar', ])
recursively_include(package_data, 'pythonforandroid/bootstraps',
['sdl-config', ])
recursively_include(package_data, 'pythonforandroid/bootstraps/webview',
|
[travis] add all dependencies
to make all unit tests run, and none skipped (hopefully), add a list of
dependencies | @@ -10,6 +10,15 @@ before_install:
install:
- pip install -U coverage==4.3 pytest pytest-mock
- pip install codeclimate-test-reporter
+ - pip install i3-py Pillow Babel DateTime python-dateutil
+ - pip install dbus-python docker feedparser i3ipc
+ - pip install libvirt-python math-utils
+ - pip install multiprocessing netifaces power
+ - pip install psutil pygit2 pytz random
+ - pip install requests simplejson
+ - pip install socket speedtest suntime
+ - pip install tempfile tkinter tzlocal
+ - pip install taskw webbrowser xkbgroup yubico
script:
- coverage run --source=. -m pytest tests -v
- CODECLIMATE_REPO_TOKEN=40cb00907f7a10e04868e856570bb997ab9c42fd3b63d980f2b2269433195fdf codeclimate-test-reporter
|
Document single-read rescue
See | @@ -185,7 +185,7 @@ Paired-end read name check
When reading paired-end files, Cutadapt checks whether the read names match.
Only the part of the read name before the first space is considered. If the
-read name ends with ``/1`` or ``/2``, then that is also ignored. For example,
+read name ends with ``1`` or ``2``, then that is also ignored. For example,
two FASTQ headers that would be considered to denote properly paired reads are::
@my_read/1 a comment
@@ -202,10 +202,35 @@ and::
@my_read/2;1
-Since the ``/1`` and ``/2`` are ignored only if the occur at the end of the read
+Since the ``1`` and ``2`` are ignored only if the occur at the end of the read
name, and since the ``;1`` is considered to be part of the read name, these
reads will not be considered to be propely paired.
+
+Rescuing single reads from paired-end reads that were filtered
+--------------------------------------------------------------
+
+When trimming and filtering paired-end reads, Cutadapt always discards entire read pairs. If you
+want to keep one of the reads, you need to write the filtered read pairs to an output file and
+postprocess it.
+
+For example, assume you are using ``-m 30`` to discard too short reads. Cutadapt discards all
+read pairs in which just one of the reads is too short (but see the ``--pair-filter`` option).
+To recover those (individual) reads that are long enough, you can first use the
+``--too-short-(paired)-output`` options to write the filtered pairs to a file, and then postprocess
+those files to keep only the long enough reads.
+
+
+ cutadapt -m 30 -q 20 -o out.1.fastq.gz -p out.2.fastq.gz --too-short-output=tooshort.1.fastq.gz --too-short-paired-output=tooshort.2.fastq.gz in.1.fastq.gz in.2.fastq.gz
+ cutadapt -m 30 -o rescued.a.fastq.gz tooshort.1.fastq.gz
+ cutadapt -m 30 -o rescued.b.fastq.gz tooshort.2.fastq.gz
+
+The two output files ``rescued.a.fastq.gz`` and ``rescued.b.fastq.gz`` contain those individual
+reads that are long enough. Note that the file names do not end in ``.1.fastq.gz`` and
+``.2.fastq.gz`` to make it very clear that these files no longer contain synchronized paired-end
+reads.
+
+
Other things (unfinished)
-------------------------
|
Remove instructions to push to ross repository
Core developers should push to their own fork as well. | @@ -132,18 +132,4 @@ The following blog posts have some good information on how to write commit messa
Step 5: Push changes to the main repo
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-For contributors
-++++++++++++++++
To create a Pull Request (PR), refer to `the github PR guide <https://help.github.com/articles/about-pull-requests/>`_.
-
-For core developers
-+++++++++++++++++++
-If there are only a few, unrelated commits:
-
-::
-
- git fetch upstream
- git rebase upstream/master
- git push upstream my-feature-branch:master
-
|
Change websocket ping thread closing to be more responsive
Use threading.Event to close the thread more easily | @@ -123,7 +123,7 @@ class BitSharesWebsocket(Events):
self.user = user
self.password = password
self.keep_alive = keep_alive
- self.running = True
+ self.run_event = threading.Event()
if isinstance(urls, cycle):
self.urls = urls
elif isinstance(urls, list):
@@ -197,19 +197,17 @@ class BitSharesWebsocket(Events):
self.__events__.index('on_market'),
market[0], market[1])
- # We keep the connetion alive by requesting a short object
- def ping(self):
- while self.running:
- log.debug('Sending ping')
- self.get_objects(["2.8.0"])
- time.sleep(self.keep_alive)
-
self.keepalive = threading.Thread(
- target=ping,
- args=(self,)
+ target=self._ping
)
self.keepalive.start()
+ def _ping(self):
+ # We keep the connection alive by requesting a short object
+ while not self.run_event.wait(self.keep_alive):
+ log.debug('Sending ping')
+ self.get_objects(["2.8.0"])
+
def process_notice(self, notice):
""" This method is called on notices that need processing. Here,
we call ``on_object`` and ``on_account`` slots.
@@ -282,9 +280,6 @@ class BitSharesWebsocket(Events):
""" Called when websocket connection is closed
"""
log.debug('Closing WebSocket connection with {}'.format(self.url))
- if self.keepalive and self.keepalive.is_alive():
- self.keepalive.do_run = False
- self.keepalive.join()
def run_forever(self):
""" This method is used to run the websocket app continuously.
@@ -292,7 +287,7 @@ class BitSharesWebsocket(Events):
connected with the provided APIs
"""
cnt = 0
- while self.running:
+ while not self.run_event.is_set():
cnt += 1
self.url = next(self.urls)
log.debug("Trying to connect to node %s" % self.url)
@@ -328,10 +323,11 @@ class BitSharesWebsocket(Events):
def close(self):
""" Closes the websocket connection and waits for the ping thread to close
- Can only be called after run_forever() is called
"""
- self.running = False
+ self.run_event.set()
self.ws.close()
+
+ if self.keepalive and self.keepalive.is_alive():
self.keepalive.join()
def get_request_id(self):
|
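The trick behind the change above is that `threading.Event.wait(timeout)` sleeps for at most `timeout` seconds but returns `True` immediately once the event is set, so the ping loop can be interrupted mid-sleep instead of waiting out a full `time.sleep()`. A standalone sketch of the pattern:

```python
import threading
import time

run_event = threading.Event()

def ping_loop(interval):
    # wait() returns False on timeout (keep pinging) and True as soon as
    # run_event.set() is called, which ends the loop promptly.
    while not run_event.wait(interval):
        print("ping", time.strftime("%H:%M:%S"))

t = threading.Thread(target=ping_loop, args=(1.0,))
t.start()
time.sleep(3.5)
run_event.set()   # wakes the sleeping wait() immediately
t.join()
```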
llvm, state: Export params and context structures and initializers
Forward both from the underlying function. States don't add anything
extra. | @@ -2023,6 +2023,18 @@ class State_Base(State):
def _assign_default_state_name(self, context=None):
return False
+ def get_param_struct_type(self):
+ return self.function_object.get_param_struct_type()
+
+ def get_param_initializer(self):
+ return self.function_object.get_param_initializer()
+
+ def get_context_struct_type(self):
+ return self.function_object.get_context_struct_type()
+
+ def get_context_initializer(self):
+ return self.function_object.get_context_initializer()
+
@staticmethod
def _get_state_function_value(owner, function, variable):
"""Execute the function of a State and return its value
|
Update basic-skill.md
* Update basic-skill.md
Small wording changes
* Update docs/tutorials/basic-skill.md | # Creating a Basic Skill
-We will create a basic skill that makes opsdroid answer the text "how are you". This skill will be very similar to the [hello skill](../extending/skills.md#hello-world) found in the documentation.
-The video tutorial for creating your own skill is also available [here](https://www.youtube.com/watch?v=gk7JN4e5l_4&index=3&list=PLViQCHlMbEq5nZL6VNrUxu--Of1uCpflq)
+
+We will create a basic skill that makes opsdroid respond to the text "how are you". This skill will be very similar to the [hello skill](docs/extending/skills/#hello-world) found in the documentation.
+The video tutorial for creating your own skill is also available [here](https://www.youtube.com/watch?v=gk7JN4e5l_4&index=3&list=PLViQCHlMbEq5nZL6VNrUxu--Of1uCpflq).
*If you need help or if you are unsure about something join our* [matrix channel](https://riot.im/app/#/room/#opsdroid-general:matrix.org) *and ask away! We are more than happy to help you.*
@@ -82,7 +83,7 @@ class MySkill(Skill):
await message.respond('Good thanks! My load average is 0.2, 0.1, 0.1.')
```
-Our skill is done and opsdroid will be able to answer like in the [Opsdroid main page](https://opsdroid.github.io). The next thing that is left to do is to add the skill to the opsdroid configuration file.
+Our skill is done and opsdroid will be able to respond like in the [Opsdroid main page](https://opsdroid.github.io). The final thing left to do is to add the skill to the opsdroid configuration file.
#### Adding the Skill To Configuration
@@ -90,7 +91,7 @@ For the sake of simplicity we will assume the following:
- The configuration file is located at: `~/.opsdroid/configuration.yaml`
- Your skill is located at: `~/documents/skill-howareyou`
-Open your `configuration.yaml` file and under the skills add the name and path of your skill like such:
+Open your `configuration.yaml` file and under the skills section, add the name and path of your skill:
```yaml
skills:
@@ -98,5 +99,4 @@ skills:
path: /Users/username/documents/skill-howareyou
```
-As mentioned in the [introduction](introduction.md), the indentation and the use of spaces instead of tabs is very important. If you have any issues when running opsdroid check both of this things, it might be a problem with a space.
-
+As mentioned in the [introduction](introduction.md), the indentation and the use of spaces instead of tabs is very important. If you have any issues when running opsdroid check both of these things; it might be a problem with a space.
\ No newline at end of file
|
[DOCS] Add a Spark DataFrame example
* Update explore_expectations_in_a_notebook.rst
Add a Spark DataFrame example. This brings visibility to the SparkDFDataset API.
As a Spark user, I assumed that it was necessary to configure a data context to connect to a Spark DataFrame, but I realized afterwards that the `SparkDFDataset` API is all I need. | @@ -51,7 +51,9 @@ All of these steps take place within your notebook:
If you wish to load data from somewhere else (e.g. from a SQL database or blob store), please fetch a copy of the data locally. Alternatively, you can :ref:`configure a Data Context with Datasources <tutorials__getting_started__connect_to_data>`, which will allow you to take advantage of more of Great Expectations' advanced features.
- As another option, if you have already instantiated a ``pandas.Dataframe``, you can use ``from_pandas``:
+ As alternatives, if you have already instantiated :
+
+ - a ``pandas.Dataframe``, you can use ``from_pandas``:
.. code-block:: python
@@ -61,6 +63,18 @@ All of these steps take place within your notebook:
This method will convert your boring old pandas ``DataFrame`` into a new and exciting great_expectations ``PandasDataset``. The two classes are absolutely identical, except that ``PandasDataset`` has access to Great Expectations' methods.
+ - a ``Spark DataFrame``, you can use ``SparkDFDataset``:
+
+ .. code-block:: python
+
+ from great_expectations.dataset.sparkdf_dataset import SparkDFDataset
+
+ my_df = SparkDFDataset(my_spark_dataframe)
+
+ This method will create an object with access to Great Expectations' methods, such as ``ProfilingResultsPageRenderer``.
+
+
+
3. **Explore your data and add Expectations.**
Each of the methods in step 1 will produce ``my_df``, a ``PandasDataset``. ``PandasDataset`` is a subclass of ``pandas.DataFrame``, which means that you can use all of pandas' normal methods on it.
|
Increase timeout for terraform commands
Fixes: | @@ -30,7 +30,7 @@ class Terraform(object):
cmd = f"terraform init -upgrade {self.path}"
else:
cmd = f"terraform init {self.path}"
- run_cmd(cmd)
+ run_cmd(cmd, timeout=1200)
def apply(self, tfvars, bootstrap_complete=False):
"""
@@ -45,7 +45,7 @@ class Terraform(object):
cmd = f"terraform apply '-var-file={tfvars}' -auto-approve -var bootstrap_complete=true '{self.path}'"
else:
cmd = f"terraform apply '-var-file={tfvars}' -auto-approve '{self.path}'"
- run_cmd(cmd, timeout=900)
+ run_cmd(cmd, timeout=1500)
def destroy(self, tfvars):
"""
@@ -56,7 +56,10 @@ class Terraform(object):
"""
logger.info("Destroying the cluster")
- run_cmd(f"terraform destroy '-var-file={tfvars}' -auto-approve {self.path}")
+ run_cmd(
+ f"terraform destroy '-var-file={tfvars}' -auto-approve {self.path}",
+ timeout=1200,
+ )
def output(self, tfstate, module, json_format=True):
"""
|
Adds DataSet.rename_outcome_labels(...)
A convenience method for cases when outcome labels in old datasets
need to be updated (e.g. from 'up' & 'down' to '0' and '1') | @@ -1838,3 +1838,35 @@ class DataSet(object):
self.cnt_cache = None
if bOpen: f.close()
+
+ def rename_outcome_labels(self, old_to_new_dict):
+ """
+ Replaces existing output labels with new ones as per `old_to_new_dict`.
+
+ Parameters
+ ----------
+ old_to_new_dict : dict
+ A mapping from old/existing outcome labels to new ones. Strings
+ in keys or values are automatically converted to 1-tuples. Missing
+ outcome labels are left unaltered.
+
+ Returns
+ -------
+ None
+ """
+ mapdict = {}
+ for old,new in old_to_new_dict.items():
+ if _compat.isstr(old): old = (old,)
+ if _compat.isstr(new): new = (new,)
+ mapdict[old] = new
+
+ new_olIndex = _OrderedDict()
+ for ol,i in self.olIndex.items():
+ if ol in mapdict:
+ new_olIndex[mapdict[ol]] = i
+ else:
+ new_olIndex[ol] = i
+
+ #Note: rebuild reverse-dict self.ol:
+ self.olIndex = new_olIndex
+ self.ol = _OrderedDict( [(i,ol) for (ol,i) in self.olIndex.items()] )
|
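The core of the new method above is a key-remapping pass over an ordered index, with bare strings normalized to 1-tuples first. A reduced sketch using only the standard library (hypothetical label data, not pyGSTi's DataSet internals):

```python
from collections import OrderedDict

def rename_keys(ol_index, old_to_new):
    # Normalize strings to 1-tuples, mirroring how outcome labels are stored.
    mapping = {}
    for old, new in old_to_new.items():
        old = (old,) if isinstance(old, str) else old
        new = (new,) if isinstance(new, str) else new
        mapping[old] = new
    # Rebuild the ordered index; labels missing from the map stay unchanged.
    return OrderedDict((mapping.get(k, k), v) for k, v in ol_index.items())

ol_index = OrderedDict([(("up",), 0), (("down",), 1)])
renamed = rename_keys(ol_index, {"up": "0", "down": "1"})
print(list(renamed))  # [('0',), ('1',)] -- indices and order are preserved
```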
documentation: add optional total width arguments to formatting funcs
TN: | @@ -705,27 +705,32 @@ def _render(ctx, entity, **kwargs):
return text
-def get_available_width(indent_level):
+def get_available_width(indent_level, width=None):
"""
Return the number of available columns on source code lines.
- :param indent_level: Identation level of the source code lines.
+ :param int indent_level: Identation level of the source code lines.
+ :param int|None width: Total available width on source code lines. By
+ default, use 79.
"""
- return 79 - indent_level
+ if width is None:
+ width = 79
+ return width - indent_level
text_wrapper = textwrap.TextWrapper(drop_whitespace=True)
-def format_text(text, column):
+def format_text(text, column, width=None):
"""
Format some text as mere indented text.
:param str text: Text to format.
:param int column: Indentation level for the result.
+ :param int|None width: See get_available_width.
:rtype: str
"""
- text_wrapper.available_width = get_available_width(column)
+ text_wrapper.available_width = get_available_width(column, width)
lines = []
for i, paragraph in enumerate(split_paragraphs(text)):
if i > 0:
|
[modules/vpn] Add tk requirement to documentation
fixes | a VPN connection using that profile.
Prerequisites:
+ * tk python library (usually python-tk or python3-tk, depending on your distribution)
* nmcli needs to be installed and configured properly.
To quickly test, whether nmcli is working correctly, type "nmcli -g NAME,TYPE,DEVICE con" which
lists all the connection profiles that are configured. Make sure that your VPN profile is in that list!
|
MAINT: doc: Refer to _rational_tests.c.src in the user-defined types section.
Also removed a few sentences written in the first person that express
opinions about the code. | @@ -217,14 +217,13 @@ type will behave much like a regular data-type except ufuncs must have
1-d loops registered to handle it separately. Also checking for
whether or not other data-types can be cast "safely" to and from this
new type or not will always return "can cast" unless you also register
-which types your new data-type can be cast to and from. Adding
-data-types is one of the less well-tested areas for NumPy 1.0, so
-there may be bugs remaining in the approach. Only add a new data-type
-if you can't do what you want to do using the OBJECT or VOID
-data-types that are already available. As an example of what I
-consider a useful application of the ability to add data-types is the
-possibility of adding a data-type of arbitrary precision floats to
-NumPy.
+which types your new data-type can be cast to and from.
+
+The NumPy source code includes an example of a custom data-type as part
+of its test suite. The file ``_rational_tests.c.src`` in the source code
+directory ``numpy/numpy/core/src/umath/`` contains an implementation of
+a data-type that represents a rational number as the ratio of two 32 bit
+integers.
.. index::
pair: dtype; adding new
|
help_docs: Update instructions for changing stream color.
Updates instructions for changing a stream's color so that
there are no missing or incorrect steps. | Zulip assigns each of your streams a color when you subscribe to the
stream. Changing a stream's color does not change it for anyone else.
-### Change the color of a stream
+## Change the color of a stream
{start_tabs}
{!stream-actions.md!}
-1. Pick a color from the grid, or select **Change color**.
+1. Click **Change color**.
+
+1. Select a color from the grid, use the color picker, or enter a hex code.
+
+1. Click **Confirm** to save and apply the color change.
+
+1. Click outside the box to close the menu.
{end_tabs}
@@ -21,8 +27,12 @@ stream. Changing a stream's color does not change it for anyone else.
1. Select a stream.
-1. Click on the colored square to the right of **Stream color**.
+1. Select the **Personal** tab on the right.
+
+1. Click on the colored square below **Stream color**.
1. Select a color from the grid, use the color picker, or enter a hex code.
+1. Click **Choose** to save and apply the color change.
+
{end_tabs}
|
Temporarily remove `ezancestry` test with Python 3.10
See | @@ -121,7 +121,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest]
- python-version: ['3.7', '3.8', '3.9', '3.10']
+ python-version: ['3.7', '3.8', '3.9']
steps:
- uses: actions/checkout@v2
|
Generators play nice with assignment now
Also, fixed an issue with dereferencing generators | @@ -107,6 +107,12 @@ class Generator:
self.__next__()
return self.generated[position]
+ def __setitem__(self, position, value):
+ if position >= len(self.generated):
+ temp = self.__getitem__(position)
+ self.generated[position] = value
+
+
def __len__(self):
return len(self._dereference())
def __next__(self):
@@ -140,8 +146,9 @@ class Generator:
'''
Only call this when it is absolutely neccesary to convert to a list.
'''
- d = list(self.gen)
+ d = self.generated + list(self.gen)
self.gen = iter(d[::])
+ self.generated = []
return d
def _print(self, end="\n"):
main = self.generated
@@ -495,9 +502,9 @@ def group_consecutive(vector):
return ret
def inclusive_range(lhs, rhs):
if (VY_type(lhs), VY_type(rhs)) != (Number, Number):
- lhs, rhs = str(lhs), str(rhs)
- pobj = regex.compile(lhs)
- return pobj.split(rhs)
+ lhs, rhs = VY_str(lhs), VY_str(rhs)
+ pobj = regex.compile(rhs)
+ return pobj.split(lhs)
if lhs < rhs:
return Generator(range(int(lhs), int(rhs) + 1))
|
pytest: drop test against py <3.8
since the bump to ansible-core 2.12, we have to drop
testing against py36 and py37 given that ansible 2.12 requires python
>=3.8 | @@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: [3.6, 3.7, 3.8]
+ python-version: [3.8, 3.9]
name: Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v2
|
core: don't restart on SIGHUP
("Respond to SIGHUP") adds a restart handler for SIGHUP, but
that doesn't really make sense. SIGHUP means the session has died, so
there's no use in restarting.
This breaks stuff like:
loginctrl terminate-session $XDG_SESSION_ID
Instead, let's just gracefully stop qtile.
Fixes | @@ -236,7 +236,7 @@ class Qtile(CommandObject):
async with LoopContext({
signal.SIGTERM: self.stop,
signal.SIGINT: self.stop,
- signal.SIGHUP: self.restart,
+ signal.SIGHUP: self.stop,
}), ipc.Server(
self._prepare_socket_path(self.socket_path),
self.server.call,
|
Uninstall python-openssl in chromium/base.
Otherwise we get an exception when running pip. | @@ -40,6 +40,7 @@ RUN apt-get update && \
xvfb && \
# 16.04's pyOpenSSL (installed by install-build-deps.sh) is too old for
# Google Cloud SDK.
+ sudo apt-get remove -y python-openssl && \
sudo pip install pyOpenSSL==19.0.0
# Needed for older versions of Chrome.
|
Hotfix for `poetry_requirements` not being recognized as a macro in build files
Added `poetry_requirements` in `register.py` to recognize it in build files.
[ci skip-rust] | @@ -19,6 +19,7 @@ from pants.backend.python.goals import (
)
from pants.backend.python.macros.pants_requirement import PantsRequirement
from pants.backend.python.macros.pipenv_requirements import PipenvRequirements
+from pants.backend.python.macros.poetry_requirements import PoetryRequirements
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.macros.python_requirements import PythonRequirements
from pants.backend.python.subsystems import python_native_code
@@ -46,6 +47,7 @@ def build_file_aliases():
objects={"python_artifact": PythonArtifact, "setup_py": PythonArtifact},
context_aware_object_factories={
"python_requirements": PythonRequirements,
+ "poetry_requirements": PoetryRequirements,
"pipenv_requirements": PipenvRequirements,
PantsRequirement.alias: PantsRequirement,
},
|
Minor fix in Gym environment
Added a call to env.close() inside the stop method,
surrounded with try/except in order to avoid possible errors due to
gym inconsistencies and bugs | @@ -81,7 +81,9 @@ class Gym(Environment):
self.env.render(mode=mode)
def stop(self):
- #self.env.close()
+ try:
+ self.env.close()
+ except:
pass
@staticmethod
|
fix(packaging): fix import error while installing
Previously an import error stating that typing-extensions is not installed
could occur while installing the Lona package | +try:
from .exceptions import * # NOQA: F403
from .routing import MATCH_ALL, Route
from .errors import * # NOQA: F403
from .view import LonaView
from .app import LonaApp
+except ImportError as e:
+ # this can happen while installing the package and can be ignored
+ if e.name != 'typing_extensions':
+ raise
+
VERSION = (1, 8, 5)
VERSION_STRING = '.'.join(str(i) for i in VERSION)
|
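The guard above relies on `ImportError.name`, which carries the name of the module that failed to import, so only the known-missing dependency is swallowed while any other import failure still raises. A standalone sketch with a hypothetical module name:

```python
try:
    import some_missing_dependency  # noqa: F401  (hypothetical module)
except ImportError as e:
    # e.name identifies which module failed; only tolerate the expected one.
    if e.name != 'some_missing_dependency':
        raise
    print("dependency not installed yet; continuing without it")
```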
Updating FrugalScore metric card
* Updating FrugalScore metric card
removing duplicate paragraph
* Update README.md
added the acronym description
* Update README.md | ## Metric Description
-FrugalScore is a reference-based metric for NLG models evaluation. It is based on a distillation approach that allows to learn a fixed, low cost version of any expensive NLG metric, while retaining most of its original performance.
-
-The FrugalScore models are obtained by continuing the pretraining of small models on a synthetic dataset constructed using summarization, backtranslation and denoising models. During the training, the small models learn the internal mapping of the expensive metric, including any similarity function.
+FrugalScore is a reference-based metric for Natural Language Generation (NLG) model evaluation. It is based on a distillation approach that allows to learn a fixed, low cost version of any expensive NLG metric, while retaining most of its original performance.
The FrugalScore models are obtained by continuing the pretraining of small models on a synthetic dataset constructed using summarization, backtranslation and denoising models. During the training, the small models learn the internal mapping of the expensive metric, including any similarity function.
|
Enable pool_pre_ping in the sqlalchemy connection engine
This makes the connection not die due to being idle for too long FeelsGoodMan | @@ -38,7 +38,7 @@ def check_connection(dbapi_con, con_record, con_proxy):
class DBManager:
def init(url):
- DBManager.engine = create_engine(url)
+ DBManager.engine = create_engine(url, pool_pre_ping=True)
DBManager.Session = sessionmaker(bind=DBManager.engine, autoflush=False)
DBManager.ScopedSession = scoped_session(sessionmaker(bind=DBManager.engine))
|
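For reference on the change above: `pool_pre_ping=True` makes SQLAlchemy issue a lightweight liveness check when a connection is taken from the pool and transparently replace it if the server has dropped it. A minimal sketch with a hypothetical database URL:

```python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

# Hypothetical URL; any client/server database benefits the same way.
engine = create_engine(
    "mysql+pymysql://user:password@localhost/mydb",
    pool_pre_ping=True,  # test each connection before handing it out
)
Session = sessionmaker(bind=engine, autoflush=False)
```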
request: method: use simple string instead of lona._types.Symbol
This is part of a patch series to get rid of a non-standard Enum
implementation. | -from lona._types import Symbol
-
-
class Request:
def __init__(self, view_runtime, connection):
self._view_runtime = view_runtime
@@ -16,7 +13,7 @@ class Request:
self.GET = {}
self.POST = {}
- self.method = Symbol('POST' if self.POST else 'GET')
+ self.method = 'POST' if self.POST else 'GET'
@property
def user(self):
|
Change field disk on Host API.
The field was renamed to disks and now returns all disks (active and
inactive), with export_id too. | @@ -12,7 +12,7 @@ class HostSerializer(serializers.ModelSerializer):
env_name = serializers.SerializerMethodField('get_env_name')
region_name = serializers.SerializerMethodField('get_region_name')
offering = serializers.SerializerMethodField('get_offering')
- disk = serializers.SerializerMethodField('get_disk')
+ disks = serializers.SerializerMethodField('get_disks')
class Meta:
model = Host
@@ -25,7 +25,7 @@ class HostSerializer(serializers.ModelSerializer):
'env_name',
'region_name',
'offering',
- 'disk',
+ 'disks',
)
def get_database(self, host):
@@ -56,14 +56,14 @@ class HostSerializer(serializers.ModelSerializer):
except ObjectDoesNotExist:
return
- def get_disk(self, host):
- try:
- return {
- 'total': host.active_disk.nfsaas_size_kb,
- 'used': host.active_disk.nfsaas_used_size_kb,
- }
- except ObjectDoesNotExist:
- return
+ def get_disks(self, host):
+ return map(
+ lambda d: {
+ 'active': d.is_active,
+ 'total': d.nfsaas_size_kb,
+ 'used': d.nfsaas_used_size_kb,
+ 'export_id': d.nfsaas_export_id
+ }, host.nfsaas_host_attributes.all())
def get_offering(self, host):
offering = host.offering
|
Update hc.front.views.cron_preview to catch CronSimError explicitly
With a catch-all "except:" rule, we would swallow any unexpected
exceptions (ValueError, etc.) in cronsim. But we want to know
about them. cron_preview is a place where we can afford to crash, and generate a crash report. | @@ -7,7 +7,7 @@ from secrets import token_urlsafe
from urllib.parse import urlencode
from cron_descriptor import ExpressionDescriptor
-from cronsim import CronSim
+from cronsim.cronsim import CronSim, CronSimError
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
@@ -496,16 +496,13 @@ def cron_preview(request):
zone = pytz.timezone(tz)
now_local = timezone.localtime(timezone.now(), zone)
- if len(schedule.split()) != 5:
- raise ValueError()
-
it = CronSim(schedule, now_local)
for i in range(0, 6):
ctx["dates"].append(next(it))
except UnknownTimeZoneError:
ctx["bad_tz"] = True
- except:
+ except CronSimError:
ctx["bad_schedule"] = True
if ctx["dates"]:
|
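A reduced sketch of the view logic after the change above: only malformed cron expressions are treated as user error, while any unexpected exception propagates and produces a crash report. This assumes the same `CronSim(expr, dt)` / `next(it)` usage shown in the diff; the import path may differ between cronsim versions:

```python
from datetime import datetime
from cronsim.cronsim import CronSim, CronSimError

def preview(schedule, now=None):
    now = now or datetime.now()
    dates, bad_schedule = [], False
    try:
        it = CronSim(schedule, now)
        dates = [next(it) for _ in range(6)]
    except CronSimError:
        # Malformed expression: a user error, not a bug worth a crash report.
        bad_schedule = True
    return dates, bad_schedule
```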
Rename resources to paths
We name the parameter paths in all other calls | @@ -47,7 +47,7 @@ def create_translation(request):
ignore_warnings = form.cleaned_data["ignore_warnings"]
approve = form.cleaned_data["approve"]
force_suggestions = form.cleaned_data["force_suggestions"]
- resources = form.cleaned_data["paths"]
+ paths = form.cleaned_data["paths"]
project = entity.resource.project
@@ -117,7 +117,7 @@ def create_translation(request):
{
"status": True,
"translation": translation.serialize(),
- "stats": TranslatedResource.objects.stats(project, resources, locale),
+ "stats": TranslatedResource.objects.stats(project, paths, locale),
}
)
|
message_edit: Focus on dropdown-widget if only changing stream is allowed.
Previously, the content box was focused in spite of it being disabled in the
case when only stream editing was allowed. Now we instead focus on the stream
dropdown. | @@ -599,6 +599,8 @@ function edit_message($row, raw_content) {
$message_edit_topic.trigger("focus");
} else if (editability === editability_types.TOPIC_ONLY) {
$row.find(".message_edit_topic").trigger("focus");
+ } else if (editability !== editability_types.FULL && is_stream_editable) {
+ $row.find(".select_stream_setting .dropdown-toggle").trigger("focus");
} else {
$message_edit_content.trigger("focus");
// Put cursor at end of input.
|
Upgrade to Node 12
New dependencies no longer support Node 10. | @@ -5,7 +5,8 @@ WORKDIR /usr/src/app
ENV APP_NAME respa
-RUN apt-get update && apt-get install -y gdal-bin postgresql-client gettext npm
+RUN curl -sL https://deb.nodesource.com/setup_12.x | bash -
+RUN apt-get update && apt-get install -y gdal-bin postgresql-client gettext nodejs
COPY requirements.txt .
@@ -15,7 +16,7 @@ RUN python -m pip install --upgrade pip && pip install --no-cache-dir -r deploy/
COPY . .
-RUN npm install -g [email protected] && ./build-resources && apt-get remove -y npm && apt autoremove -y
+RUN ./build-resources
RUN mkdir -p www/media
|
Update the year in the LICENSE
The current LICENSE contains copyright with the year 2016, so I propose to update the year to 2017. | The MIT License (MIT)
-Copyright (c) 2015-2016 LBRY Inc
+Copyright (c) 2015-2017 LBRY Inc
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,
|
ensure default blinder source 'INTERMISSION' shows an SMPTE signal
Also removed SMPTE from the automatically generated test sources so that
'INTERMISSION' is the only test source that uses SMPTE (unless you
configure it otherwise) | @@ -8,7 +8,7 @@ from configparser import SafeConfigParser
from lib.args import Args
from vocto.transitions import Composites, Transitions
-testPatternCount = -1
+testPatternCount = 0
GST_TYPE_VIDEO_TEST_SRC_PATTERN = [
"smpte",
@@ -92,8 +92,15 @@ class VocConfigParser(SafeConfigParser):
return self.get('source.{}'.format(source), 'location')
def getTestPattern(self, source):
- pattern = self.get('source.{}'.format(
- source), 'pattern', fallback=None)
+ if not self.has_section('source.{}'.format(source)):
+ # default blinder source shall be smpte (if not defined otherwise)
+ if source == "blinder-" + DEFAULT_BLINDER_SOURCE:
+ return "smpte"
+ # default background source shall be black (if not defined otherwise)
+ if source == "background":
+ return "black"
+
+ pattern = self.get('source.{}'.format(source), 'pattern', fallback=None)
if not pattern:
global testPatternCount
testPatternCount += 1
|
features: Make more responsive.
This fixes some responsiveness issues with the features page where the
text for headers would go off the screen on mobile and narrower devices. | @@ -421,7 +421,7 @@ nav ul li.active::after {
}
.portico-landing.features-app section.hero .copy {
- width: 800px;
+ max-width: 800px;
margin: 0 auto;
text-align: center;
@@ -436,7 +436,7 @@ nav ul li.active::after {
font-size: 1.8em;
margin: 30px auto 0 auto;
- width: 600px;
+ max-width: 600px;
line-height: 1.3;
}
@@ -514,7 +514,7 @@ nav ul li.active::after {
}
.portico-landing.features-app section.messages .features {
- width: 500px;
+ max-width: 500px;
}
.portico-landing.features-app section.messages .features h2 {
@@ -2920,6 +2920,14 @@ nav ul li.active::after {
width: calc(100% - 100px);
}
+ .portico-landing.features-app section.hero {
+ padding: 50px;
+ }
+
+ .portico-landing.features-app section.hero h1 {
+ font-size: 3em;
+ }
+
.portico-landing.integrations .main,
.main {
width: auto;
@@ -3227,6 +3235,22 @@ nav ul li.active::after {
}
@media (max-width: 550px) {
+ .portico-landing.features-app section.hero {
+ padding: 50px 20px 20px;
+ }
+
+ .portico-landing.features-app section.messages {
+ padding: 0px 20px;
+ }
+
+ .portico-landing.features-app section.messages h2 {
+ font-size: 2em;
+ }
+
+ .portico-landing.features-app section.messages .features .feature-block {
+ margin: 20px 0px;
+ }
+
.portico-landing.integrations .searchbar-reset {
width: 100%;
}
|
Fix after Code Review
Some comments adjusted, additional assertions added. | @@ -1498,9 +1498,8 @@ class TestMembershipCreateRollOver(TestBase):
def test_membership_rollover_negative_remaining_values(self):
"""If current membership has used more seats/workshops than allowed, and
- therefore its remaining values are negative, the roll-over form should
- have max values for rolled-over fields set to 0, instead of these negative
- values.
+ its remaining values are negative, the roll-over form should have max values
+ for rolled-over fields set to 0, instead of these negative values.
This is a regression test for https://github.com/carpentries/amy/issues/2056.
"""
@@ -1510,7 +1509,7 @@ class TestMembershipCreateRollOver(TestBase):
public_instructor_training_seats=0,
inhouse_instructor_training_seats=0,
)
- # add two instructor training seats, so as the remainings will be -1
+ # add two instructor training seats, so the remaining seats will be -1
event = Event.objects.create(
slug="event-centrally-organised",
host=self.org_beta,
@@ -1564,10 +1563,10 @@ class TestMembershipCreateRollOver(TestBase):
response = self.client.post(
reverse("membership_create_roll_over", args=[self.membership.pk]),
data=payload,
- follow=True,
)
# Assert
+ self.assertEqual(response.status_code, 200)
self.assertEqual(
self.membership.public_instructor_training_seats_remaining, -1
)
@@ -1578,6 +1577,14 @@ class TestMembershipCreateRollOver(TestBase):
"public_instructor_training_seats_rolled_from_previous",
"inhouse_instructor_training_seats_rolled_from_previous",
):
+ self.assertEqual(
+ response.context["form"].fields[field].max_value,
+ 0,
+ )
+ self.assertEqual(
+ response.context["form"].fields[field].min_value,
+ 0,
+ )
self.assertEqual(
response.context["form"].errors[field],
[expected_msg],
|
Document retry of exceptions while streaming 'read'/'execute_sql' results.
See: | @@ -64,9 +64,24 @@ fails if the result set is too large,
.. note::
- If streaming a chunk fails due to a "resumable" error,
- :meth:`Session.read` retries the ``StreamingRead`` API reqeust,
- passing the ``resume_token`` from the last partial result streamed.
+ If streaming a chunk raises an exception, the application can
+ retry the ``read``, passing the ``resume_token`` from ``StreamingResultSet``
+ which raised the error. E.g.:
+
+ .. code:: python
+
+ result = snapshot.read(table, columns, keys)
+ while True:
+ try:
+ for row in result.rows:
+ print row
+ except Exception:
+ result = snapshot.read(
+ table, columns, keys, resume_token=result.resume_token)
+ continue
+ else:
+ break
+
Execute a SQL Select Statement
@@ -97,6 +112,26 @@ fails if the result set is too large,
manually, perform all iteration within the context of the
``with database.snapshot()`` block.
+.. note::
+
+ If streaming a chunk raises an exception, the application can
+ retry the query, passing the ``resume_token`` from ``StreamingResultSet``
+ which raised the error. E.g.:
+
+ .. code:: python
+
+ result = snapshot.execute_sql(QUERY)
+ while True:
+ try:
+ for row in result.rows:
+ print row
+ except Exception:
+ result = snapshot.execute_sql(
+ QUERY, resume_token=result.resume_token)
+ continue
+ else:
+ break
+
Next Step
---------
|
NEW: auto-correct preview size if previews of the configured size won't fit onto the screen
If used, it will output a warning message like:
'Resizing previews so that they fit onto screen to WxH' | @@ -5,7 +5,7 @@ import math
import os
from configparser import NoOptionError
-from gi.repository import Gtk, GObject
+from gi.repository import Gtk, Gdk, GObject
from lib.videodisplay import VideoDisplay
import lib.connection as Connection
@@ -31,6 +31,21 @@ class VideoPreviewsController(object):
accelerators = Gtk.AccelGroup()
win.add_accel_group(accelerators)
+ # count number of previews
+ num_previews = len(self.sources)
+ if Config.getLivePreviewEnabled():
+ num_previews += 1
+
+ # get preview size
+ self.previewSize = Config.getPreviewSize()
+
+ # recalculate preview size if in sum they are too large for screen
+ screen = Gdk.Screen.get_default()
+ if screen.get_height() < self.previewSize[1] * num_previews:
+ height = screen.get_height() / num_previews
+ self.previewSize = (Config.getVideoRatio()*height,height)
+ self.log.warning('Resizing previews so that they fit onto screen to WxH={}x{}'.format(*self.previewSize))
+
for idx, source in enumerate(self.sources):
self.addPreview(uibuilder,preview_box, source, Port.SOURCES_OUT + idx)
@@ -47,20 +62,20 @@ class VideoPreviewsController(object):
def addPreview(self, uibuilder, preview_box, source, port):
self.log.info('Initializing Video Preview %s', source)
- previewSize = Config.getPreviewSize()
+
preview = uibuilder.load_check_widget(
'widget_preview',
os.path.dirname(uibuilder.uifile) + "/widgetpreview.ui")
video = uibuilder.find_widget_recursive(preview, 'video')
- video.set_size_request(*previewSize)
+ video.set_size_request(*self.previewSize)
preview_box.pack_start(preview, fill=False,
expand=False, padding=0)
audio_level = uibuilder.find_widget_recursive(preview, 'audio_level_display')
player = VideoDisplay(video, port=port,
- width=previewSize[0],
- height=previewSize[1],
+ width=self.previewSize[0],
+ height=self.previewSize[1],
level_callback=audio_level.level_callback,
name=source.upper()
)
|
Pass in the correct output_dir to trainer_class.
Sometimes the output_dir needs to be formatted according to the env_name and the
work id, especially in sweeps.
This possibly got messed up somewhere along the way. | @@ -201,7 +201,7 @@ def train_rl(
logging.info("Starting the training loop.")
trainer = trainer_class(
- output_dir=FLAGS.output_dir,
+ output_dir=output_dir,
train_env=train_env,
eval_env=eval_env,
)
|
sync.rsync: rsync_syncer: fix usersync handling
Fixes | @@ -56,18 +56,18 @@ class rsync_syncer(base.ExternalSyncer):
return proto[0], f"rsync:{proto[1]}"
pkgcore_config_type = ConfigHint({
- 'basedir': 'str', 'uri': 'str', 'conn_timeout': 'str',
+ 'basedir': 'str', 'uri': 'str', 'conn_timeout': 'str', 'usersync': 'bool',
'compress': 'bool', 'excludes': 'list', 'includes': 'list',
'retries': 'str', 'opts': 'list', 'extra_opts': 'list', 'proxy': 'str'},
typename='syncer')
def __init__(self, basedir, uri, conn_timeout=default_conn_timeout,
- compress=False, excludes=(), includes=(),
+ usersync=False, compress=False, excludes=(), includes=(),
retries=default_retries, proxy=None,
opts=(), extra_opts=()):
uri = uri.rstrip(os.path.sep) + os.path.sep
self.rsh, uri = self._parse_uri(uri)
- super().__init__(basedir, uri, default_verbosity=1)
+ super().__init__(basedir, uri, default_verbosity=1, usersync=usersync)
self.hostname = self.parse_hostname(self.uri)
if self.rsh:
self.rsh = self.require_binary(self.rsh)
|
Fix flow_extract_info() to return correct pon, onu and uni ids
This was also the root cause for auth failure after onu disable/re-enable. | @@ -174,8 +174,8 @@ class OpenOltPlatform(object):
if uni_port_no is None:
raise ValueError
- pon_intf = self.platform.intf_id_from_uni_port_num(uni_port_no)
- onu_id = self.platform.onu_id_from_uni_port_num(uni_port_no)
- uni_id = self.platform.uni_id_from_port_num(uni_port_no)
+ pon_intf = self.intf_id_from_uni_port_num(uni_port_no)
+ onu_id = self.onu_id_from_uni_port_num(uni_port_no)
+ uni_id = self.uni_id_from_port_num(uni_port_no)
return pon_intf, onu_id, uni_id
|
Swapped Lambda for itemgetter
For the sake of code style and consistency, the lambda has been swapped with operator.itemgetter | import logging
from datetime import datetime
+from operator import itemgetter
from discord import Colour, Embed, Member, utils
from discord.ext.commands import Bot, Cog, Context, command
@@ -79,7 +80,7 @@ class Free(Cog):
# Sort channels in descending order by seconds
# Get position in list, inactivity, and channel object
# For each channel, add to embed.description
- sorted_channels = sorted(free_channels, key=lambda free_channel: free_channel[0], reverse=True)
+ sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True)
for i, (inactive, channel) in enumerate(sorted_channels, 1):
minutes, seconds = divmod(inactive, 60)
if minutes > 59:
|
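As a quick illustration of the swap above, `operator.itemgetter(0)` is equivalent to `lambda item: item[0]` as a sort key, just more idiomatic (and marginally faster). The channel data here is made up:

```python
from operator import itemgetter

free_channels = [(120, "general"), (45, "random"), (300, "help")]

# Sort by inactivity (first tuple element), most inactive first.
sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True)
print(sorted_channels)  # [(300, 'help'), (120, 'general'), (45, 'random')]
```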
Function_Base: Reuse the PRNG type when creating a copy using a new seed
The default is still Seeded(np.random.RandomState) | @@ -355,8 +355,10 @@ def _random_state_getter(self, owning_component, context):
assert seed_value != [DEFAULT_SEED], "Invalid seed for {} in context: {} ({})".format(owning_component, context.execution_id, seed_param)
current_state = self.values.get(context.execution_id, None)
- if current_state is None or current_state.used_seed != seed_value:
+ if current_state is None:
return SeededRandomState(seed_value)
+ if current_state.used_seed != seed_value:
+ return type(current_state)(seed_value)
return current_state
|
Correct a typo in the sessions.rst
typo | @@ -22,7 +22,7 @@ To use the middleware, wrap it around the appropriate level of consumer
in your ``routing.py``::
from channels.routing import ProtocolTypeRouter, URLRouter
- from channels.session import SessionMiddlewareStack
+ from channels.sessions import SessionMiddlewareStack
from myapp import consumers
|
Helpful tostring() for multiple values.
Useful in the new dumb repl. | @@ -62,8 +62,8 @@ class Values(W_ProtoObject):
return vals[0].tostring()
if len(vals) == 0:
return "(values)"
- else: #fixme
- return "MULTIPLE VALUES"
+ else: # This shouldn't be called in real code
+ return "\n".join([v.tostring() for v in vals])
class W_Cell(W_Object): # not the same as Racket's box
|
use evaluable methods where available
This patch function-wraps evaluables where available, such as subtract and
divide, rather than repeating the logic found in the evaluable module. | @@ -1220,7 +1220,7 @@ def subtract(__left: IntoArray, __right: IntoArray) -> Array:
:class:`Array`
'''
- return add(__left, negative(__right))
+ return _Wrapper.broadcasted_arrays(evaluable.subtract, __left, __right)
@implements(numpy.positive)
@@ -1252,7 +1252,7 @@ def negative(__arg: IntoArray) -> Array:
:class:`Array`
'''
- return multiply(__arg, -1)
+ return _Wrapper.broadcasted_arrays(evaluable.negative, __arg)
@implements(numpy.multiply)
@@ -1322,7 +1322,7 @@ def reciprocal(__arg: IntoArray) -> Array:
:class:`Array`
'''
- return power(__arg, -1.)
+ return _Wrapper.broadcasted_arrays(evaluable.reciprocal, __arg)
@implements(numpy.power)
@@ -1354,7 +1354,7 @@ def sqrt(__arg: IntoArray) -> Array:
:class:`Array`
'''
- return power(__arg, .5)
+ return _Wrapper.broadcasted_arrays(evaluable.sqrt, __arg, min_dtype=float)
@implements(numpy.square)
@@ -1596,8 +1596,7 @@ def cosh(__arg: IntoArray) -> Array:
:class:`Array`
'''
- arg = Array.cast(__arg)
- return .5 * (exp(arg) + exp(-arg))
+ return _Wrapper.broadcasted_arrays(evaluable.cosh, __arg, min_dtype=float)
@implements(numpy.sinh)
@@ -1613,8 +1612,7 @@ def sinh(__arg: IntoArray) -> Array:
:class:`Array`
'''
- arg = Array.cast(__arg)
- return .5 * (exp(arg) - exp(-arg))
+ return _Wrapper.broadcasted_arrays(evaluable.sinh, __arg, min_dtype=float)
@implements(numpy.tanh)
@@ -1630,8 +1628,7 @@ def tanh(__arg: IntoArray) -> Array:
:class:`Array`
'''
- arg = Array.cast(__arg)
- return 1 - 2. / (exp(2*arg) + 1)
+ return _Wrapper.broadcasted_arrays(evaluable.tanh, __arg, min_dtype=float)
@implements(numpy.arctanh)
@@ -1647,8 +1644,7 @@ def arctanh(__arg: IntoArray) -> Array:
:class:`Array`
'''
- arg = Array.cast(__arg)
- return .5 * (ln(1+arg) - ln(1-arg))
+ return _Wrapper.broadcasted_arrays(evaluable.arctanh, __arg, min_dtype=float)
@implements(numpy.exp)
@@ -1699,7 +1695,7 @@ def log2(__arg: IntoArray) -> Array:
:class:`Array`
'''
- return log(__arg) / log(2)
+ return _Wrapper.broadcasted_arrays(evaluable.log2, __arg, min_dtype=float)
@implements(numpy.log10)
@@ -1715,7 +1711,7 @@ def log10(__arg: IntoArray) -> Array:
:class:`Array`
'''
- return log(__arg) / log(10)
+ return _Wrapper.broadcasted_arrays(evaluable.log10, __arg, min_dtype=float)
# COMPARISON
|
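The rewrite above assumes the dedicated evaluables (`cosh`, `sinh`, `tanh`, `log2`, `log10`, ...) agree with the hand-written identities they replace. A quick numerical check of a few of those identities with plain NumPy, independent of the nutils wrappers:

```python
import numpy as np

x = np.linspace(-2.0, 2.0, 9)

# Identities the old code spelled out by hand, compared against the
# dedicated functions the new evaluables wrap.
assert np.allclose(np.cosh(x), 0.5 * (np.exp(x) + np.exp(-x)))
assert np.allclose(np.sinh(x), 0.5 * (np.exp(x) - np.exp(-x)))
assert np.allclose(np.tanh(x), 1 - 2.0 / (np.exp(2 * x) + 1))
assert np.allclose(np.log2(np.exp(x)), x / np.log(2))
print("hand-written identities match the dedicated functions")
```
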
Add support for on update/delete clause in inline FK.
Refs | @@ -221,11 +221,16 @@ class SchemaMigrator(object):
return ctx
def add_inline_fk_sql(self, ctx, field):
- return (ctx
+ ctx = (ctx
.literal(' REFERENCES ')
.sql(Entity(field.rel_model._meta.table_name))
.literal(' ')
.sql(EnclosedNodeList((Entity(field.rel_field.column_name),))))
+ if field.on_delete is not None:
+ ctx = ctx.literal(' ON DELETE %s' % field.on_delete)
+ if field.on_update is not None:
+ ctx = ctx.literal(' ON UPDATE %s' % field.on_update)
+ return ctx
@operation
def add_foreign_key_constraint(self, table, column_name, rel, rel_column,
|
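The migrator change above appends optional ON DELETE / ON UPDATE clauses to the inline REFERENCES fragment. A plain-string sketch of the clause being built, leaving out peewee's real context and quoting machinery:

```python
def inline_fk_sql(rel_table, rel_column, on_delete=None, on_update=None):
    # String-level approximation of the SQL produced by add_inline_fk_sql.
    sql = ' REFERENCES "{}" ("{}")'.format(rel_table, rel_column)
    if on_delete is not None:
        sql += " ON DELETE {}".format(on_delete)
    if on_update is not None:
        sql += " ON UPDATE {}".format(on_update)
    return sql

print(inline_fk_sql("user", "id", on_delete="CASCADE"))
#  REFERENCES "user" ("id") ON DELETE CASCADE
```
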
Comment out unused Fluent Bit config
The metrics server is disabled, so we don't need to configure it. | @@ -2722,8 +2722,8 @@ package:
# ===========
# Enable/Disable the built-in HTTP Server for metrics
HTTP_Server Off
- HTTP_Listen 0.0.0.0
- HTTP_Port 62020
+ #HTTP_Listen 0.0.0.0
+ #HTTP_Port 62020
[INPUT]
Name systemd
Tag host.*
|
Added another image
Image added of woman hit with rubber bullet. | @@ -59,6 +59,7 @@ A woman who says she was simply walking home with groceries was shot in the face
**Links**
* https://mobile.twitter.com/KevinRKrause/status/1266898396339675137
+* https://i.redd.it/ns0uj557x0251.jpg
### Police use flashbangs and tear gas on protestors | May 31st
|
[StatsWidget] fix issue when failing to import OpenGL
Management of lazy import if OpenGL is not installed.
closes | @@ -350,6 +350,12 @@ class _StatsWidgetBase(object):
:param Union[PlotWidget,SceneWidget,None] plot:
The plot containing the items on which statistics are applied
"""
+ try:
+ import OpenGL
+ except ImportError:
+ has_opengl = False
+ else:
+ has_opengl = True
from ..plot3d.SceneWidget import SceneWidget # Lazy import
self._dealWithPlotConnection(create=False)
self.clear()
@@ -357,10 +363,14 @@ class _StatsWidgetBase(object):
self._plotWrapper = _Wrapper()
elif isinstance(plot, PlotWidget):
self._plotWrapper = _PlotWidgetWrapper(plot)
- elif isinstance(plot, SceneWidget):
+ else:
+ if has_opengl is True:
+ if isinstance(plot, SceneWidget):
self._plotWrapper = _SceneWidgetWrapper(plot)
else: # Expect a ScalarFieldView
self._plotWrapper = _ScalarFieldViewWrapper(plot)
+ else:
+ _logger.warning('OpenGL not installed, %s not managed' % ('SceneWidget qnd ScalarFieldView'))
self._dealWithPlotConnection(create=True)
def setStats(self, statsHandler):
|
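The silx patch above gates the SceneWidget/ScalarFieldView wrappers behind a try/except on the OpenGL import. The optional-dependency guard in isolation:

```python
# Generic sketch of the optional-import guard used in the patch above.
try:
    import OpenGL  # noqa: F401  (any optional dependency fits this pattern)
except ImportError:
    has_opengl = False
else:
    has_opengl = True

if has_opengl:
    print("OpenGL available: 3D plot wrappers can be used")
else:
    print("OpenGL not installed: fall back to the 2D-only wrappers")
```
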
auto_properties_dsl.py: work around a circular dependency issue
TN: | @@ -9,8 +9,6 @@ import docutils.parsers.rst
from docutils.statemachine import StringList
from sphinx.util.docstrings import prepare_docstring
-from langkit.expressions import AbstractExpression
-
class AutoPropertiesDSL(docutils.parsers.rst.Directive):
"""
@@ -71,6 +69,16 @@ class AutoPropertiesDSL(docutils.parsers.rst.Directive):
return def_list_item
def run(self):
+ # Do not put this at the top-level to avoid nasty circular dependency
+ # issues.
+ #
+ # TODO: this is just a workaround. We should remove this circular
+ # dependency instead, but this deserves its own refactoring:
+ # compiled_types imports langkit.expressions at import time and
+ # conversely. This works only when loading compiled_types first
+ # "thanks" to the way the dependencies are used.
+ from langkit.expressions import AbstractExpression
+
document = self.state.document
def_list = nodes.definition_list()
result = [def_list]
|
Travis: Reverting to autodocs deployment on `gcc-5` builder
Also disabling autodocs for YASK backend, as it is not available on that
builder. | @@ -92,5 +92,5 @@ script:
# Docs generation and deployment
- sphinx-apidoc -f -o docs/ examples
- - sphinx-apidoc -f -o docs/ devito
- - if [[ $DEVITO_BACKEND == 'yask' ]]; then ./docs/deploy.sh; fi
+ - sphinx-apidoc -f -o docs/ devito devito/yask/*
+ - if [[ $DEVITO_ARCH == 'gcc-5' ]]; then ./docs/deploy.sh; fi
|
langkit.expression.base: avoid the confusion on Any
Modules in the langkit.expression package have to deal with both
langkit.expression.logic.Any and typing.Any. Import the latter as _Any
to avoid the confusion.
TN: | @@ -4,8 +4,8 @@ from contextlib import contextmanager
from functools import partial
import inspect
from itertools import count
-from typing import (Any, Callable, Dict, List, Optional as Opt, Set, Tuple,
- Union)
+from typing import (Any as _Any, Callable, Dict, List, Optional as Opt, Set,
+ Tuple, Union)
from enum import Enum
@@ -473,8 +473,8 @@ class AbstractExpression(Frozable):
# NOTE: not bothering to type this further, because hopefully we'll get rid
# of AbstractExpressions pretty soon.
- attrs_dict: Dict[Any, Any] = {}
- constructors: List[Any] = []
+ attrs_dict: Dict[_Any, _Any] = {}
+ constructors: List[_Any] = []
@property
def diagnostic_context(self):
@@ -2750,7 +2750,7 @@ class Block(Let):
...
"""
- blocks: List[Any] = []
+ blocks: List[_Any] = []
@classmethod
@contextmanager
|
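The langkit change above imports `typing.Any` as `_Any` so the annotations stop colliding with the DSL's own `Any` combinator. A self-contained sketch of the aliasing trick, with a dummy class standing in for `langkit.expressions.logic.Any`:

```python
from typing import Any as _Any, Dict, List

class Any:
    """Dummy stand-in for the DSL's own `Any` logic combinator."""

# Type annotations keep using typing's Any through the alias...
attrs_dict: Dict[_Any, _Any] = {}
constructors: List[_Any] = []

# ...while the bare name `Any` still resolves to the DSL class.
combinator = Any()
print(type(combinator).__name__, attrs_dict, constructors)
```
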
Add type hint for cuda.set_rng_state
Summary:
Fixes
Pull Request resolved: | @@ -40,3 +40,5 @@ def max_memory_cached(device: Optional[_device_t]=...) -> int: ...
def reset_max_memory_cached(device: Optional[_device_t]=...) -> None: ...
def cudart() -> ctypes.CDLL: ...
def find_cuda_windows_lib() -> Optional[ctypes.CDLL]: ...
+def set_rng_state(new_state): ...
+def get_rng_state(): ...
|
Eagerly set mode when changing partition sets
Summary: Tiny nit
Test Plan: Run tests
Reviewers: dish | @@ -72,6 +72,7 @@ export const ConfigEditorConfigPicker: React.FC<ConfigEditorConfigPickerProps> =
const onSelectPartitionSet = (partitionSet: PartitionSet) => {
onSaveSession({
+ mode: partitionSet.mode,
base: {
partitionsSetName: partitionSet.name,
partitionName: null,
|
Update avemaria.txt
Cleaning ```nvpn.so``` VPN nodes. IP:port is currently in ```remcos``` trail. | @@ -553,10 +553,6 @@ info1.dynu.net
193.161.193.99:27522
server12511.sytes.net
-# Reference: https://app.any.run/tasks/f8d3ae21-bb4f-4d17-8559-b83b602d36a9/
-
-u868328.nvpn.so
-
# Reference: https://twitter.com/JAMESWT_MHT/status/1238208398069465088
# Reference: https://app.any.run/tasks/552ebaee-410b-4928-bcb2-7d65f7666297/
|
Kill TH(C)Blas kwarg_only declarations.
Summary:
Pull Request resolved:
Since we don't generate these as end-user bindings, and we no longer reorder based on this property, we can just get rid of the property.
Test Plan: Imported from OSS | - THTensor* mat2
- arg: real beta
default: AS_REAL(1)
- kwarg_only: True
- arg: real alpha
default: AS_REAL(1)
- kwarg_only: True
]]
[[
name: _th_addmm_
- THTensor* mat2
- arg: real beta
default: AS_REAL(1)
- kwarg_only: True
- arg: real alpha
default: AS_REAL(1)
- kwarg_only: True
]]
[[
name: _th_addmv
- THTensor* vec
- arg: real beta
default: AS_REAL(1)
- kwarg_only: True
- arg: real alpha
default: AS_REAL(1)
- kwarg_only: True
]]
[[
name: _th_addmv_
- THTensor* vec
- arg: real beta
default: AS_REAL(1)
- kwarg_only: True
- arg: real alpha
default: AS_REAL(1)
- kwarg_only: True
]]
[[
name: _th_addr
- THTensor* vec2
- arg: real beta
default: AS_REAL(1)
- kwarg_only: True
- arg: real alpha
default: AS_REAL(1)
- kwarg_only: True
]]
[[
name: _th_addr_
- THTensor* vec2
- arg: real beta
default: AS_REAL(1)
- kwarg_only: True
- arg: real alpha
default: AS_REAL(1)
- kwarg_only: True
]]
[[
name: _th_ger
- THTensor* batch2
- arg: real beta
default: AS_REAL(1)
- kwarg_only: True
- arg: real alpha
default: AS_REAL(1)
- kwarg_only: True
]]
[[
name: _th_addbmm_
- THTensor* batch2
- arg: real beta
default: AS_REAL(1)
- kwarg_only: True
- arg: real alpha
default: AS_REAL(1)
- kwarg_only: True
]]
[[
name: _th_baddbmm
- THTensor* batch2
- arg: real beta
default: AS_REAL(1)
- kwarg_only: True
- arg: real alpha
default: AS_REAL(1)
- kwarg_only: True
]]
[[
name: _th_gels
|
TST: added broadcasting unit test
Added a unit test for new behaviour. | @@ -109,7 +109,7 @@ class TestLonSLT():
assert (abs(self.py_inst['slt'] - self.py_inst['slt2'])).max() < 1.0e-6
def test_bad_lon_name_calc_solar_local_time(self):
- """Test calc_solar_local_time with a bad longitude name"""
+ """Test calc_solar_local_time with a bad longitude name."""
self.py_inst = pysat.Instrument(platform='pysat', name="testing")
self.py_inst.load(date=self.inst_time)
@@ -118,3 +118,14 @@ class TestLonSLT():
coords.calc_solar_local_time(self.py_inst,
lon_name="not longitude",
slt_name='slt')
+
+ def test_lon_broadcasting_calc_solar_local_time(self):
+ """Test calc_solar_local_time with longitude coordinates."""
+
+ self.py_inst = pysat.Instrument(platform='pysat', name="testmodel")
+ self.py_inst.load(date=self.inst_time)
+ coords.calc_solar_local_time(self.py_inst, lon_name="longitude",
+ slt_name='slt')
+
+ assert self.py_inst['slt'].max() < 24.0
+ assert self.py_inst['slt'].min() >= 0.0
|
Fix double loading of external plugins
`register_external_command` was receiving an instance of a class for each new external script. This led to a double initialization when calling `gef.gdb.load(cls)`. Fixed by registering directly a class (just like `register_command`) | @@ -4554,9 +4554,8 @@ def register_external_context_pane(pane_name: str, display_pane_function: Callab
# Commands
#
-def register_external_command(obj: "GenericCommand") -> Type["GenericCommand"]:
+def register_external_command(cls: Type["GenericCommand"]) -> Type["GenericCommand"]:
"""Registering function for new GEF (sub-)command to GDB."""
- cls = obj.__class__
__registered_commands__.append(cls)
gef.gdb.load(initial=False)
gef.gdb.doc.add_command_to_doc((cls._cmdline_, cls, None))
@@ -10545,14 +10544,14 @@ class GefCommand(gdb.Command):
directories = gef.config["gef.extra_plugins_dir"]
if directories:
for directory in directories.split(";"):
- directory = os.path.realpath(os.path.expanduser(directory))
- if os.path.isdir(directory):
- sys.path.append(directory)
- for fname in os.listdir(directory):
- if not fname.endswith(".py"): continue
- fpath = f"{directory}/{fname}"
- if os.path.isfile(fpath):
- gdb.execute(f"source {fpath}")
+ directory = pathlib.Path(directory).expanduser()
+ if not directory.is_dir():
+ continue
+ for entry in directory.iterdir():
+ if not entry.is_file(): continue
+ if entry.suffix != ".py": continue
+ if entry.name == "__init__.py": continue
+ gdb.execute(f"source {entry}")
nb_added = len(self.loaded_commands) - nb_inital
if nb_added > 0:
ok(f"{Color.colorify(nb_added, 'bold green')} extra commands added from "
|
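Besides registering classes instead of instances, the GEF fix above rewrites the extra-plugin scan with pathlib. The discovery loop on its own, with a hypothetical plugin directory:

```python
import pathlib

def iter_plugin_files(directory):
    # pathlib version of the scan in the diff above: regular .py files only,
    # skipping __init__.py, and silently ignoring missing directories.
    directory = pathlib.Path(directory).expanduser()
    if not directory.is_dir():
        return
    for entry in directory.iterdir():
        if not entry.is_file() or entry.suffix != ".py":
            continue
        if entry.name == "__init__.py":
            continue
        yield entry

# "~/.gef-plugins" is an illustrative path, not one GEF actually uses.
for path in iter_plugin_files("~/.gef-plugins"):
    print(f"source {path}")
```
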
Make bga clearer.
Just moving a chunk of lines so that the process is more clear. | @@ -248,6 +248,14 @@ def boiler_generator_assn(eia_transformed_dfs,
'report_date'],
how='outer')
+ # Create a set of bga's that are linked, directly from bga8
+ bga_assn = bga_compiled_1[bga_compiled_1['boiler_id'].notnull()].copy()
+ bga_assn['bga_source'] = 'eia860_org'
+
+ # Create a set of bga's that were not linked directly through bga8
+ bga_unassn = bga_compiled_1[bga_compiled_1['boiler_id'].isnull()].copy()
+ bga_unassn = bga_unassn.drop(['boiler_id'], axis=1)
+
# Side note: there are only 6 generators that appear in bga8 that don't
# apear in gens9 or gens8 (must uncomment-out the og_tag creation above)
# bga_compiled_1[bga_compiled_1['og_tag'].isnull()]
@@ -267,14 +275,6 @@ def boiler_generator_assn(eia_transformed_dfs,
bf_eia923.drop_duplicates(
subset=['plant_id_eia', 'report_date', 'boiler_id'], inplace=True)
- # Create a set of bga's that are linked, directly from bga8
- bga_assn = bga_compiled_1[bga_compiled_1['boiler_id'].notnull()].copy()
- bga_assn['bga_source'] = 'eia860_org'
-
- # Create a set of bga's that were not linked directly through bga8
- bga_unassn = bga_compiled_1[bga_compiled_1['boiler_id'].isnull()].copy()
- bga_unassn = bga_unassn.drop(['boiler_id'], axis=1)
-
# Create a list of boilers that were not in bga8
bf9_bga = bf_eia923.merge(bga_compiled_1,
on=['plant_id_eia', 'boiler_id', 'report_date'],
|
Temporary solution for having access to Python installation path.
* Temporary solution for having access to the root path for python installations until Caffe2/PyTorch figure out the best way to build.
* Update build.sh
Increasing the verbosity of HIP errors. | @@ -33,9 +33,10 @@ if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
git clone https://github.com/ROCm-Developer-Tools/pyHIPIFY.git
chmod a+x pyHIPIFY/*.py
sudo cp -p pyHIPIFY/*.py /opt/rocm/bin
+ sudo chown -R jenkins:jenkins /usr/local
rm -rf "$(dirname "${BASH_SOURCE[0]}")/../../../pytorch_amd/" || true
python "$(dirname "${BASH_SOURCE[0]}")/../../tools/amd_build/build_pytorch_amd.py"
- HIPCC_VERBOSE=1 VERBOSE=1 WITH_ROCM=1 python setup.py install
+ HIPCC_VERBOSE=7 VERBOSE=1 WITH_ROCM=1 python setup.py install
exit
fi
|
Process all arguments before listing out the rules.
Without this, the list option was listing out the default rflint rules,
not the defaults set by CumulusCI or those set on the command line. | @@ -103,7 +103,8 @@ class RobotLint(BaseTask):
result = 0
if self.options["list"]:
- linter.run(["--list"])
+ args = self._get_args()
+ linter.run(args + ["--list"])
else:
files = self._get_files()
|
Update tileApp.py
Added log to description to make it more specific | @@ -38,9 +38,9 @@ def get_tileApp(files_found, report_folder, seeker):
data_list.append((datestamp, lat.lstrip(), longi.lstrip(), counter, head_tail[1]))
if len(data_list) > 0:
- description = 'Tile app recorded langitude and longitude coordinates.'
+ description = 'Tile app log recorded langitude and longitude coordinates.'
report = ArtifactHtmlReport('Locations')
- report.start_artifact_report(report_folder, 'Tile App Geolocation', description)
+ report.start_artifact_report(report_folder, 'Tile App Geolocation Logs', description)
report.add_script()
data_headers = ('Timestamp', 'Latitude', 'Longitude', 'Row Number', 'Source File' )
report.write_artifact_data_table(data_headers, data_list, head_tail[0])
|
Improve docker compose template
This patch sets proper DB collation and bump MySQL version to 5.7. | db:
- image: mysql:5.6
+ image: mysql:5.7
environment:
MYSQL_DATABASE: ralph_ng
MYSQL_ROOT_PASSWORD: ralph_ng
@@ -7,6 +7,7 @@ db:
MYSQL_PASSWORD: ralph_ng
volumes_from:
- data
+ command: mysqld --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
web:
build: ralph
@@ -35,7 +36,7 @@ nginx:
- web
data:
- image: mysql:5.6
+ image: mysql:5.7
volumes:
- /opt/static
- /opt/media
|
mypy: Drop now-redundant `-i` option.
This used to be a synonym for `--incremental`. Since mypy 0.590,
incremental mode is the default, and the flag is ignored; so we
can happily drop it. | @@ -84,7 +84,7 @@ if not python_files and not pyi_files:
sys.exit(0)
extra_args = ["--follow-imports=silent",
- "-i", "--cache-dir=var/mypy-cache"]
+ "--cache-dir=var/mypy-cache"]
if args.linecoverage_report:
extra_args.append("--linecoverage-report")
extra_args.append("var/linecoverage-report")
|
fix: db ssl connection
ref:
[skip ci] | @@ -129,12 +129,11 @@ class MariaDBConnectionUtil:
conn_settings["local_infile"] = frappe.conf.local_infile
if frappe.conf.db_ssl_ca and frappe.conf.db_ssl_cert and frappe.conf.db_ssl_key:
- ssl_params = {
+ conn_settings["ssl"] = {
"ca": frappe.conf.db_ssl_ca,
"cert": frappe.conf.db_ssl_cert,
"key": frappe.conf.db_ssl_key,
}
- conn_settings |= ssl_params
return conn_settings
|
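The frappe fix above stops spreading the SSL fields across the top level of the connection kwargs (`conn_settings |= ssl_params`) and nests them under a single `ssl` key, which is presumably the shape the MariaDB connector expects. The difference in plain dictionaries, with placeholder file names:

```python
conn_settings = {"host": "localhost", "user": "frappe"}
ssl_params = {"ca": "ca.pem", "cert": "cert.pem", "key": "key.pem"}  # placeholders

# Old behaviour: ca/cert/key end up as top-level connection kwargs.
merged_flat = {**conn_settings, **ssl_params}

# New behaviour: everything SSL-related lives under one "ssl" key.
merged_nested = {**conn_settings, "ssl": ssl_params}

print(sorted(merged_flat))    # ['ca', 'cert', 'host', 'key', 'user']
print(sorted(merged_nested))  # ['host', 'ssl', 'user']
```
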
Avoid error sorting by relationships if related tables are not allowed
Refs | @@ -78,6 +78,7 @@ class IndexView(BaseView):
# We will be sorting by number of relationships, so populate that field
all_foreign_keys = await db.get_all_foreign_keys()
for table, foreign_keys in all_foreign_keys.items():
+ if table in tables.keys():
count = len(foreign_keys["incoming"] + foreign_keys["outgoing"])
tables[table]["num_relationships_for_sorting"] = count
|
moved refresh code out of container setter
when removing a child, its window will be None | @@ -42,8 +42,6 @@ class Widget:
child._impl.container = container
self.rehint()
- if self.interface.window and self.interface.window._impl.native.isVisible:
- self.interface.window.content.refresh()
def set_enabled(self, value):
self.native.enabled = self.interface.enabled
@@ -76,6 +74,7 @@ class Widget:
# if we don't have a window, we won't have a container
if self.interface.window:
child.container = self.container or self.interface.window.content._impl
+ self.interface.window.content.refresh()
def insert_child(self, index, child):
self.add_child(child)
@@ -84,7 +83,6 @@ class Widget:
# if we don't have a window, we won't have a container
if self.interface.window:
child.container = None
- # fresh manually here because child has window == None
self.interface.window.content.refresh()
def add_constraints(self):
|
Update data.json [ Repl.it -> Replit.com ]
Repl.it changed their domain and url to Replit.com.
"Repl.it/@" still works, but not for much longer. | "username_claimed": "blue",
"username_unclaimed": "noonewouldeverusethis7"
},
- "Repl.it": {
+ "Replit.com": {
"errorType": "status_code",
- "url": "https://repl.it/@{}",
- "urlMain": "https://repl.it/",
+ "url": "https://replit.com/@{}",
+ "urlMain": "https://replit.com/",
"username_claimed": "blue",
"username_unclaimed": "noonewouldeverusethis7"
},
|
Integration test
integration testing only that everything is working, not the logic of the PQ
use method
create slot attribute in constructor
correct class for test case
stop crawler in teardown method
use class
correct entity naming
python 2 adaptation
integration test with crawler and spider | @@ -2,12 +2,17 @@ import shutil
import tempfile
import unittest
+from twisted.internet import defer
+from twisted.trial.unittest import TestCase
+
from scrapy.crawler import Crawler
from scrapy.core.scheduler import Scheduler
from scrapy.http import Request
from scrapy.pqueues import _scheduler_slot_read, _scheduler_slot_write
from scrapy.signals import request_reached_downloader, response_downloaded
from scrapy.spiders import Spider
+from scrapy.utils.test import get_crawler
+from tests.mockserver import MockServer
class MockCrawler(Crawler):
@@ -223,6 +228,13 @@ class TestSchedulerWithDownloaderAwareInMemory(BaseSchedulerInMemoryTester,
self.assertEqual(len(part), len(set(part)))
+def _is_slots_unique(base_slots, result_slots):
+ unique_slots = len(set(s for _, s in base_slots))
+ for i in range(0, len(result_slots), unique_slots):
+ part = result_slots[i:i + unique_slots]
+ assert len(part) == len(set(part))
+
+
class TestSchedulerWithDownloaderAwareOnDisk(BaseSchedulerOnDiskTester,
unittest.TestCase):
priority_queue_cls = 'scrapy.pqueues.DownloaderAwarePriorityQueue'
@@ -259,7 +271,33 @@ class TestSchedulerWithDownloaderAwareOnDisk(BaseSchedulerOnDiskTester,
spider=self.spider
)
- unique_slots = len(set(s for _, s in _SLOTS))
- for i in range(0, len(_SLOTS), unique_slots):
- part = slots[i:i + unique_slots]
- self.assertEqual(len(part), len(set(part)))
+ _is_slots_unique(_SLOTS, slots)
+
+
+class StartUrlsSpider(Spider):
+
+ def __init__(self, start_urls):
+ self.start_urls = start_urls
+
+
+class TestIntegrationWithDownloaderAwareOnDisk(TestCase):
+ def setUp(self):
+ self.crawler = get_crawler(
+ StartUrlsSpider,
+ {'SCHEDULER_PRIORITY_QUEUE': 'scrapy.pqueues.DownloaderAwarePriorityQueue',
+ 'DUPEFILTER_CLASS': 'scrapy.dupefilters.BaseDupeFilter'}
+ )
+
+ @defer.inlineCallbacks
+ def tearDown(self):
+ yield self.crawler.stop()
+
+ @defer.inlineCallbacks
+ def test_integration_downloader_aware_priority_queue(self):
+ with MockServer() as mockserver:
+
+ url = mockserver.url("/status?n=200", is_secure=False)
+ slots = [url] * 6
+ yield self.crawler.crawl(slots)
+ self.assertEqual(self.crawler.stats.get_value('downloader/response_count'),
+ len(slots))
|
Added translation and sound improvements
Added Shin chan - Aventuras en Cineland (Spain) [T-En by LeonarthCG]
Added Breath of Fire - Sound Restoration (Europe) [Hack by Bregalad]
Added Breath of Fire II - Sound Restoration (Europe) [Hack by Bregalad] | @@ -146,3 +146,21 @@ game (
rom ( name "Rhythm Tengoku (Japan).gba" size 16777216 crc baf7ffd2 md5 3c29922b367de3f58587a8f92d1280b7 sha1 e79563406966a66958b280c9b70cdc6c49c35f54 )
patch "https://www.romhacking.net/translations/1762/"
)
+game (
+ name "Shin chan - Aventuras en Cineland (Spain) [T-En by LeonarthCG]"
+ description "English translation by LeonarthCG (1.0)"
+ rom ( name "Shin chan - Aventuras en Cineland (Spain).gba" size 17359992 crc 4c5ffba8 md5 34d50b2e3fa34a53632f631b0605170a sha1 ecf588ec3a435eafc28fcb86c59ad39d3b5d59fa8 )
+ patch "https://www.romhacking.net/translations/4567/"
+)
+game (
+ name "Breath of Fire - Sound Restoration (Europe) [Hack by Bregalad]"
+ description "Sound Restoration hack by Bregalad (1.0)"
+ rom ( name "Breath of Fire (Europe).gba" size 4194304 crc 31db82cf md5 b2731bf52975b5163129666690160a2a sha1 5ec64e7c4649043aab25bd5cf8eee627f1abdb58 )
+ patch "https://www.romhacking.net/hacks/3764/"
+)
+game (
+ name "Breath of Fire II - Sound Restoration (Europe) [Hack by Bregalad]"
+ description "Sound Restoration hack by Bregalad (1.2)"
+ rom ( name "Breath of Fire II (Europe).gba" size 4292756 crc 3d868bbf md5 d80b43e555efd0b3a1a9d34bd96f6e3e sha1 26cbba9cb34c380344092ef07a710594fa0b51d4 )
+ patch "https://www.romhacking.net/hacks/3765/"
+)
|
fix: Leaking color in bench --help
Since the character to render NC was cut off due to the char limit, the
whole list of following commands and descriptions would also turn
yellow. Let's keep it colourless in --help and only make it yellow when
the command is executed directly. | @@ -12,10 +12,9 @@ from frappe.exceptions import SiteNotSpecifiedError
from frappe.utils import update_progress_bar, cint
from frappe.coverage import CodeCoverage
-DATA_IMPORT_DEPRECATION = click.style(
+DATA_IMPORT_DEPRECATION = (
"[DEPRECATED] The `import-csv` command used 'Data Import Legacy' which has been deprecated.\n"
- "Use `data-import` command instead to import data via 'Data Import'.",
- fg="yellow"
+ "Use `data-import` command instead to import data via 'Data Import'."
)
@@ -364,7 +363,7 @@ def import_doc(context, path, force=False):
@click.option('--no-email', default=True, is_flag=True, help='Send email if applicable')
@pass_context
def import_csv(context, path, only_insert=False, submit_after_import=False, ignore_encoding_errors=False, no_email=True):
- click.secho(DATA_IMPORT_DEPRECATION)
+ click.secho(DATA_IMPORT_DEPRECATION, fg="yellow")
sys.exit(1)
|
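The bench fix above keeps the deprecation text as a plain string and applies the colour only at the point where the command itself echoes it. The click pattern in isolation, assuming click is installed:

```python
import click

DEPRECATION = "The `import-csv` command is deprecated; use `data-import` instead."

# Colour applied at output time: only this call renders the text yellow...
click.secho(DEPRECATION, fg="yellow")

# ...so --help output built from the same constant stays uncoloured and the
# colour cannot leak into whatever is printed next.
click.echo(DEPRECATION)
```
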
Now applying UTM correction to mesh
Now applying the UTM correction to each mesh rather than to
the point cloud data. | @@ -105,10 +105,13 @@ class pointCloudTextureMapper(object):
# Create a texture map image by finding the nearest point to a pixel and using
# its value to set the color.
- def texture_sample(self, img, mesh):
+ def texture_sample(self, img, mesh, utm_shift):
faces = mesh.faces()
- vertices = mesh.vertices()
+ vertices = np.array(mesh.vertices())
+
+ for v in vertices:
+ v += utm_shift
img_size = img.shape
img_dx = 1./img_size[0]
@@ -149,6 +152,24 @@ class pointCloudTextureMapper(object):
for px, ci in zip(pixel_indices, closest_indices[1]):
img[px[0], px[1], :] = self.data[ci]
+ def utm_shift(self, meshfile):
+ # Check for UTM corrections in mesh file header
+ with open(meshfile, "r") as in_f:
+ header = [next(in_f) for x in range(3)]
+
+ # Set the shift values for the mesh from header data
+ shift = np.zeros(3)
+ for l in header:
+ cols = l.split()
+ if '#x' in cols[0]:
+ shift[0] = float(cols[2])
+ elif '#y' in cols[0]:
+ shift[1] = float(cols[2])
+ elif '#z' in cols[0]:
+ shift[2] = float(cols[2])
+
+ return shift
+
def process_mesh(self, meshfile):
print('Processing mesh ', meshfile)
new_mesh = Mesh.from_obj_file(str(meshfile))
@@ -160,7 +181,7 @@ class pointCloudTextureMapper(object):
# Create the texture image
img_arr = np.zeros(self.img_size, dtype=np.float32)
- self.texture_sample(img_arr, new_mesh)
+ self.texture_sample(img_arr, new_mesh, self.utm_shift(meshfile))
new_name = self.output_dir / meshfile.stem
@@ -196,24 +217,8 @@ def main(args):
else:
output_dir = Path(args.mesh_dir)
- # Check for UTM corrections in mesh file header
- with open(mesh_files[0], "r") as in_f:
- header = [next(in_f) for x in range(3)]
-
- # Set the shift values for the mesh from header data
- utm_shift = np.zeros(3)
- for l in header:
- cols = l.split()
- if '#x' in cols[0]:
- utm_shift[0] = float(cols[2])
- elif '#y' in cols[0]:
- utm_shift[1] = float(cols[2])
- elif '#z' in cols[0]:
- utm_shift[2] = float(cols[2])
-
pc_data = load_point_cloud(args.point_cloud_file)
- points = (np.stack([pc_data['X'], pc_data['Y'], pc_data['Z']], axis=1)
- - utm_shift)
+ points = np.stack([pc_data['X'], pc_data['Y'], pc_data['Z']], axis=1)
# Currently just transfer point color to mesh
rgb_data = np.stack([pc_data['Red'], pc_data['Green'], pc_data['Blue']], axis=1)
|
fix(get_czce_rank_table): 0.4.35: fix: get_czce_rank_table history-20071228 format
0.4.35: fix: get_czce_rank_table history-20071228 format | @@ -15,6 +15,7 @@ import warnings
from io import StringIO
import pandas as pd
+import requests
from bs4 import BeautifulSoup
from akshare.futures import cons
@@ -302,10 +303,12 @@ def get_czce_rank_table(date=None, vars_list=cons.contract_symbols):
return {}
if date <= datetime.date(2010, 8, 25):
url = cons.CZCE_VOL_RANK_URL_1 % (date.strftime('%Y%m%d'))
+ r = requests.get(url)
+ r.encoding = "utf-8"
+ soup = BeautifulSoup(r.text, "lxml")
data = _czce_df_read(url, skip_rows=0)
r = requests_link(url, 'utf-8')
r.encoding = 'utf-8'
- soup = BeautifulSoup(r.text, 'lxml', from_encoding="gb2312")
symbols = []
for link in soup.find_all('b'):
strings = (str(link).split(' '))
@@ -318,7 +321,7 @@ def get_czce_rank_table(date=None, vars_list=cons.contract_symbols):
big_dict = {}
for i in range(len(symbols)):
symbol = symbols[i]
- table_cut = data[i + 2]
+ table_cut = data[i + 1]
table_cut.columns = rank_columns
table_cut = table_cut.iloc[:-1, :]
table_cut.loc[:, 'rank'] = table_cut.index
@@ -538,7 +541,7 @@ def _table_cut_cal(table_cut, symbol):
if __name__ == '__main__':
- get_czce_rank_table_first_df = get_czce_rank_table(date='20131227')
+ get_czce_rank_table_first_df = get_czce_rank_table(date='20061227')
print(get_czce_rank_table_first_df)
get_czce_rank_table_second_df = get_czce_rank_table(date='20171227')
print(get_czce_rank_table_second_df)
@@ -548,8 +551,12 @@ if __name__ == '__main__':
print(get_cffex_rank_table_df)
get_shfe_rank_table_df = get_shfe_rank_table(date='20190711')
print(get_shfe_rank_table_df)
- get_shfe_rank_table_df = get_dce_rank_table(date='20180404')
- print(get_shfe_rank_table_df)
+ get_shfe_rank_table_first_df = get_dce_rank_table(date='20131227')
+ print(get_shfe_rank_table_first_df)
+ get_shfe_rank_table_second_df = get_dce_rank_table(date='20171227')
+ print(get_shfe_rank_table_second_df)
+ get_shfe_rank_table_third_df = get_dce_rank_table(date='20191227')
+ print(get_shfe_rank_table_third_df)
# for k, v in dfs.items():
# print(type(v['long_open_interest'].tolist()[-1]))
# get_rank_sum("20180301")
|
docs: installation: Add link to Quickstart after install
Once Flask is installed, many users will want to proceed to the next section
in the docs (Quickstart). Add a link to the end of the Install section for
this. | @@ -142,6 +142,8 @@ update the code from the master branch:
pip install -U https://github.com/pallets/flask/archive/master.tar.gz
+Once you've installed Flask you can continue to :ref:`quickstart`.
+
.. _install-install-virtualenv:
Install virtualenv
|
Updating to just cancel update on first keyboard interrupt.
Instead of stopping grow, just stop asking to update and keep grow going.
Fixes | @@ -102,8 +102,11 @@ class Updater(object):
logging.info(' > Auto-updating to version: {}'.format(
colors.stylize(str(sem_latest), colors.HIGHLIGHT)))
else: # pragma: no cover
+ try:
choice = raw_input(
'Auto update now? [Y]es / [n]o / [a]lways: ').strip().lower()
+ except KeyboardInterrupt:
+ choice = 'n'
if choice not in ('y', 'a', ''):
return
if choice == 'a':
|
Fix typo
NXF_SINGULARITY_CACHE -> NXF_SINGULARITY_CACHEDIR | @@ -86,10 +86,10 @@ For example, to launch the `viralrecon` pipeline:
docker run -itv `pwd`:`pwd` -w `pwd` nfcore/tools launch viralrecon -r 1.1.0
```
-If you use `$NXF_SINGULARITY_CACHE` for downloads, you'll also need to make this folder and environment variable available to the continer:
+If you use `$NXF_SINGULARITY_CACHEDIR` for downloads, you'll also need to make this folder and environment variable available to the continer:
```bash
-docker run -itv `pwd`:`pwd` -v $NXF_SINGULARITY_CACHE:$NXF_SINGULARITY_CACHE -e NXF_SINGULARITY_CACHE -w `pwd` nfcore/tools launch viralrecon -r 1.1.0
+docker run -itv `pwd`:`pwd` -v $NXF_SINGULARITY_CACHEDIR:$NXF_SINGULARITY_CACHEDIR -e NXF_SINGULARITY_CACHEDIR -w `pwd` nfcore/tools launch viralrecon -r 1.1.0
```
#### Docker bash alias
|
timings: add a wrapper to get a precise timestamp
HG--
branch : dev00 | @@ -106,6 +106,7 @@ The Following are the individual timing settings that can be adjusted:
"""
+import six
import time
import operator
from functools import wraps
@@ -274,6 +275,20 @@ class TimeoutError(RuntimeError):
pass
+#=========================================================================
+if six.PY3:
+ _clock_func = time.perf_counter
+else:
+ _clock_func = time.clock
+
+
+def timestamp():
+ """
+ Get a precise timestamp
+ """
+ return _clock_func()
+
+
#=========================================================================
def always_wait_until(timeout,
retry_interval,
@@ -322,14 +337,14 @@ def wait_until(timeout,
except TimeoutError as e:
print("timed out")
"""
- start = time.time()
+ start = timestamp()
func_val = func(*args)
# while the function hasn't returned what we are waiting for
while not op(func_val, value):
# find out how much of the time is left
- time_left = timeout - (time.time() - start)
+ time_left = timeout - (timestamp() - start)
# if we have to wait some more
if time_left > 0:
@@ -394,7 +409,7 @@ def wait_until_passes(timeout,
print("timed out")
raise e.
"""
- start = time.time()
+ start = timestamp()
# keep trying until the timeout is passed
while True:
@@ -409,7 +424,7 @@ def wait_until_passes(timeout,
except exceptions as e:
# find out how much of the time is left
- time_left = timeout - (time.time() - start)
+ time_left = timeout - (timestamp() - start)
# if we have to wait some more
if time_left > 0:
|
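The pywinauto change above funnels all timing through one `timestamp()` helper so the most precise clock available is used. A Python 3-only usage sketch, dropping the six-based Python 2 branch:

```python
import time

def timestamp():
    # On Python 3 the wrapper resolves to time.perf_counter, a monotonic,
    # high-resolution clock meant for measuring elapsed intervals.
    return time.perf_counter()

start = timestamp()
time.sleep(0.05)
print("elapsed: {:.4f}s".format(timestamp() - start))
```
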
Drop workaround for CPython 3.9 (kiwi released)
See diofant/diofant#1071 | @@ -30,12 +30,7 @@ jobs:
fonts-freefont-otf latexmk lmodern
- name: Install dependencies
run: |
- if [ "${{ matrix.python-version }}" != "3.9" ]
- then
pip install -U .[interactive,develop,gmpy,exports,plot,docs]
- else
- pip install -U .[interactive,develop,gmpy,exports,docs]
- fi
- name: Linting with flake8, flake8-rst and pylint
run: |
python -We:invalid -m compileall -f diofant -q
|
GUI: Run WISDEM support
Note where we can run WISDEM if OK is pressed. | @@ -390,7 +390,9 @@ class FormAndMenuWindow(QMainWindow):
msg.setInformativeText("Click cancel to back out and continue editing. Click OK to run WISDEM.")
msg.addButton(QMessageBox.Cancel)
msg.addButton(QMessageBox.Ok)
- msg.exec()
+ choice = msg.exec()
+ if choice == QMessageBox.Ok:
+ print("This is where we would run WISDEM")
def write_configuration_files(self):
"""
|
Tweak and clarify MLP code:
- Change order of keyword arguments (visually cleaner, and matches order in
mlp_mnist.gin).
- Separate out and add comment to code that creates multiple hidden layers. | @@ -22,17 +22,22 @@ from __future__ import print_function
from trax import layers as tl
-def MLP(n_hidden_layers=2,
- d_hidden=512,
+def MLP(d_hidden=512,
+ n_hidden_layers=2,
activation_fn=tl.Relu,
n_output_classes=10,
mode='train'):
"""A multi-layer feedforward (perceptron) network."""
del mode
+ # Define a function rather than a variable, so that multiple copies will
+ # each be their own object with their own weights.
+ def DensePlusActivation():
+ return [tl.Dense(d_hidden), activation_fn()]
+
return tl.Serial(
tl.Flatten(),
- [[tl.Dense(d_hidden), activation_fn()] for _ in range(n_hidden_layers)],
+ [DensePlusActivation() for _ in range(n_hidden_layers)],
tl.Dense(n_output_classes),
tl.LogSoftmax(),
)
|
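The comment added above ("Define a function rather than a variable...") is the heart of the change: calling a factory per hidden layer creates distinct layer objects, while reusing a single variable would alias the same objects everywhere. A framework-free sketch of that difference, with a dummy class instead of trax layers:

```python
class FakeLayer:
    """Stand-in for tl.Dense or the activation layer."""

def dense_plus_activation():
    return [FakeLayer(), FakeLayer()]

shared = dense_plus_activation()
stacked_shared = [shared for _ in range(2)]                  # one pair, reused
stacked_fresh = [dense_plus_activation() for _ in range(2)]  # fresh pair per layer

print(stacked_shared[0][0] is stacked_shared[1][0])  # True  -> weights would be tied
print(stacked_fresh[0][0] is stacked_fresh[1][0])    # False -> independent weights
```
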
Add a new section for using pyupdi as upload tool
Resolve // platformio/platform-atmelmegaavr#7 | @@ -25,6 +25,44 @@ incorrect upload flags. It's highly recommended to use the
:ref:`projectconf_upload_command` option that gives the full control over flags used
for uploading. Please read :ref:`atmelavr_upload_via_programmer` for more information.
+Upload using pyupdi
+^^^^^^^^^^^^^^^^^^^
+
+``pyupdi`` is a Python-based tool for programming tinyAVR and megaAVR devices with UPDI
+interface via a standard serial port. It can be installed directly in the PlatformIO
+virtual environment using the following command:
+
+.. code-block:: bash
+
+ pip install https://github.com/mraardvark/pyupdi/archive/master.zip
+
+
+Once ``pyupdi`` is installed it can be used as the uploader via a custom
+:ref:`projectconf_upload_command` option, for example:
+
+.. code-block:: ini
+
+ [env:ATmega3209_pyupdi_upload]
+ platform = atmelmegaavr
+ framework = arduino
+ board = ATmega3209
+ upload_speed = 115200
+ upload_flags =
+ -d
+ mega3209
+ -c
+ $UPLOAD_PORT
+ -b
+ $UPLOAD_SPEED
+ upload_command = pyupdi $UPLOAD_FLAGS -f $SOURCE
+
+.. warning::
+
+ Device names used in in ``pyupdi`` differ from MCU names used in the ``atmelmegaavr``
+ platform. Run ``pyupdi --help`` to see the list of supported devices.
+
+More information and a typical circuit diagram can be found in the official
+`pyupdi repository <https://github.com/mraardvark/pyupdi>`_ repository.
Fuses programming
~~~~~~~~~~~~~~~~~
|
More consistency checks for dependencies
Closes | @@ -128,7 +128,28 @@ def check_versions():
assert not extra, f"Versions not in modules: {extra}"
+def _strip_dep_version(dependency):
+ dep_version_pos = len(dependency)
+ for pos, c in enumerate(dependency):
+ if c in "=<>":
+ dep_version_pos = pos
+ break
+ stripped = dependency[:dep_version_pos]
+ rest = dependency[dep_version_pos:]
+ if not rest:
+ return stripped, "", ""
+ number_pos = 0
+ for pos, c in enumerate(rest):
+ if c not in "=<>":
+ number_pos = pos
+ break
+ relation = rest[:number_pos]
+ version = rest[number_pos:]
+ return stripped, relation, version
+
+
def check_metadata():
+ known_distributions = set(os.listdir("stubs"))
for distribution in os.listdir("stubs"):
with open(os.path.join("stubs", distribution, "METADATA.toml")) as f:
data = toml.loads(f.read())
@@ -146,9 +167,20 @@ def check_metadata():
assert isinstance(data.get("python3", True), bool), f"Invalid python3 value for {distribution}"
assert isinstance(data.get("requires", []), list), f"Invalid requires value for {distribution}"
for dep in data.get("requires", []):
- # TODO: add more validation here.
assert isinstance(dep, str), f"Invalid dependency {dep} for {distribution}"
assert dep.startswith("types-"), f"Only stub dependencies supported, got {dep}"
+ dep = dep[len("types-"):]
+ for space in " \t\n":
+ assert space not in dep, f"For consistency dependency should not have whitespace: {dep}"
+ assert ";" not in dep, f"Semicolons in dependencies are not supported, got {dep}"
+ stripped, relation, dep_version = _strip_dep_version(dep)
+ assert stripped in known_distributions, f"Only dependencies from typeshed are supported, got {stripped}"
+ if relation:
+ msg = f"Bad version in dependency {dep}"
+ assert relation in {"==", ">", ">=", "<", "<="}, msg
+ assert version.count(".") <= 2, msg
+ for part in version.split("."):
+ assert part.isnumeric(), msg
if __name__ == "__main__":
|
config/core: Update description of execution order
Remove reference to "classic" execution order. | @@ -574,9 +574,7 @@ class RunConfiguration(Configuration):
``"by_spec"``
All iterations of the first spec are executed before moving on
- to the next spec. E.g. A1 A2 A3 B1 C1 C2 This may also be
- specified as ``"classic"``, as this was the way workloads were
- executed in earlier versions of WA.
+ to the next spec. E.g. A1 A2 A3 B1 C1 C2.
``"random"``
Execution order is entirely random.
|
Import T2T envs into PPO.
Add flags/code in PPO to support construction of ClientEnv. | @@ -49,12 +49,14 @@ import gin
import jax
from jax.config import config
import numpy as onp
+from tensor2tensor import envs # pylint: disable=unused-import
from tensor2tensor.envs import gym_env_problem
from tensor2tensor.envs import rendered_env_problem
from tensor2tensor.rl import gym_utils
+from tensor2tensor.rl.google import atari_utils # GOOGLE-INTERNAL:
from tensor2tensor.trax import layers
from tensor2tensor.trax import models
-from tensor2tensor.trax.rlax import envs # pylint: disable=unused-import
+from tensor2tensor.trax.rlax import envs as rlax_envs # pylint: disable=unused-import
from tensor2tensor.trax.rlax import ppo
@@ -105,6 +107,11 @@ flags.DEFINE_boolean("parallelize_envs", False,
"If true, sets parallelism to number of cpu cores.")
+# TODO(afrozm): Find a better way to do these configurations.
+flags.DEFINE_string("train_server_bns", "", "Train Server's BNS.")
+flags.DEFINE_string("eval_server_bns", "", "Eval Server's BNS.")
+
+
def common_layers():
# TODO(afrozm): Refactor.
if "NoFrameskip" in FLAGS.env_problem_name:
@@ -196,6 +203,15 @@ def main(argv):
eval_env_kwargs["output_dir"] = os.path.join(FLAGS.output_dir,
"envs/eval")
+ if "ClientEnv" in FLAGS.env_problem_name:
+ train_env_kwargs["per_env_kwargs"] = [{
+ "remote_env_address": os.path.join(FLAGS.train_server_bns, str(replica))
+ } for replica in range(FLAGS.batch_size)]
+
+ eval_env_kwargs["per_env_kwargs"] = [{
+ "remote_env_address": os.path.join(FLAGS.eval_server_bns, str(replica))
+ } for replica in range(FLAGS.eval_batch_size)]
+
# Make an env here.
env = make_env(batch_size=FLAGS.batch_size, **train_env_kwargs)
assert env
|
fix - wrong icon used
Icon of provider from last configured site was used | @@ -1109,8 +1109,8 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
return provider
sync_sett = self.sync_system_settings
- for site, detail in sync_sett.get("sites", {}).items():
- sites[site] = detail.get("provider")
+ for conf_site, detail in sync_sett.get("sites", {}).items():
+ sites[conf_site] = detail.get("provider")
return sites.get(site, 'N/A')
|
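The rename above (`site` -> `conf_site`) matters because the old loop variable shadowed the `site` argument, so the final lookup always used the last configured site, which is exactly the wrong-icon symptom described. A minimal reproduction of the fixed logic:

```python
def provider_for_site(site, configured_sites):
    providers = {}
    # Fixed version: the loop variable has its own name, so `site` below
    # still refers to the function argument rather than the last key seen.
    for conf_site, detail in configured_sites.items():
        providers[conf_site] = detail.get("provider")
    return providers.get(site, "N/A")

configured = {
    "studio": {"provider": "gdrive"},
    "remote": {"provider": "sftp"},  # last configured site
}
print(provider_for_site("studio", configured))  # gdrive (the shadowing bug returned sftp)
```
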
alexfren/update_vsphere_docs
Link 8 was pointing to a broken link: this change points to | @@ -113,7 +113,7 @@ See our [blog post][11] on monitoring vSphere environments with Datadog.
[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent
[6]: https://pubs.vmware.com/vsphere-51/index.jsp?topic=%2Fcom.vmware.powercli.cmdletref.doc%2FSet-CustomField.html
[7]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information
-[8]: https://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.vsphere.monitoring.doc_50%2FGUID-25800DE4-68E5-41CC-82D9-8811E27924BC.html
+[8]: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.monitoring.doc/GUID-25800DE4-68E5-41CC-82D9-8811E27924BC.html
[9]: https://github.com/DataDog/integrations-core/blob/master/vsphere/metadata.csv
[10]: https://docs.datadoghq.com/integrations/faq/can-i-limit-the-number-of-vms-that-are-pulled-in-via-the-vmware-integration/
[11]: https://www.datadoghq.com/blog/unified-vsphere-app-monitoring-datadog/#auto-discovery-across-vm-and-app-layers
|