message | diff
---|---
[Fix] Update transforms.py
ScalePadding: Fixed interpolation of masks. | @@ -836,7 +836,7 @@ class ScalePadding:
if label is not None:
label = np.uint8(new_label)
label = functional.resize(
- label, self.target_size, interp=cv2.INTER_CUBIC)
+ label, self.target_size, interp=cv2.INTER_NEAREST)
if label is None:
return (im, )
else:
|
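Why the interpolation flag matters for masks, as a minimal standalone sketch (the array values are illustrative, and cv2.resize stands in for the module's own resize helper): bicubic resampling blends neighbouring class ids into values that belong to no class, while nearest-neighbour keeps the original label set intact.

    import cv2
    import numpy as np

    label = np.array([[0, 3], [3, 0]], dtype=np.uint8)   # a tiny mask with class ids 0 and 3
    up_cubic = cv2.resize(label, (8, 8), interpolation=cv2.INTER_CUBIC)
    up_nearest = cv2.resize(label, (8, 8), interpolation=cv2.INTER_NEAREST)
    print(np.unique(up_cubic))    # usually contains in-between ids such as 1 or 2
    print(np.unique(up_nearest))  # only 0 and 3, so the mask stays a valid label map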
companion: Fix pairing requirement flag
Relates to | @@ -39,8 +39,9 @@ _LOGGER = logging.getLogger(__name__)
# Observed values of rpfl (zeroconf):
# 0x62792 -> All on the same network (Unsupported/Mandatory)
# 0x627B6 -> Only devices in same home (Disabled)
-# Mask = 0x62792 & ~0x627B6 = 0x24
-PAIRING_DISABLED_MASK = 0x24
+# 0xB67A2 -> Same as above
+# Mask = 0x62792 & ~0xB67A2 & ~0x627B6 & ~0xB67A2 = 0x20
+PAIRING_DISABLED_MASK = 0x04
# Pairing with PIN seems to be supported according to this pattern
# (again, observed from values of rpfl):
|
Fix typo in logging.rst
Remove extra parenthesis from RequestFormatter constructor. | @@ -129,7 +129,7 @@ handler. ::
formatter = RequestFormatter(
'[%(asctime)s] %(remote_addr)s requested %(url)s\n'
'%(levelname)s in %(module)s: %(message)s'
- ))
+ )
default_handler.setFormatter(formatter)
mail_handler.setFormatter(formatter)
|
fix(currency_boc_sina): fix currency_boc_sina interface
fix currency_boc_sina interface | @@ -21,9 +21,9 @@ def currency_latest(base: str = "USD", api_key: str = "") -> pd.DataFrame:
:return: Latest data of base currency
:rtype: pandas.DataFrame
"""
- payload = {"base": base, "api_key": api_key}
+ params = {"base": base, "api_key": api_key}
url = "https://api.currencyscoop.com/v1/latest"
- r = requests.get(url, params=payload)
+ r = requests.get(url, params=params)
temp_df = pd.DataFrame.from_dict(r.json()["response"])
temp_df["date"] = pd.to_datetime(temp_df["date"])
return temp_df
@@ -86,7 +86,9 @@ def currency_time_series(
return temp_df
-def currency_currencies(c_type: str = "fiat", api_key: str = "") -> pd.DataFrame:
+def currency_currencies(
+ c_type: str = "fiat", api_key: str = ""
+) -> pd.DataFrame:
"""
currencies data from currencyscoop.com
https://currencyscoop.com/api-documentation
@@ -105,7 +107,10 @@ def currency_currencies(c_type: str = "fiat", api_key: str = "") -> pd.DataFrame
def currency_convert(
- base: str = "USD", to: str = "CNY", amount: str = "10000", api_key: str = ""
+ base: str = "USD",
+ to: str = "CNY",
+ amount: str = "10000",
+ api_key: str = "",
) -> pd.Series:
"""
currencies data from currencyscoop.com
@@ -131,14 +136,16 @@ def currency_convert(
r = requests.get(url, params=payload)
temp_se = pd.Series(r.json()["response"])
temp_se["timestamp"] = pd.to_datetime(temp_se["timestamp"], unit="s")
+
return temp_se
if __name__ == "__main__":
currency_latest_df = currency_latest(
- base="USD", api_key=""
+ base="USD", api_key="e2cd623e630613c89ba9cee546ae5108"
)
print(currency_latest_df)
+
currency_history_df = currency_history(
base="USD", date="2020-02-03", api_key=""
)
@@ -150,9 +157,7 @@ if __name__ == "__main__":
# api_key="",
# )
# print(currency_time_series_df)
- currency_currencies_df = currency_currencies(
- c_type="fiat", api_key=""
- )
+ currency_currencies_df = currency_currencies(c_type="fiat", api_key="")
print(currency_currencies_df)
currency_convert_se = currency_convert(
base="USD", to="CNY", amount="10000", api_key=""
|
Update runserver_plus message to account for https
This change makes the startup message correctly print "https" vs "http" if `ssl_context` is available | @@ -273,8 +273,6 @@ class Command(BaseCommand):
open_browser = options.get('open_browser', False)
cert_path = options.get("cert_path")
quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
- bind_url = "http://%s:%s/" % (
- self.addr if not self._raw_ipv6 else '[%s]' % self.addr, self.port)
extra_files = options.get('extra_files', None) or []
reloader_interval = options.get('reloader_interval', 1)
reloader_type = options.get('reloader_type', 'auto')
@@ -291,20 +289,12 @@ class Command(BaseCommand):
self.check_migrations()
except ImproperlyConfigured:
pass
- if self.show_startup_messages:
- print("\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE))
- print("Development server is running at %s" % (bind_url,))
- print("Using the Werkzeug debugger (http://werkzeug.pocoo.org/)")
- print("Quit the server with %s." % quit_command)
handler = get_internal_wsgi_application()
if USE_STATICFILES:
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if use_static_handler and (settings.DEBUG or insecure_serving):
handler = StaticFilesHandler(handler)
- if open_browser:
- import webbrowser
- webbrowser.open(bind_url)
if cert_path:
"""
OpenSSL is needed for SSL support.
@@ -344,6 +334,19 @@ class Command(BaseCommand):
else:
ssl_context = None
+ bind_url = "%s://%s:%s/" % (
+ "https" if ssl_context else "http", self.addr if not self._raw_ipv6 else '[%s]' % self.addr, self.port)
+
+ if self.show_startup_messages:
+ print("\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE))
+ print("Development server is running at %s" % (bind_url,))
+ print("Using the Werkzeug debugger (http://werkzeug.pocoo.org/)")
+ print("Quit the server with %s." % quit_command)
+
+ if open_browser:
+ import webbrowser
+ webbrowser.open(bind_url)
+
if use_reloader and settings.USE_I18N:
extra_files.extend(filter(lambda filename: filename.endswith('.mo'), gen_filenames()))
|
Xin tone curve failure, fix
The tone curve doesn't work on proper images; it needs a grayscale conversion to work. Added that in for xin. | @@ -177,6 +177,11 @@ class RasterScripts(Module):
'units': 0,
'step': 2
})
+ ops.append({
+ 'name': 'grayscale',
+ 'enable': True,
+ 'invert': False,
+ })
ops.append({
'name': 'tone',
'type': 'spline',
|
removing resetting delay to 0 in UHFQC set default settings
Not to mess up a hack that compensates for delay when measuring with optimal weights | @@ -272,8 +272,8 @@ class UHFQC(Instrument):
# detect when the measurement is complete, and then manually fetch the results using the 'get'
# command. Disabling the automatic result readout speeds up the operation a bit, since we avoid
# sending the same data twice.
- self.quex_iavg_readout(0)
- self.quex_rl_readout(0)
+ # self.quex_iavg_readout(0)
+ # self.quex_rl_readout(0)
# The custom firmware will feed through the signals on Signal Input 1 to Signal Output 1 and Signal Input 2 to Signal Output 2
# when the AWG is OFF. For most practical applications this is not really useful. We, therefore, disable the generation of
|
Convert typed dict earlier in new_context
To avoid possible argument mutation for arguments that have pointers, like dictionaries | @@ -725,6 +725,7 @@ class PlaywrightState(LibraryComponent):
[https://forum.robotframework.org/t/comments-for-new-context/4307|Comment >>]
"""
params = locals_to_params(locals())
+ params = convert_typed_dict(self.new_context.__annotations__, params)
params = self._set_video_path(params)
params = self._set_video_size_to_int(params)
reduced_motion = str(params.get("reducedMotion"))
@@ -739,7 +740,6 @@ class PlaywrightState(LibraryComponent):
httpCredentials, params.get("httpCredentials"), "httpCredentials"
)
params["httpCredentials"] = secret
- params = convert_typed_dict(self.new_context.__annotations__, params)
if not videosPath:
params.pop("videoSize", None)
trace_file = params.pop("tracing", None)
|
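The mutation being avoided, as a minimal sketch (the function and dict names here are made up for illustration, not the library's API): a dict argument is passed by reference, so an in-place update inside a helper also changes the caller's object; converting the typed dicts first means the later steps work on already-normalised values.

    def set_video_path(params):
        # updates the nested dict in place; the caller sees the change too
        params["recordVideo"]["dir"] = "/tmp/videos"
        return params

    record_video = {"size": {"width": 1280, "height": 720}}
    params = {"recordVideo": record_video}
    set_video_path(params)
    print(record_video)  # now also contains 'dir', even though only 'params' was passed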
Allow uploads to user AND group libraries
fixes | @@ -1678,7 +1678,8 @@ class Zupload(object):
reg_data = {"upload": authdata.get("uploadKey")}
upload_reg = requests.post(
url=self.zinstance.endpoint
- + "/users/{u}/items/{i}/file".format(
+ + "/{t}/{u}/items/{i}/file".format(
+ t=self.zinstance.library_type,
u=self.zinstance.library_id, i=reg_key
),
data=reg_data,
|
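Effect of the template change, sketched with made-up ids (the real library_type and library_id come from the Zotero instance): the upload registration URL now covers both user and group libraries instead of being hard-coded to /users/.

    endpoint = "https://api.zotero.org"
    for library_type, library_id in (("users", "12345"), ("groups", "67890")):
        url = endpoint + "/{t}/{u}/items/{i}/file".format(
            t=library_type, u=library_id, i="ABCD2345")
        print(url)
    # https://api.zotero.org/users/12345/items/ABCD2345/file
    # https://api.zotero.org/groups/67890/items/ABCD2345/file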
fix various typos and backtick usage in 4.0 changelog/release notes
adds a ref to the new setting added for `WAGTAILADMIN_UNSAFE_PAGE_DELETION_LIMIT` - see | @@ -261,6 +261,8 @@ The interval (in milliseconds) to check for changes made in the page editor befo
`WAGTAILADMIN_GLOBAL_PAGE_EDIT_LOCK` can be set to `True` to prevent users from editing pages that they have locked.
+(wagtailadmin_unsafe_page_deletion_limit)=
+
### `WAGTAILADMIN_UNSAFE_PAGE_DELETION_LIMIT`
```python
|
Update mobileInstall.py
Assignment error fix. | @@ -114,7 +114,7 @@ def get_mobileInstall(files_found, report_folder, seeker):
datainsert,
)
db.commit()
-
+ path = ''
tsv_tml_data_list.append((inserttime, actiondesc, bundleid, path))
# logfunc()
|
fix: Changed one more parameter
repo --> root | @@ -36,8 +36,8 @@ def FileExists(filename):
# GIT
@Statement.from_func(historical=True, quantitative=False)
-def LastCommitted(_start_, _end_, repo=None):
- repo = Repo(repo)
+def LastCommitted(_start_, _end_, root=None):
+ repo = Repo(root)
last_commit = next(iter(repo.iter_commits()))
dt = pd.Timestamp(last_commit.committed_date, unit="s")
return _start_ <= dt <= _end_
\ No newline at end of file
|
Add proper Apache copyright notice in the about dialog
Including a link to the license. | <property name="comments" translatable="yes">Gaphor is the simple modeling tool written in Python</property>
<property name="website">https://github.com/gaphor/gaphor</property>
<property name="website_label" translatable="yes">Fork me on GitHub</property>
- <property name="license" translatable="yes">This software is published under the terms of the
-Apache Software License, version 2.0.
-See the LICENSE.txt file for details.</property>
+ <property name="license" translatable="yes">Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this application except in compliance with the
+License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+express or implied. See the License for the specific language
+governing permissions and limitations under the License.
+</property>
<property name="authors">Arjan Molenaar, Artur Wroblewski,
Jeroen Vloothuis, Dan Yeaw,
Enno Groeper, Adam Boduch,
|
Delete sphere
Until we can be sure non-orthogonal lattices will work | @@ -276,56 +276,6 @@ class VolumetricData(MSONable):
total = np.sum(np.sum(m, axis=0), 0)
return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
- def mask_sphere(self, radius: float, fcoord: npt.ArrayLike):
- """
- Create a mask for a sphere in the data
-
- Args:
- radius: Radius of the mask in Angstroms
- fcoord: The fractional coordinates of the center of the sphere
- """
- # makesure fcoord is an array
- fcoord = np.array(fcoord)
-
- def _dist_mat(pos_frac):
- # return a matrix that contains the distances
- aa = np.linspace(0, 1, len(self.get_axis_grid(0)), endpoint=False)
- bb = np.linspace(0, 1, len(self.get_axis_grid(1)), endpoint=False)
- cc = np.linspace(0, 1, len(self.get_axis_grid(2)), endpoint=False)
- AA, BB, CC = np.meshgrid(aa, bb, cc, indexing="ij")
- dist_from_pos = self.structure.lattice.get_all_distances(
- fcoords1=np.vstack([AA.flatten(), BB.flatten(), CC.flatten()]).T,
- fcoords2=pos_frac,
- )
- return dist_from_pos.reshape(AA.shape)
-
- if np.any(fcoord < 0) or np.any(fcoord > 1):
- raise ValueError("f_coords must be in [0,1)")
- return _dist_mat(fcoord) < radius
-
- def average_in_sphere(self, radius: float, fcoord: npt.ArrayLike):
- """
- Return an average of the total data within a sphere.
-
- Args:
- radius: Radius of the mask in Angstroms
- fcoord: The fractional coordinates of the center of the sphere
- """
- mask = self.mask_sphere(radius, fcoord)
- vol_sphere = self.structure.volume * (mask.sum() / self.ngridpts)
- return np.sum(self.data["total"] * mask) / mask.size / vol_sphere
-
- def sum_in_sphere(self, radius: float, fcoord: npt.ArrayLike):
- """
- Return the sum of the total data within a sphere.
-
- Args:
- radius: Radius of the mask in Angstroms
- fcoord: The fractional coordinates of the center of the sphere
- """
- mask = self.mask_sphere(radius, fcoord)
- return np.sum(self.data["total"] * mask)
-
def to_hdf5(self, filename):
"""
Writes the VolumetricData to a HDF5 format, which is a highly optimized
@@ -474,6 +424,4 @@ class VolumetricData(MSONable):
coords_are_cartesian=True,
)
- # Volumetric data
- data = np.reshape(np.array(file.read().split()).astype(float), (num_x_voxels, num_y_voxels, num_z_voxels))
return cls(structure=structure, data={"total": data})
|
Trying to fix the travis build
Something went wrong with travis a couple commits back, so I'm cleaning
out the unnecessary code | namespace py = pybind11;
struct JaggedArraySrc {
-private:
-
- /*template <typename T>
- static void set_native_endian(py::array_t<T> input) {
- if (!input.dtype().isnative()) {
- input = input.byteswap().newbyteorder();
- }
- }*/
-
public:
template <typename T>
@@ -240,7 +231,6 @@ public:
PYBIND11_MODULE(_jagged, m) {
py::class_<JaggedArraySrc>(m, "JaggedArraySrc")
.def(py::init<>())
- DEF(test)
DEF(offsets2parents)
DEF(counts2offsets)
DEF(startsstops2parents)
|
Add documentation for DELETE method for Swift Object Store API.
Account API does not document 'DELETE' verb, which is a valid request to
delete an account.
This is a doc addition request.
Closes-Bug: | @@ -363,3 +363,67 @@ Response Parameters
- X-Account-Meta-Quota-Bytes: X-Account-Meta-Quota-Bytes_resp
- X-Account-Access-Control: X-Account-Access-Control_resp
- Content-Type: Content-Type_cud_resp
+
+
+Delete the specified account
+============================
+
+.. rest_method:: DELETE /v1/{account}
+
+Deletes the specified account when a reseller admin issues this request.
+Accounts are only deleted by (1) having a reseller admin level auth token (2)
+sending a DELETE to a proxy server for the account to be deleted and (3) that
+proxy server having the allow_account_management" config option set to true.
+
+Note that an issuing a DELETE request simply marks the account for deletion
+later as outlined in the link: https://docs.openstack.org/swift/latest/overview_reaper.html.
+
+Take care when performing this operation because deleting an account is a
+one-way operation that is not trivially recoverable. It's crucial to note that in
+an OpenStack context, you should delete an account after the project/tenant has been deleted from Keystone.
+
+
+::
+
+ curl -i $publicURL -X DELETE -H 'X-Auth-Token: $<reseller admin token>'
+
+
+
+::
+
+ HTTP/1.1 204 No Content
+ Content-Length: 0
+ Content-Type: text/html; charset=UTF-8
+ X-Account-Status: Deleted
+ X-Trans-Id: tx91ce60a640cc42eca198a-006128c180
+ X-Openstack-Request-Id: tx91ce60a640cc42eca198a-006128c180
+ Date: Fri, 27 Aug 2021 11:42:08 GMT
+
+If the account or authentication token is not valid, the operation
+returns the ``Unauthorized (401)``. If you try to delete an account with a
+non-admin token, a ``403 Forbidden`` response code is returned.
+If you give a non-existent account or an invalid URL, a ``404 Not Found`` response code is returned.
+
+Error response codes:204,401,403,404.
+
+
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+ - account: account
+ - X-Auth-Token: X-Auth-Token
+
+Response Parameters
+-------------------
+
+.. rest_parameters:: parameters.yaml
+
+ - Date: Date
+ - X-Timestamp: X-Timestamp
+ - Content-Length: Content-Length_cud_resp
+ - Content-Type: Content-Type_cud_resp
+ - X-Trans-Id: X-Trans-Id
+ - X-Openstack-Request-Id: X-Openstack-Request-Id
+
|
Bugfix: brew_update_formula.py
Sample command output is:
Error: This command updates brew itself, and does not take formula names.
Use 'brew upgrade thefuck' instead.
This will never match the previous `"Use 'brew upgrade <formula>'" in command.output` test. | @@ -5,7 +5,8 @@ from thefuck.utils import for_app
def match(command):
return ('update' in command.script
and "Error: This command updates brew itself" in command.output
- and "Use 'brew upgrade <formula>'" in command.output)
+ and "Use 'brew upgrade" in command.output
+ and "instead" in command.output)
def get_new_command(command):
|
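A quick check of the two predicates against the sample output quoted in the commit message (plain strings stand in for command.output):

    output = ("Error: This command updates brew itself, and does not take formula names.\n"
              "Use 'brew upgrade thefuck' instead.")

    # old test: the literal placeholder never appears in the real message
    print("Use 'brew upgrade <formula>'" in output)               # False
    # new test: match the fixed prefix and suffix around the formula name
    print("Use 'brew upgrade" in output and "instead" in output)  # True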
[client] tweak order of resolved_visibility conversion
This is required because the resolved_visibility attribute may return RequestedVisibility in some cases. | @@ -1605,11 +1605,11 @@ def convert_shared_link_metadata(res: sharing.SharedLinkMetadata) -> SharedLinkM
effective_audience = LinkAudience.Public
elif res.link_permissions.resolved_visibility.is_team_only():
effective_audience = LinkAudience.Team
+ elif res.link_permissions.resolved_visibility.is_password():
+ require_password = True
elif res.link_permissions.resolved_visibility.is_team_and_password():
effective_audience = LinkAudience.Team
require_password = True
- elif res.link_permissions.resolved_visibility.is_password():
- require_password = True
elif res.link_permissions.resolved_visibility.is_no_one():
effective_audience = LinkAudience.NoOne
|
Updated Show_Interface.py Regex
Regex pattern to match VRF name has been updated to include the ':' character (new regex pattern is below).
(?P<vrf_name>[A-Za-z0-9:]+) | @@ -78,7 +78,7 @@ class ShowIpInterfaceBrief(ShowIpInterfaceBriefSchema):
# Loopback500 192.168.220.1 Up Up default
p = re.compile(r'^\s*(?P<interface>[a-zA-Z0-9\/\.\-]+) '
'+(?P<ip_address>[a-z0-9\.]+) +(?P<interface_status>[a-zA-Z]+) '
- '+(?P<protocol_status>[a-zA-Z]+) +(?P<vrf_name>[A-Za-z0-9]+)$')
+ '+(?P<protocol_status>[a-zA-Z]+) +(?P<vrf_name>[A-Za-z0-9:]+)$')
interface_dict = {}
for line in out.splitlines():
|
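The widened character class in action, using a hypothetical output line whose VRF name contains ':' (the line itself is made up for illustration):

    import re

    line = 'Loopback500          192.168.220.1   Up     Up    mgmt:vrf'
    p = re.compile(r'^\s*(?P<interface>[a-zA-Z0-9\/\.\-]+) +(?P<ip_address>[a-z0-9\.]+)'
                   r' +(?P<interface_status>[a-zA-Z]+) +(?P<protocol_status>[a-zA-Z]+)'
                   r' +(?P<vrf_name>[A-Za-z0-9:]+)$')
    print(p.match(line).group('vrf_name'))  # 'mgmt:vrf'; with [A-Za-z0-9]+ the line does not match at all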
Update messages.json
Unsure how 0.9.4's message was delivered to users | "0.9.0": "messages/0.9.0.txt",
"0.9.1": "messages/0.9.1.txt",
"0.9.2": "messages/0.9.2.txt",
- "0.9.3": "messages/0.9.3.txt"
+ "0.9.3": "messages/0.9.3.txt",
+ "0.9.4": "messages/0.9.4.txt",
+ "0.9.5": "messages/0.9.5.txt"
}
|
Changed abstract method TTSPlugin.say
Added the "voice" parameter to the abstract class for text to
speech plugins. | @@ -126,7 +126,7 @@ class TTSPlugin(GenericPlugin, metaclass=abc.ABCMeta):
Generic parent class for all speakers
"""
@abc.abstractmethod
- def say(self, phrase):
+ def say(self, phrase, voice):
pass
def mp3_to_wave(self, filename):
|
changed postcard naming convention
will need to be adjusted again once sector keyword is available in the FITS header | @@ -13,7 +13,7 @@ import numpy as np
from time import strftime
from astropy.wcs import WCS
-from .version import __version__
+from version import __version__
def make_postcards(fns, outdir, width=104, height=148, wstep=None, hstep=None):
@@ -52,7 +52,7 @@ def make_postcards(fns, outdir, width=104, height=148, wstep=None, hstep=None):
# Set the output filename format
info = (primary_header["CAMERA"], primary_header["CCD"],
primary_header["IMAGTYPE"].strip())
- outfn_fmt = "elliepostcard-{0}-{{0:02d}}-{{1:02d}}.fits".format(
+ outfn_fmt = "hlsp_ellie_tess_ffi_postcard-{0}-{{0:04d}}-{{1:04d}}.fits".format(
"-".join(map("{0}".format, info)))
outfn_fmt = os.path.join(outdir, outfn_fmt).format
@@ -125,7 +125,7 @@ def make_postcards(fns, outdir, width=104, height=148, wstep=None, hstep=None):
for j, w in enumerate(ws):
dw = min(width, total_width - w)
dh = min(height, total_height - h)
- outfn = outfn_fmt(i+1, j+1)
+# outfn = outfn_fmt(i+1, j+1)
hdr = fitsio.FITSHDR(primary_header)
@@ -148,6 +148,9 @@ def make_postcards(fns, outdir, width=104, height=148, wstep=None, hstep=None):
xcen = h + 0.5*dh
ycen = w + 0.5*dw
+
+ outfn = outfn_fmt(xcen, ycen)
+
rd = primary_wcs.all_pix2world(xcen, ycen, 1)
hdr.add_record(
dict(name="CEN_X", value=xcen,
|
simplify conditions and utilize lazy eval
of elif | @@ -64,12 +64,9 @@ def run_prettier_on_file(file):
If Prettier is not installed, a warning is logged.
"""
- _prettier_installed = not shutil.which("prettier") is None
- _pre_commit_installed = not shutil.which("pre-commit") is None
-
- if _prettier_installed:
+ if shutil.which("prettier"):
_run_prettier_on_file(file)
- elif _pre_commit_installed:
+ elif shutil.which("pre-commit"):
_run_pre_commit_prettier_on_file(file)
else:
log.warning(
|
remove dynamic dependencies from setup.py
they are in violation of PEP517 and PEP518 | # system imports
from setuptools import setup, find_packages
-import importlib.util
# proceed with actual install
@@ -34,12 +33,6 @@ gui_requires = [
syslog_requires = ["systemd-python"]
-# if GUI is installed, always update it as well
-if importlib.util.find_spec("maestral_qt") or importlib.util.find_spec(
- "maestral_cocoa"
-):
- install_requires.extend(gui_requires)
-
setup(
name="maestral",
|
[Doc] Small graphs readme.txt, edit pass
* [Doc] Small graphs readme.txt, edit pass
Edit for grammar and style. Should this be an .rst or .txt?
* Update README.txt | .. _tutorials2-index:
-Dealing with many small graphs
+Batching many small graphs
==============================
* **Tree-LSTM** `[paper] <https://arxiv.org/abs/1503.00075>`__ `[tutorial]
- <2_small_graph/3_tree-lstm.html>`__ `[code]
+ <2_small_graph/3_tree-lstm.html>`__ `[PyTorch code]
<https://github.com/dmlc/dgl/blob/master/examples/pytorch/tree_lstm>`__:
- sentences of natural languages have inherent structures, which are thrown
+ Sentences have inherent structures that are thrown
away by treating them simply as sequences. Tree-LSTM is a powerful model
- that learns the representation by leveraging prior syntactic structures
- (e.g. parse-tree). The challenge to train it well is that simply by padding
- a sentence to the maximum length no longer works, since trees of different
+ that learns the representation by using prior syntactic structures such as a parse-tree.
+ The challenge in training is that simply by padding
+ a sentence to the maximum length no longer works. Trees of different
sentences have different sizes and topologies. DGL solves this problem by
- throwing the trees into a bigger "container" graph, and use message-passing
- to explore maximum parallelism. The key API we use is batching.
+ adding the trees to a bigger container graph, and then using message-passing
+ to explore maximum parallelism. Batching is a key API for this.
|
[Hexagon] Skip HexagonThreadManagerTest.thread_order_signal_wait unit test
skip test | @@ -259,6 +259,7 @@ TEST_F(HexagonThreadManagerTest, thread_order) {
}
TEST_F(HexagonThreadManagerTest, thread_order_signal_wait) {
+ GTEST_SKIP() << "Skipping due to: https://github.com/apache/tvm/issues/13169";
std::vector<int> arr;
htm->Wait(streams[1], 1);
|
docs(event.py): format event.py
format event.py | @@ -524,12 +524,15 @@ if __name__ == "__main__":
print(macro_cons_gold_change_df)
macro_cons_gold_amount_df = macro_cons_gold_amount()
print(macro_cons_gold_amount_df)
+ print(pd.concat([macro_cons_gold_volume_df, macro_cons_gold_change_df, macro_cons_gold_amount_df], axis=1))
+
macro_cons_silver_volume_df = macro_cons_silver_volume()
print(macro_cons_silver_volume_df)
macro_cons_silver_change_df = macro_cons_silver_change()
print(macro_cons_silver_change_df)
macro_cons_silver_amount_df = macro_cons_silver_amount()
print(macro_cons_silver_amount_df)
+ print(pd.concat([macro_cons_silver_volume_df, macro_cons_silver_change_df, macro_cons_silver_amount_df], axis=1))
macro_cons_opec_near_change_df = macro_cons_opec_near_change()
print(macro_cons_opec_near_change_df)
macro_cons_opec_month_df = macro_cons_opec_month()
|
Add test case for only pulp repos, no packages or modules (currently fails)
This is part of work to fix | @@ -659,6 +659,23 @@ class TestResolveComposes(object):
self.run_plugin_with_args(workflow, expect_error=error_message,
reactor_config_map=reactor_config_map)
+ def test_only_pulp_repos(self, workflow, reactor_config_map): # noqa:F811
+ mock_repo_config(workflow._tmpdir,
+ dedent("""\
+ compose:
+ pulp_repos: true
+ """))
+ mock_content_sets_config(workflow._tmpdir)
+ (flexmock(ODCSClient)
+ .should_receive('start_compose')
+ .with_args(
+ source_type='pulp',
+ source='pulp-spam pulp-bacon pulp-eggs',
+ sigkeys=[],
+ arches=['x86_64'])
+ .and_return(ODCS_COMPOSE))
+ self.run_plugin_with_args(workflow, reactor_config_map=reactor_config_map)
+
@pytest.mark.parametrize(('state_name', 'time_to_expire_delta', 'expect_renew'), ( # noqa:F811
('removed', timedelta(), True),
('removed', timedelta(hours=-2), True),
|
added specialization of Factorize operation
In case we factorize by a variable V, which is stupid, we replace Factorize<F,V> by F. This should be useful when implementing automatic factorization recursively. | @@ -407,15 +407,18 @@ using _P = Param<N>;
// the computation of G, meaning that if G appears several times inside the
// formula F, we will compute it once only
+template < class F, class G > struct FactorizeAlias;
+template < class F, class G > using Factorize = typename FactorizeAlias<F,G>::type;
+
template < class F, class G >
-struct Factorize : BinaryOp<Factorize,F,G>
+struct FactorizeImpl : BinaryOp<FactorizeImpl,F,G>
{
static const int DIM = F::DIM;
static void PrintIdString() { cout << "Factorize"; }
- using THIS = Factorize<F,G>;
+ using THIS = FactorizeImpl<F,G>;
using Factor = G;
@@ -441,6 +444,17 @@ struct Factorize : BinaryOp<Factorize,F,G>
};
+template < class F, class G >
+struct FactorizeAlias {
+ using type = FactorizeImpl<F,G>;
+};
+
+// specialization in case G is of type Var : in this case there is no need for copying a Var into another Var,
+// so we replace Factorize<F,Var> simply by F. This is usefull to avoid factorizing several times the same sub-formula
+template < class F, int N, int DIM, int CAT >
+struct FactorizeAlias<F,Var<N,DIM,CAT>> {
+ using type = F;
+};
//////////////////////////////////////////////////////////////
|
[tests] Use Portalwiki for logentries_tests.py
btrfswiki is MW 1.19 which is no longer supported.
Remove btrfswiki from test matrix and replace it with
portalwiki which is 1.23 | @@ -50,10 +50,9 @@ class TestLogentriesBase(TestCase):
'target': None,
},
'old': {
- 'family': AutoFamily('btrfs',
- # /api.php required for scriptpath()
- 'https://btrfs.wiki.kernel.org/api.php'),
- 'code': 'btrfs',
+ 'family': AutoFamily('portalwiki',
+ 'https://theportalwiki.com/wiki/Main_Page'),
+ 'code': 'en',
'target': None,
}
}
@@ -64,7 +63,7 @@ class TestLogentriesBase(TestCase):
# This is an assertion as the tests don't make sense with newer
# MW versions and otherwise it might not be visible that the test
# isn't run on an older wiki.
- self.assertLess(self.site.mw_version, '1.20')
+ self.assertLess(self.site.mw_version, '1.24')
try:
le = next(iter(self.site.logevents(logtype=logtype, total=1)))
except StopIteration:
|
Remove unused field in models.py
replacement_costs no longer reported by REopt.jl | @@ -873,10 +873,6 @@ class FinancialOutputs(BaseModel, models.Model):
null=True, blank=True,
help_text="Up-front capital costs for all technologies, in present value, excluding replacement costs, including incentives."
)
- replacement_costs = models.FloatField(
- null=True, blank=True,
- help_text="Net replacement costs for all technologies, in future value, excluding incentives."
- )
om_and_replacement_present_cost_after_tax = models.FloatField(
null=True, blank=True,
help_text="Net O&M and replacement costs in present value, after-tax."
|
fix for phantom migration warning
Closes | @@ -55,7 +55,7 @@ class InvenTreeModelMoneyField(ModelMoneyField):
def __init__(self, **kwargs):
# detect if creating migration
- if 'makemigrations' in sys.argv:
+ if 'migrate' in sys.argv or 'makemigrations' in sys.argv:
# remove currency information for a clean migration
kwargs['default_currency'] = ''
kwargs['currency_choices'] = []
|
DOC: Update mode parameter description to account for shape
The `shape` parameter must be specified when opened in appending mode. Docstring
and exception message wording are updated to reflect this. | @@ -59,6 +59,7 @@ class memmap(ndarray):
| 'r+' | Open existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'w+' | Create or overwrite existing file for reading and writing. |
+ | | If ``mode == 'w+'`` then `shape` must also be specified. |
+------+-------------------------------------------------------------+
| 'c' | Copy-on-write: assignments affect data in memory, but |
| | changes are not saved to disk. The file on disk is |
@@ -220,7 +221,7 @@ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
) from None
if mode == 'w+' and shape is None:
- raise ValueError("shape must be given")
+ raise ValueError("shape must be given if mode == 'w+'")
if hasattr(filename, 'read'):
f_ctx = nullcontext(filename)
|
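A minimal illustration of the documented constraint (the file names are arbitrary): 'w+' creates the file, so its size cannot be inferred and shape must be passed, whereas an existing file opened with 'r+' can fall back to its on-disk size.

    import numpy as np

    mm = np.memmap("scratch.dat", dtype=np.uint8, mode="w+", shape=(3, 4))
    mm[:] = 7
    mm.flush()

    existing = np.memmap("scratch.dat", dtype=np.uint8, mode="r+")  # shape inferred from the file: (12,)
    print(existing.shape)

    # np.memmap("fresh.dat", dtype=np.uint8, mode="w+")  # ValueError: shape must be given if mode == 'w+'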
Remove dependency on requests library from crhelper
This is done in order to avoid packaging additional dependencies when
running AWS Lambda backed custom resources | # Imported from https://github.com/aws-cloudformation/custom-resource-helper
+# The file has been modified to drop dependency on requests package
# flake8: noqa
from __future__ import print_function
-import requests
import json
import logging as logging
import time
+from urllib.parse import urlsplit, urlunsplit
+from http.client import HTTPSConnection
logger = logging.getLogger(__name__)
-def _send_response(response_url, response_body, put=requests.put):
+def _send_response(response_url, response_body):
try:
json_response_body = json.dumps(response_body)
except Exception as e:
@@ -20,9 +22,14 @@ def _send_response(response_url, response_body, put=requests.put):
logger.debug("CFN response URL: {}".format(response_url))
logger.debug(json_response_body)
headers = {'content-type': '', 'content-length': str(len(json_response_body))}
+ split_url = urlsplit(response_url)
+ host = split_url.netloc
+ url = urlunsplit(("", "", *split_url[2:]))
while True:
try:
- response = put(response_url, data=json_response_body, headers=headers)
+ connection = HTTPSConnection(host)
+ connection.request(method="PUT", url=url, body=json_response_body, headers=headers)
+ response = connection.getresponse()
logger.info("CloudFormation returned status code: {}".format(response.reason))
break
except Exception as e:
|
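What the splitting accomplishes, sketched with a made-up presigned response URL (the real one arrives in the CloudFormation event): urlsplit separates the host handed to HTTPSConnection from the path-plus-query handed to request.

    from urllib.parse import urlsplit, urlunsplit

    response_url = "https://cloudformation-custom-resource.example.com/arn%3Aaws%3Astack-id?X-Amz-Signature=abc"
    split_url = urlsplit(response_url)
    host = split_url.netloc                      # 'cloudformation-custom-resource.example.com'
    url = urlunsplit(("", "", *split_url[2:]))   # '/arn%3Aaws%3Astack-id?X-Amz-Signature=abc'
    print(host)
    print(url)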
not creating the source_dist (as should be done by travis)
Only uploading whl files and not eggs | @@ -50,7 +50,7 @@ after_test:
# Again, you only need build.cmd if you're building C extensions for
# 64-bit Python 3.3/3.4. And you need to use %PYTHON% to get the correct
# interpreter
- - python setup.py sdist bdist_wheel
+ - python setup.py bdist_wheel
artifacts:
# bdist_wheel puts your built wheel in the dist directory
@@ -64,4 +64,4 @@ deploy_script:
- echo password=%pypi_password% >> %USERPROFILE%\\.pypirc
# - twine upload dist\* --skip-existing
# deploy on dev or master
- - ps: If ($env:APPVEYOR_REPO_TAG -eq "true" ) { Invoke-Expression "twine upload dist\* --skip-existing" 2>$null } Else { write-output "Not on a tag, won't deploy to pypi"}
+ - ps: If ($env:APPVEYOR_REPO_TAG -eq "true" ) { Invoke-Expression "twine upload dist\*.whl --skip-existing" 2>$null } Else { write-output "Not on a tag, won't deploy to pypi"}
|
Adds a fast-kron implementation of ComputationalSPAMVec.todense()
It turns out that when creating large (~15Q) models the bottleneck
is in the creation of the state prep vectors within
ComputationalSPAMVec.todense(). This commit increases the performance
of this function by using the Cython-implemented fast_kron when
available. | @@ -37,6 +37,12 @@ from .polynomial import Polynomial as _Polynomial
from . import replib
from .opcalc import bulk_eval_compact_polys_complex as _bulk_eval_compact_polys_complex
+try:
+ from ..tools import fastcalc as _fastcalc
+except ImportError:
+ _fastcalc = None
+
+
IMAG_TOL = 1e-8 # tolerance for imaginary part being considered zero
@@ -3469,9 +3475,11 @@ class ComputationalSPAMVec(SPAMVec):
in `scratch` maybe used when it is not-None.
"""
if self._evotype == "densitymx":
+ factor_dim = 4
v0 = 1.0 / _np.sqrt(2) * _np.array((1, 0, 0, 1), 'd') # '0' qubit state as Pauli dmvec
v1 = 1.0 / _np.sqrt(2) * _np.array((1, 0, 0, -1), 'd') # '1' qubit state as Pauli dmvec
elif self._evotype in ("statevec", "stabilizer"):
+ factor_dim = 2
v0 = _np.array((1, 0), complex) # '0' qubit state as complex state vec
v1 = _np.array((0, 1), complex) # '1' qubit state as complex state vec
elif self._evotype in ("svterm", "cterm"):
@@ -3480,7 +3488,19 @@ class ComputationalSPAMVec(SPAMVec):
else: raise ValueError("Invalid `evotype`: %s" % self._evotype)
v = (v0, v1)
+
+ if _fastcalc is None: # do it the slow way using numpy
return _functools.reduce(_np.kron, [v[i] for i in self._zvals])
+ else:
+ typ = 'd' if self._evotype == "densitymx" else complex
+ fast_kron_array = _np.ascontiguousarray(
+ _np.empty((len(self._zvals), factor_dim), typ))
+ fast_kron_factordims = _np.ascontiguousarray(_np.array([factor_dim] * len(self._zvals), _np.int64))
+ for i, zi in enumerate(self._zvals):
+ fast_kron_array[i, :] = v[zi]
+ ret = _np.ascontiguousarray(_np.empty(factor_dim**len(self._zvals), typ))
+ _fastcalc.fast_kron(ret, fast_kron_array, fast_kron_factordims)
+ return ret
#def torep(self, typ, outvec=None):
# if typ == "prep":
|
Fix ingestor
## Purpose
make sure the ingestor can deal with the temporary directory
## Changes
add code to `get_egap_assets`
## QA Notes
No new details; should work now.
## Documentation
Not user-facing
## Side Effects
None that I know of.
## Ticket
None | @@ -119,6 +119,14 @@ def get_egap_assets(guid, creator_auth):
with ZipFile(egap_assets_path, 'r') as zipObj:
zipObj.extractall(temp_path)
+ zip_parent = [file for file in os.listdir(temp_path) if os.path.isdir(file) and file != '__MACOSX']
+ if zip_parent:
+ zip_parent = os.listdir(temp_path)[0]
+ for i in os.listdir(os.path.join(temp_path, zip_parent)):
+ shutil.move(os.path.join(temp_path, zip_parent, i), temp_path)
+
+ if zip_parent:
+ os.rmdir(os.path.join(temp_path, zip_parent))
return temp_path
|
Fix issue with assertion style in test_fields
Fixes an issue in test_fields.py where the old assertion style was being used and causing an error | @@ -398,9 +398,9 @@ class TestSelectField:
F = make_form(a=SelectField(choices=[]))
form = F(DummyPostData(a=["b"]))
assert not form.validate()
- self.assertEqual(form.a.data, "b")
- self.assertEqual(len(form.a.errors), 1)
- self.assertEqual(form.a.errors[0], "Not a valid choice")
+ assert form.a.data == "b"
+ assert len(form.a.errors) == 1
+ assert form.a.errors[0] == "Not a valid choice"
def test_validate_choices_when_none(self):
F = make_form(a=SelectField())
|
Fix bug in unembed_response
* The binary quadratic model should be a subset of the embedding,
not the other way around.
* Uses the new BinaryQuadraticModel.__contains__ syntax | @@ -387,7 +387,7 @@ def unembed_response(target_response, embedding, source_bqm, chain_break_method=
The method used to resolve chain breaks.
"""
- if any(v not in source_bqm.linear for v in embedding):
+ if any(v not in embedding for v in source_bqm):
raise ValueError("given bqm does not match the embedding")
energies = []
|
Fix unexpected redirection behavior
New behavior:
help > name with space
- redirects to a file called "name" (without the quotes)
help > "name with space"
- redirects to a file called "name with space" (without the quotes) | @@ -1840,7 +1840,7 @@ class Cmd(cmd.Cmd):
# REDIRECTION_APPEND or REDIRECTION_OUTPUT
if statement.output == constants.REDIRECTION_APPEND:
mode = 'a'
- sys.stdout = self.stdout = open(os.path.expanduser(statement.output_to), mode)
+ sys.stdout = self.stdout = open(os.path.expanduser(shlex.split(statement.output_to)[0]), mode)
else:
# going to a paste buffer
sys.stdout = self.stdout = tempfile.TemporaryFile(mode="w+")
|
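The quoting behaviour described above, checked directly with shlex (the file names are illustrative):

    import shlex

    print(shlex.split('name with space')[0])    # 'name'            -> unquoted spaces split the target
    print(shlex.split('"name with space"')[0])  # 'name with space' -> quotes keep a single target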
docs: Remove securesystemslib mock import
We want to document some securesystemslib classes (Key gets documented
with this change already as it's part of the metadata API). | @@ -61,8 +61,6 @@ html_favicon = "tuf-icon-32.png"
# -- Autodoc configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
-autodoc_mock_imports = ["securesystemslib"]
-
# Tone down the "tuf.api.metadata." repetition
add_module_names = False
python_use_unqualified_type_names = True
|
Block any non-GET request to the dashboard
Thanks | @@ -31,4 +31,8 @@ class DashboardSite(AdminSite):
urls = filter(self.valid_url, self.get_urls())
return list(urls), 'admin', self.name
+ def has_permission(self, req):
+ return False if req.method != 'GET' else super().has_permission(req)
+
+
dashboard = DashboardSite()
|
[dagit] preview wrong type config errors same as missing
Summary: resolves
Test Plan:
The red solid entries remain red while value is `None` or the wrong type
{F418767}
Reviewers: dish, bengotow, sashank | @@ -243,22 +243,25 @@ export class RunPreview extends React.Component<RunPreviewProps, RunPreviewState
validation.errors.forEach((e) => {
const path = errorStackToYamlPath(e.stack.entries);
+ errorsAndPaths.push({pathKey: path.join('.'), error: e});
+
if (e.__typename === 'MissingFieldConfigError') {
missingNodes.push([...path, e.field.name].join('.'));
} else if (e.__typename === 'MissingFieldsConfigError') {
for (const field of e.fields) {
missingNodes.push([...path, field.name].join('.'));
}
- } else {
- if (e.__typename === 'FieldNotDefinedConfigError') {
+ } else if (e.__typename === 'FieldNotDefinedConfigError') {
extraNodes.push([...path, e.fieldName].join('.'));
} else if (e.__typename === 'FieldsNotDefinedConfigError') {
for (const fieldName of e.fieldNames) {
extraNodes.push([...path, fieldName].join('.'));
}
+ } else if (e.__typename === 'RuntimeMismatchConfigError') {
+ // If an entry at a path is the wrong type,
+ // it is equivalent to it being missing
+ missingNodes.push(path.join('.'));
}
- }
- errorsAndPaths.push({pathKey: path.join('.'), error: e});
});
}
|
Typo in "Page Detected" description
The description beneath "Page Detected" had the word "automatically" spelled incorrectly. | @@ -141,7 +141,7 @@ page_detect_list:
public: true
title: 'Pages Detected'
desc: |
- This list was created automatcally by guessing which URLs are web pages in this archive file.
+ This list was created automatically by guessing which URLs are web pages in this archive file.
# WARC Paths and Names
|
Added additional entrypoint script.
Added a third entrypoint to use python's minor version as well.
This can help when testing out differences between Python versions. One could easily open "ipython3.10" and test its differences with "ipython3.8". | @@ -211,14 +211,16 @@ def find_entry_points():
use, our own build_scripts_entrypt class below parses these and builds
command line scripts.
- Each of our entry points gets both a plain name, e.g. ipython, and one
- suffixed with the Python major version number, e.g. ipython3.
+ Each of our entry points gets a plain name, e.g. ipython, a name
+ suffixed with the Python major version number, e.g. ipython3, and
+ a name suffixed with the Python major.minor version number, eg. ipython3.8.
"""
ep = [
'ipython%s = IPython:start_ipython',
]
- suffix = str(sys.version_info[0])
- return [e % '' for e in ep] + [e % suffix for e in ep]
+ major_suffix = str(sys.version_info[0])
+ minor_suffix = ".".join([str(sys.version_info[0]), str(sys.version_info[1])])
+ return [e % '' for e in ep] + [e % major_suffix for e in ep] + [e % minor_suffix for e in ep]
class install_lib_symlink(Command):
user_options = [
|
Update gandcrab.txt
Some detection optimization. | @@ -38,10 +38,6 @@ gdcbmuveqjsli57x.onion.rip
gdcbmuveqjsli57x.onion.plus
gdcbmuveqjsli57x.onion.to
-# Reference: https://twitter.com/blackorbird/status/1108200419543535616
-
-kakaocorp.link/includes/assets/zufufu.gif
-
# Reference: https://blog.talosintelligence.com/2019/03/threat-roundup-0315-0322.html (Win.Ransomware.Gandcrab-6900355-0)
carder.bit
@@ -61,3 +57,9 @@ http://185.105.4.112
# Reference: https://twitter.com/GrujaRS/status/1123678562765168643
gandcrabmfe6mnef.onion
+
+# Reference: https://twitter.com/blackorbird/status/1108200419543535616
+# Reference: https://twitter.com/dvk01uk/status/1126044416966365184
+# Reference: https://app.any.run/tasks/abfb50a4-02a7-424e-a430-76d056973968
+
+kakaocorp.link
|
Corrects the Jacobian matrix in Autograd tutorial
Fixes | @@ -114,23 +114,23 @@ print(x.grad)
#
# .. math::
# J=\left(\begin{array}{ccc}
-# \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{1}}\\
+# \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{1}}{\partial x_{n}}\\
# \vdots & \ddots & \vdots\\
-# \frac{\partial y_{1}}{\partial x_{n}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}}
+# \frac{\partial y_{m}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}}
# \end{array}\right)
#
# Generally speaking, ``torch.autograd`` is an engine for computing
-# Jacobian-vector product. That is, given any vector
+# vector-Jacobian product. That is, given any vector
# :math:`v=\left(\begin{array}{cccc} v_{1} & v_{2} & \cdots & v_{m}\end{array}\right)^{T}`,
-# compute the product :math:`J\cdot v`. If :math:`v` happens to be
+# compute the product :math:`v^{T}\cdot J`. If :math:`v` happens to be
# the gradient of a scalar function :math:`l=g\left(\vec{y}\right)`,
# that is,
# :math:`v=\left(\begin{array}{ccc}\frac{\partial l}{\partial y_{1}} & \cdots & \frac{\partial l}{\partial y_{m}}\end{array}\right)^{T}`,
-# then by the chain rule, the Jacobian-vector product would be the
+# then by the chain rule, the vector-Jacobian product would be the
# gradient of :math:`l` with respect to :math:`\vec{x}`:
#
# .. math::
-# J\cdot v=\left(\begin{array}{ccc}
+# J^{T}\cdot v=\left(\begin{array}{ccc}
# \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{1}}\\
# \vdots & \ddots & \vdots\\
# \frac{\partial y_{1}}{\partial x_{n}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}}
@@ -144,12 +144,15 @@ print(x.grad)
# \frac{\partial l}{\partial x_{n}}
# \end{array}\right)
#
-# This characteristic of Jacobian-vector product makes it very
+# (Note that :math:`v^{T}\cdot J` gives a row vector which can be
+# treated as a column vector by taking :math:`J^{T}\cdot v`.)
+#
+# This characteristic of vector-Jacobian product makes it very
# convenient to feed external gradients into a model that has
# non-scalar output.
###############################################################
-# Now let's take a look at an example of Jacobian-vector product:
+# Now let's take a look at an example of vector-Jacobian product:
x = torch.randn(3, requires_grad=True)
@@ -162,7 +165,7 @@ print(y)
###############################################################
# Now in this case ``y`` is no longer a scalar. ``torch.autograd``
# could not compute the full Jacobian directly, but if we just
-# want the Jacobian-vector product, simply pass the vector to
+# want the vector-Jacobian product, simply pass the vector to
# ``backward`` as argument:
v = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)
y.backward(v)
|
Guard GUILD_MEMBER_ADD/GUILD_MEMBER_REMOVE from errors
If the guilds intent is disabled, all guilds are unavailable. This means
we don't receive a member_count attribute and cannot update it. | @@ -731,13 +731,22 @@ class ConnectionState:
member = Member(guild=guild, data=data, state=self)
if self._member_cache_flags.joined:
guild._add_member(member)
+
+ try:
guild._member_count += 1
+ except AttributeError:
+ pass
+
self.dispatch('member_join', member)
def parse_guild_member_remove(self, data):
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
+ try:
guild._member_count -= 1
+ except AttributeError:
+ pass
+
user_id = int(data['user']['id'])
member = guild.get_member(user_id)
if member is not None:
|
Update icedid.txt
Updated Reference + generic trails. | @@ -40,6 +40,7 @@ nejokexulang.example.com
payfinance.net
# Reference: https://www.crowdstrike.com/blog/bokbots-man-in-the-browser-overview/
+# Reference: https://otx.alienvault.com/pulse/5c99fb543acc7f5eb0e7e933
acquistic.space
ambusted.space
@@ -60,3 +61,5 @@ tybalties.com
ugrigo.space
waharactic.com
yorubal.space
+/data100.php
+/data2.php
|
updated gdrive to use sync-server-sites
Contains changes from | @@ -73,13 +73,7 @@ class GDriveHandler(AbstractProvider):
format(site_name))
return
- provider_presets = self.presets.get(self.CODE)
- if not provider_presets:
- msg = "Sync Server: No provider presets for {}".format(self.CODE)
- log.info(msg)
- return
-
- cred_path = self.presets[self.CODE].get("credentials_url", {}).\
+ cred_path = self.presets.get("credentials_url", {}).\
get(platform.system().lower()) or ''
if not os.path.exists(cred_path):
msg = "Sync Server: No credentials for gdrive provider " + \
@@ -87,6 +81,8 @@ class GDriveHandler(AbstractProvider):
log.info(msg)
return
+ self.service = None
+ if self.presets["enabled"]:
self.service = self._get_gd_service(cred_path)
self._tree = tree
@@ -98,7 +94,7 @@ class GDriveHandler(AbstractProvider):
Returns:
(boolean)
"""
- return self.service is not None
+ return self.presets["enabled"] and self.service is not None
@classmethod
def get_system_settings_schema(cls):
@@ -125,9 +121,11 @@ class GDriveHandler(AbstractProvider):
editable = [
# credentials could be overriden on Project or User level
{
- 'key': "credentials_url",
- 'label': "Credentials url",
- 'type': 'text'
+ "type": "path",
+ "key": "credentials_url",
+ "label": "Credentials url",
+ "multiplatform": True,
+ "placeholder": "Credentials url"
},
# roots could be overriden only on Project leve, User cannot
{
@@ -136,7 +134,7 @@ class GDriveHandler(AbstractProvider):
"type": "dict-roots",
"object_type": {
"type": "path",
- "multiplatform": True,
+ "multiplatform": False,
"multipath": False
}
}
@@ -176,7 +174,7 @@ class GDriveHandler(AbstractProvider):
Format is importing for usage of python's format ** approach
"""
# GDrive roots cannot be locally overridden
- return self.presets['root']
+ return self.presets['roots']
def get_tree(self):
"""
|
Update to conda-build badge link
Updates link used for the conda-build badge. The upstream-dev-ci workflow is now used. | @@ -69,7 +69,7 @@ https://geocat-comp.readthedocs.io/en/latest/citation.html) page.
[github-ci-badge]: https://img.shields.io/github/workflow/status/NCAR/geocat-comp/CI?label=CI&logo=github&style=for-the-badge
[github-conda-build-badge]: https://img.shields.io/github/workflow/status/NCAR/geocat-comp/build_test?label=conda-builds&logo=github&style=for-the-badge
[github-ci-link]: https://github.com/NCAR/geocat-comp/actions?query=workflow%3ACI
-[github-conda-build-link]: https://github.com/NCAR/geocat-comp/actions?query=workflow%3Abuild_test
+[github-conda-build-link]: https://github.com/NCAR/geocat-comp/actions/workflows/upstream-dev-ci.yml
[codecov-badge]: https://img.shields.io/codecov/c/github/NCAR/geocat-comp.svg?logo=codecov&style=for-the-badge
[codecov-link]: https://codecov.io/gh/NCAR/geocat-comp
[rtd-badge]: https://img.shields.io/readthedocs/geocat-comp/latest.svg?style=for-the-badge
|
Extend pantsd test timeout
This is how long it takes for tests to pass on my laptop | @@ -47,7 +47,7 @@ class PantsDaemonMonitor(ProcessManager):
self._check_pantsd_is_alive()
return self._pid
- def assert_pantsd_runner_started(self, client_pid, timeout=4):
+ def assert_pantsd_runner_started(self, client_pid, timeout=12):
return self.await_metadata_by_name(
name='nailgun-client',
metadata_key=str(client_pid),
|
fix comment on dnnlowp op arguments
Summary:
Pull Request resolved:
Fix comment | @@ -55,14 +55,15 @@ namespace caffe2 {
* this option is intended for debugging accuracy issues.
*
* For the following quantization method related options, please refer
- * to deeplearning/quantization/dnnlowp/dnnlowp.cc for more details.
+ * to caffe2/quantization/server/dnnlowp.cc for more details.
*
* - activation_quantization_precision (default=8)
* - weight_quantization_precision (default=8)
* - requantization_multiplier_precision (default=32)
* - eltwise_quantization_precision (default=16)
* - force_scale_power_of_two (default=0)
- * - preserve_sparsity (default=0)
+ * - preserve_activation_sparsity (default=0)
+ * - preserve_weight_sparsity (default=0)
* - activation_quantization_kind (default=min_max)
* - weight_quantization_kind (default=min_max)
*/
|
Add point mass position
This adds the point mass position to the DataFrame. It loops through
the point mass elements and compares each element's node to the bearings' nodes. The
point mass location is then assigned the value of the bearing top part. | @@ -392,6 +392,15 @@ class Rotor(object):
df.loc[df.tag == t, "y_pos"] = y_pos
y_pos += mean_od / 2
+ # define position for point mass elements
+ dfb = df[df.type == "BearingElement"]
+ for p in point_mass_elements:
+ z_pos = dfb[dfb.n_l == p.n]["nodes_pos_l"].values[0]
+ y_pos = dfb[dfb.n_l == p.n]["y_pos"].values[0]
+ df.loc[df.tag == p.tag, "nodes_pos_l"] = z_pos
+ df.loc[df.tag == p.tag, "nodes_pos_r"] = z_pos
+ df.loc[df.tag == p.tag, "y_pos"] = y_pos
+
self.df = df
self.run_modal()
|
Update operators - editing
Added link dissolve operator:
Reparents all children of the selected link to its effective parent and
deletes the link
Added getLeaves operator:
Gets all leaves of the spanning tree of the currently selected objects | @@ -34,6 +34,84 @@ import phobos.utils.io as ioUtils
import phobos.defs as defs
+def dissolveLink(obj):
+ """ Remove the selected link and reparent all children to its effective Parent.
+
+ Args:
+ obj(bpy.types.Object): the link to dissolve
+ """
+
+ # Store original layers
+ originallayers = list(bpy.context.scene.layers)
+ # Select all layers
+ bpy.context.scene.layers = [True for i in range(20)]
+
+ if not obj.phobostype == 'link':
+ return
+
+ else:
+ print('Starting \n')
+ # Get all children
+ children = sUtils.getImmediateChildren(obj , include_hidden=True)
+ # Get the parent
+ parent = obj.parent
+ print(parent)
+ print(children)
+ # Reparent
+ parentObjectsTo(children, parent, clear=True)
+ # Delete the objects
+ sUtils.selectObjects([obj], clear=True, active=-1)
+ bpy.ops.object.delete()
+
+ # Restore original layers
+ bpy.context.scene.layers = originallayers
+
+
+def getLeaves(roots, objects = []):
+ """Returns the links representating the leaves of the spanning tree starting with an object
+ inside the model spanning tree.
+
+ Args:
+ root(list of bpy.types.Object) : Root objects from where to start the search from
+ objects(list) : List of objects to which the search is restricted.
+
+ Returns:
+ list : List of the leaves of the kinematic spanning tree.
+ """
+ leaves = []
+
+ if isinstance(roots, list):
+ for root in roots:
+ leaves += getLeaves(root, objects = objects)
+
+ else:
+ if roots.phobostype != 'link':
+ roots = sUtils.getEffectiveParent(roots, objectlist = objects)
+ print('Root : {0} \n'.format(roots))
+ candidates = sUtils.getImmediateChildren(roots, phobostypes=('link'))
+ print('Candidates : {0} \n'.format(candidates))
+ if objects and candidates:
+ candidates = [candidate for candidate in candidates if candidate in objects]
+ print('Candidates : {0} \n\n'.format(candidates))
+
+ if candidates:
+ leaves += getLeaves(candidates, objects = objects)
+ else:
+ leaves.append(roots)
+
+ if objects:
+ leaves = [leave for leave in leaves if leave in objects]
+
+ # Remove possible doubles
+ outputs = []
+
+ for leave in leaves:
+ if leave not in outputs:
+ outputs.append(leave)
+
+ return outputs
+
+
def getCombinedTransform(obj, effectiveparent):
"""Get the combined transform of the object relative to the effective parent.
@@ -163,6 +241,13 @@ def parentObjectsTo(objects, parent, clear=False):
if not isinstance(objects, list):
objects = [objects]
+ # Store original layers
+ originallayers = list(bpy.context.scene.layers)
+ # Select all layers
+ bpy.context.scene.layers = [True for i in range(20)]
+ # Restore original layers
+ bpy.context.scene.layers = originallayers
+
if clear:
sUtils.selectObjects(objects, active=0, clear=True)
bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
|
Upgrade GitPython 3.1.27 -> 3.1.29, and its deps: gitdb,smmap,typing-extensions
GitPython 3.1.27 -> 3.1.29
gitdb 4.0.9 -> no upgrade
smmap 5.0.0 -> no upgrade
typing-extensions 4.3.0 -> 4.4.0 | @@ -52,7 +52,7 @@ genshi==0.7.7
# via creoleparser
gitdb==4.0.9
# via gitpython
-gitpython==3.1.27
+gitpython==3.1.29
# via -r requirements.in
gunicorn==20.1.0
# via -r requirements.in
@@ -190,7 +190,7 @@ translationstring==1.4
# via colander
turbogears2==2.3.12
# via -r requirements.in
-typing-extensions==4.3.0
+typing-extensions==4.4.0
# via
# gitpython
# importlib-metadata
|
Fix bug in Ngram splitting logic
Rather than returning the TemporarySpan, along with its splits, Snorkel
was returning the TemporarySpan twice, and only the 2nd split. Hiromu
Hota fixed this bug in Fonduer in [1]. This commit fixes it for Snorkel.
[1] | @@ -172,7 +172,7 @@ class Ngrams(CandidateSpace):
ts1 = TemporarySpan(char_start=start, char_end=start + m.start(1) - 1, sentence=context)
if ts1 not in seen:
seen.add(ts1)
- yield ts
+ yield ts1
ts2 = TemporarySpan(char_start=start + m.end(1), char_end=end, sentence=context)
if ts2 not in seen:
seen.add(ts2)
|
Upgrade to Beta status
With the h2spec and autobahn websocket compliance tests passing and my
own production testing, I think it is safe to upgrade the status.
author_email='[email protected]',
license='MIT',
classifiers=[
- 'Development Status :: 3 - Alpha',
+ 'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
|
Enable test_nested_rpc in rpc_test.py
Summary:
Pull Request resolved:
As after we only test RPC using spawn, the multi-thread/fork
error should disappear.
Test Plan: Imported from OSS | @@ -679,7 +679,6 @@ class RpcTest(object):
with self.assertRaisesRegex(Exception, "ValueError"):
fut.wait()
- @unittest.skip("Test is flaky, see https://github.com/pytorch/pytorch/issues/29381")
@dist_init
def test_nested_rpc(self):
n = self.rank + 1
|
Serialiser : Fix syntax error created by `moduleDependencies()`
Classes that didn't exist in any module were creating an empty `import` statement, which was the trigger for the crash described in | @@ -320,7 +320,11 @@ Serialisation::SerialiserMap &Serialisation::serialiserMap()
void Serialisation::Serialiser::moduleDependencies( const Gaffer::GraphComponent *graphComponent, std::set<std::string> &modules, const Serialisation &serialisation ) const
{
- modules.insert( Serialisation::modulePath( graphComponent ) );
+ const std::string module = Serialisation::modulePath( graphComponent );
+ if( !module.empty() )
+ {
+ modules.insert( module );
+ }
}
std::string Serialisation::Serialiser::constructor( const Gaffer::GraphComponent *graphComponent, const Serialisation &serialisation ) const
|
Use validator_for to get validator for specific schema
In case schema is missing, will fall back on default defined in python jsonschema | @@ -38,7 +38,7 @@ def validate(data, schema, set_default=True):
"""
try:
import jsonschema
- from jsonschema import Draft4Validator, validators, RefResolver
+ from jsonschema import validators, RefResolver
except ImportError:
raise WorkflowError("The Python 3 package jsonschema must be installed "
"in order to use the validate directive.")
@@ -73,7 +73,9 @@ def validate(data, schema, set_default=True):
return validators.extend(
validator_class, {"properties" : set_defaults},
)
- DefaultValidator = extend_with_default(Draft4Validator)
+
+ Validator = validators.validator_for(schema)
+ DefaultValidator = extend_with_default(Validator)
if not isinstance(data, dict):
try:
|
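How validator_for picks the class, in a small standalone sketch (the schema is illustrative): it reads the schema's $schema key and returns the matching validator class, falling back to jsonschema's default draft when the key is absent.

    from jsonschema import validators

    schema = {"$schema": "http://json-schema.org/draft-07/schema#",
              "type": "object",
              "properties": {"threads": {"type": "integer", "default": 1}}}

    ValidatorClass = validators.validator_for(schema)  # Draft7Validator for this schema
    ValidatorClass.check_schema(schema)
    ValidatorClass(schema).validate({"threads": 4})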
code clean up
removed comments that are not necessary | @@ -104,8 +104,8 @@ def extract_images(i):
imName=os.path.split(filedata.iloc[i].loc['file'])[1][:-4] #get file name ex: IM-0107-0022
#check for existence of patient folder, create if needed
- if not (os.path.exists(png_destination + folderName)): # it is completely possible for multiple proceses to run this check at this time.
- os.mkdir(png_destination+folderName) # TODO: ADD TRY STATEMENT TO FAIL GRACEFULY
+ if not (os.path.exists(png_destination + folderName)): # it is completely possible for multiple proceses to run this check at same time.
+ os.mkdir(png_destination+folderName)
shape = ds.pixel_array.shape
@@ -248,12 +248,9 @@ export_csv = data.to_csv (csvDestination, index = None, header=True)
fields=df.keys()
count = 0; #potential painpoint
-#current assumption is that processes will have acces to global variables
-#meaning that i can get away with ismple just call imap on the range of indices to idne
+
#%% print images
-#todo: in consumer loop add sgment that checks if an error has occured and updates error count
-#split extracting pngs to different cores. Receive error codes as output
#writting of log handled by main process
if print_images:
print("Start processing Images")
|
Update bootstrap
Removed groupby operations | @@ -3,6 +3,7 @@ from functools import wraps
from inspect import signature
from typing import Callable, Optional
+import numpy as np
import pandas as pd
import xarray as xr
from xarray.core.dataarray import DataArray
@@ -139,9 +140,9 @@ def _bootstrap_period(
exceedance_function: ExceedanceFunction,
) -> DataArray:
period_exceedance_rates = []
- for year_ds in ds_in_base_period.groupby("time.year"):
+ for year in np.unique(ds_in_base_period.time.dt.year):
period_exceedance_rates.append(
- _bootstrap_year(ds_in_base_period, year_ds[0], config, exceedance_function)
+ _bootstrap_year(ds_in_base_period, year, config, exceedance_function)
)
out = xr.concat(period_exceedance_rates, dim="time")
# workaround to ensure unit is really "days"
@@ -159,13 +160,13 @@ def _bootstrap_year(
in_base = _build_virtual_in_base_period(ds_in_base_period, out_base_year)
out_base = ds_in_base_period.sel(time=str(out_base_year))
exceedance_rates = []
- for year_ds in in_base.groupby("time.year"):
- print(year_ds[0])
- replicated_year = in_base.sel(time=str(year_ds[0]))
+ for year in np.unique(in_base.time.dt.year):
+ print(year)
+ replicated_year = in_base.sel(time=str(year))
# it is necessary to change the time of the replicated year
# in order to not skip it in percentile calculation
replicated_year["time"] = replicated_year.time + pd.Timedelta(
- str(out_base_year - year_ds[0]) + "y"
+ str(out_base_year - year) + "y"
)
completed_in_base = xr.concat([in_base, replicated_year], dim="time")
thresholds = _calculate_thresholds(completed_in_base, config)
@@ -175,7 +176,7 @@ def _bootstrap_year(
exceedance_rates.append(exceedance_rate)
if len(exceedance_rates) == 1:
return exceedance_rates[0]
- return xr.concat(exceedance_rates, dim="time").groupby("time").mean()
+ return xr.concat(exceedance_rates, dim="in_base_period").mean(dim="in_base_period")
# Does not handle computation on a in_base_period of a single year,
|
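A self-contained sketch of the pattern introduced above — iterating unique years directly instead of going through `groupby` — with made-up data and variable names:

```python
import numpy as np
import pandas as pd
import xarray as xr

# Three years of fake daily data standing in for the in-base period.
times = pd.date_range("2000-01-01", "2002-12-31", freq="D")
da = xr.DataArray(np.random.rand(times.size), coords={"time": times}, dims="time")

for year in np.unique(da.time.dt.year):
    # Same selection the groupby loop produced, without building the groups.
    yearly = da.sel(time=str(year))
    print(year, float(yearly.mean()))
```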
Reformat prelude.lkt a little
TN: | -@builtin struct Int {}
+@builtin struct Int {
+}
+
@builtin struct BigInt {
@builtin fun as_int(): Int
}
-@builtin struct Symbol {}
-@builtin struct Regexp {}
+
+@builtin struct Symbol {
+}
+
+@builtin struct Regexp {
+}
+
@builtin @open enum Bool {
case false, true
}
+
@builtin trait Sized {
@builtin fun length(): Int
}
-@builtin generic[T] trait Indexable {
+
+@builtin generic[T]
+trait Indexable {
@builtin fun __call__(index: Int): T
}
-@builtin generic[T] trait Iterator {
- @builtin generic [U] fun map(map_fn: (T) -> U): Array[U]
- @builtin generic [U] fun filtermap (map_fn: (T) -> U, filter_fn: (T) -> Bool): Array[U]
+
+@builtin generic[T]
+trait Iterator {
+ @builtin generic[U]
+ fun map(map_fn: (T) -> U): Array[U]
+
+ @builtin generic[U]
+ fun filtermap(map_fn: (T) -> U, filter_fn: (T) -> Bool): Array[U]
}
-@builtin generic[T] struct Array implements Sized, Indexable[T], Iterator[T] {
+
+@builtin generic[T]
+struct Array implements Sized, Indexable[T], Iterator[T] {
@builtin @property fun to_iterator(): Iterator[T]
}
-@builtin generic[T] struct ASTList implements Sized, Indexable[T], Iterator[T] {
+
+@builtin generic[T]
+struct ASTList implements Sized, Indexable[T], Iterator[T] {
+}
+
+@builtin struct Char {
}
-@builtin struct Char {}
@builtin struct String implements Sized, Indexable[Char], Iterator[Char] {
@builtin @property fun to_symbol(): Symbol
}
+
@builtin class LogicalVar {
}
+
@builtin class Equation {
}
+
@builtin class LexicalEnv {
@builtin fun get(symbol: Symbol): Array[Node]
@builtin fun get_first(symbol: Symbol): Node
@builtin fun env_node(): Node
@builtin fun env_orphan(): LexicalEnv
}
+
@builtin class AnalysisUnit {
@builtin @property fun root(): Node
}
+
@builtin class Node {
@builtin @property fun parent(): Node
@builtin fun node_env(): LexicalEnv
@builtin fun unit(): AnalysisUnit
@builtin fun parents(with_self: Bool = true): Array[Node]
}
+
@builtin trait TokenNode {
@builtin @property fun symbol(): Symbol
}
+
@builtin trait ErrorNode {
}
|
Update indian_tokenizer.py
removed the '|' from the string punctuation and added it separately as '|+' | @@ -6,7 +6,7 @@ import string
__author__ = 'Anoop Kunchukuttan'
__copyright = 'GPL'
-indian_punctuation_pattern = re.compile('(['+string.punctuation+'\u0964\u0965'+'])')
+indian_punctuation_pattern = re.compile('(['+string.punctuation.replace("|","")+'\u0964\u0965'+']|\|+)')
def indian_punctuation_tokenize_regex(input_str):
|
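A small sketch of what the pattern change does: '|' is removed from the character class and matched separately as '\|+', so a run of pipes is kept as a single token (the input string is invented):

```python
import re
import string

# Same construction as the updated pattern above.
pattern = re.compile('([' + string.punctuation.replace("|", "") + '\u0964\u0965' + ']|\\|+)')

# '||' now comes out as one separator token instead of two.
print(pattern.split("ab||cd, ef"))  # ['ab', '||', 'cd', ',', ' ef']
```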
HelpChannels: retrieve category channels more efficiently
The channels property of categories sorts the channels before returning
them.
* Add a generator function to get category channels | import asyncio
-import itertools
import json
import logging
import typing as t
@@ -85,16 +84,25 @@ class HelpChannels(Scheduler, commands.Cog):
async def get_available_candidate(self) -> discord.TextChannel:
"""Return a dormant channel to turn into an available channel."""
+ @staticmethod
+ def get_category_channels(category: discord.CategoryChannel) -> t.Iterable[discord.TextChannel]:
+ """Yield the channels of the `category` in an unsorted manner."""
+ # This is faster than using category.channels because the latter sorts them.
+ for channel in category.guild.channels:
+ if channel.category_id == category.id:
+ yield channel
+
def get_used_names(self) -> t.Set[str]:
"""Return channels names which are already being used."""
start_index = len("help-")
- channels = itertools.chain(
- self.available_category.channels,
- self.in_use_category.channels,
- self.dormant_category.channels,
- )
- return {c.name[start_index:] for c in channels}
+ names = set()
+ for cat in (self.available_category, self.in_use_category, self.dormant_category):
+ for channel in self.get_category_channels(cat):
+ name = channel.name[start_index:]
+ names.add(name)
+
+ return names
async def get_idle_time(self, channel: discord.TextChannel) -> int:
"""Return the time elapsed since the last message sent in the `channel`."""
|
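The same idea in plain Python, with stand-in classes rather than discord.py objects — filter the flat channel list by category id instead of using the sorted `category.channels` property:

```python
from dataclasses import dataclass

@dataclass
class Channel:
    name: str
    category_id: int

def get_category_channels(all_channels, category_id):
    """Yield the channels of one category without sorting them first."""
    for channel in all_channels:
        if channel.category_id == category_id:
            yield channel

channels = [Channel("help-ant", 1), Channel("general", 2), Channel("help-bee", 1)]
print([c.name for c in get_category_channels(channels, 1)])  # ['help-ant', 'help-bee']
```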
ci: build: images: containers: Build with docker
Kaniko has issues running under GitHub Actions | @@ -58,19 +58,29 @@ jobs:
build:
name: Build container images
runs-on: ubuntu-latest
- container: gcr.io/kaniko-project/executor:latest
strategy:
fail-fast: false
max-parallel: 40
matrix: ${{ fromJSON(inputs.manifests) }}
steps:
+ - uses: actions/checkout@v3
+ with:
+ repository: '${{ github.repository }}'
+ ref: '${{ matrix.commit }}'
+ persist-credentials: false
+ fetch-depth: 1
+ lfs: true
+ submodules: true
+ - name: Install dependencies
+ run: |
+ set -x
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+ sudo apt-get update && sudo apt-get install -y docker-ce docker-ce-cli containerd.io
- name: Build image
env:
- OWNER: "${{ github.owner }}"
- REPOSITORY: "${{ github.repository }}"
- BRANCH: "${{ matrix.branch }}"
- COMMIT: "${{ matrix.commit }}"
IMAGE_NAME: "${{ matrix.image_name }}"
DOCKERFILE: "${{ matrix.dockerfile }}"
run: |
- /kaniko/executor --dockerfile "${DOCKERFILE}" --context "git://github.com/${OWNER}/${REPOSITORY}.git#refs/heads/${BRANCH}#${COMMIT}"
+ docker build --build-arg DFFML_RELEASE=main -t "${IMAGE_NAME}" -f "${DOCKERFILE}" .
+ # TODO Communicate built container hash for SLSA3 cosign send to transparency log
|
Use quotes when installing in the README
Other shells have a hard time with square brackets.
Closes | @@ -41,7 +41,7 @@ Otherwise to get voice support you should run the following command:
.. code:: sh
# Linux/macOS
- python3 -m pip install -U discord.py[voice]
+ python3 -m pip install -U "discord.py[voice]"
# Windows
py -3 -m pip install -U discord.py[voice]
|
Update votenet config README
* Update votenet config README
modified: configs/votenet/README.md
* updated .pre-commit-config.yaml and beautified style\
* deleted unused files
* update votenet config doc
* rephrase doc and remove markdownlint | # Deep Hough Voting for 3D Object Detection in Point Clouds
## Introduction
+
We implement VoteNet and provide the result and checkpoints on ScanNet and SUNRGBD datasets.
+
```
@inproceedings{qi2019deep,
author = {Qi, Charles R and Litany, Or and He, Kaiming and Guibas, Leonidas J},
@@ -14,11 +16,25 @@ We implement VoteNet and provide the result and checkpoints on ScanNet and SUNRG
## Results
### ScanNet
+
| Backbone | Lr schd | Mem (GB) | Inf time (fps) | [email protected] |[email protected]| Download |
| :---------: | :-----: | :------: | :------------: | :----: |:----: | :------: |
| [PointNet++](./votenet_8x8_scannet-3d-18class.py) | 3x |4.1||62.90|39.91|[model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/votenet/votenet_8x8_scannet-3d-18class/votenet_8x8_scannet-3d-18class_20200620_230238-2cea9c3a.pth) | [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/votenet/votenet_8x8_scannet-3d-18class/votenet_8x8_scannet-3d-18class_20200620_230238.log.json)|
### SUNRGBD
+
| Backbone | Lr schd | Mem (GB) | Inf time (fps) | [email protected] |[email protected]| Download |
| :---------: | :-----: | :------: | :------------: | :----: |:----: | :------: |
| [PointNet++](./votenet_16x8_sunrgbd-3d-10class.py) | 3x |8.1||59.07|35.77|[model](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/votenet/votenet_16x8_sunrgbd-3d-10class/votenet_16x8_sunrgbd-3d-10class_20200620_230238-4483c0c0.pth) | [log](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/votenet/votenet_16x8_sunrgbd-3d-10class/votenet_16x8_sunrgbd-3d-10class_20200620_230238.log.json)|
+
+**Notice**: If your current mmdetection3d version >= 0.6.0, and you are using the checkpoints downloaded from the above links or using checkpoints trained with mmdetection3d version < 0.6.0, the checkpoints have to be first converted via [tools/convert_votenet_checkpoints.py](../../tools/convert_votenet_checkpoints.py):
+
+```
+python ./tools/convert_votenet_checkpoints.py ${ORIGINAL_CHECKPOINT_PATH} --out=${NEW_CHECKPOINT_PATH}
+```
+
+Then you can use the converted checkpoints following [getting_started.md](../../docs/getting_started.md).
+
+## Indeterminism
+
+Since test data preparation randomly downsamples the points, and the test script uses fixed random seeds while the random seeds of validation in training are not fixed, the test results may be slightly different from the results reported above.
|
Admin Router: make dns mock a dependency for mocker as well
The mocker uses it to resolve server_name variables; without it the tests can
stall for >60s.
@pytest.fixture(scope='session')
-def mocker_s(repo_is_ee, syslog_mock, extra_lo_ips):
+def mocker_s(repo_is_ee, syslog_mock, extra_lo_ips, dns_server_mock_s):
"""Provide a gc-ed mocker instance suitable for the repository flavour"""
if repo_is_ee:
from mocker.ee import Mocker
|
Update gmsh.py
According to the GMSH documentation, these are the available algorithms:
3D mesh algorithm (1: Delaunay, 3: Initial mesh only, 4: Frontal, 7: MMG3D, 9: R-tree, 10: HXT) | @@ -147,7 +147,7 @@ def to_volume(mesh,
import gmsh
# checks mesher selection
- if mesher_id not in [1, 4, 7, 10]:
+ if mesher_id not in [1, 3, 4, 7, 9, 10]:
raise ValueError('unavilable mesher selected!')
else:
mesher_id = int(mesher_id)
|
Fix typo in comment in cpp_extension
Summary:
From
Pull Request resolved: | @@ -104,7 +104,7 @@ COMMON_NVCC_FLAGS = [
# See comment in load_inline for more information
# The goal is to be able to call the safe version of the
-# function exactely as if it was the original one.
+# function exactly as if it was the original one.
# We need to create a pointer to this new function to give
# it to pybind later.
|
Still troubleshooting travis
I can get around the pypy issue, but now my tests are being skipped | @@ -52,11 +52,11 @@ install:
- python -c 'import awkward; print(awkward.__version__)'
- export AWKWARD_DEPLOYMENT=base
- pip install --upgrade pyOpenSSL # for deployment
- - pip install pybind11
+ - if [[ $TRAVIS_PYTHON_VERSION != pypy* ]] ; then pip install pybind11 ; fi
- ln -s ../awkward-cpp/awkward/cpp awkward/cpp
- python setup.py install
- cd awkward-cpp
- - if [[ $TRAVIS_PYTHON_VERSION != pypy* ]] ; then python setup.py install ; fi
+ - python setup.py install
- cd ..
notifications:
|
Update list.html
Fix the missing CSRF error in bootstrap 4 line editing | {% set form = list_forms[get_pk_value(row)] %}
{% if form.csrf_token %}
{{ form[c](pk=get_pk_value(row), display_value=get_value(row, c), csrf=form.csrf_token._value()) }}
+ {% elif csrf_token %}
+ {{ form[c](pk=get_pk_value(row), display_value=get_value(row, c), csrf=csrf_token()) }}
{% else %}
{{ form[c](pk=get_pk_value(row), display_value=get_value(row, c)) }}
{% endif %}
|
[query] avoid rare test collection bug
* [query] avoid rare test collection bug
Pytest sometimes uses a background thread to collect tests. That interacts badly
with asyncio. We avoid this by explicitly managing the event loop.
* fix
+import asyncio
import hail as hl
from hail.utils.java import Env, scala_object
@@ -47,6 +48,17 @@ def all_values_table_fixture():
return create_all_values_table()
+# pytest sometimes uses background threads, named "Dummy-1", to collect tests. asyncio will only
+# create an event loop when `asyncio.get_event_loop()` is called if the current thread is the main
+# thread. We therefore manually create an event loop which is used only for collecting the files.
+try:
+ old_loop = asyncio.get_event_loop()
+except RuntimeError as err:
+ assert 'There is no current event loop in thread' in err
+ old_loop = None
+loop = asyncio.new_event_loop()
+asyncio.set_event_loop(loop)
+try:
resource_dir = resource('backward_compatability')
fs = hl.current_backend().fs
try:
@@ -54,6 +66,10 @@ try:
mt_paths = [x.path for x in fs.ls(resource_dir + '/*/matrix_table/')]
finally:
hl.stop()
+finally:
+ loop.stop()
+ loop.close()
+ asyncio.set_event_loop(old_loop)
@pytest.mark.parametrize("path", mt_paths)
|
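Stripped of the hail specifics, the workaround is this generic sketch — create a private event loop for the loop-dependent setup, then restore whatever loop (if any) was installed before:

```python
import asyncio

try:
    old_loop = asyncio.get_event_loop()
except RuntimeError:
    old_loop = None

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
    # Any setup that implicitly needs a current event loop goes here.
    loop.run_until_complete(asyncio.sleep(0))
finally:
    loop.close()
    asyncio.set_event_loop(old_loop)
```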
Constants: rename conflicting channel
There were two attributes named 'announcements' on the Channels class. | @@ -78,7 +78,7 @@ class Channels(NamedTuple):
voice_chat = 412357430186344448
# Core Dev Sprint channels
- announcements = 755958119963557958
+ sprint_announcements = 755958119963557958
information = 753338352136224798
organisers = 753340132639375420
general = 753340631538991305
@@ -230,7 +230,7 @@ WHITELISTED_CHANNELS = (
Channels.voice_chat,
# Core Dev Sprint Channels
- Channels.announcements,
+ Channels.sprint_announcements,
Channels.information,
Channels.organisers,
Channels.general,
|
Use existing CNF_INCLUDE_DIR to create mysql-flavor directory
Just replaced a hardcoded value with the already defined variable.
Also removed the trailing slash from the path to pass the tests. | @@ -75,7 +75,7 @@ MYSQL_CONFIG = {operating_system.REDHAT: "/etc/my.cnf",
MYSQL_BIN_CANDIDATES = ["/usr/sbin/mysqld", "/usr/libexec/mysqld"]
MYSQL_OWNER = 'mysql'
CNF_EXT = 'cnf'
-CNF_INCLUDE_DIR = '/etc/mysql/conf.d/'
+CNF_INCLUDE_DIR = '/etc/mysql/conf.d'
CNF_MASTER = 'master-replication'
CNF_SLAVE = 'slave-replication'
@@ -764,9 +764,8 @@ class BaseMySqlApp(object):
pass
def _create_mysql_confd_dir(self):
- conf_dir = "/etc/mysql/conf.d"
- LOG.debug("Creating %s.", conf_dir)
- operating_system.create_directory(conf_dir, as_root=True)
+ LOG.debug("Creating %s.", CNF_INCLUDE_DIR)
+ operating_system.create_directory(CNF_INCLUDE_DIR, as_root=True)
def _enable_mysql_on_boot(self):
LOG.debug("Enabling MySQL on boot.")
|
Update readme for vae
formatting issues addressed | # Pyprobml VAE
Compare_results of different VAEs : <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/scripts/vae/compare_results.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
+
VAE tricks and what the different VAE try to address : <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/scripts/vae/vae_tricks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
A collection of Variational AutoEncoders (VAEs) implemented in pytorch with focus on reproducibility and creating reusable blocks that can be used in any project. The aim of this project is to provide
|
Add service for zun-wsproxy console access
This adds an HAProxy instance for the Zun wsproxy service which
allows containers' console output to be streamed to the Horizon
dashboard. | @@ -531,6 +531,22 @@ haproxy_zun_api_service:
- "httpchk GET /v1 HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"
haproxy_service_enabled: "{{ groups['zun_api'] is defined and groups['zun_api'] | length > 0 }}"
+haproxy_zun_console_service:
+ haproxy_service_name: zun_console
+ haproxy_backend_nodes: "{{ groups['zun_api'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_ssl_all_vips: "{{ haproxy_ssl_all_vips }}"
+ haproxy_port: 6784
+ haproxy_balance_type: http
+ haproxy_timeout_client: 60m
+ haproxy_timeout_server: 60m
+ haproxy_balance_alg: source
+ haproxy_backend_options:
+ - "httpchk HEAD / HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"
+ haproxy_backend_httpcheck_options:
+ - "expect status 405"
+ haproxy_service_enabled: "{{ groups['zun_api'] is defined and groups['zun_api'] | length > 0 }}"
+
haproxy_default_services:
- service: "{{ haproxy_adjutant_api_service }}"
- service: "{{ haproxy_aodh_api_service }}"
@@ -573,3 +589,4 @@ haproxy_default_services:
- service: "{{ haproxy_tacker_service }}"
- service: "{{ haproxy_trove_service }}"
- service: "{{ haproxy_zun_api_service }}"
+ - service: "{{ haproxy_zun_console_service }}"
|
Resolve add-node error on aws (#960)
The error "Ensure clusterid is set along with the cloudprovider" occurs
at add-node playbook runtime.
This happens because node-setup.yaml is called without the "openshift_clusterid"
variable set.
Setting the openshift_clusterid variable avoids this issue.
openshift_hosted_registry_replicas: 3
openshift_node_local_quota_per_fsgroup: 512Mi
+ openshift_clusterid: "{{ stack_name }}"
openshift_master_cluster_method: native
openshift_cloudprovider_kind: aws
openshift_master_cluster_hostname: "internal-openshift-master.{{ public_hosted_zone }}"
|
add download-backup and upload-backup
fixes | @@ -1925,14 +1925,22 @@ class Model:
return await app_facade.DestroyUnits(unit_names=list(unit_names))
destroy_units = destroy_unit
- def download_backup(self, archive_id):
+ async def download_backup(self, archive_id):
"""Download a backup archive file.
:param str archive_id: The id of the archive to download
:return str: Path to the archive file
"""
- raise NotImplementedError()
+
+ external_cmd = ['juju', 'download-backup', archive_id]
+ loop = asyncio.get_running_loop()
+ process = await asyncio.create_subprocess_exec(
+ *external_cmd, loop=loop, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
+ stdout, stderr = await process.communicate()
+ if process.returncode != 0:
+ raise JujuError("command failed: %s with %s" % (" ".join(external_cmd), stderr.decode()))
+ return stdout.decode('utf-8').strip()
def enable_ha(
self, num_controllers=0, constraints=None, series=None, to=None):
@@ -2308,13 +2316,22 @@ class Model:
"""
raise NotImplementedError()
- def upload_backup(self, archive_path):
+ async def upload_backup(self, archive_path):
"""Store a backup archive remotely in Juju.
:param str archive_path: Path to local archive
+ :return str created backup ID
"""
- raise NotImplementedError()
+
+ external_cmd = ['juju', 'upload-backup', archive_path]
+ loop = asyncio.get_running_loop()
+ process = await asyncio.create_subprocess_exec(
+ *external_cmd, loop=loop, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
+ stdout, stderr = await process.communicate()
+ if process.returncode != 0:
+ raise JujuError("command failed: %s with %s" % (" ".join(external_cmd), stderr.decode()))
+ return stdout.decode('utf-8').split()[-1]
async def get_metrics(self, *tags):
"""Retrieve metrics.
|
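A generic, runnable sketch of the subprocess pattern used by both new methods (the command here is just a placeholder, not a Juju CLI call):

```python
import asyncio

async def run_cli(*cmd):
    # Spawn the external command and capture its output.
    proc = await asyncio.create_subprocess_exec(
        *cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    stdout, stderr = await proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError(f"command failed: {' '.join(cmd)}: {stderr.decode()}")
    return stdout.decode("utf-8").strip()

print(asyncio.run(run_cli("echo", "hello")))
```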
Securitycenter: overlooked synth changes.
The *effects* were merged in but not the synth changes themselves. | @@ -29,22 +29,9 @@ s.move(
]
)
-# Fix security_center_client.py docstrings.
+# Add encoding header to protoc-generated files.
+# See: https://github.com/googleapis/gapic-generator/issues/2097
s.replace(
- "google/cloud/securitycenter_v1beta1/gapic/security_center_client.py",
- "::\n\n\s+(compare_duration, but present at [a-z]+_time.)",
- " \g<1>"
-)
-
-s.replace(
- "google/cloud/securitycenter_v1beta1/gapic/security_center_client.py",
- "::\n\n\s+(compare_duration, but not present at [a-z]+_time.)",
- " \g<1>"
-)
-
-s.replace(
- "google/cloud/securitycenter_v1beta1/gapic/security_center_client.py",
- "(^\s+)::\n\n\s+(start and the end of the time period defined by\n)"
- "\s+(compare_duration and [a-z]+_time.)",
- "\g<1> \g<2>\g<1> \g<3>"
-)
+ '**/proto/*_pb2.py',
+ r"(^.*$\n)*",
+ r"# -*- coding: utf-8 -*-\n\g<0>")
|
Oops. Missed these when juggling locators around.
There's no longer a locators_48, so this bumps all versions up by
one (48->49, 49->50) | @@ -10,7 +10,7 @@ class TestLocators(unittest.TestCase):
@mock.patch("cumulusci.robotframework.Salesforce.Salesforce.get_latest_api_version")
def test_locators_in_robot_context(self, get_latest_api_version):
"""Verify we can get locators for the current org api version"""
- get_latest_api_version.return_value = 49.0
+ get_latest_api_version.return_value = 50.0
# This instantiates the robot library, mimicking a robot library import.
# We've mocked out the code that would otherwise throw an error since
@@ -18,7 +18,7 @@ class TestLocators(unittest.TestCase):
# return the latest version of the locators.
sf = Salesforce()
- expected = "cumulusci.robotframework.locators_49"
+ expected = "cumulusci.robotframework.locators_50"
actual = sf.locators_module.__name__
message = "expected to load '{}', actually loaded '{}'".format(expected, actual)
self.assertEqual(expected, actual, message)
@@ -37,32 +37,32 @@ class TestLocators(unittest.TestCase):
# we expect the library to still be instantiated, but with the latest
# version of the locators.
sf = Salesforce()
- expected = "cumulusci.robotframework.locators_49"
+ expected = "cumulusci.robotframework.locators_50"
actual = sf.locators_module.__name__
message = "expected to load '{}', actually loaded '{}'".format(expected, actual)
self.assertEqual(expected, actual, message)
- def test_locators_49(self):
- """Verify that locators_49 is a superset of the locators_48
+ def test_locators_50(self):
+ """Verify that locators_50 is a superset of the locators_49
This test is far from perfect, but it should at least flag a
catastrophic error in how locators for a version that augments
the locators from previous versions.
Note: this test assumes that locators_49 doesn't delete any of the
- keys from 48.
+ keys from 49.
"""
- import cumulusci.robotframework.locators_48 as locators_48
import cumulusci.robotframework.locators_49 as locators_49
+ import cumulusci.robotframework.locators_50 as locators_50
- keys_48 = set(locators_48.lex_locators)
keys_49 = set(locators_49.lex_locators)
+ keys_50 = set(locators_50.lex_locators)
self.assertNotEqual(
- id(locators_48.lex_locators),
id(locators_49.lex_locators),
- "locators_48.lex_locators and locators_49.lex_locators are the same object",
+ id(locators_50.lex_locators),
+ "locators_49.lex_locators and locators_50.lex_locators are the same object",
)
- self.assertTrue(len(keys_48) > 0)
- self.assertTrue(keys_48.issubset(keys_49))
+ self.assertTrue(len(keys_49) > 0)
+ self.assertTrue(keys_49.issubset(keys_50))
|
Remove docker network after acceptance tests
This change will configure tox to remove the docker network after every
acceptance test, cleaning up the environment.
dockeritest: -e ITEST_PYTHON_FACTOR={env:ITEST_PYTHON_FACTOR} \
dockeritest: -e ACCEPTANCE_TAGS={env:ACCEPTANCE_TAGS} \
dockeritest: itest /scripts/run_tests.sh; exit_status=$?; \
- dockeritest: docker-compose stop; exit $exit_status"
+ dockeritest: docker-compose stop; \
+ dockeritest: docker network rm kafkautils_default; \
+ dockeritest: exit $exit_status"
[testenv:coverage]
deps =
|
[MinecraftData] fix error on unload
RuntimeWarning: coroutine 'ClientSession.close' was never awaited | @@ -20,7 +20,7 @@ class MinecraftData(commands.Cog):
self.session = aiohttp.ClientSession(loop=self.bot.loop)
def __unload(self):
- self.session.close()
+ self.bot.loop.create_task(self.session.close())
@commands.group(name="minecraft", aliases=["mc"])
async def minecraft(self, ctx):
|
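The underlying rule, in a minimal aiohttp sketch: `ClientSession.close()` is a coroutine, so it must either be awaited or scheduled on the loop, never called bare from synchronous teardown code:

```python
import asyncio
import aiohttp

async def main():
    session = aiohttp.ClientSession()
    try:
        pass  # ... use the session ...
    finally:
        await session.close()  # awaited: no "never awaited" warning

asyncio.run(main())

# In a sync unload hook (as in the cog above), schedule it instead:
#     bot.loop.create_task(session.close())
```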
Add stronger typing to gradient accumulation scheduler callback
* Update gradient_accumulation_scheduler.py
add types for gradient accumulation scheduler callback
* Update gradient_accumulation_scheduler.py | @@ -21,6 +21,8 @@ Trainer also calls ``optimizer.step()`` for the last indivisible step number.
"""
+from typing import Dict
+
from pytorch_lightning.callbacks.base import Callback
@@ -44,7 +46,7 @@ class GradientAccumulationScheduler(Callback):
>>> trainer = Trainer(accumulate_grad_batches={5: 2})
"""
- def __init__(self, scheduling: dict):
+ def __init__(self, scheduling: Dict[int, int]):
super().__init__()
if not scheduling: # empty dict error
@@ -56,7 +58,9 @@ class GradientAccumulationScheduler(Callback):
minimal_epoch = min(scheduling.keys())
if minimal_epoch < 0:
- raise IndexError(f"Epochs indexing from 1, epoch {minimal_epoch} cannot be interpreted correct")
+ raise IndexError(
+ f"Epochs indexing from 1, epoch {minimal_epoch} cannot be interpreted correct"
+ )
if minimal_epoch != 0: # if user didnt define first epoch accumulation factor
scheduling.update({0: 1})
|
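A hedged usage sketch of the callback with the stricter typing — the schedule maps an epoch index to an accumulation factor (the values below are arbitrary):

```python
from typing import Dict

from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import GradientAccumulationScheduler

# Accumulate 1 batch from epoch 0, 2 from epoch 4, 4 from epoch 8.
scheduling: Dict[int, int] = {0: 1, 4: 2, 8: 4}
trainer = Trainer(callbacks=[GradientAccumulationScheduler(scheduling=scheduling)])
```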
WordPress Site Health Exclusion Rule
This adds an exclusion rule for the WordPress site health page, which otherwise triggers the PHP and SQL leak rules present in RESPONSE-951-DATA-LEAKAGES-SQL.conf and RESPONSE-953-DATA-LEAKAGES-PHP.conf.
Implements | @@ -713,6 +713,18 @@ SecRule REQUEST_FILENAME "@endsWith /wp-admin/edit.php" \
ctl:ruleRemoveTargetByTag=OWASP_CRS;ARGS:s,\
ver:'OWASP_CRS/3.3.0'"
+# Wordpress Site Health
+# The wordpress site health page makes use of embedded SQL/PHP
+# which triggers PHP/MySQL leak rules.
+SecRule REQUEST_FILENAME "@rx /wp-admin/site-health.php" \
+ "id:9002910,\
+ phase:2,\
+ pass,\
+ t:none,\
+ nolog,\
+ ctl:ruleRemoveById=951220,\
+ ctl:ruleRemoveById=953110,\
+ ver:'OWASP_CRS/3.3.0'"
#
# [ Helpers ]
|
Update authentication.md
import 'Starlette' | @@ -5,6 +5,7 @@ interfaces will be available in your endpoints.
```python
+from starlette.applications import Starlette
from starlette.authentication import (
AuthenticationBackend, AuthenticationError, SimpleUser, UnauthenticatedUser,
AuthCredentials
|
Do not explicitly specify filesystem type when mounting
Resolves | @@ -201,9 +201,8 @@ class Mounter(object):
if device.is_mounted:
self._log.info(_('not mounting {0}: already mounted', device))
yield Return(True)
- fstype = str(device.id_type)
options = self._mount_options(device)
- kwargs = dict(fstype=fstype, options=options)
+ kwargs = dict(options=options)
self._log.debug(_('mounting {0} with {1}', device, kwargs))
mount_path = yield device.mount(**kwargs)
self._log.info(_('mounted {0} on {1}', device, mount_path))
|
Added nullptr check for pthreadpool_get_threads_count
Summary:
We get a seg fault without this when using XNNPACK.
Pull Request resolved: | @@ -28,7 +28,19 @@ void pthreadpool_compute_1d(
}
size_t pthreadpool_get_threads_count(pthreadpool_t threadpool) {
+ // The current fix only useful when XNNPACK calls pthreadpool_get_threads_count with nullptr.
+ if (threadpool == nullptr) {
+ return 1;
+ }
return reinterpret_cast<caffe2::ThreadPool*>(threadpool)->getNumThreads();
+ // TODO: Future fix: If we keep maintaining two different threadpools.
+ // Old C2 and new one for XNNPACK, then the we have two different pthreadpool pointer
+ // types. One is caffe2::Thredpool*, the other is pthreadpool* (pthreadpool_new_if_impl.c)
+ // XNNPACK calls pthreadpool_get_threads_count during op setup using pthreadpool*, and
+ // uses _parallelize_ interface for for actual work.
+ // While NNPACK uses caffe2::Threadpool*.
+ // Thus if pthreadpool_get_threads_count is getting called from XNNPACK we cannot
+ // reinterpret_cast it to ThreadPool. It will seg fault or worse will have unedfined behavior.
}
pthreadpool_t pthreadpool_create(size_t threads_count) {
|
Rename Reaction.custom_emoji to Reaction.is_custom_emoji
This legacy attribute was apparently never changed to be consistent
with the rest of the library | @@ -73,8 +73,7 @@ class Reaction:
self.count = data.get('count', 1)
self.me = data.get('me')
- @property
- def custom_emoji(self):
+ def is_custom_emoji(self):
""":class:`bool`: If this is a custom emoji."""
return not isinstance(self.emoji, str)
@@ -190,7 +189,7 @@ class Reaction:
if the member has left the guild.
"""
- if self.custom_emoji:
+ if not isinstance(self.emoji, str):
emoji = f'{self.emoji.name}:{self.emoji.id}'
else:
emoji = self.emoji
|
Use more efficient approximation for commcare.fix_user_types.unknown_user_count
as suggested in | @@ -4,6 +4,7 @@ from celery.schedules import crontab
from celery.task import periodic_task
from corehq.apps.es import FormES
+from corehq.apps.es.aggregations import CardinalityAggregation
from corehq.form_processor.interfaces.dbaccessors import FormAccessors
from corehq.form_processor.utils.xform import resave_form
from corehq.pillows.utils import get_user_type_deep_cache_for_unknown_users
@@ -16,7 +17,7 @@ from corehq.util.quickcache import quickcache
@quickcache([], timeout=9 * 60) # Protect from many runs after recovering from a backlog
def send_unknown_user_type_stats():
datadog_gauge('commcare.fix_user_types.unknown_user_count',
- len(_get_unknown_user_type_user_ids()))
+ _get_unknown_user_type_user_ids_approx_count())
datadog_gauge('commcare.fix_user_types.unknown_user_form_count',
FormES().user_type('unknown').count())
@@ -43,3 +44,8 @@ def resave_es_forms_with_unknown_user_type(user_id):
def _get_unknown_user_type_user_ids():
return FormES().user_type('unknown').user_aggregation().run().aggregations.user.keys
+
+
+def _get_unknown_user_type_user_ids_approx_count():
+ agg = CardinalityAggregation('users_count', 'form.meta.userID')
+ return FormES().user_type('unknown').aggregation(agg).run().aggregations.users_count.value
|
Distutils: Fix, directory handling wrong for more than one item to build
* Change back at the end of things, not inside the loop.
* Use absolute path, so "chdir" doesn't affect anything it's not
supposed to. | @@ -158,7 +158,7 @@ class build(distutils.command.build.build):
os.chdir(build_lib)
# Search in the build directory preferably.
- setMainScriptDirectory(".")
+ setMainScriptDirectory(os.path.abspath(old_dir))
to_builds = self._find_to_build()
for to_build in to_builds:
@@ -235,10 +235,10 @@ class build(distutils.command.build.build):
if fullpath.lower().endswith((".py", ".pyw", ".pyc", ".pyo")):
os.unlink(fullpath)
- os.chdir(old_dir)
-
self.build_lib = build_lib
+ os.chdir(old_dir)
+
# Required by distutils, used as command name, pylint: disable=invalid-name
class install(distutils.command.install.install):
|
Update README.rst
Added roadmap link | python-slackclient
===================
-A basic client for Slack.com, which can optionally connect to the Slack Real Time Messaging (RTM) API.
+A client for Slack, which supports the Slack Web API and Real Time Messaging (RTM) API.
|build-status| |windows-build-status| |codecov| |doc-status| |pypi-version| |python-version|
@@ -48,6 +48,9 @@ Documentation
For comprehensive method information and usage examples, see the `full documentation <http://slackapi.github.io/python-slackclient>`_.
+
+You may also review our `Development Roadmap <https://github.com/slackapi/python-slackclient/wiki/Slack-Python-SDK-Roadmap>`_ in the project wiki.
+
Getting Help
-------------
|
Update tutorial.md
Changes to load gen instructions | @@ -73,7 +73,14 @@ In a new browser tab, navigate to the Hipster Shop URL, where you can "purchase"
## Run the load generator
-In another browser tab, navigate to the load-generator URL, from which you can simulate users interacting with the application to generate traffic. For this application, values like 100 total users with a spawn rate of 2 users per second are reasonable. Fill in the **Host** field with the "Hipster shop web address" from the installation stage if it isn't prepopulated. Click the **Start swarming** button to begin generating traffic to the site.
+Cloud Ops Sandbox comes with [Locust load generator](https://locust.io/), to simulate users traffic.
+
+- In another browser tab, navigate to the load-generator URL(from the installation stage if it isn't populated).
+- Enter the number of **users** and **spawn rate**. For this application, we recommend to test 100 total users with a spawn rate of 2 users per second.
+- Fill in the **Host** field with the "Hipster shop web address" from the installation stage if it isn't populated.
+- Click the **Start swarming** button to begin generating traffic to the site.
+
+This will produce traffic on the store from a loadgenerator pod:

|
Update pinverse doc for recent commit
Summary: Pull Request resolved: | @@ -6353,12 +6353,12 @@ Please look at `Moore-Penrose inverse`_ for more details
See :meth:`~torch.svd` for more details.
Arguments:
- input (Tensor): The input 2D tensor of dimensions :math:`m \times n`
+ input (Tensor): The input tensor of size :math:`(*, m, n)` where :math:`*` is zero or more batch dimensions
rcond (float): A floating point value to determine the cutoff for small singular values.
Default: 1e-15
Returns:
- The pseudo-inverse of :attr:`input` of dimensions :math:`n \times m`
+ The pseudo-inverse of :attr:`input` of dimensions :math:`(*, n, m)`
Example::
@@ -6373,6 +6373,17 @@ Example::
[-0.7124, -0.1631, -0.2272],
[ 0.1356, 0.3933, -0.5023],
[-0.0308, -0.1725, -0.5216]])
+ >>> # Batched pinverse example
+ >>> a = torch.randn(2,6,3)
+ >>> b = torch.pinverse(a)
+ >>> torch.matmul(b, a)
+ tensor([[[ 1.0000e+00, 1.6391e-07, -1.1548e-07],
+ [ 8.3121e-08, 1.0000e+00, -2.7567e-07],
+ [ 3.5390e-08, 1.4901e-08, 1.0000e+00]],
+
+ [[ 1.0000e+00, -8.9407e-08, 2.9802e-08],
+ [-2.2352e-07, 1.0000e+00, 1.1921e-07],
+ [ 0.0000e+00, 8.9407e-08, 1.0000e+00]]])
.. _Moore-Penrose inverse: https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse
|
CompiledType.is_struct_type: new class attribute
TN: | @@ -257,6 +257,11 @@ class CompiledType(object):
* convert_to_storage_expr.
"""
+ is_struct_type = False
+ """
+ Whether this type is a subclass of Struct.
+ """
+
is_ast_node = False
"""
Whether this type represents an AST node type.
@@ -1869,6 +1874,8 @@ class Struct(CompiledType):
is_ptr = False
null_allowed = True
+ is_struct_type = True
+
is_env_metadata = False
"""
Whether this struct designates the env metadata struct.
|
Added ceiling to test_sympyissue_21651
// edited by skirpichev
* Minor formatting fixes
* drop redundant parens 2**(-x) -> 2**-x | from diofant import (And, Catalan, Derivative, E, Eq, EulerGamma, Float,
Function, I, Integer, Integral, KroneckerDelta, Le, Mod,
Ne, Or, Piecewise, Product, Rational, Sum, Symbol,
- binomial, cos, exp, factorial, floor, gamma, harmonic,
- log, lowergamma, nan, oo, pi, product, simplify, sin,
- sqrt, summation, symbols, sympify, zeta)
+ binomial, ceiling, cos, exp, factorial, floor, gamma,
+ harmonic, log, lowergamma, nan, oo, pi, product, simplify,
+ sin, sqrt, summation, symbols, sympify, zeta)
from diofant.abc import a, b, c, d, k, m, x, y, z
from diofant.concrete.summations import telescopic
@@ -923,10 +923,12 @@ def test_sympyissue_21557():
def test_sympyissue_21651():
- a = Sum(floor(2*2**(-n)), (n, 1, 2))
- b = floor(2*2**(-1)) + floor(2*2**(-2))
-
+ a = Sum(floor(2*2**-n), (n, 1, 2))
+ b = floor(2*2**-1) + floor(2*2**-2)
assert a.doit() == b.doit()
+ c = Sum(ceiling(2*2**-n), (n, 1, 2))
+ d = ceiling(2*2**-1) + ceiling(2*2**-2)
+ assert c.doit() == d.doit()
@pytest.mark.timeout(10)
|
cabana: display warning if failed to load dbc from clipboard
display warning if failed to load from clipboard | @@ -238,7 +238,11 @@ void MainWindow::loadDBCFromClipboard() {
remindSaveChanges();
QString dbc_str = QGuiApplication::clipboard()->text();
dbc()->open("from_clipboard.dbc", dbc_str);
+ if (dbc()->messages().size() > 0) {
QMessageBox::information(this, tr("Load From Clipboard"), tr("DBC Successfully Loaded!"));
+ } else {
+ QMessageBox::warning(this, tr("Load From Clipboard"), tr("Failed to parse dbc from clipboard!\nMake sure that you paste the text with correct format."));
+ }
}
void MainWindow::loadDBCFromFingerprint() {
|
GCE: configure the root volume size
Add the support to configure the root volume of GCE instances. If
CLOUD_RV exists and is not 0, set the root volume size to this value. | @@ -951,6 +951,11 @@ class GceCmds(CommonCloudFunctions) :
obj_attr_list["cloud_rv_type"] = "pd-standard"
_root_type = "zones/" + obj_attr_list["vmc_name"] + "/diskTypes/" + obj_attr_list["cloud_rv_type"]
+ if "cloud_rv" in obj_attr_list and obj_attr_list["cloud_rv"] != "0":
+ _rv_size = obj_attr_list["cloud_rv"]
+ else:
+ _rv_size = None
+
_config = {
'name': obj_attr_list["cloud_vm_name"],
'machineType': _machine_type,
@@ -963,6 +968,7 @@ class GceCmds(CommonCloudFunctions) :
'initializeParams': {
'sourceImage': _source_disk_image,
'diskType' : _root_type,
+ 'diskSizeGb': _rv_size,
}
}
],
|