the-stack_0_10704 | import cv2
from CameraOrMarker import *
from scipy.linalg import sqrtm
import numpy as np
class SolvePnpInputs:
def __init__(self, camera_not_marker, corners_f_images, t_world_xxx_cvecs):
self.camera_not_marker = camera_not_marker # t_world_xxx_cvecs are for a camera
self.corners_f_images = corners_f_images
self.t_world_xxx_cvecs = t_world_xxx_cvecs
class CameraMarkerCalculations:
def __init__(self, scenario):
self.sc = scenario
# Constants for Sigma Point generation
self.n0 = 8 # 8 coordinates of the corner points
self.n1 = 14 # 8 + 6
self.alpha = 1.0
self.betta = 2.0
self.k0 = 3.0 - self.n0
self.k1 = 3.0 - self.n1
self.lam0 = self.alpha ** 2 * (self.n0 + self.k0) - self.n0
self.lam1 = self.alpha ** 2 * (self.n1 + self.k1) - self.n1
self.gamma0 = np.sqrt(self.n0 + self.lam0)
self.gamma1 = np.sqrt(self.n1 + self.lam1)
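# Note: lam = alpha^2 * (n + kappa) - n and gamma = sqrt(n + lam) match the
# standard unscented-transform scaling (Wan & van der Merwe); the sigma points
# below are spread as mean +/- gamma * columns of sqrt(P). The matching mean
# weights would be (shown only as a hedged reference, not used by this code):
#   w0 = self.lam0 / (self.n0 + self.lam0)      # centre sigma point
#   wi = 1.0 / (2.0 * (self.n0 + self.lam0))    # each of the 2*n0 spread points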
def _get_corners_f_marker(self):
m2 = self.sc.marker_len / 2.0
marker_corners_f_marker = np.array([[-m2, m2, 0.], [m2, m2, 0.], [m2, -m2, 0.], [-m2, -m2, 0.]]).T
return marker_corners_f_marker
def _project_points(self, camera, marker):
# get corners in the marker frame
corners_f_marker = self._get_corners_f_marker().T
# get the transforms
t_world_camera = camera.t_world_xxx
t_world_marker = marker.t_world_xxx
# calculate rvec, tvec from t_camera_marker
t_camera_marker = t_world_camera.as_inverse() \
.as_right_combined(t_world_marker)
rvec, tvec = t_camera_marker.as_rvec_tvec()
# project the points using t_camera_marker
corners_f_image, _ = cv2.projectPoints(corners_f_marker,
rvec, tvec,
self.sc.camera_matrix, self.sc.dist_coeffs)
return corners_f_image.reshape(8, 1)
def generate_corners_f_image(self, camera, marker):
assert camera.camera_not_marker
assert not marker.camera_not_marker
return self._project_points(camera, marker)
def _generate_sigma_points(self, com, corners_f_image):
# TODO figure out real sigma points.
com_cvec = com.t_world_xxx.as_cvec()
gamma = self.gamma0 if com.simulated_not_derived else self.gamma1
corners_f_images = np.zeros([8, 17] if com.simulated_not_derived else [8, 29])
t_world_xxx_cvecs = np.zeros([6, 17] if com.simulated_not_derived else [6, 29])
var = self.sc.std_corners_f_image
f = corners_f_image * np.ones([8, 17])
f[:, 1:9] += np.eye(8) * var * gamma
f[:, 9:17] -= np.eye(8) * var * gamma
corners_f_images[:, :17] = f
t_world_xxx_cvecs[:, :17] = com_cvec * np.ones([6, 17])
if not com.simulated_not_derived:
s = sqrtm(com.cov)
# var = np.sqrt(np.diag(com.cov).reshape(6, 1))
# var = np.diag(com.cov).reshape(6, 1)
f = com_cvec * np.ones([6, 12])
f[:, :6] += s * gamma
f[:, 6:12] -= s * gamma
corners_f_images[:, 17:29] = corners_f_image * np.ones([8, 12])
t_world_xxx_cvecs[:, 17:29] = f
return SolvePnpInputs(com.camera_not_marker, corners_f_images, t_world_xxx_cvecs)
def generate_solve_pnp_inputs(self, camera_not_marker, camera, marker):
assert camera.camera_not_marker
assert not marker.camera_not_marker
com = camera if camera_not_marker else marker
# Get corners using the poses of the camera and marker
corners_f_image = self._project_points(camera, marker)
if self.sc.use_sigma_points:
return self._generate_sigma_points(com, corners_f_image)
# Make many of these corner bundles
corners_f_images = corners_f_image + \
np.random.normal(0.,
self.sc.std_corners_f_image ** 2,
size=[8, self.sc.num_generated_samples])
# Now get many cvecs
if com.simulated_not_derived:
t_world_xxx_cvec = com.t_world_xxx.as_cvec()
t_world_xxx_cvecs = np.tile(t_world_xxx_cvec, (1, self.sc.num_generated_samples))
elif self.sc.use_dist_params:
t_world_xxx_cvecs = np.random.multivariate_normal(com.mu[:, 0], com.cov, self.sc.num_generated_samples).T
else:
assert com.samples is not None
t_world_xxx_cvecs = com.samples
return SolvePnpInputs(camera_not_marker, corners_f_images, t_world_xxx_cvecs)
def solve_pnp(self, inputs):
t_world_xxx_cvecs = np.zeros(inputs.t_world_xxx_cvecs.shape)
# get corners in the marker frame
corners_f_marker = self._get_corners_f_marker().T
for i in range(t_world_xxx_cvecs.shape[1]):
# Given the location of the corners in the image, find the pose of the marker in the camera frame.
ret, rvecs, tvecs = cv2.solvePnP(corners_f_marker, inputs.corners_f_images[:, i].reshape(4, 2),
self.sc.camera_matrix, self.sc.dist_coeffs)
t_camera_marker = tf.Transformation.from_rodrigues(rvecs[:, 0], translation=tvecs[:, 0])
input_t_world_xxx = tf.Transformation.from_cvec(inputs.t_world_xxx_cvecs[:, i])
t_camera_marker_factor = t_camera_marker
if not inputs.camera_not_marker:
t_camera_marker_factor = t_camera_marker_factor.as_inverse()
output_t_world_xxx = input_t_world_xxx.as_right_combined(t_camera_marker_factor)
t_world_xxx_cvecs[:, i] = output_t_world_xxx.as_cvec().T
mu = np.mean(t_world_xxx_cvecs, axis=1).reshape(6, 1)
cov = np.cov(t_world_xxx_cvecs)
return CameraOrMarker.from_mu(not inputs.camera_not_marker, mu, cov, t_world_xxx_cvecs)
|
the-stack_0_10705 | # -*- coding: utf-8 -*-
# Copyright (C) 2017 - 2018 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and University of
# Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2009 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
# This is an example of how to import an SBML file,
# create a report for a time course simulation
# and run a time course simulation
#
from COPASI import *
import sys
# create a datamodel
try:
dataModel = CRootContainer.addDatamodel()
except:
dataModel = CCopasiRootContainer.addDatamodel()
def main(args):
# the only argument to the main routine should be the name of an SBML file
if len(args) != 1:
sys.stderr.write("Usage: example3 SBMLFILE\n")
return 1
filename = args[0]
try:
# load the model
if not dataModel.importSBML(filename):
print("Couldn't load {0}:".format(filename))
print(CCopasiMessage.getAllMessageText())
except:
sys.stderr.write("Error while importing the model from file named \"" + filename + "\".\n")
return 1
model = dataModel.getModel()
assert model is not None
# get the trajectory task object
trajectoryTask = dataModel.getTask("Time-Course")
assert (isinstance(trajectoryTask, CTrajectoryTask))
# run a deterministic time course
trajectoryTask.setMethodType(CTaskEnum.Method_deterministic)
# activate the task so that it will be run when the model is saved
# and passed to CopasiSE
trajectoryTask.setScheduled(True)
# create a new report that captures the time course result
report = create_report(model)
# set the report for the task
trajectoryTask.getReport().setReportDefinition(report)
# set the output filename
trajectoryTask.getReport().setTarget("example3.txt")
# don't append output if the file exists, but overwrite the file
trajectoryTask.getReport().setAppend(False)
# get the problem for the task to set some parameters
problem = trajectoryTask.getProblem()
assert (isinstance(problem, CTrajectoryProblem))
# simulate 100 steps
problem.setStepNumber(100)
# start at time 0
dataModel.getModel().setInitialTime(0.0)
# simulate a duration of 10 time units
problem.setDuration(10)
# tell the problem to actually generate time series data
problem.setTimeSeriesRequested(True)
# tell the problem that we want exactly 100 simulation steps (not automatically controlled)
problem.setAutomaticStepSize(False)
# tell the problem that we don't want additional output points for event assignments
problem.setOutputEvent(False)
# set some parameters for the LSODA method through the method
method = trajectoryTask.getMethod()
parameter = method.getParameter("Absolute Tolerance")
assert parameter is not None
assert parameter.getType() == CCopasiParameter.Type_UDOUBLE
parameter.setValue(1.0e-12)
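# Other numeric LSODA settings can be adjusted the same way (illustrative
# sketch; assumes the named parameter exists on this method):
# rel_tol = method.getParameter("Relative Tolerance")
# if rel_tol is not None:
#     rel_tol.setValue(1.0e-9)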
try:
# now we run the actual trajectory
result = trajectoryTask.process(True)
except:
sys.stderr.write("Error. Running the time course simulation failed.\n")
# check if there are additional error messages
if CCopasiMessage.size() > 0:
# print the messages in chronological order
sys.stderr.write(CCopasiMessage.getAllMessageText(True))
return 1
if not result:
sys.stderr.write("Error. Running the time course simulation failed.\n" )
# check if there are additional error messages
if CCopasiMessage.size() > 0:
# print the messages in chronological order
sys.stderr.write(CCopasiMessage.getAllMessageText(True))
return 1
# look at the timeseries
print_results(trajectoryTask)
def print_results(trajectoryTask):
timeSeries = trajectoryTask.getTimeSeries()
# we simulated 100 steps; including the initial state, this should be
# 101 steps in the time series
assert timeSeries.getRecordedSteps() == 101
print ("The time series consists of {0} steps.".format(timeSeries.getRecordedSteps()))
print ("Each step contains {0} variables.".format(timeSeries.getNumVariables()))
print ("\nThe final state is: ")
iMax = timeSeries.getNumVariables()
lastIndex = timeSeries.getRecordedSteps() - 1
for i in range(0, iMax):
# here we get the particle number (at least for the species)
# the unit of the other variables may not be particle numbers
# the concentration data can be acquired with getConcentrationData
print (" {0}: {1}".format(timeSeries.getTitle(i), timeSeries.getData(lastIndex, i)))
# the CTimeSeries class now has some new methods to get all variable titles
# as a python list (getTitles())
# and methods to get the complete time course data for a certain variable based on
# the variables index or the corresponding model object.
# E.g. to get the particle numbers of the second variable as a python list
# you can use getDataForIndex(1) and to get the concentration data you use
# getConcentrationDataForIndex(1)
# To get the complete particle number data for the second metabolite of the model
# you can use getDataForObject(model.getMetabolite(1)) and to get the concentration
# data you use getConcentrationDataForObject.
# print timeSeries.getTitles()
# print timeSeries.getDataForIndex(1)
# print timeSeries.getDataForObject(model)
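# A py3-style sketch of those accessors (illustrative, kept commented out;
# getDataForObject would additionally need the model object in scope):
# print(timeSeries.getTitles())
# print(timeSeries.getConcentrationDataForIndex(1))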
def create_report(model):
# create a report with the correct filename and all the species against
# time.
reports = dataModel.getReportDefinitionList()
# create a report definition object
report = reports.createReportDefinition("Report", "Output for timecourse")
# set the task type for the report definition to timecourse
report.setTaskType(CTaskEnum.Task_timeCourse)
# we don't want a table
report.setIsTable(False)
# the entries in the output should be separated by a ", "
report.setSeparator(CCopasiReportSeparator(", "))
# we need a handle to the header and the body
# the header will display the ids of the metabolites and "time" for
# the first column
# the body will contain the actual timecourse data
header = report.getHeaderAddr()
body = report.getBodyAddr()
body.push_back(
CRegisteredCommonName(CCommonName(dataModel.getModel().getCN().getString() + ",Reference=Time").getString()))
body.push_back(CRegisteredCommonName(report.getSeparator().getCN().getString()))
header.push_back(CRegisteredCommonName(CDataString("time").getCN().getString()))
header.push_back(CRegisteredCommonName(report.getSeparator().getCN().getString()))
iMax = model.getMetabolites().size()
for i in range(0, iMax):
metab = model.getMetabolite(i)
assert metab is not None
# we don't want output for FIXED metabolites right now
if metab.getStatus() != CModelEntity.Status_FIXED:
# we want the concentration in the output
# alternatively, we could use "Reference=Amount" to get the
# particle number
body.push_back(
CRegisteredCommonName(metab.getObject(CCommonName("Reference=Concentration")).getCN().getString()))
# add the corresponding id to the header
header.push_back(CRegisteredCommonName(CDataString(metab.getSBMLId()).getCN().getString()))
# after each entry, we need a separator
if i != iMax - 1:
body.push_back(CRegisteredCommonName(report.getSeparator().getCN().getString()))
header.push_back(CRegisteredCommonName(report.getSeparator().getCN().getString()))
return report
if __name__ == '__main__':
main(sys.argv[1:])
|
the-stack_0_10706 | from ..constants import _file_to_fh
from ..functions import open_files_threshold_exceeded, close_one_file #, abspath
from .umread.umfile import File, UMFileException
_file_to_UM = _file_to_fh.setdefault('UM', {})
def _open_um_file(filename, aggregate=True, fmt=None, word_size=None,
byte_ordering=None):
'''Open a UM fields file or PP file and read it into a
`umread.umfile.File` object.
If there is already a `umread.umfile.File` object for the file then it
is returned with an open file descriptor.
:Parameters:
filename : str
The file to be opened.
:Returns:
out : umread.umfile.File
The opened file with an open file descriptor.
:Examples:
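>>> f = _open_um_file('file.pp')   # illustrative path, not a real file
>>> f.close()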
'''
# filename = abspath(filename)
f = _file_to_UM.get(filename)
if f is not None:
if f.fd is None:
if open_files_threshold_exceeded():
# Close a random data array file to make way for this
# one
close_one_file()
f.open_fd()
#--- End: if
return f
if open_files_threshold_exceeded():
# Close a random data array file to make way for this one
close_one_file()
try:
f = File(filename,
byte_ordering=byte_ordering,
word_size=word_size,
format=fmt)
except Exception as error:
try:
f.close_fd()
except:
pass
raise Exception(error)
# Add a close method to the file object
f.close = f.close_fd
# Update the _file_to_UM dictionary
_file_to_UM[filename] = f
return f
#--- End: def
def _close_um_file(filename):
'''Close a PP or UM fields file.
Does nothing if the file is already closed.
:Parameters:
filename : str
The file to be closed.
:Returns:
None
:Examples:
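>>> _close_um_file('file.pp')   # illustrative path, not a real file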
'''
f = _file_to_UM.pop(filename, None)
if f is not None:
f.close_fd()
#--- End: def
|
the-stack_0_10708 | from pyxform.tests_v1.pyxform_test_case import PyxformTestCase
class XlsFormHeadersTest(PyxformTestCase):
def test_label_caps_alternatives(self):
"""
re: https://github.com/SEL-Columbia/pyxform/issues/76
Capitalization of 'label' column can lead to confusing errors.
"""
s1 = self.md_to_pyxform_survey("""
| survey | | | |
| | type | name | label |
| | note | q | Q |
""")
s2 = self.md_to_pyxform_survey("""
| survey | | | |
| | type | name | Label | # <-- note: capital L
| | note | q | Q |
""")
self.assertEqual(s1.to_xml(), s2.to_xml())
def test_calculate_alias(self):
self.assertPyxformXform(
name="calculatealias",
md="""
| survey | | | | |
| | type | name | label | calculate |
| | decimal | amount | Counter | |
| | calculate | doubled | Doubled | ${amount} * 2 |
""",
errored=False,
debug=True)
|
the-stack_0_10709 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Tristan Fischer ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from xbmcswift2 import Plugin, xbmc
from resources.lib.api import \
ItunesPodcastApi, NetworkError, NoEnclosureException
plugin = Plugin()
api = ItunesPodcastApi()
my_podcasts = plugin.get_storage('my_podcasts.json', file_format='json')
STRINGS = {
'all': 30000,
'browse_by_genre': 30002,
'show_my_podcasts': 30003,
'search_podcast': 30004,
'video': 30005,
'audio': 30006,
'add_to_my_podcasts': 30010,
'remove_from_my_podcasts': 30011,
'network_error': 30200,
'no_media_found': 30007,
}
@plugin.route('/')
def show_root():
content_type = plugin.request.args.get('content_type')
if not content_type:
url = plugin.url_for(endpoint='show_content_types')
return plugin.redirect(url)
if isinstance(content_type, (list, tuple)):
content_type = content_type[0]
items = (
{'label': _('browse_by_genre'), 'path': plugin.url_for(
endpoint='show_genres',
content_type=content_type
)},
{'label': _('show_my_podcasts'), 'path': plugin.url_for(
endpoint='show_my_podcasts',
content_type=content_type
)},
{'label': _('search_podcast'), 'path': plugin.url_for(
endpoint='search',
content_type=content_type
)},
)
return plugin.finish(items)
@plugin.route('/content_types/')
def show_content_types():
items = (
{'label': _('video'), 'path': plugin.url_for(
endpoint='show_root',
content_type='video'
)},
{'label': _('audio'), 'path': plugin.url_for(
endpoint='show_root',
content_type='audio'
)}
)
return plugin.finish(items)
@plugin.route('/<content_type>/genres/')
def show_genres(content_type):
show_subgenres = plugin.get_setting('show_subgenres', bool)
genres = api.get_genres(flat=show_subgenres)
items = []
for genre in genres:
if genre['name'] == 'Podcasts':
genre['name'] = _('all')
item = {
'label': genre['name'],
'path': plugin.url_for(
endpoint='show_podcasts',
content_type=content_type,
genre_id=genre['id']
)
}
items.append(item)
return plugin.finish(items)
@plugin.route('/<content_type>/podcasts/by-genre/<genre_id>/')
def show_podcasts(content_type, genre_id):
num_podcasts_list = plugin.get_setting('num_podcasts_list', int)
podcasts = api.get_podcasts(
content_type=content_type,
genre_id=genre_id,
limit=num_podcasts_list
)
return __add_podcasts(content_type, podcasts)
@plugin.route('/<content_type>/podcast/items/<podcast_id>/')
def show_items(content_type, podcast_id):
try:
podcast_items = api.get_podcast_items(
podcast_id=podcast_id
)
except NoEnclosureException:
plugin.notify(msg=_('no_media_found'))
return plugin.finish(succeeded=False)
return __add_podcast_items(content_type, podcast_id, podcast_items)
@plugin.route('/<content_type>/podcast/items/<podcast_id>/<item_url>')
def watch_item(content_type, podcast_id, item_url):
return plugin.set_resolved_url(item_url)
@plugin.route('/<content_type>/podcasts/my/')
def show_my_podcasts(content_type):
podcasts = my_podcasts.get(content_type, {}).values()
return __add_podcasts(content_type, podcasts)
@plugin.route('/<content_type>/podcasts/my/add/<podcast_id>')
def add_to_my_podcasts(content_type, podcast_id):
podcast = api.get_single_podcast(podcast_id=podcast_id)
if content_type not in my_podcasts:
my_podcasts[content_type] = {}
my_podcasts[content_type][podcast_id] = podcast
my_podcasts.sync()
@plugin.route('/<content_type>/podcasts/my/del/<podcast_id>')
def del_from_my_podcasts(content_type, podcast_id):
if podcast_id in my_podcasts.get(content_type, {}):
del my_podcasts[content_type][podcast_id]
my_podcasts.sync()
@plugin.route('/<content_type>/podcasts/search/')
def search(content_type):
search_string = plugin.keyboard(heading=_('search'))
if search_string:
url = plugin.url_for(
endpoint='search_result',
content_type=content_type,
search_string=search_string
)
plugin.redirect(url)
@plugin.route('/<content_type>/podcasts/search/<search_string>/')
def search_result(content_type, search_string):
num_podcasts_search = plugin.get_setting('num_podcasts_search', int)
podcasts = api.search_podcast(
search_term=search_string,
limit=num_podcasts_search
)
return __add_podcasts(content_type, podcasts)
def __add_podcasts(content_type, podcasts):
my_podcasts_ids = my_podcasts.get(content_type, {}).keys()
items = []
for i, podcast in enumerate(podcasts):
podcast_id = str(podcast['id'])
if podcast_id not in my_podcasts_ids:
context_menu = [(
_('add_to_my_podcasts'),
'XBMC.RunPlugin(%s)' % plugin.url_for(
endpoint='add_to_my_podcasts',
content_type=content_type,
podcast_id=podcast_id
)
)]
else:
context_menu = [(
_('remove_from_my_podcasts'),
'XBMC.RunPlugin(%s)' % plugin.url_for(
endpoint='del_from_my_podcasts',
content_type=content_type,
podcast_id=podcast_id
)
)]
item = {
'label': podcast['name'],
'thumbnail': podcast['thumb'],
'info': {
'title': podcast['name'],
'count': i,
'plot': podcast['summary'] or '',
'studio': podcast['author'] or '',
'genre': podcast['genre'] or '',
'tagline': podcast['rights'] or '',
'date': podcast['release_date'] or ''
},
'context_menu': context_menu,
'path': plugin.url_for(
endpoint='show_items',
content_type=content_type,
podcast_id=podcast_id
)
}
items.append(item)
finish_kwargs = {
'sort_methods': ('PLAYLIST_ORDER', 'TITLE', 'DATE')
}
if plugin.get_setting('force_viewmode_podcasts', bool):
finish_kwargs['view_mode'] = 'thumbnail'
return plugin.finish(items, **finish_kwargs)
def __add_podcast_items(content_type, podcast_id, podcast_items):
items = [{
'label': item['title'],
'thumbnail': item['thumb'],
'info': {
'title': item['title'],
'count': i,
'plot': item['summary'] or '',
'studio': item['author'] or '',
'size': item['size'] or 0,
'date': item['pub_date'] or '',
'tagline': item['rights'] or ''
},
'path': plugin.url_for(
endpoint='watch_item',
content_type=content_type,
podcast_id=podcast_id,
item_url=item['item_url'].encode('utf-8')
),
'is_playable': True
} for i, item in enumerate(podcast_items)]
finish_kwargs = {
'sort_methods': ('PLAYLIST_ORDER', 'TITLE', 'DATE', 'SIZE')
}
if plugin.get_setting('force_viewmode_items', bool):
finish_kwargs['view_mode'] = 'thumbnail'
return plugin.finish(items, **finish_kwargs)
def __get_country():
if not plugin.get_setting('country_already_set'):
lang_country_mapping = (
('chin', 'CN'),
('denm', 'DK'),
('fin', 'FI'),
('fre', 'FR'),
('germa', 'DE'),
('greec', 'GR'),
('ital', 'IT'),
('japa', 'JP'),
('kor', 'KR'),
('dutch', 'NL'),
('norw', 'NO'),
('pol', 'PL'),
('port', 'PT'),
('roma', 'RO'),
('russ', 'RU'),
('span', 'ES'),
('swed', 'SE'),
('turk', 'TR'),
('engl', 'US')
)
country = None
xbmc_language = xbmc.getLanguage().lower()
for lang, country_code in lang_country_mapping:
if xbmc_language.startswith(lang):
country = country_code
plugin.set_setting('country', country)
break
if not country:
plugin.open_settings()
country = plugin.get_setting('country') or 'US'
plugin.set_setting('country_already_set', '1')
return country
def _(string_id):
if string_id in STRINGS:
return plugin.get_string(STRINGS[string_id])
else:
plugin.log.warning('String is missing: %s' % string_id)
return string_id
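# For example, _('video') resolves to the localized string for ID 30005 from
# STRINGS above, while an unknown ID is logged as missing and returned unchanged.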
if __name__ == '__main__':
country = __get_country()
api.set_country(country=country)
try:
plugin.run()
except NetworkError:
plugin.notify(msg=_('network_error'))
|
the-stack_0_10710 | import time
class MergeSort:
"""
This class is a python implementation of the problem discussed in this
video by mycodeschool - https://www.youtube.com/watch?v=TzeBrDU-JaY
:Authors: pranaychandekar
"""
@staticmethod
def merge(left: list, right: list, original: list):
"""
This method implements the merge logic to merge two halves of a list.
:param left: The left half of the original list.
:param right: The right half of the original list.
:param original: The original list.
:type left: list
:type right: list
:type original: list
"""
left_length = len(left)
right_length = len(right)
left_pointer = right_pointer = original_pointer = 0
while left_pointer < left_length and right_pointer < right_length:
if left[left_pointer] <= right[right_pointer]:
original[original_pointer] = left[left_pointer]
left_pointer = left_pointer + 1
else:
original[original_pointer] = right[right_pointer]
right_pointer = right_pointer + 1
original_pointer = original_pointer + 1
if left_pointer < left_length:
original[original_pointer:] = left[left_pointer:]
if right_pointer < right_length:
original[original_pointer:] = right[right_pointer:]
def merge_sort(self, unsorted_list: list):
"""
This method sorts a given list in ascending order using Merge Sort algorithm.
:param unsorted_list: The list which needs to be sorted.
:type unsorted_list: list
"""
unsorted_list_size = len(unsorted_list)
if unsorted_list_size < 2:
return
mid = int(unsorted_list_size / 2)
left = unsorted_list[0:mid]
right = unsorted_list[mid:]
self.merge_sort(left)
self.merge_sort(right)
self.merge(left, right, unsorted_list)
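# Note: the recursion halves the list, giving O(log n) levels, and each
# level does O(n) work in merge(), so the overall cost is O(n log n) time
# with O(n) auxiliary space for the left/right copies.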
if __name__ == "__main__":
tic = time.time()
print("\nYou are currently running Merge Sort test case.")
unsorted_list = [2, 7, 4, 1, 5, 3]
print("\nUnsorted List: ")
for element in unsorted_list:
print(str(element), end=", ")
print()
solution = MergeSort()
solution.merge_sort(unsorted_list)
print("\nSorted List: ")
for element in unsorted_list:
print(str(element), end=", ")
print()
toc = time.time()
print("\nTotal time taken:", toc - tic, "seconds.")
|
the-stack_0_10711 | import http.client
import requests
import random
import string
import sqlite3
from sqlite3 import Error
import sys
from faker import Faker
fake = Faker()
withdraw = False
address = "D87S8xBmWjgy6UWUhBjeRs8cMjpMyXdQe5"
db = sqlite3.connect('database.db')
conn = http.client.HTTPSConnection("dogeminer.fun")
def query(sql):
cursor = db.cursor()
res = cursor.execute(sql)
db.commit()
return res
def getSession():
length_of_string = 40
letters_and_digits = string.ascii_lowercase + string.digits
random_string = ""
for _ in range(length_of_string):
random_string += random.choice(letters_and_digits)
print(random_string)
ci_session = "wolven_core_session=" + random_string
return ci_session
def getAddress():
URL = "http://localhost:3000/get-target-linked-address"
r = requests.get(url = URL)
data = r.json()
address = data['data']
return address.strip()
def register(username,address):
session_id = getSession()
payload = 'user_name='+username+'&email='+username+'%40gmail.com&email_repeat='+username+'%40gmail.com&password=123456&password_repeat=123456&address='+address+'&tos_agree=1®ister=Register%2BSecurely'
headers = {
'authority': 'dogeminer.fun',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'origin': 'https://dogeminer.fun',
'content-type': 'application/x-www-form-urlencoded',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': 'https://dogeminer.fun/account/register',
'accept-language': 'en-US,en;q=0.9',
'cookie': session_id
}
conn.request("POST", "/account/register", payload, headers)
res = conn.getresponse()
data = res.read()
print(data)
print(res.status)
if("Your account was successfully created" in str(data)):
print("Register Success : " + username)
query("insert into dogeminner_address (username,address,status) values('"+username+"','"+address+"','Pending')")
def withdraw_doge(username):
session_id = getSession()
payload = 'user_name='+username+'&password=123456&login=Login%2BSecurely'
headers = {
'authority': 'dogeminer.fun',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'origin': 'https://dogeminer.fun',
'content-type': 'application/x-www-form-urlencoded',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': 'https://dogeminer.fun/account/login',
'accept-language': 'en-US,en;q=0.9',
'cookie': session_id
}
conn.request("POST", "/account/login", payload, headers)
res = conn.getresponse()
res.read()
payload = 'claim=Start%2BAuto%2BFaucet'
headers = {
'authority': 'dogeminer.fun',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'origin': 'https://dogeminer.fun',
'content-type': 'application/x-www-form-urlencoded',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': 'https://dogeminer.fun/page/dashboard',
'accept-language': 'en-US,en;q=0.9',
'cookie': session_id
}
conn.request("POST", "/page/dashboard", payload, headers)
res = conn.getresponse()
res.read()
redirectUrl = res.headers['Location']
print("RedirectUrl: " + str(redirectUrl))
payload = ''
headers = {
'authority': 'dogeminer.fun',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': 'https://dogeminer.fun/page/dashboard',
'accept-language': 'en-US,en;q=0.9',
'cookie': session_id
}
conn.request("GET", redirectUrl, payload, headers)
res = conn.getresponse()
res.read()
print("Withdraw Complete for User : " + username)
def initialize():
sql = """ CREATE TABLE IF NOT EXISTS dogeminner_address (
id integer PRIMARY KEY ,
username text,
address text,
status text
); """
query(sql)
res = query("select count(*) as count from dogeminner_address")
rows = res.fetchall()
data_count = rows[0][0]
min_data_count = 500
if(data_count < min_data_count):
for _ in range(min_data_count-data_count):
name = fake.name().split(" ")[1]
number = '{:03d}'.format(random.randrange(1, 999))
username = (name + number)
register(username,address)
def do_main_job():
cursor = db.cursor()
sql = "select username from dogeminner_address where status = 'Pending' LIMIT 1"
res = cursor.execute(sql)
rows = res.fetchall()
if(len(rows)==0):
sql = "update dogeminner_address set status = 'Pending'"
res = cursor.execute(sql)
db.commit()
do_main_job()
return
username = rows[0][0]
sql = "update dogeminner_address set status = 'Completed' where username = '"+username+"'"
res = cursor.execute(sql)
db.commit()
withdraw_doge(username)
do_main_job()
initialize()
do_main_job()
|
the-stack_0_10713 | # standard library imports
import os
import secrets
from contextlib import closing
from urllib.parse import urlparse
from functools import cached_property
from mimetypes import guess_extension
# pip imports
from magic import from_buffer
import psycopg2
from flask import url_for, current_app
from werkzeug.datastructures import FileStorage
from werkzeug.utils import safe_join, secure_filename
# local imports
from app import config
from app.helpers.utils import create_hmac_hexdigest
from app.helpers.discord import FileEmbed, ShortUrlEmbed
conn = psycopg2.connect("user=postgres1 dbname=shrpy password=postgres")
conn.set_session(autocommit=True)
with conn.cursor() as cursor:
cursor.execute("CREATE TABLE IF NOT EXISTS urls (token VARCHAR(10) NOT NULL PRIMARY KEY, url TEXT NOT NULL)")
class File:
def __init__(self, file_instance: FileStorage, use_original_filename=True):
"""Class for uploaded files which takes the `werkzeug.datastructures.FileStorage` from `flask.Request.files` as first parameter."""
if isinstance(file_instance, FileStorage) is False:
raise InvalidFileException(file_instance)
self.use_original_filename = use_original_filename
# Private FileStorage instance
self.__file = file_instance
@cached_property
def filename(self) -> str:
"""Returns random filename."""
custom_filename = secrets.token_urlsafe(config.FILE_TOKEN_BYTES)
if self.use_original_filename:
filename = f'{custom_filename}-{self.original_filename_root[:config.ORIGINAL_FILENAME_LENGTH]}'
else:
filename = custom_filename
return f'{filename}.{self.extension}'
@cached_property
def extension(self) -> str:
"""Returns extension using `python-magic` and `mimetypes`."""
file_bytes = self.__file.read(config.MAGIC_BUFFER_BYTES)
mime = from_buffer(file_bytes, mime=True).lower()
ext = guess_extension(mime)
if ext is None:
current_app.logger.error(f'Unable to determine file extension for file {self.__file.filename} - MIME type {mime}')
return ext.replace('.', '')
@cached_property
def original_filename_root(self):
"""Returns the original filename without extension."""
sec_filename = secure_filename(self.__file.filename.lower())
root, ext = os.path.splitext(sec_filename)
return root
@cached_property
def hmac(self) -> str:
"""Returns HMAC digest calculated from filename, `flask.current_app.secret_key` is used as secret."""
return create_hmac_hexdigest(self.filename, current_app.secret_key)
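# A minimal sketch of what a helper like create_hmac_hexdigest could look
# like (assumption -- the real implementation lives in app.helpers.utils
# and is not shown here):
#   import hashlib, hmac as hmac_lib
#   def create_hmac_hexdigest(message: str, secret: str) -> str:
#       return hmac_lib.new(secret.encode(), message.encode(), hashlib.sha256).hexdigest()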
@cached_property
def url(self) -> str:
"""Returns file URL using `flask.url_for`."""
return url_for('main.uploads', filename=self.filename, _external=True)
@cached_property
def deletion_url(self) -> str:
"""Returns deletion URL using `flask.url_for`."""
return url_for('api.delete_file', hmac_hash=self.hmac, filename=self.filename, _external=True)
@staticmethod
def delete(filename: str) -> bool:
"""Deletes the file from `config.UPLOAD_DIR`, if it exists."""
file_path = safe_join(config.UPLOAD_DIR, filename)
if os.path.isfile(file_path) is False:
return False
current_app.logger.info(f'Deleted file {file_path}')
os.remove(file_path)
return True
def is_allowed(self) -> bool:
"""Check if file is allowed, based on `config.ALLOWED_EXTENSIONS`."""
if not config.ALLOWED_EXTENSIONS:
return True
allowed = self.extension in config.ALLOWED_EXTENSIONS
if allowed is False:
current_app.logger.warning(f'File {self.__file.filename} (detected extension {self.extension}) is not allowed')
return allowed
def save(self, save_directory = config.UPLOAD_DIR) -> None:
"""Saves the file to `UPLOAD_DIR`."""
if os.path.isdir(save_directory) is False:
os.makedirs(save_directory)
save_path = safe_join(save_directory, self.filename)
current_app.logger.info(f'Saving file {self.__file.filename} to {save_path}')
current_app.logger.info(f'URLs: {self.url} - {self.deletion_url}')
# Set file descriptor back to beginning of the file so save works correctly
self.__file.seek(os.SEEK_SET)
self.__file.save(save_path)
def embed(self) -> FileEmbed:
"""Returns FileEmbed instance for this file."""
return FileEmbed(
content_url=self.url,
deletion_url=self.deletion_url
)
class InvalidFileException(Exception):
"""Raised when `File` is initialized using wrong `file_instance`."""
def __init__(self, file_instance, *args):
self.file_instance = file_instance
super().__init__(*args)
def __str__(self):
file_instance_type = type(self.file_instance)
return f'{self.file_instance} ({file_instance_type}) is not an instance of werkzeug.datastructures.FileStorage ({FileStorage})'
class ShortUrl:
def __init__(self, url: str):
url = ''.join(url.lower().split())
if not url.startswith(('https://', 'http://')):
url = f'https://{url}'
self.url = url
@cached_property
def token(self) -> str:
return secrets.token_urlsafe(config.URL_TOKEN_BYTES)
@cached_property
def hmac(self) -> str:
"""Returns HMAC hash calculated from token, `flask.current_app.secret_key` is used as secret."""
return create_hmac_hexdigest(self.token, current_app.secret_key)
@cached_property
def shortened_url(self) -> str:
"""Returns the shortened URL using `flask.url_for`."""
return url_for('main.short_url', token=self.token, _external=True)
@cached_property
def deletion_url(self) -> str:
"""Returns deletion URL using `flask.url_for`."""
return url_for('api.delete_short_url', hmac_hash=self.hmac, token=self.token, _external=True)
def is_valid(self) -> bool:
"""Checks if URL is valid"""
parsed = urlparse(self.url)
# Parsed URL must have at least scheme and netloc (e.g. domain name)
try:
valid = all([parsed.scheme, parsed.netloc]) and parsed.netloc.split('.')[1]
except IndexError:
valid = False
if valid is False:
current_app.logger.warning(f'URL {self.url} is invalid')
return valid
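# For example (illustrative): 'example.com/page' is normalised to
# 'https://example.com/page' and passes, while 'localhost' fails because
# its netloc has no dot-separated part and trips the IndexError branch above.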
def add(self):
"""Inserts the URL and token to database."""
current_app.logger.info(f'Saving short URL for {self.url} as {self.shortened_url}')
current_app.logger.info(f'URLs: {self.shortened_url} - {self.deletion_url}')
with closing(self.get_cursor()) as cursor:
cursor.execute("INSERT INTO urls VALUES (%s, %s)", (
self.token,
self.url
))
def embed(self) -> ShortUrlEmbed:
"""Returns ShorturlEmbed instance for this URL."""
return ShortUrlEmbed(
content_url=self.shortened_url,
deletion_url=self.deletion_url,
original_url=self.url,
shortened_url=self.shortened_url
)
@classmethod
def get_by_token(cls, token: str):
"""Returns the URL for given token from database."""
result = None
with closing(cls.get_cursor()) as cursor:
cursor.execute("SELECT url FROM urls WHERE token = %s", (token,))
url_row = cursor.fetchone()
if url_row:
result = url_row[0]
return result
@classmethod
def delete(cls, token: str) -> bool:
"""DELETEs URL using given token from database."""
url = cls.get_by_token(token)
with closing(cls.get_cursor()) as cursor:
cursor.execute("DELETE FROM urls WHERE token = %s", (token,))
deleted = cursor.rowcount > 0
if deleted:
current_app.logger.info(f'Deleted short URL for {url} using token {token}')
return deleted
@staticmethod
def get_cursor():
cursor = conn.cursor()
return cursor
|
the-stack_0_10714 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this file contains helper methods for BBOX processing
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import random
import math
import cv2
def meet_emit_constraint(src_bbox, sample_bbox):
center_x = (src_bbox[2] + src_bbox[0]) / 2
center_y = (src_bbox[3] + src_bbox[1]) / 2
if center_x >= sample_bbox[0] and \
center_x <= sample_bbox[2] and \
center_y >= sample_bbox[1] and \
center_y <= sample_bbox[3]:
return True
return False
def clip_bbox(src_bbox):
src_bbox[0] = max(min(src_bbox[0], 1.0), 0.0)
src_bbox[1] = max(min(src_bbox[1], 1.0), 0.0)
src_bbox[2] = max(min(src_bbox[2], 1.0), 0.0)
src_bbox[3] = max(min(src_bbox[3], 1.0), 0.0)
return src_bbox
def bbox_area(src_bbox):
if src_bbox[2] < src_bbox[0] or src_bbox[3] < src_bbox[1]:
return 0.
else:
width = src_bbox[2] - src_bbox[0]
height = src_bbox[3] - src_bbox[1]
return width * height
def is_overlap(object_bbox, sample_bbox):
if object_bbox[0] >= sample_bbox[2] or \
object_bbox[2] <= sample_bbox[0] or \
object_bbox[1] >= sample_bbox[3] or \
object_bbox[3] <= sample_bbox[1]:
return False
else:
return True
def filter_and_process(sample_bbox,
bboxes,
labels,
scores=None,
keypoints=None):
new_bboxes = []
new_labels = []
new_scores = []
new_keypoints = []
new_kp_ignore = []
for i in range(len(bboxes)):
new_bbox = [0, 0, 0, 0]
obj_bbox = [bboxes[i][0], bboxes[i][1], bboxes[i][2], bboxes[i][3]]
if not meet_emit_constraint(obj_bbox, sample_bbox):
continue
if not is_overlap(obj_bbox, sample_bbox):
continue
sample_width = sample_bbox[2] - sample_bbox[0]
sample_height = sample_bbox[3] - sample_bbox[1]
new_bbox[0] = (obj_bbox[0] - sample_bbox[0]) / sample_width
new_bbox[1] = (obj_bbox[1] - sample_bbox[1]) / sample_height
new_bbox[2] = (obj_bbox[2] - sample_bbox[0]) / sample_width
new_bbox[3] = (obj_bbox[3] - sample_bbox[1]) / sample_height
new_bbox = clip_bbox(new_bbox)
if bbox_area(new_bbox) > 0:
new_bboxes.append(new_bbox)
new_labels.append([labels[i][0]])
if scores is not None:
new_scores.append([scores[i][0]])
if keypoints is not None:
sample_keypoint = keypoints[0][i]
for j in range(len(sample_keypoint)):
kp_len = sample_height if j % 2 else sample_width
sample_coord = sample_bbox[1] if j % 2 else sample_bbox[0]
sample_keypoint[j] = (
sample_keypoint[j] - sample_coord) / kp_len
sample_keypoint[j] = max(min(sample_keypoint[j], 1.0), 0.0)
new_keypoints.append(sample_keypoint)
new_kp_ignore.append(keypoints[1][i])
bboxes = np.array(new_bboxes)
labels = np.array(new_labels)
scores = np.array(new_scores)
if keypoints is not None:
keypoints = np.array(new_keypoints)
new_kp_ignore = np.array(new_kp_ignore)
return bboxes, labels, scores, (keypoints, new_kp_ignore)
return bboxes, labels, scores
def bbox_area_sampling(bboxes, labels, scores, target_size, min_size):
new_bboxes = []
new_labels = []
new_scores = []
for i, bbox in enumerate(bboxes):
w = float((bbox[2] - bbox[0]) * target_size)
h = float((bbox[3] - bbox[1]) * target_size)
if w * h < float(min_size * min_size):
continue
else:
new_bboxes.append(bbox)
new_labels.append(labels[i])
if scores is not None and scores.size != 0:
new_scores.append(scores[i])
bboxes = np.array(new_bboxes)
labels = np.array(new_labels)
scores = np.array(new_scores)
return bboxes, labels, scores
def generate_sample_bbox(sampler):
scale = np.random.uniform(sampler[2], sampler[3])
aspect_ratio = np.random.uniform(sampler[4], sampler[5])
aspect_ratio = max(aspect_ratio, (scale**2.0))
aspect_ratio = min(aspect_ratio, 1 / (scale**2.0))
bbox_width = scale * (aspect_ratio**0.5)
bbox_height = scale / (aspect_ratio**0.5)
xmin_bound = 1 - bbox_width
ymin_bound = 1 - bbox_height
xmin = np.random.uniform(0, xmin_bound)
ymin = np.random.uniform(0, ymin_bound)
xmax = xmin + bbox_width
ymax = ymin + bbox_height
sampled_bbox = [xmin, ymin, xmax, ymax]
return sampled_bbox
def generate_sample_bbox_square(sampler, image_width, image_height):
scale = np.random.uniform(sampler[2], sampler[3])
aspect_ratio = np.random.uniform(sampler[4], sampler[5])
aspect_ratio = max(aspect_ratio, (scale**2.0))
aspect_ratio = min(aspect_ratio, 1 / (scale**2.0))
bbox_width = scale * (aspect_ratio**0.5)
bbox_height = scale / (aspect_ratio**0.5)
if image_height < image_width:
bbox_width = bbox_height * image_height / image_width
else:
bbox_height = bbox_width * image_width / image_height
xmin_bound = 1 - bbox_width
ymin_bound = 1 - bbox_height
xmin = np.random.uniform(0, xmin_bound)
ymin = np.random.uniform(0, ymin_bound)
xmax = xmin + bbox_width
ymax = ymin + bbox_height
sampled_bbox = [xmin, ymin, xmax, ymax]
return sampled_bbox
def data_anchor_sampling(bbox_labels, image_width, image_height, scale_array,
resize_width):
num_gt = len(bbox_labels)
# np.random.randint range: [low, high)
rand_idx = np.random.randint(0, num_gt) if num_gt != 0 else 0
if num_gt != 0:
norm_xmin = bbox_labels[rand_idx][0]
norm_ymin = bbox_labels[rand_idx][1]
norm_xmax = bbox_labels[rand_idx][2]
norm_ymax = bbox_labels[rand_idx][3]
xmin = norm_xmin * image_width
ymin = norm_ymin * image_height
wid = image_width * (norm_xmax - norm_xmin)
hei = image_height * (norm_ymax - norm_ymin)
range_size = 0
area = wid * hei
for scale_ind in range(0, len(scale_array) - 1):
if area > scale_array[scale_ind] ** 2 and area < \
scale_array[scale_ind + 1] ** 2:
range_size = scale_ind + 1
break
if area > scale_array[len(scale_array) - 2]**2:
range_size = len(scale_array) - 2
scale_choose = 0.0
if range_size == 0:
rand_idx_size = 0
else:
# np.random.randint range: [low, high)
rng_rand_size = np.random.randint(0, range_size + 1)
rand_idx_size = rng_rand_size % (range_size + 1)
if rand_idx_size == range_size:
min_resize_val = scale_array[rand_idx_size] / 2.0
max_resize_val = min(2.0 * scale_array[rand_idx_size],
2 * math.sqrt(wid * hei))
scale_choose = random.uniform(min_resize_val, max_resize_val)
else:
min_resize_val = scale_array[rand_idx_size] / 2.0
max_resize_val = 2.0 * scale_array[rand_idx_size]
scale_choose = random.uniform(min_resize_val, max_resize_val)
sample_bbox_size = wid * resize_width / scale_choose
w_off_orig = 0.0
h_off_orig = 0.0
if sample_bbox_size < max(image_height, image_width):
if wid <= sample_bbox_size:
w_off_orig = np.random.uniform(xmin + wid - sample_bbox_size,
xmin)
else:
w_off_orig = np.random.uniform(xmin,
xmin + wid - sample_bbox_size)
if hei <= sample_bbox_size:
h_off_orig = np.random.uniform(ymin + hei - sample_bbox_size,
ymin)
else:
h_off_orig = np.random.uniform(ymin,
ymin + hei - sample_bbox_size)
else:
w_off_orig = np.random.uniform(image_width - sample_bbox_size, 0.0)
h_off_orig = np.random.uniform(image_height - sample_bbox_size,
0.0)
w_off_orig = math.floor(w_off_orig)
h_off_orig = math.floor(h_off_orig)
# Figure out top left coordinates.
w_off = float(w_off_orig / image_width)
h_off = float(h_off_orig / image_height)
sampled_bbox = [
w_off, h_off, w_off + float(sample_bbox_size / image_width),
h_off + float(sample_bbox_size / image_height)
]
return sampled_bbox
else:
return 0
def jaccard_overlap(sample_bbox, object_bbox):
if sample_bbox[0] >= object_bbox[2] or \
sample_bbox[2] <= object_bbox[0] or \
sample_bbox[1] >= object_bbox[3] or \
sample_bbox[3] <= object_bbox[1]:
return 0
intersect_xmin = max(sample_bbox[0], object_bbox[0])
intersect_ymin = max(sample_bbox[1], object_bbox[1])
intersect_xmax = min(sample_bbox[2], object_bbox[2])
intersect_ymax = min(sample_bbox[3], object_bbox[3])
intersect_size = (intersect_xmax - intersect_xmin) * (
intersect_ymax - intersect_ymin)
sample_bbox_size = bbox_area(sample_bbox)
object_bbox_size = bbox_area(object_bbox)
overlap = intersect_size / (
sample_bbox_size + object_bbox_size - intersect_size)
return overlap
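# Example (illustrative): boxes [0., 0., 0.5, 0.5] and [0.25, 0.25, 0.75, 0.75]
# intersect in a 0.25 x 0.25 region, so the overlap is
# 0.0625 / (0.25 + 0.25 - 0.0625) = 0.142857...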
def intersect_bbox(bbox1, bbox2):
if bbox2[0] > bbox1[2] or bbox2[2] < bbox1[0] or \
bbox2[1] > bbox1[3] or bbox2[3] < bbox1[1]:
intersection_box = [0.0, 0.0, 0.0, 0.0]
else:
intersection_box = [
max(bbox1[0], bbox2[0]), max(bbox1[1], bbox2[1]),
min(bbox1[2], bbox2[2]), min(bbox1[3], bbox2[3])
]
return intersection_box
def bbox_coverage(bbox1, bbox2):
inter_box = intersect_bbox(bbox1, bbox2)
intersect_size = bbox_area(inter_box)
if intersect_size > 0:
bbox1_size = bbox_area(bbox1)
return intersect_size / bbox1_size
else:
return 0.
def satisfy_sample_constraint(sampler,
sample_bbox,
gt_bboxes,
satisfy_all=False):
if sampler[6] == 0 and sampler[7] == 0:
return True
satisfied = []
for i in range(len(gt_bboxes)):
object_bbox = [
gt_bboxes[i][0], gt_bboxes[i][1], gt_bboxes[i][2], gt_bboxes[i][3]
]
overlap = jaccard_overlap(sample_bbox, object_bbox)
if sampler[6] != 0 and \
overlap < sampler[6]:
satisfied.append(False)
continue
if sampler[7] != 0 and \
overlap > sampler[7]:
satisfied.append(False)
continue
satisfied.append(True)
if not satisfy_all:
return True
if satisfy_all:
return np.all(satisfied)
else:
return False
def satisfy_sample_constraint_coverage(sampler, sample_bbox, gt_bboxes):
if sampler[6] == 0 and sampler[7] == 0:
has_jaccard_overlap = False
else:
has_jaccard_overlap = True
if sampler[8] == 0 and sampler[9] == 0:
has_object_coverage = False
else:
has_object_coverage = True
if not has_jaccard_overlap and not has_object_coverage:
return True
found = False
for i in range(len(gt_bboxes)):
object_bbox = [
gt_bboxes[i][0], gt_bboxes[i][1], gt_bboxes[i][2], gt_bboxes[i][3]
]
if has_jaccard_overlap:
overlap = jaccard_overlap(sample_bbox, object_bbox)
if sampler[6] != 0 and \
overlap < sampler[6]:
continue
if sampler[7] != 0 and \
overlap > sampler[7]:
continue
found = True
if has_object_coverage:
object_coverage = bbox_coverage(object_bbox, sample_bbox)
if sampler[8] != 0 and \
object_coverage < sampler[8]:
continue
if sampler[9] != 0 and \
object_coverage > sampler[9]:
continue
found = True
if found:
return True
return found
def crop_image_sampling(img, sample_bbox, image_width, image_height,
target_size):
# no clipping here
xmin = int(sample_bbox[0] * image_width)
xmax = int(sample_bbox[2] * image_width)
ymin = int(sample_bbox[1] * image_height)
ymax = int(sample_bbox[3] * image_height)
w_off = xmin
h_off = ymin
width = xmax - xmin
height = ymax - ymin
cross_xmin = max(0.0, float(w_off))
cross_ymin = max(0.0, float(h_off))
cross_xmax = min(float(w_off + width - 1.0), float(image_width))
cross_ymax = min(float(h_off + height - 1.0), float(image_height))
cross_width = cross_xmax - cross_xmin
cross_height = cross_ymax - cross_ymin
roi_xmin = 0 if w_off >= 0 else abs(w_off)
roi_ymin = 0 if h_off >= 0 else abs(h_off)
roi_width = cross_width
roi_height = cross_height
roi_y1 = int(roi_ymin)
roi_y2 = int(roi_ymin + roi_height)
roi_x1 = int(roi_xmin)
roi_x2 = int(roi_xmin + roi_width)
cross_y1 = int(cross_ymin)
cross_y2 = int(cross_ymin + cross_height)
cross_x1 = int(cross_xmin)
cross_x2 = int(cross_xmin + cross_width)
sample_img = np.zeros((height, width, 3))
sample_img[roi_y1: roi_y2, roi_x1: roi_x2] = \
img[cross_y1: cross_y2, cross_x1: cross_x2]
sample_img = cv2.resize(
sample_img, (target_size, target_size), interpolation=cv2.INTER_AREA)
return sample_img
def is_poly(segm):
assert isinstance(segm, (list, dict)), \
"Invalid segm type: {}".format(type(segm))
return isinstance(segm, list)
def gaussian_radius(bbox_size, min_overlap):
height, width = bbox_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1**2 - 4 * a1 * c1)
radius1 = (b1 + sq1) / (2 * a1)
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2**2 - 4 * a2 * c2)
radius2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3**2 - 4 * a3 * c3)
radius3 = (b3 + sq3) / 2
return min(radius1, radius2, radius3)
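# Note: the three quadratics above follow the CornerNet-style radius
# derivation, covering the cases where a box with shifted corners encloses,
# is enclosed by, or straddles the ground-truth box; taking the minimum is
# the conservative choice that keeps the overlap at least min_overlap.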
def draw_gaussian(heatmap, center, radius, k=1, delte=6):
diameter = 2 * radius + 1
sigma = diameter / delte
gaussian = gaussian2D((diameter, diameter), sigma_x=sigma, sigma_y=sigma)
x, y = center
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:
radius + right]
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
def gaussian2D(shape, sigma_x=1, sigma_y=1):
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m + 1, -n:n + 1]
h = np.exp(-(x * x / (2 * sigma_x * sigma_x) + y * y / (2 * sigma_y *
sigma_y)))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_umich_gaussian(heatmap, center, radius, k=1):
"""
draw_umich_gaussian, refer to https://github.com/xingyizhou/CenterNet/blob/master/src/lib/utils/image.py#L126
"""
diameter = 2 * radius + 1
gaussian = gaussian2D(
(diameter, diameter), sigma_x=diameter / 6, sigma_y=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:
radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
def get_border(border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
|
the-stack_0_10715 | from typing import Union, List
class DaysOfWeek:
"""An object that stores a boolean value for each day of the week.
It can read or produce a one byte code compatible with what AlarmClock
uses.
"""
days = {
'Monday': 1,
'Tuesday': 2,
'Wednesday': 3,
'Thursday': 4,
'Friday': 5,
'Saturday': 6,
'Sunday': 7,
}
def __init__(self, code: int = 0):
"""Initialize the DaysOfWeek object with specified code or 0x00.
Code is a single byte binary representation of the object.
0x00 means all stored values are False.
"""
# Filter out bit 0. It has no meaning and should always be zero.
self.code = code & 0xFE
@classmethod
def from_list(cls, li: Union[List[str], List[int]]):
"""Create an instance from a list of str names or numbers of days."""
dow = cls()
for day in li:
dow.set_day(day, True)
return dow
def get_day(self, day: Union[str, int]) -> bool:
"""Get the boolean value for a single day of the week."""
if isinstance(day, str):
if day not in self.days:
raise TypeError(f'unknown day: {repr(day)}')
day = self.days[day]
if day < 1 or day > 7:
raise ValueError(f"{day} is not a valid day of the week")
return self.code & (2**day) > 0
def set_day(self, day: Union[str, int], value: bool) -> None:
"""Set the boolean value for a single day of the week."""
if isinstance(day, str):
if day not in self.days:
raise TypeError(f'unknown day: {repr(day)}')
day = self.days[day]
if day < 1 or day > 7:
raise ValueError(f"{day} is not a valid day of the week")
if value:
self.code |= (2**day)
else:
self.code &= ~(2**day)
Monday = property(lambda self: self.get_day('Monday'),
lambda self, value: self.set_day('Monday', value))
Tuesday = property(lambda self: self.get_day('Tuesday'),
lambda self, value: self.set_day('Tuesday', value))
Wednesday = property(lambda self: self.get_day('Wednesday'),
lambda self, value: self.set_day('Wednesday', value))
Thursday = property(lambda self: self.get_day('Thursday'),
lambda self, value: self.set_day('Thursday', value))
Friday = property(lambda self: self.get_day('Friday'),
lambda self, value: self.set_day('Friday', value))
Saturday = property(lambda self: self.get_day('Saturday'),
lambda self, value: self.set_day('Saturday', value))
Sunday = property(lambda self: self.get_day('Sunday'),
lambda self, value: self.set_day('Sunday', value))
@property
def active_days(self) -> List[str]:
"""Get an array of days of the week for which the stored value is True.
Names of the days of the week are returned as strings with the first
letter capitalized.
"""
return [day for day in self.days if self.get_day(day)]
def __str__(self) -> str:
"""Get all days for which the stored value is True joined with ', '."""
return ', '.join(self.active_days)
def __repr__(self) -> str:
return f'{self.__class__.__name__}({repr(self.code)})'
def __eq__(self, other):
return self.code == other.code
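# Example usage (illustrative):
# dow = DaysOfWeek.from_list(['Monday', 'Wednesday'])
# hex(dow.code) -> '0xa' (bit 1 for Monday, bit 3 for Wednesday)
# dow.Friday = True
# str(dow) -> 'Monday, Wednesday, Friday'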
|
the-stack_0_10717 | """Support for wired switches attached to a Konnected device."""
import logging
from homeassistant.components.konnected import (
DOMAIN as KONNECTED_DOMAIN, PIN_TO_ZONE, CONF_ACTIVATION, CONF_MOMENTARY,
CONF_PAUSE, CONF_REPEAT, STATE_LOW, STATE_HIGH)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import (
ATTR_STATE, CONF_DEVICES, CONF_NAME, CONF_PIN, CONF_SWITCHES)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['konnected']
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set switches attached to a Konnected device."""
if discovery_info is None:
return
data = hass.data[KONNECTED_DOMAIN]
device_id = discovery_info['device_id']
switches = [
KonnectedSwitch(device_id, pin_data.get(CONF_PIN), pin_data)
for pin_data in data[CONF_DEVICES][device_id][CONF_SWITCHES]]
async_add_entities(switches)
class KonnectedSwitch(ToggleEntity):
"""Representation of a Konnected switch."""
def __init__(self, device_id, pin_num, data):
"""Initialize the Konnected switch."""
self._data = data
self._device_id = device_id
self._pin_num = pin_num
self._activation = self._data.get(CONF_ACTIVATION, STATE_HIGH)
self._momentary = self._data.get(CONF_MOMENTARY)
self._pause = self._data.get(CONF_PAUSE)
self._repeat = self._data.get(CONF_REPEAT)
self._state = self._boolean_state(self._data.get(ATTR_STATE))
self._unique_id = '{}-{}'.format(device_id, PIN_TO_ZONE[pin_num])
self._name = self._data.get(CONF_NAME)
@property
def unique_id(self) -> str:
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return the status of the sensor."""
return self._state
@property
def client(self):
"""Return the Konnected HTTP client."""
return \
self.hass.data[KONNECTED_DOMAIN][CONF_DEVICES][self._device_id].\
get('client')
def turn_on(self, **kwargs):
"""Send a command to turn on the switch."""
resp = self.client.put_device(
self._pin_num,
int(self._activation == STATE_HIGH),
self._momentary,
self._repeat,
self._pause
)
if resp.get(ATTR_STATE) is not None:
self._set_state(True)
if self._momentary and resp.get(ATTR_STATE) != -1:
# Immediately set the state back off for momentary switches
self._set_state(False)
def turn_off(self, **kwargs):
"""Send a command to turn off the switch."""
resp = self.client.put_device(
self._pin_num, int(self._activation == STATE_LOW))
if resp.get(ATTR_STATE) is not None:
self._set_state(self._boolean_state(resp.get(ATTR_STATE)))
def _boolean_state(self, int_state):
if int_state is None:
return False
if int_state == 0:
return self._activation == STATE_LOW
if int_state == 1:
return self._activation == STATE_HIGH
def _set_state(self, state):
self._state = state
self.schedule_update_ha_state()
_LOGGER.debug('Setting status of %s actuator pin %s to %s',
self._device_id, self.name, state)
async def async_added_to_hass(self):
"""Store entity_id."""
self._data['entity_id'] = self.entity_id
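# Illustrative configuration sketch (keys mirror the CONF_* constants imported
# above; the authoritative schema is defined by the konnected component itself):
#
# konnected:
#   devices:
#     - id: "d8bfc0123456"
#       switches:
#         - pin: 5
#           name: Garage Door
#           activation: low
#           momentary: 50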
|
the-stack_0_10718 | # coding: utf-8
import pprint
import re
import six
class VideoContrast:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'name': 'str',
'execution_order': 'int',
'contrast': 'str',
'brightness': 'str'
}
attribute_map = {
'name': 'name',
'execution_order': 'execution_order',
'contrast': 'contrast',
'brightness': 'brightness'
}
def __init__(self, name=None, execution_order=None, contrast=None, brightness=None):
"""VideoContrast - a model defined in huaweicloud sdk"""
self._name = None
self._execution_order = None
self._contrast = None
self._brightness = None
self.discriminator = None
if name is not None:
self.name = name
if execution_order is not None:
self.execution_order = execution_order
if contrast is not None:
self.contrast = contrast
if brightness is not None:
self.brightness = brightness
@property
def name(self):
"""Gets the name of this VideoContrast.
        Contrast algorithm name: \"hw-contrast\".
:return: The name of this VideoContrast.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this VideoContrast.
        Contrast algorithm name: \"hw-contrast\".
:param name: The name of this VideoContrast.
:type: str
"""
self._name = name
@property
def execution_order(self):
"""Gets the execution_order of this VideoContrast.
        1 indicates this algorithm runs first during video processing, 2 indicates it runs second, and so on; unless an algorithm is not executed at all, no two video processing algorithms may share the same execution order.
:return: The execution_order of this VideoContrast.
:rtype: int
"""
return self._execution_order
@execution_order.setter
def execution_order(self, execution_order):
"""Sets the execution_order of this VideoContrast.
        1 indicates this algorithm runs first during video processing, 2 indicates it runs second, and so on; unless an algorithm is not executed at all, no two video processing algorithms may share the same execution order.
:param execution_order: The execution_order of this VideoContrast.
:type: int
"""
self._execution_order = execution_order
@property
def contrast(self):
"""Gets the contrast of this VideoContrast.
        Degree of contrast adjustment; the larger the value, the higher the contrast.
:return: The contrast of this VideoContrast.
:rtype: str
"""
return self._contrast
@contrast.setter
def contrast(self, contrast):
"""Sets the contrast of this VideoContrast.
        Degree of contrast adjustment; the larger the value, the higher the contrast.
:param contrast: The contrast of this VideoContrast.
:type: str
"""
self._contrast = contrast
@property
def brightness(self):
"""Gets the brightness of this VideoContrast.
        Degree of brightness adjustment; the larger the value, the higher the brightness.
:return: The brightness of this VideoContrast.
:rtype: str
"""
return self._brightness
@brightness.setter
def brightness(self, brightness):
"""Sets the brightness of this VideoContrast.
        Degree of brightness adjustment; the larger the value, the higher the brightness.
:param brightness: The brightness of this VideoContrast.
:type: str
"""
self._brightness = brightness
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VideoContrast):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
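# Minimal usage sketch (illustrative only, not part of the generated SDK code):
if __name__ == "__main__":
    vc = VideoContrast(name="hw-contrast", execution_order=1,
                       contrast="50", brightness="50")
    print(vc.to_dict())  # serialize the model to a plain dict
    print(vc)            # pretty-printed via to_str()/__repr__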
|
the-stack_0_10720 | import os
import lit.util # pylint: disable=import-error
import libcxx.test.config
import libcxx.test.target_info
import libcxx.android.build
import libcxx.ndk.test.format
class AndroidTargetInfo(libcxx.test.target_info.DefaultTargetInfo):
def platform(self):
return 'android'
def system(self):
raise NotImplementedError
def add_cxx_compile_flags(self, flags):
flags.extend(['-D__STDC_FORMAT_MACROS'])
def platform_ver(self):
raise NotImplementedError
def platform_name(self):
raise NotImplementedError
def supports_locale(self, loc):
raise NotImplementedError
class Configuration(libcxx.test.config.Configuration):
def __init__(self, lit_config, config):
super(Configuration, self).__init__(lit_config, config)
self.cxx_under_test = None
self.build_cmds_dir = None
self.cxx_template = None
self.link_template = None
self.with_availability = False
def configure(self):
self.configure_target_info()
self.configure_cxx()
self.configure_triple()
self.configure_src_root()
self.configure_obj_root()
self.configure_cxx_stdlib_under_test()
self.configure_cxx_library_root()
self.configure_compile_flags()
self.configure_link_flags()
self.configure_features()
def configure_target_info(self):
self.target_info = AndroidTargetInfo(self)
def configure_compile_flags(self):
super(Configuration, self).configure_compile_flags()
self.cxx.flags.append('-stdlib=libc++')
arch = self.get_lit_conf('arch')
if arch == 'arm':
self.cxx.flags.extend([
'-march=armv7-a',
'-mfloat-abi=softfp',
'-mfpu=vfpv3-d16',
'-mthumb',
])
def configure_link_flags(self):
triple = self.get_lit_conf('target_triple')
if triple.startswith('arm-') or triple.startswith('armv7-'):
self.cxx.link_flags.append('-Wl,--exclude-libs,libunwind.a')
self.cxx.link_flags.append('-lcompiler_rt-extras')
self.cxx.link_flags.append(
'-Wl,--exclude-libs,libcompiler_rt-extras.a')
self.cxx.link_flags.append('-Wl,--exclude-libs,libatomic.a')
self.cxx.link_flags.append('-Wl,--exclude-libs,libgcc.a')
self.cxx.link_flags.append('-pie')
def configure_features(self):
self.config.available_features.add(self.get_lit_conf('std'))
self.config.available_features.add('long_tests')
def get_test_format(self):
# Note that we require that the caller has cleaned this directory,
# ensured its existence, and copied libc++_shared.so into it.
tmp_dir = getattr(self.config, 'device_dir', '/data/local/tmp/libcxx')
build_only = self.get_lit_conf('build_only', False)
build_dir = self.get_lit_conf('build_dir')
return libcxx.ndk.test.format.TestFormat(
self.cxx,
self.libcxx_src_root,
self.libcxx_obj_root,
build_dir,
tmp_dir,
getattr(self.config, 'timeout', '300'),
exec_env={'LD_LIBRARY_PATH': tmp_dir},
build_only=build_only)
|
the-stack_0_10722 | # -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import functools
import os
import os.path
import numpy as np
from blueoil import data_processor
from blueoil.utils.image import load_image
from blueoil.datasets.base import Base
class Ilsvrc2012(Base):
extend_dir = "ILSVRC2012"
# classes = [str(n) for n in range(0, 1000)]
num_classes = 1000
# `test` subsets don't have ground truth.
available_subsets = ["train", "validation"]
def __init__(
self,
*args,
**kwargs
):
super().__init__(*args, **kwargs,)
self.dirs = {
"train": os.path.join(self.data_dir, "train"),
"validation": os.path.join(self.data_dir, "val"),
"test": os.path.join(self.data_dir, "test"),
}
self.texts = {
"train": os.path.join(self.data_dir, "train.txt"),
"validation": os.path.join(self.data_dir, "val.txt"),
"test": os.path.join(self.data_dir, "test.txt"),
}
self._init_files_and_annotations()
@property
@functools.lru_cache(maxsize=None)
def classes(self):
# wget https://raw.githubusercontent.com/Lasagne/Recipes/master/examples/resnet50/imagenet_classes.txt
with open(os.path.join(self.data_dir, 'imagenet_classes.txt')) as f:
return [line.rstrip('\n') for line in f]
@property
def num_per_epoch(self):
files, _ = self._files_and_annotations()
return len(files)
def _init_files_and_annotations(self):
self.files, self.annotations = self._files_and_annotations()
@functools.lru_cache(maxsize=None)
def _files_and_annotations(self):
txt_file = self.texts[self.subset]
files, labels = list(), list()
with open(txt_file) as f:
for line in f:
items = line.split()
files.append(items[0])
labels.append(int(items[1]))
files = [os.path.join(self.dirs[self.subset], filename) for filename in files]
return files, labels
def __getitem__(self, i):
filename = self.files[i]
image = load_image(filename)
label = data_processor.binarize(self.annotations[i], self.num_classes)
label = np.reshape(label, (self.num_classes))
return (image, label)
def __len__(self):
return self.num_per_epoch
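# Annotation file format assumed by _files_and_annotations (illustrative): each
# line of train.txt / val.txt is "<image path relative to the subset directory>
# <integer class label>", for example:
#     n01440764/n01440764_10026.JPEG 0
#     ILSVRC2012_val_00000001.JPEG 65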
|
the-stack_0_10728 | # 5.5.1 Storing High Scores for a Game_Optimistic version
class Scoreboard():
"""Fixed-length sequence of high scores in nondecreasing order."""
class _GameEntry:
"""Nonpublic class for storing entry.
Represents one entry of a list of high scores."""
__slots__ = '_name','_score'
def __init__(self,name,score):
self._name = name
self._score = score
def get_name(self):
return self._name
def get_score(self):
return self._score
def __str__(self):
return '({0}, {1})'.format(self._name,self._score) # e.g., 'Bob,98'
def __init__(self,capacity = 10):
"""Initialize scoreboard with given maximum capacity.
All entries are initially None.
"""
self._board = [None] * capacity # reserve space for future scores
self._n = 0 # number of actual entries
def __getitem__(self,k):
"""Return entry at index k."""
return self._board[k]
def __str__(self):
"""Return string representation of the high score list."""
return '\n'.join(str(self._board[j]) for j in range(self._n))
def add(self,name,score):
"""Consider adding entry to high scores."""
entry = self._GameEntry(name, score)
score = entry.get_score()
# Does new entry qualify as a high score?
# answer is yes if board not full or score is higher than last entry
good = self._n < len(self._board) or score > self._board[-1].get_score()
if good:
if self._n < len(self._board): # no score drops from list
self._n += 1 # so overall number increases
# shift lower scores rightward to make room for new entry
j = self._n - 1
while j > 0 and self._board[j-1].get_score() < score:
self._board[j] = self._board[j-1] # shift entry from j-1 to j
j -= 1 # and decrement j
self._board[j] = entry # when done, add new entry
#----------------------------- my main function -----------------------------
a = Scoreboard(3)
a.add('LiuPeng',93)
a.add('zhangDi',98)
a.add('LiYue',22)
print('---------- first record ----------')
print(a.__str__())
print('---------- get item ----------')
print(a.__getitem__(2))
a.add('Torvalds',100)
print('---------- second record ----------')
print(a.__str__()) |
the-stack_0_10729 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib as mpl
import numpy as np
import scipy as sc
NUMPY_VERSION = np.__version__
MATPLOTLIB_VERSION = mpl.__version__
SCIPY_VERSION = sc.__version__
message = f"""La versión de NumPy es {NUMPY_VERSION}.
La versión de Matplotlib es {MATPLOTLIB_VERSION}.
La versión de SciPy es {SCIPY_VERSION}."""
if __name__ == "__main__":
print(message)
|
the-stack_0_10733 | from universal_computation.experiment import run_experiment
if __name__ == '__main__':
experiment_name = 'fpt'
experiment_params = dict(
task='cifar100',
n=1000, # ignored if not a bit task
num_patterns=5, # ignored if not a bit task
patch_size=16,
model_name='gpt2',
pretrained=True, # if vit this is forced to true, if lstm this is forced to false
freeze_trans=True, # if False, we don't check arguments other than in and out
freeze_in=False,
freeze_pos=False,
freeze_ln=False,
freeze_attn=True,
freeze_ff=True,
freeze_out=False,
in_layer_sizes=None, # not in paper, but can specify layer sizes for an MLP,
out_layer_sizes=None, # ex. [32, 32] creates a 2-layer MLP with dimension 32
learning_rate=1e-3,
batch_size=4,
dropout=0.1,
orth_gain=1.41, # orthogonal initialization of input layer
)
exp_args = dict(
num_iters=10,
device="cpu",
)
run_experiment(experiment_name, experiment_params, exp_args)
|
the-stack_0_10734 |
from typing import Optional, Tuple
from cadquery import Workplane
from paramak import Shape
class RotateCircleShape(Shape):
"""Rotates a circular 3d CadQuery solid from a central point and a radius
Args:
radius: radius of the shape
rotation_angle: The rotation_angle to use when revolving the solid
(degrees). Defaults to 360.0.
"""
def __init__(
self,
radius: float,
rotation_angle: Optional[float] = 360.0,
color: Optional[Tuple[float, float, float, Optional[float]]] = (1., 1., 0.6),
**kwargs
):
super().__init__(
color=color,
**kwargs
)
self.radius = radius
self.rotation_angle = rotation_angle
@property
def rotation_angle(self):
return self._rotation_angle
@rotation_angle.setter
def rotation_angle(self, value):
self._rotation_angle = value
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, value):
self._radius = value
def create_solid(self):
"""Creates a rotated 3d solid using points with circular edges.
Returns:
A CadQuery solid: A 3D solid volume
"""
wire = (
Workplane(self.workplane)
.moveTo(self.points[0][0], self.points[0][1])
.circle(self.radius)
)
self.wire = wire
solid = wire.revolve(self.rotation_angle)
solid = self.rotate_solid(solid)
solid = self.perform_boolean_operations(solid)
self.solid = solid
return solid
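# Minimal usage sketch (illustrative; the circle centre is assumed to be passed
# through the base Shape's ``points`` keyword, which create_solid() reads above):
if __name__ == "__main__":
    ring = RotateCircleShape(points=[(100, 0)], radius=20, rotation_angle=180)
    ring.create_solid()  # revolves the circle and stores the CadQuery solid
    print(ring.solid)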
|
the-stack_0_10735 | from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
from setuptools.command.sdist import sdist as _sdist
from distutils.command.clean import clean as _clean
from distutils.errors import CompileError
from warnings import warn
import os
import sys
from glob import glob
import tarfile
import shutil
import requests
from urllib.request import urlretrieve
# use cython if we can import it successfully
try:
from Cython.Distutils import build_ext as _build_ext
except ImportError:
use_cython = False
else:
use_cython = True
# wrap the build_ext command to handle numpy bootstrap and compilation errors
class build_ext(_build_ext):
# see http://stackoverflow.com/q/19919905 for explanation
def finalize_options(self):
_build_ext.finalize_options(self)
__builtins__.__NUMPY_SETUP__ = False
import numpy as np
self.include_dirs.append(np.get_include())
# if extension modules fail to build, keep going anyway
def run(self):
try:
_build_ext.run(self)
except CompileError:
warn('Failed to build extension modules')
import traceback
print(traceback.format_exc(), file=sys.stderr)
# wrap the sdist command to try to generate cython sources
class sdist(_sdist):
def run(self):
try:
from Cython.Build import cythonize
cythonize(os.path.join('pyhlm','**','*.pyx'), compiler_directives={'language_level' : "3"})
except:
warn('Failed to generate extension files from Cython sources')
finally:
_sdist.run(self)
# wrap the clean command to remove object files
class clean(_clean):
def run(self):
try:
for f in glob(os.path.join('pyhlm','**','*.so')): # not recursive before Python 3.5
os.remove(f)
except:
warn('Failed to remove all object files')
finally:
_clean.run(self)
# make dependency directory
if not os.path.exists('deps'):
os.mkdir('deps')
# download Eigen if we don't have it in deps
eigenurl = 'https://gitlab.com/libeigen/eigen/-/archive/3.3.7/eigen-3.3.7.tar.gz'
eigentarpath = os.path.join('deps', 'Eigen.tar.gz')
eigenpath = os.path.join('deps', 'Eigen')
if not os.path.exists(eigenpath):
print('Downloading Eigen...')
r = requests.get(eigenurl)
with open(eigentarpath, 'wb') as f:
f.write(r.content)
with tarfile.open(eigentarpath, 'r') as tar:
tar.extractall('deps')
thedir = glob(os.path.join('deps', 'eigen-*'))[0]
shutil.move(os.path.join(thedir, 'Eigen'), eigenpath)
print('...done!')
# make a list of extension modules
extension_pathspec = os.path.join('pyhlm','**','*.pyx') # not recursive before Python 3.5
paths = [os.path.splitext(fp)[0] for fp in glob(extension_pathspec)]
names = ['.'.join(os.path.split(p)) for p in paths]
with open("requirements.txt", "r") as f:
requirements = list(f.readlines())
ext_modules = [
Extension(
name, sources=[path + '.cpp'],
include_dirs=['deps'],
extra_compile_args=['-O3','-std=c++11','-DNDEBUG','-w','-DHLM_TEMPS_ON_HEAP'])
for name, path in zip(names,paths)]
# if using cython, rebuild the extension files from the .pyx sources
if use_cython:
from Cython.Build import cythonize
try:
ext_modules = cythonize(extension_pathspec, compiler_directives={'language_level' : "3"})
except:
warn('Failed to generate extension module code from Cython files')
# put it all together with a call to setup()
setup(name='pyhlm',
version='1.0.3',
description="Bayesian inference in HLMs",
author='Ryo Ozaki',
author_email='[email protected]',
url="https://github.com/RyoOzaki/npbdaa",
license='MIT',
packages=['pyhlm', 'pyhlm.internals', 'pyhlm.util'],
platforms='ALL',
keywords=['bayesian', 'inference', 'mcmc', 'time-series', 'monte-carlo',
'double articulation', 'hierarchical Dirichlet process hidden language model'],
install_requires=requirements,
setup_requires=['numpy', "future", "six"],
ext_modules=ext_modules,
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: C++'],
cmdclass={'build_ext': build_ext, 'sdist': sdist, 'clean': clean})
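# Typical invocations (illustrative):
#   python setup.py build_ext --inplace   # compile the Cython/C++ extension modules
#   python setup.py sdist                 # regenerate .cpp sources via the sdist wrapper
#   python setup.py clean                 # remove built .so files via the clean wrapper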
|
the-stack_0_10736 | """
===============================================================================
Phosphene drawings from Beyeler et al. (2019)
===============================================================================
This example shows how to use the Beyeler et al. (2019) dataset.
[Beyeler2019]_ asked Argus I/II users to draw what they see in response to
single-electrode stimulation.
.. important ::
For this dataset you will need to install both
`Pandas <https://pandas.pydata.org>`_ (``pip install pandas``) and
    `HDF5 for Python <https://www.h5py.org>`_ (``pip install h5py``).
Loading the dataset
-------------------
Due to its size (263 MB), the dataset is not included with pulse2percept, but
can be downloaded from the Open Science Framework (OSF).
By default, the dataset will be stored in a local directory
‘~/pulse2percept_data/’ within your user directory (but a different path can be
specified).
This way, the datasets is only downloaded once, and future calls to the fetch
function will load the dataset from your local copy.
The data itself will be provided as a Pandas ``DataFrame``:
"""
# sphinx_gallery_thumbnail_number = 2
from pulse2percept.datasets import fetch_beyeler2019
data = fetch_beyeler2019()
print(data)
###############################################################################
#
# Inspecting the DataFrame tells us that there are 400 phosphene drawings
# (the rows) each with 16 different attributes (the columns).
#
# These attributes include specifiers such as "subject", "electrode", and
# "image". We can print all column names using:
data.columns
###############################################################################
# .. note ::
#
# The meaning of all column names is explained in the docstring of
# the :py:func:`~pulse2percept.datasets.fetch_beyeler2019` function.
#
# For example, "subject" contains the different subject IDs used in the study:
data.subject.unique()
###############################################################################
# To select all drawings from Subject 2, we can index into the DataFrame as
# follows:
print(data[data.subject == 'S2'])
###############################################################################
# This leaves us with 110 rows, each of which correspond to one phosphene
# drawings from a number of different electrodes and trials.
#
# An alternative to indexing into the DataFrame is to load only a subset of
# the data:
print(fetch_beyeler2019(subjects='S2'))
###############################################################################
# Plotting the data
# -----------------
#
# Arguably the most important column is "image". This is the phosphene drawing
# obtained during a particular trial.
#
# Each phosphene drawing is a 2D black-and-white NumPy array, so we can just
# plot it using Matplotlib like any other image:
import matplotlib.pyplot as plt
plt.imshow(data.loc[0, 'image'], cmap='gray')
###############################################################################
# However, we might be more interested in seeing how phosphene shape differs
# for different electrodes.
# For this we can use :py:func:`~pulse2percept.viz.plot_argus_phosphenes` from
# the :py:mod:`~pulse2percept.viz` module.
# In addition to the ``data`` matrix, the function will also want an
# :py:class:`~pulse2percept.implants.ArgusII` object implanted at the correct
# location.
#
# Consulting [Beyeler2019]_ tells us that the prosthesis was roughly implanted
# in the following location:
from pulse2percept.implants import ArgusII
argus = ArgusII(x=-1331, y=-850, rot=-0.495, eye='RE')
###############################################################################
# (We also need to specify the dimensions of the screens that the subject used,
# expressed in degrees of visual angle, so that we can scale the phosphene
# drawing appropriately. This should really be part of the Beyeler dataset and
# will be fixed in a future version.
# For now, we add the necessary columns ourselves.)
import pandas as pd
data = fetch_beyeler2019(subjects='S2')
data['img_x_dva'] = pd.Series([(-30, 30)] * len(data), index=data.index)
data['img_y_dva'] = pd.Series([(-22.5, 22.5)] * len(data), index=data.index)
###############################################################################
# Passing both ``data`` and ``argus`` to
# :py:func:`~pulse2percept.viz.plot_argus_phosphenes` will then allow the
# function to overlay the phosphene drawings over a schematic of the implant.
# Here, phosphene drawings from different trials are averaged, and aligned with
# the center of the electrode that was used to obtain the drawing:
from pulse2percept.viz import plot_argus_phosphenes
plot_argus_phosphenes(data, argus)
###############################################################################
# Great! We have just reproduced a panel from Figure 2 in [Beyeler2019]_.
#
# As [Beyeler2019]_ went on to show, the orientation of these phosphenes is
# well aligned with the map of nerve fiber bundles (NFBs) in each subject's
# eye.
#
# To see how the phosphene drawings line up with the NFBs, we can also pass an
# :py:class:`~pulse2percept.models.AxonMapModel` to the function.
# Of course, we need to make sure that we use the correct dimensions. Subject
# S2 had their optic disc center located 14 deg nasally, 2.4 deg superior from
# the fovea:
from pulse2percept.models import AxonMapModel
model = AxonMapModel(loc_od=(14, 2.4))
plot_argus_phosphenes(data, argus, axon_map=model)
###############################################################################
# Analyzing phosphene shape
# -------------------------
#
# The phosphene drawings also come annotated with different shape descriptors:
# area, orientation, and elongation.
# Elongation is also called eccentricity in the computer vision literature,
# which is not to be confused with retinal eccentricity. It is simply a number
# between 0 and 1, where 0 corresponds to a circle and 1 corresponds to an
# infinitesimally thin line (note that the Methods section of [Beyeler2019]_
# got it wrong).
#
# [Beyeler2019]_ made the point that if each phosphene could be considered a
# pixel (or essentially a blob), as is so often assumed in the literature, then
# most phosphenes should have zero elongation.
#
# Instead, using Matplotlib's histogram function, we can convince ourselves
# that most phosphenes are in fact elongated:
data = fetch_beyeler2019()
data.eccentricity.plot(kind='hist')
plt.xlabel('phosphene elongation')
###############################################################################
# Phosphenes are not pixels!
# And we have just reproduced Fig. 3C of [Beyeler2019]_.
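###############################################################################
# The same one-liner works for the other shape descriptors listed above; for
# instance (sketch only -- this assumes the orientation descriptor is stored in
# a column named ``orientation``):
#
#     data.orientation.plot(kind='hist')
#     plt.xlabel('phosphene orientation')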
|
the-stack_0_10738 | # -*- coding: utf-8 -*-
r"""
Free Dendriform Algebras
AUTHORS:
Frédéric Chapoton (2017)
"""
# ****************************************************************************
# Copyright (C) 2010-2015 Frédéric Chapoton <[email protected]>,
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
# ****************************************************************************
from six import iteritems
from sage.categories.hopf_algebras import HopfAlgebras
from sage.combinat.free_module import CombinatorialFreeModule
from sage.combinat.words.alphabet import Alphabet
from sage.combinat.binary_tree import (BinaryTrees, BinaryTree,
LabelledBinaryTrees,
LabelledBinaryTree)
from sage.categories.pushout import (ConstructionFunctor,
CompositeConstructionFunctor,
IdentityConstructionFunctor)
from sage.categories.rings import Rings
from sage.categories.functor import Functor
from sage.misc.lazy_attribute import lazy_attribute
from sage.misc.cachefunc import cached_method
from sage.sets.family import Family
from sage.structure.coerce_exceptions import CoercionException
class FreeDendriformAlgebra(CombinatorialFreeModule):
r"""
The free dendriform algebra.
Dendriform algebras are associative algebras, where the associative
product `*` is decomposed as a sum of two binary operations
.. MATH::
x * y = x \succ y + x \prec y
that satisfy the axioms:
.. MATH::
(x \succ y) \prec z = x \succ (y \prec z),
.. MATH::
(x \prec y) \prec z = x \prec (y * z).
.. MATH::
(x * y) \succ z = x \succ (y \succ z).
The free Dendriform algebra on a given set `E` has an explicit
description using (planar) binary trees, just as the free
associative algebra can be described using words. The underlying
vector space has a basis indexed by finite binary trees endowed
with a map from their vertices to `E`. In this basis, the
associative product of two (decorated) binary trees `S * T` is the
sum over all possible ways of identifying (glueing) the rightmost path in
`S` and the leftmost path in `T`.
The decomposition of the associative product as the sum of two
binary operations `\succ` and
`\prec` is made by separating the terms according to the origin of
the root vertex. For `x \succ y`, one keeps the terms where the root
vertex comes from `y`, whereas for `x \prec y` one keeps the terms
where the root vertex comes from `x`.
The free dendriform algebra can also be considered as the free
algebra over the Dendriform operad.
.. NOTE::
The usual binary operator `*` is used for the
associative product.
EXAMPLES::
sage: F = algebras.FreeDendriform(ZZ, 'xyz')
sage: x,y,z = F.gens()
sage: (x * y) * z
B[x[., y[., z[., .]]]] + B[x[., z[y[., .], .]]] + B[y[x[., .], z[., .]]] + B[z[x[., y[., .]], .]] + B[z[y[x[., .], .], .]]
The free dendriform algebra is associative::
sage: x * (y * z) == (x * y) * z
True
The associative product decomposes in two parts::
sage: x * y == F.prec(x, y) + F.succ(x, y)
True
The axioms hold::
sage: F.prec(F.succ(x, y), z) == F.succ(x, F.prec(y, z))
True
sage: F.prec(F.prec(x, y), z) == F.prec(x, y * z)
True
sage: F.succ(x * y, z) == F.succ(x, F.succ(y, z))
True
When there is only one generator, unlabelled trees are used instead::
sage: F1 = algebras.FreeDendriform(QQ)
sage: w = F1.gen(0); w
B[[., .]]
sage: w * w * w
B[[., [., [., .]]]] + B[[., [[., .], .]]] + B[[[., .], [., .]]] + B[[[., [., .]], .]] + B[[[[., .], .], .]]
REFERENCES:
- [LodayRonco]_
"""
@staticmethod
def __classcall_private__(cls, R, names=None):
"""
Normalize input to ensure a unique representation.
EXAMPLES::
sage: F1 = algebras.FreeDendriform(QQ, 'xyz')
sage: F2 = algebras.FreeDendriform(QQ, ['x','y','z'])
sage: F3 = algebras.FreeDendriform(QQ, Alphabet('xyz'))
sage: F1 is F2 and F1 is F3
True
"""
if names is not None:
if ',' in names:
names = [u for u in names if u != ',']
names = Alphabet(names)
if R not in Rings():
raise TypeError("argument R must be a ring")
return super(FreeDendriformAlgebra, cls).__classcall__(cls, R,
names)
def __init__(self, R, names=None):
"""
Initialize ``self``.
TESTS::
sage: A = algebras.FreeDendriform(QQ, '@'); A
Free Dendriform algebra on one generator ['@'] over Rational Field
sage: TestSuite(A).run() # long time (3s)
sage: F = algebras.FreeDendriform(QQ, 'xy')
sage: TestSuite(F).run() # long time (3s)
"""
if names is None:
Trees = BinaryTrees()
key = BinaryTree._sort_key
self._alphabet = Alphabet(['o'])
else:
Trees = LabelledBinaryTrees()
key = LabelledBinaryTree._sort_key
self._alphabet = names
# Here one would need LabelledBinaryTrees(names)
# so that one can restrict the labels to some fixed set
cat = HopfAlgebras(R).WithBasis().Graded().Connected()
CombinatorialFreeModule.__init__(self, R, Trees,
latex_prefix="",
sorting_key=key,
category=cat)
def variable_names(self):
r"""
Return the names of the variables.
EXAMPLES::
sage: R = algebras.FreeDendriform(QQ, 'xy')
sage: R.variable_names()
{'x', 'y'}
"""
return self._alphabet
def _repr_(self):
"""
Return the string representation of ``self``.
EXAMPLES::
sage: algebras.FreeDendriform(QQ, '@') # indirect doctest
Free Dendriform algebra on one generator ['@'] over Rational Field
"""
n = self.algebra_generators().cardinality()
if n == 1:
gen = "one generator"
else:
gen = "{} generators".format(n)
s = "Free Dendriform algebra on {} {} over {}"
try:
return s.format(gen, self._alphabet.list(), self.base_ring())
except NotImplementedError:
return s.format(gen, self._alphabet, self.base_ring())
def gen(self, i):
r"""
Return the ``i``-th generator of the algebra.
INPUT:
- ``i`` -- an integer
EXAMPLES::
sage: F = algebras.FreeDendriform(ZZ, 'xyz')
sage: F.gen(0)
B[x[., .]]
sage: F.gen(4)
Traceback (most recent call last):
...
IndexError: argument i (= 4) must be between 0 and 2
"""
G = self.algebra_generators()
n = G.cardinality()
if i < 0 or not i < n:
m = "argument i (= {}) must be between 0 and {}".format(i, n - 1)
raise IndexError(m)
return G[G.keys().unrank(i)]
@cached_method
def algebra_generators(self):
r"""
Return the generators of this algebra.
These are the binary trees with just one vertex.
EXAMPLES::
sage: A = algebras.FreeDendriform(ZZ, 'fgh'); A
Free Dendriform algebra on 3 generators ['f', 'g', 'h']
over Integer Ring
sage: list(A.algebra_generators())
[B[f[., .]], B[g[., .]], B[h[., .]]]
sage: A = algebras.FreeDendriform(QQ, ['x1','x2'])
sage: list(A.algebra_generators())
[B[x1[., .]], B[x2[., .]]]
"""
Trees = self.basis().keys()
return Family(self._alphabet, lambda a: self.monomial(Trees([], a)))
def change_ring(self, R):
"""
Return the free dendriform algebra in the same variables over `R`.
INPUT:
- ``R`` -- a ring
EXAMPLES::
sage: A = algebras.FreeDendriform(ZZ, 'fgh')
sage: A.change_ring(QQ)
Free Dendriform algebra on 3 generators ['f', 'g', 'h'] over
Rational Field
"""
return FreeDendriformAlgebra(R, names=self.variable_names())
def gens(self):
"""
Return the generators of ``self`` (as an algebra).
EXAMPLES::
sage: A = algebras.FreeDendriform(ZZ, 'fgh')
sage: A.gens()
(B[f[., .]], B[g[., .]], B[h[., .]])
"""
return tuple(self.algebra_generators())
def degree_on_basis(self, t):
"""
Return the degree of a binary tree in the free Dendriform algebra.
This is the number of vertices.
EXAMPLES::
sage: A = algebras.FreeDendriform(QQ,'@')
sage: RT = A.basis().keys()
sage: u = RT([], '@')
sage: A.degree_on_basis(u.over(u))
2
"""
return t.node_number()
@cached_method
def an_element(self):
"""
Return an element of ``self``.
EXAMPLES::
sage: A = algebras.FreeDendriform(QQ, 'xy')
sage: A.an_element()
B[x[., .]] + 2*B[x[., x[., .]]] + 2*B[x[x[., .], .]]
"""
o = self.gen(0)
return o + 2 * o * o
def some_elements(self):
"""
Return some elements of the free dendriform algebra.
EXAMPLES::
sage: A = algebras.FreeDendriform(QQ)
sage: A.some_elements()
[B[.],
B[[., .]],
B[[., [., .]]] + B[[[., .], .]],
B[.] + B[[., [., .]]] + B[[[., .], .]]]
With several generators::
sage: A = algebras.FreeDendriform(QQ, 'xy')
sage: A.some_elements()
[B[.],
B[x[., .]],
B[x[., x[., .]]] + B[x[x[., .], .]],
B[.] + B[x[., x[., .]]] + B[x[x[., .], .]]]
"""
u = self.one()
o = self.gen(0)
x = o * o
y = u + x
return [u, o, x, y]
def one_basis(self):
"""
Return the index of the unit.
EXAMPLES::
sage: A = algebras.FreeDendriform(QQ, '@')
sage: A.one_basis()
.
sage: A = algebras.FreeDendriform(QQ, 'xy')
sage: A.one_basis()
.
"""
Trees = self.basis().keys()
return Trees(None)
def product_on_basis(self, x, y):
r"""
Return the `*` associative dendriform product of two trees.
This is the sum over all possible ways of identifying the
rightmost path in `x` and the leftmost path in `y`. Every term
corresponds to a shuffle of the vertices on the rightmost path
in `x` and the vertices on the leftmost path in `y`.
.. SEEALSO::
- :meth:`succ_product_on_basis`, :meth:`prec_product_on_basis`
EXAMPLES::
sage: A = algebras.FreeDendriform(QQ)
sage: RT = A.basis().keys()
sage: x = RT([])
sage: A.product_on_basis(x, x)
B[[., [., .]]] + B[[[., .], .]]
"""
return self.sum(self.basis()[u] for u in x.dendriform_shuffle(y))
def succ_product_on_basis(self, x, y):
r"""
Return the `\succ` dendriform product of two trees.
This is the sum over all possible ways to identify the rightmost path
in `x` and the leftmost path in `y`, with the additional condition
that the root vertex of the result comes from `y`.
The usual symbol for this operation is `\succ`.
.. SEEALSO::
- :meth:`product_on_basis`, :meth:`prec_product_on_basis`
EXAMPLES::
sage: A = algebras.FreeDendriform(QQ)
sage: RT = A.basis().keys()
sage: x = RT([])
sage: A.succ_product_on_basis(x, x)
B[[[., .], .]]
TESTS::
sage: u = A.one().support()[0]
sage: A.succ_product_on_basis(u, u)
Traceback (most recent call last):
...
ValueError: dendriform products | < | and | > | are not defined
"""
if y.is_empty():
if x.is_empty():
raise ValueError("dendriform products | < | and | > | are "
"not defined")
else:
return []
if x.is_empty():
return [y]
K = self.basis().keys()
if hasattr(y, 'label'):
return self.sum(self.basis()[K([u, y[1]], y.label())]
for u in x.dendriform_shuffle(y[0]))
return self.sum(self.basis()[K([u, y[1]])]
for u in x.dendriform_shuffle(y[0]))
@lazy_attribute
def succ(self):
r"""
Return the `\succ` dendriform product.
This is the sum over all possible ways of identifying the
rightmost path in `x` and the leftmost path in `y`, with the
additional condition that the root vertex of the result comes
from `y`.
The usual symbol for this operation is `\succ`.
.. SEEALSO::
:meth:`product`, :meth:`prec`, :meth:`over`, :meth:`under`
EXAMPLES::
sage: A = algebras.FreeDendriform(QQ)
sage: RT = A.basis().keys()
sage: x = A.gen(0)
sage: A.succ(x, x)
B[[[., .], .]]
"""
suc = self.succ_product_on_basis
return self._module_morphism(self._module_morphism(suc, position=0,
codomain=self),
position=1)
def prec_product_on_basis(self, x, y):
r"""
Return the `\prec` dendriform product of two trees.
This is the sum over all possible ways of identifying the
rightmost path in `x` and the leftmost path in `y`, with the
additional condition that the root vertex of the result comes
from `x`.
The usual symbol for this operation is `\prec`.
.. SEEALSO::
- :meth:`product_on_basis`, :meth:`succ_product_on_basis`
EXAMPLES::
sage: A = algebras.FreeDendriform(QQ)
sage: RT = A.basis().keys()
sage: x = RT([])
sage: A.prec_product_on_basis(x, x)
B[[., [., .]]]
TESTS::
sage: u = A.one().support()[0]
sage: A.prec_product_on_basis(u, u)
Traceback (most recent call last):
...
ValueError: dendriform products | < | and | > | are not defined
"""
if x.is_empty() and y.is_empty():
raise ValueError("dendriform products | < | and | > | are "
"not defined")
if x.is_empty():
return []
if y.is_empty():
return [x]
K = self.basis().keys()
if hasattr(y, 'label'):
return self.sum(self.basis()[K([x[0], u], x.label())]
for u in x[1].dendriform_shuffle(y))
return self.sum(self.basis()[K([x[0], u])]
for u in x[1].dendriform_shuffle(y))
@lazy_attribute
def prec(self):
r"""
Return the `\prec` dendriform product.
This is the sum over all possible ways to identify the rightmost path
in `x` and the leftmost path in `y`, with the additional condition
that the root vertex of the result comes from `x`.
The usual symbol for this operation is `\prec`.
.. SEEALSO::
:meth:`product`, :meth:`succ`, :meth:`over`, :meth:`under`
EXAMPLES::
sage: A = algebras.FreeDendriform(QQ)
sage: RT = A.basis().keys()
sage: x = A.gen(0)
sage: A.prec(x, x)
B[[., [., .]]]
"""
pre = self.prec_product_on_basis
return self._module_morphism(self._module_morphism(pre, position=0,
codomain=self),
position=1)
@lazy_attribute
def over(self):
r"""
Return the over product.
The over product `x/y` is the binary tree obtained by
grafting the root of `y` at the rightmost leaf of `x`.
The usual symbol for this operation is `/`.
.. SEEALSO::
:meth:`product`, :meth:`succ`, :meth:`prec`, :meth:`under`
EXAMPLES::
sage: A = algebras.FreeDendriform(QQ)
sage: RT = A.basis().keys()
sage: x = A.gen(0)
sage: A.over(x, x)
B[[., [., .]]]
"""
def ov(x, y):
return self._monomial(x.over(y))
return self._module_morphism(self._module_morphism(ov, position=0,
codomain=self),
position=1)
@lazy_attribute
def under(self):
r"""
Return the under product.
        The under product `x \backslash y` is the binary tree obtained by
grafting the root of `x` at the leftmost leaf of `y`.
The usual symbol for this operation is `\backslash`.
.. SEEALSO::
:meth:`product`, :meth:`succ`, :meth:`prec`, :meth:`over`
EXAMPLES::
sage: A = algebras.FreeDendriform(QQ)
sage: RT = A.basis().keys()
sage: x = A.gen(0)
sage: A.under(x, x)
B[[[., .], .]]
"""
def und(x, y):
return self._monomial(x.under(y))
return self._module_morphism(self._module_morphism(und, position=0,
codomain=self),
position=1)
def coproduct_on_basis(self, x):
"""
Return the coproduct of a binary tree.
EXAMPLES::
sage: A = algebras.FreeDendriform(QQ)
sage: x = A.gen(0)
sage: ascii_art(A.coproduct(A.one())) # indirect doctest
1 # 1
sage: ascii_art(A.coproduct(x)) # indirect doctest
1 # B + B # 1
o o
sage: A = algebras.FreeDendriform(QQ, 'xyz')
sage: x, y, z = A.gens()
sage: w = A.under(z,A.over(x,y))
sage: A.coproduct(z)
B[.] # B[z[., .]] + B[z[., .]] # B[.]
sage: A.coproduct(w)
B[.] # B[x[z[., .], y[., .]]] + B[x[., .]] # B[z[., y[., .]]] +
B[x[., .]] # B[y[z[., .], .]] + B[x[., y[., .]]] # B[z[., .]] +
B[x[z[., .], .]] # B[y[., .]] + B[x[z[., .], y[., .]]] # B[.]
"""
B = self.basis()
Trees = B.keys()
if not x.node_number():
return self.one().tensor(self.one())
L, R = list(x)
try:
root = x.label()
except AttributeError:
root = '@'
resu = self.one().tensor(self.monomial(x))
resu += sum(cL * cR *
self.monomial(Trees([LL[0], RR[0]], root)).tensor(
self.monomial(LL[1]) * self.monomial(RR[1]))
for LL, cL in self.coproduct_on_basis(L)
for RR, cR in self.coproduct_on_basis(R))
return resu
# after this line : coercion
def _element_constructor_(self, x):
r"""
Convert ``x`` into ``self``.
EXAMPLES::
sage: R = algebras.FreeDendriform(QQ, 'xy')
sage: x, y = R.gens()
sage: R(x)
B[x[., .]]
sage: R(x+4*y)
B[x[., .]] + 4*B[y[., .]]
sage: Trees = R.basis().keys()
sage: R(Trees([],'x'))
B[x[., .]]
sage: D = algebras.FreeDendriform(ZZ, 'xy')
sage: X, Y = D.gens()
sage: R(X-Y).parent()
Free Dendriform algebra on 2 generators ['x', 'y'] over Rational Field
"""
if x in self.basis().keys():
return self.monomial(x)
try:
P = x.parent()
if isinstance(P, FreeDendriformAlgebra):
if P is self:
return x
return self.element_class(self, x.monomial_coefficients())
except AttributeError:
raise TypeError('not able to coerce this in this algebra')
# Ok, not a dendriform algebra element (or should not be viewed as one).
def _coerce_map_from_(self, R):
r"""
Return ``True`` if there is a coercion from ``R`` into ``self``
and ``False`` otherwise.
The things that coerce into ``self`` are
- free dendriform algebras in a subset of variables of ``self``
over a base with a coercion map into ``self.base_ring()``
EXAMPLES::
sage: F = algebras.FreeDendriform(GF(7), 'xyz'); F
Free Dendriform algebra on 3 generators ['x', 'y', 'z']
over Finite Field of size 7
Elements of the free dendriform algebra canonically coerce in::
sage: x, y, z = F.gens()
sage: F.coerce(x+y) == x+y
True
The free dendriform algebra over `\ZZ` on `x, y, z` coerces in, since
`\ZZ` coerces to `\GF{7}`::
sage: G = algebras.FreeDendriform(ZZ, 'xyz')
sage: Gx,Gy,Gz = G.gens()
sage: z = F.coerce(Gx+Gy); z
B[x[., .]] + B[y[., .]]
sage: z.parent() is F
True
However, `\GF{7}` does not coerce to `\ZZ`, so the free dendriform
algebra over `\GF{7}` does not coerce to the one over `\ZZ`::
sage: G.coerce(y)
Traceback (most recent call last):
...
TypeError: no canonical coercion from Free Dendriform algebra
on 3 generators ['x', 'y', 'z'] over Finite Field of size
7 to Free Dendriform algebra on 3 generators ['x', 'y', 'z']
over Integer Ring
TESTS::
sage: F = algebras.FreeDendriform(ZZ, 'xyz')
sage: G = algebras.FreeDendriform(QQ, 'xyz')
sage: H = algebras.FreeDendriform(ZZ, 'y')
sage: F._coerce_map_from_(G)
False
sage: G._coerce_map_from_(F)
True
sage: F._coerce_map_from_(H)
True
sage: F._coerce_map_from_(QQ)
False
sage: G._coerce_map_from_(QQ)
False
sage: F.has_coerce_map_from(PolynomialRing(ZZ, 3, 'x,y,z'))
False
"""
# free dendriform algebras in a subset of variables
# over any base that coerces in:
if isinstance(R, FreeDendriformAlgebra):
if all(x in self.variable_names() for x in R.variable_names()):
if self.base_ring().has_coerce_map_from(R.base_ring()):
return True
return False
def construction(self):
"""
Return a pair ``(F, R)``, where ``F`` is a :class:`DendriformFunctor`
and `R` is a ring, such that ``F(R)`` returns ``self``.
EXAMPLES::
sage: P = algebras.FreeDendriform(ZZ, 'x,y')
sage: x,y = P.gens()
sage: F, R = P.construction()
sage: F
Dendriform[x,y]
sage: R
Integer Ring
sage: F(ZZ) is P
True
sage: F(QQ)
Free Dendriform algebra on 2 generators ['x', 'y'] over Rational Field
"""
return DendriformFunctor(self.variable_names()), self.base_ring()
class DendriformFunctor(ConstructionFunctor):
"""
A constructor for dendriform algebras.
EXAMPLES::
sage: P = algebras.FreeDendriform(ZZ, 'x,y')
sage: x,y = P.gens()
sage: F = P.construction()[0]; F
Dendriform[x,y]
sage: A = GF(5)['a,b']
sage: a, b = A.gens()
sage: F(A)
Free Dendriform algebra on 2 generators ['x', 'y']
over Multivariate Polynomial Ring in a, b over Finite Field of size 5
sage: f = A.hom([a+b,a-b],A)
sage: F(f)
Generic endomorphism of Free Dendriform algebra on 2 generators ['x', 'y']
over Multivariate Polynomial Ring in a, b over Finite Field of size 5
sage: F(f)(a * F(A)(x))
(a+b)*B[x[., .]]
"""
rank = 9
def __init__(self, vars):
"""
EXAMPLES::
sage: F = sage.combinat.free_dendriform_algebra.DendriformFunctor(['x','y'])
sage: F
Dendriform[x,y]
sage: F(ZZ)
Free Dendriform algebra on 2 generators ['x', 'y'] over Integer Ring
"""
Functor.__init__(self, Rings(), Rings())
self.vars = vars
def _apply_functor(self, R):
"""
Apply the functor to an object of ``self``'s domain.
EXAMPLES::
sage: R = algebras.FreeDendriform(ZZ, 'x,y,z')
sage: F = R.construction()[0]; F
Dendriform[x,y,z]
sage: type(F)
<class 'sage.combinat.free_dendriform_algebra.DendriformFunctor'>
sage: F(ZZ) # indirect doctest
Free Dendriform algebra on 3 generators ['x', 'y', 'z'] over Integer Ring
"""
return FreeDendriformAlgebra(R, self.vars)
def _apply_functor_to_morphism(self, f):
"""
Apply the functor ``self`` to the ring morphism `f`.
TESTS::
sage: R = algebras.FreeDendriform(ZZ, 'x').construction()[0]
sage: R(ZZ.hom(GF(3))) # indirect doctest
Generic morphism:
From: Free Dendriform algebra on one generator ['x'] over Integer Ring
To: Free Dendriform algebra on one generator ['x'] over Finite Field of size 3
"""
dom = self(f.domain())
codom = self(f.codomain())
def action(x):
return codom._from_dict({a: f(b)
for a, b in iteritems(x.monomial_coefficients())})
return dom.module_morphism(function=action, codomain=codom)
def __eq__(self, other):
"""
EXAMPLES::
sage: F = algebras.FreeDendriform(ZZ, 'x,y,z').construction()[0]
sage: G = algebras.FreeDendriform(QQ, 'x,y,z').construction()[0]
sage: F == G
True
sage: G == loads(dumps(G))
True
sage: G = algebras.FreeDendriform(QQ, 'x,y').construction()[0]
sage: F == G
False
"""
if not isinstance(other, DendriformFunctor):
return False
return self.vars == other.vars
def __ne__(self, other):
"""
EXAMPLES::
sage: F = algebras.FreeDendriform(ZZ, 'x,y,z').construction()[0]
sage: G = algebras.FreeDendriform(QQ, 'x,y,z').construction()[0]
sage: F != G
False
sage: G != loads(dumps(G))
False
sage: G = algebras.FreeDendriform(QQ, 'x,y').construction()[0]
sage: F != G
True
"""
return not (self == other)
def __mul__(self, other):
"""
If two Dendriform functors are given in a row, form a single Dendriform functor
with all of the variables.
EXAMPLES::
sage: F = sage.combinat.free_dendriform_algebra.DendriformFunctor(['x','y'])
sage: G = sage.combinat.free_dendriform_algebra.DendriformFunctor(['t'])
sage: G * F
Dendriform[x,y,t]
"""
if isinstance(other, IdentityConstructionFunctor):
return self
if isinstance(other, DendriformFunctor):
if set(self.vars).intersection(other.vars):
raise CoercionException("Overlapping variables (%s,%s)" %
(self.vars, other.vars))
return DendriformFunctor(other.vars + self.vars)
elif (isinstance(other, CompositeConstructionFunctor) and
isinstance(other.all[-1], DendriformFunctor)):
return CompositeConstructionFunctor(other.all[:-1],
self * other.all[-1])
else:
return CompositeConstructionFunctor(other, self)
def merge(self, other):
"""
Merge ``self`` with another construction functor, or return ``None``.
EXAMPLES::
sage: F = sage.combinat.free_dendriform_algebra.DendriformFunctor(['x','y'])
sage: G = sage.combinat.free_dendriform_algebra.DendriformFunctor(['t'])
sage: F.merge(G)
Dendriform[x,y,t]
sage: F.merge(F)
Dendriform[x,y]
Now some actual use cases::
sage: R = algebras.FreeDendriform(ZZ, 'x,y,z')
sage: x,y,z = R.gens()
sage: 1/2 * x
1/2*B[x[., .]]
sage: parent(1/2 * x)
Free Dendriform algebra on 3 generators ['x', 'y', 'z'] over Rational Field
sage: S = algebras.FreeDendriform(QQ, 'zt')
sage: z,t = S.gens()
sage: x + t
B[t[., .]] + B[x[., .]]
sage: parent(x + t)
Free Dendriform algebra on 4 generators ['z', 't', 'x', 'y'] over Rational Field
"""
if isinstance(other, DendriformFunctor):
if self.vars == other.vars:
return self
ret = list(self.vars)
cur_vars = set(ret)
for v in other.vars:
if v not in cur_vars:
ret.append(v)
return DendriformFunctor(Alphabet(ret))
else:
return None
def _repr_(self):
"""
TESTS::
sage: algebras.FreeDendriform(QQ,'x,y,z,t').construction()[0]
Dendriform[x,y,z,t]
"""
return "Dendriform[%s]" % ','.join(self.vars)
|
the-stack_0_10739 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.settings.app import Settings
class TestSettingsCellData(GaiaTestCase):
def test_enable_cell_data_via_settings_app(self):
"""
https://moztrap.mozilla.org/manage/case/1373/
"""
settings = Settings(self.marionette)
settings.launch()
cell_and_data_settings = settings.open_cell_and_data_settings()
# verify that a carrier is displayed
self.assertTrue(len(cell_and_data_settings.carrier_name) > 0)
# enable cell data
self.assertFalse(cell_and_data_settings.is_data_toggle_checked)
cell_data_prompt = cell_and_data_settings.enable_data()
# deal with prompt that sometimes appears (on first setting)
if cell_data_prompt.is_displayed:
# Cell data should not be enabled until we turn it on via the prompt
self.assertFalse(cell_and_data_settings.is_data_toggle_checked)
self.assertFalse(self.data_layer.get_setting('ril.data.enabled'), "Cell data was enabled before responding to the prompt")
cell_data_prompt.turn_on()
# Wait for cell data to be turned on
self.wait_for_condition(lambda m: cell_and_data_settings.is_data_toggle_checked)
# verify that cell data is now enabled and connected
self.assertTrue(self.data_layer.is_cell_data_enabled, "Cell data was not enabled via Settings app")
self.wait_for_condition(
lambda m: self.data_layer.is_cell_data_connected,
message='Cell data was not connected via Settings app')
|
the-stack_0_10740 | # MIT License
#
# Copyright (c) 2020-2021 Parakoopa and the SkyTemple Contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import argparse
import json
import os
import sys
from typing import Tuple, List, Union
from explorerscript.cli import check_settings, SETTINGS_PERFORMANCE_PROGRESS_LIST_VAR_NAME, SETTINGS, \
SETTINGS_DUNGEON_MODE_CONSTANTS, SETTINGS_DMC_OPEN, SETTINGS_DMC_CLOSED, SETTINGS_DMC_REQUEST, \
SETTINGS_DMC_OPEN_REQUEST
from explorerscript.ssb_converting.compiler.utils import Counter
from explorerscript.ssb_converting.ssb_data_types import SsbCoroutine, SsbOperation, SsbRoutineInfo, \
DungeonModeConstants, SsbRoutineType, SsbOpParamConstant, SsbOpParamConstString, SsbOpParamLanguageString, \
SsbOpParamPositionMarker, SsbOpCode
from explorerscript.ssb_converting.ssb_decompiler import ExplorerScriptSsbDecompiler
from explorerscript.util import open_utf8, exps_int
counter = Counter()
def parse_pos_mark_arg(arg_str):
    """Parse a position-mark coordinate string such as '5' or '5.5'.
    Returns a tuple ``(offset, relative)``: ``relative`` is 0 for a plain
    integer coordinate and 2 when the coordinate carries a '.5' half-tile suffix.
    """
    arg_str_arr = arg_str.split('.')
    if len(arg_str_arr) < 2:
        return exps_int(arg_str), 0
    if arg_str_arr[1] != '5' or len(arg_str_arr) > 2:
        raise ValueError("Invalid position mark")
    return exps_int(arg_str_arr[0]), 2
def read_ops(ops: List[dict]) -> List[SsbOperation]:
out_ops = []
for op in ops:
if "params" not in op:
raise ValueError("Params for an op not set.")
if "opcode" not in op:
raise ValueError("Opcode for an op not set.")
params = []
for param in op["params"]:
if isinstance(param, int):
params.append(param)
elif isinstance(param, dict):
if "type" not in param or "value" not in param:
raise ValueError("Invalid param for op.")
if param["type"] == "CONSTANT":
params.append(SsbOpParamConstant(param["value"]))
elif param["type"] == "CONST_STRING":
params.append(SsbOpParamConstString(param["value"]))
elif param["type"] == "LANG_STRING":
params.append(SsbOpParamLanguageString(param["value"]))
elif param["type"] == "POSITION_MARK":
x_offset, x_relative = parse_pos_mark_arg(param["value"]["x"])
y_offset, y_relative = parse_pos_mark_arg(param["value"]["y"])
params.append(SsbOpParamPositionMarker(
param["value"]["name"],
x_offset, y_offset, x_relative, y_relative
))
else:
raise ValueError("Invalid param for op.")
else:
raise ValueError("Invalid param for op.")
out_ops.append(SsbOperation(
counter(), SsbOpCode(-1, op["opcode"]), params
))
return out_ops
def read_routines(routines: List[dict]) -> Tuple[List[SsbRoutineInfo], List[SsbCoroutine], List[List[SsbOperation]]]:
routine_infos = []
named_coroutines = []
routine_ops = []
for r in routines:
if "ops" not in r:
raise ValueError("Ops for a routine not set.")
if "type" not in r:
raise ValueError("Type for a routine not set.")
if r["type"] == "COROUTINE":
if "name" not in r:
raise ValueError("Target for a routine not set.")
named_coroutines.append(SsbCoroutine(-1, r["name"]))
routine_infos.append(SsbRoutineInfo(SsbRoutineType.COROUTINE, -1))
routine_ops.append(read_ops(r["ops"]))
elif r["type"] == "GENERIC":
named_coroutines.append(SsbCoroutine(-1, "n/a"))
routine_infos.append(SsbRoutineInfo(SsbRoutineType.GENERIC, -1))
routine_ops.append(read_ops(r["ops"]))
elif r["type"] == "ACTOR":
if "target_id" not in r:
raise ValueError("Target for a routine not set.")
linked_to = -1
linked_to_name = None
if isinstance(r["target_id"], int):
linked_to = r["target_id"]
else:
linked_to_name = str(r["target_id"])
named_coroutines.append(SsbCoroutine(-1, "n/a"))
routine_infos.append(SsbRoutineInfo(SsbRoutineType.ACTOR, linked_to, linked_to_name))
routine_ops.append(read_ops(r["ops"]))
elif r["type"] == "OBJECT":
if "target_id" not in r:
raise ValueError("Target for a routine not set.")
linked_to = -1
linked_to_name = None
if isinstance(r["target_id"], int):
linked_to = r["target_id"]
else:
linked_to_name = str(r["target_id"])
named_coroutines.append(SsbCoroutine(-1, "n/a"))
routine_infos.append(SsbRoutineInfo(SsbRoutineType.OBJECT, linked_to, linked_to_name))
routine_ops.append(read_ops(r["ops"]))
elif r["type"] == "PERFORMER":
if "target_id" not in r:
raise ValueError("Target for a routine not set.")
linked_to = -1
linked_to_name = None
if isinstance(r["target_id"], int):
linked_to = r["target_id"]
else:
linked_to_name = str(r["target_id"])
named_coroutines.append(SsbCoroutine(-1, "n/a"))
routine_infos.append(SsbRoutineInfo(SsbRoutineType.PERFORMER, linked_to, linked_to_name))
routine_ops.append(read_ops(r["ops"]))
else:
raise ValueError(f"Invalid type for a routine: {r['type']}.")
return routine_infos, named_coroutines, routine_ops
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Decompile a JSON representation of PMD EoS SSB into ExplorerScript.')
parser.add_argument('ssb_path', metavar='SSB_JSON_PATH',
help='SSB JSON to decompile, including the compiler settings.')
parser.add_argument('--source-map', dest='source_map', default=None, metavar='PATH',
help='If specified, output a source map at that location.')
args = parser.parse_args()
if not os.path.exists(args.ssb_path):
print("JSON file does not exist.", file=sys.stderr)
exit(1)
with open_utf8(args.ssb_path, 'r') as f:
ssb_file = json.load(f)
check_settings(ssb_file)
routine_infos, named_coroutines, routine_ops = read_routines(ssb_file["routines"])
dmodes = ssb_file[SETTINGS][SETTINGS_DUNGEON_MODE_CONSTANTS]
decompiler = ExplorerScriptSsbDecompiler(
routine_infos, routine_ops, named_coroutines,
ssb_file[SETTINGS][SETTINGS_PERFORMANCE_PROGRESS_LIST_VAR_NAME],
DungeonModeConstants(
dmodes[SETTINGS_DMC_CLOSED], dmodes[SETTINGS_DMC_OPEN],
dmodes[SETTINGS_DMC_REQUEST], dmodes[SETTINGS_DMC_OPEN_REQUEST]
)
)
exps_src, source_map = decompiler.convert()
if args.source_map is not None:
with open_utf8(args.source_map, 'w') as f:
f.write(source_map.serialize())
print(exps_src)
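# Typical invocation (illustrative; the real script and file names will differ):
#   python decompile_json.py my_script.ssb.json --source-map my_script.sourcemap > my_script.exps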
|
the-stack_0_10741 | """
Authors:
Randy Heiland ([email protected])
Adam Morrow, Grant Waldrow, Drew Willis, Kim Crevecoeur
Dr. Paul Macklin ([email protected])
--- Versions ---
0.1 - initial version
"""
import sys
from PySide6 import QtCore, QtGui
from PySide6.QtWidgets import *
from PySide6.QtGui import QDoubleValidator
class QHLine(QFrame):
def __init__(self):
super(QHLine, self).__init__()
self.setFrameShape(QFrame.HLine)
self.setFrameShadow(QFrame.Sunken)
class SubstrateDef(QWidget):
def __init__(self):
super().__init__()
# global self.microenv_params
self.current_substrate = None
self.xml_root = None
self.celldef_tab = None
#---------------
# self.cell_defs = CellDefInstances()
self.microenv_hbox = QHBoxLayout()
splitter = QSplitter()
tree_widget_width = 160
self.tree = QTreeWidget()
# self.tree.setStyleSheet("background-color: lightgray")
self.tree.setFixedWidth(tree_widget_width)
# self.tree.currentChanged(self.tree_item_changed_cb)
self.tree.itemClicked.connect(self.tree_item_changed_cb)
# self.tree.itemSelectionChanged()
# self.tree.setColumnCount(1)
# self.tree.setCurrentItem(0) # rwh/TODO
header = QTreeWidgetItem(["--- Substrate ---"])
self.tree.setHeaderItem(header)
# cellname = QTreeWidgetItem(["virus"])
# self.tree.insertTopLevelItem(0,cellname)
# cellname = QTreeWidgetItem(["interferon"])
# self.tree.insertTopLevelItem(1,cellname)
self.microenv_hbox.addWidget(self.tree)
self.scroll_cell_def_tree = QScrollArea()
self.scroll_cell_def_tree.setWidget(self.tree)
# splitter.addWidget(self.tree)
splitter.addWidget(self.scroll_cell_def_tree)
#-------------------------------------------
# self.tab = QWidget()
# self.tabs.resize(200,5)
#-------------------------------------------
label_width = 150
units_width = 70
# self.scroll = QScrollArea()
self.scroll_area = QScrollArea()
splitter.addWidget(self.scroll_area)
# self.microenv_hbox.addWidget(self.scroll_area)
self.microenv_params = QWidget()
self.vbox = QVBoxLayout()
self.vbox.addStretch(0)
# self.microenv_hbox.addWidget(self.)
#------------------
controls_hbox = QHBoxLayout()
self.new_button = QPushButton("New")
controls_hbox.addWidget(self.new_button)
self.copy_button = QPushButton("Copy")
controls_hbox.addWidget(self.copy_button)
self.delete_button = QPushButton("Delete")
self.delete_button.clicked.connect(self.delete_substrate)
controls_hbox.addWidget(self.delete_button)
# self.vbox.addLayout(hbox)
# self.vbox.addWidget(QHLine())
#------------------
hbox = QHBoxLayout()
label = QLabel("Name of substrate:")
label.setFixedWidth(180)
label.setAlignment(QtCore.Qt.AlignRight)
hbox.addWidget(label)
self.substrate_name = QLineEdit()
# Want to validate name, e.g., starts with alpha, no special chars, etc.
# self.cycle_trate0_0.setValidator(QtGui.QDoubleValidator())
# self.cycle_trate0_1.enter.connect(self.save_xml)
hbox.addWidget(self.substrate_name)
self.vbox.addLayout(hbox)
#------------------
hbox = QHBoxLayout()
label = QLabel("diffusion coefficient")
label.setFixedWidth(label_width)
label.setAlignment(QtCore.Qt.AlignRight)
hbox.addWidget(label)
self.diffusion_coef = QLineEdit()
self.diffusion_coef.setValidator(QtGui.QDoubleValidator())
# self.diffusion_coef.enter.connect(self.save_xml)
hbox.addWidget(self.diffusion_coef)
units = QLabel("micron^2/min")
units.setFixedWidth(units_width)
hbox.addWidget(units)
self.vbox.addLayout(hbox)
#----------
hbox = QHBoxLayout()
label = QLabel("decay rate")
label.setFixedWidth(label_width)
label.setAlignment(QtCore.Qt.AlignRight)
hbox.addWidget(label)
self.decay_rate = QLineEdit()
self.decay_rate.setValidator(QtGui.QDoubleValidator())
# self.decay_rate.enter.connect(self.save_xml)
hbox.addWidget(self.decay_rate)
units = QLabel("1/min")
units.setFixedWidth(units_width)
hbox.addWidget(units)
self.vbox.addLayout(hbox)
#----------
hbox = QHBoxLayout()
label = QLabel("initial condition")
label.setFixedWidth(label_width)
label.setAlignment(QtCore.Qt.AlignRight)
hbox.addWidget(label)
self.init_cond = QLineEdit()
self.init_cond.setValidator(QtGui.QDoubleValidator())
# self.init_cond.enter.connect(self.save_xml)
hbox.addWidget(self.init_cond)
units = QLabel("mmol")
units.setFixedWidth(units_width)
hbox.addWidget(units)
self.vbox.addLayout(hbox)
#----------
hbox = QHBoxLayout()
label = QLabel("Dirichlet BC")
label.setFixedWidth(label_width)
label.setAlignment(QtCore.Qt.AlignRight)
hbox.addWidget(label)
self.dirichlet_bc = QLineEdit()
self.dirichlet_bc.setValidator(QtGui.QDoubleValidator())
# self.bdy_cond.enter.connect(self.save_xml)
hbox.addWidget(self.dirichlet_bc)
units = QLabel("mmol")
units.setFixedWidth(units_width)
hbox.addWidget(units)
self.dirichlet_bc_enabled = QCheckBox("on/off")
# self.motility_enabled.setAlignment(QtCore.Qt.AlignRight)
# label.setFixedWidth(label_width)
hbox.addWidget(self.dirichlet_bc_enabled)
self.vbox.addLayout(hbox)
#-------------
hbox = QHBoxLayout()
self.gradients = QCheckBox("calculate gradients")
hbox.addWidget(self.gradients)
self.vbox.addLayout(hbox)
hbox = QHBoxLayout()
self.track_in_agents = QCheckBox("track in agents")
hbox.addWidget(self.track_in_agents)
self.vbox.addLayout(hbox)
#--------------------------
# <Dirichlet_boundary_condition units="dimensionless" enabled="false">0</Dirichlet_boundary_condition>
# <!--
# <Dirichlet_options>
# <boundary_value ID="xmin" enabled="false">0</boundary_value>
# <boundary_value ID="xmax" enabled="false">0</boundary_value>
# <boundary_value ID="ymin" enabled="false">0</boundary_value>
# <boundary_value ID="ymax" enabled="false">0</boundary_value>
# <boundary_value ID="zmin" enabled="false">1</boundary_value>
# <boundary_value ID="zmax" enabled="false">0</boundary_value>
# </Dirichlet_options>
# -->
# </variable>
#--------------------------
# Dummy widget for filler??
# label = QLabel("")
# label.setFixedHeight(1000)
# # label.setStyleSheet("background-color: orange")
# label.setAlignment(QtCore.Qt.AlignCenter)
# self.vbox.addWidget(label)
#==================================================================
# self.vbox.setAlignment(QtCore.Qt.AlignTop)
# spacerItem = QSpacerItem(20, 237, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
# spacerItem = QSpacerItem(100,500)
# self.vbox.addItem(spacerItem)
self.vbox.addStretch()
self.microenv_params.setLayout(self.vbox)
self.scroll_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.scroll_area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.scroll_area.setWidgetResizable(True)
self.scroll_area.setWidget(self.microenv_params)
# self.save_button = QPushButton("Save")
# self.text = QLabel("Hello World",alignment=QtCore.Qt.AlignCenter)
self.layout = QVBoxLayout(self)
self.layout.addLayout(controls_hbox)
# self.layout.addWidget(self.tabs)
# self.layout.addWidget(QHLine())
# self.layout.addWidget(self.params)
# self.layout.addWidget(self.scroll_area)
self.layout.addWidget(splitter)
# self.layout.addWidget(self.vbox)
# self.layout.addWidget(self.text)
# self.layout.addWidget(self.save_button)
# self.save_button.clicked.connect(self.save_xml)
@QtCore.Slot()
def delete_substrate(self):
print('------ delete_substrate')
item_idx = self.tree.indexFromItem(self.tree.currentItem()).row()
print('------ item_idx=',item_idx)
# self.tree.removeItemWidget(self.tree.currentItem(), 0)
self.tree.takeTopLevelItem(self.tree.indexOfTopLevelItem(self.tree.currentItem()))
self.celldef_tab.delete_substrate_from_comboboxes(item_idx)
print('------ new name=',self.tree.currentItem().text(0))
self.current_substrate = self.tree.currentItem().text(0)
self.fill_gui(self.current_substrate)
# @QtCore.Slot()
# def save_xml(self):
# # self.text.setText(random.choice(self.hello))
# pass
# def tree_item_changed(self,idx1,idx2):
def tree_item_changed_cb(self, it,col):
print('tree_item_changed:', it, col, it.text(col) )
self.current_substrate = it.text(col)
print('self.current_substrate= ',self.current_substrate )
# print('self.= ',self.tree.indexFromItem )
# fill in the GUI with this one's params
self.fill_gui(self.current_substrate)
def populate_tree(self):
uep = self.xml_root.find(".//microenvironment_setup")
if uep:
self.tree.clear()
idx = 0
# <microenvironment_setup>
# <variable name="food" units="dimensionless" ID="0">
for var in uep:
# print(cell_def.attrib['name'])
if var.tag == 'variable':
var_name = var.attrib['name']
subname = QTreeWidgetItem([var_name])
self.tree.insertTopLevelItem(idx,subname)
idx += 1
def first_substrate_name(self):
uep = self.xml_root.find(".//microenvironment_setup//variable")
if uep:
return(uep.attrib['name'])
def fill_gui(self, substrate_name):
# <microenvironment_setup>
# <variable name="food" units="dimensionless" ID="0">
uep = self.xml_root.find('.//microenvironment_setup') # find unique entry point
if substrate_name is None:
substrate_name = self.xml_root.find(".//microenvironment_setup//variable").attrib['name']
self.substrate_name.setText(substrate_name)
vp = [] # pointers to <variable> nodes
if uep:
# self.tree.clear()
idx = 0
for var in uep.findall('variable'):
vp.append(var)
# print(var.attrib['name'])
name = var.attrib['name']
subname = QTreeWidgetItem([name])
# self.tree.insertTopLevelItem(idx,substrate_name)
if subname.text(0) == substrate_name:
print("break out of substrate (variable) name loop with idx=",idx)
break
idx += 1
# self.tree.setCurrentItem(substrate_name,0) # RWH/TODO: select 1st (0th?) item upon startup or loading new model
idx += 1 # we use 1-offset indices below
var_param_path = self.xml_root.find(".//microenvironment_setup//variable[" + str(idx) + "]//physical_parameter_set")
var_path = self.xml_root.find(".//microenvironment_setup//variable[" + str(idx) + "]")
# uep = self.xml_root.find('.//microenvironment_setup') # find unique entry point
# <variable name="oxygen" units="mmHg" ID="0">
# <physical_parameter_set>
# <diffusion_coefficient units="micron^2/min">100000.0</diffusion_coefficient>
# <decay_rate units="1/min">0.1</decay_rate>
# </physical_parameter_set>
# <initial_condition units="mmHg">38.0</initial_condition>
# <Dirichlet_boundary_condition units="mmHg" enabled="true">38.0</
# self.substrate_name.setText(var.attrib['name'])
self.diffusion_coef.setText(var_param_path.find('.//diffusion_coefficient').text)
self.decay_rate.setText(var_param_path.find('.//decay_rate').text)
self.init_cond.setText(var_path.find('.initial_condition').text)
self.dirichlet_bc.setText(var_path.find('.Dirichlet_boundary_condition').text)
# self.chemical_A_decay_rate.value = float(vp[0].find('.//decay_rate').text)
# self.chemical_A_initial_condition.value = float(vp[0].find('.//initial_condition').text)
# self.chemical_A_Dirichlet_boundary_condition.value = float(vp[0].find('.//Dirichlet_boundary_condition').text)
# if vp[0].find('.//Dirichlet_boundary_condition').attrib['enabled'].lower() == 'true':
# self.chemical_A_Dirichlet_boundary_condition_toggle.value = True
# else:
# self.chemical_A_Dirichlet_boundary_condition_toggle.value = False
# self.chemical_B_diffusion_coefficient.value = float(vp[1].find('.//diffusion_coefficient').text)
# self.chemical_B_decay_rate.value = float(vp[1].find('.//decay_rate').text)
# self.chemical_B_initial_condition.value = float(vp[1].find('.//initial_condition').text)
# self.chemical_B_Dirichlet_boundary_condition.value = float(vp[1].find('.//Dirichlet_boundary_condition').text)
# if vp[1].find('.//Dirichlet_boundary_condition').attrib['enabled'].lower() == 'true':
# self.chemical_B_Dirichlet_boundary_condition_toggle.value = True
# else:
# self.chemical_B_Dirichlet_boundary_condition_toggle.value = False
# self.chemical_C_diffusion_coefficient.value = float(vp[2].find('.//diffusion_coefficient').text)
# self.chemical_C_decay_rate.value = float(vp[2].find('.//decay_rate').text)
# self.chemical_C_initial_condition.value = float(vp[2].find('.//initial_condition').text)
# self.chemical_C_Dirichlet_boundary_condition.value = float(vp[2].find('.//Dirichlet_boundary_condition').text)
# if vp[2].find('.//Dirichlet_boundary_condition').attrib['enabled'].lower() == 'true':
# self.chemical_C_Dirichlet_boundary_condition_toggle.value = True
# else:
# self.chemical_C_Dirichlet_boundary_condition_toggle.value = False
# if uep.find('.//options//calculate_gradients').text.lower() == 'true':
# self.calculate_gradient.value = True
# else:
# self.calculate_gradient.value = False
# if uep.find('.//options//track_internalized_substrates_in_each_agent').text.lower() == 'true':
# self.track_internal.value = True
# else:
# self.track_internal.value = False
#----------------------------------------------------------------------------
# Read values from the GUI widgets and generate/write a new XML
# <microenvironment_setup>
# <variable name="director signal" units="dimensionless" ID="0">
# <physical_parameter_set>
# <diffusion_coefficient units="micron^2/min">1000</diffusion_coefficient>
# <decay_rate units="1/min">.1</decay_rate>
# </physical_parameter_set>
# <initial_condition units="dimensionless">0</initial_condition>
# <Dirichlet_boundary_condition units="dimensionless" enabled="false">1</Dirichlet_boundary_condition>
# </variable>
# <variable name="cargo signal" units="dimensionless" ID="1">
# <physical_parameter_set>
# <diffusion_coefficient units="micron^2/min">1000</diffusion_coefficient>
# <decay_rate units="1/min">.4</decay_rate>
# </physical_parameter_set>
# <initial_condition units="dimensionless">0</initial_condition>
# <Dirichlet_boundary_condition units="dimensionless" enabled="false">1</Dirichlet_boundary_condition>
# </variable>
# <options>
# <calculate_gradients>true</calculate_gradients>
# <track_internalized_substrates_in_each_agent>false</track_internalized_substrates_in_each_agent>
# <initial_condition type="matlab" enabled="false">
# <filename>./config/initial.mat</filename>
# </initial_condition>
# <dirichlet_nodes type="matlab" enabled="false">
# <filename>./config/dirichlet.mat</filename>
# </dirichlet_nodes>
# </options>
# </microenvironment_setup>
def fill_xml(self):
# pass
uep = self.xml_root.find('.//microenvironment_setup')
vp = [] # pointers to <variable> nodes
if uep:
for var in uep.findall('variable'):
vp.append(var)
uep = self.xml_root.find('.//microenvironment_setup')
# self.diffusion_coef.setText(var_param_path.find('.//diffusion_coefficient').text)
# self.decay_rate.setText(var_param_path.find('.//decay_rate').text)
# self.init_cond.setText(var_path.find('.initial_condition').text)
# self.dirichlet_bc.setText(var_path.find('.Dirichlet_boundary_condition').text)
vp[0].find('.//diffusion_coefficient').text = str(self.diffusion_coef.text())
# vp[0].find('.//diffusion_coefficient').text = str(self.director_signal_diffusion_coefficient.value)
# vp[0].find('.//decay_rate').text = str(self.director_signal_decay_rate.value)
# vp[0].find('.//initial_condition').text = str(self.director_signal_initial_condition.value)
# vp[0].find('.//Dirichlet_boundary_condition').text = str(self.director_signal_Dirichlet_boundary_condition.value)
# vp[0].find('.//Dirichlet_boundary_condition').attrib['enabled'] = str(self.director_signal_Dirichlet_boundary_condition_toggle.value).lower()
# vp[1].find('.//diffusion_coefficient').text = str(self.cargo_signal_diffusion_coefficient.value)
# vp[1].find('.//decay_rate').text = str(self.cargo_signal_decay_rate.value)
# vp[1].find('.//initial_condition').text = str(self.cargo_signal_initial_condition.value)
# vp[1].find('.//Dirichlet_boundary_condition').text = str(self.cargo_signal_Dirichlet_boundary_condition.value)
# vp[1].find('.//Dirichlet_boundary_condition').attrib['enabled'] = str(self.cargo_signal_Dirichlet_boundary_condition_toggle.value).lower()
# uep.find('.//options//calculate_gradients').text = str(self.calculate_gradient.value)
# uep.find('.//options//track_internalized_substrates_in_each_agent').text = str(self.track_internal.value)
def clear_gui(self):
pass |
the-stack_0_10742 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from typing import Any, Dict
from unittest import mock
from google.cloud.language_v1.proto.language_service_pb2 import Document
from airflow.providers.google.cloud.hooks.natural_language import CloudNaturalLanguageHook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_no_default_project_id
API_RESPONSE = {} # type: Dict[Any, Any]
DOCUMENT = Document(
content="Airflow is a platform to programmatically author, schedule and monitor workflows."
)
ENCODING_TYPE = "UTF32"
class TestCloudNaturalLanguageHook(unittest.TestCase):
def setUp(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.hook = CloudNaturalLanguageHook(gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook.client_info",
new_callable=mock.PropertyMock,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook." "_get_credentials"
)
@mock.patch("airflow.providers.google.cloud.hooks.natural_language.LanguageServiceClient")
def test_language_service_client_creation(self, mock_client, mock_get_creds, mock_client_info):
result = self.hook.get_conn()
mock_client.assert_called_once_with(
credentials=mock_get_creds.return_value, client_info=mock_client_info.return_value
)
self.assertEqual(mock_client.return_value, result)
self.assertEqual(self.hook._conn, result)
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook.get_conn",
)
def test_analyze_entities(self, get_conn):
get_conn.return_value.analyze_entities.return_value = API_RESPONSE
result = self.hook.analyze_entities(document=DOCUMENT, encoding_type=ENCODING_TYPE)
self.assertEqual(result, API_RESPONSE)
get_conn.return_value.analyze_entities.assert_called_once_with(
document=DOCUMENT, encoding_type=ENCODING_TYPE, retry=None, timeout=None, metadata=None
)
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook.get_conn",
)
def test_analyze_entity_sentiment(self, get_conn):
get_conn.return_value.analyze_entity_sentiment.return_value = API_RESPONSE
result = self.hook.analyze_entity_sentiment(document=DOCUMENT, encoding_type=ENCODING_TYPE)
self.assertEqual(result, API_RESPONSE)
get_conn.return_value.analyze_entity_sentiment.assert_called_once_with(
document=DOCUMENT, encoding_type=ENCODING_TYPE, retry=None, timeout=None, metadata=None
)
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook.get_conn",
)
def test_analyze_sentiment(self, get_conn):
get_conn.return_value.analyze_sentiment.return_value = API_RESPONSE
result = self.hook.analyze_sentiment(document=DOCUMENT, encoding_type=ENCODING_TYPE)
self.assertEqual(result, API_RESPONSE)
get_conn.return_value.analyze_sentiment.assert_called_once_with(
document=DOCUMENT, encoding_type=ENCODING_TYPE, retry=None, timeout=None, metadata=None
)
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook.get_conn",
)
def test_analyze_syntax(self, get_conn):
get_conn.return_value.analyze_syntax.return_value = API_RESPONSE
result = self.hook.analyze_syntax(document=DOCUMENT, encoding_type=ENCODING_TYPE)
self.assertEqual(result, API_RESPONSE)
get_conn.return_value.analyze_syntax.assert_called_once_with(
document=DOCUMENT, encoding_type=ENCODING_TYPE, retry=None, timeout=None, metadata=None
)
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook.get_conn",
)
def test_annotate_text(self, get_conn):
get_conn.return_value.annotate_text.return_value = API_RESPONSE
result = self.hook.annotate_text(document=DOCUMENT, encoding_type=ENCODING_TYPE, features=None)
self.assertEqual(result, API_RESPONSE)
get_conn.return_value.annotate_text.assert_called_once_with(
document=DOCUMENT,
encoding_type=ENCODING_TYPE,
features=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook.get_conn",
)
def test_classify_text(self, get_conn):
get_conn.return_value.classify_text.return_value = API_RESPONSE
result = self.hook.classify_text(document=DOCUMENT)
self.assertEqual(result, API_RESPONSE)
get_conn.return_value.classify_text.assert_called_once_with(
document=DOCUMENT, retry=None, timeout=None, metadata=None
)
|
the-stack_0_10743 | import os
import subprocess
import sys
from typing import List
import psutil
import logging
def pip_install(pkg: str, pipbin: str = "pip3.10"):
all_pkgs = [_.decode('ascii').lower() for _ in subprocess.check_output(
[f"{pipbin}", 'list']).split()]
if pkg in all_pkgs:
logging.info(f"pkg= , {pkg}, is present")
return "already present"
else:
os.system(f"{pipbin} install {pkg}")
return "installed"
class Error:
pass
error = Error()
def exec_cmd(cmdl: List[str], errcheck: str = ""):
"""
"""
try:
res = subprocess.check_output(cmdl, stderr=subprocess.STDOUT)
return res
except subprocess.CalledProcessError as e:
if errcheck in str(e.output):
logging.info(f"{cmdl[0]} already active")
else:
raise e
def append_to_file(fullfp: str, apstr: str):
dirn = os.path.dirname(fullfp)
fn = os.path.basename(fullfp)
if os.path.exists(fullfp):
os.system(f"doas cp {fullfp} {fullfp}.premod")
os.system(f"cp {fullfp} /tmp/{fn}")
with open(f"/tmp/{fn}", "a") as fh:
fh.write(apstr)
os.system(f"doas mv /tmp/{fn} {fullfp}")
def checkIfProcessRunning(processName):
'''
Check if there is any running process that contains the given name processName.
'''
# Iterate over the all the running process
for proc in psutil.process_iter():
try:
# Check if process name contains the given name string.
if processName.lower() in proc.name().lower():
return True
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return False
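# Minimal usage sketch (not part of the original module); the package, command
# and process names below are arbitrary examples, and the function is never
# called here, so nothing runs on import.
def _utils_demo():
    pip_install("requests")  # installs only if not already present
    exec_cmd(["ls", "/tmp"], errcheck="No such")  # tolerated-failure pattern
    append_to_file("/tmp/demo.conf", "key=value\n")  # uses doas to move the file back
    return checkIfProcessRunning("python")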
|
the-stack_0_10744 | from copy import deepcopy
import numpy as np
import pandas as pd
import torch
from torch.optim import Adam
import gym
import time
import spinup.algos.pytorch.ddpg.core as core
from spinup.utils.logx import EpochLogger
class ReplayBuffer:
"""
A simple FIFO experience replay buffer for DDPG agents.
"""
def __init__(self, obs_dim, act_dim, size):
self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
self.obs2_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.ptr, self.size, self.max_size = 0, 0, size
def store(self, obs, act, rew, next_obs, done):
self.obs_buf[self.ptr] = obs
self.obs2_buf[self.ptr] = next_obs
self.act_buf[self.ptr] = act
self.rew_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr+1) % self.max_size
self.size = min(self.size+1, self.max_size)
def sample_batch(self, batch_size=32):
idxs = np.random.randint(0, self.size, size=batch_size)
batch = dict(obs=self.obs_buf[idxs],
obs2=self.obs2_buf[idxs],
act=self.act_buf[idxs],
rew=self.rew_buf[idxs],
done=self.done_buf[idxs])
return {k: torch.as_tensor(v, dtype=torch.float32) for k,v in batch.items()}
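# Illustrative sketch (not part of the original module) of how the replay
# buffer above is exercised; the observation/action dimensions are arbitrary
# assumptions and the function is never called here.
def _replay_buffer_usage_sketch():
    buf = ReplayBuffer(obs_dim=3, act_dim=2, size=1000)
    o = np.zeros(3, dtype=np.float32)
    a = np.zeros(2, dtype=np.float32)
    buf.store(o, a, 1.0, o, False)            # one (s, a, r, s', done) transition
    batch = buf.sample_batch(batch_size=32)   # dict of torch tensors
    return batch['obs'].shape                 # torch.Size([32, 3])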
"""
The following parameters have been changed:
env_fn: default value set to core.ALMEnv
epochs: default value changed from 100 to 300
act_noise: default value changed from .1 to .01
time_horizon: added parameter, with default value 80
discount_rate: added parameter, with default value .06
"""
def ddpg(env_fn = core.ALMEnv, actor_critic = core.MLPActorCritic,
ac_kwargs = dict(), seed = 0, steps_per_epoch = 4000, epochs = 300,
replay_size = int(1e6), gamma = 0.99, polyak=0.995, pi_lr = 1e-3,
q_lr = 1e-3, batch_size = 100, start_steps = 10000, update_after = 1000,
update_every = 50, act_noise = .01, num_test_episodes = 10,
max_ep_len = 1000, logger_kwargs = dict(), save_freq = 1,
time_horizon = 80, discount_rate = .06):
"""
Deep Deterministic Policy Gradient (DDPG)
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
In this version, the default environment is 'ALMEnv'
actor_critic: The constructor method for a PyTorch Module with an ``act``
method, a ``pi`` module, and a ``q`` module. The ``act`` method and
``pi`` module should accept batches of observations as inputs,
and ``q`` should accept a batch of observations and a batch of
actions as inputs. When called, these should return:
=========== ================ ======================================
Call Output Shape Description
=========== ================ ======================================
``act`` (batch, act_dim) | Numpy array of actions for each
| observation.
``pi`` (batch, act_dim) | Tensor containing actions from policy
| given observations.
``q`` (batch,) | Tensor containing the current estimate
| of Q* for the provided observations
| and actions. (Critical: make sure to
| flatten this!)
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the ActorCritic object
you provided to DDPG.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
pi_lr (float): Learning rate for policy.
q_lr (float): Learning rate for Q-networks.
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
update_after (int): Number of env interactions to collect before
starting to do gradient descent updates. Ensures replay buffer
is full enough for useful updates.
update_every (int): Number of env interactions that should elapse
between gradient descent updates. Note: Regardless of how long
you wait between updates, the ratio of env steps to gradient steps
is locked to 1.
act_noise (float): Stddev for Gaussian exploration noise added to
policy at training time. (At test time, no noise is added.)
num_test_episodes (int): Number of episodes to test the deterministic
policy at the end of each epoch.
max_ep_len (int): Maximum length of trajectory / episode / rollout.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
torch.manual_seed(seed)
np.random.seed(seed)
# env, test_env = env_fn(), env_fn() original OpenAI SpinningUp entry
env = env_fn(T = time_horizon, rate = discount_rate) # Added by the author
test_env = env_fn(T = time_horizon, rate = discount_rate) # Added by the author
obs_dim = env.observation_space.shape
act_dim = env.action_space.shape[0]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = env.action_space.high[0]
# Create actor-critic module and target networks
ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs)
ac_targ = deepcopy(ac)
# Freeze target networks with respect to optimizers (only update via polyak averaging)
for p in ac_targ.parameters():
p.requires_grad = False
# Experience buffer
replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
# Count variables (protip: try to get a feel for how different size networks behave!)
var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.q])
logger.log('\nNumber of parameters: \t pi: %d, \t q: %d\n'%var_counts)
# Set up function for computing DDPG Q-loss
def compute_loss_q(data):
o, a, r, o2, d = data['obs'], data['act'], data['rew'], data['obs2'], data['done']
q = ac.q(o,a)
# Bellman backup for Q function
with torch.no_grad():
q_pi_targ = ac_targ.q(o2, ac_targ.pi(o2))
backup = r + gamma * (1 - d) * q_pi_targ
# MSE loss against Bellman backup
loss_q = ((q - backup)**2).mean()
# Useful info for logging
loss_info = dict(QVals=q.detach().numpy())
return loss_q, loss_info
# Set up function for computing DDPG pi loss
def compute_loss_pi(data):
o = data['obs']
q_pi = ac.q(o, ac.pi(o))
return -q_pi.mean()
# Set up optimizers for policy and q-function
pi_optimizer = Adam(ac.pi.parameters(), lr=pi_lr)
q_optimizer = Adam(ac.q.parameters(), lr=q_lr)
# Set up model saving
logger.setup_pytorch_saver(ac)
def update(data):
# First run one gradient descent step for Q.
q_optimizer.zero_grad()
loss_q, loss_info = compute_loss_q(data)
loss_q.backward()
q_optimizer.step()
# Freeze Q-network so you don't waste computational effort
# computing gradients for it during the policy learning step.
for p in ac.q.parameters():
p.requires_grad = False
# Next run one gradient descent step for pi.
pi_optimizer.zero_grad()
loss_pi = compute_loss_pi(data)
loss_pi.backward()
pi_optimizer.step()
# Unfreeze Q-network so you can optimize it at next DDPG step.
for p in ac.q.parameters():
p.requires_grad = True
# Record things
logger.store(LossQ=loss_q.item(), LossPi=loss_pi.item(), **loss_info)
# Finally, update target networks by polyak averaging.
with torch.no_grad():
for p, p_targ in zip(ac.parameters(), ac_targ.parameters()):
# NB: We use an in-place operations "mul_", "add_" to update target
# params, as opposed to "mul" and "add", which would make new tensors.
p_targ.data.mul_(polyak)
p_targ.data.add_((1 - polyak) * p.data)
def get_action(o, noise_scale):
a = ac.act(torch.as_tensor(o, dtype=torch.float32))
a = a * (noise_scale * np.random.randn(act_dim) + 1) # Added by the author
return (a / np.sum(a)) # Added by the author
# a += noise_scale * np.random.randn(act_dim) Original OpenAI SpinningUp entry
# return np.clip(a, -act_limit, act_limit) Original OpenAI SpinningUp entry
def test_agent():
for j in range(num_test_episodes):
o, d, ep_ret, ep_len = test_env.reset(), False, 0, 0
while not(d or (ep_len == max_ep_len)):
# Take deterministic actions at test time (noise_scale=0)
o, r, d, _ = test_env.step(get_action(o, 0))
ep_ret += r
ep_len += 1
logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
# Prepare for interaction with environment
total_steps = steps_per_epoch * epochs
start_time = time.time()
o, ep_ret, ep_len = env.reset(), 0, 0
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
"""
Until start_steps have elapsed, randomly sample actions
from a uniform distribution for better exploration. Afterwards,
use the learned policy (with some noise, via act_noise).
"""
if t > start_steps:
a = get_action(o, act_noise)
else:
a = env.action_space.sample()
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = False if ep_len==max_ep_len else d
# Store experience to replay buffer
replay_buffer.store(o, a, r, o2, d)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
# End of trajectory handling
if d or (ep_len == max_ep_len):
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, ep_ret, ep_len = env.reset(), 0, 0
# Update handling
if t >= update_after and t % update_every == 0:
for _ in range(update_every):
batch = replay_buffer.sample_batch(batch_size)
update(data=batch)
# End of epoch handling
if (t+1) % steps_per_epoch == 0:
epoch = (t+1) // steps_per_epoch
# Save model
if (epoch % save_freq == 0) or (epoch == epochs):
logger.save_state({'env': env}, None)
# Test the performance of the deterministic version of the agent.
test_agent()
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('TestEpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('TestEpLen', average_only=True)
logger.log_tabular('TotalEnvInteracts', t)
logger.log_tabular('QVals', with_min_and_max=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
# parser.add_argument('--env', type=str, default='HalfCheetah-v2') Original OpenAI SpinningUp entry
parser.add_argument('--hid', type=int, default=256)
parser.add_argument('--l', type=int, default=2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--time_horizon', type = int, default = 80) # Added by the author
parser.add_argument('--discount_rate', type = float, default = 0.06) # Added by the author
parser.add_argument('--exp_name', type=str, default='ddpg')
args = parser.parse_args()
from spinup.utils.run_utils import setup_logger_kwargs
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
ddpg(env_fn = core.ALMEnv, actor_critic=core.MLPActorCritic,
ac_kwargs=dict(hidden_sizes=[args.hid]*args.l),
gamma=args.gamma, seed=args.seed, epochs=args.epochs,
logger_kwargs=logger_kwargs, time_horizon = args.time_horizon,
discount_rate = args.discount_rate)
|
the-stack_0_10745 | import os
outlines = (
"""#!/usr/bin/env bash
####################################
# USAGE: ./sequential_projids.sh & #
####################################
# sector 6, first galactic field reduction
"""
)
projidlines = []
for projid in range(1500,1517):
projidlines.append(
'( source activate trex_37; source projid_{}.sh; wait ) & wait\n'.
format(projid)
)
outname = 'tess_tuning_scripts/sector6_reduction.sh'
with open(outname, 'w') as f:
f.writelines(outlines)
f.writelines(projidlines)
print('wrote {}'.format(outname))
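# For reference, the generated sector6_reduction.sh is the header above followed
# by one line per projid, i.e.:
#   ( source activate trex_37; source projid_1500.sh; wait ) & wait
#   ...
#   ( source activate trex_37; source projid_1516.sh; wait ) & wait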
|
the-stack_0_10746 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that we can actually build a simple program using our generated
Visual Studio 10.0 project (.vcxproj) and solution (.sln) files
using Visual Studio 10.0 (Professional edition).
"""
import os
import sys
import TestSConsMSVS
test = TestSConsMSVS.TestSConsMSVS()
if sys.platform != 'win32':
msg = "Skipping Visual Studio test on non-Windows platform '%s'\n" % sys.platform
test.skip_test(msg)
msvs_version = '10.0'
if not msvs_version in test.msvs_versions():
msg = "Visual Studio %s not installed; skipping test.\n" % msvs_version
test.skip_test(msg)
# Let SCons figure out the Visual Studio environment variables for us and
# print out a statement that we can exec to suck them into our external
# environment so we can execute devenv and really try to build something.
test.run(arguments = '-n -q -Q -f -', stdin = """\
env = Environment(tools = ['msvc'], MSVS_VERSION='%(msvs_version)s')
print("os.environ.update(%%s)" %% repr(env['ENV']))
""" % locals())
exec(test.stdout())
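# The exec'd statement produced by the SConstruct above looks roughly like the
# line below (values are machine-specific and shown purely as an illustration):
#   os.environ.update({'PATH': '...', 'INCLUDE': '...', 'LIB': '...'})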
test.subdir('sub dir')
test.write(['sub dir', 'SConstruct'], """\
env=Environment(MSVS_VERSION = '%(msvs_version)s')
env.MSVSProject(target = 'foo.vcxproj',
srcs = ['foo.c'],
buildtarget = 'foo.exe',
variant = 'Release')
env.Program('foo.c')
""" % locals())
test.write(['sub dir', 'foo.c'], r"""
int
main(int argc, char *argv)
{
printf("foo.c\n");
exit (0);
}
""")
test.run(chdir='sub dir', arguments='.')
test.vcproj_sys_path(test.workpath('sub dir', 'foo.vcxproj'))
import SCons.Platform.win32
system_dll_path = os.path.join( SCons.Platform.win32.get_system_root(), 'System32' )
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + system_dll_path
test.run(chdir='sub dir',
program=[test.get_msvs_executable(msvs_version)],
arguments=['foo.sln', '/build', 'Release'])
test.run(program=test.workpath('sub dir', 'foo'), stdout="foo.c\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
the-stack_0_10749 | from __future__ import unicode_literals
import os.path
import threading
import time
from future.builtins import str
import zmq
from zmq.eventloop import ioloop, zmqstream
import tornado.testing
ioloop.install()
def test_server_creation():
from pseud import Server
user_id = b'echo'
server = Server(user_id)
assert server.user_id == user_id
assert server.security_plugin == 'noop_auth_backend'
def test_server_can_bind():
from pseud import Server
user_id = b'echo'
endpoint = 'inproc://{}'.format(__name__).encode()
server = Server(user_id,
security_plugin='noop_auth_backend')
server.bind(endpoint)
def test_server_can_connect():
from pseud import Server
user_id = b'echo'
endpoint = b'tcp://127.0.0.1:5000'
server = Server(user_id,
security_plugin='noop_auth_backend')
server.connect(endpoint)
def test_server_with_its_loop_instance():
from pseud import SyncClient, Server
endpoint = b'ipc:///tmp/test_socket'
def start_server():
server = Server(b'a')
server.bind(endpoint)
server.register_rpc(str.lower)
server.io_loop.add_timeout(server.io_loop.time() + .2,
server.stop)
server.start()
server_thread = threading.Thread(target=start_server)
server_thread.start()
client = SyncClient()
client.connect(endpoint)
result = client.lower('TOTO')
assert result == 'toto'
class ServerTestCase(tornado.testing.AsyncTestCase):
timeout = 2
def make_one_client_socket(self, endpoint):
context = zmq.Context.instance()
req_sock = context.socket(zmq.ROUTER)
req_sock.connect(endpoint)
return req_sock
def make_one_server(self, user_id, endpoint):
from pseud import Server
server = Server(user_id, io_loop=self.io_loop)
server.bind(endpoint)
return server
@tornado.testing.gen_test
def test_job_running(self):
from pseud.interfaces import EMPTY_DELIMITER, OK, VERSION, WORK
from pseud.packer import Packer
from pseud.utils import register_rpc
user_id = b'echo'
endpoint = 'inproc://{}'.format(self.__class__.__name__).encode()
@register_rpc
def job_success(a, b, c, d=None):
time.sleep(.2)
return True
server = self.make_one_server(user_id, endpoint)
socket = self.make_one_client_socket(endpoint)
stream = zmqstream.ZMQStream(socket, io_loop=self.io_loop)
work = Packer().packb(('job_success', (1, 2, 3), {'d': False}))
yield tornado.gen.Task(stream.send_multipart,
[user_id, EMPTY_DELIMITER, VERSION, b'',
WORK, work])
yield server.start()
response = yield tornado.gen.Task(stream.on_recv)
assert response == [user_id, EMPTY_DELIMITER, VERSION, b'',
OK, Packer().packb(True)]
server.stop()
@tornado.testing.gen_test
def test_job_not_found(self):
import pseud
from pseud.interfaces import EMPTY_DELIMITER, ERROR, VERSION, WORK
from pseud.packer import Packer
user_id = b'echo'
endpoint = 'inproc://{}'.format(self.__class__.__name__).encode()
server = self.make_one_server(user_id, endpoint)
socket = self.make_one_client_socket(endpoint)
stream = zmqstream.ZMQStream(socket, io_loop=self.io_loop)
work = Packer().packb(('thisIsNotAFunction', (), {}))
yield server.start()
yield tornado.gen.Task(stream.send_multipart,
[user_id, EMPTY_DELIMITER, VERSION, b'', WORK,
work])
response = yield tornado.gen.Task(stream.on_recv)
assert response[:-1] == [user_id, EMPTY_DELIMITER, VERSION, b'',
ERROR]
klass, message, traceback = Packer().unpackb(response[-1])
assert klass == 'ServiceNotFoundError'
assert message == 'thisIsNotAFunction'
# pseud.__file__ might ends with .pyc
assert os.path.dirname(pseud.__file__) in traceback
server.stop()
@tornado.testing.gen_test
def test_job_raise(self):
from pseud.interfaces import ERROR, VERSION, WORK
from pseud.packer import Packer
from pseud.utils import register_rpc
user_id = b'echo'
endpoint = 'inproc://{}'.format(self.__class__.__name__).encode()
@register_rpc
def job_buggy(*args, **kw):
raise ValueError('too bad')
server = self.make_one_server(user_id, endpoint)
socket = self.make_one_client_socket(endpoint)
stream = zmqstream.ZMQStream(socket, io_loop=self.io_loop)
work = Packer().packb(('job_buggy', (), {}))
yield server.start()
yield tornado.gen.Task(stream.send_multipart,
[user_id, b'', VERSION, b'', WORK, work])
response = yield tornado.gen.Task(stream.on_recv)
assert response[:-1] == [user_id, b'', VERSION, b'', ERROR]
klass, message, traceback = Packer().unpackb(response[-1])
assert klass == 'ValueError'
assert message == 'too bad'
assert __file__ in traceback
server.stop()
|
the-stack_0_10750 | """
Copyright 2020 Google LLC
Copyright 2020 PerfectVIPs Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import argparse
import logging
import sys
import vsc
from bitstring import BitArray
from pygen_src.riscv_instr_pkg import mtvec_mode_t, f_rounding_mode_t, \
riscv_reg_t, privileged_mode_t, \
riscv_instr_group_t
from pygen_src.target.rv32i import riscv_core_setting as rcs
@vsc.randobj
class riscv_instr_gen_config:
def __init__(self, argv):
# TODO Support for command line argument
self.main_program_instr_cnt = 100 # count of main_prog
self.sub_program_instr_cnt = [] # count of sub_prog
self.debug_program_instr_cnt = 0 # count of debug_rom
self.debug_sub_program_instr_cnt = [] # count of debug sub_progrms
# Commenting out for now
# self.data_page_pattern = list(
# map(lambda dta_pg: dta_pg.name, data_pattern_t))
# dicts for exception_cause_t & interrupt_cause_t Enum classes
self.m_mode_exception_delegation = {}
self.s_mode_exception_delegation = {}
self.m_mode_interrupt_delegation = {}
self.s_mode_interrupt_delegation = {}
# init_privileged_mode default to MACHINE_MODE
self.init_privileged_mode = privileged_mode_t.MACHINE_MODE
self.mstatus = BitArray(bin(0b0), length=rcs.XLEN - 1)
self.mie = BitArray(bin(0b0), length=rcs.XLEN - 1)
self.sstatus = BitArray(bin(0b0), length=rcs.XLEN - 1)
self.sie = BitArray(bin(0b0), length=rcs.XLEN - 1)
self.ustatus = BitArray(bin(0b0), length=rcs.XLEN - 1)
self.uie = BitArray(bin(0b0), length=rcs.XLEN - 1)
self.mstatus_mprv = 0
self.mstatus_mxr = 0
self.mstatus_sum = 0
self.mstatus_tvm = 0
self.mstatus_fs = BitArray(bin(0b0), length=2)
self.mstatus_vs = BitArray(bin(0b0), length=2)
self.mtvec_mode = vsc.rand_enum_t(mtvec_mode_t)
self.tvec_alignment = argv.tvec_alignment
self.fcsr_rm = list(map(lambda csr_rm: csr_rm.name, f_rounding_mode_t))
self.enable_sfence = 0
self.gpr = vsc.rand_list_t(vsc.enum_t(riscv_reg_t), sz =4)
self.scratch_reg = vsc.rand_enum_t(riscv_reg_t)
self.pmp_reg = vsc.rand_enum_t(riscv_reg_t)
self.sp = vsc.rand_enum_t(riscv_reg_t)
self.tp = vsc.rand_enum_t(riscv_reg_t)
self.ra = vsc.rand_enum_t(riscv_reg_t)
self.check_misa_init_val = 0
self.check_xstatus = 1
self.virtual_addr_translation_on = 0
# Commenting out for now
# vector_cfg = riscv_vector_cfg # TODO
# pmp_cfg = riscv_pmp_cfg # TODO
# self.mem_region = [] # TODO
# Self.amo_region = [] # TODO
self.stack_len = 5000
# Self.s_mem_region = [] # TODO
self.kernel_stack_len = 4000
self.kernel_program_instr_cnt = 400
# list of main implemented CSRs
self.invalid_priv_mode_csrs = []
self.num_of_sub_program = argv.num_of_sub_program
self.instr_cnt = argv.instr_cnt
self.num_of_tests = argv.num_of_tests
self.no_data_page = argv.no_data_page
self.no_branch_jump = argv.no_branch_jump
self.no_load_store = argv.no_load_store
self.no_csr_instr = argv.no_csr_instr
self.no_ebreak = argv.no_ebreak
self.no_dret = argv.no_dret
self.no_fence = argv.no_fence
self.no_wfi = argv.no_wfi
self.enable_unaligned_load_store = argv.enable_unaligned_load_store
self.illegal_instr_ratio = argv.illegal_instr_ratio
self.hint_instr_ratio = argv.hint_instr_ratio
self.num_of_harts = argv.num_of_harts
self.fix_sp = argv.fix_sp
self.use_push_data_section = argv.use_push_data_section
self.boot_mode_opts = ""
self.enable_page_table_exception = argv.enable_page_table_exception
self.no_directed_instr = argv.no_directed_instr
self.asm_test_suffix = ""
self.enable_interrupt = argv.enable_interrupt
self.enable_nested_interrupt = argv.enable_nested_interrupt
self.enable_timer_irq = argv.enable_timer_irq
self.bare_program_mode = argv.bare_program_mode
self.enable_illegal_csr_instruction = argv.enable_illegal_csr_instruction
self.enable_access_invalid_csr_level = argv.enable_access_invalid_csr_level
self.enable_misaligned_instr = argv.enable_misaligned_instr
self.enable_dummy_csr_write = argv.enable_dummy_csr_write
self.randomize_csr = argv.randomize_csr
self.allow_sfence_exception = argv.allow_sfence_exception
self.no_delegation = argv.no_delegation
self.force_m_delegation = argv.force_m_delegation
self.force_s_delegation = argv.force_s_delegation
self.support_supervisor_mode = 0
self.disable_compressed_instr = argv.disable_compressed_instr
self.require_signature_addr = argv.require_signature_addr
if(self.require_signature_addr):
self.signature_addr = int(argv.signature_addr, 16)
else:
self.signature_addr = 0xdeadbeef
self.gen_debug_section = argv.gen_debug_section
self.enable_ebreak_in_debug_rom = argv.enable_ebreak_in_debug_rom
self.set_dcsr_ebreak = argv.set_dcsr_ebreak
self.num_debug_sub_program = argv.num_debug_sub_program
self.enable_debug_single_step = argv.enable_debug_single_step
self.single_step_iterations = 0
self.set_mstatus_tw = argv.set_mstatus_tw
self.set_mstatus_mprv = argv.set_mstatus_mprv
self.min_stack_len_per_program = 10 * (rcs.XLEN / 8)
self.max_stack_len_per_program = 16 * (rcs.XLEN / 8)
self.max_branch_step = 20
self.max_directed_instr_stream_seq = 20
self.reserved_regs = vsc.list_t(vsc.enum_t(riscv_reg_t))
self.enable_floating_point = argv.enable_floating_point
self.enable_vector_extension = argv.enable_vector_extension
self.enable_b_extension = argv.enable_b_extension
# Commenting out for now
# self.enable_bitmanip_groups = ['ZBB', 'ZBS', 'ZBP', 'ZBE', 'ZBF',
# 'ZBC', 'ZBR', 'ZBM', 'ZBT', 'ZB_TMP']
self.dist_control_mode = 0
self.category_dist = {}
@vsc.constraint
def gpr_c(self):
pass # TODO
def check_setting(self):
support_64b = 0
support_128b = 0
# list of satp_mode_t from riscv_core_setting.py
stp_md_lst = rcs.SATP_MODE
# list of riscv_instr_group_t with names of riscv_instr_name_t in it.
supported_isa_lst = list(map(lambda z: z.name, riscv_instr_group_t))
# check the valid isa support
for x in rcs.supported_isa:
if x in (supported_isa_lst[1], supported_isa_lst[3], supported_isa_lst[5],
supported_isa_lst[8], supported_isa_lst[11], supported_isa_lst[13],
supported_isa_lst[19]):
support_64b = 1
logging.info("support_64b=%d" % support_64b)
logging.debug("Supported ISA=%s" % x)
elif x in (supported_isa_lst[14], supported_isa_lst[15]):
support_128b = 1
logging.info("support_128b=%d" % support_128b)
logging.debug("Supported ISA=%s" % x)
if (support_128b == 1) and (rcs.XLEN != 128):
logging.critical("XLEN should be set to 128 based on \
riscv_core_setting.supported_isa setting")
logging.info("XLEN Value=%d" % rcs.XLEN)
sys.exit("XLEN is not equal to 128, set it Accordingly!")
if (support_128b == 0) and (support_64b == 1) and (rcs.XLEN != 64):
logging.critical("XLEN should be set to 64 based on \
riscv_core_setting.supported_isa setting")
logging.info("XLEN Value=%d" % rcs.XLEN)
sys.exit("XLEN is not equal to 64, set it Accordingly!")
if not(support_128b or support_64b) and (rcs.XLEN != 32):
logging.critical("XLEN should be set to 32 based on \
riscv_core_setting.supported_isa setting")
logging.info("XLEN Value=%d" % rcs.XLEN)
sys.exit("XLEN is not equal to 32, set it Accordingly!")
if not(support_128b or support_64b) and not(('SV32' in stp_md_lst) or
('BARE' in stp_md_lst)):
logging.critical("SATP mode is not supported for RV32G ISA")
logging.info(stp_md_lst)
sys.exit("Supported SATP mode is not provided")
# TODO
def setup_instr_distribution(self):
pass
def init_delegation(self):
for i in self.mode_exp_lst:
if i == self.mode_exp_lst[0]:
continue
self.m_mode_exception_delegation[i] = 0
self.s_mode_exception_delegation[i] = 0
for j in self.mode_intrpt_lst:
if j == self.mode_intrpt_lst[0]:
continue
self.m_mode_interrupt_delegation[j] = 0
self.s_mode_interrupt_delegation[j] = 0
def pre_randomize(self):
for x in rcs.supported_privileged_mode:
if(x == "SUPERVISOR_MODE"):
self.support_supervisor_mode = 1
def get_non_reserved_gpr(self):
pass
def post_randomize(self):
self.reserved_regs.append(self.tp)
self.reserved_regs.append(self.sp)
self.reserved_regs.append(self.scratch_reg)
self.min_stack_len_per_program = 2 * (rcs.XLEN / 8)
logging.info("min_stack_len_per_program value = %d"
% self.min_stack_len_per_program)
self.check_setting() # to check the setting is legal
# TODO, Need to change the logic once the constraints are up.
if "USER_MODE" == self.init_privileged_mode:
logging.info("mode=%s" % "USER_MODE")
self.no_wfi = 1
def get_invalid_priv_lvl_csr(self):
invalid_lvl = []
# Debug CSRs are inaccessible from all but Debug Mode
# and we cannot boot into Debug Mode.
invalid_lvl.append('D')
# TODO Need to change the logic once the constraints are up.
for mode in self.init_privileged_mode:
if mode == "MACHINE_MODE":
continue
elif mode == 'SUPERVISOR_MODE':
invalid_lvl.append('M')
logging.info("supr_mode---")
logging.debug(invalid_lvl)
elif mode == 'USER_MODE':
invalid_lvl.append('S')
invalid_lvl.append('M')
logging.info("usr_mode---")
logging.debug(invalid_lvl)
else:
logging.critical("Unsupported initialization privilege mode")
# implemented_csr from riscv_core_setting.py
for x in rcs.implemented_csr:
if x[0] in invalid_lvl:
self.invalid_priv_mode_csrs.append(x)
# This function calls all the above defined function which should
# be called in init function as per SV logic.This function as to be
# called after every instance of the gen_config handle
def func_call_init(self):
self.init_delegation()
# self.setup_instr_distribution() # TODO
self.get_invalid_priv_lvl_csr()
def parse_args():
parse = argparse.ArgumentParser()
parse.add_argument('--num_of_tests', help = 'num_of_tests', type = int, default = 1)
parse.add_argument('--enable_page_table_exception',
help = 'enable_page_table_exception', type = int, default = 0)
parse.add_argument('--enable_interrupt', help = 'enable_interrupt',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_nested_interrupt', help = 'enable_nested_interrupt',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_timer_irq', help = 'enable_timer_irq',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--num_of_sub_program', help = 'num_of_sub_program', type = int, default = 5)
parse.add_argument('--instr_cnt', help = 'instr_cnt', type = int, default = 200)
parse.add_argument('--tvec_alignment', help = 'tvec_alignment', type = int, default = 2)
parse.add_argument('--no_ebreak', help = 'no_ebreak', choices = [0, 1], type = int, default = 1)
parse.add_argument('--no_dret', help = 'no_dret', choices = [0, 1], type = int, default = 1)
parse.add_argument('--no_wfi', help = 'no_wfi', choices = [0, 1], type = int, default = 1)
parse.add_argument('--no_branch_jump', help = 'no_branch_jump',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_load_store', help = 'no_load_store',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_csr_instr', help = 'no_csr_instr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--fix_sp', help = 'fix_sp', choices = [0, 1], type = int, default = 0)
parse.add_argument('--use_push_data_section', help = 'use_push_data_section',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_illegal_csr_instruction',
help = 'enable_illegal_csr_instruction', choices = [0, 1],
type = int, default = 0)
parse.add_argument('--enable_access_invalid_csr_level',
help = 'enable_access_invalid_csr_level', choices = [0, 1],
type = int, default = 0)
parse.add_argument('--enable_misaligned_instr', help = 'enable_misaligned_instr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_dummy_csr_write', help = 'enable_dummy_csr_write',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--allow_sfence_exception', help = 'allow_sfence_exception',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_data_page', help = 'no_data_page',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_directed_instr', help = 'no_directed_instr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--no_fence', help = 'no_fence', choices = [0, 1], type = int, default = 1)
parse.add_argument('--no_delegation', help = 'no_delegation',
choices = [0, 1], type = int, default = 1)
parse.add_argument('--illegal_instr_ratio',
help = 'illegal_instr_ratio', type = int, default = 0)
parse.add_argument('--hint_instr_ratio', help = 'hint_instr_ratio', type = int, default = 0)
parse.add_argument('--num_of_harts', help = 'num_of_harts', type = int, default = rcs.NUM_HARTS)
parse.add_argument('--enable_unaligned_load_store',
help = 'enable_unaligned_load_store', choices = [0, 1],
type = int, default = 0)
parse.add_argument('--force_m_delegation', help = 'force_m_delegation',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--force_s_delegation', help = 'force_s_delegation',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--require_signature_addr', help = 'require_signature_addr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--signature_addr', help = 'signature_addr', default = 0xdeadbeef)
parse.add_argument('--disable_compressed_instr',
help = 'disable_compressed_instr', choices = [0, 1], type = int, default = 0)
parse.add_argument('--randomize_csr', help = 'randomize_csr',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--gen_debug_section', help = 'gen_debug_section',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--bare_program_mode', help = 'bare_program_mode',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--num_debug_sub_program',
help = 'num_debug_sub_program', type = int, default = 0)
parse.add_argument('--enable_ebreak_in_debug_rom',
help = 'enable_ebreak_in_debug_rom', choices = [0, 1],
type = int, default = 0)
parse.add_argument('--set_dcsr_ebreak', help = 'set_dcsr_ebreak',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_debug_single_step',
help = 'enable_debug_single_step', choices = [0, 1], type = int, default = 0)
parse.add_argument('--set_mstatus_tw', help = 'set_mstatus_tw',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--set_mstatus_mprv', help = 'set_mstatus_mprv',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_floating_point', help = 'enable_floating_point',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_vector_extension', help = 'enable_vector_extension',
choices = [0, 1], type = int, default = 0)
parse.add_argument('--enable_b_extension', help = 'enable_b_extension',
choices = [0, 1], type = int, default = 0)
# TODO
'''
cmdline_enum_processor #(b_ext_group_t)::get_array_values("+enable_bitmanip_groups=",
enable_bitmanip_groups);
if(inst.get_arg_value("+boot_mode=", boot_mode_opts)) begin
`uvm_info(get_full_name(), $sformatf(
"Got boot mode option - %0s", boot_mode_opts), UVM_LOW)
case(boot_mode_opts)
"m" : init_privileged_mode = MACHINE_MODE;
"s" : init_privileged_mode = SUPERVISOR_MODE;
"u" : init_privileged_mode = USER_MODE;
default: `uvm_fatal(get_full_name(),
$sformatf("Illegal boot mode option - %0s", boot_mode_opts))
endcase
init_privileged_mode.rand_mode(0);
addr_translaction_rnd_order_c.constraint_mode(0);
end
`uvm_info(`gfn, $sformatf("riscv_instr_pkg::supported_privileged_mode = %0d",
riscv_instr_pkg::supported_privileged_mode.size()), UVM_LOW)
void'(inst.get_arg_value("+asm_test_suffix=", asm_test_suffix));
// Directed march list from the runtime options, ex. RV32I, RV32M etc.
cmdline_enum_processor #(riscv_instr_group_t)::get_array_values("+march=", march_isa);
if (march_isa.size != 0) riscv_instr_pkg::supported_isa = march_isa;
'''
args = parse.parse_args()
return args
args = parse_args()
cfg = riscv_instr_gen_config(args)
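# Example (illustrative) flags for the generator run that imports this module;
# only options defined in parse_args() above are shown:
#   --num_of_tests 2 --instr_cnt 300 --num_of_sub_program 5 --enable_interrupt 1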
|
the-stack_0_10753 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
COUNTRY_REGION = [
("Myanmar", "Asia"),
("Angola","Africa"),
("Cambodia","Asia"),
("Cayman Islands","Caribbean"),
("Dominica","Caribbean"),
("Greenland","Europe"),
("Honduras","Latin America"),
("Hong Kong","Asia"),
("Iraq","Middle East"),
("Jordan","Middle East"),
("Macao","Asia"),
("Papua New Guinea","Pacific"),
("Russia","Europe"),
("Rwanda","Africa"),
("Seychelles","Africa"),
("Timor-Leste","Asia"),
("Uzbekistan","Asia"),
]
def get_region_map(CountryRegion):
from django_countries import countries
region_map = {}
# Map new country code(s) to regions
for country_region in COUNTRY_REGION:
code = countries.by_name(country_region[0])
if code:
if country_region[1] in region_map:
region_map[country_region[1]].append(code)
else:
region_map[country_region[1]] = [code]
return region_map
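# Shape sketch of the mapping built above (the ISO codes shown are illustrative
# guesses at what django_countries resolves the names to, not verified output;
# names it fails to resolve are skipped by the `if code:` guard above):
#
#     {
#         "Asia": ["MM", "KH", "HK", "MO", "TL", "UZ"],
#         "Africa": ["AO", "RW", "SC"],
#         "Caribbean": ["KY", "DM"],
#         ...
#     }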
def code(apps, schema_editor):
CountryRegion = apps.get_model("forms", "CountryRegion")
db_alias = schema_editor.connection.alias
# Update countries regions
CountryRegion.objects.using(db_alias).filter(country="CY").update(region="Europe")
CountryRegion.objects.using(db_alias).filter(country="KZ").update(region="Asia")
CountryRegion.objects.using(db_alias).filter(country="PR").update(region="Caribbean")
CountryRegion.objects.using(db_alias).filter(country="VU").update(region="Pacific")
# Create CountryRegion objects for supplied pairs
region_map = get_region_map(CountryRegion)
for region, country_code_list in region_map.items():
for country_code in country_code_list:
CountryRegion.objects.using(db_alias).create(
country=country_code, region=region
)
def reverse_code(apps, schema_editor):
CountryRegion = apps.get_model("forms", "CountryRegion")
db_alias = schema_editor.connection.alias
# Reverse Update regions
CountryRegion.objects.using(db_alias).filter(country="CY").update(region="Middle East")
CountryRegion.objects.using(db_alias).filter(country="KZ").update(region="Europe")
# Delete CountryRegion objects for supplied pairs
region_map = get_region_map(CountryRegion)
for region, country_code_list in region_map.items():
for country_code in country_code_list:
CountryRegion.objects.using(db_alias).filter(
country=country_code, region=region
).delete()
class Migration(migrations.Migration):
dependencies = [
("forms", "0058_add_deleted_attribute"),
]
operations = [
migrations.RunPython(code, reverse_code),
]
|
the-stack_0_10754 | from sympy import *
import sys
sys.path.insert(1, '..')
from rodrigues_R_utils import *
px, py, pz = symbols('px py pz')
sx, sy, sz = symbols('sx sy sz')
tie_px, tie_py, tie_pz = symbols('tie_px tie_py tie_pz')
cols, rows = symbols('cols rows')
pi = symbols('pi')
u_kp, v_kp = symbols('u_kp v_kp')
position_symbols = [px, py, pz]
rodrigues_symbols = [sx, sy, sz]
tie_point_symbols = [tie_px, tie_py, tie_pz]
all_symbols = position_symbols + rodrigues_symbols + tie_point_symbols
RT_wc = matrix44FromRodrigues(px, py, pz, sx, sy, sz)
r=RT_wc[:-1,:-1]
t=Matrix([px, py, pz]).vec()
pos_w=Matrix([tie_px, tie_py, tie_pz]).vec()
bearing = r * pos_w + t
norm = sqrt(bearing[0]*bearing[0] + bearing[1]*bearing[1] + bearing[2]*bearing[2])
bearing=bearing/norm
latitude=-asin(bearing[1])
longitude=atan2(bearing[0], bearing[2])
u=cols*(0.5 + longitude / (2.0 * pi))
v=rows*(0.5 - latitude/pi)
u_delta = u_kp - u
v_delta = v_kp - v
obs_eq = Matrix([u_delta, v_delta]).vec()
obs_eq_jacobian = obs_eq.jacobian(all_symbols)
print(obs_eq)
print(obs_eq_jacobian)
with open("equirectangular_camera_colinearity_rodrigues_wc_jacobian.h",'w') as f_cpp:
f_cpp.write("inline void observation_equation_equrectangular_camera_colinearity_rodrigues_wc(Eigen::Matrix<double, 2, 1> &delta, double rows, double cols, double pi, double px, double py, double pz, double sx, double sy, double sz, double tie_px, double tie_py, double tie_pz, double u_kp, double v_kp)\n")
f_cpp.write("{")
f_cpp.write("delta.coeffRef(0,0) = %s;\n"%(ccode(obs_eq[0,0])))
f_cpp.write("delta.coeffRef(1,0) = %s;\n"%(ccode(obs_eq[1,0])))
f_cpp.write("}")
f_cpp.write("\n")
f_cpp.write("inline void observation_equation_equrectangular_camera_colinearity_rodrigues_wc_jacobian(Eigen::Matrix<double, 2, 9, Eigen::RowMajor> &j, double rows, double cols, double pi, double px, double py, double pz, double sx, double sy, double sz, double tie_px, double tie_py, double tie_pz, double u_kp, double v_kp)\n")
f_cpp.write("{")
for i in range (2):
for j in range (9):
f_cpp.write("j.coeffRef(%d,%d) = %s;\n"%(i,j, ccode(obs_eq_jacobian[i,j])))
f_cpp.write("}")
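# Optional numeric sanity check (a minimal sketch; the values below are arbitrary
# test inputs, not calibration data): substituting numbers into the residual lets
# the generated C++ be cross-checked against the same inputs later.
sample = {px: 0.1, py: 0.2, pz: 0.3, sx: 0.01, sy: 0.02, sz: 0.03,
          tie_px: 1.0, tie_py: 2.0, tie_pz: 5.0,
          cols: 4096, rows: 2048, pi: 3.141592653589793,
          u_kp: 100.0, v_kp: 200.0}
print(obs_eq.subs(sample).evalf())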
|
the-stack_0_10760 | #!/usr/bin/env python3
import app_api
import argparse
import random
import sys
import os
import time
import subprocess
import pickle
from datetime import datetime, timezone, timedelta
import json
from signal import SIGTERM
from app_api import CH_CONF_PATH as CONFIG_PATH
def init_tel(logger):
name = 'initial_tel_data'
args = '''["-f","tel-data/CLYDE_EPS.csv", "/challenge/mission-apps/tel-data/NOVATEL_GPS.csv",
"/challenge/mission-apps/tel-data/REACTION_WHEEL.csv"]'''
start_app_request = '''
mutation{
startApp(name: "%s", config: "%s", args: %s){
success,
errors,
pid
}
}
''' % (name, CONFIG_PATH, args)
app_status_req = '{appStatus(name: "initial_tel_data"){pid}}'
app_unin_req = 'mutation{uninstall(name: "initial_tel_data"){success, errors}}'
try:
response = SERVICES.query(service='app-service', query=start_app_request)
# #print(response)
pid = response['startApp']['pid']
# #print(pid)
while((SERVICES.query(service='app-service', query=app_status_req))['appStatus'][0]['pid'] == pid):
            #print(f'{logger} info: Waiting for initialization to finish...')
time.sleep(1)
response = SERVICES.query(service='app-service', query=app_unin_req)
# #print(response)
except Exception as e:
error_msg = "Initializing Telemetry data failed: " + str(e) + ""
print(error_msg)
return
def find_kill_services(logger):
# Kill all kubos services.
pid_list = []
list_cmds = []
ports = ['8000', '8010', '8020', '8030', '8110', '8120']
for port in ports:
list_cmds.append(['lsof', '-t','-i:%s' % port])
for cmd in list_cmds:
start_sch = subprocess.Popen(cmd, stdout=subprocess.PIPE)
ppid = start_sch.stdout.read().decode('ascii').split('\n')
for pid in ppid:
if pid != '':
pid_list.append(pid)
for pid in pid_list:
#print("Killing %s" % pid)
os.kill(int(pid), SIGTERM)
return
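# Note on the command above: `lsof -t -i:<port>` prints only the PIDs (one per
# line) of the processes bound to that port, which is why stdout is split on
# newlines before each PID is sent SIGTERM.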
def delete_by_force(logger):
sch_path = '/challenge/target/release/schedules/stage_one/'
if os.path.exists(sch_path):
f_list = os.listdir(sch_path)
for f in f_list:
os.unlink(sch_path + f.split('/')[-1]) # in case of malicious file names.
os.rmdir(sch_path)
return
def delete_all_info(logger):
requests = []
requestSafeMode = 'mutation{safeMode{success, errors}}'
requestRemoveMode = 'mutation{removeMode(name:"stage_one"){success, errors}}'
mapps = ['critical_tel_check', 'initial_tel_data', 'update_tel', 'housekeeping',
'request_flag_telemetry', 'sunpoint', 'enable_groundlink', 'groundpoint', 'sunpoint']
for mapp in mapps:
requestUninstall = 'mutation{uninstall(name: "%s"){success, errors}}' % mapp
requests.append(requestUninstall)
app_count = 0
for request in requests:
try:
response = SERVICES.query(service='app-service', query=request)
#print(f"Deleted %s: {response['uninstall']['success']} | {response['uninstall']['errors']}" % mapps[app_count]) # DEBUG TAKE OUT
except Exception as e:
error_msg = "Deleting app %s failed: " % request + str(e) + ""
print(error_msg)
app_count += 1
try:
response = SERVICES.query(service='scheduler-service', query=requestSafeMode)
#print("Turned on safe mode.")
time.sleep(1)
response = SERVICES.query(service='scheduler-service', query=requestRemoveMode)
#print("Removed mode stage_one.")
except Exception as e:
error_msg = "Deleting all schedules failed: " + str(e) + ""
print(error_msg)
delete_by_force(logger)
find_kill_services(logger)
return
def activate_mode(logger, mode_name):
request = '''
mutation {
activateMode(name: "%s") {
success
errors
}
} ''' % mode_name
#print(f"{logger} info: Activating mode %s." % mode_name) # DEBUG
try:
response = SERVICES.query(service='scheduler-service', query=request)
# #print(response)
except Exception as e:
error_msg = "Starting mode %s failed: " % mode_name + str(e) + ""
print(error_msg) # DEBUG
return
def create_mode(logger, mode_name): #Create the stage 1 mode
request = """
mutation {
createMode(name: "%s") {
success
errors
}
}""" % mode_name
#print(f"{logger} info: Creating mode %s." % mode_name) # DEBUG
try:
response = SERVICES.query(service='scheduler-service', query=request)
# #print(response)
except Exception as e:
error_msg = "Creating mode %s failed: " % mode_name + str(e) + ""
print(error_msg) # DEBUG
return
def create_mode_schedule(logger, mode_name):
sch_name = "nominal-op"
schedule_path = os.getcwd() + f"/../{mode_name}.json"
request = '''
mutation {
importTaskList(name: "%s", path: "%s", mode: "%s") {
success
errors
}
} ''' % (sch_name, schedule_path, mode_name)
#print(f"{logger} info: Creating schedule %s for mode %s." % (sch_name, mode_name))
try:
response = SERVICES.query(service='scheduler-service', query=request)
#print(response)
except Exception as e:
error_msg = "Importing schedule %s in mode %s failed: " % (sch_name, mode_name) + str(e) + ""
print(error_msg)
return
def register_all_apps(logger):
cwd = os.getcwd()
# Stage one apps to register
init_tel_app = cwd + "/../initial_tel_data"
crit_check_app = cwd + "/../critical_tel_check"
eps_tel_app = cwd + "/../update_tel"
# Stage two apps to register
p_apps_app = cwd + "/../../stage_two/print_all_apps"
req_flag_app = cwd + "/../../stage_two/req_flag_base"
en_down_app = cwd + "/../../stage_two/enable_downlink"
gndpoint_app = cwd + "/../../stage_two/groundpoint"
sunpoint_app = cwd + "/../../stage_two/sunpoint"
low_power_app = cwd + "/../../stage_two/low_power"
act_trans_app = cwd + "/../../stage_two/activate_tranmsission_mode"
registrations = [init_tel_app, crit_check_app, eps_tel_app, p_apps_app, req_flag_app, en_down_app, gndpoint_app, sunpoint_app, low_power_app, act_trans_app]
for mapp in registrations:
#print(f"{logger} info: Registering, %s" % mapp.split('/')[-1] + " app.")
register_app(logger, mapp)
return
def register_app(logger, app_path):
request = """
mutation {
register(path: "%s") {
success,
errors,
entry {
app {
name
executable
}
}
}
} """ % app_path
try:
response = SERVICES.query(service='app-service', query=request)
##print(response)
except Exception as e:
error_msg = "Registering %s failed: " % app_path + str(e) + ""
print(error_msg)
def store_latest_json(logger):
current_tel = ""
with open('/challenge/mission-apps/pkls/current_tel.json', 'w') as js_file:
json.dump(current_tel, js_file)
return
def store_low_power_json(logger, low_power_time):
with open('../low_power.json', 'r') as js_file:
low_power_json = json.load(js_file)
low_power_json['tasks'][1]['time'] = low_power_time
with open('../low_power.json', 'w') as js_file:
json.dump(low_power_json, js_file)
def store_transmission_json(logger):
SEED = int(os.getenv('SEED', 0))
random.seed(SEED)
utc_dt = datetime.now(tz=timezone.utc)
base_time = utc_dt + timedelta( seconds= ( (60.0) * (50.0 + random.randrange(0,10)) ) )
time_offsets = [0.0, 20.0, 25.0, 30.0]
with open('../transmission.json', 'r') as js_file:
transmission_json = json.load(js_file)
for app, offset in zip(transmission_json['tasks'], time_offsets):
app['time'] = (base_time + timedelta(seconds=offset)).strftime("%Y-%m-%d %H:%M:%S")
# print("DEBUG: " + str(transmission_json))
with open('../transmission.json', 'w') as js_file:
json.dump(transmission_json, js_file)
low_power_time_str = (base_time + timedelta(seconds=-10)).strftime("%Y-%m-%d %H:%M:%S")
#low_power_time_obj = (base_time + timedelta(seconds=-10))
with open('/challenge/mission-apps/pkls/transmission_time.pkl', 'wb') as pkl_file:
pickle.dump(low_power_time_str, pkl_file)
return low_power_time_str
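# Timing sketch for the schedule built above (illustrative): if base_time lands on
# 12:50:00 UTC (now + 50-59 minutes, driven by SEED), the four transmission tasks
# are stamped 12:50:00, 12:50:20, 12:50:25 and 12:50:30, and the low_power switch
# time returned above is 10 seconds earlier, 12:49:50.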
def main():
logger = "start_stage_one "
parser = argparse.ArgumentParser()
parser.add_argument('--config', '-c', nargs=1)
parser.add_argument('--delete', '-d', help='Deletes all app and service data', action='store_true')
args = parser.parse_args()
if args.config is not None:
global SERVICES
SERVICES = app_api.Services(args.config[0])
else:
SERVICES = app_api.Services(CONFIG_PATH)
    #print(f"{logger} info: Beginning stage_one")
if args.delete:
#print(f"{logger} info: Deleting app and service data.")
delete_all_info(logger)
else:
register_all_apps(logger)
init_tel(logger)
create_mode(logger, "transmission")
low_power_time_str = store_transmission_json(logger)
create_mode_schedule(logger, "transmission")
create_mode(logger, "low_power")
store_low_power_json(logger, low_power_time_str)
create_mode_schedule(logger, "low_power")
create_mode(logger, "station-keeping")
create_mode_schedule(logger, "station-keeping")
activate_mode(logger, "station-keeping")
store_latest_json(logger)
print("\n** Welcome to spaceDB **\n" + '-'* 25)
if __name__ == "__main__":
main()
|
the-stack_0_10761 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import, print_function, unicode_literals
import math
from lifecycle.metrics.app_code import get_app_code_count
from lifecycle.metrics.cost import (
get_cost_score,
get_hdfs_capacity,
get_tspider_capacity,
)
from lifecycle.metrics.heat import (
get_heat_score,
get_query_count,
get_rd_query_list_group,
)
from lifecycle.metrics.importance import (
biz_data_correct,
get_asset_value_score,
get_biz_info,
get_dataset_info,
get_heat_range_importance_score,
get_project_score,
)
from lifecycle.metrics.range import get_node_info, get_range_info, get_range_score
# Data sensitivity levels
SENSITIVITY_DICT = {"public": 0, "private": 1, "confidential": 2, "topsecret": 3}
# Storage score threshold
THRESHOLD = 20.41
class Entity(object):
def _get_data(self):
return {}
def __getattr__(self, item):
"""
        Fall back to the config dict returned by _get_data() when the attribute is not found on the object directly.
:param item:
:return:
"""
if item in self._get_data():
return self._get_data()[item]
        # not in the config dict either: behave like a normal missing attribute
        raise AttributeError(item)
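# Usage sketch for the Entity fallback above (hypothetical subclass, for
# illustration only):
#
#     class Demo(Entity):
#         def __init__(self, cfg):
#             self.cfg = cfg
#         def _get_data(self):
#             return self.cfg
#
#     Demo({"heat_score": 7.5}).heat_score   # -> 7.5
#     Demo({}).heat_score                    # -> AttributeError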
class Project(Entity):
def __init__(self, project_id):
self.project_id = project_id
self.get_project_score()
def get_project_score(self):
"""
get project score and project active
:return:
"""
self.project_score, self.active = get_project_score(self.project_id)
class BkBiz(Entity):
def __init__(self, biz_config):
self.biz_config = biz_config
self.biz_data_correct()
def _get_data(self):
return self.biz_config
def biz_data_correct(self):
"""
correct biz attributes
:return:
"""
if self.biz_config:
(
self.bip_grade_id,
self.oper_state,
self.app_important_level,
self.oper_state_name,
self.bip_grade_name,
self.app_important_level_name,
) = biz_data_correct(self.biz_config)
else:
self.bip_grade_id = None
self.oper_state = None
self.app_important_level = None
self.oper_state_name = None
self.bip_grade_name = None
self.app_important_level_name = None
@property
def is_bip(self):
"""
is_bip
:return:
"""
return True if self.biz_config.get("BizV1.IsBip") == "是" else False
@property
def biz_score(self):
"""
biz_score
:return:
"""
if (
self.oper_state is None
and self.bip_grade_id is None
and self.app_important_level is None
):
return 0
return (
self.oper_state + self.bip_grade_id + self.app_important_level + 1
if self.is_bip is True
else self.oper_state + self.bip_grade_id + self.app_important_level
)
class Range(Entity):
def __init__(self, dataset_id, dataset_type):
self.dataset_id = dataset_id
self.dataset_type = dataset_type
self.biz_count = 1
self.project_count = 1
self.weighted_node_count = 0.0
self.node_count_list = "[]"
self.node_count = 0
self.depth = 0
self.range_score = 0.0
self.normalized_range_score = 13.1
self.app_code_count = 0
self.range_related_dict = self.get_range_info()
self.set_range_dict()
def get_range_info(self):
return get_range_info(self.dataset_id, self.dataset_type)
def get_biz_count(self):
"""
get the count of bizs which apply the dataset
        if the dataset does not have lineage, biz_count=1 (the biz of itself)
:return:
"""
biz_list = self.range_related_dict.get("data", {}).get("bk_biz_id", [])
return len(biz_list[0].get("@groupby")) if biz_list else 1
def get_project_count(self):
"""
get the count of projects which apply the dataset
        if the dataset does not have lineage, project_count=1 (the project of itself)
:return:
"""
project_list = self.range_related_dict.get("data", {}).get("project_count", [])
return (
project_list[0].get("count")
if project_list and project_list[0].get("count") > 0
else 1
)
def get_node_info(self):
"""
get the successor node info
:return:
"""
return get_node_info(self.dataset_id, self.dataset_type)
def get_range_score(self):
"""
get range_score & normalized_range_score
:return:
"""
return get_range_score(
self.weighted_node_count, self.biz_count, self.project_count
)
def get_app_code_count(self):
"""
get the count of apps which query the dataset
:return:
"""
return get_app_code_count(self.dataset_id)
def set_range_dict(self):
if self.range_related_dict:
self.biz_count = self.get_biz_count()
self.project_count = self.get_project_count()
(
self.weighted_node_count,
self.node_count_list,
self.node_count,
self.depth,
) = self.get_node_info()
self.range_score, self.normalized_range_score = self.get_range_score()
self.app_code_count = self.get_app_code_count()
class Heat(Entity):
def __init__(self, heat_config):
self.heat_config = heat_config
self.queue_service_count = 0
self.heat_score = 0.0
def _get_data(self):
return self.heat_config
def set_heat_score(self):
self.heat_score = get_heat_score(
self.heat_config["dataset_id"],
self.heat_config["dataset_type"],
self.query_count,
)
class StorageCapacity(Entity):
def __init__(self, dataset_id, dataset_type):
self.dataset_id = dataset_id
self.dataset_type = dataset_type
self.set_hdfs_capacity()
self.set_tspider_capacity()
self.set_total_capacity()
self.set_log_capacity()
self.set_capacity_score(THRESHOLD)
def set_hdfs_capacity(self):
if self.dataset_type == "result_table":
self.hdfs_capacity = get_hdfs_capacity(self.dataset_id)
else:
self.hdfs_capacity = 0
def set_tspider_capacity(self):
if self.dataset_type == "result_table":
self.tspider_capacity = get_tspider_capacity(self.dataset_id)
else:
self.tspider_capacity = 0
def set_total_capacity(self):
"""
get total capacity (hdfs_capacity + tspider_capacity)
:return:
"""
self.total_capacity = self.hdfs_capacity + self.tspider_capacity
def set_log_capacity(self):
"""
get log capacity
:return:
"""
if self.total_capacity:
self.log_capacity = math.log(self.total_capacity)
else:
self.log_capacity = -1
def set_capacity_score(self, threshold):
"""
get capacity_score
:return:
"""
if self.log_capacity and self.log_capacity >= 0:
self.capacity_score = (
99.99
if self.log_capacity / float(threshold) * 100 > 99.99
else round(self.log_capacity / float(threshold) * 100, 2)
)
else:
self.capacity_score = -1
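# Worked example for capacity_score (illustrative; the unit of total_capacity
# depends on what get_hdfs_capacity/get_tspider_capacity return, which is not
# shown here): total_capacity = 1_000_000 gives log_capacity = ln(1e6) ~= 13.82,
# so capacity_score = 13.82 / 20.41 * 100 ~= 67.69 (values above 99.99 are capped).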
class DataSet(Entity):
def __init__(self, dataset_config):
self.dataset_config = dataset_config
self.heat_score = 0
self.normalized_range_score = 0
self.importance_score = 0
self.asset_value_score = 0
self.query_count = 0
def _get_data(self):
return self.dataset_config
def set_dataset_info(self):
"""
get the dataset attributes
:return:
"""
(
self.biz_id,
self.project_id,
self.sensitivity,
self.generate_type,
) = get_dataset_info(self.data_set_id, self.data_set_type)
def get_storage_capacity(self):
return StorageCapacity(
self.dataset_config["data_set_id"], self.dataset_config["data_set_type"]
)
@property
def storage_capacity(self):
"""
        storage_capacity structure
:return:
"""
if not hasattr(self, "_storage_capacity"):
self._storage_capacity = self.get_storage_capacity()
return self._storage_capacity
def get_biz(self):
"""
get the biz structure
:return:
"""
biz_dict = get_biz_info(self.biz_id)
return BkBiz(biz_dict)
@property
def biz(self):
"""
biz structure
:return:
"""
if not hasattr(self, "_biz"):
self._biz = self.get_biz()
return self._biz
def get_range(self):
"""
get the range structure
:return:
"""
return Range(
self.dataset_config["data_set_id"], self.dataset_config["data_set_type"]
)
@property
def range(self):
"""
range structure
:return:
"""
if not hasattr(self, "_range"):
self._range = self.get_range()
return self._range
def get_heat(self):
"""
get the heat structure
:return:
"""
return Heat(
{
"dataset_id": self.dataset_config["data_set_id"],
"dataset_type": self.dataset_config["data_set_type"],
"query_count": self.query_count,
}
)
@property
def heat(self):
"""
heat structure
:return:
"""
if not hasattr(self, "_heat"):
self._heat = self.get_heat()
return self._heat
def get_project(self):
"""
get the project structure
:return:
"""
return Project(self.project_id)
@property
def project(self):
"""
project structure
:return:
"""
if not hasattr(self, "_project"):
self._project = self.get_project()
return self._project
@property
def dataset_score(self):
"""
dataset_score
:return:
"""
return (
SENSITIVITY_DICT.get(self.sensitivity, 0) + 1
if self.generate_type == "user"
else SENSITIVITY_DICT.get(self.sensitivity, 0)
)
@property
def id(self):
"""
id
:return:
"""
return "{}_{}".format(self.data_set_id, self.data_set_type)
def get_importance_score(self, norm_score=18):
"""
get importance_score
:param norm_score: normalized_score
:return:
"""
importance_score = round(
(self.dataset_score + self.biz.biz_score + self.project.project_score)
/ float(norm_score)
* 100,
2,
)
importance_score = importance_score if importance_score <= 99.99 else 99.99
self.importance_score = importance_score
return importance_score
def set_heat_range_importance_score(self):
"""
get heat_score & range_score & importance_score
:return:
"""
(
self.heat_score,
self.normalized_range_score,
self.importance_score,
) = get_heat_range_importance_score(self.id)
def get_importance_dict(self, norm_score=18):
"""
get all properties related to importance
:param norm_score: normalized_score
:return:
"""
self.set_dataset_info()
if self.biz_id and self.sensitivity and self.generate_type:
return {
"id": self.id,
"data_set_id": self.data_set_id,
"data_set_type": self.data_set_type,
"dataset_score": self.dataset_score,
"biz_score": self.biz.biz_score,
"is_bip": self.biz.is_bip,
"oper_state_name": self.biz.oper_state_name,
"oper_state": self.biz.oper_state,
"bip_grade_name": self.biz.bip_grade_name,
"bip_grade_id": self.biz.bip_grade_id,
"app_important_level_name": self.biz.app_important_level_name,
"app_important_level": self.biz.app_important_level,
"project_score": self.project.project_score,
"active": self.project.active,
"importance_score": self.get_importance_score(norm_score),
}
else:
return {}
def get_asset_value_score(self):
self.asset_value_score = round(
(self.importance_score + self.heat_score + self.normalized_range_score)
/ 3.0,
2,
)
@property
def target_id(self):
return self.dataset_config["data_set_id"]
@property
def target_type(self):
return (
"access_raw_data"
if self.dataset_config["data_set_type"] == "raw_data"
else self.dataset_config["data_set_type"]
)
def get_asset_value_dict(self):
"""
get all properties related to asset_value
:return:
"""
self.set_heat_range_importance_score()
if (
self.heat_score is not None
and self.normalized_range_score is not None
and self.importance_score is not None
):
self.get_asset_value_score()
return {
"id": self.id,
"data_set_id": self.data_set_id,
"data_set_type": self.data_set_type,
"range_id": self.id,
"normalized_range_score": self.normalized_range_score,
"heat_id": self.id,
"heat_score": self.heat_score,
"importance_id": self.id,
"importance_score": self.importance_score,
"asset_value_score": self.asset_value_score,
"target_id": self.target_id,
"target_type": self.target_type,
}
else:
return {}
def get_storage_capacity_dict(self):
"""
get all properties related to storage_capacity
:return:
"""
return {
"id": self.id,
"hdfs_capacity": self.storage_capacity.hdfs_capacity,
"tspider_capacity": self.storage_capacity.tspider_capacity,
"total_capacity": self.storage_capacity.total_capacity,
"log_capacity": self.storage_capacity.log_capacity,
"capacity_score": self.storage_capacity.capacity_score,
}
def get_cost_dict(self):
"""
get all properties related to cost
:return:
"""
return {
"id": self.id,
"target_id": self.target_id,
"target_type": self.target_type,
"capacity_id": self.id,
"capacity_score": self.storage_capacity.capacity_score,
}
def set_cost_score(self):
"""
get cost_score
:return:
"""
self.cost_score = get_cost_score(self.id)
def set_asset_value_score_via_erp(self):
"""
get asset_value_score
:return:
"""
self.asset_value_score = get_asset_value_score(self.id)
@property
def assetvalue_to_cost(self):
if self.cost_score and self.cost_score != -1:
return (
round(self.asset_value_score / float(self.cost_score), 2)
if self.cost_score >= 0
else -1.0
)
else:
return -1.0
def get_lifecycle_dict(self):
"""
get all properties related to lifecycle
:return:
"""
self.set_asset_value_score_via_erp()
self.set_cost_score()
if self.asset_value_score is not None and self.cost_score is not None:
return {
"id": self.id,
"data_set_id": self.data_set_id,
"data_set_type": self.data_set_type,
"target_id": self.target_id,
"target_type": self.target_type,
"range_id": self.id,
"heat_id": self.id,
"importance_id": self.id,
"asset_value_id": self.id,
"cost_id": self.id,
"assetvalue_to_cost": self.assetvalue_to_cost,
}
else:
return {}
def get_range_dict(self):
"""
get all properties related to range
:return:
"""
return {
"id": self.id,
"data_set_id": self.data_set_id,
"data_set_type": self.data_set_type,
"biz_count": self.range.biz_count,
"project_count": self.range.project_count,
"depth": self.range.depth,
"node_count_list": self.range.node_count_list,
"node_count": self.range.node_count,
"weighted_node_count": self.range.weighted_node_count,
"app_code_count": self.range.app_code_count,
"range_score": self.range.range_score,
"normalized_range_score": self.range.normalized_range_score,
}
def set_query_count(self):
self.query_count = get_query_count(
self.data_set_id, self.data_set_type, self.query_dataset_dict
)
def get_heat_dict(self):
"""
get all properties related to heat
:return:
"""
self.set_query_count()
self.heat.set_heat_score()
return {
"data_set_id": self.data_set_id,
"data_set_type": self.data_set_type,
"id": self.id,
"query_count": self.heat.query_count,
"queue_service_count": self.heat.queue_service_count,
"heat_score": self.heat.heat_score,
}
def get_day_query_count_dict(self, query_config_dict):
"""
        build the per-day query metric and dimension dicts for the dataset
:return:
"""
heat = self.get_heat()
heat.set_heat_score()
metric_dict = {
"queue_service_count": heat.queue_service_count,
"app_query_count": query_config_dict["app_query_count"],
"day_query_count": query_config_dict["day_query_count"],
"query_count": heat.query_count,
"statistics_timestamp": query_config_dict["timestamp"],
"heat_score": heat.heat_score,
}
dimension_dict = {
"app_code": query_config_dict["app_code"],
"statistics_time": query_config_dict["time_str"],
}
return metric_dict, dimension_dict
def get_day_query_count_list(self):
"""
        get the per-day query_count list of the dataset; for raw_data (rd) the query_count is aggregated from its clean result tables (rts)
:return:
"""
day_query_count_list = []
if (
self.data_set_type == "result_table"
and self.data_set_id in self.day_query_rt_list
):
for each_day in self.day_query_rt_dict[self.data_set_id]["query_list"]:
self.query_count = self.day_query_rt_dict[self.data_set_id][
"query_count"
]
metric_dict, dimension_dict = self.get_day_query_count_dict(each_day)
day_query_count_list.append(
{"metric_dict": metric_dict, "dimension_dict": dimension_dict}
)
elif self.data_set_type == "raw_data":
rd_query_list_group = get_rd_query_list_group(
self.data_set_id, self.day_query_rt_list, self.day_query_rt_dict
)
for key, group in rd_query_list_group:
sum_query_count = 0
for g in group:
sum_query_count += g.get("day_query_count")
g["app_query_count"] = 0
g["day_query_count"] = sum_query_count
self.query_count = 0
metric_dict, dimension_dict = self.get_day_query_count_dict(g)
day_query_count_list.append(
{"metric_dict": metric_dict, "dimension_dict": dimension_dict}
)
return day_query_count_list
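# Worked example for the scoring above (all component values are hypothetical):
# a "private" (1) user-generated (+1) dataset gives dataset_score = 2; with
# biz_score = 6 and project_score = 4, importance_score = (2 + 6 + 4) / 18 * 100
# = 66.67. With heat_score = 40.0 and normalized_range_score = 13.1 (the default
# above), asset_value_score = (66.67 + 40.0 + 13.1) / 3 = 39.92.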
|
the-stack_0_10763 | import json
import requests
from .constants import HTTP_STATUS_CODE, ERROR_CODE, URL
from .errors import (BadRequestError,
GatewayError,
ServerError)
from . import resources
from types import ModuleType
def capitalize_camel_case(string):
return "".join(map(str.capitalize, string.split('_')))
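# Quick illustration of the helper above:
#     capitalize_camel_case("payment_agreements")  ->  "PaymentAgreements"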
# Create a dict of resource classes
RESOURCE_CLASSES = {}
for name, module in resources.__dict__.items():
if isinstance(module, ModuleType) and \
capitalize_camel_case(name) in module.__dict__:
RESOURCE_CLASSES[name] = module.__dict__[capitalize_camel_case(name)]
class Client:
"""TapPayment client class"""
DEFAULTS = {
'base_url': URL.BASE_URL,
}
def __init__(self, session=None, api_token=None, **options):
"""
Initialize a Client object with session,
optional auth handler, and options
"""
self.session = session or requests.Session()
self.api_token = api_token
self.auth = None
self.base_url = self.DEFAULTS['base_url']
        # initializes each resource
# injecting this client object into the constructor
for name, Klass in RESOURCE_CLASSES.items():
setattr(self, name, Klass(self))
def _update_header(self, options):
token_header = {'Authorization': 'Bearer ' + self.api_token }
if 'headers' not in options:
options['headers'] = {}
options['headers'].update({'Content-type': 'application/json'})
options['headers'].update(token_header)
return options
def request(self, method, path, **options):
"""
Dispatches a request to the TapPayment HTTP API
"""
options = self._update_header(options)
url = "{}{}".format(self.base_url, path)
print(url)
response = getattr(self.session, method)(url, auth=self.auth,
**options)
print(response.json())
if ((response.status_code >= HTTP_STATUS_CODE.OK) and
(response.status_code < HTTP_STATUS_CODE.REDIRECT)):
return response.json()
else:
msg = ""
code = ""
json_response = response.json()
if 'error' in json_response:
if 'description' in json_response['error']:
msg = json_response['error']['description']
if 'code' in json_response['error']:
code = str(json_response['error']['code'])
if str.upper(code) == ERROR_CODE.BAD_REQUEST_ERROR:
raise BadRequestError(msg)
elif str.upper(code) == ERROR_CODE.GATEWAY_ERROR:
raise GatewayError(msg)
elif str.upper(code) == ERROR_CODE.SERVER_ERROR:
raise ServerError(msg)
else:
raise ServerError(msg)
def get(self, path, params, **options):
"""
Parses GET request options and dispatches a request
"""
return self.request('get', path, params=params, **options)
def post(self, path, data, **options):
"""
Parses POST request options and dispatches a request
"""
data, options = self._update_request(data, options)
return self.request('post', path, data=data, **options)
def patch(self, path, data, **options):
"""
Parses PATCH request options and dispatches a request
"""
data, options = self._update_request(data, options)
return self.request('patch', path, data=data, **options)
def delete(self, path, data, **options):
"""
Parses DELETE request options and dispatches a request
"""
data, options = self._update_request(data, options)
return self.request('delete', path, data=data, **options)
def put(self, path, data, **options):
"""
Parses PUT request options and dispatches a request
"""
data, options = self._update_request(data, options)
return self.request('put', path, data=data, **options)
def _update_request(self, data, options):
"""
Updates The resource data and header options
"""
data = json.dumps(data)
token_header = {'Authorization': 'Bearer ' + self.api_token }
if 'headers' not in options:
options['headers'] = {}
options['headers'].update({'Content-type': 'application/json'})
options['headers'].update(token_header)
return data, options
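# Minimal usage sketch (illustrative; the resource attribute and the `create`
# call shown are hypothetical, since what exists depends on the classes defined
# in `resources`, and the token is a placeholder, not a credential):
#
#     client = Client(api_token="sk_test_XXXX")
#     charge = client.charges.create({"amount": 100, "currency": "KWD"})
#
# Every resource attribute is injected by the RESOURCE_CLASSES loop in __init__,
# and each call ultimately dispatches through Client.request().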
|
the-stack_0_10764 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementations of various third-party authentication schemes.
All the classes in this file are class Mixins designed to be used with
web.py RequestHandler classes. The primary methods for each service are
authenticate_redirect(), authorize_redirect(), and get_authenticated_user().
The former should be called to redirect the user to, e.g., the OpenID
authentication page on the third party service, and the latter should
be called upon return to get the user data from the data returned by
the third party service.
They all take slightly different arguments due to the fact all these
services implement authentication and authorization slightly differently.
See the individual service classes below for complete documentation.
Example usage for Google OpenID::
class GoogleHandler(tornado.web.RequestHandler, tornado.auth.GoogleMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Google auth failed")
# Save the user with, e.g., set_secure_cookie()
"""
from __future__ import absolute_import, division, with_statement
import base64
import binascii
import hashlib
import hmac
import logging
import time
import urllib
import urlparse
import uuid
from tornado import httpclient
from tornado import escape
from tornado.httputil import url_concat
from tornado.util import bytes_type, b
class OpenIdMixin(object):
"""Abstract implementation of OpenID and Attribute Exchange.
See GoogleMixin below for example implementations.
"""
def authenticate_redirect(self, callback_uri=None,
ax_attrs=["name", "email", "language", "username"]):
"""Returns the authentication URL for this service.
After authentication, the service will redirect back to the given
callback URI.
We request the given attributes for the authenticated user by
default (name, email, language, and username). If you don't need
all those attributes for your app, you can request fewer with
the ax_attrs keyword argument.
"""
callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
self.redirect(self._OPENID_ENDPOINT + "?" + urllib.urlencode(args))
def get_authenticated_user(self, callback, http_client=None):
"""Fetches the authenticated user data upon redirect.
This method should be called by the handler that receives the
redirect from the authenticate_redirect() or authorize_redirect()
methods.
"""
# Verify the OpenID response via direct request to the OP
args = dict((k, v[-1]) for k, v in self.request.arguments.iteritems())
args["openid.mode"] = u"check_authentication"
url = self._OPENID_ENDPOINT
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(url, self.async_callback(
self._on_authentication_verified, callback),
method="POST", body=urllib.urlencode(args))
def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
url = urlparse.urljoin(self.request.full_url(), callback_uri)
args = {
"openid.ns": "http://specs.openid.net/auth/2.0",
"openid.claimed_id":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.identity":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.return_to": url,
"openid.realm": urlparse.urljoin(url, '/'),
"openid.mode": "checkid_setup",
}
if ax_attrs:
args.update({
"openid.ns.ax": "http://openid.net/srv/ax/1.0",
"openid.ax.mode": "fetch_request",
})
ax_attrs = set(ax_attrs)
required = []
if "name" in ax_attrs:
ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
required += ["firstname", "fullname", "lastname"]
args.update({
"openid.ax.type.firstname":
"http://axschema.org/namePerson/first",
"openid.ax.type.fullname":
"http://axschema.org/namePerson",
"openid.ax.type.lastname":
"http://axschema.org/namePerson/last",
})
known_attrs = {
"email": "http://axschema.org/contact/email",
"language": "http://axschema.org/pref/language",
"username": "http://axschema.org/namePerson/friendly",
}
for name in ax_attrs:
args["openid.ax.type." + name] = known_attrs[name]
required.append(name)
args["openid.ax.required"] = ",".join(required)
if oauth_scope:
args.update({
"openid.ns.oauth":
"http://specs.openid.net/extensions/oauth/1.0",
"openid.oauth.consumer": self.request.host.split(":")[0],
"openid.oauth.scope": oauth_scope,
})
return args
def _on_authentication_verified(self, callback, response):
if response.error or b("is_valid:true") not in response.body:
logging.warning("Invalid OpenID response: %s", response.error or
response.body)
callback(None)
return
# Make sure we got back at least an email from attribute exchange
ax_ns = None
for name in self.request.arguments.iterkeys():
if name.startswith("openid.ns.") and \
self.get_argument(name) == u"http://openid.net/srv/ax/1.0":
ax_ns = name[10:]
break
def get_ax_arg(uri):
if not ax_ns:
return u""
prefix = "openid." + ax_ns + ".type."
ax_name = None
for name in self.request.arguments.iterkeys():
if self.get_argument(name) == uri and name.startswith(prefix):
part = name[len(prefix):]
ax_name = "openid." + ax_ns + ".value." + part
break
if not ax_name:
return u""
return self.get_argument(ax_name, u"")
email = get_ax_arg("http://axschema.org/contact/email")
name = get_ax_arg("http://axschema.org/namePerson")
first_name = get_ax_arg("http://axschema.org/namePerson/first")
last_name = get_ax_arg("http://axschema.org/namePerson/last")
username = get_ax_arg("http://axschema.org/namePerson/friendly")
locale = get_ax_arg("http://axschema.org/pref/language").lower()
user = dict()
name_parts = []
if first_name:
user["first_name"] = first_name
name_parts.append(first_name)
if last_name:
user["last_name"] = last_name
name_parts.append(last_name)
if name:
user["name"] = name
elif name_parts:
user["name"] = u" ".join(name_parts)
elif email:
user["name"] = email.split("@")[0]
if email:
user["email"] = email
if locale:
user["locale"] = locale
if username:
user["username"] = username
claimed_id = self.get_argument("openid.claimed_id", None)
if claimed_id:
user["claimed_id"] = claimed_id
callback(user)
def get_auth_http_client(self):
"""Returns the AsyncHTTPClient instance to be used for auth requests.
May be overridden by subclasses to use an http client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class OAuthMixin(object):
"""Abstract implementation of OAuth.
See TwitterMixin and FriendFeedMixin below for example implementations.
"""
def authorize_redirect(self, callback_uri=None, extra_params=None,
http_client=None):
"""Redirects the user to obtain OAuth authorization for this service.
Twitter and FriendFeed both require that you register a Callback
URL with your application. You should call this method to log the
user in, and then call get_authenticated_user() in the handler
you registered as your Callback URL to complete the authorization
process.
This method sets a cookie called _oauth_request_token which is
subsequently used (and cleared) in get_authenticated_user for
security purposes.
"""
if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
raise Exception("This service does not support oauth_callback")
if http_client is None:
http_client = self.get_auth_http_client()
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
http_client.fetch(
self._oauth_request_token_url(callback_uri=callback_uri,
extra_params=extra_params),
self.async_callback(
self._on_request_token,
self._OAUTH_AUTHORIZE_URL,
callback_uri))
else:
http_client.fetch(
self._oauth_request_token_url(),
self.async_callback(
self._on_request_token, self._OAUTH_AUTHORIZE_URL,
callback_uri))
def get_authenticated_user(self, callback, http_client=None):
"""Gets the OAuth authorized user and access token on callback.
This method should be called from the handler for your registered
OAuth Callback URL to complete the registration process. We call
callback with the authenticated user, which in addition to standard
attributes like 'name' includes the 'access_key' attribute, which
contains the OAuth access you can use to make authorized requests
to this service on behalf of the user.
"""
request_key = escape.utf8(self.get_argument("oauth_token"))
oauth_verifier = self.get_argument("oauth_verifier", None)
request_cookie = self.get_cookie("_oauth_request_token")
if not request_cookie:
logging.warning("Missing OAuth request token cookie")
callback(None)
return
self.clear_cookie("_oauth_request_token")
cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
if cookie_key != request_key:
logging.info((cookie_key, request_key, request_cookie))
logging.warning("Request token does not match cookie")
callback(None)
return
token = dict(key=cookie_key, secret=cookie_secret)
if oauth_verifier:
token["verifier"] = oauth_verifier
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(self._oauth_access_token_url(token),
self.async_callback(self._on_access_token, callback))
def _oauth_request_token_url(self, callback_uri=None, extra_params=None):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_REQUEST_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version=getattr(self, "_OAUTH_VERSION", "1.0a"),
)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
if callback_uri == "oob":
args["oauth_callback"] = "oob"
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
if extra_params:
args.update(extra_params)
signature = _oauth10a_signature(consumer_token, "GET", url, args)
else:
signature = _oauth_signature(consumer_token, "GET", url, args)
args["oauth_signature"] = signature
return url + "?" + urllib.urlencode(args)
def _on_request_token(self, authorize_url, callback_uri, response):
if response.error:
raise Exception("Could not get request token")
request_token = _oauth_parse_response(response.body)
data = (base64.b64encode(request_token["key"]) + b("|") +
base64.b64encode(request_token["secret"]))
self.set_cookie("_oauth_request_token", data)
args = dict(oauth_token=request_token["key"])
if callback_uri == "oob":
self.finish(authorize_url + "?" + urllib.urlencode(args))
return
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
self.redirect(authorize_url + "?" + urllib.urlencode(args))
def _oauth_access_token_url(self, request_token):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(request_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version=getattr(self, "_OAUTH_VERSION", "1.0a"),
)
if "verifier" in request_token:
args["oauth_verifier"] = request_token["verifier"]
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, "GET", url, args,
request_token)
else:
signature = _oauth_signature(consumer_token, "GET", url, args,
request_token)
args["oauth_signature"] = signature
return url + "?" + urllib.urlencode(args)
def _on_access_token(self, callback, response):
if response.error:
logging.warning("Could not fetch access token")
callback(None)
return
access_token = _oauth_parse_response(response.body)
self._oauth_get_user(access_token, self.async_callback(
self._on_oauth_get_user, access_token, callback))
def _oauth_get_user(self, access_token, callback):
raise NotImplementedError()
def _on_oauth_get_user(self, access_token, callback, user):
if not user:
callback(None)
return
user["access_token"] = access_token
callback(user)
def _oauth_request_parameters(self, url, access_token, parameters={},
method="GET"):
"""Returns the OAuth parameters as a dict for the given request.
parameters should include all POST arguments and query string arguments
that will be sent with the request.
"""
consumer_token = self._oauth_consumer_token()
base_args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(access_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version=getattr(self, "_OAUTH_VERSION", "1.0a"),
)
args = {}
args.update(base_args)
args.update(parameters)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, method, url, args,
access_token)
else:
signature = _oauth_signature(consumer_token, method, url, args,
access_token)
base_args["oauth_signature"] = signature
return base_args
def get_auth_http_client(self):
"""Returns the AsyncHTTPClient instance to be used for auth requests.
May be overridden by subclasses to use an http client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class OAuth2Mixin(object):
"""Abstract implementation of OAuth v 2."""
def authorize_redirect(self, redirect_uri=None, client_id=None,
client_secret=None, extra_params=None):
"""Redirects the user to obtain OAuth authorization for this service.
Some providers require that you register a Callback
URL with your application. You should call this method to log the
user in, and then call get_authenticated_user() in the handler
you registered as your Callback URL to complete the authorization
process.
"""
args = {
"redirect_uri": redirect_uri,
"client_id": client_id
}
if extra_params:
args.update(extra_params)
self.redirect(
url_concat(self._OAUTH_AUTHORIZE_URL, args))
def _oauth_request_token_url(self, redirect_uri=None, client_id=None,
client_secret=None, code=None,
extra_params=None):
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
redirect_uri=redirect_uri,
code=code,
client_id=client_id,
client_secret=client_secret,
)
if extra_params:
args.update(extra_params)
return url_concat(url, args)
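# Usage sketch (illustrative): a concrete subclass (e.g. FacebookGraphMixin,
# referenced later in this module) supplies _OAUTH_AUTHORIZE_URL and
# _OAUTH_ACCESS_TOKEN_URL, sends the user to the provider with
#
#     self.authorize_redirect(redirect_uri=my_url, client_id=my_id,
#                             extra_params={"scope": "read_stream"})
#
# and, when the provider redirects back with a `code` argument, fetches
# self._oauth_request_token_url(redirect_uri=my_url, client_id=my_id,
# client_secret=my_secret, code=code) to exchange the code for an access token.
# (my_url, my_id, my_secret and the scope value are placeholders.)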
class TwitterMixin(OAuthMixin):
"""Twitter OAuth authentication.
To authenticate with Twitter, register your application with
Twitter at http://twitter.com/apps. Then copy your Consumer Key and
Consumer Secret to the application settings 'twitter_consumer_key' and
'twitter_consumer_secret'. Use this Mixin on the handler for the URL
you registered as your application's Callback URL.
When your application is set up, you can use this Mixin like this
to authenticate the user with Twitter and get access to their stream::
class TwitterHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("oauth_token", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authorize_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Twitter auth failed")
# Save the user using, e.g., set_secure_cookie()
The user object returned by get_authenticated_user() includes the
attributes 'username', 'name', and all of the custom Twitter user
    attributes described at
http://apiwiki.twitter.com/Twitter-REST-API-Method%3A-users%C2%A0show
in addition to 'access_token'. You should save the access token with
the user; it is required to make requests on behalf of the user later
with twitter_request().
"""
_OAUTH_REQUEST_TOKEN_URL = "http://api.twitter.com/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "http://api.twitter.com/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "http://api.twitter.com/oauth/authorize"
_OAUTH_AUTHENTICATE_URL = "http://api.twitter.com/oauth/authenticate"
_OAUTH_NO_CALLBACKS = False
_TWITTER_BASE_URL = "http://api.twitter.com/1"
def authenticate_redirect(self, callback_uri=None):
"""Just like authorize_redirect(), but auto-redirects if authorized.
This is generally the right interface to use if you are using
Twitter for single-sign on.
"""
http = self.get_auth_http_client()
http.fetch(self._oauth_request_token_url(callback_uri=callback_uri), self.async_callback(
self._on_request_token, self._OAUTH_AUTHENTICATE_URL, None))
def twitter_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given API path, e.g., "/statuses/user_timeline/btaylor"
The path should not include the format (we automatically append
".json" and parse the JSON output).
If the request is a POST, post_args should be provided. Query
string arguments should be given as keyword arguments.
All the Twitter methods are documented at
http://apiwiki.twitter.com/Twitter-API-Documentation.
Many methods require an OAuth access token which you can obtain
through authorize_redirect() and get_authenticated_user(). The
user returned through that process includes an 'access_token'
attribute that can be used to make authenticated requests via
this method. Example usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.twitter_request(
"/statuses/update",
post_args={"status": "Testing Tornado Web Server"},
access_token=user["access_token"],
callback=self.async_callback(self._on_post))
def _on_post(self, new_entry):
if not new_entry:
# Call failed; perhaps missing permission?
self.authorize_redirect()
return
self.finish("Posted a message!")
"""
if path.startswith('http:') or path.startswith('https:'):
# Raw urls are useful for e.g. search which doesn't follow the
# usual pattern: http://search.twitter.com/search.json
url = path
else:
url = self._TWITTER_BASE_URL + path + ".json"
# Add the OAuth resource request signature if we have credentials
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args:
url += "?" + urllib.urlencode(args)
callback = self.async_callback(self._on_twitter_request, callback)
http = self.get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST", body=urllib.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_twitter_request(self, callback, response):
if response.error:
logging.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
callback(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("twitter_consumer_key", "Twitter OAuth")
self.require_setting("twitter_consumer_secret", "Twitter OAuth")
return dict(
key=self.settings["twitter_consumer_key"],
secret=self.settings["twitter_consumer_secret"])
def _oauth_get_user(self, access_token, callback):
callback = self.async_callback(self._parse_user_response, callback)
self.twitter_request(
"/users/show/" + escape.native_str(access_token[b("screen_name")]),
access_token=access_token, callback=callback)
def _parse_user_response(self, callback, user):
if user:
user["username"] = user["screen_name"]
callback(user)
class FriendFeedMixin(OAuthMixin):
"""FriendFeed OAuth authentication.
To authenticate with FriendFeed, register your application with
FriendFeed at http://friendfeed.com/api/applications. Then
copy your Consumer Key and Consumer Secret to the application settings
'friendfeed_consumer_key' and 'friendfeed_consumer_secret'. Use
this Mixin on the handler for the URL you registered as your
application's Callback URL.
When your application is set up, you can use this Mixin like this
to authenticate the user with FriendFeed and get access to their feed::
class FriendFeedHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("oauth_token", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authorize_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "FriendFeed auth failed")
# Save the user using, e.g., set_secure_cookie()
The user object returned by get_authenticated_user() includes the
attributes 'username', 'name', and 'description' in addition to
'access_token'. You should save the access token with the user;
it is required to make requests on behalf of the user later with
friendfeed_request().
"""
_OAUTH_VERSION = "1.0"
_OAUTH_REQUEST_TOKEN_URL = "https://friendfeed.com/account/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "https://friendfeed.com/account/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "https://friendfeed.com/account/oauth/authorize"
_OAUTH_NO_CALLBACKS = True
def friendfeed_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/bret/friends"
If the request is a POST, post_args should be provided. Query
string arguments should be given as keyword arguments.
All the FriendFeed methods are documented at
http://friendfeed.com/api/documentation.
Many methods require an OAuth access token which you can obtain
through authorize_redirect() and get_authenticated_user(). The
user returned through that process includes an 'access_token'
attribute that can be used to make authenticated requests via
this method. Example usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.friendfeed_request(
"/entry",
post_args={"body": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"],
callback=self.async_callback(self._on_post))
def _on_post(self, new_entry):
if not new_entry:
# Call failed; perhaps missing permission?
self.authorize_redirect()
return
self.finish("Posted a message!")
"""
# Add the OAuth resource request signature if we have credentials
url = "http://friendfeed-api.com/v2" + path
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args:
url += "?" + urllib.urlencode(args)
callback = self.async_callback(self._on_friendfeed_request, callback)
http = self.get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST", body=urllib.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_friendfeed_request(self, callback, response):
if response.error:
logging.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
callback(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("friendfeed_consumer_key", "FriendFeed OAuth")
self.require_setting("friendfeed_consumer_secret", "FriendFeed OAuth")
return dict(
key=self.settings["friendfeed_consumer_key"],
secret=self.settings["friendfeed_consumer_secret"])
def _oauth_get_user(self, access_token, callback):
callback = self.async_callback(self._parse_user_response, callback)
self.friendfeed_request(
"/feedinfo/" + access_token["username"],
include="id,name,description", access_token=access_token,
callback=callback)
def _parse_user_response(self, callback, user):
if user:
user["username"] = user["id"]
callback(user)
class GoogleMixin(OpenIdMixin, OAuthMixin):
"""Google Open ID / OAuth authentication.
No application registration is necessary to use Google for authentication
or to access Google resources on behalf of a user. To authenticate with
Google, redirect with authenticate_redirect(). On return, parse the
response with get_authenticated_user(). We send a dict containing the
values for the user, including 'email', 'name', and 'locale'.
Example usage::
class GoogleHandler(tornado.web.RequestHandler, tornado.auth.GoogleMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Google auth failed")
# Save the user with, e.g., set_secure_cookie()
"""
_OPENID_ENDPOINT = "https://www.google.com/accounts/o8/ud"
_OAUTH_ACCESS_TOKEN_URL = "https://www.google.com/accounts/OAuthGetAccessToken"
def authorize_redirect(self, oauth_scope, callback_uri=None,
ax_attrs=["name", "email", "language", "username"]):
"""Authenticates and authorizes for the given Google resource.
Some of the available resources are:
* Gmail Contacts - http://www.google.com/m8/feeds/
* Calendar - http://www.google.com/calendar/feeds/
* Finance - http://finance.google.com/finance/feeds/
You can authorize multiple resources by separating the resource
URLs with a space.
"""
callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs,
oauth_scope=oauth_scope)
self.redirect(self._OPENID_ENDPOINT + "?" + urllib.urlencode(args))
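    # A minimal usage sketch (hypothetical handler; the Contacts scope URL is
    # the one listed in the docstring above and is only illustrative):
    #
    #   class GoogleContactsHandler(tornado.web.RequestHandler,
    #                               tornado.auth.GoogleMixin):
    #       @tornado.web.asynchronous
    #       def get(self):
    #           if self.get_argument("openid.mode", None):
    #               self.get_authenticated_user(self.async_callback(self._on_auth))
    #               return
    #           self.authorize_redirect("http://www.google.com/m8/feeds/")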
def get_authenticated_user(self, callback):
"""Fetches the authenticated user data upon redirect."""
# Look to see if we are doing combined OpenID/OAuth
oauth_ns = ""
for name, values in self.request.arguments.iteritems():
if name.startswith("openid.ns.") and \
values[-1] == u"http://specs.openid.net/extensions/oauth/1.0":
oauth_ns = name[10:]
break
token = self.get_argument("openid." + oauth_ns + ".request_token", "")
if token:
http = self.get_auth_http_client()
token = dict(key=token, secret="")
http.fetch(self._oauth_access_token_url(token),
self.async_callback(self._on_access_token, callback))
else:
OpenIdMixin.get_authenticated_user(self, callback)
def _oauth_consumer_token(self):
self.require_setting("google_consumer_key", "Google OAuth")
self.require_setting("google_consumer_secret", "Google OAuth")
return dict(
key=self.settings["google_consumer_key"],
secret=self.settings["google_consumer_secret"])
def _oauth_get_user(self, access_token, callback):
OpenIdMixin.get_authenticated_user(self, callback)
class FacebookMixin(object):
"""Facebook Connect authentication.
New applications should consider using `FacebookGraphMixin` below instead
of this class.
To authenticate with Facebook, register your application with
Facebook at http://www.facebook.com/developers/apps.php. Then
copy your API Key and Application Secret to the application settings
'facebook_api_key' and 'facebook_secret'.
When your application is set up, you can use this Mixin like this
to authenticate the user with Facebook::
class FacebookHandler(tornado.web.RequestHandler,
tornado.auth.FacebookMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("session", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Facebook auth failed")
# Save the user using, e.g., set_secure_cookie()
The user object returned by get_authenticated_user() includes the
attributes 'facebook_uid' and 'name' in addition to session attributes
like 'session_key'. You should save the session key with the user; it is
required to make requests on behalf of the user later with
facebook_request().
"""
def authenticate_redirect(self, callback_uri=None, cancel_uri=None,
extended_permissions=None):
"""Authenticates/installs this app for the current user."""
self.require_setting("facebook_api_key", "Facebook Connect")
callback_uri = callback_uri or self.request.uri
args = {
"api_key": self.settings["facebook_api_key"],
"v": "1.0",
"fbconnect": "true",
"display": "page",
"next": urlparse.urljoin(self.request.full_url(), callback_uri),
"return_session": "true",
}
if cancel_uri:
args["cancel_url"] = urlparse.urljoin(
self.request.full_url(), cancel_uri)
if extended_permissions:
if isinstance(extended_permissions, (unicode, bytes_type)):
extended_permissions = [extended_permissions]
args["req_perms"] = ",".join(extended_permissions)
self.redirect("http://www.facebook.com/login.php?" +
urllib.urlencode(args))
def authorize_redirect(self, extended_permissions, callback_uri=None,
cancel_uri=None):
"""Redirects to an authorization request for the given FB resource.
The available resource names are listed at
http://wiki.developers.facebook.com/index.php/Extended_permission.
The most common resource types include:
* publish_stream
* read_stream
* email
* sms
extended_permissions can be a single permission name or a list of
names. To get the session secret and session key, call
get_authenticated_user() just as you would with
authenticate_redirect().
"""
self.authenticate_redirect(callback_uri, cancel_uri,
extended_permissions)
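    # A minimal usage sketch (hypothetical handler method) requesting the
    # publish_stream permission listed in the docstring above; the callback
    # URI is an assumption for illustration only:
    #
    #   self.authorize_redirect(["publish_stream", "read_stream"],
    #                           callback_uri="/facebook/authorized")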
def get_authenticated_user(self, callback):
"""Fetches the authenticated Facebook user.
The authenticated user includes the special Facebook attributes
'session_key' and 'facebook_uid' in addition to the standard
user attributes like 'name'.
"""
self.require_setting("facebook_api_key", "Facebook Connect")
session = escape.json_decode(self.get_argument("session"))
self.facebook_request(
method="facebook.users.getInfo",
callback=self.async_callback(
self._on_get_user_info, callback, session),
session_key=session["session_key"],
uids=session["uid"],
fields="uid,first_name,last_name,name,locale,pic_square,"
"profile_url,username")
def facebook_request(self, method, callback, **args):
"""Makes a Facebook API REST request.
We automatically include the Facebook API key and signature, but
        it is the caller's responsibility to include 'session_key' and any
other required arguments to the method.
The available Facebook methods are documented here:
http://wiki.developers.facebook.com/index.php/API
Here is an example for the stream.get() method::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.facebook_request(
method="stream.get",
callback=self.async_callback(self._on_stream),
session_key=self.current_user["session_key"])
def _on_stream(self, stream):
if stream is None:
# Not authorized to read the stream yet?
self.redirect(self.authorize_redirect("read_stream"))
return
self.render("stream.html", stream=stream)
"""
self.require_setting("facebook_api_key", "Facebook Connect")
self.require_setting("facebook_secret", "Facebook Connect")
if not method.startswith("facebook."):
method = "facebook." + method
args["api_key"] = self.settings["facebook_api_key"]
args["v"] = "1.0"
args["method"] = method
args["call_id"] = str(long(time.time() * 1e6))
args["format"] = "json"
args["sig"] = self._signature(args)
url = "http://api.facebook.com/restserver.php?" + \
urllib.urlencode(args)
http = self.get_auth_http_client()
http.fetch(url, callback=self.async_callback(
self._parse_response, callback))
def _on_get_user_info(self, callback, session, users):
if users is None:
callback(None)
return
callback({
"name": users[0]["name"],
"first_name": users[0]["first_name"],
"last_name": users[0]["last_name"],
"uid": users[0]["uid"],
"locale": users[0]["locale"],
"pic_square": users[0]["pic_square"],
"profile_url": users[0]["profile_url"],
"username": users[0].get("username"),
"session_key": session["session_key"],
"session_expires": session.get("expires"),
})
def _parse_response(self, callback, response):
if response.error:
logging.warning("HTTP error from Facebook: %s", response.error)
callback(None)
return
try:
json = escape.json_decode(response.body)
except Exception:
logging.warning("Invalid JSON from Facebook: %r", response.body)
callback(None)
return
if isinstance(json, dict) and json.get("error_code"):
logging.warning("Facebook error: %d: %r", json["error_code"],
json.get("error_msg"))
callback(None)
return
callback(json)
def _signature(self, args):
parts = ["%s=%s" % (n, args[n]) for n in sorted(args.keys())]
body = "".join(parts) + self.settings["facebook_secret"]
if isinstance(body, unicode):
body = body.encode("utf-8")
return hashlib.md5(body).hexdigest()
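    # Sketch of the signing scheme implemented above, with made-up values
    # (not a real API key or secret):
    #
    #   args = {"api_key": "KEY", "method": "facebook.users.getInfo", "v": "1.0"}
    #   base = "api_key=KEYmethod=facebook.users.getInfov=1.0" + facebook_secret
    #   sig  = hashlib.md5(base).hexdigest()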
def get_auth_http_client(self):
"""Returns the AsyncHTTPClient instance to be used for auth requests.
May be overridden by subclasses to use an http client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class FacebookGraphMixin(OAuth2Mixin):
"""Facebook authentication using the new Graph API and OAuth2."""
_OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?"
_OAUTH_AUTHORIZE_URL = "https://graph.facebook.com/oauth/authorize?"
_OAUTH_NO_CALLBACKS = False
def get_authenticated_user(self, redirect_uri, client_id, client_secret,
code, callback, extra_fields=None):
"""Handles the login for the Facebook user, returning a user object.
Example usage::
class FacebookGraphLoginHandler(LoginHandler, tornado.auth.FacebookGraphMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("code", False):
self.get_authenticated_user(
redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
client_secret=self.settings["facebook_secret"],
code=self.get_argument("code"),
callback=self.async_callback(
self._on_login))
return
self.authorize_redirect(redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
extra_params={"scope": "read_stream,offline_access"})
def _on_login(self, user):
logging.error(user)
self.finish()
"""
http = self.get_auth_http_client()
args = {
"redirect_uri": redirect_uri,
"code": code,
"client_id": client_id,
"client_secret": client_secret,
}
fields = set(['id', 'name', 'first_name', 'last_name',
'locale', 'picture', 'link'])
if extra_fields:
fields.update(extra_fields)
http.fetch(self._oauth_request_token_url(**args),
self.async_callback(self._on_access_token, redirect_uri, client_id,
client_secret, callback, fields))
def _on_access_token(self, redirect_uri, client_id, client_secret,
callback, fields, response):
if response.error:
logging.warning('Facebook auth error: %s' % str(response))
callback(None)
return
args = escape.parse_qs_bytes(escape.native_str(response.body))
session = {
"access_token": args["access_token"][-1],
"expires": args.get("expires")
}
self.facebook_request(
path="/me",
callback=self.async_callback(
self._on_get_user_info, callback, session, fields),
access_token=session["access_token"],
fields=",".join(fields)
)
def _on_get_user_info(self, callback, session, fields, user):
if user is None:
callback(None)
return
fieldmap = {}
for field in fields:
fieldmap[field] = user.get(field)
fieldmap.update({"access_token": session["access_token"], "session_expires": session.get("expires")})
callback(fieldmap)
def facebook_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/btaylor/picture"
If the request is a POST, post_args should be provided. Query
string arguments should be given as keyword arguments.
An introduction to the Facebook Graph API can be found at
http://developers.facebook.com/docs/api
Many methods require an OAuth access token which you can obtain
through authorize_redirect() and get_authenticated_user(). The
user returned through that process includes an 'access_token'
attribute that can be used to make authenticated requests via
this method. Example usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.facebook_request(
"/me/feed",
post_args={"message": "I am posting from my Tornado application!"},
access_token=self.current_user["access_token"],
callback=self.async_callback(self._on_post))
def _on_post(self, new_entry):
if not new_entry:
# Call failed; perhaps missing permission?
self.authorize_redirect()
return
self.finish("Posted a message!")
"""
url = "https://graph.facebook.com" + path
all_args = {}
if access_token:
all_args["access_token"] = access_token
all_args.update(args)
if all_args:
url += "?" + urllib.urlencode(all_args)
callback = self.async_callback(self._on_facebook_request, callback)
http = self.get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST", body=urllib.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_facebook_request(self, callback, response):
if response.error:
logging.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
callback(escape.json_decode(response.body))
def get_auth_http_client(self):
"""Returns the AsyncHTTPClient instance to be used for auth requests.
May be overridden by subclasses to use an http client other than
the default.
"""
return httpclient.AsyncHTTPClient()
def _oauth_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth signature for the given request.
See http://oauth.net/core/1.0/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(consumer_token["secret"])]
key_elems.append(escape.utf8(token["secret"] if token else ""))
key = b("&").join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
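# Purely illustrative call to the helper above; the consumer and token values
# are made-up placeholders, not real credentials:
#
#   consumer = dict(key="my_consumer_key", secret="my_consumer_secret")
#   token = dict(key="request_key", secret="request_secret")
#   sig = _oauth_signature(consumer, "GET", "http://example.com/resource",
#                          {"oauth_nonce": "n", "oauth_timestamp": "0"},
#                          token=token)
#   # sig is the base64-encoded HMAC-SHA1 of the signature base string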
def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request.
See http://oauth.net/core/1.0a/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(urllib.quote(consumer_token["secret"], safe='~'))]
key_elems.append(escape.utf8(urllib.quote(token["secret"], safe='~') if token else ""))
key = b("&").join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
def _oauth_escape(val):
if isinstance(val, unicode):
val = val.encode("utf-8")
return urllib.quote(val, safe="~")
def _oauth_parse_response(body):
p = escape.parse_qs(body, keep_blank_values=False)
token = dict(key=p[b("oauth_token")][0], secret=p[b("oauth_token_secret")][0])
# Add the extra parameters the Provider included to the token
special = (b("oauth_token"), b("oauth_token_secret"))
token.update((k, p[k][0]) for k in p if k not in special)
return token
|
the-stack_0_10765 | import demoDay21_recsys_music.hyj.config_hyj as conf
import pandas as pd
import demoDay21_recsys_music.hyj.gen_cf_data_hyj as gen
import tensorflow as tf
import numpy as np
data=gen.user_item_socre(nrows=500)
# Define the label: score (stay_seconds/total_timelen) >= 0.9 -> 1
data['label']=data['score'].apply(lambda x:1 if x>=0.9 else 0)
# Join the user profile and item metadata onto data
user_profile=conf.user_profile()
music_meta=conf.music_data()
# data structure (sample rows):
# user_id item_id score label gender age salary province item_name total_timelen location tags
# 0 0000066b1be6f28ad5e40d47b8d3e51c 426100349 1.280 1 女 26-35 10000-20000 香港 刘德华 - 回家的路 2015央视春晚 现场版 250 港台 -
# 1 000072fc29132acaf20168c589269e1c 426100349 1.276 1 女 36-45 5000-10000 湖北 刘德华 - 回家的路 2015央视春晚 现场版 250 港台 -
# 2 000074ec4874ab7d99d543d0ce419118 426100349 1.084 1 男 36-45 2000-5000 宁夏 刘德华 - 回家的路 2015央视春晚 现场版 250 港台 -
data=data.merge(user_profile,how='inner',on='user_id').merge(music_meta,how='inner',on='item_id')
''' Define the feature set X '''
# User features
user_feat=['age','gender','salary','province']
# Item features
item_feat=['location','total_timelen']
item_text_feat=['item_name','tags']
# Interaction (watch) features
watch_feat=['stay_seconds','score','hour']
# Discrete and continuous features
dispersed_feat=user_feat+['location']
continue_feat=['score']
# Get Y (the label)
labels=data['label']
del data['label']
# One-hot encode the discrete features
# df structure:
# age_0-18 age_19-25 age_26-35 age_36-45 age_46-100 gender_女 gender_男 salary_0-2000 salary_10000-20000 salary_2000-5000 ... province_香港 province_黑龙江 location_- location_亚洲 location_国内 location_日韩 location_日韩,日本 location_日韩,韩国 location_欧美 location_港台
# 0 0 0 1 0 0 1 0 0 1 0 ... 1 0 0 0 0 0 0 0 0 1
# 1 0 0 0 1 0 1 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 1
# 2 0 0 0 1 0 0 1 0 0 1 ... 0 0 0 0 0 0 0 0 0 1
# 3 0 1 0 0 0 1 0 0 0 1 ... 0 0 0 0 0 0 0 0 0 1
# 4 0 0 0 1 0 0 1 0 1 0 ... 0 0 0 0 0 0 0 0 0 1
# get_dummies one-hot encoding: expands each column into one indicator column per distinct value, 1 where the value matches and 0 otherwise (e.g. gender takes the values 男/女, so it expands into gender_女 and gender_男; a male user gets gender_女 0, gender_男 1)
df=pd.get_dummies(data[dispersed_feat])
# Discrete feature columns
# One-hot column structure:
# Index(['age_0-18', 'age_19-25', 'age_26-35', 'age_36-45', 'age_46-100',
# 'gender_女', 'gender_男', 'salary_0-2000', 'salary_10000-20000',
# 'salary_2000-5000', 'salary_20000-100000', 'salary_5000-10000',
# 'province_上海', 'province_云南', 'province_内蒙古', 'province_北京',
# 'province_台湾', 'province_吉林', 'province_四川', 'province_天津',
# 'province_宁夏', 'province_安徽', 'province_山东', 'province_山西',
# 'province_广东', 'province_广西', 'province_新疆', 'province_江苏',
# 'province_江西', 'province_河北', 'province_河南', 'province_浙江',
# 'province_海南', 'province_湖北', 'province_湖南', 'province_澳门',
# 'province_甘肃', 'province_福建', 'province_西藏', 'province_贵州',
# 'province_辽宁', 'province_重庆', 'province_陕西', 'province_青海',
# 'province_香港', 'province_黑龙江', 'location_-', 'location_亚洲',
# 'location_国内', 'location_日韩', 'location_日韩,日本', 'location_日韩,韩国',
# 'location_欧美', 'location_港台'],
# dtype='object')
one_hot_cols=df.columns
# Continuous features are stored directly, without one-hot encoding
df[continue_feat]=data[continue_feat]
df['label']=labels
pre='F:\\00-data\\nn\\'
df_file=pre+'music_data.csv'
df.to_csv(df_file,index=False)
print('df to csv done')
chunks = pd.read_csv(df_file,iterator = True,chunksize=1000)
chunk = chunks.get_chunk(5)
print(chunk)
print(np.array(chunk))
# Define hyperparameters
sample_num=len(df)
x_col_num=len(df.columns)-1
y_class_num=2
x=tf.placeholder(dtype=tf.float32,shape=[None,x_col_num],name='X')
y=tf.placeholder(dtype=tf.float32,shape=[None,y_class_num],name='Y')
max_epoches=10000
learning_rate=0.01
batch_size=50
seed=0
n_hidden=30
del df
# Define the weight and bias variables
w={'h1':tf.Variable(tf.random_normal([x_col_num,n_hidden],seed=seed)),
'res':tf.Variable(tf.random_normal([n_hidden,y_class_num],seed=seed))
}
b={'h1':tf.Variable(tf.random_normal([1,n_hidden],seed=seed)),
'res':tf.Variable(tf.random_normal([1,y_class_num],seed=seed))
}
# Build the model
def multilayer(x,w,b):
    # Hidden layer net (pre-activation) value
h_1=tf.add(tf.matmul(x,w['h1']),b['h1'])
    # Hidden layer output (activation)
h1_out=tf.sigmoid(h_1)
    # Output layer net value (logits)
res_net=tf.add(tf.matmul(h1_out,w['res']),b['res'])
return res_net,res_net,h1_out,h_1
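# Shape sketch for multilayer() with the hyperparameters above (x_col_num
# input features, n_hidden=30 hidden units, y_class_num=2 outputs):
#   x        -> [batch, x_col_num]
#   h_1      -> [batch, n_hidden]       x @ w['h1'] + b['h1']
#   h1_out   -> [batch, n_hidden]       sigmoid(h_1)
#   res_net  -> [batch, y_class_num]    h1_out @ w['res'] + b['res']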
# Build the computation graph for the forward pass, loss, gradients and updates
y_hat,y_net,h_out,h_net=multilayer(x,w,b)
'''
# softmax_cross_entropy_with_logits applies softmax first and then the cross-entropy; it is equivalent to
loss2 = (-tf.reduce_sum(y*tf.log(tf.clip_by_value(softmax(y_hat),1e-10,1.0))))
clip_by_value keeps the argument of the log away from 0
'''
loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=y_hat))
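# Written out, the objective above is the averaged categorical cross-entropy:
#   loss = -(1/N) * sum_i sum_k  y[i, k] * log(softmax(y_hat)[i, k])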
# Gradient optimizer (Adam)
optimizer=tf.train.AdamOptimizer().minimize(loss)
# Variable initializer
init=tf.global_variables_initializer()
# Read the training data from the CSV file
def data_gen(filename):
f_queue=tf.train.string_input_producer(filename)
reader=tf.TextLineReader(skip_header_lines=1)
_,value=reader.read(f_queue)
record_defaults=[[0.0] for _ in range(x_col_num+1)]
data=tf.decode_csv(value,record_defaults=record_defaults)
print(type(data))
feats=tf.stack(tf.gather_nd(data,indices=[[i] for i in range(x_col_num)]))
label=data[-1]
dad=10*batch_size
cap=20*batch_size
feat_batch,label_batch=tf.train.shuffle_batch([feats,label],batch_size=batch_size,min_after_dequeue=dad,capacity=cap)
return feat_batch,label_batch
def gen_data(feat_batch,label_batch):
with tf.Session() as sess:
coord=tf.train.Coordinator()
threads=tf.train.start_queue_runners(coord=coord)
feats,labels=sess.run([feat_batch,label_batch])
# for _ in range(5):
coord.request_stop()
coord.join(threads)
return feats,labels
auc_bool_arr=tf.equal(tf.argmax(y,1),tf.argmax(y_hat,1))
auc=tf.reduce_mean(tf.cast(auc_bool_arr,tf.float32))
# Saver for persisting the model
saver = tf.train.Saver()
def train():
for i in range(5):
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # Train on batch_size samples per step
batch_xs, batch_ys = data_gen([df_file])
train_auc(batch_xs, batch_ys)
coord.request_stop()
coord.join(threads)
# try:
# while not coord.should_stop():
#
# except tf.errors.OutOfRangeError:
# print('read done')
# finally:
# pass
# Wait for threads to finish.
def train_auc(batch_xs, batch_ys):
print(batch_xs.shape)
batch_xs_val, batch_ys_val = sess.run([batch_xs, batch_ys])
print(batch_xs_val[-1], batch_ys_val[-1])
batch_ys2 = []
for j in batch_ys_val:
if j < 1.0:
batch_ys2.append([1., 0.])
else:
batch_ys2.append([0., 1.])
y_hat_out, _, loss_val, auc_val = sess.run([y_hat, optimizer, loss, auc],
feed_dict={x: batch_xs_val, y: batch_ys2})
print('loss %s,auc %s' % (loss_val, auc_val))
with tf.Session() as sess:
sess.run(init)
# writer=tf.summary.FileWriter('graphs',graph=sess.graph)
for epoch in range(20):
loss_avg=0.
loss_avg2=0.
        # Total number of batches
num_of_batch=int(sample_num/batch_size)
print('epoch %s'%epoch)
train()
saver.save(sess,save_path=pre+'nn_softmax_model.ckpt')
print('main done')
|
the-stack_0_10766 | # -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404, redirect
from django.template import loader
from django.http import HttpResponse
from django import template
from .forms import *
from .models import *
from django.core.paginator import Paginator
from django.shortcuts import render
from django.contrib import messages
import os
from django.shortcuts import render, redirect, get_object_or_404
# from .models import Document
# from .forms import DocumentForm
from .helper_functions import similarity
from datetime import datetime
@login_required(login_url="/login/")
def index(request):
context = {}
context['segment'] = 'index'
html_template = loader.get_template('index.html')
return HttpResponse(html_template.render(context, request))
def file_uploader_view(request):
msg = None
if request.method == "POST":
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
form.save()
msg = 'User created.'
return redirect("/documents/repository/")
else:
msg = 'Form is not valid'
else:
form = UploadFileForm()
return render(request, "uploader.html", {"form": form, "msg": msg})
def checker_view(request):
msg = None
if request.method == "POST":
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
instance = form.save()
msg = 'User created.'
uploaded_file_name = instance.file.name
print(f'name of file = {uploaded_file_name}')
percentages = []
directory = os.listdir("core/media/documents/")
if len(directory) != 1:
print(f'dirtype = {type(directory)}')
for file in directory:
filename = os.fsdecode(file)
print(f'name of FILE4 = {file}')
if filename.endswith(".docx") and uploaded_file_name != 'documents/' + filename:
print(f'name of file2 = {filename}')
percentage = similarity(uploaded_file_name, filename)
percentages.append(percentage)
actual_percentage = round(float(sum(percentages) / len(percentages) - 1))
print(f'Actual %age = {actual_percentage}')
return render(request, 'result.html', {'result': actual_percentage})
else:
form = UploadFileForm()
messages.error(request, "Sorry! No file to check against.")
return render(request, "checker.html", {"form": form, "msg": msg})
else:
msg = 'Form is not valid'
else:
form = UploadFileForm()
return render(request, "checker.html", {"form": form, "msg": msg})
def uploads_view(request):
uploads = Uploads.objects.all()
paginator = Paginator(uploads, 5)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
return render(request, "uploads.html", {'page_obj': page_obj})
@login_required(login_url="/login/")
def pages(request):
context = {}
# All resource paths end in .html.
# Pick out the html file name from the url. And load that template.
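    # e.g. a GET for "/documents/settings.html" (a hypothetical URL) gives
    # load_template == "settings.html", which is rendered below.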
try:
load_template = request.path.split('/')[-1]
context['segment'] = load_template
html_template = loader.get_template(load_template)
return HttpResponse(html_template.render(context, request))
except template.TemplateDoesNotExist:
html_template = loader.get_template('page-404.html')
return HttpResponse(html_template.render(context, request))
except:
html_template = loader.get_template('page-500.html')
return HttpResponse(html_template.render(context, request))
|
the-stack_0_10767 | import sys
import pytest
import textwrap
import subprocess
import numpy as np
import numpy.core._multiarray_tests as _multiarray_tests
from numpy import array, arange, nditer, all
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises,
HAS_REFCOUNT, suppress_warnings
)
def iter_multi_index(i):
ret = []
while not i.finished:
ret.append(i.multi_index)
i.iternext()
return ret
def iter_indices(i):
ret = []
while not i.finished:
ret.append(i.index)
i.iternext()
return ret
def iter_iterindices(i):
ret = []
while not i.finished:
ret.append(i.iterindex)
i.iternext()
return ret
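# Quick illustration of the helpers above; the outputs follow directly from
# how nditer walks a C-contiguous 2x2 array:
#
#   iter_multi_index(nditer(np.arange(4).reshape(2, 2), ['multi_index']))
#   -> [(0, 0), (0, 1), (1, 0), (1, 1)]
#   iter_indices(nditer(np.arange(4).reshape(2, 2), ['c_index']))
#   -> [0, 1, 2, 3]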
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_iter_refcount():
# Make sure the iterator doesn't leak
# Basic
a = arange(6)
dt = np.dtype('f4').newbyteorder()
rc_a = sys.getrefcount(a)
rc_dt = sys.getrefcount(dt)
with nditer(a, [],
[['readwrite', 'updateifcopy']],
casting='unsafe',
op_dtypes=[dt]) as it:
assert_(not it.iterationneedsapi)
assert_(sys.getrefcount(a) > rc_a)
assert_(sys.getrefcount(dt) > rc_dt)
# del 'it'
it = None
assert_equal(sys.getrefcount(a), rc_a)
assert_equal(sys.getrefcount(dt), rc_dt)
# With a copy
a = arange(6, dtype='f4')
dt = np.dtype('f4')
rc_a = sys.getrefcount(a)
rc_dt = sys.getrefcount(dt)
it = nditer(a, [],
[['readwrite']],
op_dtypes=[dt])
rc2_a = sys.getrefcount(a)
rc2_dt = sys.getrefcount(dt)
it2 = it.copy()
assert_(sys.getrefcount(a) > rc2_a)
assert_(sys.getrefcount(dt) > rc2_dt)
it = None
assert_equal(sys.getrefcount(a), rc2_a)
assert_equal(sys.getrefcount(dt), rc2_dt)
it2 = None
assert_equal(sys.getrefcount(a), rc_a)
assert_equal(sys.getrefcount(dt), rc_dt)
del it2 # avoid pyflakes unused variable warning
def test_iter_best_order():
# The iterator should always find the iteration order
# with increasing memory addresses
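    # e.g. for a reversed view the values still come back in ascending
    # memory-address order:
    #   [int(x) for x in nditer(np.arange(3)[::-1])]  ->  [0, 1, 2]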
# Test the ordering for 1-D to 5-D shapes
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
a = arange(np.prod(shape))
# Test each combination of positive and negative strides
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, [], [['readonly']])
assert_equal([x for x in i], a)
# Fortran-order
i = nditer(aview.T, [], [['readonly']])
assert_equal([x for x in i], a)
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1), [], [['readonly']])
assert_equal([x for x in i], a)
def test_iter_c_order():
# Test forcing C order
# Test the ordering for 1-D to 5-D shapes
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
a = arange(np.prod(shape))
# Test each combination of positive and negative strides
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, order='C')
assert_equal([x for x in i], aview.ravel(order='C'))
# Fortran-order
i = nditer(aview.T, order='C')
assert_equal([x for x in i], aview.T.ravel(order='C'))
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1), order='C')
assert_equal([x for x in i],
aview.swapaxes(0, 1).ravel(order='C'))
def test_iter_f_order():
# Test forcing F order
# Test the ordering for 1-D to 5-D shapes
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
a = arange(np.prod(shape))
# Test each combination of positive and negative strides
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, order='F')
assert_equal([x for x in i], aview.ravel(order='F'))
# Fortran-order
i = nditer(aview.T, order='F')
assert_equal([x for x in i], aview.T.ravel(order='F'))
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1), order='F')
assert_equal([x for x in i],
aview.swapaxes(0, 1).ravel(order='F'))
def test_iter_c_or_f_order():
# Test forcing any contiguous (C or F) order
# Test the ordering for 1-D to 5-D shapes
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
a = arange(np.prod(shape))
# Test each combination of positive and negative strides
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, order='A')
assert_equal([x for x in i], aview.ravel(order='A'))
# Fortran-order
i = nditer(aview.T, order='A')
assert_equal([x for x in i], aview.T.ravel(order='A'))
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1), order='A')
assert_equal([x for x in i],
aview.swapaxes(0, 1).ravel(order='A'))
def test_iter_best_order_multi_index_1d():
# The multi-indices should be correct with any reordering
a = arange(4)
# 1D order
i = nditer(a, ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)])
# 1D reversed order
i = nditer(a[::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)])
def test_iter_best_order_multi_index_2d():
# The multi-indices should be correct with any reordering
a = arange(6)
# 2D C-order
i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)])
# 2D Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)])
# 2D reversed C-order
i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)])
i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)])
i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)])
# 2D reversed Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)])
i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)])
i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)])
def test_iter_best_order_multi_index_3d():
# The multi-indices should be correct with any reordering
a = arange(12)
# 3D C-order
i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)])
# 3D Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0),
(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)])
# 3D reversed C-order
i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1),
(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)])
i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1),
(1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)])
i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0),
(1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)])
# 3D reversed Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0),
(1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0),
(0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1),
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)])
def test_iter_best_order_c_index_1d():
# The C index should be correct with any reordering
a = arange(4)
# 1D order
i = nditer(a, ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 1, 2, 3])
# 1D reversed order
i = nditer(a[::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [3, 2, 1, 0])
def test_iter_best_order_c_index_2d():
# The C index should be correct with any reordering
a = arange(6)
# 2D C-order
i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5])
# 2D Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F'),
['c_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5])
# 2D reversed C-order
i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2])
i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3])
i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0])
# 2D reversed Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F')[::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2])
i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3])
i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0])
def test_iter_best_order_c_index_3d():
# The C index should be correct with any reordering
a = arange(12)
# 3D C-order
i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
# 3D Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F'),
['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11])
# 3D reversed C-order
i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5])
i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7])
i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10])
# 3D reversed Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10])
def test_iter_best_order_f_index_1d():
# The Fortran index should be correct with any reordering
a = arange(4)
# 1D order
i = nditer(a, ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 1, 2, 3])
# 1D reversed order
i = nditer(a[::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [3, 2, 1, 0])
def test_iter_best_order_f_index_2d():
# The Fortran index should be correct with any reordering
a = arange(6)
# 2D C-order
i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5])
# 2D Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F'),
['f_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5])
# 2D reversed C-order
i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4])
i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1])
i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0])
# 2D reversed Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F')[::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4])
i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1])
i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0])
def test_iter_best_order_f_index_3d():
# The Fortran index should be correct with any reordering
a = arange(12)
# 3D C-order
i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11])
# 3D Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F'),
['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
# 3D reversed C-order
i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10])
i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7])
i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5])
# 3D reversed Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5])
def test_iter_no_inner_full_coalesce():
# Check no_inner iterators which coalesce into a single inner loop
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
size = np.prod(shape)
a = arange(size)
# Test each combination of forward and backwards indexing
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (size,))
# Fortran-order
i = nditer(aview.T, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (size,))
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1),
['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (size,))
def test_iter_no_inner_dim_coalescing():
# Check no_inner iterators whose dimensions may not coalesce completely
# Skipping the last element in a dimension prevents coalescing
# with the next-bigger dimension
a = arange(24).reshape(2, 3, 4)[:,:, :-1]
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 2)
assert_equal(i[0].shape, (3,))
a = arange(24).reshape(2, 3, 4)[:, :-1,:]
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 2)
assert_equal(i[0].shape, (8,))
a = arange(24).reshape(2, 3, 4)[:-1,:,:]
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (12,))
# Even with lots of 1-sized dimensions, should still coalesce
a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1)
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (24,))
def test_iter_dim_coalescing():
# Check that the correct number of dimensions are coalesced
# Tracking a multi-index disables coalescing
a = arange(24).reshape(2, 3, 4)
i = nditer(a, ['multi_index'], [['readonly']])
assert_equal(i.ndim, 3)
# A tracked index can allow coalescing if it's compatible with the array
a3d = arange(24).reshape(2, 3, 4)
i = nditer(a3d, ['c_index'], [['readonly']])
assert_equal(i.ndim, 1)
i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']])
assert_equal(i.ndim, 3)
i = nditer(a3d.T, ['c_index'], [['readonly']])
assert_equal(i.ndim, 3)
i = nditer(a3d.T, ['f_index'], [['readonly']])
assert_equal(i.ndim, 1)
i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']])
assert_equal(i.ndim, 3)
# When C or F order is forced, coalescing may still occur
a3d = arange(24).reshape(2, 3, 4)
i = nditer(a3d, order='C')
assert_equal(i.ndim, 1)
i = nditer(a3d.T, order='C')
assert_equal(i.ndim, 3)
i = nditer(a3d, order='F')
assert_equal(i.ndim, 3)
i = nditer(a3d.T, order='F')
assert_equal(i.ndim, 1)
i = nditer(a3d, order='A')
assert_equal(i.ndim, 1)
i = nditer(a3d.T, order='A')
assert_equal(i.ndim, 1)
def test_iter_broadcasting():
# Standard NumPy broadcasting rules
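    # e.g. operands with shapes (2, 1) and (3,) broadcast to (2, 3):
    #   np.nditer([np.zeros((2, 1)), np.zeros(3)]).shape  ->  (2, 3)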
# 1D with scalar
i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (6,))
# 2D with scalar
i = nditer([arange(6).reshape(2, 3), np.int32(2)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (2, 3))
# 2D with 1D
i = nditer([arange(6).reshape(2, 3), arange(3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (2, 3))
i = nditer([arange(2).reshape(2, 1), arange(3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (2, 3))
# 2D with 2D
i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (2, 3))
# 3D with scalar
i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
# 3D with 1D
i = nditer([arange(3), arange(24).reshape(4, 2, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(3), arange(8).reshape(4, 2, 1)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
# 3D with 2D
i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
# 3D with 3D
i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3),
arange(4).reshape(4, 1, 1)],
['multi_index'], [['readonly']]*3)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
def test_iter_itershape():
# Check that allocated outputs work with a specified shape
a = np.arange(6, dtype='i2').reshape(2, 3)
i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
op_axes=[[0, 1, None], None],
itershape=(-1, -1, 4))
assert_equal(i.operands[1].shape, (2, 3, 4))
assert_equal(i.operands[1].strides, (24, 8, 2))
i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']],
op_axes=[[0, 1, None], None],
itershape=(-1, -1, 4))
assert_equal(i.operands[1].shape, (3, 2, 4))
assert_equal(i.operands[1].strides, (8, 24, 2))
i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']],
order='F',
op_axes=[[0, 1, None], None],
itershape=(-1, -1, 4))
assert_equal(i.operands[1].shape, (3, 2, 4))
assert_equal(i.operands[1].strides, (2, 6, 12))
# If we specify 1 in the itershape, it shouldn't allow broadcasting
# of that dimension to a bigger value
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['writeonly', 'allocate']],
op_axes=[[0, 1, None], None],
itershape=(-1, 1, 4))
# Test bug that for no op_axes but itershape, they are NULLed correctly
i = np.nditer([np.ones(2), None, None], itershape=(2,))
def test_iter_broadcasting_errors():
# Check that errors are thrown for bad broadcasting shapes
# 1D with 1D
assert_raises(ValueError, nditer, [arange(2), arange(3)],
[], [['readonly']]*2)
# 2D with 1D
assert_raises(ValueError, nditer,
[arange(6).reshape(2, 3), arange(2)],
[], [['readonly']]*2)
# 2D with 2D
assert_raises(ValueError, nditer,
[arange(6).reshape(2, 3), arange(9).reshape(3, 3)],
[], [['readonly']]*2)
assert_raises(ValueError, nditer,
[arange(6).reshape(2, 3), arange(4).reshape(2, 2)],
[], [['readonly']]*2)
# 3D with 3D
assert_raises(ValueError, nditer,
[arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)],
[], [['readonly']]*2)
assert_raises(ValueError, nditer,
[arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)],
[], [['readonly']]*2)
# Verify that the error message mentions the right shapes
try:
nditer([arange(2).reshape(1, 2, 1),
arange(3).reshape(1, 3),
arange(6).reshape(2, 3)],
[],
[['readonly'], ['readonly'], ['writeonly', 'no_broadcast']])
raise AssertionError('Should have raised a broadcast error')
except ValueError as e:
msg = str(e)
# The message should contain the shape of the 3rd operand
assert_(msg.find('(2,3)') >= 0,
'Message "%s" doesn\'t contain operand shape (2,3)' % msg)
# The message should contain the broadcast shape
assert_(msg.find('(1,2,3)') >= 0,
'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg)
try:
nditer([arange(6).reshape(2, 3), arange(2)],
[],
[['readonly'], ['readonly']],
op_axes=[[0, 1], [0, np.newaxis]],
itershape=(4, 3))
raise AssertionError('Should have raised a broadcast error')
except ValueError as e:
msg = str(e)
# The message should contain "shape->remappedshape" for each operand
assert_(msg.find('(2,3)->(2,3)') >= 0,
'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg)
assert_(msg.find('(2,)->(2,newaxis)') >= 0,
('Message "%s" doesn\'t contain remapped operand shape' +
'(2,)->(2,newaxis)') % msg)
# The message should contain the itershape parameter
assert_(msg.find('(4,3)') >= 0,
'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg)
try:
nditer([np.zeros((2, 1, 1)), np.zeros((2,))],
[],
[['writeonly', 'no_broadcast'], ['readonly']])
raise AssertionError('Should have raised a broadcast error')
except ValueError as e:
msg = str(e)
# The message should contain the shape of the bad operand
assert_(msg.find('(2,1,1)') >= 0,
'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg)
# The message should contain the broadcast shape
assert_(msg.find('(2,1,2)') >= 0,
'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg)
def test_iter_flags_errors():
# Check that bad combinations of flags produce errors
a = arange(6)
# Not enough operands
assert_raises(ValueError, nditer, [], [], [])
# Too many operands
assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100)
# Bad global flag
assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']])
# Bad op flag
assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']])
# Bad order parameter
assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G')
# Bad casting parameter
assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon')
# op_flags must match ops
assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2)
# Cannot track both a C and an F index
assert_raises(ValueError, nditer, a,
['c_index', 'f_index'], [['readonly']])
# Inner iteration and multi-indices/indices are incompatible
assert_raises(ValueError, nditer, a,
['external_loop', 'multi_index'], [['readonly']])
assert_raises(ValueError, nditer, a,
['external_loop', 'c_index'], [['readonly']])
assert_raises(ValueError, nditer, a,
['external_loop', 'f_index'], [['readonly']])
# Must specify exactly one of readwrite/readonly/writeonly per operand
assert_raises(ValueError, nditer, a, [], [[]])
assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']])
assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']])
assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']])
assert_raises(ValueError, nditer, a,
[], [['readonly', 'writeonly', 'readwrite']])
# Python scalars are always readonly
assert_raises(TypeError, nditer, 1.5, [], [['writeonly']])
assert_raises(TypeError, nditer, 1.5, [], [['readwrite']])
# Array scalars are always readonly
assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']])
assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']])
# Check readonly array
a.flags.writeable = False
assert_raises(ValueError, nditer, a, [], [['writeonly']])
assert_raises(ValueError, nditer, a, [], [['readwrite']])
a.flags.writeable = True
# Multi-indices available only with the multi_index flag
i = nditer(arange(6), [], [['readonly']])
assert_raises(ValueError, lambda i:i.multi_index, i)
# Index available only with an index flag
assert_raises(ValueError, lambda i:i.index, i)
# GotoCoords and GotoIndex incompatible with buffering or no_inner
def assign_multi_index(i):
i.multi_index = (0,)
def assign_index(i):
i.index = 0
def assign_iterindex(i):
i.iterindex = 0
def assign_iterrange(i):
i.iterrange = (0, 1)
i = nditer(arange(6), ['external_loop'])
assert_raises(ValueError, assign_multi_index, i)
assert_raises(ValueError, assign_index, i)
assert_raises(ValueError, assign_iterindex, i)
assert_raises(ValueError, assign_iterrange, i)
i = nditer(arange(6), ['buffered'])
assert_raises(ValueError, assign_multi_index, i)
assert_raises(ValueError, assign_index, i)
assert_raises(ValueError, assign_iterrange, i)
# Can't iterate if size is zero
assert_raises(ValueError, nditer, np.array([]))
def test_iter_slice():
a, b, c = np.arange(3), np.arange(3), np.arange(3.)
i = nditer([a, b, c], [], ['readwrite'])
with i:
i[0:2] = (3, 3)
assert_equal(a, [3, 1, 2])
assert_equal(b, [3, 1, 2])
assert_equal(c, [0, 1, 2])
i[1] = 12
assert_equal(i[0:2], [3, 12])
def test_iter_assign_mapping():
a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
casting='same_kind', op_dtypes=[np.dtype('f4')])
with it:
it.operands[0][...] = 3
it.operands[0][...] = 14
assert_equal(a, 14)
it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
casting='same_kind', op_dtypes=[np.dtype('f4')])
with it:
x = it.operands[0][-1:1]
x[...] = 14
it.operands[0][...] = -1234
assert_equal(a, -1234)
# check for no warnings on dealloc
x = None
it = None
def test_iter_nbo_align_contig():
# Check that byte order, alignment, and contig changes work
# Byte order change by requesting a specific dtype
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
assert_(a.dtype.byteorder != au.dtype.byteorder)
i = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv',
op_dtypes=[np.dtype('f4')])
with i:
# context manager triggers UPDATEIFCOPY on i at exit
assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
assert_equal(i.operands[0], a)
i.operands[0][:] = 2
assert_equal(au, [2]*6)
del i # should not raise a warning
# Byte order change by requesting NBO
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
assert_(a.dtype.byteorder != au.dtype.byteorder)
with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']],
casting='equiv') as i:
# context manager triggers UPDATEIFCOPY on i at exit
assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
assert_equal(i.operands[0], a)
i.operands[0][:] = 12345
i.operands[0][:] = 2
assert_equal(au, [2]*6)
# Unaligned input
a = np.zeros((6*4+1,), dtype='i1')[1:]
a.dtype = 'f4'
a[:] = np.arange(6, dtype='f4')
assert_(not a.flags.aligned)
# Without 'aligned', shouldn't copy
i = nditer(a, [], [['readonly']])
assert_(not i.operands[0].flags.aligned)
assert_equal(i.operands[0], a)
# With 'aligned', should make a copy
with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i:
assert_(i.operands[0].flags.aligned)
# context manager triggers UPDATEIFCOPY on i at exit
assert_equal(i.operands[0], a)
i.operands[0][:] = 3
assert_equal(a, [3]*6)
# Discontiguous input
a = arange(12)
# If it is contiguous, shouldn't copy
i = nditer(a[:6], [], [['readonly']])
assert_(i.operands[0].flags.contiguous)
assert_equal(i.operands[0], a[:6])
# If it isn't contiguous, should buffer
i = nditer(a[::2], ['buffered', 'external_loop'],
[['readonly', 'contig']],
buffersize=10)
assert_(i[0].flags.contiguous)
assert_equal(i[0], a[::2])
def test_iter_array_cast():
# Check that arrays are cast as requested
# No cast 'f4' -> 'f4'
a = np.arange(6, dtype='f4').reshape(2, 3)
i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')])
with i:
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('f4'))
# Byte-order cast '<f4' -> '>f4'
a = np.arange(6, dtype='<f4').reshape(2, 3)
with nditer(a, [], [['readwrite', 'updateifcopy']],
casting='equiv',
op_dtypes=[np.dtype('>f4')]) as i:
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('>f4'))
# Safe case 'f4' -> 'f8'
a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2)
i = nditer(a, [], [['readonly', 'copy']],
casting='safe',
op_dtypes=[np.dtype('f8')])
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('f8'))
# The memory layout of the temporary should match a (a is (48,4,16))
# except negative strides get flipped to positive strides.
assert_equal(i.operands[0].strides, (96, 8, 32))
a = a[::-1,:, ::-1]
i = nditer(a, [], [['readonly', 'copy']],
casting='safe',
op_dtypes=[np.dtype('f8')])
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('f8'))
assert_equal(i.operands[0].strides, (96, 8, 32))
# Same-kind cast 'f8' -> 'f4' -> 'f8'
a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
with nditer(a, [],
[['readwrite', 'updateifcopy']],
casting='same_kind',
op_dtypes=[np.dtype('f4')]) as i:
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('f4'))
assert_equal(i.operands[0].strides, (4, 16, 48))
# Check that WRITEBACKIFCOPY is activated at exit
i.operands[0][2, 1, 1] = -12.5
assert_(a[2, 1, 1] != -12.5)
assert_equal(a[2, 1, 1], -12.5)
a = np.arange(6, dtype='i4')[::-2]
with nditer(a, [],
[['writeonly', 'updateifcopy']],
casting='unsafe',
op_dtypes=[np.dtype('f4')]) as i:
assert_equal(i.operands[0].dtype, np.dtype('f4'))
# Even though the stride was negative in 'a', it
# becomes positive in the temporary
assert_equal(i.operands[0].strides, (4,))
i.operands[0][:] = [1, 2, 3]
assert_equal(a, [1, 2, 3])
def test_iter_array_cast_errors():
# Check that invalid casts are caught
# Need to enable copying for casts to occur
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readonly']], op_dtypes=[np.dtype('f8')])
# Also need to allow casting for casts to occur
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readonly', 'copy']], casting='no',
op_dtypes=[np.dtype('f8')])
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readonly', 'copy']], casting='equiv',
op_dtypes=[np.dtype('f8')])
assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
[['writeonly', 'updateifcopy']],
casting='no',
op_dtypes=[np.dtype('f4')])
assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
[['writeonly', 'updateifcopy']],
casting='equiv',
op_dtypes=[np.dtype('f4')])
# '<f4' -> '>f4' should not work with casting='no'
assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [],
[['readonly', 'copy']], casting='no',
op_dtypes=[np.dtype('>f4')])
# 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readwrite', 'updateifcopy']],
casting='safe',
op_dtypes=[np.dtype('f8')])
assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
[['readwrite', 'updateifcopy']],
casting='safe',
op_dtypes=[np.dtype('f4')])
# 'f4' -> 'i4' is neither a safe nor a same-kind cast
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readonly', 'copy']],
casting='same_kind',
op_dtypes=[np.dtype('i4')])
assert_raises(TypeError, nditer, arange(2, dtype='i4'), [],
[['writeonly', 'updateifcopy']],
casting='same_kind',
op_dtypes=[np.dtype('f4')])
def test_iter_scalar_cast():
# Check that scalars are cast as requested
# No cast 'f4' -> 'f4'
i = nditer(np.float32(2.5), [], [['readonly']],
op_dtypes=[np.dtype('f4')])
assert_equal(i.dtypes[0], np.dtype('f4'))
assert_equal(i.value.dtype, np.dtype('f4'))
assert_equal(i.value, 2.5)
# Safe cast 'f4' -> 'f8'
i = nditer(np.float32(2.5), [],
[['readonly', 'copy']],
casting='safe',
op_dtypes=[np.dtype('f8')])
assert_equal(i.dtypes[0], np.dtype('f8'))
assert_equal(i.value.dtype, np.dtype('f8'))
assert_equal(i.value, 2.5)
# Same-kind cast 'f8' -> 'f4'
i = nditer(np.float64(2.5), [],
[['readonly', 'copy']],
casting='same_kind',
op_dtypes=[np.dtype('f4')])
assert_equal(i.dtypes[0], np.dtype('f4'))
assert_equal(i.value.dtype, np.dtype('f4'))
assert_equal(i.value, 2.5)
# Unsafe cast 'f8' -> 'i4'
i = nditer(np.float64(3.0), [],
[['readonly', 'copy']],
casting='unsafe',
op_dtypes=[np.dtype('i4')])
assert_equal(i.dtypes[0], np.dtype('i4'))
assert_equal(i.value.dtype, np.dtype('i4'))
assert_equal(i.value, 3)
# Readonly scalars may be cast even without setting COPY or BUFFERED
i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')])
assert_equal(i[0].dtype, np.dtype('f8'))
assert_equal(i[0], 3.)
def test_iter_scalar_cast_errors():
# Check that invalid casts are caught
# Need to allow copying/buffering for write casts of scalars to occur
assert_raises(TypeError, nditer, np.float32(2), [],
[['readwrite']], op_dtypes=[np.dtype('f8')])
assert_raises(TypeError, nditer, 2.5, [],
[['readwrite']], op_dtypes=[np.dtype('f4')])
# 'f8' -> 'f4' isn't a safe cast if the value would overflow
assert_raises(TypeError, nditer, np.float64(1e60), [],
[['readonly']],
casting='safe',
op_dtypes=[np.dtype('f4')])
# 'f4' -> 'i4' is neither a safe nor a same-kind cast
assert_raises(TypeError, nditer, np.float32(2), [],
[['readonly']],
casting='same_kind',
op_dtypes=[np.dtype('i4')])
def test_iter_object_arrays_basic():
# Check that object arrays work
obj = {'a':3,'b':'d'}
a = np.array([[1, 2, 3], None, obj, None], dtype='O')
if HAS_REFCOUNT:
rc = sys.getrefcount(obj)
# Need to allow references for object arrays
assert_raises(TypeError, nditer, a)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(obj), rc)
i = nditer(a, ['refs_ok'], ['readonly'])
vals = [x_[()] for x_ in i]
assert_equal(np.array(vals, dtype='O'), a)
vals, i, x = [None]*3
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(obj), rc)
i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
['readonly'], order='C')
assert_(i.iterationneedsapi)
vals = [x_[()] for x_ in i]
assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F'))
vals, i, x = [None]*3
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(obj), rc)
i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
['readwrite'], order='C')
with i:
for x in i:
x[...] = None
vals, i, x = [None]*3
if HAS_REFCOUNT:
assert_(sys.getrefcount(obj) == rc-1)
assert_equal(a, np.array([None]*4, dtype='O'))
def test_iter_object_arrays_conversions():
# Conversions to/from objects
a = np.arange(6, dtype='O')
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='i4')
with i:
for x in i:
x[...] += 1
assert_equal(a, np.arange(6)+1)
a = np.arange(6, dtype='i4')
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='O')
with i:
for x in i:
x[...] += 1
assert_equal(a, np.arange(6)+1)
# Non-contiguous object array
a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')])
a = a['a']
a[:] = np.arange(6)
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='i4')
with i:
for x in i:
x[...] += 1
assert_equal(a, np.arange(6)+1)
    # Non-contiguous value array
a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')])
a = a['a']
a[:] = np.arange(6) + 98172488
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='O')
with i:
ob = i[0][()]
if HAS_REFCOUNT:
rc = sys.getrefcount(ob)
for x in i:
x[...] += 1
if HAS_REFCOUNT:
assert_(sys.getrefcount(ob) == rc-1)
assert_equal(a, np.arange(6)+98172489)
def test_iter_common_dtype():
# Check that the iterator finds a common data type correctly
i = nditer([array([3], dtype='f4'), array([0], dtype='f8')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('f8'))
assert_equal(i.dtypes[1], np.dtype('f8'))
i = nditer([array([3], dtype='i4'), array([0], dtype='f4')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('f8'))
assert_equal(i.dtypes[1], np.dtype('f8'))
i = nditer([array([3], dtype='f4'), array(0, dtype='f8')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='same_kind')
assert_equal(i.dtypes[0], np.dtype('f4'))
assert_equal(i.dtypes[1], np.dtype('f4'))
i = nditer([array([3], dtype='u4'), array(0, dtype='i4')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('u4'))
assert_equal(i.dtypes[1], np.dtype('u4'))
i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('i8'))
assert_equal(i.dtypes[1], np.dtype('i8'))
i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'),
array([2j], dtype='c8'), array([9], dtype='f8')],
['common_dtype'],
[['readonly', 'copy']]*4,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('c16'))
assert_equal(i.dtypes[1], np.dtype('c16'))
assert_equal(i.dtypes[2], np.dtype('c16'))
assert_equal(i.dtypes[3], np.dtype('c16'))
assert_equal(i.value, (3, -12, 2j, 9))
# When allocating outputs, other outputs aren't factored in
i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [],
[['readonly', 'copy'],
['writeonly', 'allocate'],
['writeonly']],
casting='safe')
assert_equal(i.dtypes[0], np.dtype('i4'))
assert_equal(i.dtypes[1], np.dtype('i4'))
assert_equal(i.dtypes[2], np.dtype('c16'))
# But, if common data types are requested, they are
i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')],
['common_dtype'],
[['readonly', 'copy'],
['writeonly', 'allocate'],
['writeonly']],
casting='safe')
assert_equal(i.dtypes[0], np.dtype('c16'))
assert_equal(i.dtypes[1], np.dtype('c16'))
assert_equal(i.dtypes[2], np.dtype('c16'))
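# Illustrative sketch (the helper name is ours): the 'common_dtype'
# resolution exercised above should agree with np.result_type for
# array operands.
def _example_common_dtype_matches_result_type():
    ops = [array([3], dtype='i4'), array([2j], dtype='c8')]
    i = nditer(ops, ['common_dtype'], [['readonly', 'copy']]*2,
               casting='safe')
    expected = np.result_type(*ops)
    assert_equal(i.dtypes[0], expected)
    assert_equal(i.dtypes[1], expected)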
def test_iter_copy_if_overlap():
# Ensure the iterator makes copies on read/write overlap, if requested
# Copy not needed, 1 op
for flag in ['readonly', 'writeonly', 'readwrite']:
a = arange(10)
i = nditer([a], ['copy_if_overlap'], [[flag]])
with i:
assert_(i.operands[0] is a)
# Copy needed, 2 ops, read-write overlap
x = arange(10)
a = x[1:]
b = x[:-1]
with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
assert_(not np.shares_memory(*i.operands))
# Copy not needed with elementwise, 2 ops, exactly same arrays
x = arange(10)
a = x
b = x
i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'],
['readwrite', 'overlap_assume_elementwise']])
with i:
assert_(i.operands[0] is a and i.operands[1] is b)
with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b))
# Copy not needed, 2 ops, no overlap
x = arange(10)
a = x[::2]
b = x[1::2]
i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']])
assert_(i.operands[0] is a and i.operands[1] is b)
# Copy needed, 2 ops, read-write overlap
x = arange(4, dtype=np.int8)
a = x[3:]
b = x.view(np.int32)[:1]
with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i:
assert_(not np.shares_memory(*i.operands))
# Copy needed, 3 ops, read-write overlap
for flag in ['writeonly', 'readwrite']:
x = np.ones([10, 10])
a = x
b = x.T
c = x
with nditer([a, b, c], ['copy_if_overlap'],
[['readonly'], ['readonly'], [flag]]) as i:
a2, b2, c2 = i.operands
assert_(not np.shares_memory(a2, c2))
assert_(not np.shares_memory(b2, c2))
# Copy not needed, 3 ops, read-only overlap
x = np.ones([10, 10])
a = x
b = x.T
c = x
i = nditer([a, b, c], ['copy_if_overlap'],
[['readonly'], ['readonly'], ['readonly']])
a2, b2, c2 = i.operands
assert_(a is a2)
assert_(b is b2)
assert_(c is c2)
# Copy not needed, 3 ops, read-only overlap
x = np.ones([10, 10])
a = x
b = np.ones([10, 10])
c = x.T
i = nditer([a, b, c], ['copy_if_overlap'],
[['readonly'], ['writeonly'], ['readonly']])
a2, b2, c2 = i.operands
assert_(a is a2)
assert_(b is b2)
assert_(c is c2)
# Copy not needed, 3 ops, write-only overlap
x = np.arange(7)
a = x[:3]
b = x[3:6]
c = x[4:7]
i = nditer([a, b, c], ['copy_if_overlap'],
[['readonly'], ['writeonly'], ['writeonly']])
a2, b2, c2 = i.operands
assert_(a is a2)
assert_(b is b2)
assert_(c is c2)
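# A small sketch (helper name is ours) of the overlap checks that drive the
# copies above: shifted views of one buffer overlap, interleaved views do not.
def _example_overlap_detection():
    x = arange(10)
    assert_(np.shares_memory(x[1:], x[:-1]))
    assert_(not np.shares_memory(x[::2], x[1::2]))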
def test_iter_op_axes():
# Check that custom axes work
# Reverse the axes
a = arange(6).reshape(2, 3)
i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]])
assert_(all([x == y for (x, y) in i]))
a = arange(24).reshape(2, 3, 4)
i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None])
assert_(all([x == y for (x, y) in i]))
# Broadcast 1D to any dimension
a = arange(1, 31).reshape(2, 3, 5)
b = arange(1, 3)
i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]])
assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel())
b = arange(1, 4)
i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]])
assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel())
b = arange(1, 6)
i = nditer([a, b], [], [['readonly']]*2,
op_axes=[None, [np.newaxis, np.newaxis, 0]])
assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel())
# Inner product-style broadcasting
a = arange(24).reshape(2, 3, 4)
b = arange(40).reshape(5, 2, 4)
i = nditer([a, b], ['multi_index'], [['readonly']]*2,
op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]])
assert_equal(i.shape, (2, 3, 5, 2))
# Matrix product-style broadcasting
a = arange(12).reshape(3, 4)
b = arange(20).reshape(4, 5)
i = nditer([a, b], ['multi_index'], [['readonly']]*2,
op_axes=[[0, -1], [-1, 1]])
assert_equal(i.shape, (3, 5))
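# A minimal sketch (helper name is ours), mirroring the outer-product
# pattern from the nditer documentation: op_axes maps each input's single
# axis onto a different iterator axis and the output is allocated with the
# full broadcast shape.
def _example_op_axes_outer_product():
    a = arange(3)
    b = arange(4)
    i = nditer([a, b, None], [],
               [['readonly'], ['readonly'], ['writeonly', 'allocate']],
               op_axes=[[0, -1], [-1, 0], None])
    with i:
        for x, y, z in i:
            z[...] = x * y
        result = i.operands[2]
    assert_equal(result, np.outer(a, b))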
def test_iter_op_axes_errors():
# Check that custom axes throws errors for bad inputs
# Wrong number of items in op_axes
a = arange(6).reshape(2, 3)
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0], [1], [0]])
# Out of bounds items in op_axes
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[2, 1], [0, 1]])
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [2, -1]])
# Duplicate items in op_axes
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 0], [0, 1]])
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [1, 1]])
# Different sized arrays in op_axes
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [0, 1, 0]])
# Non-broadcastable dimensions in the result
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [1, 0]])
def test_iter_copy():
# Check that copying the iterator works correctly
a = arange(24).reshape(2, 3, 4)
# Simple iterator
i = nditer(a)
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
i.iterindex = 3
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
# Buffered iterator
i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3)
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
i.iterindex = 3
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
i.iterrange = (3, 9)
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
i.iterrange = (2, 18)
next(i)
next(i)
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
# Casting iterator
with nditer(a, ['buffered'], order='F', casting='unsafe',
op_dtypes='f8', buffersize=5) as i:
j = i.copy()
assert_equal([x[()] for x in j], a.ravel(order='F'))
a = arange(24, dtype='<i4').reshape(2, 3, 4)
with nditer(a, ['buffered'], order='F', casting='unsafe',
op_dtypes='>f8', buffersize=5) as i:
j = i.copy()
assert_equal([x[()] for x in j], a.ravel(order='F'))
@pytest.mark.parametrize("dtype", np.typecodes["All"])
@pytest.mark.parametrize("loop_dtype", np.typecodes["All"])
@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
def test_iter_copy_casts(dtype, loop_dtype):
# Ensure the dtype is never flexible:
if loop_dtype.lower() == "m":
loop_dtype = loop_dtype + "[ms]"
elif np.dtype(loop_dtype).itemsize == 0:
loop_dtype = loop_dtype + "50"
# Make things a bit more interesting by requiring a byte-swap as well:
arr = np.ones(1000, dtype=np.dtype(dtype).newbyteorder())
try:
expected = arr.astype(loop_dtype)
except Exception:
# Some casts are not possible, do not worry about them
return
it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"],
op_dtypes=[loop_dtype], casting="unsafe")
if np.issubdtype(np.dtype(loop_dtype), np.number):
        # Casting to strings may give odd results, but for simple numeric
        # dtypes do not just rely on astype being correct; sanity-check that
        # the expected value really is all ones:
assert_array_equal(expected, np.ones(1000, dtype=loop_dtype))
it_copy = it.copy()
res = next(it)
del it
res_copy = next(it_copy)
del it_copy
assert_array_equal(res, expected)
assert_array_equal(res_copy, expected)
def test_iter_copy_casts_structured():
# Test a complicated structured dtype for casting, as it requires
# both multiple steps and a more complex casting setup.
# Includes a structured -> unstructured (any to object), and many other
# casts, which cause this to require all steps in the casting machinery
# one level down as well as the iterator copy (which uses NpyAuxData clone)
in_dtype = np.dtype([("a", np.dtype("i,")),
("b", np.dtype(">i,<i,>d,S17,>d,(3)f,O,i1"))])
out_dtype = np.dtype([("a", np.dtype("O")),
("b", np.dtype(">i,>i,S17,>d,>U3,(3)d,i1,O"))])
arr = np.ones(1000, dtype=in_dtype)
it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"],
op_dtypes=[out_dtype], casting="unsafe")
it_copy = it.copy()
res1 = next(it)
del it
res2 = next(it_copy)
del it_copy
expected = arr["a"].astype(out_dtype["a"])
assert_array_equal(res1["a"], expected)
assert_array_equal(res2["a"], expected)
for field in in_dtype["b"].names:
# Note that the .base avoids the subarray field
expected = arr["b"][field].astype(out_dtype["b"][field].base)
assert_array_equal(res1["b"][field], expected)
assert_array_equal(res2["b"][field], expected)
def test_iter_allocate_output_simple():
# Check that the iterator will properly allocate outputs
# Simple case
a = arange(6)
i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')])
assert_equal(i.operands[1].shape, a.shape)
assert_equal(i.operands[1].dtype, np.dtype('f4'))
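# Sketch (helper name is ours): without op_dtypes, the allocated output
# simply inherits the single input's dtype and shape.
def _example_allocate_default_dtype():
    a = arange(6, dtype='f4')
    i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']])
    assert_equal(i.operands[1].dtype, a.dtype)
    assert_equal(i.operands[1].shape, a.shape)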
def test_iter_allocate_output_buffered_readwrite():
# Allocated output with buffering + delay_bufalloc
a = arange(6)
i = nditer([a, None], ['buffered', 'delay_bufalloc'],
[['readonly'], ['allocate', 'readwrite']])
with i:
i.operands[1][:] = 1
i.reset()
for x in i:
x[1][...] += x[0][...]
assert_equal(i.operands[1], a+1)
def test_iter_allocate_output_itorder():
# The allocated output should match the iteration order
# C-order input, best iteration order
a = arange(6, dtype='i4').reshape(2, 3)
i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')])
assert_equal(i.operands[1].shape, a.shape)
assert_equal(i.operands[1].strides, a.strides)
assert_equal(i.operands[1].dtype, np.dtype('f4'))
# F-order input, best iteration order
a = arange(24, dtype='i4').reshape(2, 3, 4).T
i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')])
assert_equal(i.operands[1].shape, a.shape)
assert_equal(i.operands[1].strides, a.strides)
assert_equal(i.operands[1].dtype, np.dtype('f4'))
# Non-contiguous input, C iteration order
a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1)
i = nditer([a, None], [],
[['readonly'], ['writeonly', 'allocate']],
order='C',
op_dtypes=[None, np.dtype('f4')])
assert_equal(i.operands[1].shape, a.shape)
assert_equal(i.operands[1].strides, (32, 16, 4))
assert_equal(i.operands[1].dtype, np.dtype('f4'))
def test_iter_allocate_output_opaxes():
# Specifying op_axes should work
a = arange(24, dtype='i4').reshape(2, 3, 4)
i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']],
op_dtypes=[np.dtype('u4'), None],
op_axes=[[1, 2, 0], None])
assert_equal(i.operands[0].shape, (4, 2, 3))
assert_equal(i.operands[0].strides, (4, 48, 16))
assert_equal(i.operands[0].dtype, np.dtype('u4'))
def test_iter_allocate_output_types_promotion():
# Check type promotion of automatic outputs
i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('f8'))
i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('f8'))
i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('f4'))
i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('u4'))
i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('i8'))
def test_iter_allocate_output_types_byte_order():
# Verify the rules for byte order changes
# When there's just one input, the output type exactly matches
a = array([3], dtype='u4').newbyteorder()
i = nditer([a, None], [],
[['readonly'], ['writeonly', 'allocate']])
assert_equal(i.dtypes[0], i.dtypes[1])
# With two or more inputs, the output type is in native byte order
i = nditer([a, a, None], [],
[['readonly'], ['readonly'], ['writeonly', 'allocate']])
assert_(i.dtypes[0] != i.dtypes[2])
assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2])
def test_iter_allocate_output_types_scalar():
# If the inputs are all scalars, the output should be a scalar
i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [],
[['writeonly', 'allocate']] + [['readonly']]*4)
assert_equal(i.operands[0].dtype, np.dtype('complex128'))
assert_equal(i.operands[0].ndim, 0)
def test_iter_allocate_output_subtype():
# Make sure that the subtype with priority wins
class MyNDArray(np.ndarray):
__array_priority__ = 15
# subclass vs ndarray
a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = np.arange(4).reshape(2, 2).T
i = nditer([a, b, None], [],
[['readonly'], ['readonly'], ['writeonly', 'allocate']])
assert_equal(type(a), type(i.operands[2]))
assert_(type(b) is not type(i.operands[2]))
assert_equal(i.operands[2].shape, (2, 2))
# If subtypes are disabled, we should get back an ndarray.
i = nditer([a, b, None], [],
[['readonly'], ['readonly'],
['writeonly', 'allocate', 'no_subtype']])
assert_equal(type(b), type(i.operands[2]))
assert_(type(a) is not type(i.operands[2]))
assert_equal(i.operands[2].shape, (2, 2))
def test_iter_allocate_output_errors():
# Check that the iterator will throw errors for bad output allocations
# Need an input if no output data type is specified
a = arange(6)
assert_raises(TypeError, nditer, [a, None], [],
[['writeonly'], ['writeonly', 'allocate']])
# Allocated output should be flagged for writing
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['allocate', 'readonly']])
# Allocated output can't have buffering without delayed bufalloc
assert_raises(ValueError, nditer, [a, None], ['buffered'],
['allocate', 'readwrite'])
# Must specify at least one input
assert_raises(ValueError, nditer, [None, None], [],
[['writeonly', 'allocate'],
['writeonly', 'allocate']],
op_dtypes=[np.dtype('f4'), np.dtype('f4')])
# If using op_axes, must specify all the axes
a = arange(24, dtype='i4').reshape(2, 3, 4)
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')],
op_axes=[None, [0, np.newaxis, 1]])
# If using op_axes, the axes must be within bounds
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')],
op_axes=[None, [0, 3, 1]])
# If using op_axes, there can't be duplicates
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')],
op_axes=[None, [0, 2, 1, 0]])
    # For a reduction, not all axes need to be specified, but a hole
    # in op_axes (an input axis that is skipped) is an error.
a = arange(24, dtype='i4').reshape(2, 3, 4)
assert_raises(ValueError, nditer, [a, None], ["reduce_ok"],
[['readonly'], ['readwrite', 'allocate']],
op_dtypes=[None, np.dtype('f4')],
op_axes=[None, [0, np.newaxis, 2]])
def test_iter_remove_axis():
a = arange(24).reshape(2, 3, 4)
i = nditer(a, ['multi_index'])
i.remove_axis(1)
assert_equal([x for x in i], a[:, 0,:].ravel())
a = a[::-1,:,:]
i = nditer(a, ['multi_index'])
i.remove_axis(0)
assert_equal([x for x in i], a[0,:,:].ravel())
def test_iter_remove_multi_index_inner_loop():
# Check that removing multi-index support works
a = arange(24).reshape(2, 3, 4)
i = nditer(a, ['multi_index'])
assert_equal(i.ndim, 3)
assert_equal(i.shape, (2, 3, 4))
assert_equal(i.itviews[0].shape, (2, 3, 4))
# Removing the multi-index tracking causes all dimensions to coalesce
before = [x for x in i]
i.remove_multi_index()
after = [x for x in i]
assert_equal(before, after)
assert_equal(i.ndim, 1)
assert_raises(ValueError, lambda i:i.shape, i)
assert_equal(i.itviews[0].shape, (24,))
# Removing the inner loop means there's just one iteration
i.reset()
assert_equal(i.itersize, 24)
assert_equal(i[0].shape, tuple())
i.enable_external_loop()
assert_equal(i.itersize, 24)
assert_equal(i[0].shape, (24,))
assert_equal(i.value, arange(24))
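# Sketch (helper name is ours): for a C-contiguous array, 'external_loop'
# coalesces everything into a single 1-D chunk, just like
# enable_external_loop() did above.
def _example_external_loop_single_chunk():
    a = arange(24).reshape(2, 3, 4)
    chunks = [x.copy() for x in nditer(a, ['external_loop'])]
    assert_equal(len(chunks), 1)
    assert_equal(chunks[0], a.ravel())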
def test_iter_iterindex():
# Make sure iterindex works
buffersize = 5
a = arange(24).reshape(4, 3, 2)
for flags in ([], ['buffered']):
i = nditer(a, flags, buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 2
assert_equal(iter_iterindices(i), list(range(2, 24)))
i = nditer(a, flags, order='F', buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 5
assert_equal(iter_iterindices(i), list(range(5, 24)))
i = nditer(a[::-1], flags, order='F', buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 9
assert_equal(iter_iterindices(i), list(range(9, 24)))
i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 13
assert_equal(iter_iterindices(i), list(range(13, 24)))
i = nditer(a[::1, ::-1], flags, buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 23
assert_equal(iter_iterindices(i), list(range(23, 24)))
i.reset()
i.iterindex = 2
assert_equal(iter_iterindices(i), list(range(2, 24)))
def test_iter_iterrange():
# Make sure getting and resetting the iterrange works
buffersize = 5
a = arange(24, dtype='i4').reshape(4, 3, 2)
a_fort = a.ravel(order='F')
i = nditer(a, ['ranged'], ['readonly'], order='F',
buffersize=buffersize)
assert_equal(i.iterrange, (0, 24))
assert_equal([x[()] for x in i], a_fort)
for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
i.iterrange = r
assert_equal(i.iterrange, r)
assert_equal([x[()] for x in i], a_fort[r[0]:r[1]])
i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F',
op_dtypes='f8', buffersize=buffersize)
assert_equal(i.iterrange, (0, 24))
assert_equal([x[()] for x in i], a_fort)
for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
i.iterrange = r
assert_equal(i.iterrange, r)
assert_equal([x[()] for x in i], a_fort[r[0]:r[1]])
def get_array(i):
val = np.array([], dtype='f8')
for x in i:
val = np.concatenate((val, x))
return val
i = nditer(a, ['ranged', 'buffered', 'external_loop'],
['readonly'], order='F',
op_dtypes='f8', buffersize=buffersize)
assert_equal(i.iterrange, (0, 24))
assert_equal(get_array(i), a_fort)
for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
i.iterrange = r
assert_equal(i.iterrange, r)
assert_equal(get_array(i), a_fort[r[0]:r[1]])
def test_iter_buffering():
# Test buffering with several buffer sizes and types
arrays = []
# F-order swapped array
arrays.append(np.arange(24,
dtype='c16').reshape(2, 3, 4).T.newbyteorder().byteswap())
# Contiguous 1-dimensional array
arrays.append(np.arange(10, dtype='f4'))
# Unaligned array
a = np.zeros((4*16+1,), dtype='i1')[1:]
a.dtype = 'i4'
a[:] = np.arange(16, dtype='i4')
arrays.append(a)
# 4-D F-order array
arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T)
for a in arrays:
for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024):
vals = []
i = nditer(a, ['buffered', 'external_loop'],
[['readonly', 'nbo', 'aligned']],
order='C',
casting='equiv',
buffersize=buffersize)
while not i.finished:
assert_(i[0].size <= buffersize)
vals.append(i[0].copy())
i.iternext()
assert_equal(np.concatenate(vals), a.ravel(order='C'))
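# Sketch (helper name is ours): the explicit finished/iternext() loop used
# above is equivalent to plain Python iteration over the nditer.
def _example_iternext_equivalence():
    a = arange(10, dtype='f4')
    explicit = []
    it = nditer(a)
    while not it.finished:
        explicit.append(it[0].copy())
        it.iternext()
    implicit = [x.copy() for x in nditer(a)]
    assert_equal(explicit, implicit)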
def test_iter_write_buffering():
# Test that buffering of writes is working
# F-order swapped array
a = np.arange(24).reshape(2, 3, 4).T.newbyteorder().byteswap()
i = nditer(a, ['buffered'],
[['readwrite', 'nbo', 'aligned']],
casting='equiv',
order='C',
buffersize=16)
x = 0
with i:
while not i.finished:
i[0] = x
x += 1
i.iternext()
assert_equal(a.ravel(order='C'), np.arange(24))
def test_iter_buffering_delayed_alloc():
# Test that delaying buffer allocation works
a = np.arange(6)
b = np.arange(1, dtype='f4')
i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'],
['readwrite'],
casting='unsafe',
op_dtypes='f4')
assert_(i.has_delayed_bufalloc)
assert_raises(ValueError, lambda i:i.multi_index, i)
assert_raises(ValueError, lambda i:i[0], i)
assert_raises(ValueError, lambda i:i[0:2], i)
def assign_iter(i):
i[0] = 0
assert_raises(ValueError, assign_iter, i)
i.reset()
assert_(not i.has_delayed_bufalloc)
assert_equal(i.multi_index, (0,))
with i:
assert_equal(i[0], 0)
i[1] = 1
assert_equal(i[0:2], [0, 1])
assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6)))
def test_iter_buffered_cast_simple():
# Test that buffering can handle a simple cast
a = np.arange(10, dtype='f4')
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('f8')],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f4'))
def test_iter_buffered_cast_byteswapped():
# Test that buffering can handle a cast which requires swap->cast->swap
a = np.arange(10, dtype='f4').newbyteorder().byteswap()
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('f8').newbyteorder()],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f4'))
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning)
a = np.arange(10, dtype='f8').newbyteorder().byteswap()
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='unsafe',
op_dtypes=[np.dtype('c8').newbyteorder()],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f8'))
def test_iter_buffered_cast_byteswapped_complex():
# Test that buffering can handle a cast which requires swap->cast->copy
a = np.arange(10, dtype='c8').newbyteorder().byteswap()
a += 2j
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('c16')],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
a = np.arange(10, dtype='c8')
a += 2j
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('c16').newbyteorder()],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap()
a += 2j
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('c16')],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j)
a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap()
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('f4')],
buffersize=7)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype=np.longdouble))
def test_iter_buffered_cast_structured_type():
# Tests buffering of structured types
# simple -> struct type (duplicates the value)
sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
a = np.arange(3, dtype='f4') + 0.5
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt)
vals = [np.array(x) for x in i]
assert_equal(vals[0]['a'], 0.5)
assert_equal(vals[0]['b'], 0)
assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
assert_equal(vals[0]['d'], 0.5)
assert_equal(vals[1]['a'], 1.5)
assert_equal(vals[1]['b'], 1)
assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
assert_equal(vals[1]['d'], 1.5)
assert_equal(vals[0].dtype, np.dtype(sdt))
# object -> struct type
sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
a = np.zeros((3,), dtype='O')
a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5)
a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5)
a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5)
if HAS_REFCOUNT:
rc = sys.getrefcount(a[0])
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt)
vals = [x.copy() for x in i]
assert_equal(vals[0]['a'], 0.5)
assert_equal(vals[0]['b'], 0)
assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
assert_equal(vals[0]['d'], 0.5)
assert_equal(vals[1]['a'], 1.5)
assert_equal(vals[1]['b'], 1)
assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
assert_equal(vals[1]['d'], 1.5)
assert_equal(vals[0].dtype, np.dtype(sdt))
vals, i, x = [None]*3
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(a[0]), rc)
# single-field struct type -> simple
sdt = [('a', 'f4')]
a = np.array([(5.5,), (8,)], dtype=sdt)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes='i4')
assert_equal([x_[()] for x_ in i], [5, 8])
# make sure multi-field struct type -> simple doesn't work
sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
assert_raises(TypeError, lambda: (
nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes='i4')))
# struct type -> struct type (field-wise copy)
sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')]
a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
assert_equal([np.array(x_) for x_ in i],
[np.array((1, 2, 3), dtype=sdt2),
np.array((4, 5, 6), dtype=sdt2)])
def test_iter_buffered_cast_structured_type_failure_with_cleanup():
# make sure struct type -> struct type with different
# number of fields fails
sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
sdt2 = [('b', 'O'), ('a', 'f8')]
a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
for intent in ["readwrite", "readonly", "writeonly"]:
# If the following assert fails, the place where the error is raised
# within nditer may change. That is fine, but it may make sense for
# a new (hard to design) test to replace it. The `simple_arr` is
# designed to require a multi-step cast (due to having fields).
assert np.can_cast(a.dtype, sdt2, casting="unsafe")
simple_arr = np.array([1, 2], dtype="i,i") # requires clean up
with pytest.raises(ValueError):
nditer((simple_arr, a), ['buffered', 'refs_ok'], [intent, intent],
casting='unsafe', op_dtypes=["f,f", sdt2])
def test_buffered_cast_error_paths():
with pytest.raises(ValueError):
        # Casting the 'S1' input into the 'i' buffer fails
np.nditer((np.array("a", dtype="S1"),), op_dtypes=["i"],
casting="unsafe", flags=["buffered"])
    # The 'S1' buffer is cast back into the 'i' output at writeback time
it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"],
op_flags=["writeonly"], casting="unsafe", flags=["buffered"])
with pytest.raises(ValueError):
with it:
buf = next(it)
buf[...] = "a" # cannot be converted to int.
@pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.")
def test_buffered_cast_error_paths_unraisable():
    # The following gives an unraisable error. Pytest sometimes captures that
    # (depending on the Python and/or pytest version), so on Python >= 3.8
    # this can probably be changed to check for
    # pytest.PytestUnraisableExceptionWarning:
code = textwrap.dedent("""
import numpy as np
it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"],
op_flags=["writeonly"], casting="unsafe", flags=["buffered"])
buf = next(it)
buf[...] = "a"
del buf, it # Flushing only happens during deallocate right now.
""")
res = subprocess.check_output([sys.executable, "-c", code],
stderr=subprocess.STDOUT, text=True)
assert "ValueError" in res
def test_iter_buffered_cast_subarray():
# Tests buffering of subarrays
# one element -> many (copies it to all)
sdt1 = [('a', 'f4')]
sdt2 = [('a', 'f8', (3, 2, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
for x, count in zip(i, list(range(6))):
assert_(np.all(x['a'] == count))
# one element -> many -> back (copies it to all)
sdt1 = [('a', 'O', (1, 1))]
sdt2 = [('a', 'O', (3, 2, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'][:, 0, 0] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
casting='unsafe',
op_dtypes=sdt2)
with i:
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_(np.all(x['a'] == count))
x['a'][0] += 2
count += 1
assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2)
# many -> one element -> back (copies just element 0)
sdt1 = [('a', 'O', (3, 2, 2))]
sdt2 = [('a', 'O', (1,))]
a = np.zeros((6,), dtype=sdt1)
a['a'][:, 0, 0, 0] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
casting='unsafe',
op_dtypes=sdt2)
with i:
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], count)
x['a'] += 2
count += 1
assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2)
# many -> one element -> back (copies just element 0)
sdt1 = [('a', 'f8', (3, 2, 2))]
sdt2 = [('a', 'O', (1,))]
a = np.zeros((6,), dtype=sdt1)
a['a'][:, 0, 0, 0] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], count)
count += 1
# many -> one element (copies just element 0)
sdt1 = [('a', 'O', (3, 2, 2))]
sdt2 = [('a', 'f4', (1,))]
a = np.zeros((6,), dtype=sdt1)
a['a'][:, 0, 0, 0] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], count)
count += 1
# many -> matching shape (straightforward copy)
sdt1 = [('a', 'O', (3, 2, 2))]
sdt2 = [('a', 'f4', (3, 2, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], a[count]['a'])
count += 1
# vector -> smaller vector (truncates)
sdt1 = [('a', 'f8', (6,))]
sdt2 = [('a', 'f4', (2,))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*6).reshape(6, 6)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], a[count]['a'][:2])
count += 1
# vector -> bigger vector (pads with zeros)
sdt1 = [('a', 'f8', (2,))]
sdt2 = [('a', 'f4', (6,))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*2).reshape(6, 2)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'][:2], a[count]['a'])
assert_equal(x['a'][2:], [0, 0, 0, 0])
count += 1
# vector -> matrix (broadcasts)
sdt1 = [('a', 'f8', (2,))]
sdt2 = [('a', 'f4', (2, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*2).reshape(6, 2)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'][0], a[count]['a'])
assert_equal(x['a'][1], a[count]['a'])
count += 1
# vector -> matrix (broadcasts and zero-pads)
sdt1 = [('a', 'f8', (2, 1))]
sdt2 = [('a', 'f4', (3, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*2).reshape(6, 2, 1)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
assert_equal(x['a'][:2, 1], a[count]['a'][:, 0])
assert_equal(x['a'][2,:], [0, 0])
count += 1
# matrix -> matrix (truncates and zero-pads)
sdt1 = [('a', 'f8', (2, 3))]
sdt2 = [('a', 'f4', (3, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*2*3).reshape(6, 2, 3)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
assert_equal(x['a'][:2, 1], a[count]['a'][:, 1])
assert_equal(x['a'][2,:], [0, 0])
count += 1
def test_iter_buffering_badwriteback():
# Writing back from a buffer cannot combine elements
    # a needs write buffering, but has a broadcast dimension
a = np.arange(6).reshape(2, 3, 1)
b = np.arange(12).reshape(2, 3, 2)
assert_raises(ValueError, nditer, [a, b],
['buffered', 'external_loop'],
[['readwrite'], ['writeonly']],
order='C')
# But if a is readonly, it's fine
nditer([a, b], ['buffered', 'external_loop'],
[['readonly'], ['writeonly']],
order='C')
# If a has just one element, it's fine too (constant 0 stride, a reduction)
a = np.arange(1).reshape(1, 1, 1)
nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'],
[['readwrite'], ['writeonly']],
order='C')
# check that it fails on other dimensions too
a = np.arange(6).reshape(1, 3, 2)
assert_raises(ValueError, nditer, [a, b],
['buffered', 'external_loop'],
[['readwrite'], ['writeonly']],
order='C')
a = np.arange(4).reshape(2, 1, 2)
assert_raises(ValueError, nditer, [a, b],
['buffered', 'external_loop'],
[['readwrite'], ['writeonly']],
order='C')
def test_iter_buffering_string():
# Safe casting disallows shrinking strings
a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_)
assert_equal(a.dtype, np.dtype('S4'))
assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
op_dtypes='S2')
i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6')
assert_equal(i[0], b'abc')
assert_equal(i[0].dtype, np.dtype('S6'))
a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode_)
assert_equal(a.dtype, np.dtype('U4'))
assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
op_dtypes='U2')
i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6')
assert_equal(i[0], u'abc')
assert_equal(i[0].dtype, np.dtype('U6'))
def test_iter_buffering_growinner():
# Test that the inner loop grows when no buffering is needed
a = np.arange(30)
i = nditer(a, ['buffered', 'growinner', 'external_loop'],
buffersize=5)
# Should end up with just one inner loop here
assert_equal(i[0].size, a.size)
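# Complementary sketch (helper name is ours): when buffering really is
# needed, here because of a cast to 'f8', each external-loop chunk stays
# capped at buffersize.
def _example_buffered_chunks_capped():
    a = arange(30, dtype='f4')
    i = nditer(a, ['buffered', 'external_loop'],
               op_dtypes=[np.dtype('f8')], buffersize=5)
    assert_(all(x.size <= 5 for x in i))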
@pytest.mark.slow
def test_iter_buffered_reduce_reuse():
# large enough array for all views, including negative strides.
a = np.arange(2*3**5)[3**5:3**5+1]
flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok']
op_flags = [('readonly',), ('readwrite', 'allocate')]
op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]]
# wrong dtype to force buffering
op_dtypes = [float, a.dtype]
def get_params():
for xs in range(-3**2, 3**2 + 1):
for ys in range(xs, 3**2 + 1):
for op_axes in op_axes_list:
                    # The last stride is the inner (reduced) stride, so it
                    # is not important for this test.
strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize)
arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides)
for skip in [0, 1]:
yield arr, op_axes, skip
for arr, op_axes, skip in get_params():
nditer2 = np.nditer([arr.copy(), None],
op_axes=op_axes, flags=flags, op_flags=op_flags,
op_dtypes=op_dtypes)
with nditer2:
nditer2.operands[-1][...] = 0
nditer2.reset()
nditer2.iterindex = skip
for (a2_in, b2_in) in nditer2:
b2_in += a2_in.astype(np.int_)
comp_res = nditer2.operands[-1]
for bufsize in range(0, 3**3):
nditer1 = np.nditer([arr, None],
op_axes=op_axes, flags=flags, op_flags=op_flags,
buffersize=bufsize, op_dtypes=op_dtypes)
with nditer1:
nditer1.operands[-1][...] = 0
nditer1.reset()
nditer1.iterindex = skip
for (a1_in, b1_in) in nditer1:
b1_in += a1_in.astype(np.int_)
res = nditer1.operands[-1]
assert_array_equal(res, comp_res)
def test_iter_no_broadcast():
# Test that the no_broadcast flag works
a = np.arange(24).reshape(2, 3, 4)
b = np.arange(6).reshape(2, 3, 1)
c = np.arange(12).reshape(3, 4)
nditer([a, b, c], [],
[['readonly', 'no_broadcast'],
['readonly'], ['readonly']])
assert_raises(ValueError, nditer, [a, b, c], [],
[['readonly'], ['readonly', 'no_broadcast'], ['readonly']])
assert_raises(ValueError, nditer, [a, b, c], [],
[['readonly'], ['readonly'], ['readonly', 'no_broadcast']])
class TestIterNested:
def test_basic(self):
# Test nested iteration basic usage
a = arange(12).reshape(2, 3, 2)
i, j = np.nested_iters(a, [[0], [1, 2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[0, 1], [2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[0, 2], [1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
def test_reorder(self):
# Test nested iteration basic usage
a = arange(12).reshape(2, 3, 2)
# In 'K' order (default), it gets reordered
i, j = np.nested_iters(a, [[0], [2, 1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[1, 0], [2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[2, 0], [1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
# In 'C' order, it doesn't
i, j = np.nested_iters(a, [[0], [2, 1]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]])
i, j = np.nested_iters(a, [[1, 0], [2]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]])
i, j = np.nested_iters(a, [[2, 0], [1]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]])
def test_flip_axes(self):
# Test nested iteration with negative axes
a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1]
# In 'K' order (default), the axes all get flipped
i, j = np.nested_iters(a, [[0], [1, 2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[0, 1], [2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[0, 2], [1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
# In 'C' order, flipping axes is disabled
i, j = np.nested_iters(a, [[0], [1, 2]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]])
i, j = np.nested_iters(a, [[0, 1], [2]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]])
i, j = np.nested_iters(a, [[0, 2], [1]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]])
def test_broadcast(self):
# Test nested iteration with broadcasting
a = arange(2).reshape(2, 1)
b = arange(3).reshape(1, 3)
i, j = np.nested_iters([a, b], [[0], [1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]])
i, j = np.nested_iters([a, b], [[1], [0]])
vals = [list(j) for _ in i]
assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]])
def test_dtype_copy(self):
# Test nested iteration with a copy to change dtype
# copy
a = arange(6, dtype='i4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
op_flags=['readonly', 'copy'],
op_dtypes='f8')
assert_equal(j[0].dtype, np.dtype('f8'))
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
vals = None
# writebackifcopy - using context manager
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
op_flags=['readwrite', 'updateifcopy'],
casting='same_kind',
op_dtypes='f8')
with i, j:
assert_equal(j[0].dtype, np.dtype('f8'))
for x in i:
for y in j:
y[...] += 1
assert_equal(a, [[0, 1, 2], [3, 4, 5]])
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
# writebackifcopy - using close()
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
op_flags=['readwrite', 'updateifcopy'],
casting='same_kind',
op_dtypes='f8')
assert_equal(j[0].dtype, np.dtype('f8'))
for x in i:
for y in j:
y[...] += 1
assert_equal(a, [[0, 1, 2], [3, 4, 5]])
i.close()
j.close()
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
def test_dtype_buffered(self):
# Test nested iteration with buffering to change dtype
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
flags=['buffered'],
op_flags=['readwrite'],
casting='same_kind',
op_dtypes='f8')
assert_equal(j[0].dtype, np.dtype('f8'))
for x in i:
for y in j:
y[...] += 1
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
def test_0d(self):
a = np.arange(12).reshape(2, 3, 2)
i, j = np.nested_iters(a, [[], [1, 0, 2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[1, 0, 2], []])
vals = [list(j) for _ in i]
assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]])
i, j, k = np.nested_iters(a, [[2, 0], [], [1]])
vals = []
for x in i:
for y in j:
vals.append([z for z in k])
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
def test_iter_nested_iters_dtype_buffered(self):
# Test nested iteration with buffering to change dtype
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
flags=['buffered'],
op_flags=['readwrite'],
casting='same_kind',
op_dtypes='f8')
with i, j:
assert_equal(j[0].dtype, np.dtype('f8'))
for x in i:
for y in j:
y[...] += 1
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
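# Module-level sketch (helper name is ours) of the nested_iters contract
# used throughout the class above: the outer iterator advances one step for
# every full pass of the inner iterator.
def _example_nested_iters_rows():
    a = arange(6).reshape(2, 3)
    i, j = np.nested_iters(a, [[0], [1]])
    rows = [list(j) for _ in i]
    assert_equal(rows, [[0, 1, 2], [3, 4, 5]])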
def test_iter_reduction_error():
a = np.arange(6)
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0], [-1]])
a = np.arange(6).reshape(2, 3)
assert_raises(ValueError, nditer, [a, None], ['external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0, 1], [-1, -1]])
def test_iter_reduction():
# Test doing reductions with the iterator
a = np.arange(6)
i = nditer([a, None], ['reduce_ok'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0], [-1]])
# Need to initialize the output operand to the addition unit
with i:
i.operands[1][...] = 0
# Do the reduction
for x, y in i:
y[...] += x
# Since no axes were specified, should have allocated a scalar
assert_equal(i.operands[1].ndim, 0)
assert_equal(i.operands[1], np.sum(a))
a = np.arange(6).reshape(2, 3)
i = nditer([a, None], ['reduce_ok', 'external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0, 1], [-1, -1]])
# Need to initialize the output operand to the addition unit
with i:
i.operands[1][...] = 0
# Reduction shape/strides for the output
assert_equal(i[1].shape, (6,))
assert_equal(i[1].strides, (0,))
# Do the reduction
for x, y in i:
# Use a for loop instead of ``y[...] += x``
# (equivalent to ``y[...] = y[...].copy() + x``),
            # because y has zero strides, which we rely on for the reduction
for j in range(len(y)):
y[j] += x[j]
# Since no axes were specified, should have allocated a scalar
assert_equal(i.operands[1].ndim, 0)
assert_equal(i.operands[1], np.sum(a))
# This is a tricky reduction case for the buffering double loop
# to handle
a = np.ones((2, 3, 5))
it1 = nditer([a, None], ['reduce_ok', 'external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[None, [0, -1, 1]])
it2 = nditer([a, None], ['reduce_ok', 'external_loop',
'buffered', 'delay_bufalloc'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[None, [0, -1, 1]], buffersize=10)
with it1, it2:
it1.operands[1].fill(0)
it2.operands[1].fill(0)
it2.reset()
for x in it1:
x[1][...] += x[0]
for x in it2:
x[1][...] += x[0]
assert_equal(it1.operands[1], it2.operands[1])
assert_equal(it2.operands[1].sum(), a.size)
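# Sketch (helper name is ours), mirroring the reduction recipe from the
# nditer documentation: summing over the last axis with 'reduce_ok' and an
# allocated output should match ndarray.sum(axis=-1).
def _example_reduce_sum_last_axis():
    a = arange(24).reshape(2, 3, 4)
    i = nditer([a, None], ['reduce_ok'],
               [['readonly'], ['readwrite', 'allocate']],
               op_axes=[None, [0, 1, -1]])
    with i:
        i.operands[1][...] = 0
        for x, y in i:
            y[...] += x
        result = i.operands[1]
    assert_equal(result, a.sum(axis=-1))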
def test_iter_buffering_reduction():
# Test doing buffered reductions with the iterator
a = np.arange(6)
b = np.array(0., dtype='f8').byteswap().newbyteorder()
i = nditer([a, b], ['reduce_ok', 'buffered'],
[['readonly'], ['readwrite', 'nbo']],
op_axes=[[0], [-1]])
with i:
assert_equal(i[1].dtype, np.dtype('f8'))
assert_(i[1].dtype != b.dtype)
# Do the reduction
for x, y in i:
y[...] += x
        # b is a zero-dimensional operand, so the whole array reduced into it
assert_equal(b, np.sum(a))
a = np.arange(6).reshape(2, 3)
b = np.array([0, 0], dtype='f8').byteswap().newbyteorder()
i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'],
[['readonly'], ['readwrite', 'nbo']],
op_axes=[[0, 1], [0, -1]])
# Reduction shape/strides for the output
with i:
assert_equal(i[1].shape, (3,))
assert_equal(i[1].strides, (0,))
# Do the reduction
for x, y in i:
# Use a for loop instead of ``y[...] += x``
# (equivalent to ``y[...] = y[...].copy() + x``),
            # because y has zero strides, which we rely on for the reduction
for j in range(len(y)):
y[j] += x[j]
assert_equal(b, np.sum(a, axis=1))
# Iterator inner double loop was wrong on this one
p = np.arange(2) + 1
it = np.nditer([p, None],
['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[-1, 0], [-1, -1]],
itershape=(2, 2))
with it:
it.operands[1].fill(0)
it.reset()
assert_equal(it[0], [1, 2, 1, 2])
# Iterator inner loop should take argument contiguity into account
x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0)
x[...] = np.arange(x.size).reshape(x.shape)
y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4)
y_base_copy = y_base.copy()
y = y_base[::2,:,None]
it = np.nditer([y, x],
['buffered', 'external_loop', 'reduce_ok'],
[['readwrite'], ['readonly']])
with it:
for a, b in it:
a.fill(2)
assert_equal(y_base[1::2], y_base_copy[1::2])
assert_equal(y_base[::2], 2)
def test_iter_buffering_reduction_reuse_reduce_loops():
# There was a bug triggering reuse of the reduce loop inappropriately,
# which caused processing to happen in unnecessarily small chunks
# and overran the buffer.
a = np.zeros((2, 7))
b = np.zeros((1, 7))
it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'],
op_flags=[['readonly'], ['readwrite']],
buffersize=5)
with it:
bufsizes = [x.shape[0] for x, y in it]
assert_equal(bufsizes, [5, 2, 5, 2])
assert_equal(sum(bufsizes), a.size)
def test_iter_writemasked_badinput():
a = np.zeros((2, 3))
b = np.zeros((3,))
m = np.array([[True, True, False], [False, True, False]])
m2 = np.array([True, True, False])
m3 = np.array([0, 1, 1], dtype='u1')
mbad1 = np.array([0, 1, 1], dtype='i1')
mbad2 = np.array([0, 1, 1], dtype='f4')
# Need an 'arraymask' if any operand is 'writemasked'
assert_raises(ValueError, nditer, [a, m], [],
[['readwrite', 'writemasked'], ['readonly']])
# A 'writemasked' operand must not be readonly
assert_raises(ValueError, nditer, [a, m], [],
[['readonly', 'writemasked'], ['readonly', 'arraymask']])
# 'writemasked' and 'arraymask' may not be used together
assert_raises(ValueError, nditer, [a, m], [],
[['readonly'], ['readwrite', 'arraymask', 'writemasked']])
# 'arraymask' may only be specified once
assert_raises(ValueError, nditer, [a, m, m2], [],
[['readwrite', 'writemasked'],
['readonly', 'arraymask'],
['readonly', 'arraymask']])
# An 'arraymask' with nothing 'writemasked' also doesn't make sense
assert_raises(ValueError, nditer, [a, m], [],
[['readwrite'], ['readonly', 'arraymask']])
    # A writemasked reduction requires a correspondingly reduced (smaller) mask
assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'],
[['readonly'],
['readwrite', 'writemasked'],
['readonly', 'arraymask']])
# But this should work with a smaller/equal mask to the reduction operand
np.nditer([a, b, m2], ['reduce_ok'],
[['readonly'],
['readwrite', 'writemasked'],
['readonly', 'arraymask']])
# The arraymask itself cannot be a reduction
assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'],
[['readonly'],
['readwrite', 'writemasked'],
['readwrite', 'arraymask']])
# A uint8 mask is ok too
np.nditer([a, m3], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=['f4', None],
casting='same_kind')
# An int8 mask isn't ok
assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=['f4', None],
casting='same_kind')
# A float32 mask isn't ok
assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=['f4', None],
casting='same_kind')
def _is_buffered(iterator):
try:
iterator.itviews
except ValueError:
return True
return False
@pytest.mark.parametrize("a",
[np.zeros((3,), dtype='f8'),
np.zeros((9876, 3*5), dtype='f8')[::2, :],
np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :]])
def test_iter_writemasked(a):
# Note, the slicing above is to ensure that nditer cannot combine multiple
# axes into one. The repetition is just to make things a bit more
# interesting.
shape = a.shape
reps = shape[-1] // 3
msk = np.empty(shape, dtype=bool)
msk[...] = [True, True, False] * reps
# When buffering is unused, 'writemasked' effectively does nothing.
# It's up to the user of the iterator to obey the requested semantics.
it = np.nditer([a, msk], [],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']])
with it:
for x, m in it:
x[...] = 1
# Because we violated the semantics, all the values became 1
assert_equal(a, np.broadcast_to([1, 1, 1] * reps, shape))
# Even if buffering is enabled, we still may be accessing the array
# directly.
it = np.nditer([a, msk], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']])
    # It is not obvious why a "buffered" iterator sometimes ends up not using
    # a buffer for the small array here, at least when "writemasked" is used.
    # Rather than assuming either way, check by testing for actual memory
    # overlap!
is_buffered = True
with it:
for x, m in it:
x[...] = 2.5
if np.may_share_memory(x, a):
is_buffered = False
if not is_buffered:
# Because we violated the semantics, all the values became 2.5
assert_equal(a, np.broadcast_to([2.5, 2.5, 2.5] * reps, shape))
else:
# For large sizes, the iterator may be buffered:
assert_equal(a, np.broadcast_to([2.5, 2.5, 1] * reps, shape))
a[...] = 2.5
    # If buffering will definitely happen, for instance because of
    # a cast, only the items selected by the mask will be copied back from
    # the buffer.
it = np.nditer([a, msk], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=['i8', None],
casting='unsafe')
with it:
for x, m in it:
x[...] = 3
# Even though we violated the semantics, only the selected values
# were copied back
assert_equal(a, np.broadcast_to([3, 3, 2.5] * reps, shape))
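# Sketch (helper name is ours) of the selective writeback in isolation:
# with a dtype cast forcing buffering, only the masked elements are copied
# back, so the end result behaves like np.where.
def _example_writemasked_like_where():
    a = np.zeros(6)
    m = np.array([True, False, True, False, True, False])
    it = np.nditer([a, m], ['buffered'],
                   [['readwrite', 'writemasked'],
                    ['readonly', 'arraymask']],
                   op_dtypes=['i8', None], casting='unsafe')
    with it:
        for x, mask in it:
            x[...] = 7
    assert_equal(a, np.where(m, 7., 0.))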
def test_iter_writemasked_decref():
# force casting (to make it interesting) by using a structured dtype.
arr = np.arange(10000).astype(">i,O")
original = arr.copy()
mask = np.random.randint(0, 2, size=10000).astype(bool)
it = np.nditer([arr, mask], ['buffered', "refs_ok"],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=["<i,O", "?"])
singleton = object()
if HAS_REFCOUNT:
count = sys.getrefcount(singleton)
for buf, mask_buf in it:
buf[...] = (3, singleton)
    del buf, mask_buf, it  # delete everything to ensure correct cleanup
if HAS_REFCOUNT:
# The buffer would have included additional items, they must be
# cleared correctly:
assert sys.getrefcount(singleton) - count == np.count_nonzero(mask)
assert_array_equal(arr[~mask], original[~mask])
assert (arr[mask] == np.array((3, singleton), arr.dtype)).all()
del arr
if HAS_REFCOUNT:
assert sys.getrefcount(singleton) == count
def test_iter_non_writable_attribute_deletion():
it = np.nditer(np.ones(2))
attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc",
"iterationneedsapi", "has_multi_index", "has_index", "dtypes",
"ndim", "nop", "itersize", "finished"]
for s in attr:
assert_raises(AttributeError, delattr, it, s)
def test_iter_writable_attribute_deletion():
it = np.nditer(np.ones(2))
attr = [ "multi_index", "index", "iterrange", "iterindex"]
for s in attr:
assert_raises(AttributeError, delattr, it, s)
def test_iter_element_deletion():
it = np.nditer(np.ones(3))
try:
del it[1]
del it[1:2]
except TypeError:
pass
except Exception:
raise AssertionError
def test_iter_allocated_array_dtypes():
# If the dtype of an allocated output has a shape, the shape gets
# tacked onto the end of the result.
it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))])
for a, b in it:
b[0] = a - 1
b[1] = a + 1
assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]])
# Check the same (less sensitive) thing when `op_axes` with -1 is given.
it = np.nditer(([[1, 3, 20]], None), op_dtypes=[None, ('i4', (2,))],
flags=["reduce_ok"], op_axes=[None, (-1, 0)])
for a, b in it:
b[0] = a - 1
b[1] = a + 1
assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]])
# Make sure this works for scalars too
it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))])
for a, b, c in it:
c[0, 0] = a - b
c[0, 1] = a + b
c[1, 0] = a * b
c[1, 1] = a / b
assert_equal(it.operands[2], [[8, 12], [20, 5]])
def test_0d_iter():
# Basic test for iteration of 0-d arrays:
i = nditer([2, 3], ['multi_index'], [['readonly']]*2)
assert_equal(i.ndim, 0)
assert_equal(next(i), (2, 3))
assert_equal(i.multi_index, ())
assert_equal(i.iterindex, 0)
assert_raises(StopIteration, next, i)
# test reset:
i.reset()
assert_equal(next(i), (2, 3))
assert_raises(StopIteration, next, i)
# test forcing to 0-d
i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()])
assert_equal(i.ndim, 0)
assert_equal(len(i), 1)
i = nditer(np.arange(5), ['multi_index'], [['readonly']],
op_axes=[()], itershape=())
assert_equal(i.ndim, 0)
assert_equal(len(i), 1)
# passing an itershape alone is not enough, the op_axes are also needed
with assert_raises(ValueError):
nditer(np.arange(5), ['multi_index'], [['readonly']], itershape=())
# Test a more complex buffered casting case (same as another test above)
sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
a = np.array(0.5, dtype='f4')
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe', op_dtypes=sdt)
vals = next(i)
assert_equal(vals['a'], 0.5)
assert_equal(vals['b'], 0)
assert_equal(vals['c'], [[(0.5)]*3]*2)
assert_equal(vals['d'], 0.5)
def test_object_iter_cleanup():
# see gh-18450
# object arrays can raise a python exception in ufunc inner loops using
# nditer, which should cause iteration to stop & cleanup. There were bugs
# in the nditer cleanup when decref'ing object arrays.
# This test would trigger valgrind "uninitialized read" before the bugfix.
assert_raises(TypeError, lambda: np.zeros((17000, 2), dtype='f4') * None)
# this more explicit code also triggers the invalid access
arr = np.arange(np.BUFSIZE * 10).reshape(10, -1).astype(str)
oarr = arr.astype(object)
oarr[:, -1] = None
assert_raises(TypeError, lambda: np.add(oarr[:, ::-1], arr[:, ::-1]))
# followup: this tests for a bug introduced in the first pass of gh-18450,
# caused by an incorrect fallthrough of the TypeError
class T:
def __bool__(self):
raise TypeError("Ambiguous")
assert_raises(TypeError, np.logical_or.reduce,
np.array([T(), T()], dtype='O'))
def test_object_iter_cleanup_reduce():
# Similar as above, but a complex reduction case that was previously
# missed (see gh-18810).
# The following array is special in that it cannot be flattened:
arr = np.array([[None, 1], [-1, -1], [None, 2], [-1, -1]])[::2]
with pytest.raises(TypeError):
np.sum(arr)
@pytest.mark.parametrize("arr", [
np.ones((8000, 4, 2), dtype=object)[:, ::2, :],
np.ones((8000, 4, 2), dtype=object, order="F")[:, ::2, :],
np.ones((8000, 4, 2), dtype=object)[:, ::2, :].copy("F")])
def test_object_iter_cleanup_large_reduce(arr):
# More complicated calls are possible for large arrays:
out = np.ones(8000, dtype=np.intp)
# force casting with `dtype=object`
res = np.sum(arr, axis=(1, 2), dtype=object, out=out)
assert_array_equal(res, np.full(8000, 4, dtype=object))
def test_iter_too_large():
# The total size of the iterator must not exceed the maximum intp due
# to broadcasting. Dividing by 1024 will keep it small enough to
# give a legal array.
size = np.iinfo(np.intp).max // 1024
arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,))
assert_raises(ValueError, nditer, (arr, arr[:, None]))
# test the same for multiindex. That may get more interesting when
# removing 0 dimensional axis is allowed (since an iterator can grow then)
assert_raises(ValueError, nditer,
(arr, arr[:, None]), flags=['multi_index'])
def test_iter_too_large_with_multiindex():
    # When a multi index is being tracked, the error is delayed. This
    # checks the delayed error messages and that it is possible to get
    # below the size limit by removing an axis.
base_size = 2**10
num = 1
while base_size**num < np.iinfo(np.intp).max:
num += 1
shape_template = [1, 1] * num
arrays = []
for i in range(num):
shape = shape_template[:]
shape[i * 2] = 2**10
arrays.append(np.empty(shape))
arrays = tuple(arrays)
# arrays are now too large to be broadcast. The different modes test
# different nditer functionality with or without GIL.
for mode in range(6):
with assert_raises(ValueError):
_multiarray_tests.test_nditer_too_large(arrays, -1, mode)
# but if we do nothing with the nditer, it can be constructed:
_multiarray_tests.test_nditer_too_large(arrays, -1, 7)
# When an axis is removed, things should work again (half the time):
for i in range(num):
for mode in range(6):
# an axis with size 1024 is removed:
_multiarray_tests.test_nditer_too_large(arrays, i*2, mode)
# an axis with size 1 is removed:
with assert_raises(ValueError):
_multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode)
def test_writebacks():
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
assert_(a.dtype.byteorder != au.dtype.byteorder)
it = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
with it:
it.operands[0][:] = 100
assert_equal(au, 100)
    # do it again, this time raising an error inside the context manager
it = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
try:
with it:
assert_equal(au.flags.writeable, False)
it.operands[0][:] = 0
raise ValueError('exit context manager on exception')
except:
pass
assert_equal(au, 0)
assert_equal(au.flags.writeable, True)
    # cannot reuse it outside the context manager
assert_raises(ValueError, getattr, it, 'operands')
it = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
with it:
x = it.operands[0]
x[:] = 6
assert_(x.flags.writebackifcopy)
assert_equal(au, 6)
assert_(not x.flags.writebackifcopy)
x[:] = 123 # x.data still valid
assert_equal(au, 6) # but not connected to au
it = nditer(au, [],
[['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
# reentering works
with it:
with it:
for x in it:
x[...] = 123
it = nditer(au, [],
[['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
# make sure exiting the inner context manager closes the iterator
with it:
with it:
for x in it:
x[...] = 123
assert_raises(ValueError, getattr, it, 'operands')
# do not crash if original data array is decrefed
it = nditer(au, [],
[['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
del au
with it:
for x in it:
x[...] = 123
# make sure we cannot reenter the closed iterator
enter = it.__enter__
assert_raises(RuntimeError, enter)
def test_close_equivalent():
    ''' using a context manager and using nditer.close are equivalent
'''
def add_close(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
for (a, b, c) in it:
addop(a, b, out=c)
ret = it.operands[2]
it.close()
return ret
def add_context(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
with it:
for (a, b, c) in it:
addop(a, b, out=c)
return it.operands[2]
z = add_close(range(5), range(5))
assert_equal(z, range(0, 10, 2))
z = add_context(range(5), range(5))
assert_equal(z, range(0, 10, 2))
def test_close_raises():
it = np.nditer(np.arange(3))
    assert_equal(next(it), 0)
it.close()
assert_raises(StopIteration, next, it)
assert_raises(ValueError, getattr, it, 'operands')
def test_close_parameters():
it = np.nditer(np.arange(3))
assert_raises(TypeError, it.close, 1)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_warn_noclose():
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
it = np.nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
del it
assert len(sup.log) == 1
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
@pytest.mark.parametrize(["in_dtype", "buf_dtype"],
[("i", "O"), ("O", "i"), # most simple cases
("i,O", "O,O"), # structured partially only copying O
("O,i", "i,O"), # structured casting to and from O
])
@pytest.mark.parametrize("steps", [1, 2, 3])
def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps):
value = 123 # relies on python cache (leak-check will still find it)
arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
count = sys.getrefcount(value)
it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
for step in range(steps):
# The iteration finishes in 3 steps, the first two are partial
next(it)
# Note that resetting does not free references
del it
assert count == sys.getrefcount(value)
# Repeat the test with `iternext`
it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
for step in range(steps):
it.iternext()
del it # should ensure cleanup
assert count == sys.getrefcount(value)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
@pytest.mark.parametrize(["in_dtype", "buf_dtype"],
[("O", "i"), # most simple cases
("O,i", "i,O"), # structured casting to and from O
])
def test_partial_iteration_error(in_dtype, buf_dtype):
value = 123 # relies on python cache (leak-check will still find it)
arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
if in_dtype == "O":
arr[int(np.BUFSIZE * 1.5)] = None
else:
arr[int(np.BUFSIZE * 1.5)]["f0"] = None
count = sys.getrefcount(value)
it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
with pytest.raises(TypeError):
# pytest.raises seems to have issues with the error originating
# in the for loop, so manually unravel:
next(it)
next(it) # raises TypeError
# Repeat the test with `iternext` after resetting, the buffers should
# already be cleared from any references, so resetting is sufficient.
it.reset()
with pytest.raises(TypeError):
it.iternext()
it.iternext()
assert count == sys.getrefcount(value)
def test_debug_print(capfd):
"""
Matches the expected output of a debug print with the actual output.
Note that the iterator dump should not be considered stable API,
this test is mainly to ensure the print does not crash.
Currently uses a subprocess to avoid dealing with the C level `printf`s.
"""
# the expected output with all addresses and sizes stripped (they vary
    # and/or are platform dependent).
expected = """
------ BEGIN ITERATOR DUMP ------
| Iterator Address:
| ItFlags: BUFFER REDUCE REUSE_REDUCE_LOOPS
| NDim: 2
| NOp: 2
| IterSize: 50
| IterStart: 0
| IterEnd: 50
| IterIndex: 0
| Iterator SizeOf:
| BufferData SizeOf:
| AxisData SizeOf:
|
| Perm: 0 1
| DTypes:
| DTypes: dtype('float64') dtype('int32')
| InitDataPtrs:
| BaseOffsets: 0 0
| Operands:
| Operand DTypes: dtype('int64') dtype('float64')
| OpItFlags:
| Flags[0]: READ CAST ALIGNED
| Flags[1]: READ WRITE CAST ALIGNED REDUCE
|
| BufferData:
| BufferSize: 50
| Size: 5
| BufIterEnd: 5
| REDUCE Pos: 0
| REDUCE OuterSize: 10
| REDUCE OuterDim: 1
| Strides: 8 4
| Ptrs:
| REDUCE Outer Strides: 40 0
| REDUCE Outer Ptrs:
| ReadTransferFn:
| ReadTransferData:
| WriteTransferFn:
| WriteTransferData:
| Buffers:
|
| AxisData[0]:
| Shape: 5
| Index: 0
| Strides: 16 8
| Ptrs:
| AxisData[1]:
| Shape: 10
| Index: 0
| Strides: 80 0
| Ptrs:
------- END ITERATOR DUMP -------
""".strip().splitlines()
arr1 = np.arange(100, dtype=np.int64).reshape(10, 10)[:, ::2]
arr2 = np.arange(5.)
it = np.nditer((arr1, arr2), op_dtypes=["d", "i4"], casting="unsafe",
flags=["reduce_ok", "buffered"],
op_flags=[["readonly"], ["readwrite"]])
it.debug_print()
res = capfd.readouterr().out
res = res.strip().splitlines()
assert len(res) == len(expected)
for res_line, expected_line in zip(res, expected):
# The actual output may have additional pointers listed that are
# stripped from the example output:
assert res_line.startswith(expected_line.strip())
|
the-stack_0_10768 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 28 23:36:19 2021
@author: Bruno Ferrari
"""
_path="C:/Users/Bruno Ferrari/Documents/Bruno/2019/2s/MC/artigos revisão/Artigos Mes/GD/"
import pandas as pd
import numpy as np
import bgraph
from concurrent.futures import ThreadPoolExecutor
def gera_txt(nome, G):
with open((_path+'bdp/dbdp_instances/GraphData/{inst}.txt').format(inst=nome), 'w') as arq:
arq.write(str(G.v1()) + "\n")
arq.write(str(G.v2()) + "\n")
arq.write(str(G.edges()))
def gera_grafo(path):
graph_data = pd.read_csv(path)
graph_data_exp = graph_data.iloc[:,0].str.split(" ",expand=True)
n_1 = int(graph_data_exp.iloc[0,0])
n_2 = int(graph_data_exp.iloc[0,1])
graph_edges = []
for key, row in enumerate(graph_data.iloc[1:n_1+1, 0]):
for adj in row.split(" ")[2:]:
#if int(adj) not in graph_adj_nodes_incre:
graph_edges.append((key, int(adj)))
order_1 = dict(zip(range(0,n_1),graph_data_exp.iloc[1:n_1+1,1].astype(int).values + 1))
order_2 = dict(zip(range(n_1,n_1+n_2),graph_data_exp.iloc[n_1+1:n_1+1+n_2,1].astype(int).values + 1))
New = bgraph.BGraph()
New.v1(list(order_1.keys()))
New.v2(list(order_2.keys()))
New.edges(graph_edges)
return New
# Disregards additional vertices (i.e. those with line == 0)
def gera_grafo_2(path):
graph_data = pd.read_csv(path)
graph_data_exp = graph_data.iloc[:,0].str.split(" ",expand=True)
n_1 = int(graph_data_exp.iloc[0,0])
aux = (graph_data_exp[1:n_1+1].set_index(0).loc['1'])
graph_edges = [(int(v), int(u)) for v in aux.iloc[:,0] for u in aux.set_index(1).loc[v] if u != None]
New = bgraph.BGraph()
New.v1(np.unique(np.array(np.matrix(graph_edges)[:,0])).tolist())
New.v2(np.unique(np.array(np.matrix(graph_edges)[:,1])).tolist())
New.edges(graph_edges)
return New
def gera_features(output):
for o in output:
_aux_deg = list(o.degree().values())
yield (o.n_v(), #vertices
o.n_edge(), #arestas
abs(o.n_v1() - o.n_v2()), #diff nos
np.mean(_aux_deg), #mean deg
np.std(_aux_deg), #std deg
np.median(_aux_deg), #median deg
o.density(), #graph density
o.n_v1(), o.n_v2(), np.min(_aux_deg)) #aux feat
#df_results = pd.read_excel(_path+"bdp/dbdp_instances/stallman_reduced.xlsx", sheet_name="Sheet1")
df_results = pd.read_excel(_path+"bdp/dbdp_instances/instance.xlsx", sheet_name="instances").replace(regex=".txt", value="")
output_list = []
for i in range(130)[::10]:
with ThreadPoolExecutor(6) as ex:
output = list(ex.map(lambda x: gera_grafo_2((_path+"bdp/dbdp_instances/instances/{name}.txt").format(name=x)), list(df_results['Instance'][i-10:i])))
with ThreadPoolExecutor(6) as ex:
ex.map(lambda x: gera_txt(x[0], x[1]), zip(df_results['Instance'][i-10:i].replace(regex="inc", value=""), output))
output_list.append(pd.DataFrame(gera_features(output)))
if not(i % 20):
with pd.ExcelWriter(_path+"bdp/dbdp_instances/metafeat3.xlsx") as writer:
pd.concat([df_results.replace(regex="inc", value=""), pd.concat(output_list, axis=0).reset_index(drop=True)],axis=1).to_excel(writer, sheet_name="results", index=False)
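# Usage sketch (an assumption: it relies on the local `bgraph` package exposing
# the same API already used above -- the v1()/v2()/edges() setters plus the
# n_v()/n_edge()/degree()/density() accessors read by gera_features):
def _example_features():
    g = bgraph.BGraph()
    g.v1([0, 1])
    g.v2([2, 3])
    g.edges([(0, 2), (0, 3), (1, 2)])
    return list(gera_features([g]))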
|
the-stack_0_10769 | # Deepforest Preprocessing model
"""The preprocessing module is used to reshape data into format suitable for
training or prediction.
For example cutting large tiles into smaller images.
"""
import os
import numpy as np
import pandas as pd
try:
import slidingwindow
from PIL import Image
except:
pass
def image_name_from_path(image_path):
"""Convert path to image name for use in indexing."""
image_name = os.path.basename(image_path)
image_name = os.path.splitext(image_name)[0]
return image_name
def compute_windows(numpy_image, patch_size, patch_overlap):
"""Create a sliding window object from a raster tile.
Args:
        numpy_image (array): Raster object as numpy array to cut into crops
        patch_size (int): Maximum dimensions of each square window, in pixels
        patch_overlap (float): Fraction of overlap between windows (0 -> 1)
Returns:
windows (list): a sliding windows object
"""
if patch_overlap > 1:
raise ValueError("Patch overlap {} must be between 0 - 1".format(patch_overlap))
# Generate overlapping sliding windows
windows = slidingwindow.generate(numpy_image,
slidingwindow.DimOrder.HeightWidthChannel,
patch_size, patch_overlap)
return (windows)
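# Small usage sketch (illustrative only): generate overlapping crops from a
# dummy 3-band image. It assumes the optional `slidingwindow` dependency
# imported above is available.
def _example_compute_windows():
    dummy_image = np.zeros((1000, 1500, 3), dtype=np.uint8)
    windows = compute_windows(dummy_image, patch_size=400, patch_overlap=0.05)
    # each window exposes pixel bounds via getRect() and array slices via indices()
    first_crop = dummy_image[windows[0].indices()]
    return len(windows), first_crop.shape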
def select_annotations(annotations, windows, index, allow_empty=False):
"""Select annotations that overlap with selected image crop.
Args:
        annotations: pandas DataFrame of annotations for the full tile, in
            the format -> image_path, xmin, ymin, xmax, ymax, label
        windows: A sliding window object (see compute_windows)
        index: The index in the windows object to use as crop bounds
allow_empty (bool): If True, allow window crops
that have no annotations to be included
Returns:
selected_annotations: a pandas dataframe of annotations
"""
# Window coordinates - with respect to tile
window_xmin, window_ymin, w, h = windows[index].getRect()
window_xmax = window_xmin + w
window_ymax = window_ymin + h
# buffer coordinates a bit to grab boxes that might start just against
# the image edge. Don't allow boxes that start and end after the offset
offset = 40
selected_annotations = annotations[(annotations.xmin > (window_xmin - offset)) &
(annotations.xmin < (window_xmax)) &
(annotations.xmax >
(window_xmin)) & (annotations.ymin >
(window_ymin - offset)) &
(annotations.xmax <
(window_xmax + offset)) & (annotations.ymin <
(window_ymax)) &
(annotations.ymax >
(window_ymin)) & (annotations.ymax <
(window_ymax + offset))].copy()
# change the image name
image_name = os.path.splitext("{}".format(annotations.image_path.unique()[0]))[0]
image_basename = os.path.splitext(image_name)[0]
selected_annotations.image_path = "{}_{}.png".format(image_basename, index)
# If no matching annotations, return a line with the image name, but no records
if selected_annotations.empty:
if allow_empty:
selected_annotations = pd.DataFrame(
["{}_{}.png".format(image_basename, index)], columns=["image_path"])
selected_annotations["xmin"] = ""
selected_annotations["ymin"] = ""
selected_annotations["xmax"] = ""
selected_annotations["ymax"] = ""
selected_annotations["label"] = ""
else:
return None
else:
# update coordinates with respect to origin
selected_annotations.xmax = (selected_annotations.xmin - window_xmin) + (
selected_annotations.xmax - selected_annotations.xmin)
selected_annotations.xmin = (selected_annotations.xmin - window_xmin)
selected_annotations.ymax = (selected_annotations.ymin - window_ymin) + (
selected_annotations.ymax - selected_annotations.ymin)
selected_annotations.ymin = (selected_annotations.ymin - window_ymin)
# cut off any annotations over the border.
selected_annotations.loc[selected_annotations.xmin < 0, "xmin"] = 0
selected_annotations.loc[selected_annotations.xmax > w, "xmax"] = w
selected_annotations.loc[selected_annotations.ymin < 0, "ymin"] = 0
selected_annotations.loc[selected_annotations.ymax > h, "ymax"] = h
return selected_annotations
def save_crop(base_dir, image_name, index, crop):
"""Save window crop as image file to be read by PIL.
Filename should match the image_name + window index
"""
# create dir if needed
if not os.path.exists(base_dir):
os.makedirs(base_dir)
im = Image.fromarray(crop)
image_basename = os.path.splitext(image_name)[0]
filename = "{}/{}_{}.png".format(base_dir, image_basename, index)
im.save(filename)
return filename
def split_raster(path_to_raster,
annotations_file,
base_dir=".",
patch_size=400,
patch_overlap=0.05,
allow_empty=False):
"""Divide a large tile into smaller arrays. Each crop will be saved to
file.
Args:
path_to_raster: (str): Path to a tile that can be read by rasterio on disk
annotations_file (str): Path to annotations file (with column names)
data in the format -> image_path, xmin, ymin, xmax, ymax, label
base_dir (str): Where to save the annotations and image
crops relative to current working dir
patch_size (int): Maximum dimensions of square window
patch_overlap (float): Percent of overlap among windows 0->1
allow_empty: If True, include images with no annotations
to be included in the dataset
Returns:
A pandas dataframe with annotations file for training.
"""
# Load raster as image
raster = Image.open(path_to_raster)
numpy_image = np.array(raster)
# Check that its 3 band
bands = numpy_image.shape[2]
if not bands == 3:
raise IOError("Input file {} has {} bands. DeepForest only accepts 3 band RGB "
"rasters in the order (height, width, channels). "
"If the image was cropped and saved as a .jpg, "
"please ensure that no alpha channel was used.".format(
path_to_raster, bands))
    # Check that the image is at least as large as the patch size
height = numpy_image.shape[0]
width = numpy_image.shape[1]
if any(np.array([height, width]) < patch_size):
raise ValueError("Patch size of {} is larger than the image dimensions {}".format(
patch_size, [height, width]))
# Compute sliding window index
windows = compute_windows(numpy_image, patch_size, patch_overlap)
# Get image name for indexing
image_name = os.path.basename(path_to_raster)
# Load annotations file and coerce dtype
annotations = pd.read_csv(annotations_file)
# open annotations file
image_annotations = annotations[annotations.image_path == image_name].copy()
# Sanity checks
if image_annotations.empty:
raise ValueError(
"No image names match between the file:{} and the image_path: {}. "
"Reminder that image paths should be the relative "
"path (e.g. 'image_name.tif'), not the full path "
"(e.g. path/to/dir/image_name.tif)".format(annotations_file, image_name))
if not annotations.shape[1] == 6:
raise ValueError("Annotations file has {} columns, should have "
"format image_path, xmin, ymin, xmax, ymax, label".format(
annotations.shape[1]))
annotations_files = []
for index, window in enumerate(windows):
#Crop image
crop = numpy_image[windows[index].indices()]
# Find annotations, image_name is the basename of the path
crop_annotations = select_annotations(image_annotations, windows, index,
allow_empty)
# If empty images not allowed, select annotations returns None
if crop_annotations is not None:
# save annotations
annotations_files.append(crop_annotations)
# save image crop
save_crop(base_dir, image_name, index, crop)
if len(annotations_files) == 0:
raise ValueError(
"Input file has no overlapping annotations and allow_empty is {}".format(
allow_empty))
annotations_files = pd.concat(annotations_files)
# Checkpoint csv files, useful for parallelization
# Use filename of the raster path to save the annotations
image_basename = os.path.splitext(image_name)[0]
file_path = image_basename + ".csv"
file_path = os.path.join(base_dir, file_path)
annotations_files.to_csv(file_path, index=False, header=False)
return annotations_files
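# Usage sketch; the tile and annotation paths below are hypothetical
# assumptions, not files shipped with this module. Defining the helper does
# not execute anything at import time.
def _example_split_raster():
    return split_raster(
        path_to_raster="data/2019_tile.tif",                  # hypothetical tile
        annotations_file="data/2019_tile_annotations.csv",    # hypothetical CSV
        base_dir="crops",
        patch_size=400,
        patch_overlap=0.05,
        allow_empty=False)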
|
the-stack_0_10773 | import json
import dml
import prov.model
import datetime
import uuid
import pandas as pd
class topCertifiedCompanies(dml.Algorithm):
contributor = 'ashwini_gdukuray_justini_utdesai'
reads = ['ashwini_gdukuray_justini_utdesai.topCompanies', 'ashwini_gdukuray_justini_utdesai.masterList']
writes = ['ashwini_gdukuray_justini_utdesai.topCertCompanies']
@staticmethod
def execute(trial=False):
        '''Merge the certified-business master list with the top-companies list on a normalized business ID and store the result.'''
startTime = datetime.datetime.now()
# Set up the database connection.
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('ashwini_gdukuray_justini_utdesai', 'ashwini_gdukuray_justini_utdesai')
masterList = repo['ashwini_gdukuray_justini_utdesai.masterList']
topCompanies = repo['ashwini_gdukuray_justini_utdesai.topCompanies']
masterListDF = pd.DataFrame(list(masterList.find()))
topCompaniesDF = pd.DataFrame(list(topCompanies.find()))
topCompaniesDF = topCompaniesDF.rename(index=str, columns={'Firm': 'Business Name'})
# create a more uniform ID
businessIDs = []
for index, row in topCompaniesDF.iterrows():
busName = row['Business Name']
cleanedText = busName.upper().strip().replace(' ','').replace('.','').replace(',','').replace('-','')
businessIDs.append(cleanedText)
topCompaniesDF['B_ID'] = pd.Series(businessIDs, index=topCompaniesDF.index)
merged = pd.merge(masterListDF, topCompaniesDF, how='inner', on=['B_ID'])
#print(merged['B_ID'])
#records = json.loads(topCompaniesDF.T.to_json()).values()
repo.dropCollection("topCertCompanies")
repo.createCollection("topCertCompanies")
repo['ashwini_gdukuray_justini_utdesai.topCertCompanies'].insert_many(merged.to_dict('records'))
repo['ashwini_gdukuray_justini_utdesai.topCertCompanies'].metadata({'complete': True})
print(repo['ashwini_gdukuray_justini_utdesai.topCertCompanies'].metadata())
repo.logout()
endTime = datetime.datetime.now()
return {"start": startTime, "end": endTime}
@staticmethod
def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):
'''
Create the provenance document describing everything happening
in this script. Each run of the script will generate a new
document describing that invocation event.
'''
# Set up the database connection.
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('ashwini_gdukuray_justini_utdesai', 'ashwini_gdukuray_justini_utdesai')
doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in <folder>#<filename> format.
doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in <user>#<collection> format.
doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.
doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.
this_script = doc.agent('alg:ashwini_gdukuray_justini_utdesai#topCertifiedCompanies',
{prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})
topCompanies = doc.entity('dat:ashwini_gdukuray_justini_utdesai#topCompanies',
{'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource',
'ont:Extension': 'json'})
masterList = doc.entity('dat:ashwini_gdukuray_justini_utdesai#masterList',
{'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource',
'ont:Extension': 'json'})
topCertCompanies = doc.entity('dat:ashwini_gdukuray_justini_utdesai#topCertCompanies',
{'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource',
'ont:Extension': 'json'})
act = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)
doc.wasAssociatedWith(act, this_script)
doc.usage(act, topCompanies, startTime, None,
{prov.model.PROV_TYPE: 'ont:Retrieval',
'ont:Query': '?type=Animal+Found&$select=type,latitude,longitude,OPEN_DT'
}
)
doc.usage(act, masterList, startTime, None,
{prov.model.PROV_TYPE: 'ont:Retrieval',
'ont:Query': '?type=Animal+Found&$select=type,latitude,longitude,OPEN_DT'
}
)
doc.wasAttributedTo(topCertCompanies, this_script)
doc.wasGeneratedBy(topCertCompanies, act, endTime)
doc.wasDerivedFrom(topCertCompanies, topCompanies, act, act, act)
doc.wasDerivedFrom(topCertCompanies, masterList, act, act, act)
repo.logout()
return doc
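# Illustrative helper (an assumption, not used by the class above) that spells
# out the business-ID normalisation applied in execute(): upper-case, strip
# whitespace and drop punctuation so that names from both sources can be
# joined on B_ID.
def _normalize_business_id(name):
    # e.g. "Acme Co., Ltd." -> "ACMECOLTD"
    return (name.upper().strip()
            .replace(' ', '').replace('.', '').replace(',', '').replace('-', ''))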
'''
# This is example code you might use for debugging this module.
# Please remove all top-level function calls before submitting.
example.execute()
doc = example.provenance()
print(doc.get_provn())
print(json.dumps(json.loads(doc.serialize()), indent=4))
'''
## eof
|
the-stack_0_10775 | # Copyright (C) 2020 The Electrum developers
# Copyright (C) 2021 The DECENOMY Core Developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QListWidget, QListWidgetItem
from electrum.i18n import _
from electrum.network import Network
from electrum.bip39_recovery import account_discovery
from electrum.logging import get_logger
from .util import WindowModalDialog, MessageBoxMixin, TaskThread, Buttons, CancelButton, OkButton
_logger = get_logger(__name__)
class Bip39RecoveryDialog(WindowModalDialog):
ROLE_ACCOUNT = Qt.UserRole
def __init__(self, parent: QWidget, get_account_xpub, on_account_select):
self.get_account_xpub = get_account_xpub
self.on_account_select = on_account_select
WindowModalDialog.__init__(self, parent, _('BIP39 Recovery'))
self.setMinimumWidth(400)
vbox = QVBoxLayout(self)
self.content = QVBoxLayout()
self.content.addWidget(QLabel(_('Scanning common paths for existing accounts...')))
vbox.addLayout(self.content)
self.ok_button = OkButton(self)
self.ok_button.clicked.connect(self.on_ok_button_click)
self.ok_button.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(self), self.ok_button))
self.finished.connect(self.on_finished)
self.show()
self.thread = TaskThread(self)
self.thread.finished.connect(self.deleteLater) # see #3956
self.thread.add(self.recovery, self.on_recovery_success, None, self.on_recovery_error)
def on_finished(self):
self.thread.stop()
def on_ok_button_click(self):
item = self.list.currentItem()
account = item.data(self.ROLE_ACCOUNT)
self.on_account_select(account)
def recovery(self):
network = Network.get_instance()
coroutine = account_discovery(network, self.get_account_xpub)
return network.run_from_another_thread(coroutine)
def on_recovery_success(self, accounts):
self.clear_content()
if len(accounts) == 0:
self.content.addWidget(QLabel(_('No existing accounts found.')))
return
self.content.addWidget(QLabel(_('Choose an account to restore.')))
self.list = QListWidget()
for account in accounts:
item = QListWidgetItem(account['description'])
item.setData(self.ROLE_ACCOUNT, account)
self.list.addItem(item)
self.list.clicked.connect(lambda: self.ok_button.setEnabled(True))
self.content.addWidget(self.list)
def on_recovery_error(self, exc_info):
self.clear_content()
self.content.addWidget(QLabel(_('Error: Account discovery failed.')))
_logger.error(f"recovery error", exc_info=exc_info)
def clear_content(self):
for i in reversed(range(self.content.count())):
self.content.itemAt(i).widget().setParent(None)
|
the-stack_0_10778 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def make_many_types(apps, schema_editor):
"""
    Copies each Organization's single organization_type and investor_type
    values into the corresponding many-to-many fields
    (organization_types and investor_types).
"""
Organization = apps.get_model('organization', 'Organization')
for organization in Organization.objects.all():
if organization.organization_type:
organization.organization_types.add(organization.organization_type)
if organization.investor_type:
organization.investor_types.add(organization.investor_type)
class Migration(migrations.Migration):
dependencies = [
('organization', '0015_auto_20160405_1139'),
]
operations = [
migrations.RunPython(make_many_types),
]
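    # Sketch of a reversible variant (an assumption; this migration is
    # currently one-way). Django's RunPython accepts a reverse callable,
    # e.g. a no-op:
    #
    #   operations = [
    #       migrations.RunPython(make_many_types, migrations.RunPython.noop),
    #   ]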
|
the-stack_0_10781 | import logging
from typing import Text, Any, Dict, Optional, List
from rasa.core.constants import DEFAULT_REQUEST_TIMEOUT
from rasa.core.nlg.generator import NaturalLanguageGenerator
from rasa.core.trackers import DialogueStateTracker, EventVerbosity
from rasa.utils.endpoints import EndpointConfig
import os
logger = logging.getLogger(__name__)
NLG_QUERY = """
query(
$template: StringOrListOfStrings!
$arguments: Any
$tracker: ConversationInput
$channel: NlgRequestChannel
) {
getResponse(
template: $template
arguments: $arguments
tracker: $tracker
channel: $channel
) {
text
metadata
...on QuickReplyPayload { buttons { title, type, ...on WebUrlButton { url } ...on PostbackButton { payload } } }
...on ImagePayload { image }
...on CustomPayload { buttons { title, type, ...on WebUrlButton { url } ...on PostbackButton { payload } }, elements, attachment, image, custom }
}
}
"""
def nlg_response_format_spec():
"""Expected response schema for an NLG endpoint.
Used for validation of the response returned from the NLG endpoint."""
return {
"type": "object",
"properties": {
"text": {"type": ["string", "null"]},
"buttons": {"type": ["array", "null"], "items": {"type": "object"}},
"elements": {"type": ["array", "null"], "items": {"type": "object"}},
"attachment": {"type": ["object", "null"]},
"image": {"type": ["string", "null"]},
},
}
def nlg_request_format_spec():
"""Expected request schema for requests sent to an NLG endpoint."""
return {
"type": "object",
"properties": {
"template": {"type": "string"},
"arguments": {"type": "object"},
"tracker": {
"type": "object",
"properties": {
"sender_id": {"type": "string"},
"slots": {"type": "object"},
"latest_message": {"type": "object"},
"latest_event_time": {"type": "number"},
"paused": {"type": "boolean"},
"events": {"type": "array"},
},
},
"channel": {"type": "object", "properties": {"name": {"type": "string"}}},
},
}
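def _example_validate_response():
    """Illustrative check (not part of the original module) that a hand-written
    payload satisfies nlg_response_format_spec(); the payload contents are
    made-up assumptions."""
    from jsonschema import validate
    sample_response = {
        "text": "Hello!",
        "buttons": [{"title": "Yes", "type": "postback", "payload": "/affirm"}],
        "elements": None,
        "attachment": None,
        "image": None,
    }
    validate(sample_response, nlg_response_format_spec())
    return sample_response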
def nlg_request_format(
template_name: Text,
tracker: DialogueStateTracker,
output_channel: Text,
**kwargs: Any,
) -> Dict[Text, Any]:
"""Create the json body for the NLG json body for the request."""
tracker_state = tracker.current_state(EventVerbosity.ALL)
return {
"template": template_name,
"arguments": kwargs,
"tracker": tracker_state,
"channel": {"name": output_channel},
}
class GraphQLNaturalLanguageGenerator(NaturalLanguageGenerator):
"""Like Rasa's CallbackNLG, but queries Botfront's GraphQL endpoint"""
def __init__(self, **kwargs) -> None:
endpoint_config = kwargs.get("endpoint_config")
self.nlg_endpoint = endpoint_config
async def generate(
self,
template_name: Text,
tracker: DialogueStateTracker,
output_channel: Text,
**kwargs: Any,
) -> List[Dict[Text, Any]]:
fallback_language_slot = tracker.slots.get("fallback_language")
fallback_language = fallback_language_slot.initial_value if fallback_language_slot else None
language = tracker.latest_message.metadata.get("language") or fallback_language
body = nlg_request_format(
template_name,
tracker,
output_channel,
**kwargs,
language=language,
projectId=os.environ.get("BF_PROJECT_ID"),
)
logger.debug(
"Requesting NLG for {} from {}."
"".format(template_name, self.nlg_endpoint.url)
)
try:
if "graphql" in self.nlg_endpoint.url:
from sgqlc.endpoint.http import HTTPEndpoint
response = HTTPEndpoint(self.nlg_endpoint.url)(NLG_QUERY, body)
response = response["data"]["getResponse"]
else:
response = await self.nlg_endpoint.request(
method="post", json=body, timeout=DEFAULT_REQUEST_TIMEOUT
)
response = response[0] # legacy route, use first message in seq
except Exception as e:
logger.error("NLG web endpoint returned an invalid response: {}".format(e))
return {"text": template_name}
if self.validate_response(response):
return response
else:
logger.error("NLG web endpoint returned an invalid response.")
return {"text": template_name}
@staticmethod
def validate_response(content: Optional[Dict[Text, Any]]) -> bool:
"""Validate the NLG response. Raises exception on failure."""
from jsonschema import validate
from jsonschema import ValidationError
try:
if content is None or content == "":
# means the endpoint did not want to respond with anything
return True
else:
validate(content, nlg_response_format_spec())
return True
except ValidationError as e:
e.message += (
". Failed to validate NLG response from API, make sure your "
"response from the NLG endpoint is valid. "
"For more information about the format please consult the "
"`nlg_response_format_spec` function from this same module: "
"https://github.com/RasaHQ/rasa/blob/master/rasa/core/nlg/callback.py#L12"
)
raise e
|
the-stack_0_10782 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import os
from .tools import logger, run_process, try_to_wrap_executable, find_output_arg, execute, check_program
from .constants import CC, CXX, WASI_SYSROOT, STUBS_SYSTEM_LIB, STUBS_SYSTEM_PREAMBLE
def run(args):
main_program = CXX if args[0].endswith("wasmc++") else CC
check_program(main_program)
if '--version' in args:
print('''wasienv (wasienv gcc/clang-like replacement)''')
return 0
if len(args) == 1 and args[0] == '-v': # -v with no inputs
# autoconf likes to see 'GNU' in the output to enable shared object support
print('wasienv (wasienv gcc/clang-like replacement + linker emulating GNU ld)', file=sys.stderr)
code = run_process([main_program, '-v'], check=False).returncode
return code
has_target = any([arg.startswith("--target") for arg in args])
# Flags decided by following: https://github.com/wasienv/wasienv/pull/8
args.append("--no-standard-libraries")
args.append("-Wl,--export-all")
args.append("-Wl,--no-entry")
if not has_target:
args.append("--target=wasm32-unknown-unknown")
proc_args = [main_program]+args[1:]
return_code = run_process(proc_args, check=False)
target, outargs = find_output_arg(args)
if target:
try_to_wrap_executable(target)
return return_code
if __name__ == '__main__':
execute(run)
|
the-stack_0_10783 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
import platform
from shutil import rmtree
from setuptools import find_packages, setup, Command
if platform.system() == 'Windows':
import py2exe
NAME = 'sysl'
DESCRIPTION = 'System specification language with compiler and code generator'
URL = 'https://github.com/anz-bank/sysl'
EMAIL = '[email protected]'
AUTHOR = 'ANZ'
REQUIRED = [
'httplib2',
'urllib3==1.24.2',
'openpyxl',
'plantuml',
'protobuf',
'pylint',
'PyYAML',
'requests',
'six'
]
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
about = {}
with open(os.path.join(here, 'src', NAME, '__version__.py')) as f:
exec(f.read(), about)
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
url=URL,
package_dir={'': 'src'},
packages=find_packages('src', exclude=('tests',)),
entry_points={
'console_scripts': [
'sysl=sysl.core.__main__:main',
'reljam=sysl.reljam.reljam:main',
],
},
install_requires=REQUIRED,
include_package_data=True,
license='Apache 2.0',
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
],
extras_require={
'dev': [
'pytest',
'flake8',
]
},
# py2exe
options={'py2exe': {
'bundle_files': 1,
'dll_excludes': ['w9xpopen.exe', 'libstdc++-6.dll', 'libgcc_s_dw2-1.dll']
}},
console=[{
'script': 'src/sysl/core/__main__.py',
'dest_base': 'sysl',
'icon_resources': [(1, 'docs/favicon.ico')]
}, {
'script': 'src/sysl/reljam/__main__.py',
'dest_base': 'reljam',
'icon_resources': [(1, 'docs/favicon.ico')]
}],
zipfile=None
)
|
the-stack_0_10784 | """Provide access to Python's configuration information. The specific
configuration variables available depend heavily on the platform and
configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.
Written by: Fred L. Drake, Jr.
Email: <[email protected]>
"""
__revision__ = "$Id$"
import os
import re
import string
import sys
from distutils.errors import DistutilsPlatformError
# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
# Path to the base directory of the project. On Windows the binary may
# live in project/PCBuild9. If we're dealing with an x64 Windows build,
# it'll live in project/PCbuild/amd64.
if sys.executable:
project_base = os.path.dirname(os.path.abspath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
project_base = os.getcwd()
if os.name == "nt" and "pcbuild" in project_base[-8:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in project_base[-10:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir,
os.path.pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in project_base[-14:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir,
os.path.pardir))
# set for cross builds
if "_PYTHON_PROJECT_BASE" in os.environ:
# this is the build directory, at least for posix
project_base = os.path.normpath(os.environ["_PYTHON_PROJECT_BASE"])
# python_build: (Boolean) if true, we're either building Python or
# building an extension with an un-installed Python, so we use
# different (hard-wired) directories.
# Setup.local is available for Makefile builds including VPATH builds,
# Setup.dist is available on Windows
def _python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(project_base, "Modules", fn)):
return True
return False
python_build = _python_build()
def get_python_version():
"""Return a string containing the major and minor Python version,
leaving off the patchlevel. Sample return values could be '1.5'
or '2.2'.
"""
return sys.version[:3]
def get_python_inc(plat_specific=0, prefix=None):
"""Return the directory containing installed Python header files.
If 'plat_specific' is false (the default), this is the path to the
non-platform-specific header files, i.e. Python.h and so on;
otherwise, this is the path to platform-specific header files
(namely pyconfig.h).
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
if python_build:
if sys.executable:
buildir = os.path.dirname(sys.executable)
else:
# sys.executable can be empty if argv[0] has been changed
# and Python is unable to retrieve the real program name
buildir = os.getcwd()
if plat_specific:
# python.h is located in the buildir
inc_dir = buildir
else:
# the source dir is relative to the buildir
srcdir = os.path.abspath(os.path.join(buildir,
get_config_var('srcdir')))
# Include is located in the srcdir
inc_dir = os.path.join(srcdir, "Include")
return inc_dir
return os.path.join(prefix, "include", "python" + get_python_version())
elif os.name == "nt":
return os.path.join(prefix, "include")
elif os.name == "os2":
return os.path.join(prefix, "Include")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its C header files "
"on platform '%s'" % os.name)
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"""Return the directory containing the Python library (standard or
site additions).
If 'plat_specific' is true, return the directory containing
platform-specific modules, i.e. any module from a non-pure-Python
module distribution; otherwise, return the platform-shared library
directory. If 'standard_lib' is true, return the directory
containing standard Python library modules; otherwise, return the
directory for site-specific modules.
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
libpython = os.path.join(prefix,
"lib", "python" + get_python_version())
if standard_lib:
return libpython
else:
return os.path.join(libpython, "site-packages")
elif os.name == "nt":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
if get_python_version() < "2.2":
return prefix
else:
return os.path.join(prefix, "Lib", "site-packages")
elif os.name == "os2":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
return os.path.join(prefix, "Lib", "site-packages")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its library "
"on platform '%s'" % os.name)
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
if sys.platform == "darwin":
# Perform first-time customization of compiler-related
# config vars on OS X now that we know we need a compiler.
# This is primarily to support Pythons from binary
# installers. The kind and paths to build tools on
# the user system may vary significantly from the system
# that Python itself was built on. Also the user OS
# version and build tools may not support the same set
# of CPU architectures for universal builds.
global _config_vars
# Use get_config_var() to ensure _config_vars is initialized.
if not get_config_var('CUSTOMIZED_OSX_COMPILER'):
import _osx_support
_osx_support.customize_compiler(_config_vars)
_config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
(cc, cxx, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \
get_config_vars('CC', 'CXX', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SO', 'AR',
'ARFLAGS')
if 'CC' in os.environ:
newcc = os.environ['CC']
if (sys.platform == 'darwin'
and 'LDSHARED' not in os.environ
and ldshared.startswith(cc)):
# On OS X, if CC is overridden, use that as the default
# command for LDSHARED as well
ldshared = newcc + ldshared[len(cc):]
cc = newcc
if 'CXX' in os.environ:
cxx = os.environ['CXX']
if 'LDSHARED' in os.environ:
ldshared = os.environ['LDSHARED']
if 'CPP' in os.environ:
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
cflags = cflags + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']
cflags = cflags + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
if 'AR' in os.environ:
ar = os.environ['AR']
if 'ARFLAGS' in os.environ:
archiver = ar + ' ' + os.environ['ARFLAGS']
else:
archiver = ar + ' ' + ar_flags
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc,
archiver=archiver)
compiler.shared_lib_extension = so_ext
def get_config_h_filename():
"""Return full pathname of installed pyconfig.h file."""
if python_build:
if os.name == "nt":
inc_dir = os.path.join(project_base, "PC")
else:
inc_dir = project_base
else:
inc_dir = get_python_inc(plat_specific=1)
if get_python_version() < '2.2':
config_h = 'config.h'
else:
# The name of the config.h file changed in 2.2
config_h = 'pyconfig.h'
return os.path.join(inc_dir, config_h)
def get_makefile_filename():
"""Return full pathname of installed Makefile from the Python build."""
if python_build:
return os.path.join(project_base, "Makefile")
lib_dir = get_python_lib(plat_specific=1, standard_lib=1)
return os.path.join(lib_dir, "config", "Makefile")
def parse_config_h(fp, g=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if g is None:
g = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
#
while 1:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try: v = int(v)
except ValueError: pass
g[n] = v
else:
m = undef_rx.match(line)
if m:
g[m.group(1)] = 0
return g
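def _example_parse_config_h():
    # Illustrative sketch (not part of distutils): parse a tiny config.h-style
    # snippet from an in-memory file object. The macro names are made up.
    try:
        from StringIO import StringIO          # Python 2
    except ImportError:
        from io import StringIO                # Python 3
    snippet = StringIO("#define HAVE_FOO 1\n/* #undef HAVE_BAR */\n")
    return parse_config_h(snippet)             # -> {'HAVE_FOO': 1, 'HAVE_BAR': 0}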
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
def parse_makefile(fn, g=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
from distutils.text_file import TextFile
fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1)
if g is None:
g = {}
done = {}
notdone = {}
while 1:
line = fp.readline()
if line is None: # eof
break
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
while notdone:
for name in notdone.keys():
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
del notdone[name]
else:
# bogus variable reference; just drop it since we can't deal
del notdone[name]
fp.close()
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
g.update(done)
return g
def expand_makefile_vars(s, vars):
"""Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
'string' according to 'vars' (a dictionary mapping variable names to
values). Variables not present in 'vars' are silently expanded to the
empty string. The variable values in 'vars' should not contain further
variable expansions; if 'vars' is the output of 'parse_makefile()',
you're fine. Returns a variable-expanded version of 's'.
"""
# This algorithm does multiple expansion, so if vars['foo'] contains
# "${bar}", it will expand ${foo} to ${bar}, and then expand
# ${bar}... and so forth. This is fine as long as 'vars' comes from
# 'parse_makefile()', which takes care of such expansions eagerly,
# according to make's variable expansion semantics.
while 1:
m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
if m:
(beg, end) = m.span()
s = s[0:beg] + vars.get(m.group(1)) + s[end:]
else:
break
return s
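def _example_expand_makefile_vars():
    # Quick illustration (not part of distutils) of Makefile-style expansion;
    # the variable names below are arbitrary examples.
    makefile_vars = {'prefix': '/usr/local', 'bindir': '${prefix}/bin'}
    return expand_makefile_vars('$(bindir)/python', makefile_vars)
    # -> '/usr/local/bin/python'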
_config_vars = None
def _init_posix():
"""Initialize the module as appropriate for POSIX systems."""
# _sysconfigdata is generated at build time, see the sysconfig module
from _sysconfigdata import build_time_vars
global _config_vars
_config_vars = {}
_config_vars.update(build_time_vars)
def _init_nt():
"""Initialize the module as appropriate for NT"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['SO'] = '.pyd'
g['EXE'] = ".exe"
g['VERSION'] = get_python_version().replace(".", "")
g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable))
global _config_vars
_config_vars = g
def _init_os2():
"""Initialize the module as appropriate for OS/2"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['SO'] = '.pyd'
g['EXE'] = ".exe"
global _config_vars
_config_vars = g
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform. Generally this includes
everything needed to build extensions and install both pure modules and
extensions. On Unix, this means every variable defined in Python's
installed Makefile; on Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _config_vars
if _config_vars is None:
func = globals().get("_init_" + os.name)
if func:
func()
else:
_config_vars = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# Distutils.
_config_vars['prefix'] = PREFIX
_config_vars['exec_prefix'] = EXEC_PREFIX
# OS X platforms require special customization to handle
# multi-architecture, multi-os-version installers
if sys.platform == 'darwin':
import _osx_support
_osx_support.customize_config_vars(_config_vars)
if args:
vals = []
for name in args:
vals.append(_config_vars.get(name))
return vals
else:
return _config_vars
def get_config_var(name):
"""Return the value of a single variable using the dictionary
returned by 'get_config_vars()'. Equivalent to
get_config_vars().get(name)
"""
return get_config_vars().get(name)
|
the-stack_0_10785 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import copy
import warnings
import paddle
from paddle.fluid.framework import dygraph_only
from paddle.fluid import compiler
from .role_maker import UserDefinedRoleMaker, PaddleCloudRoleMaker, RoleMakerBase
from .strategy_compiler import StrategyCompiler
from .distributed_strategy import DistributedStrategy
from .meta_optimizer_factory import MetaOptimizerFactory
from .runtime_factory import RuntimeFactory
from paddle.fluid.wrapped_decorator import wrap_decorator
from paddle.fluid.dygraph import parallel_helper
def _inited_runtime_handler_(func):
def __impl__(*args, **kwargs):
cls = args[0]
if cls._runtime_handle is None:
raise ValueError("Fleet can not find suitable runtime handler")
return func(*args, **kwargs)
return __impl__
def _is_non_distributed_check_(func):
def __impl__(*args, **kwargs):
cls = args[0]
if cls._role_maker is not None and cls._role_maker._is_non_distributed(
) is True:
warnings.warn(
"%s() function doesn't work when use non_distributed fleet." %
(func.__name__))
return
return func(*args, **kwargs)
return __impl__
inited_runtime_handler = wrap_decorator(_inited_runtime_handler_)
is_non_distributed_check = wrap_decorator(_is_non_distributed_check_)
class Fleet(object):
"""
Unified API for distributed training of PaddlePaddle
    Please refer to https://github.com/PaddlePaddle/FleetX for details
Returns:
Fleet: A Fleet instance
Example for collective training:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.distributed.fleet as fleet
fleet.init(is_collective=True)
strategy = fleet.DistributedStrategy()
optimizer = paddle.optimizer.SGD(learning_rate=0.001)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
# do distributed training
Example for parameter server training:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
fleet.init(strategy=strategy)
optimizer = paddle.optimizer.SGD(learning_rate=0.001)
optimizer = fleet.distributed_optimizer(optimizer)
if fleet.is_first_worker():
print("this is first worker")
print("current node index: {}".format(fleet.worker_index()))
print("total number of worker num: {}".format(fleet.worker_num()))
if fleet.is_worker():
print("this is worker")
print("worker endpoints: {}".format(fleet.worker_endpoints(to_string=True)))
print("server num: {}".format(fleet.server_num()))
print("server endpoints: {}".format(fleet.server_endpoints(to_string=True)))
if fleet.is_server():
print("this is server")
fleet.stop_worker()
"""
def __init__(self):
self._role_maker = None
self.strategy_compiler = None
self._is_collective = False
self._runtime_handle = None
self._util = None
self._context = {}
def init(self, role_maker=None, is_collective=False, strategy=None):
"""
Initialize role_maker in Fleet.
        This function is responsible for setting up the distributed architecture
        that you want to run your code on.
Args:
role_maker (RoleMakerBase, optional): A ``RoleMakerBase`` containing the configuration
                of environment variables related to distributed training. If you do not initialize
                the role maker yourself, it will be automatically initialized to PaddleCloudRoleMaker.
The default value is None.
            is_collective (Boolean, optional): A ``Boolean`` variable that determines whether the program
                runs on CPU or GPU. False means distributed training on CPU, and True means on GPU.
                The default value is False.
strategy (DistributedStrategy): Extra properties for distributed training.
For details, please refer to paddle.distributed.fleet.DistributedStrategy. Default: None.
Returns:
None
Examples1:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
Examples2:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init(is_collective=True)
Examples3:
.. code-block:: python
import paddle.distributed.fleet as fleet
role = fleet.PaddleCloudRoleMaker()
fleet.init(role)
Examples4:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
fleet.init(strategy=strategy)
"""
if strategy is None:
strategy = DistributedStrategy()
self._user_defined_strategy = copy.deepcopy(strategy)
if role_maker is None:
if isinstance(is_collective, bool):
self._is_collective = is_collective
self._role_maker = PaddleCloudRoleMaker(
is_collective=self._is_collective)
else:
raise ValueError(
"`is_collective` should be instance of `bool`, but got {}".
format(type(is_collective)))
else:
if isinstance(role_maker, RoleMakerBase):
self._role_maker = role_maker
else:
raise ValueError(
"`role_maker` should be subclass of `RoleMakerBase`, but got {}".
format(type(role_maker)))
self._role_maker._generate_role()
import paddle.distributed.fleet as fleet
fleet.util._set_role_maker(self._role_maker)
self.strategy_compiler = StrategyCompiler()
if self._role_maker._is_non_distributed() and self._is_collective:
if paddle.fluid.core.is_compiled_with_cuda():
gpus_num = paddle.fluid.core.get_cuda_device_count()
if gpus_num != 1:
raise ValueError(
"CUDA_VISIBLE_DEVICES shoule be set only 1 card if you use `python` to launch fleet program."
)
if paddle.fluid.framework.in_dygraph_mode():
if self.worker_num() == 1:
return
if parallel_helper._is_parallel_ctx_initialized():
warnings.warn(
"The dygraph parallel environment has been initialized.")
else:
paddle.distributed.init_parallel_env()
def is_first_worker(self):
"""
Check whether the node is the first instance of worker.
Returns:
bool: True if this is the first node of worker,
False if not.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
fleet.is_first_worker()
"""
return self._role_maker._is_first_worker()
def worker_index(self):
"""
Get current worker index.
Returns:
int: node id
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
fleet.worker_index()
"""
return self._role_maker._worker_index()
def worker_num(self):
"""
Get current total worker number.
Returns:
int: worker numbers
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
fleet.worker_num()
"""
return self._role_maker._worker_num()
def is_worker(self):
"""
Check whether the node is an instance of worker.
Returns:
bool: True if this is a node of worker,
False if not.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
fleet.is_worker()
"""
return self._role_maker._is_worker()
def worker_endpoints(self, to_string=False):
"""
Get current worker endpoints, such as ["127.0.0.1:1001", "127.0.0.1:1002"].
Returns:
            list/string: worker endpoints
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
fleet.worker_endpoints()
"""
if to_string:
return ",".join(self._role_maker._get_trainer_endpoints())
else:
return self._role_maker._get_trainer_endpoints()
def server_num(self):
"""
        Get current total server number.
Returns:
int: server number
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
fleet.server_num()
"""
return len(self._role_maker._get_pserver_endpoints())
def server_index(self):
"""
Get current server index.
Returns:
int: node id
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
fleet.server_index()
"""
return self._role_maker._server_index()
def server_endpoints(self, to_string=False):
"""
Get current server endpoints, such as ["127.0.0.1:1001", "127.0.0.1:1002"].
Returns:
list/string: server endpoints
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
fleet.server_endpoints()
"""
if to_string:
return ",".join(self._role_maker._get_pserver_endpoints())
else:
return self._role_maker._get_pserver_endpoints()
def is_server(self):
"""
Check whether the node is an instance of server.
Returns:
bool: True if this is a node of server,
False if not.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
fleet.is_server()
"""
return self._role_maker._is_server(
) or self._role_maker._is_heter_worker()
def barrier_worker(self):
"""
barrier all workers
Returns:
None
"""
self._role_maker._barrier("worker")
@is_non_distributed_check
@inited_runtime_handler
def init_worker(self):
"""
initialize `Communicator` for parameter server training.
Returns:
None
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
# build net
# fleet.distributed_optimizer(...)
fleet.init_worker()
"""
self._runtime_handle._init_worker()
@is_non_distributed_check
@inited_runtime_handler
def init_server(self, *args, **kwargs):
"""
init_server executor to initialize startup program,
        if `args` is not empty, it will run load_persistables for incremental training.
Returns:
None
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
# build net
# fleet.distributed_optimizer(...)
fleet.init_server()
"""
self._runtime_handle._init_server(*args, **kwargs)
@is_non_distributed_check
@inited_runtime_handler
def run_server(self):
"""
run server will run pserver main program with executor.
Returns:
None
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
# build net
# fleet.distributed_optimizer(...)
if fleet.is_server():
                    fleet.init_server()
                    fleet.run_server()
"""
self._runtime_handle._run_server()
@is_non_distributed_check
@inited_runtime_handler
def stop_worker(self):
"""
stop `Communicator` and give training complete notice to parameter server.
Returns:
None
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
# build net
# fleet.distributed_optimizer(...)
                fleet.init_worker()
                # do training
                fleet.stop_worker()
"""
self._runtime_handle._stop_worker()
def save_inference_model(self,
executor,
dirname,
feeded_var_names,
target_vars,
main_program=None,
export_for_deployment=True):
"""
save inference model for inference.
Returns:
None
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
fleet.init()
# build net
# fleet.distributed_optimizer(...)
fleet.init_server()
"""
self._runtime_handle._save_inference_model(
executor, dirname, feeded_var_names, target_vars, main_program,
export_for_deployment)
def save_persistables(self, executor, dirname, main_program=None, mode=1):
"""
saves all persistable tensors from :code:`main_program` to
        the folder :code:`dirname`.
The :code:`dirname` is used to specify the folder where persistable tensors
are going to be saved. If you would like to save tensors in separate
        files, set :code:`filename` to None.
Args:
executor(Executor): The executor to run for saving persistable tensors.
You can refer to :ref:`api_guide_executor_en` for
more details.
dirname(str, optional): The saving directory path.
When you need to save the parameter to the memory, set it to None.
            main_program(Program, optional): The program whose persistable tensors will
be saved. Default: None.
Returns:
None
Examples:
            .. code-block:: python
import paddle
paddle.enable_static()
import paddle.distributed.fleet as fleet
fleet.init()
# build net
# fleet.distributed_optimizer(...)
exe = paddle.static.Executor(paddle.CPUPlace())
fleet.save_persistables(exe, "dirname", paddle.static.default_main_program())
"""
self._runtime_handle._save_persistables(executor, dirname, main_program,
mode)
def distributed_optimizer(self, optimizer, strategy=None):
"""
Optimizer for distributed training.
For the distributed training, this method would rebuild a new instance of DistributedOptimizer.
Which has basic Optimizer function and special features for distributed training.
Args:
            optimizer(Optimizer): The optimizer to be wrapped for distributed training.
strategy(DistributedStrategy): Extra properties for distributed optimizer.
It is recommended to use DistributedStrategy in fleet.init(). The strategy
here is for compatibility. If the strategy in fleet.distributed_optimizer()
is not None, then it will overwrite the DistributedStrategy in fleet.init(),
which will take effect in distributed training.
Returns:
Fleet: instance of fleet.
Examples:
.. code-block:: python
import paddle
import paddle.distributed.fleet as fleet
fleet.init(is_collective=True)
strategy = fleet.DistributedStrategy()
optimizer = paddle.optimizer.SGD(learning_rate=0.001)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
"""
self.user_defined_optimizer = optimizer
if strategy is not None:
warnings.warn(
"It is recommended to use DistributedStrategy "
"in fleet.init(). The strategy here is only for compatibility. "
"If the strategy in fleet.distributed_optimizer() is "
"not None, then it will overwrite the DistributedStrategy in fleet.init(), "
"which will take effect in distributed training.")
self._user_defined_strategy = copy.deepcopy(strategy)
self._context = {}
return self
@dygraph_only
def distributed_model(self, model):
"""
Return distributed data parallel model (Only work in dygraph mode)
Args:
            model (Layer): the user-defined model which inherits Layer.
Returns:
distributed data parallel model which inherits Layer.
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
from paddle.distributed import fleet
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear1 = nn.Linear(10, 10)
self._linear2 = nn.Linear(10, 1)
def forward(self, x):
return self._linear2(self._linear1(x))
# 1. initialize fleet environment
fleet.init(is_collective=True)
# 2. create layer & optimizer
layer = LinearNet()
loss_fn = nn.MSELoss()
adam = paddle.optimizer.Adam(
learning_rate=0.001, parameters=layer.parameters())
# 3. get data_parallel model using fleet
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
# 4. run layer
inputs = paddle.randn([10, 10], 'float32')
outputs = dp_layer(inputs)
labels = paddle.randn([10, 1], 'float32')
loss = loss_fn(outputs, labels)
print("loss:", loss.numpy())
loss.backward()
adam.step()
adam.clear_grad()
"""
assert model is not None
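        # The buffer sizes below (in MB) come from the user-defined strategy and
        # control how gradients are fused into communication buffers for all-reduce.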
self.model = paddle.DataParallel(
model,
comm_buffer_size=self._user_defined_strategy.fuse_grad_size_in_MB,
last_comm_buffer_size=self._user_defined_strategy.
last_comm_group_size_MB)
return self.model
@dygraph_only
def state_dict(self):
"""
Get state dict information from optimizer.
(Only work in dygraph mode)
Returns:
state_dict(dict) : dict contains all the Tensor used by optimizer
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.distributed import fleet
fleet.init(is_collective=True)
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
layer = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
state_dict = adam.state_dict()
"""
# imitate target optimizer retrieval
return self.user_defined_optimizer.state_dict()
@dygraph_only
def set_state_dict(self, state_dict):
"""
Load optimizer state dict.
(Only work in dygraph mode)
Args:
state_dict(dict) : Dict contains all the Tensor needed by optimizer
Returns:
None
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.distributed import fleet
fleet.init(is_collective=True)
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
layer = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
state_dict = adam.state_dict()
paddle.save(state_dict, "paddle_dy")
para_state_dict = paddle.load("paddle_dy")
adam.set_state_dict(para_state_dict)
"""
# imitate target optimizer retrieval
return self.user_defined_optimizer.set_state_dict(state_dict)
@dygraph_only
def set_lr(self, value):
"""
Set the value of the learning rate manually in the optimizer.
(Only work in dygraph mode)
Args:
value (float|Tensor): the value of learning rate
Returns:
None
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.distributed import fleet
fleet.init(is_collective=True)
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
layer = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
for i in range(5):
adam.set_lr(lr_list[i])
lr = adam.get_lr()
print("current lr is {}".format(lr))
# Print:
# current lr is 0.2
# current lr is 0.3
# current lr is 0.4
# current lr is 0.5
# current lr is 0.6
"""
# imitate target optimizer retrieval
return self.user_defined_optimizer.set_lr(value)
@dygraph_only
def get_lr(self):
"""
Get current step learning rate.
(Only work in dygraph mode)
Returns:
float: The learning rate of the current step.
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.distributed import fleet
fleet.init(is_collective=True)
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
layer = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
lr = adam.get_lr()
print(lr) # 0.01
"""
# imitate target optimizer retrieval
return self.user_defined_optimizer.get_lr()
@dygraph_only
def step(self):
"""
Execute the optimizer once.
(Only work in dygraph mode)
Returns:
None
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
from paddle.distributed import fleet
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear1 = nn.Linear(10, 10)
self._linear2 = nn.Linear(10, 1)
def forward(self, x):
return self._linear2(self._linear1(x))
# 1. initialize fleet environment
fleet.init(is_collective=True)
# 2. create layer & optimizer
layer = LinearNet()
loss_fn = nn.MSELoss()
adam = paddle.optimizer.Adam(
learning_rate=0.001, parameters=layer.parameters())
# 3. get data_parallel model using fleet
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
# 4. run layer
inputs = paddle.randn([10, 10], 'float32')
outputs = dp_layer(inputs)
labels = paddle.randn([10, 1], 'float32')
loss = loss_fn(outputs, labels)
print("loss:", loss.numpy())
loss.backward()
adam.step()
adam.clear_grad()
"""
# imitate target optimizer retrieval
return self.user_defined_optimizer.step()
@dygraph_only
def clear_grad(self):
"""
Clear the gradients of all optimized parameters for model.
(Only work in dygraph mode)
Returns:
None
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
from paddle.distributed import fleet
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear1 = nn.Linear(10, 10)
self._linear2 = nn.Linear(10, 1)
def forward(self, x):
return self._linear2(self._linear1(x))
# 1. initialize fleet environment
fleet.init(is_collective=True)
# 2. create layer & optimizer
layer = LinearNet()
loss_fn = nn.MSELoss()
adam = paddle.optimizer.Adam(
learning_rate=0.001, parameters=layer.parameters())
# 3. get data_parallel model using fleet
adam = fleet.distributed_optimizer(adam)
dp_layer = fleet.distributed_model(layer)
# 4. run layer
inputs = paddle.randn([10, 10], 'float32')
outputs = dp_layer(inputs)
labels = paddle.randn([10, 1], 'float32')
loss = loss_fn(outputs, labels)
print("loss:", loss.numpy())
loss.backward()
adam.step()
adam.clear_grad()
"""
# imitate target optimizer retrieval
return self.user_defined_optimizer.clear_grad()
def _final_strategy(self):
if "valid_strategy" not in self._context:
print(
"WARNING: You may need to call minimize function before this function is called"
)
return {}
else:
return self._context["valid_strategy"]
def _get_applied_meta_list(self):
if "applied_meta_list" not in self._context:
print(
"WARNING: You may need to call minimize function before _get_applied_meta_list called"
)
return []
else:
return self._context["applied_meta_list"]
def _get_applied_graph_list(self):
if "applied_graph_list" not in self._context:
print(
"WARNING: You may need to call minimize function before _get_applied_graph_list called"
)
return []
else:
return self._context["applied_graph_list"]
def minimize(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None):
"""
Add distributed operations to minimize ``loss`` by updating ``parameter_list``.
Args:
loss (Tensor): A ``Tensor`` containing the value to minimize.
startup_program (Program, optional): :ref:`api_fluid_Program` for
initializing parameters in ``parameter_list``. The default value
is None, at this time :ref:`api_fluid_default_startup_program` will be used.
parameter_list (Iterable, optional): Iterable of ``Tensor`` or ``Tensor.name`` to update
to minimize ``loss``. The default value is None, at this time all parameters
will be updated.
no_grad_set (set, optional): Set of ``Tensor`` or ``Tensor.name`` that don't need
to be updated. The default value is None.
Returns:
tuple: tuple (optimize_ops, params_grads), A list of operators appended
by minimize and a list of (param, grad) tensor pairs, param is
``Parameter``, grad is the gradient value corresponding to the parameter.
The returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
indicate program pruning. If so, the program will be pruned by ``feed`` and
``fetch_list`` before run, see details in ``Executor``.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.distributed.fleet as fleet
import paddle.nn.functional as F
hid_dim = 10
label_dim = 2
input_x = paddle.static.data(name='x', shape=[None, 13], dtype='float32')
input_y = paddle.static.data(name='y', shape=[None, 1], dtype='int64')
fc_1 = paddle.static.nn.fc(x=input_x, size=hid_dim, activation='tanh')
fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim, activation='tanh')
prediction = paddle.static.nn.fc(x=[fc_2], size=label_dim, activation='softmax')
cost = F.cross_entropy(input=prediction, label=input_y)
avg_cost = paddle.mean(x=cost)
fleet.init(is_collective=True)
strategy = fleet.DistributedStrategy()
optimizer = paddle.optimizer.SGD(learning_rate=0.001)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
optimizer.minimize(avg_cost)
# for more examples, please reference https://github.com/PaddlePaddle/FleetX
"""
context = {}
context["user_defined_strategy"] = copy.deepcopy(
self._user_defined_strategy)
if paddle.fluid.framework.in_dygraph_mode():
# imitate target optimizer retrieval
target_opt = self.user_defined_optimizer
self._context = context
return target_opt.minimize(loss)
# cache original feed forward program
self.origin_main_program = loss.block.program
context["origin_main_program"] = self.origin_main_program
context["loss"] = loss
        if startup_program is None:
self.origin_startup_program = \
paddle.static.default_startup_program().clone(for_test=False)
startup_program = paddle.static.default_startup_program()
else:
self.origin_startup_program = \
startup_program.clone(for_test=False)
context["origin_startup_program"] = startup_program
context["role_maker"] = self._role_maker
# compile time
distributed_optimizer_list = \
MetaOptimizerFactory()._get_valid_meta_optimizers(
self.user_defined_optimizer)
context["user_defined_strategy"] = copy.deepcopy(
self._user_defined_strategy)
copy_user_defined_strategy = copy.deepcopy(self._user_defined_strategy)
# trigger the auto-parallel in very strict condition
# strategy = DistributedStrategy()
# strategy.auto = True
# optimizer = paddle.optimizer.SGD(learning_rate=0.1)
# optimizer = fleet.distributed_optimizer(optimizer, strategy)
if copy_user_defined_strategy._is_strict_auto():
# turn on all the strategy for each optimizer
for opt in distributed_optimizer_list:
opt._enable_strategy(copy_user_defined_strategy, context)
valid_optimizer_list = []
valid_graph_optimizer_list = []
can_not_apply_optimizer_list = []
# recall meta optimizers for ranking
for opt in distributed_optimizer_list:
opt._set_basic_info(loss, self._role_maker,
self.user_defined_optimizer,
copy_user_defined_strategy)
if opt._can_apply() and not opt._is_graph_out():
valid_optimizer_list.append(opt)
elif opt._can_apply() and opt._is_graph_out():
valid_graph_optimizer_list.append(opt)
else:
can_not_apply_optimizer_list.append(opt)
# combine recalled meta optimizers to be a valid meta optimizer
meta_optimizer, graph_optimizer = \
self.strategy_compiler.generate_optimizer(
loss, self._role_maker, self.user_defined_optimizer,
copy_user_defined_strategy, valid_optimizer_list,
valid_graph_optimizer_list)
valid_strategy = self.strategy_compiler._get_valid_strategy(
copy_user_defined_strategy, can_not_apply_optimizer_list)
context["valid_strategy"] = copy.deepcopy(valid_strategy)
applied_meta_list = self.strategy_compiler._get_applied_meta_list()
applied_graph_list = self.strategy_compiler._get_applied_graph_list()
context['applied_meta_list'] = applied_meta_list
context['applied_graph_list'] = applied_graph_list
self._context = context
self.valid_strategy = valid_strategy
self.valid_strategy._enable_env()
optimize_ops = []
params_grads = []
if self._role_maker._is_non_distributed() and not self._is_collective:
if self._runtime_handle is None:
self._runtime_handle = RuntimeFactory()._create_runtime(context)
compiled_program = compiler.CompiledProgram(
self.origin_main_program).with_data_parallel(
loss_name=loss.name, share_vars_from=None)
loss.block.program._graph = compiled_program
return self.user_defined_optimizer.minimize(
loss, startup_program, parameter_list, no_grad_set=no_grad_set)
if meta_optimizer:
optimize_ops, params_grads = meta_optimizer.minimize(
loss, startup_program, parameter_list, no_grad_set=no_grad_set)
default_program = paddle.static.default_main_program()
if id(default_program) != id(loss.block.program):
paddle.fluid.framework.switch_main_program(loss.block.program)
else:
optimize_ops, params_grads = self.user_defined_optimizer.minimize(
loss, startup_program, parameter_list, no_grad_set=no_grad_set)
context["program_optimize_ops"] = optimize_ops
context["program_params_grads"] = params_grads
if graph_optimizer:
optimize_ops, params_grads = graph_optimizer.minimize(
loss, startup_program, parameter_list, no_grad_set=no_grad_set)
# since we do not encourage users to use graph operations
# if a graph optimizer takes effect, mostly
        # optimize_ops and params_grads are None
# i.e. users can not modify current computation graph anymore
context["graph_optimize_ops"] = optimize_ops
context["graph_optimize_grads"] = params_grads
if self._runtime_handle is None:
self._runtime_handle = RuntimeFactory()._create_runtime(context)
import paddle.distributed.fleet as fleet
fleet.util._set_strategy(context["valid_strategy"])
return optimize_ops, params_grads
|
the-stack_0_10786 | # Examples - Simple
from dataclasses import dataclass
from flask import Flask
from miko import Manager
app = Flask(__name__)
manager = Manager()
@dataclass
class User:
name: str
comment: str
@app.get("/")
def index():
return manager.render(
"index.html",
user=User(
"麻弓=タイム", "小さい胸は貴重なのよっ、ステータスなのよっ!?"
),
logged_in=True
)
app.run() |
the-stack_0_10787 | r"""
Yang-Baxter Graphs
"""
#*****************************************************************************
# Copyright (C) 2009 Franco Saliola <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.graphs.digraph import DiGraph
from sage.structure.sage_object import SageObject
from sage.misc.lazy_attribute import lazy_attribute
from sage.combinat.partition import Partition
from sage.combinat.permutation import Permutation
def YangBaxterGraph(partition=None, root=None, operators=None):
r"""
Construct the Yang-Baxter graph from ``root`` by repeated application of
``operators``, or the Yang-Baxter graph associated to ``partition``.
INPUT:
The user needs to provide either ``partition`` or both ``root`` and
``operators``, where
- ``partition`` -- a partition of a positive integer
- ``root`` -- the root vertex
- ``operator`` - a function that maps vertices `u` to a list of
tuples of the form `(v, l)` where `v` is a successor of `u` and `l` is
the label of the edge from `u` to `v`.
OUTPUT:
- Either:
- :class:`YangBaxterGraph_partition` - if partition is defined
- :class:`YangBaxterGraph_generic` - if partition is ``None``
EXAMPLES:
The Yang-Baxter graph defined by a partition `[p_1,\dots,p_k]` is
the labelled directed graph with vertex set obtained by
bubble-sorting `(p_k-1,p_k-2,\dots,0,\dots,p_1-1,p_1-2,\dots,0)`;
there is an arrow from `u` to `v` labelled by `i` if `v` is
obtained by swapping the `i`-th and `(i+1)`-th elements of `u`.
For example, if the partition is `[3,1]`, then we begin with
`(0,2,1,0)` and generate all tuples obtained from it by swapping
two adjacent entries if they are increasing::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: bubbleswaps = [SwapIncreasingOperator(i) for i in range(3)]
sage: Y = YangBaxterGraph(root=(0,2,1,0), operators=bubbleswaps); Y
Yang-Baxter graph with root vertex (0, 2, 1, 0)
sage: Y.vertices()
[(2, 0, 1, 0), (2, 1, 0, 0), (0, 2, 1, 0)]
The ``partition`` keyword is a shorthand for the above construction.
::
sage: Y = YangBaxterGraph(partition=[3,1]); Y
Yang-Baxter graph of [3, 1], with top vertex (0, 2, 1, 0)
sage: Y.vertices()
[(0, 2, 1, 0), (2, 0, 1, 0), (2, 1, 0, 0)]
The permutahedron can be realized as a Yang-Baxter graph.
::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: swappers = [SwapIncreasingOperator(i) for i in range(3)]
sage: Y = YangBaxterGraph(root=(1,2,3,4), operators=swappers); Y
Yang-Baxter graph with root vertex (1, 2, 3, 4)
sage: Y.plot()
Graphics object consisting of 97 graphics primitives
The Cayley graph of a finite group can be realized as a Yang-Baxter graph.
::
sage: def left_multiplication_by(g):
... return lambda h : h*g
sage: G = CyclicPermutationGroup(4)
sage: operators = [ left_multiplication_by(gen) for gen in G.gens() ]
sage: Y = YangBaxterGraph(root=G.identity(), operators=operators); Y
Yang-Baxter graph with root vertex ()
sage: Y.plot(edge_labels=False)
Graphics object consisting of 9 graphics primitives
sage: G = SymmetricGroup(4)
sage: operators = [left_multiplication_by(gen) for gen in G.gens()]
sage: Y = YangBaxterGraph(root=G.identity(), operators=operators); Y
Yang-Baxter graph with root vertex ()
sage: Y.plot(edge_labels=False)
Graphics object consisting of 96 graphics primitives
AUTHORS:
- Franco Saliola (2009-04-23)
"""
if partition is None:
return YangBaxterGraph_generic(root=root, operators=operators)
else:
return YangBaxterGraph_partition(partition=Partition(partition))
##### General class for Yang-Baxter Graphs ################################
class YangBaxterGraph_generic(SageObject):
def __init__(self, root, operators):
r"""
A class to model the Yang-Baxter graph defined by ``root`` and
``operators``.
INPUT:
- ``root`` -- the root vertex of the graph
- ``operators`` -- a list of callables that map vertices to (new)
vertices.
.. NOTE::
This is a lazy implementation: the digraph is only computed
when it is needed.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(4)]
sage: Y = YangBaxterGraph(root=(1,0,2,1,0), operators=ops); Y
Yang-Baxter graph with root vertex (1, 0, 2, 1, 0)
sage: loads(dumps(Y)) == Y
True
AUTHORS:
- Franco Saliola (2009-04-23)
"""
self._root = root
self._operators = operators
def _successors(self, u):
r"""
        Return a list of tuples of the form `(op(u), op)`, where op
is one of the operators defining ``self``.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(4)]
sage: Y = YangBaxterGraph(root=(1,0,2,1,0), operators=ops)
sage: Y._successors((1,0,2,1,0))
[((1, 2, 0, 1, 0), Swap-if-increasing at position 1)]
"""
successors = set()
for op in self._operators:
v = op(u)
if v != u:
successors.add((v, op))
return list(successors)
def __repr__(self):
r"""
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(2)]
sage: Y = YangBaxterGraph(root=(1,2,3), operators=ops)
sage: Y.__repr__()
'Yang-Baxter graph with root vertex (1, 2, 3)'
"""
return "Yang-Baxter graph with root vertex %s" % (self._root,)
@lazy_attribute
def _digraph(self):
r"""
Constructs the underlying digraph and stores the result as an
attribute.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(2)]
sage: Y = YangBaxterGraph(root=(1,2,3), operators=ops)
sage: Y._digraph
Digraph on 6 vertices
"""
digraph = DiGraph()
digraph.add_vertex(self._root)
queue = [self._root]
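        # Breadth-first closure of the root under the operators: every new
        # vertex is queued and each operator application adds a labelled edge.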
while queue:
u = queue.pop()
for (v, l) in self._successors(u):
if v not in digraph:
queue.append(v)
digraph.add_edge(u, v, l)
return digraph
def __hash__(self):
r"""
TESTS::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(2)]
sage: Y = YangBaxterGraph(root=(1,2,3), operators=ops)
sage: hash(Y)
1028420699 # 32-bit
7656306018247013467 # 64-bit
"""
# TODO: this is ugly but unavoidable: the Yang Baxter graphs are being
# used in containers but are mutable.
return hash(self._digraph.copy(immutable=True))
def __eq__(self, other):
r"""
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(4)]
sage: Y1 = YangBaxterGraph(root=(1,0,2,1,0), operators=ops)
sage: Y2 = YangBaxterGraph(root=(2,0,2,1,0), operators=ops)
sage: Y3 = YangBaxterGraph(root=(1,0,2,1,0), operators=ops)
sage: Y1.__eq__(Y2)
False
sage: Y2.__eq__(Y2)
True
sage: Y1.__eq__(Y1)
True
sage: Y3.__eq__(Y1)
True
sage: Y3.__eq__(Y2)
False
"""
return type(self) is type(other) and self._digraph.__eq__(other._digraph)
def __ne__(self, other):
r"""
Test non-equality.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(4)]
sage: Y1 = YangBaxterGraph(root=(1,0,2,1,0), operators=ops)
sage: Y2 = YangBaxterGraph(root=(2,0,2,1,0), operators=ops)
sage: Y3 = YangBaxterGraph(root=(1,0,2,1,0), operators=ops)
sage: Y1.__ne__(Y2)
True
sage: Y2.__ne__(Y2)
False
sage: Y1.__ne__(Y1)
False
sage: Y3.__ne__(Y1)
False
sage: Y3.__ne__(Y2)
True
"""
return not self.__eq__(other)
def __iter__(self):
r"""
Returns an iterator of the vertices in ``self``.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(4)]
sage: Y = YangBaxterGraph(root=(1,0,2,1,0), operators=ops)
sage: uniq(Y.__iter__())
[(1, 0, 2, 1, 0), (1, 2, 0, 1, 0), (1, 2, 1, 0, 0), (2, 1, 0, 1, 0), (2, 1, 1, 0, 0)]
"""
return self._digraph.vertex_iterator()
def __len__(self):
r"""
Returns the number of vertices in ``self``.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(4)]
sage: Y = YangBaxterGraph(root=(1,0,2,1,0), operators=ops)
sage: Y.__len__()
5
sage: ops = [SwapIncreasingOperator(i) for i in range(5)]
sage: Y = YangBaxterGraph(root=(0,1,0,2,1,0), operators=ops)
sage: Y.__len__()
16
"""
return self._digraph.num_verts()
def __copy__(self):
r"""
Returns a copy of ``self``.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(3)]
sage: Y = YangBaxterGraph(root=(1,0,2,1,0), operators=ops); Y
Yang-Baxter graph with root vertex (1, 0, 2, 1, 0)
sage: B = copy(Y); B
Yang-Baxter graph with root vertex (1, 0, 2, 1, 0)
sage: Y is B
False
sage: Y == B
True
"""
from copy import copy
Y = self.__class__(self._root, self._operators)
Y._digraph = copy(self._digraph)
return Y
def _edges_in_bfs(self):
r"""
Returns an iterator of the edges of the digraph traversed in a
breadth-first search of the vertices beginning at ``self.root()``.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(4)]
sage: Y = YangBaxterGraph(root=(1,0,2,1,0), operators=ops)
sage: list(Y._edges_in_bfs())
[((1, 0, 2, 1, 0), (1, 2, 0, 1, 0), Swap-if-increasing at position 1), ((1, 2, 0, 1, 0), (1, 2, 1, 0, 0), Swap-if-increasing at position 2), ((1, 2, 0, 1, 0), (2, 1, 0, 1, 0), Swap-if-increasing at position 0), ((2, 1, 0, 1, 0), (2, 1, 1, 0, 0), Swap-if-increasing at position 2)]
"""
digraph = self._digraph
seen = {}
queue = [self._root]
seen[self._root] = True
while queue:
u = queue.pop()
for w in digraph.neighbor_out_iterator(u):
if w not in seen:
seen[w] = True
queue.append(w)
yield (u,w,digraph.edge_label(u,w))
def root(self):
r"""
Returns the root vertex of ``self``.
If ``self`` is the Yang-Baxter graph of the partition
`[p_1,p_2,\dots,p_k]`, then this is the vertex
`(p_k-1,p_k-2,\dots,0,\dots,p_1-1,p_1-2,\dots,0)`.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(4)]
sage: Y = YangBaxterGraph(root=(1,0,2,1,0), operators=ops)
sage: Y.root()
(1, 0, 2, 1, 0)
sage: Y = YangBaxterGraph(root=(0,1,0,2,1,0), operators=ops)
sage: Y.root()
(0, 1, 0, 2, 1, 0)
sage: Y = YangBaxterGraph(root=(1,0,3,2,1,0), operators=ops)
sage: Y.root()
(1, 0, 3, 2, 1, 0)
sage: Y = YangBaxterGraph(partition=[3,2])
sage: Y.root()
(1, 0, 2, 1, 0)
"""
return self._root
def successors(self, v):
r"""
Return the successors of the vertex ``v``.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(4)]
sage: Y = YangBaxterGraph(root=(1,0,2,1,0), operators=ops)
sage: Y.successors(Y.root())
[(1, 2, 0, 1, 0)]
sage: Y.successors((1, 2, 0, 1, 0))
[(1, 2, 1, 0, 0), (2, 1, 0, 1, 0)]
"""
return [a for (a,b) in self._successors(v)]
def plot(self, *args, **kwds):
r"""
Plots ``self`` as a digraph.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(4)]
sage: Y = YangBaxterGraph(root=(1,0,2,1,0), operators=ops)
sage: Y.plot()
Graphics object consisting of 16 graphics primitives
sage: Y.plot(edge_labels=False)
Graphics object consisting of 11 graphics primitives
"""
if "edge_labels" not in kwds:
kwds["edge_labels"] = True
if "vertex_labels" not in kwds:
kwds["vertex_labels"] = True
return self._digraph.plot(*args, **kwds)
def vertices(self):
r"""
Returns the vertices of ``self``.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(3)]
sage: Y = YangBaxterGraph(root=(0,2,1,0), operators=ops)
sage: Y.vertices()
[(2, 0, 1, 0), (2, 1, 0, 0), (0, 2, 1, 0)]
"""
return list(self)
def edges(self):
r"""
Returns the (labelled) edges of ``self``.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(3)]
sage: Y = YangBaxterGraph(root=(0,2,1,0), operators=ops)
sage: Y.edges()
[((0, 2, 1, 0), (2, 0, 1, 0), Swap-if-increasing at position 0), ((2, 0, 1, 0), (2, 1, 0, 0), Swap-if-increasing at position 1)]
"""
return self._digraph.edges()
def vertex_relabelling_dict(self, v, relabel_operator):
r"""
Return a dictionary pairing vertices ``u`` of ``self`` with
the object obtained from ``v`` by applying the
``relabel_operator`` along a path from the root to ``u``. Note
that the root is paired with ``v``.
INPUT:
- ``v`` -- an object
- ``relabel_operator`` -- function mapping a vertex and a label to
the image of the vertex
OUTPUT:
- dictionary pairing vertices with the corresponding image of ``v``
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(3)]
sage: Y = YangBaxterGraph(root=(0,2,1,0), operators=ops)
sage: def relabel_operator(op, u):
... i = op.position()
... return u[:i] + u[i:i+2][::-1] + u[i+2:]
sage: Y.vertex_relabelling_dict((1,2,3,4), relabel_operator)
{(0, 2, 1, 0): (1, 2, 3, 4),
(2, 0, 1, 0): (2, 1, 3, 4),
(2, 1, 0, 0): (2, 3, 1, 4)}
"""
relabelling = {self._root:v}
for (u,w,i) in self._edges_in_bfs():
relabelling[w] = relabel_operator(i, relabelling[u])
return relabelling
def relabel_vertices(self, v, relabel_operator, inplace=True):
r"""
Relabel the vertices ``u`` of ``self`` by the object obtained
from ``u`` by applying the ``relabel_operator`` to ``v`` along
a path from ``self.root()`` to ``u``.
Note that the ``self.root()`` is paired with ``v``.
INPUT:
- ``v`` -- tuple, Permutation, CombinatorialObject
- ``inplace`` -- if ``True``, modifies ``self``; otherwise returns a
modified copy of ``self``.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(3)]
sage: Y = YangBaxterGraph(root=(0,2,1,0), operators=ops)
sage: def relabel_op(op, u):
... i = op.position()
... return u[:i] + u[i:i+2][::-1] + u[i+2:]
sage: d = Y.relabel_vertices((1,2,3,4), relabel_op, inplace=False); d
Yang-Baxter graph with root vertex (1, 2, 3, 4)
sage: Y.vertices()
[(2, 0, 1, 0), (2, 1, 0, 0), (0, 2, 1, 0)]
sage: e = Y.relabel_vertices((1,2,3,4), relabel_op); e
sage: Y.vertices()
[(2, 1, 3, 4), (1, 2, 3, 4), (2, 3, 1, 4)]
"""
from copy import copy
relabelling = self.vertex_relabelling_dict(v, relabel_operator)
Y = self if inplace else copy(self)
Y._root = relabelling[Y._root]
Y._digraph.relabel(relabelling, inplace=True)
if inplace is False:
return Y
def relabel_edges(self, edge_dict, inplace=True):
r"""
Relabel the edges of ``self``.
INPUT:
- ``edge_dict`` -- a dictionary keyed by the (unlabelled) edges.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: ops = [SwapIncreasingOperator(i) for i in range(3)]
sage: Y = YangBaxterGraph(root=(0,2,1,0), operators=ops)
sage: def relabel_op(op, u):
... i = op.position()
... return u[:i] + u[i:i+2][::-1] + u[i+2:]
sage: Y.edges()
[((0, 2, 1, 0), (2, 0, 1, 0), Swap-if-increasing at position 0), ((2, 0, 1, 0), (2, 1, 0, 0), Swap-if-increasing at position 1)]
sage: d = {((0,2,1,0),(2,0,1,0)):17, ((2,0,1,0),(2,1,0,0)):27}
sage: Y.relabel_edges(d, inplace=False).edges()
[((0, 2, 1, 0), (2, 0, 1, 0), 17), ((2, 0, 1, 0), (2, 1, 0, 0), 27)]
sage: Y.edges()
[((0, 2, 1, 0), (2, 0, 1, 0), Swap-if-increasing at position 0), ((2, 0, 1, 0), (2, 1, 0, 0), Swap-if-increasing at position 1)]
sage: Y.relabel_edges(d, inplace=True)
sage: Y.edges()
[((0, 2, 1, 0), (2, 0, 1, 0), 17), ((2, 0, 1, 0), (2, 1, 0, 0), 27)]
"""
if inplace:
Y = self
else:
from copy import copy
Y = copy(self)
digraph = Y._digraph
for (u,v,i) in digraph.edges():
digraph.set_edge_label(u,v,edge_dict[u,v])
if not inplace:
return Y
##### Yang-Baxter Graphs defined by a partition ###########################
class YangBaxterGraph_partition(YangBaxterGraph_generic):
def __init__(self, partition):
r"""
A class to model the Yang-Baxter graph of a partition.
The Yang-Baxter graph defined by a partition `[p_1,\dots,p_k]`
is the labelled directed graph with vertex set obtained by
bubble-sorting `(p_k-1,p_k-2,\dots,0,\dots,p_1-1,p_1-2,\dots,0)`;
there is an arrow from `u` to `v` labelled by `i` if `v` is
obtained by swapping the `i`-th and `(i+1)`-th elements of `u`.
.. note::
This is a lazy implementation: the digraph is only computed
when it is needed.
EXAMPLES::
sage: Y = YangBaxterGraph(partition=[3,2,1]); Y
Yang-Baxter graph of [3, 2, 1], with top vertex (0, 1, 0, 2, 1, 0)
sage: loads(dumps(Y)) == Y
True
AUTHORS:
- Franco Saliola (2009-04-23)
"""
self._partition = partition
beta = sorted(self._partition, reverse=True)
root = tuple(sum(map(range,beta), []))[::-1]
operators = [SwapIncreasingOperator(i) for i in range(sum(partition)-1)]
super(YangBaxterGraph_partition, self).__init__(root, operators)
def __repr__(self):
r"""
EXAMPLES::
sage: Y = YangBaxterGraph(partition=[3,2])
sage: Y.__repr__()
'Yang-Baxter graph of [3, 2], with top vertex (1, 0, 2, 1, 0)'
"""
return "Yang-Baxter graph of %s, with top vertex %s" % (self._partition, self._root)
def __copy__(self):
r"""
Returns a copy of ``self``.
EXAMPLES::
sage: Y = YangBaxterGraph(partition=[3,2]); Y
Yang-Baxter graph of [3, 2], with top vertex (1, 0, 2, 1, 0)
sage: B = copy(Y); B
Yang-Baxter graph of [3, 2], with top vertex (1, 0, 2, 1, 0)
sage: Y is B
False
sage: Y == B
True
"""
from copy import copy
Y = self.__class__(self._partition)
Y._digraph = copy(self._digraph)
return Y
@lazy_attribute
def _digraph(self):
r"""
Constructs the underlying digraph and stores the result as an
attribute.
EXAMPLES::
sage: Y = YangBaxterGraph(partition=[2,1])
sage: Y._digraph
Digraph on 2 vertices
sage: Y.edges()
[((0, 1, 0), (1, 0, 0), Swap positions 0 and 1)]
"""
digraph = super(YangBaxterGraph_partition, self)._digraph
for (u,v,op) in digraph.edges():
digraph.set_edge_label(u,v,SwapOperator(op.position()))
return digraph
@lazy_attribute
def _vertex_ordering(self):
r"""
Returns a list of the vertices of ``self``, sorted using
        Python's ``sorted`` method.
EXAMPLES::
sage: Y = YangBaxterGraph(partition=[3,2])
sage: Y._vertex_ordering
[(1, 0, 2, 1, 0), (1, 2, 0, 1, 0), (1, 2, 1, 0, 0), (2, 1, 0, 1, 0), (2, 1, 1, 0, 0)]
"""
return sorted(self._digraph.vertices())
def __iter__(self):
r"""
        Iterate over the vertices of ``self``.
.. NOTE::
The vertices are first sorted using Python's sorted command.
EXAMPLES::
sage: Y = YangBaxterGraph(partition=[3,2])
sage: list(Y.__iter__())
[(1, 0, 2, 1, 0), (1, 2, 0, 1, 0), (1, 2, 1, 0, 0), (2, 1, 0, 1, 0), (2, 1, 1, 0, 0)]
"""
for v in self._vertex_ordering:
yield v
def _swap_operator(self, operator, u):
r"""
Return the image of ``u`` under ``operator``.
INPUT:
- ``i`` -- positive integer between 1 and len(u)-1, inclusive
- ``u`` -- tuple, list, permutation, CombinatorialObject, ....
EXAMPLES::
sage: Y = YangBaxterGraph(partition=[3,1])
sage: from sage.combinat.yang_baxter_graph import SwapOperator
sage: ops = [SwapOperator(i) for i in range(3)]
sage: [Y._swap_operator(op, (1,2,3,4)) for op in ops]
[(2, 1, 3, 4), (1, 3, 2, 4), (1, 2, 4, 3)]
sage: [Y._swap_operator(op, [4,3,2,1]) for op in ops]
[[3, 4, 2, 1], [4, 2, 3, 1], [4, 3, 1, 2]]
sage: [Y._swap_operator(op, Permutation([1,2,3,4])) for op in ops]
[[2, 1, 3, 4], [1, 3, 2, 4], [1, 2, 4, 3]]
"""
return operator(u)
def vertex_relabelling_dict(self, v):
r"""
Return a dictionary pairing vertices ``u`` of ``self`` with the object
obtained from ``v`` by applying transpositions corresponding to the
edges labels along a path from the root to ``u``.
Note that the root is paired with ``v``.
INPUT:
- ``v`` -- an object
OUTPUT:
- dictionary pairing vertices with the corresponding image of ``v``
EXAMPLES::
sage: Y = YangBaxterGraph(partition=[3,1])
sage: Y.vertex_relabelling_dict((1,2,3,4))
{(0, 2, 1, 0): (1, 2, 3, 4),
(2, 0, 1, 0): (2, 1, 3, 4),
(2, 1, 0, 0): (2, 3, 1, 4)}
sage: Y.vertex_relabelling_dict((4,3,2,1))
{(0, 2, 1, 0): (4, 3, 2, 1),
(2, 0, 1, 0): (3, 4, 2, 1),
(2, 1, 0, 0): (3, 2, 4, 1)}
"""
return super(YangBaxterGraph_partition, self).vertex_relabelling_dict(v, self._swap_operator)
def relabel_vertices(self, v, inplace=True):
r"""
Relabel the vertices of ``self`` with the object obtained from
``v`` by applying the transpositions corresponding to the edge
labels along some path from the root to the vertex.
INPUT:
- ``v`` -- tuple, Permutation, CombinatorialObject
- ``inplace`` -- if ``True``, modifies ``self``; otherwise
returns a modified copy of ``self``.
EXAMPLES::
sage: Y = YangBaxterGraph(partition=[3,1]); Y
Yang-Baxter graph of [3, 1], with top vertex (0, 2, 1, 0)
sage: d = Y.relabel_vertices((1,2,3,4), inplace=False); d
Digraph on 3 vertices
sage: Y.vertices()
[(0, 2, 1, 0), (2, 0, 1, 0), (2, 1, 0, 0)]
sage: e = Y.relabel_vertices((1,2,3,4)); e
sage: Y.vertices()
[(1, 2, 3, 4), (2, 1, 3, 4), (2, 3, 1, 4)]
"""
relabelling = self.vertex_relabelling_dict(v)
if inplace:
Y = self
Y._root = relabelling[Y._root]
Y._digraph.relabel(relabelling, inplace=inplace)
Y._vertex_ordering = sorted(Y._digraph.vertices())
return
else:
from copy import copy
Y = copy(self)
Y._root = relabelling[Y._root]
return Y._digraph.relabel(relabelling, inplace=inplace)
##### Some Yang-Baxter operators ##########################################
class SwapOperator(SageObject):
def __init__(self, i):
r"""
The operator that swaps the items in positions ``i`` and ``i+1``.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapOperator
sage: s3 = SwapOperator(3)
sage: s3 == loads(dumps(s3))
True
"""
self._position = i
def __hash__(self):
r"""
TESTS::
sage: from sage.combinat.yang_baxter_graph import SwapOperator
sage: s = [SwapOperator(i) for i in range(3)]
sage: map(hash, s)
[0, 1, 2]
"""
return hash(self._position)
def __cmp__(self, other):
r"""
Compare two swap operators. The comparison is done by comparing the
positions.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapOperator
sage: s = [SwapOperator(i) for i in range(3)]
sage: s[0] == s[0]
True
sage: s[1] < s[0]
False
sage: s[1] < s[2]
True
"""
if type(self) is type(other):
return cmp(self._position, other._position)
else:
return cmp(type(self), type(other))
def __repr__(self):
r"""
Representation string.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapOperator
sage: s3 = SwapOperator(3)
sage: s3.__repr__()
'Swap positions 3 and 4'
"""
return "Swap positions %s and %s" % (self._position, self._position+1)
def __str__(self):
r"""
A short str representation (used, for example, in labelling edges of
graphs).
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapOperator
sage: s3 = SwapOperator(3)
sage: s3.__str__()
'3'
"""
return "%s" % self._position
def __call__(self, u):
r"""
Return the object obtained from swapping the items in positions
``i`` and ``i+1`` of ``u``.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapOperator
sage: s3 = SwapOperator(3)
sage: s3((1,2,3,4,5))
(1, 2, 3, 5, 4)
sage: s3([1,2,3,4,5])
[1, 2, 3, 5, 4]
"""
i = self._position
if isinstance(u, Permutation):
return Permutation(u[:i] + u[i:i+2][::-1] + u[i+2:])
return type(u)(u[:i] + u[i:i+2][::-1] + u[i+2:])
def position(self):
r"""
``self`` is the operator that swaps positions ``i`` and ``i+1``. This
method returns ``i``.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapOperator
sage: s3 = SwapOperator(3)
sage: s3.position()
3
"""
return self._position
class SwapIncreasingOperator(SwapOperator):
def __repr__(self):
r"""
Representation string.
EXAMPLES::
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: s3 = SwapIncreasingOperator(3)
sage: s3.__repr__()
'Swap-if-increasing at position 3'
"""
return "Swap-if-increasing at position %s" % self._position
def __call__(self, u):
r"""
        Return a copy of ``u`` with ``u[i]`` and ``u[i+1]`` swapped if
        ``u[i] < u[i+1]``; otherwise return ``u``.
INPUT:
- ``i`` -- positive integer between ``1`` and ``len(u)-1``, inclusive
- ``u`` -- tuple, list, permutation, CombinatorialObject, ....
EXAMPLES::
sage: Y = YangBaxterGraph(partition=[2,2])
sage: from sage.combinat.yang_baxter_graph import SwapIncreasingOperator
sage: operators = [SwapIncreasingOperator(i) for i in range(3)]
sage: [op((1,2,3,4)) for op in operators]
[(2, 1, 3, 4), (1, 3, 2, 4), (1, 2, 4, 3)]
sage: [op([4,3,2,1]) for op in operators]
[[4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1]]
sage: [op(Permutation([1,3,2,4])) for op in operators]
[[3, 1, 2, 4], [1, 3, 2, 4], [1, 3, 4, 2]]
"""
i = self._position
j = i+1
if u[i] < u[j]:
v = list(u)
(v[j], v[i]) = (v[i], v[j])
if isinstance(u, Permutation):
return Permutation(v)
return type(u)(v)
else:
return u
|
the-stack_0_10791 | import logging
import os
import tempfile
from gensim import corpora
from gensim.parsing.preprocessing import STOPWORDS
from pprint import pprint # pretty-printer
from collections import defaultdict
class Indexer:
def __init__(self):
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
self.TEMP_FOLDER = tempfile.gettempdir()
print('Folder "{}" will be used to save temporary dictionary and corpus.'.format(self.TEMP_FOLDER))
self.documents = None
def set_documents(self, documents):
if documents is None:
self.documents = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
else:
self.documents = documents
def remove_stopwords(self):
# remove common words and tokenize
texts = [[word for word in document.lower().split() if word not in STOPWORDS]
for document in self.documents]
return texts
def clean_low_freq_words(self, texts):
# remove words that appear only once
frequency = defaultdict(int)
for text in texts:
for token in text:
frequency[token] += 1
        texts = [[token for token in text if frequency[token] > 1] for text in texts]
        pprint(texts)
        return texts
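    def build_corpus(self, texts):
        # Minimal sketch of how the imported `corpora` and TEMP_FOLDER could be
        # used: build a gensim Dictionary, persist it, and serialize the
        # bag-of-words corpus. The file names here are illustrative, not a fixed API.
        dictionary = corpora.Dictionary(texts)
        dictionary.save(os.path.join(self.TEMP_FOLDER, 'corpus.dict'))
        corpus = [dictionary.doc2bow(text) for text in texts]
        corpora.MmCorpus.serialize(os.path.join(self.TEMP_FOLDER, 'corpus.mm'), corpus)
        return dictionary, corpus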
if __name__ == "__main__":
indexer = Indexer()
indexer.set_documents(None)
texts = indexer.remove_stopwords()
indexer.clean_low_freq_words(texts)
|
the-stack_0_10793 | from src.utils import *
import numpy as np
class SO3:
# tolerance criterion
TOL = 1e-8
Id = torch.eye(3).cuda().float()
dId = torch.eye(3).cuda().double()
@classmethod # cls: class
def exp(cls, phi):
angle = phi.norm(dim=1, keepdim=True)
mask = angle[:, 0] < cls.TOL
dim_batch = phi.shape[0]
Id = cls.Id.expand(dim_batch, 3, 3)
axis = phi[~mask] / angle[~mask]
c = angle[~mask].cos().unsqueeze(2)# add a 1 on second dimension
s = angle[~mask].sin().unsqueeze(2)
Rot = phi.new_empty(dim_batch, 3, 3)
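        # First-order approximation R = I + [phi]_x for small angles; otherwise
        # Rodrigues' formula R = cos(t) I + (1 - cos(t)) a a^T + sin(t) [a]_x,
        # with t = |phi| and a = phi / t.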
Rot[mask] = Id[mask] + SO3.wedge(phi[mask])
Rot[~mask] = c*Id[~mask] + \
(1-c)*cls.bouter(axis, axis) + s*cls.wedge(axis)
return Rot
@classmethod
def log(cls, Rot):
dim_batch = Rot.shape[0]
Id = cls.Id.expand(dim_batch, 3, 3)
cos_angle = (0.5 * cls.btrace(Rot) - 0.5).clamp(-1., 1.) #min:-1, max:+1
# Clip cos(angle) to its proper domain to avoid NaNs from rounding
# errors
angle = cos_angle.acos()
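        # Inverse of the Rodrigues formula: phi = t / (2 sin t) * vee(R - R^T)
        # with t = arccos((trace(R) - 1) / 2); near t = 0 use vee(R - I) instead.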
mask = angle < cls.TOL
if mask.sum() == 0:
angle = angle.unsqueeze(1).unsqueeze(1)
return cls.vee((0.5 * angle/angle.sin())*(Rot-Rot.transpose(1, 2)))
elif mask.sum() == dim_batch:
# If angle is close to zero, use first-order Taylor expansion
return cls.vee(Rot - Id)
phi = cls.vee(Rot - Id)
phi[~mask] = cls.vee((0.5 * angle[~mask]/angle[~mask].sin()).unsqueeze(
1).unsqueeze(2)*(Rot[~mask] - Rot[~mask].transpose(1, 2)))
return phi
@staticmethod
def vee(Phi):
return torch.stack((Phi[:, 2, 1],
Phi[:, 0, 2],
Phi[:, 1, 0]), dim=1)
@staticmethod
def wedge(phi):
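        # Map phi in R^3 to the skew-symmetric matrix [phi]_x in so(3); this is
        # the inverse of vee above.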
dim_batch = phi.shape[0]
zero = phi.new_zeros(dim_batch)
return torch.stack((zero, -phi[:, 2], phi[:, 1],
phi[:, 2], zero, -phi[:, 0],
-phi[:, 1], phi[:, 0], zero), 1).view(dim_batch,
3, 3)
@classmethod
def from_rpy(cls, roll, pitch, yaw):# rotation sequence z y' x''
return cls.rotz(yaw).bmm(cls.roty(pitch).bmm(cls.rotx(roll)))# tensor.bmm: batch matrix-matrix product,bij * bjk = bik
@classmethod
def rotx(cls, angle_in_radians):
c = angle_in_radians.cos()
s = angle_in_radians.sin()
        mat = c.new_zeros((c.shape[0], 3, 3))  # batch of 3x3 zero matrices with c's dtype and device
mat[:, 0, 0] = 1
mat[:, 1, 1] = c
mat[:, 2, 2] = c
mat[:, 1, 2] = -s
mat[:, 2, 1] = s
return mat
@classmethod
def roty(cls, angle_in_radians):
c = angle_in_radians.cos()
s = angle_in_radians.sin()
mat = c.new_zeros((c.shape[0], 3, 3))
mat[:, 1, 1] = 1
mat[:, 0, 0] = c
mat[:, 2, 2] = c
mat[:, 0, 2] = s
mat[:, 2, 0] = -s
return mat
@classmethod
def rotz(cls, angle_in_radians):
c = angle_in_radians.cos()
s = angle_in_radians.sin()
mat = c.new_zeros((c.shape[0], 3, 3))
mat[:, 2, 2] = 1
mat[:, 0, 0] = c
mat[:, 1, 1] = c
mat[:, 0, 1] = -s
mat[:, 1, 0] = s
return mat
@classmethod
def isclose(cls, x, y):
return (x-y).abs() < cls.TOL
@classmethod
def to_rpy(cls, Rots):
"""Convert a rotation matrix to RPY Euler angles."""
pitch = torch.atan2(-Rots[:, 2, 0],
torch.sqrt(Rots[:, 0, 0]**2 + Rots[:, 1, 0]**2))
yaw = pitch.new_empty(pitch.shape)
roll = pitch.new_empty(pitch.shape)
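        # Gimbal lock: at pitch = +/- pi/2, roll and yaw are coupled, so yaw is
        # fixed to 0 and the remaining rotation is attributed to roll.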
near_pi_over_two_mask = cls.isclose(pitch, np.pi / 2.)
near_neg_pi_over_two_mask = cls.isclose(pitch, -np.pi / 2.)
remainder_inds = ~(near_pi_over_two_mask | near_neg_pi_over_two_mask)
yaw[near_pi_over_two_mask] = 0
roll[near_pi_over_two_mask] = torch.atan2(
Rots[near_pi_over_two_mask, 0, 1],
Rots[near_pi_over_two_mask, 1, 1])
yaw[near_neg_pi_over_two_mask] = 0.
roll[near_neg_pi_over_two_mask] = -torch.atan2(
Rots[near_neg_pi_over_two_mask, 0, 1],
Rots[near_neg_pi_over_two_mask, 1, 1])
sec_pitch = 1/pitch[remainder_inds].cos()
remainder_mats = Rots[remainder_inds]
        yaw[remainder_inds] = torch.atan2(remainder_mats[:, 1, 0] * sec_pitch,
                                          remainder_mats[:, 0, 0] * sec_pitch)
        roll[remainder_inds] = torch.atan2(remainder_mats[:, 2, 1] * sec_pitch,
                                           remainder_mats[:, 2, 2] * sec_pitch)
rpys = torch.cat([roll.unsqueeze(dim=1),
pitch.unsqueeze(dim=1),
yaw.unsqueeze(dim=1)], dim=1)
return rpys
@classmethod
def from_quaternion(cls, quat, ordering='wxyz'):
"""Form a rotation matrix from a unit length quaternion.
Valid orderings are 'xyzw' and 'wxyz'.
"""
        if ordering == 'xyzw':
qx = quat[:, 0]
qy = quat[:, 1]
qz = quat[:, 2]
qw = quat[:, 3]
        elif ordering == 'wxyz':
qw = quat[:, 0]
qx = quat[:, 1]
qy = quat[:, 2]
qz = quat[:, 3]
# Form the matrix
mat = quat.new_empty(quat.shape[0], 3, 3)
qx2 = qx * qx
qy2 = qy * qy
qz2 = qz * qz
mat[:, 0, 0] = 1. - 2. * (qy2 + qz2)
mat[:, 0, 1] = 2. * (qx * qy - qw * qz)
mat[:, 0, 2] = 2. * (qw * qy + qx * qz)
mat[:, 1, 0] = 2. * (qw * qz + qx * qy)
mat[:, 1, 1] = 1. - 2. * (qx2 + qz2)
mat[:, 1, 2] = 2. * (qy * qz - qw * qx)
mat[:, 2, 0] = 2. * (qx * qz - qw * qy)
mat[:, 2, 1] = 2. * (qw * qx + qy * qz)
mat[:, 2, 2] = 1. - 2. * (qx2 + qy2)
return mat
@classmethod
def to_quaternion(cls, Rots, ordering='wxyz'):
"""Convert a rotation matrix to a unit length quaternion.
Valid orderings are 'xyzw' and 'wxyz'.
"""
tmp = 1 + Rots[:, 0, 0] + Rots[:, 1, 1] + Rots[:, 2, 2]
tmp[tmp < 0] = 0
qw = 0.5 * torch.sqrt(tmp)
qx = qw.new_empty(qw.shape[0])
qy = qw.new_empty(qw.shape[0])
qz = qw.new_empty(qw.shape[0])
near_zero_mask = qw.abs() < cls.TOL
if near_zero_mask.sum() > 0:
cond1_mask = near_zero_mask * \
(Rots[:, 0, 0] > Rots[:, 1, 1])*(Rots[:, 0, 0] > Rots[:, 2, 2])
cond1_inds = cond1_mask.nonzero()
if len(cond1_inds) > 0:
cond1_inds = cond1_inds.squeeze()
R_cond1 = Rots[cond1_inds].view(-1, 3, 3) #.view() is subset of .reshape()
d = 2. * torch.sqrt(1. + R_cond1[:, 0, 0] -
R_cond1[:, 1, 1] - R_cond1[:, 2, 2]).view(-1)
qw[cond1_inds] = (R_cond1[:, 2, 1] - R_cond1[:, 1, 2]) / d
qx[cond1_inds] = 0.25 * d
qy[cond1_inds] = (R_cond1[:, 1, 0] + R_cond1[:, 0, 1]) / d
qz[cond1_inds] = (R_cond1[:, 0, 2] + R_cond1[:, 2, 0]) / d
cond2_mask = near_zero_mask * (Rots[:, 1, 1] > Rots[:, 2, 2])
cond2_inds = cond2_mask.nonzero()
if len(cond2_inds) > 0:
cond2_inds = cond2_inds.squeeze()
R_cond2 = Rots[cond2_inds].view(-1, 3, 3)
d = 2. * torch.sqrt(1. + R_cond2[:, 1, 1] -
R_cond2[:, 0, 0] - R_cond2[:, 2, 2]).squeeze()
tmp = (R_cond2[:, 0, 2] - R_cond2[:, 2, 0]) / d
qw[cond2_inds] = tmp
qx[cond2_inds] = (R_cond2[:, 1, 0] + R_cond2[:, 0, 1]) / d
qy[cond2_inds] = 0.25 * d
qz[cond2_inds] = (R_cond2[:, 2, 1] + R_cond2[:, 1, 2]) / d
cond3_mask = near_zero_mask & cond1_mask.logical_not() & cond2_mask.logical_not()
            cond3_inds = cond3_mask
            if cond3_mask.any():
R_cond3 = Rots[cond3_inds].view(-1, 3, 3)
d = 2. * \
torch.sqrt(1. + R_cond3[:, 2, 2] -
R_cond3[:, 0, 0] - R_cond3[:, 1, 1]).squeeze()
qw[cond3_inds] = (R_cond3[:, 1, 0] - R_cond3[:, 0, 1]) / d
qx[cond3_inds] = (R_cond3[:, 0, 2] + R_cond3[:, 2, 0]) / d
qy[cond3_inds] = (R_cond3[:, 2, 1] + R_cond3[:, 1, 2]) / d
qz[cond3_inds] = 0.25 * d
        far_zero_mask = near_zero_mask.logical_not()
        far_zero_inds = far_zero_mask
        if far_zero_mask.any():
            R_fz = Rots[far_zero_inds]
            d = 4. * qw[far_zero_inds]
            qx[far_zero_inds] = (R_fz[:, 2, 1] - R_fz[:, 1, 2]) / d
            qy[far_zero_inds] = (R_fz[:, 0, 2] - R_fz[:, 2, 0]) / d
            qz[far_zero_inds] = (R_fz[:, 1, 0] - R_fz[:, 0, 1]) / d
        # Check ordering last
        if ordering == 'xyzw':
            quat = torch.stack([qx, qy, qz, qw], dim=1)
        elif ordering == 'wxyz':
            quat = torch.stack([qw, qx, qy, qz], dim=1)
        return quat
@classmethod
def normalize(cls, Rots):
U, _, V = torch.svd(Rots)
S = cls.Id.clone().repeat(Rots.shape[0], 1, 1)
S[:, 2, 2] = torch.det(U) * torch.det(V)
return U.bmm(S).bmm(V.transpose(1, 2))
@classmethod
def dnormalize(cls, Rots):
U, _, V = torch.svd(Rots)
S = cls.dId.clone().repeat(Rots.shape[0], 1, 1)
S[:, 2, 2] = torch.det(U) * torch.det(V)
return U.bmm(S).bmm(V.transpose(1, 2))
@classmethod
def qmul(cls, q, r, ordering='wxyz'):
"""
Multiply quaternion(s) q with quaternion(s) r.
"""
terms = cls.bouter(r, q)
w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3]
x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2]
y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1]
z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0]
xyz = torch.stack((x, y, z), dim=1)
xyz[w < 0] *= -1
w[w < 0] *= -1
if ordering == 'wxyz':
q = torch.cat((w.unsqueeze(1), xyz), dim=1)
else:
q = torch.cat((xyz, w.unsqueeze(1)), dim=1)
return q / q.norm(dim=1, keepdim=True)
@staticmethod
def sinc(x):
return x.sin() / x
@classmethod
def qexp(cls, xi, ordering='wxyz'):
"""
Convert exponential maps to quaternions.
"""
theta = xi.norm(dim=1, keepdim=True)
w = (0.5*theta).cos()
xyz = 0.5*cls.sinc(0.5*theta/np.pi)*xi
return torch.cat((w, xyz), 1)
@classmethod
def qlog(cls, q, ordering='wxyz'):
"""
Applies the log map to quaternions.
"""
n = 0.5*torch.norm(q[:, 1:], p=2, dim=1, keepdim=True)
n = torch.clamp(n, min=1e-8)
q = q[:, 1:] * torch.acos(torch.clamp(q[:, :1], min=-1.0, max=1.0))
r = q / n
return r
@classmethod
def qinv(cls, q, ordering='wxyz'):
"Quaternion inverse"
r = torch.empty_like(q)
if ordering == 'wxyz':
r[:, 1:4] = -q[:, 1:4]
r[:, 0] = q[:, 0]
else:
r[:, :3] = -q[:, :3]
r[:, 3] = q[:, 3]
return r
@classmethod
def qnorm(cls, q):
"Quaternion normalization"
return q / q.norm(dim=1, keepdim=True)
@classmethod
def qinterp(cls, qs, t, t_int):
idxs = np.searchsorted(t, t_int)
idxs0 = idxs-1
idxs0[idxs0 < 0] = 0
idxs1 = idxs
idxs1[idxs1 == t.shape[0]] = t.shape[0] - 1
q0 = qs[idxs0]
q1 = qs[idxs1]
tau = torch.zeros_like(t_int)
dt = (t[idxs1]-t[idxs0])[idxs0 != idxs1]
tau[idxs0 != idxs1] = (t_int-t[idxs0])[idxs0 != idxs1]/dt
return cls.slerp(q0, q1, tau)
@classmethod
def slerp(cls, q0, q1, tau, DOT_THRESHOLD = 0.9995):
"""Spherical linear interpolation."""
dot = (q0*q1).sum(dim=1)
q1[dot < 0] = -q1[dot < 0]
dot[dot < 0] = -dot[dot < 0]
q = torch.zeros_like(q0)
tmp = q0 + tau.unsqueeze(1) * (q1 - q0)
tmp = tmp[dot > DOT_THRESHOLD]
q[dot > DOT_THRESHOLD] = tmp / tmp.norm(dim=1, keepdim=True)
theta_0 = dot.acos()
sin_theta_0 = theta_0.sin()
theta = theta_0 * tau
sin_theta = theta.sin()
s0 = (theta.cos() - dot * sin_theta / sin_theta_0).unsqueeze(1)
s1 = (sin_theta / sin_theta_0).unsqueeze(1)
q[dot < DOT_THRESHOLD] = ((s0 * q0) + (s1 * q1))[dot < DOT_THRESHOLD]
return q / q.norm(dim=1, keepdim=True)
@staticmethod
def bouter(vec1, vec2):
"""batch outer product"""
return torch.einsum('bi, bj -> bij', vec1, vec2)
@staticmethod
def btrace(mat):
"""batch matrix trace"""
return torch.einsum('bii -> b', mat)
class CPUSO3:
# tolerance criterion
TOL = 1e-8
Id = torch.eye(3)
@classmethod
def qmul(cls, q, r):
"""
Multiply quaternion(s) q with quaternion(s) r.
"""
# Compute outer product
terms = cls.outer(r, q)
w = terms[0, 0] - terms[1, 1] - terms[2, 2] - terms[3, 3]
x = terms[0, 1] + terms[1, 0] - terms[2, 3] + terms[3, 2]
y = terms[0, 2] + terms[1, 3] + terms[2, 0] - terms[3, 1]
z = terms[0, 3] - terms[1, 2] + terms[2, 1] + terms[3, 0]
return torch.stack((w, x, y, z))
@staticmethod
def outer(a, b):
return torch.einsum('i, j -> ij', a, b)
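# --- Hedged usage sketch (not part of the original file) ---
# A minimal self-check of CPUSO3.qmul with unit quaternions in (w, x, y, z)
# order; torch is assumed to be imported at the top of this module.
# Multiplying by the identity quaternion should return the other operand.
if __name__ == "__main__":
    q_identity = torch.tensor([1.0, 0.0, 0.0, 0.0])      # no rotation
    q_half_turn_z = torch.tensor([0.0, 0.0, 0.0, 1.0])   # 180 degrees about z
    print(CPUSO3.qmul(q_half_turn_z, q_identity))         # expected: tensor([0., 0., 0., 1.])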
|
 the-stack_0_10794 | """ Async tasks for automatic cancellation (orders, groupons) """
import datetime
import os
# import sentry_sdk
from django.utils.timezone import make_aware
# from sentry_sdk.integrations.celery import CeleryIntegration
from django_redis import get_redis_connection
from config.services import get_receipt_by_shop_id, get_msg_notify_by_shop_id
from groupon.constant import GrouponStatus, GrouponAttendStatus
from order.constant import OrderPayType, OrderRefundType, OrderType
from groupon.services import (
get_shop_groupon_by_id,
get_shop_groupon_attend_by_id,
list_paid_details_by_groupon_attend_id
)
from order.selectors import (
list_waitting_order_by_groupon_attend_id,
list_unpaid_order_by_groupon_attend_id,
)
from order.services import (
cancel_order,
# direct_pay,
refund_order,
get_order_by_shop_id_and_id,
direct_pay)
from promotion.events import GrouponEvent
from promotion.services import publish_product_promotion, get_product_promotion, PRODUCT_PROMOTION_KEY
# from .celery_tplmsg_task import (
# GrouponOrderFailAttendTplMsg,
# GrouponOrderRefundFailTplMsg,
# GrouponOrderSuccessAttendTplMsg,
# )
from celery_tasks.main import app
# sentry_sdk.init(SENTRY_DSN, integrations=[CeleryIntegration()])
@app.task(bind=True, name="auto_cancel_order")
def auto_cancel_order(self, shop_id, order_id):
""" 超时未支付(15min)自动取消订单 """
success, _ = cancel_order(shop_id, order_id)
if success:
return
order = get_order_by_shop_id_and_id(shop_id, order_id)
if not order:
return
elif order.order_type == OrderType.GROUPON:
auto_validate_groupon_attend.apply_async(
args=[order.shop_id, order.groupon_attend_id]
)
@app.task(bind=True, name="auto_publish_groupon")
def auto_publish_groupon(self, shop_id, groupon_id):
""" 自动发布拼团事件 """
now = make_aware(datetime.datetime.now())
success, groupon = get_shop_groupon_by_id(shop_id, groupon_id)
if not success:
print("Groupon [id={}] publish failed: {}".format(groupon_id, groupon))
return
if groupon.status != GrouponStatus.ON:
print(
"Groupon [id={}] publish failed: 状态错误{}".format(
groupon_id, groupon.status
)
)
return
if groupon.to_datetime < now:
print(
"Groupon [id={}] publish failed: 已过期{}".format(
groupon_id, groupon.to_datetime
)
)
return
content = {
"id": groupon.id,
"price": round(float(groupon.price), 2),
"to_datetime": groupon.to_datetime.strftime("%Y-%m-%d %H:%M:%S"),
"groupon_type": groupon.groupon_type,
"success_size": groupon.success_size,
"quantity_limit": groupon.quantity_limit,
"succeeded_count": groupon.succeeded_count,
"success_limit": groupon.success_limit,
"succeeded_quantity": int(round(groupon.succeeded_quantity)),
}
event = GrouponEvent(content)
ttl = (groupon.to_datetime - now).total_seconds()
publish_product_promotion(
groupon.shop_id, groupon.product_id, event, ttl=int(ttl)
)
print("Groupon [id={}] publish success".format(groupon.id))
@app.task(bind=True, name="auto_expire_groupon")
def auto_expire_groupon(self, shop_id, groupon_id):
""" 拼团自动过期 """
success, groupon = get_shop_groupon_by_id(shop_id, groupon_id)
if not success:
print("Groupon [id={}] expire failed: {}".format(groupon_id, groupon))
return
if groupon.status == GrouponStatus.EXPIRED:
print(
"Groupon [id={}] expire failed: 状态错误{}".format(
groupon_id, groupon.status
)
)
return
    # A task that fires more than 10 s before the deadline is treated as premature
if groupon.to_datetime - make_aware(datetime.datetime.now()) > datetime.timedelta(
seconds=10
):
print(
"Groupon [id={}] expire failed: 未到过期时间{}".format(
groupon_id, make_aware(datetime.datetime.now())
)
)
return
groupon.set_expired()
groupon.save()
print("Groupon [id={}] expire failed".format(groupon_id))
@app.task(bind=True, name="auto_validate_groupon_attend")
def auto_validate_groupon_attend(
self, shop_id: int, groupon_attend_id: int, force: bool = False
):
""" 自动验证拼团参与,如果满员,走订单直接支付 """
success, groupon_attend = get_shop_groupon_attend_by_id(
shop_id, groupon_attend_id, for_update=True
)
if not success:
raise ValueError(groupon_attend)
if groupon_attend.size < groupon_attend.success_size:
print("拼团验证: 拼团参与{}还未满员".format(groupon_attend_id))
return
if groupon_attend.status != GrouponAttendStatus.WAITTING:
raise ValueError(
"拼团验证: 拼团参与{}状态错误{}".format(groupon_attend_id, groupon_attend.status)
)
paid_attend_details = list_paid_details_by_groupon_attend_id(groupon_attend.id)
if len(paid_attend_details) < groupon_attend.size and not force:
print(
"拼团验证: 拼团参与{}还在等待团员支付,当前支付人数{}".format(
groupon_attend_id, len(paid_attend_details)
)
)
return
waitting_orders = list_waitting_order_by_groupon_attend_id(groupon_attend.id)
if len(waitting_orders) != len(paid_attend_details):
raise ValueError(
"拼团验证: 拼团参与{}付款人数{}和订单人数{}不匹配".format(
groupon_attend_id, len(paid_attend_details), len(waitting_orders)
)
)
promotion = get_product_promotion(shop_id, groupon_attend.groupon.product_id)
pattern = PRODUCT_PROMOTION_KEY.format(
shop_id=shop_id, product_id=groupon_attend.groupon.product_id
)
groupon_attend.set_succeeded()
groupon_attend.groupon.succeeded_count += 1
redis_conn = get_redis_connection("subscribe")
redis_conn.hset(pattern, "succeeded_count", groupon_attend.groupon.succeeded_count)
for waitting_order in waitting_orders:
if promotion and isinstance(promotion, GrouponEvent):
quantity = int(
round(float(waitting_order.amount_net) / float(promotion.price))
)
groupon_attend.groupon.succeeded_quantity += quantity
redis_conn.hset(
pattern,
"succeeded_quantity",
int(groupon_attend.groupon.succeeded_quantity),
)
direct_pay(waitting_order)
print("拼团验证: 拼团参与{}成团成功".format(groupon_attend_id))
groupon_attend.save()
    # Groupon succeeded: send the group-success template message
msg_notify = get_msg_notify_by_shop_id(shop_id)
# if msg_notify.group_success_wx:
# for waitting_order in waitting_orders:
# GrouponOrderSuccessAttendTplMsg.send(order_id=waitting_order.id)
@app.task(bind=True, name="auto_fail_groupon_attend")
def auto_fail_groupon_attend(self, shop_id: int, groupon_attend_id: int, reason: str):
""" 拼团参与自动失败 """
success, groupon_attend = get_shop_groupon_attend_by_id(
shop_id, groupon_attend_id, for_update=True
)
if not success:
raise ValueError(groupon_attend)
if groupon_attend.status != GrouponAttendStatus.WAITTING:
print("拼团失败: 拼团参与{}状态错误{}".format(groupon_attend_id, groupon_attend.status))
return
paid_attend_details = list_paid_details_by_groupon_attend_id(
groupon_attend.id
)
waitting_orders = list_waitting_order_by_groupon_attend_id(
groupon_attend.id
)
if len(waitting_orders) != len(paid_attend_details):
raise ValueError(
"拼团失败: 拼团参与{}付款人数{}和订单人数{}不匹配".format(
groupon_attend_id, len(paid_attend_details), len(waitting_orders)
)
)
groupon_attend.set_failed(reason)
groupon_attend.save()
    # Automatically refund the paid orders of the failed groupon
map_refund_order = {True: [], False: []}
for waitting_order in waitting_orders:
refund_type = (
OrderRefundType.WEIXIN_JSAPI_REFUND
if waitting_order.pay_type == OrderPayType.WEIXIN_JSAPI
else OrderRefundType.UNDERLINE_REFUND
)
success, _ = refund_order(
waitting_order.shop.id, waitting_order, refund_type
)
map_refund_order[success].append(waitting_order.id)
    # Automatically cancel any unpaid orders
unpaid_orders = list_unpaid_order_by_groupon_attend_id(
groupon_attend.id
)
for unpaid_order in unpaid_orders:
cancel_order(unpaid_order.shop_id, unpaid_order.id)
print(
"拼团失败: 拼团参与{},退款成功{},退款失败".format(
groupon_attend_id,
len(map_refund_order.get(True)),
len(map_refund_order.get(False)),
)
)
    # Groupon failed: send the failure-and-refund template message
msg_notify = get_msg_notify_by_shop_id(shop_id)
# if msg_notify.group_failed_wx:
# for order_id in map_refund_order.get(True):
# GrouponOrderFailAttendTplMsg.send(order_id=order_id)
# for order_id in map_refund_order.get(False):
# GrouponOrderRefundFailTplMsg.send(order_id=order_id)
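# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how a caller might enqueue the unpaid-order auto-cancel task;
# the helper name and the 15-minute delay are assumptions mirroring the
# docstring of auto_cancel_order above.
def schedule_auto_cancel(shop_id: int, order_id: int, delay_seconds: int = 15 * 60):
    """Enqueue auto_cancel_order to fire after the unpaid-order timeout."""
    return auto_cancel_order.apply_async(args=[shop_id, order_id], countdown=delay_seconds)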
|
the-stack_0_10795 | # -*- coding: utf-8 -*-
"""Example for gam.AdditiveModel and PolynomialSmoother
This example was written as a test case.
The data generating process is chosen so the parameters are well identified
and estimated.
Created on Fri Nov 04 13:45:43 2011
Author: Josef Perktold
"""
from statsmodels.compat.python import lrange
import numpy as np
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.regression.linear_model import OLS
np.random.seed(8765993)
#seed is chosen for nice result, not randomly
#other seeds are pretty off in the prediction
#DGP: simple polynomial
order = 3
sigma_noise = 0.5
nobs = 1000  # with 1000, OLS and AdditiveModel agree in params to 2 decimals
lb, ub = -3.5, 4#2.5
x1 = np.linspace(lb, ub, nobs)
x2 = np.sin(2*x1)
x = np.column_stack((x1/x1.max()*2, x2))
exog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1)
idx = lrange((order+1)*2)
del idx[order+1]
exog_reduced = exog[:,idx] #remove duplicate constant
y_true = exog.sum(1) / 2.
z = y_true #alias check
d = x
y = y_true + sigma_noise * np.random.randn(nobs)
example = 1
if example == 1:
m = AdditiveModel(d)
m.fit(y)
y_pred = m.results.predict(d)
for ss in m.smoothers:
print(ss.params)
res_ols = OLS(y, exog_reduced).fit()
print(res_ols.params)
#from numpy.testing import assert_almost_equal
#assert_almost_equal(y_pred, res_ols.fittedvalues, 3)
if example > 0:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(exog)
y_pred = m.results.mu# + m.results.alpha #m.results.predict(d)
plt.figure()
plt.subplot(2,2,1)
plt.plot(y, '.', alpha=0.25)
plt.plot(y_true, 'k-', label='true')
    plt.plot(res_ols.fittedvalues, 'g-', label='OLS', lw=2, alpha=.7)
plt.plot(y_pred, 'r-', label='AM')
plt.legend(loc='upper left')
plt.title('gam.AdditiveModel')
counter = 2
for ii, xx in zip(['z', 'x1', 'x2'], [z, x[:,0], x[:,1]]):
sortidx = np.argsort(xx)
#plt.figure()
plt.subplot(2, 2, counter)
plt.plot(xx[sortidx], y[sortidx], '.', alpha=0.25)
plt.plot(xx[sortidx], y_true[sortidx], 'k.', label='true', lw=2)
plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='AM')
plt.legend(loc='upper left')
plt.title('gam.AdditiveModel ' + ii)
counter += 1
plt.show()
|
the-stack_0_10796 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from . import BaseWrapperDataset
class PrependTokenDataset(BaseWrapperDataset):
def __init__(self, dataset, token=None, pad_token=None):
super().__init__(dataset)
self.token = token
self.pad_token = pad_token
if token is not None:
self._sizes = np.array(dataset.sizes) + 1
else:
self._sizes = dataset.sizes
def __getitem__(self, idx):
item = self.dataset[idx]
if self.token is not None:
if self.pad_token is not None:
# char ngram data
item_len = item.size(1)
item = torch.cat([item.new([[self.token] + [self.pad_token]*(item_len-1)]), item])
else:
item = torch.cat([item.new([self.token]), item])
return item
@property
def sizes(self):
return self._sizes
def num_tokens(self, index):
n = self.dataset.num_tokens(index)
if self.token is not None:
n += 1
return n
def size(self, index):
n = self.dataset.size(index)
if self.token is not None:
n += 1
return n
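# --- Hedged usage sketch (not part of the original file) ---
# Typical wrapping of an existing fairseq dataset so every sample starts with
# a BOS token; `base_dataset` and `bos_index` are caller-supplied assumptions.
#
#     wrapped = PrependTokenDataset(base_dataset, token=bos_index)
#     sample = wrapped[0]               # tensor([bos_index, ...original tokens...])
#     n_tokens = wrapped.num_tokens(0)  # original token count + 1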
|
the-stack_0_10797 | import json
import os
from maidfiddler.util.util import BASE_DIR
from maidfiddler.util.config import CONFIG
from maidfiddler.util.logger import logger
import random
current_translation = {}
def tr(obj):
return tr_str(obj.whatsThis())
def tr_str(original):
if "translation" not in current_translation:
return original
parts = original.split(".")
cur = current_translation["translation"]
for arg in parts:
if arg not in cur:
return get_original(original, parts)
cur = cur[arg]
return cur
MINIFY = None
def get_original(s, parts):
global MINIFY
if MINIFY is None:
logger.debug("Fetching MINIFY")
MINIFY = CONFIG.getboolean("Developer", "minify-untranslated-tags", fallback=True)
return s if not MINIFY else parts[-1]
def load_translation(name):
global current_translation
path = os.path.join(BASE_DIR, "translations", name)
logger.debug(f"TL path: {path}")
if not os.path.isfile(path):
return
with open(path, "r", encoding="utf-8-sig") as tl_file:
current_translation = json.load(tl_file)
if "translation" not in current_translation:
logger.warning("translation invalid")
current_translation = {}
return
def get_random_title():
if "titles" not in current_translation or len(current_translation["titles"]) == 0:
return None
return random.choice(current_translation["titles"])
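# --- Hedged usage sketch (not part of the original module) ---
# Typical flow: load a translation file shipped under translations/, then
# resolve dotted tag paths; the file name below is an assumption.
#
#     load_translation("english.json")
#     label = tr_str("main_window.title")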
def get_language_name(path):
if not os.path.isfile(path):
return None
try:
with open(path, "r", encoding="utf-8-sig") as tl_file:
tl = json.load(tl_file)
return tl["info"]["language"]
except:
return None |
the-stack_0_10799 | import pandas as pd
import glob
import csv
import os
import seaborn as sns
import matplotlib.pyplot as plt
from builtins import any
class CrystalBall:
def __init__(self, list_of_csvs:list, csvname_to_colnames_list:dict, csvname_to_IDs:dict, csvname_to_nonIDs:dict, all_IDs:list, all_nonIDs:list, csvname_to_one_ID:list):
# get list of all files in current directory that end in .csv
self.list_of_csvs = list_of_csvs
# create dictionary where csvname maps to colnames
self.csvname_to_colnames_list = csvname_to_colnames_list
# create dictionary where csvname maps to colnames that have the substring "ID"
self.csvname_to_IDs = csvname_to_IDs
# create dictionary where csvname maps to colnames that do not have the substring "ID"
self.csvname_to_nonIDs = csvname_to_nonIDs
# create list of only unique IDs
self.all_IDs = all_IDs
# create list of only unique nonIDs
self.all_nonIDs = all_nonIDs
# create list of all column names (IDs + nonIDs)
self.all_colnames = list(all_IDs.union(all_nonIDs))
# create dictionary that maps out relationship, one csvname to one ID
self.csvname_to_one_ID = csvname_to_one_ID
@classmethod
def run(self, rel_dir):
""" Initialize the Crystal Ball object for a given directory that contains the CSVs.
Parameters
----------
rel_dir : str
- A string that contains the relative directory, which contains the CSVs to analyze.
Returns
--------
CrystalBall
- CrystalBall that has all class variables initialized by this run script.
Examples
--------
.. code-block:: python
relative_directory = './folder1/folder2'
crystalBall = CrystalBall.run(relative_directory)
"""
rel_dir = rel_dir + '/*.csv'
list_of_csvs = sorted(glob.glob(rel_dir))
csvname_to_colnames_list = {}
csvname_to_IDs = {}
csvname_to_nonIDs = {}
all_IDs = set()
all_nonIDs = set()
csvname_to_one_ID = []
for csv_name in list_of_csvs:
with open(csv_name, "rt") as f:
reader = csv.reader(f)
try:
col_names = next(reader)
csvname_to_colnames_list[csv_name] = col_names
ids = []
non_ids = []
for col_name in col_names:
if 'ID' in col_name or 'Id' in col_name:
csvname_to_one_ID.append([os.path.split(csv_name)[1], col_name])
ids.append(col_name)
else:
non_ids.append(col_name)
csvname_to_IDs[csv_name] = ids
csvname_to_nonIDs[csv_name] = non_ids
all_IDs.update(ids)
all_nonIDs.update(non_ids)
continue
                except StopIteration:
                    continue
                except:
                    continue
return CrystalBall(list_of_csvs, csvname_to_colnames_list, csvname_to_IDs, csvname_to_nonIDs, all_IDs, all_nonIDs, csvname_to_one_ID)
def contains(self, keywords: list, all_colnames: list=None) -> list:
""" Check if keywords exist in all_colnames.
- Determine whether a keyword (substring) exists in a given list of column names (strings).
- Note: This search is case sensitive!
Parameters
----------
keywords : list[str]
- List of key words that the user is interested in
all_colnames : list[str]
- List of column names of a table, or for many tables.
- If no argument is provided, this function will use the column names generated when the run method was called.
Returns
-------
list
- Each index corresponds to a keyword.
- For each index, True if substring exists in list of strings, otherwise False.
Examples
--------
>>> colnames = ['id', 'name', 'title']
>>> cb.contains(['name'], colnames)
[True]
>>> cb.contains(['Name'], colnames)
[False]
>>> cb.contains(['name', 'Name'], colnames)
[True, False]
"""
if all_colnames is None:
return [any(keyword in colname for colname in self.all_colnames) for keyword in keywords]
else:
return [any(keyword in colname for colname in all_colnames) for keyword in keywords]
def featureSearch(self, keywords: list, all_colnames: list=None, mode: str='UNION') -> list:
""" Find the columns that contain the keywords.
- Find features (column names) that contain the substrings specified in keywords.
- Note: This search is case sensitive!
Parameters
----------
keywords : list[str]
- List of key words that the user is interested in
colnames : list[str]
- List of column names of a table, or for many tables.
- If no argument is provided, this function will use the column names generated when the run method was called.
Returns
--------
DataFrame
- DataFrame will contain all features (column names) that contains one/all substrings found in keywords.
- DataFrame will be sorted in alphabetical order.
Examples (update example, outputs a DataFrame instead of a list)
--------
>>> colnames = ['id', 'name', 'nameType', 'subSpeciesName', 'title']
>>> cb.featureSearch(['name'], colnames)
['name', 'nameType']
>>> cb.featureSearch(['Name'], colnames)
['subSpeciesName']
>>> cb.featureSearch(['name', 'Name'], colnames)
['name', 'nameType', 'subSpeciesName']
"""
##implement INTERSECTION mode later
def search(keywords, colnames):
suggested_colnames = set()
for colname in colnames:
for keyword in keywords:
if keyword in colname:
suggested_colnames.add(colname)
return pd.DataFrame( {'featureName': sorted(list(suggested_colnames))})
if type(keywords) is not list:
raise Exception('keywords argument expects a list')
        if mode == 'UNION':
if all_colnames is None:
return search(keywords, self.all_colnames)
else:
return search(keywords, all_colnames)
elif mode is "INTERSECTION":
print('to implement later')
def tableSearch(self, keywords, csvname_to_colnames_list=None, mode='UNION'):
""" Find the tables that contain the keywords.
- Find tables that contain column names which have the substrings specified in keywords.
- Note: This search is case sensitive!
Parameters
----------
keywords : list[str]
- List of key words that the user is interested in
csvname_to_colnames_list : dict[str] = list
- Dictionary that maps a string (table name) to a list of column names it contains.
- If no argument is provided, this function will use the dictionary generated when the run method was called.
mode : str
- If mode is UNION, then return all tables that contain at least one keyword.
- If mode is INTERSECTION, then return all tables that contain all the keywords.
Returns
--------
list[str]
- List will contain all tables that contain a match with keywords.
- List will be sorted in alphabetical order.
Examples
--------
>>> csvname_to_colnames_list = {'table1': ['colName1', 'colName2'], 'table2':['colName3', 'colName4']}
>>> cb.tableSearch(['colName1'], csvname_to_colnames_list)
['table1']
>>> cb.tableSearch(['colName3'], csvname_to_colnames_list)
['table2']
>>> cb.tableSearch(['colName1', 'colName2'], csvname_to_colnames_list)
['table1', 'table2']
"""
def columnNamesContainKeyword(keyword, colname_list):
return any(keyword in colname for colname in colname_list)
        if mode == 'UNION':
            if csvname_to_colnames_list is None:
                csvname_to_colnames_list = self.csvname_to_colnames_list
            # Keep a table only when every keyword matches at least one of its column names.
            return [key for key in csvname_to_colnames_list
                    if all(any(keyword in colname for colname in csvname_to_colnames_list[key])
                           for keyword in keywords)]
        elif mode == 'INTERSECTION':
csv_matches = []
if csvname_to_colnames_list is None:
for csvname in self.csvname_to_colnames_list:
keyword_checklist = []
for keyword in keywords:
keyword_checklist.append(columnNamesContainKeyword(keyword, self.csvname_to_colnames_list[csvname]))
if False not in keyword_checklist:
csv_matches.append(csvname)
return sorted(csv_matches)
else:
print("implement later")
def openTable(self, rel_dir, indices=None, encoding='utf-8'):
""" Open the csv that is referenced by the given relative directory.
Parameters
----------
rel_dir : str
- A path to the table that is relative to where the user is running Crystal Ball.
indices : list[int]
- Sets the (multi)index by columns represented by their numerical integer-locations.
Returns
--------
DataFrame
- The DataFrame containing the contents of the csv.
Examples
--------
(link juptyer notebook)
"""
df = pd.read_csv(rel_dir, engine='python', encoding=encoding , error_bad_lines=False)
if indices is not None:
df.set_index(list(df.columns[indices]), inplace=True)
return df
def subTable(self, supertable, chosen_index:list, chosen_columns:list):
""" Create a subtable from a supertable.
Parameters
----------
supertable : DataFrame
- Table from which to select chosen_columns from in order to form a subtable
chosen_index : list[str]
- The column names that will form the new (multi)index for the subtable.
chosen_columns : list[str]
- The column names that will form the new columns for the subtable.
Returns
--------
DataFrame
- DataFrame (the newly-formed subtable) that will have the (multi)index and columns specified in the arguments.
Examples
--------
(link juptyer notebook)
"""
## chosen_columns should default to empty list
# if len(chosen_columns) == 0:
# use all the columns from supertable
combined = chosen_index.copy()
combined.extend(chosen_columns)
subtable = supertable[combined].set_index(chosen_index)
return subtable
def mergeTables(self, tables:list):
""" Sequentially merge a list of tables that all share a common index.
- Merge defaults to using inner joins over the index.
Parameters
----------
tables : list[DataFrame]
- Contains a list of DataFrames that will be merged sequentially.
Returns
--------
DataFrame
- Table that results from sequentially merging the DataFrames given in the argument.
Examples
--------
(link juptyer notebook)
"""
# replace sequential mergeing with concat...
# TO IMPLEMENT LATER: other types of joins, merging by non-index
def chooseLargestString(string_list):
largest_string = string_list[0]
for string in string_list:
if len(string) > len(largest_string):
largest_string = string
return largest_string
if len(tables) < 2:
raise Exception("need at least two tables in order to merge")
num_of_dropped_rows = 0
max_num_of_rows = max(len(tables[0]), len(tables[1]))
current_merge = tables[0].merge(tables[1], how='inner', left_index=True, right_index=True)
diff = max_num_of_rows - len(current_merge)
max_num_of_rows = len(current_merge)
num_of_dropped_rows += diff
index_names = [tables[0].index.name, tables[1].index.name]
if len(tables) - 2 > 0:
for i in range(2, len(tables)):
                current_merge = current_merge.merge(tables[i], how='inner', left_index=True, right_index=True)
diff = max_num_of_rows - len(current_merge)
max_num_of_rows = len(current_merge)
num_of_dropped_rows += diff
index_names.append(tables[i].index.name)
print('Number of Dropped Rows: ',num_of_dropped_rows)
current_merge.index.name = chooseLargestString(index_names)
# CHECK FOR MULTI INDEX CASE, WHETHER THE ABOVE LINE BREAKS
return current_merge
def analyzeRelationships(self, to_analyze:list, visualize=True):
""" Analyze basic stats of one or more different indexes.
By comparing boxplots, you should be able to determine which indices are related.
Parameters
----------
to_analyze : list[list[str, Series]]
- A list of lists. The later should be of length two, in which the 0th index stores the table name and the 1st index contains a Series.
- The Series should contain the values of the column derived from the table associated with the name stored in the 0th index.
Returns
--------
DataFrame
- Table that contains basic stats about each given Series.
Examples
--------
(link juptyer notebook)
"""
descriptions = []
boxplot_data = []
boxplot_xtick_labels = []
for pair in to_analyze:
new_name = pair[1].name + ' from ' + pair[0]
descriptions.append(pair[1].describe().rename(new_name))
boxplot_data.append(pair[1])
boxplot_xtick_labels.append(new_name)
# add labels to the quartile ranges for exact measurment.
if visualize:
g = sns.boxplot(data=boxplot_data)
g.set(
title='Relationship Analysis',
xlabel='Features',
ylabel='Numerical Values',
xticklabels=boxplot_xtick_labels
)
plt.xticks(rotation=-10)
description_table = pd.concat(descriptions, axis=1)
return description_table
def compareRelationship(self, to_analyze1, to_analyze2, visualize=False):
""" Compare and contrast the difference between two Series.
By comparing boxplots, you should be able to determine which indices are related.
Parameters
----------
to_analyze1 : list[str, Series]
- A list that contains the name of the first table, and the contents of a specifc column from that table as a Series.
to_analyze2 : list[str, Series]
- A list that contains the name of the second table, and the contents of a specifc column from that table as a Series.
Returns
--------
DataFrame
- Table that contains basic stats about each given Series, as well as a third column that contains the difference between the stats.
Examples
--------
(link juptyer notebook)
"""
descriptions = []
boxplot_data = []
boxplot_xtick_labels = []
new_name = to_analyze1[1].name + ' from ' + to_analyze1[0]
description1 = to_analyze1[1].describe().rename(new_name)
descriptions.append(description1)
boxplot_data.append(to_analyze1[1])
boxplot_xtick_labels.append(new_name)
new_name = to_analyze2[1].name + ' from ' + to_analyze2[0]
description2 = to_analyze2[1].describe().rename(new_name)
descriptions.append(description2)
boxplot_data.append(to_analyze2[1])
boxplot_xtick_labels.append(new_name)
if visualize:
g = sns.boxplot(data=boxplot_data)
g.set(
title='Relationship Analysis',
xlabel='Features',
ylabel='Numerical Values',
xticklabels=boxplot_xtick_labels
)
plt.xticks(rotation=-10)
diff_description = abs(description1 - description2)
diff_description.name = "Difference"
descriptions.append(diff_description)
description_table = pd.concat(descriptions, axis=1)
return description_table
def export(self, df_to_export, write_to, export_type="CSV"):
""" Exports contents of dataframe to relative location specified by write_to parameter.
- Default export type is CSV
Parameters
----------
df_to_export : DataFrame
- DataFrame whose contents will be exported into a specifed location.
write_to : str
- Relative location (including file) that you will write into.
export_type : str
- Format that contents of df_to_export will be exported as.
Returns
--------
None
Examples
--------
(link juptyer notebook)
"""
if export_type is "CSV":
df_to_export.to_csv(write_to, encoding='utf-8', index=True, index_label=df_to_export.index.name)
else:
            print('implement sql format')
# to implement later
# featureSearch should return a dictionary, where key is the index and value is the name of the feature
# this makes it easier for people to select the feature they want
# search function should also have an 'is_exact' option, to make search more precise.
# check if a lower case letter surrounds either side of the keyword, implies that it is an interjection
# create a function that let's you index into a python list with another list. useful for selecting many names at once
# from featureSearch result
# format output of lists better... can actually use DataFrame for formatting
# Print out "You have found the following column names/table names/etc." to make it easier for people to
# understand what they are seeing.
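# --- Hedged usage sketch (not part of the original module) ---
# The directory path and keywords are illustrative assumptions; run() scans
# the folder for CSV files, and the search helpers work off the collected
# column and table names (results are simply empty if the folder has no CSVs).
if __name__ == "__main__":
    cb = CrystalBall.run("./data")
    print(cb.contains(["ID"]))          # e.g. [True]
    print(cb.featureSearch(["Name"]))   # DataFrame of matching column names
    print(cb.tableSearch(["Name"]))     # list of CSVs whose columns match every keyword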
|
the-stack_0_10800 | import asyncio
import ssl
import sys
from aiohttp import web
import aiogram
from aiogram import Bot, types, Version
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher import Dispatcher
from aiogram.dispatcher.webhook import get_new_configured_app, SendMessage
from aiogram.types import ChatType, ParseMode, ContentType
from aiogram.utils.markdown import hbold, bold, text, link
TOKEN = 'BOT TOKEN HERE'
WEBHOOK_HOST = 'example.com'  # Domain name or IP address where your bot is hosted.
WEBHOOK_PORT = 443  # The Telegram Bot API only allows these ports for webhooks: 443, 80, 88 or 8443
WEBHOOK_URL_PATH = '/webhook' # Part of URL
# This options needed if you use self-signed SSL certificate
# Instructions: https://core.telegram.org/bots/self-signed
WEBHOOK_SSL_CERT = './webhook_cert.pem' # Path to the ssl certificate
WEBHOOK_SSL_PRIV = './webhook_pkey.pem' # Path to the ssl private key
WEBHOOK_URL = f"https://{WEBHOOK_HOST}:{WEBHOOK_PORT}{WEBHOOK_URL_PATH}"
# Web app settings:
# Use a LAN address to listen for webhooks
# Use any available port in the range 1024-49151 if you're behind a proxy, or WEBHOOK_PORT if you're handling webhooks directly
WEBAPP_HOST = 'localhost'
WEBAPP_PORT = 3001
BAD_CONTENT = ContentType.PHOTO & ContentType.DOCUMENT & ContentType.STICKER & ContentType.AUDIO
loop = asyncio.get_event_loop()
bot = Bot(TOKEN, loop=loop)
storage = MemoryStorage()
dp = Dispatcher(bot, storage=storage)
async def cmd_start(message: types.Message):
# Yep. aiogram allows to respond into webhook.
# https://core.telegram.org/bots/api#making-requests-when-getting-updates
return SendMessage(chat_id=message.chat.id, text='Hi from webhook!',
reply_to_message_id=message.message_id)
async def cmd_about(message: types.Message):
# In this function markdown utils are userd for formatting message text
return SendMessage(message.chat.id, text(
bold('Hi! I\'m just a simple telegram bot.'),
'',
text('I\'m powered by', bold('Python', Version(*sys.version_info[:]))),
text('With', link(text('aiogram', aiogram.VERSION), 'https://github.com/aiogram/aiogram')),
sep='\n'
), parse_mode=ParseMode.MARKDOWN)
async def cancel(message: types.Message):
# Get current state context
state = dp.current_state(chat=message.chat.id, user=message.from_user.id)
# If current user in any state - cancel it.
if await state.get_state() is not None:
await state.set_state(state=None)
return SendMessage(message.chat.id, 'Current action is canceled.')
# Otherwise do nothing
async def unknown(message: types.Message):
"""
Handler for unknown messages.
"""
return SendMessage(message.chat.id, f"I don\'t know what to do with content type `{message.content_type()}`. Sorry :c")
async def cmd_id(message: types.Message):
"""
Return info about user.
"""
if message.reply_to_message:
target = message.reply_to_message.from_user
chat = message.chat
elif message.forward_from and message.chat.type == ChatType.PRIVATE:
target = message.forward_from
chat = message.forward_from or message.chat
else:
target = message.from_user
chat = message.chat
result_msg = [hbold('Info about user:'),
f"First name: {target.first_name}"]
if target.last_name:
result_msg.append(f"Last name: {target.last_name}")
if target.username:
result_msg.append(f"Username: {target.mention}")
result_msg.append(f"User ID: {target.id}")
result_msg.extend([hbold('Chat:'),
f"Type: {chat.type}",
f"Chat ID: {chat.id}"])
if chat.type != ChatType.PRIVATE:
result_msg.append(f"Title: {chat.title}")
else:
result_msg.append(f"Title: {chat.full_name}")
return SendMessage(message.chat.id, '\n'.join(result_msg), reply_to_message_id=message.message_id,
parse_mode=ParseMode.HTML)
async def on_startup(app):
# Demonstrate one of the available methods for registering handlers
# This command available only in main state (state=None)
dp.register_message_handler(cmd_start, commands=['start'])
# This handler is available in all states at any time.
dp.register_message_handler(cmd_about, commands=['help', 'about'], state='*')
dp.register_message_handler(unknown, content_types=BAD_CONTENT,
func=lambda message: message.chat.type == ChatType.PRIVATE)
# You are able to register one function handler for multiple conditions
dp.register_message_handler(cancel, commands=['cancel'], state='*')
dp.register_message_handler(cancel, func=lambda message: message.text.lower().strip() in ['cancel'], state='*')
dp.register_message_handler(cmd_id, commands=['id'], state='*')
dp.register_message_handler(cmd_id, func=lambda message: message.forward_from or
message.reply_to_message and
message.chat.type == ChatType.PRIVATE, state='*')
# Get current webhook status
webhook = await bot.get_webhook_info()
# If URL is bad
if webhook.url != WEBHOOK_URL:
# If URL doesnt match current - remove webhook
if not webhook.url:
await bot.delete_webhook()
# Set new URL for webhook
await bot.set_webhook(WEBHOOK_URL, certificate=open(WEBHOOK_SSL_CERT, 'rb'))
# If you want to use free certificate signed by LetsEncrypt you need to set only URL without sending certificate.
async def on_shutdown(app):
"""
Graceful shutdown. This method is recommended by aiohttp docs.
"""
# Remove webhook.
await bot.delete_webhook()
# Close Redis connection.
await dp.storage.close()
await dp.storage.wait_closed()
if __name__ == '__main__':
# Get instance of :class:`aiohttp.web.Application` with configured router.
app = get_new_configured_app(dispatcher=dp, path=WEBHOOK_URL_PATH)
# Setup event handlers.
app.on_startup.append(on_startup)
app.on_shutdown.append(on_shutdown)
# Generate SSL context
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(WEBHOOK_SSL_CERT, WEBHOOK_SSL_PRIV)
# Start web-application.
web.run_app(app, host=WEBAPP_HOST, port=WEBAPP_PORT, ssl_context=context)
# Note:
# If you start your bot using nginx or Apache web server, SSL context is not required.
# Otherwise you need to set `ssl_context` parameter.
|
the-stack_0_10801 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: An Incremental Earley Chart Parser
#
# Copyright (C) 2001-2014 NLTK Project
# Author: Peter Ljunglöf <[email protected]>
# Rob Speer <[email protected]>
# Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# Jean Mark Gawron <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Data classes and parser implementations for *incremental* chart
parsers, which use dynamic programming to efficiently parse a text.
A "chart parser" derives parse trees for a text by iteratively adding
\"edges\" to a \"chart\". Each "edge" represents a hypothesis about the tree
structure for a subsequence of the text. The "chart" is a
\"blackboard\" for composing and combining these hypotheses.
A parser is "incremental", if it guarantees that for all i, j where i < j,
all edges ending at i are built before any edges ending at j.
This is appealing for, say, speech recognizer hypothesis filtering.
The main parser class is ``EarleyChartParser``, which is a top-down
algorithm, originally formulated by Jay Earley (1970).
"""
from __future__ import print_function, division
from nltk.compat import xrange
from nltk.parse.chart import (Chart, ChartParser, EdgeI, LeafEdge, LeafInitRule,
BottomUpPredictRule, BottomUpPredictCombineRule,
TopDownInitRule, SingleEdgeFundamentalRule,
EmptyPredictRule,
CachedTopDownPredictRule,
FilteredSingleEdgeFundamentalRule,
FilteredBottomUpPredictCombineRule)
from nltk.parse.featurechart import (FeatureChart, FeatureChartParser,
FeatureTopDownInitRule,
FeatureTopDownPredictRule,
FeatureEmptyPredictRule,
FeatureBottomUpPredictRule,
FeatureBottomUpPredictCombineRule,
FeatureSingleEdgeFundamentalRule)
#////////////////////////////////////////////////////////////
# Incremental Chart
#////////////////////////////////////////////////////////////
class IncrementalChart(Chart):
def initialize(self):
# A sequence of edge lists contained in this chart.
self._edgelists = tuple([] for x in self._positions())
# The set of child pointer lists associated with each edge.
self._edge_to_cpls = {}
# Indexes mapping attribute values to lists of edges
# (used by select()).
self._indexes = {}
def edges(self):
return list(self.iteredges())
def iteredges(self):
return (edge for edgelist in self._edgelists for edge in edgelist)
def select(self, end, **restrictions):
edgelist = self._edgelists[end]
# If there are no restrictions, then return all edges.
if restrictions=={}: return iter(edgelist)
# Find the index corresponding to the given restrictions.
restr_keys = sorted(restrictions.keys())
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = tuple(restrictions[key] for key in restr_keys)
return iter(self._indexes[restr_keys][end].get(vals, []))
def _add_index(self, restr_keys):
# Make sure it's a valid index.
for key in restr_keys:
if not hasattr(EdgeI, key):
raise ValueError('Bad restriction: %s' % key)
# Create the index.
index = self._indexes[restr_keys] = tuple({} for x in self._positions())
# Add all existing edges to the index.
for end, edgelist in enumerate(self._edgelists):
this_index = index[end]
for edge in edgelist:
vals = tuple(getattr(edge, key)() for key in restr_keys)
this_index.setdefault(vals, []).append(edge)
def _register_with_indexes(self, edge):
end = edge.end()
for (restr_keys, index) in self._indexes.items():
vals = tuple(getattr(edge, key)() for key in restr_keys)
index[end].setdefault(vals, []).append(edge)
def _append_edge(self, edge):
self._edgelists[edge.end()].append(edge)
def _positions(self):
return xrange(self.num_leaves() + 1)
class FeatureIncrementalChart(IncrementalChart, FeatureChart):
def select(self, end, **restrictions):
edgelist = self._edgelists[end]
# If there are no restrictions, then return all edges.
if restrictions=={}: return iter(edgelist)
# Find the index corresponding to the given restrictions.
restr_keys = sorted(restrictions.keys())
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = tuple(self._get_type_if_possible(restrictions[key])
for key in restr_keys)
return iter(self._indexes[restr_keys][end].get(vals, []))
def _add_index(self, restr_keys):
# Make sure it's a valid index.
for key in restr_keys:
if not hasattr(EdgeI, key):
raise ValueError('Bad restriction: %s' % key)
# Create the index.
index = self._indexes[restr_keys] = tuple({} for x in self._positions())
# Add all existing edges to the index.
for end, edgelist in enumerate(self._edgelists):
this_index = index[end]
for edge in edgelist:
vals = tuple(self._get_type_if_possible(getattr(edge, key)())
for key in restr_keys)
this_index.setdefault(vals, []).append(edge)
def _register_with_indexes(self, edge):
end = edge.end()
for (restr_keys, index) in self._indexes.items():
vals = tuple(self._get_type_if_possible(getattr(edge, key)())
for key in restr_keys)
index[end].setdefault(vals, []).append(edge)
#////////////////////////////////////////////////////////////
# Incremental CFG Rules
#////////////////////////////////////////////////////////////
class CompleteFundamentalRule(SingleEdgeFundamentalRule):
def _apply_incomplete(self, chart, grammar, left_edge):
end = left_edge.end()
# When the chart is incremental, we only have to look for
# empty complete edges here.
for right_edge in chart.select(start=end, end=end,
is_complete=True,
lhs=left_edge.nextsym()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
class CompleterRule(CompleteFundamentalRule):
_fundamental_rule = CompleteFundamentalRule()
def apply(self, chart, grammar, edge):
if not isinstance(edge, LeafEdge):
for new_edge in self._fundamental_rule.apply(chart, grammar, edge):
yield new_edge
class ScannerRule(CompleteFundamentalRule):
_fundamental_rule = CompleteFundamentalRule()
def apply(self, chart, grammar, edge):
if isinstance(edge, LeafEdge):
for new_edge in self._fundamental_rule.apply(chart, grammar, edge):
yield new_edge
class PredictorRule(CachedTopDownPredictRule):
pass
class FilteredCompleteFundamentalRule(FilteredSingleEdgeFundamentalRule):
def apply(self, chart, grammar, edge):
# Since the Filtered rule only works for grammars without empty productions,
# we only have to bother with complete edges here.
if edge.is_complete():
for new_edge in self._apply_complete(chart, grammar, edge):
yield new_edge
#////////////////////////////////////////////////////////////
# Incremental FCFG Rules
#////////////////////////////////////////////////////////////
class FeatureCompleteFundamentalRule(FeatureSingleEdgeFundamentalRule):
def _apply_incomplete(self, chart, grammar, left_edge):
fr = self._fundamental_rule
end = left_edge.end()
# When the chart is incremental, we only have to look for
# empty complete edges here.
for right_edge in chart.select(start=end, end=end,
is_complete=True,
lhs=left_edge.nextsym()):
for new_edge in fr.apply(chart, grammar, left_edge, right_edge):
yield new_edge
class FeatureCompleterRule(CompleterRule):
_fundamental_rule = FeatureCompleteFundamentalRule()
class FeatureScannerRule(ScannerRule):
_fundamental_rule = FeatureCompleteFundamentalRule()
class FeaturePredictorRule(FeatureTopDownPredictRule):
pass
#////////////////////////////////////////////////////////////
# Incremental CFG Chart Parsers
#////////////////////////////////////////////////////////////
EARLEY_STRATEGY = [LeafInitRule(),
TopDownInitRule(),
CompleterRule(),
ScannerRule(),
PredictorRule()]
TD_INCREMENTAL_STRATEGY = [LeafInitRule(),
TopDownInitRule(),
CachedTopDownPredictRule(),
CompleteFundamentalRule()]
BU_INCREMENTAL_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictRule(),
CompleteFundamentalRule()]
BU_LC_INCREMENTAL_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictCombineRule(),
CompleteFundamentalRule()]
LC_INCREMENTAL_STRATEGY = [LeafInitRule(),
FilteredBottomUpPredictCombineRule(),
FilteredCompleteFundamentalRule()]
class IncrementalChartParser(ChartParser):
"""
An *incremental* chart parser implementing Jay Earley's
parsing algorithm:
| For each index end in [0, 1, ..., N]:
| For each edge such that edge.end = end:
| If edge is incomplete and edge.next is not a part of speech:
| Apply PredictorRule to edge
| If edge is incomplete and edge.next is a part of speech:
| Apply ScannerRule to edge
| If edge is complete:
| Apply CompleterRule to edge
| Return any complete parses in the chart
"""
def __init__(self, grammar, strategy=BU_LC_INCREMENTAL_STRATEGY,
trace=0, trace_chart_width=50,
chart_class=IncrementalChart):
"""
Create a new Earley chart parser, that uses ``grammar`` to
parse texts.
:type grammar: CFG
:param grammar: The grammar used to parse texts.
:type trace: int
:param trace: The level of tracing that should be used when
parsing a text. ``0`` will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
:type trace_chart_width: int
:param trace_chart_width: The default total width reserved for
the chart in trace output. The remainder of each line will
be used to display edges.
:param chart_class: The class that should be used to create
the charts used by this parser.
"""
self._grammar = grammar
self._trace = trace
self._trace_chart_width = trace_chart_width
self._chart_class = chart_class
self._axioms = []
self._inference_rules = []
for rule in strategy:
if rule.NUM_EDGES == 0:
self._axioms.append(rule)
elif rule.NUM_EDGES == 1:
self._inference_rules.append(rule)
else:
raise ValueError("Incremental inference rules must have "
"NUM_EDGES == 0 or 1")
def chart_parse(self, tokens, trace=None):
if trace is None: trace = self._trace
trace_new_edges = self._trace_new_edges
tokens = list(tokens)
self._grammar.check_coverage(tokens)
chart = self._chart_class(tokens)
grammar = self._grammar
# Width, for printing trace edges.
trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1)
if trace: print(chart.pp_leaves(trace_edge_width))
for axiom in self._axioms:
new_edges = list(axiom.apply(chart, grammar))
trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width)
inference_rules = self._inference_rules
for end in range(chart.num_leaves()+1):
if trace > 1: print("\n* Processing queue:", end, "\n")
agenda = list(chart.select(end=end))
while agenda:
edge = agenda.pop()
for rule in inference_rules:
new_edges = list(rule.apply(chart, grammar, edge))
trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
for new_edge in new_edges:
if new_edge.end()==end:
agenda.append(new_edge)
return chart
class EarleyChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, EARLEY_STRATEGY, **parser_args)
pass
class IncrementalTopDownChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, TD_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalBottomUpChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, BU_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalBottomUpLeftCornerChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, BU_LC_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalLeftCornerChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
if not grammar.is_nonempty():
raise ValueError("IncrementalLeftCornerParser only works for grammars "
"without empty productions.")
IncrementalChartParser.__init__(self, grammar, LC_INCREMENTAL_STRATEGY, **parser_args)
#////////////////////////////////////////////////////////////
# Incremental FCFG Chart Parsers
#////////////////////////////////////////////////////////////
EARLEY_FEATURE_STRATEGY = [LeafInitRule(),
FeatureTopDownInitRule(),
FeatureCompleterRule(),
FeatureScannerRule(),
FeaturePredictorRule()]
TD_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureTopDownInitRule(),
FeatureTopDownPredictRule(),
FeatureCompleteFundamentalRule()]
BU_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureEmptyPredictRule(),
FeatureBottomUpPredictRule(),
FeatureCompleteFundamentalRule()]
BU_LC_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureEmptyPredictRule(),
FeatureBottomUpPredictCombineRule(),
FeatureCompleteFundamentalRule()]
class FeatureIncrementalChartParser(IncrementalChartParser, FeatureChartParser):
def __init__(self, grammar,
strategy=BU_LC_INCREMENTAL_FEATURE_STRATEGY,
trace_chart_width=20,
chart_class=FeatureIncrementalChart,
**parser_args):
IncrementalChartParser.__init__(self, grammar,
strategy=strategy,
trace_chart_width=trace_chart_width,
chart_class=chart_class,
**parser_args)
class FeatureEarleyChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, EARLEY_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalTopDownChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, TD_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalBottomUpChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, BU_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalBottomUpLeftCornerChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, BU_LC_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
#////////////////////////////////////////////////////////////
# Demonstration
#////////////////////////////////////////////////////////////
def demo(print_times=True, print_grammar=False,
print_trees=True, trace=2,
sent='I saw John with a dog with my cookie', numparses=5):
"""
A demonstration of the Earley parsers.
"""
import sys, time
from nltk.parse.chart import demo_grammar
# The grammar for ChartParser and SteppingChartParser:
grammar = demo_grammar()
if print_grammar:
print("* Grammar")
print(grammar)
# Tokenize the sample sentence.
print("* Sentence:")
print(sent)
tokens = sent.split()
print(tokens)
print()
# Do the parsing.
earley = EarleyChartParser(grammar, trace=trace)
t = time.clock()
chart = earley.chart_parse(tokens)
parses = list(chart.parses(grammar.start()))
t = time.clock()-t
# Print results.
if numparses:
assert len(parses)==numparses, 'Not all parses found'
if print_trees:
for tree in parses: print(tree)
else:
print("Nr trees:", len(parses))
if print_times:
print("Time:", t)
if __name__ == '__main__': demo()
|
the-stack_0_10804 | # Copyright 2017 Artyom Losev
# Copyright 2018 Kolushov Alexandr <https://it-projects.info/team/KolushovAlexandr>
# License MIT (https://opensource.org/licenses/MIT).
from odoo import _, api, fields, models
SO_CHANNEL = "pos_sale_orders"
INV_CHANNEL = "pos_invoices"
class PosOrder(models.Model):
_inherit = "pos.order"
@api.model
def create_from_ui(self, orders):
invoices_to_pay = [o for o in orders if o.get("data").get("invoice_to_pay")]
original_orders = [o for o in orders if o not in invoices_to_pay]
res = super(PosOrder, self).create_from_ui(original_orders)
if invoices_to_pay:
for inv in invoices_to_pay:
self.process_invoice_payment(inv)
return res
@api.model
def process_invoice_payment(self, invoice):
for statement in invoice["data"]["statement_ids"]:
inv_id = invoice["data"]["invoice_to_pay"]["id"]
inv_obj = self.env["account.invoice"].browse(inv_id)
journal_id = statement[2]["journal_id"]
journal = self.env["account.journal"].browse(journal_id)
amount = min(
statement[2]["amount"], # amount payed including change
invoice["data"]["invoice_to_pay"]["residual"], # amount required to pay
)
cashier = invoice["data"]["user_id"]
writeoff_acc_id = False
payment_difference_handling = "open"
vals = {
"journal_id": journal.id,
"payment_method_id": 1,
"payment_date": invoice["data"]["creation_date"],
"communication": invoice["data"]["invoice_to_pay"]["number"],
"invoice_ids": [(4, inv_id, None)],
"payment_type": "inbound",
"amount": amount,
"currency_id": inv_obj.currency_id.id,
"partner_id": invoice["data"]["invoice_to_pay"]["partner_id"][0],
"partner_type": "customer",
"payment_difference_handling": payment_difference_handling,
"writeoff_account_id": writeoff_acc_id,
"pos_session_id": invoice["data"]["pos_session_id"],
"cashier": cashier,
}
payment = self.env["account.payment"].create(vals)
payment.post()
@api.model
def process_invoices_creation(self, sale_order_id):
order = self.env["sale.order"].browse(sale_order_id)
inv_id = order.action_invoice_create()
self.env["account.invoice"].browse(inv_id).action_invoice_open()
return inv_id
class AccountPayment(models.Model):
_inherit = "account.payment"
pos_session_id = fields.Many2one("pos.session", string="POS session")
cashier = fields.Many2one("res.users")
datetime = fields.Datetime(string="Datetime", default=fields.Datetime.now)
class AccountInvoice(models.Model):
_inherit = "account.invoice"
def action_updated_invoice(self):
message = {"channel": INV_CHANNEL, "id": self.id}
self.env["pos.config"].search([])._send_to_channel(INV_CHANNEL, message)
@api.model
def get_invoice_lines_for_pos(self, invoice_ids):
res = []
invoice_lines = self.env["account.invoice.line"].search(
[("invoice_id", "in", invoice_ids)]
)
for l in invoice_lines:
line = {
"invoice_id": l.invoice_id.id,
"id": l.id,
"name": l.name,
"account": l.account_id.name,
"product": l.product_id.name,
"price_unit": l.price_unit,
"qty": l.quantity,
"tax": [tax.name or " " for tax in l.invoice_line_tax_ids],
"discount": l.discount,
"amount": l.price_subtotal,
}
res.append(line)
return res
@api.depends("payment_move_line_ids.amount_residual")
def _get_payment_info_JSON(self):
for record in self:
if not record.payment_move_line_ids:
pass
for move in record.payment_move_line_ids:
if move.payment_id.cashier:
if move.move_id.ref:
move.move_id.ref = "{} by {}".format(
move.move_id.ref, move.payment_id.cashier.name
)
else:
move.move_id.name = "{} by {}".format(
move.move_id.name, move.payment_id.cashier.name
)
data = super(AccountInvoice, self)._get_payment_info_JSON()
return data
class SaleOrder(models.Model):
_inherit = "sale.order"
def action_updated_sale_order(self):
message = {"channel": SO_CHANNEL, "id": self.id}
self.env["pos.config"].search([])._send_to_channel(SO_CHANNEL, message)
@api.model
def get_order_lines_for_pos(self, sale_order_ids):
res = []
order_lines = self.env["sale.order.line"].search(
[("order_id", "in", sale_order_ids)]
)
for l in order_lines:
line = {
"order_id": l.order_id.id,
"id": l.id,
"name": l.name,
"product": l.product_id.name,
"uom_qty": l.product_uom_qty,
"qty_delivered": l.qty_delivered,
"qty_invoiced": l.qty_invoiced,
"tax": [tax.name or " " for tax in l.tax_id],
"discount": l.discount,
"subtotal": l.price_subtotal,
"total": l.price_total,
"invoiceble": (
(l.qty_delivered > 0) or (l.product_id.invoice_policy == "order")
),
}
res.append(line)
return res
class PosConfig(models.Model):
_inherit = "pos.config"
def _get_default_writeoff_account(self):
acc = self.env["account.account"].search([("code", "=", 220000)]).id
return acc if acc else False
show_invoices = fields.Boolean(help="Show invoices in POS", default=True)
show_sale_orders = fields.Boolean(help="Show sale orders in POS", default=True)
pos_invoice_pay_writeoff_account_id = fields.Many2one(
"account.account",
string="Difference Account",
help="The account is used for the difference between due and paid amount",
default=_get_default_writeoff_account,
)
invoice_cashier_selection = fields.Boolean(
string="Select Invoice Cashier",
help="Ask for a cashier when fetch invoices",
        default=True,
)
sale_order_cashier_selection = fields.Boolean(
string="Select Sale Order Cashier",
help="Ask for a cashier when fetch orders",
        default=True,
)
class PosSession(models.Model):
_inherit = "pos.session"
session_payments = fields.One2many(
"account.payment",
"pos_session_id",
string="Invoice Payments",
help="Show invoices paid in the Session",
)
session_invoices_total = fields.Float(
"Invoices", compute="_compute_session_invoices_total"
)
def _compute_session_invoices_total(self):
for rec in self:
rec.session_invoices_total = sum(
rec.session_payments.mapped("invoice_ids").mapped("amount_total") + [0]
)
def action_invoice_payments(self):
payments = self.env["account.payment"].search(
[("pos_session_id", "in", self.ids)]
)
invoices = payments.mapped("invoice_ids").ids
domain = [("id", "in", invoices)]
return {
"name": _("Invoice Payments"),
"type": "ir.actions.act_window",
"domain": domain,
"res_model": "account.invoice",
"view_type": "form",
"view_mode": "tree,form",
}
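# Illustrative only: the approximate shape of the UI payload that
# process_invoice_payment() reads, inferred from the keys accessed above.
# All concrete values below are hypothetical.
#
# invoice = {
#     "data": {
#         "statement_ids": [(0, 0, {"journal_id": 1, "amount": 100.0})],
#         "invoice_to_pay": {
#             "id": 42,
#             "residual": 100.0,
#             "number": "INV/2021/0001",
#             "partner_id": (7, "Partner Name"),
#         },
#         "user_id": 2,  # cashier
#         "creation_date": "2021-01-26 10:00:00",
#         "pos_session_id": 5,
#     },
# }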
|
the-stack_0_10809 | # File name: subtitles.py
import kivy
kivy.require('1.9.0')
from kivy.network.urlrequest import UrlRequest
class Subtitles:
def __init__(self, url):
self.subtitles = []
req = UrlRequest(url, self.got_subtitles)
def got_subtitles(self, req, results):
self.subtitles = results['captions']
def next(self, secs):
for sub in self.subtitles:
ms = secs*1000 - 12000
st = 'startTime'
d = 'duration'
if ms >= sub[st] and ms <= sub[st] + sub[d]:
return sub
return None
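# Minimal usage sketch (illustrative): assumes a reachable captions URL that returns
# JSON like {"captions": [{"startTime": ..., "duration": ...}, ...]}; the URL below
# is hypothetical.
#
# subs = Subtitles('https://example.com/captions.json')
# def on_position_change(secs):
#     sub = subs.next(secs)
#     if sub is not None:
#         print(sub)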
|
the-stack_0_10810 | from os import environ
import os
from urllib.parse import urlparse
import aiohttp
from pyrogram import Client, filters
import requests
from bs4 import BeautifulSoup
import re
API_ID = environ.get('API_ID', '4029928')
API_HASH = environ.get('API_HASH', '99dae01a51f441a77499e01ab08ebdd0')
BOT_TOKEN = environ.get('BOT_TOKEN')
PDISK_API_KEY = environ.get('PDISK_API_KEY')
CHANNEL = environ.get('CHANNEL', 'KayiChat_Official')
bot = Client('pdisk bot',
api_id=API_ID,
api_hash=API_HASH,
bot_token=BOT_TOKEN,
workers=50,
sleep_threshold=0)
@bot.on_message(filters.command('start') & filters.private)
async def start(bot, message):
await message.reply(
f"**Hiya 👋{message.chat.first_name}!**\n\n"
"**A Simple PDisk Uploader Bot.\n\n➠ Send Me Any Direct Link, YouTube Link Or Video Link I Will Upload To PDisk And Give Direct Link\n\nMade With❤BY @BamsiByrek**")
@bot.on_message(filters.text & filters.private)
async def pdisk_uploader(bot, message):
new_string = str(message.text)
try:
pdisk_link = await multi_pdisk_up(new_string)
await message.reply(f'{pdisk_link}', quote=True)
except Exception as e:
await message.reply(f'Error: {e}', quote=True)
@bot.on_message(filters.photo & filters.private)
async def pdisk_photo_uploader(bot, message):
new_string = str(message.caption)
try:
pdisk_link = await multi_pdisk_up(new_string)
if(len(pdisk_link) > 1020):
await message.reply(f'{pdisk_link}', quote=True)
else:
await bot.send_photo(message.chat.id, message.photo.file_id, caption=f'{pdisk_link}')
except Exception as e:
await message.reply(f'Error: {e}', quote=True)
async def get_ptitle(url):
html_text = requests.get(url).text
soup = BeautifulSoup(html_text, 'html.parser')
for title in soup.find_all('title'):
pass
title = list(title.get_text())
title = title[8:]
str = '@' + CHANNEL + ' '
for i in title:
str = str + i
lst = list(html_text.split(","))
c = 0
for i in lst:
if ("""videoid""" in i):
found = lst[c]
break
c += 1
# pdisk.net link
pdisk_video_id = list(found.split(":"))
video_id = pdisk_video_id[2]
video_id = list(video_id.split(","))
v_id = video_id[0]
v_len = len(v_id)
v_id = v_id[1:v_len - 2]
v_url = 'https://www.pdisks.com/share-video?videoid=' + v_id
res = [str, v_url]
return res
async def pdisk_up(link):
if ('pdisk' in link or 'kuklink' in link or 'kofilink' in link or 'cofilink' in link or 'bit' in link):
res = await get_ptitle(link)
title_pdisk = res[0]
link = res[1]
else:
title_new = urlparse(link)
title_new = os.path.basename(title_new.path)
title_pdisk = '@' + CHANNEL + title_new
res = requests.get(
'http://linkapi.net/open/create_item?link_type=link&content_src=' + link + '&source=2000&api_key=' + PDISK_API_KEY + '&dir_id=0&title=' + title_pdisk + '&description=Join_' + CHANNEL + '_for_more_like_this')
data = res.json()
data = dict(data)
print(data)
v_id = data['data']['item_id']
v_url = 'https://www.pdisks.com/share-video?videoid=' + v_id
return (v_url)
async def multi_pdisk_up(ml_string):
new_ml_string = list(map(str, ml_string.split(" ")))
new_ml_string = await remove_username(new_ml_string)
new_join_str = "".join(new_ml_string)
urls = re.findall(r'(https?://[^\s]+)', new_join_str)
nml_len = len(new_ml_string)
u_len = len(urls)
url_index = []
count = 0
for i in range(nml_len):
for j in range(u_len):
if (urls[j] in new_ml_string[i]):
url_index.append(count)
count += 1
new_urls = await new_pdisk_url(urls)
url_index = list(dict.fromkeys(url_index))
i = 0
for j in url_index:
new_ml_string[j] = new_ml_string[j].replace(urls[i], new_urls[i])
i += 1
new_string = " ".join(new_ml_string)
return await addFooter(new_string)
async def new_pdisk_url(urls):
new_urls = []
for i in urls:
new_urls.append(await pdisk_up(i))
return new_urls
async def remove_username(new_List):
for i in new_List:
if('@' in i or 't.me' in i or 'https://bit.ly/3m4gabB' in i or 'https://bit.ly/pdisk_tuts' in i or 'telegra.ph' in i):
new_List.remove(i)
return new_List
async def addFooter(str):
footer = """
━━━━━━━━━━━━━━━
⦿ Made With♥️BY @bamsibyrek
━━━━━━━━━━━━━━━
✪ »JOIN CHANNEL ➡️ t.me/""" + CHANNEL
return str + footer
bot.run()
|
the-stack_0_10813 | # Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from conftest import is_allowing_any_non_gpu, get_non_gpu_allowed
from pyspark.sql import SparkSession, DataFrame
from spark_init_internal import get_spark_i_know_what_i_am_doing
def _from_scala_map(scala_map):
ret = {}
# The value we get is a scala map, not a java map, so we need to jump through some hoops
keys = scala_map.keys().iterator()
while keys.hasNext():
key = keys.next()
ret[key] = scala_map.get(key).get()
return ret
_spark = get_spark_i_know_what_i_am_doing()
# Have to reach into a private member to get access to the API we need
_orig_conf = _from_scala_map(_spark.conf._jconf.getAll())
_orig_conf_keys = _orig_conf.keys()
def is_tz_utc(spark=_spark):
"""
true if the tz is UTC else false
"""
# Now we have to do some kind of ugly internal java stuff
jvm = spark.sparkContext._jvm
utc = jvm.java.time.ZoneId.of('UTC').normalized()
sys_tz = jvm.java.time.ZoneId.systemDefault().normalized()
return utc == sys_tz
def _set_all_confs(conf):
for key, value in conf.items():
if _spark.conf.get(key, None) != value:
_spark.conf.set(key, value)
def reset_spark_session_conf():
"""Reset all of the configs for a given spark session."""
_set_all_confs(_orig_conf)
#We should clear the cache
_spark.catalog.clearCache()
# Have to reach into a private member to get access to the API we need
current_keys = _from_scala_map(_spark.conf._jconf.getAll()).keys()
for key in current_keys:
if key not in _orig_conf_keys:
_spark.conf.unset(key)
def _check_for_proper_return_values(something):
"""We don't want to return an DataFrame or Dataset from a with_spark_session. You will not get what you expect"""
if (isinstance(something, DataFrame)):
raise RuntimeError("You should never return a DataFrame from a with_*_session, you will not get the results that you expect")
def with_spark_session(func, conf={}):
"""Run func that takes a spark session as input with the given configs set."""
reset_spark_session_conf()
_set_all_confs(conf)
ret = func(_spark)
_check_for_proper_return_values(ret)
return ret
def with_cpu_session(func, conf={}):
"""Run func that takes a spark session as input with the given configs set on the CPU."""
copy = dict(conf)
copy['spark.rapids.sql.enabled'] = 'false'
return with_spark_session(func, conf=copy)
def with_gpu_session(func, conf={}):
"""
Run func that takes a spark session as input with the given configs set on the GPU.
Note that this forces you into test mode unless. It is not a requirement, but is
simplest for right now.
"""
copy = dict(conf)
copy['spark.rapids.sql.enabled'] = 'true'
if is_allowing_any_non_gpu():
copy['spark.rapids.sql.test.enabled'] = 'false'
else:
copy['spark.rapids.sql.test.enabled'] = 'true'
copy['spark.rapids.sql.test.allowedNonGpu'] = ','.join(get_non_gpu_allowed())
return with_spark_session(func, conf=copy)
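# Usage sketch (illustrative): run the same query on the CPU and on the GPU.
# The lambdas return plain Python values, never DataFrames, to satisfy
# _check_for_proper_return_values above. The extra conf key is only an example.
#
# cpu_count = with_cpu_session(lambda spark: spark.range(100).count())
# gpu_count = with_gpu_session(lambda spark: spark.range(100).count(),
#                              conf={'spark.rapids.sql.explain': 'ALL'})
# assert cpu_count == gpu_count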
|
the-stack_0_10815 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
from collections import OrderedDict
from typing import Dict, Any, Optional, List, Iterator, Tuple
import torch
import torch.nn as nn
from fbgemm_gpu.split_table_batched_embeddings_ops import (
IntNBitTableBatchedEmbeddingBagsCodegen,
EmbeddingLocation,
)
from torch import Tensor
from torchrec.modules.embedding_configs import (
EmbeddingBagConfig,
DataType,
DATA_TYPE_NUM_BITS,
data_type_to_sparse_type,
dtype_to_data_type,
pooling_type_to_pooling_mode,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection as OriginalEmbeddingBagCollection,
ebc_get_embedding_names,
)
from torchrec.modules.embedding_modules import EmbeddingBagCollectionInterface
from torchrec.sparse.jagged_tensor import (
KeyedJaggedTensor,
KeyedTensor,
)
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
except OSError:
pass
# OSS
try:
import fbgemm_gpu # @manual # noqa
except ImportError:
pass
def quantize_state_dict(
module: nn.Module,
table_name_to_quantized_weights: Dict[str, Tuple[Tensor, Tensor]],
data_type: DataType,
) -> torch.device:
device = torch.device("cpu")
for key, tensor in module.state_dict().items():
# Extract table name from state dict key.
# e.g. ebc.embedding_bags.t1.weight
splits = key.split(".")
assert splits[-1] == "weight"
table_name = splits[-2]
device = tensor.device
num_bits = DATA_TYPE_NUM_BITS[data_type]
if tensor.is_meta:
quant_weight = torch.empty(
(tensor.shape[0], (tensor.shape[1] * num_bits) // 8),
device="meta",
# pyre-fixme[16]: Item `Tensor` of `Union[Tensor, Module]` has
# no attribute `weight`.
dtype=module.qconfig.weight().dtype,
)
scale_shift = torch.empty(
(tensor.shape[0], 4),
device="meta",
# pyre-fixme[16]: Item `Tensor` of `Union[Tensor, Module]` has
# no attribute `weight`.
dtype=module.qconfig.weight().dtype,
)
else:
quant_res = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
tensor, num_bits
)
quant_weight, scale_shift = (
quant_res[:, :-4],
quant_res[:, -4:],
)
table_name_to_quantized_weights[table_name] = (quant_weight, scale_shift)
return device
class EmbeddingBagCollection(EmbeddingBagCollectionInterface):
"""
EmbeddingBagCollection represents a collection of pooled embeddings (EmbeddingBags).
This EmbeddingBagCollection is quantized for lower precision. It relies on fbgemm quantized ops
It processes sparse data in the form of KeyedJaggedTensor
with values of the form [F X B X L]
F: features (keys)
B: batch size
L: Length of sparse features (jagged)
and outputs a KeyedTensor with values of the form [B * (F * D)]
where
F: features (keys)
D: each feature's (key's) embedding dimension
B: batch size
Constructor Args:
table_name_to_quantized_weights (Dict[str, Tuple[Tensor, Tensor]]): map of tables to quantized weights
embedding_configs (List[EmbeddingBagConfig]): list of embedding tables
is_weighted: (bool): whether input KeyedJaggedTensor is weighted
device: (Optional[torch.device]): default compute device
Call Args:
features: KeyedJaggedTensor,
Returns:
KeyedTensor
Example::
table_0 = EmbeddingBagConfig(
name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"]
)
table_1 = EmbeddingBagConfig(
name="t2", embedding_dim=4, num_embeddings=10, feature_names=["f2"]
)
ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
# 0 1 2 <-- batch
# "f1" [0,1] None [2]
# "f2" [3] [4] [5,6,7]
# ^
# feature
features = KeyedJaggedTensor(
keys=["f1", "f2"],
values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
)
ebc.qconfig = torch.quantization.QConfig(
activation=torch.quantization.PlaceholderObserver.with_args(
dtype=torch.qint8
),
weight=torch.quantization.PlaceholderObserver.with_args(dtype=torch.qint8),
)
qebc = QuantEmbeddingBagCollection.from_float(ebc)
quantized_embeddings = qebc(features)
"""
def __init__(
self,
table_name_to_quantized_weights: Dict[str, Tuple[Tensor, Tensor]],
embedding_configs: List[EmbeddingBagConfig],
is_weighted: bool,
device: torch.device,
) -> None:
super().__init__()
self._is_weighted = is_weighted
self._embedding_bag_configs: List[EmbeddingBagConfig] = embedding_configs
self.embedding_bags: nn.ModuleList = nn.ModuleList()
self._lengths_per_embedding: List[int] = []
table_names = set()
for emb_config in self._embedding_bag_configs:
if emb_config.name in table_names:
raise ValueError(f"Duplicate table name {emb_config.name}")
table_names.add(emb_config.name)
emb_module = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
emb_config.num_embeddings,
emb_config.embedding_dim,
data_type_to_sparse_type(emb_config.data_type),
EmbeddingLocation.HOST
if device.type == "cpu"
else EmbeddingLocation.DEVICE,
)
],
pooling_mode=pooling_type_to_pooling_mode(emb_config.pooling),
weight_lists=[table_name_to_quantized_weights[emb_config.name]],
device=device,
)
self.embedding_bags.append(emb_module)
if not emb_config.feature_names:
emb_config.feature_names = [emb_config.name]
self._lengths_per_embedding.extend(
len(emb_config.feature_names) * [emb_config.embedding_dim]
)
self._embedding_names: List[str] = ebc_get_embedding_names(embedding_configs)
def forward(
self,
features: KeyedJaggedTensor,
) -> KeyedTensor:
pooled_embeddings: List[Tensor] = []
length_per_key: List[int] = []
feature_dict = features.to_dict()
for emb_config, emb_module in zip(
self._embedding_bag_configs, self.embedding_bags
):
for feature_name in emb_config.feature_names:
f = feature_dict[feature_name]
values = f.values()
offsets = f.offsets()
pooled_embeddings.append(
emb_module(
indices=values.int(),
offsets=offsets.int(),
per_sample_weights=f.weights() if self._is_weighted else None,
).float()
)
length_per_key.append(emb_config.embedding_dim)
return KeyedTensor(
keys=self._embedding_names,
values=torch.cat(pooled_embeddings, dim=1),
length_per_key=self._lengths_per_embedding,
)
# pyre-fixme[14]: `state_dict` overrides method defined in `Module` inconsistently.
def state_dict(
self,
destination: Optional[Dict[str, Any]] = None,
prefix: str = "",
keep_vars: bool = False,
) -> Dict[str, Any]:
if destination is None:
destination = OrderedDict()
# pyre-ignore [16]
destination._metadata = OrderedDict()
for emb_config, emb_module in zip(
self._embedding_bag_configs,
self.embedding_bags,
):
(weight, _) = emb_module.split_embedding_weights(split_scale_shifts=False)[
0
]
destination[prefix + f"embedding_bags.{emb_config.name}.weight"] = weight
return destination
def named_buffers(
self, prefix: str = "", recurse: bool = True
) -> Iterator[Tuple[str, nn.Parameter]]:
state_dict = self.state_dict(prefix=prefix, keep_vars=True)
for key, value in state_dict.items():
yield key, value
def _get_name(self) -> str:
return "QuantizedEmbeddingBagCollection"
@classmethod
def from_float(
cls, module: OriginalEmbeddingBagCollection
) -> "EmbeddingBagCollection":
assert hasattr(
module, "qconfig"
), "EmbeddingBagCollection input float module must have qconfig defined"
# pyre-ignore [16]
data_type = dtype_to_data_type(module.qconfig.weight().dtype)
embedding_bag_configs = copy.deepcopy(module.embedding_bag_configs)
for config in embedding_bag_configs:
config.data_type = data_type
table_name_to_quantized_weights: Dict[str, Tuple[Tensor, Tensor]] = {}
device = quantize_state_dict(module, table_name_to_quantized_weights, data_type)
return cls(
table_name_to_quantized_weights,
embedding_bag_configs,
module.is_weighted,
device=device,
)
@property
def embedding_bag_configs(
self,
) -> List[EmbeddingBagConfig]:
return self._embedding_bag_configs
@property
def is_weighted(self) -> bool:
return self._is_weighted
|
the-stack_0_10816 | # import easydict
from multiprocessing import Process
import yaml
from pathlib import Path
import argparse
import torch
import tqdm
import numpy as np
import copy
# torch
import torchvision
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models import mobilenet_v2
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision import transforms
# from yolov5.train_dt import yolov5
from EfficientObjectDetection.train_new_reward import EfficientOD
# import fr_utils
import munch
import os
import utils
from utils import load_filenames, load_dataset, load_dataloader, compute_map, convert_yolo2coco, label2idx, label_matching, reduce_dict, make_results
opt = {'epochs':100,
'batch_size':12,
'device':1,
'test_epoch':10,
'eval_epoch':2,
'step_batch_size':100,
'save_path':'save',
'save_freq': 5,
'rl_weight':None,
'print_freq': 50,
'h_detector_weight':'',
'l_detector_weight':'',
'fine_tr':'config/fine_tr.yaml',
'fine_eval':'config/fine_eval.yaml',
'coarse_tr':'config/coarse_tr.yaml',
'coarse_eval':'config/coarse_eval.yaml',
'EfficientOD':'config/EfficientOD.yaml',
'split': 4}
opt = munch.AutoMunch(opt)
# GPU Device
gpu_id = opt.device
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
use_cuda = torch.cuda.is_available()
print("GPU device " , use_cuda)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# training option load from yaml files
with open(opt.fine_tr) as f:
fine_tr = yaml.load(f, Loader=yaml.FullLoader)
with open(opt.fine_eval) as f:
fine_eval = yaml.load(f, Loader=yaml.FullLoader)
with open(opt.coarse_tr) as f:
coarse_tr = yaml.load(f, Loader=yaml.FullLoader)
with open(opt.coarse_eval) as f:
coarse_eval = yaml.load(f, Loader=yaml.FullLoader)
with open(opt.EfficientOD) as f:
efficient_config = yaml.load(f, Loader=yaml.FullLoader)
efficient_config['load'] = None # bug fix
epochs = opt.epochs
bs = opt.batch_size
# fine_detector = yolov5(fine_tr, fine_eval, epochs, bs)
# coarse_detector = yolov5(coarse_tr, coarse_eval, epochs, bs)
rl_agent = EfficientOD(efficient_config)
split_train_path = '/home/SSDD/ICIP21_dataset/800_HRSID/split_data_4_0/rl_ver/train/images'
split_val_path = '/home/SSDD/ICIP21_dataset/800_HRSID/split_data_4_0/rl_ver/val/images'
split_test_path = '/home/SSDD/ICIP21_dataset/800_HRSID/split_data_4_0/rl_ver/test/images'
split = 4
original_img_path = '/home/SSDD/ICIP21_dataset/800_HRSID/origin_data/rl_ver/'
original_img_path_train = original_img_path + 'train/images'
original_img_path_val = original_img_path + 'val/images'
original_img_path_test = original_img_path + 'test/images'
assert bs % split == 0, 'batch size should be divided with image split patch size'
num_classes = 2
fine_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, num_classes=num_classes, pretrained_backbone=False)
coarse_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, num_classes=num_classes, pretrained_backbone=False)
# # # # replace the classifier with a new one, that has
# # # # num_classes which is user-defined
# # # get number of input features for the classifier
fine_in_features = fine_model.roi_heads.box_predictor.cls_score.in_features
coarse_in_features = coarse_model.roi_heads.box_predictor.cls_score.in_features
# # # replace the pre-trained head with a new one
fine_model.roi_heads.box_predictor = FastRCNNPredictor(fine_in_features, num_classes)
coarse_model.roi_heads.box_predictor = FastRCNNPredictor(coarse_in_features, num_classes)
for fine_p, coarse_p in zip(fine_model.parameters(), coarse_model.parameters()):
fine_p.requires_grad = True
coarse_p.requires_grad = True
fine_model.to(device)
coarse_model.to(device)
# Optimizer
fine_params = [p for p in fine_model.parameters() if p.requires_grad]
coarse_params = [p for p in coarse_model.parameters() if p.requires_grad]
fine_optim = torch.optim.SGD(fine_params, lr=0.005, momentum=0.9, weight_decay=0.0005)
coarse_optim = torch.optim.SGD(coarse_params, lr=0.005, momentum=0.9, weight_decay=0.0005)
fine_lr_scheduler = torch.optim.lr_scheduler.StepLR(fine_optim, step_size=50)
coarse_lr_scheduler = torch.optim.lr_scheduler.StepLR(coarse_optim, step_size=50)
for e in range(epochs):
    # Create image entries in the loader even when no labels are present
train_imgs = load_filenames(split_train_path, split, bs).files_array()
fine_train_dataset = load_dataset(train_imgs, fine_tr, bs)
coarse_train_dataset = load_dataset(train_imgs, fine_tr, bs)
fine_train_loader = load_dataloader(bs, fine_train_dataset)
coarse_train_loader = load_dataloader(bs, coarse_train_dataset)
fine_train_nb = len(fine_train_loader)
coarse_train_nb = len(coarse_train_loader)
assert fine_train_nb == coarse_train_nb, 'fine & coarse train batch number is not matched'
nb = fine_train_nb
# Logger
fine_metric_logger = utils.MetricLogger(delimiter=" ")
fine_metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
coarse_metric_logger = utils.MetricLogger(delimiter=" ")
coarse_metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
fine_header = 'Fine Epoch: [{}]'.format(e)
coarse_header = 'Coarse Epoch: [{}]'.format(e)
# # warmup
fine_lr_scheduler = None
    coarse_lr_scheduler = None
if e == 0:
warmup_factor = 1. / 1000
warmup_iters = min(1000, fine_train_nb-1)
fine_lr_scheduler = utils.warmup_lr_scheduler(fine_optim, warmup_iters, warmup_factor)
coarse_lr_scheduler = utils.warmup_lr_scheduler(coarse_optim, warmup_iters, warmup_factor)
for i, (fine_train, coarse_train) in enumerate(zip(fine_train_loader, coarse_train_loader)):
# train
fine_model.train()
coarse_model.train()
#### fine train ###
# Label mathching
fine_imgs, fine_labels = label_matching(fine_train, device)
fine_imgs = fine_imgs.to(device) / 255.
## train: img normalization --> not, zerodivision err
fine_loss_dict = fine_model(fine_imgs, copy.deepcopy(fine_labels))
fine_losses = sum(loss for loss in fine_loss_dict.values())
fine_loss_dict_reduced = reduce_dict(fine_loss_dict)
fine_loss_reduced = sum(loss for loss in fine_loss_dict_reduced.values())
fine_loss_val = fine_loss_reduced.item()
# optimizer
fine_optim.zero_grad()
fine_losses.backward()
fine_optim.step()
if fine_lr_scheduler is not None:
fine_lr_scheduler.step()
fine_metric_logger.update(loss=fine_loss_reduced, **fine_loss_dict_reduced)
fine_metric_logger.update(lr=fine_optim.param_groups[0]["lr"])
if i % opt.print_freq ==0:
space_fmt = ':' + str(len(str(fine_train_nb))) + 'd'
log_msg = fine_metric_logger.delimiter.join([fine_header, '[{0' + space_fmt + '}/{1}]', '{meters}'])
print(log_msg.format(i, fine_train_nb, meters=str(fine_metric_logger)))
### coarse train ###
# Label mathching
coarse_imgs, coarse_labels = label_matching(coarse_train, device)
coarse_imgs = coarse_imgs.to(device) / 255.
## train: img normalization --> not, zerodivision err
coarse_loss_dict = coarse_model(coarse_imgs, copy.deepcopy(coarse_labels))
coarse_losses = sum(loss for loss in coarse_loss_dict.values())
# utils
coarse_loss_dict_reduced = reduce_dict(coarse_loss_dict)
coarse_loss_reduced = sum(loss for loss in coarse_loss_dict_reduced.values())
coarse_loss_val = coarse_loss_reduced.item()
# optimizer
coarse_optim.zero_grad()
coarse_losses.backward()
coarse_optim.step()
if coarse_lr_scheduler is not None:
coarse_lr_scheduler.step()
coarse_metric_logger.update(loss=coarse_loss_reduced, **coarse_loss_dict_reduced)
coarse_metric_logger.update(lr=fine_optim.param_groups[0]["lr"])
if i % opt.print_freq ==0:
space_fmt = ':' + str(len(str(fine_train_nb))) + 'd'
log_msg = coarse_metric_logger.delimiter.join([coarse_header, '[{0' + space_fmt + '}/{1}]', '{meters}'])
print(log_msg.format(i, fine_train_nb, meters=str(coarse_metric_logger)))
## train eval
# result = (source_path, paths[si], mp, mr, map50, nl, stats)
# file_name, od_file_dir, mp=0(skip), ma=0(skip), map50(will be soon), objnum, stat
# stat = 4
# make_results(model, dataset, device)
fine_results = make_results(fine_model, fine_train, device)
coarse_results = make_results(coarse_model, coarse_train, device)
# conf_thresh=0.001 / iou_thres=0.6
rl_agent.train(e, i, nb, fine_results, coarse_results, original_data_path=original_img_path_train)
## Validation
if e % 1 == 0:
fine_dataset, coarse_dataset, policies = rl_agent.eval(split_val_path, original_img_path_val)
print(len(fine_dataset.tolist()))
print(len(coarse_dataset.tolist()))
fine_results, coarse_results = [], []
if len(fine_dataset.tolist()) > 0:
fine_val_dataset = load_dataset(fine_dataset, fine_tr, bs)
fine_val_loader = load_dataloader(bs, fine_val_dataset)
fine_nb = len(fine_val_loader)
for i, fine_val in tqdm.tqdm(enumerate(fine_val_loader), total=fine_nb):
fine_results += make_results(fine_model, fine_val, device)
if len(coarse_dataset.tolist()) > 0:
coarse_val_dataset = load_dataset(coarse_dataset, fine_tr, bs)
coarse_val_loader = load_dataloader(bs, coarse_val_dataset)
coarse_nb = len(coarse_train_loader)
for i, coarse_val in tqdm.tqdm(enumerate(coarse_val_loader), total=coarse_nb):
coarse_results += make_results(coarse_model, coarse_val, device)
map50 = compute_map(fine_results, coarse_results)
print('Validation MAP: \n', map50)
# save
if e % opt.save_freq == 0:
        torch.save(fine_model, os.path.join(opt.save_path, 'fine_model'))
        torch.save(coarse_model, os.path.join(opt.save_path, 'coarse_model'))
# Testing
fine_dataset, coarse_dataset, policies = rl_agent.eval(split_test_path, original_img_path_test)
fine_results, coarse_results = [], []
if len(fine_dataset.tolist()) > 0:
fine_test_dataset = load_dataset(fine_dataset, fine_tr, bs)
fine_test_loader = load_dataloader(bs, fine_test_dataset)
fine_nb = len(fine_test_loader)
for i, fine_test in tqdm.tqdm(enumerate(fine_test_loader), total=fine_nb):
fine_results += make_results(fine_model, fine_test, device)
if len(coarse_dataset.tolist()) > 0:
coarse_test_dataset = load_dataset(coarse_dataset, fine_tr, bs)
coarse_test_loader = load_dataloader(bs, coarse_test_dataset)
coarse_nb = len(coarse_test_loader)
for i, coarse_test in tqdm.tqdm(enumerate(coarse_test_loader), total=coarse_nb):
coarse_results += make_results(coarse_model, coarse_test, device)
map50 = compute_map(fine_results, coarse_results)
print('MAP: \n', map50)
with open('test_result.txt', 'a') as f:
f.write(str(map50))
with open('test_policies.txt', 'a') as f:
f.write(str(policies)) |
the-stack_0_10817 | # references:
# https://github.com/una-dinosauria/3d-pose-baseline/blob/master/src/predict_3dpose.py#L305
import numpy as np
from ..utils import data_utils, procrustes
class Human36M_JointErrorEvaluator:
def __init__(self, human36m, predict_14=False, apply_procrustes_alignment=False):
"""
Args:
human36m (Human36MDatasetHandler): Human3.6M dataset.
predict_14 (bool, optional): Whether to predict 14 3d-joints. Defaults to False.
apply_procrustes_alignment (bool, optional): Whether to apply procrustes alignment to the predicted poses.
"""
self.human36m = human36m
self.predict_14 = predict_14
self.apply_procrustes_alignment = apply_procrustes_alignment
self.n_joints = (
14 if self.predict_14 else 17
) # 17 = predicted 16 joints + root (Hip joint)
self.reset()
def reset(self):
"""Remove all samples added so far.
"""
self.joint_distances = []
self.actions = []
def add_samples(self, pred_3d_poses, truth_3d_poses, actions):
"""Add pairs of predicted and ground-truth poses to evaluate.
Args:
pred_3d_poses (numpy.array): Predicted 3d poses (normalized). `[batch_size, n_joints, 3]`.
truth_3d_poses (numpy.array): Ground-truth 3d poses (normalized). `[batch_size, n_joints, 3]`.
actions (list[str]): Actions to which the poses belong.
"""
# Compute distances of corresponding joints of pred/truth poses.
pred = self._preprocess_poses(pred_3d_poses) # [batch_size, n_joints x 3]
truth = self._preprocess_poses(truth_3d_poses) # [batch_size, n_joints x 3]
if self.apply_procrustes_alignment:
pred = self._apply_procrustes_alignment(
sources=pred, targets=truth
) # [batch_size, n_joints x 3]
d = self._compute_joint_distances(pred, truth) # [batch_size, n_joints]
self.joint_distances.append(d)
# Cache action of each frame for per action evaluation.
self.actions.extend(actions)
def get_metrics(self):
"""Get evaluation results.
Returns:
(dict): evaluation results.
"""
joint_distances = np.vstack(self.joint_distances) # [N, n_joints]
actions = np.array(self.actions) # [N,]
assert len(joint_distances) == len(actions)
# Evaluate joint position errors over all actions.
mpjpe = np.mean(joint_distances) # mean per joint position error: float
pjpe = np.mean(joint_distances, axis=0) # per joint position error: [n_joints,]
metrics = {
"MPJPE": mpjpe,
"PJPE": pjpe.tolist(),
}
# Evaluate joint position error per action.
for action in data_utils.H36M_ACTIONS:
mask = actions == action
if np.sum(mask) == 0: # In case no sample is found in the action,
mpjpe = pjpe = -1 # set errors as -1.
print("Warining: no test sample was found in the action: {action}. ")
else:
joint_distances_masked = joint_distances[mask]
mpjpe = np.mean(joint_distances_masked)
pjpe = np.mean(joint_distances_masked, axis=0)
metrics["MPJPE/{}".format(action)] = mpjpe
metrics["PJPE/{}".format(action)] = pjpe.tolist()
return metrics
def _preprocess_poses(self, poses_3d):
mean_3d = self.human36m.mean_3d
std_3d = self.human36m.std_3d
dim_to_ignore_3d = self.human36m.dim_to_ignore_3d
dim_to_use_3d = self.human36m.dim_to_use_3d
# Unnormalize 3d poses.
poses = data_utils.unnormalize_data(
poses_3d, mean_3d, std_3d, dim_to_ignore_3d
) # [batch_size, 32 x 3]
# Keep only the relevant joints.
dim_to_keep = (
dim_to_use_3d
if self.predict_14
else np.hstack([np.arange(3), dim_to_use_3d])
# Add root (Hip joint) if the model predicts 16 joints.
# XXX: Assuming the first 3 values represent root joint 3d position.
)
poses = poses[:, dim_to_keep] # [batch_size, n_joints x 3]
return poses
def _apply_procrustes_alignment(self, sources, targets):
sources_aligned = []
batch_size = len(sources)
for i in range(batch_size):
target = targets[i].reshape(-1, 3) # [n_joints, 3]
source = sources[i].reshape(-1, 3) # [n_joints, 3]
_, _, T, b, c = procrustes.compute_similarity_transform(
target, source, compute_optimal_scale=True
)
aligned = (b * source.dot(T)) + c
aligned = aligned.reshape((-1, self.n_joints * 3)) # [1, n_joints x 3]
sources_aligned.append(aligned)
return np.vstack(sources_aligned) # [batch_size, n_joints x 3]
def _compute_joint_distances(self, pred, truth):
# Compute Euclidean distance error per joint.
d_squared = (pred - truth) ** 2 # [batch_size, n_joints x 3]
d_squared = d_squared.reshape(
(-1, self.n_joints, 3)
) # [batch_size, n_joints, 3]
d_squared = np.sum(d_squared, axis=2) # [batch_size, n_joints]
d = np.sqrt(d_squared) # [batch_size, n_joints]
return d
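# Usage sketch (hypothetical handler and array shapes): accumulate predictions
# batch by batch, then read the aggregated metrics.
#
# evaluator = Human36M_JointErrorEvaluator(human36m_dataset_handler)
# for pred_3d, truth_3d, actions in eval_batches:  # normalized pose batches
#     evaluator.add_samples(pred_3d, truth_3d, actions)
# metrics = evaluator.get_metrics()
# print(metrics["MPJPE"], metrics["MPJPE/Walking"])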
|
the-stack_0_10820 | import numpy as np
import pickle
from sklearn.neighbors._kde import KernelDensity
import os
import sys
import joblib
import torch
import json
from tqdm import tqdm
sys.path.append("/NAS2020/Workspaces/DRLGroup/zbzhu/lfo-ppuu/lfo")
from dataloader import DataLoader
from map_i80_ctrl import ControlledI80
from tianshou.env import SubprocVectorEnv
from rlkit.torch.sac.policies import MakeDeterministic
from rlkit.data_management.split_dict import split_dict
s_std = np.tile(np.array([392.1703, 44.0625, 24.4669, 1.0952]), 7)
s_mean = np.tile(np.array([887.6, 117.67, 36.453, -0.23616]), 7)
def kl_divergence(x1, x2):
p = kde_prob(x1, min_v=0, max_v=1, scale=100)
q = kde_prob(x2, min_v=0, max_v=1, scale=100)
return np.sum(np.where(p != 0, p * np.log(p / q), 0))
def kde_prob(x, min_v=0, max_v=1, scale=100):
kde = KernelDensity(kernel="gaussian", bandwidth=(max_v - min_v) * 1.0 / scale).fit(
list(x)
) # x.shape: [None, 2]
data = [
(i * 1.0 / scale, j * 1.0 / scale)
for i in range(min_v * scale, max_v * scale)
for j in range(min_v * scale, max_v * scale)
]
prob = np.exp(kde.score_samples(data)) + 1e-4 # x.shape: [None, 1]
return prob
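# Illustrative sanity check (left commented out so the script below is unaffected):
# the KL divergence of a sample against itself should be ~0. Positions are expected
# as [N, 2] arrays already scaled into [0, 1].
#
# _pos = np.random.rand(500, 2)
# assert kl_divergence(_pos, _pos) < 1e-6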
def obs_unnorm(obs):
obs *= s_std
obs += s_mean
return obs
def make_env(env_kwargs, rank, seed=0, car_index=None):
def _init():
"""
env_specs:
env_name: 'halfcheetah'
env_kwargs: {} # kwargs to pass to the env constructor call
"""
env = ControlledI80(**env_kwargs)
env.seed(rank + seed)
if car_index is not None and hasattr(env, "set_train_indx"):
env.set_train_indx(car_index)
return env
return _init
class opt:
debug = 0
demo_path = "/NAS2020/Workspaces/DRLGroup/zbzhu/lfo-ppuu/expert_demo_xy.pkl"
test_idx_path = "/NAS2020/Workspaces/DRLGroup/zbzhu/lfo-ppuu/lfo/demos/expert_trajs_50/PPUU/test_indx_final.pkl"
log_path = "/NAS2020/Workspaces/DRLGroup/zbzhu/lfo-ppuu/lfo/logs/gailfo-ppuu-final/gailfo_ppuu_final--2021_01_26_03_20_45--s-0"
model_path = os.path.join(log_path, "best.pkl")
variant_path = os.path.join(log_path, "variant.json")
with open(variant_path, "rb") as f:
variant = json.load(f)
env_kwargs = dict(
fps=30,
nb_states=1,
display=False,
delta_t=0.1,
store=False,
show_frame_count=False,
data_dir="ppuu_logs/",
)
if __name__ == "__main__":
env_num = 50
env_wait_num = 25
with open(test_idx_path, "rb") as f:
test_idx = pickle.load(f)
splited_eval_dict = split_dict(test_idx, env_num)
eval_car_num = [len(d) for d in splited_eval_dict]
envs = SubprocVectorEnv(
[
make_env(
env_kwargs,
i,
car_index=splited_eval_dict[i],
)
for i in range(env_num)
],
wait_num=env_wait_num,
)
if os.path.isfile(demo_path):
with open(demo_path, "rb") as f:
all_demo_x, all_demo_y = pickle.load(f)
else:
dataloader = DataLoader(None, opt, "i80")
all_demo_x, all_demo_y = [], []
for idx in test_idx.keys():
all_demo_x.extend(dataloader.states[idx][:, 0, 0].numpy())
all_demo_y.extend(dataloader.states[idx][:, 0, 1].numpy())
with open(demo_path, "wb") as f:
pickle.dump((all_demo_x, all_demo_y), f)
model = joblib.load(model_path)
policy = model["policy"]
eval_policy = MakeDeterministic(policy)
all_agent_x, all_agent_y = [], []
items = list(test_idx.items())
ready_env_ids = np.arange(env_num)
finished_env_ids = []
obs_list = envs.reset()
done = False
episode_step = np.zeros(env_num)
env_finished_car_num = np.zeros(env_num)
pbar = tqdm(total=len(items))
while True:
actions = []
for obs in obs_list[ready_env_ids]:
ori_obs = obs_unnorm(obs.copy())
agent_x = ori_obs[0]
agent_y = ori_obs[1]
all_agent_x.append(agent_x)
all_agent_y.append(agent_y)
with torch.no_grad():
action, _ = eval_policy.get_action(obs_np=obs)
actions.append(action)
actions = np.array(actions)
next_obs_list, rews, dones, env_infos = envs.step(actions, id=ready_env_ids)
ready_env_ids = np.array([i["env_id"] for i in env_infos])
obs_list[ready_env_ids] = next_obs_list
for idx, done in enumerate(dones):
env_id = ready_env_ids[idx]
episode_step[env_id] += 1
if done or episode_step[env_id] > 1500:
env_finished_car_num[env_id] += 1
pbar.update(1)
if not done:
obs_list[env_id] = envs.reset(id=env_id)
if env_finished_car_num[env_id] == eval_car_num[env_id]:
finished_env_ids.append(env_id)
ready_env_ids = np.array(
[x for x in ready_env_ids if x not in finished_env_ids]
)
if len(finished_env_ids) == env_num:
assert len(ready_env_ids) == 0
break
pbar.close()
all_agent_x = np.array(all_agent_x)[:, np.newaxis] / 1600
all_agent_y = np.array(all_agent_y)[:, np.newaxis] / 200
all_agent_pos = np.concatenate((all_agent_x, all_agent_y), 1)
all_demo_x = np.array(all_demo_x)[:, np.newaxis] / 1600
all_demo_y = np.array(all_demo_y)[:, np.newaxis] / 200
all_demo_pos = np.concatenate((all_demo_x, all_demo_y), 1)
kld = kl_divergence(all_agent_pos, all_demo_pos)
print(kld)
|
the-stack_0_10822 | from pawpyseed.core.wavefunction import *
class NCLWavefunction(pawpyc.CNCLWavefunction, Wavefunction):
def __init__(self, struct, pwf, cr, dim, symprec=1e-4, setup_projectors=False):
"""
Arguments:
struct (pymatgen.core.Structure): structure that the wavefunction describes
pwf (pawpyc.PWFPointer): holder class for pswf_t and k-points/k-point weights
cr (CoreRegion): Contains the pseudopotentials, with projectors and
partials waves, for the structure
dim (pymatgen.io.vasp.outputs.Outcar OR np.ndarry OR list of length 3):
Outcar object for reading ngf or the dimensions NG* of the FFT grid
setup_projectors (bool, False): Whether to set up the core region
components of the wavefunctions. Pawpyseed will set up the projectors
automatically when they are first needed, so this generally
can be left as False.
Returns:
Wavefunction object
"""
self.band_props = pwf.band_props.copy(order="C")
super(Wavefunction, self).__init__(pwf)
if not self.ncl:
raise PAWpyError(
"Pseudowavefunction is collinear! Call Wavefunction(...) instead"
)
self.structure = struct
self.cr = cr
self.dim = np.array(dim).astype(np.int32)
if setup_projectors:
self.check_c_projectors()
@staticmethod
def from_files(
struct="CONTCAR",
wavecar="WAVECAR",
cr="POTCAR",
vr="vasprun.xml",
setup_projectors=False,
):
"""
Construct a Wavefunction object from file paths.
Arguments:
struct (str): VASP POSCAR or CONTCAR file path
wavecar (str): VASP WAVECAR file path
cr (str): VASP POTCAR file path
vr (str): VASP vasprun file path
outcar (str): VASP OUTCAR file path
setup_projectors (bool, False): Whether to set up the core region
components of the wavefunctions. Pawpyseed will set up the projectors
automatically when they are first needed, so this generally
can be left as False.
Returns:
Wavefunction object
"""
vr = Vasprun(vr)
dim = np.array(
[vr.parameters["NGX"], vr.parameters["NGY"], vr.parameters["NGZ"]]
)
symprec = vr.parameters["SYMPREC"]
pwf = pawpyc.PWFPointer(wavecar, vr)
return NCLWavefunction(
Poscar.from_file(struct).structure,
pwf,
CoreRegion(Potcar.from_file(cr)),
dim,
symprec,
setup_projectors,
)
@staticmethod
def from_directory(path, setup_projectors=False):
"""
Assumes VASP output has the default filenames and is located
in the directory specificed by path.
Arguments:
path (str): VASP output directory
setup_projectors (bool, False): Whether to set up the core region
components of the wavefunctions. Pawpyseed will set up the projectors
automatically when they are first needed, so this generally
can be left as False.
Returns:
Wavefunction object
"""
filepaths = []
for d in ["CONTCAR", "WAVECAR", "POTCAR", "vasprun.xml"]:
filepaths.append(str(os.path.join(path, d)))
args = filepaths + [setup_projectors]
return NCLWavefunction.from_files(*args)
def desymmetrized_copy(self, allkpts=None, weights=None):
raise NotImplementedError()
def write_state_realspace(
self, b, k, s, fileprefix="", dim=None, scale=1, remove_phase=False
):
"""
Writes the real and imaginary parts of a given band to two files,
prefixed by fileprefix
Args:
b (int): band number (0-indexed!)
k (int): kpoint number (0-indexed!)
s (int): spin number (0-indexed!)
fileprefix (string, ""): first part of the file name
dim (numpy array of 3 ints, None): dimensions of the FFT grid
scale (scalar, 1): number to multiply the realspace wavefunction by.
For example, VASP multiplies charge density by the volume
of the structure.
remove_phase (False): If True, removes the e^(ikr) phase
from the wavefunction (this does not necessarily mean
the wavefunction is real). This is useful if you want
to visualize the wavefunction because the e^(ikr) phase
makes the wavefunction non-periodic
Returns:
A 3D array (indexed by x,y,z where x,y,z are fractional coordinates)
with complex double values for the realspace wavefunction
The wavefunction is written in two files with z the slow index.
"""
self.check_c_projectors()
if dim is not None:
self.update_dim(np.array(dim))
filename_base = "%sB%dK%dS%d" % (fileprefix, b, k, s)
filename1 = "%s_UP_REAL.vasp" % filename_base
filename2 = "%s_UP_IMAG.vasp" % filename_base
filename3 = "%s_DOWN_REAL.vasp" % filename_base
filename4 = "%s_DOWN_IMAG.vasp" % filename_base
res0, res1 = self._write_realspace_state(
filename1,
filename2,
filename3,
filename4,
scale,
b,
k,
s,
remove_phase=remove_phase,
)
self._convert_to_vasp_volumetric(filename1, self.dim)
self._convert_to_vasp_volumetric(filename2, self.dim)
self._convert_to_vasp_volumetric(filename3, self.dim)
self._convert_to_vasp_volumetric(filename4, self.dim)
return res0, res1
def write_density_realspace(self, filename="PYAECCAR.vasp", dim=None, scale=1):
"""
Writes the real and imaginary parts of a given band to two files,
prefixed by fileprefix
Args:
b (int): band number (0-indexed!)
k (int): kpoint number (0-indexed!)
s (int): spin number (0-indexed!)
fileprefix (string, ""): first part of the file name
dim (numpy array of 3 ints, None): dimensions of the FFT grid
scale (scalar, 1): number to multiply the realspace wavefunction by.
For example, VASP multiplies charge density by the volume
of the structure.
Returns:
A 3D array (indexed by x,y,z where x,y,z are fractional coordinates)
with complex double values for the realspace wavefunction
The charge density is written with z the slow index.
"""
self.check_c_projectors()
if dim is not None:
self.update_dim(np.array(dim))
res = self._write_realspace_density(filename, scale)
self._convert_to_vasp_volumetric(filename, self.dim)
return res
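# Usage sketch (hypothetical paths; requires the default VASP output files
# CONTCAR/WAVECAR/POTCAR/vasprun.xml in the directory):
#
# wf = NCLWavefunction.from_directory("./vasp_run")
# wf.write_state_realspace(b=0, k=0, s=0, fileprefix="ncl_")
# wf.write_density_realspace(filename="PYAECCAR.vasp")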
|
the-stack_0_10823 | #!/usr/bin/env python
from common.realtime import sec_since_boot
from cereal import car
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import EventTypes as ET, create_event
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.car.toyota.carstate import CarState, get_can_parser, get_cam_can_parser
from selfdrive.car.toyota.values import ECU, check_ecu_msgs, CAR, NO_STOP_TIMER_CAR
from selfdrive.swaglog import cloudlog
try:
from selfdrive.car.toyota.carcontroller import CarController
except ImportError:
CarController = None
class CarInterface(object):
def __init__(self, CP, sendcan=None):
self.CP = CP
self.VM = VehicleModel(CP)
self.frame = 0
self.gas_pressed_prev = False
self.brake_pressed_prev = False
self.can_invalid_count = 0
self.cam_can_valid_count = 0
self.cruise_enabled_prev = False
# *** init the major players ***
self.CS = CarState(CP)
self.cp = get_can_parser(CP)
self.cp_cam = get_cam_can_parser(CP)
self.forwarding_camera = False
# sending if read only is False
if sendcan is not None:
self.sendcan = sendcan
self.CC = CarController(self.cp.dbc_name, CP.carFingerprint, CP.enableCamera, CP.enableDsu, CP.enableApgs)
@staticmethod
def compute_gb(accel, speed):
return float(accel) / 3.0
@staticmethod
def calc_accel_override(a_ego, a_target, v_ego, v_target):
return 1.0
@staticmethod
def get_params(candidate, fingerprint):
# kg of standard extra cargo to count for drive, gas, etc...
std_cargo = 136
ret = car.CarParams.new_message()
ret.carName = "toyota"
ret.carFingerprint = candidate
ret.safetyModel = car.CarParams.SafetyModels.toyota
# pedal
ret.enableCruise = not ret.enableGasInterceptor
# FIXME: hardcoding honda civic 2016 touring params so they can be used to
# scale unknown params for other cars
mass_civic = 2923 * CV.LB_TO_KG + std_cargo
wheelbase_civic = 2.70
centerToFront_civic = wheelbase_civic * 0.4
centerToRear_civic = wheelbase_civic - centerToFront_civic
rotationalInertia_civic = 2500
tireStiffnessFront_civic = 192150
tireStiffnessRear_civic = 202500
ret.steerActuatorDelay = 0.12 # Default delay, Prius has larger delay
if candidate != CAR.PRIUS:
ret.lateralTuning.init('pid')
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0.], [0.]]
if candidate == CAR.PRIUS:
stop_and_go = True
ret.safetyParam = 66 # see conversion factor for STEER_TORQUE_EPS in dbc file
ret.wheelbase = 2.70
ret.steerRatio = 16.00 # unknown end-to-end spec
tire_stiffness_factor = 1.0 # hand-tune
ret.mass = 3375 * CV.LB_TO_KG + std_cargo
ret.lateralTuning.init('indi')
ret.lateralTuning.indi.innerLoopGain = 4.75
ret.lateralTuning.indi.outerLoopGain = 2.0
ret.lateralTuning.indi.timeConstant = 3.0
ret.lateralTuning.indi.actuatorEffectiveness = 1.5
ret.steerActuatorDelay = 0.5
ret.steerRateCost = 0.5
elif candidate in [CAR.RAV4, CAR.RAV4H]:
stop_and_go = True if (candidate in CAR.RAV4H) else False
ret.safetyParam = 73 # see conversion factor for STEER_TORQUE_EPS in dbc file
ret.wheelbase = 2.65
ret.steerRatio = 16.30 # 14.5 is spec end-to-end
tire_stiffness_factor = 0.5533
ret.mass = 3650 * CV.LB_TO_KG + std_cargo # mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.05]]
ret.lateralTuning.pid.kf = 0.00006 # full torque for 10 deg at 80mph means 0.00007818594
elif candidate == CAR.COROLLA:
stop_and_go = False
ret.safetyParam = 100 # see conversion factor for STEER_TORQUE_EPS in dbc file
ret.wheelbase = 2.70
ret.steerRatio = 17.8
tire_stiffness_factor = 0.444
ret.mass = 2860 * CV.LB_TO_KG + std_cargo # mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.2], [0.05]]
ret.lateralTuning.pid.kf = 0.00003 # full torque for 20 deg at 80mph means 0.00007818594
elif candidate == CAR.LEXUS_RXH:
stop_and_go = True
ret.safetyParam = 100 # see conversion factor for STEER_TORQUE_EPS in dbc file
ret.wheelbase = 2.79
ret.steerRatio = 16. # 14.8 is spec end-to-end
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 4481 * CV.LB_TO_KG + std_cargo # mean between min and max
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006 # full torque for 10 deg at 80mph means 0.00007818594
elif candidate in [CAR.CHR, CAR.CHRH]:
stop_and_go = True
ret.safetyParam = 100
ret.wheelbase = 2.63906
ret.steerRatio = 13.6
tire_stiffness_factor = 0.7933
ret.mass = 3300. * CV.LB_TO_KG + std_cargo
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.723], [0.0428]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate in [CAR.CAMRY, CAR.CAMRYH]:
stop_and_go = True
ret.safetyParam = 100
ret.wheelbase = 2.82448
ret.steerRatio = 13.7
tire_stiffness_factor = 0.7933
ret.mass = 3400 * CV.LB_TO_KG + std_cargo #mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate in [CAR.HIGHLANDER, CAR.HIGHLANDERH]:
stop_and_go = True
ret.safetyParam = 100
ret.wheelbase = 2.78
ret.steerRatio = 16.0
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 4607 * CV.LB_TO_KG + std_cargo #mean between normal and hybrid limited
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.05]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.AVALON:
stop_and_go = False
ret.safetyParam = 73 # see conversion factor for STEER_TORQUE_EPS in dbc file
ret.wheelbase = 2.82
ret.steerRatio = 14.8 #Found at https://pressroom.toyota.com/releases/2016+avalon+product+specs.download
tire_stiffness_factor = 0.7983
ret.mass = 3505 * CV.LB_TO_KG + std_cargo # mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.17], [0.03]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.RAV4_2019:
stop_and_go = True
ret.safetyParam = 100
ret.wheelbase = 2.68986
ret.steerRatio = 14.3
tire_stiffness_factor = 0.7933
ret.mass = 3370. * CV.LB_TO_KG + std_cargo
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.05]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.COROLLA_HATCH:
stop_and_go = True
ret.safetyParam = 100
ret.wheelbase = 2.63906
ret.steerRatio = 13.9
tire_stiffness_factor = 0.444
ret.mass = 3060. * CV.LB_TO_KG + std_cargo
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.05]]
ret.lateralTuning.pid.kf = 0.00007818594
ret.steerRateCost = 1.
ret.centerToFront = ret.wheelbase * 0.44
#detect the Pedal address
ret.enableGasInterceptor = 0x201 in fingerprint
# min speed to enable ACC. if car can do stop and go, then set enabling speed
# to a negative value, so it won't matter.
ret.minEnableSpeed = -1. if (stop_and_go or ret.enableGasInterceptor) else 19. * CV.MPH_TO_MS
centerToRear = ret.wheelbase - ret.centerToFront
# TODO: get actual value, for now starting with reasonable value for
# civic and scaling by mass and wheelbase
ret.rotationalInertia = rotationalInertia_civic * \
ret.mass * ret.wheelbase**2 / (mass_civic * wheelbase_civic**2)
# TODO: start from empirically derived lateral slip stiffness for the civic and scale by
# mass and CG position, so all cars will have approximately similar dyn behaviors
ret.tireStiffnessFront = (tireStiffnessFront_civic * tire_stiffness_factor) * \
ret.mass / mass_civic * \
(centerToRear / ret.wheelbase) / (centerToRear_civic / wheelbase_civic)
ret.tireStiffnessRear = (tireStiffnessRear_civic * tire_stiffness_factor) * \
ret.mass / mass_civic * \
(ret.centerToFront / ret.wheelbase) / (centerToFront_civic / wheelbase_civic)
# no rear steering, at least on the listed cars above
ret.steerRatioRear = 0.
ret.steerControlType = car.CarParams.SteerControlType.torque
# steer, gas, brake limitations VS speed
    ret.steerMaxBP = [16. * CV.KPH_TO_MS, 45. * CV.KPH_TO_MS] # breakpoints at 16 and 45 kph
    ret.steerMaxV = [1., 1.] # full torque allowed at both breakpoints
ret.brakeMaxBP = [5., 20.]
ret.brakeMaxV = [1., 0.8]
ret.enableCamera = not check_ecu_msgs(fingerprint, ECU.CAM)
ret.enableDsu = not check_ecu_msgs(fingerprint, ECU.DSU)
ret.enableApgs = False #not check_ecu_msgs(fingerprint, ECU.APGS)
ret.openpilotLongitudinalControl = ret.enableCamera and ret.enableDsu
cloudlog.warn("ECU Camera Simulated: %r", ret.enableCamera)
cloudlog.warn("ECU DSU Simulated: %r", ret.enableDsu)
cloudlog.warn("ECU APGS Simulated: %r", ret.enableApgs)
cloudlog.warn("ECU Gas Interceptor: %r", ret.enableGasInterceptor)
ret.steerLimitAlert = False
ret.longitudinalTuning.deadzoneBP = [0., 9.]
ret.longitudinalTuning.deadzoneV = [0., .15]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.stoppingControl = False
ret.startAccel = 0.0
if ret.enableGasInterceptor:
ret.gasMaxBP = [0., 9., 35]
ret.gasMaxV = [0.2, 0.5, 0.7]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiV = [0.18, 0.12]
else:
ret.gasMaxBP = [0.]
ret.gasMaxV = [0.5]
ret.longitudinalTuning.kpV = [3.6, 2.4, 1.5]
ret.longitudinalTuning.kiV = [0.54, 0.36]
return ret
# returns a car.CarState
def update(self, c):
# ******************* do can recv *******************
canMonoTimes = []
self.cp.update(int(sec_since_boot() * 1e9), False)
# run the cam can update for 10s as we just need to know if the camera is alive
if self.frame < 1000:
self.cp_cam.update(int(sec_since_boot() * 1e9), False)
self.CS.update(self.cp, self.cp_cam)
# create message
ret = car.CarState.new_message()
# speeds
ret.vEgo = self.CS.v_ego
ret.vEgoRaw = self.CS.v_ego_raw
ret.aEgo = self.CS.a_ego
ret.yawRate = self.VM.yaw_rate(self.CS.angle_steers * CV.DEG_TO_RAD, self.CS.v_ego)
ret.standstill = self.CS.standstill
ret.wheelSpeeds.fl = self.CS.v_wheel_fl
ret.wheelSpeeds.fr = self.CS.v_wheel_fr
ret.wheelSpeeds.rl = self.CS.v_wheel_rl
ret.wheelSpeeds.rr = self.CS.v_wheel_rr
# gear shifter
ret.gearShifter = self.CS.gear_shifter
# gas pedal
ret.gas = self.CS.car_gas
if self.CP.enableGasInterceptor:
# use interceptor values to disengage on pedal press
ret.gasPressed = self.CS.pedal_gas > 15
else:
ret.gasPressed = self.CS.pedal_gas > 0
# brake pedal
ret.brake = self.CS.user_brake
ret.brakePressed = self.CS.brake_pressed != 0
ret.brakeLights = self.CS.brake_lights
# steering wheel
ret.steeringAngle = self.CS.angle_steers
ret.steeringRate = self.CS.angle_steers_rate
ret.steeringTorque = self.CS.steer_torque_driver
ret.steeringPressed = self.CS.steer_override
# cruise state
ret.cruiseState.enabled = self.CS.pcm_acc_active
ret.cruiseState.speed = self.CS.v_cruise_pcm * CV.KPH_TO_MS
ret.cruiseState.available = bool(self.CS.main_on)
ret.cruiseState.speedOffset = 0.
if self.CP.carFingerprint in NO_STOP_TIMER_CAR or self.CP.enableGasInterceptor:
# ignore standstill in hybrid vehicles, since pcm allows to restart without
# receiving any special command
# also if interceptor is detected
ret.cruiseState.standstill = False
else:
ret.cruiseState.standstill = self.CS.pcm_acc_status == 7
buttonEvents = []
if self.CS.left_blinker_on != self.CS.prev_left_blinker_on:
be = car.CarState.ButtonEvent.new_message()
be.type = 'leftBlinker'
be.pressed = self.CS.left_blinker_on != 0
buttonEvents.append(be)
if self.CS.right_blinker_on != self.CS.prev_right_blinker_on:
be = car.CarState.ButtonEvent.new_message()
be.type = 'rightBlinker'
be.pressed = self.CS.right_blinker_on != 0
buttonEvents.append(be)
ret.buttonEvents = buttonEvents
ret.leftBlinker = bool(self.CS.left_blinker_on)
ret.rightBlinker = bool(self.CS.right_blinker_on)
ret.doorOpen = not self.CS.door_all_closed
ret.seatbeltUnlatched = not self.CS.seatbelt
ret.genericToggle = self.CS.generic_toggle
# events
events = []
if not self.CS.can_valid:
self.can_invalid_count += 1
if self.can_invalid_count >= 5:
events.append(create_event('commIssue', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
else:
self.can_invalid_count = 0
if self.CS.cam_can_valid:
self.cam_can_valid_count += 1
if self.cam_can_valid_count >= 5:
self.forwarding_camera = True
if not ret.gearShifter == 'drive' and self.CP.enableDsu:
events.append(create_event('wrongGear', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if ret.doorOpen:
events.append(create_event('doorOpen', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if ret.seatbeltUnlatched:
events.append(create_event('seatbeltNotLatched', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if self.CS.esp_disabled and self.CP.enableDsu:
events.append(create_event('espDisabled', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if not self.CS.main_on and self.CP.enableDsu:
events.append(create_event('wrongCarMode', [ET.NO_ENTRY, ET.USER_DISABLE]))
if ret.gearShifter == 'reverse' and self.CP.enableDsu:
events.append(create_event('reverseGear', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
if self.CS.steer_error:
events.append(create_event('steerTempUnavailable', [ET.NO_ENTRY, ET.WARNING]))
if self.CS.low_speed_lockout and self.CP.enableDsu:
events.append(create_event('lowSpeedLockout', [ET.NO_ENTRY, ET.PERMANENT]))
if ret.vEgo < self.CP.minEnableSpeed and self.CP.enableDsu:
events.append(create_event('speedTooLow', [ET.NO_ENTRY]))
if c.actuators.gas > 0.1:
# some margin on the actuator to not false trigger cancellation while stopping
events.append(create_event('speedTooLow', [ET.IMMEDIATE_DISABLE]))
if ret.vEgo < 0.001:
# while in standstill, send a user alert
events.append(create_event('manualRestart', [ET.WARNING]))
# enable request in prius is simple, as we activate when Toyota is active (rising edge)
if ret.cruiseState.enabled and not self.cruise_enabled_prev:
events.append(create_event('pcmEnable', [ET.ENABLE]))
elif not ret.cruiseState.enabled:
events.append(create_event('pcmDisable', [ET.USER_DISABLE]))
# disable on gas pedal rising edge, or when the brake is pressed while speed isn't zero
if (ret.gasPressed and not self.gas_pressed_prev) or \
(ret.brakePressed and (not self.brake_pressed_prev or ret.vEgo > 0.001)):
events.append(create_event('pedalPressed', [ET.NO_ENTRY, ET.USER_DISABLE]))
if ret.gasPressed:
events.append(create_event('pedalPressed', [ET.PRE_ENABLE]))
ret.events = events
ret.canMonoTimes = canMonoTimes
self.gas_pressed_prev = ret.gasPressed
self.brake_pressed_prev = ret.brakePressed
self.cruise_enabled_prev = ret.cruiseState.enabled
return ret.as_reader()
# pass in a car.CarControl
# to be called @ 100hz
def apply(self, c):
self.CC.update(self.sendcan, c.enabled, self.CS, self.frame,
c.actuators, c.cruiseControl.cancel, c.hudControl.visualAlert,
c.hudControl.audibleAlert, self.forwarding_camera,
c.hudControl.leftLaneVisible, c.hudControl.rightLaneVisible, c.hudControl.leadVisible,
c.hudControl.leftLaneDepart, c.hudControl.rightLaneDepart)
self.frame += 1
return False
|
the-stack_0_10824 | import swift # Swift: browser-based 3D visualizer (instantiated below)
import roboticstoolbox as rtb
from spatialmath import SE3
import numpy as np
env = swift.Swift()
env.launch(realtime=True) # activate it
robot = rtb.models.Panda()
robot.q = robot.qr
T = SE3(0.5, 0.2, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1])
sol = robot.ikine_LM(T) # solve IK
q_pickup = sol.q
qt = rtb.jtraj(robot.qr, q_pickup, 50)
env.add(robot) # add robot to the 3D scene
for qk in qt.q: # for each joint configuration on trajectory
robot.q = qk # update the robot state
# robot.q = robot.qr
env.step(0.05) # update visualization
env.hold()
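# ---- Optional note (added for illustration, not part of the original script) ----
# The same trajectory could presumably also be replayed without a browser via the
# toolbox's matplotlib backend; the backend keyword below is an assumption:
# robot.plot(qt.q, backend='pyplot')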
|
the-stack_0_10825 | """
This file contains the hyperparameter values used for training and
testing RL agents.
"""
import os
BASE_DIR = './results/'
ENV_ID = 'gym_anm:ANM6Easy-v0'
GAMMA = 0.995
POLICY = 'MlpPolicy'
TRAIN_STEPS = 3000000
MAX_TRAINING_EP_LENGTH = 5000
EVAL_FREQ = 10000
N_EVAL_EPISODES = 5
MAX_EVAL_EP_LENGTH = 3000
LOG_DIR = BASE_DIR + ENV_ID + '/'
os.makedirs(LOG_DIR, exist_ok=True)
# Create a new directory for this run.
i = 0
while os.path.isdir(LOG_DIR + f'run_{i}/'):
i += 1
LOG_DIR += f'run_{i}/'
os.makedirs(LOG_DIR, exist_ok=True)
TENSORBOARD_LOG = LOG_DIR + 'tensorboard/'
os.makedirs(TENSORBOARD_LOG, exist_ok=True)
TB_LOG_NAME = 'run'
if __name__ == '__main__':
print('Done.')
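# ---- Hedged sketch (added for illustration, not part of the original config) ----
# One way these constants might be wired into a training run, assuming
# stable-baselines3 is installed; the PPO choice and callback wiring are
# illustrative assumptions, not this project's actual training script.
def example_training_run():
    import gym
    from stable_baselines3 import PPO
    from stable_baselines3.common.callbacks import EvalCallback

    env = gym.make(ENV_ID)
    eval_env = gym.make(ENV_ID)

    # Evaluate every EVAL_FREQ steps and keep logs/models under LOG_DIR.
    eval_callback = EvalCallback(eval_env,
                                 best_model_save_path=LOG_DIR,
                                 log_path=LOG_DIR,
                                 eval_freq=EVAL_FREQ,
                                 n_eval_episodes=N_EVAL_EPISODES)

    model = PPO(POLICY, env, gamma=GAMMA, tensorboard_log=TENSORBOARD_LOG)
    model.learn(total_timesteps=TRAIN_STEPS,
                callback=eval_callback,
                tb_log_name=TB_LOG_NAME)
    return model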
|
the-stack_0_10826 | '''
Generalizes hmm_discrete_lib so it can handle any kind of observation distribution (e.g. Gaussian, Poisson, GMM, product of
Bernoullis). It is based on https://github.com/probml/pyprobml/blob/master/scripts/hmm_lib.py
and operates in log space.
Author: Aleyna Kara (@karalleyna)
'''
from jax.random import split
import jax.numpy as jnp
from jax import jit, lax, vmap
from jax.nn import logsumexp, log_softmax, one_hot
from functools import partial
import superimport
import flax
import distrax
'''
Hidden Markov Model class in which trans_dist and init_dist are categorical-like
distributions from distrax, and obs_dist is any instance of distrax.Distribution.
Optimizer functions expect their parameters to be pytrees, so they cannot
work on a vanilla dataclass. For details see:
https://github.com/google/jax/issues/2371
Since flax.struct.dataclass is registered as a pytree beforehand, it allows
jit, vmap and optimizers to be used on the hidden Markov model.
'''
@flax.struct.dataclass
class HMM:
trans_dist: distrax.Distribution
obs_dist: distrax.Distribution
init_dist: distrax.Distribution
def logdotexp(u, v, axis=-1):
'''
Calculates jnp.log(jnp.exp(u) * jnp.exp(v)) in a stable way.
Parameters
----------
u : array
v : array
axis : int
Returns
-------
* array
Logarithm of the Hadamard product of u and v
'''
max_u = jnp.max(u, axis=axis, keepdims=True)
max_v = jnp.max(v, axis=axis, keepdims=True)
diff_u = jnp.nan_to_num(u - max_u, -jnp.inf)
diff_v = jnp.nan_to_num(v - max_v, -jnp.inf)
u_dot_v = jnp.log(jnp.exp(diff_u) * jnp.exp(diff_v))
u_dot_v = u_dot_v + max_u + max_v
return u_dot_v
def log_normalize(u, axis=-1):
'''
Normalizes the values along the given axis so that the exponentials of the
values along that axis sum to 1.
Parameters
----------
u : array
axis : int
Returns
-------
* array
The Log of normalized version of the given matrix
* array(seq_len, n_hidden) :
The values of the normalizer
'''
c = logsumexp(u, axis=axis)
return jnp.where(u == -jnp.inf, -jnp.inf, u - c), c
@partial(jit, static_argnums=(1,))
def hmm_sample_log(params, seq_len, rng_key):
'''
Samples an observation sequence of the given length from the defined
hidden Markov model and returns the sequence of hidden states
as well as the observation sequence.
Parameters
----------
params : HMM
Hidden Markov Model
seq_len : int
The length of the observation sequence
rng_key : array
Random key of shape (2,) and dtype uint32
Returns
-------
* array(seq_len,)
Hidden state sequence
* array(seq_len,) :
Observation sequence
'''
trans_dist, obs_dist, init_dist = params.trans_dist, params.obs_dist, params.init_dist
rng_key, rng_init = split(rng_key)
initial_state = init_dist.sample(seed=rng_init)
def draw_state(prev_state, key):
state = trans_dist.sample(seed=key)[prev_state]
return state, state
rng_key, rng_state, rng_obs = split(rng_key, 3)
keys = split(rng_state, seq_len - 1)
final_state, states = lax.scan(draw_state, initial_state, keys)
states = jnp.append(initial_state, states)
def draw_obs(z, key):
return obs_dist.sample(seed=key)[z]
keys = split(rng_obs, seq_len)
obs_seq = vmap(draw_obs, in_axes=(0, 0))(states, keys)
return states, obs_seq
@jit
def hmm_forwards_log(params, obs_seq, length=None):
'''
Calculates the filtered belief states (alpha messages) and the log-likelihood (forward pass)
Parameters
----------
params : HMM
Hidden Markov Model
obs_seq: array(seq_len)
History of observable events
Returns
-------
* float
The loglikelihood giving log(p(x|model))
* array(seq_len, n_hidden) :
Log of alpha values
'''
seq_len = len(obs_seq)
if length is None:
length = seq_len
trans_dist, obs_dist, init_dist = params.trans_dist, params.obs_dist, params.init_dist
n_states = obs_dist.batch_shape[0]
def scan_fn(carry, t):
(alpha_prev, log_ll_prev) = carry
alpha_n = jnp.where(t < length,
obs_dist.log_prob(obs_seq[t]) + logsumexp(
logdotexp(alpha_prev[:, None], trans_dist.logits), axis=0),
-jnp.inf + jnp.zeros_like(alpha_prev))
alpha_n, cn = log_normalize(alpha_n)
carry = (alpha_n, cn + log_ll_prev)
return carry, alpha_n
# initial belief state
alpha_0, c0 = log_normalize(init_dist.logits + obs_dist.log_prob(obs_seq[0]))
# setup scan loop
init_state = (alpha_0, c0)
ts = jnp.arange(1, seq_len)
carry, alpha_hist = lax.scan(scan_fn, init_state, ts)
# post-process
alpha_hist = jnp.vstack([alpha_0.reshape(1, n_states), alpha_hist])
(alpha_final, log_ll) = carry
return log_ll, alpha_hist
@jit
def hmm_backwards_log(params, obs_seq, length=None):
'''
Computes the backwards probabilities
Parameters
----------
params : HMM
Hidden Markov Model
obs_seq: array(seq_len,)
History of observable events
length : array(seq_len,)
The valid length of the observation sequence
Returns
-------
* array(seq_len, n_states)
Log of beta values
'''
seq_len = len(obs_seq)
if length is None:
length = seq_len
trans_dist, obs_dist, init_dist = params.trans_dist, params.obs_dist, params.init_dist
n_states = trans_dist.batch_shape[0]
beta_t = jnp.zeros((n_states,))
def scan_fn(beta_prev, t):
beta_t = jnp.where(t > length,
-jnp.inf + jnp.zeros_like(beta_prev),
log_normalize(logsumexp(beta_prev + obs_dist.log_prob(obs_seq[-t + 1]) + trans_dist.logits,
axis=1))[0])
return beta_t, beta_t
ts = jnp.arange(2, seq_len + 1)
_, beta_hist = lax.scan(scan_fn, beta_t, ts)
beta_hist = jnp.flip(jnp.vstack([beta_t.reshape(1, n_states), beta_hist]), axis=0)
return beta_hist
@jit
def hmm_forwards_backwards_log(params, obs_seq, length=None):
'''
Computes, for each time step, the marginal conditional probability that the Hidden Markov Model was
in each possible state given the observations that were made at each time step, i.e.
P(z[i] | x[0], ..., x[num_steps - 1]) for all i from 0 to num_steps - 1
Parameters
----------
params : HMM
Hidden Markov Model
obs_seq: array(seq_len)
History of observed states
Returns
-------
* array(seq_len, n_states)
The log of alpha values
* array(seq_len, n_states)
The log of beta values
* array(seq_len, n_states)
The log of marginal conditional probability
* float
The loglikelihood giving log(p(x|model))
'''
seq_len = len(obs_seq)
if length is None:
length = seq_len
def gamma_t(t):
gamma_t = jnp.where(t < length,
alpha[t] + beta[t - length],
jnp.zeros((n_states,)))
return gamma_t
ll, alpha = hmm_forwards_log(params, obs_seq, length)
n_states = alpha.shape[1]
beta = hmm_backwards_log(params, obs_seq, length)
ts = jnp.arange(seq_len)
gamma = vmap(gamma_t, (0))(ts)
# gamma = alpha * jnp.roll(beta, -seq_len + length, axis=0) #: Alternative
gamma = vmap(lambda x: log_normalize(x, axis=0)[0])(gamma)
return alpha, beta, gamma, ll
@jit
def hmm_viterbi_log(params, obs_seq, length=None):
'''
Computes the most probable sequence of hidden states given the observations
(Viterbi decoding), i.e.
argmax_z P(z[0], ..., z[num_steps - 1] | x[0], ..., x[num_steps - 1])
It is based on https://github.com/deepmind/distrax/blob/master/distrax/_src/utils/hmm.py
Parameters
----------
params : HMM
Hidden Markov Model
obs_seq: array(seq_len)
History of observed states
Returns
-------
* array(seq_len,)
The most probable sequence of hidden states
'''
seq_len = len(obs_seq)
if length is None:
length = seq_len
trans_dist, obs_dist, init_dist = params.trans_dist, params.obs_dist, params.init_dist
trans_log_probs = log_softmax(trans_dist.logits)
init_log_probs = log_softmax(init_dist.logits)
n_states = obs_dist.batch_shape[0]
first_log_prob = init_log_probs + obs_dist.log_prob(obs_seq[0])
if seq_len == 1:
return jnp.expand_dims(jnp.argmax(first_log_prob), axis=0)
def viterbi_forward(prev_logp, t):
obs_logp = obs_dist.log_prob(obs_seq[t])
logp = jnp.where(t <= length,
prev_logp[..., None] + trans_log_probs + obs_logp[..., None, :],
-jnp.inf + jnp.zeros_like(trans_log_probs))
max_logp_given_successor = jnp.where(t <= length, jnp.max(logp, axis=-2), prev_logp)
most_likely_given_successor = jnp.where(t <= length, jnp.argmax(logp, axis=-2), -1)
return max_logp_given_successor, most_likely_given_successor
ts = jnp.arange(1, seq_len)
final_log_prob, most_likely_sources = lax.scan(viterbi_forward, first_log_prob, ts)
most_likely_initial_given_successor = jnp.argmax(
trans_log_probs + first_log_prob, axis=-2)
most_likely_sources = jnp.concatenate([
jnp.expand_dims(most_likely_initial_given_successor, axis=0),
most_likely_sources], axis=0)
def viterbi_backward(state, t):
state = jnp.where(t <= length,
jnp.sum(most_likely_sources[t] * one_hot(state, n_states)).astype(jnp.int64),
state)
most_likely = jnp.where(t <= length, state, -1)
return state, most_likely
final_state = jnp.argmax(final_log_prob)
_, most_likely_path = lax.scan(viterbi_backward, final_state, ts, reverse=True)
final_state = jnp.where(length == seq_len, final_state, -1)
return jnp.append(most_likely_path, final_state)
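# ---- Hedged usage sketch (added for illustration; the transition, emission and
# initial distributions below are made-up toy values) ----
if __name__ == "__main__":
    from jax.random import PRNGKey

    A = jnp.array([[0.9, 0.1],
                   [0.2, 0.8]])        # state-transition probabilities
    B = jnp.array([[0.7, 0.2, 0.1],
                   [0.1, 0.3, 0.6]])   # emission probabilities
    pi = jnp.array([0.5, 0.5])         # initial state distribution

    params = HMM(trans_dist=distrax.Categorical(probs=A),
                 obs_dist=distrax.Categorical(probs=B),
                 init_dist=distrax.Categorical(probs=pi))

    states, obs_seq = hmm_sample_log(params, 20, PRNGKey(0))
    alpha, beta, gamma, ll = hmm_forwards_backwards_log(params, obs_seq)
    viterbi_path = hmm_viterbi_log(params, obs_seq)
    print("log-likelihood:", ll)
    print("Viterbi path:", viterbi_path)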
|
the-stack_0_10827 | '''
Collect results in Quantum ESPRESSO
'''
import sys
import numpy as np
from pymatgen.core import Structure
from . import structure as qe_structure
from ... import utility
from ...IO import pkl_data
from ...IO import read_input as rin
def collect_qe(current_id, work_path):
# ---------- check optimization in previous stage
try:
with open(work_path+rin.qe_outfile, 'r') as fpout:
lines = fpout.readlines()
check_opt = 'not_yet'
for line in lines:
if 'End final coordinates' in line:
check_opt = 'done'
except Exception as e:
print(e)
check_opt = 'no_file'
# ---------- obtain energy and magmom
try:
with open(work_path+rin.qe_outfile, 'r') as fpout:
lines = fpout.readlines()
energy = np.nan
for line in reversed(lines):
if line.startswith('!'):
energy = float(line.split()[-2]) # in Ry
energy = energy * utility.ry2ev / float(rin.natot) # Ry/cell --> eV/atom
break
magmom = np.nan # implemented by H. Sawahata 2020/10/04
for line in reversed(lines):
if line.find("total magnetization") >= 0:
muB = line.split()
magmom = float(muB[3])
break
except Exception as e:
energy = np.nan # error
magmom = np.nan # error
print(e)
print(' Structure ID {0}, could not obtain energy from {1}'.format(
current_id, rin.qe_outfile))
# ---------- collect the last structure
try:
lines_cell = qe_structure.extract_cell_parameters(
work_path+rin.qe_outfile)
if lines_cell is None:
lines_cell = qe_structure.extract_cell_parameters(
work_path+rin.qe_infile)
lines_atom = qe_structure.extract_atomic_positions(
work_path+rin.qe_outfile)
if lines_atom is None:
lines_atom = qe_structure.extract_atomic_positions(
work_path+rin.qe_infile)
opt_struc = qe_structure.from_lines(lines_cell, lines_atom)
# ------ opt_qe-structure
with open('./data/opt_qe-structure', 'a') as fstruc:
fstruc.write('# ID {0:d}\n'.format(current_id))
qe_structure.write(opt_struc, './data/opt_qe-structure', mode='a')
except Exception as e:
print(e)
opt_struc = None
# ---------- check
if np.isnan(energy):
opt_struc = None
if opt_struc is None:
energy = np.nan
magmom = np.nan
# ---------- return
return opt_struc, energy, magmom, check_opt
def get_energy_step_qe(energy_step_data, current_id, work_path):
'''
get energy step data in eV/atom
energy_step_data[ID][stage][step]
energy_step_data[ID][0] <-- stage 1
energy_step_data[ID][1] <-- stage 2
'''
try:
# ---------- read output file
with open(work_path+rin.qe_outfile, 'r') as f:
lines = f.readlines()
# ---------- get energy step
energy_step = []
final_flag = False # End final coordinates
vc_flag = False # vc-relax
for line in lines:
if line.startswith('!'):
energy_step.append(line.split()[4])
# ------ check opt and vc-relax
if 'End final coordinates' in line:
final_flag = True
if 'CELL_PARAMETERS' in line:
vc_flag = True
# ------ delete last energy (after End final coordinates)
if final_flag and vc_flag:
energy_step.pop(-1)
# ------ list --> array, Ry/cell --> eV/atom
if not energy_step:
energy_step = None # if empty
print('#### ID: {0}: failed to parse energy_step\n'.format(
current_id), file=sys.stderr)
else:
energy_step = utility.ry2ev / rin.natot * np.array(energy_step,
dtype='float')
except Exception as e:
energy_step = None
print(e, '#### ID: {0}: failed to parse energy_step\n'.format(
current_id), file=sys.stderr)
# ---------- append energy_step
if energy_step_data.get(current_id) is None:
energy_step_data[current_id] = [] # initialize
energy_step_data[current_id].append(energy_step)
# ---------- save energy_step_data
pkl_data.save_energy_step(energy_step_data)
# ---------- return
return energy_step_data
def get_struc_step_qe(struc_step_data, current_id, work_path):
'''
get structure step data
# ---------- args
struc_step_data: (dict) the key is structure ID
struc_step_data[ID][stage][step]
struc_step_data[ID][0] <-- stage 1
struc_step_data[ID][1] <-- stage 2
'''
try:
struc_step = []
# ------ init struc from pwscf.in
_extract_struc_qe(work_path+rin.qe_infile, struc_step)
# ------ struc step from pwscf.out
_extract_struc_qe(work_path+rin.qe_outfile, struc_step)
# ------ delete last structure due to duplication
struc_step.pop(-1)
except Exception as e:
struc_step = None
print(e, '#### ID: {0}: failed to parse struc_step\n'.format(
current_id), file=sys.stderr)
# ---------- append struc_step_data
if struc_step_data.get(current_id) is None:
struc_step_data[current_id] = [] # initialize
struc_step_data[current_id].append(struc_step)
# ---------- save struc_step_data
pkl_data.save_struc_step(struc_step_data)
# ---------- return
return struc_step_data
def _extract_struc_qe(filename, struc_step):
# ---------- read a file
with open(filename, 'r') as f:
lines = f.readlines()
# ---------- extract struc
read_cell = False
read_coords = False
vc_flag = False # in case of vc-relax
for line in lines:
# ------ cell part
if read_cell:
lattice.append(line.split())
if len(lattice) == 3:
read_cell = False
lattice = np.array(lattice, dtype='float')
if 'CELL_PARAMETERS' in line:
read_cell = True
vc_flag = True
lattice = []
# ------ coords part
if read_coords:
lsplit = line.split()
species.append(lsplit[0])
coords.append(lsplit[1:])
if len(coords) == rin.natot:
read_coords = False
coords = np.array(coords, dtype='float')
# ---- gen struc
if not vc_flag: # empty lattice, use init lattice
lattice = struc_step[0].lattice
struc = Structure(lattice, species, coords)
struc_step.append(struc)
if 'ATOMIC_POSITIONS' in line:
read_coords = True
species = []
coords = []
def get_force_step_qe(force_step_data, current_id, work_path):
'''
get force step data in eV/angstrom
# ---------- args
force_step_data: (dict) the key is structure ID
force_step_data[ID][stage][step]
force_step_data[ID][0] <-- stage 1
force_step_data[ID][1] <-- stage 2
'''
try:
# ---------- read output file
with open(work_path+rin.qe_outfile, 'r') as f:
lines = f.readlines()
# ---------- get force step
force_step = []
read_force = False
final_flag = False # End final coordinates
vc_flag = False # in case of vc-relax
for line in lines:
if 'atom 1 type 1 force' in line:
read_force = True
force = []
if read_force:
force.append(line.split()[6:])
if len(force) == rin.natot:
read_force = False
force_step.append(utility.ry2ev / utility.bohr2ang * np.array(
force, dtype='float'))
# ------ check opt and vc-relax
if 'End final coordinates' in line:
final_flag = True
if 'CELL_PARAMETERS' in line:
vc_flag = True
# ------ delete last force (after End final coordinates)
if final_flag and vc_flag:
force_step.pop(-1)
# ------ if empty
if len(force_step) == 0:
force_step = None
print('#### ID: {0}: failed to parse force_step\n'.format(
current_id), file=sys.stderr)
except Exception as e:
force_step = None
print(e, '#### ID: {0}: failed to parse force_step\n'.format(
current_id), file=sys.stderr)
# ---------- append force_step
if force_step_data.get(current_id) is None:
force_step_data[current_id] = [] # initialize
force_step_data[current_id].append(force_step)
# ---------- save force_step_data
pkl_data.save_force_step(force_step_data)
# ---------- return
return force_step_data
def get_stress_step_qe(stress_step_data, current_id, work_path):
'''
get stress step data in eV/ang**3
# ---------- args
stress_step_data: (dict) the key is structure ID
stress_step_data[ID][stage][step]
stress_step_data[ID][0] <-- stage 1
stress_step_data[ID][1] <-- stage 2
'''
try:
# ---------- read output file
with open(work_path+rin.qe_outfile, 'r') as f:
lines = f.readlines()
# ---------- get stress step
stress_step = []
read_stress = False
final_flag = False # End final coordinates
vc_flag = False # in case of vc-relax
for line in lines:
if read_stress:
stress.append(line.split()[3:])
if len(stress) == 3:
read_stress = False
stress_step.append(utility.kbar2ev_ang3 * np.array(
stress, dtype='float'))
if 'total stress (Ry/bohr**3)' in line:
read_stress = True
stress = []
# ------ check opt and vc-relax
if 'End final coordinates' in line:
final_flag = True
if 'CELL_PARAMETERS' in line:
vc_flag = True
# ------ delete last stress (after End final coordinates)
if final_flag and vc_flag:
stress_step.pop(-1)
# ------ if empty
if len(stress_step) == 0:
stress_step = None
print('#### ID: {0}: failed to parse stress_step\n'.format(
current_id), file=sys.stderr)
except Exception as e:
stress_step = None
print(e, '#### ID: {0}: failed to parse stress_step\n'.format(
current_id), file=sys.stderr)
# ---------- append stress_step
if stress_step_data.get(current_id) is None:
stress_step_data[current_id] = [] # initialize
stress_step_data[current_id].append(stress_step)
# ---------- save stress_step_data
pkl_data.save_stress_step(stress_step_data)
# ---------- return
return stress_step_data
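# ---------- hedged usage sketch (added for illustration only)
# How a caller might gather everything for one finished structure; the ID and
# work-directory path below are placeholder assumptions, and the surrounding
# package's read_input (rin) must already be configured.
def example_collect(current_id=0, work_path='./work000/'):
    energy_step_data, struc_step_data = {}, {}
    force_step_data, stress_step_data = {}, {}
    opt_struc, energy, magmom, check_opt = collect_qe(current_id, work_path)
    energy_step_data = get_energy_step_qe(energy_step_data, current_id, work_path)
    struc_step_data = get_struc_step_qe(struc_step_data, current_id, work_path)
    force_step_data = get_force_step_qe(force_step_data, current_id, work_path)
    stress_step_data = get_stress_step_qe(stress_step_data, current_id, work_path)
    return opt_struc, energy, magmom, check_opt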
|
the-stack_0_10830 | '''
@Author: [email protected]
@Date: 2020-03-01 18:33:41
@LastEditors: [email protected]
@LastEditTime: 2020-03-10 19:51:30
@Description: Proxy validator
'''
import os
import requests
import asyncio
import time
import json
import ssl
from GeeProxy.utils.logger import proxy_validator
from aiohttp import ClientSession, ClientTimeout, ClientError, ClientSSLError
from aiohttp.client_exceptions import ClientHttpProxyError
from aiohttp_proxy import ProxyConnector
from GeeProxy.settings import VAILDATORS_TIMEOUT,\
VAILDATORS_RETRY, PROXY_REQUEST_DELAY, PUBLIC_IP,\
ANONYMOUS_CHECK_API
from GeeProxy.utils.user_agent import UserAgent
class AiohttpSingleton(ClientSession):
'''
This is a singleton aiohttp ClientSession client class (one instance per process)
'''
def __new__(cls, *args, **keywords):
pid = os.getpid()
if not hasattr(cls, '_instance') or pid != cls._pid:
print("Aiohttp PID is {} and father PID is {}".format(
os.getpid(), os.getppid()))
if hasattr(cls, "_pid"):
print("Aiohttp Instance PID is {} and PID is {}".format(
cls._pid, pid))
cls._instance = ClientSession(*args, **keywords)
cls._pid = os.getpid()
return cls._instance
@property
def connector(self):
return self._instance._connector
@connector.setter
def connector(self, connector):
self._instance._connector = connector
class ValidateResult:
"""
Validation result
"""
def __init__(self, proxy=None, web_key=None, delay=-1, dst=None, useful=1):
"""
:param proxy: proxy address
:param web_key: cache key
:param delay: latency
:param dst: target site
:param useful: whether the proxy is available
"""
# proxy address
self.proxy = proxy
# cache key
self.web_key = web_key
# latency
self.delay = delay
# target site
self.dst = dst
# whether the proxy is anonymous
# self.anonymous = anonymous
# whether the proxy is available
self.available = useful
class ProxyValidator:
"""
Asynchronous proxy validator. Validation works as follows: the target site is
requested through the proxy; on timeout the request is retried, and once the
retry count exceeds the given threshold while the request still fails, the
proxy is considered unusable. The request latency is measured along the way.
"""
def __init__(self):
self._retry = 0
self._timeout = ClientTimeout(total=VAILDATORS_TIMEOUT)
self._ua = UserAgent()
self._result = {}
async def check_proxy(self, proxy: str, dst: str, web_key: str) -> ValidateResult:
"""
Checks the availability of a proxy.
:param proxy: the proxy to validate
:param dst: target site address
:param web_key: cache key of the target site
:return: validation result
"""
result = ValidateResult(proxy=proxy, delay=-1, web_key=web_key, dst=dst, useful=1)
time_start = time.time()
try:
# route the request through the proxy
connector = ProxyConnector(verify_ssl=False).from_url(proxy)
requests.urllib3.disable_warnings()
# open an asynchronous HTTP session
async with ClientSession(connector=connector,
timeout=self._timeout) as session:
params = {
"url": dst,
"verify_ssl": False,
"timeout": self._timeout,
"headers": {
"User-Agent": self._ua.random()
}
}
# verify_ssl = False
if "https" in proxy.split(":"):
params["verify_ssl"] = False
# asynchronous HTTP request
async with session.get(**params) as response:
proxy_validator.info(
"wait proxy {} for {} response".format(proxy, dst))
await response.text()
await session.close()
time_end = time.time()
delay = time_end - time_start
proxy_validator.info(
"check proxy {} for {} success cost {} s".format(
proxy, dst, delay))
result.delay = delay
result.available = 1
# if the request is slower than the delay threshold, treat the proxy as unusable
if delay > PROXY_REQUEST_DELAY:
result.available = 0
return result
except (BaseException, asyncio.TimeoutError, ClientError,
ClientHttpProxyError, ClientSSLError) as e:
err_msg = e
if isinstance(e, asyncio.TimeoutError) or isinstance(
e, ClientHttpProxyError):
err_msg = "Http request timeout"
if not isinstance(e, ClientSSLError) or not isinstance(
e, ssl.SSLError):
result.available = 0
# retry
if self._retry <= VAILDATORS_RETRY:
# if the retry count is within the threshold, try again
self._retry = self._retry + 1
result = await self.check_proxy(proxy, dst, web_key)
return result
time_end = time.time()
proxy_validator.error("check proxy {} {} times fail for {} "
"and cost {} s".format(proxy, self._retry, dst, time_end - time_start))
proxy_validator.error("check proxy {} for {} "
"error:{} type {}".format(proxy, dst, err_msg, type(e)))
self._retry = 0
result.delay = time_end - time_start
return result
@staticmethod
async def check_anonymous(proxy: str) -> bool:
"""
Checks the anonymity level of a proxy.
:param proxy: the proxy to validate
:return: validation result; True if the proxy is highly anonymous (elite)
"""
anonymous = True
try:
connector = ProxyConnector.from_url(proxy)
requests.urllib3.disable_warnings()
ua = UserAgent()
async with ClientSession(connector=connector, timeout=5) as session:
# asynchronous HTTP request
async with session.get(ANONYMOUS_CHECK_API,
ssl=False,
headers={"User-Agent": ua.random()},
timeout=5) as response:
res = await response.text()
res = json.loads(res)
anonymous = ProxyValidator.is_anonymous(res)
if anonymous:
proxy_validator.info("The proxy {} is anonymous".format(proxy))
await session.close()
return anonymous
except Exception as e:
proxy_validator.error("Checking proxy {} anonymous "
"has an error:{} type {}".format(proxy, str(e), type(e)))
raise ClientError("check anonymous")
@staticmethod
def is_anonymous(response: dict) -> bool:
"""
Determines the anonymity level of the current proxy from the check API.
:param response: response of the anonymity-check API
:return: validation result; True if the proxy is highly anonymous (elite)
"""
origin = response["origin"]
proxy_connection = response.get("Proxy-Connection", "")
proxy_validator.info(
"Checking anonymous proxy response is {}".format(response))
if origin != PUBLIC_IP and not proxy_connection:
return True
return False
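# ---- Hedged usage sketch (added for illustration; the proxy address, target
# URL and cache key below are made-up values) ----
if __name__ == "__main__":
    import asyncio

    async def _demo():
        validator = ProxyValidator()
        result = await validator.check_proxy("http://127.0.0.1:8080",
                                             "https://httpbin.org/get",
                                             "httpbin")
        print("available:", result.available, "delay:", result.delay)

    asyncio.run(_demo())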
|
the-stack_0_10832 | from functools import partial
from pubsub import pub
from threading import Thread
from time import sleep
import wx
from wx.lib.agw.floatspin import FloatSpin
from spacq.gui.tool.box import load_csv, save_csv, Dialog, MessageDialog
from spacq.interface.units import Quantity
"""
Configuration for a ch4VoltageSource.
"""
class ch4VoltageSourceTunerDialog(Dialog):
"""
A dialog for tuning a voltage source port.
"""
def __init__(self, parent, global_store, ok_callback, port, *args, **kwargs):
Dialog.__init__(self, parent, title='Port {0} tuning'.format(port.num))
self.global_store = global_store
self.ok_callback = ok_callback
self.port = port
# Dialog.
dialog_box = wx.BoxSizer(wx.VERTICAL)
## Self-calibration.
calibration_static_box = wx.StaticBox(self, label='DAC self-calibration')
calibration_box = wx.StaticBoxSizer(calibration_static_box, wx.VERTICAL)
dialog_box.Add(calibration_box, flag=wx.EXPAND|wx.ALL, border=5)
self.calibrate_button = wx.Button(self, label='Self-calibrate')
self.Bind(wx.EVT_BUTTON, self.OnCalibrate, self.calibrate_button)
calibration_box.Add(self.calibrate_button, flag=wx.EXPAND)
## Tuning.
tuning_static_box = wx.StaticBox(self, label='Tuning')
tuning_box = wx.StaticBoxSizer(tuning_static_box, wx.VERTICAL)
dialog_box.Add(tuning_box, flag=wx.EXPAND)
### Autotune.
autotuning_static_box = wx.StaticBox(self, label='Autotuning')
autotuning_box = wx.StaticBoxSizer(autotuning_static_box, wx.VERTICAL)
tuning_box.Add(autotuning_box, flag=wx.EXPAND|wx.ALL, border=5)
autotuning_sizer = wx.FlexGridSizer(rows=3, cols=2, hgap=5)
autotuning_box.Add(autotuning_sizer, flag=wx.CENTER)
autotuning_sizer.Add(wx.StaticText(self, label='Resource name:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.resource_name_input = wx.TextCtrl(self, size=(300,-1))
autotuning_sizer.Add(self.resource_name_input)
autotuning_sizer.Add(wx.StaticText(self, label='Max:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.automax_input = FloatSpin(self, value=1, min_val=-10, max_val=10, increment=1,
digits=5)
autotuning_sizer.Add(self.automax_input)
autotuning_sizer.Add(wx.StaticText(self, label='Min:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.automin_input = FloatSpin(self, value=-1, min_val=-10, max_val=10, increment=1,
digits=5)
autotuning_sizer.Add(self.automin_input)
self.autotune_button = wx.Button(self, label='Autotune')
self.Bind(wx.EVT_BUTTON, self.OnAutotune, self.autotune_button)
autotuning_box.Add(self.autotune_button, flag=wx.EXPAND)
### Manual tune.
tuning_sizer = wx.FlexGridSizer(rows=2, cols=2, hgap=5)
tuning_box.Add(tuning_sizer, flag=wx.CENTER)
tuning_sizer.Add(wx.StaticText(self, label='Gain:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.gain_input = FloatSpin(self, value=0, min_val=-1e6, max_val=1e6, increment=1,
digits=5)
tuning_sizer.Add(self.gain_input)
tuning_sizer.Add(wx.StaticText(self, label='Offset:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.offset_input = FloatSpin(self, value=0, min_val=-1e6, max_val=1e6, increment=1,
digits=5)
tuning_sizer.Add(self.offset_input)
## End buttons.
button_box = wx.BoxSizer(wx.HORIZONTAL)
dialog_box.Add(button_box, flag=wx.CENTER|wx.ALL, border=5)
ok_button = wx.Button(self, wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.OnOk, ok_button)
button_box.Add(ok_button)
cancel_button = wx.Button(self, wx.ID_CANCEL)
button_box.Add(cancel_button)
self.SetSizerAndFit(dialog_box)
def autotune(self, resource):
gain, offset = self.port.autotune(resource, set_result=False,
min_value=self.automin_input.GetValue(),
max_value=self.automax_input.GetValue())
wx.CallAfter(self.gain_input.SetValue, gain)
wx.CallAfter(self.offset_input.SetValue, offset)
wx.CallAfter(self.autotune_button.Enable)
def self_calibrate(self):
self.port.apply_settings(calibrate=True)
sleep(self.port.calibration_delay)
wx.CallAfter(self.calibrate_button.Enable)
def SetValue(self, gain, offset):
self.gain_input.SetValue(gain)
self.offset_input.SetValue(offset)
def GetValue(self):
return (self.gain_input.GetValue(), self.offset_input.GetValue())
def OnAutotune(self, evt=None):
name = self.resource_name_input.Value
if not name:
MessageDialog(self, 'No resource provided').Show()
return
try:
resource = self.global_store.resources[name]
except KeyError:
MessageDialog(self, name, 'Missing resource').Show()
return
if not resource.readable:
MessageDialog(self, name, 'Unreadable resource').Show()
return
self.autotune_button.Disable()
thr = Thread(target=self.autotune, args=(resource,))
thr.daemon = True
thr.start()
def OnCalibrate(self, evt=None):
self.calibrate_button.Disable()
thr = Thread(target=self.self_calibrate)
thr.daemon = True
thr.start()
def OnOk(self, evt=None):
self.ok_callback(self)
self.Destroy()
class ch4VoltageSourceSettingsPanel(wx.Panel):
"""
All the settings for a voltage source.
"""
def __init__(self, parent, global_store, vsrc, *args, **kwargs):
wx.Panel.__init__(self, parent, *args, **kwargs)
self.global_store = global_store
self.vsrc = vsrc
self.port_value_inputs = []
self.port_buttons = []
# Panel.
panel_box = wx.BoxSizer(wx.VERTICAL)
## Ports.
ports_box = wx.FlexGridSizer(rows=3, cols=2)
panel_box.Add(ports_box)
for port in range(4):
port_static_box = wx.StaticBox(self, label='Port {0} '.format(port))
port_box = wx.StaticBoxSizer(port_static_box, wx.HORIZONTAL)
ports_box.Add(port_box, flag=wx.ALL, border=5)
spin = FloatSpin(self, value=0, min_val=-10, max_val=10, increment=1, digits=6)
self.port_value_inputs.append(spin)
port_box.Add(spin)
port_box.Add(wx.StaticText(self, label='V'))
set_button = wx.Button(self, label='Set', style=wx.BU_EXACTFIT)
set_button.Bind(wx.EVT_BUTTON, partial(self.OnSetVoltage, port))
port_box.Add(set_button)
tune_button = wx.Button(self, label='Tune...', style=wx.BU_EXACTFIT)
tune_button.Bind(wx.EVT_BUTTON, partial(self.OnTune, port))
port_box.Add(tune_button)
self.port_buttons.append((set_button, tune_button))
## All ports.
button_static_box = wx.StaticBox(self, label='All ports')
button_box = wx.StaticBoxSizer(button_static_box, wx.HORIZONTAL)
panel_box.Add(button_box, flag=wx.CENTER)
### Zero.
zero_all_button = wx.Button(self, label='Zero')
self.Bind(wx.EVT_BUTTON, self.OnZeroAll, zero_all_button)
button_box.Add(zero_all_button, flag=wx.CENTER)
### Self-calibrate.
self.calibrate_all_button = wx.Button(self, label='Self-calibrate')
self.Bind(wx.EVT_BUTTON, self.OnCalibrateAll, self.calibrate_all_button)
button_box.Add(self.calibrate_all_button, flag=wx.CENTER)
### Load tuning.
tuning_data_static_box = wx.StaticBox(self, label='Tuning data')
tuning_data_box = wx.StaticBoxSizer(tuning_data_static_box, wx.HORIZONTAL)
button_box.Add(tuning_data_box)
#### Save.
tuning_data_save_button = wx.Button(self, label='Save...')
self.Bind(wx.EVT_BUTTON, self.OnSave, tuning_data_save_button)
tuning_data_box.Add(tuning_data_save_button)
#### Load.
tuning_data_load_button = wx.Button(self, label='Load...')
self.Bind(wx.EVT_BUTTON, self.OnLoad, tuning_data_load_button)
tuning_data_box.Add(tuning_data_load_button)
self.SetSizer(panel_box)
def self_calibrate_all(self):
delay = 0 # s
for port in self.vsrc.ports:
# Use the largest delay.
if port.calibration_delay > delay:
delay = port.calibration_delay
port.apply_settings(calibrate=True)
sleep(delay)
wx.CallAfter(self.calibrate_all_button.Enable)
def zero_all(self):
for port in self.vsrc.ports:
port.voltage = Quantity(0.0, 'V')
def OnSetVoltage(self, port_num, evt=None):
try:
self.vsrc.ports[port_num].voltage = Quantity(self.port_value_inputs[port_num].GetValue(), 'V')
except ValueError as e:
MessageDialog(self, str(e), 'Invalid value').Show()
def OnTune(self, port_num, evt=None):
port = self.vsrc.ports[port_num]
def ok_callback(dlg):
port.gain, port.offset = dlg.GetValue()
dlg = ch4VoltageSourceTunerDialog(self, self.global_store, ok_callback, port)
dlg.SetValue(port.gain, port.offset)
dlg.Show()
def OnCalibrateAll(self, evt=None):
self.calibrate_all_button.Disable()
thr = Thread(target=self.self_calibrate_all)
thr.daemon = True
thr.start()
def OnZeroAll(self, evt=None):
thr = Thread(target=self.zero_all)
thr.daemon = True
thr.start()
def OnSave(self, evt=None):
values = [[port.gain, port.offset] for port in self.vsrc.ports]
try:
save_csv(self, values)
except IOError as e:
MessageDialog(self, str(e), 'Save error').Show()
return
def OnLoad(self, evt=None):
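# Expected CSV layout (optionally preceded by a header row): one row per
# port, each row holding the two numbers gain, offset.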
try:
result = load_csv(self)
if result is None:
return
has_header, values, _ = result
if has_header:
port_values = values[1:]
else:
port_values = values
if len(port_values) != len(self.vsrc.ports):
raise ValueError('Invalid number of ports.')
for i, port_value in enumerate(port_values):
if len(port_value) != 2:
raise ValueError('Invalid number of settings for port {0}.'.format(i))
try:
float(port_value[0])
float(port_value[1])
except (TypeError, ValueError):
raise ValueError('Not a number for port {0}.'.format(i))
except (IOError, ValueError) as e:
MessageDialog(self, str(e), 'Load error').Show()
return
for port, values in zip(self.vsrc.ports, port_values):
port.gain = float(values[0])
port.offset = float(values[1])
class ch4VoltageSourceSettingsDialog(Dialog):
"""
A wrapper for ch4VoltageSourceSettingsPanel.
"""
def __init__(self, parent, global_store, vsrc_name, *args, **kwargs):
# If the device doesn't exist, give up.
try:
vsrc = global_store.devices[vsrc_name].device
except (KeyError, AttributeError):
self.Destroy()
return
Dialog.__init__(self, parent, title='Four channel voltage source settings', *args, **kwargs)
self.vsrc_name = vsrc_name
# Dialog.
dialog_box = wx.BoxSizer(wx.VERTICAL)
## Settings panel.
self.panel = ch4VoltageSourceSettingsPanel(self, global_store, vsrc)
dialog_box.Add(self.panel)
self.SetSizerAndFit(dialog_box)
# Subscriptions.
pub.subscribe(self.msg_device, 'device.added')
pub.subscribe(self.msg_device, 'device.removed')
def msg_device(self, name, value=None):
if name == self.vsrc_name:
# Device has changed, so we can't trust it anymore.
self.Destroy()
return
|
the-stack_0_10833 | import copy
from decimal import Decimal
from django.apps.registry import Apps
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.ddl_references import Statement
from django.db.transaction import atomic
from django.db.utils import NotSupportedError
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_delete_table = "DROP TABLE %(table)s"
sql_create_inline_fk = "REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)"
sql_delete_unique = "DROP INDEX %(name)s"
sql_foreign_key_constraint = None
def __enter__(self):
# Some SQLite schema alterations need foreign key constraints to be
# disabled. Enforce it here for the duration of the schema edition.
if not self.connection.disable_constraint_checking():
raise NotSupportedError(
'SQLite schema editor cannot be used while foreign key '
'constraint checks are enabled. Make sure to disable them '
'before entering a transaction.atomic() context because '
'SQLite does not support disabling them in the middle of '
'a multi-statement transaction.'
)
return super().__enter__()
def __exit__(self, exc_type, exc_value, traceback):
self.connection.check_constraints()
super().__exit__(exc_type, exc_value, traceback)
self.connection.enable_constraint_checking()
def quote_value(self, value):
# The backend "mostly works" without this function and there are use
# cases for compiling Python without the sqlite3 libraries (e.g.
# security hardening).
try:
import sqlite3
value = sqlite3.adapt(value)
except ImportError:
pass
except sqlite3.ProgrammingError:
pass
# Manual emulation of SQLite parameter quoting
if isinstance(value, bool):
return str(int(value))
elif isinstance(value, (Decimal, float, int)):
return str(value)
elif isinstance(value, str):
return "'%s'" % value.replace("\'", "\'\'")
elif value is None:
return "NULL"
elif isinstance(value, (bytes, bytearray, memoryview)):
# Bytes are only allowed for BLOB fields, encoded as string
# literals containing hexadecimal data and preceded by a single "X"
# character.
return "X'%s'" % value.hex()
else:
raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value)))
def _is_referenced_by_fk_constraint(self, table_name, column_name=None, ignore_self=False):
"""
Return whether or not the provided table name is referenced by another
one. If `column_name` is specified, only references pointing to that
column are considered. If `ignore_self` is True, self-referential
constraints are ignored.
"""
with self.connection.cursor() as cursor:
for other_table in self.connection.introspection.get_table_list(cursor):
if ignore_self and other_table.name == table_name:
continue
constraints = self.connection.introspection._get_foreign_key_constraints(cursor, other_table.name)
for constraint in constraints.values():
constraint_table, constraint_column = constraint['foreign_key']
if (constraint_table == table_name and
(column_name is None or constraint_column == column_name)):
return True
return False
def alter_db_table(self, model, old_db_table, new_db_table, disable_constraints=True):
if (not self.connection.features.supports_atomic_references_rename and
disable_constraints and self._is_referenced_by_fk_constraint(old_db_table)):
if self.connection.in_atomic_block:
raise NotSupportedError((
'Renaming the %r table while in a transaction is not '
'supported on SQLite < 3.26 because it would break referential '
'integrity. Try adding `atomic = False` to the Migration class.'
) % old_db_table)
self.connection.enable_constraint_checking()
super().alter_db_table(model, old_db_table, new_db_table)
self.connection.disable_constraint_checking()
else:
super().alter_db_table(model, old_db_table, new_db_table)
def alter_field(self, model, old_field, new_field, strict=False):
old_field_name = old_field.name
table_name = model._meta.db_table
_, old_column_name = old_field.get_attname_column()
if (new_field.name != old_field_name and
not self.connection.features.supports_atomic_references_rename and
self._is_referenced_by_fk_constraint(table_name, old_column_name, ignore_self=True)):
if self.connection.in_atomic_block:
raise NotSupportedError((
'Renaming the %r.%r column while in a transaction is not '
'supported on SQLite < 3.26 because it would break referential '
'integrity. Try adding `atomic = False` to the Migration class.'
) % (model._meta.db_table, old_field_name))
with atomic(self.connection.alias):
super().alter_field(model, old_field, new_field, strict=strict)
# Follow SQLite's documented procedure for performing changes
# that don't affect the on-disk content.
# https://sqlite.org/lang_altertable.html#otheralter
with self.connection.cursor() as cursor:
schema_version = cursor.execute('PRAGMA schema_version').fetchone()[0]
cursor.execute('PRAGMA writable_schema = 1')
references_template = ' REFERENCES "%s" ("%%s") ' % table_name
new_column_name = new_field.get_attname_column()[1]
search = references_template % old_column_name
replacement = references_template % new_column_name
cursor.execute('UPDATE sqlite_master SET sql = replace(sql, %s, %s)', (search, replacement))
cursor.execute('PRAGMA schema_version = %d' % (schema_version + 1))
cursor.execute('PRAGMA writable_schema = 0')
# The integrity check will raise an exception and rollback
# the transaction if the sqlite_master updates corrupt the
# database.
cursor.execute('PRAGMA integrity_check')
# Perform a VACUUM to refresh the database representation from
# the sqlite_master table.
with self.connection.cursor() as cursor:
cursor.execute('VACUUM')
else:
super().alter_field(model, old_field, new_field, strict=strict)
def _remake_table(self, model, create_field=None, delete_field=None, alter_field=None):
"""
Shortcut to transform a model from old_model into new_model
This follows the correct procedure to perform non-rename or column
addition operations based on SQLite's documentation
https://www.sqlite.org/lang_altertable.html#caution
The essential steps are:
1. Create a table with the updated definition called "new__app_model"
2. Copy the data from the existing "app_model" table to the new table
3. Drop the "app_model" table
4. Rename the "new__app_model" table to "app_model"
5. Restore any index of the previous "app_model" table.
"""
# Self-referential fields must be recreated rather than copied from
# the old model to ensure their remote_field.field_name doesn't refer
# to an altered field.
def is_self_referential(f):
return f.is_relation and f.remote_field.model is model
# Work out the new fields dict / mapping
body = {
f.name: f.clone() if is_self_referential(f) else f
for f in model._meta.local_concrete_fields
}
# Since mapping might mix column names and default values,
# its values must be already quoted.
mapping = {f.column: self.quote_name(f.column) for f in model._meta.local_concrete_fields}
# This maps field names (not columns) for things like unique_together
rename_mapping = {}
# If any of the new or altered fields is introducing a new PK,
# remove the old one
restore_pk_field = None
if getattr(create_field, 'primary_key', False) or (
alter_field and getattr(alter_field[1], 'primary_key', False)):
for name, field in list(body.items()):
if field.primary_key:
field.primary_key = False
restore_pk_field = field
if field.auto_created:
del body[name]
del mapping[field.column]
# Add in any created fields
if create_field:
body[create_field.name] = create_field
# Choose a default and insert it into the copy map
if not create_field.many_to_many and create_field.concrete:
mapping[create_field.column] = self.quote_value(
self.effective_default(create_field)
)
# Add in any altered fields
if alter_field:
old_field, new_field = alter_field
body.pop(old_field.name, None)
mapping.pop(old_field.column, None)
body[new_field.name] = new_field
if old_field.null and not new_field.null:
case_sql = "coalesce(%(col)s, %(default)s)" % {
'col': self.quote_name(old_field.column),
'default': self.quote_value(self.effective_default(new_field))
}
mapping[new_field.column] = case_sql
else:
mapping[new_field.column] = self.quote_name(old_field.column)
rename_mapping[old_field.name] = new_field.name
# Remove any deleted fields
if delete_field:
del body[delete_field.name]
del mapping[delete_field.column]
# Remove any implicit M2M tables
if delete_field.many_to_many and delete_field.remote_field.through._meta.auto_created:
return self.delete_model(delete_field.remote_field.through)
# Work inside a new app registry
apps = Apps()
# Work out the new value of unique_together, taking renames into
# account
unique_together = [
[rename_mapping.get(n, n) for n in unique]
for unique in model._meta.unique_together
]
# Work out the new value for index_together, taking renames into
# account
index_together = [
[rename_mapping.get(n, n) for n in index]
for index in model._meta.index_together
]
indexes = model._meta.indexes
if delete_field:
indexes = [
index for index in indexes
if delete_field.name not in index.fields
]
constraints = list(model._meta.constraints)
# Provide isolated instances of the fields to the new model body so
# that the existing model's internals aren't interfered with when
# the dummy model is constructed.
body_copy = copy.deepcopy(body)
# Construct a new model with the new fields to allow self referential
# primary key to resolve to. This model won't ever be materialized as a
# table and solely exists for foreign key reference resolution purposes.
# This wouldn't be required if the schema editor was operating on model
# states instead of rendered models.
meta_contents = {
'app_label': model._meta.app_label,
'db_table': model._meta.db_table,
'unique_together': unique_together,
'index_together': index_together,
'indexes': indexes,
'constraints': constraints,
'apps': apps,
}
meta = type("Meta", (), meta_contents)
body_copy['Meta'] = meta
body_copy['__module__'] = model.__module__
type(model._meta.object_name, model.__bases__, body_copy)
# Construct a model with a renamed table name.
body_copy = copy.deepcopy(body)
meta_contents = {
'app_label': model._meta.app_label,
'db_table': 'new__%s' % model._meta.db_table,
'unique_together': unique_together,
'index_together': index_together,
'indexes': indexes,
'constraints': constraints,
'apps': apps,
}
meta = type("Meta", (), meta_contents)
body_copy['Meta'] = meta
body_copy['__module__'] = model.__module__
new_model = type('New%s' % model._meta.object_name, model.__bases__, body_copy)
# Create a new table with the updated schema.
self.create_model(new_model)
# Copy data from the old table into the new table
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(new_model._meta.db_table),
', '.join(self.quote_name(x) for x in mapping),
', '.join(mapping.values()),
self.quote_name(model._meta.db_table),
))
# Delete the old table to make way for the new
self.delete_model(model, handle_autom2m=False)
# Rename the new table to take the place of the old
self.alter_db_table(
new_model, new_model._meta.db_table, model._meta.db_table,
disable_constraints=False,
)
# Run deferred SQL on correct table
for sql in self.deferred_sql:
self.execute(sql)
self.deferred_sql = []
# Fix any PK-removed field
if restore_pk_field:
restore_pk_field.primary_key = True
def delete_model(self, model, handle_autom2m=True):
if handle_autom2m:
super().delete_model(model)
else:
# Delete the table (and only that)
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
# Remove all deferred statements referencing the deleted table.
for sql in list(self.deferred_sql):
if isinstance(sql, Statement) and sql.references_table(model._meta.db_table):
self.deferred_sql.remove(sql)
def add_field(self, model, field):
"""
Create a field on a model. Usually involves adding a column, but may
involve adding a table instead (for M2M fields).
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
self._remake_table(model, create_field=field)
def remove_field(self, model, field):
"""
Remove a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# M2M fields are a special case
if field.many_to_many:
# For implicit M2M tables, delete the auto-created table
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# For explicit "through" M2M fields, do nothing
# For everything else, remake.
else:
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
self._remake_table(model, delete_field=field)
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Perform a "physical" (non-ManyToMany) field update."""
# Use "ALTER TABLE ... RENAME COLUMN" if only the column name
# changed and there aren't any constraints.
if (self.connection.features.can_alter_table_rename_column and
old_field.column != new_field.column and
self.column_sql(model, old_field) == self.column_sql(model, new_field) and
not (old_field.remote_field and old_field.db_constraint or
new_field.remote_field and new_field.db_constraint)):
return self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type))
# Alter by remaking table
self._remake_table(model, alter_field=(old_field, new_field))
# Rebuild tables with FKs pointing to this field if the PK type changed.
if old_field.primary_key and new_field.primary_key and old_type != new_type:
for rel in new_field.model._meta.related_objects:
if not rel.many_to_many:
self._remake_table(rel.related_model)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""Alter M2Ms to repoint their to= endpoints."""
if old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table:
# The field name didn't change, but some options did; we have to propagate this altering.
self._remake_table(
old_field.remote_field.through,
alter_field=(
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
),
)
return
# Make a new through table
self.create_model(new_field.remote_field.through)
# Copy the data across
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(new_field.remote_field.through._meta.db_table),
', '.join([
"id",
new_field.m2m_column_name(),
new_field.m2m_reverse_name(),
]),
', '.join([
"id",
old_field.m2m_column_name(),
old_field.m2m_reverse_name(),
]),
self.quote_name(old_field.remote_field.through._meta.db_table),
))
# Delete the old through table
self.delete_model(old_field.remote_field.through)
def add_constraint(self, model, constraint):
self._remake_table(model)
def remove_constraint(self, model, constraint):
self._remake_table(model)
|
the-stack_0_10834 | import datetime
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_ICON, CONF_WEEKDAY, ATTR_DATE
import homeassistant.util.dt as dt_util
from homeassistant.helpers.event import async_track_point_in_utc_time
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
CONF_EPOCH = 'epoch'
CONF_FREQUENCY = 'frequency'
DATE_STR_FORMAT = '%A, %Y-%m-%d'
WEEKDAY_STR_FORMAT = '%A'
DEFAULT_NAME = 'Upcoming Event'
DEFAULT_ICON = 'mdi:calendar-clock'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_EPOCH): cv.date,
vol.Optional(CONF_ICON): cv.icon,
vol.Required(CONF_FREQUENCY): vol.Coerce(int),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Setup the sensor platform."""
sensor_name = config.get(CONF_NAME)
icon = config.get(CONF_ICON)
epoch = config.get(CONF_EPOCH)
frequency = config.get(CONF_FREQUENCY)
sensors = [
PeriodicEventSensor(hass, f'{sensor_name} Date', icon, epoch,
frequency),
PeriodicEventRelativeSensor(hass, sensor_name, icon, epoch, frequency),
]
for sensor in sensors:
async_track_point_in_utc_time(
hass, sensor.point_in_time_listener, sensor.get_next_interval())
async_add_entities(sensors, True)
class PeriodicEventSensor(Entity):
def __init__(self, hass, name, icon, epoch, frequency):
"""Initialize the sensor."""
self.hass = hass
self._name = name
self._icon = icon
self._epoch = epoch
self._frequency = frequency
self._state = None
self._next_event = None
self._update_internal_state(dt_util.utcnow())
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the icon of the sensor."""
return self._icon
@property
def device_state_attributes(self):
return {
ATTR_DATE: self._next_event.isoformat(),
CONF_WEEKDAY: self._next_event.strftime(WEEKDAY_STR_FORMAT)
}
def _get_next_event(self, now):
""" Compute the next event """
from dateutil import relativedelta
today = dt_util.as_local(now).date()
weekday = relativedelta.weekdays[self._epoch.weekday()]
next_event = today + relativedelta.relativedelta(weekday=weekday)
# Check if this date matches the frequency after epoch, or else
# calculate the correct date
remainder = (next_event - self._epoch).days / 7 % self._frequency
if remainder != 0:
next_event = next_event + \
datetime.timedelta(weeks=self._frequency - remainder)
return next_event
def get_next_interval(self, now=None):
"""Compute next time update should occur (at next event)."""
if not now:
now = dt_util.utcnow()
next_event = self._get_next_event(now)
return datetime.datetime(
next_event.year, next_event.month, next_event.day)
def _update_internal_state(self, now):
self._next_event = self._get_next_event(now)
self._state = self._next_event.strftime(DATE_STR_FORMAT)
@callback
def point_in_time_listener(self, now):
"""Update state and schedule same listener to run again."""
self._update_internal_state(now)
self.async_schedule_update_ha_state()
async_track_point_in_utc_time(
self.hass, self.point_in_time_listener, self.get_next_interval())
class PeriodicEventRelativeSensor(PeriodicEventSensor):
def get_next_interval(self, now=None):
"""Compute next time update should occur (eg updates daily)."""
if now is None:
now = dt_util.utcnow()
start_of_day = dt_util.start_of_local_day(dt_util.as_local(now))
return start_of_day + datetime.timedelta(days=1)
def _update_internal_state(self, now):
from natural.date import duration
super()._update_internal_state(now)
# Compute the human-readable text between today and the next event
today = dt_util.as_local(now).date()
difference = self._next_event - today
if (difference.days == 0):
self._state = 'Today'
elif (difference.days == 1):
self._state = 'Tomorrow'
else:
self._state = duration(self._next_event, now=today, precision=2)
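# ---- Hedged standalone sketch (added for illustration; the epoch, frequency and
# dates below are made-up values) ----
# The same "every N weeks from an epoch date" arithmetic used by _get_next_event,
# without any Home Assistant plumbing.
def next_periodic_event(epoch, frequency, today):
    from dateutil import relativedelta
    weekday = relativedelta.weekdays[epoch.weekday()]
    candidate = today + relativedelta.relativedelta(weekday=weekday)
    remainder = (candidate - epoch).days // 7 % frequency
    if remainder:
        candidate += datetime.timedelta(weeks=frequency - remainder)
    return candidate

# e.g. next_periodic_event(datetime.date(2021, 1, 4), 2, datetime.date(2021, 1, 12))
# returns datetime.date(2021, 1, 18) for a biweekly event anchored on a Monday epoch.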
|
the-stack_0_10836 | from numpy import argsort as numpy_argsort
from numpy import atleast_1d as numpy_atleast_1d
from numpy import ndarray as numpy_ndarray
from copy import deepcopy
from .functions import RTOL, ATOL, equals
from .functions import inspect as cf_inspect
class Flags:
'''Self-describing CF flag values.
Stores the flag_values, flag_meanings and flag_masks CF attributes in
an internally consistent manner.
'''
def __init__(self, **kwargs):
'''**Initialization**
:Parameters:
flag_values : optional
The flag_values CF property. Sets the `flag_values`
attribute.
flag_meanings : optional
The flag_meanings CF property. Sets the `flag_meanings`
attribute.
flag_masks : optional
The flag_masks CF property. Sets the `flag_masks`
attribute.
'''
for attr, value in kwargs.items():
if value is not None:
setattr(self, attr, value)
def __eq__(self, other):
'''x.__eq__(y) <==> x==y <==> x.equals(y)
'''
return self.equals(other)
def __ne__(self, other):
'''x.__ne__(y) <==> x!=y <==> not x.equals(y)
'''
return not self.equals(other)
def __hash__(self):
'''Return the hash value of the flags.
Note that the flags will be sorted in place.
:Returns:
`int`
The hash value.
**Examples:**
>>> hash(f)
-956218661958673979
'''
self.sort()
x = [tuple(getattr(self, attr, ()))
for attr in ('_flag_meanings', '_flag_values', '_flag_masks')]
return hash(tuple(x))
def __bool__(self):
'''x.__bool__() <==> x!=0
'''
for attr in ('_flag_meanings', '_flag_values', '_flag_masks'):
if hasattr(self, attr):
return True
#--- End: for
return False
# ----------------------------------------------------------------
# Attributes
# ----------------------------------------------------------------
@property
def flag_values(self):
'''The flag_values CF attribute.
Stored as a 1-d numpy array but may be set as any array-like
object.
**Examples:**
>>> f.flag_values = ['a', 'b', 'c']
>>> f.flag_values
array(['a', 'b', 'c'], dtype='|S1')
>>> f.flag_values = numpy.arange(4, dtype='int8')
>>> f.flag_values
array([1, 2, 3, 4], dtype=int8)
>>> f.flag_values = 1
>>> f.flag_values
array([1])
'''
try:
return self._flag_values
except AttributeError:
raise AttributeError("'%s' has no attribute 'flag_values'" %
self.__class__.__name__)
@flag_values.setter
def flag_values(self, value):
if not isinstance(value, numpy_ndarray):
value = numpy_atleast_1d(value)
self._flag_values = value
@flag_values.deleter
def flag_values(self):
try:
del self._flag_values
except AttributeError:
raise AttributeError("Can't delete '%s' attribute 'flag_values'" %
self.__class__.__name__)
# ----------------------------------------------------------------
# Property attribute: flag_masks
# ----------------------------------------------------------------
@property
def flag_masks(self):
'''The flag_masks CF attribute.
Stored as a 1-d numpy array but may be set as any array-like object.
**Examples:**
>>> f.flag_masks = numpy.array([1, 2, 4], dtype='int8')
>>> f.flag_masks
array([1, 2, 4], dtype=int8)
>>> f.flag_masks = 1
>>> f.flag_masks
array([1])
'''
try:
return self._flag_masks
except AttributeError:
raise AttributeError("'%s' object has no attribute 'flag_masks'" %
self.__class__.__name__)
@flag_masks.setter
def flag_masks(self, value):
if not isinstance(value, numpy_ndarray):
value = numpy_atleast_1d(value)
self._flag_masks = value
@flag_masks.deleter
def flag_masks(self):
try:
del self._flag_masks
except AttributeError:
raise AttributeError("Can't delete '%s' attribute 'flag_masks'" %
self.__class__.__name__)
@property
def flag_meanings(self):
'''The flag_meanings CF attribute.
Stored as a 1-d numpy string array but may be set as a space
delimited string or any array-like object.
**Examples:**
>>> f.flag_meanings = 'low medium high'
>>> f.flag_meanings
array(['low', 'medium', 'high'],
dtype='|S6')
>>> f.flag_meanings = ['left', 'right']
>>> f.flag_meanings
array(['left', 'right'],
dtype='|S5')
>>> f.flag_meanings = 'ok'
>>> f.flag_meanings
array(['ok'],
dtype='|S2')
>>> f.flag_meanings = numpy.array(['a', 'b'])
>>> f.flag_meanings
array(['a', 'b'],
dtype='|S1')
'''
try:
return self._flag_meanings
except AttributeError:
raise AttributeError("'%s' object has no attribute 'flag_meanings'" %
self.__class__.__name__)
@flag_meanings.setter
def flag_meanings(self, value):
if isinstance(value, str):
value = numpy_atleast_1d(value.split())
elif not isinstance(value, numpy_ndarray):
value = numpy_atleast_1d(value)
self._flag_meanings = value
@flag_meanings.deleter
def flag_meanings(self):
try:
del self._flag_meanings
except AttributeError:
raise AttributeError("Can't delete '%s' attribute 'flag_meanings'" %
self.__class__.__name__)
def __repr__(self):
'''x.__repr__() <==> repr(x)
'''
string = []
if hasattr(self, 'flag_values'):
string.append('flag_values=%s' % str(self.flag_values))
if hasattr(self, 'flag_masks'):
string.append('flag_masks=%s' % str(self.flag_masks))
if hasattr(self, 'flag_meanings'):
string.append('flag_meanings=%s' % str(self.flag_meanings))
return '<CF %s: %s>' % (self.__class__.__name__,
', '.join(string))
def copy(self):
'''Return a deep copy.
Equivalent to ``copy.deepcopy(f)``
:Returns:
The deep copy.
**Examples:**
>>> f.copy()
'''
return deepcopy(self)
def dump(self, display=True, _level=0):
'''Return a string containing a full description of the instance.
:Parameters:
display : bool, optional
If False then return the description as a string. By
default the description is printed, i.e. ``f.dump()`` is
equivalent to ``print(f.dump(display=False))``.
:Returns:
`None` or `str`
A string containing the description.
'''
indent0 = ' ' * _level
indent1 = ' ' * (_level+1)
string = ['%sFlags:' % indent0]
for attr in ('_flag_values', '_flag_meanings', '_flag_masks'):
value = getattr(self, attr, None)
if value is not None:
string.append('%s%s = %s' % (indent1, attr[1:], list(value)))
#--- End: for
string = '\n'.join(string)
if display:
print(string)
else:
return(string)
def equals(self, other, rtol=None, atol=None,
ignore_fill_value=False, verbose=False,
traceback=False):
'''True if two groups of flags are logically equal, False otherwise.
Note that both instances are sorted in place prior to the comparison.
:Parameters:
other:
The object to compare for equality.
atol: float, optional
The absolute tolerance for all numerical comparisons. By
default the value returned by the `ATOL` function is used.
rtol: float, optional
The relative tolerance for all numerical comparisons. By
default the value returned by the `RTOL` function is used.
ignore_fill_value: bool, optional
If True then data arrays with different fill values are
considered equal. By default they are considered unequal.
traceback: deprecated at version 3.0.0.
Use *verbose* instead.
:Returns:
`bool`
Whether or not the two instances are equal.
**Examples:**
>>> f
<CF Flags: flag_values=[1 0 2], flag_masks=[2 0 2], flag_meanings=['medium' 'low' 'high']>
>>> g
<CF Flags: flag_values=[2 0 1], flag_masks=[2 0 2], flag_meanings=['high' 'low' 'medium']>
>>> f.equals(g)
True
>>> f
<CF Flags: flag_values=[0 1 2], flag_masks=[0 2 2], flag_meanings=['low' 'medium' 'high']>
>>> g
<CF Flags: flag_values=[0 1 2], flag_masks=[0 2 2], flag_meanings=['low' 'medium' 'high']>
'''
if traceback:
_DEPRECATION_ERROR_KWARGS(self, 'equals', traceback=True) # pragma: no cover
# Check that each instance is the same type
if self.__class__ != other.__class__:
if verbose:
print("%s: Different type: %s, %s" %
(self.__class__.__name__,
self.__class__.__name__, other.__class__.__name__)) # pragma: no cover
return False
self.sort()
other.sort()
# Set default tolerances
if rtol is None:
rtol = RTOL()
if atol is None:
atol = ATOL()
for attr in ('_flag_meanings', '_flag_values', '_flag_masks'):
if hasattr(self, attr):
if not hasattr(other, attr):
if verbose:
print("%s: Different attributes: %s" %
(self.__class__.__name__, attr[1:])) # pragma: no cover
return False
x = getattr(self, attr)
y = getattr(other, attr)
if (x.shape != y.shape or
not equals(x, y, rtol=rtol, atol=atol,
ignore_fill_value=ignore_fill_value,
verbose=verbose)):
if verbose:
print("%s: Different '%s': %r, %r" %
(self.__class__.__name__, attr[1:], x, y)) # pragma: no cover
return False
elif hasattr(other, attr):
if verbose:
print("%s: Different attributes: %s" %
(self.__class__.__name__, attr[1:])) # pragma: no cover
return False
#--- End: for
return True
def inspect(self):
'''Inspect the object for debugging.
.. seealso:: `cf.inspect`
:Returns:
`None`
'''
print(cf_inspect(self)) # pragma: no cover
def sort(self):
'''Sort the flags in place.
By default sort by flag values. If flag values are not present
then sort by flag meanings. If flag meanings are not present then
sort by flag_masks.
:Returns:
`None`
**Examples:**
>>> f
<CF Flags: flag_values=[2 0 1], flag_masks=[2 0 2], flag_meanings=['high' 'low' 'medium']>
>>> f.sort()
>>> f
<CF Flags: flag_values=[0 1 2], flag_masks=[0 2 2], flag_meanings=['low' 'medium' 'high']>
'''
if not self:
return
# Sort all three attributes
for attr in ('_flag_values', '_flag_meanings', '_flag_masks'):
if hasattr(self, attr):
indices = numpy_argsort(getattr(self, attr))
break
#--- End: for
for attr in ('_flag_values', '_flag_meanings', '_flag_masks'):
if hasattr(self, attr):
array = getattr(self, attr).view()
array[...] = array[indices]
#--- End: for
#--- End: class
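# Illustrative usage sketch added by the editor (not part of the original
# module); it only exercises behaviour documented in the docstrings above and
# assumes numpy and the local .functions helpers are importable.
if __name__ == '__main__':
    f = Flags(flag_values=[2, 0, 1],
              flag_masks=[2, 0, 2],
              flag_meanings='high low medium')
    g = f.copy()
    f.sort()              # sorts in place by flag_values -> [0 1 2]
    print(f)              # <CF Flags: flag_values=[0 1 2], ...>
    print(f.equals(g))    # True: equals() sorts both operands before comparing
    f.dump()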
|
the-stack_0_10837 | """Added transactions table
Revision ID: 5632aa202d89
Revises: 4d3ed7925db3
Create Date: 2015-03-18 14:54:09.061787
"""
# revision identifiers, used by Alembic.
revision = '5632aa202d89'
down_revision = '4d3ed7925db3'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('quark_transactions',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_engine='InnoDB')
op.add_column(u'quark_ip_addresses',
sa.Column('transaction_id', sa.Integer(), nullable=True))
op.create_foreign_key('fk_quark_ips_transaction_id',
'quark_ip_addresses',
'quark_transactions',
['transaction_id'],
['id'])
def downgrade():
op.drop_constraint('fk_quark_ips_transaction_id', 'quark_ip_addresses',
type_='foreignkey')
op.drop_column(u'quark_ip_addresses', 'transaction_id')
op.drop_table('quark_transactions')
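# Editor's note (illustrative): this revision is applied and rolled back with
# the standard Alembic CLI, e.g. `alembic upgrade 5632aa202d89` (or
# `alembic upgrade head`) and `alembic downgrade 4d3ed7925db3`.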
|
the-stack_0_10838 | from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.base import Template
from django.template.context import RequestContext
from cms.test_utils.project.placeholderapp.models import (Example1,
MultilingualExample1)
from cms.utils import get_language_from_request
def example_view(request):
context = RequestContext(request)
context['examples'] = Example1.objects.all()
return render_to_response('placeholderapp.html', context)
def _base_detail(request, instance, template_name='detail.html',
item_name="char_1", template_string='',):
context = RequestContext(request)
context['instance'] = instance
context['item_name'] = item_name
if template_string:
template = Template(template_string)
return HttpResponse(template.render(context))
else:
return render_to_response(template_name, context)
def list_view_multi(request):
context = RequestContext(request)
context['examples'] = MultilingualExample1.objects.language(
get_language_from_request(request)).all()
return render_to_response('list.html', context)
def detail_view_multi(request, pk, template_name='detail_multi.html',
item_name="char_1", template_string='',):
instance = MultilingualExample1.objects.language(
get_language_from_request(request)).get(pk=pk)
return _base_detail(request, instance, template_name, item_name,
template_string)
def list_view(request):
context = RequestContext(request)
context['examples'] = Example1.objects.all()
return render_to_response('list.html', context)
def detail_view(request, pk, template_name='detail.html', item_name="char_1",
template_string='',):
instance = Example1.objects.get(pk=pk)
return _base_detail(request, instance, template_name, item_name,
template_string)
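# Illustrative URL wiring for the views above (a hypothetical urls.py written
# by the editor, not part of the original file); the URL prefixes are
# assumptions, only the view names come from this module.
# from django.conf.urls import url
# from cms.test_utils.project.placeholderapp import views
# urlpatterns = [
#     url(r'^$', views.example_view),
#     url(r'^list/$', views.list_view),
#     url(r'^detail/(?P<pk>\d+)/$', views.detail_view),
#     url(r'^multi/list/$', views.list_view_multi),
#     url(r'^multi/detail/(?P<pk>\d+)/$', views.detail_view_multi),
# ]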
|
the-stack_0_10839 | # Copied from cellSNP, https://raw.githubusercontent.com/single-cell-genetics/cellSNP/purePython/cellSNP/utils/vcf_utils.py
# Utilility functions for processing vcf files
# Author: Yuanhua Huang
# Date: 09/06/2019
import os
import sys
import gzip
import subprocess
import numpy as np
def parse_sample_info(sample_dat, sparse=True):
"""
Parse genotype information for each sample
Note, it requires the format for each variants to
be the same.
"""
if sample_dat == [] or sample_dat is None:
return None
# require the same format for all variants
format_all = [x[0] for x in sample_dat]
if format_all.count(format_all[0]) != len(format_all):
print("Error: require the same format for all variants.")
exit()
format_list = format_all[0].split(":")
RV = {}
for _format in format_list:
RV[_format] = []
if sparse:
RV['indices'] = []
RV['indptr'] = [0]
RV['shape'] = (len(sample_dat[0][1:]), len(sample_dat))
missing_val = ":".join(["."] * len(format_list))
cnt = 0
for j in range(len(sample_dat)): #variant j
_line = sample_dat[j]
for i in range(len(_line[1:])): #cell i
if _line[i+1] == missing_val:
continue
_line_key = _line[i+1].split(":")
for k in range(len(format_list)):
RV[format_list[k]].append(_line_key[k])
cnt += 1
RV['indices'].append(i)
RV['indptr'].append(cnt)
else:
for _line in sample_dat:
_line_split = [x.split(":") for x in _line[1:]]
for k in range(len(format_list)):
_line_key = [x[k] for x in _line_split]
RV[format_list[k]].append(_line_key)
return RV
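# Illustrative sketch (editor's addition): for input such as
#   sample_dat = [["GT:AD:DP", "0/0:2:5", ".:.:.", "1/1:4:9"]]
# parse_sample_info(sample_dat, sparse=True) returns a dict with one value
# list per FORMAT tag ('GT', 'AD', 'DP') holding only the non-missing cells,
# plus 'indices', 'indptr' and 'shape' ((3, 1) here: 3 cells x 1 variant)
# describing where those cells sit; the fully missing middle cell is skipped.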
def load_VCF(vcf_file, biallelic_only=False, load_sample=True, sparse=True):
"""
Load whole VCF file
-------------------
Initially designed to load VCF from cellSNP output, requiring
1) all variants have the same format list;
2) a line starting with "#CHROM", with sample ids.
If these two requirements are satisfied, this function also supports general
VCF files, e.g., genotype for multiple samples.
Note: it may use a large amount of memory, so please filter the VCF with bcftools first.
"""
if vcf_file[-3:] == ".gz" or vcf_file[-4:] == ".bgz":
infile = gzip.open(vcf_file, "rb")
is_gzip = True
else:
infile = open(vcf_file, "r")
is_gzip = False
FixedINFO = {}
contig_lines = []
comment_lines = []
var_ids, obs_ids, obs_dat = [], [], []
for line in infile:
if is_gzip:
line = line.decode('utf-8')
if line.startswith("#"):
if line.startswith("##contig="):
contig_lines.append(line.rstrip())
if line.startswith("#CHROM"):
obs_ids = line.rstrip().split("\t")[9:]
key_ids = line[1:].rstrip().split("\t")[:8]
for _key in key_ids:
FixedINFO[_key] = []
else:
comment_lines.append(line.rstrip())
else:
list_val = line.rstrip().split("\t") #[:5] #:8
if biallelic_only:
if len(list_val[3]) > 1 or len(list_val[4]) > 1:
continue
if load_sample:
obs_dat.append(list_val[8:])
for i in range(len(key_ids)):
FixedINFO[key_ids[i]].append(list_val[i])
var_ids.append("_".join([list_val[x] for x in [0, 1, 3, 4]]))
infile.close()
RV = {}
RV["variants"] = var_ids
RV["FixedINFO"] = FixedINFO
RV["samples"] = obs_ids
RV["GenoINFO"] = parse_sample_info(obs_dat, sparse=sparse)
RV["contigs"] = contig_lines
RV["comments"] = comment_lines
return RV
def write_VCF_to_hdf5(VCF_dat, out_file):
"""
Write vcf data into hdf5 file
"""
import h5py
f = h5py.File(out_file, 'w')
f.create_dataset("contigs", data=np.string_(VCF_dat['contigs']),
compression="gzip", compression_opts=9)
f.create_dataset("samples", data=np.string_(VCF_dat['samples']),
compression="gzip", compression_opts=9)
f.create_dataset("variants", data=np.string_(VCF_dat['variants']),
compression="gzip", compression_opts=9)
f.create_dataset("comments", data=np.string_(VCF_dat['comments']),
compression="gzip", compression_opts=9)
## variant fixed information
fixed = f.create_group("FixedINFO")
for _key in VCF_dat['FixedINFO']:
fixed.create_dataset(_key, data=np.string_(VCF_dat['FixedINFO'][_key]),
compression="gzip", compression_opts=9)
## genotype information for each sample
geno = f.create_group("GenoINFO")
for _key in VCF_dat['GenoINFO']:
geno.create_dataset(_key, data=np.string_(VCF_dat['GenoINFO'][_key]),
compression="gzip", compression_opts=9)
f.close()
def read_sparse_GeneINFO(GenoINFO, keys=['AD', 'DP']):
M, N = np.array(GenoINFO['shape']).astype('int')
indptr = np.array(GenoINFO['indptr']).astype('int')
indices = np.array(GenoINFO['indices']).astype('int')
from scipy.sparse import csr_matrix
RV = {}
for _key in keys:
data = np.array(GenoINFO[_key]).astype('float')
RV[_key] = csr_matrix((data, indices, indptr), shape=(N, M))
return RV
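# Illustrative pipeline sketch (editor's addition; the file name is an
# assumption and the FORMAT field must actually contain AD and DP):
#   vcf_dat = load_VCF("cellSNP.cells.vcf.gz", sparse=True)
#   mats = read_sparse_GeneINFO(vcf_dat["GenoINFO"], keys=["AD", "DP"])
#   AD, DP = mats["AD"], mats["DP"]   # scipy CSR, one row per variant, one column per cell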
def merge_vcf(out_file, out_files, hdf5_out=True):
"""Merge vcf for all chromsomes
"""
if out_file.endswith(".gz"):
out_file_use = out_file.split(".gz")[0]
else:
out_file_use = out_file
CNT = 0
fid_out = open(out_file_use, "w")
for _file in out_files:
with open(_file, "r") as fid_in:
for line in fid_in:
if line.startswith("#") and _file != out_files[0]:
continue
else:
CNT += 1
fid_out.writelines(line)
os.remove(_file)
fid_out.close()
print("[cellSNP] %d lines in final vcf file" %CNT)
import shutil
if shutil.which("bgzip") is not None:
bashCommand = "bgzip -f %s" %(out_file_use)
else:
bashCommand = "gzip -f %s" %(out_file_use)
pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
pro.communicate()[0]
## save to hdf5 file
if hdf5_out:
vcf_dat = load_VCF(out_file_use + ".gz", load_sample=True, sparse=True)
write_VCF_to_hdf5(vcf_dat, out_file_use + ".h5")
return None
def VCF_to_sparseMat(vcf_file, tags=["AD", "DP"], out_dir=None):
"""
Write VCF sample info into sparse matrices with given tags
"""
# out samples, out_var, tag_files
var_info = []
tag_mat_list = []
for _tag in tags:
_dict = {"data": [], "row": [], "col": []}
tag_mat_list.append(_dict)
if vcf_file[-3:] == ".gz" or vcf_file[-4:] == ".bgz":
infile = gzip.open(vcf_file, "rb")
is_gzip = True
else:
infile = open(vcf_file, "r")
is_gzip = False
var_idx, obs_idx = 0, 0
for line in infile:
if is_gzip:
line = line.decode('utf-8')
if line.startswith("#"):
if line.startswith("#CHROM"):
samples = line.rstrip().split("\t")[9:]
continue
## variants line
var_idx += 1
list_val = line.rstrip().split("\t")
var_info.append(list_val[:8])
FORMAT = list_val[8].split(":")
tag_idx = []
for _tag in tags:
if _tag in FORMAT:
tag_idx.append(FORMAT.index(_tag))
else:
tag_idx.append(None)
for obs_idx in range(len(list_val[9:])):
_samp_dat = list_val[9 + obs_idx]
if _samp_dat == ".":
continue
_samp_val = _samp_dat.split(":")
for ii in range(len(tags)):
if tag_idx[ii] is None:
continue
tag_dat = _samp_val[tag_idx[ii]]
if (tag_dat != "." and tag_dat != "0" and
tag_dat.count(".,") == 0):
tag_mat_list[ii]["data"].append(tag_dat)
tag_mat_list[ii]["row"].append(var_idx)
tag_mat_list[ii]["col"].append(obs_idx + 1)
infile.close()
if out_dir is not None:
if not os.path.exists(out_dir):
os.mkdir(out_dir)
fid_obs = open(out_dir + "/cellSNP.samples.tsv", "w")
fid_obs.writelines("\n".join(samples) + "\n")
fid_obs.close()
fid_var = open(out_dir + "/cellSNP.base.vcf", "w")
fid_var.writelines("##fileformat=VCFv4.2\n")
fid_var.writelines("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n")
for _var_info in var_info:
fid_var.writelines("\t".join(_var_info) + "\n")
fid_var.close()
try:
import shutil
if shutil.which("bgzip") is not None:
bashCommand = "bgzip -f %s" %(out_dir + "/cellSNP.base.vcf")
else:
bashCommand = "gzip -f %s" %(out_dir + "/cellSNP.base.vcf")
pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
pro.communicate()[0]
except:
print("sparse matrix: VCF uncmpressed.")
for ii in range(len(tags)):
_mat = tag_mat_list[ii]
_dat = _mat["data"]
_row = _mat["row"]
_col = _mat["col"]
fid = open(out_dir + "/cellSNP.tag.%s.mtx" %(tags[ii]), "w")
fid.writelines("%" +
"%MatrixMarket matrix coordinate integer general\n")
fid.writelines("%\n")
fid.writelines("%d\t%d\t%d\n" %(len(var_info), len(samples),
len(_dat)))
for jj in range(len(_dat)):
fid.writelines("%d\t%d\t%s\n" %(_row[jj], _col[jj], _dat[jj]))
fid.close()
return var_info, samples, tag_mat_list
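# Illustrative sketch (editor's addition; the input path is an assumption):
#   var_info, samples, tag_mats = VCF_to_sparseMat(
#       "cellSNP.cells.vcf.gz", tags=["AD", "DP"], out_dir="sparse_out")
# which writes sparse_out/cellSNP.samples.tsv, sparse_out/cellSNP.base.vcf(.gz)
# and one sparse_out/cellSNP.tag.<TAG>.mtx Matrix Market file per requested tag.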
|
the-stack_0_10840 | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.dev.tooling.configuration.consumers.model.model_consumer import VALIDATORS_DOCUMENTATION
from ...utils import get_model_consumer, normalize_yaml
pytestmark = [pytest.mark.conf, pytest.mark.conf_consumer, pytest.mark.conf_consumer_model]
def test():
consumer = get_model_consumer(
"""
name: test
version: 0.0.0
files:
- name: test.yaml
options:
- template: instances
options:
- name: foo
required: true
description: words
value:
type: string
- name: example
description: words
value:
example: bar
type: string
- name: default_precedence
description: words
value:
example: bar
default: baz
type: string
- name: example_ignored_array
description: words
value:
example:
- test
type: array
items:
type: string
- name: example_ignored_object
description: words
value:
example:
key: value
type: object
additionalProperties: true
- name: long_default_formatted
description: words
value:
default:
- ["01", "02", "03", "04", "05"]
- ["06", "07", "08", "09", "10"]
- ["11", "12", "13", "14", "15"]
- ["16", "17", "18", "19", "20"]
- ["21", "22", "23", "24", "25"]
type: array
items:
type: array
items:
type: string
"""
)
model_definitions = consumer.render()
assert len(model_definitions) == 1
files = model_definitions['test.yaml']
assert len(files) == 4
validators_contents, validators_errors = files['validators.py']
assert not validators_errors
assert validators_contents == VALIDATORS_DOCUMENTATION
package_root_contents, package_root_errors = files['__init__.py']
assert not package_root_errors
assert package_root_contents == normalize_yaml(
"""
from .instance import InstanceConfig
class ConfigMixin:
_config_model_instance: InstanceConfig
@property
def config(self) -> InstanceConfig:
return self._config_model_instance
"""
)
defaults_contents, defaults_errors = files['defaults.py']
assert not defaults_errors
assert defaults_contents == normalize_yaml(
"""
from datadog_checks.base.utils.models.fields import get_default_field_value
def instance_default_precedence(field, value):
return 'baz'
def instance_example(field, value):
return 'bar'
def instance_example_ignored_array(field, value):
return get_default_field_value(field, value)
def instance_example_ignored_object(field, value):
return get_default_field_value(field, value)
def instance_long_default_formatted(field, value):
return [
['01', '02', '03', '04', '05'],
['06', '07', '08', '09', '10'],
['11', '12', '13', '14', '15'],
['16', '17', '18', '19', '20'],
['21', '22', '23', '24', '25'],
]
"""
)
instance_model_contents, instance_model_errors = files['instance.py']
assert not instance_model_errors
assert instance_model_contents == normalize_yaml(
"""
from __future__ import annotations
from typing import Any, Mapping, Optional, Sequence
from pydantic import BaseModel, root_validator, validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
class InstanceConfig(BaseModel):
class Config:
allow_mutation = False
default_precedence: Optional[str]
example: Optional[str]
example_ignored_array: Optional[Sequence[str]]
example_ignored_object: Optional[Mapping[str, Any]]
foo: str
long_default_formatted: Optional[Sequence[Sequence[str]]]
@root_validator(pre=True)
def _initial_validation(cls, values):
return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))
@validator('*', pre=True, always=True)
def _ensure_defaults(cls, v, field):
if v is not None or field.required:
return v
return getattr(defaults, f'instance_{field.name}')(field, v)
@validator('*')
def _run_validations(cls, v, field):
if not v:
return v
return getattr(validators, f'instance_{field.name}', identity)(v, field=field)
@root_validator(pre=False)
def _final_validation(cls, values):
return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values))
"""
)
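# Editor's note (illustrative): inside the datadog_checks_dev environment this
# test is collected by pytest and can be narrowed with the markers declared
# above, e.g. `pytest -m conf_consumer_model <path-to-this-file>`, assuming the
# fixtures behind get_model_consumer are available.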
|
the-stack_0_10842 | import typing
import sys
import numpy as np
import numba as nb
@nb.njit
def sort_csgraph(
n: int,
g: np.ndarray,
) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:
sort_idx = np.argsort(g[:, 0], kind='mergesort')
g = g[sort_idx]
edge_idx = np.searchsorted(g[:, 0], np.arange(n + 1))
original_idx = np.arange(len(g))[sort_idx]
return g, edge_idx, original_idx
@nb.njit
def csgraph_to_undirected(g: np.ndarray) -> np.ndarray:
m = len(g)
g = np.vstack((g, g))
g[m:, :2] = g[m:, 1::-1]
return g
@nb.njit((nb.i8, nb.i8[:, :]), cache=True)
def connected_components_bfs(n: int, g: np.ndarray):
g = csgraph_to_undirected(g)
g, edge_idx, _ = sort_csgraph(n, g)
label = np.full(n, -1, np.int64)
l = 0
for i in range(n):
if label[i] != -1: continue
label[i] = l
que = [i]
for u in que:
for v in g[edge_idx[u]:edge_idx[u + 1], 1]:
if label[v] != -1: continue
label[v] = l
que.append(v)
l += 1
return label
@nb.njit((nb.i8[:], ), cache=True)
def solve(a: np.ndarray) -> typing.NoReturn:
n = a.size
a = np.searchsorted(np.unique(a), a)
m = a.max() + 1
g = np.empty((n, 2), np.int64)
idx_to_add = 0
def add_edge(u, v):
nonlocal idx_to_add
g[idx_to_add] = (u, v)
idx_to_add += 1
for i in range(n // 2):
x, y = a[i], a[n - 1 - i]
add_edge(x, y)
add_edge(y, x)
g = g[:idx_to_add]
label = connected_components_bfs(m, g)
print(m - label.max() - 1)
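# Editor's note (explanatory comment, not in the original submission): making
# `a` a palindrome forces a[i] and a[n-1-i] to share a value, so those value
# pairs become edges over the compressed values 0..m-1; each connected
# component can be unified by rewriting all but one of its values, giving the
# answer m - (number of components) = m - label.max() - 1.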
def main() -> typing.NoReturn:
n = int(input())
a = np.array(
sys.stdin.readline().split(),
dtype=np.int64,
)
solve(a)
main()
|
the-stack_0_10843 | #!/usr/bin/env python3
import argparse
import os
import sys
import numpy as np
from functools import reduce
from collections import OrderedDict
import pandas as pd
## merge filtered/summarized files with qsim values by user-specified comparison
def getOptions():
parser = argparse.ArgumentParser(description='Merges together filtered/summarized comparate count tables by user-specified comparison')
parser.add_argument("-output", "--output", dest="output", action="store", required=True, help="Output directory for complete merged comparate files ready for Bayesian")
parser.add_argument("-comp", "--comp", dest="comp", action='store', required=True, help="Input filtered/summarized count tables per one comparate")
parser.add_argument("-design", "--design", dest="design", action='store', required=True, help="Design file")
args=parser.parse_args()
return(args)
def main():
args = getOptions()
### Read in design file as dataframe
df_design = pd.read_csv(args.design)
### Create subset of design file of comparate specification columns (will quantify # comparates by number of columns left)
### Store compID to name output file
c1_g1_list = df_design['C1_G1'].tolist()
c1_g2_list = df_design['C1_G2'].tolist()
c2_g1_list = df_design['C2_G1'].tolist()
c2_g2_list = df_design['C2_G2'].tolist()
c1_list = df_design['Comparate_1'].tolist()
c2_list = df_design['Comparate_2'].tolist()
del df_design['C1_G1']
del df_design['C1_G2']
del df_design['C2_G1']
del df_design['C2_G2']
dict = {}
col_list = list(df_design.columns.values)
row_list = []
comparison_list = df_design['compID'].tolist()
del df_design['compID']
### Create dictionaries per design file row to store the row's comparate files
for index, sample in df_design.iterrows():
dict[index] = list(sample)
## If there are comparison columns (column # > 1)
for key in dict:
row_list = dict[key]
file_list = []
comp_dict = {}
comparison = comparison_list[key]
c1_g1= c1_g1_list[key]
c1_g2= c1_g2_list[key]
c2_g1= c2_g1_list[key]
c2_g2= c2_g2_list[key]
c1= c1_list[key]
c2= c2_list[key]
for i, comp in enumerate(row_list):
comp_dict[i+1] = comp
### Assign filename so it can be called
row_list[i] = args.comp + '/bayesian_input_' + comp + '.csv'
file = pd.read_csv(row_list[i], index_col=None, header =0)
file_list.append(file)
df_merged = reduce(lambda x, y: pd.merge(x, y, on = ['FEATURE_ID']), file_list)
### drop columns you don't want before merge
df_merged = df_merged[df_merged.columns.drop(list(df_merged.filter(regex='comp')))]
df_merged.set_index('FEATURE_ID', inplace=True)
## AMM: .get_values() is deprecated, so the commented line below is replaced by .to_numpy()
## merged_headers = list(df_merged.columns.get_values())
merged_headers = list(df_merged.columns.to_numpy())
### For stan model, requires headers to have general comparate input names
### This reassigns comparate names to be c1, c2, c3... depending on design file specifications
for x in comp_dict:
for i in range(len(merged_headers)):
if c1 in merged_headers[i]:
merged_headers[i] = merged_headers[i].replace(c1, 'c1')
if c2 in merged_headers[i]:
merged_headers[i] = merged_headers[i].replace(c2, 'c2')
df_merged.columns=merged_headers
df_filtered = df_merged
outfile = args.output + '/bayesian_input_' + comparison + '.csv'
df_filtered.to_csv(outfile)
if __name__=='__main__':
main()
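# Editor's note (illustrative invocation; the script and directory names are
# hypothetical):
#   python merge_by_comparison.py -design design_file.csv \
#       -comp filtered_summaries/ -output bayesian_input/
# where design_file.csv carries the compID, Comparate_1/2 and C*_G* columns
# referenced above.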
|
the-stack_0_10845 | from django.conf import settings
IS_TEST = False
TEST_FLAG = '__TEST'
class DbRouterMiddleware(object):
def process_request( self, request):
global IS_TEST
IS_TEST = request.GET.get(TEST_FLAG)
return None
def process_response( self, request, response ):
global IS_TEST
IS_TEST = False
return response
class DatabaseRouter (object):
# def db_for_read( self, model, **hints ):
# return 'test' if IS_TEST else 'default';
#
# def db_for_write( self, model, **hints ):
# return 'test' if IS_TEST else 'default';
#
# def allow_relation( self, obj1, obj2, **hints ):
# return True
#
# def allow_migrate( self, db, app_label, model_name=None, **hints ):
# return True
def db_for_read(self, model, **hints):
""""Point all read operations to the specific database."""
if model._meta.app_label in settings.DATABASE_APPS_MAPPING:
return settings.DATABASE_APPS_MAPPING[model._meta.app_label]
return 'test' if IS_TEST else 'default';
def db_for_write(self, model, **hints):
"""Point all write operations to the specific database."""
if model._meta.app_label in settings.DATABASE_APPS_MAPPING:
return settings.DATABASE_APPS_MAPPING[model._meta.app_label]
return 'test' if IS_TEST else 'default';
def allow_relation(self, obj1, obj2, **hints):
"""Allow any relation between apps that use the same database."""
db_obj1 = settings.DATABASE_APPS_MAPPING.get(obj1._meta.app_label)
db_obj2 = settings.DATABASE_APPS_MAPPING.get(obj2._meta.app_label)
if db_obj1 and db_obj2:
if db_obj1 == db_obj2:
return True
else:
return False
return True
def allow_migrate(self, db, model):
"""Make sure that apps only appear in the related database."""
""" No migrate all database no_sql and model have ap_label = no_sql"""
if db == 'no_sql' or model._meta.app_label == "no_sql":
return False
else:
return True
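# Illustrative settings.py wiring (editor's sketch; the dotted paths are
# assumptions about where this module lives):
# DATABASE_ROUTERS = ['myproject.db_router.DatabaseRouter']
# MIDDLEWARE_CLASSES = [
#     # ...,
#     'myproject.db_router.DbRouterMiddleware',
# ]
# DATABASE_APPS_MAPPING = {'analytics': 'no_sql'}
# DATABASES must then define the 'default', 'test' and 'no_sql' aliases used above.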
|
the-stack_0_10847 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Wrappers around spark that correspond to common pandas functions.
"""
from typing import Any, Optional, Union, List, Tuple, Sized, cast
from collections import OrderedDict
from collections.abc import Iterable
from distutils.version import LooseVersion
from functools import reduce
from io import BytesIO
import json
import numpy as np
import pandas as pd
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like
import pyarrow as pa
import pyarrow.parquet as pq
import pyspark
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import (
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType,
BooleanType,
TimestampType,
DecimalType,
StringType,
DateType,
StructType,
)
from pyspark import pandas as pp # noqa: F401
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.utils import (
align_diff_frames,
default_session,
is_name_like_tuple,
name_like_string,
same_anchor,
scol_for,
validate_axis,
)
from pyspark.pandas.frame import DataFrame, _reduce_spark_multi
from pyspark.pandas.internal import (
InternalFrame,
DEFAULT_SERIES_NAME,
HIDDEN_COLUMNS,
)
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale
from pyspark.pandas.indexes import Index, DatetimeIndex
__all__ = [
"from_pandas",
"range",
"read_csv",
"read_delta",
"read_table",
"read_spark_io",
"read_parquet",
"read_clipboard",
"read_excel",
"read_html",
"to_datetime",
"date_range",
"get_dummies",
"concat",
"melt",
"isna",
"isnull",
"notna",
"notnull",
"read_sql_table",
"read_sql_query",
"read_sql",
"read_json",
"merge",
"to_numeric",
"broadcast",
"read_orc",
]
def from_pandas(pobj: Union[pd.DataFrame, pd.Series, pd.Index]) -> Union[Series, DataFrame, Index]:
"""Create a Koalas DataFrame, Series or Index from a pandas DataFrame, Series or Index.
This is similar to Spark's `SparkSession.createDataFrame()` with pandas DataFrame,
but this also works with pandas Series and picks the index.
Parameters
----------
pobj : pandas.DataFrame or pandas.Series
pandas DataFrame or Series to read.
Returns
-------
Series or DataFrame
If a pandas Series is passed in, this function returns a Koalas Series.
If a pandas DataFrame is passed in, this function returns a Koalas DataFrame.
"""
if isinstance(pobj, pd.Series):
return Series(pobj)
elif isinstance(pobj, pd.DataFrame):
return DataFrame(pobj)
elif isinstance(pobj, pd.Index):
return DataFrame(pd.DataFrame(index=pobj)).index
else:
raise ValueError("Unknown data type: {}".format(type(pobj).__name__))
_range = range # built-in range
def range(
start: int, end: Optional[int] = None, step: int = 1, num_partitions: Optional[int] = None
) -> DataFrame:
"""
Create a DataFrame with some range of numbers.
The resulting DataFrame has a single int64 column named `id`, containing elements in a range
from ``start`` to ``end`` (exclusive) with step value ``step``. If only the first parameter
(i.e. start) is specified, we treat it as the end value with the start value being 0.
This is similar to the range function in SparkSession and is used primarily for testing.
Parameters
----------
start : int
the start value (inclusive)
end : int, optional
the end value (exclusive)
step : int, optional, default 1
the incremental step
num_partitions : int, optional
the number of partitions of the DataFrame
Returns
-------
DataFrame
Examples
--------
When the first parameter is specified, we generate a range of values up till that number.
>>> pp.range(5)
id
0 0
1 1
2 2
3 3
4 4
When start, end, and step are specified:
>>> pp.range(start = 100, end = 200, step = 20)
id
0 100
1 120
2 140
3 160
4 180
"""
sdf = default_session().range(start=start, end=end, step=step, numPartitions=num_partitions)
return DataFrame(sdf)
def read_csv(
path,
sep=",",
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
mangle_dupe_cols=True,
dtype=None,
nrows=None,
parse_dates=False,
quotechar=None,
escapechar=None,
comment=None,
**options
) -> Union[DataFrame, Series]:
"""Read CSV (comma-separated) file into DataFrame or Series.
Parameters
----------
path : str
The path string storing the CSV file to be read.
sep : str, default ‘,’
Delimiter to use. Must be a single character.
header : int, list of int, default ‘infer’
Row number(s) to use as the column names, and the start of the data.
Default behavior is to infer the column names: if no names are passed
the behavior is identical to `header=0` and column names are inferred from
the first line of the file, if column names are passed explicitly then
the behavior is identical to `header=None`. Explicitly pass `header=0` to be
able to replace existing names
names : str or array-like, optional
List of column names to use. If file contains no header row, then you should
explicitly pass `header=None`. Duplicates in this list will cause an error to be issued.
If a string is given, it should be a DDL-formatted string in Spark SQL, which is
preferred to avoid schema inference for better performance.
index_col: str or list of str, optional, default: None
Index column of table in Spark.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either be
positional (i.e. integer indices into the document columns) or strings that
correspond to column names provided either by the user in names or inferred
from the document header row(s).
If callable, the callable function will be evaluated against the column names,
returning names where the callable function evaluates to `True`.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X0', 'X1', ... 'XN', rather
than 'X' ... 'X'. Passing in False will cause data to be overwritten if
there are duplicate names in the columns.
Currently only `True` is allowed.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {‘a’: np.float64, ‘b’: np.int32} Use str or object
together with suitable na_values settings to preserve and not interpret dtype.
nrows : int, default None
Number of rows to read from the CSV file.
parse_dates : boolean or list of ints or names or list of lists or dict, default `False`.
Currently only `False` is allowed.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted items can include
the delimiter and it will be ignored.
escapechar : str (length 1), default None
One-character string used to escape delimiter
comment: str, optional
Indicates the line should not be parsed.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
Examples
--------
>>> pp.read_csv('data.csv') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if mangle_dupe_cols is not True:
raise ValueError("mangle_dupe_cols can only be `True`: %s" % mangle_dupe_cols)
if parse_dates is not False:
raise ValueError("parse_dates can only be `False`: %s" % parse_dates)
if usecols is not None and not callable(usecols):
usecols = list(usecols)
if usecols is None or callable(usecols) or len(usecols) > 0:
reader = default_session().read
reader.option("inferSchema", True)
reader.option("sep", sep)
if header == "infer":
header = 0 if names is None else None
if header == 0:
reader.option("header", True)
elif header is None:
reader.option("header", False)
else:
raise ValueError("Unknown header argument {}".format(header))
if quotechar is not None:
reader.option("quote", quotechar)
if escapechar is not None:
reader.option("escape", escapechar)
if comment is not None:
if not isinstance(comment, str) or len(comment) != 1:
raise ValueError("Only length-1 comment characters supported")
reader.option("comment", comment)
reader.options(**options)
if isinstance(names, str):
sdf = reader.schema(names).csv(path)
column_labels = OrderedDict((col, col) for col in sdf.columns)
else:
sdf = reader.csv(path)
if is_list_like(names):
names = list(names)
if len(set(names)) != len(names):
raise ValueError("Found non-unique column index")
if len(names) != len(sdf.columns):
raise ValueError(
"The number of names [%s] does not match the number "
"of columns [%d]. Try names by a Spark SQL DDL-formatted "
"string." % (len(sdf.schema), len(names))
)
column_labels = OrderedDict(zip(names, sdf.columns))
elif header is None:
column_labels = OrderedDict(enumerate(sdf.columns))
else:
column_labels = OrderedDict((col, col) for col in sdf.columns)
if usecols is not None:
if callable(usecols):
column_labels = OrderedDict(
(label, col) for label, col in column_labels.items() if usecols(label)
)
missing = []
elif all(isinstance(col, int) for col in usecols):
new_column_labels = OrderedDict(
(label, col)
for i, (label, col) in enumerate(column_labels.items())
if i in usecols
)
missing = [
col
for col in usecols
if col >= len(column_labels)
or list(column_labels)[col] not in new_column_labels
]
column_labels = new_column_labels
elif all(isinstance(col, str) for col in usecols):
new_column_labels = OrderedDict(
(label, col) for label, col in column_labels.items() if label in usecols
)
missing = [col for col in usecols if col not in new_column_labels]
column_labels = new_column_labels
else:
raise ValueError(
"'usecols' must either be list-like of all strings, "
"all unicode, all integers or a callable."
)
if len(missing) > 0:
raise ValueError(
"Usecols do not match columns, columns expected but not " "found: %s" % missing
)
if len(column_labels) > 0:
sdf = sdf.select([scol_for(sdf, col) for col in column_labels.values()])
else:
sdf = default_session().createDataFrame([], schema=StructType())
else:
sdf = default_session().createDataFrame([], schema=StructType())
column_labels = OrderedDict()
if nrows is not None:
sdf = sdf.limit(nrows)
if index_col is not None:
if isinstance(index_col, (str, int)):
index_col = [index_col]
for col in index_col:
if col not in column_labels:
raise KeyError(col)
index_spark_column_names = [column_labels[col] for col in index_col]
index_names = [(col,) for col in index_col] # type: List[Tuple]
column_labels = OrderedDict(
(label, col) for label, col in column_labels.items() if label not in index_col
)
else:
index_spark_column_names = []
index_names = []
kdf = DataFrame(
InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_spark_column_names],
index_names=index_names,
column_labels=[
label if is_name_like_tuple(label) else (label,) for label in column_labels
],
data_spark_columns=[scol_for(sdf, col) for col in column_labels.values()],
)
) # type: DataFrame
if dtype is not None:
if isinstance(dtype, dict):
for col, tpe in dtype.items():
kdf[col] = kdf[col].astype(tpe)
else:
for col in kdf.columns:
kdf[col] = kdf[col].astype(dtype)
if squeeze and len(kdf.columns) == 1:
return first_series(kdf)
else:
return kdf
def read_json(
path: str, lines: bool = True, index_col: Optional[Union[str, List[str]]] = None, **options
) -> DataFrame:
"""
Convert a JSON string to DataFrame.
Parameters
----------
path : string
File path
lines : bool, default True
Read the file as a json object per line. It should be always True for now.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
Examples
--------
>>> df = pp.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json(path=r'%s/read_json/foo.json' % path, num_files=1)
>>> pp.read_json(
... path=r'%s/read_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df.to_json(path=r'%s/read_json/foo.json' % path, num_files=1, lineSep='___')
>>> pp.read_json(
... path=r'%s/read_json/foo.json' % path, lineSep='___'
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
You can preserve the index in the roundtrip as below.
>>> df.to_json(path=r'%s/read_json/bar.json' % path, num_files=1, index_col="index")
>>> pp.read_json(
... path=r'%s/read_json/bar.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1 col 2
index
0 a b
1 c d
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
return read_spark_io(path, format="json", index_col=index_col, **options)
def read_delta(
path: str,
version: Optional[str] = None,
timestamp: Optional[str] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> DataFrame:
"""
Read a Delta Lake table on some file system and return a DataFrame.
If the Delta Lake table is already stored in the catalog (aka the metastore), use 'read_table'.
Parameters
----------
path : string
Path to the Delta Lake table.
version : string, optional
Specifies the table version (based on Delta's internal transaction version) to read from,
using Delta's time travel feature. This sets Delta's 'versionAsOf' option.
timestamp : string, optional
Specifies the table version (based on timestamp) to read from,
using Delta's time travel feature. This must be a valid date or timestamp string in Spark,
and sets Delta's 'timestampAsOf' option.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options
Additional options that can be passed onto Delta.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_delta
read_table
read_spark_io
read_parquet
Examples
--------
>>> pp.range(1).to_delta('%s/read_delta/foo' % path) # doctest: +SKIP
>>> pp.read_delta('%s/read_delta/foo' % path) # doctest: +SKIP
id
0 0
>>> pp.range(10, 15, num_partitions=1).to_delta('%s/read_delta/foo' % path,
... mode='overwrite') # doctest: +SKIP
>>> pp.read_delta('%s/read_delta/foo' % path) # doctest: +SKIP
id
0 10
1 11
2 12
3 13
4 14
>>> pp.read_delta('%s/read_delta/foo' % path, version=0) # doctest: +SKIP
id
0 0
You can preserve the index in the roundtrip as below.
>>> pp.range(10, 15, num_partitions=1).to_delta(
... '%s/read_delta/bar' % path, index_col="index") # doctest: +SKIP
>>> pp.read_delta('%s/read_delta/bar' % path, index_col="index") # doctest: +SKIP
id
index
0 10
1 11
2 12
3 13
4 14
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if version is not None:
options["versionAsOf"] = version
if timestamp is not None:
options["timestampAsOf"] = timestamp
return read_spark_io(path, format="delta", index_col=index_col, **options)
def read_table(name: str, index_col: Optional[Union[str, List[str]]] = None) -> DataFrame:
"""
Read a Spark table and return a DataFrame.
Parameters
----------
name : string
Table name in Spark.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_table
read_delta
read_parquet
read_spark_io
Examples
--------
>>> pp.range(1).to_table('%s.my_table' % db)
>>> pp.read_table('%s.my_table' % db)
id
0 0
>>> pp.range(1).to_table('%s.my_table' % db, index_col="index")
>>> pp.read_table('%s.my_table' % db, index_col="index") # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
sdf = default_session().read.table(name)
index_spark_columns, index_names = _get_index_map(sdf, index_col)
return DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
def read_spark_io(
path: Optional[str] = None,
format: Optional[str] = None,
schema: Union[str, "StructType"] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> DataFrame:
"""Load a DataFrame from a Spark data source.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
schema : string or StructType, optional
Input schema. If none, Spark tries to infer the schema automatically.
The schema can either be a Spark StructType, or a DDL-formatted string like
`col0 INT, col1 DOUBLE`.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
See Also
--------
DataFrame.to_spark_io
DataFrame.read_table
DataFrame.read_delta
DataFrame.read_parquet
Examples
--------
>>> pp.range(1).to_spark_io('%s/read_spark_io/data.parquet' % path)
>>> pp.read_spark_io(
... '%s/read_spark_io/data.parquet' % path, format='parquet', schema='id long')
id
0 0
>>> pp.range(10, 15, num_partitions=1).to_spark_io('%s/read_spark_io/data.json' % path,
... format='json', lineSep='__')
>>> pp.read_spark_io(
... '%s/read_spark_io/data.json' % path, format='json', schema='id long', lineSep='__')
id
0 10
1 11
2 12
3 13
4 14
You can preserve the index in the roundtrip as below.
>>> pp.range(10, 15, num_partitions=1).to_spark_io('%s/read_spark_io/data.orc' % path,
... format='orc', index_col="index")
>>> pp.read_spark_io(
... path=r'%s/read_spark_io/data.orc' % path, format="orc", index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 10
1 11
2 12
3 13
4 14
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
sdf = default_session().read.load(path=path, format=format, schema=schema, **options)
index_spark_columns, index_names = _get_index_map(sdf, index_col)
return DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
def read_parquet(path, columns=None, index_col=None, pandas_metadata=False, **options) -> DataFrame:
"""Load a parquet object from the file path, returning a DataFrame.
Parameters
----------
path : string
File path
columns : list, default=None
If not None, only these columns will be read from the file.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
pandas_metadata : bool, default: False
If True, try to respect the metadata if the Parquet file is written from pandas.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_parquet
DataFrame.read_table
DataFrame.read_delta
DataFrame.read_spark_io
Examples
--------
>>> pp.range(1).to_parquet('%s/read_spark_io/data.parquet' % path)
>>> pp.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'])
id
0 0
You can preserve the index in the roundtrip as below.
>>> pp.range(1).to_parquet('%s/read_spark_io/data.parquet' % path, index_col="index")
>>> pp.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'], index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if columns is not None:
columns = list(columns)
index_names = None
if index_col is None and pandas_metadata:
if LooseVersion(pyspark.__version__) < LooseVersion("3.0.0"):
raise ValueError("pandas_metadata is not supported with Spark < 3.0.")
# Try to read pandas metadata
@pandas_udf("index_col array<string>, index_names array<string>", PandasUDFType.SCALAR)
def read_index_metadata(pser):
binary = pser.iloc[0]
metadata = pq.ParquetFile(pa.BufferReader(binary)).metadata.metadata
if b"pandas" in metadata:
pandas_metadata = json.loads(metadata[b"pandas"].decode("utf8"))
if all(isinstance(col, str) for col in pandas_metadata["index_columns"]):
index_col = []
index_names = []
for col in pandas_metadata["index_columns"]:
index_col.append(col)
for column in pandas_metadata["columns"]:
if column["field_name"] == col:
index_names.append(column["name"])
break
else:
index_names.append(None)
return pd.DataFrame({"index_col": [index_col], "index_names": [index_names]})
return pd.DataFrame({"index_col": [None], "index_names": [None]})
index_col, index_names = (
default_session()
.read.format("binaryFile")
.load(path)
.limit(1)
.select(read_index_metadata("content").alias("index_metadata"))
.select("index_metadata.*")
.head()
)
kdf = read_spark_io(path=path, format="parquet", options=options, index_col=index_col)
if columns is not None:
new_columns = [c for c in columns if c in kdf.columns]
if len(new_columns) > 0:
kdf = kdf[new_columns]
else:
sdf = default_session().createDataFrame([], schema=StructType())
index_spark_columns, index_names = _get_index_map(sdf, index_col)
kdf = DataFrame(
InternalFrame(
spark_frame=sdf,
index_spark_columns=index_spark_columns,
index_names=index_names,
)
)
if index_names is not None:
kdf.index.names = index_names
return kdf
def read_clipboard(sep=r"\s+", **kwargs) -> DataFrame:
r"""
Read text from clipboard and pass to read_csv. See read_csv for the
full argument list
Parameters
----------
sep : str, default '\s+'
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
See Also
--------
DataFrame.to_clipboard : Write text out to clipboard.
Returns
-------
parsed : DataFrame
"""
return cast(DataFrame, from_pandas(pd.read_clipboard(sep, **kwargs)))
def read_excel(
io,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds
) -> Union[DataFrame, Series, OrderedDict]:
"""
Read an Excel file into a Koalas DataFrame or Series.
Support both `xls` and `xlsx` file extensions from a local filesystem or URL.
Support an option to read a single sheet or a list of sheets.
Parameters
----------
io : str, file descriptor, pathlib.Path, ExcelFile or xlrd.Book
The string could be a URL. The value URL must be available in Spark's DataFrameReader.
.. note::
If the underlying Spark is below 3.0, the parameter as a string is not supported.
You can use `pp.from_pandas(pd.read_excel(...))` as a workaround.
sheet_name : str, int, list, or None, default 0
Strings are used for sheet names. Integers are used in zero-indexed
sheet positions. Lists of strings/integers are used to request
multiple sheets. Specify None to get all sheets.
Available cases:
* Defaults to ``0``: 1st sheet as a `DataFrame`
* ``1``: 2nd sheet as a `DataFrame`
* ``"Sheet1"``: Load sheet with name "Sheet1"
* ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
as a dict of `DataFrame`
* None: All sheets.
header : int, list of int, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None.
index_col : int, list of int, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
usecols : int, str, list-like, or callable default None
Return a subset of the columns.
* If None, then parse all columns.
* If str, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of int, then indicates list of column numbers to be parsed.
* If list of string, then indicates list of column names to be parsed.
* If callable, then evaluate each column name against it and parse the
column if the callable returns ``True``.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd.
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True.
false_values : list, default None
Values to consider as False.
skiprows : list-like
Rows to skip at the beginning (0-indexed).
nrows : int, default None
Number of rows to parse.
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
parse_dates : bool, list-like, or dict, default False
The behavior is as follows:
* bool. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index contains an unparseable date, the entire column or
index will be returned unaltered as an object data type. For non-standard
datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``
Note: A fast-path exists for iso8601-formatted dates.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Koalas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skipfooter : int, default 0
Rows at the end to skip (0-indexed).
convert_float : bool, default True
Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a dict of DataFrames is returned.
See Also
--------
DataFrame.to_excel : Write DataFrame to an Excel file.
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
The file can be read using the file name as string or an open file object:
>>> pp.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
Name Value
0 string1 1
1 string2 2
2 #Comment 3
>>> pp.read_excel(open('tmp.xlsx', 'rb'),
... sheet_name='Sheet3') # doctest: +SKIP
Unnamed: 0 Name Value
0 0 string1 1
1 1 string2 2
2 2 #Comment 3
Index and header can be specified via the `index_col` and `header` arguments
>>> pp.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 #Comment 3
Column types are inferred but can be explicitly specified
>>> pp.read_excel('tmp.xlsx', index_col=0,
... dtype={'Name': str, 'Value': float}) # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 #Comment 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> pp.read_excel('tmp.xlsx', index_col=0,
... na_values=['string1', 'string2']) # doctest: +SKIP
Name Value
0 None 1
1 None 2
2 #Comment 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> pp.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 None NaN
"""
def pd_read_excel(io_or_bin, sn, sq):
return pd.read_excel(
io=BytesIO(io_or_bin) if isinstance(io_or_bin, (bytes, bytearray)) else io_or_bin,
sheet_name=sn,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=sq,
dtype=dtype,
engine=engine,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
keep_default_na=keep_default_na,
verbose=verbose,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds
)
if isinstance(io, str):
if LooseVersion(pyspark.__version__) < LooseVersion("3.0.0"):
raise ValueError(
"The `io` parameter as a string is not supported if the underlying Spark is "
"below 3.0. You can use `pp.from_pandas(pd.read_excel(...))` as a workaround"
)
# 'binaryFile' format is available since Spark 3.0.0.
binaries = default_session().read.format("binaryFile").load(io).select("content").head(2)
io_or_bin = binaries[0][0]
single_file = len(binaries) == 1
else:
io_or_bin = io
single_file = True
pdf_or_psers = pd_read_excel(io_or_bin, sn=sheet_name, sq=squeeze)
if single_file:
if isinstance(pdf_or_psers, dict):
return OrderedDict(
[(sn, from_pandas(pdf_or_pser)) for sn, pdf_or_pser in pdf_or_psers.items()]
)
else:
return cast(Union[DataFrame, Series], from_pandas(pdf_or_psers))
else:
def read_excel_on_spark(pdf_or_pser, sn):
if isinstance(pdf_or_pser, pd.Series):
pdf = pdf_or_pser.to_frame()
else:
pdf = pdf_or_pser
kdf = from_pandas(pdf)
return_schema = force_decimal_precision_scale(
as_nullable_spark_type(kdf._internal.spark_frame.drop(*HIDDEN_COLUMNS).schema)
)
def output_func(pdf):
pdf = pd.concat(
[pd_read_excel(bin, sn=sn, sq=False) for bin in pdf[pdf.columns[0]]]
)
reset_index = pdf.reset_index()
for name, col in reset_index.iteritems():
dt = col.dtype
if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
continue
reset_index[name] = col.replace({np.nan: None})
pdf = reset_index
# Just positionally map the column names to given schema's.
return pdf.rename(columns=dict(zip(pdf.columns, return_schema.names)))
sdf = (
default_session()
.read.format("binaryFile")
.load(io)
.select("content")
.mapInPandas(lambda iterator: map(output_func, iterator), schema=return_schema)
)
kdf = DataFrame(kdf._internal.with_new_sdf(sdf))
if squeeze and len(kdf.columns) == 1:
return first_series(kdf)
else:
return kdf
if isinstance(pdf_or_psers, dict):
return OrderedDict(
[
(sn, read_excel_on_spark(pdf_or_pser, sn))
for sn, pdf_or_pser in pdf_or_psers.items()
]
)
else:
return read_excel_on_spark(pdf_or_psers, sheet_name)
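# Illustrative sketch (not part of the public API) of the distributed path used
# above: workbooks are loaded as raw bytes with Spark's `binaryFile` source and
# parsed by pandas on the executors via `mapInPandas`. The path and schema below
# are hypothetical and must match the actual workbook contents.
def _sketch_distributed_read_excel():
    from io import BytesIO

    import pandas as pd
    from pyspark.sql import SparkSession

    def parse_binary_excel(batches):
        for pdf in batches:
            # each row of the `content` column holds the raw bytes of one workbook
            yield pd.concat(pd.read_excel(BytesIO(b)) for b in pdf["content"])

    spark = SparkSession.builder.getOrCreate()
    return (
        spark.read.format("binaryFile")
        .load("/data/workbooks/*.xlsx")  # hypothetical path
        .select("content")
        .mapInPandas(parse_binary_excel, schema="Name string, Value long")  # assumed schema
    )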
def read_html(
io,
match=".+",
flavor=None,
header=None,
index_col=None,
skiprows=None,
attrs=None,
parse_dates=False,
thousands=",",
encoding=None,
decimal=".",
converters=None,
na_values=None,
keep_default_na=True,
displayed_only=True,
) -> List[DataFrame]:
r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
Parameters
----------
io : str or file-like
A URL, a file-like object, or a raw string containing HTML. Note that
lxml only accepts the http, ftp and file url protocols. If you have a
URL that starts with ``'https'`` you might try removing the ``'s'``.
match : str or compiled regular expression, optional
The set of tables containing text matching this regex or string will be
returned. Unless the HTML is extremely simple you will probably need to
pass a non-empty string here. Defaults to '.+' (match any non-empty
string). The default value will return all tables contained on a page.
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
flavor : str or None, container of strings
The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
each other; they are both there for backwards compatibility. The
default of ``None`` tries to use ``lxml`` to parse and if that fails it
falls back on ``bs4`` + ``html5lib``.
header : int or list-like or None, optional
The row (or list of rows for a :class:`~pp.MultiIndex`) to use to
make the columns headers.
index_col : int or list-like or None, optional
The column (or list of columns) to use to create the index.
skiprows : int or list-like or slice or None, optional
0-based. Number of rows to skip after parsing the column integer. If a
sequence of integers or a slice is given, will skip the rows indexed by
that sequence. Note that a single element sequence means 'skip the nth
row' whereas an integer means 'skip n rows'.
attrs : dict or None, optional
This is a dictionary of attributes that you can pass to use to identify
the table in the HTML. These are not checked for validity before being
passed to lxml or Beautiful Soup. However, these attributes must be
valid HTML table attributes to work correctly. For example, ::
attrs = {'id': 'table'}
is a valid attribute dictionary because the 'id' HTML tag attribute is
a valid HTML attribute for *any* HTML tag as per `this document
<http://www.w3.org/TR/html-markup/global-attributes.html>`__. ::
attrs = {'asdf': 'table'}
is *not* a valid attribute dictionary because 'asdf' is not a valid
HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
table attributes can be found `here
<http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
working draft of the HTML 5 spec can be found `here
<http://www.w3.org/TR/html-markup/table.html>`__. It contains the
latest information on table attributes for the modern web.
parse_dates : bool, optional
See :func:`~pp.read_csv` for more details.
thousands : str, optional
Separator to use to parse thousands. Defaults to ``','``.
encoding : str or None, optional
The encoding used to decode the web page. Defaults to ``None``. ``None``
preserves the previous encoding behavior, which depends on the
underlying parser library (e.g., the parser library will try to use
the encoding provided by the document).
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European
data).
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
na_values : iterable, default None
Custom NA values
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
displayed_only : bool, default True
Whether elements with "display: none" should be parsed
Returns
-------
dfs : list of DataFrames
See Also
--------
read_csv
DataFrame.to_html
"""
pdfs = pd.read_html(
io=io,
match=match,
flavor=flavor,
header=header,
index_col=index_col,
skiprows=skiprows,
attrs=attrs,
parse_dates=parse_dates,
thousands=thousands,
encoding=encoding,
decimal=decimal,
converters=converters,
na_values=na_values,
keep_default_na=keep_default_na,
displayed_only=displayed_only,
)
return cast(List[DataFrame], [from_pandas(pdf) for pdf in pdfs])
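# Minimal usage sketch for read_html, assuming an inline HTML string and an
# installed HTML parser (e.g. lxml); URLs and file-like objects work the same way.
def _sketch_read_html():
    html = """
    <table>
      <tr><th>name</th><th>value</th></tr>
      <tr><td>a</td><td>1</td></tr>
      <tr><td>b</td><td>2</td></tr>
    </table>
    """
    # read_html returns a list with one Koalas DataFrame per matched <table>
    return read_html(html)[0]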
# TODO: add `coerce_float` and 'parse_dates' parameters
def read_sql_table(
table_name, con, schema=None, index_col=None, columns=None, **options
) -> DataFrame:
"""
Read SQL database table into a DataFrame.
Given a table name and a JDBC URI, returns a DataFrame.
Parameters
----------
table_name : str
Name of SQL table in database.
con : str
A JDBC URI can be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
columns : list, default None
List of column names to select from SQL table.
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
A SQL table is returned as two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Examples
--------
>>> pp.read_sql_table('table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
reader = default_session().read
reader.option("dbtable", table_name)
reader.option("url", con)
if schema is not None:
reader.schema(schema)
reader.options(**options)
sdf = reader.format("jdbc").load()
index_spark_columns, index_names = _get_index_map(sdf, index_col)
kdf = DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
) # type: DataFrame
if columns is not None:
if isinstance(columns, str):
columns = [columns]
kdf = kdf[columns]
return kdf
# TODO: add `coerce_float`, `params`, and 'parse_dates' parameters
def read_sql_query(sql, con, index_col=None, **options) -> DataFrame:
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default index will be used.
.. note:: Some databases might hit the Spark issue SPARK-27596.
Parameters
----------
sql : string SQL query
SQL query to be executed.
con : str
A JDBC URI can be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex).
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql
Examples
--------
>>> pp.read_sql_query('SELECT * FROM table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
reader = default_session().read
reader.option("query", sql)
reader.option("url", con)
reader.options(**options)
sdf = reader.format("jdbc").load()
index_spark_columns, index_names = _get_index_map(sdf, index_col)
return DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
# TODO: add `coerce_float`, `params`, and 'parse_dates' parameters
def read_sql(sql, con, index_col=None, columns=None, **options) -> DataFrame:
"""
Read SQL query or database table into a DataFrame.
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (for backward compatibility). It will delegate
to the specific function depending on the provided input. A SQL query
will be routed to ``read_sql_query``, while a database table name will
be routed to ``read_sql_table``. Note that the delegated function might
have more specific notes about their functionality not listed here.
.. note:: Some databases might hit the Spark issue SPARK-27596.
Parameters
----------
sql : string
SQL query to be executed or a table name.
con : str
A JDBC URI can be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex).
columns : list, default: None
List of column names to select from SQL table (only used when reading
a table).
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql_query : Read SQL query into a DataFrame.
Examples
--------
>>> pp.read_sql('table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
>>> pp.read_sql('SELECT * FROM table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
stripped = sql.strip()
if " " not in stripped:  # TODO: identify the table name or not more precisely.
return read_sql_table(sql, con, index_col=index_col, columns=columns, **options)
else:
return read_sql_query(sql, con, index_col=index_col, **options)
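# Sketch of the dispatch rule above: a bare table name (no whitespace) goes to
# read_sql_table, anything containing a space is treated as a query. The JDBC
# URL is hypothetical; running this requires a reachable database and driver.
def _sketch_read_sql_dispatch():
    url = "jdbc:postgresql:db_name"  # hypothetical connection string
    table_df = read_sql("table_name", url)  # routed to read_sql_table
    query_df = read_sql("SELECT * FROM table_name", url)  # routed to read_sql_query
    return table_df, query_df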
def to_datetime(
arg, errors="raise", format=None, unit=None, infer_datetime_format=False, origin="unix"
):
"""
Convert argument to datetime.
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
or DataFrame/dict-like
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
format : string, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
unit : string, default None
unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
Example, with unit='ms' and origin='unix' (the default), this
would calculate the number of milliseconds to the unix epoch start.
infer_datetime_format : boolean, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
origin : scalar, default 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If 'unix' (or POSIX) time; origin is set to 1970-01-01.
- If 'julian', unit must be 'D', and origin is set to beginning of
Julian Calendar. Julian day number 0 is assigned to the day starting
at noon on January 1, 4713 BC.
- If Timestamp convertible, origin is set to Timestamp identified by
origin.
Returns
-------
ret : datetime if parsing succeeded.
Return type depends on input:
- list-like: DatetimeIndex
- Series: Series of datetime64 dtype
- scalar: Timestamp
In case when it is not possible to return designated types (e.g. when
any element of input is before Timestamp.min or after Timestamp.max)
return will have datetime.datetime type (or corresponding
array/Series).
Examples
--------
Assembling a datetime from multiple columns of a DataFrame. The keys can be
common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns']) or plurals of the same
>>> df = pp.DataFrame({'year': [2015, 2016],
... 'month': [2, 3],
... 'day': [4, 5]})
>>> pp.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
If a date does not meet the `timestamp limitations
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html
#timeseries-timestamp-limits>`_, passing errors='ignore'
will return the original input instead of raising any exception.
Passing errors='coerce' will force an out-of-bounds date to NaT,
in addition to forcing non-dates (or non-parseable dates) to NaT.
>>> pp.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pp.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
Passing infer_datetime_format=True can often speed up parsing
if the strings are not exactly in ISO8601 format but follow a regular pattern.
>>> s = pp.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
dtype: object
>>> import timeit
>>> timeit.timeit(
... lambda: repr(pp.to_datetime(s, infer_datetime_format=True)),
... number = 1) # doctest: +SKIP
0.35832712500000063
>>> timeit.timeit(
... lambda: repr(pp.to_datetime(s, infer_datetime_format=False)),
... number = 1) # doctest: +SKIP
0.8895321660000004
Using a unix epoch time
>>> pp.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> pp.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
Using a non-unix epoch origin
>>> pp.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))
DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'], dtype='datetime64[ns]', freq=None)
"""
def pandas_to_datetime(pser_or_pdf) -> Series[np.datetime64]:
if isinstance(pser_or_pdf, pd.DataFrame):
pser_or_pdf = pser_or_pdf[["year", "month", "day"]]
return pd.to_datetime(
pser_or_pdf,
errors=errors,
format=format,
unit=unit,
infer_datetime_format=infer_datetime_format,
origin=origin,
)
if isinstance(arg, Series):
return arg.koalas.transform_batch(pandas_to_datetime)
if isinstance(arg, DataFrame):
kdf = arg[["year", "month", "day"]]
return kdf.koalas.transform_batch(pandas_to_datetime)
return pd.to_datetime(
arg,
errors=errors,
format=format,
unit=unit,
infer_datetime_format=infer_datetime_format,
origin=origin,
)
def date_range(
start=None,
end=None,
periods=None,
freq=None,
tz=None,
normalize=False,
name=None,
closed=None,
**kwargs
) -> DatetimeIndex:
"""
Return a fixed frequency DatetimeIndex.
Parameters
----------
start : str or datetime-like, optional
Left bound for generating dates.
end : str or datetime-like, optional
Right bound for generating dates.
periods : int, optional
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'.
tz : str or tzinfo, optional
Time zone name for returning localized DatetimeIndex, for example
'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
timezone-naive.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
name : str, default None
Name of the resulting DatetimeIndex.
closed : {None, 'left', 'right'}, optional
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None, the default).
**kwargs
For compatibility. Has no effect on the result.
Returns
-------
rng : DatetimeIndex
See Also
--------
DatetimeIndex : An immutable container for datetimes.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``DatetimeIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
**Specifying the values**
The next four examples generate the same `DatetimeIndex`, but vary
the combination of `start`, `end` and `periods`.
Specify `start` and `end`, with the default daily frequency.
>>> pp.date_range(start='1/1/2018', end='1/08/2018') # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq=None)
Specify `start` and `periods`, the number of periods (days).
>>> pp.date_range(start='1/1/2018', periods=8) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq=None)
Specify `end` and `periods`, the number of periods (days).
>>> pp.date_range(end='1/1/2018', periods=8) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
'2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq=None)
Specify `start`, `end`, and `periods`; the frequency is generated
automatically (linearly spaced).
>>> pp.date_range(
... start='2018-04-24', end='2018-04-27', periods=3
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
'2018-04-27 00:00:00'],
dtype='datetime64[ns]', freq=None)
**Other Parameters**
Changed the `freq` (frequency) to ``'M'`` (month end frequency).
>>> pp.date_range(start='1/1/2018', periods=5, freq='M') # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
'2018-05-31'],
dtype='datetime64[ns]', freq=None)
Multiples are allowed
>>> pp.date_range(start='1/1/2018', periods=5, freq='3M') # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq=None)
`freq` can also be specified as an Offset object.
>>> pp.date_range(
... start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3)
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq=None)
`closed` controls whether to include `start` and `end` that are on the
boundary. The default includes boundary points on either end.
>>> pp.date_range(
... start='2017-01-01', end='2017-01-04', closed=None
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq=None)
Use ``closed='left'`` to exclude `end` if it falls on the boundary.
>>> pp.date_range(
... start='2017-01-01', end='2017-01-04', closed='left'
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'], dtype='datetime64[ns]', freq=None)
Use ``closed='right'`` to exclude `start` if it falls on the boundary.
>>> pp.date_range(
... start='2017-01-01', end='2017-01-04', closed='right'
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq=None)
"""
assert freq not in ["N", "ns"], "nanoseconds is not supported"
assert tz is None, "Localized DatetimeIndex is not supported"
return cast(
DatetimeIndex,
pp.from_pandas(
pd.date_range(
start=start,
end=end,
periods=periods,
freq=freq,
tz=tz,
normalize=normalize,
name=name,
closed=closed,
**kwargs
)
),
)
def get_dummies(
data,
prefix=None,
prefix_sep="_",
dummy_na=False,
columns=None,
sparse=False,
drop_first=False,
dtype=None,
) -> DataFrame:
"""
Convert categorical variable into dummy/indicator variables, also
known as one hot encoding.
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy-encoded columns should be backed by
a :class:`SparseArray` (True) or a regular NumPy array (False).
In Koalas, this value must be "False".
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
Returns
-------
dummies : DataFrame
See Also
--------
Series.str.get_dummies
Examples
--------
>>> s = pp.Series(list('abca'))
>>> pp.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> df = pp.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]},
... columns=['A', 'B', 'C'])
>>> pp.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pp.get_dummies(pp.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pp.get_dummies(pp.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> pp.get_dummies(pp.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
if sparse is not False:
raise NotImplementedError("get_dummies currently does not support sparse")
if columns is not None:
if not is_list_like(columns):
raise TypeError("Input must be a list-like for parameter `columns`")
if dtype is None:
dtype = "byte"
if isinstance(data, Series):
if prefix is not None:
prefix = [str(prefix)]
kdf = data.to_frame()
column_labels = kdf._internal.column_labels
remaining_columns = []
else:
if isinstance(prefix, str):
raise NotImplementedError(
"get_dummies currently does not support prefix as string types"
)
kdf = data.copy()
if columns is None:
column_labels = [
label
for label in kdf._internal.column_labels
if isinstance(
kdf._internal.spark_type_for(label), _get_dummies_default_accept_types
)
]
else:
if is_name_like_tuple(columns):
column_labels = [
label
for label in kdf._internal.column_labels
if label[: len(columns)] == columns
]
if len(column_labels) == 0:
raise KeyError(name_like_string(columns))
if prefix is None:
prefix = [
str(label[len(columns):])
if len(label) > len(columns) + 1
else label[len(columns)]
if len(label) == len(columns) + 1
else ""
for label in column_labels
]
elif any(isinstance(col, tuple) for col in columns) and any(
not is_name_like_tuple(col) for col in columns
):
raise ValueError(
"Expected tuple, got {}".format(
type(set(col for col in columns if not is_name_like_tuple(col)).pop())
)
)
else:
column_labels = [
label
for key in columns
for label in kdf._internal.column_labels
if label == key or label[0] == key
]
if len(column_labels) == 0:
if columns is None:
return kdf
raise KeyError("{} not in index".format(columns))
if prefix is None:
prefix = [str(label) if len(label) > 1 else label[0] for label in column_labels]
column_labels_set = set(column_labels)
remaining_columns = [
(
kdf[label]
if kdf._internal.column_labels_level == 1
else kdf[label].rename(name_like_string(label))
)
for label in kdf._internal.column_labels
if label not in column_labels_set
]
if any(
not isinstance(kdf._internal.spark_type_for(label), _get_dummies_acceptable_types)
for label in column_labels
):
raise NotImplementedError(
"get_dummies currently only accept {} values".format(
", ".join([t.typeName() for t in _get_dummies_acceptable_types])
)
)
if prefix is not None and len(column_labels) != len(prefix):
raise ValueError(
"Length of 'prefix' ({}) did not match the length of "
"the columns being encoded ({}).".format(len(prefix), len(column_labels))
)
elif isinstance(prefix, dict):
prefix = [prefix[column_label[0]] for column_label in column_labels]
all_values = _reduce_spark_multi(
kdf._internal.spark_frame,
[F.collect_set(kdf._internal.spark_column_for(label)) for label in column_labels],
)
for i, label in enumerate(column_labels):
values = all_values[i]
if isinstance(values, np.ndarray):
values = values.tolist()
values = sorted(values)
if drop_first:
values = values[1:]
def column_name(value):
if prefix is None or prefix[i] == "":
return value
else:
return "{}{}{}".format(prefix[i], prefix_sep, value)
for value in values:
remaining_columns.append(
(kdf[label].notnull() & (kdf[label] == value))
.astype(dtype)
.rename(column_name(value))
)
if dummy_na:
remaining_columns.append(kdf[label].isnull().astype(dtype).rename(column_name(np.nan)))
return kdf[remaining_columns]
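# Sketch of the core idea implemented above, written directly against a Koalas
# Series: one 0/1 column per category, produced by comparing the column to each
# value. The category list here is assumed; get_dummies collects it via Spark.
def _sketch_manual_dummies(kser):
    values = ["a", "b", "c"]  # assumed categories
    return concat(
        [(kser.notnull() & (kser == value)).astype("byte").rename(value) for value in values],
        axis=1,
    )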
# TODO: there are many parameters to implement and support. See pandas's pd.concat.
def concat(objs, axis=0, join="outer", ignore_index=False, sort=False) -> Union[Series, DataFrame]:
"""
Concatenate Koalas objects along a particular axis with optional set logic
along the other axes.
Parameters
----------
objs : a sequence of Series or DataFrame
Any None objects will be dropped silently unless
they are all None, in which case a ValueError will be raised.
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
ignore_index : bool, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
sort : bool, default False
Sort non-concatenation axis if it is not already aligned.
Returns
-------
object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
See Also
--------
Series.append : Concatenate Series.
DataFrame.join : Join DataFrames using indexes.
DataFrame.merge : Merge DataFrames by indexes or columns.
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
Combine two ``Series``.
>>> s1 = pp.Series(['a', 'b'])
>>> s2 = pp.Series(['c', 'd'])
>>> pp.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> pp.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = pp.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = pp.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> pp.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` and ``Series`` objects with different columns.
>>> pp.concat([df2, s1])
letter number 0
0 c 3.0 None
1 d 4.0 None
0 None NaN a
1 None NaN b
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``None`` values.
>>> df3 = pp.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> pp.concat([df1, df3])
letter number animal
0 a 1 None
1 b 2 None
0 c 3 cat
1 d 4 dog
Sort the columns.
>>> pp.concat([df1, df3], sort=True)
animal letter number
0 None a 1
1 None b 2
0 cat c 3
1 dog d 4
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> pp.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
>>> df4 = pp.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
Combine with column axis.
>>> pp.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
>>> reset_option("compute.ops_on_diff_frames")
"""
if isinstance(objs, (DataFrame, IndexOpsMixin)) or not isinstance(
objs, Iterable
): # TODO: support dict
raise TypeError(
"first argument must be an iterable of Koalas "
"objects, you passed an object of type "
'"{name}"'.format(name=type(objs).__name__)
)
if len(cast(Sized, objs)) == 0:
raise ValueError("No objects to concatenate")
objs = list(filter(lambda obj: obj is not None, objs))
if len(objs) == 0:
raise ValueError("All objects passed were None")
for obj in objs:
if not isinstance(obj, (Series, DataFrame)):
raise TypeError(
"cannot concatenate object of type "
"'{name}'"
"; only pp.Series "
"and pp.DataFrame are valid".format(name=type(obj).__name__)
)
if join not in ["inner", "outer"]:
raise ValueError("Only can inner (intersect) or outer (union) join the other axis.")
axis = validate_axis(axis)
if axis == 1:
kdfs = [obj.to_frame() if isinstance(obj, Series) else obj for obj in objs]
level = min(kdf._internal.column_labels_level for kdf in kdfs)
kdfs = [
DataFrame._index_normalized_frame(level, kdf)
if kdf._internal.column_labels_level > level
else kdf
for kdf in kdfs
]
concat_kdf = kdfs[0]
column_labels = concat_kdf._internal.column_labels.copy()
kdfs_not_same_anchor = []
for kdf in kdfs[1:]:
duplicated = [label for label in kdf._internal.column_labels if label in column_labels]
if len(duplicated) > 0:
pretty_names = [name_like_string(label) for label in duplicated]
raise ValueError(
"Labels have to be unique; however, got duplicated labels %s." % pretty_names
)
column_labels.extend(kdf._internal.column_labels)
if same_anchor(concat_kdf, kdf):
concat_kdf = DataFrame(
concat_kdf._internal.with_new_columns(
[
concat_kdf._kser_for(label)
for label in concat_kdf._internal.column_labels
]
+ [kdf._kser_for(label) for label in kdf._internal.column_labels]
)
)
else:
kdfs_not_same_anchor.append(kdf)
if len(kdfs_not_same_anchor) > 0:
def resolve_func(kdf, this_column_labels, that_column_labels):
raise AssertionError("This should not happen.")
for kdf in kdfs_not_same_anchor:
if join == "inner":
concat_kdf = align_diff_frames(
resolve_func, concat_kdf, kdf, fillna=False, how="inner",
)
elif join == "outer":
concat_kdf = align_diff_frames(
resolve_func, concat_kdf, kdf, fillna=False, how="full",
)
concat_kdf = concat_kdf[column_labels]
if ignore_index:
concat_kdf.columns = list(map(str, _range(len(concat_kdf.columns))))
if sort:
concat_kdf = concat_kdf.sort_index()
return concat_kdf
# Series, Series ...
# We should return Series if objects are all Series.
should_return_series = all(map(lambda obj: isinstance(obj, Series), objs))
# DataFrame, Series ... & Series, Series ...
# In this case, we should return DataFrame.
new_objs = []
num_series = 0
series_names = set()
for obj in objs:
if isinstance(obj, Series):
num_series += 1
series_names.add(obj.name)
obj = obj.to_frame(DEFAULT_SERIES_NAME)
new_objs.append(obj)
objs = new_objs
column_labels_levels = set(obj._internal.column_labels_level for obj in objs)
if len(column_labels_levels) != 1:
raise ValueError("MultiIndex columns should have the same levels")
# DataFrame, DataFrame, ...
# All Series are converted into DataFrame and then compute concat.
if not ignore_index:
indices_of_kdfs = [kdf.index for kdf in objs]
index_of_first_kdf = indices_of_kdfs[0]
for index_of_kdf in indices_of_kdfs:
if index_of_first_kdf.names != index_of_kdf.names:
raise ValueError(
"Index type and names should be same in the objects to concatenate. "
"You passed different indices "
"{index_of_first_kdf} and {index_of_kdf}".format(
index_of_first_kdf=index_of_first_kdf.names, index_of_kdf=index_of_kdf.names
)
)
column_labels_of_kdfs = [kdf._internal.column_labels for kdf in objs]
if ignore_index:
index_names_of_kdfs = [[] for _ in objs] # type: List
else:
index_names_of_kdfs = [kdf._internal.index_names for kdf in objs]
if all(name == index_names_of_kdfs[0] for name in index_names_of_kdfs) and all(
idx == column_labels_of_kdfs[0] for idx in column_labels_of_kdfs
):
# If all columns are in the same order and values, use it.
kdfs = objs
else:
if join == "inner":
interested_columns = set.intersection(*map(set, column_labels_of_kdfs))
# Keep the column order with its firsts DataFrame.
merged_columns = [
label for label in column_labels_of_kdfs[0] if label in interested_columns
]
# When multi-index column, although pandas is flaky if `join="inner" and sort=False`,
# always sort to follow the `join="outer"` case behavior.
if (len(merged_columns) > 0 and len(merged_columns[0]) > 1) or sort:
# FIXME: better ordering
merged_columns = sorted(merged_columns, key=name_like_string)
kdfs = [kdf[merged_columns] for kdf in objs]
elif join == "outer":
merged_columns = []
for labels in column_labels_of_kdfs:
merged_columns.extend(label for label in labels if label not in merged_columns)
assert len(merged_columns) > 0
if LooseVersion(pd.__version__) < LooseVersion("0.24"):
# Always sort when multi-index columns, and if there are Series, never sort.
sort = len(merged_columns[0]) > 1 or (num_series == 0 and sort)
else:
# Always sort when multi-index columns or there are more than two Series,
# and if there is only one Series, never sort.
sort = len(merged_columns[0]) > 1 or num_series > 1 or (num_series != 1 and sort)
if sort:
# FIXME: better ordering
merged_columns = sorted(merged_columns, key=name_like_string)
kdfs = []
for kdf in objs:
columns_to_add = list(set(merged_columns) - set(kdf._internal.column_labels))
# TODO: NaN and None difference for missing values. pandas seems filling NaN.
sdf = kdf._internal.resolved_copy.spark_frame
for label in columns_to_add:
sdf = sdf.withColumn(name_like_string(label), F.lit(None))
data_columns = kdf._internal.data_spark_column_names + [
name_like_string(label) for label in columns_to_add
]
kdf = DataFrame(
kdf._internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in kdf._internal.index_spark_column_names
],
column_labels=(kdf._internal.column_labels + columns_to_add),
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
data_dtypes=(kdf._internal.data_dtypes + ([None] * len(columns_to_add))),
)
)
kdfs.append(kdf[merged_columns])
if ignore_index:
sdfs = [kdf._internal.spark_frame.select(kdf._internal.data_spark_columns) for kdf in kdfs]
else:
sdfs = [
kdf._internal.spark_frame.select(
kdf._internal.index_spark_columns + kdf._internal.data_spark_columns
)
for kdf in kdfs
]
concatenated = reduce(lambda x, y: x.union(y), sdfs)
if ignore_index:
index_spark_column_names = []
index_names = []
index_dtypes = []
else:
index_spark_column_names = kdfs[0]._internal.index_spark_column_names
index_names = kdfs[0]._internal.index_names
index_dtypes = kdfs[0]._internal.index_dtypes
result_kdf = DataFrame(
kdfs[0]._internal.copy(
spark_frame=concatenated,
index_spark_columns=[scol_for(concatenated, col) for col in index_spark_column_names],
index_names=index_names,
index_dtypes=index_dtypes,
data_spark_columns=[
scol_for(concatenated, col) for col in kdfs[0]._internal.data_spark_column_names
],
data_dtypes=None, # TODO: dtypes?
)
) # type: DataFrame
if should_return_series:
# If all input were Series, we should return Series.
if len(series_names) == 1:
name = series_names.pop()
else:
name = None
return first_series(result_kdf).rename(name)
else:
return result_kdf
def melt(frame, id_vars=None, value_vars=None, var_name=None, value_name="value") -> DataFrame:
return DataFrame.melt(frame, id_vars, value_vars, var_name, value_name)
melt.__doc__ = DataFrame.melt.__doc__
def isna(obj):
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
Series.isna : Detect missing values in a Series.
Series.isnull : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
DataFrame.isnull : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Index.isnull : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pp.isna('dog')
False
>>> pp.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pp.isna(array)
array([[False, True, False],
[False, False, True]])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pp.DataFrame({'a': ['ant', 'bee', 'cat'], 'b': ['dog', None, 'fly']})
>>> df
a b
0 ant dog
1 bee None
2 cat fly
>>> pp.isna(df)
a b
0 False False
1 False True
2 False False
>>> pp.isnull(df.b)
0 False
1 True
2 False
Name: b, dtype: bool
"""
# TODO: Add back:
# notnull : Boolean inverse of pandas.isnull.
# into the See Also in the docstring. It does not find the method in the latest numpydoc.
if isinstance(obj, (DataFrame, Series)):
return obj.isnull()
else:
return pd.isnull(obj)
isnull = isna
def notna(obj):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. NA values, such as None or
:attr:`numpy.NaN`, get mapped to False values.
Returns
-------
bool or array-like of bool
Mask of bool values for each element that
indicates whether an element is not an NA value.
See Also
--------
isna : Detect missing values for an array-like object.
Series.notna : Boolean inverse of Series.isna.
DataFrame.notnull : Boolean inverse of DataFrame.isnull.
Index.notna : Boolean inverse of Index.isna.
Index.notnull : Boolean inverse of Index.isnull.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pp.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notnull()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pp.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> pp.notna(ser)
0 True
1 True
2 False
dtype: bool
>>> pp.notna(ser.index)
True
"""
# TODO: Add back:
# Series.notnull :Boolean inverse of Series.isnull.
# DataFrame.notna :Boolean inverse of DataFrame.isna.
# into the See Also in the docstring. It does not find the method in the latest numpydoc.
if isinstance(obj, (DataFrame, Series)):
return obj.notna()
else:
return pd.notna(obj)
notnull = notna
def merge(
obj,
right: "DataFrame",
how: str = "inner",
on: Union[Any, List[Any], Tuple, List[Tuple]] = None,
left_on: Union[Any, List[Any], Tuple, List[Tuple]] = None,
right_on: Union[Any, List[Any], Tuple, List[Tuple]] = None,
left_index: bool = False,
right_index: bool = False,
suffixes: Tuple[str, str] = ("_x", "_y"),
) -> "DataFrame":
"""
Merge DataFrame objects with a database-style join.
The index of the resulting DataFrame will be one of the following:
- 0...n if no index is used for merging
- Index of the left DataFrame if merged only on the index of the right DataFrame
- Index of the right DataFrame if merged only on the index of the left DataFrame
- All involved indices if merged using the indices of both DataFrames
e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
be an index (x, a, b)
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{'left', 'right', 'outer', 'inner'}, default 'inner'
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> df1 = pp.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = pp.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = pp.merge(df1, df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS
lkey value_x rkey value_y
...bar 2 bar 6
...baz 3 baz 7
...foo 1 foo 5
...foo 1 foo 8
...foo 5 foo 5
...foo 5 foo 8
>>> left_kdf = pp.DataFrame({'A': [1, 2]})
>>> right_kdf = pp.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> pp.merge(left_kdf, right_kdf, left_index=True, right_index=True).sort_index()
A B
1 2 x
>>> pp.merge(left_kdf, right_kdf, left_index=True, right_index=True, how='left').sort_index()
A B
0 1 None
1 2 x
>>> pp.merge(left_kdf, right_kdf, left_index=True, right_index=True, how='right').sort_index()
A B
1 2.0 x
2 NaN y
>>> pp.merge(left_kdf, right_kdf, left_index=True, right_index=True, how='outer').sort_index()
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
return obj.merge(
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
suffixes=suffixes,
)
def to_numeric(arg):
"""
Convert argument to a numeric type.
Parameters
----------
arg : scalar, list, tuple, 1-d array, or Series
Returns
-------
ret : numeric if parsing succeeded.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
>>> kser = pp.Series(['1.0', '2', '-3'])
>>> kser
0 1.0
1 2
2 -3
dtype: object
>>> pp.to_numeric(kser)
0 1.0
1 2.0
2 -3.0
dtype: float32
If the given Series contains values that cannot be cast to float, they are cast to `np.nan`
>>> kser = pp.Series(['apple', '1.0', '2', '-3'])
>>> kser
0 apple
1 1.0
2 2
3 -3
dtype: object
>>> pp.to_numeric(kser)
0 NaN
1 1.0
2 2.0
3 -3.0
dtype: float32
Lists, tuples, np.arrays, and scalars are also supported
>>> pp.to_numeric(['1.0', '2', '-3'])
array([ 1., 2., -3.])
>>> pp.to_numeric(('1.0', '2', '-3'))
array([ 1., 2., -3.])
>>> pp.to_numeric(np.array(['1.0', '2', '-3']))
array([ 1., 2., -3.])
>>> pp.to_numeric('1.0')
1.0
"""
if isinstance(arg, Series):
return arg._with_new_scol(arg.spark.column.cast("float"))
else:
return pd.to_numeric(arg)
def broadcast(obj) -> DataFrame:
"""
Marks a DataFrame as small enough for use in broadcast joins.
Parameters
----------
obj : DataFrame
Returns
-------
ret : DataFrame with broadcast hint.
See Also
--------
DataFrame.merge : Merge DataFrame objects with a database-style join.
DataFrame.join : Join columns of another DataFrame.
DataFrame.update : Modify in place using non-NA values from another DataFrame.
DataFrame.hint : Specifies some hint on the current DataFrame.
Examples
--------
>>> df1 = pp.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value']).set_index('lkey')
>>> df2 = pp.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value']).set_index('rkey')
>>> merged = df1.merge(pp.broadcast(df2), left_index=True, right_index=True)
>>> merged.spark.explain() # doctest: +ELLIPSIS
== Physical Plan ==
...
...BroadcastHashJoin...
...
"""
if not isinstance(obj, DataFrame):
raise ValueError("Invalid type : expected DataFrame got {}".format(type(obj).__name__))
return DataFrame(
obj._internal.with_new_sdf(F.broadcast(obj._internal.resolved_copy.spark_frame))
)
def read_orc(
path,
columns: Optional[List[str]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> "DataFrame":
"""
Load an ORC object from the file path, returning a DataFrame.
Parameters
----------
path : str
The path string storing the ORC file to be read.
columns : list, default None
If not None, only these columns will be read from the file.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame
Examples
--------
>>> pp.range(1).to_orc('%s/read_spark_io/data.orc' % path)
>>> pp.read_orc('%s/read_spark_io/data.orc' % path, columns=['id'])
id
0 0
You can preserve the index in the roundtrip as below.
>>> pp.range(1).to_orc('%s/read_spark_io/data.orc' % path, index_col="index")
>>> pp.read_orc('%s/read_spark_io/data.orc' % path, columns=['id'], index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
kdf = read_spark_io(path, format="orc", index_col=index_col, **options)
if columns is not None:
kdf_columns = kdf.columns
new_columns = list()
for column in list(columns):
if column in kdf_columns:
new_columns.append(column)
else:
raise ValueError("Unknown column name '{}'".format(column))
kdf = kdf[new_columns]
return kdf
def _get_index_map(
sdf: spark.DataFrame, index_col: Optional[Union[str, List[str]]] = None
) -> Tuple[Optional[List[spark.Column]], Optional[List[Tuple]]]:
if index_col is not None:
if isinstance(index_col, str):
index_col = [index_col]
sdf_columns = set(sdf.columns)
for col in index_col:
if col not in sdf_columns:
raise KeyError(col)
index_spark_columns = [
scol_for(sdf, col) for col in index_col
] # type: Optional[List[spark.Column]]
index_names = [(col,) for col in index_col] # type: Optional[List[Tuple]]
else:
index_spark_columns = None
index_names = None
return index_spark_columns, index_names
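# Small sketch of the mapping performed above (column names are hypothetical):
def _sketch_index_map():
    sdf = default_session().createDataFrame([(1, "a")], ["id", "val"])
    index_spark_columns, index_names = _get_index_map(sdf, index_col="id")
    # index_spark_columns -> [Column<'id'>], index_names -> [("id",)]
    return index_spark_columns, index_names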
_get_dummies_default_accept_types = (DecimalType, StringType, DateType)
_get_dummies_acceptable_types = _get_dummies_default_accept_types + (
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType,
BooleanType,
TimestampType,
)
def _test():
import os
import doctest
import shutil
import sys
import tempfile
import uuid
from pyspark.sql import SparkSession
import pyspark.pandas.namespace
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.namespace.__dict__.copy()
globs["pp"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.namespace tests")
.getOrCreate()
)
db_name = "db%s" % str(uuid.uuid4()).replace("-", "")
spark.sql("CREATE DATABASE %s" % db_name)
globs["db"] = db_name
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.namespace,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db_name)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
the-stack_0_10848 | """
Read a SAS XPort format file into a Pandas DataFrame.
Based on code from Jack Cushman (github.com/jcushman/xport).
The file format is defined here:
https://support.sas.com/techsup/technote/ts140.pdf
"""
from collections import abc
from datetime import datetime
import struct
import warnings
import numpy as np
from pandas.util._decorators import Appender
import pandas as pd
from pandas.io.common import get_filepath_or_buffer
from pandas.io.sas.sasreader import ReaderBase
_correct_line1 = (
"HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!"
"000000000000000000000000000000 "
)
_correct_header1 = (
"HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!000000000000000001600000000"
)
_correct_header2 = (
"HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!"
"000000000000000000000000000000 "
)
_correct_obs_header = (
"HEADER RECORD*******OBS HEADER RECORD!!!!!!!"
"000000000000000000000000000000 "
)
_fieldkeys = [
"ntype",
"nhfun",
"field_length",
"nvar0",
"name",
"label",
"nform",
"nfl",
"num_decimals",
"nfj",
"nfill",
"niform",
"nifl",
"nifd",
"npos",
"_",
]
_base_params_doc = """\
Parameters
----------
filepath_or_buffer : string or file-like object
Path to SAS file or object implementing binary read method."""
_params2_doc = """\
index : identifier of index column
Identifier of column that should be used as index of the DataFrame.
encoding : string
Encoding for text data.
chunksize : int
Read file `chunksize` lines at a time, returns iterator."""
_format_params_doc = """\
format : string
File format, only `xport` is currently supported."""
_iterator_doc = """\
iterator : boolean, default False
Return XportReader object for reading file incrementally."""
_read_sas_doc = f"""Read a SAS file into a DataFrame.
{_base_params_doc}
{_format_params_doc}
{_params2_doc}
{_iterator_doc}
Returns
-------
DataFrame or XportReader
Examples
--------
Read a SAS Xport file:
>>> df = pd.read_sas('filename.XPT')
Read a Xport file in 10,000 line chunks:
>>> itr = pd.read_sas('filename.XPT', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
"""
_xport_reader_doc = f"""\
Class for reading SAS Xport files.
{_base_params_doc}
{_params2_doc}
Attributes
----------
member_info : list
Contains information about the file
fields : list
Contains information about the variables in the file
"""
_read_method_doc = """\
Read observations from SAS Xport file, returning as data frame.
Parameters
----------
nrows : int
Number of rows to read from data file; if None, read whole
file.
Returns
-------
A DataFrame.
"""
def _parse_date(datestr: str) -> datetime:
""" Given a date in xport format, return Python date. """
try:
# e.g. "16FEB11:10:07:55"
return datetime.strptime(datestr, "%d%b%y:%H:%M:%S")
except ValueError:
return pd.NaT
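# e.g. _parse_date("16FEB11:10:07:55") -> datetime(2011, 2, 16, 10, 7, 55),
# and any string that does not match the xport layout comes back as pd.NaT.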
def _split_line(s: str, parts):
"""
Parameters
----------
s: str
Fixed-length string to split
parts: list of (name, length) pairs
Used to break up string, name '_' will be filtered from output.
Returns
-------
Dict of name:contents of string at given location.
"""
out = {}
start = 0
for name, length in parts:
out[name] = s[start : start + length].strip()
start += length
del out["_"]
return out
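# Small usage sketch of the fixed-width splitter above:
def _sketch_split_line():
    parts = [["prefix", 3], ["_", 2], ["version", 3]]
    return _split_line("SAS  9.4", parts)
    # -> {"prefix": "SAS", "version": "9.4"}; the "_" filler field is dropped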
def _handle_truncated_float_vec(vec, nbytes):
# This feature is not well documented, but some SAS XPORT files
# have 2-7 byte "truncated" floats. To read these truncated
# floats, pad them with zeros on the right to make 8 byte floats.
#
# References:
# https://github.com/jcushman/xport/pull/3
# The R "foreign" library
if nbytes != 8:
vec1 = np.zeros(len(vec), np.dtype("S8"))
dtype = np.dtype(f"S{nbytes},S{8 - nbytes}")
vec2 = vec1.view(dtype=dtype)
vec2["f0"] = vec
return vec2
return vec
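# Sketch of the zero-padding above: a hypothetical 4-byte truncated float becomes
# an 8-byte field padded with zero bytes on the right, ready for _parse_float_vec.
def _sketch_pad_truncated():
    vec = np.array([b"\x41\x10\x00\x00"], dtype="S4")  # assumed 4-byte IBM float
    padded = _handle_truncated_float_vec(vec, 4)
    # padded["f0"] holds the original 4 bytes, padded["f1"] is all zero bytes
    return padded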
def _parse_float_vec(vec):
"""
Parse a vector of float values representing IBM 8 byte floats into
native 8 byte floats.
"""
dtype = np.dtype(">u4,>u4")
vec1 = vec.view(dtype=dtype)
xport1 = vec1["f0"]
xport2 = vec1["f1"]
# Start by setting first half of ieee number to first half of IBM
# number sans exponent
ieee1 = xport1 & 0x00FFFFFF
# The fraction bit to the left of the binary point in the ieee
# format was set and the number was shifted 0, 1, 2, or 3
# places. This will tell us how to adjust the ibm exponent to be a
# power of 2 ieee exponent and how to shift the fraction bits to
# restore the correct magnitude.
shift = np.zeros(len(vec), dtype=np.uint8)
shift[np.where(xport1 & 0x00200000)] = 1
shift[np.where(xport1 & 0x00400000)] = 2
shift[np.where(xport1 & 0x00800000)] = 3
# shift the ieee number down the correct number of places then
# set the second half of the ieee number to be the second half
# of the ibm number shifted appropriately, ored with the bits
# from the first half that would have been shifted in if we
# could shift a double. All we are worried about are the low
# order 3 bits of the first half since we're only shifting by
# 1, 2, or 3.
ieee1 >>= shift
ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
# clear the 1 bit to the left of the binary point
ieee1 &= 0xFFEFFFFF
# set the exponent of the ieee number to be the actual exponent
# plus the shift count + 1023. Or this into the first half of the
# ieee number. The ibm exponent is excess 64 but is adjusted by 65
# since during conversion to ibm format the exponent is
# incremented by 1 and the fraction bits left 4 positions to the
# right of the radix point. (had to add >> 24 because C treats &
# 0x7f as 0x7f000000 and Python doesn't)
ieee1 |= ((((((xport1 >> 24) & 0x7F) - 65) << 2) + shift + 1023) << 20) | (
xport1 & 0x80000000
)
ieee = np.empty((len(ieee1),), dtype=">u4,>u4")
ieee["f0"] = ieee1
ieee["f1"] = ieee2
ieee = ieee.view(dtype=">f8")
ieee = ieee.astype("f8")
return ieee
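# Illustrative sketch, not part of the original pandas module: the IBM
# hexadecimal-float encoding of 1.0 is 0x4110000000000000 (sign 0, excess-64
# exponent byte 0x41, leading fraction hex digit 1), so feeding those raw
# big-endian bytes through _parse_float_vec should yield a native 1.0.
def _demo_parse_float_vec():
    ibm_one = np.array([b"\x41\x10\x00\x00\x00\x00\x00\x00"], dtype="S8")
    return _parse_float_vec(ibm_one)  # expected: array([1.])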
class XportReader(ReaderBase, abc.Iterator):
__doc__ = _xport_reader_doc
def __init__(
self, filepath_or_buffer, index=None, encoding="ISO-8859-1", chunksize=None
):
self._encoding = encoding
self._lines_read = 0
self._index = index
self._chunksize = chunksize
if isinstance(filepath_or_buffer, str):
filepath_or_buffer = get_filepath_or_buffer(
filepath_or_buffer, encoding=encoding
).filepath_or_buffer
if isinstance(filepath_or_buffer, (str, bytes)):
self.filepath_or_buffer = open(filepath_or_buffer, "rb")
else:
# Since xport files include non-text byte sequences, xport files
# should already be opened in binary mode in Python 3.
self.filepath_or_buffer = filepath_or_buffer
self._read_header()
def close(self):
self.filepath_or_buffer.close()
def _get_row(self):
return self.filepath_or_buffer.read(80).decode()
def _read_header(self):
self.filepath_or_buffer.seek(0)
# read file header
line1 = self._get_row()
if line1 != _correct_line1:
self.close()
raise ValueError("Header record is not an XPORT file.")
line2 = self._get_row()
fif = [["prefix", 24], ["version", 8], ["OS", 8], ["_", 24], ["created", 16]]
file_info = _split_line(line2, fif)
if file_info["prefix"] != "SAS SAS SASLIB":
self.close()
raise ValueError("Header record has invalid prefix.")
file_info["created"] = _parse_date(file_info["created"])
self.file_info = file_info
line3 = self._get_row()
file_info["modified"] = _parse_date(line3[:16])
# read member header
header1 = self._get_row()
header2 = self._get_row()
headflag1 = header1.startswith(_correct_header1)
headflag2 = header2 == _correct_header2
if not (headflag1 and headflag2):
self.close()
raise ValueError("Member header not found")
# usually 140, could be 135
fieldnamelength = int(header1[-5:-2])
# member info
mem = [
["prefix", 8],
["set_name", 8],
["sasdata", 8],
["version", 8],
["OS", 8],
["_", 24],
["created", 16],
]
member_info = _split_line(self._get_row(), mem)
mem = [["modified", 16], ["_", 16], ["label", 40], ["type", 8]]
member_info.update(_split_line(self._get_row(), mem))
member_info["modified"] = _parse_date(member_info["modified"])
member_info["created"] = _parse_date(member_info["created"])
self.member_info = member_info
# read field names
types = {1: "numeric", 2: "char"}
fieldcount = int(self._get_row()[54:58])
datalength = fieldnamelength * fieldcount
# round up to nearest 80
if datalength % 80:
datalength += 80 - datalength % 80
fielddata = self.filepath_or_buffer.read(datalength)
fields = []
obs_length = 0
while len(fielddata) >= fieldnamelength:
# pull data for one field
field, fielddata = (
fielddata[:fieldnamelength],
fielddata[fieldnamelength:],
)
# rest at end gets ignored, so if field is short, pad out
# to match struct pattern below
field = field.ljust(140)
fieldstruct = struct.unpack(">hhhh8s40s8shhh2s8shhl52s", field)
field = dict(zip(_fieldkeys, fieldstruct))
del field["_"]
field["ntype"] = types[field["ntype"]]
fl = field["field_length"]
if field["ntype"] == "numeric" and ((fl < 2) or (fl > 8)):
self.close()
msg = f"Floating field width {fl} is not between 2 and 8."
raise TypeError(msg)
for k, v in field.items():
try:
field[k] = v.strip()
except AttributeError:
pass
obs_length += field["field_length"]
fields += [field]
header = self._get_row()
if not header == _correct_obs_header:
self.close()
raise ValueError("Observation header not found.")
self.fields = fields
self.record_length = obs_length
self.record_start = self.filepath_or_buffer.tell()
self.nobs = self._record_count()
self.columns = [x["name"].decode() for x in self.fields]
# Setup the dtype.
dtypel = [
("s" + str(i), "S" + str(field["field_length"]))
for i, field in enumerate(self.fields)
]
dtype = np.dtype(dtypel)
self._dtype = dtype
def __next__(self):
return self.read(nrows=self._chunksize or 1)
def _record_count(self) -> int:
"""
Get number of records in file.
This is maybe suboptimal because we have to seek to the end of
the file.
Side effect: returns file position to record_start.
"""
self.filepath_or_buffer.seek(0, 2)
total_records_length = self.filepath_or_buffer.tell() - self.record_start
if total_records_length % 80 != 0:
warnings.warn("xport file may be corrupted")
if self.record_length > 80:
self.filepath_or_buffer.seek(self.record_start)
return total_records_length // self.record_length
self.filepath_or_buffer.seek(-80, 2)
last_card = self.filepath_or_buffer.read(80)
last_card = np.frombuffer(last_card, dtype=np.uint64)
# 8 byte blank
ix = np.flatnonzero(last_card == 2314885530818453536)
if len(ix) == 0:
tail_pad = 0
else:
tail_pad = 8 * len(ix)
self.filepath_or_buffer.seek(self.record_start)
return (total_records_length - tail_pad) // self.record_length
def get_chunk(self, size=None):
"""
Reads lines from Xport file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
def _missing_double(self, vec):
v = vec.view(dtype="u1,u1,u2,u4")
miss = (v["f1"] == 0) & (v["f2"] == 0) & (v["f3"] == 0)
miss1 = (
((v["f0"] >= 0x41) & (v["f0"] <= 0x5A))
| (v["f0"] == 0x5F)
| (v["f0"] == 0x2E)
)
miss &= miss1
return miss
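    # Descriptive note (not from the original source): SAS stores missing
    # numeric values as '.', '.A'-'.Z' or '._', i.e. a first byte of 0x2E,
    # 0x41-0x5A or 0x5F followed by seven zero bytes; the mask built above
    # flags exactly those byte patterns.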
@Appender(_read_method_doc)
def read(self, nrows=None):
if nrows is None:
nrows = self.nobs
read_lines = min(nrows, self.nobs - self._lines_read)
read_len = read_lines * self.record_length
if read_len <= 0:
self.close()
raise StopIteration
raw = self.filepath_or_buffer.read(read_len)
data = np.frombuffer(raw, dtype=self._dtype, count=read_lines)
df = pd.DataFrame(index=range(read_lines))
for j, x in enumerate(self.columns):
vec = data["s" + str(j)]
ntype = self.fields[j]["ntype"]
if ntype == "numeric":
vec = _handle_truncated_float_vec(vec, self.fields[j]["field_length"])
miss = self._missing_double(vec)
v = _parse_float_vec(vec)
v[miss] = np.nan
elif self.fields[j]["ntype"] == "char":
v = [y.rstrip() for y in vec]
if self._encoding is not None:
v = [y.decode(self._encoding) for y in v]
df[x] = v
if self._index is None:
df.index = range(self._lines_read, self._lines_read + read_lines)
else:
df = df.set_index(self._index)
self._lines_read += read_lines
return df
|
the-stack_0_10851 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Day 5 of AdventOfCode.com: regex matching"""
import re
import os
class RegexMatchCounter(object):
"""This class counts strings which satisfy all specified regular expressions
"""
def __init__(self, regex_strings):
"""The constructor needs a list of valid regular expressions.
:param regex_strings: list of valid regular expressions to be matched"""
self.__regexes = [re.compile(regex) for regex in regex_strings]
self.__count = 0
def check(self, target):
"""This method checks its string argument against regexes and, if all of them matched, increases the counter
:param target: string to be matched
"""
if all(reg.search(target) is not None for reg in self.__regexes):
self.__count += 1
def count(self):
""":return: the current value of how many strings have matched regexes
"""
return self.__count
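# Illustrative check (not part of the original script): "aaa" has three vowels,
# a doubled letter and none of the forbidden pairs, so the part-one matcher
# would count it, while "abc" would be rejected:
#
#   demo = RegexMatchCounter([r'([aeiou].*){3}', r'(.)\1', r'^((?!(ab)|(cd)|(pq)|(xy)).)*$'])
#   demo.check("aaa"); demo.check("abc")
#   assert demo.count() == 1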
matchers = [
RegexMatchCounter([r'([aeiou].*){3}', r'(.)\1', r'^((?!(ab)|(cd)|(pq)|(xy)).)*$']),
RegexMatchCounter([r'(..).*\1', r'(.).\1'])
]
with open(os.path.dirname(os.path.realpath('__file__')) + "/input/day5.txt", "r") as datafile:
for line in datafile:
for matcher in matchers:
matcher.check(line)
for matcher in matchers:
print(matcher.count())
|
the-stack_0_10852 | from ldpc.encoder.base_encoder import Encoder
import numpy.typing as npt
import numpy as np
from bitstring import Bits
from ldpc.utils.custom_exceptions import IncorrectLength
from ldpc.utils.qc_format import QCFile
import os
from numpy.typing import NDArray
from ldpc.wifi_spec_codes import WiFiSpecCode
from typing import Any
class EncoderWiFi(Encoder):
"""Encode messages according to the codes in the IEEE802.11n standard"""
_spec_base_path: str = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'code_specs', 'ieee802.11')
def __init__(self, spec: WiFiSpecCode) -> None:
"""
:param spec: specify which code from the spec we use
"""
self.spec = spec
qc_file = QCFile.from_file(os.path.join(self._spec_base_path, spec.name + ".qc"))
self.h = qc_file.to_array()
self.m, n = self.h.shape
k = n - self.m
self.z = qc_file.z
self.block_structure = qc_file.block_structure
super().__init__(k, n)
def encode(self, information_bits: Bits) -> Bits:
"""Based on: Efficient encoding of IEEE 802.11n LDPC codes,
https://www.researchgate.net/publication/3389450_Efficient_encoding_of_IEEE_80211n_LDPC_codes
"""
if len(information_bits) != self.k:
raise IncorrectLength
shifted_messages = self._shifted_messages(information_bits)
parities: npt.NDArray[np.int_] = np.zeros((self.m//self.z, self.z), dtype=np.int_)
# special parts see article
parities[0, :] = np.sum(shifted_messages, axis=0) % 2 # find first batch of z parity bits
parities[1, :] = (shifted_messages[0, :] + np.roll(parities[0, :], -1)) % 2 # find second set of z parity bits
parities[-1, :] = (shifted_messages[-1, :] + np.roll(parities[0, :], -1)) % 2 # find last set of z parity bits
for idx in range(1, (self.m//self.z)-2): # -1 needed to avoid exceeding memory limits due to idx+1 below.
# -2 needed as bottom row is a special case.
if self.block_structure[idx][self.k // self.z] >= 0:
# special treatment of x-th row, see article
parities[idx+1, :] = (parities[idx, :] + shifted_messages[idx, :] + parities[0, :]) % 2
else:
parities[idx+1, :] = (parities[idx, :] + shifted_messages[idx, :]) % 2
return information_bits + Bits(np.ravel(parities))
def _shifted_messages(self, information_bits: Bits) -> NDArray[np.int_]:
# break message bits into groups (rows) of Z bits. Each row is a subset of z bits, overall k message bits
bit_blocks: npt.NDArray[np.int_] = np.array(information_bits, dtype=np.int_).reshape((self.k // self.z, self.z))
# find shifted messages (termed lambda_i in article)
shifted_messages: npt.NDArray[np.int_] = np.zeros((self.m // self.z, self.z),
dtype=np.int_) # each row is a sum of circular shifts of
# message bits (some lambda_i in article). One row per block of h.
for i in range(self.m // self.z):
for j in range(self.k // self.z):
if self.block_structure[i][j] >= 0: # zero blocks don't contribute to parity bits
# multiply by translation reduces to shift.
vec: npt.NDArray[Any] = np.roll(bit_blocks[j, :], -self.block_structure[i][j])
shifted_messages[i, :] = np.logical_xor(shifted_messages[i, :], vec) # xor as sum mod 2
return shifted_messages
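# Hypothetical usage sketch (not part of the original module). The enum member
# name below is an assumption made only for illustration; real names come from
# ldpc.wifi_spec_codes.WiFiSpecCode.
#
#   enc = EncoderWiFi(WiFiSpecCode.N648_R12)   # assumed 648-bit, rate-1/2 code
#   message = Bits(enc.k)                      # all-zero information word of k bits
#   codeword = enc.encode(message)             # k message bits followed by m parity bits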
|
the-stack_0_10853 | # Basic python
import numpy as np
import scipy as scp
from scipy.stats import gamma
from scipy.stats import mode
from scipy.stats import itemfreq
import pandas as pd
import random
# Parallelization
import multiprocessing as mp
from multiprocessing import Process
from multiprocessing import Pool
import psutil
import argparse
# System utilities
from datetime import datetime
import time
import os
import pickle
import uuid
import glob
import gc
# My own code
import kde_class
#import ddm_data_simulation as ddm_simulator
import boundary_functions as bf
from cdwiener import batch_fptd
# Plotting
import matplotlib.pyplot as plt
# /users/afengler/data/kde/full_ddm/training_data_binned_0_nbins_0_n_20000/full_ddm_nchoices_2_train_data_binned_0_nbins_0_n_20000_213.pickle
# /users/afengler/data/kde/full_ddm/training_data_binned_0_nbins_0_n_20000/simulator_statistics_213.pickle
def filter_simulations_fast(base_simulation_folder = '',
file_name_prefix = '',
file_id = 0,
method_params = [],
param_ranges = 'none', # either 'none' or dict that specifies allowed ranges for parameters
filters = {'mode': 20, # != (checking if mode is max_rt)
'choice_cnt': 0, # > (checking that each choice receive at least 10 samples in simulator)
'mean_rt': 15, # < (checking that mean_rt is smaller than specified value
'std': 0, # > (checking that std is positive for each choice)
'mode_cnt_rel': 0.5 # < (checking that mode does not receive more than a proportion of samples for each choice)
}
):
file_ = pickle.load(open( base_simulation_folder + file_name_prefix + '_' + str(file_id) + '.pickle', 'rb' ))
init_cols = method_params['param_names'] + method_params['boundary_param_names']
n_datasets = file_[1].shape[0]
# Initialize data frame
sim_stat_data = pd.DataFrame(file_[0],
columns = init_cols)
# MAX RT BY SIMULATION: TEST SHOULD BE CONSISTENT
n_simulations = file_[1].shape[1] #['n_samples']
# TODO: BASE SIMULATIONS FILES NEED TO HOLD THE N-CHOICES PROPERTY DIRECTLY
# TODO RESOLVED
n_choices = len(file_[2]['possible_choices'])
#n_choices = len(np.unique(file_[1][0, :, 1])) # ['n_choices']
# TODO: BASE SIMULATIONS NEED TO HOLD THE UNIQUE CHOICES PROPERTY DIRECTLY
# RIGHT NOW THIS CODE USES THE DATA ITSELF TO RECOVER THE POSSIBLE CHOICES BUT THIS ALLOWS FOR READING IN N CHOICES < REAL N CHOICES
# TODO RESOLVED
choices = file_[2]['possible_choices']
#choices = np.unique(file_[1][0, :, 1])
#n_choices = len(file_[0][2]['possible_choices'])
#choices = file_[0][2]['possible_choices']
max_rts = np.zeros((n_datasets, 1))
max_t = file_[2]['max_t']
sim_stat_data['max_t'] = max_t
#max_ts[:] = max_t
max_ts = np.zeros((n_datasets, 1))
stds = np.zeros((n_datasets, n_choices))
mean_rts = np.zeros((n_datasets, n_choices))
choice_cnts = np.zeros((n_datasets, n_choices))
modes = np.zeros((n_datasets, n_choices))
mode_cnts = np.zeros((n_datasets, n_choices))
#sim_stat_data = [None] * n_datasets
cnt = 0
for i in range(n_datasets):
max_rts[i] = (file_[1][i, :, 0].max().round(2))
max_ts[i] = max_t
#max_ts[i] = (file_[1][i][2]['max_t'])
# Standard deviation of reaction times
choice_cnt = 0
for choice_tmp in choices:
tmp_rts = file_[1][i, :, 0][file_[1][i, :, 1] == choice_tmp]
n_c = len(tmp_rts)
choice_cnts[cnt, choice_cnt] = n_c
mode_tmp = mode(tmp_rts)
if n_c > 0:
mean_rts[cnt, choice_cnt] = np.mean(tmp_rts)
stds[cnt, choice_cnt] = np.std(tmp_rts)
modes[cnt, choice_cnt] = float(mode_tmp[0])
mode_cnts[cnt, choice_cnt] = int(mode_tmp[1])
else:
mean_rts[cnt, choice_cnt] = - 1
stds[cnt, choice_cnt] = - 1
modes[cnt, choice_cnt] = - 1
mode_cnts[cnt, choice_cnt] = 0
choice_cnt += 1
# Basic data column
# TODO: Put this back in respecting new input format
#sim_stat_data[cnt] = [file_[i][2][key] for key in list(file_[i][2].keys())]
cnt += 1
if cnt % 1000 == 0:
print(cnt)
#sim_stat_data = pd.DataFrame(sim_stat_data, columns = file_[0][2].keys())
# Compute some more columns
for i in range(0, n_choices, 1):
sim_stat_data['mean_rt_' + str(i)] = mean_rts[:, i]
sim_stat_data['std_' + str(i)] = stds[:, i]
sim_stat_data['choice_cnt_' + str(i)] = choice_cnts[:,i]
sim_stat_data['mode_' + str(i)] = modes[:, i]
sim_stat_data['mode_cnt_' + str(i)] = mode_cnts[:, i]
# Derived Columns
sim_stat_data['choice_prop_' + str(i)] = sim_stat_data['choice_cnt_' + str(i)] / n_simulations
sim_stat_data['mode_cnt_rel_' + str(i)] = sim_stat_data['mode_cnt_' + str(i)] / sim_stat_data['choice_cnt_' + str(i)]
# Clean-up
sim_stat_data = sim_stat_data.round(decimals = 2)
sim_stat_data = sim_stat_data.fillna(value = 0)
# check that max_t is consistently the same value across simulations
#assert len(np.unique(max_ts)) == 1
# Now filtering
# FILTER 1: PARAMETER RANGES
if param_ranges == 'none':
keep = sim_stat_data['max_t'] >= 0 # should return a vector of all true's
else:
cnt = 0
for param in param_ranges.keys():
if cnt == 0:
keep = (sim_stat_data[param] >= param_ranges[param][0]) & (sim_stat_data[param] <= param_ranges[param][1])
else:
keep = (keep) & \
(sim_stat_data[param] >= param_ranges[param][0]) & (sim_stat_data[param] <= param_ranges[param][1])
cnt += 1
# FILTER 2: SANITY CHECKS (Filter-bank)
for i in range(0, n_choices, 1):
keep = (keep) & \
(sim_stat_data['mode_' + str(i)] != filters['mode']) & \
(sim_stat_data['choice_cnt_' + str(i)] > filters['choice_cnt']) & \
(sim_stat_data['mean_rt_' + str(i)] < filters['mean_rt']) & \
(sim_stat_data['std_' + str(i)] > filters['std']) & \
(sim_stat_data['mode_cnt_rel_' + str(i)] < filters['mode_cnt_rel'])
# Add keep_file column to
sim_stat_data['keep_file'] = keep
# Write files:
#pickle.dump(list(sim_stat_data.loc[keep, 'file']), open(base_simulation_folder + '/keep_files.pickle', 'wb'))
pickle.dump(sim_stat_data,
open(base_simulation_folder + '/simulator_statistics' + '_' + str(file_id) + '.pickle', 'wb'))
return sim_stat_data
def make_kde_data(data = [], metadata = [], n_kde = 100, n_unif_up = 100, n_unif_down = 100, idx = 0):
# def make_kde_data(n_kde = 100, n_unif_up = 100, n_unif_down = 100, idx = 0):
# meta_data = file_[2]
# data = file_[1][idx, :, :]
out = np.zeros((n_kde + n_unif_up + n_unif_down, 3))
tmp_kde = kde_class.logkde((data[:, 0], data[:, 1], metadata))
# Get kde part
samples_kde = tmp_kde.kde_sample(n_samples = n_kde)
likelihoods_kde = tmp_kde.kde_eval(data = samples_kde).ravel()
out[:n_kde, 0] = samples_kde[0].ravel()
out[:n_kde, 1] = samples_kde[1].ravel()
out[:n_kde, 2] = likelihoods_kde
# Get positive uniform part:
choice_tmp = np.random.choice(metadata['possible_choices'], size = n_unif_up)
if metadata['max_t'] < 100:
rt_tmp = np.random.uniform(low = 0.0001,
high = metadata['max_t'],
size = n_unif_up)
else:
rt_tmp = np.random.uniform(low = 0.0001,
high = 100,
size = n_unif_up)
likelihoods_unif = tmp_kde.kde_eval(data = (rt_tmp, choice_tmp)).ravel()
out[n_kde:(n_kde + n_unif_up), 0] = rt_tmp
out[n_kde:(n_kde + n_unif_up), 1] = choice_tmp
out[n_kde:(n_kde + n_unif_up), 2] = likelihoods_unif
# Get negative uniform part:
choice_tmp = np.random.choice(metadata['possible_choices'], #['possible_choices'],
size = n_unif_down)
rt_tmp = np.random.uniform(low = - 1.0,
high = 0.0001,
size = n_unif_down)
out[(n_kde + n_unif_up):, 0] = rt_tmp
out[(n_kde + n_unif_up):, 1] = choice_tmp
out[(n_kde + n_unif_up):, 2] = -66.77497
if idx % 10 == 0:
print(idx)
    return out.astype(np.float64)  # np.float alias is removed in newer NumPy; use the explicit dtype
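# Layout of the array returned by make_kde_data (descriptive note, not part of
# the original code): columns are (rt, choice, log-likelihood); rows
# [0, n_kde) hold KDE samples scored by the KDE, rows [n_kde, n_kde + n_unif_up)
# hold uniform rts in (0, max_t] scored by the KDE, and the remaining
# n_unif_down rows hold negative rts pinned to -66.77497 = log(1e-29).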
def make_fptd_data(data = [], params = [], metadata = [], n_kde = 100, n_unif_up = 100, n_unif_down = 100, idx = 0):
out = np.zeros((n_kde + n_unif_up + n_unif_down, 3))
tmp_kde = kde_class.logkde((data[:, 0], data[:, 1], metadata))
# Get kde part
samples_kde = tmp_kde.kde_sample(n_samples = n_kde)
out[:n_kde, 0] = samples_kde[0].ravel()
out[:n_kde, 1] = samples_kde[1].ravel()
# If we have 4 parameters we know we have the ddm --> use default sdv = 0
if len(params) == 4:
out[:n_kde, 2] = np.log(batch_fptd(out[:n_kde, 0] * out[:n_kde, 1] * ( -1),
params[0],
params[1] * 2,
params[2],
params[3]))
# If we have 5 parameters but analytic we know we need to use the ddm_sdv --> supply sdv value to batch_fptd
if len(params) == 5:
out[:n_kde, 2] = np.log(batch_fptd(out[:n_kde, 0] * out[:n_kde, 1] * ( -1),
params[0],
params[1] * 2,
params[2],
params[3],
params[4]))
# Get positive uniform part:
choice_tmp = np.random.choice(metadata['possible_choices'], size = n_unif_up)
if metadata['max_t'] < 100:
rt_tmp = np.random.uniform(low = 0.0001,
high = metadata['max_t'],
size = n_unif_up)
else:
rt_tmp = np.random.uniform(low = 0.0001,
high = 100,
size = n_unif_up)
likelihoods_unif = tmp_kde.kde_eval(data = (rt_tmp, choice_tmp)).ravel()
out[n_kde:(n_kde + n_unif_up), 0] = rt_tmp
out[n_kde:(n_kde + n_unif_up), 1] = choice_tmp
# If we have 4 parameters we know we have the ddm --> use default sdv = 0
if len(params) == 4:
out[n_kde:(n_kde + n_unif_up), 2] = np.log(batch_fptd(out[n_kde:(n_kde + n_unif_up), 0] * out[n_kde:(n_kde + n_unif_up), 1] * (- 1),
params[0],
params[1] * 2,
params[2],
params[3]))
# If we have 5 parameters but analytic we know we need to use the ddm_sdv --> supply sdv value to batch_fptd
if len(params) == 5:
out[n_kde:(n_kde + n_unif_up), 2] = np.log(batch_fptd(out[n_kde:(n_kde + n_unif_up), 0] * out[n_kde:(n_kde + n_unif_up), 1] * (- 1),
params[0],
params[1] * 2,
params[2],
params[3],
params[4]))
# Get negative uniform part:
choice_tmp = np.random.choice(metadata['possible_choices'],
size = n_unif_down)
rt_tmp = np.random.uniform(low = - 1.0,
high = 0.0001,
size = n_unif_down)
out[(n_kde + n_unif_up):, 0] = rt_tmp
out[(n_kde + n_unif_up):, 1] = choice_tmp
out[(n_kde + n_unif_up):, 2] = -66.77497
if idx % 10 == 0:
print(idx)
    return out.astype(np.float64)  # np.float alias is removed in newer NumPy; use the explicit dtype
# We should be able to parallelize this !
def kde_from_simulations_fast_parallel(base_simulation_folder = '',
file_name_prefix = '',
file_id = 1,
target_folder = '',
n_by_param = 3000,
mixture_p = [0.8, 0.1, 0.1],
process_params = ['v', 'a', 'w', 'c1', 'c2'],
print_info = False,
n_processes = 'all',
analytic = False):
# Parallel
if n_processes == 'all':
n_cpus = psutil.cpu_count(logical = False)
else:
n_cpus = n_processes
print('Number of cpus: ')
print(n_cpus)
file_ = pickle.load(open( base_simulation_folder + '/' + file_name_prefix + '_' + str(file_id) + '.pickle', 'rb' ) )
stat_ = pickle.load(open( base_simulation_folder + '/simulator_statistics' + '_' + str(file_id) + '.pickle', 'rb' ) )
# Initialize dataframe
# Initializations
n_kde = int(n_by_param * mixture_p[0])
n_unif_down = int(n_by_param * mixture_p[1])
n_unif_up = int(n_by_param * mixture_p[2])
n_kde = n_kde + (n_by_param - n_kde - n_unif_up - n_unif_down) # correct n_kde if sum != n_by_param
# Add possible choices to file_[2] which is the meta data for the simulator (expected when loaded the kde class)
    # TODO: THIS INFORMATION SHOULD BE INCLUDED AS META-DATA INTO THE BASE SIMULATION FILES
file_[2]['possible_choices'] = np.unique([-1, 1])
#file_[2]['possible_choices'] = np.unique(file_[1][0, :, 1])
file_[2]['possible_choices'].sort()
# CONTINUE HERE
# Preparation loop --------------------------------------------------------------------
#s_id_kde = np.sum(stat_['keep_file']) * (n_unif_down + n_unif_up)
cnt = 0
starmap_iterator = ()
tmp_sim_data_ok = 0
results = []
for i in range(file_[1].shape[0]):
if stat_['keep_file'][i]:
# Don't remember what this part is doing....
if tmp_sim_data_ok:
pass
else:
tmp_sim_data = file_[1][i]
tmp_sim_data_ok = 1
lb = cnt * (n_unif_down + n_unif_up + n_kde)
# Allocate to starmap tuple for mixture component 3
if analytic:
starmap_iterator += ((file_[1][i, :, :].copy(), file_[0][i, :].copy(), file_[2].copy(), n_kde, n_unif_up, n_unif_down, cnt), )
else:
starmap_iterator += ((file_[1][i, :, :], file_[2], n_kde, n_unif_up, n_unif_down, cnt), )
cnt += 1
if (cnt % 100 == 0) or (i == file_[1].shape[0] - 1):
with Pool(processes = n_cpus, maxtasksperchild = 200) as pool:
results.append(np.array(pool.starmap(make_kde_data, starmap_iterator)).reshape((-1, 3)))
starmap_iterator = ()
print(i, 'arguments generated')
if not stat_['keep_file'][i]:
if (i == (file_[1].shape[0] - 1)) and len(starmap_iterator) > 0:
with Pool(processes = n_cpus, maxtasksperchild = 200) as pool:
results.append(np.array(pool.starmap(make_kde_data, starmap_iterator)).reshape((-1, 3)))
starmap_iterator = ()
print(i, 'last dataset was not kept')
my_columns = process_params + ['rt', 'choice', 'log_l']
data = pd.DataFrame(np.zeros((np.sum(stat_['keep_file']) * n_by_param, len(my_columns))),
columns = my_columns)
data.values[:, -3:] = np.concatenate(results)
# Filling in training data frame ---------------------------------------------------
cnt = 0
tmp_sim_data_ok = 0
for i in range(file_[1].shape[0]):
if stat_['keep_file'][i]:
# Don't remember what this part is doing....
if tmp_sim_data_ok:
pass
else:
tmp_sim_data = file_[1][i]
tmp_sim_data_ok = 1
lb = cnt * (n_unif_down + n_unif_up + n_kde)
# Make empty dataframe of appropriate size
p_cnt = 0
for param in process_params:
data.iloc[(lb):(lb + n_unif_down + n_unif_up + n_kde), my_columns.index(param)] = file_[0][i, p_cnt]
p_cnt += 1
cnt += 1
# ----------------------------------------------------------------------------------
# Store data
print('writing data to file: ', target_folder + '/data_' + str(file_id) + '.pickle')
pickle.dump(data.values, open(target_folder + '/data_' + str(file_id) + '.pickle', 'wb'), protocol = 4)
#data.to_pickle(target_folder + '/data_' + str(file_id) + '.pickle' , protocol = 4)
# Write metafile if it doesn't exist already
# Hack for now: Just copy one of the base simulations files over
if os.path.isfile(target_folder + '/meta_data.pickle'):
pass
else:
pickle.dump(tmp_sim_data, open(target_folder + '/meta_data.pickle', 'wb') )
return 0 #data
# UNUSED FROM PREV FUNCTION
# Garbage collection before starting pool:
# del file_
# gc.collect()
# if analytic:
# with Pool(processes = n_cpus, maxtasksperchild = 200) as pool:
# #result = np.array(pool.starmap(make_fptd_data, starmap_iterator)) #.reshape((-1, 3))
# result = pool.starmap(make_fptd_data, starmap_iterator)
# else:
# with Pool(processes = n_cpus, maxtasksperchild = 200) as pool:
# #result = np.array(pool.starmap(make_kde_data, starmap_iterator)) #.reshape((-1, 3))
# result = pool.starmap(make_kde_data, starmap_iterator)
# result = np.array(result).reshape((-1, 3))
# Make dataframe to save
# Initialize dataframe
def kde_from_simulations_fast(base_simulation_folder = '',
file_name_prefix = '',
file_id = 1,
target_folder = '',
n_by_param = 3000,
mixture_p = [0.8, 0.1, 0.1],
process_params = ['v', 'a', 'w', 'c1', 'c2'],
print_info = False
):
file_ = pickle.load(open( base_simulation_folder + '/' + file_name_prefix + '_' + str(file_id) + '.pickle', 'rb' ) )
stat_ = pickle.load(open( base_simulation_folder + '/simulator_statistics' + '_' + str(file_id) + '.pickle', 'rb' ) )
# Initialize dataframe
my_columns = process_params + ['rt', 'choice', 'log_l']
data = pd.DataFrame(np.zeros((np.sum(stat_['keep_file']) * n_by_param, len(my_columns))),
columns = my_columns)
n_kde = int(n_by_param * mixture_p[0])
n_unif_down = int(n_by_param * mixture_p[1])
n_unif_up = int(n_by_param * mixture_p[2])
n_kde = n_kde + (n_by_param - n_kde - n_unif_up - n_unif_down) # correct n_kde if sum != n_by_param
# Add possible choices to file_[2] which is the meta data for the simulator (expected when loaded the kde class)
    # TODO: THIS INFORMATION SHOULD BE INCLUDED AS META-DATA INTO THE BASE SIMULATION FILES
file_[2]['possible_choices'] = np.unique([-1,1])
#file_[2]['possible_choices'] = np.unique(file_[1][0, :, 1])
file_[2]['possible_choices'].sort()
# CONTINUE HERE
# Main while loop --------------------------------------------------------------------
#row_cnt = 0
cnt = 0
for i in range(file_[1].shape[0]):
if stat_['keep_file'][i]:
# Read in simulator file
tmp_sim_data = file_[1][i]
lb = cnt * n_by_param
# Make empty dataframe of appropriate size
p_cnt = 0
for param in process_params:
data.iloc[(lb):(lb + n_by_param), my_columns.index(param)] = file_[0][i, p_cnt] #tmp_sim_data[2][param]
p_cnt += 1
# MIXTURE COMPONENT 1: Get simulated data from kde -------------------------------
tmp_kde = kde_class.logkde((file_[1][i, :, 0], file_[1][i, :, 1], file_[2])) #[tmp_sim_data)
tmp_kde_samples = tmp_kde.kde_sample(n_samples = n_kde)
data.iloc[lb:(lb + n_kde), my_columns.index('rt')] = tmp_kde_samples[0].ravel()
data.iloc[lb:(lb + n_kde), my_columns.index('choice')] = tmp_kde_samples[1].ravel()
data.iloc[lb:(lb + n_kde), my_columns.index('log_l')] = tmp_kde.kde_eval(data = tmp_kde_samples).ravel()
# --------------------------------------------------------------------------------
# MIXTURE COMPONENT 2: Negative uniform part -------------------------------------
choice_tmp = np.random.choice(file_[2]['possible_choices'], #['possible_choices'],
size = n_unif_down)
rt_tmp = np.random.uniform(low = - 1,
high = 0.0001,
size = n_unif_down)
data.iloc[(lb + n_kde):(lb + n_kde + n_unif_down), my_columns.index('rt')] = rt_tmp
data.iloc[(lb + n_kde):(lb + n_kde + n_unif_down), my_columns.index('choice')] = choice_tmp
data.iloc[(lb + n_kde):(lb + n_kde + n_unif_down), my_columns.index('log_l')] = -66.77497 # the number corresponds to log(1e-29)
# ---------------------------------------------------------------------------------
# MIXTURE COMPONENT 3: Positive uniform part --------------------------------------
choice_tmp = np.random.choice(file_[2]['possible_choices'],
size = n_unif_up)
if file_[2]['max_t'] < 100:
rt_tmp = np.random.uniform(low = 0.0001,
high = file_[2]['max_t'],
size = n_unif_up)
else:
rt_tmp = np.random.uniform(low = 0.0001,
high = 100,
size = n_unif_up)
data.iloc[(lb + n_kde + n_unif_down):(lb + n_by_param), my_columns.index('rt')] = rt_tmp
data.iloc[(lb + n_kde + n_unif_down):(lb + n_by_param), my_columns.index('choice')] = choice_tmp
data.iloc[(lb + n_kde + n_unif_down):(lb + n_by_param), my_columns.index('log_l')] = tmp_kde.kde_eval(data = (rt_tmp, choice_tmp))
# ----------------------------------------------------------------------------------
cnt += 1
if i % 10 == 0:
print(i, 'kdes generated')
# -----------------------------------------------------------------------------------
# Store data
print('writing data to file: ', target_folder + '/data_' + str(file_id) + '.pickle')
pickle.dump(data.values, open(target_folder + '/data_' + str(file_id) + '.pickle', 'wb'), protocol = 4)
# Write metafile if it doesn't exist already
# Hack for now: Just copy one of the base simulations files over
if os.path.isfile(target_folder + '/meta_data.pickle'):
pass
else:
pickle.dump(tmp_sim_data, open(target_folder + '/meta_data.pickle', 'wb') )
return data
def kde_load_data_new(path = '',
file_id_list = '',
prelog_cutoff_low = 1e-29,
prelog_cutoff_high = 100,
n_samples_by_dataset = 10000000,
return_log = True,
make_split = True,
val_p = 0.01):
# Read in two datasets to get meta data for the subsequent
print('Reading in initial dataset')
tmp_data = np.load(path + file_id_list[0], allow_pickle = True)
# Collect some meta data
n_files = len(file_id_list)
print('n_files: ', n_files)
print('n_samples_by_dataset: ', n_samples_by_dataset)
# Allocate memory for data
print('Allocating data arrays')
features = np.zeros((n_files * n_samples_by_dataset, tmp_data.shape[1] - 1))
labels = np.zeros((n_files * n_samples_by_dataset, 1))
# Read in data of initialization files
cnt_samples = tmp_data.shape[0]
features[:cnt_samples, :] = tmp_data[:, :-1]
labels[:cnt_samples, 0] = tmp_data[:, -1]
# Read in remaining files into preallocated np.array
for i in range(1, n_files, 1):
tmp_data = np.load(path + file_id_list[i], allow_pickle = True)
n_rows_tmp = tmp_data.shape[0]
features[(cnt_samples): (cnt_samples + n_rows_tmp), :] = tmp_data[:, :-1]
labels[(cnt_samples): (cnt_samples + n_rows_tmp), 0] = tmp_data[:, -1]
cnt_samples += n_rows_tmp
print(i, ' files processed')
features.resize((cnt_samples, features.shape[1]), refcheck = False)
labels.resize((cnt_samples, labels.shape[1]), refcheck = False)
print('new n rows features: ', features.shape[0])
print('new n rows labels: ', labels.shape[0])
if prelog_cutoff_low != 'none':
labels[labels < np.log(prelog_cutoff_low)] = np.log(prelog_cutoff_low)
if prelog_cutoff_high != 'none':
labels[labels > np.log(prelog_cutoff_high)] = np.log(prelog_cutoff_high)
if return_log == False:
labels = np.exp(labels)
if make_split:
# Making train test split
print('Making train test split...')
train_idx = np.random.choice(a = [False, True], size = cnt_samples, p = [val_p, 1 - val_p])
test_idx = np.invert(train_idx)
return ((features[train_idx, :], labels[train_idx, :]), (features[test_idx, :], labels[test_idx, :]))
else:
return features, labels
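# Hypothetical call pattern (file names below are placeholders, not taken from
# the original pipeline): load two generated training files and get a
# train/validation split of features and log-likelihood labels.
#
#   (train_x, train_y), (val_x, val_y) = kde_load_data_new(
#       path = '/some/target_folder/',
#       file_id_list = ['data_1.pickle', 'data_2.pickle'],
#       n_samples_by_dataset = 3000000)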
|
the-stack_0_10855 | """FragmentVC model architecture."""
from typing import Tuple, List, Optional
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .convolutional_transformer import Smoother, Extractor
class FragmentVC(nn.Module):
"""
FragmentVC uses Wav2Vec feature of the source speaker to query and attend
on mel spectrogram of the target speaker.
"""
def __init__(self, d_model=512):
super().__init__()
self.unet = UnetBlock(d_model)
self.smoothers = nn.TransformerEncoder(Smoother(d_model, 2, 1024), num_layers=3)
self.mel_linear = nn.Linear(d_model, 80)
self.post_net = nn.Sequential(
nn.Conv1d(80, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 80, kernel_size=5, padding=2),
nn.BatchNorm1d(80),
nn.Dropout(0.5),
)
def forward(
self,
srcs: Tensor,
refs: Tensor,
refs_features: Optional[Tensor] = None,
src_masks: Optional[Tensor] = None,
ref_masks: Optional[Tensor] = None,
) -> Tuple[Tensor, List[Optional[Tensor]]]:
"""Forward function.
Args:
srcs: (batch, src_len, 768)
src_masks: (batch, src_len)
refs: (batch, 80, ref_len)
refs_features: (batch, ref_len, 768)
ref_masks: (batch, ref_len)
"""
# out: (src_len, batch, d_model)
out, attns = self.unet(srcs, refs, refs_features=refs_features, src_masks=src_masks, ref_masks=ref_masks)
# out: (src_len, batch, d_model)
out = self.smoothers(out, src_key_padding_mask=src_masks)
# out: (src_len, batch, 80)
out = self.mel_linear(out)
# out: (batch, 80, src_len)
out = out.transpose(1, 0).transpose(2, 1)
refined = self.post_net(out)
out = out + refined
# out: (batch, 80, src_len)
return out, attns
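# Minimal shape-check sketch (illustrative, not part of the original module;
# tensor sizes are arbitrary and `import torch` is assumed). refs_features is
# passed explicitly because UnetBlock.forward below transposes it unconditionally.
#
#   model = FragmentVC()
#   srcs = torch.rand(2, 100, 768)            # Wav2Vec features of the source
#   refs = torch.rand(2, 80, 250)             # target-speaker mel spectrogram
#   refs_features = torch.rand(2, 250, 768)   # Wav2Vec features of the target
#   outs, attns = model(srcs, refs, refs_features=refs_features)
#   # outs.shape == (2, 80, 100); attns holds three (possibly None) attention maps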
class UnetBlock(nn.Module):
"""Hierarchically attend on references."""
def __init__(self, d_model: int):
super(UnetBlock, self).__init__()
self.conv1 = nn.Conv1d(80, d_model, 3, padding=1, padding_mode="replicate")
self.conv2 = nn.Conv1d(d_model, d_model, 3, padding=1, padding_mode="replicate")
self.conv3 = nn.Conv1d(d_model, d_model, 3, padding=1, padding_mode="replicate")
self.prenet = nn.Sequential(
nn.Linear(768, 768), nn.ReLU(), nn.Linear(768, d_model),
)
self.features_prenet = nn.Sequential(
nn.Linear(768, 768), nn.ReLU(), nn.Linear(768, d_model),
)
self.extractor1 = Extractor(d_model, 2, 1024, no_residual=True)
self.extractor2 = Extractor(d_model, 2, 1024)
self.extractor3 = Extractor(d_model, 2, 1024)
def forward(
self,
srcs: Tensor,
refs: Tensor,
refs_features: Optional[Tensor] = None,
src_masks: Optional[Tensor] = None,
ref_masks: Optional[Tensor] = None,
) -> Tuple[Tensor, List[Optional[Tensor]]]:
"""Forward function.
Args:
srcs: (batch, src_len, 768)
src_masks: (batch, src_len)
refs: (batch, 80, ref_len)
refs_features: (batch, ref_len, 768)
ref_masks: (batch, ref_len)
"""
# tgt: (batch, tgt_len, d_model)
tgt = self.prenet(srcs)
refs_features = None if refs_features is None else self.features_prenet(refs_features)
# tgt: (tgt_len, batch, d_model)
tgt = tgt.transpose(0, 1)
# ref*: (batch, d_model, mel_len)
ref1 = self.conv1(refs)
ref2 = self.conv2(F.relu(ref1))
ref3 = self.conv3(F.relu(ref2))
# out*: (tgt_len, batch, d_model)
out, attn1 = self.extractor1(
tgt,
ref3.transpose(1, 2).transpose(0, 1),
memory_features=refs_features.transpose(0, 1),
tgt_key_padding_mask=src_masks,
memory_key_padding_mask=ref_masks,
)
out, attn2 = self.extractor2(
out,
ref2.transpose(1, 2).transpose(0, 1),
tgt_key_padding_mask=src_masks,
memory_key_padding_mask=ref_masks,
)
out, attn3 = self.extractor3(
out,
ref1.transpose(1, 2).transpose(0, 1),
tgt_key_padding_mask=src_masks,
memory_key_padding_mask=ref_masks,
)
# out: (tgt_len, batch, d_model)
return out, [attn1, attn2, attn3]
|
the-stack_0_10856 | import argparse
import matplotlib.pyplot as plt
import numpy as np
from joblib import dump
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
descript = """Using parameters to edit OpenFOAM parameters"""
parser = argparse.ArgumentParser(description=descript)
parser.add_argument("-workspace", help="The DAG spec root")
args = parser.parse_args()
training_percent = 0.6
timestep = -1
fontsize = 20
WORKSPACE = args.workspace
inputs_dir = WORKSPACE + "/merlin_info"
outputs_dir = WORKSPACE + "/combine_outputs"
outputs = np.load(outputs_dir + "/data.npz")
U = outputs["arr_0"]
enstrophy = outputs["arr_1"]
energy_byhand = np.sum(np.sum(U ** 2, axis=3), axis=2) / U.shape[2] / 2
enstrophy_all = np.sum(enstrophy, axis=2)
X = np.load(inputs_dir + "/samples.npy")
y = np.concatenate(
(
enstrophy_all[:, timestep].reshape(-1, 1),
energy_byhand[:, timestep].reshape(-1, 1),
),
axis=1,
)
X[:, 1] = np.log10(X[:, 0] / X[:, 1]) # np.log10(X)
y = np.log10(y)
training_size = int(training_percent * len(X))
X_train = X[:training_size]
y_train = y[:training_size]
X_test = X[training_size:]
y_test = y[training_size:]
regr = RandomForestRegressor(max_depth=10, random_state=0, n_estimators=7)
regr.fit(X_train, y_train)
print("training score:", regr.score(X_train, y_train))
print("testing score: ", regr.score(X_test, y_test))
print(mean_squared_error(y_test, regr.predict(X_test)))
dump(regr, "trained_model.joblib")
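# The persisted surrogate can later be reloaded and queried without retraining
# (illustrative sketch; the two input columns must match the training layout,
# lidspeed and log10(lidspeed / viscosity), and predictions are log10 values).
#
#   from joblib import load
#   surrogate = load("trained_model.joblib")
#   log10_enstrophy, log10_energy = surrogate.predict([[1.5, 3.0]])[0]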
fig, ax = plt.subplots(3, 2, figsize=(25, 25), constrained_layout=True)
plt.rcParams.update({"font.size": 25})
plt.rcParams["lines.linewidth"] = 5
x = np.linspace(-5, 8, 100)
y1 = 1 * x
ax[0][0].plot(x, y1, "-r", label="y=x", linewidth=1)
y_pred = regr.predict(X_train)
ax[0][0].scatter(y_train[:, 0], y_pred[:, 0], label="Log10 Enstrophy")
ax[0][0].scatter(y_train[:, 1], y_pred[:, 1], label="Log10 Energy")
ax[0][0].set_title("Velocity Magnitude %s" % timestep)
ax[0][0].set_xlabel("Actual", fontsize=fontsize)
ax[0][0].set_ylabel("Predicted", fontsize=fontsize)
ax[0][0].set_title("Training Data, # Points: %s" % len(y_pred))
ax[0][0].legend()
ax[0][0].grid()
x_min = np.min([np.min(y_train[:, 0]), np.min(y_train[:, 1])])
y_min = np.min([np.min(y_pred[:, 0]), np.min(y_pred[:, 1])])
x_max = np.max([np.max(y_train[:, 0]), np.max(y_train[:, 1])])
y_max = np.max([np.max(y_pred[:, 0]), np.max(y_pred[:, 1])])
y_pred = regr.predict(X_test)
ax[0][1].plot(x, y1, "-r", label="y=x", linewidth=1)
ax[0][1].scatter(y_test[:, 0], y_pred[:, 0], label="Log10 Enstrophy")
ax[0][1].scatter(y_test[:, 1], y_pred[:, 1], label="Log10 Energy")
ax[0][1].set_xlabel("Actual", fontsize=fontsize)
ax[0][1].set_ylabel("Predicted", fontsize=fontsize)
ax[0][1].set_title("Testing Data, # Points: %s" % len(y_pred))
ax[0][1].legend()
ax[0][1].grid()
x_min = np.min([np.min(y_test[:, 0]), np.min(y_test[:, 1]), x_min]) - 0.1
y_min = np.min([np.min(y_pred[:, 0]), np.min(y_pred[:, 1]), y_min]) - 0.1
x_max = np.max([np.max(y_test[:, 0]), np.max(y_test[:, 1]), x_max]) + 0.1
y_max = np.max([np.max(y_pred[:, 0]), np.max(y_pred[:, 1]), y_max]) + 0.1
ax[0][0].set_xlim([x_min, x_max])
ax[0][0].set_ylim([y_min, y_max])
ax[0][1].set_xlim([x_min, x_max])
ax[0][1].set_ylim([y_min, y_max])
y_pred_all = regr.predict(X)
input_enstrophy = ax[1][1].scatter(X[:, 0], 10 ** y[:, 1], s=100, edgecolors="black")
ax[1][1].set_xlabel(r"Lidspeed ($\frac{m}{s}$)", fontsize=fontsize)
ax[1][1].set_ylabel(r"$Energy$", fontsize=fontsize)
ax[1][1].set_title("Average Energy Variation with Lidspeed")
ax[1][1].grid()
input_energy = ax[1][0].scatter(
X[:, 0],
X[:, 1],
s=100,
edgecolors="black",
c=10 ** y[:, 1],
cmap=plt.get_cmap("viridis"),
)
ax[1][0].set_xlabel(r"Lidspeed ($\frac{m}{s}$)", fontsize=fontsize)
ax[1][0].set_ylabel(r"$Log_{10}$(Reynolds Number)", fontsize=fontsize)
ax[1][0].set_title("Inputs vs Average Energy")
ax[1][0].grid()
cbar = plt.colorbar(input_energy, ax=ax[1][0])
cbar.ax.set_ylabel(r"$Energy$", rotation=270, labelpad=30)
ax[1][0].tick_params(axis="both", which="major", labelsize=fontsize)
ax[1][1].tick_params(axis="both", which="major", labelsize=fontsize)
ax[1][0].tick_params(axis="both", which="major", labelsize=fontsize)
ax[1][1].tick_params(axis="both", which="major", labelsize=fontsize)
y_pred_all = regr.predict(X)
input_enstrophy = ax[2][0].scatter(
X[:, 0],
X[:, 1],
s=100,
edgecolors="black",
c=y[:, 0] - y_pred_all[:, 0],
cmap=plt.get_cmap("Spectral"),
)
ax[2][0].set_xlabel(r"Lidspeed ($\frac{m}{s}$)", fontsize=fontsize)
ax[2][0].set_ylabel(r"$Log_{10}$(Reynolds Number)", fontsize=fontsize)
ax[2][0].set_title("Inputs vs Enstrophy error")
ax[2][0].grid()
cbar = plt.colorbar(input_enstrophy, ax=ax[2][0])
cbar.ax.set_ylabel(r"$y_{act} - y_{pred}$", rotation=270, labelpad=30)
input_energy = ax[2][1].scatter(
X[:, 0],
X[:, 1],
s=100,
edgecolors="black",
c=y[:, 1] - y_pred_all[:, 1],
cmap=plt.get_cmap("Spectral"),
)
ax[2][1].set_xlabel(r"Lidspeed ($\frac{m}{s}$)", fontsize=fontsize)
ax[2][1].set_ylabel(r"$Log_{10}$(Reynolds Number)", fontsize=fontsize)
ax[2][1].set_title("Inputs vs Energy error")
ax[2][1].grid()
cbar = plt.colorbar(input_energy, ax=ax[2][1])
cbar.ax.set_ylabel(r"$y_{act} - y_{pred}$", rotation=270, labelpad=30)
ax[0][0].tick_params(axis="both", which="major", labelsize=fontsize)
ax[0][1].tick_params(axis="both", which="major", labelsize=fontsize)
ax[2][0].tick_params(axis="both", which="major", labelsize=fontsize)
ax[2][1].tick_params(axis="both", which="major", labelsize=fontsize)
plt.savefig("prediction.png")
|
the-stack_0_10861 | import numpy as np
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv_layer(input, shape):
W = weight_variable(shape)
b = bias_variable([shape[3]])
return tf.nn.relu(conv2d(input, W) + b)
def full_layer(input, size):
in_size = int(input.get_shape()[1])
W = weight_variable([in_size, size])
b = bias_variable([size])
return tf.matmul(input, W) + b
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
x_image = tf.reshape(x, [-1, 28, 28, 1])
conv1 = conv_layer(x_image, shape=[5, 5, 1, 32])
conv1_pool = max_pool_2x2(conv1)
conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
conv2_pool = max_pool_2x2(conv2)
conv2_flat = tf.reshape(conv2_pool, [-1, 7*7*64])
full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))
keep_prob = tf.placeholder(tf.float32)
full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)
y_conv = full_layer(full1_drop, 10)
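# Shape bookkeeping for the graph above (descriptive note; N = batch size):
#   x_image    : (N, 28, 28, 1)
#   conv1_pool : (N, 14, 14, 32)
#   conv2_pool : (N, 7, 7, 64)
#   conv2_flat : (N, 3136)
#   full_1     : (N, 1024)
#   y_conv     : (N, 10)  # unnormalised class logits fed to softmax_cross_entropy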
DATA_DIR = '../data/MNIST_DATA'
NUM_STEPS = 1000
MINIBATCH_SIZE = 50
mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(NUM_STEPS):
batch = mnist.train.next_batch(MINIBATCH_SIZE)
        if i % 100 == 0:
train_accuracy = sess.run(accuracy, feed_dict={x: batch[0], y_: batch[1],
keep_prob: 1.0})
print("step {}, training accuracy {}".format(i, train_accuracy))
sess.run(train_step, feed_dict={x: batch[0], y_: batch[1],
keep_prob: 0.5})
X = mnist.test.images.reshape(10, 1000, 784)
Y = mnist.test.labels.reshape(10, 1000, 10)
test_accuracy = np.mean([
sess.run(accuracy, feed_dict={x:X[i], y_:Y[i], keep_prob:1.0}) for i in range(10)
])
print("test accuracy: {}".format(test_accuracy))
|
the-stack_0_10862 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import imath
import inspect
import six
import time
import unittest
import IECore
import IECoreScene
import Gaffer
import GafferTest
import GafferDispatch
import GafferScene
import GafferSceneTest
class InstancerTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
sphere = IECoreScene.SpherePrimitive()
instanceInput = GafferSceneTest.CompoundObjectSource()
instanceInput["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( imath.Box3f( imath.V3f( -2 ), imath.V3f( 2 ) ) ),
"children" : {
"sphere" : {
"object" : sphere,
"bound" : IECore.Box3fData( sphere.bound() ),
"transform" : IECore.M44fData( imath.M44f().scale( imath.V3f( 2 ) ) ),
},
}
} )
)
seeds = IECoreScene.PointsPrimitive(
IECore.V3fVectorData(
[ imath.V3f( 1, 0, 0 ), imath.V3f( 1, 1, 0 ), imath.V3f( 0, 1, 0 ), imath.V3f( 0, 0, 0 ) ]
)
)
seedsInput = GafferSceneTest.CompoundObjectSource()
seedsInput["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( imath.Box3f( imath.V3f( 1, 0, 0 ), imath.V3f( 2, 1, 0 ) ) ),
"children" : {
"seeds" : {
"bound" : IECore.Box3fData( seeds.bound() ),
"transform" : IECore.M44fData( imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) ),
"object" : seeds,
},
},
}, )
)
instancer = GafferScene.Instancer()
instancer["in"].setInput( seedsInput["out"] )
instancer["prototypes"].setInput( instanceInput["out"] )
instancer["parent"].setValue( "/seeds" )
instancer["name"].setValue( "instances" )
self.assertEqual( instancer["out"].object( "/" ), IECore.NullObject() )
self.assertEqual( instancer["out"].transform( "/" ), imath.M44f() )
self.assertEqual( instancer["out"].bound( "/" ), imath.Box3f( imath.V3f( -1, -2, -2 ), imath.V3f( 4, 3, 2 ) ) )
self.assertEqual( instancer["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "seeds" ] ) )
self.assertEqual( instancer["out"].object( "/seeds" ), IECore.NullObject() )
self.assertEqual( instancer["out"].transform( "/seeds" ), imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) )
self.assertEqual( instancer["out"].bound( "/seeds" ), imath.Box3f( imath.V3f( -2, -2, -2 ), imath.V3f( 3, 3, 2 ) ) )
self.assertEqual( instancer["out"].childNames( "/seeds" ), IECore.InternedStringVectorData( [ "instances" ] ) )
self.assertEqual( instancer["out"].object( "/seeds/instances" ), IECore.NullObject() )
self.assertEqual( instancer["out"].transform( "/seeds/instances" ), imath.M44f() )
self.assertEqual( instancer["out"].bound( "/seeds/instances" ), imath.Box3f( imath.V3f( -2, -2, -2 ), imath.V3f( 3, 3, 2 ) ) )
self.assertEqual( instancer["out"].childNames( "/seeds/instances" ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( instancer["out"].object( "/seeds/instances/sphere" ), IECore.NullObject() )
self.assertEqual( instancer["out"].transform( "/seeds/instances/sphere" ), imath.M44f() )
self.assertEqual( instancer["out"].bound( "/seeds/instances/sphere" ), imath.Box3f( imath.V3f( -2, -2, -2 ), imath.V3f( 3, 3, 2 ) ) )
self.assertEqual( instancer["out"].childNames( "/seeds/instances/sphere" ), IECore.InternedStringVectorData( [ "0", "1", "2", "3" ] ) )
for i in range( 0, 4 ) :
instancePath = "/seeds/instances/sphere/%d" % i
self.assertEqual( instancer["out"].object( instancePath ), sphere )
self.assertEqual(
instancer["out"].transform( instancePath ),
imath.M44f().scale( imath.V3f( 2 ) ) * imath.M44f().translate( seeds["P"].data[i] )
)
self.assertEqual( instancer["out"].bound( instancePath ), sphere.bound() )
self.assertEqual( instancer["out"].childNames( instancePath ), IECore.InternedStringVectorData() )
# Test paths that don't exist - the transform will trigger an error, the other functions don't depend on
# the index, so will just return a reasonable value
six.assertRaisesRegex( self,
Gaffer.ProcessException,
'Instancer.out.transform : Instance id "77" is invalid, instancer produces only 4 children. Topology may have changed during shutter.',
instancer["out"].transform, "/seeds/instances/sphere/77"
)
self.assertEqual( instancer["out"].object( "/seeds/instances/sphere/77" ), sphere )
self.assertEqual( instancer["out"].bound( "/seeds/instances/sphere/77" ), sphere.bound() )
self.assertEqual( instancer["out"].childNames( "/seeds/instances/sphere/77" ), IECore.InternedStringVectorData() )
# Test passthrough when disabled
instancer["enabled"].setValue( False )
self.assertScenesEqual( instancer["in"], instancer["out"] )
instancer["enabled"].setValue( True )
# Test encapsulation options
encapInstancer = GafferScene.Instancer()
encapInstancer["in"].setInput( seedsInput["out"] )
encapInstancer["prototypes"].setInput( instanceInput["out"] )
encapInstancer["parent"].setValue( "/seeds" )
encapInstancer["name"].setValue( "instances" )
encapInstancer["encapsulateInstanceGroups"].setValue( True )
unencapFilter = GafferScene.PathFilter()
unencapFilter["paths"].setValue( IECore.StringVectorData( [ "/..." ] ) )
unencap = GafferScene.Unencapsulate()
unencap["in"].setInput( encapInstancer["out"] )
unencap["filter"].setInput( unencapFilter["out"] )
self.assertTrue( isinstance( encapInstancer["out"].object( "/seeds/instances/sphere/" ), GafferScene.Capsule ) )
self.assertEqual( encapInstancer["out"].childNames( "/seeds/instances/sphere/" ), IECore.InternedStringVectorData() )
self.assertScenesEqual( unencap["out"], instancer["out"] )
# Edit seeds object
freezeTransform = GafferScene.FreezeTransform()
freezeTransform["in"].setInput( seedsInput["out"] )
freezeTransform["filter"].setInput( unencapFilter["out"] )
instancer["in"].setInput( freezeTransform["out"] )
encapInstancer["in"].setInput( freezeTransform["out"] )
self.assertScenesEqual( unencap["out"], instancer["out"] )
# Then set it back ( to make sure that returning to a previously cached value after
# changing the seeds doesn't pull an expired Capsule out of the cache )
freezeTransform["enabled"].setValue( False )
self.assertScenesEqual( unencap["out"], instancer["out"] )
# Test passthrough when disabled
instancer["enabled"].setValue( False )
self.assertScenesEqual( instancer["in"], instancer["out"] )
def testThreading( self ) :
sphere = IECoreScene.SpherePrimitive()
instanceInput = GafferSceneTest.CompoundObjectSource()
instanceInput["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( imath.Box3f( imath.V3f( -2 ), imath.V3f( 2 ) ) ),
"children" : {
"sphere" : {
"object" : sphere,
"bound" : IECore.Box3fData( sphere.bound() ),
"transform" : IECore.M44fData( imath.M44f().scale( imath.V3f( 2 ) ) ),
},
}
} )
)
seeds = IECoreScene.PointsPrimitive(
IECore.V3fVectorData(
[ imath.V3f( 1, 0, 0 ), imath.V3f( 1, 1, 0 ), imath.V3f( 0, 1, 0 ), imath.V3f( 0, 0, 0 ) ]
)
)
seedsInput = GafferSceneTest.CompoundObjectSource()
seedsInput["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( imath.Box3f( imath.V3f( 1, 0, 0 ), imath.V3f( 2, 1, 0 ) ) ),
"children" : {
"seeds" : {
"bound" : IECore.Box3fData( seeds.bound() ),
"transform" : IECore.M44fData( imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) ),
"object" : seeds,
},
},
}, )
)
instancer = GafferScene.Instancer()
instancer["in"].setInput( seedsInput["out"] )
instancer["prototypes"].setInput( instanceInput["out"] )
instancer["parent"].setValue( "/seeds" )
instancer["name"].setValue( "instances" )
GafferSceneTest.traverseScene( instancer["out"] )
def testNamePlugDefaultValue( self ) :
n = GafferScene.Instancer()
self.assertEqual( n["name"].defaultValue(), "instances" )
self.assertEqual( n["name"].getValue(), "instances" )
def testAffects( self ) :
n = GafferScene.Instancer()
a = n.affects( n["name"] )
self.assertGreaterEqual( { x.relativeName( n ) for x in a }, { "out.childNames", "out.bound", "out.set" } )
def testParentBoundsWhenNoInstances( self ) :
sphere = GafferScene.Sphere()
sphere["type"].setValue( sphere.Type.Primitive ) # no points, so we can't instance onto it
instancer = GafferScene.Instancer()
instancer["in"].setInput( sphere["out"] )
instancer["parent"].setValue( "/sphere" )
instancer["prototypes"].setInput( sphere["out"] )
self.assertSceneValid( instancer["out"] )
self.assertEqual( instancer["out"].bound( "/sphere" ), sphere["out"].bound( "/sphere" ) )
def testEmptyName( self ) :
plane = GafferScene.Plane()
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["parent"].setValue( "/plane" )
instancer["name"].setValue( "" )
f = GafferScene.PathFilter()
f["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
deleteObject = GafferScene.DeleteObject()
deleteObject["in"].setInput( plane["out"] )
deleteObject["filter"].setInput( f["out"] )
self.assertScenesEqual( instancer["out"], deleteObject["out"] )
def testEmptyParent( self ) :
plane = GafferScene.Plane()
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "" )
self.assertScenesEqual( instancer["out"], plane["out"] )
self.assertSceneHashesEqual( instancer["out"], plane["out"] )
def testSeedsAffectBound( self ) :
plane = GafferScene.Plane()
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/plane" )
h1 = instancer["out"].boundHash( "/plane/instances" )
b1 = instancer["out"].bound( "/plane/instances" )
plane["dimensions"].setValue( plane["dimensions"].getValue() * 2 )
h2 = instancer["out"].boundHash( "/plane/instances" )
b2 = instancer["out"].bound( "/plane/instances" )
self.assertNotEqual( h1, h2 )
self.assertNotEqual( b1, b2 )
def testBoundHashIsStable( self ) :
plane = GafferScene.Plane()
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/plane" )
h = instancer["out"].boundHash( "/plane/instances" )
for i in range( 0, 100 ) :
self.assertEqual( instancer["out"].boundHash( "/plane/instances" ), h )
def testObjectAffectsChildNames( self ) :
plane = GafferScene.Plane()
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/plane" )
cs = GafferTest.CapturingSlot( instancer.plugDirtiedSignal() )
plane["divisions"]["x"].setValue( 2 )
dirtiedPlugs = [ s[0] for s in cs ]
self.assertTrue( instancer["out"]["childNames"] in dirtiedPlugs )
self.assertTrue( instancer["out"]["bound"] in dirtiedPlugs )
self.assertTrue( instancer["out"]["transform"] in dirtiedPlugs )
def testPythonExpressionAndGIL( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["plane"]["divisions"].setValue( imath.V2i( 20 ) )
script["sphere"] = GafferScene.Sphere()
script["expression"] = Gaffer.Expression()
script["expression"].setExpression( "parent['sphere']['radius'] = context.getFrame()" )
script["instancer"] = GafferScene.Instancer()
script["instancer"]["in"].setInput( script["plane"]["out"] )
script["instancer"]["prototypes"].setInput( script["sphere"]["out"] )
script["instancer"]["parent"].setValue( "/plane" )
# The Instancer spawns its own threads, so if we don't release the GIL
# when invoking it, and an upstream node enters Python, we'll end up
		# with a deadlock. Test that this isn't the case. We increment the frame
# between each test to ensure the expression result is not cached and
		# we really do enter Python.
with Gaffer.Context() as c :
c.setFrame( 1 )
script["instancer"]["out"]["globals"].getValue()
c.setFrame( 101 )
script["instancer"]["out"]["globals"].hash()
c["scene:path"] = IECore.InternedStringVectorData( [ "plane" ] )
c.setFrame( 2 )
script["instancer"]["out"]["bound"].getValue()
c.setFrame( 3 )
script["instancer"]["out"]["transform"].getValue()
c.setFrame( 4 )
script["instancer"]["out"]["object"].getValue()
c.setFrame( 5 )
script["instancer"]["out"]["attributes"].getValue()
c.setFrame( 6 )
script["instancer"]["out"]["childNames"].getValue()
c.setFrame( 7 )
c.setFrame( 102 )
script["instancer"]["out"]["bound"].hash()
c.setFrame( 103 )
script["instancer"]["out"]["transform"].hash()
c.setFrame( 104 )
script["instancer"]["out"]["object"].hash()
c.setFrame( 105 )
script["instancer"]["out"]["attributes"].hash()
c.setFrame( 106 )
script["instancer"]["out"]["childNames"].hash()
c.setFrame( 107 )
		# The same applies to the higher-level helper functions on ScenePlug
c.setFrame( 200 )
script["instancer"]["out"].bound( "/plane" )
c.setFrame( 201 )
script["instancer"]["out"].transform( "/plane" )
c.setFrame( 202 )
script["instancer"]["out"].fullTransform( "/plane" )
c.setFrame( 203 )
script["instancer"]["out"].attributes( "/plane" )
c.setFrame( 204 )
script["instancer"]["out"].fullAttributes( "/plane" )
c.setFrame( 205 )
script["instancer"]["out"].object( "/plane" )
c.setFrame( 206 )
script["instancer"]["out"].childNames( "/plane" )
c.setFrame( 207 )
c.setFrame( 300 )
script["instancer"]["out"].boundHash( "/plane" )
c.setFrame( 301 )
script["instancer"]["out"].transformHash( "/plane" )
c.setFrame( 302 )
script["instancer"]["out"].fullTransformHash( "/plane" )
c.setFrame( 303 )
script["instancer"]["out"].attributesHash( "/plane" )
c.setFrame( 304 )
script["instancer"]["out"].fullAttributesHash( "/plane" )
c.setFrame( 305 )
script["instancer"]["out"].objectHash( "/plane" )
c.setFrame( 306 )
script["instancer"]["out"].childNamesHash( "/plane" )
c.setFrame( 307 )
def testDynamicPlugsAndGIL( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["plane"]["divisions"].setValue( imath.V2i( 20 ) )
script["sphere"] = GafferScene.Sphere()
script["expression"] = Gaffer.Expression()
script["expression"].setExpression( "parent['sphere']['radius'] = context.getFrame()" )
script["instancer"] = GafferScene.Instancer()
script["instancer"]["in"].setInput( script["plane"]["out"] )
script["instancer"]["prototypes"].setInput( script["sphere"]["out"] )
script["instancer"]["parent"].setValue( "/plane" )
script["attributes"] = GafferScene.CustomAttributes()
script["attributes"]["in"].setInput( script["instancer"]["out"] )
script["outputs"] = GafferScene.Outputs()
script["outputs"]["in"].setInput( script["attributes"]["out"] )
# Simulate an InteractiveRender or Viewer traversal of the scene
# every time it is dirtied. If the GIL isn't released when dirtiness
# is signalled, we'll end up with a deadlock as the traversal enters
		# Python on another thread to evaluate the expression. We increment the frame
# between each test to ensure the expression result is not cached and
		# we really do enter Python.
traverseConnection = Gaffer.ScopedConnection( GafferSceneTest.connectTraverseSceneToPlugDirtiedSignal( script["outputs"]["out"] ) )
with Gaffer.Context() as c :
c.setFrame( 1 )
script["attributes"]["attributes"].addChild( Gaffer.NameValuePlug( "test1", IECore.IntData( 10 ) ) )
c.setFrame( 2 )
script["attributes"]["attributes"].addChild( Gaffer.NameValuePlug( "test2", IECore.IntData( 20 ), True ) )
c.setFrame( 3 )
script["attributes"]["attributes"].addMembers(
IECore.CompoundData( {
"test3" : 30,
"test4" : 40,
} )
)
c.setFrame( 4 )
p = script["attributes"]["attributes"][0]
del script["attributes"]["attributes"][p.getName()]
c.setFrame( 5 )
script["attributes"]["attributes"].addChild( p )
c.setFrame( 6 )
script["attributes"]["attributes"].removeChild( p )
c.setFrame( 7 )
script["attributes"]["attributes"].setChild( p.getName(), p )
c.setFrame( 8 )
script["attributes"]["attributes"].removeChild( p )
c.setFrame( 9 )
script["attributes"]["attributes"][p.getName()] = p
c.setFrame( 10 )
script["outputs"].addOutput( "test", IECoreScene.Output( "beauty.exr", "exr", "rgba" ) )
def testLoadReferenceAndGIL( self ) :
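		# As with the other GIL tests, reloading a Reference while a traversal is
		# connected to plugDirtiedSignal() must not deadlock when the traversal
		# enters Python via the expression.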
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["plane"]["divisions"].setValue( imath.V2i( 20 ) )
script["sphere"] = GafferScene.Sphere()
script["expression"] = Gaffer.Expression()
script["expression"].setExpression( "parent['sphere']['radius'] = 0.1 + context.getFrame()" )
script["instancer"] = GafferScene.Instancer()
script["instancer"]["in"].setInput( script["plane"]["out"] )
script["instancer"]["prototypes"].setInput( script["sphere"]["out"] )
script["instancer"]["parent"].setValue( "/plane" )
script["box"] = Gaffer.Box()
script["box"]["in"] = GafferScene.ScenePlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
script["box"]["out"] = GafferScene.ScenePlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
script["box"]["out"].setInput( script["box"]["in"] )
script["box"].exportForReference( self.temporaryDirectory() + "/test.grf" )
script["reference"] = Gaffer.Reference()
script["reference"].load( self.temporaryDirectory() + "/test.grf" )
script["reference"]["in"].setInput( script["instancer"]["out"] )
script["attributes"] = GafferScene.CustomAttributes()
script["attributes"]["in"].setInput( script["reference"]["out"] )
traverseConnection = Gaffer.ScopedConnection( GafferSceneTest.connectTraverseSceneToPlugDirtiedSignal( script["attributes"]["out"] ) )
with Gaffer.Context() as c :
script["reference"].load( self.temporaryDirectory() + "/test.grf" )
def testContextChangedAndGIL( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["plane"]["divisions"].setValue( imath.V2i( 20 ) )
script["sphere"] = GafferScene.Sphere()
script["expression"] = Gaffer.Expression()
script["expression"].setExpression( "parent['sphere']['radius'] = context.get( 'minRadius', 0.1 ) + context.getFrame()" )
script["instancer"] = GafferScene.Instancer()
script["instancer"]["in"].setInput( script["plane"]["out"] )
script["instancer"]["prototypes"].setInput( script["sphere"]["out"] )
script["instancer"]["parent"].setValue( "/plane" )
context = Gaffer.Context()
traverseConnection = Gaffer.ScopedConnection( GafferSceneTest.connectTraverseSceneToContextChangedSignal( script["instancer"]["out"], context ) )
with context :
context.setFrame( 10 )
context.setFramesPerSecond( 50 )
context.setTime( 1 )
context.set( "a", 1 )
context.set( "a", 2.0 )
context.set( "a", "a" )
context.set( "a", imath.V2i() )
context.set( "a", imath.V3i() )
context.set( "a", imath.V2f() )
context.set( "a", imath.V3f() )
context.set( "a", imath.Color3f() )
context.set( "a", IECore.BoolData( True ) )
context["b"] = 1
context["b"] = 2.0
context["b"] = "b"
context["b"] = imath.V2i()
context["b"] = imath.V3i()
context["b"] = imath.V2f()
context["b"] = imath.V3f()
context["b"] = imath.Color3f()
context["b"] = IECore.BoolData( True )
with Gaffer.BlockedConnection( traverseConnection ) :
# Must add it with the connection disabled, otherwise
# the addition causes a traversal, and then remove() gets
# all its results from the cache.
context["minRadius"] = 0.2
context.remove( "minRadius" )
with Gaffer.BlockedConnection( traverseConnection ) :
context["minRadius"] = 0.3
del context["minRadius"]
def testDispatchAndGIL( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["plane"]["divisions"].setValue( imath.V2i( 20 ) )
script["sphere"] = GafferScene.Sphere()
script["expression"] = Gaffer.Expression()
script["expression"].setExpression( "parent['sphere']['radius'] = context.get( 'minRadius', 0.1 ) + context.getFrame()" )
script["instancer"] = GafferScene.Instancer()
script["instancer"]["in"].setInput( script["plane"]["out"] )
script["instancer"]["prototypes"].setInput( script["sphere"]["out"] )
script["instancer"]["parent"].setValue( "/plane" )
script["pythonCommand"] = GafferDispatch.PythonCommand()
script["pythonCommand"]["command"].setValue( "pass" )
traverseConnection = Gaffer.ScopedConnection( GafferSceneTest.connectTraverseSceneToPreDispatchSignal( script["instancer"]["out"] ) )
dispatcher = GafferDispatch.LocalDispatcher()
dispatcher["jobsDirectory"].setValue( self.temporaryDirectory() )
with Gaffer.Context() as c :
for i in range( 1, 10 ) :
c.setFrame( i )
dispatcher.dispatch( [ script["pythonCommand"] ] )
def testTransform( self ) :
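		# Per-point "orientation", "scale" and "uniformScale" primitive variables
		# should be applied to the instance transforms once the corresponding plugs
		# are pointed at them.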
point = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( 4, 0, 0 ) ] ) )
point["orientation"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.QuatfVectorData( [ imath.Quatf().setAxisAngle( imath.V3f( 0, 1, 0 ), math.pi / 2.0 ) ] )
)
point["scale"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.V3fVectorData( [ imath.V3f( 2, 3, 4 ) ] )
)
point["uniformScale"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.FloatVectorData( [ 10 ] )
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( point )
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/object" )
self.assertEqual( instancer["out"].transform( "/object/instances/sphere/0" ), imath.M44f().translate( imath.V3f( 4, 0, 0 ) ) )
instancer["orientation"].setValue( "orientation" )
self.assertTrue(
imath.V3f( 4, 0, -1 ).equalWithAbsError(
imath.V3f( 1, 0, 0 ) * instancer["out"].transform( "/object/instances/sphere/0" ),
0.00001
)
)
instancer["scale"].setValue( "scale" )
self.assertTrue(
imath.V3f( 4, 0, -2 ).equalWithAbsError(
imath.V3f( 1, 0, 0 ) * instancer["out"].transform( "/object/instances/sphere/0" ),
0.00001
)
)
instancer["scale"].setValue( "uniformScale" )
self.assertTrue(
imath.V3f( 4, 0, -10 ).equalWithAbsError(
imath.V3f( 1, 0, 0 ) * instancer["out"].transform( "/object/instances/sphere/0" ),
0.00001
)
)
def testIndexedRootsListWithEmptyList( self ) :
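		# No prototype mode or roots list is set here, so the prototypes default to
		# the children of the prototypes scene root, distributed by the "index"
		# primitive variable.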
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 4 ) ] ) )
points["index"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ 0, 1, 1, 0 ] ),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
cube = GafferScene.Cube()
instances = GafferScene.Parent()
instances["in"].setInput( sphere["out"] )
instances["children"][0].setInput( cube["out"] )
instances["parent"].setValue( "/" )
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( instances["out"] )
instancer["parent"].setValue( "/object" )
instancer["prototypeIndex"].setValue( "index" )
self.assertEqual( instancer["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "sphere", "cube" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere" ), IECore.InternedStringVectorData( [ "0", "3" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube" ), IECore.InternedStringVectorData( [ "1", "2" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere/0" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere/3" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube/1" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube/2" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/sphere" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/cube" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/sphere/0" ), sphere["out"].object( "/sphere" ) )
self.assertEqual( instancer["out"].object( "/object/instances/sphere/3" ), sphere["out"].object( "/sphere" ) )
self.assertEqual( instancer["out"].object( "/object/instances/cube/1" ), cube["out"].object( "/cube" ) )
self.assertEqual( instancer["out"].object( "/object/instances/cube/2" ), cube["out"].object( "/cube" ) )
self.assertSceneValid( instancer["out"] )
def buildPrototypeRootsScript( self ) :
		# we don't strictly require a script, but it's the easiest way to
# maintain references to all the nodes for use in client tests.
script = Gaffer.ScriptNode()
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 4 ) ] ) )
points["index"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ 0, 1, 1, 0 ] ),
)
# for use with RootPerVertex mode
points["root"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.StringVectorData( [ "/foo", "/bar" ] ),
IECore.IntVectorData( [ 0, 1, 1, 0 ] ),
)
script["objectToScene"] = GafferScene.ObjectToScene()
script["objectToScene"]["object"].setValue( points )
# for use with IndexedRootsVariable mode
script["variables"] = GafferScene.PrimitiveVariables()
script["variables"]["primitiveVariables"].addChild(
Gaffer.NameValuePlug(
"prototypeRoots",
Gaffer.StringVectorDataPlug( "value", defaultValue = IECore.StringVectorData( [ ] ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ),
True,
"prototypeRoots",
Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic
)
)
script["variables"]["primitiveVariables"]["prototypeRoots"]["name"].setValue( 'prototypeRoots' )
script["variables"]["in"].setInput( script["objectToScene"]["out"] )
script["filter"] = GafferScene.PathFilter()
script["filter"]["paths"].setValue( IECore.StringVectorData( [ "/object" ] ) )
script["variables"]["filter"].setInput( script["filter"]["out"] )
# /foo/bar/sphere
script["sphere"] = GafferScene.Sphere()
script["group"] = GafferScene.Group()
script["group"]["name"].setValue( "bar" )
script["group"]["in"][0].setInput( script["sphere"]["out"] )
script["group2"] = GafferScene.Group()
script["group2"]["name"].setValue( "foo" )
script["group2"]["in"][0].setInput( script["group"]["out"] )
# /bar/baz/cube
script["cube"] = GafferScene.Cube()
script["group3"] = GafferScene.Group()
script["group3"]["name"].setValue( "baz" )
script["group3"]["in"][0].setInput( script["cube"]["out"] )
script["group4"] = GafferScene.Group()
script["group4"]["name"].setValue( "bar" )
script["group4"]["in"][0].setInput( script["group3"]["out"] )
script["prototypes"] = GafferScene.Parent()
script["prototypes"]["in"].setInput( script["group2"]["out"] )
script["prototypes"]["children"][0].setInput( script["group4"]["out"] )
script["prototypes"]["parent"].setValue( "/" )
script["instancer"] = GafferScene.Instancer()
script["instancer"]["in"].setInput( script["variables"]["out"] )
script["instancer"]["prototypes"].setInput( script["prototypes"]["out"] )
script["instancer"]["parent"].setValue( "/object" )
script["instancer"]["prototypeIndex"].setValue( "index" )
return script
def assertRootsMatchPrototypeSceneChildren( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "foo", "bar" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo" ), IECore.InternedStringVectorData( [ "0", "3" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar" ), IECore.InternedStringVectorData( [ "1", "2" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar" ), IECore.NullObject.defaultNullObject() )
for i in [ "0", "3" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "bar" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo/{i}/bar".format( i=i ) ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}/bar".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}/bar/sphere".format( i=i ) ), script["sphere"]["out"].object( "/sphere" ) )
for i in [ "1", "2" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "baz" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}/baz".format( i=i ) ), IECore.InternedStringVectorData( [ "cube" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}/baz".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}/baz/cube".format( i=i ) ), script["cube"]["out"].object( "/cube" ) )
self.assertSceneValid( script["instancer"]["out"] )
def assertUnderspecifiedRoots( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
def assertSingleRoot( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "foo" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo" ), IECore.InternedStringVectorData( [ "0", "1", "2", "3" ] ) )
for i in [ "0", "1", "2", "3" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "bar" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo/{i}/bar".format( i=i ) ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}/bar".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}/bar/sphere".format( i=i ) ), script["sphere"]["out"].object( "/sphere" ) )
self.assertSceneValid( script["instancer"]["out"] )
def assertConflictingRootNames( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "bar", "bar1" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar" ), IECore.InternedStringVectorData( [ "0", "3" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar1" ), IECore.InternedStringVectorData( [ "1", "2" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar1" ), IECore.NullObject.defaultNullObject() )
for i in [ "0", "3" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}/sphere".format( i=i ) ), IECore.InternedStringVectorData( [] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}/sphere".format( i=i ) ), script["sphere"]["out"].object( "/sphere" ) )
for i in [ "1", "2" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar1/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "baz" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar1/{i}/baz".format( i=i ) ), IECore.InternedStringVectorData( [ "cube" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar1/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar1/{i}/baz".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar1/{i}/baz/cube".format( i=i ) ), script["cube"]["out"].object( "/cube" ) )
self.assertSceneValid( script["instancer"]["out"] )
def assertSwappedRoots( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "bar", "foo" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar" ), IECore.InternedStringVectorData( [ "0", "3" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo" ), IECore.InternedStringVectorData( [ "1", "2" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo" ), IECore.NullObject.defaultNullObject() )
for i in [ "0", "3" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "baz" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}/baz".format( i=i ) ), IECore.InternedStringVectorData( [ "cube" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}/baz".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}/baz/cube".format( i=i ) ), script["cube"]["out"].object( "/cube" ) )
for i in [ "1", "2" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "bar" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo/{i}/bar".format( i=i ) ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}/bar".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}/bar/sphere".format( i=i ) ), script["sphere"]["out"].object( "/sphere" ) )
self.assertSceneValid( script["instancer"]["out"] )
def assertSkippedRoots( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "bar" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar" ), IECore.InternedStringVectorData( [ "1", "2" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar" ), IECore.NullObject.defaultNullObject() )
for i in [ "1", "2" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "baz" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}/baz".format( i=i ) ), IECore.InternedStringVectorData( [ "cube" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}/baz".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}/baz/cube".format( i=i ) ), script["cube"]["out"].object( "/cube" ) )
self.assertSceneValid( script["instancer"]["out"] )
def assertRootsToLeaves( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "sphere", "cube" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/sphere" ), IECore.InternedStringVectorData( [ "0", "3" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/cube" ), IECore.InternedStringVectorData( [ "1", "2" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/sphere" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/cube" ), IECore.NullObject.defaultNullObject() )
for i in [ "0", "3" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/sphere/{i}".format( i=i ) ), IECore.InternedStringVectorData( [] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/sphere/{i}".format( i=i ) ), script["sphere"]["out"].object( "/sphere" ) )
for i in [ "1", "2" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/cube/{i}".format( i=i ) ), IECore.InternedStringVectorData( [] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/cube/{i}".format( i=i ) ), script["cube"]["out"].object( "/cube" ) )
self.assertSceneValid( script["instancer"]["out"] )
def assertRootsToRoot( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "root" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/root" ), IECore.InternedStringVectorData( [ "0", "1", "2", "3" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root" ), IECore.NullObject.defaultNullObject() )
for i in [ "0", "1", "2", "3" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/root/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "foo", "bar" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/root/{i}/foo".format( i=i ) ), IECore.InternedStringVectorData( [ "bar" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/root/{i}/foo/bar".format( i=i ) ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/root/{i}/bar".format( i=i ) ), IECore.InternedStringVectorData( [ "baz" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/root/{i}/bar/baz".format( i=i ) ), IECore.InternedStringVectorData( [ "cube" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root/{i}/foo".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root/{i}/foo/bar".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root/{i}/foo/bar/sphere".format( i=i ) ), script["sphere"]["out"].object( "/sphere" ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root/{i}/bar".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root/{i}/bar/baz".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root/{i}/bar/baz/cube".format( i=i ) ), script["cube"]["out"].object( "/cube" ) )
def testIndexedRootsList( self ) :
script = self.buildPrototypeRootsScript()
script["instancer"]["prototypeMode"].setValue( GafferScene.Instancer.PrototypeMode.IndexedRootsList )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [] ) )
self.assertRootsMatchPrototypeSceneChildren( script )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "", ] ) )
self.assertUnderspecifiedRoots( script )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/foo", ] ) )
self.assertSingleRoot( script )
# roots list matching the prototype root children
# we expect the same results as without a roots list
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/foo", "/bar" ] ) )
self.assertRootsMatchPrototypeSceneChildren( script )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/foo/bar", "/bar" ] ) )
self.assertConflictingRootNames( script )
# opposite order to the prototype root children
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/bar", "/foo" ] ) )
self.assertSwappedRoots( script )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "", "/bar" ] ) )
self.assertSkippedRoots( script )
# roots all the way to the leaf level of the prototype scene
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/foo/bar/sphere", "/bar/baz/cube" ] ) )
self.assertRootsToLeaves( script )
# we can specify the root of the prototype scene
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/" ] ) )
self.assertRootsToRoot( script )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/foo", "/does/not/exist" ] ) )
six.assertRaisesRegex( self,
Gaffer.ProcessException, '.*Prototype root "/does/not/exist" does not exist.*',
script["instancer"]["out"].childNames, "/object/instances",
)
def testIndexedRootsVariable( self ) :
script = self.buildPrototypeRootsScript()
script["instancer"]["prototypeMode"].setValue( GafferScene.Instancer.PrototypeMode.IndexedRootsVariable )
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [] ) )
six.assertRaisesRegex( self,
Gaffer.ProcessException, ".*must specify at least one root location.*",
script["instancer"]["out"].childNames, "/object/instances",
)
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "", ] ) )
self.assertUnderspecifiedRoots( script )
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "/foo", ] ) )
self.assertSingleRoot( script )
# roots list matching the prototype root children
# we expect the same results as without a roots list
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "/foo", "/bar" ] ) )
self.assertRootsMatchPrototypeSceneChildren( script )
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "/foo/bar", "/bar" ] ) )
self.assertConflictingRootNames( script )
# opposite order to the prototype root children
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "/bar", "/foo" ] ) )
self.assertSwappedRoots( script )
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "", "/bar" ] ) )
self.assertSkippedRoots( script )
# roots all the way to the leaf level of the prototype scene
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "/foo/bar/sphere", "/bar/baz/cube" ] ) )
self.assertRootsToLeaves( script )
# we can specify the root of the prototype scene
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "/" ] ) )
self.assertRootsToRoot( script )
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "/foo", "/does/not/exist" ] ) )
six.assertRaisesRegex( self,
Gaffer.ProcessException, '.*Prototype root "/does/not/exist" does not exist.*',
script["instancer"]["out"].childNames, "/object/instances",
)
script["instancer"]["prototypeRoots"].setValue( "notAPrimVar" )
six.assertRaisesRegex( self,
Gaffer.ProcessException, ".*must be Constant StringVectorData when using IndexedRootsVariable mode.*does not exist.*",
script["instancer"]["out"].childNames, "/object/instances",
)
# the vertex primvar should fail
script["instancer"]["prototypeRoots"].setValue( "root" )
six.assertRaisesRegex( self,
Gaffer.ProcessException, ".*must be Constant StringVectorData when using IndexedRootsVariable mode.*",
script["instancer"]["out"].childNames, "/object/instances",
)
def testRootPerVertex( self ) :
script = self.buildPrototypeRootsScript()
script["instancer"]["prototypeMode"].setValue( GafferScene.Instancer.PrototypeMode.RootPerVertex )
script["instancer"]["prototypeRoots"].setValue( "root" )
def updateRoots( roots, indices ) :
points = script["objectToScene"]["object"].getValue()
points["root"] = IECoreScene.PrimitiveVariable( points["root"].interpolation, roots, indices )
script["objectToScene"]["object"].setValue( points )
updateRoots( IECore.StringVectorData( [] ), IECore.IntVectorData( [] ) )
six.assertRaisesRegex( self,
Gaffer.ProcessException, ".*must specify at least one root location.*",
script["instancer"]["out"].childNames, "/object/instances",
)
updateRoots( IECore.StringVectorData( [ "", ] ), IECore.IntVectorData( [ 0, 0, 0, 0 ] ) )
self.assertUnderspecifiedRoots( script )
updateRoots( IECore.StringVectorData( [ "/foo", ] ), IECore.IntVectorData( [ 0, 0, 0, 0 ] ) )
self.assertSingleRoot( script )
# roots list matching the prototype root children
# we expect the same results as without a roots list
updateRoots( IECore.StringVectorData( [ "/foo", "/bar" ] ), IECore.IntVectorData( [ 0, 1, 1, 0 ] ) )
self.assertRootsMatchPrototypeSceneChildren( script )
updateRoots( IECore.StringVectorData( [ "/foo/bar", "/bar" ] ), IECore.IntVectorData( [ 0, 1, 1, 0 ] ) )
self.assertConflictingRootNames( script )
# opposite order to the prototype root children
updateRoots( IECore.StringVectorData( [ "/bar", "/foo" ] ), IECore.IntVectorData( [ 0, 1, 1, 0 ] ) )
self.assertSwappedRoots( script )
updateRoots( IECore.StringVectorData( [ "", "/bar" ] ), IECore.IntVectorData( [ 0, 1, 1, 0 ] ) )
self.assertSkippedRoots( script )
# roots all the way to the leaf level of the prototype scene
updateRoots( IECore.StringVectorData( [ "/foo/bar/sphere", "/bar/baz/cube" ] ), IECore.IntVectorData( [ 0, 1, 1, 0 ] ) )
self.assertRootsToLeaves( script )
# we can specify the root of the prototype scene
updateRoots( IECore.StringVectorData( [ "/", ] ), IECore.IntVectorData( [ 0, 0, 0, 0 ] ) )
self.assertRootsToRoot( script )
updateRoots( IECore.StringVectorData( [ "/foo", "/does/not/exist" ] ), IECore.IntVectorData( [ 0, 1, 1, 0 ] ) )
six.assertRaisesRegex( self,
Gaffer.ProcessException, '.*Prototype root "/does/not/exist" does not exist.*',
script["instancer"]["out"].childNames, "/object/instances",
)
script["instancer"]["prototypeRoots"].setValue( "notAPrimVar" )
six.assertRaisesRegex( self,
Gaffer.ProcessException, ".*must be Vertex StringVectorData when using RootPerVertex mode.*does not exist.*",
script["instancer"]["out"].childNames, "/object/instances",
)
# the constant primvar should fail
script["instancer"]["prototypeRoots"].setValue( "prototypeRoots" )
six.assertRaisesRegex( self,
Gaffer.ProcessException, ".*must be Vertex StringVectorData when using RootPerVertex mode.*",
script["instancer"]["out"].childNames, "/object/instances",
)
def testSets( self ) :
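		# Sets defined on the prototypes should be remapped onto the corresponding
		# instance paths in the output.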
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 4 ) ] ) )
points["index"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ 0, 1, 1, 0 ] ),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
sphere["sets"].setValue( "sphereSet" )
cube = GafferScene.Cube()
cube["sets"].setValue( "cubeSet" )
cubeGroup = GafferScene.Group()
cubeGroup["name"].setValue( "cubeGroup" )
cubeGroup["in"][0].setInput( cube["out"] )
instances = GafferScene.Parent()
instances["in"].setInput( sphere["out"] )
instances["children"][0].setInput( cubeGroup["out"] )
instances["parent"].setValue( "/" )
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( instances["out"] )
instancer["parent"].setValue( "/object" )
instancer["prototypeIndex"].setValue( "index" )
self.assertEqual(
instancer["out"]["setNames"].getValue(),
IECore.InternedStringVectorData( [ "sphereSet", "cubeSet" ] )
)
self.assertEqual(
set( instancer["out"].set( "sphereSet" ).value.paths() ),
{
"/object/instances/sphere/0",
"/object/instances/sphere/3",
}
)
self.assertEqual(
set( instancer["out"].set( "cubeSet" ).value.paths() ),
{
"/object/instances/cubeGroup/1/cube",
"/object/instances/cubeGroup/2/cube",
}
)
# Test encapsulation options
encapInstancer = GafferScene.Instancer()
encapInstancer["in"].setInput( objectToScene["out"] )
encapInstancer["prototypes"].setInput( instances["out"] )
encapInstancer["parent"].setValue( "/object" )
encapInstancer["prototypeIndex"].setValue( "index" )
encapInstancer["encapsulateInstanceGroups"].setValue( True )
unencapFilter = GafferScene.PathFilter()
unencapFilter["paths"].setValue( IECore.StringVectorData( [ "/..." ] ) )
unencap = GafferScene.Unencapsulate()
unencap["in"].setInput( encapInstancer["out"] )
unencap["filter"].setInput( unencapFilter["out"] )
# Sets should be empty while encapsulated
self.assertEqual( encapInstancer["out"].set( "sphereSet" ).value.paths(), [] )
self.assertEqual( encapInstancer["out"].set( "cubeSet" ).value.paths(), [] )
# But should match after unencapsulating
self.assertScenesEqual( unencap["out"], instancer["out"] )
def testSetsWithDeepPrototypeRoots( self ) :
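		# Set memberships should also be remapped correctly when the prototype roots
		# are nested below the root of the prototypes scene.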
script = self.buildPrototypeRootsScript()
script["sphere"]["sets"].setValue( "sphereSet" )
script["cube"]["sets"].setValue( "cubeSet" )
script["set"] = GafferScene.Set()
script["set"]["name"].setValue( "barSet" )
script["set"]["in"].setInput( script["prototypes"]["out"] )
script["barFilter"] = GafferScene.PathFilter()
script["barFilter"]["paths"].setValue( IECore.StringVectorData( [ "/foo/bar", "/bar" ] ) )
script["set"]["filter"].setInput( script["barFilter"]["out"] )
script["instancer"]["prototypes"].setInput( script["set"]["out"] )
script["instancer"]["prototypeMode"].setValue( GafferScene.Instancer.PrototypeMode.IndexedRootsList )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/foo/bar", "/bar" ] ) )
self.assertEqual(
script["instancer"]["out"]["setNames"].getValue(),
IECore.InternedStringVectorData( [ "sphereSet", "cubeSet", "barSet" ] )
)
self.assertEqual(
set( script["instancer"]["out"].set( "sphereSet" ).value.paths() ),
{
"/object/instances/bar/0/sphere",
"/object/instances/bar/3/sphere",
}
)
self.assertEqual(
set( script["instancer"]["out"].set( "cubeSet" ).value.paths() ),
{
"/object/instances/bar1/1/baz/cube",
"/object/instances/bar1/2/baz/cube",
}
)
self.assertEqual(
set( script["instancer"]["out"].set( "barSet" ).value.paths() ),
{
"/object/instances/bar/0",
"/object/instances/bar/3",
"/object/instances/bar1/1",
"/object/instances/bar1/2",
}
)
def testIds( self ) :
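		# When an "id" primitive variable is specified, instances are named by id
		# rather than by point index, and querying an id that doesn't exist raises.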
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 4 ) ] ) )
points["id"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ 10, 100, 111, 5 ] ),
)
points["index"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ 0, 1, 0, 1 ] ),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
cube = GafferScene.Cube()
instances = GafferScene.Parent()
instances["in"].setInput( sphere["out"] )
instances["children"][0].setInput( cube["out"] )
instances["parent"].setValue( "/" )
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( instances["out"] )
instancer["parent"].setValue( "/object" )
instancer["prototypeIndex"].setValue( "index" )
instancer["id"].setValue( "id" )
self.assertEqual( instancer["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "sphere", "cube" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere" ), IECore.InternedStringVectorData( [ "10", "111" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube" ), IECore.InternedStringVectorData( [ "5", "100" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere/10" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere/111" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube/100" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube/5" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/sphere" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/cube" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/sphere/10" ), sphere["out"].object( "/sphere" ) )
self.assertEqual( instancer["out"].object( "/object/instances/sphere/111" ), sphere["out"].object( "/sphere" ) )
self.assertEqual( instancer["out"].object( "/object/instances/cube/100" ), cube["out"].object( "/cube" ) )
self.assertEqual( instancer["out"].object( "/object/instances/cube/5" ), cube["out"].object( "/cube" ) )
self.assertEqual( instancer["out"].transform( "/object/instances" ), imath.M44f() )
self.assertEqual( instancer["out"].transform( "/object/instances/sphere" ), imath.M44f() )
self.assertEqual( instancer["out"].transform( "/object/instances/cube" ), imath.M44f() )
self.assertEqual( instancer["out"].transform( "/object/instances/sphere/10" ), imath.M44f() )
self.assertEqual( instancer["out"].transform( "/object/instances/sphere/111" ), imath.M44f().translate( imath.V3f( 2, 0, 0 ) ) )
self.assertEqual( instancer["out"].transform( "/object/instances/cube/100" ), imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) )
self.assertEqual( instancer["out"].transform( "/object/instances/cube/5" ), imath.M44f().translate( imath.V3f( 3, 0, 0 ) ) )
six.assertRaisesRegex( self,
Gaffer.ProcessException,
'Instancer.out.transform : Instance id "77" is invalid. Topology may have changed during shutter.',
instancer["out"].transform, "/object/instances/cube/77"
)
self.assertSceneValid( instancer["out"] )
def testNegativeIdsAndIndices( self ) :
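		# Negative ids are used verbatim as instance names, and negative prototype
		# indices wrap around from the end of the prototype list, as the assertions
		# below show.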
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 2 ) ] ) )
points["id"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ -10, -5 ] ),
)
points["index"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ -1, -2 ] ),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
cube = GafferScene.Cube()
instances = GafferScene.Parent()
instances["in"].setInput( sphere["out"] )
instances["children"][0].setInput( cube["out"] )
instances["parent"].setValue( "/" )
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( instances["out"] )
instancer["parent"].setValue( "/object" )
instancer["prototypeIndex"].setValue( "index" )
instancer["id"].setValue( "id" )
self.assertEqual( instancer["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "sphere", "cube" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere" ), IECore.InternedStringVectorData( [ "-5" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube" ), IECore.InternedStringVectorData( [ "-10" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere/-5" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube/-10" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/sphere" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/cube" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/sphere/-5" ), sphere["out"].object( "/sphere" ) )
self.assertEqual( instancer["out"].object( "/object/instances/cube/-10" ), cube["out"].object( "/cube" ) )
self.assertSceneValid( instancer["out"] )
def testDuplicateIds( self ) :
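		# When several points share the same id, only one instance is produced per id;
		# the transforms asserted below correspond to the first point with each id.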
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 6 ) ] ) )
points["id"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ 0, 0, 2, 2, 4, 4 ] ),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/object" )
instancer["id"].setValue( "id" )
self.assertSceneValid( instancer["out"] )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere" ), IECore.InternedStringVectorData( [ "0", "2", "4" ] ) )
self.assertEqual( instancer["out"].transform( "/object/instances/sphere/0" ), imath.M44f().translate( imath.V3f( 0, 0, 0 ) ) )
self.assertEqual( instancer["out"].transform( "/object/instances/sphere/2" ), imath.M44f().translate( imath.V3f( 2, 0, 0 ) ) )
self.assertEqual( instancer["out"].transform( "/object/instances/sphere/4" ), imath.M44f().translate( imath.V3f( 4, 0, 0 ) ) )
def testAttributes( self ) :
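		# Per-point primitive variables listed on the "attributes" plug become
		# per-instance attributes, optionally renamed with "attributePrefix".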
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 2 ) ] ) )
points["testFloat"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.FloatVectorData( [ 0, 1 ] ),
)
points["testColor"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.Color3fVectorData( [ imath.Color3f( 1, 0, 0 ), imath.Color3f( 0, 1, 0 ) ] ),
)
points["testPoint"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.V3fVectorData(
[ imath.V3f( 0, 0, 0 ), imath.V3f( 1, 1, 1 ) ],
IECore.GeometricData.Interpretation.Point
),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/object" )
self.assertEqual(
instancer["out"].attributes( "/object/instances" ),
IECore.CompoundObject()
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere" ),
IECore.CompoundObject()
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/0" ),
IECore.CompoundObject()
)
instancer["attributes"].setValue( "testFloat testColor testPoint" )
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/0" ),
IECore.CompoundObject( {
"testFloat" : IECore.FloatData( 0.0 ),
"testColor" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ),
"testPoint" : IECore.V3fData(
imath.V3f( 0 ),
IECore.GeometricData.Interpretation.Point
)
} )
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/1" ),
IECore.CompoundObject( {
"testFloat" : IECore.FloatData( 1.0 ),
"testColor" : IECore.Color3fData( imath.Color3f( 0, 1, 0 ) ),
"testPoint" : IECore.V3fData(
imath.V3f( 1 ),
IECore.GeometricData.Interpretation.Point
)
} )
)
instancer["attributePrefix"].setValue( "user:" )
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/0" ),
IECore.CompoundObject( {
"user:testFloat" : IECore.FloatData( 0.0 ),
"user:testColor" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ),
"user:testPoint" : IECore.V3fData(
imath.V3f( 0 ),
IECore.GeometricData.Interpretation.Point
)
} )
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/1" ),
IECore.CompoundObject( {
"user:testFloat" : IECore.FloatData( 1.0 ),
"user:testColor" : IECore.Color3fData( imath.Color3f( 0, 1, 0 ) ),
"user:testPoint" : IECore.V3fData(
imath.V3f( 1 ),
IECore.GeometricData.Interpretation.Point
)
} )
)
instancer["attributePrefix"].setValue( "foo:" )
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/0" ),
IECore.CompoundObject( {
"foo:testFloat" : IECore.FloatData( 0.0 ),
"foo:testColor" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ),
"foo:testPoint" : IECore.V3fData(
imath.V3f( 0 ),
IECore.GeometricData.Interpretation.Point
)
} )
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/1" ),
IECore.CompoundObject( {
"foo:testFloat" : IECore.FloatData( 1.0 ),
"foo:testColor" : IECore.Color3fData( imath.Color3f( 0, 1, 0 ) ),
"foo:testPoint" : IECore.V3fData(
imath.V3f( 1 ),
IECore.GeometricData.Interpretation.Point
)
} )
)
def testEmptyAttributesHaveConstantHash( self ) :
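		# With no attributes requested, every instance should share the same
		# attributes hash and value.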
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 2 ) ] ) )
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/object" )
self.assertEqual(
instancer["out"].attributesHash( "/object/instances/sphere/0" ),
instancer["out"].attributesHash( "/object/instances/sphere/1" ),
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/0" ),
instancer["out"].attributes( "/object/instances/sphere/1" ),
)
def testEditAttributes( self ) :
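		# Editing the source primitive variable values should be reflected in the
		# attributes of the existing instances.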
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 2 ) ] ) )
points["testFloat"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.FloatVectorData( [ 0, 1 ] ),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/object" )
instancer["attributes"].setValue( "test*" )
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/0" ),
IECore.CompoundObject( {
"testFloat" : IECore.FloatData( 0.0 ),
} )
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/1" ),
IECore.CompoundObject( {
"testFloat" : IECore.FloatData( 1.0 ),
} )
)
points["testFloat"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.FloatVectorData( [ 1, 2 ] ),
)
objectToScene["object"].setValue( points )
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/0" ),
IECore.CompoundObject( {
"testFloat" : IECore.FloatData( 1.0 ),
} )
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/1" ),
IECore.CompoundObject( {
"testFloat" : IECore.FloatData( 2.0 ),
} )
)
def testPrototypeAttributes( self ) :
script = self.buildPrototypeRootsScript()
# add some attributes throughout the prototype hierarchies
script["attrFilter"] = GafferScene.PathFilter()
script["attrFilter"]["paths"].setValue( IECore.StringVectorData( [ "/foo", "/foo/bar", "/bar", "/bar/baz/cube" ] ) )
script["attributes"] = GafferScene.StandardAttributes()
script["attributes"]["in"].setInput( script["instancer"]["prototypes"].getInput() )
script["attributes"]["filter"].setInput( script["attrFilter"]["out"] )
script["attributes"]["attributes"]["deformationBlur"]["enabled"].setValue( True )
script["attrSpreadsheet"] = Gaffer.Spreadsheet()
script["attrSpreadsheet"]["selector"].setValue( "${scene:path}" )
script["attrSpreadsheet"]["rows"].addColumn( script["attributes"]["attributes"]["deformationBlur"]["value"] )
script["attributes"]["attributes"]["deformationBlur"]["value"].setInput( script["attrSpreadsheet"]["out"][0] )
for location, value in ( ( "/foo", False ), ( "/foo/bar", True ), ( "/bar", True ), ( "/bar/baz/cube", False ) ) :
row = script["attrSpreadsheet"]["rows"].addRow()
row["name"].setValue( location )
row["cells"][0]["value"].setValue( value )
script["instancer"]["prototypes"].setInput( script["attributes"]["out"] )
script["instancer"]["prototypeMode"].setValue( GafferScene.Instancer.PrototypeMode.IndexedRootsList )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/foo", "/bar" ] ) )
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances" ), IECore.CompoundObject() )
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/foo" ), IECore.CompoundObject() )
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/bar" ), IECore.CompoundObject() )
for i in [ "0", "3" ] :
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/foo/{i}".format( i=i ) )["gaffer:deformationBlur"].value, False )
self.assertEqual( script["instancer"]["out"].fullAttributes( "/object/instances/foo/{i}".format( i=i ) )["gaffer:deformationBlur"].value, False )
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/foo/{i}/bar".format( i=i ) )["gaffer:deformationBlur"].value, True )
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/foo/{i}/bar/sphere" ), IECore.CompoundObject() )
self.assertEqual( script["instancer"]["out"].fullAttributes( "/object/instances/foo/{i}/bar/sphere".format( i=i ) )["gaffer:deformationBlur"].value, True )
for i in [ "1", "2" ] :
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/bar/{i}".format( i=i ) )["gaffer:deformationBlur"].value, True )
self.assertEqual( script["instancer"]["out"].fullAttributes( "/object/instances/bar/{i}".format( i=i ) )["gaffer:deformationBlur"].value, True )
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/bar/{i}/baz".format( i=i ) ), IECore.CompoundObject() )
self.assertEqual( script["instancer"]["out"].fullAttributes( "/object/instances/bar/{i}/baz".format( i=i ) )["gaffer:deformationBlur"].value, True )
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/bar/{i}/baz/cube".format( i=i ) )["gaffer:deformationBlur"].value, False )
self.assertSceneValid( script["instancer"]["out"] )
def testUnconnectedInstanceInput( self ) :
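		# With no prototypes connected, the input's set membership should pass
		# through unchanged.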
plane = GafferScene.Plane()
plane["sets"].setValue( "A" )
plane["divisions"].setValue( imath.V2i( 1, 500 ) )
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["parent"].setValue( "/plane" )
self.assertEqual( instancer["out"].set( "A" ).value.paths(), [ "/plane" ] )
def testDirtyPropagation( self ) :
plane = GafferScene.Plane()
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["prototypes"].setInput( plane["out"] )
cs = GafferTest.CapturingSlot( instancer.plugDirtiedSignal() )
instancer["parent"].setValue( "plane" )
self.assertIn( instancer["out"]["childNames"], { x[0] for x in cs } )
del cs[:]
filter = GafferScene.PathFilter()
instancer["filter"].setInput( filter["out"] )
self.assertIn( instancer["out"]["childNames"], { x[0] for x in cs } )
def testNoPrimitiveAtParent( self ) :
group = GafferScene.Group()
sphere = GafferScene.Sphere()
sphere["sets"].setValue( "setA" )
groupFilter = GafferScene.PathFilter()
groupFilter["paths"].setValue( IECore.StringVectorData( [ "/group" ] ) )
instancer = GafferScene.Instancer()
instancer["in"].setInput( group["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["filter"].setInput( groupFilter["out"] )
self.assertSceneValid( instancer["out"] )
self.assertEqual( instancer["out"].childNames( "/group/instances" ) , IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].set( "setA" ) , IECore.PathMatcherData() )
def testSetPassThroughs( self ) :
# If the prototypes don't provide a set, then we should do a perfect
# pass through.
plane = GafferScene.Plane()
plane["sets"].setValue( "A" )
planeFilter = GafferScene.PathFilter()
planeFilter["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["filter"].setInput( planeFilter["out"] )
self.assertTrue( instancer["out"].exists( "/plane/instances/sphere/0" ) )
self.assertEqual( instancer["out"].setHash( "A" ), instancer["in"].setHash( "A" ) )
self.assertEqual( instancer["out"].set( "A" ), instancer["in"].set( "A" ) )
self.assertEqual( instancer["out"].set( "A" ).value.paths(), [ "/plane" ] )
def testContexts( self ):
points = IECoreScene.PointsPrimitive(
IECore.V3fVectorData(
[ imath.V3f( i, 0, 0 ) for i in range( 100 ) ]
)
)
points["floatVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData(
[ 2 * math.sin( i ) for i in range( 100 ) ]
) )
points["vectorVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData(
[ imath.V3f( i + 2, i + 3, i + 4 ) for i in range( 100 ) ]
) )
points["uvVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V2fVectorData(
[ imath.V2f( i * 0.01, i * 0.02 ) for i in range( 100 ) ]
) )
points["intVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.IntVectorData(
[ i for i in range( 100 ) ]
) )
points["colorVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.Color3fVectorData(
[ imath.Color3f( i * 0.1 + 2, i * 0.1 + 3, i * 0.1 + 4 ) for i in range( 100 ) ]
) )
points["color4fVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.Color4fVectorData(
[ imath.Color4f( i * 0.1 + 2, i * 0.1 + 3, i * 0.1 + 4, i * 0.1 + 5 ) for i in range( 100 ) ]
) )
points["stringVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.StringVectorData(
[ "foo%i"%(i//34) for i in range( 100 ) ]
) )
points["unindexedRoots"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.StringVectorData(
[ ["cube","plane","sphere"][i//34] for i in range( 100 ) ]
) )
points["indexedRoots"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.StringVectorData( [ "cube","plane","sphere"] ),
IECore.IntVectorData( [(i//34) for i in range( 100 )] ),
)
pointsSource = GafferScene.ObjectToScene()
pointsSource["name"].setValue( "points" )
pointsSource["object"].setValue( points )
attributeSphere = GafferScene.Sphere()
sphereFilter = GafferScene.PathFilter()
sphereFilter["paths"].setValue( IECore.StringVectorData( [ '/sphere' ] ) )
		# In any practical situation where we just needed to set up attributes, we could use the "attributes"
		# plug to set them up more cheaply. But for this test, driving the attribute values from an expression
		# that reads the context is the simplest way to observe the context each prototype is evaluated in.
customAttributes = GafferScene.CustomAttributes()
customAttributes["in"].setInput( attributeSphere["out"] )
customAttributes["filter"].setInput( sphereFilter["out"] )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "floatAttr", Gaffer.FloatPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ), True, "member1" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "vectorAttr", Gaffer.V3fPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ), True, "member2" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "uvAttr", Gaffer.V2fPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ), True, "member3" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "intAttr", Gaffer.IntPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), True, "member4" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "colorAttr", Gaffer.Color3fPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ), True, "member5" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "color4fAttr", Gaffer.Color4fPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ), True, "member6" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "stringAttr", Gaffer.StringPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ), True, "member7" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "seedAttr", Gaffer.IntPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), True, "member8" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "frameAttr", Gaffer.FloatPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), True, "member9" ) )
customAttributes["ReadContextExpression"] = Gaffer.Expression()
customAttributes["ReadContextExpression"].setExpression( inspect.cleandoc(
"""
parent["attributes"]["member1"]["value"] = context.get( "floatVar", -1 )
parent["attributes"]["member2"]["value"] = context.get( "vectorVar", imath.V3f(-1) )
parent["attributes"]["member3"]["value"] = context.get( "uvVar", imath.V2f(-1) )
parent["attributes"]["member4"]["value"] = context.get( "intVar", -1 )
parent["attributes"]["member5"]["value"] = context.get( "colorVar", imath.Color3f( -1 ) )
parent["attributes"]["member6"]["value"] = context.get( "color4fVar", imath.Color4f( -1 ) )
parent["attributes"]["member7"]["value"] = context.get( "stringVar", "" )
parent["attributes"]["member8"]["value"] = context.get( "seed", -1 )
parent["attributes"]["member9"]["value"] = context.get( "frame", -1 )
"""
) )
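		# The expression above copies each context variable created by the Instancer into an attribute,
		# so the checks below can observe exactly which context each instance prototype is evaluated in.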
group = GafferScene.Group()
group["in"][0].setInput( customAttributes["out"] )
group["name"].setValue( 'withAttrs' )
cube = GafferScene.Cube()
plane = GafferScene.Plane()
sphere = GafferScene.Sphere()
parent = GafferScene.Parent()
parent["parent"].setValue( '/' )
parent["in"].setInput( group["out"] )
parent["children"][0].setInput( cube["out"] )
parent["children"][1].setInput( plane["out"] )
parent["children"][2].setInput( sphere["out"] )
pointsFilter = GafferScene.PathFilter()
pointsFilter["paths"].setValue( IECore.StringVectorData( [ '/points' ] ) )
instancer = GafferScene.Instancer()
instancer["in"].setInput( pointsSource["out"] )
instancer["filter"].setInput( pointsFilter["out"] )
instancer["prototypes"].setInput( parent["out"] )
def uniqueCounts():
return dict( [ (i[0], i[1].value) for i in instancer["variations"].getValue().items() ] )
def childNameStrings( location ):
return [ i.value() for i in instancer['out'].childNames( location ) ]
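		# testAttributes() samples the attributes of every instanced sphere. Attribute values listed in
		# `expected` must match exactly ( or to 6 places for floats ); a name passed as "<attr>_seedCount"
		# instead checks the number of distinct values; any other attribute must still be at its default
		# ( -1, or "" for strings ), i.e. not overridden by a context variable.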
def testAttributes( **expected ):
a = [ instancer['out'].attributes( "points/instances/withAttrs/" + i.value() + "/sphere" ) for i in instancer['out'].childNames( "points/instances/withAttrs" ) ]
r = {}
for n in a[0].keys():
r = [ i[n].value for i in a]
if n + "_seedCount" in expected:
self.assertEqual( len( set( r ) ), expected[ n + "_seedCount" ] )
elif n in expected:
self.assertEqual( len(r), len(expected[n]) )
if type( r[0] ) == float:
if r != expected[n]:
for i in range( len( r ) ):
self.assertAlmostEqual( r[i], expected[n][i], places = 6 )
else:
self.assertEqual( r, expected[n] )
else:
if type( r[0] ) == str:
self.assertEqual( r, [""] * len( r ) )
else:
self.assertEqual( r, [type( r[0] )( -1 )] * len( r ) )
# Compatible with C++ rounding
def compatRound( x ):
if x >= 0.0:
return math.floor(x + 0.5)
else:
return math.ceil(x - 0.5)
def quant( x, q ):
return compatRound( float( x ) / q ) * q
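		# For example ( a quick sanity check of the helpers above ): quant( 4, 10 ) == 0.0,
		# quant( 7, 10 ) == 10.0 and quant( -5, 10 ) == -10.0, i.e. round-half-away-from-zero,
		# matching C++ rounding rather than Python's round-half-to-even.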
self.assertEqual( uniqueCounts(), { "" : 1 } )
self.assertEqual( childNameStrings( "points/instances" ), [ "withAttrs", "cube", "plane", "sphere" ] )
self.assertEqual( childNameStrings( "points/instances/withAttrs" ), [ str(i) for i in range( 100 ) ] )
self.assertEqual( childNameStrings( "points/instances/cube" ), [] )
self.assertEqual( childNameStrings( "points/instances/plane" ), [] )
self.assertEqual( childNameStrings( "points/instances/sphere" ), [] )
instancer["prototypeMode"].setValue( GafferScene.Instancer.PrototypeMode.RootPerVertex )
instancer["prototypeRoots"].setValue( "indexedRoots" )
self.assertEqual( uniqueCounts(), { "" : 3 } )
self.assertEqual( childNameStrings( "points/instances/cube" ), [ str(i) for i in range( 0, 34 ) ] )
self.assertEqual( childNameStrings( "points/instances/plane" ), [ str(i) for i in range( 34, 68 ) ] )
self.assertEqual( childNameStrings( "points/instances/sphere" ), [ str(i) for i in range( 68, 100 ) ] )
instancer["prototypeRoots"].setValue( "unindexedRoots" )
"""
# How things should work
self.assertEqual( uniqueCounts(), { "" : 3 } )
self.assertEqual( childNameStrings( "points/instances/cube" ), [ str(i) for i in range( 0, 34 ) ] )
self.assertEqual( childNameStrings( "points/instances/plane" ), [ str(i) for i in range( 34, 68 ) ] )
self.assertEqual( childNameStrings( "points/instances/sphere" ), [ str(i) for i in range( 68, 100 ) ] )
"""
# How things currently work
self.assertEqual( uniqueCounts(), { "" : 1 } )
self.assertEqual( childNameStrings( "points/instances/cube" ), [ str(i) for i in range( 100 ) ] )
self.assertEqual( childNameStrings( "points/instances/plane" ), [] )
self.assertEqual( childNameStrings( "points/instances/sphere" ), [] )
instancer["prototypeMode"].setValue( GafferScene.Instancer.PrototypeMode.IndexedRootsList )
instancer["prototypeIndex"].setValue( 'intVar' )
self.assertEqual( uniqueCounts(), { "" : 4 } )
self.assertEqual( childNameStrings( "points/instances/withAttrs" ), [ str(i) for i in range( 0, 100, 4 ) ] )
self.assertEqual( childNameStrings( "points/instances/cube" ), [ str(i) for i in range( 1, 100, 4 ) ] )
self.assertEqual( childNameStrings( "points/instances/plane" ), [ str(i) for i in range( 2, 100, 4 ) ] )
self.assertEqual( childNameStrings( "points/instances/sphere" ), [ str(i) for i in range( 3, 100, 4 ) ] )
# No context overrides yet
testAttributes( frameAttr = [ 1 ] * 25 )
instancer["contextVariables"].addChild( GafferScene.Instancer.ContextVariablePlug( "context" ) )
instancer["contextVariables"][0]["name"].setValue( "floatVar" )
instancer["contextVariables"][0]["quantize"].setValue( 0 )
# With zero quantization, everything is now unique
testAttributes( frameAttr = [ 1 ] * 25, floatAttr = [ 2 * math.sin( i ) for i in range(0, 100, 4) ] )
# Check both the global unique count, and the per-context variable unique counts
self.assertEqual( uniqueCounts(), { "" : 100, "floatVar" : 100 } )
# With massive quantization, all values collapse
instancer["contextVariables"][0]["quantize"].setValue( 100 )
testAttributes( frameAttr = [ 1 ] * 25, floatAttr = [ 0 for i in range(0, 100, 4) ] )
self.assertEqual( uniqueCounts(), { "" : 4, "floatVar" : 1 } )
# With moderate quantization, we can see how different prototypes combine with the contexts to produce
# more unique values
instancer["contextVariables"][0]["quantize"].setValue( 1 )
floatExpected = [ compatRound( 2 * math.sin( i ) ) for i in range(0, 100, 4) ]
testAttributes( frameAttr = [ 1 ] * 25, floatAttr = floatExpected )
self.assertEqual( uniqueCounts(), { "" : 20, "floatVar" : 5 } )
instancer["prototypeRootsList"].setValue( IECore.StringVectorData( [ "withAttrs", "cube", "plane", "sphere" ] ) )
testAttributes( frameAttr = [ 1 ] * 25, floatAttr = floatExpected )
self.assertEqual( uniqueCounts(), { "" : 20, "floatVar" : 5 } )
# Test an empty root
instancer["prototypeRootsList"].setValue( IECore.StringVectorData( [ "withAttrs", "", "plane", "sphere" ] ) )
self.assertEqual( uniqueCounts(), { "" : 15, "floatVar" : 5 } )
		# Now let's just focus on context variation
instancer["prototypeRootsList"].setValue( IECore.StringVectorData( [] ) )
instancer["prototypeIndex"].setValue( '' )
floatExpected = [ compatRound( 2 * math.sin( i ) ) for i in range(0, 100) ]
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected )
self.assertEqual( uniqueCounts(), { "" : 5, "floatVar" : 5 } )
# Add a second context variation
instancer["contextVariables"].addChild( GafferScene.Instancer.ContextVariablePlug( "context" ) )
instancer["contextVariables"][1]["name"].setValue( "vectorVar" )
instancer["contextVariables"][1]["quantize"].setValue( 0 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
vectorAttr = [ imath.V3f( i + 2, i + 3, i + 4 ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "vectorVar" : 100, "" : 100 } )
instancer["contextVariables"][1]["quantize"].setValue( 10 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
vectorAttr = [ imath.V3f( quant( i + 2, 10 ), quant( i + 3, 10 ), quant( i + 4, 10 ) ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "vectorVar" : 31, "" : 64 } )
# Try all the different types
instancer["contextVariables"][1]["name"].setValue( "uvVar" )
instancer["contextVariables"][1]["quantize"].setValue( 0 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
uvAttr = [ imath.V2f( i * 0.01, i * 0.02 ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "uvVar" : 100, "" : 100 } )
instancer["contextVariables"][1]["quantize"].setValue( 1 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
uvAttr = [ imath.V2f( compatRound( i * 0.01 ), compatRound( i * 0.02 ) ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "uvVar" : 4, "" : 20 } )
instancer["contextVariables"][1]["name"].setValue( "intVar" )
instancer["contextVariables"][1]["quantize"].setValue( 0 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
intAttr = [ i for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "intVar" : 100, "" : 100 } )
instancer["contextVariables"][1]["quantize"].setValue( 10 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
intAttr = [ quant( i, 10 ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "intVar" : 11, "" : 48 } )
instancer["contextVariables"][1]["name"].setValue( "stringVar" )
instancer["contextVariables"][1]["quantize"].setValue( 0 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
stringAttr = [ "foo%i" % ( i / 34 ) for i in range(100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "stringVar" : 3, "" : 15 } )
instancer["contextVariables"][1]["quantize"].setValue( 10 )
six.assertRaisesRegex( self,
Gaffer.ProcessException, 'Instancer.out.attributes : Context variable "0" : cannot quantize variable of type StringVectorData',
instancer['out'].attributes, "points/instances/withAttrs/0/sphere"
)
six.assertRaisesRegex( self,
Gaffer.ProcessException, 'Instancer.variations : Context variable "0" : cannot quantize variable of type StringVectorData',
uniqueCounts
)
instancer["contextVariables"][1]["name"].setValue( "colorVar" )
instancer["contextVariables"][1]["quantize"].setValue( 0 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
colorAttr = [ imath.Color3f( i * 0.1 + 2, i * 0.1 + 3, i * 0.1 + 4 ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "colorVar" : 100, "" : 100 } )
instancer["contextVariables"][1]["quantize"].setValue( 1 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
colorAttr = [ imath.Color3f( compatRound( i * 0.1 + 2 ), compatRound( i * 0.1 + 3 ), compatRound( i * 0.1 + 4 ) ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "colorVar" : 11, "" : 48 } )
instancer["contextVariables"][1]["name"].setValue( "color4fVar" )
instancer["contextVariables"][1]["quantize"].setValue( 0 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
color4fAttr = [ imath.Color4f( i * 0.1 + 2, i * 0.1 + 3, i * 0.1 + 4, i * 0.1 + 5 ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 100, "" : 100 } )
instancer["contextVariables"][1]["quantize"].setValue( 1 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
color4fAttr = [ imath.Color4f( compatRound( i * 0.1 + 2 ), compatRound( i * 0.1 + 3 ), compatRound( i * 0.1 + 4 ), compatRound( i * 0.1 + 5 ) ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 11, "" : 48 } )
# Set a high quantize so we can see how these variations interact with other types of variations
instancer["contextVariables"][1]["quantize"].setValue( 10 )
color4fExpected = [ imath.Color4f( quant( i * 0.1 + 2, 10 ), quant( i * 0.1 + 3, 10 ), quant( i * 0.1 + 4, 10 ), quant( i * 0.1 + 5, 10 ) ) for i in range(0, 100) ]
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected, color4fAttr = color4fExpected )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "" : 20 } )
instancer["seedEnabled"].setValue( True )
instancer["rawSeed"].setValue( True )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected, color4fAttr = color4fExpected, seedAttr = list( range( 100 ) ) )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 100, "" : 100 } )
instancer["rawSeed"].setValue( False )
instancer["seeds"].setValue( 10 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected, color4fAttr = color4fExpected, seedAttr_seedCount = 10 )
initialFirstVal = instancer['out'].attributes( '/points/instances/withAttrs/0/sphere' )["seedAttr"]
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 10, "" : 67 } )
# Changing the seed changes individual values, but not the overall behaviour
instancer["seedPermutation"].setValue( 1 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected, color4fAttr = color4fExpected, seedAttr_seedCount = 10 )
self.assertNotEqual( initialFirstVal, instancer['out'].attributes( '/points/instances/withAttrs/0/sphere' )["seedAttr"] )
# Total variation count is a bit different because the different variation sources line up differently
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 10, "" : 69 } )
# If we generate 100 seeds from 100 ids, we will get many collisions, and only 67 unique values
instancer["seeds"].setValue( 100 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected, color4fAttr = color4fExpected, seedAttr_seedCount = 67 )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 67, "" : 94 } )
# Now turn on time offset as well and play with everything together
instancer["seeds"].setValue( 10 )
instancer["timeOffset"]["enabled"].setValue( True )
instancer["timeOffset"]["name"].setValue( 'floatVar' )
instancer["timeOffset"]["quantize"].setValue( 0.0 )
testAttributes( frameAttr = [ 1 + 2 * math.sin( i ) for i in range(0, 100) ], floatAttr = floatExpected, color4fAttr = color4fExpected, seedAttr_seedCount = 10 )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 10, "frame" : 100, "" : 100 } )
instancer["timeOffset"]["quantize"].setValue( 0.5 )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 10, "frame" : 9, "" : 82 } )
instancer["timeOffset"]["quantize"].setValue( 1 )
testAttributes( frameAttr = [ i + 1 for i in floatExpected ], floatAttr = floatExpected, color4fAttr = color4fExpected, seedAttr_seedCount = 10 )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 10, "frame" : 5, "" : 69 } )
c = Gaffer.Context()
c["frame"] = IECore.FloatData( 42 )
with c:
testAttributes( frameAttr = [ i + 42 for i in floatExpected ], floatAttr = floatExpected, color4fAttr = color4fExpected, seedAttr_seedCount = 10 )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 10, "frame" : 5, "" : 69 } )
# Now reduce back down the variations to test different cumulative combinations
instancer["seedEnabled"].setValue( False )
testAttributes( frameAttr = [ i + 1 for i in floatExpected ], floatAttr = floatExpected, color4fAttr = color4fExpected )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "frame" : 5, "" : 20 } )
# With just one context var, driven by the same prim var as frame, with the same quantization,
# the variations don't multiply
del instancer["contextVariables"][1]
testAttributes( frameAttr = [ i + 1 for i in floatExpected ], floatAttr = floatExpected )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "frame" : 5, "" : 5 } )
# Using a different source primVar means the variations will multiply
instancer["timeOffset"]["name"].setValue( 'intVar' )
instancer["timeOffset"]["quantize"].setValue( 0 )
testAttributes( frameAttr = [ i + 1 for i in range(100) ], floatAttr = floatExpected )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "frame" : 100, "" : 100 } )
instancer["timeOffset"]["quantize"].setValue( 20 )
testAttributes( frameAttr = [ ((i+10)//20)*20 + 1 for i in range(100) ], floatAttr = floatExpected )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "frame" : 6, "" : 30 } )
# Test with multiple point sources
pointsMerge = GafferScene.Parent()
pointsMerge["parent"].setValue( '/' )
pointSources = []
for j in range( 3 ):
points = IECoreScene.PointsPrimitive(
IECore.V3fVectorData(
[ imath.V3f( i, 0, 0 ) for i in range( 10 ) ]
)
)
points["floatVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData(
[ i * 0.1 + j for i in range( 10 ) ]
) )
pointSources.append( GafferScene.ObjectToScene() )
pointSources[-1]["name"].setValue( "points" )
pointSources[-1]["object"].setValue( points )
parent["children"][-1].setInput( pointSources[-1]["out"] )
instancer["in"].setInput( parent["out"] )
instancer["timeOffset"]["enabled"].setValue( False )
instancer["contextVariables"][0]["quantize"].setValue( 0 )
pointsFilter["paths"].setValue( IECore.StringVectorData( [ '/points*' ] ) )
self.assertAlmostEqual( instancer['out'].attributes( "points/instances/withAttrs/2/sphere" )["floatAttr"].value, 0.2 )
self.assertAlmostEqual( instancer['out'].attributes( "points1/instances/withAttrs/3/sphere" )["floatAttr"].value, 1.3 )
self.assertAlmostEqual( instancer['out'].attributes( "points2/instances/withAttrs/5/sphere" )["floatAttr"].value, 2.5 )
self.assertEqual( uniqueCounts(), { "floatVar" : 30, "" : 30 } )
instancer["contextVariables"][0]["quantize"].setValue( 0.2001 )
self.assertAlmostEqual( instancer['out'].attributes( "points/instances/withAttrs/2/sphere" )["floatAttr"].value, 0.2001, places = 6 )
self.assertAlmostEqual( instancer['out'].attributes( "points1/instances/withAttrs/3/sphere" )["floatAttr"].value, 1.2006, places = 6 )
self.assertAlmostEqual( instancer['out'].attributes( "points2/instances/withAttrs/5/sphere" )["floatAttr"].value, 2.4012, places = 6 )
self.assertEqual( uniqueCounts(), { "floatVar" : 15, "" : 15 } )
# Test invalid location
for func in [ instancer["out"].object, instancer["out"].childNames, instancer["out"].bound, instancer["out"].transform ]:
six.assertRaisesRegex( self,
Gaffer.ProcessException,
'Instancer.out.' + func.__name__ + ' : Instance id "777" is invalid, instancer produces only 10 children. Topology may have changed during shutter.',
func, "/points/instances/withAttrs/777"
)
# Test passthrough when disabled
instancer["enabled"].setValue( False )
self.assertScenesEqual( instancer["in"], instancer["out"] )
def testContextSet( self ):
baseSphere = GafferScene.Sphere()
childSphere = GafferScene.Sphere()
parent = GafferScene.Parent()
parent["in"].setInput( baseSphere["out"] )
parent["children"][0].setInput( childSphere["out"] )
parent["parent"].setValue( '/sphere' )
parent["expression"] = Gaffer.Expression()
# Note that we must supply a default for the value of "seed", since the setNames will be evaluated
# with no context set
parent["expression"].setExpression( 'parent["enabled"] = context.get( "seed", 0 ) % 2' )
allFilter = GafferScene.PathFilter()
allFilter["paths"].setValue( IECore.StringVectorData( [ '/...' ] ) )
setNode = GafferScene.Set()
setNode["in"].setInput( parent["out"] )
setNode["filter"].setInput( allFilter["out"] )
plane = GafferScene.Plane()
pathFilter = GafferScene.PathFilter()
pathFilter["paths"].setValue( IECore.StringVectorData( [ '/plane' ] ) )
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["filter"].setInput( pathFilter["out"] )
instancer["prototypes"].setInput( setNode["out"] )
instancer["rawSeed"].setValue( True )
with Gaffer.Context() as c :
c["seed"] = 0
self.assertEqual(
set( instancer["out"].set( "set" ).value.paths() ),
set( [ "/plane/instances/sphere/" + i for i in [ "0", "1", "2", "3" ] ] )
)
c["seed"] = 1
self.assertEqual(
set( instancer["out"].set( "set" ).value.paths() ),
set( [ "/plane/instances/sphere/" + i for i in
[ "0", "1", "2", "3", "0/sphere", "1/sphere", "2/sphere", "3/sphere" ] ]
)
)
instancer["seedEnabled"].setValue( True )
self.assertEqual(
set( instancer["out"].set( "set" ).value.paths() ),
set( [ "/plane/instances/sphere/" + i for i in [ "0", "1", "2", "3", "1/sphere", "3/sphere" ] ] )
)
# When encapsulating, we shouldn't pay any time cost for evaluating the set, even with a huge
# number of instances
plane["divisions"].setValue( imath.V2i( 1000 ) )
instancer["encapsulateInstanceGroups"].setValue( True )
t = time.time()
instancer["out"].set( "set" )
totalTime = time.time() - t
self.assertLess( totalTime, 0.001 )
# Test passthrough when disabled
instancer["enabled"].setValue( False )
self.assertScenesEqual( instancer["in"], instancer["out"] )
def runTestContextSetPerf( self, useContexts, parallelEvaluate ):
plane = GafferScene.Plane()
plane["divisions"].setValue( imath.V2i( 1000 ) )
plane["divisionExpression"] = Gaffer.Expression()
plane["divisionExpression"].setExpression( 'parent["divisions"] = imath.V2i( 1000 + int( context["collect:rootName"][-1:] ) )' )
# Duplicate the source points, so that we are measuring the perf of an Instancer targeting multiple locations
collectScenes = GafferScene.CollectScenes()
collectScenes["in"].setInput( plane["out"] )
collectScenes["rootNames"].setValue( IECore.StringVectorData( [ 'plane0', 'plane1', 'plane2', 'plane3', 'plane4' ] ) )
collectScenes["sourceRoot"].setValue( '/plane' )
# Source scene, with a little hierarchy, so paths aren't trivial to merge
sphere = GafferScene.Sphere()
group = GafferScene.Group( "group" )
group["in"][0].setInput( sphere["out"] )
# Create a set
leafFilter = GafferScene.PathFilter()
leafFilter["paths"].setValue( IECore.StringVectorData( [ '/group/sphere' ] ) )
setNode = GafferScene.Set()
setNode["in"].setInput( group["out"] )
setNode["filter"].setInput( leafFilter["out"] )
# Instancer
instancerFilter = GafferScene.PathFilter()
instancerFilter["paths"].setValue( IECore.StringVectorData( [ '/plane*' ] ) )
instancer = GafferScene.Instancer()
instancer["in"].setInput( collectScenes["out"] )
instancer["filter"].setInput( instancerFilter["out"] )
instancer["prototypes"].setInput( setNode["out"] )
instancer["seedEnabled"].setValue( useContexts )
if not parallelEvaluate:
with GafferTest.TestRunner.PerformanceScope() :
instancer["out"].set( "set" )
else:
# Set up a slightly realistic scene which results in the set plug being
# pulled multiple times in parallel, to check whether TaskCollaborate is working
setFilter = GafferScene.SetFilter()
setFilter["setExpression"].setValue( 'set' )
customAttributes = GafferScene.CustomAttributes()
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "", Gaffer.BoolPlug( "value", defaultValue = False, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), True, "member1", Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
customAttributes["in"].setInput( instancer["out"] )
customAttributes["filter"].setInput( setFilter["out"] )
customAttributes["attributes"]["member1"]["name"].setValue( 'testAttr' )
customAttributes["attributes"]["member1"]["value"].setValue( True )
subTree = GafferScene.SubTree()
subTree["in"].setInput( customAttributes["out"] )
subTree["root"].setValue( '/plane0/instances/group' )
isolateFilter = GafferScene.PathFilter()
isolateFilter["paths"].setValue( IECore.StringVectorData( [ '/67000*' ] ) )
isolate = GafferScene.Isolate()
isolate["in"].setInput( subTree["out"] )
isolate["filter"].setInput( isolateFilter["out"] )
with GafferTest.TestRunner.PerformanceScope() :
GafferSceneTest.traverseScene( isolate["out"] )
def testEmptyPrototypes( self ) :
plane = GafferScene.Plane()
planeFilter = GafferScene.PathFilter()
planeFilter["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["filter"].setInput( planeFilter["out"] )
self.assertEqual( instancer["variations"].getValue(), IECore.CompoundData( { "" : IECore.IntData( 0 ) } ) )
@unittest.skipIf( GafferTest.inCI(), "Performance not relevant on CI platform" )
@GafferTest.TestRunner.PerformanceTestMethod()
def testContextSetPerfNoVariationsSingleEvaluate( self ):
self.runTestContextSetPerf( False, False )
@unittest.skipIf( GafferTest.inCI(), "Performance not relevant on CI platform" )
@GafferTest.TestRunner.PerformanceTestMethod()
def testContextSetPerfNoVariationsParallelEvaluate( self ):
self.runTestContextSetPerf( False, True )
@unittest.skipIf( GafferTest.inCI(), "Performance not relevant on CI platform" )
@GafferTest.TestRunner.PerformanceTestMethod()
def testContextSetPerfWithVariationsSingleEvaluate( self ):
self.runTestContextSetPerf( True, False )
@unittest.skipIf( GafferTest.inCI(), "Performance not relevant on CI platform" )
@GafferTest.TestRunner.PerformanceTestMethod()
def testContextSetPerfWithVariationsParallelEvaluate( self ):
self.runTestContextSetPerf( True, True )
if __name__ == "__main__":
unittest.main()
|
the-stack_0_10864 | import chainer
class ParsevalAddition(chainer.function.Function):
"""Implementation of aggregation layer for Parseval networks.
Only two to one mapping is supported.
"""
def forward(self, inputs):
x0, x1, alpha = inputs
return x0 * alpha[0] + x1 * alpha[1],
def backward(self, inputs, grad_outputs):
x0, x1, alpha = inputs
gy = grad_outputs[0]
xp = chainer.cuda.get_array_module(gy)
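        # Gradient w.r.t. alpha: for y = alpha[0] * x0 + alpha[1] * x1,
        # d( sum(gy * y) ) / d( alpha[i] ) = sum(gy * xi).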
ga = xp.array([(gy * x0).sum(), (gy * x1).sum()], xp.float32)
return gy * alpha[0], gy * alpha[1], ga
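# A minimal usage sketch (hypothetical names; assumes x0 and x1 are chainer Variables of the
# same shape and alpha is a 2-element float32 parameter array):
#
#   y = ParsevalAddition()(x0, x1, alpha)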
|
the-stack_0_10867 | from sef_dr.linear import LinearSEF
import numpy as np
from sklearn.neighbors import NearestCentroid
def test_linear_sef():
"""
Performs some basic testing using the LinearSEF
:return:
"""
np.random.seed(1)
train_data = np.random.randn(100, 50)
train_labels = np.random.randint(0, 2, 100)
proj = LinearSEF(50, output_dimensionality=12)
proj._initialize(train_data)
proj_data = proj.transform(train_data, batch_size=8)
assert proj_data.shape[0] == 100
assert proj_data.shape[1] == 12
ncc = NearestCentroid()
ncc.fit(proj_data, train_labels)
acc_before = ncc.score(proj_data, train_labels)
loss = proj.fit(data=train_data, target_labels=train_labels, epochs=200,
target='supervised', batch_size=8, regularizer_weight=0, learning_rate=0.0001, verbose=False)
# Ensure that loss is reducing
assert loss[0] > loss[-1]
proj_data = proj.transform(train_data, batch_size=8)
assert proj_data.shape[0] == 100
assert proj_data.shape[1] == 12
ncc = NearestCentroid()
ncc.fit(proj_data, train_labels)
acc_after = ncc.score(proj_data, train_labels)
assert acc_after > acc_before |
the-stack_0_10868 | import time
from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
import tensorflow as tf
from yolov3_tf2.models import (
YoloV3, YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images
from yolov3_tf2.utils import draw_outputs
flags.DEFINE_string('classes', './data/coco.names', 'path to classes file')
flags.DEFINE_string('weights', './checkpoints/yolov3.tf',
'path to weights file')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_string('video', './data/video.mp4',
                    'path to video file or number for webcam')
flags.DEFINE_string('output', None, 'path to output video')
flags.DEFINE_string('output_format', 'XVID', 'codec used in VideoWriter when saving video to file')
flags.DEFINE_integer('num_classes', 2, 'number of classes in the model')
def main(_argv):
physical_devices = tf.config.experimental.list_physical_devices('GPU')
for physical_device in physical_devices:
tf.config.experimental.set_memory_growth(physical_device, True)
if FLAGS.tiny:
yolo = YoloV3Tiny(classes=FLAGS.num_classes)
else:
yolo = YoloV3(classes=FLAGS.num_classes)
yolo.load_weights(FLAGS.weights)
logging.info('weights loaded')
class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
logging.info('classes loaded')
times = []
try:
vid = cv2.VideoCapture(int(FLAGS.video))
    except ValueError:  # FLAGS.video is not an integer webcam index, so treat it as a file path
vid = cv2.VideoCapture(FLAGS.video)
out = None
if FLAGS.output:
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))
while True:
_, img = vid.read()
if img is None:
logging.warning("Empty Frame")
time.sleep(0.1)
break
img_in = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_in = tf.expand_dims(img_in, 0)
img_in = transform_images(img_in, FLAGS.size)
t1 = time.time()
boxes, scores, classes, nums = yolo.predict(img_in)
t2 = time.time()
times.append(t2-t1)
times = times[-20:]
img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
img = cv2.putText(img, "Time: {:.2f}ms".format(sum(times)/len(times)*1000), (0, 30),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
if FLAGS.output:
out.write(img)
cv2.imshow('output', img)
if cv2.waitKey(1) == ord('q'):
break
cv2.destroyAllWindows()
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
|
the-stack_0_10870 | import json
with open("data/story.json", "r") as story_file:
story = json.load(story_file)
messages,\
defaults,\
zones\
= story.values()
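# Note: this multi-target unpacking assumes story.json has exactly three top-level keys
# (messages, defaults, zones) in this order; json.load preserves key order on Python 3.7+.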
# for zone in zones:
# rooms = zones[zone]
# for room in rooms:
# # room is currently the key of the room obj
# features = rooms[room]["features"]
# items = rooms[room]["items"]
# print(f"{room}: ->")
# print(f" feats-")
# for feature in features:
# print(" " + feature)
# print(f" items-")
# for item in items:
# print(" " + item) |