ext | sha | content
---|---|---|
py | 1a3df64a3fc1a38fc7d04b10552dc5b80be4ec64 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_lldp_interface
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage LLDP interfaces configuration on network devices
description:
- This module provides declarative management of LLDP interfaces
configuration on network devices.
options:
name:
description:
- Name of the interface LLDP should be configured on.
aggregate:
description: List of interfaces LLDP should be configured on.
purge:
description:
- Purge interfaces not defined in the aggregate parameter.
default: no
state:
description:
- State of the LLDP configuration.
default: present
choices: ['present', 'absent', 'enabled', 'disabled']
"""
EXAMPLES = """
- name: Configure LLDP on specific interfaces
net_lldp_interface:
name: eth1
state: present
- name: Disable LLDP on specific interfaces
net_lldp_interface:
name: eth1
state: disabled
- name: Enable LLDP on specific interfaces
net_lldp_interface:
name: eth1
state: enabled
- name: Delete LLDP on specific interfaces
net_lldp_interface:
name: eth1
state: absent
- name: Create aggregate of LLDP interface configurations
net_lldp_interface:
aggregate:
- { name: eth1 }
- { name: eth2 }
state: present
- name: Delete aggregate of LLDP interface configurations
net_lldp_interface:
aggregate:
- { name: eth1 }
- { name: eth2 }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- set service lldp eth1 disable
"""
|
py | 1a3df65ffbb0e9d32c57ca17a2f4ba23547fb7ea | import unittest
from python.src.neurons.receive_all_neuron import ReceiveAllNeuron
from python.src.neurons.activation_functions.activation_function import (
ActivationFunction)
class ReceiveAllNeuronTest(unittest.TestCase):
def test_connectTo(self):
sender = ReceiveAllNeuron(self.simple_activation())
receiver = ReceiveAllNeuron(self.simple_activation())
sender.connect_to(receiver)
self.assertEqual(len(sender.out_connections), 1)
self.assertEqual(len(receiver.in_connections), 1)
self.assertSetEqual(sender.out_connections,
receiver.in_connections)
def test_removeConnection(self):
sender = ReceiveAllNeuron(self.simple_activation())
receiver = ReceiveAllNeuron(self.simple_activation())
connection = sender.connect_to(receiver)
connection.disconnect()
self.assertEqual(len(sender.out_connections), 0)
self.assertEqual(len(receiver.in_connections), 0)
self.assertSetEqual(sender.out_connections,
receiver.in_connections)
def test_wait_for_all_signals(self):
sender_1 = ReceiveAllNeuron(self.simple_activation())
sender_2 = ReceiveAllNeuron(self.simple_activation())
receiver = ReceiveAllNeuron(self.simple_activation())
sender_1.connect_to(receiver)
sender_2.connect_to(receiver)
sender_1.receive_signal(2.0)
self.assertEqual(receiver.output, 0.0)
self.assertFalse(receiver.allSignalsReceived())
sender_2.receive_signal(3.0)
self.assertEqual(receiver.output, 5.0)
self.assertTrue(receiver.allSignalsReceived())
sender_1.reset()
sender_2.reset()
receiver.reset()
self.assertEqual(sender_1.output, 0.0)
self.assertEqual(sender_2.output, 0.0)
self.assertEqual(receiver.output, 0.0)
self.assertFalse(receiver.allSignalsReceived())
def test_connection(self):
sender = ReceiveAllNeuron(self.simple_activation())
receiver = ReceiveAllNeuron(self.simple_activation())
connection = sender.connect_to(receiver)
self.assertEqual(connection.sender, sender)
self.assertEqual(connection.receiver, receiver)
self.assertEqual(connection.weight, 1.0)
self.assertEqual(connection.signal_sent, 0.0)
self.assertEqual(connection.signal_received, 0.0)
connection.weight = 0.5
sender.receive_signal(7.4)
self.assertEqual(connection.weight, 0.5)
self.assertEqual(connection.signal_sent, 3.7)
self.assertEqual(connection.signal_received, 7.4)
def test_sigmoid_activation(self):
neuron = ReceiveAllNeuron()
neuron.receive_signal(-0.607)
self.assertAlmostEqual(neuron.output, 0.3527438)
def simple_activation(self):
return ActivationFunction(lambda x: x, lambda x: 1)
if __name__ == '__main__':
unittest.main()
|
py | 1a3df71d3e54d862cf04af00c81973f2c2b3907c | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Convert caffe mean
"""
import argparse
import numpy as np
import mxnet as mx
import caffe_parser
def convert_mean(binaryproto_fname, output=None):
"""Convert caffe mean
Parameters
----------
binaryproto_fname : str
Filename of the mean
output : str, optional
Save the mean into mxnet's format
Returns
-------
NDArray
Mean in ndarray
"""
mean_blob = caffe_parser.caffe_pb2.BlobProto()
with open(binaryproto_fname, 'rb') as f:
mean_blob.ParseFromString(f.read())
img_mean_np = np.array(mean_blob.data)
img_mean_np = img_mean_np.reshape(
mean_blob.channels, mean_blob.height, mean_blob.width
)
# swap channels from Caffe BGR to RGB
img_mean_np[[0, 2], :, :] = img_mean_np[[2, 0], :, :]
nd = mx.nd.array(img_mean_np)
if output is not None:
mx.nd.save(output, {"mean_image": nd})
return nd
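# A minimal usage sketch (the file names here are hypothetical, not part of
# this script): convert a Caffe mean file and save it in MXNet's NDArray format.
#   mean_nd = convert_mean('imagenet_mean.binaryproto', 'imagenet_mean.nd')
#   print(mean_nd.shape)  # (channels, height, width)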
def main():
parser = argparse.ArgumentParser(description='Convert caffe mean')
parser.add_argument('binaryproto_fname', help='Filename of the mean')
parser.add_argument('output', help='The name of the output file')
args = parser.parse_args()
convert_mean(args.binaryproto_fname, args.output)
if __name__ == '__main__':
main()
|
py | 1a3df863ad541e9848c2e69d9c5f9cce5c24481a | """Job Adapter Template File
IMPORTANT: NOT A FUNCTIONAL ADAPTER. FUNCTIONS MUST BE IMPLEMENTED
Notes:
- Each of the functions defined below must return a json serializable
object, json_response, or valid HttpResponse object
- A json_response creates an HttpResponse object given parameters:
- content: string with the contents of the response
- status: string with the status of the response
- status_code: HTTP status code
- error: string with the error message if there is one
"""
from common.response import json_response
import logging
import re
logger = logging.getLogger("newt." + __name__)
def get_machines(request):
"""Returns the available machines that jobs can run on
Keyword arguments:
request - Django HttpRequest
"""
pass
def view_queue(request, machine_name):
"""Returns the current state of the queue in a list
Keyword arguments:
request -- Django HttpRequest
machine_name -- name of the machine
"""
pass
def submit_job(request, machine_name):
"""Submits a job to the queue
Keyword arguments:
request -- Django HttpRequest
machine_name -- name of the machine
"""
pass
def get_info(request, machine_name, job_id):
"""Gets the information of a job, given the id
Keyword arguments:
request -- Django HttpRequest
machine_name -- name of the machine
job_id -- the job id
"""
pass
def delete_job(request, machine_name, job_id):
"""Gets the information of a job, given the id
Keyword arguments:
request -- Django HttpRequest
machine_name -- name of the machine
job_id -- the job id
"""
pass
"""A tuple list in the form of:
(
(compiled_regex_exp, associated_function, request_required),
...
)
Note: The compiled_regex_exp must have named groups corresponding to
the arguments of the associated_function
Note: if request_required is True, the associated_function must have
request as the first argument
Example:
patterns = (
(re.compile(r'/usage/(?P<path>.+)$'), get_usage, False),
(re.compile(r'/image/(?P<query>.+)$'), get_image, False),
(re.compile(r'/(?P<path>.+)$'), get_resource, False),
)
"""
patterns = (
)
def extras_router(request, query):
"""Maps a query to a function if the pattern matches and returns result
Keyword arguments:
request -- Django HttpRequest
query -- the query to be matched against
"""
for pattern, func, req in patterns:
match = pattern.match(query)
if match and req:
return func(request, **match.groupdict())
elif match:
return func(**match.groupdict())
# Returns an Unimplemented response if no pattern matches
return json_response(status="Unimplemented",
status_code=501,
error="",
content="query: %s" % query) |
py | 1a3df8d8455e00bbdcf702f0363b3b7fd6ce371c | import scapy.all as scapy
import sys
#Send 10 VLAN packets with data = "Test"
eth_src = "00:00:00:00:00:01" #Host 1
eth_dst = "00:00:00:00:00:02" #Host 2
eth_type = 0x8100 #VLAN
data = "Test" #Data to send
total_packets = 10 #Number of packets to send
l2packet = scapy.Ether(type=eth_type,src=eth_src,dst=eth_dst)/data #Creates an L2 Ethernet frame and appends the data as payload.
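#Note: the frame above only sets EtherType 0x8100; a full 802.1Q frame would
#also carry a Dot1Q header. A hedged sketch (the VLAN ID 10 is an assumption):
#  tagged = scapy.Ether(src=eth_src, dst=eth_dst)/scapy.Dot1Q(vlan=10)/data
#  scapy.sendp(tagged, count=total_packets)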
scapy.sendp(l2packet,count=total_packets) |
py | 1a3df8ef7689d67d36cdff0ea7d5254b885ab853 |
def f(*args):
nargs = len(args)
print(nargs, args)
if __name__ == '__main__':
import dis
dis.show_code(f)
|
py | 1a3dfc2105c4c8a70ca1c7bb32e9c69f69467ee6 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import wx
from wx import Colour
from wx.adv import HyperlinkCtrl, EVT_HYPERLINK
from .preferences_dialogs import PreferencesPanel
from ..widgets import RIDEDialog, HtmlWindow
class ExcludePreferences(PreferencesPanel):
location = ('Excludes')
title = 'Excludes'
def __init__(self, settings, *args, **kwargs):
super(ExcludePreferences, self).__init__(*args, **kwargs)
self._settings = settings
self._general_settings = self._settings['General']
self.font = self.GetFont()
self.font.SetFaceName(self._general_settings['font face'])
self.font.SetPointSize(self._general_settings['font size'])
self.SetFont(self.font)
self.SetBackgroundColour(Colour(self._general_settings['background']))
self.color_secondary_background = Colour(self._general_settings['secondary background'])
self.SetForegroundColour(Colour(self._general_settings['foreground']))
self.color_secondary_foreground = Colour(self._general_settings['secondary foreground'])
self._create_sizer()
def _create_sizer(self):
sizer = wx.BoxSizer(orient=wx.VERTICAL)
self._add_help_dialog(sizer)
self._add_text_box(sizer)
self._add_button_and_status(sizer)
self.SetSizer(sizer)
def _add_help_dialog(self, sizer):
need_help = HyperlinkCtrl(self, wx.ID_ANY, '', 'Need help?')
need_help.SetBackgroundColour(Colour(self.color_secondary_background))
need_help.SetForegroundColour(Colour(self.color_secondary_foreground))
sizer.Add(need_help)
self.Bind(EVT_HYPERLINK, self.OnHelp)
def _add_text_box(self, sizer):
self._text_box = wx.TextCtrl(self,
style=wx.TE_MULTILINE|wx.TE_NOHIDESEL,
size=wx.Size(570, 100),
value=self._settings.excludes.get_excludes())
self._text_box.SetBackgroundColour(Colour(self.color_secondary_background))
self._text_box.SetForegroundColour(Colour(self.color_secondary_foreground))
sizer.Add(self._text_box, proportion=wx.EXPAND)
def _add_button_and_status(self, sizer):
# DEBUG wxPhoenix
status_and_button_sizer = wx.GridSizer(rows=1, cols=2, vgap=10, hgap=10)
save_button = wx.Button(self, id=wx.ID_SAVE)
save_button.SetBackgroundColour(Colour(self.color_secondary_background))
save_button.SetForegroundColour(Colour(self.color_secondary_foreground))
status_and_button_sizer.Add(save_button)
self.Bind(wx.EVT_BUTTON, self.OnSave)
self._status_label = wx.StaticText(self)
status_and_button_sizer.Add(self._status_label)
sizer.Add(status_and_button_sizer)
def OnSave(self, event):
text = self._text_box.GetValue()
self._settings.excludes.write_excludes(set(text.split('\n')))
save_label = 'Saved at %s. Reload the project for changes to take effect.' %\
datetime.now().strftime('%H:%M:%S')
self._status_label.SetLabel(save_label)
def OnHelp(self, event):
dialog = ExcludeHelpDialog()
dialog.Show()
class ExcludeHelpDialog(RIDEDialog):
help = """<font size="5">
<h1>Excludes</h1>
<p>
Paths to exclude are listed in the text box, one exclude per row.
These excludes are saved in a file which is located at $HOME/.robotframework/ride/excludes on POSIX-systems and
%APPDATA%\\RobotFramework\\ride\\excludes on Windows.
</p>
<p>
You can edit excludes either in the text box or by editing the file directly with an editor. After hitting "Save", close
the Preferences window and reload the project for the edited excludes to take effect. You can reload the project by
selecting "File" from the main menu bar and then selecting your project from the list in view.
</p>
<h2>Patterns in paths</h2>
<p>
RIDE supports defining excludes with absolute paths. You can achieve relative paths with path patterns which are
also supported.
</p>
<p>
The following shell-style wildcards are supported:
<table width="100%" border="1">
<thead>
<th><b>Pattern</b></th>
<th><b>Meaning</b></th>
<th><b>Examples</b></th>
</thead>
<tbody>
<tr>
<td valign="top" align="center">*</td>
<td valign="top" align="center">matches everything</td>
<td valign="top" align="left">
Pattern /foo/*/quu matches:
<ul>
<li>/foo/bar/quu</li>
<li>/foo/corge/quu</li>
<li><i>etc.</i></li>
</ul>
</td>
</tr>
<tr>
<td valign="top" align="center">?</td>
<td valign="top" align="center">matches any single character</td>
<td valign="top" align="left">
Pattern C:\MyProject\?oo matches:
<ul>
<li>C:\MyProject\\foo</li>
<li>C:\MyProject\\boo</li>
<li><i>etc.</i></li>
</ul>
</td>
</tr>
<tr>
<td valign="top" align="center">[seq]</td>
<td valign="top" align="center">matches any character in <i>seq</i></td>
<td valign="top" align="left">
Pattern C:\MyProject\[bf]oo matches:
<ul>
<li>C:\MyProject\\foo</li>
<li>C:\MyProject\\boo</li>
<li><i>and nothing else</i></li>
</ul>
</td>
</tr>
<tr>
<td valign="top" align="center">[!seq]</td>
<td valign="top" align="center">matches any character not in <i>seq</i></td>
<td valign="top" align="left">
Pattern /foo/[!q]uu matches:
<ul>
<li>/foo/zuu</li>
<li><i>etc.</i></li>
</ul>
But does not match:
<ul>
<li>/foo/quu</li>
</ul>
</td>
</tr>
</tbody>
</table>
</p>
</font>"""
def __init__(self):
RIDEDialog.__init__(self, title='Help: excludes')
# set Left to Right direction (while we don't have localization)
self.SetLayoutDirection(wx.Layout_LeftToRight)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(HtmlWindow(self, (800, 600), self.help),
1,
flag=wx.EXPAND)
self.SetSizerAndFit(sizer)
def OnKey(self, *args):
pass
def close(self):
self.Destroy()
|
py | 1a3dfc5a8c1eda1ac8a5c81b84190c4fd05a6916 | import pygame as pg
import gym_gvgai as gvg
class Game:
def __init__(self, game, lvl):
self.env = gvg.make('gvgai-' + game + '-' + lvl + '-v0')
self.stateObs = self.env.reset()
size = (len(self.stateObs), len(self.stateObs[0]))
self.transpose = size[0] < size[1]
if self.transpose:
self.size = (size[1]*2, size[0]*2)
else:
self.size = (size[0]*2, size[1]*2)
self.done = False
self.score = 0
self.frame = 0
self.nAction = self.env.action_space.n
def start(self, agent, maxT=1000, printLog=True, visualized=True, fps=10):
if visualized:
clk = pg.time.Clock()
screen = pg.display.set_mode(self.size)
for i in range(maxT):
clk.tick(fps)
for event in pg.event.get():
if event.type == pg.QUIT:
pg.quit()
self.update(agent, printLog)
self.draw(screen)
pg.display.flip()
if self.done:
print('---------------------------\nFinish. Final score = %d' % self.score)
return
else:
for i in range(maxT):
self.update(agent, printLog)
if self.done:
print('---------------------------\nFinish. Final score = %d' % self.score)
return
def humanPlay(self):
print('Use direction keys to move, z or x keys to take other actions (if they exist in this game).')
screen = pg.display.set_mode(self.size)
while not self.done:
evt = pg.event.wait()
if evt.type == pg.QUIT:
pg.quit()
self.done = True
elif evt.type == 3:  # 3 is pg.KEYUP in pygame 1.x (a key was released)
self.playerAct(self.parseKey(evt))
if self.done:
print('---------------------------\nFinish. Final score = %d' % self.score)
return
self.draw(screen)
pg.display.flip()
def parseKey(self, evt):
if evt.key == pg.K_z:
if self.nAction > 5:
return 1
else:
return 0
if evt.key == pg.K_x:
if self.nAction > 6:
return 2
else:
return 0
elif evt.key == pg.K_UP:
return self.nAction-1
elif evt.key == pg.K_DOWN:
return self.nAction-2
elif evt.key == pg.K_RIGHT:
return self.nAction - 3
elif evt.key == pg.K_LEFT:
return self.nAction - 4
else:
return 0
def playerAct(self, actionID):
self.stateObs, reward, self.done, debug = self.env.step(actionID)
self.score += reward
self.frame += 1
print('frame%d, action:%d, reward:%d, score:%d' % (self.frame, actionID, reward, self.score))
def update(self, agent, printLog=True):
action_id = agent.act(self.stateObs, self.env.action_space)
self.stateObs, reward, self.done, debug = self.env.step(action_id)
self.score += reward
self.frame += 1
if printLog:
print('frame%d, action:%d, reward:%d, score:%d' % (self.frame, action_id, reward, self.score))
def draw(self, screen):
buffer = pg.pixelcopy.make_surface(self.stateObs[:, :, :3])
pa = pg.PixelArray(buffer)
if self.transpose:
pa = pa.transpose()
screen.blit(pg.transform.scale(pa.make_surface(), self.size), (0, 0))
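# A minimal usage sketch (the game/level names are assumptions; any pair
# installed with gym_gvgai should work):
#   game = Game('aliens', 'lvl0')
#   game.humanPlay()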
|
py | 1a3dfc6e683aee1b15f7905e812a88d7105d4c3a | import collections
import copy
from builtins import range
from typing import Union
import numpy as np
from speclite.filters import FilterResponse, FilterSequence
from threeML.plugins.XYLike import XYLike
from threeML.utils.photometry import FilterSet, PhotometericObservation
__instrument_name = "Generic photometric data"
class BandNode(object):
def __init__(self, name, index, value, mask):
"""
Container class that allows for the shutting on and off of bands
"""
self._name = name
self._index = index
self._mask = mask
self._value = value
self._on = True
def _set_on(self, value=True):
self._on = value
self._mask[self._index] = self._on
def _get_on(self):
return self._on
on = property(_get_on, _set_on,
doc="Turn on or off the band. Use booleans, like: 'p.on = True' "
" or 'p.on = False'. ")
# Define property "fix"
def _set_off(self, value=True):
self._on = (not value)
self._mask[self._index] = self._on
def _get_off(self):
return not self._on
off = property(_get_off, _set_off,
doc="Turn on or off the band. Use booleans, like: 'p.off = True' "
" or 'p.off = False'. ")
def __repr__(self):
return f"on: {self._on}\nvalue: {self._value}"
class PhotometryLike(XYLike):
def __init__(self, name: str,
filters: Union[FilterSequence, FilterResponse],
observation: PhotometericObservation):
"""
The photometry plugin is designed to fit optical/IR/UV photometric data from a given
filter system. Filters are given in the form of speclite (http://speclite.readthedocs.io)
FilterResponse or FilterSequence objects. 3ML contains a vast number of filters via the SVO
VO service: http://svo2.cab.inta-csic.es/svo/theory/fps/ and can be accessed via:
from threeML.utils.photometry import get_photometric_filter_library
filter_lib = get_photometric_filter_library()
Bands can be turned on and off by setting
plugin.band_<band name>.on = False/True
plugin.band_<band name>.off = False/True
:param name: plugin name
:param filters: speclite filters
:param observation: A PhotometricObservation instance
"""
assert isinstance(
observation, PhotometericObservation), "Observation must be PhotometricObservation"
# convert names so that only the filters are present
# speclite uses '-' to separate instrument and filter
if isinstance(filters, FilterSequence):
# we have a filter sequence
names = [fname.split("-")[1] for fname in filters.names]
elif isinstance(filters, FilterResponse):
# we have a filter response
names = [filters.name.split("-")[1]]
filters = FilterSequence([filters])
else:
raise RuntimeError(
"filters must be a FilterResponse or a FilterSequence")
# since we may only have a few of the filters in use
# we will mask the filters not needed. They will stay fixed
# during the life of the plugin
assert observation.is_compatible_with_filter_set(
filters), "The data and filters are not congruent"
mask = observation.get_mask_from_filter_sequence(filters)
assert mask.sum() > 0, "There are no data in this observation!"
# create a filter set and use only the bands that were specified
self._filter_set = FilterSet(filters, mask)
self._magnitudes = np.zeros(self._filter_set.n_bands)
self._magnitude_errors = np.zeros(self._filter_set.n_bands)
# we want to fill the magnitudes in the same order as
# the filters
for i, band in enumerate(self._filter_set.filter_names):
self._magnitudes[i] = observation[band][0]
self._magnitude_errors[i] = observation[band][1]
self._observation = observation
# pass thru to XYLike
super(PhotometryLike, self).__init__(
name=name,
x=self._filter_set.effective_wavelength, # dummy x values
y=self._magnitudes,
yerr=self._magnitude_errors,
poisson_data=False,
)
# now set up the mask setting
for i, band in enumerate(self._filter_set.filter_names):
node = BandNode(band, i, (self._magnitudes[i], self._magnitude_errors[i]),
self._mask)
setattr(self, f"band_{band}", node)
@classmethod
def from_kwargs(cls, name, filters, **kwargs):
"""
Example:
grond = PhotometryLike.from_kwargs('GROND',
filters=threeML_filter_library.ESO.GROND,
g=(20.93,.23),
r=(20.6,0.12),
i=(20.4,.07),
z=(20.3,.04),
J=(20.0,.03),
H=(19.8,.03),
K=(19.7,.04))
Magnitudes and errors are entered as keyword arguments where the key is the filter name and
the argument is a tuple containing the data. You can exclude data for individual filters and
they will be ignored during the fit.
NOTE: PhotometryLike expects apparent AB magnitudes. Please calibrate your data to this system
:param name: plugin name
:param filters: speclite filters
:param kwargs: keyword args of band name and tuple(mag, mag error)
"""
return cls(name, filters, PhotometericObservation.from_kwargs(**kwargs))
@classmethod
def from_file(cls, name: str, filters: Union[FilterResponse, FilterSequence], file_name: str):
"""
Create the a PhotometryLike plugin from a saved HDF5 data file
:param name: plugin name
:param filters: speclite filters
:param file_name: name of the observation file
"""
return cls(name, filters, PhotometericObservation.from_hdf5(file_name))
@property
def magnitudes(self):
return self._magnitudes
@property
def magnitude_errors(self):
return self._magnitude_errors
def set_model(self, likelihood_model):
"""
set the likelihood model
:param likelihood_model:
:return:
"""
super(PhotometryLike, self).set_model(likelihood_model)
n_point_sources = self._likelihood_model.get_number_of_point_sources()
# sum up the differential
def differential_flux(energies):
fluxes = self._likelihood_model.get_point_source_fluxes(
0, energies, tag=self._tag
)
# If we have only one point source, this will never be executed
for i in range(1, n_point_sources):
fluxes += self._likelihood_model.get_point_source_fluxes(
i, energies, tag=self._tag
)
return fluxes
self._filter_set.set_model(differential_flux)
def _get_total_expectation(self):
return self._filter_set.ab_magnitudes()
def display_filters(self):
"""
display the filter transmission curves
:return:
"""
return self._filter_set.plot_filters()
def _new_plugin(self, name, x, y, yerr):
"""
construct a new PhotometryLike plugin. allows for returning a new plugin
from simulated data set while customizing the constructor
further down the inheritance tree
:param name: new name
:param x: new x
:param y: new y
:param yerr: new yerr
:return: new XYLike
"""
bands = collections.OrderedDict()
for i, band in enumerate(self._filter_set.filter_names):
bands[band] = (y[i], yerr[i])
new_photo = PhotometryLike.from_kwargs(
name, filters=self._filter_set.speclite_filters, **bands
)
# apply the current mask
new_photo._mask = copy.copy(self._mask)
return new_photo
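# A hedged usage sketch based on the docstrings above (the magnitude values
# are placeholders, not real data):
#   from threeML.utils.photometry import get_photometric_filter_library
#   filter_lib = get_photometric_filter_library()
#   grond = PhotometryLike.from_kwargs('GROND',
#                                      filters=filter_lib.ESO.GROND,
#                                      g=(20.93, .23),
#                                      r=(20.6, 0.12))
#   grond.band_g.off = True  # exclude the g band from a fit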
|
py | 1a3dfca9899d41fc6c20840f9dd98454e3c9c785 | """Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: training_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.tf_export import kwarg_only as _kwarg_only
from tensorflow.tools.docs import doc_controls as _doc_controls
_hard_routing_function_outputs = ["path_probability", "path"]
_HardRoutingFunctionOutput = _collections.namedtuple(
"HardRoutingFunction", _hard_routing_function_outputs)
@_dispatch.add_dispatch_list
@tf_export('hard_routing_function')
def hard_routing_function(input_data, tree_parameters, tree_biases, max_nodes, tree_depth, name=None):
r""" Chooses a single path for each instance in `input_data` and returns the leaf
the probability of the path and the path taken.
tree_depth: The depth of the decision tree.
input_data: The training batch's features as a 2-d tensor; `input_data[i][j]`
gives the j-th feature of the i-th input.
tree_parameters: `tree_parameters[i]` gives the weight of
the logistic regression model that translates from node features to
probabilities.
tree_biases: `tree_biases[i]` gives the bias of the logistic
regression model that translates from node features to
probabilities.
path_probability: `path_probability[i]` gives the probability of reaching each
node in `path[i]`.
path: `path[i][j]` gives the jth node in the path taken by the ith data
instance.
Args:
input_data: A `Tensor` of type `float32`.
tree_parameters: A `Tensor` of type `float32`.
tree_biases: A `Tensor` of type `float32`.
max_nodes: An `int`.
tree_depth: An `int`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (path_probability, path).
path_probability: A `Tensor` of type `float32`.
path: A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"HardRoutingFunction", name, _ctx._post_execution_callbacks,
input_data, tree_parameters, tree_biases, "max_nodes", max_nodes,
"tree_depth", tree_depth)
_result = _HardRoutingFunctionOutput._make(_result)
return _result
except _core._FallbackException:
try:
return hard_routing_function_eager_fallback(
input_data, tree_parameters, tree_biases, max_nodes=max_nodes,
tree_depth=tree_depth, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
hard_routing_function, input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases,
max_nodes=max_nodes,
tree_depth=tree_depth, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
max_nodes = _execute.make_int(max_nodes, "max_nodes")
tree_depth = _execute.make_int(tree_depth, "tree_depth")
try:
_, _, _op = _op_def_lib._apply_op_helper(
"HardRoutingFunction", input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases, max_nodes=max_nodes,
tree_depth=tree_depth, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
hard_routing_function, input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases, max_nodes=max_nodes,
tree_depth=tree_depth, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("max_nodes", _op.get_attr("max_nodes"), "tree_depth",
_op.get_attr("tree_depth"))
_execute.record_gradient(
"HardRoutingFunction", _inputs_flat, _attrs, _result, name)
_result = _HardRoutingFunctionOutput._make(_result)
return _result
def HardRoutingFunction(input_data, tree_parameters, tree_biases, max_nodes, tree_depth, name=None):
return hard_routing_function(input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, max_nodes=max_nodes, tree_depth=tree_depth, name=name)
HardRoutingFunction.__doc__ = hard_routing_function.__doc__
HardRoutingFunction = _doc_controls.do_not_generate_docs(_kwarg_only(HardRoutingFunction))
tf_export("raw_ops.HardRoutingFunction")(HardRoutingFunction)
def hard_routing_function_eager_fallback(input_data, tree_parameters, tree_biases, max_nodes, tree_depth, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function hard_routing_function
"""
_ctx = ctx if ctx else _context.context()
max_nodes = _execute.make_int(max_nodes, "max_nodes")
tree_depth = _execute.make_int(tree_depth, "tree_depth")
input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)
tree_parameters = _ops.convert_to_tensor(tree_parameters, _dtypes.float32)
tree_biases = _ops.convert_to_tensor(tree_biases, _dtypes.float32)
_inputs_flat = [input_data, tree_parameters, tree_biases]
_attrs = ("max_nodes", max_nodes, "tree_depth", tree_depth)
_result = _execute.execute(b"HardRoutingFunction", 2, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"HardRoutingFunction", _inputs_flat, _attrs, _result, name)
_result = _HardRoutingFunctionOutput._make(_result)
return _result
_ops.RegisterShape("HardRoutingFunction")(None)
_k_feature_gradient_outputs = ["routing_gradient", "data_gradient",
"weight_gradient"]
_KFeatureGradientOutput = _collections.namedtuple(
"KFeatureGradient", _k_feature_gradient_outputs)
@_dispatch.add_dispatch_list
@tf_export('k_feature_gradient')
def k_feature_gradient(input_data, tree_parameters, tree_biases, routes, layer_num, random_seed, name=None):
r""" Computes the derivative of the routing loss with respect to each decision
node. Each decision node is constrained to make a decision based on only
k features.
layer_num: The layer number of this tree.
random_seed: The base random seed.
input_data: The training batch's features as a 2-d tensor;
`input_data[i][j]` gives the j-th feature of the i-th input.
tree_parameters: `tree_parameters[i]` gives the weight of
the logistic regression model that translates from node features to
probabilities.
tree_biases: `tree_biases[i]` gives the bias of the logistic
regression model that translates from node features to
probabilities.
routes: The routes computed by routing_function_op.
routing_gradient: `routing_gradient` provides du / df, where u is the
routing function and f is the (vector of) decision functions. A decision
function f_i computes the routing decision at node i.
data_gradient: `data_gradient` provides df / dx, where f is the (vector
of) decision functions and x is a batch of data.
weights_gradient: `weights_gradient` provides df / dw, where f is the
(vector of) decision functions and w is the matrix of parameters that
determine how instances are routed through a tree.
f_i, the decision function at node i, is parameterized by t_i (parameters)
and b_i (bias) and takes data x as input. This op is called in
training_ops.py to compute du / df, and we use that to compute
du / dx = du / df * df / dx,
du / dt = du / df * df / dt, and
du / db = du / df * df / db.
Args:
input_data: A `Tensor` of type `float32`.
tree_parameters: A `Tensor` of type `float32`.
tree_biases: A `Tensor` of type `float32`.
routes: A `Tensor` of type `float32`.
layer_num: An `int`.
random_seed: An `int`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (routing_gradient, data_gradient, weight_gradient).
routing_gradient: A `Tensor` of type `float32`.
data_gradient: A `Tensor` of type `float32`.
weight_gradient: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"KFeatureGradient", name, _ctx._post_execution_callbacks, input_data,
tree_parameters, tree_biases, routes, "layer_num", layer_num,
"random_seed", random_seed)
_result = _KFeatureGradientOutput._make(_result)
return _result
except _core._FallbackException:
try:
return k_feature_gradient_eager_fallback(
input_data, tree_parameters, tree_biases, routes,
layer_num=layer_num, random_seed=random_seed, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
k_feature_gradient, input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases, routes=routes,
layer_num=layer_num,
random_seed=random_seed, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
layer_num = _execute.make_int(layer_num, "layer_num")
random_seed = _execute.make_int(random_seed, "random_seed")
try:
_, _, _op = _op_def_lib._apply_op_helper(
"KFeatureGradient", input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases, routes=routes,
layer_num=layer_num, random_seed=random_seed,
name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
k_feature_gradient, input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases, routes=routes,
layer_num=layer_num, random_seed=random_seed,
name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("layer_num", _op.get_attr("layer_num"), "random_seed",
_op.get_attr("random_seed"))
_execute.record_gradient(
"KFeatureGradient", _inputs_flat, _attrs, _result, name)
_result = _KFeatureGradientOutput._make(_result)
return _result
def KFeatureGradient(input_data, tree_parameters, tree_biases, routes, layer_num, random_seed, name=None):
return k_feature_gradient(input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, routes=routes, layer_num=layer_num, random_seed=random_seed, name=name)
KFeatureGradient.__doc__ = k_feature_gradient.__doc__
KFeatureGradient = _doc_controls.do_not_generate_docs(_kwarg_only(KFeatureGradient))
tf_export("raw_ops.KFeatureGradient")(KFeatureGradient)
def k_feature_gradient_eager_fallback(input_data, tree_parameters, tree_biases, routes, layer_num, random_seed, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function k_feature_gradient
"""
_ctx = ctx if ctx else _context.context()
layer_num = _execute.make_int(layer_num, "layer_num")
random_seed = _execute.make_int(random_seed, "random_seed")
input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)
tree_parameters = _ops.convert_to_tensor(tree_parameters, _dtypes.float32)
tree_biases = _ops.convert_to_tensor(tree_biases, _dtypes.float32)
routes = _ops.convert_to_tensor(routes, _dtypes.float32)
_inputs_flat = [input_data, tree_parameters, tree_biases, routes]
_attrs = ("layer_num", layer_num, "random_seed", random_seed)
_result = _execute.execute(b"KFeatureGradient", 3, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"KFeatureGradient", _inputs_flat, _attrs, _result, name)
_result = _KFeatureGradientOutput._make(_result)
return _result
_ops.RegisterShape("KFeatureGradient")(None)
@_dispatch.add_dispatch_list
@tf_export('k_feature_routing_function')
def k_feature_routing_function(input_data, tree_parameters, tree_biases, layer_num, max_nodes, num_features_per_node, random_seed, name=None):
r""" Returns the probability that each input will reach each leaf node. Each
decision is made based on k features.
layer_num: The layer number of this tree.
max_nodes: The number of nodes in the tree.
num_features_per_node: The number of features each node can use to make a
decision.
random_seed: The base random seed.
input_data: The training batch's features as a 2-d tensor; `input_data[i][j]`
gives the j-th feature of the i-th input.
tree_parameters: `tree_parameters[i]` gives the weight of
the logistic regression model that translates from node features to
probabilities.
tree_biases: `tree_biases[i]` gives the bias of the logistic
regression model that translates from node features to
probabilities.
tree_features: `tree_features[i]` gives the decision feature for node i.
probabilities: `probabilities[i][j]` is the probability that input i
will reach node j.
Args:
input_data: A `Tensor` of type `float32`.
tree_parameters: A `Tensor` of type `float32`.
tree_biases: A `Tensor` of type `float32`.
layer_num: An `int`.
max_nodes: An `int`.
num_features_per_node: An `int`.
random_seed: An `int`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"KFeatureRoutingFunction", name, _ctx._post_execution_callbacks,
input_data, tree_parameters, tree_biases, "layer_num", layer_num,
"max_nodes", max_nodes, "num_features_per_node",
num_features_per_node, "random_seed", random_seed)
return _result
except _core._FallbackException:
try:
return k_feature_routing_function_eager_fallback(
input_data, tree_parameters, tree_biases, layer_num=layer_num,
max_nodes=max_nodes, num_features_per_node=num_features_per_node,
random_seed=random_seed, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
k_feature_routing_function, input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases,
layer_num=layer_num,
max_nodes=max_nodes,
num_features_per_node=num_features_per_node,
random_seed=random_seed, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
layer_num = _execute.make_int(layer_num, "layer_num")
max_nodes = _execute.make_int(max_nodes, "max_nodes")
num_features_per_node = _execute.make_int(num_features_per_node, "num_features_per_node")
random_seed = _execute.make_int(random_seed, "random_seed")
try:
_, _, _op = _op_def_lib._apply_op_helper(
"KFeatureRoutingFunction", input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases,
layer_num=layer_num, max_nodes=max_nodes,
num_features_per_node=num_features_per_node,
random_seed=random_seed, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
k_feature_routing_function, input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases,
layer_num=layer_num,
max_nodes=max_nodes,
num_features_per_node=num_features_per_node,
random_seed=random_seed, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("layer_num", _op.get_attr("layer_num"), "max_nodes",
_op.get_attr("max_nodes"), "num_features_per_node",
_op.get_attr("num_features_per_node"), "random_seed",
_op.get_attr("random_seed"))
_execute.record_gradient(
"KFeatureRoutingFunction", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def KFeatureRoutingFunction(input_data, tree_parameters, tree_biases, layer_num, max_nodes, num_features_per_node, random_seed, name=None):
return k_feature_routing_function(input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, layer_num=layer_num, max_nodes=max_nodes, num_features_per_node=num_features_per_node, random_seed=random_seed, name=name)
KFeatureRoutingFunction.__doc__ = k_feature_routing_function.__doc__
KFeatureRoutingFunction = _doc_controls.do_not_generate_docs(_kwarg_only(KFeatureRoutingFunction))
tf_export("raw_ops.KFeatureRoutingFunction")(KFeatureRoutingFunction)
def k_feature_routing_function_eager_fallback(input_data, tree_parameters, tree_biases, layer_num, max_nodes, num_features_per_node, random_seed, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function k_feature_routing_function
"""
_ctx = ctx if ctx else _context.context()
layer_num = _execute.make_int(layer_num, "layer_num")
max_nodes = _execute.make_int(max_nodes, "max_nodes")
num_features_per_node = _execute.make_int(num_features_per_node, "num_features_per_node")
random_seed = _execute.make_int(random_seed, "random_seed")
input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)
tree_parameters = _ops.convert_to_tensor(tree_parameters, _dtypes.float32)
tree_biases = _ops.convert_to_tensor(tree_biases, _dtypes.float32)
_inputs_flat = [input_data, tree_parameters, tree_biases]
_attrs = ("layer_num", layer_num, "max_nodes", max_nodes,
"num_features_per_node", num_features_per_node, "random_seed", random_seed)
_result = _execute.execute(b"KFeatureRoutingFunction", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"KFeatureRoutingFunction", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("KFeatureRoutingFunction")(None)
@_dispatch.add_dispatch_list
@tf_export('routing_function')
def routing_function(input_data, tree_parameters, tree_biases, max_nodes, name=None):
r""" Returns the probability that each input will reach each leaf node.
max_nodes: The number of nodes in the tree.
input_data: The training batch's features as a 2-d tensor; `input_data[i][j]`
gives the j-th feature of the i-th input.
tree_parameters: `tree_parameters[i]` gives the weight of
the logistic regression model that translates from node features to
probabilities.
tree_biases: `tree_biases[i]` gives the bias of the logistic
regression model that translates from node features to
probabilities.
probabilities: `probabilities[i][j]` is the probability that input i
will reach node j.
Args:
input_data: A `Tensor` of type `float32`.
tree_parameters: A `Tensor` of type `float32`.
tree_biases: A `Tensor` of type `float32`.
max_nodes: An `int`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"RoutingFunction", name, _ctx._post_execution_callbacks, input_data,
tree_parameters, tree_biases, "max_nodes", max_nodes)
return _result
except _core._FallbackException:
try:
return routing_function_eager_fallback(
input_data, tree_parameters, tree_biases, max_nodes=max_nodes,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
routing_function, input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases, max_nodes=max_nodes,
name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
max_nodes = _execute.make_int(max_nodes, "max_nodes")
try:
_, _, _op = _op_def_lib._apply_op_helper(
"RoutingFunction", input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases, max_nodes=max_nodes,
name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
routing_function, input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases, max_nodes=max_nodes,
name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("max_nodes", _op.get_attr("max_nodes"))
_execute.record_gradient(
"RoutingFunction", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def RoutingFunction(input_data, tree_parameters, tree_biases, max_nodes, name=None):
return routing_function(input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, max_nodes=max_nodes, name=name)
RoutingFunction.__doc__ = routing_function.__doc__
RoutingFunction = _doc_controls.do_not_generate_docs(_kwarg_only(RoutingFunction))
tf_export("raw_ops.RoutingFunction")(RoutingFunction)
def routing_function_eager_fallback(input_data, tree_parameters, tree_biases, max_nodes, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function routing_function
"""
_ctx = ctx if ctx else _context.context()
max_nodes = _execute.make_int(max_nodes, "max_nodes")
input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)
tree_parameters = _ops.convert_to_tensor(tree_parameters, _dtypes.float32)
tree_biases = _ops.convert_to_tensor(tree_biases, _dtypes.float32)
_inputs_flat = [input_data, tree_parameters, tree_biases]
_attrs = ("max_nodes", max_nodes)
_result = _execute.execute(b"RoutingFunction", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"RoutingFunction", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("RoutingFunction")(None)
@_dispatch.add_dispatch_list
@tf_export('routing_gradient')
def routing_gradient(input_data, tree_parameters, tree_biases, routes, max_nodes, name=None):
r""" Computes the derivative of the routing loss with respect to each decision
node.
max_nodes: The number of nodes in the tree.
tree_parameters: `tree_parameters[i]` gives the weight of
the logistic regression model that translates from node features to
probabilities.
tree_biases: `tree_biases[i]` gives the bias of the logistic
regression model that translates from node features to
probabilities.
routes: The routes computed by routing_function_op.
routing_gradient: `routing_gradient` provides du / df, where u is the routing
function and f is the (vector of) decision functions. A decision function
f_i computes the routing decision at node i.
f_i is parameterized by t_i (parameters) and b_i (bias) and takes data x as
input. This op is called in training_ops.py to compute du / df, and we use
that to compute
du / dx = du / df * df / dx,
du / dt = du / df * df / dt, and
du / db = du / df * df / db.
Args:
input_data: A `Tensor` of type `float32`.
tree_parameters: A `Tensor` of type `float32`.
tree_biases: A `Tensor` of type `float32`.
routes: A `Tensor` of type `float32`.
max_nodes: An `int`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"RoutingGradient", name, _ctx._post_execution_callbacks, input_data,
tree_parameters, tree_biases, routes, "max_nodes", max_nodes)
return _result
except _core._FallbackException:
try:
return routing_gradient_eager_fallback(
input_data, tree_parameters, tree_biases, routes,
max_nodes=max_nodes, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
routing_gradient, input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases, routes=routes,
max_nodes=max_nodes, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
max_nodes = _execute.make_int(max_nodes, "max_nodes")
try:
_, _, _op = _op_def_lib._apply_op_helper(
"RoutingGradient", input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases, routes=routes,
max_nodes=max_nodes, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
routing_gradient, input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases, routes=routes,
max_nodes=max_nodes, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("max_nodes", _op.get_attr("max_nodes"))
_execute.record_gradient(
"RoutingGradient", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def RoutingGradient(input_data, tree_parameters, tree_biases, routes, max_nodes, name=None):
return routing_gradient(input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, routes=routes, max_nodes=max_nodes, name=name)
RoutingGradient.__doc__ = routing_gradient.__doc__
RoutingGradient = _doc_controls.do_not_generate_docs(_kwarg_only(RoutingGradient))
tf_export("raw_ops.RoutingGradient")(RoutingGradient)
def routing_gradient_eager_fallback(input_data, tree_parameters, tree_biases, routes, max_nodes, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function routing_gradient
"""
_ctx = ctx if ctx else _context.context()
max_nodes = _execute.make_int(max_nodes, "max_nodes")
input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)
tree_parameters = _ops.convert_to_tensor(tree_parameters, _dtypes.float32)
tree_biases = _ops.convert_to_tensor(tree_biases, _dtypes.float32)
routes = _ops.convert_to_tensor(routes, _dtypes.float32)
_inputs_flat = [input_data, tree_parameters, tree_biases, routes]
_attrs = ("max_nodes", max_nodes)
_result = _execute.execute(b"RoutingGradient", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"RoutingGradient", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("RoutingGradient")(None)
_stochastic_hard_routing_function_outputs = ["path_probability", "path"]
_StochasticHardRoutingFunctionOutput = _collections.namedtuple(
"StochasticHardRoutingFunction",
_stochastic_hard_routing_function_outputs)
@_dispatch.add_dispatch_list
@tf_export('stochastic_hard_routing_function')
def stochastic_hard_routing_function(input_data, tree_parameters, tree_biases, tree_depth, random_seed, name=None):
r""" Samples a path for each instance in `input_data` and returns the
probability of the path and the path taken.
tree_depth: The depth of the decision tree.
random_seed: The base random seed.
input_data: The training batch's features as a 2-d tensor; `input_data[i][j]`
gives the j-th feature of the i-th input.
tree_parameters: `tree_parameters[i]` gives the weight of
the logistic regression model that translates from node features to
probabilities.
tree_biases: `tree_biases[i]` gives the bias of the logistic
regression model that translates from node features to
probabilities.
path_probability: `path_probability[i]` gives the probability of reaching each
node in `path[i]`.
path: `path[i][j]` gives the jth node in the path taken by the ith data
instance.
Args:
input_data: A `Tensor` of type `float32`.
tree_parameters: A `Tensor` of type `float32`.
tree_biases: A `Tensor` of type `float32`.
tree_depth: An `int`.
random_seed: An `int`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (path_probability, path).
path_probability: A `Tensor` of type `float32`.
path: A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"StochasticHardRoutingFunction", name, _ctx._post_execution_callbacks,
input_data, tree_parameters, tree_biases, "tree_depth", tree_depth,
"random_seed", random_seed)
_result = _StochasticHardRoutingFunctionOutput._make(_result)
return _result
except _core._FallbackException:
try:
return stochastic_hard_routing_function_eager_fallback(
input_data, tree_parameters, tree_biases, tree_depth=tree_depth,
random_seed=random_seed, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
stochastic_hard_routing_function, input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases,
tree_depth=tree_depth,
random_seed=random_seed,
name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
tree_depth = _execute.make_int(tree_depth, "tree_depth")
random_seed = _execute.make_int(random_seed, "random_seed")
try:
_, _, _op = _op_def_lib._apply_op_helper(
"StochasticHardRoutingFunction", input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases,
tree_depth=tree_depth,
random_seed=random_seed, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
stochastic_hard_routing_function, input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases,
tree_depth=tree_depth,
random_seed=random_seed,
name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("tree_depth", _op.get_attr("tree_depth"), "random_seed",
_op.get_attr("random_seed"))
_execute.record_gradient(
"StochasticHardRoutingFunction", _inputs_flat, _attrs, _result, name)
_result = _StochasticHardRoutingFunctionOutput._make(_result)
return _result
def StochasticHardRoutingFunction(input_data, tree_parameters, tree_biases, tree_depth, random_seed, name=None):
return stochastic_hard_routing_function(input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, tree_depth=tree_depth, random_seed=random_seed, name=name)
StochasticHardRoutingFunction.__doc__ = stochastic_hard_routing_function.__doc__
StochasticHardRoutingFunction = _doc_controls.do_not_generate_docs(_kwarg_only(StochasticHardRoutingFunction))
tf_export("raw_ops.StochasticHardRoutingFunction")(StochasticHardRoutingFunction)
def stochastic_hard_routing_function_eager_fallback(input_data, tree_parameters, tree_biases, tree_depth, random_seed, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function stochastic_hard_routing_function
"""
_ctx = ctx if ctx else _context.context()
tree_depth = _execute.make_int(tree_depth, "tree_depth")
random_seed = _execute.make_int(random_seed, "random_seed")
input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)
tree_parameters = _ops.convert_to_tensor(tree_parameters, _dtypes.float32)
tree_biases = _ops.convert_to_tensor(tree_biases, _dtypes.float32)
_inputs_flat = [input_data, tree_parameters, tree_biases]
_attrs = ("tree_depth", tree_depth, "random_seed", random_seed)
_result = _execute.execute(b"StochasticHardRoutingFunction", 2,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"StochasticHardRoutingFunction", _inputs_flat, _attrs, _result, name)
_result = _StochasticHardRoutingFunctionOutput._make(_result)
return _result
_ops.RegisterShape("StochasticHardRoutingFunction")(None)
_stochastic_hard_routing_gradient_outputs = ["routing_gradient",
"data_gradient",
"parameter_gradient",
"bias_gradient"]
_StochasticHardRoutingGradientOutput = _collections.namedtuple(
"StochasticHardRoutingGradient",
_stochastic_hard_routing_gradient_outputs)
@_dispatch.add_dispatch_list
@tf_export('stochastic_hard_routing_gradient')
def stochastic_hard_routing_gradient(input_data, tree_parameters, tree_biases, path_probability, path, tree_depth, name=None):
r""" Computes the derivative of the routing loss with respect to each decision
node.
tree_depth: The depth of the decision tree.
input_data: The training batch's features as a 2-d tensor; `input_data[i][j]`
gives the j-th feature of the i-th input
tree_parameters: `tree_parameters[i]` gives the weight of
the logistic regression model that translates from node features to
probabilities.
tree_biases: `tree_biases[i]` gives the bias of the logistic
regression model that translates from node features to
probabilities.
path_probability: `path_probability[i]` gives the probability of reaching each
node in `path[i]`.
path: `path[i][j]` gives the jth node in the path taken by the ith data
instance.
routing_gradient: `routing_gradient` provides du / df, where u is the routing
function and f is the (vector of) decision functions. A decision function
f_i computes the routing decision at node i.
data_gradient: `data_gradient` provides df / dx, where f is the (vector
of) decision functions and x is a batch of data.
parameter_gradient: `parameter_gradient` provides df / dw, where f is the
(vector of) decision functions and w is the matrix of parameters that
determine how instances are routed through a tree.
bias_gradient: `bias_gradient` provides df / db, where f is the
(vector of) decision functions and b is the vector of bias parameters that
determine how instances are routed through a tree.
f_i is parameterized by t_i (parameters) and b_i (bias) and takes data x as
input. This op is called in training_ops.py to compute du / df, and we use
that to compute
du / dx = du / df * df / dx,
du / dt = du / df * df / dt, and
du / db = du / df * df / db.
Args:
input_data: A `Tensor` of type `float32`.
tree_parameters: A `Tensor` of type `float32`.
tree_biases: A `Tensor` of type `float32`.
path_probability: A `Tensor` of type `float32`.
path: A `Tensor` of type `int32`.
tree_depth: An `int`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (routing_gradient, data_gradient, parameter_gradient, bias_gradient).
routing_gradient: A `Tensor` of type `float32`.
data_gradient: A `Tensor` of type `float32`.
parameter_gradient: A `Tensor` of type `float32`.
bias_gradient: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"StochasticHardRoutingGradient", name, _ctx._post_execution_callbacks,
input_data, tree_parameters, tree_biases, path_probability, path,
"tree_depth", tree_depth)
_result = _StochasticHardRoutingGradientOutput._make(_result)
return _result
except _core._FallbackException:
try:
return stochastic_hard_routing_gradient_eager_fallback(
input_data, tree_parameters, tree_biases, path_probability, path,
tree_depth=tree_depth, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
stochastic_hard_routing_gradient, input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases,
path_probability=path_probability,
path=path,
tree_depth=tree_depth,
name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
tree_depth = _execute.make_int(tree_depth, "tree_depth")
try:
_, _, _op = _op_def_lib._apply_op_helper(
"StochasticHardRoutingGradient", input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases,
path_probability=path_probability,
path=path, tree_depth=tree_depth,
name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
stochastic_hard_routing_gradient, input_data=input_data,
tree_parameters=tree_parameters,
tree_biases=tree_biases,
path_probability=path_probability,
path=path, tree_depth=tree_depth,
name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("tree_depth", _op.get_attr("tree_depth"))
_execute.record_gradient(
"StochasticHardRoutingGradient", _inputs_flat, _attrs, _result, name)
_result = _StochasticHardRoutingGradientOutput._make(_result)
return _result
def StochasticHardRoutingGradient(input_data, tree_parameters, tree_biases, path_probability, path, tree_depth, name=None):
return stochastic_hard_routing_gradient(input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, path_probability=path_probability, path=path, tree_depth=tree_depth, name=name)
StochasticHardRoutingGradient.__doc__ = stochastic_hard_routing_gradient.__doc__
StochasticHardRoutingGradient = _doc_controls.do_not_generate_docs(_kwarg_only(StochasticHardRoutingGradient))
tf_export("raw_ops.StochasticHardRoutingGradient")(StochasticHardRoutingGradient)
def stochastic_hard_routing_gradient_eager_fallback(input_data, tree_parameters, tree_biases, path_probability, path, tree_depth, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function stochastic_hard_routing_gradient
"""
_ctx = ctx if ctx else _context.context()
tree_depth = _execute.make_int(tree_depth, "tree_depth")
input_data = _ops.convert_to_tensor(input_data, _dtypes.float32)
tree_parameters = _ops.convert_to_tensor(tree_parameters, _dtypes.float32)
tree_biases = _ops.convert_to_tensor(tree_biases, _dtypes.float32)
path_probability = _ops.convert_to_tensor(path_probability, _dtypes.float32)
path = _ops.convert_to_tensor(path, _dtypes.int32)
_inputs_flat = [input_data, tree_parameters, tree_biases, path_probability, path]
_attrs = ("tree_depth", tree_depth)
_result = _execute.execute(b"StochasticHardRoutingGradient", 4,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"StochasticHardRoutingGradient", _inputs_flat, _attrs, _result, name)
_result = _StochasticHardRoutingGradientOutput._make(_result)
return _result
_ops.RegisterShape("StochasticHardRoutingGradient")(None)
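# Hedged usage sketch (not part of the generated wrapper; the call below and
# its shapes are assumptions drawn from the docstring above).  The op returns
# du/df plus the per-node Jacobians df/dx, df/dw and df/db, which the training
# code (training_ops.py, per the docstring) chains together:
#
#   du_df, df_dx, df_dw, df_db = stochastic_hard_routing_gradient(
#       input_data, tree_parameters, tree_biases,
#       path_probability, path, tree_depth=4)
#   # du/dx = du/df * df/dx,   du/dt = du/df * df/dt,   du/db = du/df * df/db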
@_dispatch.add_dispatch_list
@tf_export('unpack_path')
def unpack_path(path, path_values, name=None):
r""" Takes a batch of paths through a tree and a batch of values along those paths
and returns a batch_size by num_nodes encoding of the path values.
path: `path[i][j]` gives the jth node in the path taken by the ith data
instance.
path_values: `path_values[i][j]` gives the value associated with node j in the
path defined by the ith instance
unpacked_paths: `unpacked_paths[i][path[i][k]]` is path_values[i][k] for k in
[0, tree_depth). All other elements of unpacked_paths are zero.
Args:
path: A `Tensor` of type `int32`.
path_values: A `Tensor` of type `float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"UnpackPath", name, _ctx._post_execution_callbacks, path, path_values)
return _result
except _core._FallbackException:
try:
return unpack_path_eager_fallback(
path, path_values, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
unpack_path, path=path, path_values=path_values, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
try:
_, _, _op = _op_def_lib._apply_op_helper(
"UnpackPath", path=path, path_values=path_values, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
unpack_path, path=path, path_values=path_values, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"UnpackPath", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def UnpackPath(path, path_values, name=None):
return unpack_path(path=path, path_values=path_values, name=name)
UnpackPath.__doc__ = unpack_path.__doc__
UnpackPath = _doc_controls.do_not_generate_docs(_kwarg_only(UnpackPath))
tf_export("raw_ops.UnpackPath")(UnpackPath)
def unpack_path_eager_fallback(path, path_values, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function unpack_path
"""
_ctx = ctx if ctx else _context.context()
path = _ops.convert_to_tensor(path, _dtypes.int32)
path_values = _ops.convert_to_tensor(path_values, _dtypes.float32)
_inputs_flat = [path, path_values]
_attrs = None
_result = _execute.execute(b"UnpackPath", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"UnpackPath", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("UnpackPath")(None)
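# Illustrative example for unpack_path (shapes are an assumption based on its
# docstring): with one instance, tree_depth 3 and 5 tree nodes,
#
#   path          = [[0, 2, 4]]          # nodes visited by instance 0
#   path_values   = [[0.9, 0.6, 0.3]]    # value attached to each visited node
#   unpacked_path = [[0.9, 0.0, 0.6, 0.0, 0.3]]
#
# i.e. unpacked_path[i][path[i][k]] == path_values[i][k] for k in
# [0, tree_depth), and every other entry is zero.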
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "HardRoutingFunction"
# input_arg {
# name: "input_data"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_parameters"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_biases"
# type: DT_FLOAT
# }
# output_arg {
# name: "path_probability"
# type: DT_FLOAT
# }
# output_arg {
# name: "path"
# type: DT_INT32
# }
# attr {
# name: "max_nodes"
# type: "int"
# }
# attr {
# name: "tree_depth"
# type: "int"
# }
# }
# op {
# name: "KFeatureGradient"
# input_arg {
# name: "input_data"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_parameters"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_biases"
# type: DT_FLOAT
# }
# input_arg {
# name: "routes"
# type: DT_FLOAT
# }
# output_arg {
# name: "routing_gradient"
# type: DT_FLOAT
# }
# output_arg {
# name: "data_gradient"
# type: DT_FLOAT
# }
# output_arg {
# name: "weight_gradient"
# type: DT_FLOAT
# }
# attr {
# name: "layer_num"
# type: "int"
# }
# attr {
# name: "random_seed"
# type: "int"
# }
# }
# op {
# name: "KFeatureRoutingFunction"
# input_arg {
# name: "input_data"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_parameters"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_biases"
# type: DT_FLOAT
# }
# output_arg {
# name: "probabilities"
# type: DT_FLOAT
# }
# attr {
# name: "layer_num"
# type: "int"
# }
# attr {
# name: "max_nodes"
# type: "int"
# }
# attr {
# name: "num_features_per_node"
# type: "int"
# }
# attr {
# name: "random_seed"
# type: "int"
# }
# }
# op {
# name: "RoutingFunction"
# input_arg {
# name: "input_data"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_parameters"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_biases"
# type: DT_FLOAT
# }
# output_arg {
# name: "probabilities"
# type: DT_FLOAT
# }
# attr {
# name: "max_nodes"
# type: "int"
# }
# }
# op {
# name: "RoutingGradient"
# input_arg {
# name: "input_data"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_parameters"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_biases"
# type: DT_FLOAT
# }
# input_arg {
# name: "routes"
# type: DT_FLOAT
# }
# output_arg {
# name: "routing_gradient"
# type: DT_FLOAT
# }
# attr {
# name: "max_nodes"
# type: "int"
# }
# }
# op {
# name: "StochasticHardRoutingFunction"
# input_arg {
# name: "input_data"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_parameters"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_biases"
# type: DT_FLOAT
# }
# output_arg {
# name: "path_probability"
# type: DT_FLOAT
# }
# output_arg {
# name: "path"
# type: DT_INT32
# }
# attr {
# name: "tree_depth"
# type: "int"
# }
# attr {
# name: "random_seed"
# type: "int"
# }
# }
# op {
# name: "StochasticHardRoutingGradient"
# input_arg {
# name: "input_data"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_parameters"
# type: DT_FLOAT
# }
# input_arg {
# name: "tree_biases"
# type: DT_FLOAT
# }
# input_arg {
# name: "path_probability"
# type: DT_FLOAT
# }
# input_arg {
# name: "path"
# type: DT_INT32
# }
# output_arg {
# name: "routing_gradient"
# type: DT_FLOAT
# }
# output_arg {
# name: "data_gradient"
# type: DT_FLOAT
# }
# output_arg {
# name: "parameter_gradient"
# type: DT_FLOAT
# }
# output_arg {
# name: "bias_gradient"
# type: DT_FLOAT
# }
# attr {
# name: "tree_depth"
# type: "int"
# }
# }
# op {
# name: "UnpackPath"
# input_arg {
# name: "path"
# type: DT_INT32
# }
# input_arg {
# name: "path_values"
# type: DT_FLOAT
# }
# output_arg {
# name: "unpacked_path"
# type: DT_FLOAT
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n\220\001\n\023HardRoutingFunction\022\016\n\ninput_data\030\001\022\023\n\017tree_parameters\030\001\022\017\n\013tree_biases\030\001\032\024\n\020path_probability\030\001\032\010\n\004path\030\003\"\020\n\tmax_nodes\022\003int\"\021\n\ntree_depth\022\003int\n\270\001\n\020KFeatureGradient\022\016\n\ninput_data\030\001\022\023\n\017tree_parameters\030\001\022\017\n\013tree_biases\030\001\022\n\n\006routes\030\001\032\024\n\020routing_gradient\030\001\032\021\n\rdata_gradient\030\001\032\023\n\017weight_gradient\030\001\"\020\n\tlayer_num\022\003int\"\022\n\013random_seed\022\003int\n\270\001\n\027KFeatureRoutingFunction\022\016\n\ninput_data\030\001\022\023\n\017tree_parameters\030\001\022\017\n\013tree_biases\030\001\032\021\n\rprobabilities\030\001\"\020\n\tlayer_num\022\003int\"\020\n\tmax_nodes\022\003int\"\034\n\025num_features_per_node\022\003int\"\022\n\013random_seed\022\003int\nl\n\017RoutingFunction\022\016\n\ninput_data\030\001\022\023\n\017tree_parameters\030\001\022\017\n\013tree_biases\030\001\032\021\n\rprobabilities\030\001\"\020\n\tmax_nodes\022\003int\n{\n\017RoutingGradient\022\016\n\ninput_data\030\001\022\023\n\017tree_parameters\030\001\022\017\n\013tree_biases\030\001\022\n\n\006routes\030\001\032\024\n\020routing_gradient\030\001\"\020\n\tmax_nodes\022\003int\n\234\001\n\035StochasticHardRoutingFunction\022\016\n\ninput_data\030\001\022\023\n\017tree_parameters\030\001\022\017\n\013tree_biases\030\001\032\024\n\020path_probability\030\001\032\010\n\004path\030\003\"\021\n\ntree_depth\022\003int\"\022\n\013random_seed\022\003int\n\334\001\n\035StochasticHardRoutingGradient\022\016\n\ninput_data\030\001\022\023\n\017tree_parameters\030\001\022\017\n\013tree_biases\030\001\022\024\n\020path_probability\030\001\022\010\n\004path\030\003\032\024\n\020routing_gradient\030\001\032\021\n\rdata_gradient\030\001\032\026\n\022parameter_gradient\030\001\032\021\n\rbias_gradient\030\001\"\021\n\ntree_depth\022\003int\n:\n\nUnpackPath\022\010\n\004path\030\003\022\017\n\013path_values\030\001\032\021\n\runpacked_path\030\001")
|
py | 1a3dfdac2f9cc67c025006a2a480d01f2d5016fc | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
import copy
import json
from collections import defaultdict
from django.dispatch import receiver
from django.template.loader import get_template
from django.urls import resolve, reverse
from django.utils.html import escape
from django.utils.translation import gettext_lazy as _
from pretix.base.models import Event, Order
from pretix.base.signals import (
event_copy_data, item_copy_data, logentry_display, logentry_object_link,
register_data_exporters,
)
from pretix.control.signals import (
item_forms, nav_event, order_info, order_position_buttons,
)
from pretix.plugins.badges.forms import BadgeItemForm
from pretix.plugins.badges.models import BadgeItem, BadgeLayout
@receiver(nav_event, dispatch_uid="badges_nav")
def control_nav_import(sender, request=None, **kwargs):
url = resolve(request.path_info)
p = (
request.user.has_event_permission(request.organizer, request.event, 'can_change_settings', request)
or request.user.has_event_permission(request.organizer, request.event, 'can_view_orders', request)
)
if not p:
return []
return [
{
'label': _('Badges'),
'url': reverse('plugins:badges:index', kwargs={
'event': request.event.slug,
'organizer': request.event.organizer.slug,
}),
'active': url.namespace == 'plugins:badges',
'icon': 'id-card',
}
]
@receiver(item_forms, dispatch_uid="badges_item_forms")
def control_item_forms(sender, request, item, **kwargs):
try:
inst = BadgeItem.objects.get(item=item)
except BadgeItem.DoesNotExist:
inst = BadgeItem(item=item)
return BadgeItemForm(
instance=inst,
event=sender,
data=(request.POST if request.method == "POST" else None),
prefix="badgeitem"
)
@receiver(item_copy_data, dispatch_uid="badges_item_copy")
def copy_item(sender, source, target, **kwargs):
try:
inst = BadgeItem.objects.get(item=source)
BadgeItem.objects.create(item=target, layout=inst.layout)
except BadgeItem.DoesNotExist:
pass
@receiver(signal=event_copy_data, dispatch_uid="badges_copy_data")
def event_copy_data_receiver(sender, other, question_map, item_map, **kwargs):
layout_map = {}
for bl in other.badge_layouts.all():
oldid = bl.pk
bl = copy.copy(bl)
bl.pk = None
bl.event = sender
layout = json.loads(bl.layout)
for o in layout:
if o['type'] == 'textarea':
if o['content'].startswith('question_'):
newq = question_map.get(int(o['content'][9:]))
if newq:
o['content'] = 'question_{}'.format(newq.pk)
bl.save()
if bl.background and bl.background.name:
bl.background.save('background.pdf', bl.background)
layout_map[oldid] = bl
for bi in BadgeItem.objects.filter(item__event=other):
BadgeItem.objects.create(item=item_map.get(bi.item_id), layout=layout_map.get(bi.layout_id))
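# Note on the layout format assumed by the copy logic above: ``bl.layout`` is a
# JSON list of placed objects, and question references live in the ``content``
# string of text areas, e.g. (hypothetical values):
#
#   [{"type": "textarea", "content": "question_17", ...}, ...]
#
# so copying an event only needs to rewrite the numeric suffix with the new
# question's primary key looked up in ``question_map``.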
@receiver(register_data_exporters, dispatch_uid="badges_export_all")
def register_pdf(sender, **kwargs):
from .exporters import BadgeExporter
return BadgeExporter
def _cached_rendermap(event):
if hasattr(event, '_cached_renderermap'):
return event._cached_renderermap
renderermap = {
bi.item_id: bi.layout_id
for bi in BadgeItem.objects.select_related('layout').filter(item__event=event)
}
try:
default_renderer = event.badge_layouts.get(default=True).pk
except BadgeLayout.DoesNotExist:
default_renderer = None
event._cached_renderermap = defaultdict(lambda: default_renderer)
event._cached_renderermap.update(renderermap)
return event._cached_renderermap
@receiver(order_position_buttons, dispatch_uid="badges_control_order_buttons")
def control_order_position_info(sender: Event, position, request, order: Order, **kwargs):
if _cached_rendermap(sender)[position.item_id] is None:
return ''
template = get_template('pretixplugins/badges/control_order_position_buttons.html')
ctx = {
'order': order,
'request': request,
'event': sender,
'position': position
}
return template.render(ctx, request=request).strip()
@receiver(order_info, dispatch_uid="badges_control_order_info")
def control_order_info(sender: Event, request, order: Order, **kwargs):
cm = _cached_rendermap(sender)
if all(cm[p.item_id] is None for p in order.positions.all()):
return ''
template = get_template('pretixplugins/badges/control_order_info.html')
ctx = {
'order': order,
'request': request,
'event': sender,
}
return template.render(ctx, request=request)
@receiver(signal=logentry_display, dispatch_uid="badges_logentry_display")
def badges_logentry_display(sender, logentry, **kwargs):
if not logentry.action_type.startswith('pretix.plugins.badges'):
return
plains = {
'pretix.plugins.badges.layout.added': _('Badge layout created.'),
'pretix.plugins.badges.layout.deleted': _('Badge layout deleted.'),
'pretix.plugins.badges.layout.changed': _('Badge layout changed.'),
}
if logentry.action_type in plains:
return plains[logentry.action_type]
@receiver(signal=logentry_object_link, dispatch_uid="badges_logentry_object_link")
def badges_logentry_object_link(sender, logentry, **kwargs):
if not logentry.action_type.startswith('pretix.plugins.badges.layout') or not isinstance(logentry.content_object,
BadgeLayout):
return
a_text = _('Badge layout {val}')
a_map = {
'href': reverse('plugins:badges:edit', kwargs={
'event': sender.slug,
'organizer': sender.organizer.slug,
'layout': logentry.content_object.id
}),
'val': escape(logentry.content_object.name),
}
a_map['val'] = '<a href="{href}">{val}</a>'.format_map(a_map)
return a_text.format_map(a_map)
|
py | 1a3dfe083fe37d61aa98e57733681fcc46273461 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import glob
import salem
from combine2d.core.utils import NonRGIGlacierDirectory
from combine2d.core.test_cases import Borden, Giluwe
from combine2d.core.arithmetics import RMSE, mean_BIAS, percentiles
from combine2d.core.data_logging import load_pickle
from combine2d.sandbox.quick_n_dirty_eval.experiment_naming_engine import \
get_experiment_group, get_experiment_subgroup
from oggm import cfg
cfg.initialize()
basedir = '/home/philipp/final'
outputdir = '/home/philipp/final'
case = Giluwe
gdir = NonRGIGlacierDirectory(case, basedir)
ref_ice_mask = np.load(gdir.get_filepath('ref_ice_mask'))
true_bed = salem.GeoTiff(gdir.get_filepath('dem')).get_vardata()
true_surf = salem.GeoTiff(gdir.get_filepath('ref_dem')).get_vardata()
filepaths = glob.glob(os.path.join(gdir.dir, '*', 'data_logger.pkl'))
filepaths = sorted(filepaths)
columns = [
'experiment',
'experimentgroup',
'experimentsubgroup',
'subgroupindex',
'optimizedbed',
'optimizedsurf',
'optimizedicethick',
'firstguess',
'beds',
'surfs',
'costs',
'cterms',
'optimizedbederror',
'optimizedsurferror',
'optimizedbedrmse',
'optimizedsurfrmse',
'optimizedbedbias',
'optimizedsurfbias',
'firstguessrmse',
'firstguessbias',
'firstguess_5_percentile',
'firstguess_25_percentile',
'firstguess_75_percentile',
'firstguess_95_percentile',
'surfacenoise',
'surfacenoisermse',
'surfacenoisebias',
'surfacenoise_5_percentile',
'surfacenoise_25_percentile',
'surfacenoise_75_percentile',
'surfacenoise_95_percentile',
'bedmeasurements',
'bedmeasurementsrmse',
'bedmeasurementsbias',
'iterations',
'R',
'dV',
'warning',
'dir_path'
]
df = pd.DataFrame(columns=columns)
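# The error metrics used below come from combine2d.core.arithmetics.  As a
# rough sketch (an assumption about their implementation, not a definition),
# restricted to the glacier cells selected by ``ref_ice_mask``:
#
#   RMSE(a, b, mask)        ~ sqrt(mean((a - b)[mask] ** 2))
#   mean_BIAS(a, b, mask)   ~ mean((a - b)[mask])
#   percentiles(a, b, mask) ~ selected percentiles of (a - b)[mask]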
for path in filepaths:
dl = load_pickle(path)
inv_subdir = os.path.split(path)[0]
inv_settings = load_pickle(os.path.join(gdir.dir, inv_subdir,
'inversion_settings.pkl'))
experiment = inv_settings['inversion_subdir']
surface_noise = np.zeros(true_bed.shape)
if os.path.exists(os.path.join(gdir.dir, inv_subdir, 'dem_noise.npy')):
surface_noise = np.load(os.path.join(gdir.dir, inv_subdir,
'dem_noise.npy'))
bed_measurements = np.ma.masked_all(true_bed.shape)
if os.path.exists(os.path.join(gdir.dir, inv_subdir,
'bed_measurements.pkl')):
bed_measurements = np.load(os.path.join(gdir.dir, inv_subdir,
'bed_measurements.pkl'))
warning_found = False
# first_guessed_bed_noise = np.load(os.path.join(gdir.dir, inv_subdir,
# 'first_guessed_bed_noise.npy'))
if os.path.exists(os.path.join(gdir.dir, inv_subdir,
'warning.txt')):
warning_found = True
if len(dl.step_indices) > 0:
final_bed = dl.beds[-1]
final_surf = dl.surfs[-1]
final_it = dl.surfs[-1] - dl.beds[-1]
bed_rmse = RMSE(dl.beds[-1], true_bed, ref_ice_mask)
bed_bias = mean_BIAS(dl.beds[-1], true_bed, ref_ice_mask)
bed_error = final_bed - true_bed
surf_rmse = RMSE(dl.surfs[-1], true_surf, ref_ice_mask)
surf_bias = mean_BIAS(dl.surfs[-1], true_surf, ref_ice_mask)
surf_error = final_surf - true_surf
dV = (((dl.surfs[-1] - dl.beds[-1]).sum())
- (true_surf - true_bed).sum()) / (true_surf - true_bed).sum()
else:
final_bed = np.ma.masked_all(true_bed.shape)
final_surf = np.ma.masked_all(true_bed.shape)
final_it = np.ma.masked_all(true_bed.shape)
bed_error = np.ma.masked_all(true_bed.shape)
bed_rmse = np.nan
bed_bias = np.nan
surf_error = np.ma.masked_all(true_bed.shape)
surf_rmse = np.nan
surf_bias = np.nan
dV = np.nan
first_guess_percentiles = percentiles(dl.first_guessed_bed, true_bed,
ref_ice_mask)
surface_noise_percentiles = percentiles(surface_noise, 0, ref_ice_mask)
new_row = {
'experiment': experiment,
'experimentgroup': get_experiment_group(experiment),
'experimentsubgroup': get_experiment_subgroup(experiment),
'subgroupindex': '',
'optimizedbed': final_bed,
'optimizedsurf': final_surf,
'optimizedicethick': final_it,
'firstguess': dl.first_guessed_bed,
# 'beds': dl.beds,
# 'surfs': dl.surfs,
# 'costs': dl.costs,
# 'cterms': dl.c_terms,
'optimizedbederror': bed_error,
'optimizedbedrmse': bed_rmse,
'optimizedbedbias': bed_bias,
'optimizedsurferror': surf_error,
'optimizedsurfrmse': surf_rmse,
        'optimizedsurfbias': surf_bias,
'firstguessrmse': RMSE(dl.first_guessed_bed, true_bed, ref_ice_mask),
'firstguessbias': mean_BIAS(dl.first_guessed_bed, true_bed,
ref_ice_mask),
'firstguess_5_percentile': first_guess_percentiles[0],
'firstguess_25_percentile': first_guess_percentiles[1],
'firstguess_75_percentile': first_guess_percentiles[-2],
'firstguess_95_percentile': first_guess_percentiles[-1],
'surfacenoise': surface_noise,
'surfacenoisermse': RMSE(surface_noise, 0, ref_ice_mask),
'surfacenoisebias': mean_BIAS(surface_noise, 0, ref_ice_mask),
'surfacenoise_5_percentile': surface_noise_percentiles[0],
'surfacenoise_25_percentile': surface_noise_percentiles[1],
'surfacenoise_75_percentile': surface_noise_percentiles[-2],
'surfacenoise_95_percentile': surface_noise_percentiles[-1],
'bedmeasurements': bed_measurements,
'bedmeasurementsrmse': RMSE(bed_measurements, 0, ref_ice_mask),
'bedmeasurementsbias': mean_BIAS(bed_measurements, 0, ref_ice_mask),
'iterations': len(dl.step_indices),
'dx': case.dx,
'dV': dV,
'warning': warning_found,
'dir_path': inv_subdir
}
if new_row['experimentgroup'] == 'fg rmse':
new_row['subgroupindex'] = new_row['firstguessrmse']
elif new_row['experimentgroup'] == 'fg bias':
new_row['subgroupindex'] = new_row['firstguessbias']
elif new_row['experimentgroup'] == 'promised land':
new_row['subgroupindex'] = new_row['surfacenoisermse']
elif new_row['experimentgroup'] == 'bed measurements':
new_row['subgroupindex'] = new_row['bedmeasurementsrmse']
# df_row = pd.DataFrame(new_row)
df = df.append(new_row, ignore_index=True)
df = df.sort_values(['experimentgroup', 'experimentsubgroup', 'subgroupindex',
'experiment'])
df.to_pickle(os.path.join(basedir, '{:s} dataframe.pkl'.format(case.name)))
# store = pd.HDFStore(os.path.join(basedir,
# '{:s} dataframe.h5'.format(case.name)))
# store['df'] = df
# store.close()
cols_to_drop = [
'optimizedbed',
'optimizedsurf',
'optimizedicethick',
'firstguess',
'beds',
'surfs',
'costs',
'cterms',
'optimizedbederror',
'optimizedsurferror',
'surfacenoise',
'bedmeasurements'
]
small_df = df.copy()
small_df.drop(cols_to_drop, inplace=True, axis=1)
small_df.to_csv(
    os.path.join(basedir, '{:s} dataframe small.csv'.format(case.name)))
print('end')
fig, ax = plt.subplots()
sdf = df.loc[df['experimentsubgroup'] == 'fg bias']
sdf.plot('subgroupindex', 'optimizedbedrmse', ax=ax)
sdf.plot('subgroupindex', 'firstguessrmse', ax=ax)
plt.show()
fig, ax = plt.subplots()
sdf = df.loc[df['experimentgroup'] == 'fg rmse']
for key, grp in sdf.groupby(['experimentsubgroup']):
grp.plot('subgroupindex', 'optimizedbedrmse', ax=ax, label='optimized '
+ str(key))
grp.plot('subgroupindex', 'firstguessrmse', ax=ax, label='fg rmse ' + str(
key))
grp.plot('subgroupindex', 'firstguessbias', ax=ax, label='fg bias ' + str(
key))
plt.show()
|
py | 1a3dfe623a0775d26de7853fcc7325146c75d724 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# Copyright (C) 2020-2021 LuaVela Authors. See Copyright Notice in COPYRIGHT
# Copyright (C) 2015-2020 IPONWEB Ltd. See Copyright Notice in COPYRIGHT
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'Documentation'
copyright = u'2015-2019 IPONWEB Ltd.'
author = u'uJIT team'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
if tags.has('use_confluencebuilder'):
extensions.append('sphinxcontrib.confluencebuilder')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
]
# Single sourcing
rst_epilog = '''
.. |PROJECT| replace:: LuaVela
.. |PRJ_INT| replace:: uJIT
.. |CLI_BIN| replace:: ujit
'''
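# With the substitutions above, any .rst page can single-source the project
# naming; e.g. (hypothetical source line)
#
#   Welcome to |PROJECT|, internally known as |PRJ_INT|.
#
# renders as "Welcome to LuaVela, internally known as uJIT."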
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Documentationdoc'
# -- Options for LaTeX output ------------------------------------------------
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'uJIT documentation', u'Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Documentation', u'Documentation',
author, 'uJIT', 'Implementation of Lua 5.1, originally a fork of LuaJIT',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
confluence_publish = True
confluence_page_hierarchy = True
confluence_server_user = os.getenv("CONFLUENCE_USERNAME", "")
confluence_server_pass = os.getenv("CONFLUENCE_PASSWORD", "")
|
py | 1a3dfed622eafb365936b6b6807a37844faa017c | #!/usr/bin/env python
"""
requests_cache
~~~~~~~~~~~~~~
Transparent cache for ``requests`` library with persistence and async support
Just write::
import requests_cache
requests_cache.install_cache()
And requests to resources will be cached for faster repeated access::
import requests
for i in range(10):
r = requests.get('http://httpbin.org/delay/5')
        # will take approximately 5 seconds instead of 50
:copyright: (c) 2012 by Roman Haritonov.
:license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext'
__version__ = '0.5.2'
from .core import (
CachedSession,
clear,
disabled,
enabled,
get_cache,
install_cache,
remove_expired_responses,
uninstall_cache,
)
|
py | 1a3dff19d61861e63757aaf2e68be40d12a39c15 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide base classes that define what counts as an adversarial example for object detection models."""
import math
from .base import Criterion
import numpy as np
class TargetClassMiss(Criterion):
""" Defines adversarials as images for which the target class is not
in the detection result.
"""
def __init__(self, target_class):
super(TargetClassMiss, self).__init__()
self._target_class = target_class
def target_class(self):
"""Return target class."""
return self._target_class
    def name(self):
        """Return criterion name."""
return 'TargetClassMiss'
def is_adversarial(self, predictions, annotation):
"""Decides if predictions for an image are adversarial."""
if predictions is None:
return True
return self._target_class not in predictions['classes']
class RegionalTargetClassMiss(Criterion):
"""Defines adversarials as images for which the target class in target region is not
in the detection result.
"""
def __init__(self, target_class, target_region):
super(RegionalTargetClassMiss, self).__init__()
self._target_class = target_class
self._target_retion = np.array(target_region).astype(int)
def target_class(self):
"""Return target class."""
return self._target_class
def target_region(self):
"""Return target region."""
return self._target_retion
    def name(self):
        """Return criterion name."""
return 'RegionalTargetClassMiss'
def is_adversarial(self, predictions, annotation):
"""Decides if predictions for an image are adversarial."""
if predictions is None:
return True
bbox_list = predictions['boxes']
class_list = predictions['classes']
for bbox_pred, cls_pred in zip(bbox_list, class_list):
iou = self._get_IoU(bbox_pred, self._target_retion)
if iou > 0 and cls_pred == self._target_class:
return False
return True
@staticmethod
def _get_IoU(bbox1, bbox2):
bi = [max(bbox1[0], bbox2[0]), max(bbox1[1], bbox2[1]),
min(bbox1[2], bbox2[2]), min(bbox1[3], bbox2[3])]
ih = bi[2] - bi[0] + 1
iw = bi[3] - bi[1] + 1
if iw > 0 and ih > 0:
# compute overlap (IoU) = area of intersection / area of union
ua = (bbox1[2] - bbox1[0] + 1) * (bbox1[3] - bbox1[1] + 1) + \
(bbox2[2] - bbox2[0] + 1) * \
(bbox2[3] - bbox2[1] + 1) - iw * ih
ov = iw * ih / ua
return ov
else:
return 0.0
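# Worked example for the IoU helper above (boxes are assumed to be inclusive
# [x1, y1, x2, y2] pixel indices, matching the "+ 1" area terms):
#
#   bbox1 = [0, 0, 9, 9]      # 10 x 10 box, area 100
#   bbox2 = [5, 5, 14, 14]    # 10 x 10 box, area 100
#   intersection = 5 * 5 = 25
#   union        = 100 + 100 - 25 = 175
#   IoU          = 25 / 175 ~= 0.143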
class TargetClassMissGoogle(Criterion):
"""Defines adversarials as images for which the target class is not
in the Google object detection result.
"""
def __init__(self, target_class):
super(TargetClassMissGoogle, self).__init__()
self._target_class = target_class
def target_class(self):
"""Return target class."""
return self._target_class
    def name(self):
        """Return criterion name."""
return '{}-{}'.format(
self.__class__.__name__, self.target_class())
def is_adversarial(self, predictions):
"""Decides if predictions for an image are adversarial."""
if predictions is None:
return True
assert isinstance(predictions, list), 'Predictions should be list.'
for pred in predictions:
if pred['name'].lower() == self._target_class.lower():
return False
return True
class WeightedAP(Criterion):
    """Defines adversarials as images whose weighted AP distance
    is larger than a given threshold.
"""
_defaults = {
"alpha": 0.001,
"lambda_tp_area": 0,
"lambda_tp_dis": 0,
"lambda_tp_cs": 0,
"lambda_tp_cls": 1,
"lambda_fp_area": 0.1,
"lambda_fp_cs": 0,
'lambda_fn_area': 0.1,
'lambda_fn_cs': 0,
'a_set': [1, 1, 1, 0.1],
'MINOVERLAP': 0.5,
}
@classmethod
def get_defaults(cls, n):
"""Return default value of n.
Parameters
----------
n : str
            Key of the ``_defaults`` dictionary.
"""
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, Height, Width, distance_th, print_f=False):
self.__dict__.update(self._defaults) # set up default values
self.Height = float(Height)
self.Width = float(Width)
self.th_is_adv = distance_th
self.print_f = print_f
self.a_tp = self.a_set[0]
self.a_fp = self.a_set[1]
self.a_fn = self.a_set[2]
self.a_er = self.a_set[3]
    def name(self):
        """Return criterion name."""
return 'WeightedAP'
def is_adversarial(self, predictions, annotation):
"""Decides if predictions for an image are adversarial."""
if predictions is None:
return None
return self.distance_score(annotation, predictions) > self.th_is_adv
def _get_bb_area(self, bbox):
return (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1)
def _get_IoU(self, obj_1, obj_2):
bb = obj_1["bbox"]
bbgt = obj_2["bbox"]
bi = [max(bb[0], bbgt[0]), max(bb[1], bbgt[1]),
min(bb[2], bbgt[2]), min(bb[3], bbgt[3])]
iw = bi[2] - bi[0] + 1
ih = bi[3] - bi[1] + 1
if iw > 0 and ih > 0:
# compute overlap (IoU) = area of intersection / area of union
ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + \
(bbgt[2] - bbgt[0] + 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
ov = iw * ih / ua
return ov
else:
return 0.0
def _find_by_idx(self, idx, source_dic_list):
for temp_obj in source_dic_list:
if temp_obj['index'] == idx:
return temp_obj
return {}
def _get_largest_bb_area(self, obj_list):
temp_max = 1
for temp_obj in obj_list:
bb = temp_obj['bbox']
bb_area = self._get_bb_area(bb)
if bb_area > temp_max:
temp_max = bb_area
return temp_max
def _get_total_bb_area(self, obj_list):
total_area = 1
for temp_obj in obj_list:
bb = temp_obj['bbox']
bb_area = self._get_bb_area(bb)
total_area += bb_area
return total_area
def _get_largest_bb_edge(self, obj_list):
temp_max = -1
for temp_obj in obj_list:
bb = temp_obj['bbox']
if abs(bb[2] - bb[0]) > temp_max:
temp_max = abs(bb[2] - bb[0])
if abs(bb[3] - bb[1]) > temp_max:
temp_max = abs(bb[3] - bb[1])
return temp_max
def _sort_by_conf(self, ori_list, source_dic_list):
tup_list = []
if len(ori_list) <= 1:
return ori_list
for temp in ori_list:
temp_obj = self._find_by_idx(temp, source_dic_list)
if not temp_obj:
raise ValueError('object cannot be found by index.')
tup_list.append((temp_obj['index'], temp_obj['confident_score']))
tup_list.sort(key=lambda tup: tup[1])
return [x[0] for x in tup_list]
def _sort_match_dic(self, ori_index_dic, source_dic_list):
sorted_dic = {}
for temp_key in ori_index_dic.keys():
temp_list = ori_index_dic[temp_key]
if len(temp_list) <= 1:
sorted_dic[temp_key] = temp_list
else:
sorted_dic[temp_key] = self._sort_by_conf(
temp_list, source_dic_list)
return sorted_dic
def _get_fn_list(self, tp_match_dic, source_list):
dst_list = []
for temp_source in source_list:
flag_found = False
for temp_idx_pair in tp_match_dic.keys():
if (temp_source['index'] in tp_match_dic[temp_idx_pair]):
flag_found = True
if not flag_found:
dst_list.append(temp_source)
return dst_list
def _get_bb_distance(self, bb1, bb2):
c1 = [0.5 * (bb1[2] + bb1[0]), 0.5 * (bb1[3] + bb1[1])]
c2 = [0.5 * (bb2[2] + bb2[0]), 0.5 * (bb2[3] + bb2[1])]
return math.sqrt((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2)
    def distance_score(self, gt_dic, pd_dic):
        """Compute the metric distance between two given detection results.
Parameters
----------
gt_dic : dict
The ground truth annotation which contains: scores, boxes and classes.
pd_dic : dict
            The detector output, which contains: scores, boxes and classes.
"""
gt_list = self._dic2list(gt_dic)
pd_list = self._dic2list(pd_dic)
return self._compute_score(gt_list, pd_list)
def _dic2list(self, dic):
res_list = []
for idx, key in enumerate(dic.keys()):
if idx == 0:
for sub_idx in range(len(dic[key])):
res_list.append({'index': sub_idx})
if key == 'scores':
temp_name = 'confident_score'
elif key == 'boxes':
temp_name = 'bbox'
elif key == 'classes':
temp_name = 'class_name'
else:
raise ValueError('Invalid key.')
for sub_idx, temp_obj in enumerate(dic[key]):
                if temp_name == 'bbox':
                    # Swap box coordinate order: indices (0, 1, 2, 3) -> (1, 0, 3, 2).
                    temp_obj = [temp_obj[1], temp_obj[0], temp_obj[3], temp_obj[2]]
res_list[sub_idx][temp_name] = temp_obj
return res_list
def _compute_score(self, gt_obj_list, pd_obj_list):
'''
Notes
-----
        compute the metric distance score for two results from object detection.
input:
pd_obj_list: object list of prediction
            gt_obj_list: object list of ground truth
obj = {
'class_name' : 'car'
                  'bbox' : '634 663 787 913' string of [left, up, right, down] split by ' '
'confident score' : 0.9918241
'index' : 0
}
'''
tp_match_dic = {} # {pd_idx : [gt_idx1, gt_idx2...]}
for pd_obj in pd_obj_list:
tp_match_dic[pd_obj['index']] = []
for gt_obj in gt_obj_list:
IoU = self._get_IoU(pd_obj, gt_obj)
# and gt_obj['class_name'] == pd_obj['class_name']:
if IoU >= self.MINOVERLAP:
tp_match_dic[pd_obj['index']].append(gt_obj['index'])
tp_match_dic = self._sort_match_dic(tp_match_dic, gt_obj_list)
tp_pair = []
fp_pd = []
for temp_idx in tp_match_dic.keys():
if not tp_match_dic[temp_idx]:
fp_pd.append(self._find_by_idx(temp_idx, pd_obj_list))
else:
tp_pair.append(
(self._find_by_idx(
temp_idx, pd_obj_list), self._find_by_idx(
tp_match_dic[temp_idx][0], gt_obj_list)))
fn_gt = self._get_fn_list(tp_match_dic, gt_obj_list)
self.largest_area_gt = self._get_largest_bb_area(gt_obj_list)
self.largest_edge_gt = self._get_largest_bb_edge(gt_obj_list)
self.total_area_gt = self._get_total_bb_area(gt_obj_list)
self.total_area_pd = self._get_total_bb_area(pd_obj_list)
cum_tp_penal = 0.0
for temp_tp_pair in tp_pair:
results = self._tp_panelize(temp_tp_pair)
distance = results['distance']
area_dif = results['area_dif']
cs_dif = results['cs_dif']
class_dif = results['class_dif']
temp_tp_penal = self.lambda_tp_dis * distance + self.lambda_tp_area * area_dif \
+ self.lambda_tp_cs * cs_dif + self.lambda_tp_cls * class_dif
cum_tp_penal += temp_tp_penal
if self.print_f:
print('cum tp: ', cum_tp_penal)
if len(tp_pair) > 1:
cum_tp_penal /= len(tp_pair)
cum_fp_penal = 0.0
for temp_fp_pd in fp_pd:
area, cs = self._fp_fn_panelize(temp_fp_pd)
drop_func_out = self._factor_func(
self.total_area_pd / (self.Height * self.Width))
temp_fp_panel = self.lambda_fp_area * drop_func_out * \
area / self.total_area_pd + self.lambda_fp_cs * cs
cum_fp_penal += temp_fp_panel
if self.print_f:
print('cum fp: ', cum_fp_penal)
if len(fp_pd) > 1:
cum_fp_penal /= len(fp_pd)
cum_fn_penal = 0.0
for temp_fn_gt in fn_gt:
area, cs = self._fp_fn_panelize(temp_fn_gt)
drop_func_out = self._factor_func(
self.total_area_gt / (self.Height * self.Width))
temp_fn_panel = self.lambda_fn_area * drop_func_out * \
area / self.total_area_gt + self.lambda_fn_cs * cs
cum_fn_penal += temp_fn_panel
if self.print_f:
print('cum fn: ', cum_fn_penal)
if len(fn_gt) > 1:
cum_fn_penal /= len(fn_gt)
if (len(tp_pair) + len(fp_pd) + len(fn_gt)) == 0:
err_panel = 0
else:
err_panel = float((len(fp_pd) + len(fn_gt))) / \
(len(tp_pair) + len(fp_pd) + len(fn_gt))
if self.print_f:
print('tp: ', len(tp_pair), ' cum_tp_penal: ', cum_tp_penal)
print('fp: ', len(fp_pd), ' cum_fp_penal: ', cum_fp_penal)
print('fn: ', len(fn_gt), ' cum_fn_penal: ', cum_fn_penal)
print(
'total num: ',
len(tp_pair) +
len(fp_pd) +
len(fn_gt),
' err_panel: ',
err_panel)
score_final = (self.a_tp * cum_tp_penal + self.a_fp * cum_fp_penal + self.a_fn
* cum_fn_penal + self.a_er * err_panel) \
/ (self.a_tp + self.a_fp + self.a_fn + self.a_er)
return score_final
def _factor_func(self, x):
x = float(x)
if x != 0:
return x / (x + self.alpha)
return x
def _tp_panelize(self, obj_pair):
bb0 = obj_pair[0]['bbox']
bb1 = obj_pair[1]['bbox']
distance = self._get_bb_distance(bb0, bb1)
area0 = self._get_bb_area(bb0)
area1 = self._get_bb_area(bb1)
area_dif = abs(area0 - area1)
cs_dif = abs(
float(
obj_pair[0]['confident_score']) -
float(
obj_pair[1]['confident_score']))
class_dif = 0
if obj_pair[0]['class_name'] != obj_pair[1]['class_name']:
class_dif = 1
return {'distance': distance, 'area_dif': area_dif, 'cs_dif': cs_dif, 'class_dif': class_dif}
def _fp_fn_panelize(self, obj):
bb = obj['bbox']
area = self._get_bb_area(bb)
cs = float(obj['confident_score'])
return area, cs
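# Hedged usage sketch for WeightedAP (all values below are hypothetical).  The
# criterion compares a ground-truth and a predicted detection dict whose keys
# are 'scores', 'boxes' and 'classes' (see _dic2list above):
#
#   criterion = WeightedAP(Height=416, Width=416, distance_th=0.75)
#   gt = {'scores': [0.99], 'boxes': [[50, 40, 200, 180]], 'classes': ['car']}
#   pd = {'scores': [0.80], 'boxes': [[55, 45, 190, 175]], 'classes': ['car']}
#   score = criterion.distance_score(gt, pd)
#   is_adv = criterion.is_adversarial(pd, gt)  # True once score > distance_th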
|
py | 1a3dff52e8e3a6b3d9b94b80a0747e2762374f35 | """Define report loader class."""
import re
import os
import bioc
import pandas as pd
from negbio.pipeline import text2bioc, ssplit, section_split
from tqdm import tqdm
from constants import *
class Loader(object):
"""Report impression loader."""
def __init__(self, reports_path, extract_impression=False, extension='txt'):
self.reports_path = reports_path
self.extract_impression = extract_impression
self.punctuation_spacer = str.maketrans({key: f"{key} "
for key in ".,"})
self.splitter = ssplit.NegBioSSplitter(newline=False)
self.extension = extension
if os.path.isdir(reports_path):
# load in all radiology reports in a folder
self.load_files()
else:
# load in a single CSV file with all radiology reports
self.load_csv()
self.prep_collection()
def load_files(self):
"""Load and clean many reports stored in a folder"""
files = os.listdir(self.reports_path)
files = [f for f in files if f.endswith(self.extension)]
assert len(files) > 0,\
('Folder with reports must contain at '
f'least one ".{self.extension}" file')
files.sort()
# if args.verbose:
files = tqdm(files)
print('Collecting reports from files...')
# assume one report per file
self.reports = list()
self.index = list()
        for f in files:
with open(self.reports_path / f, 'r') as fp:
self.reports.append(''.join(fp.readlines()))
self.index.append(f)
def load_csv(self):
"""Load and clean the reports."""
reports = pd.read_csv(self.reports_path, header=None)
# allow users to input
# (1) single column CSV or reports
# (2) two columns; first is the index, second is the report
assert reports.shape[1] <= 2,\
('A one or two column CSV with no header is expected as input.')
if reports.shape[1] == 1:
reports = reports.iloc[:, 0].tolist()
index = None
else:
# reports shape must be 2
index = reports.iloc[:, 0].tolist()
reports = reports.iloc[:, 1].tolist()
self.index = index
self.reports = ['' if type(x) is not str else x
for x in reports]
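    # Accepted CSV layouts for load_csv (illustrative rows, no header):
    #
    #   one column:   "FINDINGS: no acute disease."
    #   two columns:  "study_0001,FINDINGS: no acute disease."
    #
    # With two columns the first is kept as self.index and the second is
    # treated as the report text.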
def prep_collection(self):
"""Apply splitter and create bioc collection"""
collection = bioc.BioCCollection()
for i, report in enumerate(self.reports):
clean_report = self.clean(report)
document = text2bioc.text2document(str(i), clean_report)
if self.extract_impression:
document = section_split.split_document(document)
self.extract_impression_from_passages(document)
split_document = self.splitter.split_doc(document)
assert len(split_document.passages) == 1,\
('Each document must have a single passage, ' +
'the Impression section.')
collection.add_document(split_document)
self.collection = collection
def extract_impression_from_passages(self, document):
"""Extract the Impression section from a Bioc Document."""
impression_passages = []
for i, passage in enumerate(document.passages):
if 'title' in passage.infons:
if passage.infons['title'] == 'impression':
next_passage = document.passages[i+1]
assert 'title' not in next_passage.infons,\
"Document contains empty impression section."
impression_passages.append(next_passage)
        assert len(impression_passages) <= 1,\
            (f"The document contains {len(impression_passages)} impression " +
             "passages.")
assert len(impression_passages) >= 1,\
"The document contains no explicit impression passage."
document.passages = impression_passages
def clean(self, report):
"""Clean the report text."""
lower_report = report.lower()
# Change `and/or` to `or`.
corrected_report = re.sub('and/or',
'or',
lower_report)
# Change any `XXX/YYY` to `XXX or YYY`.
corrected_report = re.sub('(?<=[a-zA-Z])/(?=[a-zA-Z])',
' or ',
corrected_report)
# Clean double periods
clean_report = corrected_report.replace("..", ".")
# Insert space after commas and periods.
clean_report = clean_report.translate(self.punctuation_spacer)
# Convert any multi white spaces to single white spaces.
clean_report = ' '.join(clean_report.split())
# Remove empty sentences
clean_report = re.sub(r'\.\s+\.', '.', clean_report)
return clean_report
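    # Example of what clean() does to a report sentence (illustrative input):
    #
    #   "No pneumothorax and/or effusion..Lungs/pleura are clear,stable."
    #
    # lower-cased, "and/or" -> "or", letter/letter -> " or ", ".." collapsed,
    # a space added after '.' and ',', whitespace squeezed:
    #
    #   "no pneumothorax or effusion. lungs or pleura are clear, stable."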
|
py | 1a3e0021d3184dc4b723a83e704013591df656a4 | # Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .blueprint import product_catalog_page
|
py | 1a3e015fa54c182a2d5300a48cf1dec06bba1ad9 | class Complex:
def __init__(self, realpart, imagpart):
self.r = realpart
self.i = imagpart
def add(self, addend):
return Complex(self.r + addend.r, self.i + addend.i)
def subtract(self, subtrahend):
return Complex(self.r - subtrahend.r, self.i - subtrahend.i)
def multiply(self, multiplicand):
return Complex((self.r * multiplicand.r) - (self.i * multiplicand.i),
(self.r * multiplicand.i) + (self.i * multiplicand.r))
    def divide(self, divisor):
        denominator = divisor.r ** 2 + divisor.i ** 2
        return Complex((self.r * divisor.r + self.i * divisor.i) / denominator,
                       (self.i * divisor.r - self.r * divisor.i) / denominator)
a = Complex(3.0, -4.5)
b = Complex(4.0, -9)
x = a.add(b)
y = a.subtract(b)
z = a.multiply(b)
w = a.divide(b)
print('sum:', x.r, '+', str(x.i) + 'i',
      '\ndifference:', y.r, '+', str(y.i) + 'i',
      '\nproduct:', z.r, '+', str(z.i) + 'i',
      '\nquotient:', w.r, '+', str(w.i) + 'i')
py | 1a3e01c470e11bfec622a5369f9b565b91a36864 | import datetime
import pytest
import responses
from dagster import Failure, build_init_resource_context
from dagster_fivetran import FivetranOutput, fivetran_resource
from .utils import (
DEFAULT_CONNECTOR_ID,
get_complex_sample_connector_schema_config,
get_sample_connector_response,
get_sample_sync_response,
get_sample_update_response,
)
def test_get_connector_details():
ft_resource = fivetran_resource(
build_init_resource_context(
config={
"api_key": "some_key",
"api_secret": "some_secret",
}
)
)
with responses.RequestsMock() as rsps:
rsps.add(
rsps.GET,
f"{ft_resource.api_base_url}{DEFAULT_CONNECTOR_ID}",
json=get_sample_connector_response(),
)
assert (
ft_resource.get_connector_details(DEFAULT_CONNECTOR_ID)
== get_sample_connector_response()["data"]
)
@pytest.mark.parametrize("max_retries,n_flakes", [(0, 0), (1, 2), (5, 7), (7, 5), (4, 4)])
def test_get_connector_details_flake(max_retries, n_flakes):
ft_resource = fivetran_resource(
build_init_resource_context(
config={
"api_key": "some_key",
"api_secret": "some_secret",
"request_max_retries": max_retries,
"request_retry_delay": 0,
}
)
)
def _mock_interaction():
with responses.RequestsMock() as rsps:
# first n requests fail
for _ in range(n_flakes):
rsps.add(
rsps.GET,
f"{ft_resource.api_base_url}{DEFAULT_CONNECTOR_ID}",
status=500,
)
rsps.add(
rsps.GET,
f"{ft_resource.api_base_url}{DEFAULT_CONNECTOR_ID}",
json=get_sample_connector_response(),
)
return ft_resource.get_connector_details(DEFAULT_CONNECTOR_ID)
if n_flakes > max_retries:
with pytest.raises(Failure, match="Exceeded max number of retries."):
_mock_interaction()
else:
assert _mock_interaction() == get_sample_connector_response()["data"]
@pytest.mark.parametrize(
"data,expected",
[
(
{"succeeded_at": "2021-01-01T01:00:00.0Z", "failed_at": None},
(datetime.datetime(2021, 1, 1, 1, 0, tzinfo=datetime.timezone.utc), True, "scheduled"),
),
(
{"succeeded_at": None, "failed_at": "2021-01-01T01:00:00.0Z"},
(
datetime.datetime(2021, 1, 1, 1, 0, tzinfo=datetime.timezone.utc),
False,
"scheduled",
),
),
(
{
"succeeded_at": "2021-01-01T01:00:00.0Z",
"failed_at": None,
"status": {"sync_state": "foo"},
},
(datetime.datetime(2021, 1, 1, 1, 0, tzinfo=datetime.timezone.utc), True, "foo"),
),
(
{"succeeded_at": "2021-01-01T02:00:00.00Z", "failed_at": "2021-01-01T01:00:00.0Z"},
(datetime.datetime(2021, 1, 1, 2, 0, tzinfo=datetime.timezone.utc), True, "scheduled"),
),
(
{"succeeded_at": "2021-01-01T01:00:00.0Z", "failed_at": "2021-01-01T02:00:00.00Z"},
(
datetime.datetime(2021, 1, 1, 2, 0, tzinfo=datetime.timezone.utc),
False,
"scheduled",
),
),
],
)
def test_get_connector_sync_status(data, expected):
ft_resource = fivetran_resource(
build_init_resource_context(
config={
"api_key": "some_key",
"api_secret": "some_secret",
}
)
)
with responses.RequestsMock() as rsps:
rsps.add(
rsps.GET,
f"{ft_resource.api_base_url}{DEFAULT_CONNECTOR_ID}",
json=get_sample_connector_response(data=data),
)
assert ft_resource.get_connector_sync_status(DEFAULT_CONNECTOR_ID) == expected
@pytest.mark.parametrize(
"n_polls, succeed_at_end",
[(0, True), (0, False), (4, True), (4, False), (30, True)],
)
def test_sync_and_poll(n_polls, succeed_at_end):
ft_resource = fivetran_resource(
build_init_resource_context(
config={
"api_key": "some_key",
"api_secret": "some_secret",
}
)
)
api_prefix = f"{ft_resource.api_base_url}{DEFAULT_CONNECTOR_ID}"
final_data = (
{"succeeded_at": "2021-01-01T02:00:00.0Z"}
if succeed_at_end
else {"failed_at": "2021-01-01T02:00:00.0Z"}
)
def _mock_interaction():
with responses.RequestsMock() as rsps:
rsps.add(
rsps.GET,
f"{ft_resource.api_base_url}{DEFAULT_CONNECTOR_ID}/schemas",
json=get_complex_sample_connector_schema_config(),
)
rsps.add(rsps.PATCH, api_prefix, json=get_sample_update_response())
rsps.add(rsps.POST, f"{api_prefix}/force", json=get_sample_sync_response())
# initial state
rsps.add(rsps.GET, api_prefix, json=get_sample_connector_response())
# n polls before updating
for _ in range(n_polls):
rsps.add(rsps.GET, api_prefix, json=get_sample_connector_response())
# final state will be updated
rsps.add(rsps.GET, api_prefix, json=get_sample_connector_response(data=final_data))
return ft_resource.sync_and_poll(DEFAULT_CONNECTOR_ID, poll_interval=0.1)
if succeed_at_end:
assert _mock_interaction() == FivetranOutput(
connector_details=get_sample_connector_response(data=final_data)["data"],
schema_config=get_complex_sample_connector_schema_config()["data"],
)
else:
with pytest.raises(Failure, match="failed!"):
_mock_interaction()
def test_sync_and_poll_timeout():
ft_resource = fivetran_resource(
build_init_resource_context(
config={
"api_key": "some_key",
"api_secret": "some_secret",
}
)
)
with pytest.raises(Failure, match="timed out"):
with responses.RequestsMock() as rsps:
rsps.add(
rsps.GET,
f"{ft_resource.api_base_url}{DEFAULT_CONNECTOR_ID}/schemas",
json=get_complex_sample_connector_schema_config(),
)
rsps.add(
rsps.GET,
f"{ft_resource.api_base_url}{DEFAULT_CONNECTOR_ID}",
json=get_sample_connector_response(),
)
rsps.add(
rsps.PATCH,
f"{ft_resource.api_base_url}{DEFAULT_CONNECTOR_ID}",
json=get_sample_update_response(),
)
rsps.add(
rsps.POST,
f"{ft_resource.api_base_url}{DEFAULT_CONNECTOR_ID}/force",
json=get_sample_sync_response(),
)
ft_resource.sync_and_poll(DEFAULT_CONNECTOR_ID, poll_interval=1, poll_timeout=2)
@pytest.mark.parametrize(
"data,match",
[
({"paused": True}, "paused"),
({"status": {"setup_state": "foo"}}, "setup"),
],
)
def test_sync_and_poll_invalid(data, match):
ft_resource = fivetran_resource(
build_init_resource_context(
config={
"api_key": "some_key",
"api_secret": "some_secret",
}
)
)
with pytest.raises(Failure, match=match):
with responses.RequestsMock() as rsps:
rsps.add(
rsps.GET,
f"{ft_resource.api_base_url}{DEFAULT_CONNECTOR_ID}/schemas",
json=get_complex_sample_connector_schema_config(),
)
rsps.add(
rsps.GET,
f"{ft_resource.api_base_url}{DEFAULT_CONNECTOR_ID}",
json=get_sample_connector_response(data=data),
)
rsps.add(
rsps.PATCH,
f"{ft_resource.api_base_url}{DEFAULT_CONNECTOR_ID}",
json=get_sample_update_response(),
)
rsps.add(
rsps.POST,
f"{ft_resource.api_base_url}{DEFAULT_CONNECTOR_ID}/force",
json=get_sample_sync_response(),
)
ft_resource.sync_and_poll(DEFAULT_CONNECTOR_ID, poll_interval=0.1)
|
py | 1a3e01d72a710931aab79f898a7b1e0216e28922 |
'''
Kivy standard library imports
'''
import kivy
from kivy.config import Config
#kivy.config.Config.set('graphics','resizable', False) #config needs to be set before kivy.app is imported
Config.set('graphics', 'fullscreen', 'auto')
from kivy.app import App
from time import time
from os.path import dirname, join
from kivy.lang import Builder
from kivy.properties import NumericProperty,StringProperty,BooleanProperty
from kivy.properties import ListProperty,ReferenceListProperty,ObjectProperty
from kivy.animation import Animation
from kivy.uix.screenmanager import Screen
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.uix.bubble import Bubble
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.rst import RstDocument
from kivy.clock import Clock, mainthread
from kivy.uix.videoplayer import VideoPlayer
'''
Project imports
'''
from data.DatabaseThread import *
from data.StepperControl import *
from data.ServoControl import *
from data.libs.MyKnob import *
from data.libs.garden.mapview import *
|
py | 1a3e021cca6948c00871617f5f9eb6ed935ce1ab | from django.conf.urls import url
from django.urls import path, include
from public import views
urlpatterns = [] |
py | 1a3e02e7cf5bbffd98f08b92ba502ed7e25ef572 | __author__ = 'jlegind'
from urllib import parse, request
import requests
import json
import collections
import csv
class SearchAPI(object):
def __init__(self, url, read_path, write_path, suffix='', separator='\t'):
"""
:param url: JSON api url
:param read_path: File that contains the search params
:param write_path: Output file
:param suffix: If the url has a suffix like /verbatim after the params this can be tagged on
"""
self.wp = write_path
self.file = open(read_path, mode='r', encoding='utf-8-sig')
self.write_file = open(write_path, mode='w', encoding='utf-8')
self.url = url
self.suffix = suffix
self.appended = ''
self.separator = separator
def take_parameters(self, *args, **kwargs):
"""
:param args: The JSON values you want returned
:param kwargs: The API search term[key] in the API call,
and position[value] in the read_file (tab separated columns)
"""
line = self.file.readline()
while line:
new_url = self.url
to_paging_params = []
split_line = line.split(self.separator)
if kwargs:
print(kwargs)
for k, v in kwargs.items():
print('this is k in kwargs:', k)
kw = split_line[kwargs[k]].strip()
print('value ? ', v)
new_url += k+'='+parse.quote_plus(kw)+'&'
to_paging_params.append(kw)
else:
vl = split_line[0].strip()
new_url += vl
print(vl+" prrr---")
self.appended = vl
to_paging_params.append(vl)
self.pagination(new_url.strip('&')+self.suffix, to_paging_params, args)
line = self.file.readline()
def searching_gbif_api(self, url):
'''
Just get the GBIF api search result
'''
rson = requests.get(url)
rson = rson.json()
return rson
def filter_api_response(self, response, fields):
'''
response = json response from api
fields = A list of fields to parse for
'''
resp_dict = dict.fromkeys(fields)
for j in fields:
resp_dict[j] = response[j]
return resp_dict
def make_search_name(self, positions):
'''
assumes multiple columns and composes a name from these in order of positions
param: positions = a LIST of column positions in a csv/text file
'''
line = self.file.readline()
while line:
rowlist = line.split(self.separator)
# res = [name for name in rowlist]
name = [rowlist[e] for e in positions]
stripped_name = [j.rstrip() for j in name]
stripped_name = ' '.join(stripped_name)
print('stripped name: ', stripped_name)
line = self.file.readline()
search_url = self.url+stripped_name
yield search_url
def pagination(self, url, terms, keys, offset=None, appended=''):
"""
:param url: Takes the url with the search term and value added
:param terms: A list of search values
:param keys: A list of JSON keys that you want the value for
:param offset: Used to increment paging
"""
#print(url)
if not terms or offset == None:
print('Absolute no_param')
new_url = url
else:
new_url = url+'&offset='+str(offset)+'&limit=100'
print(new_url)
try:
response = request.urlopen(new_url)
r = response.read()
decoded_json = json.loads(r.decode('utf-8'))
print('debug1')
end_of_records = None
try:
results = decoded_json['results']
end_of_records = decoded_json['endOfRecords']
except KeyError:
print('keyError !!!!!!!')
results = decoded_json
#print(results)
if end_of_records is False:
print('False')
for j in results:
self.parse_json(j, keys, terms)
offset += 100
self.pagination(url, terms, keys, offset=offset)
else:
print('debug2')
try:
for j in results:
#print('debug3')
self.parse_json(j, keys, terms)
except:
#print('8888debuggg')
self.parse_json(results, keys, terms)
except Exception as err:
print(err, 'err')
print(type(err))
#Below is NOT TESTED outside a plain non-JSON result (like the count API call)
self.write_output(decoded_json)
def write_output(self, input_to_file):
#print('debug5')
if isinstance(input_to_file, collections.Iterable):
output = '\t'.join(str(e) for e in input_to_file)
else:
output = input_to_file
#output string is created from the input_to_file list. Integers are cast to str
#print(output)
self.write_file.write(str(output)+'\t'+self.appended+'\n')
def parse_json(self, json_element, keys, terms):
list_output = []
#print('debug4')
for k in keys:
try:
list_output.append(json_element[k])
except KeyError:
#print('keyerror---')
list_output.append('NULL')
#print('debug3', terms)
[list_output.append(i) for i in terms]
#print('debug4', list_output)
self.write_output(list_output)
def main():
#my_api = SearchAPI('http://api.gbif.org/v1/species/', 'G:/GIASIP/export/nubkeys.txt', 'G:/GIASIP/export/GISDnubtaxonomy_test.txt')
"""my_api.take_parameters("key", "nubKey", "taxonID", "kingdom", "phylum", "order", "family", "genus",
"species", "kingdomKey", "phylumKey", "classKey", "orderKey", "familyKey",
"genusKey", "speciesKey", "datasetKey", "parentKey", "parent", "acceptedKey",
"accepted", "scientificName", "canonicalName", "authorship", "nameType",
"rank", "origin", "taxonomicStatus", "nomenclaturalStatus", "accordingTo",
"numDescendants", "synonym", "class", "publishedIn", "references",
no_param=0)
"""
# my_api = SearchAPI('http://api.gbif.org/v1/species/', 'G:/GIASIP/export/GISDnubtaxonomy_unique.txt', 'G:/GIASIP/export/no_param_test.txt', suffix="/distributions?")
# my_api.take_parameters("locationId", "locality", "country", "status", "establishmentMeans", "sourceTaxonKey")
# my_api = SearchAPI('http://api.gbif.org/v1/species/', 'C:/Users/jlegind/Dropbox/GIASIP/taxon_keys.txt', 'C:/Users/jlegind/Dropbox/GIASIP/export/GISDvernacularnames2.txt', suffix="/vernacularNames?")
# my_api.take_parameters("vernacularName", "language", "sourceTaxonKey", "preferred")
#
# my_api = SearchAPI('http://api.gbif.org/v1/species/match?kingdom=Animalia&', 'G:/Custom exports/Imanol/names.txt', 'G:/Custom exports/Imanol/interpreted_names.txt', separator=';')
# my_api.take_parameters("usageKey",
# "scientificName", "rank",
# name=0)
my_api = SearchAPI('http://api.gbif.org/v1/species/match?kingdom=Animalia&name=', 'H:/into_api/atomized_fish_list.txt', 'H:/output_api/interpreted_names_fish.txt')
# #separator = tab
# my_api.take_parameters("usageKey",
# "scientificName", "kingdom", "phylum", "class", "order", "family", "genus", "rank", "status", "confidence",
# genus=0, name=1)
res = my_api.make_search_name([0,1,2])
with open('H:/output_api/interpreted_names_fish.txt', 'w+', newline='') as wfile:
field_list = ["usageKey", "acceptedUsageKey", "scientificName", "kingdom", "phylum", "class", "order", "family", "genus", "rank", "status", "confidence"]
writer = csv.DictWriter(wfile, fieldnames=field_list, delimiter='\t')
writer.writeheader()
for j in res:
print('name url == ', j)
try:
reply = my_api.searching_gbif_api(j)
res = my_api.filter_api_response(reply, field_list)
print('return dict === ', res)
writer.writerow(res)
except:
print('ERROR')
#
# my_api.pagination(j, ["usageKey",
# "scientificName", "kingdom", "phylum", "class", "order", "family", "genus", "rank", "status", "confidence"], )
# name_list.append(j)
# my_api = SearchAPI('http://api.gbif.org/v1/dataset/', 'G:/Custom exports/dataset_list.csv', 'G:/Custom exports/lic_datasets.txt')
# my_api.take_parameters("key", "title", "type")
# my_api.take_parameters("key", "title", identifier=0)
# my_api = SearchAPI('http://api.gbif.org/v1/occurrence/count?datasetKey=', 'G:/Deletion/deleted_datasets/datasetkeys.csv', 'G:/Custom exports/del_counts.txt')
# my_api.take_parameters(None)
# UGLY HACK line 49 offset=None , must be 0
if __name__ == '__main__':
main() |
py | 1a3e03c9dd2917b7d5b80535b21f940b51af1831 | from __future__ import unicode_literals
import click
import six
from ..aliases import aliases_database
from .base import cli
@cli.command(name='clean-aliases',
short_help="Remove aliases mapping to closed or inexistent "
"activities.")
@click.option('-y', '--yes', 'force_yes', is_flag=True,
help="Don't ask confirmation.")
@click.pass_context
def clean_aliases(ctx, force_yes):
"""
Removes aliases from your config file that point to inactive projects.
"""
inactive_aliases = []
for (alias, mapping) in six.iteritems(aliases_database):
# Ignore local aliases
if mapping.mapping is None:
continue
project = ctx.obj['projects_db'].get(mapping.mapping[0],
mapping.backend)
if (project is None or not project.is_active() or
(mapping.mapping[1] is not None
and project.get_activity(mapping.mapping[1]) is None)):
inactive_aliases.append(((alias, mapping), project))
if not inactive_aliases:
ctx.obj['view'].msg("No inactive aliases found.")
return
if not force_yes:
confirm = ctx.obj['view'].clean_inactive_aliases(inactive_aliases)
if force_yes or confirm:
ctx.obj['settings'].remove_aliases(
[item[0] for item in inactive_aliases]
)
ctx.obj['settings'].write_config()
ctx.obj['view'].msg("%d inactive aliases have been successfully"
" cleaned." % len(inactive_aliases))
|
py | 1a3e042a5a6e071890e01be803044e368adaba5b | import glob
import os
from typing import List
from torch import sigmoid
import matplotlib.pyplot as plt
import seaborn as sn
import torch
import wandb
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import LoggerCollection, WandbLogger
from sklearn import metrics
from sklearn.metrics import f1_score, precision_score, recall_score
def get_wandb_logger(trainer: Trainer) -> WandbLogger:
"""Safely get Weights&Biases logger from Trainer."""
if isinstance(trainer.logger, WandbLogger):
return trainer.logger
if isinstance(trainer.logger, LoggerCollection):
for logger in trainer.logger:
if isinstance(logger, WandbLogger):
return logger
raise Exception(
"You are using wandb related callback, but WandbLogger was not found for some reason..."
)
class WatchModelWithWandb(Callback):
"""Make WandbLogger watch model at the beginning of the run."""
def __init__(self, log: str = "gradients", log_freq: int = 100):
self.log = log
self.log_freq = log_freq
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq)
class UploadCodeToWandbAsArtifact(Callback):
"""Upload all *.py files to wandb as an artifact, at the beginning of the run."""
def __init__(self, code_dir: str):
self.code_dir = code_dir
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
code = wandb.Artifact("project-source", type="code")
for path in glob.glob(os.path.join(self.code_dir, "**/*.py"), recursive=True):
code.add_file(path)
experiment.use_artifact(code)
class UploadCheckpointsToWandbAsArtifact(Callback):
"""Upload checkpoints to wandb as an artifact, at the end of run."""
def __init__(self, ckpt_dir: str = "checkpoints/", upload_best_only: bool = False):
self.ckpt_dir = ckpt_dir
self.upload_best_only = upload_best_only
def on_train_end(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
ckpts = wandb.Artifact("experiment-ckpts", type="checkpoints")
if self.upload_best_only:
ckpts.add_file(trainer.checkpoint_callback.best_model_path)
else:
for path in glob.glob(os.path.join(self.ckpt_dir, "**/*.ckpt"), recursive=True):
ckpts.add_file(path)
experiment.use_artifact(ckpts)
class LogConfusionMatrixToWandb(Callback):
"""Generate confusion matrix every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module) -> None:
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate confusion matrix."""
if self.ready:
logger = get_wandb_logger(trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
confusion_matrix = metrics.confusion_matrix(y_true=targets, y_pred=preds)
# set figure size
plt.figure(figsize=(14, 8))
# set labels size
sn.set(font_scale=1.4)
# set font size
sn.heatmap(confusion_matrix, annot=True, annot_kws={"size": 8}, fmt="g")
            # names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"confusion_matrix/{experiment.name}": wandb.Image(plt)}, commit=False)
# according to wandb docs this should also work but it crashes
            # experiment.log({f"confusion_matrix/{experiment.name}": plt})
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class LogF1PrecRecHeatmapToWandb(Callback):
"""Generate f1, precision, recall heatmap every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self, class_names: List[str] = None):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate f1, precision and recall heatmap."""
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
f1 = f1_score(preds, targets, average=None)
r = recall_score(preds, targets, average=None)
p = precision_score(preds, targets, average=None)
data = [f1, p, r]
# set figure size
plt.figure(figsize=(14, 3))
# set labels size
sn.set(font_scale=1.2)
# set font size
sn.heatmap(
data,
annot=True,
annot_kws={"size": 10},
fmt=".3f",
yticklabels=["F1", "Precision", "Recall"],
)
            # names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"f1_p_r_heatmap/{experiment.name}": wandb.Image(plt)}, commit=False)
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class ImagePredictionLogger(Callback):
"""Logs a validation batch and their predictions to wandb.
Example adapted from:
https://wandb.ai/wandb/wandb-lightning/reports/Image-Classification-using-PyTorch-Lightning--VmlldzoyODk1NzY
"""
def __init__(self, num_samples: int = 8):
super().__init__()
self.num_samples = num_samples
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_epoch_end(self, trainer, pl_module):
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
            # get a validation batch from the validation data loader
val_samples = next(iter(trainer.datamodule.val_dataloader()))
val_imgs, val_labels = val_samples
# run the batch through the network
val_imgs = val_imgs.to(device=pl_module.device)
logits = pl_module(val_imgs)
# preds = torch.argmax(logits, axis=-1)
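            # binary case: threshold the sigmoid probabilities at 0.5 instead of an argmax over classes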
preds = sigmoid(logits).squeeze()
preds[preds>=0.5]=1
preds[preds<0.5]=0
# log the images as wandb Image
experiment.log(
{
f"Images/{experiment.name}": [
wandb.Image(x, caption=f"Pred:{pred}, Label:{y}")
for x, pred, y in zip(
val_imgs[: self.num_samples],
preds[: self.num_samples],
val_labels[: self.num_samples],
)
]
}
)
|
py | 1a3e077184cee17fc382d62a04bde8688a17ebb7 | # -*- coding: utf-8 -*-
import wtforms
from flask import render_template, request, Markup, abort, flash, redirect, escape, url_for, make_response
from .. import b__ as __
from .form import Form
from .fields import SubmitField
class ConfirmDeleteForm(Form):
"""
Confirm a delete operation
"""
# The labels on these widgets are not used. See delete.html.
delete = SubmitField(__(u"Delete"))
cancel = SubmitField(__(u"Cancel"))
def render_form(form, title, message='', formid='form', submit=__(u"Submit"), cancel_url=None, ajax=False):
multipart = False
for field in form:
if isinstance(field.widget, wtforms.widgets.FileInput):
multipart = True
if form.errors:
code = 200 # 400
else:
code = 200
if request.is_xhr and ajax:
return make_response(render_template('baseframe/ajaxform.html', form=form, title=title,
message=message, formid=formid, submit=submit,
cancel_url=cancel_url, multipart=multipart), code)
else:
return make_response(render_template('baseframe/autoform.html', form=form, title=title,
message=message, formid=formid, submit=submit,
cancel_url=cancel_url, ajax=ajax, multipart=multipart), code)
def render_message(title, message, code=200):
if request.is_xhr:
return make_response(Markup("<p>%s</p>" % escape(message)), code)
else:
return make_response(render_template('baseframe/message.html', title=title, message=message), code)
def render_redirect(url, code=302):
if request.is_xhr:
return make_response(render_template('baseframe/redirect.html', url=url))
else:
return redirect(url, code=code)
def render_delete_sqla(obj, db, title, message, success=u'', next=None, cancel_url=None):
if not obj:
abort(404)
form = ConfirmDeleteForm()
if request.method in ('POST', 'DELETE') and form.validate():
if 'delete' in request.form or request.method == 'DELETE':
db.session.delete(obj)
db.session.commit()
if success:
flash(success, 'success')
return render_redirect(next or url_for('index'), code=303)
else:
return render_redirect(cancel_url or next or url_for('index'), code=303)
return make_response(render_template('baseframe/delete.html', form=form, title=title, message=message))
|
py | 1a3e098720c4ef9b00839f13c72edff551e21a5c | import math
import pytest
from click.testing import CliRunner
from r2b2.athena import Athena
from r2b2.cli import cli
from r2b2.contest import Contest
from r2b2.contest import ContestType
from r2b2.minerva import Minerva
from r2b2.tests import util as util
default_contest = util.generate_contest(10000)
def test_simple_athena():
simple_athena = Athena(.1, 2**31 - 1, .1, default_contest)
assert simple_athena.alpha == .1
assert simple_athena.beta == 0.0
assert simple_athena.delta == 2**31 - 1
assert simple_athena.max_fraction_to_draw == .1
assert len(simple_athena.rounds) == 0
assert len(simple_athena.sub_audits['a-b'].min_winner_ballots) == 0
assert simple_athena.get_risk_level() is None
def test_athena_minerva_paper():
contest = Contest(100000, {'A': 75000, 'B': 25000}, 1, ['A'], ContestType.MAJORITY)
athena = Athena(.1, 1, .1, contest)
minerva = Minerva(.1, .1, contest)
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [50])
minerva.compute_min_winner_ballots(minerva.sub_audits['A-B'], [50])
# From Athena paper
assert athena.sub_audits['A-B'].min_winner_ballots == [32]
assert minerva.sub_audits['A-B'].min_winner_ballots == [31]
def test_athena_execute_round():
contest = Contest(100000, {'A': 75000, 'B': 25000}, 1, ['A'], ContestType.MAJORITY)
athena = Athena(.1, 1, .1, contest)
assert not athena.execute_round(50, {'A': 31, 'B': 19})
assert not athena.stopped
assert athena.sample_ballots['A'] == [31]
assert athena.sample_ballots['B'] == [19]
assert not athena.sub_audits['A-B'].stopped
assert athena.rounds == [50]
assert athena.execute_round(100, {'A': 70, 'B': 30})
assert athena.stopped
assert athena.sample_ballots['A'] == [31, 70]
assert athena.sample_ballots['B'] == [19, 30]
assert athena.sub_audits['A-B'].stopped
assert athena.rounds == [50, 100]
assert athena.get_risk_level() < 0.1
def test_interactive_athena():
runner = CliRunner()
user_in = 'athena\n0.1\n0.1\n100000\n2\nA\n75000\nB\n25000\n1\nA\nMAJORITY\ny\n1\ny\nn\n50\n31\n19\nn\nn\n100\n70\n30\n'
result = runner.invoke(cli, 'interactive', input=user_in)
output_file = open('src/r2b2/tests/data/cli_test_expected_out_interactive_athena.txt', 'r')
expected_out = output_file.read()
assert result.output == expected_out
output_file.close()
def test_bulk_athena():
# Same as Minerva (that is, delta = infinity)
# Ballot-by-ballot Minerva should yield identical stopping rules to BRAVO.
contest = Contest(100000, {'A': 60000, 'B': 40000}, 1, ['A'], ContestType.MAJORITY)
athena = Athena(.1, 2**31 - 1, .01, contest)
athena.compute_all_min_winner_ballots(athena.sub_audits['A-B'])
# p0 not hardcoded as .5 for scalability with odd total contest ballots.
p0 = (athena.contest.contest_ballots // 2) / athena.contest.contest_ballots
log_winner_multiplier = math.log(athena.sub_audits['A-B'].sub_contest.winner_prop / p0)
log_loser_multiplier = math.log((1 - athena.sub_audits['A-B'].sub_contest.winner_prop) / p0)
log_rhs = math.log(1 / athena.alpha)
for i in range(len(athena.rounds)):
n = athena.rounds[i]
kmin = athena.sub_audits['A-B'].min_winner_ballots[i]
# Assert this kmin satisfies ratio, but a kmin one less does not.
assert kmin * log_winner_multiplier + (n - kmin) * log_loser_multiplier > log_rhs
assert (kmin - 1) * log_winner_multiplier + (n - kmin + 1) * log_loser_multiplier <= log_rhs
def test_athena_next_sample_size():
    # TODO: Create tests for athena next sample size
simple_athena = Athena(0.1, 1, 0.1, default_contest)
simple_athena.next_sample_size()
pass
def test_exceptions():
contest = Contest(100000, {'A': 60000, 'B': 40000}, 1, ['A'], ContestType.MAJORITY)
with pytest.raises(ValueError):
Athena(.1, 0, .1, contest)
athena = Athena(.1, 1, .1, contest)
with pytest.raises(Exception):
athena.stopping_condition_pairwise('A-B')
athena.rounds.append(10)
with pytest.raises(ValueError):
athena.stopping_condition_pairwise('X')
athena.rounds = []
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [0])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [1, 2])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [20, 20])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [20, 19])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [10001])
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [20])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [20])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [19])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [10001])
contest2 = Contest(100, {'A': 60, 'B': 30}, 1, ['A'], ContestType.MAJORITY)
athena2 = Athena(0.1, 1, 1.0, contest2)
with pytest.raises(ValueError):
athena2.compute_min_winner_ballots(athena2.sub_audits['A-B'], [91])
athena2.rounds.append(10)
with pytest.raises(Exception):
athena2.compute_all_min_winner_ballots(athena2.sub_audits['A-B'])
athena2.rounds = []
with pytest.raises(ValueError):
athena2.compute_all_min_winner_ballots(athena2.sub_audits['A-B'], 0)
with pytest.raises(ValueError):
athena2.compute_all_min_winner_ballots(athena2.sub_audits['A-B'], 200)
with pytest.raises(ValueError):
athena2.compute_all_min_winner_ballots(athena2.sub_audits['A-B'], 0)
|
py | 1a3e09c4dc43af8e90455b4da68e59f34337d57f | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
from utils.image_precess import *
import numpy as np
import cv2
import json
def get_image(img_dir, img_name):
# cv2 default (H, W)
return preprocess_input(img_2_array(cv2.resize(cv2.imread(img_dir + img_name), (80, 240))))
def get_feature(img_name):
img_dir = '/home/ubuntu/media/File/1Various/Person_reid_dataset/DukeMTMC-reID/bounding_box_train/'
with tf.name_scope('input'):
image_input = tf.placeholder(tf.float32, [None, 240, 80, 3], name='img_input', )
with slim.arg_scope(nets.resnet_v1.resnet_arg_scope()):
feature, _ = nets.resnet_v1.resnet_v1_50(image_input, is_training=False, global_pool=False, reuse=tf.AUTO_REUSE)
sess = tf.Session()
saver = tf.train.Saver()
saver.restore(sess, './model_1_3_49950.ckpt')
img_array = get_image(img_dir, img_name)
print(img_name)
_feature = sess.run(feature, feed_dict={image_input: np.reshape(img_array, [1, 240, 80, 3])})
sess.close()
return _feature
if __name__ == '__main__':
feature = get_feature('0001_c2_f0046182.jpg')
# (1, 8, 3, 2048)
print(feature.shape) |
py | 1a3e0b2dd89d713c1c5f9cf014d9c1a63ff0a760 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""DQN net"""
import mindspore.nn as nn
import mindspore.ops as ops
class DQN(nn.Cell):
def __init__(self, input_size, hidden_size, output_size):
super(DQN, self).__init__()
self.linear1 = nn.Dense(input_size, hidden_size)
self.linear2 = nn.Dense(hidden_size, output_size)
self.relu = nn.ReLU()
def construct(self, x):
x = self.relu(self.linear1(x))
return self.linear2(x)
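# Minimal usage sketch (the sizes below are illustrative assumptions, not from this repo):
#   net = DQN(input_size=4, hidden_size=128, output_size=2)
#   q_values = net(Tensor(np.zeros((1, 4), np.float32)))  # -> shape (1, 2)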
class WithLossCell(nn.Cell):
"""
network with loss function
"""
def __init__(self, backbone, loss_fn):
super(WithLossCell, self).__init__(auto_prefix=False)
self._backbone = backbone
self._loss_fn = loss_fn
self.gather = ops.GatherD()
def construct(self, x, act, label):
out = self._backbone(x)
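        # GatherD along axis 1 picks, for every sample, the Q-value of the action
        # actually taken, so the loss compares Q(s, a) against the provided target.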
out = self.gather(out, 1, act)
loss = self._loss_fn(out, label)
return loss
|
py | 1a3e0ba952a9181d8c2c91e9be7ac6761f40e9a5 | import os
import sys
import importlib
# Setting the correct config file
config_path = ".".join(["models", sys.argv[1]]) + "." if len(sys.argv) >= 2 else ""
config = importlib.import_module(config_path + "config")
output_FF_layers = [100, 2] #[4000, 1000, 100, 1] #[200, 200, 100, 100, 1]
cur_work_dir = os.getcwd()
d_model = 8
attention_heads = 4
attention_dropout = 0.0
n_layers = 1
add_seq_cnn = True
add_parallel_cnn = False
k_dim = 10
cnn_dropout = 0.2
attention_norm = False
attention_layer_norm = False
n_feature_dim = config.embedding_vec_dim
analysis = None #'deepCrispr'
oversample = False
#(should always be 1)
data_folder = os.path.join(cur_work_dir, 'datas_OTcpf1')
if not os.path.exists(data_folder):
print("Create {0} directory".format(data_folder))
os.mkdir(data_folder)
|
py | 1a3e0bb7efd90e9847f7990b89e890dd00c06412 | import os
import pandas as pd
import helpers.hgit as hgit
import helpers.hio as hio
import helpers.hsystem as hsysinte
import helpers.hunit_test as hunitest
class TestCsvToPq(hunitest.TestCase):
def test_csv_to_pq_script(self) -> None:
"""
Test that generated parquet dataset is correct.
"""
# Generate the files.
self._generate_example_csv_files()
pq_dir_path = os.path.join(self.get_scratch_space(), "pq_dir")
# Run command.
exec_path = os.path.join(
hgit.get_amp_abs_path(), "im_v2/common/data/transform/convert_csv_to_pq.py"
)
cmd = [
exec_path,
f"--src_dir {self.csv_dir_path}",
f"--dst_dir {pq_dir_path}",
"--datetime_col timestamp",
"--asset_col currency_pair",
]
cmd = " ".join(cmd)
hsysinte.system(cmd)
include_file_content = True
dir_signature = hunitest.get_dir_signature(
pq_dir_path, include_file_content
)
self.check_string(dir_signature, purify_text=True)
def _generate_example_csv_files(self) -> None:
"""
Create CSV files in scratch directory.
"""
test_dir = self.get_scratch_space()
self.csv_dir_path = os.path.join(test_dir, "csv_dir")
hio.create_dir(self.csv_dir_path, False)
d1 = {
"timestamp": [1638646800000, 1638646860000, 1638646960000],
"open": [49317.68, 49330.63, 49320.31],
"high": [49346.95, 49400.98, 49500.75],
"volume": [23.13681, 61.99752, 79.92761],
"low": [49315.45, 49322.78, 49325.23],
"close": [49330.63, 49325.23, 49328.23],
"currency_pair": ["BTC_USDT", "ETH_USDT", "BTC_USDT"],
"created_at": [
"2021-12-07 13:01:20.183463+00:00",
"2021-12-07 13:01:20.183463+00:00",
"2021-12-07 13:01:20.183463+00:00",
],
"exchange_id": ["binance", "binance", "binance"],
}
d2 = {
"timestamp": [1638656800000, 1638676860000, 1638656960000],
"open": [49318.68, 49331.63, 49321.31],
"high": [49446.95, 49500.98, 49600.75],
"volume": [24.13681, 62.99752, 80.92761],
"low": [49325.45, 49323.78, 49326.23],
"close": [49340.63, 49335.23, 49428.23],
"currency_pair": ["BTC_USDT", "ETH_USDT", "BTC_USDT"],
"created_at": [
"2021-12-07 13:01:20.183463+00:00",
"2021-12-07 13:01:20.183463+00:00",
"2021-12-07 13:01:20.183463+00:00",
],
"exchange_id": ["binance", "binance", "binance"],
}
df1 = pd.DataFrame(data=d1)
df1.to_csv(os.path.join(self.csv_dir_path, "test1.csv"), index=False)
df2 = pd.DataFrame(data=d2)
df2.to_csv(os.path.join(self.csv_dir_path, "test2.csv"), index=False)
|
py | 1a3e0f2127483dd2792f54d5a7cfd0f4ca4b2f95 | from typing import List
from ravendb.documents.session.tokens.query_tokens.query_token import QueryToken
import ravendb.documents.session.tokens.query_tokens.definitions as tokens
class DocumentQueryHelper:
@staticmethod
def add_space_if_needed(previous_token: QueryToken, current_token: QueryToken, writer: List[str]) -> None:
if previous_token is None:
return
if isinstance(previous_token, tokens.OpenSubclauseToken) or isinstance(
current_token, (tokens.CloseSubclauseToken, tokens.IntersectMarkerToken)
):
return
writer.append(" ")
|
py | 1a3e0f36c12af2a57c946303b5735aacd785e214 | from .cdm_argument_value import CdmArgumentValue
from .cdm_corpus_context import CdmCorpusContext
from .cdm_argument_def import CdmArgumentDefinition
from .cdm_attribute_context import CdmAttributeContext
from .cdm_attribute_context_ref import CdmAttributeContextReference
from .cdm_attribute_def import CdmAttribute
from .cdm_attribute_group_def import CdmAttributeGroupDefinition
from .cdm_attribute_group_ref import CdmAttributeGroupReference
from .cdm_attribute_item import CdmAttributeItem
from .cdm_attribute_ref import CdmAttributeReference
from .cdm_attribute_resolution_guidance_def import CdmAttributeResolutionGuidanceDefinition, CdmAttributeResolutionGuidance_EntityByReference, \
CdmAttributeResolutionGuidance_Expansion, CdmAttributeResolutionGuidance_SelectsSubAttribute
from .cdm_collection import CdmCollection
from .cdm_constant_entity_def import CdmConstantEntityDefinition
from .cdm_container_def import CdmContainerDefinition
from .cdm_corpus_def import CdmCorpusDefinition
from .cdm_data_partition_def import CdmDataPartitionDefinition
from .cdm_data_partition_pattern_def import CdmDataPartitionPatternDefinition
from .cdm_data_type_def import CdmDataTypeDefinition
from .cdm_data_type_ref import CdmDataTypeReference
from .cdm_document_def import CdmDocumentDefinition
from .cdm_e2e_relationship import CdmE2ERelationship
from .cdm_entity_attribute_def import CdmEntityAttributeDefinition
from .cdm_entity_declaration_def import CdmEntityDeclarationDefinition
from .cdm_entity_def import CdmEntityDefinition
from .cdm_entity_ref import CdmEntityReference
from .cdm_file_status import CdmFileStatus
from .cdm_folder_def import CdmFolderDefinition
from .cdm_import import CdmImport
from .cdm_local_entity_declaration_def import CdmLocalEntityDeclarationDefinition
from .cdm_manifest_def import CdmManifestDefinition
from .cdm_manifest_declaration_def import CdmManifestDeclarationDefinition
from .cdm_object import CdmObject
from .cdm_object_def import CdmObjectDefinition
from .cdm_object_ref import CdmObjectReference
from .cdm_parameter_def import CdmParameterDefinition
from .cdm_purpose_def import CdmPurposeDefinition
from .cdm_purpose_ref import CdmPurposeReference
from .cdm_referenced_entity_declaration_def import CdmReferencedEntityDeclarationDefinition
from .cdm_references_entities import CdmReferencesEntities
from .cdm_trait_collection import CdmTraitCollection
from .cdm_trait_def import CdmTraitDefinition
from .cdm_trait_ref import CdmTraitReference
from .cdm_type_attribute_def import CdmTypeAttributeDefinition
__all__ = [
'CdmArgumentValue',
'CdmArgumentDefinition',
'CdmAttributeContext',
'CdmAttributeContextReference',
'CdmAttribute',
'CdmAttributeGroupDefinition',
'CdmAttributeGroupReference',
'CdmAttributeItem',
'CdmAttributeReference',
'CdmAttributeResolutionGuidanceDefinition',
'CdmAttributeResolutionGuidance_EntityByReference',
'CdmAttributeResolutionGuidance_Expansion',
'CdmAttributeResolutionGuidance_SelectsSubAttribute',
'CdmCollection',
'CdmConstantEntityDefinition',
'CdmContainerDefinition',
'CdmCorpusDefinition',
'CdmDataPartitionDefinition',
'CdmDataPartitionPatternDefinition',
'CdmDataTypeDefinition',
'CdmDataTypeReference',
'CdmDocumentDefinition',
'CdmE2ERelationship',
'CdmEntityAttributeDefinition',
'CdmEntityDeclarationDefinition',
'CdmEntityDefinition',
'CdmEntityReference',
'CdmFileStatus',
'CdmFolderDefinition',
'CdmImport',
'CdmLocalEntityDeclarationDefinition',
'CdmManifestDefinition',
'CdmManifestDeclarationDefinition',
'CdmObject',
'CdmObjectDefinition',
'CdmObjectReference',
'CdmParameterDefinition',
'CdmPurposeDefinition',
'CdmPurposeReference',
'CdmReferencedEntityDeclarationDefinition',
'CdmReferencesEntities',
'CdmTraitCollection',
'CdmTraitDefinition',
'CdmTraitReference',
'CdmTypeAttributeDefinition'
]
|
py | 1a3e1049a9e19f23b77ad2606ff31679c37ca059 | """
WSGI config for fnf_34280 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fnf_34280.settings')
application = get_wsgi_application()
|
py | 1a3e110756e853bd187b939f7a54a8f359f96ee4 | import sys
import os.path
from PyQt5 import QtWidgets, uic
from PyQt5.QtCore import QThread, pyqtSlot
from mychat_client.client import User
from mychat_client.handlers import GuiReceiver
try:
addr = sys.argv[1]
except IndexError:
addr = 'localhost'
try:
port = int(sys.argv[2])
except IndexError:
port = 7777
except ValueError:
    print('Port must be an integer')
sys.exit(0)
try:
name = sys.argv[3]
print(name)
except IndexError:
login = input('Login: ')
name = login
paths = sys.path
b = ''
for i in paths:
if i.endswith('site-packages'):
b = i
form_path = b + '\mychat_client\sv_main.ui'
app = QtWidgets.QApplication(sys.argv)
window = uic.loadUi(form_path)
client = User(name, addr, port)
client.connect()
listener = GuiReceiver(client.sock, client.request_queue)
@pyqtSlot(str)
def update_chat(data):
try:
msg = data
window.listWidgetMessages.addItem(msg)
except Exception as e:
print(e)
listener.gotData.connect(update_chat)
th = QThread()
listener.moveToThread(th)
th.started.connect(listener.poll)
th.start()
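# The GuiReceiver polls the socket in its own QThread and emits gotData for every
# incoming message, which the update_chat slot appends to the message list.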
contact_list = client.get_contacts()
def load_contacts(contacts):
window.listWidgetContacts.clear()
for contact in contacts:
window.listWidgetContacts.addItem(contact)
load_contacts(contact_list)
def add_contact():
try:
username = window.textEditUsername.toPlainText()
if username:
client.add_contact(username)
window.listWidgetContacts.addItem(username)
except Exception as e:
print(e)
window.pushButtonAddContact.clicked.connect(add_contact)
def del_contact():
try:
current_item = window.listWidgetContacts.currentItem()
username = current_item.text()
client.del_contact(username)
        current_item = window.listWidgetContacts.takeItem(window.listWidgetContacts.row(current_item))
del current_item
except Exception as e:
print(e)
def send_message():
text = window.textEditMessage.toPlainText()
if text:
selected_index = window.listWidgetContacts.currentIndex()
user_name = selected_index.data()
client.send_message(user_name, text)
msg = '{:>30}: {}'.format(name, text)
window.listWidgetMessages.addItem(msg)
window.textEditMessage.clear()
window.pushButtonDelContact.clicked.connect(del_contact)
window.pushButtonSend.clicked.connect(send_message)
# window.show()
# sys.exit(app.exec_())
def main():
window.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
py | 1a3e114af70473d19d01035c4a37d635e246e6d3 | import torch.nn as nn
import torch.nn.functional as F
from layers import GraphSN
import torch
class GNN(torch.nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, n_layers, batchnorm_dim, dropout_1, dropout_2):
super().__init__()
self.dropout = dropout_1
self.convs = nn.ModuleList()
self.convs.append(GraphSN(input_dim, hidden_dim, batchnorm_dim, dropout_2))
for _ in range(n_layers-1):
self.convs.append(GraphSN(hidden_dim, hidden_dim, batchnorm_dim, dropout_2))
# In order to perform graph classification, each hidden state
# [batch x nodes x hidden_dim] is concatenated, resulting in
# [batch x nodes x input_dim+hidden_dim*(n_layers)], then aggregated
# along nodes dimension, without keeping that dimension:
# [batch x input_dim+hidden_dim*(n_layers)].
#self.out_proj = nn.Linear(input_dim+hidden_dim*(n_layers), output_dim)
self.out_proj = nn.Linear((input_dim+hidden_dim*(n_layers)), output_dim)
def forward(self, data):
X, A = data[:2]
hidden_states = [X]
for layer in self.convs:
X = F.dropout(layer(A, X), self.dropout)
hidden_states.append(X)
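        # Graph-level readout: concatenate the input and every layer's output along the
        # feature axis, then sum over the node axis to get one vector per graph.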
X = torch.cat(hidden_states, dim=2).sum(dim=1)
X = self.out_proj(X)
return X |
py | 1a3e114f33ca10911c5e80abc84e78ef809eb76a | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 21 11:52:27 2021
@author: user24
"""
class Person():
def __init__(self, name):
        # the double underscore makes the attribute private (name-mangled), so it
        # can only be accessed and changed through the class itself
        self.__name = name
    # This method can access and edit the private __name
    def who(self):
        print("I am " + self.__name + ".")
# Instance creation ("make an instance")
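# Minimal usage sketch (the name is an assumption, not from the source):
# taro = Person("Taro")
# taro.who()  # -> "I am Taro."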
py | 1a3e11db590f05e7735ef04b3f35204914d267f0 | # create heatmaps of players in a frame
# this generates a 2 x num_fames x num_features numpy matrix
# channel 1: the number of bounding boxes in in each X interval - (if the screen is split up into num_features intervals x-wise) - this will indicate grouping of players
# channel 2: the average size of these boxes in each X interval (this will indicate if it is a headshot and possibly also perspective)
# the input is a pickle file for each video clip with bounding boxes of each player
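# Illustrative example (the numbers are assumptions, not taken from the data): with
# image_width = 1280 and num_features = 100 each interval spans 12.8 px, so a box
# centered at x = 500 with width 40 adds 1 to heatmap[39] and 40 to total_widths[39];
# if it is the only box in that interval, avg_width[39] becomes 40.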
from config import *
from pipe import *
import mPyPl as mp
from mpyplx import *
import numpy as np
import pickle
image_width = 1280
num_features = 100
px_per_feature = image_width/num_features
def get_bbox_width(bbox):
"""Return the width of the bounding box
    :param bbox: the player bounding box [top_left x, top_left y, bottom_right x, bottom_right y]
    :return: the width of the bounding box
#>>> get_bbox_width([23,12,35,20])
#12
"""
return (bbox[2] - bbox[0])
def get_bbox_center(bbox):
"""Return the center of the bounding box
    :param bbox: the player bounding box [top_left x, top_left y, bottom_right x, bottom_right y]
:return: the center x, y of the bounding box
#>>> get_bbox_center([23,12,35,20])
#(29.0, 16.0)
"""
return ((bbox[2]-bbox[0])/2+bbox[0], (bbox[3]-bbox[1])/2+bbox[1])
def generate_heatmap_for_frame(bboxes):
"""
:param bboxes: the player bounding boxes for the frame
:return: a histogram of number of players in each x-section of the image,
and the average width of the boxes in that section
"""
heatmap = np.zeros(num_features)
total_widths = np.zeros(num_features)
for bbox in bboxes:
bbox_width = get_bbox_width(bbox)
bbox_center = get_bbox_center(bbox)
f_index = int(bbox_center[0]/px_per_feature)
heatmap[f_index] += 1
total_widths[f_index] += bbox_width
avg_width = np.divide(total_widths, heatmap, out=np.zeros_like(total_widths), where=heatmap!=0)
return (heatmap, avg_width)
def calcheatmap(x, nfn):
pickle_name = x['filename']
print("Creating player heatmaps for {}".format(pickle_name))
with open(pickle_name, 'rb') as f:
frames = pickle.load(f)
num_frames = len(frames)
heatmaps = np.zeros((num_frames, num_features))
avg_widths = np.zeros((num_frames, num_features))
for i, frame in enumerate(frames):
(heatmap, avg_width) = generate_heatmap_for_frame(frame)
heatmaps[i] = heatmap
avg_widths[i] = avg_width
f = np.concatenate((heatmaps, avg_widths)).reshape(2, num_frames, num_features)
np.save(nfn, f)
def arrcalcheatmap(frames):
num_frames = len(frames)
heatmaps = np.zeros((num_frames, num_features))
avg_widths = np.zeros((num_frames, num_features))
for i, frame in enumerate(frames):
(heatmap, avg_width) = generate_heatmap_for_frame(frame)
heatmaps[i] = heatmap
avg_widths[i] = avg_width
f = np.concatenate((heatmaps, avg_widths)).reshape(2, num_frames, num_features)
return f
if __name__ == "__main__":
(mp.get_datastream(data_dir, ext=".boxes.pickle")
| cachecomputex(".boxes.pickle", ".heatmaps.npy", calcheatmap, lambda x, nx: print("Skipping {}".format(x)))
| execute
)
|
py | 1a3e12894dc8d26f7ead2c238f2ab797f6ce3a39 | import configparser
import logging
import os
import warnings
_logger = logging.getLogger(__name__)
FILENAME = "jolly_brancher.ini"
# CONFIG VARS
KEYS_AND_PROMPTS = [
["auth_email", "your login email for Atlassian"],
["base_url", "the base URL for Atlassian (e.g., https://cirrusv2x.atlassian.net)"],
[
"token",
"your Atlassian API token which can be generated here (https://id.atlassian.com/manage-profile/security/api-tokens)",
],
]
CONFIG_DIR = os.path.expanduser("~/.config")
CONFIG_FILENAME = os.path.join(CONFIG_DIR, FILENAME)
JIRA_SECTION_NAME = "jira"
GIT_SECTION_NAME = "git"
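# Expected layout of ~/.config/jolly_brancher.ini (all values below are illustrative):
#   [jira]
#   auth_email = you@example.com
#   base_url = https://example.atlassian.net
#   token = <api token>
#   branch_format = {issue_type}/{ticket}-{summary}  (optional)
#   [git]
#   repo_root = <path to local repositories>
#   pat = <forge personal access token>
#   forge_root = <forge base URL>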
def config_setup():
config = configparser.ConfigParser()
if not os.path.exists(CONFIG_DIR):
os.mkdir(CONFIG_DIR)
if os.path.exists(CONFIG_FILENAME):
config.read(CONFIG_FILENAME)
for key, input_prompt in KEYS_AND_PROMPTS:
if (
key not in config[JIRA_SECTION_NAME]
or config[JIRA_SECTION_NAME][key] == ""
): # check all entries are present and populated
config[JIRA_SECTION_NAME][key] = input(f"Please enter {input_prompt}: ")
else:
warnings.warn(f"~/.config/{FILENAME} does not exist. Creating the file now...")
config[JIRA_SECTION_NAME] = {
key: input(f"Please enter {input_prompt}: ")
for key, input_prompt in KEYS_AND_PROMPTS
} # ask for input and set all entries
with open(CONFIG_FILENAME, "w") as configfile:
config.write(configfile)
def fetch_config():
config_setup()
config = configparser.ConfigParser()
config.read(CONFIG_FILENAME)
default_config = config[JIRA_SECTION_NAME]
git_config = config[GIT_SECTION_NAME]
DEFAULT_BRANCH_FORMAT = "{issue_type}/{ticket}-{summary}"
return (
git_config["repo_root"],
default_config["token"],
default_config["base_url"],
default_config["auth_email"],
default_config.get("branch_format", DEFAULT_BRANCH_FORMAT),
git_config["pat"],
git_config["forge_root"],
)
|
py | 1a3e131b0b03f7b3b49ceaf33ba00a40373fa34d | #!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['ZARHEXCASH-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
|
py | 1a3e138a5f430cb38676c0168391298ca882655c | if __name__ == '__main__':
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
plt.style.use('ja')
data_dir = '../IceCubeData/'
mp = 1.0
nu_mass = 0.15
filename = 'mp' + str(mp) + 'mnu' + str(nu_mass) + '.csv'
data = pd.read_csv(data_dir + 'complex/' + filename, index_col=0)
print('--- Opened ' + data_dir + 'complex/' + filename + ' ---')
gmin_arr1 = data['gmin_arr']
mn_arr1 = data['mn_arr']
mp = 0.5
nu_mass = 0.1
filename = 'mp' + str(mp) + 'mnu' + str(nu_mass) + '.csv'
data = pd.read_csv(data_dir + 'complex/' + filename, index_col=0)
print('--- Opened ' + data_dir + 'complex/' + filename + ' ---')
gmin_arr2 = data['gmin_arr']
mn_arr2 = data['mn_arr']
mp = 0.5
nu_mass = 0.05
filename = 'mp' + str(mp) + 'mnu' + str(nu_mass) + '.csv'
data = pd.read_csv(data_dir + 'complex/' + filename, index_col=0)
print('--- Opened ' + data_dir + 'complex/' + filename + ' ---')
gmin_arr3 = data['gmin_arr']
mn_arr3 = data['mn_arr']
mn_max = 12.0 # MeV
g_min = -5.0
g_max = -1.0
axis_min = -4.0
axis_max = -1.0
plt.figure()
plt.rcParams.update({'font.size': 22})
plt.plot([mp, mp], [np.power(10.0, -4), np.power(10.0, -2)], c='k', linewidth=1.0)
plt.plot([mp, 10.0], [np.power(10.0, -2), np.power(10.0, -2)], c='k', linewidth=1.0)
plt.plot([10.0, 10.0], [np.power(10.0, -2), np.power(10.0, -4)], c='k', linewidth=1.0)
plt.semilogy(mn_arr1, gmin_arr1, linewidth=1.0, linestyle='-', marker='', markersize=0.0, markerfacecolor='r', alpha = 1.0, markeredgewidth=0.0)
upper_limit1 = np.empty(len(gmin_arr1))
upper_limit1.fill(np.power(10.0, g_max))
plt.fill_between(mn_arr1, gmin_arr1, upper_limit1, alpha=0.2, edgecolor='r', facecolor='k', linewidth=2.0)
style = dict(size=15, color='r')
plt.text(6.0, np.power(10.0, -2.6), r'$m_\nu = 0.15 \, \mathrm{eV}$', **style)
plt.semilogy(mn_arr2, gmin_arr2, linewidth=1.0, linestyle='-', marker='', markersize=0.0, markerfacecolor='b', alpha = 1.0, markeredgewidth=0.0)
upper_limit2 = np.empty(len(gmin_arr2))
upper_limit2.fill(np.power(10.0, g_max))
plt.fill_between(mn_arr2, gmin_arr2, upper_limit2, alpha=0.2, edgecolor='b', facecolor='k', linewidth=0.0)
style = dict(size=15, color='b')
plt.text(6.0, np.power(10.0, -2.9), r'$m_\nu = 0.10 \, \mathrm{eV}$', **style)
plt.semilogy(mn_arr3, gmin_arr3, linewidth=1.0, linestyle='-', marker='', markersize=0.0, markerfacecolor='g', alpha = 1.0, markeredgewidth=0.0)
upper_limit3 = np.empty(len(gmin_arr3))
upper_limit3.fill(np.power(10.0, g_max))
plt.fill_between(mn_arr3, gmin_arr3, upper_limit3, alpha=0.2, edgecolor='g', facecolor='k', linewidth=0.0)
style = dict(size=15, color='g')
plt.text(6.0, np.power(10.0, -3.2), r'$m_\nu = 0.05 \, \mathrm{eV}$', **style)
plt.fill_between([10.0, mn_max], [np.power(10.0, g_min), np.power(10.0, g_min)], [np.power(10.0, g_max), np.power(10.0, g_max)], alpha=0.1, edgecolor='k', facecolor='k', linewidth=0.0)
plt.fill_between([0.0, mp], [np.power(10.0, g_min), np.power(10.0, g_min)], [np.power(10.0, g_max), np.power(10.0, g_max)], alpha=0.1, edgecolor='k', facecolor='k', linewidth=0.0)
# plt.fill_betweenx([np.power(10.0, -5), 3*np.power(10.0,-4)], [mp, mp], [10.0, 10.0], alpha=0.1, edgecolor='k', facecolor='k', linewidth=0.0)
plt.fill_betweenx([np.power(10.0, -2), np.power(10.0, g_max)], [mp, mp], [10.0, 10.0], alpha=0.1, edgecolor='k', facecolor='k', linewidth=0.0)
style = dict(size=15, color='k')
plt.text(6.0, np.power(10.0, -3.5), r'$K^+$ decay constraint', **style)
axes = plt.axis()
plt.axis([0.0, mn_max, np.power(10.0, axis_min), np.power(10.0, axis_max)])
plt.xlabel(r'$m_N / \mathrm{MeV}$')
plt.ylabel(r'$g_\mu$')
#plt.savefig('/Users/james/allMyStuff/Neutrinos/Constraints/plots/constraints[{},{}].pdf'.format(mp, nu_mass))
plt.savefig('/Users/james/allMyStuff/Neutrinos/Constraints/plots/constraints[mp{}].pdf'.format(mp, nu_mass))
print('--- Saved constraints[mp{}].pdf ---'.format(mp, nu_mass))
|
py | 1a3e151d5502992e88684cfd020c7d17455c517c | """
Spin up an instance, run a single command, spin it down :-)
Usage:
run.py [options] -- <COMMAND> ...
run.py [options] <COMMAND> ...
Options:
--type TYPE type, eg ng0 for bfboost, or ngd3 for dual Titan X [default: ng0]
--image IMAGE image [default: s1]
"""
from __future__ import print_function
import sys
import yaml
import json
import requests
import time
from docopt import docopt
from util.logtailer import LogTailer
api_url = 'https://api.jarvice.com/jarvice'
args = docopt(__doc__)
instancetype = args['--type']
image = args['--image']
command = args['<COMMAND>']
print('command', command)
with open('nimbix.yaml', 'r') as f:
config = yaml.load(f)
username = config['username']
apikey = config['apikey']
launch_data = {
"machine": {
"nodes": "1",
"type": instancetype
},
"variables": {
"FOO": "BAR"
},
"vault": {
"readonly": False,
"force": False,
"name": "drop.jarvice.com"
},
"user": {
"username": username,
"apikey": apikey
},
"nae": {
"force": False,
"name": image,
# "geometry": "1904x881",
"command": " ".join(command),
"ephemeral": False,
"staging": True,
"interactive": False
}
}
res = requests.post('%s/submit' % api_url, json=launch_data)
assert res.status_code == 200
res = json.loads(res.content.decode('utf-8'))
jobnumber = res['number']
print('jobnumber %s' % jobnumber)
logtailer = LogTailer(username=username, apikey=apikey, jobnumber=jobnumber)
while True:
res = requests.get('%s/status?username=%s&apikey=%s&number=%s' % (api_url, username, apikey, jobnumber))
assert res.status_code == 200
res = json.loads(res.content.decode('utf-8'))
status = res[str(jobnumber)]['job_status']
logtailer.updateFromTail()
if 'COMPLETED' in status:
break
time.sleep(1)
logtailer.updateFromOutput()
res = requests.get('%s/status?username=%s&apikey=%s&number=%s' % (api_url, username, apikey, jobnumber))
assert res.status_code == 200
res = json.loads(res.content.decode('utf-8'))
print('wall time %s' % res[str(jobnumber)]['job_walltime'])
|
py | 1a3e152b6161b26658f9806629887d77b6911f72 | from __future__ import print_function, division
from sympy.core import S, C
from sympy.core.compatibility import u
from sympy.core.exprtools import factor_terms
from sympy.core.function import (Function, Derivative, ArgumentIndexError,
AppliedUndef)
from sympy.core.logic import fuzzy_not
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.core import Add, Mul
from sympy.core.relational import Eq
from sympy.functions.elementary.trigonometric import atan, atan2
###############################################################################
######################### REAL and IMAGINARY PARTS ############################
###############################################################################
class re(Function):
"""Returns real part of expression. This function performs only
elementary analysis and so it will fail to decompose properly
more complicated expressions. If completely simplified result
is needed then use Basic.as_real_imag() or perform complex
expansion on instance of this function.
>>> from sympy import re, im, I, E
>>> from sympy.abc import x, y
>>> re(2*E)
2*E
>>> re(2*I + 17)
17
>>> re(2*I)
0
>>> re(im(x) + x*I + 2)
2
See Also
========
im
"""
is_real = True
    unbranched = True # implicitly works on the projection to C
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
elif arg.is_real:
return arg
elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_real:
return S.Zero
elif arg.is_Function and arg.func is conjugate:
return re(arg.args[0])
else:
included, reverted, excluded = [], [], []
args = Add.make_args(arg)
for term in args:
coeff = term.as_coefficient(S.ImaginaryUnit)
if coeff is not None:
if not coeff.is_real:
reverted.append(coeff)
elif not term.has(S.ImaginaryUnit) and term.is_real:
excluded.append(term)
else:
# Try to do some advanced expansion. If
# impossible, don't try to do re(arg) again
# (because this is what we are trying to do now).
real_imag = term.as_real_imag(ignore=arg)
if real_imag:
excluded.append(real_imag[0])
else:
included.append(term)
if len(args) != len(included):
a, b, c = map(lambda xs: Add(*xs),
[included, reverted, excluded])
return cls(a) - im(b) + c
def as_real_imag(self, deep=True, **hints):
"""
Returns the real number with a zero complex part.
"""
return (self, S.Zero)
def _eval_derivative(self, x):
if x.is_real or self.args[0].is_real:
return re(Derivative(self.args[0], x, evaluate=True))
if x.is_imaginary or self.args[0].is_imaginary:
return -S.ImaginaryUnit \
* im(Derivative(self.args[0], x, evaluate=True))
def _eval_rewrite_as_im(self, arg):
return self.args[0] - im(self.args[0])
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
def _sage_(self):
import sage.all as sage
return sage.real_part(self.args[0]._sage_())
class im(Function):
"""
Returns imaginary part of expression. This function performs only
elementary analysis and so it will fail to decompose properly more
complicated expressions. If completely simplified result is needed then
use Basic.as_real_imag() or perform complex expansion on instance of
this function.
Examples
========
>>> from sympy import re, im, E, I
>>> from sympy.abc import x, y
>>> im(2*E)
0
>>> re(2*I + 17)
17
>>> im(x*I)
re(x)
>>> im(re(x) + y)
im(y)
See Also
========
re
"""
is_real = True
    unbranched = True # implicitly works on the projection to C
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
elif arg.is_real:
return S.Zero
elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_real:
return -S.ImaginaryUnit * arg
elif arg.is_Function and arg.func is conjugate:
return -im(arg.args[0])
else:
included, reverted, excluded = [], [], []
args = Add.make_args(arg)
for term in args:
coeff = term.as_coefficient(S.ImaginaryUnit)
if coeff is not None:
if not coeff.is_real:
reverted.append(coeff)
else:
excluded.append(coeff)
elif term.has(S.ImaginaryUnit) or not term.is_real:
# Try to do some advanced expansion. If
# impossible, don't try to do im(arg) again
# (because this is what we are trying to do now).
real_imag = term.as_real_imag(ignore=arg)
if real_imag:
excluded.append(real_imag[1])
else:
included.append(term)
if len(args) != len(included):
a, b, c = map(lambda xs: Add(*xs),
[included, reverted, excluded])
return cls(a) + re(b) + c
def as_real_imag(self, deep=True, **hints):
"""
Return the imaginary part with a zero real part.
Examples
========
>>> from sympy.functions import im
>>> from sympy import I
>>> im(2 + 3*I).as_real_imag()
(3, 0)
"""
return (self, S.Zero)
def _eval_derivative(self, x):
if x.is_real or self.args[0].is_real:
return im(Derivative(self.args[0], x, evaluate=True))
if x.is_imaginary or self.args[0].is_imaginary:
return -S.ImaginaryUnit \
* re(Derivative(self.args[0], x, evaluate=True))
def _sage_(self):
import sage.all as sage
return sage.imag_part(self.args[0]._sage_())
def _eval_rewrite_as_re(self, arg):
return self.args[0] - re(self.args[0])
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
###############################################################################
############### SIGN, ABSOLUTE VALUE, ARGUMENT and CONJUGATION ################
###############################################################################
class sign(Function):
"""
Returns the complex sign of an expression:
    If the expression is real the sign will be:
* 1 if expression is positive
* 0 if expression is equal to zero
* -1 if expression is negative
    If the expression is imaginary the sign will be:
* I if im(expression) is positive
* -I if im(expression) is negative
Otherwise an unevaluated expression will be returned. When evaluated, the
result (in general) will be ``cos(arg(expr)) + I*sin(arg(expr))``.
Examples
========
>>> from sympy.functions import sign
>>> from sympy.core.numbers import I
>>> sign(-1)
-1
>>> sign(0)
0
>>> sign(-3*I)
-I
>>> sign(1 + I)
sign(1 + I)
>>> _.evalf()
0.707106781186548 + 0.707106781186548*I
See Also
========
Abs, conjugate
"""
is_finite = True
is_complex = True
def doit(self):
if self.args[0].is_nonzero:
return self.args[0] / Abs(self.args[0])
return self
@classmethod
def eval(cls, arg):
# handle what we can
if arg.is_Mul:
c, args = arg.as_coeff_mul()
unk = []
s = sign(c)
for a in args:
if a.is_negative:
s = -s
elif a.is_positive:
pass
else:
ai = im(a)
if a.is_imaginary and ai.is_comparable: # i.e. a = I*real
s *= S.ImaginaryUnit
if ai.is_negative:
# can't use sign(ai) here since ai might not be
# a Number
s = -s
else:
unk.append(a)
if c is S.One and len(unk) == len(args):
return None
return s * cls(arg._new_rawargs(*unk))
if arg is S.NaN:
return S.NaN
if arg.is_zero: # it may be an Expr that is zero
return S.Zero
if arg.is_positive:
return S.One
if arg.is_negative:
return S.NegativeOne
if arg.is_Function:
if arg.func is sign:
return arg
if arg.is_imaginary:
if arg.is_Pow and arg.exp is S.Half:
# we catch this because non-trivial sqrt args are not expanded
# e.g. sqrt(1-sqrt(2)) --x--> to I*sqrt(sqrt(2) - 1)
return S.ImaginaryUnit
arg2 = -S.ImaginaryUnit * arg
if arg2.is_positive:
return S.ImaginaryUnit
if arg2.is_negative:
return -S.ImaginaryUnit
def _eval_Abs(self):
if self.args[0].is_nonzero:
return S.One
def _eval_conjugate(self):
return sign(conjugate(self.args[0]))
def _eval_derivative(self, x):
if self.args[0].is_real:
from sympy.functions.special.delta_functions import DiracDelta
return 2 * Derivative(self.args[0], x, evaluate=True) \
* DiracDelta(self.args[0])
elif self.args[0].is_imaginary:
from sympy.functions.special.delta_functions import DiracDelta
return 2 * Derivative(self.args[0], x, evaluate=True) \
* DiracDelta(-S.ImaginaryUnit * self.args[0])
def _eval_is_nonnegative(self):
if self.args[0].is_nonnegative:
return True
def _eval_is_nonpositive(self):
if self.args[0].is_nonpositive:
return True
def _eval_is_imaginary(self):
return self.args[0].is_imaginary
def _eval_is_integer(self):
return self.args[0].is_real
def _eval_is_zero(self):
return self.args[0].is_zero
def _eval_power(self, other):
if (
self.args[0].is_real and
self.args[0].is_nonzero and
other.is_integer and
other.is_even
):
return S.One
def _sage_(self):
import sage.all as sage
return sage.sgn(self.args[0]._sage_())
def _eval_rewrite_as_Piecewise(self, arg):
if arg.is_real:
return Piecewise((1, arg > 0), (-1, arg < 0), (0, True))
def _eval_rewrite_as_Heaviside(self, arg):
if arg.is_real:
return C.Heaviside(arg)*2-1
def _eval_simplify(self, ratio, measure):
return self.func(self.args[0].factor())
class Abs(Function):
"""
Return the absolute value of the argument.
This is an extension of the built-in function abs() to accept symbolic
values. If you pass a SymPy expression to the built-in abs(), it will
pass it automatically to Abs().
Examples
========
>>> from sympy import Abs, Symbol, S
>>> Abs(-1)
1
>>> x = Symbol('x', real=True)
>>> Abs(-x)
Abs(x)
>>> Abs(x**2)
x**2
>>> abs(-x) # The Python built-in
Abs(x)
Note that the Python built-in will return either an Expr or int depending on
the argument::
>>> type(abs(-1))
<... 'int'>
>>> type(abs(S.NegativeOne))
<class 'sympy.core.numbers.One'>
Abs will always return a sympy object.
See Also
========
sign, conjugate
"""
is_real = True
is_negative = False
unbranched = True
def fdiff(self, argindex=1):
"""
Get the first derivative of the argument to Abs().
Examples
========
>>> from sympy.abc import x
>>> from sympy.functions import Abs
>>> Abs(-x).fdiff()
sign(x)
"""
if argindex == 1:
return sign(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy.simplify.simplify import signsimp
if hasattr(arg, '_eval_Abs'):
obj = arg._eval_Abs()
if obj is not None:
return obj
if not isinstance(arg, C.Expr):
raise TypeError("Bad argument type for Abs(): %s" % type(arg))
# handle what we can
arg = signsimp(arg, evaluate=False)
if arg.is_Mul:
known = []
unk = []
for t in arg.args:
tnew = cls(t)
if tnew.func is cls:
unk.append(tnew.args[0])
else:
known.append(tnew)
known = Mul(*known)
unk = cls(Mul(*unk), evaluate=False) if unk else S.One
return known*unk
if arg is S.NaN:
return S.NaN
if arg.is_Pow:
base, exponent = arg.as_base_exp()
if base.is_real:
if exponent.is_integer:
if exponent.is_even:
return arg
if base is S.NegativeOne:
return S.One
if base.func is cls and exponent is S.NegativeOne:
return arg
return Abs(base)**exponent
if base.is_positive == True:
return base**re(exponent)
return (-base)**re(exponent)*C.exp(-S.Pi*im(exponent))
if isinstance(arg, C.exp):
return C.exp(re(arg.args[0]))
if arg.is_zero: # it may be an Expr that is zero
return S.Zero
if arg.is_nonnegative:
return arg
if arg.is_nonpositive:
return -arg
if arg.is_imaginary:
arg2 = -S.ImaginaryUnit * arg
if arg2.is_nonnegative:
return arg2
if arg.is_Add:
if arg.has(S.Infinity, S.NegativeInfinity):
if any(a.is_infinite for a in arg.as_real_imag()):
return S.Infinity
if arg.is_real is None and arg.is_imaginary is None:
if all(a.is_real or a.is_imaginary or (S.ImaginaryUnit*a).is_real for a in arg.args):
from sympy import expand_mul
return sqrt(expand_mul(arg*arg.conjugate()))
if arg.is_real is False and arg.is_imaginary is False:
from sympy import expand_mul
return sqrt(expand_mul(arg*arg.conjugate()))
def _eval_is_integer(self):
if self.args[0].is_real:
return self.args[0].is_integer
def _eval_is_nonzero(self):
return self._args[0].is_nonzero
def _eval_is_positive(self):
return self.is_nonzero
def _eval_is_rational(self):
if self.args[0].is_real:
return self.args[0].is_rational
def _eval_is_even(self):
if self.args[0].is_real:
return self.args[0].is_even
def _eval_is_odd(self):
if self.args[0].is_real:
return self.args[0].is_odd
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
def _eval_power(self, exponent):
if self.args[0].is_real and exponent.is_integer:
if exponent.is_even:
return self.args[0]**exponent
elif exponent is not S.NegativeOne and exponent.is_Integer:
return self.args[0]**(exponent - 1)*self
return
def _eval_nseries(self, x, n, logx):
direction = self.args[0].leadterm(x)[0]
s = self.args[0]._eval_nseries(x, n=n, logx=logx)
when = Eq(direction, 0)
return Piecewise(
((s.subs(direction, 0)), when),
(sign(direction)*s, True),
)
def _sage_(self):
import sage.all as sage
return sage.abs_symbolic(self.args[0]._sage_())
def _eval_derivative(self, x):
if self.args[0].is_real or self.args[0].is_imaginary:
return Derivative(self.args[0], x, evaluate=True) \
* sign(conjugate(self.args[0]))
return (re(self.args[0]) * Derivative(re(self.args[0]), x,
evaluate=True) + im(self.args[0]) * Derivative(im(self.args[0]),
x, evaluate=True)) / Abs(self.args[0])
def _eval_rewrite_as_Heaviside(self, arg):
# Note this only holds for real arg (since Heaviside is not defined
# for complex arguments).
if arg.is_real:
return arg*(C.Heaviside(arg) - C.Heaviside(-arg))
def _eval_rewrite_as_Piecewise(self, arg):
if arg.is_real:
return Piecewise((arg, arg >= 0), (-arg, True))
def _eval_rewrite_as_sign(self, arg):
return arg/C.sign(arg)
class arg(Function):
"""Returns the argument (in radians) of a complex number"""
is_real = True
is_finite = True
@classmethod
def eval(cls, arg):
if not arg.is_Atom:
c, arg_ = factor_terms(arg).as_coeff_Mul()
if arg_.is_Mul:
arg_ = Mul(*[a if (sign(a) not in (-1, 1)) else
sign(a) for a in arg_.args])
arg_ = sign(c)*arg_
else:
arg_ = arg
x, y = re(arg_), im(arg_)
rv = C.atan2(y, x)
if rv.is_number and not rv.atoms(AppliedUndef):
return rv
if arg_ != arg:
return cls(arg_, evaluate=False)
def _eval_derivative(self, t):
x, y = re(self.args[0]), im(self.args[0])
return (x * Derivative(y, t, evaluate=True) - y *
Derivative(x, t, evaluate=True)) / (x**2 + y**2)
def _eval_rewrite_as_atan2(self, arg):
x, y = re(self.args[0]), im(self.args[0])
return atan2(y, x)
class conjugate(Function):
"""
Changes the sign of the imaginary part of a complex number.
Examples
========
>>> from sympy import conjugate, I
>>> conjugate(1 + I)
1 - I
See Also
========
sign, Abs
"""
@classmethod
def eval(cls, arg):
obj = arg._eval_conjugate()
if obj is not None:
return obj
def _eval_Abs(self):
return Abs(self.args[0], evaluate=True)
def _eval_adjoint(self):
return transpose(self.args[0])
def _eval_conjugate(self):
return self.args[0]
def _eval_derivative(self, x):
if x.is_real:
return conjugate(Derivative(self.args[0], x, evaluate=True))
elif x.is_imaginary:
return -conjugate(Derivative(self.args[0], x, evaluate=True))
def _eval_transpose(self):
return adjoint(self.args[0])
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
class transpose(Function):
"""
Linear map transposition.
"""
@classmethod
def eval(cls, arg):
obj = arg._eval_transpose()
if obj is not None:
return obj
def _eval_adjoint(self):
return conjugate(self.args[0])
def _eval_conjugate(self):
return adjoint(self.args[0])
def _eval_transpose(self):
return self.args[0]
class adjoint(Function):
"""
Conjugate transpose or Hermite conjugation.
"""
@classmethod
def eval(cls, arg):
obj = arg._eval_adjoint()
if obj is not None:
return obj
obj = arg._eval_transpose()
if obj is not None:
return conjugate(obj)
def _eval_adjoint(self):
return self.args[0]
def _eval_conjugate(self):
return transpose(self.args[0])
def _eval_transpose(self):
return conjugate(self.args[0])
def _latex(self, printer, exp=None, *args):
arg = printer._print(self.args[0])
tex = r'%s^{\dag}' % arg
if exp:
tex = r'\left(%s\right)^{%s}' % (tex, printer._print(exp))
return tex
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
pform = printer._print(self.args[0], *args)
if printer._use_unicode:
pform = pform**prettyForm(u('\N{DAGGER}'))
else:
pform = pform**prettyForm('+')
return pform
###############################################################################
############### HANDLING OF POLAR NUMBERS #####################################
###############################################################################
class polar_lift(Function):
"""
Lift argument to the Riemann surface of the logarithm, using the
standard branch.
>>> from sympy import Symbol, polar_lift, I
>>> p = Symbol('p', polar=True)
>>> x = Symbol('x')
>>> polar_lift(4)
4*exp_polar(0)
>>> polar_lift(-4)
4*exp_polar(I*pi)
>>> polar_lift(-I)
exp_polar(-I*pi/2)
>>> polar_lift(I + 2)
polar_lift(2 + I)
>>> polar_lift(4*x)
4*polar_lift(x)
>>> polar_lift(4*p)
4*p
See Also
========
sympy.functions.elementary.exponential.exp_polar
periodic_argument
"""
is_polar = True
is_comparable = False # Cannot be evalf'd.
@classmethod
def eval(cls, arg):
from sympy import exp_polar, pi, I, arg as argument
if arg.is_number:
ar = argument(arg)
#if not ar.has(argument) and not ar.has(atan):
if ar in (0, pi/2, -pi/2, pi):
return exp_polar(I*ar)*abs(arg)
if arg.is_Mul:
args = arg.args
else:
args = [arg]
included = []
excluded = []
positive = []
for arg in args:
if arg.is_polar:
included += [arg]
elif arg.is_positive:
positive += [arg]
else:
excluded += [arg]
if len(excluded) < len(args):
if excluded:
return Mul(*(included + positive))*polar_lift(Mul(*excluded))
elif included:
return Mul(*(included + positive))
else:
return Mul(*positive)*exp_polar(0)
def _eval_evalf(self, prec):
""" Careful! any evalf of polar numbers is flaky """
return self.args[0]._eval_evalf(prec)
def _eval_Abs(self):
return Abs(self.args[0], evaluate=True)
class periodic_argument(Function):
"""
Represent the argument on a quotient of the Riemann surface of the
logarithm. That is, given a period P, always return a value in
(-P/2, P/2], by using exp(P*I) == 1.
>>> from sympy import exp, exp_polar, periodic_argument, unbranched_argument
>>> from sympy import I, pi
>>> unbranched_argument(exp(5*I*pi))
pi
>>> unbranched_argument(exp_polar(5*I*pi))
5*pi
>>> periodic_argument(exp_polar(5*I*pi), 2*pi)
pi
>>> periodic_argument(exp_polar(5*I*pi), 3*pi)
-pi
>>> periodic_argument(exp_polar(5*I*pi), pi)
0
See Also
========
sympy.functions.elementary.exponential.exp_polar
polar_lift : Lift argument to the Riemann surface of the logarithm
principal_branch
"""
@classmethod
def _getunbranched(cls, ar):
from sympy import exp_polar, log, polar_lift
if ar.is_Mul:
args = ar.args
else:
args = [ar]
unbranched = 0
for a in args:
if not a.is_polar:
unbranched += arg(a)
elif a.func is exp_polar:
unbranched += a.exp.as_real_imag()[1]
elif a.is_Pow:
re, im = a.exp.as_real_imag()
unbranched += re*unbranched_argument(
a.base) + im*log(abs(a.base))
elif a.func is polar_lift:
unbranched += arg(a.args[0])
else:
return None
return unbranched
@classmethod
def eval(cls, ar, period):
# Our strategy is to evaluate the argument on the Riemann surface of the
# logarithm, and then reduce.
# NOTE evidently this means it is a rather bad idea to use this with
# period != 2*pi and non-polar numbers.
from sympy import ceiling, oo, atan2, atan, polar_lift, pi, Mul
if not period.is_positive:
return None
if period == oo and isinstance(ar, principal_branch):
return periodic_argument(*ar.args)
if ar.func is polar_lift and period >= 2*pi:
return periodic_argument(ar.args[0], period)
if ar.is_Mul:
newargs = [x for x in ar.args if not x.is_positive]
if len(newargs) != len(ar.args):
return periodic_argument(Mul(*newargs), period)
unbranched = cls._getunbranched(ar)
if unbranched is None:
return None
if unbranched.has(periodic_argument, atan2, arg, atan):
return None
if period == oo:
return unbranched
if period != oo:
n = ceiling(unbranched/period - S(1)/2)*period
if not n.has(ceiling):
return unbranched - n
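    # Worked example of the reduction performed in eval() above, using values
    # from the class docstring: exp_polar(5*I*pi) has unbranched argument 5*pi.
    # With period = 2*pi, n = ceiling(5*pi/(2*pi) - 1/2)*2*pi = 4*pi, so the
    # result is 5*pi - 4*pi = pi; with period = 3*pi, n = ceiling(5/3 - 1/2)*3*pi
    # = 6*pi, giving 5*pi - 6*pi = -pi.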
def _eval_evalf(self, prec):
from sympy import ceiling, oo
z, period = self.args
if period == oo:
unbranched = periodic_argument._getunbranched(z)
if unbranched is None:
return self
return unbranched._eval_evalf(prec)
ub = periodic_argument(z, oo)._eval_evalf(prec)
return (ub - ceiling(ub/period - S(1)/2)*period)._eval_evalf(prec)
def unbranched_argument(arg):
from sympy import oo
return periodic_argument(arg, oo)
class principal_branch(Function):
"""
Represent a polar number reduced to its principal branch on a quotient
of the Riemann surface of the logarithm.
This is a function of two arguments. The first argument is a polar
    number `z`, and the second one a positive real number or infinity, `p`.
The result is "z mod exp_polar(I*p)".
>>> from sympy import exp_polar, principal_branch, oo, I, pi
>>> from sympy.abc import z
>>> principal_branch(z, oo)
z
>>> principal_branch(exp_polar(2*pi*I)*3, 2*pi)
3*exp_polar(0)
>>> principal_branch(exp_polar(2*pi*I)*3*z, 2*pi)
3*principal_branch(z, 2*pi)
See Also
========
sympy.functions.elementary.exponential.exp_polar
polar_lift : Lift argument to the Riemann surface of the logarithm
periodic_argument
"""
is_polar = True
is_comparable = False # cannot always be evalf'd
@classmethod
def eval(self, x, period):
from sympy import oo, exp_polar, I, Mul, polar_lift, Symbol
if isinstance(x, polar_lift):
return principal_branch(x.args[0], period)
if period == oo:
return x
ub = periodic_argument(x, oo)
barg = periodic_argument(x, period)
if ub != barg and not ub.has(periodic_argument) \
and not barg.has(periodic_argument):
pl = polar_lift(x)
def mr(expr):
if not isinstance(expr, Symbol):
return polar_lift(expr)
return expr
pl = pl.replace(polar_lift, mr)
if not pl.has(polar_lift):
res = exp_polar(I*(barg - ub))*pl
if not res.is_polar and not res.has(exp_polar):
res *= exp_polar(0)
return res
if not x.free_symbols:
c, m = x, ()
else:
c, m = x.as_coeff_mul(*x.free_symbols)
others = []
for y in m:
if y.is_positive:
c *= y
else:
others += [y]
m = tuple(others)
arg = periodic_argument(c, period)
if arg.has(periodic_argument):
return None
if arg.is_number and (unbranched_argument(c) != arg or
(arg == 0 and m != () and c != 1)):
if arg == 0:
return abs(c)*principal_branch(Mul(*m), period)
return principal_branch(exp_polar(I*arg)*Mul(*m), period)*abs(c)
if arg.is_number and ((abs(arg) < period/2) == True or arg == period/2) \
and m == ():
return exp_polar(arg*I)*abs(c)
def _eval_evalf(self, prec):
from sympy import exp, pi, I
z, period = self.args
p = periodic_argument(z, period)._eval_evalf(prec)
if abs(p) > pi or p == -pi:
return self # Cannot evalf for this argument.
return (abs(z)*exp(I*p))._eval_evalf(prec)
# /cyclic/
from sympy.core import basic as _
_.abs_ = Abs
del _
|
py | 1a3e153b35e68ed10c9ed6eeb5e4138c70900889 | """This module contains nodes for spectral analysis with Timeflux."""
import numpy as np
import pandas as pd
import xarray as xr
from scipy.signal import welch
from scipy.fft import fftfreq, rfftfreq, fft, rfft
from timeflux.core.node import Node
class FFT(Node):
"""Compute the one-dimensional discrete Fourier Transform for each column using the Fast Fourier Tranform algorithm.
Attributes:
i (Port): default input, expects DataFrame.
o (Port): default output, provides DataArray.
Example:
        In this example, we simulate white noise and apply the FFT:
* ``fs`` = `10.0`
* ``nfft`` = `5`
* ``return_onesided`` = `False`
self.i.data::
A B C
2017-12-31 23:59:59.998745401 0.185133 0.541901 0.872946
2018-01-01 00:00:00.104507143 0.732225 0.806561 0.658783
2018-01-01 00:00:00.202319939 0.692277 0.849196 0.249668
2018-01-01 00:00:00.300986584 0.489425 0.221209 0.987668
2018-01-01 00:00:00.396560186 0.944059 0.039427 0.705575
self.o.data::
xarray.DataArray (times: 1, freqs: 5, space: 3)
array([[[ 3.043119+0.j , 2.458294+0.j , 3.47464 +0.j ],
[-0.252884+0.082233j, -0.06265 -1.098709j, 0.29353 +0.478287j],
[-0.805843+0.317437j, 0.188256+0.146341j, 0.151515-0.674376j],
[-0.805843-0.317437j, 0.188256-0.146341j, 0.151515+0.674376j],
[-0.252884-0.082233j, -0.06265 +1.098709j, 0.29353 -0.478287j]]])
Coordinates:
* times (times) datetime64[ns] 2018-01-01T00:00:00.396560186
* freqs (freqs) float64 0.0 2.0 4.0 -4.0 -2.0
* space (space) object 'A' 'B' 'C'
Notes:
This node should be used after a buffer.
References:
* `scipy.fft <https://docs.scipy.org/doc/scipy/reference/fft.html>`_
"""
def __init__(self, fs=1.0, nfft=None, return_onesided=True):
"""
Args:
fs (float): Nominal sampling rate of the input data.
nfft (int|None): Length of the Fourier transform. Default: length of the chunk.
return_onesided (bool): If `True`, return a one-sided spectrum for real data.
If `False` return a two-sided spectrum.
(Note that for complex data, a two-sided spectrum is always returned.)
Default: `True`.
"""
self._fs = fs
self._nfft = nfft
if return_onesided:
self._sides = "onesided"
else:
self._sides = "twosided"
if self._nfft is not None:
self._set_freqs()
def _check_nfft(self):
# Check validity of nfft at first chunk
if self._nfft is None:
self.logger.debug("nfft := length of the chunk ")
self._nfft = self.i.data.shape[0]
self._set_freqs()
elif self._nfft < self.i.data.shape[0]:
raise ValueError("nfft must be greater than or equal to length of chunk.")
else:
self._nfft = int(self._nfft)
def _set_freqs(self):
# Set freqs indexes
if self._sides == "onesided":
self._freqs = rfftfreq(self._nfft, 1 / self._fs)
else:
self._freqs = fftfreq(self._nfft, 1 / self._fs)
def update(self):
# copy the meta
self.o = self.i
# When we have not received data, there is nothing to do
if not self.i.ready():
return
# At this point, we are sure that we have some data to process
self._check_nfft()
self.o.data = self.i.data
if self._sides == "twosided":
func = fft
else:
self.o.data = self.o.data.apply(lambda x: x.real)
func = rfft
values = func(self.o.data.values.T, n=self._nfft).T
self.o.data = xr.DataArray(
np.stack([values], 0),
coords=[[self.o.data.index[-1]], self._freqs, self.o.data.columns],
dims=["time", "freq", "space"],
)
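        # Note: the DataArray built above has a time dimension of length one,
        # stamped with the chunk's last timestamp, so this node emits exactly
        # one spectrum per incoming chunk; in the one-sided case only the real
        # part of the input is kept before applying rfft.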
class Welch(Node):
"""Estimate power spectral density using Welch’s method.
Attributes:
i (Port): default input, expects DataFrame.
o (Port): default output, provides DataArray with dimensions (time, freq, space).
Example:
        In this example, we simulate data with noisy sine waves on three sensors (columns `a`, `b`, `c`):
* ``fs`` = `100.0`
* ``nfft`` = `24`
node.i.data::
\s a b c
1970-01-01 00:00:00.000 -0.233920 -0.343296 0.157988
1970-01-01 00:00:00.010 0.460353 0.777296 0.957201
1970-01-01 00:00:00.020 0.768459 1.234923 1.942190
1970-01-01 00:00:00.030 1.255393 1.782445 2.326175
... ... ... ...
1970-01-01 00:00:01.190 1.185759 2.603828 3.315607
node.o.data::
<xarray.DataArray (time: 1, freq: 13, space: 3)>
array([[[2.823924e-02, 1.087382e-01, 1.153163e-01],
[1.703466e-01, 6.048703e-01, 6.310628e-01],
... ... ...
[9.989429e-04, 8.519226e-04, 7.769918e-04],
[1.239551e-03, 7.412518e-04, 9.863335e-04],
[5.382880e-04, 4.999334e-04, 4.702757e-04]]])
Coordinates:
* time (time) datetime64[ns] 1970-01-01T00:00:01.190000
* freq (freq) float64 0.0 4.167 8.333 12.5 16.67 ... 37.5 41.67 45.83 50.0
* space (space) object 'a' 'b' 'c'
Notes:
This node should be used after a Window with the appropriate length, with regard to the parameters
`noverlap`, `nperseg` and `nfft`.
It should be noted that a pipeline such as {LargeWindow-Welch} is in fact equivalent to a pipeline
        {SmallWindow-FFT-LargeWindow-Average} with SmallWindow's parameters `length` and `step` respectively
        equivalent to `nperseg` and `step`, and with an FFT node with the same kwargs.
"""
def __init__(self, rate=None, closed="right", **kwargs):
"""
Args:
            rate (float|None): Nominal sampling rate of the input data. If `None`, the rate will be taken from the input meta.
closed (str): Make the index closed on the `right`, `left` or `center`.
kwargs: Keyword arguments to pass to scipy.signal.welch function.
You can specify: window, nperseg, noverlap, nfft, detrend, return_onesided and scaling.
"""
self._rate = rate
self._closed = closed
self._kwargs = kwargs
self._set_default()
def _set_default(self):
        # We set the default params if they are not specified in kwargs, in order to check that they are valid with respect to the length and sampling of the input data.
if "nperseg" not in self._kwargs.keys():
self._kwargs["nperseg"] = 256
self.logger.debug("nperseg := 256")
if "nfft" not in self._kwargs.keys():
self._kwargs["nfft"] = self._kwargs["nperseg"]
self.logger.debug(
"nfft := nperseg := {nperseg}".format(nperseg=self._kwargs["nperseg"])
)
if "noverlap" not in self._kwargs.keys():
self._kwargs["noverlap"] = self._kwargs["nperseg"] // 2
self.logger.debug(
"noverlap := nperseg/2 := {noverlap}".format(
noverlap=self._kwargs["noverlap"]
)
)
def _check_nfft(self):
        # Check validity of nfft at first chunk
if not all(
i <= len(self.i.data)
for i in [self._kwargs[k] for k in ["nfft", "nperseg", "noverlap"]]
):
            raise ValueError(
                "nfft, noverlap and nperseg must be less than or equal to the length of the chunk."
            )
else:
self._kwargs.update(
{
keyword: int(self._kwargs[keyword])
for keyword in ["nfft", "nperseg", "noverlap"]
}
)
def update(self):
# copy the meta
self.o = self.i
# When we have not received data, there is nothing to do
if not self.i.ready():
return
# Check rate
if self._rate:
rate = self._rate
elif "rate" in self.i.meta:
rate = self.i.meta["rate"]
else:
raise ValueError(
"The rate was neither explicitely defined nor found in the stream meta."
)
# At this point, we are sure that we have some data to process
# apply welch on the data:
self._check_nfft()
f, Pxx = welch(x=self.i.data, fs=rate, **self._kwargs, axis=0)
if self._closed == "left":
time = self.i.data.index[-1]
elif self._closed == "center":
def middle(a):
return int(np.ceil(len(a) / 2)) - 1
time = self.i.data.index[middle(self.i.data)]
else: # right
time = self.i.data.index[-1]
# f is the frequency axis and Pxx the average power of shape (Nfreqs x Nchanels)
# we reshape Pxx to fit the ('time' x 'freq' x 'space') dimensions
self.o.data = xr.DataArray(
np.stack([Pxx], 0),
coords=[[time], f, self.i.data.columns],
dims=["time", "frequency", "space"],
)
class Bands(Node):
"""Averages the XArray values over freq dimension according to the frequencies bands given in arguments.
This node selects a subset of values over the chosen dimensions, averages them along this axis and convert the result into a flat dataframe.
This node will output as many ports bands as given bands, with their respective name as suffix.
Attributes:
i (Port): default output, provides DataArray with 3 dimensions (time, freq, space).
o (Port): Default output, provides DataFrame.
o_* (Port): Dynamic outputs, provide DataFrame.
"""
def __init__(self, bands=None, relative=False):
"""
Args:
bands (dict): Define the band to extract given its name and its range.
An output port will be created with the given names as suffix.
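            relative (bool): If `True`, the power in each band is divided by
                the total power summed over all frequencies.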
"""
bands = bands or {
"delta": [1, 4],
"theta": [4, 8],
"alpha": [8, 12],
"beta": [12, 30],
}
self._relative = relative
self._bands = []
for band_name, band_range in bands.items():
self._bands.append(
dict(
port=getattr(self, "o_" + band_name),
slice=slice(band_range[0], band_range[1]),
meta={"bands": {"range": band_range, "relative": relative}},
)
)
def update(self):
# When we have not received data, there is nothing to do
if not self.i.ready():
return
# At this point, we are sure that we have some data to process
for band in self._bands:
# 1. select the Xarray on freq axis in the range, 2. average along freq axis
band_power = (
self.i.data.loc[{"frequency": band["slice"]}].sum("frequency").values
) # todo: sum
if self._relative:
tot_power = self.i.data.sum("frequency").values
tot_power[tot_power == 0.0] = 1
band_power /= tot_power
band["port"].data = pd.DataFrame(
columns=self.i.data.space.values,
index=self.i.data.time.values,
data=band_power,
)
band["port"].meta = {**(self.i.meta or {}), **band["meta"]}
|
py | 1a3e16044456ac0c554c72b0613284869fc23af7 | import os, glob, pickle, time, gc, copy
def pickle_save(path, df):
with open(path, 'wb') as f:
pickle.dump(df, f)
def pickle_load(path):
with open(path, 'rb') as f:
df = pickle.load(f)
return df
def ri(df):
return df.reset_index(drop=True) |
py | 1a3e1742ed8dc5d7afe60c4f892caceeffec07bd | # Copyright 2016 Medical Research Council Harwell.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @author James Brown <[email protected]>
import subprocess as sp
import numpy as np
import re
from tempfile import NamedTemporaryFile
minc_dtypes = {'unsigned': {'byte': np.uint8, 'short': np.uint16, 'float': np.float32},
'signed': {'byte': np.int8, 'short': np.int16, 'float': np.float32}}
def minc_to_numpy(minc_file):
info = minc_info(minc_file)
if not info:
return False
mrsg = MincRawSliceGenerator(minc_file, info)
vol = mrsg.volume
return vol
def mincstats_to_numpy(minc_file):
info = minc_info(minc_file)
if not info:
return False
info['dtype'] = 'float' # have to force it, because mincinfo is hopeless
info['np_dtype'] = np.float32
mrsg = MincRawSliceGenerator(minc_file, info)
vol = mrsg.volume
return vol
class SliceGenerator(object):
def __init__(self, recon):
self.recon = recon
self.slice_index = 0
def slices(self):
"""The slices method should yield xy image slices from a memory mapped numpy array."""
raise NotImplementedError("Ths method needs overriding")
def dtype(self):
"""The dtype method should return the datatype of the memory mapped numpy array"""
raise NotImplementedError("Ths method needs overriding")
def shape(self):
"""The shape method should return the shape of the memory mapped numpy array in x, y, z order."""
raise NotImplementedError("Ths method needs overriding")
class MincRawSliceGenerator(SliceGenerator):
"""The MincRawSliceGenerator class extends SliceGenerator, yielding slices from a single MINC (Medical Image NetCDF)
    file, having been dumped to a temporary raw file via mincextract. mincinfo is used to determine the file type/dimensions
"""
def __init__(self, recon, info):
"""The constructor takes a recon path as an argument, and dumps the MINC file to a temporary raw file. The raw file
is then memory mapped using numpy, from which slices are yielded.
:param recon: a path to a MINC file.
"""
super(MincRawSliceGenerator, self).__init__(recon)
self.ext = 'mnc'
tmp_file = NamedTemporaryFile() # TemporaryFile() seems not to work with Python3.4
sp.call(['mincextract', '-{}'.format(info['dtype']),
'-{}'.format(info['sign']), recon], stdout=tmp_file)
self.volume = np.fromfile(tmp_file.name, dtype=info['np_dtype']).reshape(info['shape'])
def slices(self, start=0):
"""Slices are yielded one slice at a time from the memory mapped numpy array
"""
try:
for i in range(self.volume.shape[0]):
yield self.volume[i, :, :]
except Exception:
raise CorruptReconError("Error yielding slices from MINC file")
def dtype(self):
"""Overrides the superclass to return the data type of the MINC file i.e. 8 bit/16 bit.
"""
return self.volume.dtype
def shape(self):
"""Overrides the superclass to return the shape of the MINC file.
"""
return self.volume.shape[::-1]
def minc_info(recon):
try:
info = sp.check_output(['mincinfo', recon], universal_newlines=True)
except OSError as e:
raise OSError("Minc tools not installed\n{}".format(e))
#info = str(info)
info_dict = {}
dims = []
for line in info.splitlines():
if 'image:' in line: # strip non alphanumeric characters
# Get range
            min_max = re.findall(r'\d+ to \d+', line)[0]
info_dict['min'], info_dict['max'] = int(min_max.split()[0]), int(min_max.split()[2])
regex = re.compile('[^a-zA-Z]')
info_dict['sign'] = regex.sub('', line.split()[1])
info_dict['dtype'] = regex.sub('', line.split()[2])
try:
info_dict['np_dtype'] = minc_dtypes[info_dict['sign']][info_dict['dtype']]
except KeyError:
return None
elif 'dimensions' not in line and any(space in line for space in ['xspace', 'yspace', 'zspace']):
spacing = line.split()
dims.append(int(spacing[1]))
info_dict['voxel_size'] = float(spacing[2]) * 1000 # in microns
info_dict['shape'] = tuple(dims) # zspace, yspace, xspace
return info_dict
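# Note on the parsing above: minc_info assumes 'mincinfo' prints an image line
# roughly of the form "image: unsigned byte 0 to 255" (an assumed example, not
# taken from a real run) and per-dimension lines containing the dimension name,
# its length and its step; the step (assumed to be in millimetres) is converted
# to microns.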
class ReconFormatError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class CorruptReconError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
|
py | 1a3e17c46a53588419b30cd3c37d4b75354321b6 | # Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from bricks.openstack.common.gettextutils import _
"""
import copy
import functools
import gettext
import locale
from logging import handlers
import os
import re
from babel import localedata
import six
_localedir = os.environ.get('bricks'.upper() + '_LOCALEDIR')
_t = gettext.translation('bricks', localedir=_localedir, fallback=True)
# We use separate translation catalogs for each log level, so set up a
# mapping between the log level name and the translator. The domain
# for the log level is project_name + "-log-" + log_level so messages
# for each level end up in their own catalog.
_t_log_levels = dict(
(level, gettext.translation('bricks' + '-log-' + level,
localedir=_localedir,
fallback=True))
for level in ['info', 'warning', 'error', 'critical']
)
_AVAILABLE_LANGUAGES = {}
USE_LAZY = False
def enable_lazy():
"""Convenience function for configuring _() to use lazy gettext
Call this at the start of execution to enable the gettextutils._
function to use lazy gettext functionality. This is useful if
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
global USE_LAZY
USE_LAZY = True
def _(msg):
if USE_LAZY:
return Message(msg, domain='bricks')
else:
if six.PY3:
return _t.gettext(msg)
return _t.ugettext(msg)
def _log_translation(msg, level):
"""Build a single translation of a log message
"""
if USE_LAZY:
return Message(msg, domain='bricks' + '-log-' + level)
else:
translator = _t_log_levels[level]
if six.PY3:
return translator.gettext(msg)
return translator.ugettext(msg)
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = functools.partial(_log_translation, level='info')
_LW = functools.partial(_log_translation, level='warning')
_LE = functools.partial(_log_translation, level='error')
_LC = functools.partial(_log_translation, level='critical')
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
:param domain: the translation domain
:param lazy: indicates whether or not to install the lazy _() function.
The lazy _() introduces a way to do deferred translation
of messages by installing a _ that builds Message objects,
instead of strings, which can then be lazily translated into
any available locale.
"""
if lazy:
# NOTE(mrodden): Lazy gettext functionality.
#
# The following introduces a deferred way to do translations on
# messages in OpenStack. We override the standard _() function
# and % (format string) operation to build Message objects that can
# later be translated when we have more information.
def _lazy_gettext(msg):
"""Create and return a Message object.
Lazy gettext function for a given domain, it is a factory method
for a project/module to get a lazy gettext function for its own
translation domain (i.e. nova, glance, cinder, etc.)
Message encapsulates a string so that we can translate
it later when needed.
"""
return Message(msg, domain=domain)
from six import moves
moves.builtins.__dict__['_'] = _lazy_gettext
else:
localedir = '%s_LOCALEDIR' % domain.upper()
if six.PY3:
gettext.install(domain,
localedir=os.environ.get(localedir))
else:
gettext.install(domain,
localedir=os.environ.get(localedir),
unicode=True)
class Message(six.text_type):
"""A Message object is a unicode object that can be translated.
Translation of Message is done explicitly using the translate() method.
For all non-translation intents and purposes, a Message is simply unicode,
and can be treated as such.
"""
def __new__(cls, msgid, msgtext=None, params=None,
domain='bricks', *args):
"""Create a new Message object.
In order for translation to work gettext requires a message ID, this
msgid will be used as the base unicode text. It is also possible
for the msgid and the base unicode text to be different by passing
the msgtext parameter.
"""
# If the base msgtext is not given, we use the default translation
# of the msgid (which is in English) just in case the system locale is
# not English, so that the base text will be in that locale by default.
if not msgtext:
msgtext = Message._translate_msgid(msgid, domain)
# We want to initialize the parent unicode with the actual object that
# would have been plain unicode if 'Message' was not enabled.
msg = super(Message, cls).__new__(cls, msgtext)
msg.msgid = msgid
msg.domain = domain
msg.params = params
return msg
def translate(self, desired_locale=None):
"""Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode
"""
translated_message = Message._translate_msgid(self.msgid,
self.domain,
desired_locale)
if self.params is None:
# No need for more translation
return translated_message
# This Message object may have been formatted with one or more
# Message objects as substitution arguments, given either as a single
# argument, part of a tuple, or as one or more values in a dictionary.
# When translating this Message we need to translate those Messages too
translated_params = _translate_args(self.params, desired_locale)
translated_message = translated_message % translated_params
return translated_message
@staticmethod
def _translate_msgid(msgid, domain, desired_locale=None):
if not desired_locale:
system_locale = locale.getdefaultlocale()
# If the system locale is not available to the runtime use English
if not system_locale[0]:
desired_locale = 'en_US'
else:
desired_locale = system_locale[0]
locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
lang = gettext.translation(domain,
localedir=locale_dir,
languages=[desired_locale],
fallback=True)
if six.PY3:
translator = lang.gettext
else:
translator = lang.ugettext
translated_message = translator(msgid)
return translated_message
def __mod__(self, other):
# When we mod a Message we want the actual operation to be performed
# by the parent class (i.e. unicode()), the only thing we do here is
# save the original msgid and the parameters in case of a translation
params = self._sanitize_mod_params(other)
unicode_mod = super(Message, self).__mod__(params)
modded = Message(self.msgid,
msgtext=unicode_mod,
params=params,
domain=self.domain)
return modded
def _sanitize_mod_params(self, other):
"""Sanitize the object being modded with this Message.
- Add support for modding 'None' so translation supports it
- Trim the modded object, which can be a large dictionary, to only
those keys that would actually be used in a translation
- Snapshot the object being modded, in case the message is
translated, it will be used as it was when the Message was created
"""
if other is None:
params = (other,)
elif isinstance(other, dict):
params = self._trim_dictionary_parameters(other)
else:
params = self._copy_param(other)
return params
def _trim_dictionary_parameters(self, dict_param):
"""Return a dict that only has matching entries in the msgid."""
# NOTE(luisg): Here we trim down the dictionary passed as parameters
# to avoid carrying a lot of unnecessary weight around in the message
# object, for example if someone passes in Message() % locals() but
# only some params are used, and additionally we prevent errors for
# non-deepcopyable objects by unicoding() them.
# Look for %(param) keys in msgid;
# Skip %% and deal with the case where % is first character on the line
keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
# If we don't find any %(param) keys but have a %s
if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
# Apparently the full dictionary is the parameter
params = self._copy_param(dict_param)
else:
params = {}
# Save our existing parameters as defaults to protect
# ourselves from losing values if we are called through an
# (erroneous) chain that builds a valid Message with
# arguments, and then does something like "msg % kwds"
# where kwds is an empty dictionary.
src = {}
if isinstance(self.params, dict):
src.update(self.params)
src.update(dict_param)
for key in keys:
params[key] = self._copy_param(src[key])
return params
def _copy_param(self, param):
try:
return copy.deepcopy(param)
except TypeError:
# Fallback to casting to unicode this will handle the
# python code-like objects that can't be deep-copied
return six.text_type(param)
def __add__(self, other):
msg = _('Message objects do not support addition.')
raise TypeError(msg)
def __radd__(self, other):
return self.__add__(other)
def __str__(self):
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
# and it expects specifically a UnicodeError in order to proceed.
msg = _('Message objects do not support str() because they may '
'contain non-ascii characters. '
'Please use unicode() or translate() instead.')
raise UnicodeError(msg)
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = '%s_LOCALEDIR' % domain.upper()
find = lambda x: gettext.find(domain,
localedir=os.environ.get(localedir),
languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
# renamed to locale_identifiers() in >=1.0, the requirements master list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
# this check when the master list updates to >=1.0, and update all projects
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
for i in locale_identifiers:
if find(i) is not None:
language_list.append(i)
# NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
# locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
# are perfectly legitimate locales:
# https://github.com/mitsuhiko/babel/issues/37
# In Babel 1.3 they fixed the bug and they support these locales, but
# they are still not explicitly "listed" by locale_identifiers().
# That is why we add the locales here explicitly if necessary so that
# they are listed as supported.
aliases = {'zh': 'zh_CN',
'zh_Hant_HK': 'zh_HK',
'zh_Hant': 'zh_TW',
'fil': 'tl_PH'}
for (locale, alias) in six.iteritems(aliases):
if locale in language_list and alias not in language_list:
language_list.append(alias)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list)
def translate(obj, desired_locale=None):
"""Gets the translated unicode representation of the given object.
If the object is not translatable it is returned as-is.
If the locale is None the object is translated to the system locale.
:param obj: the object to translate
:param desired_locale: the locale to translate the message to, if None the
default system locale will be used
:returns: the translated object in unicode, or the original object if
it could not be translated
"""
message = obj
if not isinstance(message, Message):
# If the object to translate is not already translatable,
# let's first get its unicode representation
message = six.text_type(obj)
if isinstance(message, Message):
# Even after unicoding() we still need to check if we are
# running with translatable unicode before translating
return message.translate(desired_locale)
return obj
def _translate_args(args, desired_locale=None):
"""Translates all the translatable elements of the given arguments object.
This method is used for translating the translatable values in method
arguments which include values of tuples or dictionaries.
If the object is not a tuple or a dictionary the object itself is
translated if it is translatable.
If the locale is None the object is translated to the system locale.
:param args: the args to translate
:param desired_locale: the locale to translate the args to, if None the
default system locale will be used
:returns: a new args object with the translated contents of the original
"""
if isinstance(args, tuple):
return tuple(translate(v, desired_locale) for v in args)
if isinstance(args, dict):
translated_dict = {}
for (k, v) in six.iteritems(args):
translated_v = translate(v, desired_locale)
translated_dict[k] = translated_v
return translated_dict
return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
"""Handler that translates records before logging them.
The TranslationHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating them. This handler
depends on Message objects being logged, instead of regular strings.
The handler can be configured declaratively in the logging.conf as follows:
[handlers]
keys = translatedlog, translator
[handler_translatedlog]
class = handlers.WatchedFileHandler
args = ('/var/log/api-localized.log',)
formatter = context
[handler_translator]
class = openstack.common.log.TranslationHandler
target = translatedlog
args = ('zh_CN',)
If the specified locale is not available in the system, the handler will
log in the default locale.
"""
def __init__(self, locale=None, target=None):
"""Initialize a TranslationHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
# NOTE(luisg): In order to allow this handler to be a wrapper for
# other handlers, such as a FileHandler, and still be able to
# configure it using logging.conf, this handler has to extend
# MemoryHandler because only the MemoryHandlers' logging.conf
# parsing is implemented such that it accepts a target handler.
handlers.MemoryHandler.__init__(self, capacity=0, target=target)
self.locale = locale
def setFormatter(self, fmt):
self.target.setFormatter(fmt)
def emit(self, record):
# We save the message from the original record to restore it
# after translation, so other handlers are not affected by this
original_msg = record.msg
original_args = record.args
try:
self._translate_and_log_record(record)
finally:
record.msg = original_msg
record.args = original_args
def _translate_and_log_record(self, record):
record.msg = translate(record.msg, self.locale)
# In addition to translating the message, we also need to translate
# arguments that were passed to the log method that were not part
# of the main message e.g., log.info(_('Some message %s'), this_one))
record.args = _translate_args(record.args, self.locale)
self.target.emit(record)
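# Hypothetical usage sketch of the lazy-translation flow defined above (the
# literal message and locale are illustrative only):
#   enable_lazy()
#   msg = _('Hello %(name)s') % {'name': 'world'}  # builds a Message, not a str
#   msg.translate('es_ES')  # translation happens only when explicitly requested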
|
py | 1a3e17c5eb334837f4aff79c912ae586459a5f78 | import KratosMultiphysics
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
import KratosMultiphysics.KratosUnittest as KratosUnittest
from math import sqrt, sin, cos, pi, exp, atan
class TestComputeCenterOfGravity(KratosUnittest.TestCase):
# muting the output
KratosMultiphysics.Logger.GetDefaultOutput().SetSeverity(KratosMultiphysics.Logger.Severity.WARNING)
def _apply_beam_material_properties(self,mp,dim):
#define properties
mp.GetProperties()[0].SetValue(KratosMultiphysics.YOUNG_MODULUS,210e9)
mp.GetProperties()[0].SetValue(KratosMultiphysics.DENSITY,7850)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.CROSS_AREA,0.01)
mp.GetProperties()[0].SetValue(KratosMultiphysics.POISSON_RATIO,0.30)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.TORSIONAL_INERTIA,0.00001)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.I22,0.00001)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.I33,0.00001)
cl = StructuralMechanicsApplication.LinearElastic3DLaw()
mp.GetProperties()[0].SetValue(KratosMultiphysics.CONSTITUTIVE_LAW,cl)
def _apply_shell_material_properties(self,mp):
#define properties
mp.GetProperties()[1].SetValue(KratosMultiphysics.YOUNG_MODULUS,100e3)
mp.GetProperties()[1].SetValue(KratosMultiphysics.POISSON_RATIO,0.3)
mp.GetProperties()[1].SetValue(KratosMultiphysics.THICKNESS,1.0)
mp.GetProperties()[1].SetValue(KratosMultiphysics.DENSITY,1.0)
cl = StructuralMechanicsApplication.LinearElasticPlaneStress2DLaw()
mp.GetProperties()[1].SetValue(KratosMultiphysics.CONSTITUTIVE_LAW,cl)
def _apply_orthotropic_shell_material_properties(self,mp):
#define properties
        # we specify only the properties we need (others are Young's modulus etc.)
num_plies = 3
orthotropic_props = KratosMultiphysics.Matrix(num_plies,16)
for row in range(num_plies):
for col in range(16):
orthotropic_props[row,col] = 0.0
# Orthotropic mechanical moduli
orthotropic_props[0,0] = 0.005 # lamina thickness
orthotropic_props[0,2] = 2200 # density
orthotropic_props[1,0] = 0.01 # lamina thickness
orthotropic_props[1,2] = 1475 # density
orthotropic_props[2,0] = 0.015 # lamina thickness
orthotropic_props[2,2] = 520 # density
mp.GetProperties()[1].SetValue(StructuralMechanicsApplication.SHELL_ORTHOTROPIC_LAYERS,orthotropic_props)
cl = StructuralMechanicsApplication.LinearElasticOrthotropic2DLaw()
mp.GetProperties()[1].SetValue(KratosMultiphysics.CONSTITUTIVE_LAW,cl)
def _apply_solid_material_properties(self,mp):
#define properties
mp.GetProperties()[1].SetValue(KratosMultiphysics.YOUNG_MODULUS,100e3)
mp.GetProperties()[1].SetValue(KratosMultiphysics.POISSON_RATIO,0.3)
mp.GetProperties()[1].SetValue(KratosMultiphysics.THICKNESS,1.0)
mp.GetProperties()[1].SetValue(KratosMultiphysics.DENSITY,1.0)
cl = StructuralMechanicsApplication.LinearElasticPlaneStrain2DLaw()
mp.GetProperties()[1].SetValue(KratosMultiphysics.CONSTITUTIVE_LAW,cl)
def _create_shell_nodes(self,mp):
mp.CreateNewNode(1, -0.5, - 0.45, 0.1)
mp.CreateNewNode(2, 0.7, -0.5, 0.2)
mp.CreateNewNode(3, 0.55, 0.6, 0.15)
mp.CreateNewNode(4, -0.48, 0.65, 0.0)
mp.CreateNewNode(5, 0.02, -0.01, -0.15)
def _create_shell_elements(self,mp,element_name = "ShellThinElementCorotational3D3N"):
mp.CreateNewElement(element_name, 1, [1,2,5], mp.GetProperties()[1])
mp.CreateNewElement(element_name, 2, [2,3,5], mp.GetProperties()[1])
mp.CreateNewElement(element_name, 3, [3,4,5], mp.GetProperties()[1])
mp.CreateNewElement(element_name, 4, [4,1,5], mp.GetProperties()[1])
def test_nodal_cog(self):
dim = 3
nr_nodes = 4
current_model = KratosMultiphysics.Model()
mp = current_model.CreateModelPart("structural_part_nodal_masses")
mp.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] = dim
#create nodes
dx = 1.2
for i in range(nr_nodes):
mp.CreateNewNode(i+1,i*dx,0.00,0.00)
#add dofs
#create Element
elem1 = mp.CreateNewElement("NodalConcentratedElement2D1N", 1, [1], mp.GetProperties()[0])
elem2 = mp.CreateNewElement("NodalConcentratedElement2D1N", 2, [2], mp.GetProperties()[0])
elem3 = mp.CreateNewElement("NodalConcentratedElement3D1N", 3, [3], mp.GetProperties()[0])
elem4 = mp.CreateNewElement("NodalConcentratedElement3D1N", 4, [4], mp.GetProperties()[0])
elem1.SetValue(KratosMultiphysics.NODAL_MASS,21.234)
elem2.SetValue(KratosMultiphysics.NODAL_MASS,5.234)
elem3.SetValue(KratosMultiphysics.NODAL_MASS,112.234)
elem4.SetValue(KratosMultiphysics.NODAL_MASS,78.234)
cog_process = StructuralMechanicsApplication.ComputeCenterOfGravityProcess(mp)
cog_process.Execute()
center_of_gravity = mp.ProcessInfo[StructuralMechanicsApplication.CENTER_OF_GRAVITY]
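        # Expected value: the mass-weighted average of the node x-coordinates,
        # (0*21.234 + 1.2*5.234 + 2.4*112.234 + 3.6*78.234) / 216.936
        # = 557.2848 / 216.936 = 2.5688903639 (to the printed precision), with y = z = 0.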
self.assertAlmostEqual(2.5688903639, center_of_gravity[0])
self.assertAlmostEqual(0.0, center_of_gravity[1])
self.assertAlmostEqual(0.0, center_of_gravity[2])
def test_beam_cog(self):
dim = 3
nr_nodes = 11
nr_elements = nr_nodes-1
current_model = KratosMultiphysics.Model()
mp = current_model.CreateModelPart("structural_part_beams")
mp.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] = dim
self._apply_beam_material_properties(mp,dim)
#create nodes
dx = 1.20 / nr_elements
for i in range(nr_nodes):
mp.CreateNewNode(i+1,i*dx,0.00,0.00)
#add dofs
#create Element
for i in range(nr_elements):
elem = mp.CreateNewElement("CrLinearBeamElement3D2N", i+1, [i+1,i+2], mp.GetProperties()[0])
cog_process = StructuralMechanicsApplication.ComputeCenterOfGravityProcess(mp)
cog_process.Execute()
center_of_gravity = mp.ProcessInfo[StructuralMechanicsApplication.CENTER_OF_GRAVITY]
self.assertAlmostEqual(0.6, center_of_gravity[0])
self.assertAlmostEqual(0.0, center_of_gravity[1])
self.assertAlmostEqual(0.0, center_of_gravity[2])
def test_shell_cog(self):
dim = 3
current_model = KratosMultiphysics.Model()
mp = current_model.CreateModelPart("structural_part_shells")
mp.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] = dim
mp.SetBufferSize(2)
self._apply_shell_material_properties(mp)
self._create_shell_nodes(mp)
self._create_shell_elements(mp)
cog_process = StructuralMechanicsApplication.ComputeCenterOfGravityProcess(mp)
cog_process.Execute()
center_of_gravity = mp.ProcessInfo[StructuralMechanicsApplication.CENTER_OF_GRAVITY]
self.assertAlmostEqual(0.0723057, center_of_gravity[0])
self.assertAlmostEqual(0.0517395, center_of_gravity[1])
self.assertAlmostEqual(0.0269436, center_of_gravity[2])
def test_orthotropic_shell_cog(self):
dim = 3
current_model = KratosMultiphysics.Model()
mp = current_model.CreateModelPart("structural_part_orthotropic_shells")
mp.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] = dim
mp.SetBufferSize(2)
self._apply_orthotropic_shell_material_properties(mp)
self._create_shell_nodes(mp)
self._create_shell_elements(mp)
cog_process = StructuralMechanicsApplication.ComputeCenterOfGravityProcess(mp)
cog_process.Execute()
center_of_gravity = mp.ProcessInfo[StructuralMechanicsApplication.CENTER_OF_GRAVITY]
self.assertAlmostEqual(0.0723057, center_of_gravity[0])
self.assertAlmostEqual(0.0517395, center_of_gravity[1])
self.assertAlmostEqual(0.0269436, center_of_gravity[2])
def test_solid_cog(self):
dim = 2
current_model = KratosMultiphysics.Model()
mp = current_model.CreateModelPart("structural_part_solids")
mp.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] = dim
mp.SetBufferSize(2)
self._apply_solid_material_properties(mp)
#create nodes
mp.CreateNewNode(1,0.5,0.5,0.0)
mp.CreateNewNode(2,0.7,0.2,0.0)
mp.CreateNewNode(3,0.9,0.8,0.0)
mp.CreateNewNode(4,0.3,0.7,0.0)
mp.CreateNewNode(5,0.6,0.6,0.0)
#create Element
mp.CreateNewElement("TotalLagrangianElement2D3N", 1, [1,2,5], mp.GetProperties()[1])
mp.CreateNewElement("TotalLagrangianElement2D3N", 2, [2,3,5], mp.GetProperties()[1])
mp.CreateNewElement("TotalLagrangianElement2D3N", 3, [3,4,5], mp.GetProperties()[1])
mp.CreateNewElement("TotalLagrangianElement2D3N", 4, [4,1,5], mp.GetProperties()[1])
cog_process = StructuralMechanicsApplication.ComputeCenterOfGravityProcess(mp)
cog_process.Execute()
center_of_gravity = mp.ProcessInfo[StructuralMechanicsApplication.CENTER_OF_GRAVITY]
self.assertAlmostEqual(0.6416666667, center_of_gravity[0])
self.assertAlmostEqual(0.5729166667, center_of_gravity[1])
self.assertAlmostEqual(0.0, center_of_gravity[2])
if __name__ == '__main__':
KratosUnittest.main()
|
py | 1a3e187321f265b4722e0cc563240c008f62ed35 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import itertools
import re
import threading
import uuid
import requests
from ..describe import Description, autoDescribeRoute
from ..rest import Resource, rawResponse
from bson.objectid import ObjectId
from girderformindlogger.constants import AccessType, SortDir, TokenScope, \
DEFINED_INFORMANTS, REPROLIB_CANONICAL, SPECIAL_SUBJECTS, USER_ROLES
from girderformindlogger.api import access
from girderformindlogger.exceptions import AccessException, ValidationException
from girderformindlogger.models.activity import Activity as ActivityModel
from girderformindlogger.models.applet import Applet as AppletModel
from girderformindlogger.models.collection import Collection as CollectionModel
from girderformindlogger.models.folder import Folder as FolderModel
from girderformindlogger.models.group import Group as GroupModel
from girderformindlogger.models.item import Item as ItemModel
from girderformindlogger.models.protocol import Protocol as ProtocolModel
from girderformindlogger.models.roles import getCanonicalUser, getUserCipher
from girderformindlogger.models.user import User as UserModel
from girderformindlogger.utility import config, jsonld_expander
from pyld import jsonld
USER_ROLE_KEYS = USER_ROLES.keys()
class Applet(Resource):
def __init__(self):
super(Applet, self).__init__()
self.resourceName = 'applet'
self._model = AppletModel()
self.route('GET', (':id',), self.getApplet)
self.route('GET', (':id', 'data'), self.getAppletData)
self.route('GET', (':id', 'groups'), self.getAppletGroups)
self.route('POST', (), self.createApplet)
self.route('PUT', (':id', 'informant'), self.updateInformant)
self.route('PUT', (':id', 'assign'), self.assignGroup)
self.route('PUT', (':id', 'constraints'), self.setConstraints)
self.route('PUT', (':id', 'schedule'), self.setSchedule)
self.route('POST', (':id', 'invite'), self.invite)
self.route('GET', (':id', 'roles'), self.getAppletRoles)
self.route('GET', (':id', 'users'), self.getAppletUsers)
self.route('DELETE', (':id',), self.deactivateApplet)
@access.user(scope=TokenScope.DATA_OWN)
@autoDescribeRoute(
Description('Get userlist, groups & statuses.')
.modelParam(
'id',
model=FolderModel,
level=AccessType.ADMIN,
destName='applet'
)
)
def getAppletUsers(self, applet):
thisUser=self.getCurrentUser()
if AppletModel().isCoordinator(applet['_id'], thisUser):
return(AppletModel().getAppletUsers(applet, thisUser, force=True))
else:
raise AccessException(
"Only coordinators and managers can see user lists."
)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Assign a group to a role in an applet.')
.deprecated()
.responseClass('Folder')
.modelParam('id', model=FolderModel, level=AccessType.READ)
.param(
'group',
'ID of the group to assign.',
required=True,
strip=True
)
.param(
'role',
'Role to invite this user to. One of ' + str(USER_ROLE_KEYS),
default='user',
required=False,
strip=True
)
.jsonParam(
'subject',
'Requires a JSON Object in the form \n```'
'{'
' "groups": {'
' "«relationship»": []'
' },'
' "users": {'
' "«relationship»": []'
' }'
'}'
'``` \n For \'user\' or \'reviewer\' assignments, specify '
'group-level relationships, filling in \'«relationship»\' with a '
'JSON-ld key semantically defined in in your context, and IDs in '
'the value Arrays (either applet-specific or canonical IDs in the '
'case of users; applet-specific IDs will be stored either way).',
paramType='form',
required=False,
requireObject=True
)
.errorResponse('ID was invalid.')
.errorResponse('Write access was denied for the folder or its new parent object.', 403)
)
def assignGroup(self, folder, group, role, subject):
applet = folder
if role not in USER_ROLE_KEYS:
raise ValidationException(
'Invalid role.',
'role'
)
thisUser=self.getCurrentUser()
group=GroupModel().load(group, level=AccessType.WRITE, user=thisUser)
return(
AppletModel().setGroupRole(
applet,
group,
role,
currentUser=thisUser,
force=False,
subject=subject
)
)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Create an applet.')
.param(
'protocolUrl',
'URL of Activity Set from which to create applet',
required=False
)
.param(
'name',
'Name to give the applet. The Protocol\'s name will be used if '
'this parameter is not provided.',
required=False
)
.param(
'informant',
' '.join([
'Relationship from informant to individual of interest.',
'Currently handled informant relationships are',
str([r for r in DEFINED_INFORMANTS.keys()])
]),
required=False
)
.errorResponse('Write access was denied for this applet.', 403)
)
def createApplet(self, protocolUrl=None, name=None, informant=None):
thisUser = self.getCurrentUser()
thread = threading.Thread(
target=AppletModel().createAppletFromUrl,
kwargs={
'name': name,
'protocolUrl': protocolUrl,
'user': thisUser,
'constraints': {
'informantRelationship': informant
} if informant is not None else None
}
)
thread.start()
return({
"message": "The applet is being created. Please check back in "
"several minutes to see it. If you have an email "
"address associated with your account, you will receive "
"an email when your applet is ready."
})
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Get all data you are authorized to see for an applet.')
.param(
'id',
'ID of the applet for which to fetch data',
required=True
)
.param(
'format',
'JSON or CSV',
required=False
)
.errorResponse('Write access was denied for this applet.', 403)
)
def getAppletData(self, id, format='json'):
import pandas as pd
from datetime import datetime
from ..rest import setContentDisposition, setRawResponse, setResponseHeader
format = ('json' if format is None else format).lower()
thisUser = self.getCurrentUser()
data = AppletModel().getResponseData(id, thisUser)
setContentDisposition("{}-{}.{}".format(
str(id),
datetime.now().isoformat(),
format
))
if format=='csv':
setRawResponse()
setResponseHeader('Content-Type', 'text/{}'.format(format))
csv = pd.DataFrame(data).to_csv(index=False)
return(csv)
setResponseHeader('Content-Type', 'application/{}'.format(format))
return(data)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('(managers only) Update the informant of an applet.')
.modelParam(
'id',
model=AppletModel,
description='ID of the applet to update',
destName='applet',
force=True,
required=True
)
.param(
'informant',
' '.join([
'Relationship from informant to individual of interest.',
'Currently handled informant relationships are',
str([r for r in DEFINED_INFORMANTS.keys()])
]),
required=True
)
.errorResponse('Write access was denied for this applet.', 403)
)
def updateInformant(self, applet, informant):
user = self.getCurrentUser()
if not AppletModel().isManager(applet['_id'], user):
raise AccessException(
"Only managers can update informant relationship"
)
AppletModel().updateRelationship(applet, informant)
return(
jsonld_expander.formatLdObject(
applet,
'applet',
user,
refreshCache=False
)
)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Deactivate an applet by ID.')
.modelParam('id', model=AppletModel, level=AccessType.WRITE)
.errorResponse('Invalid applet ID.')
.errorResponse('Write access was denied for this applet.', 403)
)
def deactivateApplet(self, folder):
applet = folder
user = Applet().getCurrentUser()
applet['meta']['applet']['deleted'] = True
applet = AppletModel().setMetadata(applet, applet.get('meta'), user)
if applet.get('meta', {}).get('applet', {}).get('deleted')==True:
message = 'Successfully deactivated applet {} ({}).'.format(
AppletModel().preferredName(applet),
applet.get('_id')
)
thread = threading.Thread(
target=AppletModel().updateUserCacheAllUsersAllRoles,
args=(applet, user)
)
thread.start()
else:
message = 'Could not deactivate applet {} ({}).'.format(
AppletModel().preferredName(applet),
applet.get('_id')
)
Description().errorResponse(message, 403)
return(message)
@access.user(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get an applet by ID.')
.modelParam(
'id',
model=AppletModel,
level=AccessType.READ,
destName='applet'
)
.param(
'refreshCache',
'Reparse JSON-LD',
required=False,
dataType='boolean'
)
.errorResponse('Invalid applet ID.')
.errorResponse('Read access was denied for this applet.', 403)
)
def getApplet(self, applet, refreshCache=False):
user = self.getCurrentUser()
if refreshCache:
thread = threading.Thread(
target=jsonld_expander.formatLdObject,
args=(applet, 'applet', user),
kwargs={'refreshCache': refreshCache}
)
thread.start()
return({
"message": "The applet is being refreshed. Please check back "
"in several minutes to see it."
})
return(
jsonld_expander.formatLdObject(
applet,
'applet',
user,
refreshCache=refreshCache
)
)
@access.user(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get associated groups for a given role and applet ID.')
.modelParam('id', 'ID of the Applet.', model=AppletModel, level=AccessType.READ)
.param(
'role',
'One of ' + str(set(USER_ROLE_KEYS)),
default='user',
required=False,
strip=True
)
.errorResponse('Invalid applet ID.')
.errorResponse('Read access was denied for this applet.', 403)
)
def getAppletGroups(self, folder, role):
applet = folder
user = self.getCurrentUser()
groups = [
group for group in AppletModel(
).getAppletGroups(applet).get(role) if ObjectId(group) in [
*user.get('groups', []),
*user.get('formerGroups', []),
*[invite['groupId'] for invite in [
*user.get('groupInvites', []),
*user.get('declinedInvites', [])
]]
]
]
return(
groups
)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Get roles for an applet by ID.')
.modelParam(
'id',
model=AppletModel,
level=AccessType.WRITE,
description='ID of the Applet.'
)
.errorResponse('Invalid applet ID.')
.errorResponse('Write access was denied for this applet.', 403)
.notes('Only users with write access can see roles.')
)
def getAppletRoles(self, folder):
applet = folder
user = Applet().getCurrentUser()
return(AppletModel().getFullRolesList(applet))
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Invite a user to a role in an applet.')
.modelParam(
'id',
model=AppletModel,
level=AccessType.READ,
destName='applet'
)
.param(
'role',
'Role to invite this user to. One of ' + str(set(USER_ROLE_KEYS)),
default='user',
required=False,
strip=True
)
.param(
'idCode',
'ID code for data reporting. One will be generated if none is '
'provided.',
required=False,
strip=True
)
.jsonParam(
'profile',
'Optional, coordinator-defined user profile information, eg, '
'`displayName`, `email`',
required=False,
paramType='form'
)
.errorResponse('ID was invalid.')
.errorResponse('Write access was denied for the folder or its new parent object.', 403)
)
def invite(self, applet, role="user", idCode=None, profile=None):
from girderformindlogger.models.invitation import Invitation
from girderformindlogger.models.profile import Profile
user = self.getCurrentUser()
try:
if role not in USER_ROLE_KEYS:
raise ValidationException(
'Invalid role.',
'role'
)
invitation = Invitation().createInvitation(
applet=applet,
coordinator=user,
role=role,
profile=profile,
idCode=idCode
)
return(Profile().displayProfileFields(invitation, user))
except:
import sys, traceback
print(sys.exc_info())
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Deprecated. Do not use')
.modelParam('id', model=AppletModel, level=AccessType.READ)
.param(
'activity',
'Deprecated. Do not use.',
required=False
)
.jsonParam(
'schedule',
'Deprecated. Do not use.',
paramType='form',
required=False
)
.errorResponse('Invalid applet ID.')
.errorResponse('Read access was denied for this applet.', 403)
.deprecated()
)
def setConstraints(self, folder, activity, schedule, **kwargs):
thisUser = self.getCurrentUser()
applet = jsonld_expander.formatLdObject(
_setConstraints(folder, activity, schedule, thisUser),
'applet',
thisUser,
refreshCache=True
)
thread = threading.Thread(
target=AppletModel().updateUserCacheAllUsersAllRoles,
args=(applet, thisUser)
)
thread.start()
return(applet)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Set or update schedule information for an applet.')
.modelParam(
'id',
model=AppletModel,
level=AccessType.READ,
destName='applet'
)
.jsonParam(
'schedule',
'A JSON object containing schedule information for an applet',
paramType='form',
required=False
)
.errorResponse('Invalid applet ID.')
.errorResponse('Read access was denied for this applet.', 403)
)
def setSchedule(self, applet, schedule, **kwargs):
thisUser = self.getCurrentUser()
if not AppletModel().isCoordinator(applet['_id'], thisUser):
raise AccessException(
"Only coordinators and managers can update applet schedules."
)
appletMeta = applet['meta'] if 'meta' in applet else {'applet': {}}
if 'applet' not in appletMeta:
appletMeta['applet'] = {}
appletMeta['applet']['schedule'] = schedule
AppletModel().setMetadata(applet, appletMeta)
thread = threading.Thread(
target=AppletModel().updateUserCacheAllUsersAllRoles,
args=(applet, thisUser)
)
thread.start()
return(appletMeta)
def authorizeReviewer(applet, reviewer, user):
thisUser = Applet().getCurrentUser()
user = UserModel().load(
user,
level=AccessType.NONE,
user=thisUser
)
try:
applet = FolderModel().load(
applet,
level=AccessType.READ,
user=thisUser
)
responsesCollection = FolderModel().createFolder(
parent=user,
name='Responses',
parentType='user',
public=False,
creator=thisUser,
reuseExisting=True
)
thisApplet = list(FolderModel().childFolders(
parent=responsesCollection,
parentType='folder',
user=thisUser,
filters={
'meta.applet.@id': str(applet['_id'])
}
))
thisApplet = thisApplet[0] if len(
thisApplet
) else FolderModel().setMetadata(
FolderModel().createFolder(
parent=responsesCollection,
name=FolderModel().preferredName(applet),
parentType='folder',
public=False,
creator=thisUser,
allowRename=True,
reuseExisting=False
),
{
'applet': {
'@id': str(applet['_id'])
}
}
)
accessList = thisApplet['access']
accessList['users'].append({
"id": reviewer,
"level": AccessType.READ
})
thisApplet = FolderModel().setAccessList(
thisApplet,
accessList,
save=True,
recurse=True,
user=thisUser
)
except:
thisApplet = None
return(thisApplet)
def authorizeReviewers(assignment):
assignment = assignment.get('meta', assignment)
thisUser = Applet().getCurrentUser()
allUsers = []
reviewAll = []
members = assignment.get('members', [])
applet = assignment.get('applet').get('@id')
for member in [member for member in members if 'roles' in member]:
try:
if member['roles']['user']:
allUsers.append(getCanonicalUser(member.get("@id")))
except:
pass
if 'reviewer' in member['roles']:
if "ALL" in member['roles']['reviewer']:
reviewAll.append(getCanonicalUser(member.get("@id")))
for user in [
user for user in member['roles'][
'reviewer'
] if user not in SPECIAL_SUBJECTS
]:
authorizeReviewer(
assignment.get('applet').get('@id'),
getCanonicalUser(member.get('@id')),
getCanonicalUser(user)
)
for reviewer in reviewAll:
[authorizeReviewer(
assignment.get('applet').get('@id'),
reviewer,
user
) for user in allUsers]
return(None)
def _invite(applet, user, role, rsvp, subject):
"""
Helper function to invite a user to an applet.
:param applet: Applet to invite user to
:type applet: AppletModel
:param user: ID (canonical or applet-specific) or email address of user to
invite
:type user: string
:param role: Role to invite user to
:type role: string
:param rsvp: Require user acceptance?
:type rsvp: boolean
:param subject: Subject about 'user' role can inform or about which
'reviewer' role can review
:type subject: string or literal
:returns: New assignment (dictionary)
"""
if role not in USER_ROLE_KEYS:
raise ValidationException(
'Invalid role.',
'role'
)
thisUser = Applet().getCurrentUser()
user = user if user else str(thisUser['_id'])
if bool(rsvp):
groupName = {
'title': '{} {}s'.format(
str(applet.get('_id')),
role
)
}
groupName['lower'] = groupName.get('title', '').lower()
group = GroupModel().findOne(query={'lowerName': groupName['lower']})
if not group or group is None:
group = GroupModel().createGroup(
name=groupName['title'],
creator=thisUser,
public=bool(role in ['manager', 'reviewer'])
)
try:
assignments = CollectionModel().createCollection(
name="Assignments",
public=True,
reuseExisting=True
)
assignmentType = 'collection'
except AccessException:
assignments, assignmentType = selfAssignment()
appletAssignment = list(FolderModel().childFolders(
parent=assignments,
parentType=assignmentType,
user=thisUser,
filters={
'meta.applet.@id': str(applet['_id']) if '_id' in applet else None
}
))
appletAssignment = appletAssignment[0] if len(
appletAssignment
) else FolderModel().setMetadata(
FolderModel().createFolder(
parent=assignments,
name=FolderModel().preferredName(applet),
parentType=assignmentType,
public=False,
creator=thisUser,
allowRename=True,
reuseExisting=False
),
{
'applet': {
'@id': str(applet['_id']) if '_id' in applet else None
}
}
)
meta = appletAssignment.get('meta', {})
members = meta.get('members', []) if meta.get(
'members'
) is not None else []
cUser = getUserCipher(appletAssignment, user)
subject = subject.upper() if subject is not None and subject.upper(
) in SPECIAL_SUBJECTS else getUserCipher(
appletAssignment,
str(thisUser['_id']) if subject is None else subject
)
thisAppletAssignment = {
'@id': str(cUser),
'roles': {
role: True if role not in [
'reviewer',
'user'
] else [
subject
]
}
}
for i, u in enumerate(members):
if '@id' in u and u["@id"]==str(cUser):
thisAppletAssignment = members.pop(i)
if 'roles' not in thisAppletAssignment:
thisAppletAssignment['roles'] = {}
thisAppletAssignment['roles'][
role
] = True if role not in [
'reviewer',
'user'
] else [
subject
] if (
subject in SPECIAL_SUBJECTS
) or (
'reviewer' not in thisAppletAssignment[
'roles'
]
) else list(set(
thisAppletAssignment['roles']['reviewer'] + [subject]
).difference(set(
SPECIAL_SUBJECTS
))) if "ALL" not in thisAppletAssignment['roles'][
'reviewer'
] else ["ALL"]
members.append(thisAppletAssignment)
meta['members'] = members
appletAssignment = FolderModel().setMetadata(appletAssignment, meta)
authorizeReviewers(appletAssignment)
return(appletAssignment)
def selfAssignment():
thisUser = Applet().getCurrentUser()
assignmentsFolder = FolderModel().createFolder(
parent=thisUser,
parentType='user',
name='Assignments',
creator=thisUser,
public=False,
reuseExisting=True
)
return((
assignmentsFolder,
'folder'
))
def _setConstraints(applet, activity, schedule, user, refreshCache=False):
"""
Helper function for method recursion.
:param applet: applet Object
:type applet: dict
:param activity: Activity ID
:type activity: str, list, or None
:param schedule: schedule data
:type schedule: dict, list, or None
:param user: user making the call
:type user: dict
:returns: updated applet Object
"""
if activity is None:
if schedule is not None:
appletMeta = applet.get('meta', {})
appletMeta['applet']['schedule'] = schedule
applet = AppletModel().setMetadata(applet, appletMeta)
return(applet)
if isinstance(activity, str) and activity.startswith('['):
try:
activity = [
activity_.replace(
"'",
""
).replace(
'"',
''
).strip() for activity_ in activity[1:-1].split(',')
]
except (TypeError, AttributeError) as e:
print(e)
if isinstance(activity, list):
for activity_ in activity:
applet = _setConstraints(
applet,
activity_,
schedule,
user
)
return(applet)
try:
activityLoaded = ActivityModel().getFromUrl(
activity,
'activity',
user,
refreshCache
)[0]
except:
activityLoaded = ActivityModel().load(
activity,
AccessType.WRITE,
user
)
try:
activityMeta = activityLoaded['meta'].get('activity')
except AttributeError:
raise ValidationException(
'Invalid activity.',
'activity'
)
activityKey = activityMeta.get(
'url',
activityMeta.get(
'@id',
activityLoaded.get(
'_id'
)
)
)
if activityKey is None:
raise ValidationException(
'Invalid activity.',
'activity'
)
else:
activityKey = jsonld_expander.reprolibPrefix(activityKey)
protocolExpanded = jsonld_expander.formatLdObject(
applet,
'applet',
user
).get('applet', {})
protocolOrder = protocolExpanded.get('ui', {}).get('order', [])
framedActivityKeys = [
protocolOrder[i] for i, v in enumerate(
protocolExpanded.get(
"reprolib:terms/order"
)[0].get(
"@list"
)
) if jsonld_expander.reprolibPrefix(v.get("@id"))==activityKey
]
if schedule is not None:
appletMeta = applet.get('meta', {})
scheduleInApplet = appletMeta.get('applet', {}).get('schedule', {})
for k in framedActivityKeys:
scheduleInApplet[k] = schedule
appletMeta['applet']['schedule'] = scheduleInApplet
applet = AppletModel().setMetadata(applet, appletMeta)
return(applet)
|
py | 1a3e189f52a1cd5da5f4170d552cede4d9d0b6da | from transformers import AutoTokenizer, AutoModelWithLMHead
import numpy as np
from pathlib import Path
import json
import joblib


def model_fn(model_dir):
    tokenizer = AutoTokenizer.from_pretrained("distilgpt2", cache_dir=model_dir)
    model = AutoModelWithLMHead.from_pretrained("distilgpt2", cache_dir=model_dir)
    model_assets = {
        "tokenizer": tokenizer,
        "model": model
    }
    return model_assets


def input_fn(request_body_str, request_content_type):
    assert (
        request_content_type == "application/json"
    ), "content_type must be 'application/json'"
    request_body = json.loads(request_body_str)
    return request_body


def get_parameter(request_body, parameter_name, default):
    parameter = default
    if 'parameters' in request_body:
        if parameter_name in request_body['parameters']:
            parameter = request_body['parameters'][parameter_name]
    return parameter


def predict_fn(request_body, model_assets):
    input_text = request_body["text"]
    tokenizer = model_assets['tokenizer']
    model = model_assets['model']
    input_ids = tokenizer.encode(input_text, return_tensors='pt')
    sample_output = model.generate(
        input_ids,
        do_sample=True,
        min_length=get_parameter(request_body, 'min_length', 25),
        max_length=get_parameter(request_body, 'max_length', 100),
        top_k=0,
        temperature=get_parameter(request_body, 'temperature', 100)
    )
    output_text = tokenizer.decode(sample_output[0], skip_special_tokens=True)
    return {"text": output_text}


def output_fn(prediction, response_content_type):
    assert (
        response_content_type == "application/json"
    ), "accept must be 'application/json'"
    response_body_str = json.dumps(prediction)
    return response_body_str
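

# Illustrative local smoke test (not part of the original module): it chains the
# four inference handlers in the same order a hosting container would invoke them.
# The cache directory "./model_cache" is an assumed local path, not something the
# file itself defines.
if __name__ == "__main__":
    assets = model_fn("./model_cache")
    event = json.dumps({"text": "The weather today is", "parameters": {"max_length": 40}})
    body = input_fn(event, "application/json")
    prediction = predict_fn(body, assets)
    print(output_fn(prediction, "application/json"))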
|
py | 1a3e19bce84a9e6a8abbe394c49bf4e961f0d43e | from django.conf.urls import include, url

from .manga import MANGA
from .chapter import CHAPTER

urlpatterns = [
    url(r'^', include(MANGA)),
    url(r'^', include(CHAPTER)),
]
|
py | 1a3e1a5f562fef8b599ecb25e3e6790f7fdf7312 | # Boilerplate Lambda function pulled from AWS
import json


def handler(event, context):
    return {
        'statusCode': 200,
        'body': json.dumps('Hello from Architect Sandbox running python3.6!')
    }
|
py | 1a3e1b928cfc07d6e763cf52f5e4566809ce1afc | #!/usr/bin/env python
"""
/proc/thedir
"""
from slashproc_parser.basic_parser import BasicSPParser
class TheParser(BasicSPParser):
THEDIR = "/proc/thedir"
def __init__(self):
super(TheParser, self).__init__(self)
@staticmethod
def get_groups():
"""
REMOVE THIS DOCSTRING AND CREATE ONE APPROPRIATE TO THE PARSER
Ensure first group is the parser name and its parent is ['root']
Ensure group labels are unique
if there are multiple then subscript with number etc...
Ensure each group has a parent, and parents is a list
:rtype: dict
"""
groups = {
'theparser': {'label': "Formatted Long Parser Label",
'desc': "Description of the parser",
'parents': ['root']},
'group1': {'label': 'The first group',
'parents': ['theparser']},
'group2': {'label': 'The second group',
'parents': ['group1'],
'desc': "Desc recommended but not necessary"}
}
return groups
@staticmethod
def get_vars():
"""
REMOVE THIS DOCSTRING AND CREATE ONE APPROPRIATE TO THE PARSER
Ensure var labels are all lower case, contain underscores (not dash)
and the following chars are not permitted "()[]/\ "
Ensure every var has a unit where appropriate
:rtype: dict
"""
vars = {
'var1': {'label': 'The first Variable'},
'var2': {'label': 'The Second Variable',
'unit': 'kB',
'desc': 'Description recommended but not necessary'}
}
return vars
@staticmethod
def get_data():
"""
REMOVE THIS DOCSTRING AND CREATE ONE APPROPRIATE TO THE PARSER
Ensure first group is the parser name
Ensure return adheres to the groups structure
Ensure all groups are present in the groups dict
Ensure all vars adhere to the var format
Ensure all vars are present in the vars dict
Ensure every value is a string
:rtype: dict
"""
data = {'theparser': {
'group1': {
'group2': {'var1': 'val1',
'var2': 'val2'},
}
}
}
return data
if __name__ == "__main__":
c = TheParser()
c.test_parse()
|
py | 1a3e1c2ac07a09e4ff5ec962e939347d31704903 | from django.db import models


# Create your models here.
class Day(models.Model):
    account = models.CharField(max_length=255)
    member = models.CharField(max_length=255)
    date = models.DateField()
    meal = models.CharField(max_length=255)
    whoiscooking = models.CharField(max_length=255, blank=True)
    ingredient = models.CharField(max_length=255, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    ingredient_there = models.BooleanField()
|
py | 1a3e1caafaa37238d678502e342a8a8cc07ba18d | # -*- coding: utf-8 -*-
#!/usr/bin/python
from dealctrl import *


class deal_7_com(dealctrl):

    def __init__(self, con):
        dealctrl.__init__(self, con)

    def run(self):
        userid = int(self.recvdic['userid'])
        aid = int(self.recvdic['aid'])
        content = self.recvdic['content']
        sql = ("INSERT INTO `activity_comment` (`aid`,`userid`,`content`,`commenttime`) VALUES (%d,%d,'%s',%d)"
               % (aid, userid, content, self.now))
        self.log.write("sql: %s\n" % sql)
        self.db.execute(sql)
        cid = self.db.insert_id()
        senddic = {
            'type': '7_com_r',
            'reply': 1,
            'cid': cid
        }
        self.sendmessage(senddic)
        return 1
|
py | 1a3e1defc0e5499227a364704d4b20d61e629927 | from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
import scipy
import numbers
import random
from matplotlib import colors
import matplotlib.patches as mpatches
from statsmodels.nonparametric.kde import KDEUnivariate
from PIL import ImageFilter
from kornia import augmentation as augs
from kornia import filters, color
def adjust_learning_rate(epoch, opt, optimizer):
"""Sets the learning rate to the initial LR decayed by 0.2 every steep step"""
steps = np.sum(epoch > np.asarray(opt.lr_decay_epochs))
if steps > 0:
new_lr = opt.learning_rate * (opt.lr_decay_rate ** steps)
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def convert_to_np(tensor):
# convert pytorch tensors to numpy arrays
if not isinstance(tensor, np.ndarray):
tensor = tensor.cpu().numpy()
return tensor
def labels_to_dfc(tensor, no_savanna):
"""
INPUT:
Classes encoded in the training scheme (0-9 if savanna is a valid label
or 0-8 if not). Invalid labels are marked by 255 and will not be changed.
OUTPUT:
Classes encoded in the DFC2020 scheme (1-10, and 255 for invalid).
"""
# transform to numpy array
tensor = convert_to_np(tensor)
# copy the original input
out = np.copy(tensor)
# shift labels if there is no savanna class
if no_savanna:
for i in range(2, 9):
out[tensor == i] = i + 1
else:
pass
# transform from zero-based labels to 1-10
out[tensor != 255] += 1
# make sure the mask is intact and return transformed labels
assert np.all((tensor == 255) == (out == 255))
return out
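# A small worked example (illustrative, not part of the original file): with
# no_savanna=True a zero-based training label 2 is first shifted up to 3 and then
# moved to the one-based DFC2020 scheme, ending up as class 4; invalid pixels
# marked 255 are left untouched.
# labels_to_dfc(np.array([0, 2, 255]), no_savanna=True)   -> array([  1,   4, 255])
# labels_to_dfc(np.array([0, 2, 255]), no_savanna=False)  -> array([  1,   3, 255])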
def display_input_batch(tensor, display_indices=0, brightness_factor=3):
# extract display channels
tensor = tensor[:, display_indices, :, :]
# restore NCHW tensor shape if single channel image
if len(tensor.shape) == 3:
tensor = tensor.unsqueeze(1)
# scale image
tensor = torch.clamp((tensor * brightness_factor), 0, 1)
return tensor
def display_label_batch(tensor, no_savanna=False):
# get predictions if input is one-hot encoded
if len(tensor.shape) == 4:
tensor = tensor.max(1)[1]
# convert train labels to DFC2020 class scheme
tensor = labels_to_dfc(tensor, no_savanna)
# colorize labels
cmap = mycmap()
imgs = []
for s in range(tensor.shape[0]):
im = (tensor[s, :, :] - 1) / 10
im = cmap(im)[:, :, 0:3]
im = np.rollaxis(im, 2, 0)
imgs.append(im)
tensor = np.array(imgs)
return tensor
def classnames():
return ["Forest", "Shrubland", "Savanna", "Grassland", "Wetlands",
"Croplands", "Urban/Built-up", "Snow/Ice", "Barren", "Water"]
def mycmap():
cmap = colors.ListedColormap(['#009900',
'#c6b044',
'#fbff13',
'#b6ff05',
'#27ff87',
'#c24f44',
'#a5a5a5',
'#69fff8',
'#f9ffa4',
'#1c0dff',
'#ffffff'])
return cmap
def mypatches():
patches = []
for counter, name in enumerate(classnames()):
patches.append(mpatches.Patch(color=mycmap().colors[counter],
label=name))
return patches
## tensor operation
def _is_tensor_video_clip(clip):
if not torch.is_tensor(clip):
raise TypeError("clip should be Tensor. Got %s" % type(clip))
if not clip.ndimension() == 4:
raise ValueError("clip should be 4D. Got %dD" % clip.dim())
return True
def crop(clip, i, j, h, w):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
"""
assert len(clip.size()) == 4, "clip should be a 4D tensor"
return clip[..., i:i + h, j:j + w]
def center_crop(clip, crop_size):
assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor"
h, w = clip.size(-2), clip.size(-1)
th, tw = crop_size, crop_size
assert h >= th and w >= tw, "height and width must be no smaller than crop_size"
i = int(round((h - th) / 2.0))
j = int(round((w - tw) / 2.0))
return crop(clip, i, j, th, tw)
class CenterCropVideo(object):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: central cropping of video clip. Size is
(C, T, size, size)
"""
return center_crop(clip, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
def ztz(x, y):
"""
Compute the inner product between datapoints from corresponding patches of data
organized in batches. Since x and y contain values in the range [-1, 1],
the inner product is normalized to the range [0, 1] using max_norm.
Input:x - float, array of [batch_size, patch_size, patch_size, num_channels],
Batch of patches from data domain x.
y - float, array of [batch_size, patch_size, patch_size, num_channels],
Batch of patches from data domain y.
Output:
ztz - float, array of [batch_size, patch_size^2, patch_size^2], Inner product
"""
max_norm = x.shape[-1]
flat_shape = [x.shape[0], x.shape[1] ** 2, -1]
x = torch.reshape(x, flat_shape)
y = torch.reshape(y, flat_shape)
#ztz = (tf.keras.backend.batch_dot(y, x, -1) + max_norm) / (2 * max_norm) ??
ztz = (torch.bmm(x, y.permute(0, 2, 1)) + max_norm)/ (2 * max_norm)
return ztz
def affinity(x):
"""
Compute the affinity matrices of the patches of contained in a batch.
It first computes the distances between the datapoints within a patch.
Then it finds the suitable kernel width for each patch.
Finally, applies the RBF.
Input:
x - float, array of [batch_size, patch_size, patch_size, num_channels],
Batch of patches from data domain x.
Output:
A - float, array of [batch_size, patch_size^2, patch_size^2], Affinity matrix
"""
_, h, w, c = x.shape
x_1 = torch.unsqueeze(torch.reshape(x, [-1, h * w, c]), 2)
x_2 = torch.unsqueeze(torch.reshape(x, [-1, h * w, c]), 1)
A = torch.norm(x_1 - x_2, dim=-1)
krnl_width, _ = torch.topk(A, k=A.shape[-1])
krnl_width = torch.mean(krnl_width[:, :, (h * w) // 4], 1)
krnl_width = torch.reshape(krnl_width, (-1, 1, 1))
krnl_width = torch.where(torch.eq(krnl_width, torch.zeros_like(krnl_width)), torch.ones_like(krnl_width), krnl_width)
A = torch.exp(-(torch.div(A, krnl_width) ** 2))
return A
def Degree_matrix(x, y):
"""
Compute the degree matrix starting from corresponding patches of data organized
in batches. It first computes the affinity matrices of the two batches and then
it computes the norm of the difference between the rows of Ax and the rows of Ay.
Then it is normalized.
Input:
x - float, array of [batch_size, patch_size, patch_size, num_channels_x],
Batch of patches from data domain x.
y - float, array of [batch_size, patch_size, patch_size, num_channels_y],
Batch of patches from data domain y.
Output:
D - float, array of [batch_size, patch_size^2, patch_size^2], Degree matrix
"""
ax = affinity(x)
ay = affinity(y)
D = torch.norm(torch.unsqueeze(ax, 1) - torch.unsqueeze(ay, 2), 2, -1)
D = (D - torch.min(D)) / (torch.max(D) - torch.min(D))
return D
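# Shape sketch (illustrative, not part of the original file): for batches of
# co-registered, channels-last patches the helpers above behave as follows,
# assuming 5x5 patches.
# x = torch.rand(8, 5, 5, 3)   # domain x: [batch, patch, patch, channels_x]
# y = torch.rand(8, 5, 5, 4)   # domain y: [batch, patch, patch, channels_y]
# affinity(x).shape          -> torch.Size([8, 25, 25])
# Degree_matrix(x, y).shape  -> torch.Size([8, 25, 25])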
#CVA
def cva(X, Y):
diff = X - Y
diff_s = (diff**2).sum(axis=-1)
return torch.sqrt(diff_s)
def SFA(X, Y):
'''
see http://sigma.whu.edu.cn/data/res/files/SFACode.zip
'''
norm_flag = True
m, n = np.shape(X)
meanX = np.mean(X, axis=0)
meanY = np.mean(Y, axis=0)
stdX = np.std(X, axis=0)
stdY = np.std(Y, axis=0)
Xc = (X - meanX) / stdX
Yc = (Y - meanY) / stdY
Xc = Xc.T
Yc = Yc.T
A = np.matmul((Xc-Yc), (Xc-Yc).T)/m
B = (np.matmul(Xc, Xc.T)+np.matmul(Yc, Yc.T))/2/m
D, V = scipy.linalg.eig(A, B) # V is column wise
D = D.real
#idx = D.argsort()
#D = D[idx]
if norm_flag is True:
aux1 = np.matmul(np.matmul(V.T, B), V)
aux2 = 1/np.sqrt(np.diag(aux1))
V = V * aux2
#V = V[:,0:3]
X_trans = np.matmul(V.T, Xc).T
Y_trans = np.matmul(V.T, Yc).T
return X_trans, Y_trans
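# Illustrative usage (not part of the original file): a common change-detection
# recipe is to project two co-registered images into the SFA space and then take
# the per-pixel change intensity with cva(); the variable names below are
# assumptions, not definitions from this module.
# flat_x = image_t1.reshape(-1, bands)   # (pixels, bands) numpy arrays
# flat_y = image_t2.reshape(-1, bands)
# x_sfa, y_sfa = SFA(flat_x, flat_y)
# change_magnitude = cva(torch.from_numpy(x_sfa), torch.from_numpy(y_sfa))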
# split whole image to patches
def patchize(img: torch.Tensor, patch_size, unfold_stride) -> torch.Tensor:
"""
img.shape
B : batch size
C : channels of image (same to patches.shape[1])
iH : height of image
iW : width of image
pH : height of patch
pW : width of patch
V : values in a patch (pH * pW * C)
"""
B, C, iH, iW = img.shape
pH = patch_size
pW = patch_size
unfold = nn.Unfold(kernel_size=(pH, pW), stride=unfold_stride)
patches = unfold(img) # (B, V, P)
patches = patches.permute(0, 2, 1).contiguous() # (B, P, V)
patches = patches.view(-1, C, pH, pW) # (P, C, pH, pW)
return patches
#thresholding methods
def kde_statsmodels_u(x, x_grid, bandwidth, **kwargs):
kde = KDEUnivariate(x)
kde.fit(bw=bandwidth, **kwargs)
return kde.evaluate(x_grid)
#Rosin
def rosin(heatmap):
heatmap_list = heatmap.flatten().tolist()
f_heatmap = np.array(heatmap_list)
new_data = f_heatmap - np.min(f_heatmap)
print(np.min(new_data))
# declare kernel estimation parameters
bandwidth = 0.06
# estimate kernel
x_grid = np.linspace(0, np.max(new_data), 90) # x-coordinates for data points in the kernel
kernel = kde_statsmodels_u(new_data, x_grid, bandwidth) # get kernel
# get the index of the kernal peak
maxIndex = np.argmax(kernel)
# Assign percent below the max kernel value for the 'zero' peak i.e. a value of 2 = 2% the maximum value
maxPercent = 1
# assign x and y coords for peak-to-base line
x1 = x_grid[maxIndex]
y1 = kernel[maxIndex]
# find all local minima in the kernel
local_mins = np.where(np.r_[True, kernel[1:] < kernel[:-1]] & np.r_[kernel[:-1] < kernel[1:], True])
local_mins = local_mins[0] # un 'tuple' local mins
# filter for points below a certain kernel max
local_mins = local_mins[(np.where(kernel[local_mins] < (y1 / (100 / maxPercent))))]
# get local minima beyond the peak
local_mins = local_mins[(np.where(local_mins > maxIndex))] # get local minima that meet percent max threshold
x2_index = local_mins[0] # find minimum beyond peak of kernel
x2 = x_grid[x2_index] # index to local min beyond kernel peak
y2 = kernel[x2_index]
# calculate line slope and get perpendicular line
slope = (y2 - y1) / (x2 - x1)
# find y_intercept for line
y_int = y1 - (slope * x1)
slopeTan = -1 / slope # perpendicular line slope
# allocate lists for x-y coordinates and distance values
dist = list()
# save x-y coords of intersect points
yii = list()
xii = list()
# iterate and generate perpendicular lines
for i in range(maxIndex + 1, x2_index):
# find intersection point between lines
# determine equation of the perpendicular line based on current bin coordinate
xt1 = x_grid[i]
yt1 = kernel[i]
y_int_tan = yt1 - (slopeTan * xt1)
# calculate intersection point between lines
b1 = y_int
b2 = y_int_tan
m1 = slope
m2 = slopeTan
# y = mx + b
# Set both lines equal to find the intersection point in the x direction, y1=y2, x1=x2
# y1 = m1 * x + b1, y2 = m2 * x + b2
# if y1 == y2...
# m1 * x + b1 = m2 * x + b2
# m1 * x - m2 * x = b2 - b1
# x * (m1 - m2) = b2 - b1
# x = (b2 - b1) / (m1 - m2)
xi = (b2 - b1) / (m1 - m2)
# Now solve for y -- use either line, because they are equal here
# y = mx + b
yi = m1 * xi + b1
# assert that the new line generated is equal or very close to the correct perpendicular value of the max deviation line
assert ((m2 - m2 * .01) < ((yi - y_int_tan) / (xi - 0)) < (
m2 + m2 * .01)) # an error will throw if this statement is false
# save x-y coordinates of the point
yii.append(yi)
xii.append(xi)
# get euclidean distance between kernel coordinate and intersect point
euc = np.sqrt((xi - xt1) ** 2 + (yi - yt1) ** 2)
# store the euclidean distance
dist.append(euc)
# get kernel point with the maximum distance from the Rosin line
# remember, we started at maxIndex+1, so the index of the optimalPoint in the kernel array will be maxIndex+1
# + the index in the 'dist' array
optimalPoint = np.argmax(dist) + maxIndex + 1
# plot the optimal point over the kernel with Rosin line we plotted before
threshold = x_grid[optimalPoint]
final_threshold = threshold + np.min(f_heatmap)
#return heatmap < final_threshold
return final_threshold
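# Illustrative usage (not part of the original file): rosin() returns a scalar
# threshold on the heatmap values, so a binary change map can be obtained with a
# simple comparison, mirroring the commented-out return inside the function.
# `heatmap` here stands for any 2D array of change scores.
# threshold = rosin(heatmap)
# change_mask = heatmap < threshold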
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[0.1, 2.0]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
def default(val, def_val):
return def_val if val is None else val
# augmentation utils
class RandomApply(nn.Module):
def __init__(self, fn, p):
super().__init__()
self.fn = fn
self.p = p
def forward(self, x):
if random.random() > self.p:
return x
return self.fn(x)
# default SimCLR augmentation
image_size = 256
DEFAULT_AUG = nn.Sequential(
RandomApply(augs.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
augs.RandomGrayscale(p=0.2),
augs.RandomHorizontalFlip(),
RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),
augs.RandomResizedCrop((image_size, image_size)))
#color.Normalize(mean=torch.tensor([0.485, 0.456, 0.406]), std=torch.tensor([0.229, 0.224, 0.225])))
if __name__ == '__main__':
    meter = AverageMeter()
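    # Illustrative additions to the existing smoke test (not in the original file):
    # exercise AverageMeter, accuracy() and patchize() with small random tensors.
    meter.update(0.5, n=4)
    meter.update(1.0, n=4)
    print(meter.avg)  # 0.75

    logits = torch.randn(16, 10)
    targets = torch.randint(0, 10, (16,))
    top1, top5 = accuracy(logits, targets, topk=(1, 5))
    print(top1.item(), top5.item())

    patches = patchize(torch.rand(2, 3, 64, 64), patch_size=16, unfold_stride=16)
    print(patches.shape)  # torch.Size([32, 3, 16, 16])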
|
py | 1a3e1e00be991e8ed3dee61a89a340126a6e98f5 | import datetime


def now() -> str:
    return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f") + "Z"
|
py | 1a3e2092af1917a52840a21cafd2614caef31865 | from django.db import migrations, transaction


class Migration(migrations.Migration):

    dependencies = [
        ('msgboard', '0001_initial'),
    ]

    def generate_data(apps, schema_editor):
        from msgboard.models import Message

        test_data = [
            ('Test User1', 'A simple test message'),
            ('Test User2', 'Another simple test message'),
        ]

        with transaction.atomic():
            for author, text in test_data:
                Message(author=author, text=text).save()

    operations = [
        migrations.RunPython(generate_data),
    ]
|
py | 1a3e21e4f00e65c9eb9b63909fd6702d481df1ea | '''
File: team_net.py
Author: Thomas Kost
Date: 23 March 2022
@brief neural net for predicting March Madness outcomes given team scores
'''
|
py | 1a3e227155fbfe97d69928181d78ebd75d276c58 | import os
import copy
import tempfile
import atexit
import shutil
import yaml
from pmd_beamphysics import ParticleGroup
from abc import ABC, abstractmethod
from . import tools
class Base(ABC):
"""
Base Interface for LUME-compatible code.
Parameters
----------
input_file : str, optional
The input file to be used, by default None
initial_particles : dict, optional
Initial Particle metadata to be used, by default None
verbose : bool, optional
Whether or not to produce verbose output, by default False
timeout : float, optional
The timeout in seconds to be used, by default None
"""
def __init__(
self, input_file=None, *, initial_particles=None,
verbose=False, timeout=None, **kwargs):
self._input_file = input_file
self._initial_particles = initial_particles
self._input = None
self._output = None
# Execution
self._timeout = timeout
# Logging
self._verbose = verbose
# State
self._configured = False
self._finished = False
self._error = False
@property
def input(self):
"""
Input data as a dictionary
"""
return self._input
@input.setter
def input(self, input):
self._input = input
@property
def output(self):
"""
Requires the openPMD standard; in the future we can add more methods
for libs such as pandas DataFrames, xarray DataArrays and Dask Arrays.
"""
return self._output
@output.setter
def output(self, output):
self._output = output
@property
def initial_particles(self):
"""
Initial Particles
"""
return self._initial_particles
@initial_particles.setter
def initial_particles(self, initial_particles):
self._initial_particles = initial_particles
@abstractmethod
def configure(self):
"""
Configure and set up for run.
"""
raise NotImplementedError
@abstractmethod
def run(self):
"""
Execute the code.
"""
raise NotImplementedError
@property
def verbose(self):
"""
Read or configure the verbose flag.
"""
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def timeout(self):
"""
Read or configure the timeout in seconds.
"""
return self._timeout
@timeout.setter
def timeout(self, timeout):
self._timeout = timeout
@property
def configured(self):
"""
Get or set the configured flag state.
"""
return self._configured
@configured.setter
def configured(self, configured):
self._configured = configured
@property
def finished(self):
"""
Get or set the finished flag state.
"""
return self._finished
@finished.setter
def finished(self, finished):
self._finished = finished
@property
def error(self):
"""
Get or set the error flag state.
"""
return self._error
@error.setter
def error(self, error):
self._error = error
@property
def input_file(self):
"""
Get or set the input file to be processed.
"""
return self._input_file
@input_file.setter
def input_file(self, input_file):
"""dictionary with parameters?"""
self._input_file = input_file
def fingerprint(self):
"""
Data fingerprint (hash) using the input parameters.
Returns
-------
fingerprint : str
The hash for this object based on the input parameters.
"""
return tools.fingerprint(self.input)
def copy(self):
"""
Returns a deep copy of this object.
If a tempdir is being used, will clear this and deconfigure.
"""
other = copy.deepcopy(self)
other.reset()
return other
def reset(self):
"""
Reset this object to its initial state.
"""
pass
def vprint(self, *args, **kwargs):
# Verbose print
if self._verbose:
print(*args, **kwargs)
@classmethod
def from_yaml(cls, yaml_file):
"""
Returns an Impact object instantiated from a YAML config file
Will load intial_particles from an h5 file.
"""
# Try file
if os.path.exists(tools.full_path(yaml_file)):
yaml_file = tools.full_path(yaml_file)
config = yaml.safe_load(open(yaml_file))
# The input file might be relative to the yaml file
if 'input_file' in config:
f = os.path.expandvars(config['input_file'])
if not os.path.isabs(f):
# Get the yaml file root
root, _ = os.path.split(tools.full_path(yaml_file))
config['input_file'] = os.path.join(root, f)
else:
# Try raw string
config = yaml.safe_load(yaml_file)
# Form ParticleGroup from file
if 'initial_particles' in config:
f = config['initial_particles']
if not os.path.isabs(f):
root, _ = os.path.split(tools.full_path(yaml_file))
f = os.path.join(root, f)
config['initial_particles'] = ParticleGroup(f)
return cls(**config)
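# Illustrative YAML sketch (not from the original file) of the keys from_yaml
# understands: 'input_file' may be relative to the YAML file, 'initial_particles'
# points at an openPMD-compatible h5 file loaded into a ParticleGroup, and the
# remaining keys are passed straight to the constructor. Filenames are assumptions.
#
#   input_file: impact.in
#   initial_particles: particles.h5
#   verbose: true
#   timeout: 3600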
class CommandWrapper(Base):
"""
Interface for LUME-compatible code.
Parameters
----------
input_file : str, optional
The input file to be used, by default None
initial_particles : dict, optional
Initial Particle metadata to be used, by default None
command : str, optional
The command to be executed by this wrapper. E.g. ImpactTexe
If not specified, the class attribute `COMMAND` is used, by default None
command_mpi : str, optional
The command to be executed by this wrapper when using MPI. E.g. ImpactTexe-mpi
If not specified, the class attribute `COMMAND_MPI` is used, by default None
use_mpi : bool, optional
Whether or not to use MPI when running this code, by default False
mpi_run : str, optional
The command syntax to invoke mpirun. If not specified, the class attribute `MPI_RUN` is used.
This is expected to be a formated string taking as parameters the number of processors (nproc) and
the command to be executed (command_mpi), by default None
use_temp_dir : bool, optional
Whether or not to use a temporary directory to run the process, by default True
workdir : str, optional
The work directory to be used, by default None
verbose : bool, optional
Whether or not to produce verbose output, by default False
timeout : float, optional
The timeout in seconds to be used, by default None
"""
COMMAND = ""
COMMAND_MPI = ""
MPI_RUN = "mpirun -n {nproc} {command_mpi}"
def __init__(
self, input_file=None, *, initial_particles=None,
command=None, command_mpi=None, use_mpi=False, mpi_run="",
use_temp_dir=True, workdir=None,
verbose=False, timeout=None):
super().__init__(
input_file=input_file, initial_particles=initial_particles, verbose=verbose, timeout=timeout
)
# Execution
self._command = command or self.COMMAND
self._command_mpi = command_mpi or self.COMMAND_MPI
self._use_mpi = use_mpi
self._mpi_run = mpi_run or self.MPI_RUN
self._tempdir = None
self._use_temp_dir = use_temp_dir
self._workdir = workdir
self._base_path = None
@property
def use_mpi(self):
"""
Whether or not MPI should be used if supported.
"""
return self._use_mpi
@use_mpi.setter
def use_mpi(self, use_mpi):
self._use_mpi = use_mpi
@property
def mpi_run(self):
"""
The command syntax to invoke mpirun. If not specified, the class attribute `MPI_RUN` is used.
This is expected to be a formatted string taking as parameters the number of processors (nproc) and
the command to be executed (command_mpi).
"""
return self._mpi_run
@mpi_run.setter
def mpi_run(self, mpi_run):
self._mpi_run = mpi_run
@property
def path(self):
"""
The base path used by the code to manipulate files.
"""
return self._base_path
@path.setter
def path(self, path):
self._base_path = path
@property
def use_temp_dir(self):
"""
Whether or not the code is using temporary dir to run.
Returns
-------
bool
"""
return self._use_temp_dir
@property
def command(self):
"""
Get or set the command to be executed. Defaults to `COMMAND`.
"""
return self._command
@command.setter
def command(self, command):
cmd = command
if command:
cmd = tools.full_path(command)
assert os.path.exists(cmd), 'ERROR: Command does not exist:' + command
self._command = cmd
@property
def command_mpi(self):
"""
Get or set the command to be executed when running with MPI. Defaults to `COMMAND_MPI`.
"""
return self._command_mpi
@command_mpi.setter
def command_mpi(self, command_mpi):
cmd = command_mpi
if command_mpi:
cmd = tools.full_path(command_mpi)
assert os.path.exists(cmd), 'ERROR: Command does not exist:' + command_mpi
self._command_mpi = cmd
def get_run_script(self, write_to_path=True):
"""
Assembles the run script. Optionally writes a file 'run' with this line to path.
This expect to run with .path as the cwd.
Parameters
----------
write_to_path : bool
Whether or not to write the script to the path.
Returns
-------
runscript : str
The script to run the command.
"""
_, infile = os.path.split(self.input_file) # Expect to run locally. Astra has problems with long paths.
runscript = [self.command, infile]
if write_to_path:
with open(os.path.join(self.path, 'run'), 'w') as f:
f.write(' '.join(runscript))
return runscript
@abstractmethod
def archive(self, h5=None):
"""
Dump inputs and outputs into HDF5 file.
Parameters
----------
h5 : str or h5py.File
The filename or handle to HDF5 file in which to write the information.
If not informed, a new file is generated.
Returns
-------
h5 : h5py.File
Handle to the HDF5 file.
"""
raise NotImplementedError
@classmethod
def from_archive(cls, archive_h5):
"""
Class method to return a new instance via restore of an archive file.
Parameters
----------
archive_h5 : str or h5py.File
The filename or handle to HDF5 file in which to write the information.
Returns
-------
c : object
An instance of the class with information from the archive file.
"""
c = cls()
c.load_archive(archive_h5)
return c
@abstractmethod
def plot(self, y=[], x=None, xlim=None, ylim=None, ylim2=None, y2=[], nice=True,
include_layout=True, include_labels=False, include_particles=True, include_legend=True,
return_figure=False):
"""
Plots output multiple keys.
Parameters
----------
y : list
List of keys to be displayed on the Y axis
x : str
Key to be displayed as X axis
xlim : list
Limits for the X axis
ylim : list
Limits for the Y axis
ylim2 : list
Limits for the secondary Y axis
y2 : list
List of keys to be displayed on the secondary Y axis
nice : bool
Whether or not a nice SI prefix and scaling will be used to
make the numbers reasonably sized. Default: True
include_layout : bool
Whether or not to include a layout plot at the bottom. Default: True
include_labels : bool
Whether or not the layout will include element labels. Default: False
include_particles : bool
Whether or not to plot the particle statistics as dots. Default: True
include_legend : bool
Whether or not the plot should include the legend. Default: True
return_figure : bool
Whether or not to return the figure object for further manipulation.
Default: True
kwargs : dict
Extra arguments can be passed to the specific plotting function.
Returns
-------
fig : matplotlib.pyplot.figure.Figure
The plot figure for further customizations or `None` if `return_figure` is set to False.
"""
raise NotImplementedError
@abstractmethod
def write_input(self, input_filename):
"""
Write the input parameters into the file.
Parameters
----------
input_filename : str
The file in which to write the input parameters
"""
raise NotImplementedError
@abstractmethod
def input_parser(self, path):
"""
Invoke the specialized input parser and returns the
input dictionary.
Parameters
----------
path : str
Path to the input file
Returns
-------
input : dict
The input dictionary
"""
raise NotImplementedError
def load_input(self, input_filepath, **kwargs):
"""
Invoke the `input_parser` with the given input file path as argument.
This method sets the input property to the contents of the input file after the parser.
Parameters
----------
input_filepath : str
The input file path
kwargs : dict
Support for extra arguments.
"""
f = tools.full_path(input_filepath)
self.original_path, self.original_input_file = os.path.split(f) # Get original path, filename
self.input = self.input_parser(f)
@abstractmethod
def load_output(self, **kwargs):
"""
Reads and load into `.output` the outputs generated by the code.
"""
raise NotImplementedError
@abstractmethod
def load_archive(self, h5, configure=True):
"""
Loads input and output from archived h5 file.
Parameters
----------
h5 : str or h5py.File
The filename or handle on h5py.File from which to load input and output data
configure : bool, optional
Whether or not to invoke the configure method after loading, by default True
"""
raise NotImplementedError
def reset(self):
"""
Reset this object to its initial state.
"""
super().reset()
# Clear this
if self._use_temp_dir:
self._base_path = None
self._configured = False
def setup_workdir(self, workdir, cleanup=True):
"""
Set up the work directory if `use_temp_dir` is set.
Parameters
----------
workdir : str
The directory name.
cleanup : bool
Whether or not to remove the directory at exit. Defaults to True.
"""
# Set paths
if self._use_temp_dir:
# Need to attach this to the object. Otherwise it will go out of scope.
self._tempdir = tempfile.TemporaryDirectory(dir=workdir)
self._base_path = self._tempdir.name
if cleanup:
atexit.register(self._cleanup_workdir)
else:
# Work in place
self._base_path = self.original_path
def _cleanup_workdir(self):
if self._tempdir:
try:
self._tempdir.cleanup()
except OSError:
shutil.rmtree(self._tempdir.name, ignore_errors=True)
|
py | 1a3e22a833baea90ccc575365704e1443a6057e0 |
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='sizesrc',
parent_name='funnelarea.textfont',
**kwargs
):
super(SizesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name='size', parent_name='funnelarea.textfont', **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop('array_ok', True),
edit_type=kwargs.pop('edit_type', 'plot'),
min=kwargs.pop('min', 1),
role=kwargs.pop('role', 'style'),
**kwargs
)
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='familysrc',
parent_name='funnelarea.textfont',
**kwargs
):
super(FamilysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='family',
parent_name='funnelarea.textfont',
**kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop('array_ok', True),
edit_type=kwargs.pop('edit_type', 'plot'),
no_blank=kwargs.pop('no_blank', True),
role=kwargs.pop('role', 'style'),
strict=kwargs.pop('strict', True),
**kwargs
)
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='colorsrc',
parent_name='funnelarea.textfont',
**kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name='color', parent_name='funnelarea.textfont', **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop('array_ok', True),
edit_type=kwargs.pop('edit_type', 'plot'),
role=kwargs.pop('role', 'style'),
**kwargs
)
|
py | 1a3e230ac003b6e0d99f66523bbc1b85b61dc8b9 | # Generated by Django 2.1.4 on 2019-01-06 12:37
from django.conf import settings
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('mainapp', '0019_auto_20181227_1534'),
]
operations = [
migrations.RenameModel(
old_name='HistoricalOrganizationMembership',
new_name='HistoricalMembership',
),
migrations.RenameModel(
old_name='OrganizationMembership',
new_name='Membership',
),
migrations.AlterModelOptions(
name='historicalmembership',
options={'get_latest_by': 'history_date', 'ordering': ('-history_date', '-history_id'), 'verbose_name': 'historical membership'},
),
migrations.RemoveIndex(
model_name='paper',
name='mainapp_pap_sort_da_a4a03b_idx',
),
migrations.RenameField(
model_name='agendaitem',
old_name='title',
new_name='name',
),
migrations.RenameField(
model_name='file',
old_name='displayed_filename',
new_name='filename',
),
migrations.RenameField(
model_name='historicalagendaitem',
old_name='title',
new_name='name',
),
migrations.RenameField(
model_name='historicalfile',
old_name='displayed_filename',
new_name='filename',
),
migrations.RenameField(
model_name='historicallocation',
old_name='postalCode',
new_name='postal_code',
),
migrations.RenameField(
model_name='historicallocation',
old_name='streetAddress',
new_name='street_address',
),
migrations.RenameField(
model_name='location',
old_name='postalCode',
new_name='postal_code',
),
migrations.RenameField(
model_name='location',
old_name='streetAddress',
new_name='street_address',
),
migrations.RemoveField(
model_name='consultation',
name='title',
),
migrations.RemoveField(
model_name='historicalconsultation',
name='title',
),
migrations.RemoveField(
model_name='historicallocation',
name='short_description',
),
migrations.RemoveField(
model_name='historicalpaper',
name='description',
),
migrations.RemoveField(
model_name='location',
name='bodies',
),
migrations.RemoveField(
model_name='location',
name='short_description',
),
migrations.RemoveField(
model_name='paper',
name='description',
),
migrations.AddField(
model_name='body',
name='ags',
field=models.CharField(blank=True, max_length=8, null=True),
),
migrations.AddField(
model_name='historicalbody',
name='ags',
field=models.CharField(blank=True, max_length=8, null=True),
),
migrations.AlterField(
model_name='file',
name='filesize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='file',
name='sort_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='historicalfile',
name='filesize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='historicalfile',
name='sort_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='historicalpaper',
name='sort_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='paper',
name='sort_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
py | 1a3e2319c3a2aabb82aa4a4dd1153d44b6ac6482 | # Copyright (C) 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Third party imports
import yaml
from tornado.log import app_log
# Local imports
from opsrest.settings import settings
from opsrest.constants import (PASSWD_SRV_SOCK_TYPE_KEY,
PASSWD_SRV_PUB_TYPE_KEY)
class PasswordServerConfig(object):
__instance = None
def __new__(cls):
if not hasattr(cls, 'instance'):
cls.instance = super(PasswordServerConfig, cls).__new__(cls)
return cls.instance
def __init__(self):
self.sock_fd = ''
self.pub_key_loc = ''
self.__get_passwd_srv_files_location__()
def __get_passwd_srv_files_location__(self):
try:
passwd_srv_yaml = \
open(settings['passwd_srv_yaml'], "r")
passwd_srv_files = yaml.load_all(passwd_srv_yaml)
for file in passwd_srv_files:
for k, v in file.items():
passwd_srv_list = v
for element in passwd_srv_list:
if element['type'] == PASSWD_SRV_SOCK_TYPE_KEY:
self.sock_fd = element['path']
if element['type'] == PASSWD_SRV_PUB_TYPE_KEY:
self.pub_key_loc = element['path']
passwd_srv_yaml.close()
except IOError as e:
app_log.debug("Failed to open Password Server YAML file: %s" % e)
|
py | 1a3e23dcd67e2d76aba7691ef9c0da82e34621ad | """
Author: Andreas Rössler
"""
import os
import argparse
import torch
import pretrainedmodels
import torch.nn as nn
import torch.nn.functional as F
from xception import xception
import math
import torchvision
def return_pytorch04_xception(init_checkpoint=None):
    # Raises warning "src not broadcastable to dst" but that's fine
model = xception(pretrained=False)
if init_checkpoint is not None:
# Load model in torch 0.4+
model.fc = model.last_linear
del model.last_linear
state_dict = torch.load(
init_checkpoint)
for name, weights in state_dict.items():
if 'pointwise' in name:
state_dict[name] = weights.unsqueeze(-1).unsqueeze(-1)
model.load_state_dict(state_dict)
model.last_linear = model.fc
del model.fc
return model
class TransferModel(nn.Module):
"""
Simple transfer learning model that takes an imagenet pretrained model with
a fc layer as base model and retrains a new fc layer for num_out_classes
"""
def __init__(self, modelchoice, num_out_classes=2, dropout=0.0, init_checkpoint=None):
super(TransferModel, self).__init__()
self.modelchoice = modelchoice
if modelchoice == 'xception':
self.model = return_pytorch04_xception(init_checkpoint)
# Replace fc
num_ftrs = self.model.last_linear.in_features
if not dropout:
self.model.last_linear = nn.Linear(num_ftrs, num_out_classes)
else:
print('Using dropout', dropout)
self.model.last_linear = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(num_ftrs, num_out_classes)
)
elif modelchoice == 'resnet50' or modelchoice == 'resnet18':
if modelchoice == 'resnet50':
self.model = torchvision.models.resnet50(pretrained=True)
if modelchoice == 'resnet18':
self.model = torchvision.models.resnet18(pretrained=True)
# Replace fc
num_ftrs = self.model.fc.in_features
if not dropout:
self.model.fc = nn.Linear(num_ftrs, num_out_classes)
else:
self.model.fc = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(num_ftrs, num_out_classes)
)
else:
raise Exception('Choose valid model, e.g. resnet50')
def set_trainable_up_to(self, boolean, layername="Conv2d_4a_3x3"):
"""
        Freezes all layers. If `boolean` is True, the layers after `layername`
        are made trainable again; otherwise only the final fully connected
        layer is unfrozen. If `layername` is None, all layers become trainable.
:param boolean:
:param layername: depends on network, for inception e.g. Conv2d_4a_3x3
:return:
"""
# Stage-1: freeze all the layers
if layername is None:
for i, param in self.model.named_parameters():
param.requires_grad = True
return
else:
for i, param in self.model.named_parameters():
param.requires_grad = False
if boolean:
# Make all layers following the layername layer trainable
ct = []
found = False
for name, child in self.model.named_children():
if layername in ct:
found = True
for params in child.parameters():
params.requires_grad = True
ct.append(name)
if not found:
                raise Exception('Layer {} not found, cannot finetune!'.format(
                    layername))
else:
if self.modelchoice == 'xception':
# Make fc trainable
for param in self.model.last_linear.parameters():
param.requires_grad = True
else:
# Make fc trainable
for param in self.model.fc.parameters():
param.requires_grad = True
def forward(self, x):
x = self.model(x)
return x
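

# --- Illustrative usage sketch (added; not part of the original file) --------
# A hedged example of how the transfer model above might be built and partially
# frozen for fine-tuning; the argument values here are assumptions.
def _example_build_resnet18():
    model, image_size, *_ = model_selection('resnet18', num_out_classes=2,
                                            dropout=0.5)
    # Freeze all layers except the final fully connected classifier.
    model.set_trainable_up_to(False)
    return model, image_size
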
def model_selection(modelname, num_out_classes,
dropout=None, init_checkpoint=None):
"""
:param modelname:
:return: model, image size, pretraining<yes/no>, input_list
"""
if modelname == 'xception':
return TransferModel(modelchoice='xception',
num_out_classes=num_out_classes, init_checkpoint=init_checkpoint), 299, \
True, ['image'], None
elif modelname == 'resnet18':
return TransferModel(modelchoice='resnet18', dropout=dropout,
num_out_classes=num_out_classes), \
224, True, ['image'], None
else:
        raise NotImplementedError(modelname)
 |
py | 1a3e240209b80959b1dff4267af0b6aa7e701835 | #!/Users/wafflecake/PycharmProjects/coffee/venv/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
py | 1a3e2506f9910819ddc1eb4222619fa09df838f5 | from math import ceil
# Input
N, A, B = map(int, input().split())
h = [int(input()) for _ in range(N)]
# Find the answer by binary search (bisection)
def bis(p, ok, ng):
mid = (ok + ng) // 2
return (
ok if abs(ok - ng) == 1 else
bis(p, mid, ng) if p(mid) else
bis(p, ok, mid)
)
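# Added note: the predicate below checks whether k explosions suffice --
# assuming each explosion deals A damage to one chosen monster and B to all
# others (A > B), every monster already takes k*B, so a monster with health x
# needs ceil((x - k*B) / (A - B)) extra centered hits, and the total of these
# must not exceed k. (Variable roles are inferred from the formula.)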
ans = bis(
lambda k: sum(max(0, ceil((x - k * B) / (A - B))) for x in h) <= k,
10**10,
0
)
# Output
print(ans)
|
py | 1a3e254a25ae202fbaf33d140ef558f74b460efd | # coding: utf-8
"""
Files
Upload and manage files. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class Configuration(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param host: Base url
:param api_key: Dict to store API key(s).
Each entry in the dict specifies an API key.
The dict key is the name of the security scheme in the OAS specification.
The dict value is the API key secret.
:param api_key_prefix: Dict to store API prefix (e.g. Bearer)
The dict key is the name of the security scheme in the OAS specification.
The dict value is an API key prefix when generating the auth data.
:param username: Username for HTTP basic authentication
:param password: Password for HTTP basic authentication
:param discard_unknown_keys: Boolean value indicating whether to discard
unknown properties. A server may send a response that includes additional
properties that are not known by the client in the following scenarios:
1. The OpenAPI document is incomplete, i.e. it does not match the server
implementation.
2. The client was generated using an older version of the OpenAPI document
and the server has been upgraded since then.
If a schema in the OpenAPI document defines the additionalProperties attribute,
then all undeclared properties received by the server are injected into the
additional properties map. In that case, there are undeclared properties, and
nothing to discard.
:Example:
API Key Authentication Example.
Given the following security scheme in the OpenAPI specification:
components:
securitySchemes:
cookieAuth: # name for the security scheme
type: apiKey
in: cookie
name: JSESSIONID # cookie name
You can programmatically set the cookie:
conf = hubspot.files.files.Configuration(
api_key={'cookieAuth': 'abc123'}
api_key_prefix={'cookieAuth': 'JSESSIONID'}
)
The following cookie will be added to the HTTP request:
Cookie: JSESSIONID abc123
"""
_default = None
def __init__(
self,
host="https://api.hubapi.com",
api_key=None,
api_key_prefix=None,
username=None,
password=None,
discard_unknown_keys=False,
):
"""Constructor"""
self.host = host
"""Default Base url
"""
self.temp_folder_path = None
"""Temp file folder for downloading files
"""
# Authentication Settings
self.api_key = {}
if api_key:
self.api_key = api_key
"""dict to store API key(s)
"""
self.api_key_prefix = {}
if api_key_prefix:
self.api_key_prefix = api_key_prefix
"""dict to store API prefix (e.g. Bearer)
"""
self.refresh_api_key_hook = None
"""function hook to refresh API key if expired
"""
self.username = username
"""Username for HTTP basic authentication
"""
self.password = password
"""Password for HTTP basic authentication
"""
self.discard_unknown_keys = discard_unknown_keys
self.access_token = None
"""access token for OAuth/Bearer
"""
self.logger = {}
"""Logging Settings
"""
self.logger["package_logger"] = logging.getLogger("hubspot.files.files")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = "%(asctime)s %(levelname)s %(message)s"
"""Log format
"""
self.logger_stream_handler = None
"""Log stream handler
"""
self.logger_file_handler = None
"""Log file handler
"""
self.logger_file = None
"""Debug file location
"""
self.debug = False
"""Debug switch
"""
self.verify_ssl = True
"""SSL/TLS verification
Set this to false to skip verifying SSL certificate when calling API
from https server.
"""
self.ssl_ca_cert = None
"""Set this to customize the certificate file to verify the peer.
"""
self.cert_file = None
"""client certificate file
"""
self.key_file = None
"""client key file
"""
self.assert_hostname = None
"""Set this to True/False to enable/disable SSL hostname verification.
"""
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
"""urllib3 connection pool's maximum number of connections saved
per pool. urllib3 uses 1 connection as default value, but this is
not the best value when you are making a lot of possibly parallel
requests to the same host, which is often the case here.
cpu_count * 5 is used as default value to increase performance.
"""
self.proxy = None
"""Proxy URL
"""
self.proxy_headers = None
"""Proxy headers
"""
self.safe_chars_for_path_param = ""
"""Safe chars for path_param
"""
self.retries = None
"""Adding retries to override urllib3 default value 3
"""
# Disable client side validation
self.client_side_validation = False
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ("logger", "logger_file_handler"):
setattr(result, k, copy.deepcopy(v, memo))
# shallow copy of loggers
result.logger = copy.copy(self.logger)
# use setters to configure loggers
result.logger_file = self.logger_file
result.debug = self.debug
return result
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
@classmethod
def set_default(cls, default):
"""Set default instance of configuration.
It stores default configuration, which can be
returned by get_default_copy method.
:param default: object of Configuration
"""
cls._default = copy.deepcopy(default)
@classmethod
def get_default_copy(cls):
"""Return new instance of configuration.
This method returns newly created, based on default constructor,
object of Configuration class or returns a copy of default
configuration passed by the set_default method.
:return: The configuration object.
"""
if cls._default is not None:
return copy.deepcopy(cls._default)
return Configuration()
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook is not None:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
username = ""
if self.username is not None:
username = self.username
password = ""
if self.password is not None:
password = self.password
return urllib3.util.make_headers(basic_auth=username + ":" + password).get(
"authorization"
)
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
auth = {}
if "hapikey" in self.api_key:
auth["hapikey"] = {
"type": "api_key",
"in": "query",
"key": "hapikey",
"value": self.get_api_key_with_prefix("hapikey"),
}
if self.access_token is not None:
auth["oauth2"] = {
"type": "oauth2",
"in": "header",
"key": "Authorization",
"value": "Bearer " + self.access_token,
}
return auth
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return (
"Python SDK Debug Report:\n"
"OS: {env}\n"
"Python Version: {pyversion}\n"
"Version of the API: v3\n"
"SDK Package Version: 1.0.0".format(env=sys.platform, pyversion=sys.version)
)
def get_host_settings(self):
"""Gets an array of host settings
:return: An array of host settings
"""
return [
{
"url": "https://api.hubapi.com/",
"description": "No description provided",
}
]
def get_host_from_settings(self, index, variables=None):
"""Gets host URL based on the index and variables
:param index: array index of the host settings
:param variables: hash of variable and the corresponding value
:return: URL based on host settings
"""
variables = {} if variables is None else variables
servers = self.get_host_settings()
try:
server = servers[index]
except IndexError:
raise ValueError(
"Invalid index {0} when selecting the host settings. "
"Must be less than {1}".format(index, len(servers))
)
url = server["url"]
# go through variables and replace placeholders
for variable_name, variable in server["variables"].items():
used_value = variables.get(variable_name, variable["default_value"])
if "enum_values" in variable and used_value not in variable["enum_values"]:
raise ValueError(
"The variable `{0}` in the host URL has invalid value "
"{1}. Must be {2}.".format(
variable_name, variables[variable_name], variable["enum_values"]
)
)
url = url.replace("{" + variable_name + "}", used_value)
return url
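

# --- Illustrative usage sketch (added; not part of the generated file) ------
# A hedged example of configuring the client with an OAuth access token and
# debug logging; the token value and log file name are placeholders.
#
#   config = Configuration()
#   config.access_token = "<oauth-access-token>"
#   config.debug = True                      # enables urllib3/httplib logging
#   config.logger_file = "hubspot_files.log" # optional: also log to a file
#   Configuration.set_default(config)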
|
py | 1a3e265f1638d52745533d81e8c4dff9afe14995 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.fitting.modelgenerators.grid Contains the GridModelGenerator class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from ....core.tools.logging import log
from .generator import ModelGenerator
# -----------------------------------------------------------------
class GridModelGenerator(ModelGenerator):
"""
This class...
"""
def __init__(self):
"""
The constructor ...
:return:
"""
# Call the constructor of the base class
super(GridModelGenerator, self).__init__()
# -- Attributes --
# -----------------------------------------------------------------
def setup(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
|
py | 1a3e2772c2d9db914cd843f7632cbe6d57cae75e | import requests as req
import json
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.conf.urls import url
from django.conf import settings
from django.core.mail import send_mail
client_id = settings.FENIX_CLIENT_ID
clientSecret = settings.FENIX_CLIENT_SECRET
redirect_uri = settings.URL_HOST + settings.FENIX_REDIRECT_URL_PATH
# Note, make sure that you exported the URL_HOST variable, otherwise localhost will be the default
print("*SETUP* redirect_url:" + str(redirect_uri)) # debug
fenixLoginpage = settings.FENIX_LOGIN
fenixacesstokenpage = settings.FENIX_URL_TOKEN
RequestPage = fenixLoginpage % (client_id, redirect_uri)
def login_fenix_oauth(request):
    from helios_auth.views import after  # imported inside the function: if django is set up synchronously, the apps are not loaded yet at module import time
from helios_auth import url_names
code = request.GET.get('code') # registration code used to obtain the access token
payload = {'client_id': client_id, 'client_secret': clientSecret, 'redirect_uri' : redirect_uri, 'code' : code, 'grant_type': 'authorization_code'}
response = req.post(fenixacesstokenpage, params = payload)
if(response.status_code == 200):
r_token = response.json()
params = {'access_token': r_token['access_token']}
#print("login_fenix_0auth() - OUATH PARAMS",params) # debug
request.session['access_token_fenix'] =r_token['access_token'] # save token
request.session['auth_system_name']='fenix'
return HttpResponseRedirect(reverse(url_names.AUTH_AFTER))
else:
print("login_fenix_0auth() - OAUTH FAILED")
def get_auth_url(request, redirect_url = None):
# the app redirects the user to the FENIX login page
return RequestPage
def get_user_info_after_auth(request):
token = request.session['access_token_fenix'] # token saved in the current session
params = {'access_token': token}
resp = req.get("https://fenix.tecnico.ulisboa.pt/api/fenix/v1/person", params = params)
#print("\n\n", "get_user_info_after_auth() - FENIX RESPONSE", resp.json()["username"])
r_info = resp.json() # user data from Fenix
del request.session['access_token_fenix']
obj = {'type': 'fenix', 'user_id' : json.dumps(r_info["username"]),'name':r_info["name"],'info':{'email': r_info["email"]}, 'token': None}
return obj
def send_message(user_id, name, user_info, subject, body):
    # send an email to the user; user_id is used as the recipient address.
send_mail(subject, body, settings.SERVER_EMAIL, ["%s <%s>" % (name, user_id)], fail_silently=False)
#
# Election Creation
#
def can_create_election(user_id, user_info):
return True
FENIX_LOGIN = 'auth@fenix@login'
#^ matches the start of the string. This urlpattern must be included in urls.py
urlpatterns = [
url(r'^fenix/login', login_fenix_oauth, name=FENIX_LOGIN),
]
|
py | 1a3e278472c0fc9eb6b9de6b5a4c9515b9e8ccdb | # -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
class PasswordConfig(Config):
"""Password login configuration
"""
def read_config(self, config):
password_config = config.get("password_config", {})
self.password_enabled = password_config.get("enabled", True)
def default_config(self, config_dir_path, server_name, **kwargs):
return """
# Enable password for login.
password_config:
enabled: true
"""
|
py | 1a3e27da4898817d7f6e2d491b9711b21b749faa | # coding: utf-8
"""
Translator Knowledge Beacon Aggregator API
This is the Translator Knowledge Beacon Aggregator web service application programming interface (API) that provides integrated access to a pool of knowledge sources publishing concepts and relations through the Translator Knowledge Beacon API. This API is similar to that of the latter mentioned API with the addition of some extra informative endpoints plus session identifier and beacon indices. These latter identifiers are locally assigned numeric indices provided to track the use of specific registered beacons within the aggregator API itself. # noqa: E501
OpenAPI spec version: 1.1.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ClientStatementsQueryBeaconStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'beacon': 'int',
'count': 'int',
'status': 'int'
}
attribute_map = {
'beacon': 'beacon',
'count': 'count',
'status': 'status'
}
def __init__(self, beacon=None, count=None, status=None): # noqa: E501
"""ClientStatementsQueryBeaconStatus - a model defined in OpenAPI""" # noqa: E501
self._beacon = None
self._count = None
self._status = None
self.discriminator = None
if beacon is not None:
self.beacon = beacon
if count is not None:
self.count = count
if status is not None:
self.status = status
@property
def beacon(self):
"""Gets the beacon of this ClientStatementsQueryBeaconStatus. # noqa: E501
Index number of beacon providing these statements # noqa: E501
:return: The beacon of this ClientStatementsQueryBeaconStatus. # noqa: E501
:rtype: int
"""
return self._beacon
@beacon.setter
def beacon(self, beacon):
"""Sets the beacon of this ClientStatementsQueryBeaconStatus.
Index number of beacon providing these statements # noqa: E501
:param beacon: The beacon of this ClientStatementsQueryBeaconStatus. # noqa: E501
:type: int
"""
self._beacon = beacon
@property
def count(self):
"""Gets the count of this ClientStatementsQueryBeaconStatus. # noqa: E501
When a 200 status code is returned, this integer designates the number of statements matched by the query for the given beacon. # noqa: E501
:return: The count of this ClientStatementsQueryBeaconStatus. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this ClientStatementsQueryBeaconStatus.
When a 200 status code is returned, this integer designates the number of statements matched by the query for the given beacon. # noqa: E501
:param count: The count of this ClientStatementsQueryBeaconStatus. # noqa: E501
:type: int
"""
self._count = count
@property
def status(self):
"""Gets the status of this ClientStatementsQueryBeaconStatus. # noqa: E501
Http code status of beacon API - 200 means 'data ready', 102 means 'query in progress', other codes (e.g. 500) are server errors. Once a beacon has a '200' success code, then the /statements/data endpoint may be used to retrieve it. # noqa: E501
:return: The status of this ClientStatementsQueryBeaconStatus. # noqa: E501
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ClientStatementsQueryBeaconStatus.
Http code status of beacon API - 200 means 'data ready', 102 means 'query in progress', other codes (e.g. 500) are server errors. Once a beacon has a '200' success code, then the /statements/data endpoint may be used to retrieve it. # noqa: E501
:param status: The status of this ClientStatementsQueryBeaconStatus. # noqa: E501
:type: int
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ClientStatementsQueryBeaconStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a3e28e34be37c8b4b981a7f9bd30843f8bfb753 | # Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, Sequence, Union
import nnabla as nn
from nnabla_rl.environments.environment_info import EnvironmentInfo
from nnabla_rl.model_trainers.model_trainer import TrainingVariables, rnn_support
from nnabla_rl.model_trainers.q_value.quantile_distribution_function_trainer import (
QuantileDistributionFunctionTrainer, QuantileDistributionFunctionTrainerConfig)
from nnabla_rl.models import QuantileDistributionFunction
from nnabla_rl.utils.misc import create_variables
@dataclass
class QRDQNQTrainerConfig(QuantileDistributionFunctionTrainerConfig):
pass
class QRDQNQTrainer(QuantileDistributionFunctionTrainer):
# type declarations to type check with mypy
# NOTE: declared variables are instance variable and NOT class variable, unless it is marked with ClassVar
# See https://mypy.readthedocs.io/en/stable/class_basics.html for details
_target_function: QuantileDistributionFunction
_prev_target_rnn_states: Dict[str, Dict[str, nn.Variable]]
def __init__(self,
train_functions: Union[QuantileDistributionFunction, Sequence[QuantileDistributionFunction]],
solvers: Dict[str, nn.solver.Solver],
target_function: QuantileDistributionFunction,
env_info: EnvironmentInfo,
config: QRDQNQTrainerConfig = QRDQNQTrainerConfig()):
self._target_function = target_function
self._prev_target_rnn_states = {}
super(QRDQNQTrainer, self).__init__(train_functions, solvers, env_info, config)
def support_rnn(self) -> bool:
return True
def _compute_target(self, training_variables: TrainingVariables, **kwargs) -> nn.Variable:
gamma = training_variables.gamma
reward = training_variables.reward
non_terminal = training_variables.non_terminal
s_next = training_variables.s_next
prev_rnn_states = self._prev_target_rnn_states
train_rnn_states = training_variables.rnn_states
with rnn_support(self._target_function, prev_rnn_states, train_rnn_states, training_variables, self._config):
theta_j = self._target_function.max_q_quantiles(s_next)
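        # Distributional Bellman target applied element-wise to each target
        # quantile: Ttheta_j = r + gamma * theta_j for non-terminal transitions.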
Ttheta_j = reward + non_terminal * gamma * theta_j
return Ttheta_j
def _setup_training_variables(self, batch_size: int) -> TrainingVariables:
training_variables = super()._setup_training_variables(batch_size)
rnn_states = {}
if self._target_function.is_recurrent():
shapes = self._target_function.internal_state_shapes()
rnn_state_variables = create_variables(batch_size, shapes)
rnn_states[self._target_function.scope_name] = rnn_state_variables
training_variables.rnn_states.update(rnn_states)
return training_variables
|
py | 1a3e2c15f26e98409c62bc0910ed6565f3721e42 | """ Contains a class for logic of the Subjects.
"""
import os
import logging
import json
import pkg_resources
import mne
import meggie.utilities.filemanager as filemanager
from meggie.mainwindow.dynamic import find_all_datatype_specs
class Subject:
""" The class for holding subject-specific information
and subject-specific data.
Parameters
----------
experiment : meggie.experiment.Experiment
        The experiment to which the subject belongs.
name : str
Name of the subject.
raw_fname : str
Path to the subject data.
uid : str
A unique identifier to differentiate between subjects that have
same name.
ica_applied : bool
Whether ICA has been applied (at least once) to this data.
rereferenced : bool
Whether the data has been rereferenced (at least once).
"""
def __init__(self, experiment, name, raw_fname, uid,
ica_applied=False, rereferenced=False):
self.name = name
self.raw_fname = raw_fname
self.uid = uid
self._raw = None
self.ica_applied = ica_applied
self.rereferenced = rereferenced
self.path = os.path.join(experiment.path,
name)
datatype_specs = find_all_datatype_specs()
for source, package, datatype_spec in datatype_specs.values():
datatype = datatype_spec['id']
dir_ = datatype_spec['dir']
setattr(self, datatype, dict())
setattr(self, datatype + '_directory',
os.path.join(self.path, dir_))
def add(self, dataobject, datatype):
""" Adds a dataobject of type datatype to the subject.
Parameters
----------
dataobject : instance of a datatype
A data object.
datatype : str
Name of the datatype.
"""
container = getattr(self, datatype)
name = dataobject.name
container[name] = dataobject
def remove(self, name, datatype):
""" Removes a dataobject by name from the subject.
Parameters
----------
name : str
Name of the data object.
datatype : str
Name of the datatype.
"""
container = getattr(self, datatype)
dataobject = container.pop(name, None)
try:
dataobject.delete_content()
except Exception as exc:
logging.getLogger('ui_logger').exception('')
raise IOError('Could not delete ' + str(datatype) +
' from folders')
@property
def raw_path(self):
""" Returns the raw path."""
path = os.path.join(self.path,
self.raw_fname)
return path
def get_raw(self, preload=True, verbose='warning'):
""" Gets the raw object for the subject.
Reads from the file system if not in the memory already.
Parameters
----------
preload : bool
Whether to read the data or only the metadata.
verbose : str
Verbose level of read_raw.
Returns
-------
mne.io.Raw
The raw object.
"""
if self._raw is not None:
if preload:
self._raw.load_data()
return self._raw
else:
try:
raw = filemanager.open_raw(self.raw_path, preload=preload,
verbose=verbose)
except OSError:
raise IOError("Could not find the raw file.")
self._raw = raw
return raw
def save(self):
""" Saves the data to the existing path. """
try:
filemanager.save_raw(self._raw, self.raw_path)
except Exception as exc:
raise Exception("Could not save the raw file. Please ensure "
"that the entire experiment folder has "
"write permissions.")
def release_memory(self):
""" Releases data from the memory.
"""
if self._raw is not None:
self._raw = None
@property
def has_eeg(self):
""" Checks if the raw has eeg data present
"""
raw = self.get_raw(preload=False)
channels = mne.pick_types(raw.info, eeg=True, meg=False)
if len(channels) == 0:
return False
return True
@property
def sss_applied(self):
"""Checks if sss applied.
"""
try:
raw = self.get_raw()
for item in raw.info['proc_history']:
if 'maxfilter' in item.get('creator', []):
return True
except Exception as exc:
return False
return False
def ensure_folders(self):
""" When called, checks that the subject folder with all datatype folders
exist and if not, creates them.
"""
paths = []
datatype_specs = find_all_datatype_specs()
for source, package, datatype_spec in datatype_specs.values():
datatype = datatype_spec['id']
path = getattr(self, datatype + '_directory')
paths.append(path)
try:
filemanager.ensure_folders(
[self.path] + paths)
except OSError:
raise OSError("Couldn't create all the necessary folders. "
"Please ensure that the experiment folder "
"has write permissions everywhere.")
|
py | 1a3e2d00fe06d72f8b178454af710b65c040bfcb | # Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import wsgi as base_wsgi
import routes
import six
import webob
import webob.exc as webexc
import webtest
import neutron
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.common import exceptions
from neutron import manager
from neutron.plugins.common import constants
from neutron import quota
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit import extension_stubs as ext_stubs
import neutron.tests.unit.extensions
from neutron.tests.unit.extensions import extendedattribute as extattr
from neutron.tests.unit import testlib_api
from neutron import wsgi
LOG = logging.getLogger(__name__)
_uuid = test_base._uuid
_get_path = test_base._get_path
extensions_path = ':'.join(neutron.tests.unit.extensions.__path__)
class ExtensionsTestApp(base_wsgi.Router):
def __init__(self, options=None):
options = options or {}
mapper = routes.Mapper()
controller = ext_stubs.StubBaseAppController()
mapper.resource("dummy_resource", "/dummy_resources",
controller=controller)
super(ExtensionsTestApp, self).__init__(mapper)
class FakePluginWithExtension(object):
"""A fake plugin used only for extension testing in this file."""
supported_extension_aliases = ["FOXNSOX"]
def method_to_support_foxnsox_extension(self, context):
self._log("method_to_support_foxnsox_extension", context)
class ExtensionPathTest(base.BaseTestCase):
def setUp(self):
self.base_path = extensions.get_extensions_path()
super(ExtensionPathTest, self).setUp()
def test_get_extensions_path_with_plugins(self):
path = extensions.get_extensions_path(
{constants.CORE: FakePluginWithExtension()})
self.assertEqual(path,
'%s:neutron/tests/unit/extensions' % self.base_path)
def test_get_extensions_path_no_extensions(self):
# Reset to default value, as it's overridden by base class
cfg.CONF.set_override('api_extensions_path', '')
path = extensions.get_extensions_path()
self.assertEqual(path, self.base_path)
def test_get_extensions_path_single_extension(self):
cfg.CONF.set_override('api_extensions_path', 'path1')
path = extensions.get_extensions_path()
self.assertEqual(path, '%s:path1' % self.base_path)
def test_get_extensions_path_multiple_extensions(self):
cfg.CONF.set_override('api_extensions_path', 'path1:path2')
path = extensions.get_extensions_path()
self.assertEqual(path, '%s:path1:path2' % self.base_path)
def test_get_extensions_path_duplicate_extensions(self):
cfg.CONF.set_override('api_extensions_path', 'path1:path1')
path = extensions.get_extensions_path()
self.assertEqual(path, '%s:path1' % self.base_path)
class PluginInterfaceTest(base.BaseTestCase):
def test_issubclass_hook(self):
class A(object):
def f(self):
pass
class B(extensions.PluginInterface):
@abc.abstractmethod
def f(self):
pass
self.assertTrue(issubclass(A, B))
def test_issubclass_hook_class_without_abstract_methods(self):
class A(object):
def f(self):
pass
class B(extensions.PluginInterface):
def f(self):
pass
self.assertFalse(issubclass(A, B))
def test_issubclass_hook_not_all_methods_implemented(self):
class A(object):
def f(self):
pass
class B(extensions.PluginInterface):
@abc.abstractmethod
def f(self):
pass
@abc.abstractmethod
def g(self):
pass
self.assertFalse(issubclass(A, B))
class ResourceExtensionTest(base.BaseTestCase):
class ResourceExtensionController(wsgi.Controller):
def index(self, request):
return "resource index"
def show(self, request, id):
return {'data': {'id': id}}
def notimplemented_function(self, request, id):
return webob.exc.HTTPNotImplemented()
def custom_member_action(self, request, id):
return {'member_action': 'value'}
def custom_collection_action(self, request, **kwargs):
return {'collection': 'value'}
class DummySvcPlugin(wsgi.Controller):
def get_plugin_type(self):
return constants.DUMMY
def index(self, request, **kwargs):
return "resource index"
def custom_member_action(self, request, **kwargs):
return {'member_action': 'value'}
def collection_action(self, request, **kwargs):
return {'collection': 'value'}
def show(self, request, id):
return {'data': {'id': id}}
def test_exceptions_notimplemented(self):
controller = self.ResourceExtensionController()
member = {'notimplemented_function': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
member_actions=member)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
# Ideally we would check for a 501 code here but webtest doesn't take
# anything that is below 200 or above 400 so we can't actually check
# it. It throws webtest.AppError instead.
try:
test_app.get("/tweedles/some_id/notimplemented_function")
# Shouldn't be reached
self.assertTrue(False)
except webtest.AppError as e:
self.assertIn('501', str(e))
def test_resource_can_be_added_as_extension(self):
res_ext = extensions.ResourceExtension(
'tweedles', self.ResourceExtensionController())
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
index_response = test_app.get("/tweedles")
self.assertEqual(200, index_response.status_int)
self.assertEqual(b"resource index", index_response.body)
show_response = test_app.get("/tweedles/25266")
self.assertEqual({'data': {'id': "25266"}}, show_response.json)
def test_resource_gets_prefix_of_plugin(self):
class DummySvcPlugin(wsgi.Controller):
def index(self, request):
return ""
def get_plugin_type(self):
return constants.DUMMY
res_ext = extensions.ResourceExtension(
'tweedles', DummySvcPlugin(), path_prefix="/dummy_svc")
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
index_response = test_app.get("/dummy_svc/tweedles")
self.assertEqual(200, index_response.status_int)
def test_resource_extension_with_custom_member_action(self):
controller = self.ResourceExtensionController()
member = {'custom_member_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
member_actions=member)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/some_id/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
def test_resource_ext_with_custom_member_action_gets_plugin_prefix(self):
controller = self.DummySvcPlugin()
member = {'custom_member_action': "GET"}
collections = {'collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
path_prefix="/dummy_svc",
member_actions=member,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/dummy_svc/tweedles/1/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
response = test_app.get("/dummy_svc/tweedles/collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'],
"value")
def test_plugin_prefix_with_parent_resource(self):
controller = self.DummySvcPlugin()
parent = dict(member_name="tenant",
collection_name="tenants")
member = {'custom_member_action': "GET"}
collections = {'collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller, parent,
path_prefix="/dummy_svc",
member_actions=member,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
index_response = test_app.get("/dummy_svc/tenants/1/tweedles")
self.assertEqual(200, index_response.status_int)
response = test_app.get("/dummy_svc/tenants/1/"
"tweedles/1/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
response = test_app.get("/dummy_svc/tenants/2/"
"tweedles/collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'],
"value")
def test_resource_extension_for_get_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
LOG.debug(jsonutils.loads(response.body))
self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
def test_resource_extension_for_put_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "PUT"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.put("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
def test_resource_extension_for_post_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "POST"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.post("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
def test_resource_extension_for_delete_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "DELETE"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.delete("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
def test_resource_ext_for_formatted_req_on_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/custom_collection_action.json")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
def test_resource_ext_for_nested_resource_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "GET"}
parent = dict(collection_name='beetles', member_name='beetle')
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections,
parent=parent)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/beetles/beetle_id"
"/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
def test_resource_extension_with_custom_member_action_and_attr_map(self):
controller = self.ResourceExtensionController()
member = {'custom_member_action': "GET"}
params = {
'tweedles': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
}
}
res_ext = extensions.ResourceExtension('tweedles', controller,
member_actions=member,
attr_map=params)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/some_id/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
def test_returns_404_for_non_existent_extension(self):
test_app = _setup_extensions_test_app(SimpleExtensionManager(None))
response = test_app.get("/non_extistant_extension", status='*')
self.assertEqual(404, response.status_int)
class ActionExtensionTest(base.BaseTestCase):
def setUp(self):
super(ActionExtensionTest, self).setUp()
self.extension_app = _setup_extensions_test_app()
def test_extended_action_for_adding_extra_data(self):
action_name = 'FOXNSOX:add_tweedle'
action_params = dict(name='Beetle')
req_body = jsonutils.dumps({action_name: action_params})
response = self.extension_app.post('/dummy_resources/1/action',
req_body,
content_type='application/json')
self.assertEqual(b"Tweedle Beetle Added.", response.body)
def test_extended_action_for_deleting_extra_data(self):
action_name = 'FOXNSOX:delete_tweedle'
action_params = dict(name='Bailey')
req_body = jsonutils.dumps({action_name: action_params})
response = self.extension_app.post("/dummy_resources/1/action",
req_body,
content_type='application/json')
self.assertEqual(b"Tweedle Bailey Deleted.", response.body)
def test_returns_404_for_non_existent_action(self):
non_existent_action = 'blah_action'
action_params = dict(name="test")
req_body = jsonutils.dumps({non_existent_action: action_params})
response = self.extension_app.post("/dummy_resources/1/action",
req_body,
content_type='application/json',
status='*')
self.assertEqual(404, response.status_int)
def test_returns_404_for_non_existent_resource(self):
action_name = 'add_tweedle'
action_params = dict(name='Beetle')
req_body = jsonutils.dumps({action_name: action_params})
response = self.extension_app.post("/asdf/1/action", req_body,
content_type='application/json',
status='*')
self.assertEqual(404, response.status_int)
class RequestExtensionTest(base.BaseTestCase):
def test_headers_can_be_extended(self):
def extend_headers(req, res):
assert req.headers['X-NEW-REQUEST-HEADER'] == "sox"
res.headers['X-NEW-RESPONSE-HEADER'] = "response_header_data"
return res
app = self._setup_app_with_request_handler(extend_headers, 'GET')
response = app.get("/dummy_resources/1",
headers={'X-NEW-REQUEST-HEADER': "sox"})
self.assertEqual(response.headers['X-NEW-RESPONSE-HEADER'],
"response_header_data")
def test_extend_get_resource_response(self):
def extend_response_data(req, res):
data = jsonutils.loads(res.body)
data['FOXNSOX:extended_key'] = req.GET.get('extended_key')
res.body = jsonutils.dumps(data).encode('utf-8')
return res
app = self._setup_app_with_request_handler(extend_response_data, 'GET')
response = app.get("/dummy_resources/1?extended_key=extended_data")
self.assertEqual(200, response.status_int)
response_data = jsonutils.loads(response.body)
self.assertEqual('extended_data',
response_data['FOXNSOX:extended_key'])
self.assertEqual('knox', response_data['fort'])
def test_get_resources(self):
app = _setup_extensions_test_app()
response = app.get("/dummy_resources/1?chewing=newblue")
response_data = jsonutils.loads(response.body)
self.assertEqual('newblue', response_data['FOXNSOX:googoose'])
self.assertEqual("Pig Bands!", response_data['FOXNSOX:big_bands'])
def test_edit_previously_uneditable_field(self):
def _update_handler(req, res):
data = jsonutils.loads(res.body)
data['uneditable'] = req.params['uneditable']
res.body = jsonutils.dumps(data).encode('utf-8')
return res
base_app = webtest.TestApp(setup_base_app(self))
response = base_app.put("/dummy_resources/1",
{'uneditable': "new_value"})
self.assertEqual(response.json['uneditable'], "original_value")
ext_app = self._setup_app_with_request_handler(_update_handler,
'PUT')
ext_response = ext_app.put("/dummy_resources/1",
{'uneditable': "new_value"})
self.assertEqual(ext_response.json['uneditable'], "new_value")
def _setup_app_with_request_handler(self, handler, verb):
req_ext = extensions.RequestExtension(verb,
'/dummy_resources/:(id)',
handler)
manager = SimpleExtensionManager(None, None, req_ext)
return _setup_extensions_test_app(manager)
class ExtensionManagerTest(base.BaseTestCase):
def test_missing_required_extensions_raise_error(self):
ext_mgr = extensions.ExtensionManager('')
attr_map = {}
ext_mgr.add_extension(ext_stubs.StubExtensionWithReqs('foo_alias'))
self.assertRaises(exceptions.ExtensionsNotFound,
ext_mgr.extend_resources, "2.0", attr_map)
def test_missing_required_extensions_gracefully_error(self):
ext_mgr = extensions.ExtensionManager('')
attr_map = {}
default_ext = list(constants.DEFAULT_SERVICE_PLUGINS.values())[0]
ext_mgr.add_extension(ext_stubs.StubExtensionWithReqs(default_ext))
ext_mgr.extend_resources("2.0", attr_map)
self.assertIn(default_ext, ext_mgr.extensions)
def test_invalid_extensions_are_not_registered(self):
class InvalidExtension(object):
"""Invalid extension.
This Extension doesn't implement extension methods :
get_name, get_description and get_updated
"""
def get_alias(self):
return "invalid_extension"
ext_mgr = extensions.ExtensionManager('')
ext_mgr.add_extension(InvalidExtension())
ext_mgr.add_extension(ext_stubs.StubExtension("valid_extension"))
self.assertIn('valid_extension', ext_mgr.extensions)
self.assertNotIn('invalid_extension', ext_mgr.extensions)
def test_assignment_of_attr_map(self):
"""Unit test for bug 1443342
In this bug, an extension that extended multiple resources with the
same dict would cause future extensions to inadvertently modify the
resources of all of the resources since they were referencing the same
dictionary.
"""
class MultiResourceExtension(ext_stubs.StubExtension):
"""Generated Extended Resources.
This extension's extended resource will assign
to more than one resource.
"""
def get_extended_resources(self, version):
EXTENDED_TIMESTAMP = {
'created_at': {'allow_post': False, 'allow_put': False,
'is_visible': True}}
EXTENDED_RESOURCES = ["ext1", "ext2"]
attrs = {}
for resources in EXTENDED_RESOURCES:
attrs[resources] = EXTENDED_TIMESTAMP
return attrs
class AttrExtension(ext_stubs.StubExtension):
def get_extended_resources(self, version):
attrs = {
self.alias: {
'%s-attr' % self.alias: {'allow_post': False,
'allow_put': False,
'is_visible': True}}}
return attrs
ext_mgr = extensions.ExtensionManager('')
attr_map = {}
ext_mgr.add_extension(MultiResourceExtension('timestamp'))
ext_mgr.extend_resources("2.0", attr_map)
ext_mgr.add_extension(AttrExtension("ext1"))
ext_mgr.add_extension(AttrExtension("ext2"))
ext_mgr.extend_resources("2.0", attr_map)
self.assertIn('created_at', attr_map['ext2'])
self.assertIn('created_at', attr_map['ext1'])
# now we need to make sure the attrextensions didn't leak across
self.assertNotIn('ext1-attr', attr_map['ext2'])
self.assertNotIn('ext2-attr', attr_map['ext1'])
class PluginAwareExtensionManagerTest(base.BaseTestCase):
def test_unsupported_extensions_are_not_loaded(self):
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1", "e3"])
plugin_info = {constants.CORE: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.StubExtension("e1"))
ext_mgr.add_extension(ext_stubs.StubExtension("e2"))
ext_mgr.add_extension(ext_stubs.StubExtension("e3"))
self.assertIn("e1", ext_mgr.extensions)
self.assertNotIn("e2", ext_mgr.extensions)
self.assertIn("e3", ext_mgr.extensions)
def test_extensions_are_not_loaded_for_plugins_unaware_of_extensions(self):
class ExtensionUnawarePlugin(object):
"""This plugin does not implement supports_extension method.
Extensions will not be loaded when this plugin is used.
"""
pass
plugin_info = {constants.CORE: ExtensionUnawarePlugin()}
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.StubExtension("e1"))
self.assertNotIn("e1", ext_mgr.extensions)
def test_extensions_not_loaded_for_plugin_without_expected_interface(self):
class PluginWithoutExpectedIface(object):
"""Does not implement get_foo method as expected by extension."""
supported_extension_aliases = ["supported_extension"]
plugin_info = {constants.CORE: PluginWithoutExpectedIface()}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface(
"supported_extension"))
self.assertNotIn("e1", ext_mgr.extensions)
def test_extensions_are_loaded_for_plugin_with_expected_interface(self):
class PluginWithExpectedInterface(object):
"""Implements get_foo method as expected by extension."""
supported_extension_aliases = ["supported_extension"]
def get_foo(self, bar=None):
pass
plugin_info = {constants.CORE: PluginWithExpectedInterface()}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface(
"supported_extension"))
self.assertIn("supported_extension", ext_mgr.extensions)
def test_extensions_expecting_neutron_plugin_interface_are_loaded(self):
        class ExtensionForQuantumPluginInterface(ext_stubs.StubExtension):
"""This Extension does not implement get_plugin_interface method.
This will work with any plugin implementing NeutronPluginBase
"""
pass
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
plugin_info = {constants.CORE: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
            ext_mgr.add_extension(ExtensionForQuantumPluginInterface("e1"))
self.assertIn("e1", ext_mgr.extensions)
    def test_extensions_without_need_for_plugin_interface_are_loaded(self):
class ExtensionWithNoNeedForPluginInterface(ext_stubs.StubExtension):
"""This Extension does not need any plugin interface.
This will work with any plugin implementing NeutronPluginBase
"""
def get_plugin_interface(self):
return None
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
plugin_info = {constants.CORE: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ExtensionWithNoNeedForPluginInterface("e1"))
self.assertIn("e1", ext_mgr.extensions)
def test_extension_loaded_for_non_core_plugin(self):
        class NonCorePluginExtension(ext_stubs.StubExtension):
def get_plugin_interface(self):
return None
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
plugin_info = {constants.DUMMY: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
            ext_mgr.add_extension(NonCorePluginExtension("e1"))
self.assertIn("e1", ext_mgr.extensions)
def test_unloaded_supported_extensions_raises_exception(self):
stub_plugin = ext_stubs.StubPlugin(
supported_extensions=["unloaded_extension"])
plugin_info = {constants.CORE: stub_plugin}
self.assertRaises(exceptions.ExtensionsNotFound,
extensions.PluginAwareExtensionManager,
'', plugin_info)
class ExtensionControllerTest(testlib_api.WebTestCase):
def setUp(self):
super(ExtensionControllerTest, self).setUp()
self.test_app = _setup_extensions_test_app()
    def test_index_gets_all_registered_extensions(self):
response = self.test_app.get("/extensions." + self.fmt)
res_body = self.deserialize(response)
foxnsox = res_body["extensions"][0]
self.assertEqual(foxnsox["alias"], "FOXNSOX")
def test_extension_can_be_accessed_by_alias(self):
response = self.test_app.get("/extensions/FOXNSOX." + self.fmt)
foxnsox_extension = self.deserialize(response)
foxnsox_extension = foxnsox_extension['extension']
self.assertEqual(foxnsox_extension["alias"], "FOXNSOX")
def test_show_returns_not_found_for_non_existent_extension(self):
response = self.test_app.get("/extensions/non_existent" + self.fmt,
status="*")
self.assertEqual(response.status_int, 404)
def app_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
return ExtensionsTestApp(conf)
def setup_base_app(test):
base.BaseTestCase.config_parse()
app = config.load_paste_app('extensions_test_app')
return app
def setup_extensions_middleware(extension_manager=None):
extension_manager = (extension_manager or
extensions.PluginAwareExtensionManager(
extensions_path,
{constants.CORE: FakePluginWithExtension()}))
base.BaseTestCase.config_parse()
app = config.load_paste_app('extensions_test_app')
return extensions.ExtensionMiddleware(app, ext_mgr=extension_manager)
def _setup_extensions_test_app(extension_manager=None):
return webtest.TestApp(setup_extensions_middleware(extension_manager))
class SimpleExtensionManager(object):
def __init__(self, resource_ext=None, action_ext=None, request_ext=None):
self.resource_ext = resource_ext
self.action_ext = action_ext
self.request_ext = request_ext
def get_resources(self):
resource_exts = []
if self.resource_ext:
resource_exts.append(self.resource_ext)
return resource_exts
def get_actions(self):
action_exts = []
if self.action_ext:
action_exts.append(self.action_ext)
return action_exts
def get_request_extensions(self):
request_extensions = []
if self.request_ext:
request_extensions.append(self.request_ext)
return request_extensions
class ExtensionExtendedAttributeTestPlugin(object):
supported_extension_aliases = [
        'ext-obj-test', 'extended-ext-attr'
]
def __init__(self, configfile=None):
        super(ExtensionExtendedAttributeTestPlugin, self).__init__()
self.objs = []
self.objh = {}
def create_ext_test_resource(self, context, ext_test_resource):
obj = ext_test_resource['ext_test_resource']
id = _uuid()
obj['id'] = id
self.objs.append(obj)
self.objh.update({id: obj})
return obj
def get_ext_test_resources(self, context, filters=None, fields=None):
return self.objs
def get_ext_test_resource(self, context, id, fields=None):
return self.objh[id]
class ExtensionExtendedAttributeTestCase(base.BaseTestCase):
def setUp(self):
super(ExtensionExtendedAttributeTestCase, self).setUp()
plugin = (
"neutron.tests.unit.api.test_extensions."
"ExtensionExtendedAttributeTestPlugin"
)
# point config file to: neutron/tests/etc/neutron.conf
self.config_parse()
self.setup_coreplugin(plugin)
ext_mgr = extensions.PluginAwareExtensionManager(
extensions_path,
{constants.CORE: ExtensionExtendedAttributeTestPlugin()}
)
ext_mgr.extend_resources("2.0", {})
extensions.PluginAwareExtensionManager._instance = ext_mgr
app = config.load_paste_app('extensions_test_app')
self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1"
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP):
self.saved_attr_map[res] = attrs.copy()
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
extattr.EXTENDED_ATTRIBUTES_2_0)
        self.agentscheduler_dbMixin = manager.NeutronManager.get_plugin()
self.addCleanup(self.restore_attribute_map)
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
def restore_attribute_map(self):
# Restore the original RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def _do_request(self, method, path, data=None, params=None, action=None):
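        # Serialize the body to JSON, push the request through the extension middleware
        # and return the decoded JSON response (nothing is returned for 204 No Content).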
content_type = 'application/json'
body = None
if data is not None: # empty dict is valid
body = wsgi.Serializer().serialize(data, content_type)
req = testlib_api.create_request(
path, body, content_type,
method, query_string=params)
res = req.get_response(self._api)
if res.status_code >= 400:
raise webexc.HTTPClientError(detail=res.body, code=res.status_code)
if res.status_code != webexc.HTTPNoContent.code:
return res.json
def _ext_test_resource_create(self, attr=None):
data = {
"ext_test_resource": {
"tenant_id": self._tenant_id,
"name": "test",
extattr.EXTENDED_ATTRIBUTE: attr
}
}
res = self._do_request('POST', _get_path('ext_test_resources'), data)
return res['ext_test_resource']
def test_ext_test_resource_create(self):
ext_test_resource = self._ext_test_resource_create()
attr = _uuid()
ext_test_resource = self._ext_test_resource_create(attr)
self.assertEqual(ext_test_resource[extattr.EXTENDED_ATTRIBUTE], attr)
def test_ext_test_resource_get(self):
attr = _uuid()
obj = self._ext_test_resource_create(attr)
obj_id = obj['id']
res = self._do_request('GET', _get_path(
'ext_test_resources/{0}'.format(obj_id)))
obj2 = res['ext_test_resource']
self.assertEqual(obj2[extattr.EXTENDED_ATTRIBUTE], attr)
|
py | 1a3e2dfc6372d5548c2e22c43dc286b22cc780cb | """Tests for Euclidean algorithms, GCDs, LCMs and polynomial remainder sequences. """
from sympy.polys.rings import ring
from sympy.polys.domains import ZZ, QQ, RR
from sympy.core.compatibility import range
from sympy.polys.specialpolys import (
f_polys,
dmp_fateman_poly_F_1,
dmp_fateman_poly_F_2,
dmp_fateman_poly_F_3)
f_0, f_1, f_2, f_3, f_4, f_5, f_6 = f_polys()
def test_dup_gcdex():
R, x = ring("x", QQ)
f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
g = x**3 + x**2 - 4*x - 4
s = -QQ(1,5)*x + QQ(3,5)
t = QQ(1,5)*x**2 - QQ(6,5)*x + 2
h = x + 1
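    # half_gcdex returns (s, h) with s*f == h (mod g); gcdex returns (s, t, h)
    # satisfying s*f + t*g == h, where h = gcd(f, g).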
assert R.dup_half_gcdex(f, g) == (s, h)
assert R.dup_gcdex(f, g) == (s, t, h)
f = x**4 + 4*x**3 - x + 1
g = x**3 - x + 1
s, t, h = R.dup_gcdex(f, g)
S, T, H = R.dup_gcdex(g, f)
assert R.dup_add(R.dup_mul(s, f),
R.dup_mul(t, g)) == h
assert R.dup_add(R.dup_mul(S, g),
R.dup_mul(T, f)) == H
f = 2*x
g = x**2 - 16
s = QQ(1,32)*x
t = -QQ(1,16)
h = 1
assert R.dup_half_gcdex(f, g) == (s, h)
assert R.dup_gcdex(f, g) == (s, t, h)
def test_dup_invert():
R, x = ring("x", QQ)
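    # dup_invert(f, g) returns s such that f*s == 1 (mod g).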
assert R.dup_invert(2*x, x**2 - 16) == QQ(1,32)*x
def test_dup_euclidean_prs():
R, x = ring("x", QQ)
f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
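    # The Euclidean PRS takes plain polynomial remainders, so the coefficients
    # quickly grow into large rationals.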
assert R.dup_euclidean_prs(f, g) == [
f,
g,
-QQ(5,9)*x**4 + QQ(1,9)*x**2 - QQ(1,3),
-QQ(117,25)*x**2 - 9*x + QQ(441,25),
QQ(233150,19773)*x - QQ(102500,6591),
-QQ(1288744821,543589225)]
def test_dup_primitive_prs():
R, x = ring("x", ZZ)
f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
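    # The primitive PRS divides each remainder by its content, keeping the
    # integer coefficients small.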
assert R.dup_primitive_prs(f, g) == [
f,
g,
-5*x**4 + x**2 - 3,
13*x**2 + 25*x - 49,
4663*x - 6150,
1]
def test_dup_subresultants():
R, x = ring("x", ZZ)
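    # The resultant involving the zero polynomial is defined to be 0.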
assert R.dup_resultant(0, 0) == 0
assert R.dup_resultant(1, 0) == 0
assert R.dup_resultant(0, 1) == 0
f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
a = 15*x**4 - 3*x**2 + 9
b = 65*x**2 + 125*x - 245
c = 9326*x - 12300
d = 260708
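    # Here the subresultant PRS ends in a constant, and that constant is the
    # resultant of f and g.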
assert R.dup_subresultants(f, g) == [f, g, a, b, c, d]
assert R.dup_resultant(f, g) == R.dup_LC(d)
f = x**2 - 2*x + 1
g = x**2 - 1
a = 2*x - 2
assert R.dup_subresultants(f, g) == [f, g, a]
assert R.dup_resultant(f, g) == 0
f = x**2 + 1
g = x**2 - 1
a = -2
assert R.dup_subresultants(f, g) == [f, g, a]
assert R.dup_resultant(f, g) == 4
f = x**2 - 1
g = x**3 - x**2 + 2
assert R.dup_resultant(f, g) == 0
f = 3*x**3 - x
g = 5*x**2 + 1
assert R.dup_resultant(f, g) == 64
f = x**2 - 2*x + 7
g = x**3 - x + 5
assert R.dup_resultant(f, g) == 265
f = x**3 - 6*x**2 + 11*x - 6
g = x**3 - 15*x**2 + 74*x - 120
assert R.dup_resultant(f, g) == -8640
f = x**3 - 6*x**2 + 11*x - 6
g = x**3 - 10*x**2 + 29*x - 20
assert R.dup_resultant(f, g) == 0
f = x**3 - 1
g = x**3 + 2*x**2 + 2*x - 1
assert R.dup_resultant(f, g) == 16
f = x**8 - 2
g = x - 1
assert R.dup_resultant(f, g) == -1
def test_dmp_subresultants():
R, x, y = ring("x,y", ZZ)
assert R.dmp_resultant(0, 0) == 0
assert R.dmp_prs_resultant(0, 0)[0] == 0
assert R.dmp_zz_collins_resultant(0, 0) == 0
assert R.dmp_qq_collins_resultant(0, 0) == 0
assert R.dmp_resultant(1, 0) == 0
assert R.dmp_resultant(1, 0) == 0
assert R.dmp_resultant(1, 0) == 0
assert R.dmp_resultant(0, 1) == 0
assert R.dmp_prs_resultant(0, 1)[0] == 0
assert R.dmp_zz_collins_resultant(0, 1) == 0
assert R.dmp_qq_collins_resultant(0, 1) == 0
f = 3*x**2*y - y**3 - 4
g = x**2 + x*y**3 - 9
a = 3*x*y**4 + y**3 - 27*y + 4
b = -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16
r = R.dmp_LC(b)
assert R.dmp_subresultants(f, g) == [f, g, a, b]
assert R.dmp_resultant(f, g) == r
assert R.dmp_prs_resultant(f, g)[0] == r
assert R.dmp_zz_collins_resultant(f, g) == r
assert R.dmp_qq_collins_resultant(f, g) == r
f = -x**3 + 5
g = 3*x**2*y + x**2
a = 45*y**2 + 30*y + 5
b = 675*y**3 + 675*y**2 + 225*y + 25
r = R.dmp_LC(b)
assert R.dmp_subresultants(f, g) == [f, g, a]
assert R.dmp_resultant(f, g) == r
assert R.dmp_prs_resultant(f, g)[0] == r
assert R.dmp_zz_collins_resultant(f, g) == r
assert R.dmp_qq_collins_resultant(f, g) == r
R, x, y, z, u, v = ring("x,y,z,u,v", ZZ)
f = 6*x**2 - 3*x*y - 2*x*z + y*z
g = x**2 - x*u - x*v + u*v
r = y**2*z**2 - 3*y**2*z*u - 3*y**2*z*v + 9*y**2*u*v - 2*y*z**2*u \
- 2*y*z**2*v + 6*y*z*u**2 + 12*y*z*u*v + 6*y*z*v**2 - 18*y*u**2*v \
- 18*y*u*v**2 + 4*z**2*u*v - 12*z*u**2*v - 12*z*u*v**2 + 36*u**2*v**2
assert R.dmp_zz_collins_resultant(f, g) == r.drop(x)
R, x, y, z, u, v = ring("x,y,z,u,v", QQ)
f = x**2 - QQ(1,2)*x*y - QQ(1,3)*x*z + QQ(1,6)*y*z
g = x**2 - x*u - x*v + u*v
r = QQ(1,36)*y**2*z**2 - QQ(1,12)*y**2*z*u - QQ(1,12)*y**2*z*v + QQ(1,4)*y**2*u*v \
- QQ(1,18)*y*z**2*u - QQ(1,18)*y*z**2*v + QQ(1,6)*y*z*u**2 + QQ(1,3)*y*z*u*v \
+ QQ(1,6)*y*z*v**2 - QQ(1,2)*y*u**2*v - QQ(1,2)*y*u*v**2 + QQ(1,9)*z**2*u*v \
- QQ(1,3)*z*u**2*v - QQ(1,3)*z*u*v**2 + u**2*v**2
assert R.dmp_qq_collins_resultant(f, g) == r.drop(x)
Rt, t = ring("t", ZZ)
Rx, x = ring("x", Rt)
f = x**6 - 5*x**4 + 5*x**2 + 4
g = -6*t*x**5 + x**4 + 20*t*x**3 - 3*x**2 - 10*t*x + 6
assert Rx.dup_resultant(f, g) == 2930944*t**6 + 2198208*t**4 + 549552*t**2 + 45796
def test_dup_discriminant():
R, x = ring("x", ZZ)
assert R.dup_discriminant(0) == 0
assert R.dup_discriminant(x) == 1
assert R.dup_discriminant(x**3 + 3*x**2 + 9*x - 13) == -11664
assert R.dup_discriminant(5*x**5 + x**3 + 2) == 31252160
assert R.dup_discriminant(x**4 + 2*x**3 + 6*x**2 - 22*x + 13) == 0
assert R.dup_discriminant(12*x**7 + 15*x**4 + 30*x**3 + x**2 + 1) == -220289699947514112
def test_dmp_discriminant():
R, x = ring("x", ZZ)
assert R.dmp_discriminant(0) == 0
R, x, y = ring("x,y", ZZ)
assert R.dmp_discriminant(0) == 0
assert R.dmp_discriminant(y) == 0
assert R.dmp_discriminant(x**3 + 3*x**2 + 9*x - 13) == -11664
assert R.dmp_discriminant(5*x**5 + x**3 + 2) == 31252160
assert R.dmp_discriminant(x**4 + 2*x**3 + 6*x**2 - 22*x + 13) == 0
assert R.dmp_discriminant(12*x**7 + 15*x**4 + 30*x**3 + x**2 + 1) == -220289699947514112
assert R.dmp_discriminant(x**2*y + 2*y) == (-8*y**2).drop(x)
assert R.dmp_discriminant(x*y**2 + 2*x) == 1
R, x, y, z = ring("x,y,z", ZZ)
assert R.dmp_discriminant(x*y + z) == 1
R, x, y, z, u = ring("x,y,z,u", ZZ)
assert R.dmp_discriminant(x**2*y + x*z + u) == (-4*y*u + z**2).drop(x)
R, x, y, z, u, v = ring("x,y,z,u,v", ZZ)
assert R.dmp_discriminant(x**3*y + x**2*z + x*u + v) == \
(-27*y**2*v**2 + 18*y*z*u*v - 4*y*u**3 - 4*z**3*v + z**2*u**2).drop(x)
def test_dup_gcd():
R, x = ring("x", ZZ)
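    # Both the heuristic and the PRS algorithms return (h, cff, cfg) with
    # h = gcd(f, g), f == h*cff and g == h*cfg.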
f, g = 0, 0
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (0, 0, 0)
f, g = 2, 0
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 1, 0)
f, g = -2, 0
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, -1, 0)
f, g = 0, -2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 0, -1)
f, g = 0, 2*x + 4
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2*x + 4, 0, 1)
f, g = 2*x + 4, 0
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2*x + 4, 1, 0)
f, g = 2, 2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 1, 1)
f, g = -2, 2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, -1, 1)
f, g = 2, -2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 1, -1)
f, g = -2, -2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, -1, -1)
f, g = x**2 + 2*x + 1, 1
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (1, x**2 + 2*x + 1, 1)
f, g = x**2 + 2*x + 1, 2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (1, x**2 + 2*x + 1, 2)
f, g = 2*x**2 + 4*x + 2, 2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, x**2 + 2*x + 1, 1)
f, g = 2, 2*x**2 + 4*x + 2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 1, x**2 + 2*x + 1)
f, g = 2*x**2 + 4*x + 2, x + 1
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (x + 1, 2*x + 2, 1)
f, g = x + 1, 2*x**2 + 4*x + 2
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (x + 1, 1, 2*x + 2)
f, g = x - 31, x
assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (1, f, g)
f = x**4 + 8*x**3 + 21*x**2 + 22*x + 8
g = x**3 + 6*x**2 + 11*x + 6
h = x**2 + 3*x + 2
cff = x**2 + 5*x + 4
cfg = x + 3
assert R.dup_zz_heu_gcd(f, g) == (h, cff, cfg)
assert R.dup_rr_prs_gcd(f, g) == (h, cff, cfg)
f = x**4 - 4
g = x**4 + 4*x**2 + 4
h = x**2 + 2
cff = x**2 - 2
cfg = x**2 + 2
assert R.dup_zz_heu_gcd(f, g) == (h, cff, cfg)
assert R.dup_rr_prs_gcd(f, g) == (h, cff, cfg)
f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
h = 1
cff = f
cfg = g
assert R.dup_zz_heu_gcd(f, g) == (h, cff, cfg)
assert R.dup_rr_prs_gcd(f, g) == (h, cff, cfg)
R, x = ring("x", QQ)
f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
h = 1
cff = f
cfg = g
assert R.dup_qq_heu_gcd(f, g) == (h, cff, cfg)
assert R.dup_ff_prs_gcd(f, g) == (h, cff, cfg)
R, x = ring("x", ZZ)
f = - 352518131239247345597970242177235495263669787845475025293906825864749649589178600387510272*x**49 \
+ 46818041807522713962450042363465092040687472354933295397472942006618953623327997952*x**42 \
+ 378182690892293941192071663536490788434899030680411695933646320291525827756032*x**35 \
+ 112806468807371824947796775491032386836656074179286744191026149539708928*x**28 \
- 12278371209708240950316872681744825481125965781519138077173235712*x**21 \
+ 289127344604779611146960547954288113529690984687482920704*x**14 \
+ 19007977035740498977629742919480623972236450681*x**7 \
+ 311973482284542371301330321821976049
g = 365431878023781158602430064717380211405897160759702125019136*x**21 \
+ 197599133478719444145775798221171663643171734081650688*x**14 \
- 9504116979659010018253915765478924103928886144*x**7 \
- 311973482284542371301330321821976049
assert R.dup_zz_heu_gcd(f, R.dup_diff(f, 1))[0] == g
assert R.dup_rr_prs_gcd(f, R.dup_diff(f, 1))[0] == g
R, x = ring("x", QQ)
f = QQ(1,2)*x**2 + x + QQ(1,2)
g = QQ(1,2)*x + QQ(1,2)
h = x + 1
assert R.dup_qq_heu_gcd(f, g) == (h, g, QQ(1,2))
assert R.dup_ff_prs_gcd(f, g) == (h, g, QQ(1,2))
R, x = ring("x", ZZ)
f = 1317378933230047068160*x + 2945748836994210856960
g = 120352542776360960*x + 269116466014453760
h = 120352542776360960*x + 269116466014453760
cff = 10946
cfg = 1
assert R.dup_zz_heu_gcd(f, g) == (h, cff, cfg)
def test_dmp_gcd():
R, x, y = ring("x,y", ZZ)
f, g = 0, 0
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (0, 0, 0)
f, g = 2, 0
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 1, 0)
f, g = -2, 0
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, -1, 0)
f, g = 0, -2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 0, -1)
f, g = 0, 2*x + 4
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2*x + 4, 0, 1)
f, g = 2*x + 4, 0
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2*x + 4, 1, 0)
f, g = 2, 2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 1, 1)
f, g = -2, 2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, -1, 1)
f, g = 2, -2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 1, -1)
f, g = -2, -2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, -1, -1)
f, g = x**2 + 2*x + 1, 1
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (1, x**2 + 2*x + 1, 1)
f, g = x**2 + 2*x + 1, 2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (1, x**2 + 2*x + 1, 2)
f, g = 2*x**2 + 4*x + 2, 2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, x**2 + 2*x + 1, 1)
f, g = 2, 2*x**2 + 4*x + 2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 1, x**2 + 2*x + 1)
f, g = 2*x**2 + 4*x + 2, x + 1
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (x + 1, 2*x + 2, 1)
f, g = x + 1, 2*x**2 + 4*x + 2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (x + 1, 1, 2*x + 2)
R, x, y, z, u = ring("x,y,z,u", ZZ)
f, g = u**2 + 2*u + 1, 2*u + 2
assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (u + 1, u + 1, 2)
f, g = z**2*u**2 + 2*z**2*u + z**2 + z*u + z, u**2 + 2*u + 1
h, cff, cfg = u + 1, z**2*u + z**2 + z, u + 1
assert R.dmp_zz_heu_gcd(f, g) == (h, cff, cfg)
assert R.dmp_rr_prs_gcd(f, g) == (h, cff, cfg)
assert R.dmp_zz_heu_gcd(g, f) == (h, cfg, cff)
assert R.dmp_rr_prs_gcd(g, f) == (h, cfg, cff)
R, x, y, z = ring("x,y,z", ZZ)
f, g, h = map(R.from_dense, dmp_fateman_poly_F_1(2, ZZ))
H, cff, cfg = R.dmp_zz_heu_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
H, cff, cfg = R.dmp_rr_prs_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
R, x, y, z, u, v = ring("x,y,z,u,v", ZZ)
f, g, h = map(R.from_dense, dmp_fateman_poly_F_1(4, ZZ))
H, cff, cfg = R.dmp_zz_heu_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
R, x, y, z, u, v, a, b = ring("x,y,z,u,v,a,b", ZZ)
f, g, h = map(R.from_dense, dmp_fateman_poly_F_1(6, ZZ))
H, cff, cfg = R.dmp_zz_heu_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
R, x, y, z, u, v, a, b, c, d = ring("x,y,z,u,v,a,b,c,d", ZZ)
f, g, h = map(R.from_dense, dmp_fateman_poly_F_1(8, ZZ))
H, cff, cfg = R.dmp_zz_heu_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
R, x, y, z = ring("x,y,z", ZZ)
f, g, h = map(R.from_dense, dmp_fateman_poly_F_2(2, ZZ))
H, cff, cfg = R.dmp_zz_heu_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
H, cff, cfg = R.dmp_rr_prs_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
f, g, h = map(R.from_dense, dmp_fateman_poly_F_3(2, ZZ))
H, cff, cfg = R.dmp_zz_heu_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
H, cff, cfg = R.dmp_rr_prs_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
R, x, y, z, u, v = ring("x,y,z,u,v", ZZ)
f, g, h = map(R.from_dense, dmp_fateman_poly_F_3(4, ZZ))
H, cff, cfg = R.dmp_inner_gcd(f, g)
assert H == h and R.dmp_mul(H, cff) == f \
and R.dmp_mul(H, cfg) == g
R, x, y = ring("x,y", QQ)
f = QQ(1,2)*x**2 + x + QQ(1,2)
g = QQ(1,2)*x + QQ(1,2)
h = x + 1
assert R.dmp_qq_heu_gcd(f, g) == (h, g, QQ(1,2))
assert R.dmp_ff_prs_gcd(f, g) == (h, g, QQ(1,2))
R, x, y = ring("x,y", RR)
f = 2.1*x*y**2 - 2.2*x*y + 2.1*x
g = 1.0*x**3
assert R.dmp_ff_prs_gcd(f, g) == \
(1.0*x, 2.1*y**2 - 2.2*y + 2.1, 1.0*x**2)
def test_dup_lcm():
R, x = ring("x", ZZ)
assert R.dup_lcm(2, 6) == 6
assert R.dup_lcm(2*x**3, 6*x) == 6*x**3
assert R.dup_lcm(2*x**3, 3*x) == 6*x**3
assert R.dup_lcm(x**2 + x, x) == x**2 + x
assert R.dup_lcm(x**2 + x, 2*x) == 2*x**2 + 2*x
assert R.dup_lcm(x**2 + 2*x, x) == x**2 + 2*x
assert R.dup_lcm(2*x**2 + x, x) == 2*x**2 + x
assert R.dup_lcm(2*x**2 + x, 2*x) == 4*x**2 + 2*x
def test_dmp_lcm():
R, x, y = ring("x,y", ZZ)
assert R.dmp_lcm(2, 6) == 6
assert R.dmp_lcm(x, y) == x*y
assert R.dmp_lcm(2*x**3, 6*x*y**2) == 6*x**3*y**2
assert R.dmp_lcm(2*x**3, 3*x*y**2) == 6*x**3*y**2
assert R.dmp_lcm(x**2*y, x*y**2) == x**2*y**2
f = 2*x*y**5 - 3*x*y**4 - 2*x*y**3 + 3*x*y**2
g = y**5 - 2*y**3 + y
h = 2*x*y**7 - 3*x*y**6 - 4*x*y**5 + 6*x*y**4 + 2*x*y**3 - 3*x*y**2
assert R.dmp_lcm(f, g) == h
f = x**3 - 3*x**2*y - 9*x*y**2 - 5*y**3
g = x**4 + 6*x**3*y + 12*x**2*y**2 + 10*x*y**3 + 3*y**4
h = x**5 + x**4*y - 18*x**3*y**2 - 50*x**2*y**3 - 47*x*y**4 - 15*y**5
assert R.dmp_lcm(f, g) == h
def test_dmp_content():
R, x,y = ring("x,y", ZZ)
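    # dmp_content is the GCD of the coefficients of the main variable, taken as
    # polynomials in the remaining variables.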
assert R.dmp_content(-2) == 2
f, g, F = 3*y**2 + 2*y + 1, 1, 0
for i in range(0, 5):
g *= f
F += x**i*g
assert R.dmp_content(F) == f.drop(x)
R, x,y,z = ring("x,y,z", ZZ)
assert R.dmp_content(f_4) == 1
assert R.dmp_content(f_5) == 1
R, x,y,z,t = ring("x,y,z,t", ZZ)
assert R.dmp_content(f_6) == 1
def test_dmp_primitive():
R, x,y = ring("x,y", ZZ)
assert R.dmp_primitive(0) == (0, 0)
assert R.dmp_primitive(1) == (1, 1)
f, g, F = 3*y**2 + 2*y + 1, 1, 0
for i in range(0, 5):
g *= f
F += x**i*g
assert R.dmp_primitive(F) == (f.drop(x), F / f)
R, x,y,z = ring("x,y,z", ZZ)
cont, f = R.dmp_primitive(f_4)
assert cont == 1 and f == f_4
cont, f = R.dmp_primitive(f_5)
assert cont == 1 and f == f_5
R, x,y,z,t = ring("x,y,z,t", ZZ)
cont, f = R.dmp_primitive(f_6)
assert cont == 1 and f == f_6
def test_dup_cancel():
R, x = ring("x", ZZ)
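    # dup_cancel removes the common factor of f and g and normalizes the signs.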
f = 2*x**2 - 2
g = x**2 - 2*x + 1
p = 2*x + 2
q = x - 1
assert R.dup_cancel(f, g) == (p, q)
assert R.dup_cancel(f, g, include=False) == (1, 1, p, q)
f = -x - 2
g = 3*x - 4
F = x + 2
G = -3*x + 4
assert R.dup_cancel(f, g) == (f, g)
assert R.dup_cancel(F, G) == (f, g)
assert R.dup_cancel(0, 0) == (0, 0)
assert R.dup_cancel(0, 0, include=False) == (1, 1, 0, 0)
assert R.dup_cancel(x, 0) == (1, 0)
assert R.dup_cancel(x, 0, include=False) == (1, 1, 1, 0)
assert R.dup_cancel(0, x) == (0, 1)
assert R.dup_cancel(0, x, include=False) == (1, 1, 0, 1)
f = 0
g = x
one = 1
assert R.dup_cancel(f, g, include=True) == (f, one)
def test_dmp_cancel():
R, x, y = ring("x,y", ZZ)
f = 2*x**2 - 2
g = x**2 - 2*x + 1
p = 2*x + 2
q = x - 1
assert R.dmp_cancel(f, g) == (p, q)
assert R.dmp_cancel(f, g, include=False) == (1, 1, p, q)
assert R.dmp_cancel(0, 0) == (0, 0)
assert R.dmp_cancel(0, 0, include=False) == (1, 1, 0, 0)
assert R.dmp_cancel(y, 0) == (1, 0)
assert R.dmp_cancel(y, 0, include=False) == (1, 1, 1, 0)
assert R.dmp_cancel(0, y) == (0, 1)
assert R.dmp_cancel(0, y, include=False) == (1, 1, 0, 1)
|
gyp | 1a3e2efe6f6a62802b4bbdaaff6d6e785dcc6326 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is the all.gyp file for Android, used to prevent breakage on Android and
# other platforms; it will be churning a lot in the short term and will
# eventually be merged into all.gyp.
{
'variables': {
# A hook that can be overridden in other repositories to add additional
# compilation targets to 'All'
'android_app_targets%': [],
},
'targets': [
{
'target_name': 'All',
'type': 'none',
'dependencies': [
'../content/content.gyp:content_shell_apk',
'<@(android_app_targets)',
'android_builder_tests',
'../android_webview/android_webview.gyp:android_webview_apk',
'../chrome/chrome.gyp:chromium_testshell',
        # TODO(nyquist) This should instead be a target for sync when all of
# the sync-related code for Android has been upstreamed.
# See http://crbug.com/159203
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_javalib',
],
}, # target_name: All
{
      # The current list of tests for Android. This is temporary
      # until the full set is supported. If adding a new test here,
# please also add it to build/android/run_tests.py, else the
# test is not run.
#
# WARNING:
# Do not add targets here without communicating the implications
# on tryserver triggers and load. Discuss with jrg please.
'target_name': 'android_builder_tests',
'type': 'none',
'dependencies': [
'../android_webview/android_webview.gyp:android_webview_unittests',
'../base/android/jni_generator/jni_generator.gyp:jni_generator_tests',
'../base/base.gyp:base_unittests',
'../build/android/tests/multiple_proguards/multiple_proguards.gyp:multiple_proguards_test_apk',
'../cc/cc_tests.gyp:cc_perftests_apk',
'../cc/cc_tests.gyp:cc_unittests',
'../chrome/chrome.gyp:unit_tests',
'../components/components.gyp:components_unittests',
'../content/content.gyp:content_browsertests',
'../content/content.gyp:content_shell_test_apk',
'../content/content.gyp:content_unittests',
'../gpu/gpu.gyp:gl_tests',
'../gpu/gpu.gyp:gpu_unittests',
'../ipc/ipc.gyp:ipc_tests',
'../media/media.gyp:media_unittests',
'../net/net.gyp:net_unittests',
'../sandbox/sandbox.gyp:sandbox_linux_unittests',
'../sql/sql.gyp:sql_unittests',
'../sync/sync.gyp:sync_unit_tests',
'../third_party/WebKit/Source/WebKit/chromium/All.gyp:*',
'../tools/android/android_tools.gyp:android_tools',
'../tools/android/device_stats_monitor/device_stats_monitor.gyp:device_stats_monitor',
'../tools/android/findbugs_plugin/findbugs_plugin.gyp:findbugs_plugin_test',
'../ui/ui.gyp:ui_unittests',
# Required by ui_unittests.
        # TODO(wangxianzhu): It'd be better to let ui_unittests depend on it, but
        # this would cause a circular gyp dependency, which would need the gyps
        # to be refactored to resolve.
'../chrome/chrome_resources.gyp:packed_resources',
],
'conditions': [
['linux_breakpad==1', {
'dependencies': [
'../breakpad/breakpad.gyp:breakpad_unittests',
# Also compile the tools needed to deal with minidumps, they are
# needed to run minidump tests upstream.
'../breakpad/breakpad.gyp:dump_syms#host',
'../breakpad/breakpad.gyp:symupload#host',
'../breakpad/breakpad.gyp:minidump_dump#host',
'../breakpad/breakpad.gyp:minidump_stackwalk#host'
],
}],
['"<(gtest_target_type)"=="shared_library"', {
'dependencies': [
# Unit test bundles packaged as an apk.
'../android_webview/android_webview.gyp:android_webview_unittests_apk',
'../base/base.gyp:base_unittests_apk',
'../cc/cc_tests.gyp:cc_unittests_apk',
'../chrome/chrome.gyp:unit_tests_apk',
'../components/components.gyp:components_unittests_apk',
'../content/content.gyp:content_browsertests_apk',
'../content/content.gyp:content_unittests_apk',
'../gpu/gpu.gyp:gl_tests_apk',
'../gpu/gpu.gyp:gpu_unittests_apk',
'../ipc/ipc.gyp:ipc_tests_apk',
'../media/media.gyp:media_unittests_apk',
'../net/net.gyp:net_unittests_apk',
'../sandbox/sandbox.gyp:sandbox_linux_jni_unittests_apk',
'../sql/sql.gyp:sql_unittests_apk',
'../sync/sync.gyp:sync_unit_tests_apk',
'../ui/ui.gyp:ui_unittests_apk',
'../android_webview/android_webview.gyp:android_webview_test_apk',
'../chrome/chrome.gyp:chromium_testshell_test_apk',
'../webkit/compositor_bindings/compositor_bindings_tests.gyp:webkit_compositor_bindings_unittests_apk'
],
}],
],
},
{
# Experimental / in-progress targets that are expected to fail
# but we still try to compile them on bots (turning the stage
# orange, not red).
'target_name': 'android_experimental',
'type': 'none',
'dependencies': [
],
},
{
# In-progress targets that are expected to fail and are NOT run
# on any bot.
'target_name': 'android_in_progress',
'type': 'none',
'dependencies': [
],
},
], # targets
}
|
py | 1a3e2f08bbef6a5c29da8a267dd61d4a3225cda1 | # encoding: utf-8
"""
Gherkin step implementations for chart features.
"""
from __future__ import absolute_import, print_function
import hashlib
from itertools import islice
from behave import given, then, when
from pptx import Presentation
from pptx.chart.chart import Legend
from pptx.chart.data import (
BubbleChartData, CategoryChartData, ChartData, XyChartData
)
from pptx.enum.chart import XL_CHART_TYPE
from pptx.parts.embeddedpackage import EmbeddedXlsxPart
from pptx.util import Inches
from helpers import count, test_pptx
# given ===================================================
@given('a chart')
def given_a_chart(context):
prs = Presentation(test_pptx('shp-common-props'))
sld = prs.slides[0]
context.chart = sld.shapes[6].chart
@given('a chart having {a_or_no} title')
def given_a_chart_having_a_or_no_title(context, a_or_no):
shape_idx = {'no': 0, 'a': 1}[a_or_no]
prs = Presentation(test_pptx('cht-chart-props'))
context.chart = prs.slides[0].shapes[shape_idx].chart
@given('a chart {having_or_not} a legend')
def given_a_chart_having_or_not_a_legend(context, having_or_not):
slide_idx = {
'having': 0,
'not having': 1,
}[having_or_not]
prs = Presentation(test_pptx('cht-legend'))
context.chart = prs.slides[slide_idx].shapes[0].chart
@given('a chart of size and type {spec}')
def given_a_chart_of_size_and_type_spec(context, spec):
slide_idx = {
'2x2 Clustered Bar': 0,
'2x2 100% Stacked Bar': 1,
'2x2 Clustered Column': 2,
'4x3 Line': 3,
'3x1 Pie': 4,
'3x2 XY': 5,
'3x2 Bubble': 6,
}[spec]
prs = Presentation(test_pptx('cht-replace-data'))
chart = prs.slides[slide_idx].shapes[0].chart
context.chart = chart
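    # Remember the SHA1 of the embedded workbook so a later step can check that
    # replace_data() actually rewrote it.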
context.xlsx_sha1 = hashlib.sha1(
chart._workbook.xlsx_part.blob
).hexdigest()
@given('a chart of type {chart_type}')
def given_a_chart_of_type_chart_type(context, chart_type):
slide_idx, shape_idx = {
'Area': (0, 0),
'Stacked Area': (0, 1),
'100% Stacked Area': (0, 2),
'3-D Area': (0, 3),
'3-D Stacked Area': (0, 4),
'3-D 100% Stacked Area': (0, 5),
'Clustered Bar': (1, 0),
'Stacked Bar': (1, 1),
'100% Stacked Bar': (1, 2),
'Clustered Column': (1, 3),
'Stacked Column': (1, 4),
'100% Stacked Column': (1, 5),
'Line': (2, 0),
'Stacked Line': (2, 1),
'100% Stacked Line': (2, 2),
'Marked Line': (2, 3),
'Stacked Marked Line': (2, 4),
'100% Stacked Marked Line': (2, 5),
'Pie': (3, 0),
'Exploded Pie': (3, 1),
'XY (Scatter)': (4, 0),
'XY Lines': (4, 1),
'XY Lines No Markers': (4, 2),
'XY Smooth Lines': (4, 3),
'XY Smooth No Markers': (4, 4),
'Bubble': (5, 0),
'3D-Bubble': (5, 1),
'Radar': (6, 0),
'Marked Radar': (6, 1),
'Filled Radar': (6, 2),
'Line (with date categories)': (7, 0),
}[chart_type]
prs = Presentation(test_pptx('cht-chart-type'))
context.chart = prs.slides[slide_idx].shapes[shape_idx].chart
@given('a chart title')
def given_a_chart_title(context):
prs = Presentation(test_pptx('cht-chart-props'))
context.chart_title = prs.slides[0].shapes[1].chart.chart_title
@given('a chart title having {a_or_no} text frame')
def given_a_chart_title_having_a_or_no_text_frame(context, a_or_no):
prs = Presentation(test_pptx('cht-chart-props'))
shape_idx = {'no': 0, 'a': 1}[a_or_no]
context.chart_title = prs.slides[1].shapes[shape_idx].chart.chart_title
# when ====================================================
@when('I add a Clustered bar chart with multi-level categories')
def when_I_add_a_clustered_bar_chart_with_multi_level_categories(context):
chart_type = XL_CHART_TYPE.BAR_CLUSTERED
chart_data = CategoryChartData()
WEST = chart_data.add_category('WEST')
WEST.add_sub_category('SF')
WEST.add_sub_category('LA')
EAST = chart_data.add_category('EAST')
EAST.add_sub_category('NY')
EAST.add_sub_category('NJ')
chart_data.add_series('Series 1', (1, 2, None, 4))
chart_data.add_series('Series 2', (5, None, 7, 8))
context.chart = context.slide.shapes.add_chart(
chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data
).chart
@when('I add a {kind} chart with {cats} categories and {sers} series')
def when_I_add_a_chart_with_categories_and_series(context, kind, cats, sers):
chart_type = {
'Area': XL_CHART_TYPE.AREA,
'Stacked Area': XL_CHART_TYPE.AREA_STACKED,
'100% Stacked Area': XL_CHART_TYPE.AREA_STACKED_100,
'Clustered Bar': XL_CHART_TYPE.BAR_CLUSTERED,
'Stacked Bar': XL_CHART_TYPE.BAR_STACKED,
'100% Stacked Bar': XL_CHART_TYPE.BAR_STACKED_100,
'Clustered Column': XL_CHART_TYPE.COLUMN_CLUSTERED,
'Stacked Column': XL_CHART_TYPE.COLUMN_STACKED,
'100% Stacked Column': XL_CHART_TYPE.COLUMN_STACKED_100,
'Doughnut': XL_CHART_TYPE.DOUGHNUT,
'Exploded Doughnut': XL_CHART_TYPE.DOUGHNUT_EXPLODED,
'Line': XL_CHART_TYPE.LINE,
'Line with Markers': XL_CHART_TYPE.LINE_MARKERS,
'Line Markers Stacked': XL_CHART_TYPE.LINE_MARKERS_STACKED,
'100% Line Markers Stacked': XL_CHART_TYPE.LINE_MARKERS_STACKED_100,
'Line Stacked': XL_CHART_TYPE.LINE_STACKED,
'100% Line Stacked': XL_CHART_TYPE.LINE_STACKED_100,
'Pie': XL_CHART_TYPE.PIE,
'Exploded Pie': XL_CHART_TYPE.PIE_EXPLODED,
'Radar': XL_CHART_TYPE.RADAR,
'Filled Radar': XL_CHART_TYPE.RADAR_FILLED,
'Radar with markers': XL_CHART_TYPE.RADAR_MARKERS,
}[kind]
category_count, series_count = int(cats), int(sers)
category_source = ('Foo', 'Bar', 'Baz', 'Boo', 'Far', 'Faz')
series_value_source = count(1.1, 1.1)
chart_data = CategoryChartData()
chart_data.categories = category_source[:category_count]
for idx in range(series_count):
series_title = 'Series %d' % (idx+1)
series_values = tuple(islice(series_value_source, category_count))
chart_data.add_series(series_title, series_values)
context.chart = context.slide.shapes.add_chart(
chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data
).chart
@when('I add a {bubble_type} chart having 2 series of 3 points each')
def when_I_add_a_bubble_chart_having_2_series_of_3_pts(context, bubble_type):
chart_type = getattr(XL_CHART_TYPE, bubble_type)
data = (
('Series 1', ((-0.1, 0.5, 1.0), (16.2, 0.0, 2.0), (8.0, -0.2, 3.0))),
('Series 2', ((12.4, 0.8, 4.0), (-7.5, 0.5, 5.0), (5.1, -0.5, 6.0))),
)
chart_data = BubbleChartData()
for series_data in data:
series_label, points = series_data
series = chart_data.add_series(series_label)
for point in points:
x, y, size = point
series.add_data_point(x, y, size)
context.chart = context.slide.shapes.add_chart(
chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data
).chart
@when('I assign {value} to chart.has_legend')
def when_I_assign_value_to_chart_has_legend(context, value):
new_value = {
'True': True,
'False': False,
}[value]
context.chart.has_legend = new_value
@when('I assign {value} to chart.has_title')
def when_I_assign_value_to_chart_has_title(context, value):
context.chart.has_title = {'True': True, 'False': False}[value]
@when('I assign {value} to chart_title.has_text_frame')
def when_I_assign_value_to_chart_title_has_text_frame(context, value):
context.chart_title.has_text_frame = {
'True': True,
'False': False
}[value]
@when('I replace its data with {cats} categories and {sers} series')
def when_I_replace_its_data_with_categories_and_series(context, cats, sers):
category_count, series_count = int(cats), int(sers)
category_source = ('Foo', 'Bar', 'Baz', 'Boo', 'Far', 'Faz')
series_value_source = count(1.1, 1.1)
chart_data = ChartData()
chart_data.categories = category_source[:category_count]
for idx in range(series_count):
series_title = 'New Series %d' % (idx+1)
series_values = tuple(islice(series_value_source, category_count))
chart_data.add_series(series_title, series_values)
context.chart.replace_data(chart_data)
@when('I replace its data with 3 series of 3 bubble points each')
def when_I_replace_its_data_with_3_series_of_three_bubble_pts_each(context):
chart_data = BubbleChartData()
for idx in range(3):
series_title = 'New Series %d' % (idx+1)
series = chart_data.add_series(series_title)
for jdx in range(3):
x, y, size = idx * 3 + jdx, idx * 2 + jdx, idx + jdx
series.add_data_point(x, y, size)
context.chart.replace_data(chart_data)
@when('I replace its data with 3 series of 3 points each')
def when_I_replace_its_data_with_3_series_of_three_points_each(context):
chart_data = XyChartData()
x = y = 0
for idx in range(3):
series_title = 'New Series %d' % (idx+1)
series = chart_data.add_series(series_title)
for jdx in range(3):
x, y = idx * 3 + jdx, idx * 2 + jdx
series.add_data_point(x, y)
context.chart.replace_data(chart_data)
# then ====================================================
@then('chart.category_axis is a {cls_name} object')
def then_chart_category_axis_is_a_cls_name_object(context, cls_name):
category_axis = context.chart.category_axis
type_name = type(category_axis).__name__
assert type_name == cls_name, 'got %s' % type_name
@then('chart.chart_title is a ChartTitle object')
def then_chart_chart_title_is_a_ChartTitle_object(context):
class_name = type(context.chart.chart_title).__name__
assert class_name == 'ChartTitle', 'got %s' % class_name
@then('chart.chart_type is {enum_member}')
def then_chart_chart_type_is_value(context, enum_member):
expected_value = getattr(XL_CHART_TYPE, enum_member)
chart = context.chart
assert chart.chart_type is expected_value, 'got %s' % chart.chart_type
@then('chart.has_legend is {value}')
def then_chart_has_legend_is_value(context, value):
expected_value = {
'True': True,
'False': False,
}[value]
chart = context.chart
assert chart.has_legend is expected_value
@then('chart.has_title is {value}')
def then_chart_has_title_is_value(context, value):
chart = context.chart
actual_value = chart.has_title
expected_value = {'True': True, 'False': False}[value]
assert actual_value is expected_value, 'got %s' % actual_value
@then('chart.legend is a legend object')
def then_chart_legend_is_a_legend_object(context):
chart = context.chart
assert isinstance(chart.legend, Legend)
@then('chart.series is a SeriesCollection object')
def then_chart_series_is_a_SeriesCollection_object(context):
type_name = type(context.chart.series).__name__
assert type_name == 'SeriesCollection', 'got %s' % type_name
@then('chart.value_axis is a ValueAxis object')
def then_chart_value_axis_is_a_ValueAxis_object(context):
value_axis = context.chart.value_axis
assert type(value_axis).__name__ == 'ValueAxis'
@then('chart_title.format is a ChartFormat object')
def then_chart_title_format_is_a_ChartFormat_object(context):
class_name = type(context.chart_title.format).__name__
assert class_name == 'ChartFormat', 'got %s' % class_name
@then('chart_title.format.fill is a FillFormat object')
def then_chart_title_format_fill_is_a_FillFormat_object(context):
class_name = type(context.chart_title.format.fill).__name__
assert class_name == 'FillFormat', 'got %s' % class_name
@then('chart_title.format.line is a LineFormat object')
def then_chart_title_format_line_is_a_LineFormat_object(context):
class_name = type(context.chart_title.format.line).__name__
assert class_name == 'LineFormat', 'got %s' % class_name
@then('chart_title.has_text_frame is {value}')
def then_chart_title_has_text_frame_is_value(context, value):
actual_value = context.chart_title.has_text_frame
expected_value = {'True': True, 'False': False}[value]
assert actual_value is expected_value, 'got %s' % actual_value
@then('chart_title.text_frame is a TextFrame object')
def then_chart_title_text_frame_is_a_TextFrame_object(context):
class_name = type(context.chart_title.text_frame).__name__
assert class_name == 'TextFrame', 'got %s' % class_name
@then('each series has a new name')
def then_each_series_has_a_new_name(context):
for series in context.chart.plots[0].series:
assert series.name.startswith('New ')
@then('each series has {count} values')
def then_each_series_has_count_values(context, count):
expected_count = int(count)
for series in context.chart.plots[0].series:
actual_value_count = len(series.values)
assert actual_value_count == expected_count
@then('len(chart.series) is {count}')
def then_len_chart_series_is_count(context, count):
expected_count = int(count)
assert len(context.chart.series) == expected_count
@then('the chart has an Excel data worksheet')
def then_the_chart_has_an_Excel_data_worksheet(context):
xlsx_part = context.chart._workbook.xlsx_part
assert isinstance(xlsx_part, EmbeddedXlsxPart)
@then('the chart has new chart data')
def then_the_chart_has_new_chart_data(context):
orig_xlsx_sha1 = context.xlsx_sha1
new_xlsx_sha1 = hashlib.sha1(
context.chart._workbook.xlsx_part.blob
).hexdigest()
assert new_xlsx_sha1 != orig_xlsx_sha1
|
py | 1a3e2fcb016feb40e928ebd6f8291925b5ad74ca | from django_evolution.mutations import AddField
from django.db import models
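# Evolution: add a boolean "email_list_only" field to Group, initializing existing rows to True.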
MUTATIONS = [
AddField('Group', 'email_list_only', models.BooleanField, initial=True)
]
|
py | 1a3e306f40fa6f75d28bd794473f5425304265cc | from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible # needed to support Python 2
class Battle(models.Model):
battle_name = models.CharField(max_length=100)
hashtag1 = models.CharField(max_length=500)
hashtag1_typos = models.CharField(null=True, max_length=100, blank=True)
hashtag2 = models.CharField(max_length=500)
hashtag2_typos = models.CharField(null=True, max_length=100, blank=True)
started_at = models.CharField(max_length=100)
ended_at = models.CharField(max_length=100)
winner = models.CharField(null=True, max_length=500, blank=True)
status = models.CharField(max_length=100)
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="+")
def __unicode__(self):
return self.battle_name
def __str__(self):
return str(self.battle_name)
def get_absolute_url(self):
return reverse('battle:battle_edit', kwargs={'pk': self.pk}) |
py | 1a3e30e1692e46348c94ac7bc556a1f22d55afd5 | from flask_session import Session
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_bootstrap import Bootstrap
from flask_debugtoolbar import DebugToolbarExtension
from flask_caching import Cache
from myapp.settings import CACHES
from flask_restful import Api
api = Api()
db = SQLAlchemy()
bs = Bootstrap()
cache = Cache(config=CACHES.get("default"))
def init_ext(app):
    # Instantiate the Session extension
se = Session()
    # Bind it to the app
se.init_app(app)
    # Bind the db to the app
db.init_app(app)
    # Instantiate Migrate for database migrations
migrate = Migrate(app=app,db=db)
bs.init_app(app)
toolbar = DebugToolbarExtension()
toolbar.init_app(app)
cache.init_app(app)
api.init_app(app)
|
py | 1a3e316d3591f1b6b664ca235d18d450b6bd45f3 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
bl_info = {
'name': 'Pivot Transform',
#"description": "This is a test version of the addon. Write in the discord channel(link below) about the errors."
"author": "Max Derksen",
'version': (1, 4, 4),
'blender': (2, 81, 0),
'location': 'VIEW 3D > N-Panel > Pivot Point Popover',
#"warning": "This is a test version of the addon. Write in the discord channel(link below) about the errors.",
"support": "COMMUNITY",
'category': 'Object',
}
'''
import bpy
import re
import bmesh
import mathutils
from mathutils import Matrix, Vector
from bpy.types import Operator
from bpy.props import IntProperty, FloatProperty #Bounding Box
#========================================================================PIVOT TRANSFORM TOOL IN EDIT MODE
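# Gizmo visibility flags saved on "Transform" so they can be restored on "Apply".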
storeGT = False
storeGR = False
storeGS = False
class PIVOT_OT_transform_on_N(Operator):
bl_idname = "pivot.transform_on_n"
bl_label = "Transform"
bl_description = "Start Pivot Transformation"
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
def execute(self, context):
if context.scene.tool_settings.use_transform_data_origin == False:
props_pivot = context.preferences.addons[__name__.split(".")[0]].preferences
global storeGT
global storeGR
global storeGS
storeGT = context.space_data.show_gizmo_object_translate
storeGR = context.space_data.show_gizmo_object_rotate
storeGS = context.space_data.show_gizmo_object_scale
#if props_pivot.gizmo_preselect == True:
#context.space_data.show_gizmo_object_translate = props_pivot.move_giz
#context.space_data.show_gizmo_object_rotate = props_pivot.rotate_giz
#context.space_data.show_gizmo_object_scale = props_pivot.scale_giz
if context.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
context.scene.tool_settings.use_transform_data_origin = True
return{'FINISHED'}
else:
return{'CANCELLED'}
class PIVOT_OT_transform_off_N(Operator):
bl_idname = "pivot.transform_off_n"
bl_label = "Apply"
bl_description = "Apply Pivot Transformation"
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
def execute(self, context):
if context.scene.tool_settings.use_transform_data_origin == True:
global storeGT
global storeGR
global storeGS
context.space_data.show_gizmo_object_translate = storeGT
context.space_data.show_gizmo_object_rotate = storeGR
context.space_data.show_gizmo_object_scale = storeGS
if context.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
context.scene.tool_settings.use_transform_data_origin = False
return{'FINISHED'}
else:
return{'CANCELLED'}
import bgl
import gpu
from gpu_extras.batch import batch_for_shader
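# Build a wireframe box matching the object's bounding box, link it to the scene and
# enter Edit Mode on it so a vertex, edge or face of the box can be picked as the new origin.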
def ob_add(self, context, obj):
""" bpy.ops.mesh.primitive_cube_add(enter_editmode=True, align='WORLD', location=(0, 0, 0))
bbox = context.active_object
bbox.matrix_world = self.obj.matrix_world
bm = bmesh.from_edit_mesh(bbox.data)
bm.verts.ensure_lookup_table()
bm.verts.index_update()
for i, vert in enumerate(ob_bbox):
bm.verts[i].co = (vert[0], vert[1], vert[2])
#bm.to_mesh(me)
bpy.ops.mesh.select_all(action='DESELECT') """
ob_bbox = obj.bound_box
me = bpy.data.meshes.new('PivotBBox')
bbox = bpy.data.objects.new('PivotBBox', me)
bbox.matrix_world = obj.matrix_world
context.collection.objects.link(bbox)
bm = bmesh.new()
bm.from_mesh(me)
for vert in ob_bbox:
bm.verts.new(vert[:])
vertex=[]
for v in bm.verts:
vertex.append(v)
bm.faces.new((vertex[0], vertex[1], vertex[2], vertex[3]))
bm.faces.new((vertex[3], vertex[2], vertex[6], vertex[7]))
bm.faces.new((vertex[3], vertex[7], vertex[4], vertex[0]))
bm.faces.new((vertex[4], vertex[5], vertex[6], vertex[7]))
bm.faces.new((vertex[2], vertex[1], vertex[5], vertex[6]))
bm.faces.new((vertex[0], vertex[4], vertex[5], vertex[1]))
bm.to_mesh(me)
bpy.ops.object.select_all(action='DESELECT')
bbox.select_set(state=True)
context.view_layer.objects.active = bbox
context.object.display_type = 'WIRE'
bpy.ops.object.mode_set(mode='EDIT')
context.tool_settings.mesh_select_mode = (True, True, True)
obj.select_set(state=True)
class PIVOT_OT_bounding_box_N(Operator):
bl_idname = "pivot.bounding_box_n"
bl_label = "Pivot To Bounding Box"
bl_description = "Apply Transformation"
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
@classmethod
def poll(cls, context):
return context.active_object is not None
def __init__(self):
self.obj = None
self.select_mode = tuple()
@staticmethod
def draw_mesh(self, context):
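        # Draw the helper box: shaded faces, dark edges and blue snap points at the
        # corners, edge midpoints and face centers, using a smooth-color GPU shader.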
shader = gpu.shader.from_builtin('3D_SMOOTH_COLOR')
theme = bpy.context.preferences.themes['Default']
vertex_size = theme.view_3d.vertex_size
bgl.glEnable(bgl.GL_BLEND)
bgl.glLineWidth(3)
bgl.glPointSize(vertex_size + 4)
bgl.glEnable(bgl.GL_DEPTH_TEST)
bgl.glEnable(bgl.GL_LINE_SMOOTH)
bgl.glEnable(bgl.GL_CULL_FACE)
bgl.glCullFace(bgl.GL_BACK)
bgl.glDepthRange(0, 0.9999)
bgl.glDepthMask(False)
shader.bind()
bbox = context.active_object
mesh = bmesh.from_edit_mesh(bbox.data)
vertex_co = [bbox.matrix_world @ v.co for v in mesh.verts]
vertex_all = []
for e in mesh.edges:
v1 = bbox.matrix_world @ e.verts[0].co
v2 = bbox.matrix_world @ e.verts[1].co
vCo = (v1 + v2) / 2
vertex_all.append(vCo)
for f in mesh.faces:
vCo = bbox.matrix_world @ f.calc_center_bounds()
vertex_all.append(vCo)
for v in vertex_co:
vertex_all.append(v)
edge_keys = bbox.data.edge_keys
loop_triangles = mesh.calc_loop_triangles()
faces_indices = [[loop.vert.index for loop in looptris] for looptris in loop_triangles]
face_col = [(0.2, 0.2, 0.2, 0.6) for _ in range(len(vertex_co))]
edge_col = [(0.1, 0.1, 0.1, 1.0) for _ in range(len(vertex_co))]
vert_col = [(0.1, 0.4, 1.0, 1.0) for _ in range(len(vertex_all))]
FACES = batch_for_shader(shader, 'TRIS', {"pos": vertex_co, "color": face_col}, indices=faces_indices)
EDGES = batch_for_shader(shader, 'LINES', {"pos": vertex_co, "color": edge_col}, indices=edge_keys)
VERTS = batch_for_shader(shader, 'POINTS', {"pos": vertex_all, "color": vert_col})
FACES.draw(shader)
EDGES.draw(shader)
VERTS.draw(shader)
bgl.glDepthRange(0, 1)
bgl.glDisable(bgl.GL_LINE_SMOOTH)
bgl.glDisable(bgl.GL_DEPTH_TEST)
bgl.glDisable(bgl.GL_CULL_FACE)
bgl.glLineWidth(1)
bgl.glPointSize(vertex_size)
bgl.glDisable(bgl.GL_BLEND)
def modal(self, context, event):
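        # As soon as the user selects any element of the box, snap the 3D cursor to it,
        # move the object's origin there, then delete the helper box and restore state.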
props_pivot = context.preferences.addons[__name__.split(".")[0]].preferences
if context.area:
if context.area.type == 'VIEW_3D':
#context.area.tag_redraw()
# Selected Object(EDIT_MODE)
bbox = context.active_object
me = bmesh.from_edit_mesh(bbox.data)
# select items
verts_sel = []
verts_sel.extend([v for v in me.verts if v.select])
if len(verts_sel) >= 1:
#bpy.ops.pivot.alignface()
cursor_pos = context.scene.cursor.location.copy()
bpy.ops.view3d.snap_cursor_to_selected()
context.tool_settings.mesh_select_mode = self.select_mode
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.origin_set(type='ORIGIN_CURSOR', center='MEDIAN')
bpy.context.collection.objects.unlink(bbox)
bpy.ops.object.delete({"selected_objects": [bbox]})
context.view_layer.objects.active = self.obj
context.scene.cursor.location = cursor_pos
bpy.types.SpaceView3D.draw_handler_remove(self._bb_mesh_draw, 'WINDOW')
#props_pivot.bbox_run = False
return {'FINISHED'}
                if event.type not in {'RIGHTMOUSE', 'MIDDLEMOUSE', 'LEFTMOUSE'} and event.value == 'PRESS':
#props_pivot.bbox_run = False
context.tool_settings.mesh_select_mode = self.select_mode
bpy.ops.object.mode_set(mode='OBJECT')
bpy.context.collection.objects.unlink(bbox)
bpy.ops.object.delete({"selected_objects": [bbox]})
context.view_layer.objects.active = self.obj
bpy.types.SpaceView3D.draw_handler_remove(self._bb_mesh_draw, 'WINDOW')
return {'CANCELLED'}
else:
#props_pivot.bbox_run = False
bpy.types.SpaceView3D.draw_handler_remove(self._bb_mesh_draw, 'WINDOW')
return {'FINISHED'}
return {'PASS_THROUGH'}
def invoke(self, context, event):
if context.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
props_pivot = context.preferences.addons[__name__.split(".")[0]].preferences
#props_pivot.bbox_run = True
self.select_mode = context.tool_settings.mesh_select_mode[:]
self.obj = context.active_object
ob_add(self, context, self.obj)
if context.area.type == 'VIEW_3D':
args = (self, context)
self._bb_mesh_draw= bpy.types.SpaceView3D.draw_handler_add(self.draw_mesh, args, 'WINDOW', 'POST_VIEW')
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
classes = [
PIVOT_OT_transform_on_N,
PIVOT_OT_transform_off_N,
PIVOT_OT_bounding_box_N,
]
def register():
global storeGT
global storeGR
global storeGS
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls) |
py | 1a3e319bd294632bd0c8f2a6d83f7630e38f2853 | # Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
from octavia.controller.worker.tasks import lifecycle_tasks
import octavia.tests.unit.base as base
class TestLifecycleTasks(base.TestCase):
def setUp(self):
self.AMPHORA = mock.MagicMock()
self.AMPHORA_ID = uuidutils.generate_uuid()
self.AMPHORA.id = self.AMPHORA_ID
self.HEALTH_MON = mock.MagicMock()
self.HEALTH_MON_ID = uuidutils.generate_uuid()
self.HEALTH_MON.pool_id = self.HEALTH_MON_ID
self.L7POLICY = mock.MagicMock()
self.L7POLICY_ID = uuidutils.generate_uuid()
self.L7POLICY.id = self.L7POLICY_ID
self.L7RULE = mock.MagicMock()
self.L7RULE_ID = uuidutils.generate_uuid()
self.L7RULE.id = self.L7RULE_ID
self.LISTENER = mock.MagicMock()
self.LISTENER_ID = uuidutils.generate_uuid()
self.LISTENER.id = self.LISTENER_ID
self.LISTENERS = [self.LISTENER]
self.LOADBALANCER = mock.MagicMock()
self.LOADBALANCER_ID = uuidutils.generate_uuid()
self.LOADBALANCER.id = self.LOADBALANCER_ID
self.LISTENER.load_balancer = self.LOADBALANCER
self.MEMBER = mock.MagicMock()
self.MEMBER_ID = uuidutils.generate_uuid()
self.MEMBER.id = self.MEMBER_ID
self.MEMBERS = [self.MEMBER]
self.POOL = mock.MagicMock()
self.POOL_ID = uuidutils.generate_uuid()
self.POOL.id = self.POOL_ID
super(TestLifecycleTasks, self).setUp()
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'unmark_amphora_health_busy')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_amphora_status_error')
def test_AmphoraIDToErrorOnRevertTask(self, mock_amp_status_error,
mock_amp_health_busy):
amp_id_to_error_on_revert = (lifecycle_tasks.
AmphoraIDToErrorOnRevertTask())
# Execute
amp_id_to_error_on_revert.execute(self.AMPHORA_ID)
self.assertFalse(mock_amp_status_error.called)
# Revert
amp_id_to_error_on_revert.revert(self.AMPHORA_ID)
mock_amp_status_error.assert_called_once_with(self.AMPHORA_ID)
self.assertFalse(mock_amp_health_busy.called)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'unmark_amphora_health_busy')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_amphora_status_error')
def test_AmphoraToErrorOnRevertTask(self, mock_amp_status_error,
mock_amp_health_busy):
amp_to_error_on_revert = lifecycle_tasks.AmphoraToErrorOnRevertTask()
# Execute
amp_to_error_on_revert.execute(self.AMPHORA)
self.assertFalse(mock_amp_status_error.called)
# Revert
amp_to_error_on_revert.revert(self.AMPHORA)
mock_amp_status_error.assert_called_once_with(self.AMPHORA_ID)
self.assertFalse(mock_amp_health_busy.called)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_health_mon_prov_status_error')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_active')
def test_HealthMonitorToErrorOnRevertTask(
self,
mock_listener_prov_status_active,
mock_loadbalancer_prov_status_active,
mock_health_mon_prov_status_error):
health_mon_to_error_on_revert = (lifecycle_tasks.
HealthMonitorToErrorOnRevertTask())
# Execute
health_mon_to_error_on_revert.execute(self.HEALTH_MON,
self.LISTENERS,
self.LOADBALANCER)
self.assertFalse(mock_health_mon_prov_status_error.called)
# Revert
health_mon_to_error_on_revert.revert(self.HEALTH_MON,
self.LISTENERS,
self.LOADBALANCER)
mock_health_mon_prov_status_error.assert_called_once_with(
self.HEALTH_MON_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)
mock_listener_prov_status_active.assert_called_once_with(
self.LISTENER_ID)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_l7policy_prov_status_error')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_active')
def test_L7PolicyToErrorOnRevertTask(
self,
mock_listener_prov_status_active,
mock_loadbalancer_prov_status_active,
mock_l7policy_prov_status_error):
l7policy_to_error_on_revert = (lifecycle_tasks.
L7PolicyToErrorOnRevertTask())
# Execute
l7policy_to_error_on_revert.execute(self.L7POLICY,
self.LISTENERS,
self.LOADBALANCER)
self.assertFalse(mock_l7policy_prov_status_error.called)
# Revert
l7policy_to_error_on_revert.revert(self.L7POLICY,
self.LISTENERS,
self.LOADBALANCER)
mock_l7policy_prov_status_error.assert_called_once_with(
self.L7POLICY_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)
mock_listener_prov_status_active.assert_called_once_with(
self.LISTENER_ID)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_l7rule_prov_status_error')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_active')
def test_L7RuleToErrorOnRevertTask(
self,
mock_listener_prov_status_active,
mock_loadbalancer_prov_status_active,
mock_l7rule_prov_status_error):
l7rule_to_error_on_revert = (lifecycle_tasks.
L7RuleToErrorOnRevertTask())
# Execute
l7rule_to_error_on_revert.execute(self.L7RULE,
self.LISTENERS,
self.LOADBALANCER)
self.assertFalse(mock_l7rule_prov_status_error.called)
# Revert
l7rule_to_error_on_revert.revert(self.L7RULE,
self.LISTENERS,
self.LOADBALANCER)
mock_l7rule_prov_status_error.assert_called_once_with(
self.L7RULE_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)
mock_listener_prov_status_active.assert_called_once_with(
self.LISTENER_ID)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_error')
def test_ListenerToErrorOnRevertTask(
self,
mock_listener_prov_status_error,
mock_loadbalancer_prov_status_active):
listener_to_error_on_revert = (lifecycle_tasks.
ListenerToErrorOnRevertTask())
# Execute
listener_to_error_on_revert.execute(self.LISTENER)
self.assertFalse(mock_listener_prov_status_error.called)
# Revert
listener_to_error_on_revert.revert(self.LISTENER)
mock_listener_prov_status_error.assert_called_once_with(
self.LISTENER_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_error')
def test_ListenersToErrorOnRevertTask(
self,
mock_listener_prov_status_error,
mock_loadbalancer_prov_status_active):
listeners_to_error_on_revert = (lifecycle_tasks.
ListenersToErrorOnRevertTask())
# Execute
listeners_to_error_on_revert.execute(self.LISTENERS,
self.LOADBALANCER)
self.assertFalse(mock_listener_prov_status_error.called)
# Revert
listeners_to_error_on_revert.revert(self.LISTENERS,
self.LOADBALANCER)
mock_listener_prov_status_error.assert_called_once_with(
self.LISTENER_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_error')
def test_LoadBalancerIDToErrorOnRevertTask(
self,
mock_loadbalancer_prov_status_error):
loadbalancer_id_to_error_on_revert = (
lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask())
# Execute
loadbalancer_id_to_error_on_revert.execute(self.LOADBALANCER_ID)
self.assertFalse(mock_loadbalancer_prov_status_error.called)
# Revert
loadbalancer_id_to_error_on_revert.revert(self.LOADBALANCER_ID)
mock_loadbalancer_prov_status_error.assert_called_once_with(
self.LOADBALANCER_ID)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_error')
def test_LoadBalancerToErrorOnRevertTask(
self,
mock_loadbalancer_prov_status_error):
loadbalancer_to_error_on_revert = (
lifecycle_tasks.LoadBalancerToErrorOnRevertTask())
# Execute
loadbalancer_to_error_on_revert.execute(self.LOADBALANCER)
self.assertFalse(mock_loadbalancer_prov_status_error.called)
# Revert
loadbalancer_to_error_on_revert.revert(self.LOADBALANCER)
mock_loadbalancer_prov_status_error.assert_called_once_with(
self.LOADBALANCER_ID)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_member_prov_status_error')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_pool_prov_status_active')
def test_MemberToErrorOnRevertTask(
self,
mock_pool_prov_status_active,
mock_listener_prov_status_active,
mock_loadbalancer_prov_status_active,
mock_member_prov_status_error):
member_to_error_on_revert = lifecycle_tasks.MemberToErrorOnRevertTask()
# Execute
member_to_error_on_revert.execute(self.MEMBER,
self.LISTENERS,
self.LOADBALANCER,
self.POOL)
self.assertFalse(mock_member_prov_status_error.called)
# Revert
member_to_error_on_revert.revert(self.MEMBER,
self.LISTENERS,
self.LOADBALANCER,
self.POOL)
mock_member_prov_status_error.assert_called_once_with(
self.MEMBER_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)
mock_listener_prov_status_active.assert_called_once_with(
self.LISTENER_ID)
mock_pool_prov_status_active.assert_called_once_with(
self.POOL_ID)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_member_prov_status_error')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_pool_prov_status_active')
def test_MembersToErrorOnRevertTask(
self,
mock_pool_prov_status_active,
mock_listener_prov_status_active,
mock_loadbalancer_prov_status_active,
mock_member_prov_status_error):
members_to_error_on_revert = (
lifecycle_tasks.MembersToErrorOnRevertTask())
# Execute
members_to_error_on_revert.execute(self.MEMBERS,
self.LISTENERS,
self.LOADBALANCER,
self.POOL)
self.assertFalse(mock_member_prov_status_error.called)
# Revert
members_to_error_on_revert.revert(self.MEMBERS,
self.LISTENERS,
self.LOADBALANCER,
self.POOL)
mock_member_prov_status_error.assert_called_once_with(
self.MEMBER_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)
mock_listener_prov_status_active.assert_called_once_with(
self.LISTENER_ID)
mock_pool_prov_status_active.assert_called_once_with(
self.POOL_ID)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_pool_prov_status_error')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_loadbalancer_prov_status_active')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_active')
def test_PoolToErrorOnRevertTask(
self,
mock_listener_prov_status_active,
mock_loadbalancer_prov_status_active,
mock_pool_prov_status_error):
pool_to_error_on_revert = lifecycle_tasks.PoolToErrorOnRevertTask()
# Execute
pool_to_error_on_revert.execute(self.POOL,
self.LISTENERS,
self.LOADBALANCER)
self.assertFalse(mock_pool_prov_status_error.called)
# Revert
pool_to_error_on_revert.revert(self.POOL,
self.LISTENERS,
self.LOADBALANCER)
mock_pool_prov_status_error.assert_called_once_with(
self.POOL_ID)
mock_loadbalancer_prov_status_active.assert_called_once_with(
self.LOADBALANCER_ID)
mock_listener_prov_status_active.assert_called_once_with(
self.LISTENER_ID)
|
py | 1a3e32c0f754f69c764470c3ee6d6dad219f64e9 | ## Alarm Server
## Supporting Envisalink 2DS/3
##
## This code is under the terms of the GPL v3 license.
evl_Commands = {
'KeepAlive' : '000',
'StatusReport' : '001',
'DumpZoneTimers' : '008',
'PartitionKeypress' : '071',
'Disarm' : '040',
'ArmStay' : '031',
'ArmAway' : '030',
'ArmMax' : '032',
'Login' : '005',
'Panic' : '060',
'SendCode' : '200',
'CommandOutput' : '020',
'SetTime' : '010'
}
evl_PanicTypes = {
'Fire' : '1',
'Ambulance' : '2',
'Police' : '3'
}
evl_ArmModes = {
'0' : {'name' : 'Arm Away', 'status':{'armed_away': True, 'armed_zero_entry_delay': False, 'alpha':'Arm Away', 'exit_delay':False, 'entry_delay': False }},
'1' : {'name' : 'Arm Stay', 'status':{'armed_stay': True, 'armed_zero_entry_delay': False, 'alpha':'Arm Stay', 'exit_delay':False, 'entry_delay': False }},
'2' : {'name' : 'Arm Zero Entry Away', 'status':{'armed_away': True, 'armed_zero_entry_delay': True, 'alpha':'Arm Zero Entry Away', 'exit_delay':False, 'entry_delay': False }},
'3' : {'name' : 'Arm Zero Entry Stay', 'status':{'armed_stay': True, 'armed_zero_entry_delay': True, 'alpha':'Arm Zero Entry Stay', 'exit_delay':False, 'entry_delay': False }}
}
evl_ResponseTypes = {
'505' : {'name':'Login Prompt', 'handler':'login'},
'615' : {'name':'Envisalink Zone Timer Dump', 'handler':'zone_timer_dump'},
'500' : {'name':'Poll', 'handler':'poll_response'},
'501' : {'name':'Checksum', 'handler':'command_response_error'},
'900' : {'name':'EnterCode', 'handler':'send_code'},
'912' : {'name':'PGMEnterCode', 'handler':'send_code'},
#ZONE UPDATES
'601' : {'name':'Zone Alarm', 'handler':'zone_state_change', 'status':{'alarm' : True}},
'602' : {'name':'Zone Alarm Restore', 'handler':'zone_state_change', 'status':{'alarm' : False}},
'603' : {'name':'Zone Tamper', 'handler':'zone_state_change', 'status':{'tamper' : True}},
'604' : {'name':'Zone Tamper Restore', 'handler':'zone_state_change', 'status':{'tamper' : False}},
'605' : {'name':'Zone Fault', 'handler':'zone_state_change', 'status':{'fault' : True}},
'606' : {'name':'Zone Fault Restore', 'handler':'zone_state_change', 'status':{'fault' : False}},
'609' : {'name':'Zone Open', 'handler':'zone_state_change', 'status':{'open' : True}},
'610' : {'name':'Zone Restored', 'handler':'zone_state_change', 'status':{'open' : False}},
#PARTITION UPDATES
'650' : {'name':'Ready', 'handler':'partition_state_change', 'status':{'ready' : True, 'alpha' : 'Ready'}},
'651' : {'name':'Not Ready', 'handler':'partition_state_change', 'status':{'ready' : False, 'alpha' : 'Not Ready'}},
'652' : {'name':'Armed', 'handler':'partition_state_change'},
'653' : {'name':'Ready - Force Arming Enabled', 'handler':'partition_state_change', 'status':{'ready': True, 'alpha' : 'Ready - Force Arm'}},
'654' : {'name':'Alarm', 'handler':'partition_state_change', 'status':{'alarm' : True, 'alpha' : 'Alarm'}},
'655' : {'name':'Disarmed', 'handler':'partition_state_change', 'status' : {'alarm' : False, 'armed_stay' : False, 'armed_zero_entry_delay': False, 'armed_away' : False, 'exit_delay' : False, 'entry_delay' : False, 'alpha' : 'Disarmed'}},
'656' : {'name':'Exit Delay in Progress', 'handler':'partition_state_change', 'status':{'exit_delay' : True, 'alpha' : 'Exit Delay In Progress'}},
'657' : {'name':'Entry Delay in Progress', 'handler':'partition_state_change', 'status':{'entry_delay' : True, 'alpha' : 'Entry Delay in Progress'}},
'663' : {'name':'ChimeOn', 'handler':'partition_state_change', 'status': {'chime': True}},
'664' : {'name':'ChimeOff', 'handler':'partition_state_change', 'status': {'chime': False}},
'673' : {'name':'Busy', 'handler':'partition_state_change', 'status': {'alpha': 'Busy'}},
'700' : {'name':'Armed by user', 'handler':'partition_state_change', 'status':{}},
'750' : {'name':'Disarmed by user', 'handler':'partition_state_change', 'status' : {'alarm' : False, 'armed_stay' : False, 'armed_away' : False, 'armed_zero_entry_delay': False, 'exit_delay' : False, 'entry_delay' : False, 'alpha' : 'Disarmed'}},
'751' : {'name':'Disarmed special', 'handler':'partition_state_change', 'status' : {'alarm' : False, 'armed_stay' : False, 'armed_away' : False, 'armed_zero_entry_delay': False, 'exit_delay' : False, 'entry_delay' : False, 'alpha' : 'Disarmed'}},
'840' : {'name':'Trouble LED', 'handler':'partition_state_change', 'status':{'trouble' : True}},
'841' : {'name':'Trouble Clear', 'handler':'partition_state_change', 'status':{'trouble' : False, 'ac_present': True}},
#GENERAL UPDATES
'621' : {'name':'FireAlarmButton', 'handler':'keypad_update', 'status':{'fire' : True, 'alarm': True, 'alpha' : 'Fire Alarm'}},
'622' : {'name':'FireAlarmButtonOff', 'handler':'keypad_update', 'status':{'fire' : False, 'alarm': False, 'alpha' : 'Fire Alarm Cleared'}},
'623' : {'name':'AuxAlarmButton', 'handler':'keypad_update', 'status':{'alarm': True, 'alpha' : 'Aux Alarm'}},
'624' : {'name':'AuxAlarmButtonOff', 'handler':'keypad_update', 'status':{'alarm': False, 'alpha' : 'Aux Alarm Cleared'}},
'625' : {'name':'PanicAlarmButton', 'handler':'keypad_update', 'status':{'alarm': True, 'alpha' : 'Panic Alarm'}},
'626' : {'name':'PanicAlarmButtonOff', 'handler':'keypad_update', 'status':{'alarm': False, 'alpha' : 'Panic Alarm Cleared'}},
'631' : {'name':'SmokeAlarmButton', 'handler':'keypad_update', 'status':{'alarm': True, 'alpha' : 'Smoke Alarm'}},
'632' : {'name':'SmokeAlarmButtonOff', 'handler':'keypad_update', 'status':{'alarm': False, 'alpha' : 'Smoke Alarm Cleared'}},
'800' : {'name':'LowBatTrouble', 'handler':'keypad_update', 'status':{'bat_trouble': True, 'alpha' : 'Low Battery'}},
'801' : {'name':'LowBatTroubleOff', 'handler':'keypad_update', 'status':{'bat_trouble': False, 'alpha' : 'Low Battery Cleared'}},
'802' : {'name':'ACTrouble', 'handler':'keypad_update', 'status':{'ac_present': False, 'alpha' : 'AC Power Lost'}},
'803' : {'name':'ACTroubleOff', 'handler':'keypad_update', 'status':{'ac_present': True, 'alpha' : 'AC Power Restored'}},
'829' : {'name':'SystemTamper', 'handler':'keypad_update', 'status':{'alpha' : 'System tamper'}},
'830' : {'name':'SystemTamperOff', 'handler':'keypad_update', 'status':{'alpha' : 'System tamper Restored'}},
'849' : {'name':'TroubleVerbose', 'handler':'keypad_update', 'status':None}
}
evl_verboseTrouble = {
0 : 'Service is Required',
1 : 'AC Power Lost',
2 : 'Telephone Line Fault',
3 : 'Failure to communicate',
4 : 'Zone/Sensor Fault',
5 : 'Zone/Sensor Tamper',
6 : 'Zone/Sensor Low Battery',
7 : 'Loss of time'
}
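# Illustrative helper sketch (not part of the original module): shows how the
# command table above might be framed into a wire-format TPI message. The
# framing assumed here (command code + data + two-digit hex checksum of the
# summed ASCII values, terminated by CRLF) follows the common Envisalink TPI
# convention and should be checked against the TPI documentation before use.
def build_tpi_message(command_name, data=''):
    body = evl_Commands[command_name] + data
    checksum = '%02X' % (sum(ord(ch) for ch in body) & 0xFF)
    return body + checksum + '\r\n'

# Example: build_tpi_message('ArmStay', '1') would request arming partition 1 in stay mode.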
|
py | 1a3e32cf80e82d30cbdcdff0d4ce1ec34000b3f2 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from ..common._container import SklearnModelContainer
from ..common._topology import *
# Pipeline
from sklearn import pipeline
# Linear classifiers
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
# Linear regressors
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import LassoLars
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import SGDRegressor
from sklearn.svm import LinearSVR
# Tree-based models
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
# Support vector machines
from sklearn.svm import SVC, SVR, NuSVC, NuSVR
# K-nearest neighbors
from sklearn.neighbors import KNeighborsRegressor
# Naive Bayes
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
# Operators for preprocessing and feature engineering
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MaxAbsScaler
from lightgbm import LGBMClassifier, LGBMRegressor
# In most cases, scikit-learn operator produces only one output. However, each classifier has basically two outputs;
# one is the predicted label and the other one is the probabilities of all possible labels. Here is a list of supported
# scikit-learn classifiers. In the parsing stage, we produce two outputs for objects included in the following list and
# one output for everything not in the list.
sklearn_classifier_list = [LogisticRegression, SGDClassifier, LinearSVC, SVC, NuSVC,
GradientBoostingClassifier, RandomForestClassifier, DecisionTreeClassifier,
ExtraTreesClassifier, LGBMClassifier, BernoulliNB, MultinomialNB]
# Associate scikit-learn types with our operator names. If two scikit-learn models share a single name, it means they
# are equivalent in terms of conversion.
sklearn_operator_name_map = {RobustScaler: 'SklearnRobustScaler',
StandardScaler: 'SklearnScaler',
LogisticRegression: 'SklearnLinearClassifier',
SGDClassifier: 'SklearnLinearClassifier',
LinearSVC: 'SklearnLinearSVC',
OneHotEncoder: 'SklearnOneHotEncoder',
DictVectorizer: 'SklearnDictVectorizer',
Imputer: 'SklearnImputer',
LabelEncoder: 'SklearnLabelEncoder',
SVC: 'SklearnSVC',
NuSVC: 'SklearnSVC',
SVR: 'SklearnSVR',
NuSVR: 'SklearnSVR',
LinearSVR: 'SklearnLinearSVR',
ElasticNet: 'SklearnElasticNetRegressor',
LinearRegression: 'SklearnLinearRegressor',
LassoLars: 'SklearnLassoLars',
Ridge: 'SklearnRidge',
SGDRegressor: 'SklearnLinearRegressor',
Normalizer: 'SklearnNormalizer',
DecisionTreeClassifier: 'SklearnDecisionTreeClassifier',
DecisionTreeRegressor: 'SklearnDecisionTreeRegressor',
RandomForestClassifier: 'SklearnRandomForestClassifier',
RandomForestRegressor: 'SklearnRandomForestRegressor',
ExtraTreesClassifier: 'SklearnExtraTreesClassifier',
ExtraTreesRegressor: 'SklearnExtraTreesRegressor',
GradientBoostingClassifier: 'SklearnGradientBoostingClassifier',
GradientBoostingRegressor: 'SklearnGradientBoostingRegressor',
KNeighborsRegressor: 'SklearnKNeighborsRegressor',
MultinomialNB: 'SklearnMultinomialNB',
BernoulliNB: 'SklearnBernoulliNB',
Binarizer: 'SklearnBinarizer',
LGBMClassifier: 'LgbmClassifier',
LGBMRegressor: 'LgbmRegressor',
PCA: 'SklearnPCA',
TruncatedSVD: 'SklearnTruncatedSVD',
MinMaxScaler: 'SklearnMinMaxScaler',
MaxAbsScaler: 'SklearnMaxAbsScaler'}
def _get_sklearn_operator_name(model_type):
'''
Get operator name of the input argument
:param model_type: A scikit-learn object (e.g., SGDClassifier and Binarizer)
:return: A string which stands for the type of the input model in our conversion framework
'''
if model_type not in sklearn_operator_name_map:
print(sklearn_operator_name_map)
raise ValueError('No proper operator name found for %s' % model_type)
return sklearn_operator_name_map[model_type]
def _parse_sklearn_simple_model(scope, model, inputs):
'''
This function handles all non-pipeline models.
:param scope: Scope object
:param model: A scikit-learn object (e.g., OneHotEncoder and LogisticRegression)
:param inputs: A list of variables
:return: A list of output variables which will be passed to next stage
'''
print('simple model: %s ' % type(model))
this_operator = scope.declare_local_operator(_get_sklearn_operator_name(type(model)), model)
this_operator.inputs = inputs
if type(model) in sklearn_classifier_list:
# For classifiers, we may have two outputs, one for label and the other one for probabilities of all classes.
# Notice that their types here are not necessarily correct and they will be fixed in shape inference phase
label_variable = scope.declare_local_variable('label', FloatTensorType())
probability_map_variable = scope.declare_local_variable('probabilities', FloatTensorType())
this_operator.outputs.append(label_variable)
this_operator.outputs.append(probability_map_variable)
else:
# We assume that all scikit-learn operator can only produce a single float tensor.
variable = scope.declare_local_variable('variable', FloatTensorType())
this_operator.outputs.append(variable)
return this_operator.outputs
def _parse_sklearn_pipeline(scope, model, inputs):
'''
The basic ideas of scikit-learn parsing:
1. Sequentially go through all stages defined in the considered scikit-learn pipeline
2. The output variables of one stage will be fed into its next stage as the inputs.
:param scope: Scope object defined in _topology.py
:param model: scikit-learn pipeline object
:param inputs: A list of Variable objects
:return: A list of output variables produced by the input pipeline
'''
print('pipeline: %s ' % type(model))
for step in model.steps:
inputs = _parse_sklearn(scope, step[1], inputs)
return inputs
def _parse_sklearn(scope, model, inputs):
'''
This is a delegate function. It does nothing but invoke the correct parsing function according to the input
model's type.
:param scope: Scope object
:param model: A scikit-learn object (e.g., OneHotEncoder and LogisticRegression)
:param inputs: A list of variables
:return: The output variables produced by the input model
'''
if isinstance(model, pipeline.Pipeline):
return _parse_sklearn_pipeline(scope, model, inputs)
else:
return _parse_sklearn_simple_model(scope, model, inputs)
def parse_sklearn(model, initial_types=None, targeted_onnx=onnx.__version__, custom_conversion_functions=None, custom_shape_calculators=None):
# Put scikit-learn object into an abstract container so that our framework can work seamlessly on models created
# with different machine learning tools.
raw_model_container = SklearnModelContainer(model)
# Declare a computational graph. It will become a representation of the input scikit-learn model after parsing.
topology = Topology(raw_model_container, initial_types=initial_types, targeted_onnx=targeted_onnx,
custom_conversion_functions = custom_conversion_functions, custom_shape_calculators = custom_shape_calculators)
# Declare an object to provide variables' and operators' naming mechanism. In contrast to CoreML, one global scope
# is enough for parsing scikit-learn models.
scope = topology.declare_scope('__root__')
# Declare input variables. They should be the inputs of the scikit-learn model you want to convert into ONNX
inputs = []
for var_name, initial_type in initial_types:
inputs.append(scope.declare_local_variable(var_name, initial_type))
# The object raw_model_container is a part of the topology we're going to return. We use it to store the inputs of
# the scikit-learn's computational graph.
for variable in inputs:
raw_model_container.add_input(variable)
# Parse the input scikit-learn model as a Topology object.
outputs = _parse_sklearn(scope, model, inputs)
# The object raw_model_container is a part of the topology we're going to return. We use it to store the outputs of
# the scikit-learn's computational graph.
for variable in outputs:
raw_model_container.add_output(variable)
return topology
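# Illustrative usage sketch (not part of the original module): parsing a
# scikit-learn pipeline into a Topology. FloatTensorType is available in this
# module (it is used above when declaring variables); the model, variable name
# and feature count below are placeholders.
def _example_parse_pipeline():
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.linear_model import LogisticRegression
    # A fitted estimator is normally expected; fitting is omitted here for brevity.
    model = make_pipeline(StandardScaler(), LogisticRegression())
    initial_types = [('input', FloatTensorType([1, 4]))]
    return parse_sklearn(model, initial_types=initial_types)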
|
py | 1a3e3317054907d1384882a7b4035d0f2593b6ee | #!/usr/bin/env python
# coding: utf-8
#takes as input a csv or tsv file and an e-value cutoff, loads the data into pandas and filters the dataframe by this value.
#writes a clean csv/tsv file. If imported as a library, it can take a pandas df as input and return a clean pandas df
import pandas as pd
from sys import argv
def loadData(file):
if file.endswith(".csv"):
df = pd.read_csv(file, sep=',', header=None)
elif file.endswith(".tsv"):
df = pd.read_csv(file, sep='\t', header=None)
df.columns = ["node1","qstart","qend","qlen","qseq","node2","eval","pident","bitscore","sstart","send","slen","length","sseq"]
df = df.drop(columns=["qend","qstart","qlen","qseq","pident","bitscore","sstart","send","slen","length","sseq"])
print(df)
return df
def filterevalue(df,eval):
print(f"## FILTER BY EVALUE {eval}")
to_drop = []
for i in range(len(df)):
evalu = df.iloc[i]['eval']
''' if the evalue of the result is above the indicated one then it is dropped '''
if float(evalu) > float(eval):
to_drop.append(i)
df = df.drop(df.index[to_drop])
print("Lenght of the dataframe after FILTER BY EVALUE: " + str(len(df)))
if len(df) == 0:
print("ERROR: Lenght of the dataframe = 0 - I can't generate the gephi/cytoscape network")
exit()
print('------------------------------')
return df
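# Illustrative library-style helper (not part of the original script), tying the
# two functions above together as described in the header comment: load a
# csv/tsv file and return the e-value-filtered dataframe without writing it out.
def filter_file(file_path, evalue_cutoff):
    df = loadData(file_path)
    return filterevalue(df, evalue_cutoff)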
if __name__ == "__main__":
if len(argv) < 3:
    print("This script takes as input a csv/tsv file and an e-value cutoff.\n"
          "Please run it in this format: python3 filterbyevalue.py [file] [evalue]\n"
          "Example: python3 filterbyevalue.py myfile.csv 1e-10")
    exit()
file = argv[1]
file_name = file.split(".")[0]
evalue = float(argv[2])
df = loadData(file)
result = filterevalue(df,evalue)
result.to_csv(f'{file_name}_filtered_{evalue}.csv', index=False)
|
py | 1a3e339f602593a304d90f5d3f30b231dd728f03 | import os
from flask import Flask, jsonify, request, abort
from components import CardsView, Card, CardHeader
app = Flask(__name__)
@app.route("/", methods=['POST'])
def index():
# process payload from archy
payload = request.json.get('payload', {})
args = payload.get('args', {})
links = [{
'address': '/list'
}]
viewProps = {'links': links}
view = CardsView(viewProps,
Card({},
CardHeader({
'title': 'Card Title 1',
'subtitle': 'card subtitle',
})),
Card({},
CardHeader({
'title': 'Card Title 2',
'subtitle': 'card subtitle',
})),
)
return jsonify(**view.to_dict())
if __name__ == '__main__':
port = int(os.environ.get('PORT', 3000))
app.run(host='0.0.0.0', port=port)
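# Illustrative client sketch (not part of the original app): one way to exercise
# the endpoint above once the server is running. It assumes the server listens on
# the default port 3000 and that the `requests` package is installed.
def example_request(base_url='http://localhost:3000'):
    import requests
    payload = {'payload': {'args': {}}}
    response = requests.post(base_url + '/', json=payload)
    # The endpoint returns the serialized CardsView component as JSON.
    return response.json()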
|
py | 1a3e36b2076efe5f59566b49945802b8e118a422 | #!/usr/bin/env python3
import shutil
import iterm2
async def main(connection):
component = iterm2.StatusBarComponent(
short_description="RootVolume Usage",
detailed_description="Show Root Volume Usage",
knobs=[],
exemplar="[RootVolume Usage]",
update_cadence=30,
identifier="koh-sh.iterm2-statusbar-scripts.rootvolume"
)
@iterm2.StatusBarRPC
async def showrootvolume(knobs):
rootusage = shutil.disk_usage('/')
lst = [round(x / 1024 / 1024 / 1024) for x in rootusage]
return ("RootVolume: {}/{} GB ({}%)".format(lst[1], lst[0], (round(lst[1] / lst[0] * 100))))
await component.async_register(connection, showrootvolume)
iterm2.run_forever(main)
|
py | 1a3e3838ee0a5836556a81f2d15304ceb2bebdc2 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://plankton-toolbox.org
# Copyright (c) 2010-2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import codecs
import toolbox_utils
import plankton_core
class ImportSharkWeb(plankton_core.DataImportPreparedBase):
""" Class for parsing sharkweb text files. """
def __init__(self):
""" """
# Initialize parent.
super(ImportSharkWeb, self).__init__()
# Information needed for parsing. List of lists with:
# Column 0: node level.
# Column 1: internal key.
# Column 2: view format.
# Column 3: source file column name. Multiple alternatives should be separated by '<or>'. TODO: '<or>' not implemented.
# Column 4: export column name. None = not used, empty string ('') = same as column 1 (internal key).
self._parsing_info = [
['visit', 'visit_year', 'integer', 'visit_year', ''],
['visit', 'sample_date', 'date', 'sample_date', ''],
['visit', 'visit_month', 'integer', '', ''], # Calculate. Code below.
['visit', 'station_name', 'text', 'station_name', ''],
['visit', 'sample_latitude_dd', 'float', 'sample_latitude_dd', ''],
['visit', 'sample_longitude_dd', 'float', 'sample_longitude_dd', ''],
['visit', 'water_depth_m', 'float', 'water_depth_m', ''],
#
['sample', 'sample_id', 'text', 'sample_id', ''],
['sample', 'sample_min_depth_m', 'float', 'sample_min_depth_m', ''],
['sample', 'sample_max_depth_m', 'float', 'sample_max_depth_m', ''],
#
['variable', 'scientific_name', 'text', 'scientific_name', ''],
['variable', 'species_flag_code', 'text', 'species_flag_code', ''],
['variable', 'size_class', 'text', 'size_class', ''],
['variable', 'trophic_type', 'text', 'trophic_type_code', ''],
#
['variable', 'parameter', 'text', 'parameter', ''],
['variable', 'value', 'float', 'value', ''],
['variable', 'unit', 'text', 'unit', ''],
#
['variable', 'plankton_group', 'text', '', ''], # Calculate. Code below.
['variable', 'taxon_kingdom', 'text', 'taxon_kingdom', ''],
['variable', 'taxon_phylum', 'text', 'taxon_phylum', ''],
['variable', 'taxon_class', 'text', 'taxon_class', ''],
['variable', 'taxon_order', 'text', 'taxon_order', ''],
['variable', 'taxon_family', 'text', 'taxon_family', ''],
['variable', 'taxon_genus', 'text', 'taxon_genus', ''],
['variable', 'taxon_hierarchy', 'text', 'taxon_hierarchy', ''],
#
['variable', 'sampling_laboratory', 'text', 'sampling_laboratory_name_sv', ''],
['variable', 'analytical_laboratory', 'text', 'analytical_laboratory_name_sv', ''],
['variable', 'analysis_date', 'text', 'analysis_date', ''],
['variable', 'analysed_by', 'text', 'analysed_by', ''],
]
# Keys:
self._visit_key_fields = ['sample_date', 'station_name']
self._sample_key_fields = ['sample_date', 'station_name', 'sample_id', 'sample_min_depth_m', 'sample_max_depth_m', 'sample_id']
#
self.clear()
def clear(self):
""" """
self._header = []
self._rows = []
def read_file(self, file_name = None):
""" """
if file_name == None:
raise UserWarning('File name is missing.')
input_file = None
try:
### txtencode = toolbox_settings.ToolboxSettings().getValue('General:Character encoding, txt-files', 'cp1252')
txtencode = 'cp1252'
input_file = codecs.open(file_name, mode = 'r', encoding = txtencode)
# Read data header. Same header used for data and aggregated data.
separator = '\t' # Use tab as the default item separator.
first_row = input_file.readline()
if ';' in first_row:
separator = ';' # Use ';' as item separator.
#
self._header = []
for headeritem in first_row.split(separator):
item = headeritem.strip()
self._header.append(item)
# Read data rows. Continue until empty line occurs.
self._rows = []
for row in input_file.readlines():
rowitems = []
for item in row.split(separator):
rowitems.append(item.strip())
self._rows.append(rowitems)
#
except (IOError, OSError):
raise
finally:
if input_file: input_file.close()
def create_tree_dataset(self, dataset, update_trophic_type):
""" """
try:
# Base class must know header for _asText(), etc.
# self._set_header(self._header)
# Iterate over rows in imported_table.
for row in self._rows:
row_dict = dict(zip(self._header, row))
# Get or create nodes.
currentvisit = None
currentsample = None
currentvariable = None
# Check if visit exists. Create or reuse.
keystring = ''
delimiter = ''
for key_field in self._visit_key_fields:
keystring += delimiter + row_dict.get(key_field, '')
delimiter = '<+>'
#
currentvisit = dataset.get_visit_lookup(keystring)
if not currentvisit:
currentvisit = plankton_core.VisitNode()
dataset.add_child(currentvisit)
currentvisit.set_id_string(keystring)
# Check if sample exists. Create or reuse.
keystring = ''
delimiter = ''
for key_field in self._sample_key_fields:
keystring += delimiter + row_dict.get(key_field, '')
delimiter = '<+>'
#
currentsample = dataset.get_sample_lookup(keystring)
if not currentsample:
currentsample = plankton_core.SampleNode()
currentvisit.add_child(currentsample)
currentsample.set_id_string(keystring)
# Add all variables in row.
currentvariable = plankton_core.VariableNode()
currentsample.add_child(currentvariable)
# === Parse row and add fields on nodes. ===
for parsinginforow in self._parsing_info:
#
value = row_dict.get(parsinginforow[3], '')
# Fix float.
if parsinginforow[2] == 'float':
value = value.replace(',', '.')
# Calculate some values.
if parsinginforow[1] == 'visit_month':
try:
value = row_dict.get('sample_date', '')
value = value[5:7]
except:
pass
if parsinginforow[1] == 'plankton_group':
try:
value = row_dict.get('scientific_name', '')
value = plankton_core.Species().get_plankton_group_from_taxon_name(value)
except:
pass
if parsinginforow[1] == 'analysed_by':
try:
if not value:
value = row_dict.get('taxonomist', '')
except:
pass
if parsinginforow[1] == 'trophic_type':
# Update trophic_type.
if parsinginforow[1] == 'trophic_type':
if update_trophic_type:
scientific_name = row_dict.get('scientific_name', '')
size_class = row_dict.get('size_class', '')
trophic_type = plankton_core.Species().get_bvol_value(scientific_name, size_class, 'trophic_type')
if trophic_type:
value = trophic_type # Use existing if not in local list.
# Replace empty with NS=Not specified.
if not value:
value = 'NS'
# Add at right level.
if parsinginforow[0] == 'visit':
currentvisit.add_data(parsinginforow[1], value)
#
if parsinginforow[0] == 'sample':
currentsample.add_data(parsinginforow[1], value)
#
if parsinginforow[0] == 'variable':
currentvariable.add_data(parsinginforow[1], value)
#
except Exception as e:
toolbox_utils.Logging().warning('Failed to parse dataset: %s' % (e.args[0]))
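# Illustrative usage sketch (not part of the original module). It assumes that
# plankton_core exposes a DatasetNode tree class providing the add_child() and
# lookup methods used by create_tree_dataset() above; the file path is a placeholder.
def load_sharkweb_export(file_path, update_trophic_type=False):
    importer = ImportSharkWeb()
    importer.read_file(file_name=file_path)
    dataset = plankton_core.DatasetNode()  # assumed dataset tree container
    importer.create_tree_dataset(dataset, update_trophic_type)
    return dataset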
|
py | 1a3e392ca536c1e4bb5dc1faf8f9ad942a4d2aaa | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class UnlabeledLoadStatistics(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, _class=None): # noqa: E501
"""UnlabeledLoadStatistics - a model defined in OpenAPI
:param _class: The _class of this UnlabeledLoadStatistics. # noqa: E501
:type _class: str
"""
self.openapi_types = {
'_class': str
}
self.attribute_map = {
'_class': '_class'
}
self.__class = _class
@classmethod
def from_dict(cls, dikt) -> 'UnlabeledLoadStatistics':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The UnlabeledLoadStatistics of this UnlabeledLoadStatistics. # noqa: E501
:rtype: UnlabeledLoadStatistics
"""
return util.deserialize_model(dikt, cls)
@property
def _class(self):
"""Gets the _class of this UnlabeledLoadStatistics.
:return: The _class of this UnlabeledLoadStatistics.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this UnlabeledLoadStatistics.
:param _class: The _class of this UnlabeledLoadStatistics.
:type _class: str
"""
self.__class = _class
|
py | 1a3e39e2ea67bc0f4701930b86b291dd14921349 | import os
def list_images(folder_path):
return [img_name for img_name in os.listdir(folder_path) if os.path.splitext(img_name)[1] in ['.jpg', '.JPG', '.png']]
def list_subfolders(folder_path, full_paths: bool = False):
return [subfolder for subfolder in os.listdir(folder_path) if os.path.isdir(os.path.join(folder_path, subfolder))] |
py | 1a3e3a6a695a3eb2566f668aafc8549ca6d367d4 | # Begin: Python 2/3 compatibility header small
# Get Python 3 functionality:
from __future__ import\
absolute_import, print_function, division, unicode_literals
from future.utils import raise_with_traceback, raise_from
# catch exception with: except Exception as e
from builtins import range, map, zip, filter
from io import open
import six
# End: Python 2/3 compatibility header small
import contextlib
import lasagne.layers
import lasagne.nonlinearities
from lasagne.nonlinearities import rectify
import theano.tensor as T
@contextlib.contextmanager
def ignore_sigmoids(layer):
if(hasattr(layer,'nonlinearity') and
layer.nonlinearity in [lasagne.nonlinearities.softmax,
lasagne.nonlinearities.sigmoid]):
print("Removing the sigmoids from output for the explanation approach.")
nonlinearity = layer.nonlinearity
layer.nonlinearity = lambda x: x
try:
yield layer
finally:
layer.nonlinearity = nonlinearity
else:
yield layer
def remove_sigmoids(layer):
if(hasattr(layer,'nonlinearity') and
layer.nonlinearity in [lasagne.nonlinearities.softmax,
lasagne.nonlinearities.sigmoid]):
layer.nonlinearity = lambda x: x
class GuidedReLU(lasagne.layers.MergeLayer):
"""
A merge layer with two input streams: the output of the first
stream is passed through wherever the second stream is greater
than 0, and set to 0 everywhere else (as used in guided
backpropagation).
"""
def __init__(self, input_layer, other_layer):
super(GuidedReLU, self).__init__([input_layer, other_layer])
def get_output_shape_for(self, input_shapes):
return input_shapes[0]
def get_output_for(self, inputs, **kwargs):
in1, in2 = inputs
out = T.switch(T.gt(in2, 0.), in1, 0.)
return out
class OppositeGuidedRelu(lasagne.layers.MergeLayer):
"""
A merge layer with two input streams: the output of the first
stream is passed through wherever the second stream is less than
or equal to 0, and set to 0 everywhere else (the complement of
GuidedReLU).
"""
def __init__(self, input_layer, other_layer):
super(OppositeGuidedRelu, self).__init__([input_layer, other_layer])
def get_output_shape_for(self, input_shapes):
return input_shapes[0]
def get_output_for(self, inputs, **kwargs):
in1, in2 = inputs
out = T.switch(T.gt(in2, 0.), 0., in1)
return out
def has_ReLU(layer):
relus = [lasagne.nonlinearities.rectify, T.nnet.relu]
return (hasattr(layer, 'nonlinearity') and
layer.nonlinearity in relus)
def get_rectifier_copy_layer(input_layer, rectifier_layer):
if has_ReLU(rectifier_layer):
return GuidedReLU(input_layer, rectifier_layer)
return input_layer
def get_rectifier_opposite_layer(input_layer, rectifier_layer):
if has_ReLU(rectifier_layer):
return OppositeGuidedRelu(input_layer, rectifier_layer)
return None
def get_rectifier_layer(input_layer, rectifier_layer):
if has_ReLU(rectifier_layer):
return lasagne.layers.NonlinearityLayer(input_layer,
nonlinearity=rectify)
return input_layer
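# Minimal illustrative sketch (not part of the original module): wires two
# placeholder input layers through GuidedReLU to show its masking behaviour.
# In guided backpropagation the second stream would carry the forward
# activations of a ReLU layer and the first stream the backpropagated signal.
def _demo_guided_relu():
    import numpy as np
    import theano
    l_signal = lasagne.layers.InputLayer(shape=(None, 4))      # backward signal
    l_activation = lasagne.layers.InputLayer(shape=(None, 4))  # forward activations
    l_guided = GuidedReLU(l_signal, l_activation)
    signal_var = T.matrix('signal')
    activation_var = T.matrix('activation')
    out = lasagne.layers.get_output(l_guided, {l_signal: signal_var,
                                               l_activation: activation_var})
    fn = theano.function([signal_var, activation_var], out)
    signal = np.array([[1.0, -2.0, 3.0, -4.0]], dtype=theano.config.floatX)
    activation = np.array([[0.5, 0.5, 0.0, -1.0]], dtype=theano.config.floatX)
    # Positions where the activation is > 0 keep the signal: [[1., -2., 0., 0.]]
    return fn(signal, activation)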
|
py | 1a3e3a785ead2a9ee4f94a84fe5ca88be975928c | #-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <[email protected]>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2011 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from eoxserver.core import Component, implements
from eoxserver.services.ows.interfaces import (
ServiceHandlerInterface, GetServiceHandlerInterface
)
from eoxserver.services.ows.wms.basehandlers import (
WMSGetCapabilitiesHandlerBase
)
class WMS13GetCapabilitiesHandler(WMSGetCapabilitiesHandlerBase, Component):
implements(ServiceHandlerInterface)
implements(GetServiceHandlerInterface)
versions = ("1.3", "1.3.0",)
|
py | 1a3e3ecbb5e49feea7712a6ac0ee79447f1116ac | # -*- coding: utf-8 -*-
#
# s3fields unit tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/s3/s3fields.py
#
import unittest
from gluon.languages import lazyT
from gluon.dal import Query
from s3.s3fields import *
# =============================================================================
class S3RepresentTests(unittest.TestCase):
# -------------------------------------------------------------------------
def setUp(self):
T = current.T
self.test_opts = {
1: "Test1",
2: "Test2",
3: "Test3"
}
current.auth.override = True
s3db = current.s3db
otable = s3db.org_organisation
org1 = Storage(name="Represent Test Organisation1")
org1_id = otable.insert(**org1)
org1.update(id=org1_id)
s3db.update_super(otable, org1)
org2 = Storage(name="Represent Test Organisation2")
org2_id = otable.insert(**org2)
org2.update(id=org2_id)
s3db.update_super(otable, org2)
self.id1 = org1_id
self.id2 = org2_id
self.name1 = org1.name
self.name2 = org2.name
# -------------------------------------------------------------------------
def testSetup(self):
""" Check lazy setup method """
# Check for options
r = S3Represent(options=self.test_opts)
self.assertFalse(r.setup)
r._setup()
self.assertTrue(r.setup)
self.assertEqual(r.tablename, None)
self.assertEqual(r.options, self.test_opts)
# Check for lookups
r = S3Represent(lookup="org_organisation")
self.assertFalse(r.setup)
self.assertEqual(r.options, None)
self.assertEqual(r.tablename, "org_organisation")
self.assertEqual(r.key, None)
self.assertEqual(r.fields, None)
self.assertEqual(r.labels, None)
self.assertEqual(r.table, None)
r._setup()
self.assertTrue(r.setup)
self.assertEqual(r.options, None)
self.assertEqual(r.tablename, "org_organisation")
self.assertEqual(r.key, "id")
self.assertEqual(r.fields, ["name"])
self.assertEqual(r.labels, None)
self.assertEqual(r.table, current.db.org_organisation)
# -------------------------------------------------------------------------
def testOptions(self):
""" Test option field representation """
r = S3Represent(options=self.test_opts, none="NONE")
# Standard variants
self.assertEqual(r(1), "Test1")
self.assertEqual(r.multiple([1,2,3]), "Test1, Test2, Test3")
self.assertEqual(r.bulk([1,2,3]),
{
1: "Test1",
2: "Test2",
3: "Test3",
None: "NONE",
}
)
# list:type
r = S3Represent(options=self.test_opts,
none="NONE", multiple=True)
# Should work with both, single value and list
self.assertEqual(r(1), "Test1")
self.assertEqual(r([1,2]), "Test1, Test2")
# Multiple does always expect list of lists
self.assertRaises(ValueError, r.multiple, [1,2,3])
# Check multiple with list:type
result = r.multiple([[1,2]]).split(", ")
self.assertTrue("Test1" in result)
self.assertTrue("Test2" in result)
self.assertEqual(len(result), 2)
# Check that multiple with list:type de-duplicates properly
result = r.multiple([[1,2], [2,3]]).split(", ")
self.assertTrue("Test1" in result)
self.assertTrue("Test2" in result)
self.assertTrue("Test3" in result)
self.assertEqual(len(result), 3)
# Check bulk with list:type
result = r.bulk([[1,2], [2,3]])
self.assertEqual(len(result), 4)
self.assertTrue(1 in result)
self.assertEqual(result[1], "Test1")
self.assertTrue(2 in result)
self.assertEqual(result[2], "Test2")
self.assertTrue(3 in result)
self.assertEqual(result[3], "Test3")
self.assertTrue(None in result)
self.assertEqual(result[None], "NONE")
# -------------------------------------------------------------------------
def testForeignKeys(self):
""" Test foreign key lookup representation """
r = S3Represent(lookup="org_organisation")
# Check lookup value by value
self.assertEqual(r(self.id1), self.name1)
self.assertEqual(r(self.id2), self.name2)
self.assertEqual(r.queries, 2)
# Check lookup of multiple values
self.assertEqual(r.multiple([self.id1, self.id2]),
"%s, %s" % (self.name1, self.name2))
# Should not have needed any additional queries
self.assertEqual(r.queries, 2)
# Check bulk lookup
result = r.bulk([self.id1, self.id2])
self.assertTrue(len(result), 3)
self.assertEqual(result[self.id1], self.name1)
self.assertEqual(result[self.id2], self.name2)
self.assertTrue(None in result)
# Should still not have needed any additional queries
self.assertEqual(r.queries, 2)
# Check that only one query is used for multiple values
r = S3Represent(lookup="org_organisation")
result = r.bulk([self.id1, self.id2])
self.assertTrue(len(result), 3)
self.assertEqual(r.queries, 1)
# Check translation
r = S3Represent(lookup="org_organisation", translate=True)
result = r(self.id1)
self.assertTrue(isinstance(result, lazyT))
self.assertEqual(result, current.T(self.name1))
def testRowsPrecedence(self):
# Check that rows get preferred over values
r = S3Represent(lookup="org_organisation")
otable = current.s3db.org_organisation
org1 = otable[self.id1]
org2 = otable[self.id2]
# Test single value
self.assertEqual(r(None, row=org1), self.name1)
self.assertEqual(r(self.id2, row=org1), self.name1)
# Test multiple
result = r.multiple(None, rows=[org1, org2])
self.assertTrue(isinstance(result, basestring))
self.assertTrue(", " in result)
result = result.split(", ")
self.assertEqual(len(result), 2)
self.assertTrue(self.name1 in result)
self.assertTrue(self.name2 in result)
result = r.multiple([self.id1], rows=[org1, org2])
self.assertTrue(isinstance(result, basestring))
self.assertTrue(", " in result)
result = result.split(", ")
self.assertEqual(len(result), 2)
self.assertTrue(self.name1 in result)
self.assertTrue(self.name2 in result)
# Test bulk
result = r.bulk(None, rows=[org1, org2])
self.assertTrue(len(result), 3)
self.assertEqual(result[self.id1], self.name1)
self.assertEqual(result[self.id2], self.name2)
self.assertTrue(None in result)
result = r.bulk([self.id1], rows=[org1, org2])
self.assertTrue(len(result), 3)
self.assertEqual(result[self.id1], self.name1)
self.assertEqual(result[self.id2], self.name2)
self.assertTrue(None in result)
# -------------------------------------------------------------------------
def testListReference(self):
""" Test Foreign Key Representation in list:reference types """
r = S3Represent(lookup="org_organisation",
multiple=True,
#linkto=URL(c="org", f="organisation", args=["[id]"]),
show_link=True)
a = current.request.application
# Single value gives a single result
result = r(self.id1)
self.assertTrue(isinstance(result, DIV))
self.assertEqual(len(result), 1)
self.assertTrue(isinstance(result[0], A))
self.assertEqual(result[0].attributes["_href"],
"/%s/org/organisation/%s" % (a, self.id1))
self.assertEqual(result[0].components[0],
"Represent Test Organisation1")
# Test with show_link=False
result = r(self.id1, show_link=False)
self.assertEqual(result, self.name1)
# List value gives a comma-separated list
result = r([self.id1, self.id2], show_link=False).split(", ")
self.assertEqual(len(result), 2)
self.assertTrue(self.name1 in result)
self.assertTrue(self.name2 in result)
values = [[self.id1, self.id2], [self.id2], [None, self.id1]]
# Multiple lists give a comma-separated list of unique values
result = r.multiple(values, show_link=False).split(", ")
self.assertEqual(len(result), 3)
self.assertTrue(self.name1 in result)
self.assertTrue(self.name2 in result)
self.assertTrue(current.messages.NONE in result)
# Bulk representation gives a dict of all unique values
result = r.bulk(values, show_link=False)
self.assertTrue(isinstance(result, dict))
self.assertEqual(len(result), 3)
self.assertEqual(result[self.id1], self.name1)
self.assertEqual(result[self.id2], self.name2)
self.assertTrue(None in result)
# Test render_list method
repr1 = r.render_list(values[0], result, show_link=False)
self.assertEqual(repr1, ", ".join([self.name1, self.name2]))
repr2 = r.render_list(values[1], result, show_link=False)
self.assertEqual(repr2, self.name2)
# Test render_list with show_link
result = r.bulk(values)
repr1 = r.render_list(values[0], result)
self.assertTrue(isinstance(repr1, DIV))
self.assertEqual(len(repr1), 3)
self.assertTrue(isinstance(repr1[0], A))
self.assertEqual(repr1[0].attributes["_href"],
"/%s/org/organisation/%s" % (a, self.id1))
self.assertEqual(repr1[0].components[0],
"Represent Test Organisation1")
self.assertEqual(repr1[1], ", ")
self.assertTrue(isinstance(repr1[2], A))
self.assertEqual(repr1[2].attributes["_href"],
"/%s/org/organisation/%s" % (a, self.id2))
self.assertEqual(repr1[2].components[0],
"Represent Test Organisation2")
# Check NONE-option
repr2 = r.render_list(values[2], result)
self.assertTrue(isinstance(repr2, DIV))
self.assertEqual(len(repr2), 3)
self.assertEqual(str(repr2[0]), str(current.messages.NONE))
# Check representation of None and empty lists
self.assertEqual(r(None, show_link=False), str(current.messages.NONE))
self.assertEqual(r([]), str(current.messages.NONE))
self.assertEqual(r.multiple([None], show_link=False), str(current.messages.NONE))
self.assertEqual(r.multiple([[]], show_link=False), str(current.messages.NONE))
# All that should have taken exactly 2 queries!
self.assertEqual(r.queries, 2)
# -------------------------------------------------------------------------
def tearDown(self):
current.db.rollback()
current.auth.override = False
# =============================================================================
class S3ExtractLazyFKRepresentationTests(unittest.TestCase):
""" Test lazy representation of foreign keys in datatables """
tablename = "export_lazy_fk_represent"
# -------------------------------------------------------------------------
@classmethod
def setUpClass(cls):
db = current.db
db.define_table(cls.tablename,
Field("location_id",
"reference gis_location"),
Field("organisation_id",
"reference org_organisation"),
Field("facility_type_id",
"list:reference org_facility_type"),
*s3_meta_fields())
# -------------------------------------------------------------------------
def setUp(self):
tablename = self.tablename
s3db = current.s3db
table = s3db[tablename]
s3db.add_components("org_organisation",
**{tablename: {"name": "test",
"joinby": "organisation_id",
},
}
)
current.auth.override = True
# Create locations
locations = (Storage(name="FK Represent TestLocation 1"),
Storage(name="FK Represent TestLocation 2"))
ltable = s3db.gis_location
for i in xrange(len(locations)):
location = locations[i]
location_id = ltable.insert(**location)
location["id"] = location_id
self.locations = locations
# Create facility types
fac_types = (Storage(name="FK Represent TestFacType P"),
Storage(name="FK Represent TestFacType Q"),
Storage(name="FK Represent TestFacType R"))
ttable = s3db.org_facility_type
for i in xrange(len(fac_types)):
fac_type = fac_types[i]
fac_type_id = ttable.insert(**fac_type)
fac_type["id"] = fac_type_id
self.fac_types = fac_types
# Create organisation
org = Storage(name="FK Represent TestOrg A")
otable = s3db.org_organisation
org_id = otable.insert(**org)
org["id"] = org_id
s3db.update_super(otable, org)
self.org = org
# Create test records
facs = (Storage(organisation_id=org.id,
facility_type_id=[fac_types[0].id, fac_types[1].id],
location_id=locations[0].id),
Storage(organisation_id=org.id,
facility_type_id=[fac_types[1].id, fac_types[2].id],
location_id=locations[1].id))
for i in xrange(len(facs)):
fac = facs[i]
fac_id = table.insert(**fac)
fac["id"] = fac_id
self.facs = facs
# -------------------------------------------------------------------------
def testRepresentReferenceSingleNoLinkto(self):
"""
Test Representation of reference, single value,
without linkto
"""
s3db = current.s3db
tablename = self.tablename
fname = "%s.organisation_id" % tablename
fac = self.facs[0]
resource = s3db.resource(tablename, id=fac.id)
renderer = S3Represent(lookup="org_organisation")
table = resource.table
table.organisation_id.represent = renderer
data = resource.select(["id", "organisation_id"],
limit=None,
represent=True)
result = data["rows"]
self.assertEqual(renderer.queries, 1)
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 1)
output = result[0]
self.assertTrue(isinstance(output, Storage))
self.assertTrue(fname in output)
self.assertEqual(output[fname], self.org.name)
# -------------------------------------------------------------------------
def testRepresentReferenceSingleLinktoOn(self):
"""
Test Representation of reference, single value,
with linkto
"""
s3db = current.s3db
tablename = self.tablename
fname = "%s.organisation_id" % tablename
fac = self.facs[0]
resource = s3db.resource(tablename, id=fac.id)
renderer = S3Represent(lookup="org_organisation",
#linkto=URL(c="org", f="organisation", args=["[id]"]),
show_link=True)
table = resource.table
table.organisation_id.represent = renderer
data = resource.select(["id", "organisation_id"],
limit=None,
represent=True)
result = data["rows"]
self.assertEqual(renderer.queries, 1)
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 1)
output = result[0]
self.assertTrue(isinstance(output, Storage))
self.assertTrue(fname in output)
representation = output[fname]
self.assertTrue(isinstance(representation, A))
self.assertEqual(representation.attributes["_href"],
"/%s/org/organisation/%s" %
(current.request.application, self.org.id))
self.assertEqual(representation.components[0],
self.org.name)
# -------------------------------------------------------------------------
def testRepresentReferenceSingleLinktoOff(self):
"""
Test Representation of reference, single value,
with linkto + show_link=False
"""
s3db = current.s3db
tablename = self.tablename
fname = "%s.organisation_id" % tablename
fac = self.facs[0]
resource = s3db.resource(tablename, id=fac.id)
renderer = S3Represent(lookup="org_organisation",
linkto=URL(c="org", f="organisation", args=["[id]"]))
table = resource.table
table.organisation_id.represent = renderer
data = resource.select(["id", "organisation_id"],
limit=None,
represent=True,
show_links=False)
result = data["rows"]
self.assertEqual(renderer.queries, 1)
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 1)
output = result[0]
self.assertTrue(isinstance(output, Storage))
self.assertTrue(fname in output)
self.assertEqual(output[fname], self.org.name)
# -------------------------------------------------------------------------
def testRepresentReferenceMultipleNoLinkto(self):
"""
Test Representation of reference, multiple values,
without linkto
"""
s3db = current.s3db
tablename = self.tablename
fname = "%s.location_id" % tablename
ftable = current.db[tablename]
renderer = S3Represent(lookup="gis_location")
ftable.location_id.represent = renderer
resource = s3db.resource("org_organisation", id=self.org.id)
data = resource.select(["id", "test.location_id"],
limit=None,
represent=True)
result = data["rows"]
self.assertEqual(renderer.queries, 1)
self.assertTrue(len(result), 1)
output = result[0]
self.assertTrue(fname in output)
names = output[fname].split(", ")
self.assertEqual(len(names), 2)
self.assertTrue(self.locations[0].name in names)
self.assertTrue(self.locations[1].name in names)
# -------------------------------------------------------------------------
def testRepresentReferenceMultipleLinktoOn(self):
"""
Test Representation of reference, multiple values,
with linkto
"""
s3db = current.s3db
tablename = self.tablename
fname = "%s.location_id" % tablename
ftable = current.db[tablename]
renderer = S3Represent(lookup="gis_location",
#linkto=URL(c="gis", f="location", args=["[id]"]),
show_link=True)
ftable.location_id.represent = renderer
resource = s3db.resource("org_organisation", id=self.org.id)
data = resource.select(["id", "name", "test.location_id"],
limit=None,
represent=True)
result = data["rows"]
self.assertEqual(renderer.queries, 1)
self.assertTrue(len(result), 1)
output = result[0]
self.assertTrue(fname in output)
names = output[fname]
self.assertTrue(isinstance(names, DIV))
from lxml import etree
tree = etree.fromstring("<div>%s</div>" % names)
links = tree.findall("a")
self.assertEqual(len(links), 2)
appname = current.request.application
a = lambda location: (location.name,
"/%s/gis/location/%s" % (appname, location.id))
types = dict(a(location) for location in self.locations)
for link in links:
name = link.text
self.assertTrue(name in types)
self.assertEqual(link.get("href", None),
types[name])
# -------------------------------------------------------------------------
def testRepresentReferenceMultipleLinktoOff(self):
"""
Test Representation of reference, multiple values,
with linkto + show_link=False
"""
s3db = current.s3db
tablename = self.tablename
fname = "%s.location_id" % tablename
ftable = current.db[tablename]
renderer = S3Represent(lookup="gis_location",
linkto=URL(c="gis", f="location", args=["[id]"]))
ftable.location_id.represent = renderer
resource = s3db.resource("org_organisation", id=self.org.id)
data = resource.select(["id", "name", "test.location_id"],
limit=None,
represent=True,
show_links=False)
result = data["rows"]
self.assertEqual(renderer.queries, 1)
self.assertTrue(len(result), 1)
output = result[0]
self.assertTrue(fname in output)
names = output[fname].split(", ")
self.assertEqual(len(names), 2)
self.assertTrue(self.locations[0].name in names)
self.assertTrue(self.locations[1].name in names)
# -------------------------------------------------------------------------
def testRepresentListReferenceSingleNoLinkto(self):
"""
Test Representation of list:reference, single value,
without linkto
"""
s3db = current.s3db
tablename = self.tablename
fname = "%s.facility_type_id" % tablename
fac = self.facs[0]
resource = s3db.resource(tablename, id=fac.id)
renderer = S3Represent(lookup="org_facility_type",
multiple=True)
table = resource.table
table.facility_type_id.represent = renderer
data = resource.select(["id", "facility_type_id"],
limit=None,
represent=True)
result = data["rows"]
self.assertEqual(renderer.queries, 1)
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 1)
output = result[0]
self.assertTrue(isinstance(output, Storage))
self.assertTrue(fname in output)
representation = output[fname].split(", ")
self.assertEqual(len(representation), 2)
self.assertTrue(self.fac_types[0].name in representation)
self.assertTrue(self.fac_types[1].name in representation)
# -------------------------------------------------------------------------
def testRepresentListReferenceSingleLinktoOn(self):
"""
Test Representation of list:reference, single value,
with linkto
"""
s3db = current.s3db
tablename = self.tablename
fname = "%s.facility_type_id" % tablename
fac = self.facs[0]
resource = s3db.resource(tablename, id=fac.id)
renderer = S3Represent(lookup="org_facility_type",
multiple=True,
#linkto=URL(c="org", f="facility_type", args=["[id]"]),
show_link=True)
table = resource.table
table.facility_type_id.represent = renderer
data = resource.select(["id", "facility_type_id"],
limit=None,
represent=True)
result = data["rows"]
self.assertEqual(renderer.queries, 1)
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 1)
output = result[0]
self.assertTrue(isinstance(output, Storage))
self.assertTrue(fname in output)
names = output[fname]
self.assertTrue(isinstance(names, DIV))
from lxml import etree
tree = etree.fromstring("<div>%s</div>" % names)
links = tree.findall("a")
self.assertEqual(len(links), 2)
appname = current.request.application
a = lambda fac_type: (fac_type.name,
"/%s/org/facility_type/%s" % (appname, fac_type.id))
types = dict(a(fac_type) for fac_type in self.fac_types)
for link in links:
name = link.text
self.assertTrue(name in types)
self.assertEqual(link.get("href", None),
types[name])
# -------------------------------------------------------------------------
def testRepresentListReferenceSingleLinktoOff(self):
"""
Test Representation of list:reference, single value,
with linkto + show_link=False
"""
s3db = current.s3db
tablename = self.tablename
fname = "%s.facility_type_id" % tablename
fac = self.facs[0]
resource = s3db.resource(tablename, id=fac.id)
renderer = S3Represent(lookup="org_facility_type",
multiple=True,
linkto=URL(c="org", f="facility_type", args=["[id]"]))
table = resource.table
table.facility_type_id.represent = renderer
data = resource.select(["id", "facility_type_id"],
limit=None,
represent=True,
show_links=False)
result = data["rows"]
self.assertEqual(renderer.queries, 1)
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 1)
output = result[0]
self.assertTrue(isinstance(output, Storage))
self.assertTrue(fname in output)
representation = output[fname].split(", ")
self.assertEqual(len(representation), 2)
self.assertTrue(self.fac_types[0].name in representation)
self.assertTrue(self.fac_types[1].name in representation)
# -------------------------------------------------------------------------
def testRepresentListReferenceMultipleNoLinkto(self):
"""
Test Representation of list:reference, multiple values,
without linkto
"""
s3db = current.s3db
tablename = self.tablename
fname = "%s.location_id" % tablename
ftable = current.db[tablename]
renderer = S3Represent(lookup="gis_location",
linkto=URL(c="gis", f="location", args=["[id]"]))
ftable.location_id.represent = renderer
resource = s3db.resource("org_organisation", id=self.org.id)
data = resource.select(["id", "name", "test.location_id"],
limit=None,
represent=True,
show_links=False)
result = data["rows"]
self.assertEqual(renderer.queries, 1)
self.assertTrue(len(result), 1)
output = result[0]
self.assertTrue(fname in output)
names = output[fname].split(", ")
self.assertEqual(len(names), 2)
self.assertTrue(self.locations[0].name in names)
self.assertTrue(self.locations[1].name in names)
# -------------------------------------------------------------------------
    def testRepresentListReferenceMultipleValuesNoLinkto(self):
        """
            Test Representation of list:reference, multiple values
            (via component), without linkto
"""
s3db = current.s3db
tablename = self.tablename
fname = "%s.facility_type_id" % tablename
ftable = current.db[tablename]
renderer = S3Represent(lookup="org_facility_type",
multiple=True)
ftable.facility_type_id.represent = renderer
org = self.org
resource = s3db.resource("org_organisation", id=org.id)
data = resource.select(["id", "test.facility_type_id"],
limit=None,
represent=True)
result = data["rows"]
self.assertEqual(renderer.queries, 1)
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 1)
output = result[0]
self.assertTrue(isinstance(output, Storage))
self.assertTrue(fname in output)
representation = output[fname].split(", ")
self.assertEqual(len(representation), 3)
self.assertTrue(self.fac_types[0].name in representation)
self.assertTrue(self.fac_types[1].name in representation)
self.assertTrue(self.fac_types[2].name in representation)
# -------------------------------------------------------------------------
def testRepresentListReferenceMultipleLinktoOn(self):
"""
Test Representation of list:reference, multiple values,
with linkto
"""
        s3db = current.s3db
tablename = self.tablename
fname = "%s.facility_type_id" % tablename
ftable = current.db[tablename]
renderer = S3Represent(lookup="org_facility_type",
multiple=True,
#linkto=URL(c="org", f="facility_type", args=["[id]"]),
show_link=True)
ftable.facility_type_id.represent = renderer
org = self.org
resource = s3db.resource("org_organisation", id=org.id)
data = resource.select(["id", "test.facility_type_id"],
limit=None,
represent=True)
result = data["rows"]
self.assertEqual(renderer.queries, 1)
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 1)
output = result[0]
self.assertTrue(isinstance(output, Storage))
self.assertTrue(fname in output)
names = output[fname]
self.assertTrue(isinstance(names, DIV))
from lxml import etree
tree = etree.fromstring("<div>%s</div>" % names)
links = tree.findall("a")
self.assertEqual(len(links), 3)
appname = current.request.application
a = lambda fac_type: (fac_type.name,
"/%s/org/facility_type/%s" % (appname, fac_type.id))
types = dict(a(fac_type) for fac_type in self.fac_types)
for link in links:
name = link.text
self.assertTrue(name in types)
self.assertEqual(link.get("href", None),
types[name])
# -------------------------------------------------------------------------
    def testRepresentListReferenceMultipleLinktoOff(self):
        """
            Test Representation of list:reference, multiple values,
            with linkto + show_link=False
        """
s3db = current.s3db
tablename = self.tablename
fname = "%s.facility_type_id" % tablename
ftable = current.db[tablename]
renderer = S3Represent(lookup="org_facility_type",
multiple=True,
linkto=URL(c="org", f="facility_type", args=["[id]"]))
ftable.facility_type_id.represent = renderer
org = self.org
resource = s3db.resource("org_organisation", id=org.id)
data = resource.select(["id", "test.facility_type_id"],
limit=None,
represent=True,
show_links=False)
result = data["rows"]
self.assertEqual(renderer.queries, 1)
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 1)
output = result[0]
self.assertTrue(isinstance(output, Storage))
self.assertTrue(fname in output)
representation = output[fname].split(", ")
self.assertEqual(len(representation), 3)
self.assertTrue(self.fac_types[0].name in representation)
self.assertTrue(self.fac_types[1].name in representation)
self.assertTrue(self.fac_types[2].name in representation)
# -------------------------------------------------------------------------
def tearDown(self):
del current.model.components["org_organisation"]["test"]
current.db.rollback()
current.auth.override = False
# -------------------------------------------------------------------------
@classmethod
def tearDownClass(cls):
try:
current.db[cls.tablename].drop()
except:
pass
#=============================================================================
class S3ExportLazyFKRepresentationTests(unittest.TestCase):
""" Test lazy representations of foreign keys in exports """
# -------------------------------------------------------------------------
def setUp(self):
self.tablename = tablename = "export_lazy_fk_represent"
db = current.db
db.define_table(tablename,
Field("location_id",
"reference gis_location"),
Field("organisation_id",
"reference org_organisation"),
Field("facility_type_id",
"list:reference org_facility_type"),
*s3_meta_fields())
current.auth.override = True
s3db = current.s3db
# Create locations
locations = (Storage(name="FK Represent TestLocation 1"),
Storage(name="FK Represent TestLocation 2"))
ltable = s3db.gis_location
for i in xrange(len(locations)):
location = locations[i]
location_id = ltable.insert(**location)
location["id"] = location_id
self.locations = Storage([(l.id, l) for l in locations])
# Create facility types
fac_types = (Storage(name="FK Represent TestFacType P"),
Storage(name="FK Represent TestFacType Q"),
Storage(name="FK Represent TestFacType R"))
ttable = s3db.org_facility_type
for i in xrange(len(fac_types)):
fac_type = fac_types[i]
fac_type_id = ttable.insert(**fac_type)
fac_type["id"] = fac_type_id
self.fac_types = Storage([(t.id, t) for t in fac_types])
# Create organisation
org = Storage(name="FK Represent TestOrg B")
otable = s3db.org_organisation
org_id = otable.insert(**org)
org["id"] = org_id
s3db.update_super(otable, org)
self.org = org
# Create test records
facs = (Storage(organisation_id=org.id,
facility_type_id=[fac_types[0].id, fac_types[1].id],
location_id=locations[0].id),
Storage(organisation_id=org.id,
facility_type_id=[fac_types[1].id, fac_types[2].id],
location_id=locations[1].id))
for i in xrange(len(facs)):
fac = facs[i]
fac_id = db[tablename].insert(**fac)
fac["id"] = fac_id
self.facs = facs
# -------------------------------------------------------------------------
def testRepresentReferenceSingleNoLinkto(self):
"""
            Test lazy representation of foreign keys (reference and
            list:reference fields) in resource exports
"""
s3db = current.s3db
resource = s3db.resource(self.tablename,
id=[fac.id for fac in self.facs])
table = resource.table
# Attach lazy renderers
org_id_renderer = S3Represent(lookup="org_organisation")
table.organisation_id.represent = org_id_renderer
fac_type_renderer = S3Represent(lookup="org_facility_type",
multiple=True)
table.facility_type_id.represent = fac_type_renderer
loc_id_renderer = S3Represent(lookup="gis_location",
linkto=URL(c="gis", f="location", args=["[id]"]))
table.location_id.represent = loc_id_renderer
# Export with IDs
current.xml.show_ids = True
tree = resource.export_tree(dereference=False)
root = tree.getroot()
locations = self.locations
fac_types = self.fac_types
org = self.org
# Check correct representation in exports
for fac in self.facs:
# Find the element
elem = root.findall("resource[@id='%s']" % fac.id)
elem = elem[0] if len(elem) else None
self.assertNotEqual(elem, None)
find = lambda name: elem.findall("reference[@field='%s']" % name)
organisation_id = find("organisation_id")
organisation_id = organisation_id[0] \
if len(organisation_id) else None
self.assertNotEqual(organisation_id, None)
self.assertEqual(organisation_id.text, org.name)
location_id = find("location_id")
location_id = location_id[0] \
if len(location_id) else None
self.assertNotEqual(location_id, None)
location = locations[fac.location_id]
self.assertEqual(location_id.text, location.name)
facility_type_id = find("facility_type_id")
facility_type_id = facility_type_id[0] \
if len(facility_type_id) else None
self.assertNotEqual(facility_type_id, None)
ftypes = ", ".join([fac_types[i].name
for i in fac.facility_type_id])
self.assertEqual(facility_type_id.text, ftypes)
# Check that only 1 query per renderer was needed for the export
self.assertEqual(org_id_renderer.queries, 1)
self.assertEqual(fac_type_renderer.queries, 1)
self.assertEqual(loc_id_renderer.queries, 1)
# -------------------------------------------------------------------------
def tearDown(self):
db = current.db
db.rollback()
current.auth.override = False
try:
db[self.tablename].drop()
except:
pass
# =============================================================================
class S3ReusableFieldTests(unittest.TestCase):
""" Test multiple named widgets in reusable fields """
# -------------------------------------------------------------------------
def widget1(self):
""" Dummy widget """
pass
def widget2(self):
""" Dummy widget """
pass
def widget3(self):
""" Dummy widget """
pass
# -------------------------------------------------------------------------
def testWidgetOverrideWithoutDefault(self):
""" Test setting the widget in the instance (no default) """
rf = S3ReusableField("test", "integer")
# Default None
field = rf()
self.assertEqual(field.widget, None)
# Widget-parameter overrides default
field = rf(widget=self.widget1)
self.assertEqual(field.widget, self.widget1)
# -------------------------------------------------------------------------
def testWidgetOverrideWithDefault(self):
""" Test overriding the default widget in the instance """
rf = S3ReusableField("test", "integer",
widget=self.widget1)
# Default widget
field = rf()
self.assertEqual(field.widget, self.widget1)
# Widget-parameter overrides default
field = rf(widget=self.widget2)
self.assertEqual(field.widget, self.widget2)
# -------------------------------------------------------------------------
def testSingleWidget(self):
""" Test using widget set with single widget """
rf = S3ReusableField("test", "integer",
widgets=self.widget1)
# Default
field = rf()
self.assertEqual(field.widget, self.widget1)
# Deliberate default
field = rf(widget="default")
self.assertEqual(field.widget, self.widget1)
# Override
field = rf(widget=self.widget2)
self.assertEqual(field.widget, self.widget2)
# Undefined widget
self.assertRaises(NameError, rf, widget="alternative")
# -------------------------------------------------------------------------
def testMultipleWidgets(self):
""" Test using widget set with multiple widgets """
rf = S3ReusableField("test", "integer",
widgets={"default": self.widget1,
"alternative": self.widget2,
},
)
# Using default from set
field = rf()
self.assertEqual(field.widget, self.widget1)
# Deliberate default
field = rf(widget="default")
self.assertEqual(field.widget, self.widget1)
# Other choice
field = rf(widget="alternative")
self.assertEqual(field.widget, self.widget2)
# Override
field = rf(widget=self.widget3)
self.assertEqual(field.widget, self.widget3)
# Undefined widget
self.assertRaises(NameError, rf, widget="other")
# -------------------------------------------------------------------------
def testMultipleWidgetsWithDefault(self):
""" Test using widget set with multiple widgets and override default """
rf = S3ReusableField("test", "integer",
widgets={"default": self.widget1,
"alternative": self.widget2,
},
widget=self.widget3,
)
# "widget"-setting overrides "default"
field = rf()
self.assertEqual(field.widget, self.widget3)
# "widget"-setting overrides "default"
field = rf(widget="default")
self.assertEqual(field.widget, self.widget3)
# Other alternatives still available
field = rf(widget="alternative")
self.assertEqual(field.widget, self.widget2)
# And can still override
field = rf(widget=self.widget1)
self.assertEqual(field.widget, self.widget1)
# Undefined widget
self.assertRaises(NameError, rf, widget="other")
# -------------------------------------------------------------------------
def testFallbackWithDefault(self):
""" Test fallback to default widget """
rf = S3ReusableField("test", "integer",
widget=self.widget1,
widgets={"alternative": self.widget2},
)
# Standard fallback
field = rf()
self.assertEqual(field.widget, self.widget1)
# Deliberate default
field = rf(widget="default")
self.assertEqual(field.widget, self.widget1)
# Alternative
field = rf(widget="alternative")
self.assertEqual(field.widget, self.widget2)
# Override
field = rf(widget=self.widget1)
self.assertEqual(field.widget, self.widget1)
# Undefined widget
self.assertRaises(NameError, rf, widget="other")
# -------------------------------------------------------------------------
def testExplicitNone(self):
""" Test explicit None-widget in instance """
rf = S3ReusableField("test", "integer",
widgets={"default": self.widget1,
"alternative": self.widget2,
},
widget=self.widget3,
)
# Standard fallback
field = rf(widget=None)
self.assertEqual(field.widget, None)
# -------------------------------------------------------------------------
def testFallbackWithoutDefault(self):
""" Test fallback to None """
rf = S3ReusableField("test", "integer",
widgets={"alternative": self.widget2},
)
# Standard fallback
field = rf()
self.assertEqual(field.widget, None)
# Deliberate default
field = rf(widget="default")
self.assertEqual(field.widget, None)
# Alternative
field = rf(widget="alternative")
self.assertEqual(field.widget, self.widget2)
# Override
field = rf(widget=self.widget1)
self.assertEqual(field.widget, self.widget1)
# Undefined widget
self.assertRaises(NameError, rf, widget="other")
# -------------------------------------------------------------------------
def testFallbackWithoutWidgets(self):
""" Test fallback to None """
rf = S3ReusableField("test", "integer")
# Standard fallback
field = rf()
self.assertEqual(field.widget, None)
# Deliberate default
field = rf(widget="default")
self.assertEqual(field.widget, None)
# Alternative
self.assertRaises(NameError, rf, widget="alternative")
# Override
field = rf(widget=self.widget1)
self.assertEqual(field.widget, self.widget1)
# Undefined widget
self.assertRaises(NameError, rf, widget="other")
# =============================================================================
def run_suite(*test_classes):
""" Run the test suite """
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
if suite is not None:
unittest.TextTestRunner(verbosity=2).run(suite)
return
if __name__ == "__main__":
run_suite(
S3RepresentTests,
S3ExtractLazyFKRepresentationTests,
S3ExportLazyFKRepresentationTests,
S3ReusableFieldTests,
)
# END ========================================================================
|
py | 1a3e3f50b02ec84e5ef6d0ff057c16ba5a38ca22 | # -*- coding: utf-8 -*-
"""
sentence, word, morph, ...
__author__ = 'Jamie ([email protected])'
__copyright__ = 'Copyright (C) 2019-, Kakao Corp. All rights reserved.'
"""
###########
# imports #
###########
import logging
import re
from typing import List, Tuple
MAX_LEN = 64
#########
# types #
#########
class Sentence:
"""
raw sentence
"""
def __init__(self, raw: str):
"""
:param raw: raw sentence
"""
self.words = raw.split()
class PosMorph:
"""
morpheme
"""
def __init__(self, morph: str, pos_tag: str = 'O', beg: int = -1, end: int = -1):
'''
        Initialize a syllable with the 'O' tag and its begin/end positions.
        Args:
            morph: syllable
Example:
morph 저 pos_tag NP beg 0 end 1
morph 는 pos_tag JX beg 1 end 2
morph 일 pos_tag NNG beg 0 end 1
morph 아 pos_tag NNG beg 0 end 1
morph 을 pos_tag JKO beg 2 end 3
morph 사 pos_tag NNG beg 0 end 1
morph 합 pos_tag XSV beg 2 end 3
morph 니 pos_tag EF beg 3 end 4
morph . pos_tag SF beg 5 end 6
'''
self.morph = morph
self.pos_tag = pos_tag
self.beg = beg
self.end = end
def __str__(self):
return '{}/{}'.format(self.morph, self.pos_tag)
def __len__(self):
return self.end - self.beg
class PosWord:
"""
part-of-speech tagged word
"""
def __init__(self, raw: str):
"""
Args:
raw: raw word
"""
self.raw = raw
self.tags = [] # output tags for each character
        self.res_chrs = raw    # concatenation of the syllables of the restored (original-form) morphemes
        self.pos_tagged_morphs = []    # list of PosMorph, filled in by set_pos_result()
def __str__(self):
return '{}\t{}'.format(self.raw, ' '.join([str(x) for x in self.pos_tagged_morphs]))
def for_pretrain(self) -> str:
"""
        Produce the output string used for pre-training.
        Returns:
            sentence formatted for pre-training
"""
morph_strs = []
morph = ''
prev_tag = ''
for char, iob_tag in zip(self.raw, self.tags):
try:
iob_tag, _ = iob_tag.split(':', 1)
except ValueError:
pass
try:
iob, tag = iob_tag.split('-')
except ValueError as val_err:
logging.error('raw: %s', self.raw)
logging.error('tags: %s', self.tags)
logging.error('iob_tag: %s', iob_tag)
raise val_err
if iob == 'B':
if morph:
morph_strs.append('%s/%s' % (re.sub(r'\d', '0', morph), prev_tag))
morph = char
prev_tag = tag
elif iob == 'I':
if prev_tag == tag:
morph += char
else:
if morph:
morph_strs.append('%s/%s' % (re.sub(r'\d', '0', morph), prev_tag))
morph = char
prev_tag = tag
if morph:
morph_strs.append('%s/%s' % (re.sub(r'\d', '0', morph), prev_tag))
return ' '.join(morph_strs)
def __eq__(self, other: 'PosWord'):
"""
        Two words are considered equal if their morphological analysis results match.
        (Used by the evaluation program to decide word-level agreement.)
Args:
other: other object
"""
return self.res_chrs == other.res_chrs and self.res_tags == other.res_tags
def set_pos_result(self, tags: List[str], restore_dic: dict = None):
"""
        Set the information of an externally created PosWord object on this instance.
        Args:
            tags: morpheme tags read from a file (per syllable)
            restore_dic: original-form restoration dictionary
"""
if not restore_dic:
tags = [x.split(':', 1)[0] for x in tags]
self.tags = tags
# print(self.raw)
# print(len(self.raw))
# print(self.tags)
# print(len(self.tags))
# assert len(self.raw) == len(self.tags) # 음절수와 태그수는 동일해야 한다.
self.pos_tagged_morphs = self._make_pos_morphs(restore_dic)
def _make_pos_morphs(self, restore_dic: dict = None):
"""
        Determine the positions where syllable tags are merged via their B/I prefixes.
        Args:
            restore_dic: original-form restoration dictionary
        Returns:
            pos_morphs: list of morph/tag entries, e.g. 기억/NNG
"""
if not self.tags:
return []
self._restore(restore_dic)
pos_morphs = []
for beg, (lex, iob_tag) in enumerate(zip(self.res_chrs, self.res_tags)):
try:
iob, pos_tag = iob_tag.rsplit('-', 1)
except ValueError as val_err:
logging.error('invalid char/tag: %s/%s in [%s] %s', lex, iob_tag, self.res_chrs,
self.res_tags)
raise val_err
if iob == 'B' or not pos_morphs or pos_morphs[-1].pos_tag != pos_tag:
pos_morphs.append(PosMorph(lex, pos_tag, beg, beg+1))
elif iob == 'I':
if pos_morphs[-1].pos_tag == pos_tag:
pos_morphs[-1].morph += lex
pos_morphs[-1].end += len(lex)
else:
logging.debug('tag is different between B and I: %s vs %s',
pos_morphs[-1].pos_tag, pos_tag)
pos_morphs.append(PosMorph(lex, pos_tag, beg, beg+1))
else:
raise ValueError('invalid IOB tag: {}/{} in [{}] {}'.format \
(lex, iob_tag, self.res_chrs, self.res_tags))
return pos_morphs
def _restore(self, restore_dic: dict):
"""
        Restore the original forms of the morphemes using the restoration dictionary.
        Args:
            restore_dic: original-form restoration dictionary
"""
if not restore_dic:
self.res_chrs = self.raw
self.res_tags = self.tags
return
res_chrs = []
self.res_tags = []
for char, tag in zip(self.raw, self.tags):
if ':' in tag:
key = '{}/{}'.format(char, tag)
if key in restore_dic:
for char_tag in restore_dic[key].split():
res_chr, res_tag = char_tag.rsplit('/', 1)
res_chrs.append(res_chr)
self.res_tags.append(res_tag)
continue
else:
logging.debug('mapping not found: %s/%s', char, tag)
tag, _ = tag.split(':', 1)
res_chrs.append(char)
self.res_tags.append(tag)
self.res_chrs = ''.join(res_chrs)
class PosSentence(Sentence):
"""
part-of-speech tagged sentence
"""
def __init__(self, raw: str):
"""
Args:
raw: raw sentence
"""
super().__init__(raw)
self.pos_tagged_words = [] # list of PosWord
def __str__(self):
return '\n'.join([str(pos_word) for pos_word in self.pos_tagged_words])
def get_beg_end_list(self) -> Tuple[List[int], List[int]]:
"""
        Build a list of the begin positions and a list of the end positions of all morphemes.
Returns:
list of begin positions
list of end positions
"""
begs = []
ends = []
for word in self.pos_tagged_words:
for morph in word.pos_tagged_morphs:
begs.append(morph.beg)
ends.append(morph.end)
return begs, ends
def set_raw_by_words(self):
"""
        Fill the 'words' member of the Sentence object from the raw strings of the PosWord objects.
"""
self.words = [pos_word.raw for pos_word in self.pos_tagged_words]
# Example:
# words: ['저는', '일요일', '아침을', '사랑합니다.']
def init_pos_tags(self):
"""
        Create a PosWord object for each word and initialize its tags to 'O'.
"""
if self.pos_tagged_words:
raise RuntimeError('PoS tagged words are already initialized')
for word in self.words:
            # hand each word over to the PosWord class
self.pos_tagged_words.append(PosWord(word))
print('pos_tagged_words', self.pos_tagged_words)
def set_pos_result(self, tags: List[str], restore_dic: dict = None):
"""
        Set the predicted morpheme tag labels for the whole sentence and restore the morphemes.
        Args:
            tags: tag output labels for the whole sentence
            restore_dic: original-form restoration dictionary
"""
total_char_num = 0
for pos_word in self.pos_tagged_words:
pos_word.set_pos_result(tags[total_char_num:total_char_num + len(pos_word.raw)],
restore_dic)
total_char_num += len(pos_word.raw)
        # adjust the length to account for PADded characters
total_char_num = len(tags)
assert total_char_num == len(tags)
def get_sequence(self, morph: bool = True, tag: bool = True, simple: bool = False) -> List[str]:
"""
        Build and return the list of morpheme strings, optionally with their tags.
        Args:
            morph: include the morpheme text
            tag: include the tag
            simple: output only the first character of each tag
        Returns:
            list of strings
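        Example (illustrative, reusing the sample sentence from the PosMorph docstring above):
            morph=True, tag=True   -> ['저/NP', '는/JX', ...]
            morph=True, tag=False  -> ['저', '는', ...]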
"""
sequence = []
for word in self.pos_tagged_words:
for pos_morph in word.pos_tagged_morphs:
morphs = []
if morph:
morphs.append(pos_morph.morph)
if tag:
morphs.append(pos_morph.pos_tag if not simple else pos_morph.pos_tag[0])
sequence.append('/'.join(morphs))
return sequence
def get_all_morphs(self) -> List[str]:
"""
        Return the list of all PosMorph objects that make up the sentence.
        Returns:
            list of all morphemes
"""
return [morph for word in self.pos_tagged_words for morph in word.pos_tagged_morphs]
|
py | 1a3e4027305147b3af09d1b3b43aae8cf1cba704 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Phase estimation for the spectrum of a Hamiltonian"""
from typing import Optional, Union
from qiskit import QuantumCircuit
from qiskit.utils import QuantumInstance
from qiskit.opflow import (EvolutionBase, PauliTrotterEvolution, OperatorBase,
SummedOp, PauliOp, MatrixOp, PauliSumOp, StateFn)
from qiskit.providers import BaseBackend
from .phase_estimation import PhaseEstimation
from .hamiltonian_phase_estimation_result import HamiltonianPhaseEstimationResult
from .phase_estimation_scale import PhaseEstimationScale
class HamiltonianPhaseEstimation:
r"""Run the Quantum Phase Estimation algorithm to find the eigenvalues of a Hermitian operator.
This class is nearly the same as :class:`~qiskit.algorithms.PhaseEstimation`, differing only
in that the input in that class is a unitary operator, whereas here the input is a Hermitian
operator from which a unitary will be obtained by scaling and exponentiating. The scaling is
performed in order to prevent the phases from wrapping around :math:`2\pi`.
The problem of estimating eigenvalues :math:`\lambda_j` of the Hermitian operator
:math:`H` is solved by running a circuit representing
.. math::
\exp(i b H) |\psi\rangle = \sum_j \exp(i b \lambda_j) c_j |\lambda_j\rangle,
where the input state is
.. math::
|\psi\rangle = \sum_j c_j |\lambda_j\rangle,
and :math:`\lambda_j` are the eigenvalues of :math:`H`.
Here, :math:`b` is a scaling factor sufficiently large to map positive :math:`\lambda` to
:math:`[0,\pi)` and negative :math:`\lambda` to :math:`[\pi,2\pi)`. Each time the circuit is
    run, one measures a phase corresponding to :math:`\lambda_j` with probability :math:`|c_j|^2`.
If :math:`H` is a Pauli sum, the bound :math:`b` is computed from the sum of the absolute
values of the coefficients of the terms. There is no way to reliably recover eigenvalues
from phases very near the endpoints of these intervals. Because of this you should be aware
that for degenerate cases, such as :math:`H=Z`, the eigenvalues :math:`\pm 1` will be
mapped to the same phase, :math:`\pi`, and so cannot be distinguished. In this case, you need
to specify a larger bound as an argument to the method ``estimate``.
This class uses and works together with :class:`~qiskit.algorithms.PhaseEstimationScale` to
manage scaling the Hamiltonian and the phases that are obtained by the QPE algorithm. This
includes setting, or computing, a bound on the eigenvalues of the operator, using this
bound to obtain a scale factor, scaling the operator, and shifting and scaling the measured
phases to recover the eigenvalues.
    Note that, although we speak of "evolving" the state according to the Hamiltonian, in the
present algorithm, we are not actually considering time evolution. Rather, the role of time is
played by the scaling factor, which is chosen to best extract the eigenvalues of the
Hamiltonian.
A few of the ideas in the algorithm may be found in Ref. [1].
**Reference:**
[1]: Quantum phase estimation of multiple eigenvalues for small-scale (noisy) experiments
T.E. O'Brien, B. Tarasinski, B.M. Terhal
`arXiv:1809.09697 <https://arxiv.org/abs/1809.09697>`_
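    **Example** (an illustrative sketch, not taken from a verified application; the
    operator, backend, shot count and qubit counts below are arbitrary placeholders)::
        from qiskit import BasicAer, QuantumCircuit
        from qiskit.utils import QuantumInstance
        from qiskit.opflow import I, X, Z, StateFn
        from qiskit.algorithms import HamiltonianPhaseEstimation
        # a small two-qubit Pauli-sum Hamiltonian (placeholder coefficients)
        hamiltonian = 0.5 * (X ^ X) + 0.25 * (Z ^ I)
        # all-zero input state on two qubits
        state_preparation = StateFn(QuantumCircuit(2))
        quantum_instance = QuantumInstance(BasicAer.get_backend('qasm_simulator'), shots=1024)
        hpe = HamiltonianPhaseEstimation(num_evaluation_qubits=6, quantum_instance=quantum_instance)
        result = hpe.estimate(hamiltonian, state_preparation)
        # eigenvalue estimates can then be read from the result, e.g. via
        # result.filter_phases(cutoff=0.1, scaled=True)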
"""
def __init__(self,
num_evaluation_qubits: int,
quantum_instance: Optional[Union[QuantumInstance, BaseBackend]] = None) -> None:
"""
Args:
num_evaluation_qubits: The number of qubits used in estimating the phase. The phase will
be estimated as a binary string with this many bits.
quantum_instance: The quantum instance on which the circuit will be run.
"""
self._phase_estimation = PhaseEstimation(
num_evaluation_qubits=num_evaluation_qubits,
quantum_instance=quantum_instance)
def _get_scale(self, hamiltonian, bound=None) -> None:
if bound is None:
return PhaseEstimationScale.from_pauli_sum(hamiltonian)
return PhaseEstimationScale(bound)
def _get_unitary(self, hamiltonian, pe_scale, evolution) -> QuantumCircuit:
"""Evolve the Hamiltonian to obtain a unitary.
Apply the scaling to the Hamiltonian that has been computed from an eigenvalue bound
and compute the unitary by applying the evolution object.
"""
# scale so that phase does not wrap.
scaled_hamiltonian = -pe_scale.scale * hamiltonian
unitary = evolution.convert(scaled_hamiltonian.exp_i())
if not isinstance(unitary, QuantumCircuit):
unitary_circuit = unitary.to_circuit()
else:
unitary_circuit = unitary
# Decomposing twice allows some 1Q Hamiltonians to give correct results
# when using MatrixEvolution(), that otherwise would give incorrect results.
# It does not break any others that we tested.
return unitary_circuit.decompose().decompose()
# pylint: disable=arguments-differ
def estimate(self, hamiltonian: OperatorBase,
state_preparation: Optional[StateFn] = None,
evolution: Optional[EvolutionBase] = None,
bound: Optional[float] = None) -> HamiltonianPhaseEstimationResult:
"""Run the Hamiltonian phase estimation algorithm.
Args:
hamiltonian: A Hermitian operator.
state_preparation: The ``StateFn`` to be prepared, whose eigenphase will be
measured. If this parameter is omitted, no preparation circuit will be run and
input state will be the all-zero state in the computational basis.
evolution: An evolution converter that generates a unitary from ``hamiltonian``. If
``None``, then the default ``PauliTrotterEvolution`` is used.
bound: An upper bound on the absolute value of the eigenvalues of
``hamiltonian``. If omitted, then ``hamiltonian`` must be a Pauli sum, or a
``PauliOp``, in which case a bound will be computed. If ``hamiltonian``
is a ``MatrixOp``, then ``bound`` may not be ``None``. The tighter the bound,
the higher the resolution of computed phases.
Returns:
HamiltonianPhaseEstimationResult instance containing the result of the estimation
and diagnostic information.
Raises:
ValueError: If ``bound`` is ``None`` and ``hamiltonian`` is not a Pauli sum, i.e. a
``PauliSumOp`` or a ``SummedOp`` whose terms are of type ``PauliOp``.
TypeError: If ``evolution`` is not of type ``EvolutionBase``.
"""
if evolution is None:
evolution = PauliTrotterEvolution()
elif not isinstance(evolution, EvolutionBase):
raise TypeError(f'Expecting type EvolutionBase, got {type(evolution)}')
if isinstance(hamiltonian, PauliSumOp):
hamiltonian = hamiltonian.to_pauli_op()
elif isinstance(hamiltonian, PauliOp):
hamiltonian = SummedOp([hamiltonian])
if isinstance(hamiltonian, SummedOp):
            # remove identity terms
            # The term proportional to the identity is removed from the hamiltonian.
# This is done for three reasons:
# 1. Work around an unknown bug that otherwise causes the energies to be wrong in some
# cases.
# 2. Allow working with a simpler Hamiltonian, one with fewer terms.
# 3. Tighten the bound on the eigenvalues so that the spectrum is better resolved, i.e.
# occupies more of the range of values representable by the qubit register.
# The coefficient of this term will be added to the eigenvalues.
id_coefficient, hamiltonian_no_id = _remove_identity(hamiltonian)
# get the rescaling object
pe_scale = self._get_scale(hamiltonian_no_id, bound)
# get the unitary
unitary = self._get_unitary(hamiltonian_no_id, pe_scale, evolution)
elif isinstance(hamiltonian, MatrixOp):
if bound is None:
raise ValueError('bound must be specified if Hermitian operator is MatrixOp')
# Do not subtract an identity term from the matrix, so do not compensate.
id_coefficient = 0.0
pe_scale = self._get_scale(hamiltonian, bound)
unitary = self._get_unitary(hamiltonian, pe_scale, evolution)
else:
raise TypeError(f'Hermitian operator of type {type(hamiltonian)} not supported.')
if state_preparation is not None:
state_preparation = state_preparation.to_circuit_op().to_circuit()
# run phase estimation
phase_estimation_result = self._phase_estimation.estimate(
unitary=unitary, state_preparation=state_preparation)
return HamiltonianPhaseEstimationResult(
phase_estimation_result=phase_estimation_result,
id_coefficient=id_coefficient,
phase_estimation_scale=pe_scale)
def _remove_identity(pauli_sum):
"""Remove any identity operators from `pauli_sum`. Return
the sum of the coefficients of the identities and the new operator.
"""
idcoeff = 0.0
ops = []
for op in pauli_sum:
p = op.primitive
if p.x.any() or p.z.any():
ops.append(op)
else:
idcoeff += op.coeff
return idcoeff, SummedOp(ops)
|
py | 1a3e420a3a87429ef11679f5e562596748869090 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Job']
class Job(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
input: Optional[pulumi.Input[Union[pulumi.InputType['JobInputAssetArgs'], pulumi.InputType['JobInputClipArgs'], pulumi.InputType['JobInputHttpArgs'], pulumi.InputType['JobInputsArgs']]]] = None,
job_name: Optional[pulumi.Input[str]] = None,
outputs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['JobOutputAssetArgs']]]]] = None,
priority: Optional[pulumi.Input[Union[str, 'Priority']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
transform_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A Job resource type. The progress and state can be obtained by polling a Job or subscribing to events using EventGrid.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The Media Services account name.
:param pulumi.Input[str] description: Optional customer supplied description of the Job.
:param pulumi.Input[Union[pulumi.InputType['JobInputAssetArgs'], pulumi.InputType['JobInputClipArgs'], pulumi.InputType['JobInputHttpArgs'], pulumi.InputType['JobInputsArgs']]] input: The inputs for the Job.
:param pulumi.Input[str] job_name: The Job name.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['JobOutputAssetArgs']]]] outputs: The outputs for the Job.
:param pulumi.Input[Union[str, 'Priority']] priority: Priority with which the job should be processed. Higher priority jobs are processed before lower priority jobs. If not set, the default is normal.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the Azure subscription.
:param pulumi.Input[str] transform_name: The Transform name.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__['account_name'] = account_name
__props__['description'] = description
if input is None and not opts.urn:
raise TypeError("Missing required property 'input'")
__props__['input'] = input
__props__['job_name'] = job_name
if outputs is None and not opts.urn:
raise TypeError("Missing required property 'outputs'")
__props__['outputs'] = outputs
__props__['priority'] = priority
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if transform_name is None and not opts.urn:
raise TypeError("Missing required property 'transform_name'")
__props__['transform_name'] = transform_name
__props__['created'] = None
__props__['last_modified'] = None
__props__['name'] = None
__props__['state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:media/v20180330preview:Job"), pulumi.Alias(type_="azure-native:media:Job"), pulumi.Alias(type_="azure-nextgen:media:Job"), pulumi.Alias(type_="azure-native:media/latest:Job"), pulumi.Alias(type_="azure-nextgen:media/latest:Job"), pulumi.Alias(type_="azure-native:media/v20180601preview:Job"), pulumi.Alias(type_="azure-nextgen:media/v20180601preview:Job"), pulumi.Alias(type_="azure-native:media/v20180701:Job"), pulumi.Alias(type_="azure-nextgen:media/v20180701:Job"), pulumi.Alias(type_="azure-native:media/v20200501:Job"), pulumi.Alias(type_="azure-nextgen:media/v20200501:Job")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Job, __self__).__init__(
'azure-native:media/v20180330preview:Job',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Job':
"""
Get an existing Job resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["created"] = None
__props__["description"] = None
__props__["input"] = None
__props__["last_modified"] = None
__props__["name"] = None
__props__["outputs"] = None
__props__["priority"] = None
__props__["state"] = None
__props__["type"] = None
return Job(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def created(self) -> pulumi.Output[str]:
"""
The UTC date and time when the Job was created, in 'YYYY-MM-DDThh:mm:ssZ' format.
"""
return pulumi.get(self, "created")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Optional customer supplied description of the Job.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def input(self) -> pulumi.Output[Any]:
"""
The inputs for the Job.
"""
return pulumi.get(self, "input")
@property
@pulumi.getter(name="lastModified")
def last_modified(self) -> pulumi.Output[str]:
"""
The UTC date and time when the Job was last updated, in 'YYYY-MM-DDThh:mm:ssZ' format.
"""
return pulumi.get(self, "last_modified")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def outputs(self) -> pulumi.Output[Sequence['outputs.JobOutputAssetResponse']]:
"""
The outputs for the Job.
"""
return pulumi.get(self, "outputs")
@property
@pulumi.getter
def priority(self) -> pulumi.Output[Optional[str]]:
"""
Priority with which the job should be processed. Higher priority jobs are processed before lower priority jobs. If not set, the default is normal.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The current state of the job.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | 1a3e427ed7792b362f149c35b23197880c619577 | def helloworld():
print 'hello, world!'
print 'this is my first module'
|
py | 1a3e429dfded27b8f07e7400ddd234557f9bd8f5 | """
Galaxy Metadata
"""
import copy
import json
import logging
import os
import shutil
import sys
import tempfile
import weakref
from collections import OrderedDict
from collections.abc import Mapping
from os.path import abspath
from typing import Any, Iterator, Optional, TYPE_CHECKING, Union
from sqlalchemy.orm import object_session
from sqlalchemy.orm.attributes import flag_modified
import galaxy.model
from galaxy.model.scoped_session import galaxy_scoped_session
from galaxy.security.object_wrapper import sanitize_lists_to_string
from galaxy.util import (
form_builder,
listify,
string_as_bool,
stringify_dictionary_keys,
unicodify,
)
from galaxy.util.json import safe_dumps
if TYPE_CHECKING:
from galaxy.model import DatasetInstance
from galaxy.model.none_like import NoneDataset
from galaxy.model.store import SessionlessContext
log = logging.getLogger(__name__)
STATEMENTS = "__galaxy_statements__" # this is the name of the property in a Datatype class where new metadata spec element Statements are stored
class Statement:
"""
This class inserts its target into a list in the surrounding
class. the data.Data class has a metaclass which executes these
statements. This is how we shove the metadata element spec into
the class.
"""
def __init__(self, target):
self.target = target
def __call__(self, *args, **kwargs):
# get the locals dictionary of the frame object one down in the call stack (i.e. the Datatype class calling MetadataElement)
class_locals = sys._getframe(1).f_locals
# get and set '__galaxy_statements__' to an empty list if not in locals dict
statements = class_locals.setdefault(STATEMENTS, [])
# add Statement containing info to populate a MetadataElementSpec
statements.append((self, args, kwargs))
@classmethod
def process(cls, element):
for statement, args, kwargs in getattr(element, STATEMENTS, []):
statement.target(element, *args, **kwargs) # statement.target is MetadataElementSpec, element is a Datatype class
class MetadataCollection(Mapping):
"""
MetadataCollection is not a collection at all, but rather a proxy
to the real metadata which is stored as a Dictionary. This class
handles processing the metadata elements when they are set and
retrieved, returning default values in cases when metadata is not set.
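    Example (illustrative; ``dbkey`` stands for any element declared in the
    datatype's metadata_spec)::
        dataset.metadata.dbkey            # stored value, or the spec default if unset
        dataset.metadata.dbkey = "hg19"   # unwrapped and stored in dataset._metadata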
"""
def __init__(self, parent: Union["DatasetInstance", "NoneDataset"], session: Optional[Union[galaxy_scoped_session, 'SessionlessContext']] = None) -> None:
self.parent = parent
self._session = session
# initialize dict if needed
if self.parent._metadata is None:
self.parent._metadata = {}
def get_parent(self):
if "_parent" in self.__dict__:
return self.__dict__["_parent"]()
return None
def set_parent(self, parent):
# use weakref to prevent a circular reference interfering with garbage
# collection: hda/lda (parent) <--> MetadataCollection (self) ; needs to be
# hashable, so cannot use proxy.
self.__dict__["_parent"] = weakref.ref(parent)
parent = property(get_parent, set_parent)
@property
def spec(self):
return self.parent.datatype.metadata_spec
def _object_session(self, item):
return self._session if self._session else object_session(item)
def __iter__(self) -> Iterator[Any]:
yield from self.spec.keys()
def __getitem__(self, key):
try:
self.__getattribute__(key)
except AttributeError:
try:
return self.__getattr__(key)
except Exception:
raise KeyError
# `key` is an attribute of this instance, not some metadata: raise
# KeyError to prevent e.g. `'items' in dataset.metadata` from returning
# True
# Not doing this would also break Cheetah's NameMapper._valueForName()
# since dataset.metadata['items'] would be None
raise KeyError
def __len__(self):
return len(self.spec)
def __str__(self):
return dict(self.items()).__str__()
def __bool__(self):
return bool(self.parent._metadata)
__nonzero__ = __bool__
def __getattr__(self, name):
if name in self.spec:
if name in self.parent._metadata:
return self.spec[name].wrap(self.parent._metadata[name], self._object_session(self.parent))
return self.spec[name].wrap(self.spec[name].default, self._object_session(self.parent))
if name in self.parent._metadata:
return self.parent._metadata[name]
# Instead of raising an AttributeError for non-existing metadata, we return None
return None
def __setattr__(self, name, value):
if name == "parent":
return self.set_parent(value)
elif name == '_session':
super().__setattr__(name, value)
else:
if name in self.spec:
self.parent._metadata[name] = self.spec[name].unwrap(value)
else:
self.parent._metadata[name] = value
flag_modified(self.parent, '_metadata')
def remove_key(self, name):
if name in self.parent._metadata:
del self.parent._metadata[name]
else:
log.info(f"Attempted to delete invalid key '{name}' from MetadataCollection")
def element_is_set(self, name) -> bool:
"""
        Check if the metadata element with the given name is set, i.e.
        - if such a metadata element actually exists and
        - if its value differs from no_value
        :param name: the name of the metadata element
        :returns: True if the value differs from no_value,
            False if it is equal or if no metadata with that name is specified
"""
try:
meta_val = self[name]
except KeyError:
log.debug(f"no metadata with name {name} found")
return False
meta_spec = self.parent.metadata.spec[name]
return meta_val != meta_spec.no_value
def get_metadata_parameter(self, name, **kwd):
if name in self.spec:
field = self.spec[name].param.get_field(getattr(self, name), self, None, **kwd)
field.value = getattr(self, name)
return field
def make_dict_copy(self, to_copy):
"""Makes a deep copy of input iterable to_copy according to self.spec"""
rval = {}
for key, value in to_copy.items():
if key in self.spec:
rval[key] = self.spec[key].param.make_copy(value, target_context=self, source_context=to_copy)
return rval
@property
def requires_dataset_id(self):
for key in self.spec:
if isinstance(self.spec[key].param, FileParameter):
return True
return False
def from_JSON_dict(self, filename=None, path_rewriter=None, json_dict=None):
dataset = self.parent
if filename is not None:
log.debug(f'loading metadata from file for: {dataset.__class__.__name__} {dataset.id}')
with open(filename) as fh:
JSONified_dict = json.load(fh)
elif json_dict is not None:
log.debug(f'loading metadata from dict for: {dataset.__class__.__name__} {dataset.id}')
if isinstance(json_dict, str):
JSONified_dict = json.loads(json_dict)
elif isinstance(json_dict, dict):
JSONified_dict = json_dict
else:
raise ValueError(f"json_dict must be either a dictionary or a string, got {type(json_dict)}.")
else:
raise ValueError("You must provide either a filename or a json_dict")
# We build a dictionary for metadata name / value pairs
# because when we copy MetadataTempFile objects we flush the datasets'
# session, but only include the newly created MetadataFile object.
# If we were to set the metadata elements in the first for loop we'd
# lose all previously set metadata elements
metadata_name_value = {}
for name, spec in self.spec.items():
if name in JSONified_dict:
from_ext_kwds = {}
external_value = JSONified_dict[name]
param = spec.param
if isinstance(param, FileParameter):
from_ext_kwds['path_rewriter'] = path_rewriter
value = param.from_external_value(external_value, dataset, **from_ext_kwds)
metadata_name_value[name] = value
elif name in dataset._metadata:
# if the metadata value is not found in our externally set metadata but it has a value in the 'old'
# metadata associated with our dataset, we'll delete it from our dataset's metadata dict
del dataset._metadata[name]
for name, value in metadata_name_value.items():
dataset._metadata[name] = value
if '__extension__' in JSONified_dict:
dataset.extension = JSONified_dict['__extension__']
if '__validated_state__' in JSONified_dict:
dataset.validated_state = JSONified_dict['__validated_state__']
if '__validated_state_message__' in JSONified_dict:
dataset.validated_state_message = JSONified_dict['__validated_state_message__']
flag_modified(dataset, '_metadata')
def to_JSON_dict(self, filename=None):
meta_dict = {}
dataset_meta_dict = self.parent._metadata
for name, spec in self.spec.items():
if name in dataset_meta_dict:
meta_dict[name] = spec.param.to_external_value(dataset_meta_dict[name])
if '__extension__' in dataset_meta_dict:
meta_dict['__extension__'] = dataset_meta_dict['__extension__']
if '__validated_state__' in dataset_meta_dict:
meta_dict['__validated_state__'] = dataset_meta_dict['__validated_state__']
if '__validated_state_message__' in dataset_meta_dict:
meta_dict['__validated_state_message__'] = dataset_meta_dict['__validated_state_message__']
try:
encoded_meta_dict = galaxy.model.custom_types.json_encoder.encode(meta_dict)
except Exception as e:
raise Exception(f"Failed encoding metadata dictionary: {meta_dict}") from e
if filename is None:
return encoded_meta_dict
with open(filename, 'wt+') as fh:
fh.write(encoded_meta_dict)
def __getstate__(self):
# cannot pickle a weakref item (self._parent), when
# data._metadata_collection is None, it will be recreated on demand
return None
class MetadataSpecCollection(OrderedDict):
"""
A simple extension of OrderedDict which allows cleaner access to items
and allows the values to be iterated over directly as if it were a
list. append() is also implemented for simplicity and does not
"append".
"""
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
def append(self, item):
self[item.name] = item
def __getattr__(self, name):
if name not in self:
raise AttributeError
return self.get(name)
def __repr__(self):
# force elements to draw with __str__ for sphinx-apidoc
return ', '.join(item.__str__() for item in self.values())
class MetadataParameter:
def __init__(self, spec):
self.spec = spec
def get_field(self, value=None, context=None, other_values=None, **kwd):
context = context or {}
other_values = other_values or {}
return form_builder.TextField(self.spec.name, value=value)
def to_string(self, value):
return str(value)
def to_safe_string(self, value):
return sanitize_lists_to_string(self.to_string(value))
def make_copy(self, value, target_context: MetadataCollection, source_context):
return copy.deepcopy(value)
@classmethod
def marshal(cls, value):
"""
This method should/can be overridden to convert the incoming
value to whatever type it is supposed to be.
"""
return value
def validate(self, value):
"""
Throw an exception if the value is invalid.
"""
def unwrap(self, form_value):
"""
Turns a value into its storable form.
"""
value = self.marshal(form_value)
self.validate(value)
return value
def wrap(self, value, session):
"""
Turns a value into its usable form.
"""
return value
def from_external_value(self, value, parent):
"""
Turns a value read from an external dict into its value to be pushed directly into the metadata dict.
"""
return value
def to_external_value(self, value):
"""
Turns a value read from a metadata into its value to be pushed directly into the external dict.
"""
return value
class MetadataElementSpec:
"""
Defines a metadata element and adds it to the metadata_spec (which
is a MetadataSpecCollection) of datatype.
"""
def __init__(self, datatype, name=None, desc=None,
param=MetadataParameter, default=None, no_value=None,
visible=True, set_in_upload=False, **kwargs):
self.name = name
self.desc = desc or name
self.default = default
self.no_value = no_value
self.visible = visible
self.set_in_upload = set_in_upload
# Catch-all, allows for extra attributes to be set
self.__dict__.update(kwargs)
# set up param last, as it uses values set above
self.param = param(self)
# add spec element to the spec
datatype.metadata_spec.append(self)
def get(self, name, default=None):
return self.__dict__.get(name, default)
def wrap(self, value, session):
"""
Turns a stored value into its usable form.
"""
return self.param.wrap(value, session)
def unwrap(self, value):
"""
Turns an incoming value into its storable form.
"""
return self.param.unwrap(value)
def __str__(self):
# TODO??: assuming param is the class of this MetadataElementSpec - add the plain class name for that
spec_dict = dict(param_class=self.param.__class__.__name__)
spec_dict.update(self.__dict__)
return ("{name} ({param_class}): {desc}, defaults to '{default}'".format(**spec_dict))
# create a statement class that, when called,
# will add a new MetadataElementSpec to a class's metadata_spec
MetadataElement = Statement(MetadataElementSpec)
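# Declarative usage sketch (the element names below are illustrative, not defined in this file):
#
#     class Tabular(data.Text):
#         MetadataElement(name="columns", default=0, desc="Number of columns",
#                         param=ColumnParameter, visible=False)
#
# As the comment above notes, each MetadataElement(...) call made in a datatype class body
# ultimately appends a MetadataElementSpec to that class's metadata_spec.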
"""
MetadataParameter sub-classes.
"""
class SelectParameter(MetadataParameter):
def __init__(self, spec):
MetadataParameter.__init__(self, spec)
self.values = self.spec.get("values")
self.multiple = string_as_bool(self.spec.get("multiple"))
def to_string(self, value):
if value in [None, []]:
return str(self.spec.no_value)
if not isinstance(value, list):
value = [value]
return ",".join(map(str, value))
def get_field(self, value=None, context=None, other_values=None, values=None, **kwd):
context = context or {}
other_values = other_values or {}
field = form_builder.SelectField(self.spec.name, multiple=self.multiple, display=self.spec.get("display"))
if self.values:
value_list = self.values
elif values:
value_list = values
elif value:
value_list = [(v, v) for v in listify(value)]
else:
value_list = []
for val, label in value_list:
try:
if (self.multiple and val in value) or (not self.multiple and val == value):
field.add_option(label, val, selected=True)
else:
field.add_option(label, val, selected=False)
except TypeError:
field.add_option(val, label, selected=False)
return field
def wrap(self, value, session):
# do we really need this (wasteful)? - yes because we are not sure that
# all existing selects have been stored previously as lists. Also this
# will handle the case where defaults/no_values are specified and are
# single non-list values.
value = self.marshal(value)
if self.multiple:
return value
elif value:
return value[0] # single select, only return the first value
return None
@classmethod
def marshal(cls, value):
# Store select as list, even if single item
if value is None:
return []
if not isinstance(value, list):
return [value]
return value
class DBKeyParameter(SelectParameter):
def get_field(self, value=None, context=None, other_values=None, values=None, **kwd):
context = context or {}
other_values = other_values or {}
try:
values = kwd['trans'].app.genome_builds.get_genome_build_names(kwd['trans'])
except KeyError:
pass
return super().get_field(value, context, other_values, values, **kwd)
class RangeParameter(SelectParameter):
def __init__(self, spec):
SelectParameter.__init__(self, spec)
# The spec must be set with min and max values
self.min = spec.get("min") or 1
self.max = spec.get("max") or 1
self.step = self.spec.get("step") or 1
def get_field(self, value=None, context=None, other_values=None, values=None, **kwd):
context = context or {}
other_values = other_values or {}
if values is None:
values = list(zip(range(self.min, self.max, self.step), range(self.min, self.max, self.step)))
return SelectParameter.get_field(self, value=value, context=context, other_values=other_values, values=values, **kwd)
@classmethod
def marshal(cls, value):
value = SelectParameter.marshal(value)
values = [int(x) for x in value]
return values
class ColumnParameter(RangeParameter):
def get_field(self, value=None, context=None, other_values=None, values=None, **kwd):
context = context or {}
other_values = other_values or {}
if values is None and context:
column_range = range(1, (context.columns or 0) + 1, 1)
values = list(zip(column_range, column_range))
return RangeParameter.get_field(self, value=value, context=context, other_values=other_values, values=values, **kwd)
class ColumnTypesParameter(MetadataParameter):
def to_string(self, value):
return ",".join(map(str, value))
class ListParameter(MetadataParameter):
def to_string(self, value):
return ",".join(str(x) for x in value)
class DictParameter(MetadataParameter):
def to_string(self, value):
return json.dumps(value)
def to_safe_string(self, value):
# We do not sanitize json dicts
return safe_dumps(value)
class PythonObjectParameter(MetadataParameter):
def to_string(self, value):
if not value:
return self.spec._to_string(self.spec.no_value)
return self.spec._to_string(value)
def get_field(self, value=None, context=None, other_values=None, **kwd):
context = context or {}
other_values = other_values or {}
return form_builder.TextField(self.spec.name, value=self._to_string(value))
@classmethod
def marshal(cls, value):
return value
class FileParameter(MetadataParameter):
def to_string(self, value):
if not value:
return str(self.spec.no_value)
return value.file_name
def to_safe_string(self, value):
# We do not sanitize file names
return self.to_string(value)
def get_field(self, value=None, context=None, other_values=None, **kwd):
context = context or {}
other_values = other_values or {}
return form_builder.TextField(self.spec.name, value=str(value.id))
def wrap(self, value, session):
if value is None:
return None
        if isinstance(value, (galaxy.model.MetadataFile, MetadataTempFile)):
return value
if isinstance(value, int):
return session.query(galaxy.model.MetadataFile).get(value)
else:
return session.query(galaxy.model.MetadataFile).filter_by(uuid=value).one()
def make_copy(self, value, target_context: MetadataCollection, source_context):
session = target_context._object_session(target_context.parent)
value = self.wrap(value, session=session)
target_dataset = target_context.parent.dataset
if value and target_dataset.object_store.exists(target_dataset):
# Only copy MetadataFile if the target dataset has been created in an object store.
# All current datatypes re-generate MetadataFile objects when setting metadata,
# so this would ultimately get overwritten anyway.
new_value = galaxy.model.MetadataFile(dataset=target_context.parent, name=self.spec.name)
session.add(new_value)
try:
shutil.copy(value.file_name, new_value.file_name)
            except AssertionError:
                # accessing new_value.file_name can fail before the MetadataFile has an id;
                # flush the session so the id is assigned, then retry the copy
                session.flush()
                shutil.copy(value.file_name, new_value.file_name)
return self.unwrap(new_value)
return None
@classmethod
def marshal(cls, value):
if isinstance(value, galaxy.model.MetadataFile):
# We want to push value.id to the database, but need to skip this when no session is available,
# as in extended_metadata mode, so there we just accept MetadataFile.
# We will only serialize MetadataFile in this mode and not push to the database, so this is OK.
value = value.id or value
if not isinstance(value, int) and object_session(value):
value = str(value.uuid)
return value
def from_external_value(self, value, parent, path_rewriter=None):
"""
        Turns a value read from an external dict into its value to be pushed directly into the metadata dict.
"""
if MetadataTempFile.is_JSONified_value(value):
value = MetadataTempFile.from_JSON(value)
if isinstance(value, MetadataTempFile):
mf = parent.metadata.get(self.spec.name, None)
if mf is None:
mf = self.new_file(dataset=parent, **value.kwds)
# Ensure the metadata file gets updated with content
file_name = value.file_name
if path_rewriter:
# Job may have run with a different (non-local) tmp/working
# directory. Correct.
file_name = path_rewriter(file_name)
parent.dataset.object_store.update_from_file(mf,
file_name=file_name,
extra_dir='_metadata_files',
extra_dir_at_root=True,
alt_name=os.path.basename(mf.file_name))
os.unlink(file_name)
value = mf.id
return value
def to_external_value(self, value):
"""
Turns a value read from a metadata into its value to be pushed directly into the external dict.
"""
if isinstance(value, galaxy.model.MetadataFile):
value = value.id
elif isinstance(value, MetadataTempFile):
value = MetadataTempFile.to_JSON(value)
return value
def new_file(self, dataset=None, **kwds):
# If there is a place to store the file (i.e. an object_store has been bound to
# Dataset) then use a MetadataFile and assume it is accessible. Otherwise use
# a MetadataTempFile.
if getattr(dataset.dataset, "object_store", False):
mf = galaxy.model.MetadataFile(name=self.spec.name, dataset=dataset, **kwds)
sa_session = object_session(dataset)
if sa_session:
sa_session.add(mf)
sa_session.flush() # flush to assign id
return mf
else:
            # we need to make a tmp file that is accessible to the head node,
# we will be copying its contents into the MetadataFile objects filename after restoring from JSON
# we do not include 'dataset' in the kwds passed, as from_JSON_value() will handle this for us
return MetadataTempFile(**kwds)
# This class is used when a database file connection is not available
class MetadataTempFile:
tmp_dir = 'database/tmp' # this should be overwritten as necessary in calling scripts
def __init__(self, **kwds):
self.kwds = kwds
self._filename = None
@property
def file_name(self):
if self._filename is None:
            # we need to create a tmp file, accessible across all nodes/heads, save the name, and return it
self._filename = abspath(tempfile.NamedTemporaryFile(dir=self.tmp_dir, prefix="metadata_temp_file_").name)
            open(self._filename, 'wb+').close()  # create an empty file so the name can't be reused by tempfile
return self._filename
def to_JSON(self):
return {'__class__': self.__class__.__name__,
'filename': self.file_name,
'kwds': self.kwds}
@classmethod
def from_JSON(cls, json_dict):
# need to ensure our keywords are not unicode
rval = cls(**stringify_dictionary_keys(json_dict['kwds']))
rval._filename = json_dict['filename']
return rval
@classmethod
def is_JSONified_value(cls, value):
return (isinstance(value, dict) and value.get('__class__', None) == cls.__name__)
@classmethod
def cleanup_from_JSON_dict_filename(cls, filename):
try:
with open(filename) as fh:
for value in json.load(fh).values():
if cls.is_JSONified_value(value):
value = cls.from_JSON(value)
if isinstance(value, cls) and os.path.exists(value.file_name):
log.debug('Cleaning up abandoned MetadataTempFile file: %s', value.file_name)
os.unlink(value.file_name)
except Exception as e:
log.debug('Failed to cleanup MetadataTempFile temp files from %s: %s', filename, unicodify(e))
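# Round-trip sketch: tmp = MetadataTempFile(); tmp.to_JSON() yields
# {'__class__': 'MetadataTempFile', 'filename': tmp.file_name, 'kwds': {...}}, and
# MetadataTempFile.from_JSON(d) rebuilds an instance pointing at the same temp file;
# cleanup_from_JSON_dict_filename() scans such dicts on disk and unlinks leftover temp files.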
__all__ = (
"Statement",
"MetadataElement",
"MetadataCollection",
"MetadataSpecCollection",
"MetadataParameter",
"MetadataElementSpec",
"SelectParameter",
"DBKeyParameter",
"RangeParameter",
"ColumnParameter",
"ColumnTypesParameter",
"ListParameter",
"DictParameter",
"PythonObjectParameter",
"FileParameter",
"MetadataTempFile",
)
|
py | 1a3e43bfc71768043d20f0ba5df7bb836f3cff14 | #!/usr/bin/python
import os, sys
from django.core.management import execute_manager
sys.path.insert(0, os.path.abspath('./../../'))
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
|
py | 1a3e43f2c938656700d98ea9a304c121dcd31fde | #!/usr/bin/python
from ansible.module_utils.opsmanager import ansible_setup
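# Minimal Ansible module (sketch of intent): ansible_setup() is assumed to construct the
# AnsibleModule and an Ops Manager client; the module then looks up the group named by the
# 'cluster' parameter, deletes its active maintenance window, and reports the API response.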
if __name__ == '__main__':
module, opsmanager = ansible_setup()
group = opsmanager.get_group_by_name(module.params['cluster'])
response = opsmanager.delete_maintenance(group)
module.exit_json(changed=False, meta=response)
|
py | 1a3e450c89b842bc118c069cf5f2ecef94c435ab | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-15 06:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('todolist_rest', '0004_auto_20171215_1437'),
]
operations = [
migrations.AlterField(
model_name='todo',
name='expire_date',
field=models.DateField(default=django.utils.timezone.now),
),
]
|