repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|
CCallahanIV/data-structures | src/test_priorityq.py | 1 | 4519 | """This Module contains testing for the Priority Q."""
import pytest
TEST_SET = [
[(17, 1), (99, 2), (15, 1), (99, 3), (1, 2), (9, 3)]
]
BAD_PRIO = [True, False, [1, 2], (), {"oops": "This is bad"}, "No more, please!"]
BAD_INIT = [[(1, 2), (1, 2, 3)], True, False, "whoops"]
@pytest.fixture
def empty_priority_q():
"""Thie fixture creates and empty priority queue."""
from priorityq import PriorityQ
new_pq = PriorityQ()
return new_pq
@pytest.fixture
def filled_priority_q():
"""The fixture creates a filled priority queue."""
from priorityq import PriorityQ
new_pq = PriorityQ(TEST_SET[0])
return new_pq
def test_creation_of_empty_priority_q(empty_priority_q):
"""The creates an empty queue and tests the size."""
assert len(empty_priority_q) == 0
assert empty_priority_q._high_p is None
assert len(empty_priority_q._pdict) == 0
def test_initialize_with_single_tuple():
"""The test initializes priority q with a single tuple."""
from priorityq import PriorityQ
new_pq = PriorityQ((3, 2))
assert len(new_pq) == 1
assert new_pq._high_p == 2
assert new_pq.peek() == 3
def test_initialize_with_single_digit():
    """The test initializes a prio q with a single digit."""
from priorityq import PriorityQ
with pytest.raises(TypeError):
PriorityQ(3)
# def test_intialize_with_bad_format_raises_type_error():
# """Test initializing with badly formatted arguments."""
# from priorityq import PriorityQ
# for item in BAD_INIT:
# with pytest.raises(TypeError):
# PriorityQ(item)
def test_insert_empty_with_val_and_no_prio(empty_priority_q):
"""The test inserts val w/o prio to empty list."""
empty_priority_q.insert(4)
assert empty_priority_q._high_p == 0
assert empty_priority_q._pdict[0].peek() == 4
def test_insert_filled_with_val_and_prio_where_prio_not_already_there(filled_priority_q):
"""The test inserts with val and prio, where prio not already there."""
filled_priority_q.insert(7, 4)
assert filled_priority_q._pdict[4].peek() == 7
def test_insert_val_into_empty_priority_q(empty_priority_q):
    """Test inserting a val and prio into an empty priority queue."""
    new_prq = empty_priority_q
    new_prq.insert(3, 1)
    assert len(empty_priority_q) == 1
    assert empty_priority_q._high_p == 1
    assert empty_priority_q._pdict[1].peek() == 3
def test_insert_into_full_prio_already_there(filled_priority_q):
"""Test inserting into a filled priority q, with priority already present."""
old_len = len(filled_priority_q)
filled_priority_q.insert("something", 1)
assert len(filled_priority_q) == old_len + 1
assert filled_priority_q.peek() == 17
def test_insert_into_full_with_an_iterable(filled_priority_q):
"""Test attempting to insert into a priority q with an iterable."""
with pytest.raises(TypeError):
filled_priority_q.insert([1, 2, 3])
def test_insert_weird_cases_for_priority(empty_priority_q):
"""Test that priorities can only be int."""
for item in BAD_PRIO:
with pytest.raises(TypeError):
empty_priority_q.insert("anything", item)
def test_pop_filled_priority_q(filled_priority_q):
    """Test popping from a filled priority queue."""
    new_fprq = filled_priority_q
    val = new_fprq.pop()
    assert len(filled_priority_q) == 5
    assert filled_priority_q._high_p == 1
    assert val == 17
    assert filled_priority_q.peek() == 15
def test_pop_on_empty_priority_q(empty_priority_q):
"""Test popping on an empty priority q."""
with pytest.raises(IndexError):
empty_priority_q.pop()
def test_pop_on_filled_until_empty(filled_priority_q):
"""Test pop on filled Priority Q until empty."""
expected = [17, 15, 99, 1, 99, 9]
for i in range(len(filled_priority_q)):
assert filled_priority_q.pop() == expected[i]
assert len(filled_priority_q) == 0
assert filled_priority_q._high_p is None
def test_peek_on_empty(empty_priority_q):
"""Test peek() on an empty priority Q, should return None."""
assert empty_priority_q.peek() is None
def test_peek_on_filled(filled_priority_q):
"""Test peek() on a filled priorityq."""
assert filled_priority_q.peek() == 17
def test_len_on_filled(filled_priority_q):
"""Test len method on full PQ."""
assert len(filled_priority_q) == len(TEST_SET[0])
def test_len_on_empty(empty_priority_q):
"""Test len method on empty PQ."""
assert len(empty_priority_q) == 0
| mit | -5,064,511,433,917,170,000 | 30.381944 | 89 | 0.658553 | false |
platformio/platformio-core | platformio/commands/update.py | 1 | 2091 | # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from platformio.cache import cleanup_content_cache
from platformio.commands.lib.command import CTX_META_STORAGE_DIRS_KEY
from platformio.commands.lib.command import lib_update as cmd_lib_update
from platformio.commands.platform import platform_update as cmd_platform_update
from platformio.package.manager.core import update_core_packages
from platformio.package.manager.library import LibraryPackageManager
@click.command(
"update", short_help="Update installed platforms, packages and libraries"
)
@click.option("--core-packages", is_flag=True, help="Update only the core packages")
@click.option(
"-c",
"--only-check",
is_flag=True,
help="DEPRECATED. Please use `--dry-run` instead",
)
@click.option(
"--dry-run", is_flag=True, help="Do not update, only check for the new versions"
)
@click.pass_context
def cli(ctx, core_packages, only_check, dry_run):
# cleanup lib search results, cached board and platform lists
cleanup_content_cache("http")
only_check = dry_run or only_check
update_core_packages(only_check)
if core_packages:
return
click.echo()
click.echo("Platform Manager")
click.echo("================")
ctx.invoke(cmd_platform_update, only_check=only_check)
click.echo()
click.echo("Library Manager")
click.echo("===============")
ctx.meta[CTX_META_STORAGE_DIRS_KEY] = [LibraryPackageManager().package_dir]
ctx.invoke(cmd_lib_update, only_check=only_check)
| apache-2.0 | -3,055,954,892,819,605,500 | 34.440678 | 84 | 0.726925 | false |
fridayy/movie-trailer-uc | main/fresh_tomatoes.py | 1 | 6763 | import webbrowser
import os
import re
# Styles and scripting for the page
main_page_head = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Fresh Tomatoes!</title>
<!-- Bootstrap 3 -->
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap.min.css">
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap-theme.min.css">
<script src="http://code.jquery.com/jquery-1.10.1.min.js"></script>
<script src="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/js/bootstrap.min.js"></script>
<link href='https://fonts.googleapis.com/css?family=Droid+Sans:400,700' rel='stylesheet' type='text/css'>
<style type="text/css" media="screen">
body {
padding-top: 80px;
font-family: 'Droid Sans', sans-serif;
}
#trailer .modal-dialog {
margin-top: 200px;
width: 640px;
height: 480px;
}
.hanging-close {
position: absolute;
top: -12px;
right: -12px;
z-index: 9001;
}
#trailer-video {
width: 100%;
height: 100%;
}
.movie-tile {
margin-bottom: 20px;
padding-top: 20px;
cursor: default;
}
.movie-tile:hover {
background-color: #EEE;
cursor: pointer;
}
.scale-media {
padding-bottom: 56.25%;
position: relative;
}
.scale-media iframe {
border: none;
height: 100%;
position: absolute;
width: 100%;
left: 0;
top: 0;
background-color: white;
}
.panel-heading {
background: #212121 !important;
text-align: left;
}
.panel-body {
text-align: left;
}
.panel-title {
color: white;
}
</style>
<script type="text/javascript" charset="utf-8">
//initialize tooltips
$(function () {
$('[data-toggle="tooltip"]').tooltip()
})
// Pause the video when the modal is closed
$(document).on('click', '.hanging-close, .modal-backdrop, .modal', function (event) {
// Remove the src so the player itself gets removed, as this is the only
// reliable way to ensure the video stops playing in IE
$("#trailer-video-container").empty();
});
// Start playing the video whenever the trailer modal is opened
$(document).on('click', '.movie-tile', function (event) {
var trailerYouTubeId = $(this).attr('data-trailer-youtube-id')
var sourceUrl = 'http://www.youtube.com/embed/' + trailerYouTubeId + '?autoplay=1&html5=1';
$("#trailer-video-container").empty().append($("<iframe></iframe>", {
'id': 'trailer-video',
'type': 'text-html',
'src': sourceUrl,
'frameborder': 0
}));
});
// Animate in the movies when the page loads
$(document).ready(function () {
$('.movie-tile').hide().first().show("fast", function showNext() {
$(this).next("div").show("fast", showNext);
});
});
</script>
</head>
'''
# The main page layout and title bar
main_page_content = '''
<body>
<!-- Trailer Video Modal -->
<div class="modal" id="trailer">
<div class="modal-dialog">
<div class="modal-content">
<a href="#" class="hanging-close" data-dismiss="modal" aria-hidden="true">
<img src="https://lh5.ggpht.com/v4-628SilF0HtHuHdu5EzxD7WRqOrrTIDi_MhEG6_qkNtUK5Wg7KPkofp_VJoF7RS2LhxwEFCO1ICHZlc-o_=s0#w=24&h=24"/>
</a>
<div class="scale-media" id="trailer-video-container">
</div>
</div>
</div>
</div>
<!-- Main Page Content -->
<div class="container">
<div class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container">
<div class="navbar-header">
<a class="navbar-brand" href="#">Fresh Tomatoes Movie Trailers</a>
</div>
</div>
</div>
</div>
<div class="container">
{movie_tiles}
</div>
</body>
</html>
'''
# A single movie entry html template
movie_tile_content = '''
<div class="col-md-6 col-lg-4 movie-tile text-center" data-trailer-youtube-id="{trailer_youtube_id}" data-toggle="modal" data-target="#trailer">
<img src="{poster_image_url}" width="220" height="342">
<h2 data-toggle="tooltip" data-placement="right" title="Click to open trailer...">{movie_title}</h2>
<div class="panel panel-default">
<div class="panel-heading">
<h3 class="panel-title">Information</h3>
</div>
<div class="panel-body">
<p>Release Date: {movie_year}</p>
<p>Runtime: {movie_runtime}</p>
<p>Country: {movie_country}</p>
<p>Actors: {movie_actors}</p>
<p>{movie_plot}</p>
</div>
</div>
</div>
'''
def create_movie_tiles_content(movies):
# The HTML content for this section of the page
content = ''
for movie in movies:
# Extract the youtube ID from the url
youtube_id_match = re.search(
r'(?<=v=)[^&#]+', movie.trailer_youtube_url)
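        # Fall back to the shortened youtu.be/<id> form when no v= parameter
        # is present in the URL.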
youtube_id_match = youtube_id_match or re.search(
r'(?<=be/)[^&#]+', movie.trailer_youtube_url)
trailer_youtube_id = (youtube_id_match.group(0) if youtube_id_match
else None)
# Append the tile for the movie with its content filled in
content += movie_tile_content.format(
movie_title=movie.title,
movie_year=movie.year,
movie_runtime=movie.runtime,
movie_country=movie.country,
movie_actors=movie.actors,
movie_genre=movie.genre,
movie_plot=movie.plot,
poster_image_url=movie.poster_image_url,
trailer_youtube_id=trailer_youtube_id
)
return content
def open_movies_page(movies):
# Create or overwrite the output file
output_file = open('fresh_tomatoes.html', 'w')
# Replace the movie tiles placeholder generated content
rendered_content = main_page_content.format(
movie_tiles=create_movie_tiles_content(movies))
# Output the file
output_file.write(main_page_head + rendered_content)
output_file.close()
# open the output file in the browser (in a new tab, if possible)
url = os.path.abspath(output_file.name)
webbrowser.open('file://' + url, new=2)
| gpl-3.0 | -8,687,864,254,606,987,000 | 32.815 | 144 | 0.554488 | false |
Jolopy/GimpHub | app/Gimp_Plugins/GimpHub.py | 1 | 11216 | #!/usr/bin/env python
from socketIO_client import SocketIO, BaseNamespace
from gimpfu import *
import os
import time
from threading import Thread
from array import array
import requests
import configparser
import websocket
import _thread
import http.client
import numpy as np
class GimpHubImage(object):
def __init__(self, drawable):
self.currentImage = self.get_pix()
self.drawable = drawable
self.update_suspended = False
def set_pix(self, x, y, r, g, b):
pdb.gimp_drawable_set_pixel(self.drawable, y, x, 3, [r, g, b])
def split_img_evenly(self, n):
activeImage, layer, tm, tn = self._get_active_image()
vertical = layer.height / n
srcRgn = layer.get_pixel_rgn(0, 0, layer.width, layer.height,
False, False)
# not done
def get_pix(self):
activeImage, layer, tm, tn = self._get_active_image()
srcRgn = layer.get_pixel_rgn(0, 0, layer.width, layer.height,
False, False)
src_pixels = array("B", srcRgn[0:layer.width, 0:layer.height])
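        # Unpack the flat byte buffer into a nested list of 3-byte RGB
        # triplets; get_changes() later diffs the drawable against this snapshot.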
imageArr = []
index = 0
for x in range(layer.width):
row = []
for y in range(layer.height):
row.append(src_pixels[index:index+3])
index += 3
imageArr.append(row)
print(src_pixels)
return imageArr
def get_changes(self):
activeImage, layer, tm, tn = self._get_active_image()
changes = []
srcRgn = layer.get_pixel_rgn(0, 0, layer.width, layer.height,
False, False)
src_pixels = array("B", srcRgn[0:layer.width, 0:layer.height])
verificationArray = []
changes = []
outerIndex = 0
print("---------------------------------------------------")
while True:
if outerIndex % 2 == 0:
changes = []
workingArr = changes
else:
verificationArray = []
workingArr = verificationArray
            # Re-read the drawable so each pass scans fresh pixel data; a diff
            # is only accepted once two consecutive scans agree, which filters
            # out edits that are still in progress.
            src_pixels = array("B", srcRgn[0:layer.width, 0:layer.height])
            index = 0
for x in range(layer.width):
#row = []
for y in range(layer.height):
#row.append(src_pixels[index:index + 3])
# Save the value in the channel layers.
# print "(%s, %s) : (%r, %r, %r)" % (x, y, pixelR, pixelG, pixelB)
if self.currentImage[x][y] != src_pixels[index:index + 3]:
workingArr.append((x, y, src_pixels[index],
src_pixels[index+1],
src_pixels[index+2]))
index += 3
outerIndex += 1
if changes == verificationArray:
for change in changes:
self.currentImage[change[0]][change[1]] = array('B', change[2:5])
break
time.sleep(2)
return changes
def _get_active_image(self):
activeImage = gimp.image_list()[0]
layer = pdb.gimp_image_get_active_layer(activeImage)
# Calculate the number of tiles.
tn = int(layer.width / 64)
if (layer.width % 64 > 0):
tn += 1
tm = int(layer.height / 64)
if (layer.height % 64 > 0):
tm += 1
return activeImage, layer, tm, tn
def _get_img_pixels(self):
activeImage, layer, tm, tn = self._get_active_image()
imageArr = []
# Iterate over the tiles.
for i in range(tn):
for j in range(tm):
# Get the tiles.
tile = layer.get_tile(False, j, i)
# Iterate over the pixels of each tile.
for x in range(tile.ewidth):
row = []
for y in range(tile.eheight):
# Get the pixel and separate his colors.
pixel = tile[x, y]
pixelR = pixel[0] + "\x00\x00"
pixelG = "\x00" + pixel[1] + "\x00"
pixelB = "\x00\x00" + pixel[2]
# If the image has an alpha channel (or any other channel) copy his values.
if (len(pixel) > 3):
for k in range(len(pixel) - 3):
pixelR += pixel[k + 3]
pixelG += pixel[k + 3]
pixelB += pixel[k + 3]
# Save the value in the channel layers.
#print "(%s, %s) : (%r, %r, %r)" % (x, y, pixelR, pixelG, pixelB)
row.append([pixelR, pixelG, pixelB])
imageArr.append(row)
#print imageArr
return imageArr
class ChatNamespace(BaseNamespace):
def on_aaa_response(self, *args):
print('on_aaa_response', args)
class GimpHubLive(object):
def __init__(self, drawable, user):
#config = ConfigParser.ConfigParser()
#config.readfp(open(os.path.join(os.path.realpath(__file__), 'gimphub.ini')))
self.drawable = drawable
self.project = 'test2'
#self.user = '[email protected]'
self.user = user
#self.remote_server = "gimphub.duckdns.org"
self.remote_server = 'localhost'
self.remote_port = '5000'
self.lockfile_path = '/tmp/GHLIVE_LOCK_%s' % self.user
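        # The lock file doubles as a shutdown flag: the "End Gimphub" menu
        # entries create it, and start_live() stops polling once it appears.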
if os.path.exists(self.lockfile_path):
os.remove(self.lockfile_path)
#websocket.enableTrace(True)
self.running = True
self.socketIO = SocketIO(self.remote_server, self.remote_port)
self.socketIO.emit('connect')
self.chatNamespace = self.socketIO.define(ChatNamespace, '/chat')
self.chatNamespace.on('imgupdate', self.on_update)
self.chatNamespace.on('joined', self.on_joined)
self.chatNamespace.on('echo2', self.on_echo)
self.chatNamespace.emit('joined', self.user, self.project)
self.chatNamespace.emit('connect')
Thread(target=self.run_th).start()
time.sleep(2)
self.chatNamespace.emit('echo')
#
def on_update(self, obj):
print("UPDATE")
print(obj['user'] != self.user)
if obj['user'] != self.user and hasattr(self, 'GHIMG'):
self.GHIMG.update_suspended = True
for px in obj['update']:
#print px
self.GHIMG.set_pix(px[0], px[1], px[2], px[3], px[4])
pdb.gimp_drawable_update(self.drawable, 0, 0, self.drawable.width, self.drawable.height)
pdb.gimp_displays_flush()
self.GHIMG.update_suspended = False
def on_echo(self, *args):
print("ECHO")
def on_joined(self, *args):
print("JOINED")
print(args)
def run_th(self):
while True:
self.socketIO.wait(seconds=10)
if self.running is False:
print("SOCKETIO DISCONNECT")
self.socketIO.disconnect()
break
#
# def run_ws_th(self):
# self.ws.run_forever()
def send_update(self, update):
self.chatNamespace.emit('imgpush', update, self.project, self.user)
#
# url = "http://%s:%s/imgupdate" % (self.remote_server, self.remote_port)
# data = {'update': [list(x) for x in update], 'user':self.user, 'project':self.project}
# print data
# r = requests.post(url, data=data)
# print r
#
# def ws_on_message(self, ws, message):
# print message
#
# def ws_on_error(self, ws, error):
# print error
#
# def ws_on_close(self, ws):
# print "### closed ###"
#
# def ws_on_open(self, ws):
# def run(*args):
# ws.send("joined", self.user, self.project)
# while True:
# time.sleep(1)
# if self.running is False:
# ws.close()
# print "thread terminated"
# return None
# # ws.send("Hello %d" % i)
# thread.start_new_thread(run, ())
def __del__(self):
try:
if os.path.exists(self.lockfile_path):
os.remove(self.lockfile_path)
except:
pass
def start_live(self):
self.GHIMG = GimpHubImage(self.drawable)
while True:
if os.path.exists(self.lockfile_path):
print("CLIENT PROCESS ENDED")
self.running = False
os.remove(self.lockfile_path)
break
time.sleep(4)
update = self.GHIMG.get_changes()
print(len(update))
if update:
try:
self.send_update(update)
except Exception as e:
print("Can not POST to server! : %s " % str(e))
def gimphub_live_DEV(img, drawable):
t1 = GimpHubImage(drawable)
for i in range(5):
time.sleep(1)
print(t1.get_pix())
def gimphub_live(img, drawable):
imgProc = GimpHubLive(drawable, "user1")
imgProc.start_live()
def gimphub_live_2(img, drawable):
imgProc = GimpHubLive(drawable, "user2")
imgProc.start_live()
def gimphub_live_end(img, drawable):
lockfile_path = '/tmp/GHLIVE_LOCK_%s' % "user1"
if os.path.exists(lockfile_path):
print("Already shutting down!")
return None
with open(lockfile_path, 'w'):
pass
def gimphub_live_end_2(img, drawable):
lockfile_path = '/tmp/GHLIVE_LOCK_%s' % "user2"
if os.path.exists(lockfile_path):
print("Already shutting down!")
return None
with open(lockfile_path, 'w'):
pass
def gimphub_test_px(img, drawable):
g = GimpHubImage(drawable)
g.set_pix(2, 2, 100, 100, 100)
register("gimphub-livestart_DEV", "", "", "", "", "",
"<Image>/Image/DEV", "RGB, RGB*",
[
# (PF_STRING, "arg0", "argument 0", "test string"),
],
[],
gimphub_live_DEV
)
# register("gimphub-livestart", "", "", "", "", "",
# "<Image>/Image/Activate Gimphub", "RGB, RGB*",
# [
# # (PF_STRING, "arg0", "argument 0", "test string"),
# ],
# [],
# gimphub_live
# )
#
# register("gimphub-livestart2", "", "", "", "", "",
# "<Image>/Image/Activate Gimphub (2)", "RGB, RGB*",
# [
# # (PF_STRING, "arg0", "argument 0", "test string"),
# ],
# [],
# gimphub_live_2
# )
#
# register("gimphub-liveend", "", "", "", "", "",
# "<Image>/Image/End Gimphub", "RGB, RGB*",
# [
# # (PF_STRING, "arg0", "argument 0", "test string"),
# ],
# [],
# gimphub_live_end
# )
#
# register("gimphub-liveend2", "", "", "", "", "",
# "<Image>/Image/End Gimphub (2)", "RGB, RGB*",
# [
# # (PF_STRING, "arg0", "argument 0", "test string"),
# ],
# [],
# gimphub_live_end_2
# )
#
# register("gimphub-asf", "", "", "", "", "",
# "<Image>/Image/GH TEST", "RGB, RGB*",
# [
# # (PF_STRING, "arg0", "argument 0", "test string"),
# ],
# [],
# gimphub_test_px
# )
main()
| gpl-2.0 | 6,784,750,085,857,365,000 | 27.612245 | 100 | 0.502675 | false |
trondth/master | masters_project_config.py | 1 | 1060 | import ConfigParser
import os.path
PROJECTDIR = os.path.dirname(os.path.abspath(__file__))
if os.path.isfile(PROJECTDIR + '/masters_project.cfg'):
CONFIGFILE = PROJECTDIR + '/masters_project.cfg'
else:
CONFIGFILE = PROJECTDIR + '/masters_project-default.cfg'
#print CONFIGFILE
config = ConfigParser.ConfigParser()
config.read(CONFIGFILE)
DATA_PREFIX = config.get('general', 'DATA_PREFIX')
def getdoclist(filename):
f = open(filename)
l = f.read().splitlines()
f.close()
return l
DOCLIST_TRAINSET = getdoclist(DATA_PREFIX + config.get('doclist', 'PATH') +
'/' + config.get('doclist', 'TRAINSET'))
DOCLIST_TESTSET = getdoclist(DATA_PREFIX + config.get('doclist', 'PATH') + '/' + config.get('doclist', 'TESTSET'))
try:
DOCLIST_DEVTRAINSET = getdoclist(DATA_PREFIX + config.get('doclist', 'DEVPATH') + '/' + config.get('doclist', 'DEVTRAINSET'))
DOCLIST_DEVTESTSET = getdoclist(DATA_PREFIX + config.get('doclist', 'DEVPATH') + '/' + config.get('doclist', 'DEVTESTSET'))
except:
print "feil"
| mit | -3,513,187,572,238,241,000 | 33.193548 | 129 | 0.667925 | false |
protojour/aux | aux/internals/__init__.py | 1 | 1574 | from aux import working_dir
import os
import sys
def no_plugin_name_message():
print "No plugin name"
sys.exit(0)
def plugin_creator_routine(plugincreator, arguments):
# print plugincreator, arguments
plugin_home_directory = working_dir()
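    # Each plugin type is scaffolded with Paste's "basic_package" template
    # into the working directory, using the naming scheme aux_<type>_<name>.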
if 'service' in plugincreator:
if len(arguments) > 0:
packagename = "_".join(['aux', 'service', arguments[0]])
os.system('paster create -t basic_package -o %s --no-interactive %s' % (plugin_home_directory,
packagename) )
else:
no_plugin_name_message()
elif 'device' in plugincreator:
if len(arguments) > 0:
packagename = "_".join(['aux', 'device', arguments[0]])
os.system('paster create -t basic_package -o %s --no-interactive %s' % (plugin_home_directory,
packagename))
else:
no_plugin_name_message()
elif 'protocol' in plugincreator:
if len(arguments) > 0:
packagename = "_".join(['aux', 'protocol', arguments[0]])
os.system('paster create -t basic_package -o %s --no-interactive %s' % (plugin_home_directory,
packagename))
else:
no_plugin_name_message()
print 'Install plugin by running:\npip install -e %s' % packagename
sys.exit(0)
| bsd-3-clause | -8,956,666,608,450,789,000 | 41.540541 | 106 | 0.494917 | false |
shiyifuchen/PyFem | pyfem/pyfem/fem/Assembly.py | 1 | 4289 | # -*- coding: utf-8 -*-
from numpy import zeros, ones, ix_
from pyfem.util.dataStructures import Properties
from pyfem.util.dataStructures import elementData
from scipy.sparse import lil_matrix
import time
#######################################
# General array assembly routine for: #
# * assembleInternalForce #
# * assembleTangentStiffness #
#######################################
def assembleArray ( props, globdat, rank, action ):
t0=time.time()
#Initialize the global array A with rank 2
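  # LIL format is cheap for the incremental element-by-element writes below;
  # the matrix is converted to CSR at the end for fast solver operations.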
A = lil_matrix((len(globdat.dofs),len(globdat.dofs)))
B = zeros( len(globdat.dofs) * ones(1,dtype=int) )
globdat.resetNodalOutput()
outlabel=[]
if hasattr(props,'outlabel'):
outlabel = getattr(props,'outlabel')
#Loop over the element groups
for elementGroup in globdat.elements.iterGroupNames():
#Get the properties corresponding to the elementGroup
el_props = getattr( props, elementGroup )
#Loop over the elements in the elementGroup
for element in globdat.elements.iterElementGroup( elementGroup ):
#Get the element nodes
el_nodes = element.getNodes()
#Get the element coordinates
el_coords = globdat.nodes.getNodeCoords( el_nodes )
#Get the element degrees of freedom
el_dofs = globdat.dofs.get( el_nodes )
#Get the element state
el_a = globdat.state [el_dofs].copy()
el_Da = globdat.Dstate[el_dofs].copy()
factor1 = 1.0
factor2 = 1.0
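      # Element groups listed in props.kill are deactivated: their state is
      # zeroed, their internal force is dropped (factor1 = 0) and their
      # stiffness is scaled to a negligible value (factor2 = 1e-6) so the
      # global system stays well conditioned.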
if elementGroup in props.kill:
el_a = zeros(el_a.shape)
el_Da = zeros(el_Da.shape)
factor1 = 0.0
factor2 = 1e-6
if hasattr(element,"mat"):
element.mat.clearHistory()
# if elementGroup == 'Elem1':
# el_a = zeros(el_a.shape)
# el_Da = zeros(el_Da.shape)
#Create the an element state to pass through to the element
#el_state = Properties( { 'state' : el_a, 'Dstate' : el_Da } )
elemdat = elementData( el_a , el_Da )
elemdat.coords = el_coords
elemdat.nodes = el_nodes
elemdat.props = el_props
elemdat.outlabel = outlabel
if hasattr( element , "matProps" ):
elemdat.matprops = element.matProps
if hasattr( element , "mat" ):
element.mat.reset()
#Get the element contribution by calling the specified action
getattr( element, action )( elemdat )
# for label in elemdat.outlabel:
# element.appendNodalOutput( label , globdat , elemdat.outdata )
if rank == 0:
if elementGroup in props.kill:
continue
for i,label in enumerate(elemdat.outlabel):
element.appendNodalOutput( label , globdat , elemdat.outdata[i] )
elif rank == 1:
B[el_dofs] += elemdat.fint*factor1
elif rank == 2 and action is "getTangentStiffness":
A[ix_(el_dofs,el_dofs)] += elemdat.stiff*factor2
B[el_dofs] += elemdat.fint*factor1
elif rank == 2 and action is "getMassMatrix":
A[ix_(el_dofs,el_dofs)] += elemdat.mass*factor1
B[el_dofs] += elemdat.lumped*factor1
else:
raise NotImplementedError('assemleArray is only implemented for vectors and matrices.')
# A=A.tocsr()
t1=time.time()
print "Time Elapse for Assembly: ",t1-t0
if rank == 1:
return B
elif rank == 2:
return A.tocsr(),B
##########################################
# Internal force vector assembly routine #
##########################################
def assembleInternalForce ( props, globdat ):
return assembleArray( props, globdat, rank = 1, action = 'getInternalForce' )
#############################################
# Tangent stiffness matrix assembly routine #
#############################################
def assembleTangentStiffness ( props, globdat ):
return assembleArray( props, globdat, rank = 2, action = 'getTangentStiffness' )
#############################################
# Mass matrix assembly routine #
#############################################
def assembleMassMatrix ( props, globdat ):
return assembleArray( props, globdat, rank = 2, action = 'getMassMatrix' )
def assembleOutputData ( props, globdat ):
return assembleArray( props, globdat, rank = 0, action = 'getOutputData' ) | gpl-3.0 | 7,672,155,459,087,517,000 | 31.5 | 95 | 0.592213 | false |
alanamarzoev/ray | python/ray/rllib/ppo/ppo.py | 1 | 10442 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import pickle
import tensorflow as tf
from tensorflow.python import debug as tf_debug
import ray
from ray.rllib.common import Agent, TrainingResult
from ray.rllib.ppo.runner import Runner, RemoteRunner
from ray.rllib.ppo.rollout import collect_samples
from ray.rllib.ppo.utils import shuffle
DEFAULT_CONFIG = {
# Discount factor of the MDP
"gamma": 0.995,
# Number of steps after which the rollout gets cut
"horizon": 2000,
# If true, use the Generalized Advantage Estimator (GAE)
# with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
"use_gae": True,
# GAE(lambda) parameter
"lambda": 1.0,
# Initial coefficient for KL divergence
"kl_coeff": 0.2,
# Number of SGD iterations in each outer loop
"num_sgd_iter": 30,
# Stepsize of SGD
"sgd_stepsize": 5e-5,
# TODO(pcm): Expose the choice between gpus and cpus
# as a command line argument.
"devices": ["/cpu:%d" % i for i in range(4)],
"tf_session_args": {
"device_count": {"CPU": 4},
"log_device_placement": False,
"allow_soft_placement": True,
},
# Batch size for policy evaluations for rollouts
"rollout_batchsize": 1,
# Total SGD batch size across all devices for SGD
"sgd_batchsize": 128,
# Coefficient of the value function loss
"vf_loss_coeff": 1.0,
# Coefficient of the entropy regularizer
"entropy_coeff": 0.0,
# PPO clip parameter
"clip_param": 0.3,
# Target value for KL divergence
"kl_target": 0.01,
# Config params to pass to the model
"model": {"free_log_std": False},
# If >1, adds frameskip
"extra_frameskip": 1,
# Number of timesteps collected in each outer loop
"timesteps_per_batch": 40000,
# Each tasks performs rollouts until at least this
# number of steps is obtained
"min_steps_per_task": 1000,
# Number of actors used to collect the rollouts
"num_workers": 5,
# Dump TensorFlow timeline after this many SGD minibatches
"full_trace_nth_sgd_batch": -1,
# Whether to profile data loading
"full_trace_data_load": False,
# Outer loop iteration index when we drop into the TensorFlow debugger
"tf_debug_iteration": -1,
# If this is True, the TensorFlow debugger is invoked if an Inf or NaN
# is detected
"tf_debug_inf_or_nan": False,
# If True, we write tensorflow logs and checkpoints
"write_logs": True
}
class PPOAgent(Agent):
def __init__(self, env_name, config, upload_dir=None):
config.update({"alg": "PPO"})
Agent.__init__(self, env_name, config, upload_dir=upload_dir)
with tf.Graph().as_default():
self._init()
def _init(self):
self.global_step = 0
self.j = 0
self.kl_coeff = self.config["kl_coeff"]
self.model = Runner(self.env_name, 1, self.config, self.logdir, False)
self.agents = [
RemoteRunner.remote(
self.env_name, 1, self.config, self.logdir, True)
for _ in range(self.config["num_workers"])]
self.start_time = time.time()
if self.config["write_logs"]:
self.file_writer = tf.summary.FileWriter(
self.logdir, self.model.sess.graph)
else:
self.file_writer = None
self.saver = tf.train.Saver(max_to_keep=None)
def train(self):
agents = self.agents
config = self.config
model = self.model
j = self.j
self.j += 1
print("===> iteration", self.j)
iter_start = time.time()
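        # Broadcast the current policy weights to every rollout worker through
        # the Ray object store before collecting new samples.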
weights = ray.put(model.get_weights())
[a.load_weights.remote(weights) for a in agents]
trajectory, total_reward, traj_len_mean = collect_samples(
agents, config)
print("total reward is ", total_reward)
print("trajectory length mean is ", traj_len_mean)
print("timesteps:", trajectory["dones"].shape[0])
if self.file_writer:
traj_stats = tf.Summary(value=[
tf.Summary.Value(
tag="ppo/rollouts/mean_reward",
simple_value=total_reward),
tf.Summary.Value(
tag="ppo/rollouts/traj_len_mean",
simple_value=traj_len_mean)])
self.file_writer.add_summary(traj_stats, self.global_step)
self.global_step += 1
def standardized(value):
# Divide by the maximum of value.std() and 1e-4
# to guard against the case where all values are equal
return (value - value.mean()) / max(1e-4, value.std())
if config["use_gae"]:
trajectory["advantages"] = standardized(trajectory["advantages"])
else:
trajectory["returns"] = standardized(trajectory["returns"])
rollouts_end = time.time()
print("Computing policy (iterations=" + str(config["num_sgd_iter"]) +
", stepsize=" + str(config["sgd_stepsize"]) + "):")
names = [
"iter", "total loss", "policy loss", "vf loss", "kl", "entropy"]
print(("{:>15}" * len(names)).format(*names))
trajectory = shuffle(trajectory)
shuffle_end = time.time()
tuples_per_device = model.load_data(
trajectory, j == 0 and config["full_trace_data_load"])
load_end = time.time()
rollouts_time = rollouts_end - iter_start
shuffle_time = shuffle_end - rollouts_end
load_time = load_end - shuffle_end
sgd_time = 0
for i in range(config["num_sgd_iter"]):
sgd_start = time.time()
batch_index = 0
num_batches = (
int(tuples_per_device) // int(model.per_device_batch_size))
loss, policy_loss, vf_loss, kl, entropy = [], [], [], [], []
permutation = np.random.permutation(num_batches)
# Prepare to drop into the debugger
if j == config["tf_debug_iteration"]:
model.sess = tf_debug.LocalCLIDebugWrapperSession(model.sess)
while batch_index < num_batches:
full_trace = (
i == 0 and j == 0 and
batch_index == config["full_trace_nth_sgd_batch"])
batch_loss, batch_policy_loss, batch_vf_loss, batch_kl, \
batch_entropy = model.run_sgd_minibatch(
permutation[batch_index] * model.per_device_batch_size,
self.kl_coeff, full_trace,
self.file_writer)
loss.append(batch_loss)
policy_loss.append(batch_policy_loss)
vf_loss.append(batch_vf_loss)
kl.append(batch_kl)
entropy.append(batch_entropy)
batch_index += 1
loss = np.mean(loss)
policy_loss = np.mean(policy_loss)
vf_loss = np.mean(vf_loss)
kl = np.mean(kl)
entropy = np.mean(entropy)
sgd_end = time.time()
print(
"{:>15}{:15.5e}{:15.5e}{:15.5e}{:15.5e}{:15.5e}".format(
i, loss, policy_loss, vf_loss, kl, entropy))
values = []
if i == config["num_sgd_iter"] - 1:
metric_prefix = "ppo/sgd/final_iter/"
values.append(tf.Summary.Value(
tag=metric_prefix + "kl_coeff",
simple_value=self.kl_coeff))
values.extend([
tf.Summary.Value(
tag=metric_prefix + "mean_entropy",
simple_value=entropy),
tf.Summary.Value(
tag=metric_prefix + "mean_loss",
simple_value=loss),
tf.Summary.Value(
tag=metric_prefix + "mean_kl",
simple_value=kl)])
if self.file_writer:
sgd_stats = tf.Summary(value=values)
self.file_writer.add_summary(sgd_stats, self.global_step)
self.global_step += 1
sgd_time += sgd_end - sgd_start
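        # Adaptive KL penalty: strengthen the coefficient when the measured KL
        # divergence overshoots the target, and relax it when it undershoots.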
if kl > 2.0 * config["kl_target"]:
self.kl_coeff *= 1.5
elif kl < 0.5 * config["kl_target"]:
self.kl_coeff *= 0.5
info = {
"kl_divergence": kl,
"kl_coefficient": self.kl_coeff,
"rollouts_time": rollouts_time,
"shuffle_time": shuffle_time,
"load_time": load_time,
"sgd_time": sgd_time,
"sample_throughput": len(trajectory["observations"]) / sgd_time
}
print("kl div:", kl)
print("kl coeff:", self.kl_coeff)
print("rollouts time:", rollouts_time)
print("shuffle time:", shuffle_time)
print("load time:", load_time)
print("sgd time:", sgd_time)
print("sgd examples/s:", len(trajectory["observations"]) / sgd_time)
print("total time so far:", time.time() - self.start_time)
result = TrainingResult(
self.experiment_id.hex, j, total_reward, traj_len_mean, info)
return result
def save(self):
checkpoint_path = self.saver.save(
self.model.sess,
os.path.join(self.logdir, "checkpoint"),
global_step=self.j)
agent_state = ray.get([a.save.remote() for a in self.agents])
extra_data = [
self.model.save(),
self.global_step,
self.j,
self.kl_coeff,
agent_state]
pickle.dump(extra_data, open(checkpoint_path + ".extra_data", "wb"))
return checkpoint_path
def restore(self, checkpoint_path):
self.saver.restore(self.model.sess, checkpoint_path)
extra_data = pickle.load(open(checkpoint_path + ".extra_data", "rb"))
self.model.restore(extra_data[0])
self.global_step = extra_data[1]
self.j = extra_data[2]
self.kl_coeff = extra_data[3]
ray.get([
a.restore.remote(o)
for (a, o) in zip(self.agents, extra_data[4])])
def compute_action(self, observation):
observation = self.model.observation_filter(observation)
return self.model.common_policy.compute([observation])[0][0]
| apache-2.0 | 5,208,887,426,928,940,000 | 37.249084 | 79 | 0.559088 | false |
Axelrod-Python/axelrod-evolver | tests/integration/test_fsm.py | 1 | 12018 | import unittest
import tempfile
import csv
import axelrod as axl
import axelrod_dojo as dojo
C, D = axl.Action.C, axl.Action.D
player_class=axl.EvolvableFSMPlayer
class TestFSMPopulation(unittest.TestCase):
temporary_file = tempfile.NamedTemporaryFile()
def test_score(self):
name = "score"
turns = 10
noise = 0
repetitions = 5
num_states = 2
opponents = [s() for s in axl.demo_strategies]
size = 10
objective = dojo.prepare_objective(name=name,
turns=turns,
noise=noise,
repetitions=repetitions)
population = dojo.Population(player_class=player_class,
params_kwargs={"num_states": num_states},
size=size,
objective=objective,
output_filename=self.temporary_file.name,
opponents=opponents,
bottleneck=2,
mutation_probability = .01,
processes=1)
generations = 4
axl.seed(0)
population.run(generations, print_output=False)
self.assertEqual(population.generation, 4)
        # Manually read from the temp file to find the best strategy
best_score, best_params = 0, None
with open(self.temporary_file.name, "r") as f:
reader = csv.reader(f)
for row in reader:
_, mean_score, sd_score, max_score, arg_max = row
if float(max_score) > best_score:
best_score = float(max_score)
best_params = arg_max
# Test the load params function
for num in range(1, 4 + 1):
best = dojo.load_params(player_class=player_class,
filename=self.temporary_file.name,
num=num)
self.assertEqual(len(best), num)
# Test that can use these loaded params in a new algorithm instance
population = dojo.Population(player_class=player_class,
params_kwargs={"num_states": num_states},
size=size,
objective=objective,
output_filename=self.temporary_file.name,
opponents=opponents,
population=best,
bottleneck=2,
mutation_probability = .01,
processes=1)
generations = 4
axl.seed(0)
population.run(generations, print_output=False)
self.assertEqual(population.generation, 4)
def test_score_with_weights(self):
name = "score"
turns = 10
noise = 0
repetitions = 5
num_states = 2
opponents = [s() for s in axl.demo_strategies]
size = 10
objective = dojo.prepare_objective(name=name,
turns=turns,
noise=noise,
repetitions=repetitions)
population = dojo.Population(player_class=player_class,
params_kwargs={"num_states": num_states},
size=size,
objective=objective,
output_filename=self.temporary_file.name,
opponents=opponents,
weights=[5, 1, 1, 1, 1],
bottleneck=2,
mutation_probability = .01,
processes=1)
generations = 4
axl.seed(0)
population.run(generations, print_output=False)
self.assertEqual(population.generation, 4)
        # Manually read from the temp file to find the best strategy
best_score, best_params = 0, None
with open(self.temporary_file.name, "r") as f:
reader = csv.reader(f)
for row in reader:
_, mean_score, sd_score, max_score, arg_max = row
if float(max_score) > best_score:
best_score = float(max_score)
best_params = arg_max
# Test the load params function
for num in range(1, 4 + 1):
best = dojo.load_params(player_class=player_class,
filename=self.temporary_file.name,
num=num)
self.assertEqual(len(best), num)
self.assertEqual(player_class.serialize_parameters(best[0]), best_params)
def test_score_with_sample_count(self):
name = "score"
turns = 10
noise = 0
repetitions = 5
num_states = 2
opponents = [s() for s in axl.demo_strategies]
size = 10
objective = dojo.prepare_objective(name=name,
turns=turns,
noise=noise,
repetitions=repetitions)
population = dojo.Population(player_class=player_class,
params_kwargs={"num_states": num_states},
size=size,
objective=objective,
output_filename=self.temporary_file.name,
opponents=opponents,
sample_count=2, # Randomly sample 2 opponents at each step
bottleneck=2,
mutation_probability = .01,
processes=1)
generations = 4
axl.seed(0)
population.run(generations, print_output=False)
self.assertEqual(population.generation, 4)
        # Manually read from the temp file to find the best strategy
best_score, best_params = 0, None
with open(self.temporary_file.name, "r") as f:
reader = csv.reader(f)
for row in reader:
_, mean_score, sd_score, max_score, arg_max = row
if float(max_score) > best_score:
best_score = float(max_score)
best_params = arg_max
# Test the load params function
for num in range(1, 4 + 1):
best = dojo.load_params(player_class=player_class,
filename=self.temporary_file.name,
num=num)
            self.assertEqual(len(best), num)
self.assertEqual(player_class.serialize_parameters(best[0]), best_params)
def test_score_with_sample_count_and_weights(self):
name = "score"
turns = 10
noise = 0
repetitions = 5
num_states = 2
opponents = [s() for s in axl.demo_strategies]
size = 10
objective = dojo.prepare_objective(name=name,
turns=turns,
noise=noise,
repetitions=repetitions)
population = dojo.Population(player_class=player_class,
params_kwargs={"num_states": num_states},
size=size,
objective=objective,
output_filename=self.temporary_file.name,
opponents=opponents,
sample_count=2, # Randomly sample 2 opponents at each step
weights=[5, 1, 1, 1, 1],
bottleneck=2,
mutation_probability = .01,
processes=1)
generations = 4
axl.seed(0)
population.run(generations, print_output=False)
self.assertEqual(population.generation, 4)
        # Manually read from the temp file to find the best strategy
best_score, best_params = 0, None
with open(self.temporary_file.name, "r") as f:
reader = csv.reader(f)
for row in reader:
_, mean_score, sd_score, max_score, arg_max = row
if float(max_score) > best_score:
best_score = float(max_score)
best_params = arg_max
# Test the load params function
for num in range(1, 4 + 1):
best = dojo.load_params(player_class=player_class,
filename=self.temporary_file.name,
num=num)
self.assertEqual(len(best), num)
self.assertEqual(player_class.serialize_parameters(best[0]), best_params)
def test_score_with_particular_players(self):
"""
These are players that are known to be difficult to pickle
"""
name = "score"
turns = 10
noise = 0
repetitions = 5
num_states = 2
opponents = [axl.ThueMorse(),
axl.MetaHunter(),
axl.BackStabber(),
axl.Alexei()]
size = 10
objective = dojo.prepare_objective(name=name,
turns=turns,
noise=noise,
repetitions=repetitions)
population = dojo.Population(player_class=player_class,
params_kwargs={"num_states": num_states},
size=size,
objective=objective,
output_filename=self.temporary_file.name,
opponents=opponents,
bottleneck=2,
mutation_probability = .01,
processes=0)
generations = 4
axl.seed(0)
population.run(generations, print_output=False)
self.assertEqual(population.generation, 4)
def test_population_init_with_given_rate(self):
name = "score"
turns = 10
noise = 0
repetitions = 5
num_states = 2
opponents = [s() for s in axl.demo_strategies]
size = 10
objective = dojo.prepare_objective(name=name,
turns=turns,
noise=noise,
repetitions=repetitions)
population = dojo.Population(player_class=player_class,
params_kwargs={"num_states": num_states,
"mutation_probability": .5},
size=size,
objective=objective,
output_filename=self.temporary_file.name,
opponents=opponents,
bottleneck=2,
mutation_probability = .01,
processes=1)
for p in population.population:
self.assertEqual(p.mutation_probability, .5)
generations = 1
axl.seed(0)
population.run(generations, print_output=False)
self.assertEqual(population.generation, 1)
| mit | -4,841,699,481,762,460,000 | 40.298969 | 96 | 0.451739 | false |
3dfxsoftware/cbss-addons | project_conf/model/project.py | 1 | 4367 | #
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import fields, osv
from openerp.tools.translate import _
class project_task(osv.osv):
_inherit = 'project.task'
def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields, auto_follow_fields=['user_id'], context=None):
res = super(project_task, self)._message_get_auto_subscribe_fields(cr, uid, updated_fields, auto_follow_fields=auto_follow_fields, context=context)
res.append('project_leader_id')
return res
def send_mail_task_new_test(self, cr, uid, ids, context=None):
'''
        Automatically send mail when the task stage changes to Backlog or to Testing Leader.
'''
context = context or {}
        # Don't pass a context so read() returns untranslated values instead of the user's language
if ids.get('stage_id'):
type = self.pool.get('project.task.type').read(cr, uid, ids['stage_id'][0], ['name'])
if type.get('name', False) == 'Backlog':
self.send_mail_task(cr,uid,ids,'template_send_email_task_new',context)
elif type.get('name', False) == 'Testing Leader':
self.send_mail_task(cr,uid,ids,'template_send_email_task_end',context)
def send_mail_task(self,cr,uid,ids,template,context=None):
imd_obj = self.pool.get('ir.model.data')
template_ids = imd_obj.search(
cr, uid, [('model', '=', 'email.template'), ('name', '=', template)])
if template_ids:
res_id = imd_obj.read(
cr, uid, template_ids, ['res_id'])[0]['res_id']
followers = self.read(cr, uid, ids.get('id'), [
'message_follower_ids'])['message_follower_ids']
ids = [ids.get('id')]
body_html = self.pool.get('email.template').read(
cr, uid, res_id, ['body_html']).get('body_html')
context.update({'default_template_id': res_id,
'default_body': body_html,
'default_use_template': True,
'default_composition_mode': 'comment',
'active_model': 'project.task',
'default_partner_ids': followers,
'mail_post_autofollow_partner_ids': followers,
'active_id': ids and type(ids) is list and
ids[0] or ids,
'active_ids': ids and type(ids) is list and
ids or [ids],
})
mail_obj = self.pool.get('mail.compose.message')
fields = mail_obj.fields_get(cr, uid)
mail_ids = mail_obj.default_get(
cr, uid, fields.keys(), context=context)
mail_ids.update(
{'model': 'project.task', 'body': body_html, 'composition_mode': 'mass_mail', 'partner_ids': [(6, 0, followers)]})
mail_ids = mail_obj.create(cr, uid, mail_ids, context=context)
mail_obj.send_mail(cr, uid, [mail_ids], context=context)
return False
_track = {'stage_id': {'project.mt_task_stage': send_mail_task_new_test, }}
_columns = {
        'project_leader_id': fields.many2one('res.users', 'Project Leader', help="""Person responsible for task review when the task is in the Testing Leader state. This person should review the work summary and branch, run functional tests, and move the task to Done when everything works."""),
}
_defaults = {
'project_leader_id': lambda obj,cr,uid,context: uid,
}
| gpl-2.0 | -1,541,785,040,471,365,400 | 45.956989 | 294 | 0.580948 | false |
stephrdev/django-formwizard | formwizard/storage/cookie.py | 1 | 1712 | from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils import simplejson as json
from django.utils.hashcompat import sha_constructor
import hmac
from formwizard import storage
sha_hmac = sha_constructor
class CookieStorage(storage.BaseStorage):
encoder = json.JSONEncoder(separators=(',', ':'))
def __init__(self, *args, **kwargs):
super(CookieStorage, self).__init__(*args, **kwargs)
self.data = self.load_data()
if self.data is None:
self.init_data()
def unsign_cookie_data(self, data):
if data is None:
return None
bits = data.split('$', 1)
if len(bits) == 2:
if bits[0] == self.get_cookie_hash(bits[1]):
return bits[1]
raise SuspiciousOperation('FormWizard cookie manipulated')
def load_data(self):
data = self.request.COOKIES.get(self.prefix, None)
cookie_data = self.unsign_cookie_data(data)
if cookie_data is None:
return None
return json.loads(cookie_data, cls=json.JSONDecoder)
def update_response(self, response):
if self.data:
response.set_cookie(self.prefix, self.create_cookie_data(self.data))
else:
response.delete_cookie(self.prefix)
return response
def create_cookie_data(self, data):
encoded_data = self.encoder.encode(self.data)
cookie_data = '%s$%s' % (self.get_cookie_hash(encoded_data),
encoded_data)
return cookie_data
def get_cookie_hash(self, data):
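        # Sign the serialized data with a key derived from SECRET_KEY and the
        # wizard prefix so cookie tampering is detected in unsign_cookie_data().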
return hmac.new('%s$%s' % (settings.SECRET_KEY, self.prefix),
data, sha_hmac).hexdigest()
| bsd-3-clause | -7,821,872,325,361,776,000 | 30.127273 | 80 | 0.625584 | false |
Parisson/TimeSide | timeside/server/tests/test_provider.py | 1 | 3126 | #! /usr/bin/env python
from timeside.server.tests.timeside_test_server import TimeSideTestServer
from rest_framework import status
from django.conf import settings
import os
class TestProvider(TimeSideTestServer):
""" test item creation with providers """
def setUp(self):
TimeSideTestServer.setUp(self)
# self.items_url = reverse('')
request_providers = self.client.get('/timeside/api/providers/', format='json')
for provider in request_providers.data:
if provider['pid'] == 'youtube':
self.youtube_uuid = provider['uuid']
if provider['pid'] == 'deezer_preview':
self.deezer_uuid = provider['uuid']
def testProviderYoutubeFromURI(self):
""" test item creation with youtube's MJ 'Beat It' track's URI """
params = {'title':'Beat It',
'description':'Music from Michael Jackson',
'external_uri':'https://www.youtube.com/watch?v=oRdxUFDoQe0',
'provider': '/timeside/api/providers/' + self.youtube_uuid + '/'
}
response = self.client.post('/timeside/api/items/', params, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.data = response.data
def testProviderYoutubeFromID(self):
""" test item creation with youtube's MJ 'Beat It' track's ID """
params = {'title':'Beat It',
'description':'Music from Michael Jackson',
'external_id':'oRdxUFDoQe0',
'provider': '/timeside/api/providers/' + self.youtube_uuid + '/'
}
response = self.client.post('/timeside/api/items/', params, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.data = response.data
def testProviderDeezerFromURI(self):
""" test item creation with Beatles' deezer's track's URI """
params = {'title':'Come Together',
'description':'Music from The Beatles',
'external_uri':'https://www.deezer.com/fr/track/116348452',
'provider': '/timeside/api/providers/' + self.deezer_uuid + '/'
}
response = self.client.post('/timeside/api/items/', params, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.data = response.data
def testProviderDeezerFromID(self):
""" test item creation with Beatles' deezer's track's ID """
params = {'title':'Come Together',
'description':'Music from The Beatles',
'external_id':'116348452',
'provider': '/timeside/api/providers/' + self.deezer_uuid + '/'
}
response = self.client.post('/timeside/api/items/', params, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.data = response.data
def tearDown(self):
if self.data['source_file']:
os.remove(self.data['source_file'].replace('http://testserver/media/',settings.MEDIA_ROOT)) | agpl-3.0 | 454,302,127,132,744,260 | 41.256757 | 103 | 0.601408 | false |
foursquare/pants | tests/python/pants_test/backend/python/tasks/test_select_interpreter.py | 1 | 7548 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from builtins import str
from textwrap import dedent
import mock
from pex.interpreter import PythonInterpreter
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.backend.python.subsystems.python_setup import PythonSetup
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.tasks.select_interpreter import SelectInterpreter
from pants.base.exceptions import TaskError
from pants.util.dirutil import chmod_plus_x, safe_mkdtemp
from pants_test.task_test_base import TaskTestBase
class SelectInterpreterTest(TaskTestBase):
@classmethod
def task_type(cls):
return SelectInterpreter
def setUp(self):
super(SelectInterpreterTest, self).setUp()
self.set_options(interpreter=['IronPython>=2.55'])
self.set_options_for_scope(PythonSetup.options_scope)
# We're tied tightly to pex implementation details here faking out a python binary that outputs
# only one value no matter what arguments, environment or input stream it has attached. That
# value is the interpreter identity which is - minimally, one line containing:
# <impl> <abi> <impl_version> <major> <minor> <patch>
def fake_interpreter(id_str):
interpreter_dir = safe_mkdtemp()
binary = os.path.join(interpreter_dir, 'binary')
with open(binary, 'w') as fp:
fp.write(dedent("""
#!{}
from __future__ import print_function
print({!r})
""".format(PythonInterpreter.get().binary, id_str)).strip())
chmod_plus_x(binary)
return PythonInterpreter.from_binary(binary)
# impl, abi, impl_version, major, minor, patch
self.fake_interpreters = [
fake_interpreter('ip ip2 2 2 77 777'),
fake_interpreter('ip ip2 2 2 88 888'),
fake_interpreter('ip ip2 2 2 99 999')
]
self.reqtgt = self.make_target(
spec='req',
target_type=PythonRequirementLibrary,
requirements=[],
)
self.tgt1 = self._fake_target('tgt1')
self.tgt2 = self._fake_target('tgt2', compatibility=['IronPython>2.77.777'])
self.tgt3 = self._fake_target('tgt3', compatibility=['IronPython>2.88.888'])
self.tgt4 = self._fake_target('tgt4', compatibility=['IronPython<2.99.999'])
self.tgt20 = self._fake_target('tgt20', dependencies=[self.tgt2])
self.tgt30 = self._fake_target('tgt30', dependencies=[self.tgt3])
self.tgt40 = self._fake_target('tgt40', dependencies=[self.tgt4])
def _fake_target(self, spec, compatibility=None, sources=None, dependencies=None):
return self.make_target(spec=spec, target_type=PythonLibrary, sources=sources or [],
dependencies=dependencies, compatibility=compatibility)
def _select_interpreter(self, target_roots, should_invalidate=None):
context = self.context(target_roots=target_roots)
task = self.create_task(context)
if should_invalidate is not None:
task._create_interpreter_path_file = mock.MagicMock(wraps=task._create_interpreter_path_file)
# Mock out the interpreter cache setup, so we don't actually look for real interpreters
# on the filesystem.
with mock.patch.object(PythonInterpreterCache, 'setup', autospec=True) as mock_resolve:
def se(me, *args, **kwargs):
me._interpreters = self.fake_interpreters
return self.fake_interpreters
mock_resolve.side_effect = se
task.execute()
if should_invalidate is not None:
if should_invalidate:
task._create_interpreter_path_file.assert_called_once()
else:
task._create_interpreter_path_file.assert_not_called()
return context.products.get_data(PythonInterpreter)
def _select_interpreter_and_get_version(self, target_roots, should_invalidate=None):
"""Return the version string of the interpreter selected for the target roots."""
interpreter = self._select_interpreter(target_roots, should_invalidate)
self.assertTrue(isinstance(interpreter, PythonInterpreter))
return interpreter.version_string
def test_interpreter_selection(self):
self.assertIsNone(self._select_interpreter([]))
self.assertEquals('IronPython-2.77.777', self._select_interpreter_and_get_version([self.reqtgt]))
self.assertEquals('IronPython-2.77.777', self._select_interpreter_and_get_version([self.tgt1]))
self.assertEquals('IronPython-2.88.888', self._select_interpreter_and_get_version([self.tgt2]))
self.assertEquals('IronPython-2.99.999', self._select_interpreter_and_get_version([self.tgt3]))
self.assertEquals('IronPython-2.77.777', self._select_interpreter_and_get_version([self.tgt4]))
self.assertEquals('IronPython-2.88.888', self._select_interpreter_and_get_version([self.tgt20]))
self.assertEquals('IronPython-2.99.999', self._select_interpreter_and_get_version([self.tgt30]))
self.assertEquals('IronPython-2.77.777', self._select_interpreter_and_get_version([self.tgt40]))
self.assertEquals('IronPython-2.99.999', self._select_interpreter_and_get_version([self.tgt2, self.tgt3]))
self.assertEquals('IronPython-2.88.888', self._select_interpreter_and_get_version([self.tgt2, self.tgt4]))
with self.assertRaises(TaskError) as cm:
self._select_interpreter_and_get_version([self.tgt3, self.tgt4])
self.assertIn('Unable to detect a suitable interpreter for compatibilities: '
'IronPython<2.99.999 && IronPython>2.88.888', str(cm.exception))
def test_interpreter_selection_invalidation(self):
tgta = self._fake_target('tgta', compatibility=['IronPython>2.77.777'],
dependencies=[self.tgt3])
self.assertEquals('IronPython-2.99.999',
self._select_interpreter_and_get_version([tgta], should_invalidate=True))
# A new target with different sources, but identical compatibility, shouldn't invalidate.
self.create_file('tgtb/foo/bar/baz.py', 'fake content')
tgtb = self._fake_target('tgtb', compatibility=['IronPython>2.77.777'],
dependencies=[self.tgt3], sources=['foo/bar/baz.py'])
self.assertEquals('IronPython-2.99.999',
self._select_interpreter_and_get_version([tgtb], should_invalidate=False))
def test_compatibility_AND(self):
tgt = self._fake_target('tgt5', compatibility=['IronPython>2.77.777,<2.99.999'])
self.assertEquals('IronPython-2.88.888', self._select_interpreter_and_get_version([tgt]))
def test_compatibility_AND_impossible(self):
tgt = self._fake_target('tgt5', compatibility=['IronPython>2.77.777,<2.88.888'])
with self.assertRaises(PythonInterpreterCache.UnsatisfiableInterpreterConstraintsError):
self._select_interpreter_and_get_version([tgt])
def test_compatibility_OR(self):
tgt = self._fake_target('tgt6', compatibility=['IronPython>2.88.888', 'IronPython<2.7'])
self.assertEquals('IronPython-2.99.999', self._select_interpreter_and_get_version([tgt]))
def test_compatibility_OR_impossible(self):
tgt = self._fake_target('tgt6', compatibility=['IronPython>2.99.999', 'IronPython<2.77.777'])
with self.assertRaises(PythonInterpreterCache.UnsatisfiableInterpreterConstraintsError):
self._select_interpreter_and_get_version([tgt])
| apache-2.0 | -4,960,126,751,737,372,000 | 47.696774 | 110 | 0.714361 | false |
carloscadena/django-imager | imagersite/imagersite/test.py | 1 | 7374 | """Test for registration view."""
from bs4 import BeautifulSoup
from django.contrib.auth.models import User
from django.core import mail
from django.test import Client
from django.test import RequestFactory
from django.test import TestCase
from django.urls import reverse
from imagersite.views import home_view
from django.urls import reverse_lazy
from imager_images.models import Photo
import factory
from django.core.files.uploadedfile import SimpleUploadedFile
import os
class ViewTest(TestCase):
"""Test Home View"""
def setUp(self):
"""Setup home fixture"""
self.client = Client()
        self.get_request = RequestFactory().get('/')
def test_home_route_returns_status_200(self):
"""Home route returns 200."""
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
def test_home_view_has_some_heading(self):
"""Has heading"""
        response = home_view(self.get_request)
self.assertTrue(b'h1' in response.content)
class RegistrationTests(TestCase):
"""Test Registration."""
def setUp(self):
"""Make Reg"""
self.client = Client()
def test_registration_page_uses_proper_template(self):
"""Registration is returned."""
response = self.client.get(reverse('registration_register'))
self.assertIn(
'registration/registration_form.html',
response.template_name
)
def test_registration_creates_new_inactive_user(self):
"""Register adds user."""
self.assertTrue(User.objects.count() == 0)
response = self.client.get(reverse('registration_register'))
html = BeautifulSoup(response.rendered_content, "html.parser")
token = html.find(
'input', {'name': "csrfmiddlewaretoken"}
).attrs['value']
info = {
'csrfmiddlewaretoken': token,
'username': 'test',
'email': '[email protected]',
'password1': 'testtest123',
'password2': 'testtest123'
}
self.client.post(
reverse('registration_register'),
info
)
self.assertFalse(User.objects.first().is_active)
self.assertTrue(len(mail.outbox) == 1)
def test_registration_success_redirects_to_reg_complete_html(self):
"""Test that the registration complete page shows after registering."""
self.assertTrue(User.objects.count() == 0)
response = self.client.get(reverse('registration_register'))
html = BeautifulSoup(response.rendered_content, "html.parser")
token = html.find(
'input', {'name': "csrfmiddlewaretoken"}
).attrs['value']
info = {
'csrfmiddlewaretoken': token,
'username': 'test',
'email': '[email protected]',
'password1': 'testtest123',
'password2': 'testtest123'
}
response = self.client.post(
reverse('registration_register'),
info,
follow=True
)
self.assertIn(
'Registration complete',
response.rendered_content
)
def test_activation_key_activates_user(self):
"""Test that that the activation key activates the user."""
self.assertTrue(User.objects.count() == 0)
response = self.client.get(reverse('registration_register'))
html = BeautifulSoup(response.rendered_content, "html.parser")
token = html.find(
'input', {'name': "csrfmiddlewaretoken"}
).attrs['value']
info = {
'csrfmiddlewaretoken': token,
'username': 'test',
'email': '[email protected]',
'password1': 'testtest123',
'password2': 'testtest123'
}
response = self.client.post(
reverse('registration_register'),
info
)
key = response.context['activation_key']
response = self.client.get(
"/accounts/activate/" + key + "/",
follow=True
)
self.assertIn('Activated!!', response.rendered_content)
# ========================= Tests from class July 13 ========================
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
class PhotoFactory(factory.django.DjangoModelFactory):
"""Create Photos for tests."""
class Meta:
"""Model for Photo Factory."""
model = Photo
title = factory.Sequence(lambda n: "photo{}".format(n))
image = SimpleUploadedFile(
name='somephoto.jpg',
content=open(os.path.join(BASE_DIR, 'imagersite/static/imagersite/testing.png'), 'rb').read(),
content_type='image/jpeg'
)
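# Illustrative note (not part of the original tests): ``PhotoFactory.build()``
# returns an unsaved Photo instance (which is why add_photos() below attaches a
# profile and calls save() itself), whereas ``PhotoFactory.create()`` would
# also persist the object immediately.
#
#     photo = PhotoFactory.build()   # in memory only, nothing hits the DB yet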
class HomePageTests(TestCase):
"""Test the home page."""
def setUp(self):
"""Set up for home page tests."""
self.client = Client()
self.user = User(username='carl', email='[email protected]')
self.user.save()
def add_photos(self):
"""Build photos to test."""
photos = [PhotoFactory.build() for _ in range(1)]
for photo in photos:
photo.profile = self.user.profile
photo.save()
def test_when_no_images_placeholder_appears(self):
"""Test that before any images are uploaded a placeholder appears."""
response = self.client.get(reverse_lazy('home'))
html = BeautifulSoup(response.content, 'html.parser')
# import pdb; pdb.set_trace()
self.assertTrue(html.find('img', {'src': '/static/imagersite/testing.png'}))
def test_when_images_exist_one_of_them_is_on_the_page(self):
"""Test that if image exists, it displays."""
self.add_photos()
response = self.client.get(reverse_lazy('home'))
html = BeautifulSoup(response.content, 'html.parser')
img_tag = html.find_all('img')
self.assertTrue(img_tag[0].attrs['src'] == Photo.objects.first().image.url)
class ProfilePageTests(TestCase):
"""Test suite for the profile page."""
def setUp(self):
"""Set up for Profile page tests."""
self.client = Client()
self.user = User(username='carl', email='[email protected]')
self.user.set_password('bobloblaw')
self.user.save()
def test_user_profile_info_on_profile_page(self):
"""Test that a user's profile info displays on page."""
self.client.force_login(self.user)
response = self.client.get(reverse_lazy('profile', kwargs={'username': 'carl'}))
self.assertTrue(b'<p>Username: carl</p>' in response.content)
def test_user_profile_page_has_link_to_library_page(self):
"""Test that profile page has a link to the library page."""
self.client.force_login(self.user)
response = self.client.get(reverse_lazy('profile', kwargs={'username': 'carl'}))
html = BeautifulSoup(response.content, 'html.parser')
self.assertTrue(html.find('a', {'href': '/images/library/1/1'}))
def test_when_user_logs_in_redirect_to_profile_page(self):
"""Test log in redirects to profile page."""
response = self.client.post(reverse_lazy('login'), {
'username': self.user.username, 'password': 'bobloblaw'
}, follow=True)
# import pdb; pdb.set_trace()
self.assertTrue(b'<p>Username: carl</p>' in response.content)
| mit | -4,183,117,079,725,359,000 | 34.451923 | 102 | 0.60415 | false |
MalloyDelacroix/DownloaderForReddit | DownloaderForReddit/viewmodels/add_reddit_object_list_model.py | 1 | 5090 | """
Downloader for Reddit takes a list of reddit users and subreddits and downloads content posted to reddit either by the
users or on the subreddits.
Copyright (C) 2017, Kyle Hickey
This file is part of the Downloader for Reddit.
Downloader for Reddit is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Downloader for Reddit is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Downloader for Reddit. If not, see <http://www.gnu.org/licenses/>.
"""
from PyQt5.QtCore import QAbstractListModel, QModelIndex, Qt, QThread, pyqtSignal
from PyQt5.QtGui import QPixmap
import os
from queue import Queue
from ..utils.reddit_utils import NameChecker
class AddRedditObjectListModel(QAbstractListModel):
"""
    A list model that handles the list view for the AddRedditObjectDialog.
"""
name_list_updated = pyqtSignal()
def __init__(self, object_type, parent=None):
super().__init__()
self.parent = parent
self.object_type = object_type
self.queue = Queue()
self.name_list = []
self.validation_dict = {}
self.complete_reddit_object_list = []
self.name_checker = None
self.start_name_check_thread()
self.checker_running = True
valid_path = os.path.abspath('Resources/Images/valid_checkmark.png')
non_valid_path = os.path.abspath('Resources/Images/non_valid_x.png')
self.valid_img = QPixmap(valid_path)
self.non_valid_img = QPixmap(non_valid_path)
def rowCount(self, parent=None, *args, **kwargs):
return len(self.name_list)
def insertRow(self, name, parent=QModelIndex(), *args, **kwargs):
self.beginInsertRows(parent, self.rowCount() - 1, self.rowCount())
self.name_list.append(name)
self.validation_dict[name] = None
self.queue.put(name)
self.name_list_updated.emit()
self.endInsertRows()
return True
def removeRows(self, pos, rows, parent=QModelIndex(), *args, **kwargs):
self.beginRemoveRows(parent, pos, pos + rows - 1)
for x in range(rows):
name = self.name_list[pos]
self.name_list.remove(name)
del self.validation_dict[name]
try:
del self.complete_reddit_object_list[pos]
except IndexError:
pass
self.name_list_updated.emit()
self.endRemoveRows()
return True
def clear_non_valid(self):
"""Removes all non-valid names from the name list."""
name_list = []
for key, value in self.validation_dict.items():
if not value:
name_list.append(key)
for name in name_list:
self.removeRows(self.name_list.index(name), 1)
def data(self, index, role=Qt.DisplayRole):
if role == Qt.DisplayRole:
return self.name_list[index.row()]
elif role == Qt.DecorationRole:
name = self.name_list[index.row()]
if self.validation_dict[name] is None:
return None
if self.validation_dict[name]:
return self.valid_img
else:
return self.non_valid_img
def add_complete_object(self, reddit_object):
"""
        Adds a complete reddit object to the complete reddit object list and inserts its name into
        the data view so that it can be checked by the name checker.
:param reddit_object: A complete reddit object that has been imported from a file.
"""
self.complete_reddit_object_list.append(reddit_object)
self.insertRow(reddit_object.name)
def start_name_check_thread(self):
"""Initializes a NameChecker object, then runs it in another thread."""
self.name_checker = NameChecker(self.object_type, self.queue)
self.thread = QThread(self)
self.name_checker.moveToThread(self.thread)
self.name_checker.name_validation.connect(self.validate_name)
self.name_checker.finished.connect(self.thread.quit)
self.name_checker.finished.connect(self.name_checker.deleteLater)
self.thread.finished.connect(self.thread.deleteLater)
self.thread.started.connect(self.name_checker.run)
self.thread.start()
def stop_name_checker(self):
if self.name_checker:
self.name_checker.stop_run()
def validate_name(self, name_tup):
try:
self.validation_dict[name_tup[0]] = name_tup[1]
index = self.createIndex(self.name_list.index(name_tup[0]), 0)
self.dataChanged.emit(index, index)
except ValueError:
self.stop_name_checker()
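# Illustrative sketch (not part of the original module): hypothetical wiring of
# this model to a QListView inside the add-dialog.  The object_type value and
# widget names below are assumptions for illustration only.
#
#     model = AddRedditObjectListModel('user', parent=dialog)
#     dialog.list_view.setModel(model)
#     model.insertRow('some_reddit_name')   # queued for background validation
#     model.clear_non_valid()               # drop names that failed the check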
| gpl-3.0 | 6,999,803,051,143,106,000 | 36.426471 | 118 | 0.651277 | false |
binarybana/samcnet | exps/priorstrength.py | 1 | 2301 | import sys, os, random
import zlib, cPickle
############### SAMC Setup ###############
import numpy as np
import scipy as sp
import networkx as nx
from samcnet.samc import SAMCRun
from samcnet.bayesnetcpd import BayesNetSampler, BayesNetCPD
from samcnet import utils
from samcnet.generator import *
if 'WORKHASH' in os.environ:
try:
redis_server = os.environ['REDIS']
import redis
r = redis.StrictRedis(redis_server)
except:
sys.exit("ERROR in worker: Need REDIS environment variable defined.")
############### /SAMC Setup ###############
N = 9
iters = 3e5
numdata = 0 #NEED TO ADD NOISE FIRST
temperature = 1.0
burn = 1000
stepscale = 10000
thin = 10
refden = 0.0
random.seed(12345)
np.random.seed(12345)
groundgraph = generateHourGlassGraph(nodes=N)
#joint, states = generateJoint(groundgraph, method='dirichlet')
joint, states = generateJoint(groundgraph, method='noisylogic')
data = generateData(groundgraph, joint, numdata)
groundbnet = BayesNetCPD(states, data, limparent=3)
groundbnet.set_cpds(joint)
if 'WORKHASH' in os.environ:
jobhash = os.environ['WORKHASH']
if not r.hexists('jobs:grounds', jobhash):
r.hset('jobs:grounds', jobhash, zlib.compress(cPickle.dumps(groundgraph)))
random.seed()
np.random.seed()
#p_struct = float(sys.argv[1])
p_struct = 30.0
for numtemplate in [4,8]:
for cpd in [True, False]:
if cpd:
p_cpd = p_struct
else:
p_cpd = 0.0
random.seed(12345)
np.random.seed(12345)
obj = BayesNetCPD(states, data, limparent=3)
template = sampleTemplate(groundgraph, numtemplate)
random.seed()
np.random.seed()
b = BayesNetSampler(obj,
template,
groundbnet,
p_struct=p_struct,
p_cpd=p_cpd)
s = SAMCRun(b,burn,stepscale,refden,thin)
s.sample(iters, temperature)
s.compute_means(cummeans=False)
if 'WORKHASH' in os.environ:
r.lpush('jobs:done:' + jobhash, s.read_db())
r.lpush('custom:%s:p_struct=%d:ntemplate=%d:p_cpd=%d' %
(jobhash, int(p_struct*10), numtemplate, int(p_cpd*10)),
s.db.root.computed.means._v_attrs['kld'] )
s.db.close()
| mit | 5,560,503,565,455,464,000 | 26.722892 | 82 | 0.616688 | false |
cordery/django-countries-plus | countries_plus/utils.py | 1 | 7136 | # coding=utf-8
import re
import requests
import six
from django.core.exceptions import ValidationError
from .models import Country
DATA_HEADERS_ORDERED = [
'ISO', 'ISO3', 'ISO-Numeric', 'fips', 'Country', 'Capital', 'Area(in sq km)',
'Population', 'Continent', 'tld', 'CurrencyCode', 'CurrencyName', 'Phone',
'Postal Code Format', 'Postal Code Regex', 'Languages', 'geonameid', 'neighbours',
'EquivalentFipsCode'
]
DATA_HEADERS_MAP = {
'ISO': 'iso',
'ISO3': 'iso3',
'ISO-Numeric': 'iso_numeric',
'fips': 'fips',
'Country': 'name',
'Capital': 'capital',
'Area(in sq km)': 'area',
'Population': 'population',
'Continent': 'continent',
'tld': 'tld',
'CurrencyCode': 'currency_code',
'CurrencyName': 'currency_name',
'Phone': 'phone',
'Postal Code Format': 'postal_code_format',
'Postal Code Regex': 'postal_code_regex',
'Languages': 'languages',
'geonameid': 'geonameid',
'neighbours': 'neighbours',
'EquivalentFipsCode': 'equivalent_fips_code'
}
CURRENCY_SYMBOLS = {
"AED": "د.إ",
"AFN": "؋",
"ALL": "L",
"AMD": "դր.",
"ANG": "ƒ",
"AOA": "Kz",
"ARS": "$",
"AUD": "$",
"AWG": "ƒ",
"AZN": "m",
"BAM": "KM",
"BBD": "$",
"BDT": "৳",
"BGN": "лв",
"BHD": "ب.د",
"BIF": "Fr",
"BMD": "$",
"BND": "$",
"BOB": "Bs.",
"BRL": "R$",
"BSD": "$",
"BTN": "Nu",
"BWP": "P",
"BYR": "Br",
"BZD": "$",
"CAD": "$",
"CDF": "Fr",
"CHF": "Fr",
"CLP": "$",
"CNY": "¥",
"COP": "$",
"CRC": "₡",
"CUP": "$",
"CVE": "$, Esc",
"CZK": "Kč",
"DJF": "Fr",
"DKK": "kr",
"DOP": "$",
"DZD": "د.ج",
"EEK": "KR",
"EGP": "£,ج.م",
"ERN": "Nfk",
"ETB": "Br",
"EUR": "€",
"FJD": "$",
"FKP": "£",
"GBP": "£",
"GEL": "ლ",
"GHS": "₵",
"GIP": "£",
"GMD": "D",
"GNF": "Fr",
"GTQ": "Q",
"GYD": "$",
"HKD": "$",
"HNL": "L",
"HRK": "kn",
"HTG": "G",
"HUF": "Ft",
"IDR": "Rp",
"ILS": "₪",
"INR": "₨",
"IQD": "ع.د",
"IRR": "﷼",
"ISK": "kr",
"JMD": "$",
"JOD": "د.ا",
"JPY": "¥",
"KES": "Sh",
"KGS": "лв",
"KHR": "៛",
"KMF": "Fr",
"KPW": "₩",
"KRW": "₩",
"KWD": "د.ك",
"KYD": "$",
"KZT": "Т",
"LAK": "₭",
"LBP": "ل.ل",
"LKR": "ரூ",
"LRD": "$",
"LSL": "L",
"LTL": "Lt",
"LVL": "Ls",
"LYD": "ل.د",
"MAD": "د.م.",
"MDL": "L",
"MGA": "Ar",
"MKD": "ден",
"MMK": "K",
"MNT": "₮",
"MOP": "P",
"MRO": "UM",
"MUR": "₨",
"MVR": "ރ.",
"MWK": "MK",
"MXN": "$",
"MYR": "RM",
"MZN": "MT",
"NAD": "$",
"NGN": "₦",
"NIO": "C$",
"NOK": "kr",
"NPR": "₨",
"NZD": "$",
"OMR": "ر.ع.",
"PAB": "B/.",
"PEN": "S/.",
"PGK": "K",
"PHP": "₱",
"PKR": "₨",
"PLN": "zł",
"PYG": "₲",
"QAR": "ر.ق",
"RON": "RON",
"RSD": "RSD",
"RUB": "р.",
"RWF": "Fr",
"SAR": "ر.س",
"SBD": "$",
"SCR": "₨",
"SDG": "S$",
"SEK": "kr",
"SGD": "$",
"SHP": "£",
"SLL": "Le",
"SOS": "Sh",
"SRD": "$",
"STD": "Db",
"SYP": "£, ل.س",
"SZL": "L",
"THB": "฿",
"TJS": "ЅМ",
"TMT": "m",
"TND": "د.ت",
"TOP": "T$",
"TRY": "₤",
"TTD": "$",
"TWD": "$",
"TZS": "Sh",
"UAH": "₴",
"UGX": "Sh",
"USD": "$",
"UYU": "$",
"UZS": "лв",
"VEF": "Bs",
"VND": "₫",
"VUV": "Vt",
"WST": "T",
"XAF": "Fr",
"XCD": "$",
"XOF": "Fr",
"XPF": "Fr",
"YER": "﷼",
"ZAR": "R",
"ZMK": "ZK",
"ZWL": "$"
}
class GeonamesParseError(Exception):
def __init__(self, message=None):
message = "I couldn't parse the Geonames file (" \
"http://download.geonames.org/export/dump/countryInfo.txt). " \
"The format may have changed. An updated version of this software may be " \
"required, " \
"please check for updates and/or raise an issue on github. Specific error: " \
"%s" % message
super(GeonamesParseError, self).__init__(message)
def update_geonames_data():
"""
Requests the countries table from geonames.org, and then calls parse_geonames_data to parse it.
:return: num_updated, num_created
:raise GeonamesParseError:
"""
r = requests.get('http://download.geonames.org/export/dump/countryInfo.txt', stream=True)
return parse_geonames_data(r.iter_lines())
def parse_geonames_data(lines_iterator):
"""
Parses countries table data from geonames.org, updating or adding records as needed.
currency_symbol is not part of the countries table and is supplemented using the data
obtained from the link provided in the countries table.
:type lines_iterator: collections.iterable
:return: num_updated: int, num_created: int
:raise GeonamesParseError:
"""
data_headers = []
num_created = 0
num_updated = 0
for line in lines_iterator:
line = line.decode()
if line[0] == "#":
if line[0:4] == "#ISO":
data_headers = line.strip('# ').split('\t')
if data_headers != DATA_HEADERS_ORDERED:
raise GeonamesParseError(
"The table headers do not match the expected headers.")
continue
if not data_headers:
raise GeonamesParseError("No table headers found.")
bits = line.split('\t')
data = {DATA_HEADERS_MAP[DATA_HEADERS_ORDERED[x]]: bits[x] for x in range(0, len(bits))}
if 'currency_code' in data and data['currency_code']:
data['currency_symbol'] = CURRENCY_SYMBOLS.get(data['currency_code'])
# Remove empty items
clean_data = {x: y for x, y in data.items() if y}
# Puerto Rico and the Dominican Republic have two phone prefixes in the format "123 and
# 456"
if 'phone' in clean_data:
if 'and' in clean_data['phone']:
clean_data['phone'] = ",".join(re.split(r'\s*and\s*', clean_data['phone']))
# Avoiding update_or_create to maintain compatibility with Django 1.5
try:
country = Country.objects.get(iso=clean_data['iso'])
created = False
except Country.DoesNotExist:
try:
country = Country.objects.create(**clean_data)
except ValidationError as e:
raise GeonamesParseError("Unexpected field length: %s" % e.message_dict)
created = True
for k, v in six.iteritems(clean_data):
setattr(country, k, v)
try:
country.save()
except ValidationError as e:
raise GeonamesParseError("Unexpected field length: %s" % e.message_dict)
if created:
num_created += 1
else:
num_updated += 1
return num_updated, num_created
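# Illustrative usage sketch (not part of the original module): feeding
# parse_geonames_data() a small in-memory sample instead of the live download.
# Requires a configured Django project with the Country model; the header must
# match DATA_HEADERS_ORDERED and the country values below are invented.
#
#     sample = [
#         ("#" + "\t".join(DATA_HEADERS_ORDERED)).encode(),
#         b"XX\tXXX\t999\tXX\tExampleland\tExample City\t100\t1000\tEU\t.xx\t"
#         b"EUR\tEuro\t99\t\t\t\t12345\t\t",
#     ]
#     num_updated, num_created = parse_geonames_data(iter(sample))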
| mit | 7,095,548,300,540,033,000 | 23.904255 | 99 | 0.467749 | false |
tensorflow/graphics | tensorflow_graphics/image/color_space/tests/srgb_test.py | 1 | 3322 | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for srgb."""
from absl.testing import flagsaver
from absl.testing import parameterized
import numpy as np
from tensorflow_graphics.image.color_space import linear_rgb
from tensorflow_graphics.image.color_space import srgb
from tensorflow_graphics.util import test_case
class SrgbTest(test_case.TestCase):
def test_cycle_linear_rgb_srgb_linear_rgb_for_random_input(self):
"""Tests loop from linear RGB to sRGB and back for random inputs."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
linear_input = np.random.uniform(size=tensor_shape + [3])
srgb_output = srgb.from_linear_rgb(linear_input)
linear_reverse = linear_rgb.from_srgb(srgb_output)
self.assertAllClose(linear_input, linear_reverse)
@parameterized.parameters(
(((0., 0.5, 1.), (0.00312, 0.0031308, 0.00314)),
((0., 0.735357, 1.), (0.04031, 0.04045, 0.040567))),)
def test_from_linear_rgb_preset(self, test_inputs, test_outputs):
"""Tests conversion from linear to sRGB color space for preset inputs."""
self.assert_output_is_correct(srgb.from_linear_rgb, (test_inputs,),
(test_outputs,))
def test_from_linear_rgb_jacobian_random(self):
"""Tests the Jacobian of the from_linear_rgb function for random inputs."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
linear_random_init = np.random.uniform(size=tensor_shape + [3])
self.assert_jacobian_is_correct_fn(srgb.from_linear_rgb,
[linear_random_init])
@parameterized.parameters((np.array((0., 0.001, 0.002)),), (np.array(
(0.004, 0.005, 1.)),), (np.array((0.00312, 0.004, 0.00314)),))
@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
def test_from_linear_rgb_jacobian_preset(self, inputs_init):
"""Tests the Jacobian of the from_linear_rgb function for preset inputs."""
self.assert_jacobian_is_correct_fn(srgb.from_linear_rgb, [inputs_init])
@parameterized.parameters(
((3,),),
((None, None, None, 3),),
)
def test_from_linear_rgb_exception_not_raised(self, *shape):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(srgb.from_linear_rgb, shape)
@parameterized.parameters(
("must have a rank greater than 0", ()),
("must have exactly 3 dimensions in axis -1", (2, 3, 4)),
)
def test_from_linear_rgb_exception_raised(self, error_msg, *shape):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(srgb.from_linear_rgb, error_msg, shape)
if __name__ == "__main__":
test_case.main()
| apache-2.0 | 3,111,544,979,320,867,000 | 40.525 | 79 | 0.688742 | false |
EderSantana/seya | tests/test_conv_rnn.py | 1 | 2708 | # encoding: utf-8
"""Test seya.layers.recurrent module"""
from __future__ import print_function
import unittest
import numpy as np
from keras.models import Sequential
from keras.layers.core import TimeDistributedDense
from keras.layers.convolutional import Convolution2D
from seya.layers.conv_rnn import ConvRNN, ConvGRU, TimeDistributedModel
class TestConvRNNs(unittest.TestCase):
"""Test seya.layers.conv_rnn layer"""
def test_conv_rnn(self):
"""Just check that the ConvRNN layer can compile and run"""
nb_samples, timesteps, ndim, filter_dim = 5, 10, 28, 3
input_flat = ndim ** 2
layer = ConvRNN(filter_dim=(1, filter_dim, filter_dim),
reshape_dim=(1, ndim, ndim),
input_shape=(timesteps, input_flat),
return_sequences=True)
model = Sequential()
model.add(layer)
model.add(TimeDistributedDense(10))
model.compile('sgd', 'mse')
x = np.random.randn(nb_samples, timesteps, input_flat)
y = model.predict(x)
assert y.shape == (nb_samples, timesteps, 10)
def test_conv_gru(self):
"""Just check that the ConvGRU layer can compile and run"""
nb_samples, timesteps, ndim, filter_dim = 5, 10, 28, 3
input_flat = ndim ** 2
layer = ConvGRU(filter_dim=(1, filter_dim, filter_dim),
reshape_dim=(1, ndim, ndim),
# input_shape=(timesteps, input_flat),
return_sequences=True)
model = Sequential()
model.add(TimeDistributedDense(input_flat, input_dim=input_flat))
model.add(layer)
model.compile('sgd', 'mse')
x = np.random.randn(nb_samples, timesteps, input_flat)
y = model.predict(x)
assert y.shape == (nb_samples, timesteps, input_flat)
def test_time_distributed(self):
"""Just check that the TimeDistributedModel layer can compile and run"""
nb_samples, timesteps, ndim, filter_dim = 5, 10, 28, 3
input_flat = ndim ** 2
inner = Sequential()
inner.add(Convolution2D(1, filter_dim, filter_dim, border_mode='same',
input_shape=(1, ndim, ndim)))
layer = TimeDistributedModel(
inner, batch_size=nb_samples, input_shape=(timesteps, input_flat))
model = Sequential()
model.add(layer)
model.add(TimeDistributedDense(10))
model.compile('sgd', 'mse')
x = np.random.randn(nb_samples, timesteps, input_flat)
y = model.predict(x)
assert y.shape == (nb_samples, timesteps, 10)
if __name__ == '__main__':
unittest.main(verbosity=2)
| bsd-3-clause | -2,647,198,628,031,803,000 | 35.594595 | 80 | 0.60192 | false |
open-mmlab/mmdetection | configs/scnet/scnet_r50_fpn_1x_coco.py | 1 | 4975 | _base_ = '../htc/htc_r50_fpn_1x_coco.py'
# model settings
model = dict(
type='SCNet',
roi_head=dict(
_delete_=True,
type='SCNetRoIHead',
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SCNetBBoxHead',
num_shared_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='SCNetBBoxHead',
num_shared_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='SCNetBBoxHead',
num_shared_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='SCNetMaskHead',
num_convs=12,
in_channels=256,
conv_out_channels=256,
num_classes=80,
conv_to_res=True,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='SCNetSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
ignore_label=255,
loss_weight=0.2,
conv_to_res=True),
glbctx_head=dict(
type='GlobalContextHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_weight=3.0,
conv_to_res=True),
feat_relay_head=dict(
type='FeatureRelayHead',
in_channels=1024,
out_conv_channels=256,
roi_feat_size=7,
scale_factor=2)))
# uncomment below code to enable test time augmentations
# img_norm_cfg = dict(
# mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# test_pipeline = [
# dict(type='LoadImageFromFile'),
# dict(
# type='MultiScaleFlipAug',
# img_scale=[(600, 900), (800, 1200), (1000, 1500), (1200, 1800),
# (1400, 2100)],
# flip=True,
# transforms=[
# dict(type='Resize', keep_ratio=True),
# dict(type='RandomFlip', flip_ratio=0.5),
# dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size_divisor=32),
# dict(type='ImageToTensor', keys=['img']),
# dict(type='Collect', keys=['img']),
# ])
# ]
# data = dict(
# val=dict(pipeline=test_pipeline),
# test=dict(pipeline=test_pipeline))
| apache-2.0 | 1,226,587,664,530,429,400 | 35.580882 | 79 | 0.464925 | false |
jiaaro/django-alert | alert/utils.py | 1 | 8238 | from alert.exceptions import AlertIDAlreadyInUse, AlertBackendIDAlreadyInUse,\
InvalidApplicableUsers
import django
from django.conf import settings
from django.utils import timezone
from django.template.loader import render_to_string, get_template
from django.contrib.sites.models import Site
from django.template import TemplateDoesNotExist
from django.db import models
from itertools import islice
from alert.compat import get_user_model
ALERT_TYPES = {}
ALERT_BACKENDS = {}
ALERT_TYPE_CHOICES = []
ALERT_BACKEND_CHOICES = []
def grouper(n, iterable):
iterable = iter(iterable)
while True:
chunk = tuple(islice(iterable, n))
if not chunk: return
yield chunk
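# Illustrative note (not part of the original module): grouper() lazily chunks
# any iterable into tuples of at most ``n`` items, which is what the bulk
# Alert creation further below relies on.
#
#     >>> list(grouper(2, range(5)))
#     [(0, 1), (2, 3), (4,)]
#     >>> list(grouper(3, []))
#     []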
def render_email_to_string(tmpl, cx, alert_type="txt"):
cx['alert_shard_ext'] = alert_type
rendered = render_to_string(tmpl, cx)
return rendered.strip()
class AlertMeta(type):
def __new__(cls, name, bases, attrs):
new_alert = super(AlertMeta, cls).__new__(cls, name, bases, attrs)
# If this isn't a subclass of BaseAlert, don't do anything special.
parents = [b for b in bases if isinstance(b, AlertMeta)]
if not parents:
return new_alert
# allow subclasses to use the auto id feature
id = getattr(new_alert, 'id', name)
for parent in parents:
if getattr(parent, 'id', None) == id:
id = name
break
new_alert.id = id
if new_alert.id in ALERT_TYPES.keys():
            raise AlertIDAlreadyInUse("The alert ID \"%s\" was declared more than once" % new_alert.id)
ALERT_TYPES[new_alert.id] = new_alert()
ALERT_TYPE_CHOICES.append((new_alert.id, new_alert.title))
return new_alert
class BaseAlert(object):
__metaclass__ = AlertMeta
default = False
sender = None
template_filetype = "txt"
def __init__(self):
kwargs = {}
if self.sender:
kwargs['sender'] = self.sender
self.signal.connect(self.signal_handler, **kwargs)
def __repr__(self):
return "<Alert: %s>" % self.id
def __str__(self):
return str(self.id)
def signal_handler(self, **kwargs):
if self.before(**kwargs) is False:
return
from alert.models import AlertPreference
from alert.models import Alert
users = self.get_applicable_users(**kwargs)
if isinstance(users, models.Model):
users = [users]
try:
user_count = users.count()
except:
user_count = len(users)
User = get_user_model()
if user_count and not isinstance(users[0], User):
raise InvalidApplicableUsers("%s.get_applicable_users() returned an invalid value. Acceptable values are a django.contrib.auth.models.User instance OR an iterable containing 0 or more User instances" % (self.id))
site = Site.objects.get_current()
def mk_alert(user, backend):
context = self.get_template_context(BACKEND=backend, USER=user, SITE=site, ALERT=self, **kwargs)
template_kwargs = {'backend': backend, 'context': context }
return Alert(
user=user,
backend=backend.id,
alert_type=self.id,
when=self.get_send_time(**kwargs),
title=self.get_title(**template_kwargs),
body=self.get_body(**template_kwargs)
)
alerts = (mk_alert(user, backend) for (user, backend) in AlertPreference.objects.get_recipients_for_notice(self.id, users))
# bulk create is much faster so use it when available
if django.VERSION >= (1, 4) and getattr(settings, 'ALERT_USE_BULK_CREATE', True):
created = 0
for alerts_group in grouper(100, alerts):
# break bulk create into groups of 100 to avoid the dreaded
# OperationalError: (2006, 'MySQL server has gone away')
Alert.objects.bulk_create(alerts_group)
created += 100
else:
for alert in alerts: alert.save()
def before(self, **kwargs):
pass
def get_send_time(self, **kwargs):
return timezone.now()
def get_applicable_users(self, instance, **kwargs):
return [instance.user]
def get_template_context(self, **kwargs):
return kwargs
def _get_template(self, backend, part, filetype='txt'):
template = "alerts/%s/%s/%s.%s" % (self.id, backend.id, part, filetype)
try:
get_template(template)
return template
except TemplateDoesNotExist:
pass
template = "alerts/%s/%s.%s" % (self.id, part, filetype)
get_template(template)
return template
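    # Illustrative note (not part of the original module): for a hypothetical
    # alert with id "WelcomeAlert" rendered through a backend with id
    # "EmailBackend", the body template is looked up as
    #
    #     alerts/WelcomeAlert/EmailBackend/body.txt
    #
    # and, if that does not exist, falls back to
    #
    #     alerts/WelcomeAlert/body.txt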
def get_title_template(self, backend, context):
return self._get_template(backend, 'title', self.template_filetype)
def get_body_template(self, backend, context):
return self._get_template(backend, 'body', self.template_filetype)
def get_title(self, backend, context):
template = self.get_title_template(backend, context)
return render_to_string(template, context)
def get_body(self, backend, context):
template = self.get_body_template(backend, context)
return render_to_string(template, context)
def get_default(self, backend):
if isinstance(self.default, bool):
return self.default
return self.default[backend]
class AlertBackendMeta(type):
def __new__(cls, name, bases, attrs):
new_alert_backend = super(AlertBackendMeta, cls).__new__(cls, name, bases, attrs)
# If this isn't a subclass of BaseAlert, don't do anything special.
parents = [b for b in bases if isinstance(b, AlertBackendMeta)]
if not parents:
return new_alert_backend
new_alert_backend.id = getattr(new_alert_backend, 'id', name)
if new_alert_backend.id in ALERT_BACKENDS.keys():
            raise AlertBackendIDAlreadyInUse("The alert backend ID \"%s\" was declared more than once" % new_alert_backend.id)
ALERT_BACKENDS[new_alert_backend.id] = new_alert_backend()
ALERT_BACKEND_CHOICES.append((new_alert_backend.id, new_alert_backend.title))
return new_alert_backend
class BaseAlertBackend(object):
__metaclass__ = AlertBackendMeta
def __repr__(self):
return "<AlertBackend: %s>" % self.id
def __str__(self):
return str(self.id)
def mass_send(self, alerts):
from .models import Alert
if isinstance(alerts, Alert):
self.send(alerts)
else:
[self.send(alert) for alert in alerts]
def super_accepter(arg, lookup_dict):
"""
for the alerts and backends keyword arguments...
    - provides reasonable defaults
    - accepts a single alert/backend or a list of them
    - accepts an alert/backend class or a string containing the alert/backend id
"""
# reasonable default
if arg is None: return lookup_dict.values()
# single item or a list
if not isinstance(arg, (tuple, list)):
arg = [arg]
# normalize the arguments
ids = ((a if isinstance(a, basestring) else a.id) for a in arg)
# remove duplicates
_set = {}
ids = (_set.setdefault(id,id) for id in ids if id not in _set)
# lookup the objects
return [lookup_dict[id] for id in ids]
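# Illustrative sketch (not part of the original module): super_accepter()
# normalises the ``alerts``/``backends`` arguments.  Assuming an alert class
# MyAlert (id "MyAlert") has been declared, all of the following are valid:
#
#     super_accepter(None, ALERT_TYPES)         # every registered alert
#     super_accepter(MyAlert, ALERT_TYPES)      # [ALERT_TYPES['MyAlert']]
#     super_accepter('MyAlert', ALERT_TYPES)    # [ALERT_TYPES['MyAlert']]
#     super_accepter([MyAlert, 'MyAlert'], ALERT_TYPES)   # duplicates removed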
def unsubscribe_user(user, alerts=None, backends=None):
from .forms import UnsubscribeForm
form = UnsubscribeForm(user=user, alerts=alerts, backends=backends)
data = dict((field, False) for field in form.fields.keys())
form = UnsubscribeForm(data, user=user, alerts=alerts, backends=backends)
assert(form.is_valid())
form.save()
| mit | -3,880,105,067,049,292,300 | 30.930233 | 224 | 0.59092 | false |
mrgigabyte/proxybot | original_proxy_bot.py | 1 | 5906 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import telebot
import config
import dbhelper
# Initialize bot
bot = telebot.TeleBot(config.token)
# Handle always first "/start" message when new chat with your bot is created
@bot.message_handler(commands=["start"])
def command_start(message):
bot.send_message(message.chat.id, "Hello! Now please write your message to forward it to my owner!")
@bot.message_handler(func=lambda message: message.chat.id == config.my_id, content_types=["text"])
def my_text(message):
# If we're just sending messages to bot (not replying) -> do nothing and notify about it.
# Else -> get ID whom to reply and send message FROM bot.
if message.reply_to_message:
who_to_send_id = dbhelper.get_user_id(message.reply_to_message.message_id)
if who_to_send_id:
# You can add parse_mode="Markdown" or parse_mode="HTML", however, in this case you MUST make sure,
# that your markup if well-formed as described here: https://core.telegram.org/bots/api#formatting-options
# Otherwise, your message won't be sent.
bot.send_message(who_to_send_id, message.text)
# Temporarly disabled freeing message ids. They don't waste too much space
# dbhelper.delete_message(message.reply_to_message.message_id)
else:
bot.send_message(message.chat.id, "No one to reply!")
@bot.message_handler(func=lambda message: message.chat.id == config.my_id, content_types=["sticker"])
def my_sticker(message):
if message.reply_to_message:
who_to_send_id = dbhelper.get_user_id(message.reply_to_message.message_id)
if who_to_send_id:
bot.send_sticker(who_to_send_id, message.sticker.file_id)
else:
bot.send_message(message.chat.id, "No one to reply!")
@bot.message_handler(func=lambda message: message.chat.id == config.my_id, content_types=["photo"])
def my_photo(message):
if message.reply_to_message:
who_to_send_id = dbhelper.get_user_id(message.reply_to_message.message_id)
if who_to_send_id:
# Send the largest available (last item in photos array)
bot.send_photo(who_to_send_id, list(message.photo)[-1].file_id)
else:
bot.send_message(message.chat.id, "No one to reply!")
@bot.message_handler(func=lambda message: message.chat.id == config.my_id, content_types=["voice"])
def my_voice(message):
if message.reply_to_message:
who_to_send_id = dbhelper.get_user_id(message.reply_to_message.message_id)
if who_to_send_id:
# bot.send_chat_action(who_to_send_id, "record_audio")
bot.send_voice(who_to_send_id, message.voice.file_id, duration=message.voice.duration)
else:
bot.send_message(message.chat.id, "No one to reply!")
@bot.message_handler(func=lambda message: message.chat.id == config.my_id, content_types=["document"])
def my_document(message):
if message.reply_to_message:
who_to_send_id = dbhelper.get_user_id(message.reply_to_message.message_id)
if who_to_send_id:
# bot.send_chat_action(who_to_send_id, "upload_document")
bot.send_document(who_to_send_id, data=message.document.file_id)
else:
bot.send_message(message.chat.id, "No one to reply!")
@bot.message_handler(func=lambda message: message.chat.id == config.my_id, content_types=["audio"])
def my_audio(message):
if message.reply_to_message:
who_to_send_id = dbhelper.get_user_id(message.reply_to_message.message_id)
if who_to_send_id:
# bot.send_chat_action(who_to_send_id, "upload_audio")
bot.send_audio(who_to_send_id, performer=message.audio.performer,
audio=message.audio.file_id, title=message.audio.title,
duration=message.audio.duration)
else:
bot.send_message(message.chat.id, "No one to reply!")
@bot.message_handler(func=lambda message: message.chat.id == config.my_id, content_types=["video"])
def my_video(message):
if message.reply_to_message:
who_to_send_id = dbhelper.get_user_id(message.reply_to_message.message_id)
if who_to_send_id:
# bot.send_chat_action(who_to_send_id, "upload_video")
bot.send_video(who_to_send_id, data=message.video.file_id, duration=message.video.duration)
else:
bot.send_message(message.chat.id, "No one to reply!")
# No Google Maps on my phone, so this function is untested, should work fine though.
@bot.message_handler(func=lambda message: message.chat.id == config.my_id, content_types=["location"])
def my_location(message):
if message.reply_to_message:
who_to_send_id = dbhelper.get_user_id(message.reply_to_message.message_id)
if who_to_send_id:
# bot.send_chat_action(who_to_send_id, "find_location")
bot.send_location(who_to_send_id, latitude=message.location.latitude, longitude=message.location.longitude)
else:
bot.send_message(message.chat.id, "No one to reply!")
# Handle all incoming messages except group ones
@bot.message_handler(func=lambda message: message.chat.id != config.my_id,
content_types=['text', 'audio', 'document', 'photo', 'sticker', 'video',
'voice', 'location', 'contact'])
def check(message):
# Forward all messages from other people and save their message_id + 1 to shelve storage.
# +1, because message_id = X for message FROM user TO bot and
# message_id = X+1 for message FROM bot TO you
bot.forward_message(config.my_id, message.chat.id, message.message_id)
dbhelper.add_message(message.message_id + 1, message.chat.id)
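# dbhelper itself is not part of this file; purely as an illustration, a
# minimal shelve-based implementation matching the calls used above
# (add_message(message_id, chat_id) and get_user_id(message_id)) might look
# like this -- the real module may differ:
#
#     import shelve
#
#     def add_message(message_id, chat_id):
#         with shelve.open('messages') as storage:
#             storage[str(message_id)] = chat_id
#
#     def get_user_id(message_id):
#         with shelve.open('messages') as storage:
#             return storage.get(str(message_id))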
print('Bot has started\nPlease text the bot on: @{}'.format(bot.get_me().username))
bot.send_message(config.my_id, 'Bot started')
if __name__ == '__main__':
bot.polling(none_stop=True)
| mit | 1,987,259,348,465,000,200 | 44.782946 | 119 | 0.66661 | false |
Forage/Gramps | gramps/gen/merge/mergerepositoryquery.py | 1 | 2677 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Michiel D. Nauta
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Provide merge capabilities for repositories.
"""
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..lib import Source
from ..db import DbTxn
from ..const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().sgettext
from ..errors import MergeError
#-------------------------------------------------------------------------
#
# MergeRepoQuery
#
#-------------------------------------------------------------------------
class MergeRepositoryQuery(object):
"""
Create database query to merge two repositories.
"""
def __init__(self, dbstate, phoenix, titanic):
self.database = dbstate.db
self.phoenix = phoenix
self.titanic = titanic
def execute(self):
"""
Merges two repositories into a single repository.
"""
new_handle = self.phoenix.get_handle()
old_handle = self.titanic.get_handle()
self.phoenix.merge(self.titanic)
with DbTxn(_("Merge Repositories"), self.database) as trans:
self.database.commit_repository(self.phoenix, trans)
for (class_name, handle) in self.database.find_backlink_handles(
old_handle):
if class_name == Source.__name__:
source = self.database.get_source_from_handle(handle)
assert source.has_handle_reference('Repository', old_handle)
source.replace_repo_references(old_handle, new_handle)
self.database.commit_source(source, trans)
else:
raise MergeError("Encounter an object of type %s that has "
"a repository reference." % class_name)
self.database.remove_repository(old_handle, trans)
| gpl-2.0 | -7,466,816,308,397,948,000 | 35.671233 | 80 | 0.583115 | false |
ottogroup/palladium | palladium/tests/test_server.py | 1 | 27350 | from datetime import datetime
import io
import json
import math
from threading import Thread
from time import sleep
from unittest.mock import call
from unittest.mock import Mock
from unittest.mock import patch
import dateutil.parser
from flask import request
import numpy as np
import pytest
import ujson
from werkzeug.exceptions import BadRequest
def dec(func):
def inner(*args, **kwargs):
"""dec"""
return func(*args, **kwargs) + '_decorated'
return inner
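# Illustrative note (not part of the original tests): ``dec`` stands in for a
# predict decorator.  The tests below reference it by its dotted path, e.g.
#
#     config['predict_decorators'] = ['palladium.tests.test_server.dec']
#
# which is why decorated responses carry the '_decorated' suffix.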
class TestPredictService:
@pytest.fixture
def PredictService(self):
from palladium.server import PredictService
return PredictService
def test_functional(self, PredictService, flask_app):
model = Mock()
model.threshold = 0.3
model.size = 10
# needed as hasattr would evaluate to True otherwise
del model.threshold2
del model.size2
model.predict.return_value = np.array(['class1'])
service = PredictService(
mapping=[
('sepal length', 'float'),
('sepal width', 'float'),
('petal length', 'float'),
('petal width', 'float'),
('color', 'str'),
('age', 'int'),
('active', 'bool'),
('austrian', 'bool'),
],
params=[
('threshold', 'float'), # default will be overwritten
('size', 'int'), # not provided, default value kept
('threshold2', 'float'), # will be used, no default value
('size2', 'int'), # not provided, no default value
])
with flask_app.test_request_context():
with patch('palladium.util.get_config') as get_config:
get_config.return_value = {
'service_metadata': {
'service_name': 'iris',
'service_version': '0.1'
}
}
request = Mock(
args=dict([
('sepal length', '5.2'),
('sepal width', '3.5'),
('petal length', '1.5'),
('petal width', '0.2'),
('color', 'purple'),
('age', '1'),
('active', 'True'),
('austrian', 'False'),
('threshold', '0.7'),
('threshold2', '0.8'),
]),
method='GET',
)
resp = service(model, request)
assert (model.predict.call_args[0][0] ==
np.array([[5.2, 3.5, 1.5, 0.2,
'purple', 1, True, False]], dtype='object')).all()
assert model.predict.call_args[1]['threshold'] == 0.7
assert model.predict.call_args[1]['size'] == 10
assert model.predict.call_args[1]['threshold2'] == 0.8
assert 'size2' not in model.predict.call_args[1]
assert resp.status_code == 200
expected_resp_data = {
"metadata": {
"status": "OK",
"error_code": 0,
"service_name": "iris",
"service_version": "0.1",
},
"result": "class1"
}
assert json.loads(resp.get_data(as_text=True)) == expected_resp_data
def test_bad_request(self, PredictService, flask_app):
predict_service = PredictService(mapping=[])
model = Mock()
request = Mock()
with patch.object(predict_service, 'do') as psd:
with flask_app.test_request_context():
bad_request = BadRequest()
bad_request.args = ('daniel',)
psd.side_effect = bad_request
resp = predict_service(model, request)
resp_data = json.loads(resp.get_data(as_text=True))
assert resp.status_code == 400
assert resp_data == {
"metadata": {
"status": "ERROR",
"error_code": -1,
"error_message": "BadRequest: ('daniel',)"
}
}
def test_predict_error(self, PredictService, flask_app):
from palladium.interfaces import PredictError
predict_service = PredictService(mapping=[])
model = Mock()
request = Mock()
with patch.object(predict_service, 'do') as psd:
with flask_app.test_request_context():
psd.side_effect = PredictError("mymessage", 123)
resp = predict_service(model, request)
resp_data = json.loads(resp.get_data(as_text=True))
assert resp.status_code == 500
assert resp_data == {
"metadata": {
"status": "ERROR",
"error_code": 123,
"error_message": "mymessage",
}
}
def test_generic_error(self, PredictService, flask_app):
predict_service = PredictService(mapping=[])
model = Mock()
request = Mock()
with patch.object(predict_service, 'do') as psd:
with flask_app.test_request_context():
psd.side_effect = KeyError("model")
resp = predict_service(model, request)
resp_data = json.loads(resp.get_data(as_text=True))
assert resp.status_code == 500
assert resp_data == {
"metadata": {
"status": "ERROR",
"error_code": -1,
"error_message": "KeyError: 'model'",
}
}
def test_sample_from_data(self, PredictService):
predict_service = PredictService(
mapping=[
('name', 'str'),
('sepal width', 'int'),
],
)
model = Mock()
request_args = {'name': 'myflower', 'sepal width': 3}
sample = predict_service.sample_from_data(model, request_args)
assert sample[0] == 'myflower'
assert sample[1] == 3
def test_unwrap_sample_get(self, PredictService, flask_app):
predict_service = PredictService(
mapping=[('text', 'str')],
unwrap_sample=True,
)
model = Mock()
model.predict.return_value = np.array([1])
with flask_app.test_request_context():
request = Mock(
args=dict([
('text', 'Hi this is text'),
]),
method='GET',
)
resp = predict_service(model, request)
assert model.predict.call_args[0][0].ndim == 1
model.predict.assert_called_with(np.array(['Hi this is text']))
resp_data = json.loads(resp.get_data(as_text=True))
assert resp.status_code == 200
assert resp_data == {
"metadata": {
"status": "OK",
"error_code": 0,
},
"result": 1,
}
def test_unwrap_sample_post(self, PredictService, flask_app):
predict_service = PredictService(
mapping=[('text', 'str')],
unwrap_sample=True,
)
model = Mock()
model.predict.return_value = np.array([1, 2])
with flask_app.test_request_context():
request = Mock(
json=[
{'text': 'First piece of text'},
{'text': 'Second piece of text'},
],
method='POST',
mimetype='application/json',
)
resp = predict_service(model, request)
assert model.predict.call_args[0][0].ndim == 1
assert (
model.predict.call_args[0] ==
np.array(['First piece of text', 'Second piece of text'])
).all()
resp_data = json.loads(resp.get_data(as_text=True))
assert resp.status_code == 200
assert resp_data == {
"metadata": {
"status": "OK",
"error_code": 0,
},
"result": [1, 2],
}
def test_probas(self, PredictService, flask_app):
model = Mock()
model.predict_proba.return_value = np.array([[0.1, 0.5, math.pi]])
predict_service = PredictService(mapping=[], predict_proba=True)
with flask_app.test_request_context():
resp = predict_service(model, request)
resp_data = json.loads(resp.get_data(as_text=True))
assert resp.status_code == 200
assert resp_data == {
"metadata": {
"status": "OK",
"error_code": 0,
},
"result": [0.1, 0.5, math.pi],
}
def test_post_request(self, PredictService, flask_app):
model = Mock()
model.predict.return_value = np.array([3, 2])
service = PredictService(
mapping=[
('sepal length', 'float'),
('sepal width', 'float'),
('petal length', 'float'),
('petal width', 'float'),
],
params=[
('threshold', 'float'),
],
)
request = Mock(
json=[
{
'sepal length': '5.2',
'sepal width': '3.5',
'petal length': '1.5',
'petal width': '0.2',
},
{
'sepal length': '5.7',
'sepal width': '4.0',
'petal length': '2.0',
'petal width': '0.7',
},
],
args=dict(threshold=1.0),
method='POST',
mimetype='application/json',
)
with flask_app.test_request_context():
resp = service(model, request)
assert (model.predict.call_args[0][0] == np.array([
[5.2, 3.5, 1.5, 0.2],
[5.7, 4.0, 2.0, 0.7],
],
dtype='object',
)).all()
assert model.predict.call_args[1]['threshold'] == 1.0
assert resp.status_code == 200
expected_resp_data = {
"metadata": {
"status": "OK",
"error_code": 0,
},
"result": [3, 2],
}
assert json.loads(resp.get_data(as_text=True)) == expected_resp_data
@pytest.yield_fixture
def mock_predict(self, monkeypatch):
def mock_predict(model_persister, predict_service):
return predict_service.entry_point
monkeypatch.setattr(
'palladium.server.predict', mock_predict)
yield mock_predict
def test_entry_point_not_set(
self, config, flask_app_test, flask_client, mock_predict):
from palladium.config import process_config
config['model_persister'] = Mock()
config['predict_service'] = {
'!': 'palladium.server.PredictService',
'mapping': [
('param', 'str'),
],
}
# set default predict_decorators
config['predict_decorators'] = ['palladium.tests.test_server.dec']
with flask_app_test.test_request_context():
process_config(config)
resp1 = flask_client.get(
'predict?param=bla')
# decorated result: default predict_decorators is defined
assert resp1.get_data().decode('utf-8') == '/predict_decorated'
def test_entry_point_multiple(
self, config, flask_app_test, flask_client, mock_predict):
from palladium.config import process_config
config['model_persister'] = Mock()
config['my_predict_service'] = {
'!': 'palladium.server.PredictService',
'mapping': [
('param', 'str'),
],
'entry_point': '/predict1',
}
config['my_predict_service2'] = {
'!': 'palladium.server.PredictService',
'mapping': [
('param', 'str'),
],
'entry_point': '/predict2',
'decorator_list_name': 'predict_decorators2',
}
# only second predict service uses decorator list
config['predict_decorators2'] = ['palladium.tests.test_server.dec']
with flask_app_test.test_request_context():
process_config(config)
resp1 = flask_client.get(
'predict1?param=bla')
# no decorated result: default predict_decorators is not defined
assert resp1.get_data().decode('utf-8') == '/predict1'
resp2 = flask_client.get(
'predict2?param=bla')
# decorated result using predict_decorators2
assert resp2.get_data().decode('utf-8') == '/predict2_decorated'
def test_entry_point_multiple_conflict(
self, config, flask_app_test, flask_client, mock_predict):
from palladium.config import process_config
config['model_persister'] = Mock()
config['my_predict_service'] = {
'!': 'palladium.server.PredictService',
'mapping': [
('param', 'str'),
],
'entry_point': '/predict1', # <--
}
config['my_predict_service2'] = {
'!': 'palladium.server.PredictService',
'mapping': [
('param', 'str'),
],
'entry_point': '/predict1', # conflict: entry point exists
}
with pytest.raises(AssertionError):
with flask_app_test.test_request_context():
process_config(config)
class TestPredict:
@pytest.fixture
def predict(self):
from palladium.server import predict
return predict
def test_predict_functional(self, config, flask_app_test, flask_client):
from palladium.server import make_ujson_response
model_persister = config['model_persister'] = Mock()
predict_service = config['predict_service'] = Mock()
with flask_app_test.test_request_context():
from palladium.server import create_predict_function
create_predict_function(
'/predict', predict_service, 'predict_decorators', config)
predict_service.return_value = make_ujson_response(
'a', status_code=200)
model = model_persister.read()
resp = flask_client.get(
'predict?sepal length=1.0&sepal width=1.1&'
'petal length=0.777&petal width=5')
resp_data = json.loads(resp.get_data(as_text=True))
assert resp_data == 'a'
assert resp.status_code == 200
with flask_app_test.test_request_context():
predict_service.assert_called_with(model, request)
def test_unknown_exception(self, predict, flask_app):
model_persister = Mock()
model_persister.read.side_effect = KeyError('model')
with flask_app.test_request_context():
resp = predict(model_persister, Mock())
resp_data = json.loads(resp.get_data(as_text=True))
assert resp.status_code == 500
assert resp_data == {
"status": "ERROR",
"error_code": -1,
"error_message": "KeyError: 'model'",
}
class TestAliveFunctional:
def test_empty_process_state(self, config, flask_client):
config['service_metadata'] = {'hello': 'world'}
resp = flask_client.get('alive')
assert resp.status_code == 200
resp_data = json.loads(resp.get_data(as_text=True))
assert sorted(resp_data.keys()) == ['memory_usage',
'memory_usage_vms',
'palladium_version',
'process_metadata',
'service_metadata']
assert resp_data['service_metadata'] == config['service_metadata']
def test_filled_process_state(self, config, process_store, flask_client):
config['alive'] = {'process_store_required': ('model', 'data')}
before = datetime.now()
process_store['model'] = Mock(__metadata__={'hello': 'is it me'})
process_store['data'] = Mock(__metadata__={'bye': 'not you'})
after = datetime.now()
resp = flask_client.get('alive')
assert resp.status_code == 200
resp_data = json.loads(resp.get_data(as_text=True))
model_updated = dateutil.parser.parse(resp_data['model']['updated'])
data_updated = dateutil.parser.parse(resp_data['data']['updated'])
assert before < model_updated < after
assert resp_data['model']['metadata'] == {'hello': 'is it me'}
assert before < data_updated < after
assert resp_data['data']['metadata'] == {'bye': 'not you'}
def test_missing_process_state(self, config, process_store, flask_client):
config['alive'] = {'process_store_required': ('model', 'data')}
process_store['model'] = Mock(__metadata__={'hello': 'is it me'})
resp = flask_client.get('alive')
assert resp.status_code == 503
resp_data = json.loads(resp.get_data(as_text=True))
assert resp_data['model']['metadata'] == {'hello': 'is it me'}
assert resp_data['data'] == 'N/A'
class TestPredictStream:
@pytest.fixture
def PredictStream(self):
from palladium.server import PredictStream
return PredictStream
@pytest.fixture
def stream(self, config, PredictStream):
config['model_persister'] = Mock()
predict_service = config['predict_service'] = Mock()
predict_service.sample_from_data.side_effect = (
lambda model, data: data)
predict_service.params_from_data.side_effect = (
lambda model, data: data)
return PredictStream()
def test_listen_direct_exit(self, stream):
io_in = io.StringIO()
io_out = io.StringIO()
io_err = io.StringIO()
stream_thread = Thread(
target=stream.listen(io_in, io_out, io_err))
stream_thread.start()
io_in.write('EXIT\n')
stream_thread.join()
io_out.seek(0)
io_err.seek(0)
assert len(io_out.read()) == 0
assert len(io_err.read()) == 0
assert stream.predict_service.predict.call_count == 0
def test_listen(self, stream):
io_in = io.StringIO()
io_out = io.StringIO()
io_err = io.StringIO()
lines = [
'[{"id": 1, "color": "blue", "length": 1.0}]\n',
'[{"id": 1, "color": "{\\"a\\": 1, \\"b\\": 2}", "length": 1.0}]\n',
'[{"id": 1, "color": "blue", "length": 1.0}, {"id": 2, "color": "{\\"a\\": 1, \\"b\\": 2}", "length": 1.0}]\n',
]
for line in lines:
io_in.write(line)
io_in.write('EXIT\n')
io_in.seek(0)
predict = stream.predict_service.predict
predict.side_effect = (
lambda model, samples, **params:
np.array([{'result': 1}] * len(samples))
)
stream_thread = Thread(
target=stream.listen(io_in, io_out, io_err))
stream_thread.start()
stream_thread.join()
io_out.seek(0)
io_err.seek(0)
assert len(io_err.read()) == 0
assert io_out.read() == (
('[{"result":1}]\n' * 2) + ('[{"result":1},{"result":1}]\n'))
assert predict.call_count == 3
# check if the correct arguments are passed to predict call
assert predict.call_args_list[0][0][1] == np.array([
{'id': 1, 'color': 'blue', 'length': 1.0}])
assert predict.call_args_list[1][0][1] == np.array([
{'id': 1, 'color': '{"a": 1, "b": 2}', 'length': 1.0}])
assert (predict.call_args_list[2][0][1] == np.array([
{'id': 1, 'color': 'blue', 'length': 1.0},
{'id': 2, 'color': '{"a": 1, "b": 2}', 'length': 1.0},
])).all()
# check if string representation of attribute can be converted to json
assert ujson.loads(predict.call_args_list[1][0][1][0]['color']) == {
"a": 1, "b": 2}
def test_predict_error(self, stream):
from palladium.interfaces import PredictError
io_in = io.StringIO()
io_out = io.StringIO()
io_err = io.StringIO()
line = '[{"hey": "1"}]\n'
io_in.write(line)
io_in.write('EXIT\n')
io_in.seek(0)
stream.predict_service.predict.side_effect = PredictError('error')
stream_thread = Thread(
target=stream.listen(io_in, io_out, io_err))
stream_thread.start()
stream_thread.join()
io_out.seek(0)
io_err.seek(0)
assert io_out.read() == '[]\n'
assert io_err.read() == (
"Error while processing input row: {}"
"<class 'palladium.interfaces.PredictError'>: "
"error (-1)\n".format(line))
assert stream.predict_service.predict.call_count == 1
def test_predict_params(self, config, stream):
from palladium.server import PredictService
line = '[{"length": 1.0, "width": 1.0, "turbo": "true"}]'
model = Mock()
model.predict.return_value = np.array([[{'class': 'a'}]])
model.turbo = False
model.magic = False
stream.model = model
mapping = [
('length', 'float'),
('width', 'float'),
]
params = [
('turbo', 'bool'), # will be set by request args
('magic', 'bool'), # default value will be used
]
stream.predict_service = PredictService(
mapping=mapping,
params=params,
)
expected = [{'class': 'a'}]
result = stream.process_line(line)
assert result == expected
assert model.predict.call_count == 1
assert (model.predict.call_args[0][0] == np.array([[1.0, 1.0]])).all()
assert model.predict.call_args[1]['turbo'] is True
assert model.predict.call_args[1]['magic'] is False
class TestList:
@pytest.fixture
def list(self):
from palladium.server import list
return list
def test_it(self, config, process_store, flask_client):
mp = config['model_persister'] = Mock()
mp.list_models.return_value = ['one', 'two']
mp.list_properties.return_value = {'hey': 'there'}
resp = flask_client.get('list')
assert resp.status_code == 200
resp_data = json.loads(resp.get_data(as_text=True))
assert resp_data == {
'models': ['one', 'two'],
'properties': {'hey': 'there'},
}
class TestFitFunctional:
@pytest.fixture
def fit(self):
from palladium.server import fit
return fit
@pytest.fixture
def jobs(self, process_store):
jobs = process_store['process_metadata'].setdefault('jobs', {})
yield jobs
jobs.clear()
def test_it(self, fit, config, jobs, flask_app):
dsl, model, model_persister = Mock(), Mock(), Mock()
del model.cv_results_
X, y = Mock(), Mock()
dsl.return_value = X, y
config['dataset_loader_train'] = dsl
config['model'] = model
config['model_persister'] = model_persister
with flask_app.test_request_context(method='POST'):
resp = fit()
sleep(0.05)
resp_json = json.loads(resp.get_data(as_text=True))
job = jobs[resp_json['job_id']]
assert job['status'] == 'finished'
assert job['info'] == str(model)
@pytest.mark.parametrize('args, args_expected', [
(
{'persist': '1', 'activate': '0', 'evaluate': 't'},
{'persist': True, 'activate': False, 'evaluate': True},
),
(
{'persist_if_better_than': '0.234'},
{'persist_if_better_than': 0.234},
),
])
def test_pass_args(self, fit, flask_app, args, args_expected):
with patch('palladium.server.fit_base') as fit_base:
fit_base.__name__ = 'mock'
with flask_app.test_request_context(method='POST', data=args):
fit()
sleep(0.02)
assert fit_base.call_args == call(**args_expected)
class TestUpdateModelCacheFunctional:
@pytest.fixture
def update_model_cache(self):
from palladium.server import update_model_cache
return update_model_cache
@pytest.fixture
def jobs(self, process_store):
jobs = process_store['process_metadata'].setdefault('jobs', {})
yield jobs
jobs.clear()
def test_success(self, update_model_cache, config, jobs, flask_app):
model_persister = Mock()
config['model_persister'] = model_persister
with flask_app.test_request_context(method='POST'):
resp = update_model_cache()
sleep(0.02)
resp_json = json.loads(resp.get_data(as_text=True))
job = jobs[resp_json['job_id']]
assert job['status'] == 'finished'
assert job['info'] == repr(model_persister.update_cache())
def test_unavailable(self, update_model_cache, config, jobs, flask_app):
model_persister = Mock()
del model_persister.update_cache
config['model_persister'] = model_persister
with flask_app.test_request_context(method='POST'):
resp = update_model_cache()
assert resp.status_code == 503
class TestActivateFunctional:
@pytest.fixture
def activate(self):
from palladium.server import activate
return activate
@pytest.fixture
def activate_base_mock(self, monkeypatch):
func = Mock()
monkeypatch.setattr('palladium.server.activate_base', func)
return func
def test_success(self, activate, activate_base_mock, config, flask_app):
model_persister = Mock(
list_models=lambda: {'be': 'first'},
list_properties=lambda: {'be': 'twice'},
)
config['model_persister'] = model_persister
with flask_app.test_request_context(
method='POST',
data={'model_version': 123},
):
resp = activate()
assert resp.status_code == 200
assert resp.json == {
'models': {'be': 'first'},
'properties': {'be': 'twice'},
}
def test_lookuperror(self, activate, activate_base_mock, flask_app):
activate_base_mock.side_effect = LookupError
with flask_app.test_request_context(
method='POST',
data={'model_version': 123},
):
resp = activate()
assert resp.status_code == 503
def _test_add_url_rule_func():
return b'A OK'
class TestAddUrlRule:
@pytest.fixture
def add_url_rule(self):
from palladium.server import add_url_rule
return add_url_rule
def test_it(self, add_url_rule, flask_client):
add_url_rule(
'/okay',
view_func='palladium.tests.test_server._test_add_url_rule_func',
)
resp = flask_client.get('/okay')
assert resp.data == b'A OK'
| apache-2.0 | -4,794,314,816,951,428,000 | 34.064103 | 123 | 0.519305 | false |
griimick/feature-mlsite | app/static/hindi-dependency-parser-2.0/bin/normalize_bojar_lrec_2010.py | 1 | 1281 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
vowels_to_be_replaced= {}
def replace_null(from_chr_num, to_chr_num):
for x in range(from_chr_num, to_chr_num):
vowels_to_be_replaced[chr(x)]= ""
#replace_null(0x0900, 0x0904)
#replace_null(0x093A, 0x0950)
#replace_null(0x0951, 0x0958)
#replace_null(0x0962, 0x0964)
#replace_null(0x0971, 0x0972)
vowels_to_be_replaced['\u0901']= '\u0902' # candrabindu (U+0901) -> anusvara (U+0902)
vowels_to_be_replaced[""]= "न"
vowels_to_be_replaced["ऩ"]= "न"
vowels_to_be_replaced['ऱ']= "र"
vowels_to_be_replaced['ऴ']= "ळ"
vowels_to_be_replaced['क़']= "क"
vowels_to_be_replaced['ख़']= "ख"
vowels_to_be_replaced['ग़']= "ग"
vowels_to_be_replaced['ज़']= "ज"
vowels_to_be_replaced['ड़']= "ड"
vowels_to_be_replaced['ढ़']= "ढ"
vowels_to_be_replaced['फ़']= "फ"
vowels_to_be_replaced['य़']= "य"
vowels_to_be_replaced['ॠ']= "ऋ"
vowels_to_be_replaced['ॡ']= "ऌ"
def normalise(word):
# Word should be unicode encoding
nword=""
for char in word:
if char in vowels_to_be_replaced:
nword+= vowels_to_be_replaced[char]
else:
nword+= char
return nword
if __name__=="__main__":
print((normalise("भागता")))
print((normalise("तृष्णा")))
| mit | 5,023,343,353,184,191,000 | 25.777778 | 47 | 0.617427 | false |
UTSA-ICS/keystone-kerberos | keystone/common/cache/core.py | 1 | 7934 | # Copyright 2013 Metacloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keystone Caching Layer Implementation."""
import dogpile.cache
from dogpile.cache import proxy
from dogpile.cache import util
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from keystone import exception
from keystone.i18n import _, _LE
CONF = cfg.CONF
LOG = log.getLogger(__name__)
make_region = dogpile.cache.make_region
dogpile.cache.register_backend(
'keystone.common.cache.noop',
'keystone.common.cache.backends.noop',
'NoopCacheBackend')
dogpile.cache.register_backend(
'keystone.cache.mongo',
'keystone.common.cache.backends.mongo',
'MongoCacheBackend')
dogpile.cache.register_backend(
'keystone.cache.memcache_pool',
'keystone.common.cache.backends.memcache_pool',
'PooledMemcachedBackend')
class DebugProxy(proxy.ProxyBackend):
"""Extra Logging ProxyBackend."""
# NOTE(morganfainberg): Pass all key/values through repr to ensure we have
# a clean description of the information. Without use of repr, it might
# be possible to run into encode/decode error(s). For logging/debugging
# purposes encode/decode is irrelevant and we should be looking at the
# data exactly as it stands.
def get(self, key):
value = self.proxied.get(key)
LOG.debug('CACHE_GET: Key: "%(key)r" Value: "%(value)r"',
{'key': key, 'value': value})
return value
def get_multi(self, keys):
values = self.proxied.get_multi(keys)
LOG.debug('CACHE_GET_MULTI: "%(keys)r" Values: "%(values)r"',
{'keys': keys, 'values': values})
return values
def set(self, key, value):
LOG.debug('CACHE_SET: Key: "%(key)r" Value: "%(value)r"',
{'key': key, 'value': value})
return self.proxied.set(key, value)
def set_multi(self, keys):
LOG.debug('CACHE_SET_MULTI: "%r"', keys)
self.proxied.set_multi(keys)
def delete(self, key):
self.proxied.delete(key)
LOG.debug('CACHE_DELETE: "%r"', key)
def delete_multi(self, keys):
LOG.debug('CACHE_DELETE_MULTI: "%r"', keys)
self.proxied.delete_multi(keys)
def build_cache_config():
"""Build the cache region dictionary configuration.
:returns: dict
"""
prefix = CONF.cache.config_prefix
conf_dict = {}
conf_dict['%s.backend' % prefix] = CONF.cache.backend
conf_dict['%s.expiration_time' % prefix] = CONF.cache.expiration_time
for argument in CONF.cache.backend_argument:
try:
(argname, argvalue) = argument.split(':', 1)
except ValueError:
msg = _LE('Unable to build cache config-key. Expected format '
'"<argname>:<value>". Skipping unknown format: %s')
LOG.error(msg, argument)
continue
arg_key = '.'.join([prefix, 'arguments', argname])
conf_dict[arg_key] = argvalue
LOG.debug('Keystone Cache Config: %s', conf_dict)
# NOTE(yorik-sar): these arguments will be used for memcache-related
# backends. Use setdefault for url to support old-style setting through
# backend_argument=url:127.0.0.1:11211
conf_dict.setdefault('%s.arguments.url' % prefix,
CONF.cache.memcache_servers)
for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
'pool_unused_timeout', 'pool_connection_get_timeout'):
value = getattr(CONF.cache, 'memcache_' + arg)
conf_dict['%s.arguments.%s' % (prefix, arg)] = value
return conf_dict
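# Illustrative sketch of the returned mapping (the option values here are
# assumed for the example, not Keystone defaults): with
# config_prefix = 'cache.keystone', backend = 'dogpile.cache.memcached' and
# backend_argument = ['url:127.0.0.1:11211'], build_cache_config() produces a
# flat dict along the lines of
#     {'cache.keystone.backend': 'dogpile.cache.memcached',
#      'cache.keystone.expiration_time': 600,
#      'cache.keystone.arguments.url': '127.0.0.1:11211'}
# which dogpile.cache consumes via configure_from_config() below.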
def configure_cache_region(region):
"""Configure a cache region.
:param region: optional CacheRegion object, if not provided a new region
will be instantiated
:raises: exception.ValidationError
:returns: dogpile.cache.CacheRegion
"""
if not isinstance(region, dogpile.cache.CacheRegion):
raise exception.ValidationError(
_('region not type dogpile.cache.CacheRegion'))
if not region.is_configured:
# NOTE(morganfainberg): this is how you tell if a region is configured.
# There is a request logged with dogpile.cache upstream to make this
# easier / less ugly.
config_dict = build_cache_config()
region.configure_from_config(config_dict,
'%s.' % CONF.cache.config_prefix)
if CONF.cache.debug_cache_backend:
region.wrap(DebugProxy)
# NOTE(morganfainberg): if the backend requests the use of a
# key_mangler, we should respect that key_mangler function. If a
# key_mangler is not defined by the backend, use the sha1_mangle_key
# mangler provided by dogpile.cache. This ensures we always use a fixed
# size cache-key.
if region.key_mangler is None:
region.key_mangler = util.sha1_mangle_key
for class_path in CONF.cache.proxies:
# NOTE(morganfainberg): if we have any proxy wrappers, we should
# ensure they are added to the cache region's backend. Since
# configure_from_config doesn't handle the wrap argument, we need
# to manually add the Proxies. For information on how the
# ProxyBackends work, see the dogpile.cache documents on
# "changing-backend-behavior"
cls = importutils.import_class(class_path)
LOG.debug("Adding cache-proxy '%s' to backend.", class_path)
region.wrap(cls)
return region
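# Usage sketch (assumed wiring, mirroring the docstring above): callers
# normally pass in the module-level region defined at the bottom of this file
# once configuration has been loaded, e.g.
#     configure_cache_region(REGION)
# after which the on_arguments decorator exported below caches against the
# configured backend.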
def should_cache_fn(section):
"""Build a function that returns a config section's caching status.
For any given driver in keystone that has caching capabilities, a boolean
config option for that driver's section (e.g. ``token``) should exist and
default to ``True``. This function will use that value to tell the caching
decorator if caching for that driver is enabled. To properly use this
with the decorator, pass this function the configuration section and assign
the result to a variable. Pass the new variable to the caching decorator
as the named argument ``should_cache_fn``. e.g.::
from keystone.common import cache
SHOULD_CACHE = cache.should_cache_fn('token')
@cache.on_arguments(should_cache_fn=SHOULD_CACHE)
def function(arg1, arg2):
...
:param section: name of the configuration section to examine
:type section: string
:returns: function reference
"""
def should_cache(value):
if not CONF.cache.enabled:
return False
conf_group = getattr(CONF, section)
return getattr(conf_group, 'caching', True)
return should_cache
def key_generate_to_str(s):
# NOTE(morganfainberg): Since we need to stringify all arguments, attempt
# to stringify and handle the Unicode error explicitly as needed.
try:
return str(s)
except UnicodeEncodeError:
return s.encode('utf-8')
def function_key_generator(namespace, fn, to_str=key_generate_to_str):
# NOTE(morganfainberg): This wraps dogpile.cache's default
# function_key_generator to change the default to_str mechanism.
return util.function_key_generator(namespace, fn, to_str=to_str)
REGION = dogpile.cache.make_region(
function_key_generator=function_key_generator)
on_arguments = REGION.cache_on_arguments
| apache-2.0 | -1,035,624,296,600,986,400 | 35.731481 | 79 | 0.659692 | false |
balint256/ice | tlm/ui.py | 1 | 18027 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ui.py
#
# Copyright 2014 Balint Seeber <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# FIXME:
# * Update prediction (using detected bitrate from Network)
# * Colour
# * Handle when screen size isn't large enough (curses throws ERR)
import curses, datetime, math
import state
from constants import *
from primitives import *
class Layout():
def __init__(self, name, ui):
self.name = name
self.ui = ui
self.active = False
self.y_offset = 0
def draw(self, y):
pass
def deactivate(self):
self.active = False
def activate(self, y):
self.y_offset = y
self.active = True
class MinorFrameLayout(Layout):
def __init__(self, *args, **kwds):
Layout.__init__(self, *args, **kwds)
self.deframer = self.ui.engine.deframer
self.last_updated_idx = 0
self.changed = False
self.prev_frame_idx = 0
def activate(self, y):
self.ui.engine.register(EVENT_NEW_BYTE, self)
# FIXME: Draw the frame thus far
Layout.activate(self, y)
def deactivate(self):
self.ui.engine.unregister(EVENT_NEW_BYTE, self)
Layout.deactivate(self)
def __call__(self, *args, **kwds):
if not self.active:
self.changed = True
raise Exception("MinorFrameLayout callback while not active")
return
stdscr = self.ui.scr
byte = kwds['byte']
frame = kwds['frame']
if kwds['idx'] is None:
frame_idx = len(frame) - 1
else:
frame_idx = kwds['idx']
width = 16
section_length = 8
y_factor = 2
prev_frame_idx = frame_idx - 1
if prev_frame_idx == -1:
prev_frame_idx = MINOR_FRAME_LEN - 1
#if prev_frame_idx < len(frame):
if True: # FIXME: Being lazy here
y = prev_frame_idx / width
x = prev_frame_idx % width
#stdscr.move(y + y_offset, x * section_length)
#stdscr.addstr("%03d %02x " % (prev_frame_idx, frame[prev_frame_idx]))
stdscr.move(y*y_factor + self.y_offset, x * section_length + 3)
stdscr.addstr(" ")
stdscr.move(y*y_factor + self.y_offset, x * section_length + 3 + 3)
stdscr.addstr(" ")
y = frame_idx / width
x = frame_idx % width
stdscr.move(y*y_factor + self.y_offset, x * section_length)
stdscr.addstr("%03d[%02x]" % (frame_idx, byte))
def draw(self, y):
#if not self.changed:
# return
#self.deframer
pass # Purely event driven at the moment
class SubcomSubLayout():
def __init__(self, key, subcom_tracker, y_offset):
self.key = key
self.subcom_tracker = subcom_tracker
self.last_updated_idx = None
self.y_offset = y_offset
def name(self): return self.key
class SubcomLayout(Layout):
def __init__(self, *args, **kwds):
Layout.__init__(self, *args, **kwds)
self.subcom_trackers = self.ui.engine.subcom_trackers
self.subcom_sublayouts = {}
self.width = 16
self.y_factor = 2
self.max_name_len = 0
y = 0
for subcom_key in self.subcom_trackers.keys():
subcom_tracker = self.subcom_trackers[subcom_key]
sublayout = SubcomSubLayout(subcom_key, subcom_tracker, y)
self.max_name_len = max(self.max_name_len, len(sublayout.name()))
self.subcom_sublayouts[subcom_key] = sublayout
height = int(math.ceil(1.*subcom_tracker.length / self.width)) * self.y_factor - (self.y_factor - 1)
y += (height + 3)
self.x_offset = self.max_name_len + 4 # Additional space
self.changed = False
def draw(self, y):
scr = self.ui.scr
for subcom_key in self.subcom_trackers.keys():
subcom_tracker = self.subcom_trackers[subcom_key]
subcom_sublayout = self.subcom_sublayouts[subcom_key]
scr.move(y + subcom_sublayout.y_offset + 2, 1)
scr.addstr("%03d" % (subcom_tracker.discontinuity_cnt))
def activate(self, y):
for subcom_key in self.subcom_trackers.keys():
self.subcom_trackers[subcom_key].register(EVENT_NEW_BYTE, self)
scr = self.ui.scr
for subcom_key in self.subcom_sublayouts.keys():
subcom_sublayout = self.subcom_sublayouts[subcom_key]
scr.move(y + subcom_sublayout.y_offset, 1)
scr.addstr(subcom_sublayout.name())
# FIXME: Draw the frame thus far
Layout.activate(self, y)
def deactivate(self):
for subcom_key in self.subcom_trackers.keys():
self.subcom_trackers[subcom_key].unregister(EVENT_NEW_BYTE, self)
Layout.deactivate(self)
def __call__(self, event, source, *args, **kwds):
if not self.active:
self.changed = True
raise Exception("SubcomLayout callback while not active")
return
stdscr = self.ui.scr
byte = kwds['byte']
frame = kwds['frame']
frame_idx = len(frame) - 1
sublayout = self.subcom_sublayouts[source.key]
section_length = 8
prev_frame_idx = frame_idx - 1
if prev_frame_idx == -1:
prev_frame_idx = sublayout.subcom_tracker.length - 1
#if prev_frame_idx < len(frame):
if True: # FIXME: Being lazy here
y = prev_frame_idx / self.width
x = prev_frame_idx % self.width
stdscr.move(y*self.y_factor + self.y_offset + sublayout.y_offset, self.x_offset + x * section_length + 3)
stdscr.addstr(" ")
stdscr.move(y*self.y_factor + self.y_offset + sublayout.y_offset, self.x_offset + x * section_length + 3 + 3)
stdscr.addstr(" ")
y = frame_idx / self.width
x = frame_idx % self.width
stdscr.move(self.y_offset + sublayout.y_offset + y*self.y_factor, self.x_offset + x * section_length)
stdscr.addstr("%03d[%02x]" % (frame_idx, byte))
class ElementsLayout(Layout):
def __init__(self, elements, padding=10, *args, **kwds):
Layout.__init__(self, *args, **kwds)
self.elements = elements
self.max_id_len = 0
self.y_offset_map = {}
self.trigger_map = {}
self.padding = padding
self.last_draw_time = {}
self.draw_count = {}
self.draw_time_delta = datetime.timedelta(milliseconds=250)
self.max_value_len = 0
self.full_refresh = False
for element in self.elements:
self.last_draw_time[element.id()] = None
self.draw_count[element.id()] = 0
self.max_id_len = max(self.max_id_len, len(element.id()))
trigger_indices = self.ui.engine.get_element_state(element).get_element().positions().get_trigger_indices(mode=self.ui.engine.options.mode)
for trigger_index in trigger_indices:
if trigger_index not in self.trigger_map.keys(): self.trigger_map[trigger_index] = []
self.trigger_map[trigger_index] += [element]
def activate(self, y):
scr = self.ui.scr
cnt = 0
self.y_offset_map = {}
for element in self.elements:
self.y_offset_map[element.id()] = y+cnt
self.ui.engine.track(element.positions().get_trigger_indices(mode=self.ui.engine.options.mode), self)
scr.move(self.y_offset_map[element.id()], 1)
scr.addstr(element.id())
self.draw_element(element)
cnt += 1
Layout.activate(self, y)
def deactivate(self):
for element in self.elements:
self.ui.engine.untrack(element.positions().get_trigger_indices(mode=self.ui.engine.options.mode), self)
Layout.deactivate(self)
def __call__(self, *args, **kwds):
trigger = kwds['trigger']
res, map_res = trigger.check_map(self.trigger_map)
if not res:
raise Exception("%s not in %s" % (trigger, self.trigger_map.keys()))
triggered_elements = map_res
for element in triggered_elements:
self.draw_element(element)
def draw_element(self, element):
scr = self.ui.scr
element_state = self.ui.engine.get_element_state(element)
scr.move(self.y_offset_map[element.id()], 1 + self.max_id_len + self.padding)
scr.clrtoeol()
if element_state.last_value is None:
return
self.draw_count[element.id()] += 1
count_str = "[%04d]" % element_state.update_count
scr.addstr(count_str)
s = " = "
value_str = element.formatter().format(element_state.last_value)
s += value_str
if element.unit() is not None and len(element.unit()) > 0:
s += " " + element.unit()
if element_state.last_valid is not None:
if element_state.last_valid == True:
s += " (valid)" # FIXME: Green
elif element_state.last_valid == False:
s += " (invalid)" # FIXME: Red
if len(s) > self.max_value_len:
self.max_value_len = len(s)
self.full_refresh = True
scr.addstr(s)
if element_state.previous_value is not None:
scr.move(self.y_offset_map[element.id()], 1 + self.max_id_len + self.padding + self.max_value_len + 10) # MAGIC
s = " (%03d: %s)" % ((self.ui.engine.get_local_time_now() - element_state.previous_value_time).total_seconds(), element.formatter().format(element_state.previous_value))
scr.addstr(s)
time_delta = self.ui.engine.get_local_time_now() - element_state.last_update_time
time_str = "%03d" % time_delta.total_seconds()
scr.move(self.y_offset_map[element.id()], self.ui.max_x - len(time_str))
scr.addstr(time_str)
trigger_str = str(element_state.last_trigger)
scr.move(self.y_offset_map[element.id()], self.ui.max_x - len(time_str) - 3 - len(trigger_str))
scr.addstr(trigger_str)
self.last_draw_time[element.id()] = self.ui.engine.get_local_time_now()
def draw(self, y_offset):
for element in self.elements:
if not self.full_refresh and self.last_draw_time[element.id()] is not None and (self.ui.engine.get_local_time_now() - self.last_draw_time[element.id()]) < self.draw_time_delta:
return
self.draw_element(element)
self.full_refresh = False
class HistoryLayout(Layout):
def __init__(self, width, elements, *args, **kwds):
Layout.__init__(self, *args, **kwds)
self.trigger_map = {}
self.history_map = {}
self.elements = elements
self.history_lengths = {}
self.width = width
for spec in elements:
element, history_length = spec
self.history_lengths[element] = history_length
self.history_map[element] = []
trigger_indices = self.ui.engine.get_element_state(element).get_element().positions().get_trigger_indices(mode=self.ui.engine.options.mode)
self.ui.engine.track(trigger_indices, self)
for trigger_index in trigger_indices:
if trigger_index not in self.trigger_map.keys(): self.trigger_map[trigger_index] = []
self.trigger_map[trigger_index] += [element]
self.changed = False
def __call__(self, *args, **kwds):
self.changed = True
trigger = kwds['trigger']
res, map_res = trigger.check_map(self.trigger_map)
if not res:
raise Exception("%s not in %s" % (trigger, self.trigger_map.keys()))
triggered_elements = map_res
for element in triggered_elements:
element_state = self.ui.engine.get_element_state(element)
if element_state.last_value is None:
return
value_str = element_state.get_element().formatter().format(element_state.last_value)
history = self.history_map[element]
history += [value_str]
diff = len(history) - self.history_lengths[element]
if diff > 0:
self.history_map[element] = history[diff:]
def draw(self, y):
if not self.changed:
return
scr = self.ui.scr
x = 8
n = 0
for spec in self.elements:
element, history_length = spec
history = self.history_map[element]
cnt = 0
scr.move(y + cnt, x)
scr.addstr(element)
cnt += 2
for val in history:
if n == 0:
scr.move(y + cnt, 0)
scr.clrtoeol()
scr.move(y + cnt, x)
scr.addstr(val)
cnt += 1
x += self.width
n += 1
class UserInterface():
def __init__(self, engine, timeout=10):
self.engine = engine
self.timeout = timeout
self.scr = None
self.active_layout = None
self.max_y, self.max_x = 0, 0
self.prev_max_y, self.prev_max_x = 0, 0
self.log_message = ""
self.update_log_message = False
self.last_engine_state = state.STATE_NONE
self.last_active_layout_name = ""
self.element_layout_key_shortcuts = {}
self.element_layouts = []
self.layout_y_offset = 5
def start(self, element_layouts):
self.minor_frame_layout = MinorFrameLayout("raw", self)
self.element_layout_key_shortcuts['`'] = self.minor_frame_layout
self.subcom_layout = SubcomLayout("subcom", self)
self.element_layout_key_shortcuts['~'] = self.subcom_layout
print "Building history layout..."
history_length = 40
self.history_layout = HistoryLayout(name="history", ui=self, width=24, elements=[
('hps_1_temp_supercom', history_length),
('hps_2_temp_supercom', history_length),
('hps_1_tc', history_length),
#('hps_1_tcX', history_length),
('hps_2_tc', history_length),
#('hps_2_tcX', history_length),
('accelerometer', history_length),
]) # MAGIC
self.element_layout_key_shortcuts['h'] = self.history_layout
print "Building layouts..."
for element_layout in element_layouts:
name = element_layout[0]
shortcut = name[0]
if len(element_layout) >= 3:
shortcut = element_layout[2]
elements = []
for element_name in element_layout[1]:
element = self.engine.get_element(element_name, safe=False)
if element is None:
print "The element '%s' was not found for layout '%s'" % (element_name, name)
element = self.engine.get_element(element_name)
elements += [element]
layout = ElementsLayout(elements, name=name, ui=self)
self.element_layouts += [layout]
if shortcut not in self.element_layout_key_shortcuts.keys():
self.element_layout_key_shortcuts[shortcut] = layout
else:
print "ElementLayout '%s' already has shortcut key '%s'" % (self.element_layout_key_shortcuts[shortcut].name, shortcut)
self.scr = curses.initscr()
#curses.start_color() # FIXME
self.scr.timeout(self.timeout) # -1 for blocking
self.scr.keypad(1) # Otherwise app will end when pressing arrow keys
curses.noecho()
#curses.raw()
#curses.cbreak()
#curses.nl / curses.nonl
#self.scr.deleteln()
self.switch_layout(self.minor_frame_layout)
self.update()
#self.scr.refresh() # Done in 'update'
def run(self):
if not self.handle_keys():
return False
self.update()
return True
def log(self, msg):
self.log_message = msg
self.update_log_message = True
def refresh_screen_state(self):
self.max_y, self.max_x = self.scr.getmaxyx()
changed = (self.max_y != self.prev_max_y) or (self.prev_max_x != self.max_x)
self.prev_max_y, self.prev_max_x = self.max_y, self.max_x
return changed
def update(self):
if self.refresh_screen_state():
self.clear()
if self.last_engine_state != self.engine.get_state():
self.scr.move(self.max_y-1, 0)
self.scr.clrtoeol()
self.scr.addstr(state.STATE_TXT[self.engine.get_state()])
self.last_engine_state = self.engine.get_state()
if True:
self.scr.move(0, 0)
#self.scr.clrtoeol() # Don't since current layout name is on RHS
self.scr.addstr("Current time: %s" % (self.engine.get_local_time_now()))
if self.engine.net.last_enqueue_time:
self.scr.move(1, 0)
#self.scr.clrtoeol() # Don't since layout shortcuts are on RHS
self.scr.addstr("Data arrived: %s" % (self.engine.net.last_enqueue_time))
if True:
self.scr.move(2, 0)
self.scr.clrtoeol()
self.scr.addstr("Data lag : %+f" % (self.engine.net.get_time_diff().total_seconds()))
self.scr.move(2, 32)
self.scr.addstr("Data source: %s" % (self.engine.net.get_status_string()))
self.scr.move(3, 0)
self.scr.clrtoeol()
self.scr.addstr("Complete frame count: %d, sync reset count: %d, minor frame discontinuities: %d, minor frame index lock: %s, auto minor frame index: %s" % (
self.engine.deframer.get_complete_frame_count(),
self.engine.deframer.get_sync_reset_count(),
self.engine.frame_tracker.frame_discontinuity_cnt,
self.engine.frame_tracker.ignore_minor_frame_idx,
self.engine.frame_tracker.minor_frame_idx,
))
if self.update_log_message:
self.scr.move(self.max_y-2, 0)
self.scr.clrtoeol()
self.scr.addstr(self.log_message)
self.update_log_message = False
if self.active_layout:
if self.last_active_layout_name != self.active_layout.name:
# Screen should have been cleared when changing layout
self.scr.move(0, self.max_x - len(self.active_layout.name))
self.scr.addstr(self.active_layout.name)
self.last_active_layout_name = self.active_layout.name
self.active_layout.draw(self.layout_y_offset)
self.scr.refresh()
def draw_underlay(self):
shortcuts = "".join(self.element_layout_key_shortcuts.keys())
self.scr.move(1, self.max_x - len(shortcuts))
self.scr.addstr(shortcuts)
def clear(self):
self.scr.erase()
self.last_engine_state = None
self.last_active_layout_name = ""
def switch_layout(self, layout, erase=True):
if self.active_layout:
self.active_layout.deactivate()
if erase:
self.clear()
self.refresh_screen_state()
self.draw_underlay()
self.active_layout = layout
self.active_layout.activate(self.layout_y_offset)
def handle_keys(self):
ch = self.scr.getch()
if ch > -1:
if ch == 27: # ESC (quit)
return False
elif ch >= ord('0') and ch <= ord('9'):
idx = (ch - ord('0') - 1) % 10
if idx < len(self.element_layouts):
self.switch_layout(self.element_layouts[idx])
elif ch >= 0 and ch < 256 and chr(ch) in self.element_layout_key_shortcuts.keys():
self.switch_layout(self.element_layout_key_shortcuts[chr(ch)])
else:
self.scr.move(self.max_y-3, 0)
self.scr.clrtoeol()
self.scr.addstr(str(ch))
return True
def stop(self):
if not self.scr:
return
self.scr.erase()
self.scr.refresh()
curses.nocbreak()
self.scr.keypad(0)
curses.echo()
curses.endwin()
def main():
return 0
if __name__ == '__main__':
main()
| gpl-3.0 | -1,446,360,939,147,403,300 | 30.460733 | 179 | 0.669107 | false |
jwilliamn/handwritten | extraction/FormatModel/TestingCornersAlgorithms.py | 1 | 1349 | import numpy as np
import cv2
import time
import helloworld
def countNonZero(sum, i_j, x_y = None):
    # Query the summed-area table "sum": with a single corner (i, j) return the
    # cumulative non-zero count of A[0..i, 0..j]; with a second corner (x, y)
    # return the count inside the inclusive rectangle (i, j)..(x, y) using
    # inclusion-exclusion on four cumulative sums.
    if x_y is None:
        i = i_j[0]
        j = i_j[1]
        if i<0 or j<0:
            return 0
        return sum[i,j]
    else:
        i = i_j[0]
        j = i_j[1]
        x = x_y[0]
        y = x_y[1]
        T = countNonZero(sum, i_j=x_y)        # everything up to (x, y)
        A = countNonZero(sum, i_j=(i-1,j-1))  # above-left block, subtracted twice below
        P = countNonZero(sum, i_j=(x, j-1))   # columns left of j (rows 0..x)
        Q = countNonZero(sum, i_j=(i-1, y))   # rows above i (columns 0..y)
        return T-P-Q+A
def createSum(A):
    # Build the summed-area table of A: sum[x, y] holds the number of non-zero
    # entries in the rectangle A[0..x, 0..y].
    sum = np.zeros(A.shape)
    rows, cols = A.shape
    for x in range(rows):
        for y in range(cols):
            T = countNonZero(sum, i_j=(x-1, y - 1))
            P = countNonZero(sum, i_j=(x - 1, y))
            Q = countNonZero(sum, i_j=(x, y - 1))
            S = P + Q - T
            if A[x,y] != 0:
                S += 1
            sum[x,y] = S
return sum
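# Illustrative usage sketch (an assumed example, not from the original script):
# once S = createSum(A) has been built, the non-zero count inside any inclusive
# rectangle (top, left)..(bottom, right) of A is a constant-time lookup.
def example_rectangle_count():
    B = np.zeros((5, 5))
    B[1, 1] = 1
    B[3, 2] = 1
    S = createSum(B)
    # non-zero cells in rows 1..3, columns 1..2 -> 2.0
    return countNonZero(S, (1, 1), (3, 2))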
if __name__ == '__main__':
A = np.zeros((4,3))
A[0, 1] = 1
A[1, 2] = 1
A[3, 2] = 1
A[2, 0] = 1
print(A)
S = createSum(A)
print(S)
start_time = time.time()
A = cv2.imread('/home/vmchura/Documents/handwritten/input/pagina1_1.png', 0)
print("--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
S = createSum(A)
print("--- %s seconds ---" % (time.time() - start_time)) | gpl-3.0 | -184,997,578,644,377,950 | 24.961538 | 80 | 0.4596 | false |
Kate-Willett/HadISDH_Marine_Build | EUSTACE_SST_MAT/make_and_full_qc.py | 1 | 45398 | #!/usr/local/sci/bin/python2.7
'''
# KW changed make_and_qc_db.py to make_and_full_qc.py
# KW changed by adding --month1 and --month2
make_and_full_qc.py invoked by typing::
python2.7 make_and_full_qc.py -i configuration.txt --year1 1850 --year2 1855 --month1 1 --month2 12
# KW edited to reflect that code now produces QC'd ascii files rather than setting up a database
This builds an ascii database for the chosen years. The location
of the database and the locations of the climatology files are
all specified in the configuration file.
# KW edited to kick out silly values and not bother passing them through.
# KW edited to only pass through those obs that have an AT, DPT and humidity climatology /standard devation present
# KW edited to read in threshold limits from the configuration.txt file. If these are not provided in the config file then defaults
are used. This is to allow me to play with the thresholds - knowing that obs clims and stdevs are interpolated from monthly so stdevs
at least are likely too low! This also involves changes to base_qc_report, ex.mdsKATE_buddy_check and ex.get_buddy_limits
# KW edited to only read in ships (0-5) and moored buoys (6-8)
'''
import gzip
from netCDF4 import Dataset
import qc
# KW This isn't used here but is called by Extended_IMMA.py
import qc_new_track_check as tc
# KW I don't think this is used
#import qc_buddy_check as bc
import spherical_geometry as sph
from IMMA2 import IMMA # KW I think this is ok for the RECENT R2.5P_ENH (or similar) files too.
import Extended_IMMA as ex
import sys, getopt
import time
# KW Added for debugging
import pdb # pdb.set_trace() or c
# KW Added to convert a string to float
import numpy as np
# KW Added to search for files - prevent falling over
import os.path
def base_qc_report(rep,HardLimit):
'''
Take a marine report and do some base qc on it.
HardLimit: either a float value or None - this is a given maximum limit for clim test
'''
#Basic positional QC
rep.set_qc('POS', 'pos',
qc.position_check(rep.getvar('LAT'),
rep.getvar('LON')))
rep.set_qc('POS', 'date',
qc.date_check(rep.getvar('YR'), rep.getvar('MO'),
rep.getvar('DY'), rep.getvar('HR')))
# KW Test for day 1=day, 0=night
if (rep.get_qc('POS', 'pos') == 0 and
rep.get_qc('POS', 'date') == 0):
rep.set_qc('POS', 'day',
qc.day_test(rep.getvar('YR'),
rep.getvar('MO'),
rep.getvar('DY'),
rep.getvar('HR'),
rep.getvar('LAT'),
rep.getvar('LON')))
else:
rep.set_qc('POS', 'day', 1)
rep.set_qc('POS', 'blklst',
qc.blacklist(rep.getvar('ID'),
rep.getvar('DCK'),
rep.getvar('YR'),
rep.getvar('LAT'),
rep.getvar('LON')))
# KW NEW climatology check that uses the simultaneous climatological stdev (of all obs in pentad climatology) to
# provide a threshold for outlier detection. According to ERA-Interim (?) AT over ocean stdev doesn't vary that much
# but it is higher in the mid- to high lats, especially around the n. hemi coastlines. It is a little higher around the El Nino
# tropical pacific warm pool region. stdev for DPT is higher than for AT - esp in the mid-lats.
    # How many stdevs to use? Looks like average stdev is 1-2. So 4.5*stdev = 4.5 to 9 deg.
# 1 stdev ~68.2%, 2 stdev ~95.4%, 3 stdev 99.7%, 4 stdev ~99.9%, 4.5 stdev >99.9%
# So for the 138196 workable obs from Dec 1973 4.5 stdev < 138 obs-ish
# Lets start with 4.5
# I have added in the climatological stdevs to each rep so this should be easy
# I'm only applying to AT and DPT
# This really needs a minimum and maximum threshold on it to prevent too much removal of very small anomalies and not
# enough removal of ridiculously large ones (>50deg for Dec 1973 which does seem crazy - needs checking with old use of SST clim
# Min: stdev<0.5 are forced to be 0.5 so minimum threshold is 2.25 deg
# Max: (was previously 10 deg - needs to be large enough to account for diurnal cycle vs pentad mean) stdev>3 forced
# to be 3 so max threshold is 13.25
# KW - NEED TO CHANGE THE MAX/MIN PERMITTED SD AS WE'RE CUTTING OFF ABRUPTLY, ESP IF WE CONTINUE TO USE ERA
# PROBABLY GO FOR MIN = 1 (4.5 deg) and MAX = 4 (18 deg)? Don't want to let too much rubbish in
#SST base QC
# KW Could noval = 0 be a value that is present in IMMA but actually a missing data indicator e.g. -99.9 or 99.9?
rep.set_qc('SST', 'noval', qc.value_check(rep.getvar('SST')))
rep.set_qc('SST', 'freez',
qc.sst_freeze_check(rep.getvar('SST'), 0.0))
rep.set_qc('SST', 'clim',
qc.climatology_check(rep.getvar('SST'), rep.getnorm('SST'), 8.0))
rep.set_qc('SST', 'nonorm', qc.no_normal_check(rep.getnorm('SST')))
#MAT base QC
# KW Could noval = 0 be a value that is present in IMMA but actually a missing data indicator e.g. -99.9 or 99.9?
rep.set_qc('AT', 'noval', qc.value_check(rep.getvar('AT')))
# KW commented out old clim test and trying new one that uses 4.5*stdev as the threshold with minimum allowed limit and test for
# no stdev found
# rep.set_qc('AT', 'clim',
# qc.climatology_check(rep.getvar('AT'), rep.getnorm('AT'), 10.0))
if (qc.value_check(rep.getstdev('AT')) == 0):
# KW check for HardLimit or set to default of 4.5
if HardLimit != None:
MyMulti = HardLimit
else:
MyMulti = 4.5
if (rep.getstdev('AT') > 4.):
atlimit = MyMulti*4
elif ((rep.getstdev('AT') >= 1.) & (rep.getstdev('AT') <= 4.)):
atlimit = MyMulti*rep.getstdev('AT')
else:
atlimit = MyMulti*1.
else:
atlimit = 10.
rep.set_qc('AT', 'clim',
qc.climatology_check(rep.getvar('AT'), rep.getnorm('AT'), atlimit))
#print('CLIMTEST: ',rep.getvar('AT'), rep.getnorm('AT'),rep.getstdev('AT'),qc.climatology_check(rep.getvar('AT'), rep.getnorm('AT'), HardLimit,dptlimit))
#pdb.set_trace()
rep.set_qc('AT', 'nonorm', qc.no_normal_check(rep.getnorm('AT')))
# KW Added QC for DPT
# DPT base QC
rep.set_qc('DPT', 'noval', qc.value_check(rep.getvar('DPT')))
# KW commented out old clim test and trying new one that uses 4.5*stdev as the threshold with minimum allowed limit and test for
# no stdev found
# rep.set_qc('DPT', 'clim',
# qc.climatology_check(rep.getvar('DPT'), rep.getnorm('DPT'), 10.0))
if (qc.value_check(rep.getstdev('DPT')) == 0):
# KW check for HardLimit or set to default of 4.5
if HardLimit != None:
MyMulti = HardLimit
else:
MyMulti = 4.5
if (rep.getstdev('DPT') > 4.):
dptlimit = MyMulti*4. # greater than clim+/-10deg (13.5 deg)
elif ((rep.getstdev('DPT') >= 1.) & (rep.getstdev('DPT') <= 4)):
dptlimit = MyMulti*rep.getstdev('DPT')
else:
dptlimit = MyMulti*1. # less than clim+/- 10deg (2.25 deg)
else:
dptlimit = 10.
rep.set_qc('DPT', 'clim',
qc.climatology_check(rep.getvar('DPT'), rep.getnorm('DPT'), dptlimit))
#print('CLIMTEST: ',rep.getvar('DPT'), rep.getnorm('DPT'),rep.getstdev('DPT'),qc.climatology_check(rep.getvar('DPT'), rep.getnorm('DPT'), HardLimit,dptlimit))
#pdb.set_trace()
rep.set_qc('DPT', 'nonorm', qc.no_normal_check(rep.getnorm('DPT')))
# KW New QC tests specifically for humidity
rep.set_qc('DPT', 'ssat', qc.supersat_check(rep.getvar('DPT'),rep.getvar('AT')))
return rep
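# Minimal illustrative sketch (an assumed helper for clarity, not part of the
# QC suite itself): the AT/DPT outlier limit built above is the climatological
# stdev clamped to the 1-4 deg range, scaled by HardLimit (default 4.5), with
# a 10 deg fallback when no stdev value is available.
def example_outlier_limit(stdev, multiplier=4.5):
    if stdev is None:
        return 10.
    return multiplier * min(max(stdev, 1.), 4.)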
def process_bad_id_file(bad_id_file):
'''
Read in each entry in the bad id file and if it is shorter than 9 characters
pad with white space at the end of the string
'''
idfile = open(bad_id_file, 'r')
ids_to_exclude = []
for line in idfile:
line = line.rstrip()
while len(line) < 9:
line = line+' '
        if line != '         ':
ids_to_exclude.append(line)
idfile.close()
return ids_to_exclude
def split_generic_callsign(invoyage):
'''
Prototype function to identify when a callsign is being used by multiple ships
and to split the observations into pseudo IDs that each represents a different ship
:param invoyage: a voyage object containing marine reports
:type invoyage: Voyage
:return: list of separate Voyages that the input Voyage has been split into.
:return type: Voyage
The function works by comparing consecutive observations in the input lists
and calculating the implied speed. If it is greater than 40 knots, a new ship
is generated. Each subsequent observation is assigned to the closest ship
unless the speed exceed 40 knots. If it does, a new ship is generated.
'''
knots_conversion = 0.539957
if len(invoyage) <= 0:
return []
result = [1]
n_ships = 1
outvoyages = [ex.Voyage()]
outvoyages[0].add_report(invoyage.getrep(0))
ntimes = len(invoyage)
if ntimes > 1:
for i in range(1, ntimes):
#calculate speeds from last position for each ship
speeds = []
distances = []
for j in range(0, n_ships):
last_rep = outvoyages[j].last_rep()
speed, distance, course, timediff = invoyage.getrep(i)-last_rep
#calc predicted position and distance of new ob from predicted position
pred_lat, pred_lon = outvoyages[j].predict_next_point(timediff)
dist = sph.sphere_distance(invoyage.getvar(i, 'LAT'),
invoyage.getvar(i, 'LON'),
pred_lat,
pred_lon)
distances.append(dist)
if timediff != 0:
speeds.append(speed)
else:
speeds.append(10000.)
#if all speeds exceed 40 knots then create new ship
if min(speeds) > 40.0 / knots_conversion:
n_ships = n_ships + 1
voy = ex.Voyage()
voy.add_report(invoyage.getrep(i))
outvoyages.append(voy)
result.append(n_ships)
#else ob is assigned to ship whose predicted location is closest to the ob
else:
winner = distances.index(min(distances))
outvoyages[winner].add_report(invoyage.getrep(i))
result.append(winner+1)
return outvoyages
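# Worked example of the 40-knot threshold above (assuming, as the Voyage
# subtraction implies, that speeds are returned in km/h):
#     40.0 knots / 0.539957 (knots per km/h) ~= 74.1 km/h
# so consecutive reports under one callsign implying a ship speed above
# roughly 74 km/h are split onto a new pseudo-ship.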
def get_clim(rep, clim):
'''
Get the climatological value for this particular observation
KW Also now used to pull out the climatological stdev for the ob
:param rep: a MarineReport
:param clim: a masked array containing the climatological averages
:type rep: MarineReport
:type clim: numpy array
'''
try:
rep_clim = qc.get_sst(rep.getvar('LAT'),
rep.getvar('LON'),
rep.getvar('MO'),
rep.getvar('DY'),
clim)
rep_clim = float(rep_clim)
except:
rep_clim = None
return rep_clim
def read_climatology(infile, var):
'''
Read in the climatology for variable var from infile
KW Also used to read in the climatological stdevs
KW WHAT HAPPENS IF THERE IS MISSING DATA? THIS NEEDS TO BE IDENTIFIED !!!
:param infile: filename of a netcdf file
:param var: the variable name to be extracted from the netcdf file
:type infile: string
:type var: string
'''
climatology = Dataset(infile)
return climatology.variables[var][:]
def main(argv):
'''
This program builds the marine data base which will be used to store the subset of ICOADS used in QC and
other data processing. The current version reads in IMMA1 data from ICOADS.2.5.1 and the UID is used as the
primary key for the data base so that it can be easily matched to individual obs if need be.
#KW added para
The database is now just a set of ascii files for each year/month. Later it may be the SQL database.
The first step of the process is to read in the SST and MAT climatologies from file. These are 1degree latitude
by 1 degree longitude by 73 pentad fields in NetCDF format. The data are read into numpy arrays.
Next a connection is made to the data base, which may or may not already exist. If it does not exist, a database
will be created.
The program then loops over all years and months and DROPs existing tables for each year if they already exist and
then recreates them. It then loops over all months in the year, opens the appropriate IMMA file and reads in
the data one observation at a time.
'''
print '########################'
print 'Running make_and_full_qc'
print '########################'
inputfile = 'configuration.txt'
month1 = 1
month2 = 1
year1 = 1880
year2 = 1880
# KW Querying second instance of inputfile - I have commented this out for now
# inputfile = 'configuration_local.txt'
try:
opts, args = getopt.getopt(argv, "hi:",
["ifile=",
"year1=",
"year2=",
"month1=",
"month2="])
except getopt.GetoptError:
# KW changed Make_DB.py to make_and_full_qc.py
print 'Usage make_and_full_qc.py -i <configuration_file> '+\
'--year1 <start year> --year2 <end year> '+\
'--month1 <start month> --month2 <end month>'
sys.exit(2)
inputfile, year1, year2, month1, month2 = qc.get_arguments(opts)
print 'Input file is ', inputfile
print 'Running from ', year1, ' to ', year2
print ''
config = qc.get_config(inputfile)
# KW Added a 'switch' to tell the code whether to run in HadISDH only (HadISDHSwitch == True) mode or
# full mode (HadISDHSwitch == False)
HadISDHSwitch = config['HadISDHSwitch']
sst_climatology_file = config['SST_climatology']
nmat_climatology_file = config['MAT_climatology']
# KW Added climatology files for the humidity variables
at_climatology_file = config['AT_climatology']
dpt_climatology_file = config['DPT_climatology']
shu_climatology_file = config['SHU_climatology']
vap_climatology_file = config['VAP_climatology']
crh_climatology_file = config['CRH_climatology']
cwb_climatology_file = config['CWB_climatology']
dpd_climatology_file = config['DPD_climatology']
# KW Added climatology file for the SLP which is needed if no SLP ob exists, or if it has failed qc - or if we choose to derive humidity using climatological P (which we have)
slp_climatology_file = config['SLP_climatology']
icoads_dir = config['ICOADS_dir']
#KW Added the 'recent' ICOADS dir for files 2015+
recent_icoads_dir = config['RECENT_ICOADS_dir']
bad_id_file = config['IDs_to_exclude']
# KW added an item for the database dir to write out the QC'd ascii data to - hijacking SQL data_base_dir for now
data_base_dir = config['data_base_dir']
# KW added an item as a suffix for the output file name to note which iteration we're on
output_suffix = config['output_suffix']
# KW Noting this is set to read the OLD SST stdevs - nothing reads in the newer OSTIA one yet.
sst_stdev_climatology_file = config['Old_SST_stdev_climatology']
sst_stdev_1_file = config['SST_buddy_one_box_to_buddy_avg']
sst_stdev_2_file = config['SST_buddy_one_ob_to_box_avg']
sst_stdev_3_file = config['SST_buddy_avg_sampling']
# KW added standard deviation files for AT and DPT - for MDSKate_buddy_check
at_stdev_climatology_file = config['AT_stdev_climatology']
dpt_stdev_climatology_file = config['DPT_stdev_climatology']
# KW Added a look for hardwired limits passed through the config file or set to None
if ('HardLimits' in config):
HardLimit = np.float(config['HardLimits'])
else:
HardLimit = None
print "This is the provided HardLimit: ",HardLimit
#pdb.set_trace()
print 'SST climatology =', sst_climatology_file
print 'NMAT climatology =', nmat_climatology_file
# KW Added climatology files for the humidity variables
print 'DPT climatology =', dpt_climatology_file
print 'SHU climatology =', shu_climatology_file
print 'VAP climatology =', vap_climatology_file
print 'CRH climatology =', crh_climatology_file
print 'CWB climatology =', cwb_climatology_file
print 'DPD climatology =', dpd_climatology_file
## KW Added climatology files for SLP for calculation of humidity variables if no good quality SLP ob exists
print 'SLP climatology =', slp_climatology_file
print 'ICOADS directory =', icoads_dir
# KW added 'recent' icoads dir
print 'RECENT ICOADS directory =', recent_icoads_dir
print 'List of bad IDs =', bad_id_file
# KW added an item for the database dir to write out the QC'd ascii data to - hijacking SQL data_base_dir for now
print 'QCd Database directory =', data_base_dir
print 'QCd File Suffix =', output_suffix
print ''
ids_to_exclude = process_bad_id_file(bad_id_file)
#read in climatology files
climsst = read_climatology(sst_climatology_file, 'sst')
climnmat = read_climatology(nmat_climatology_file, 'nmat')
# KW Added climatology read in files for the humidity variables
climat = read_climatology(at_climatology_file, 't2m_clims')
climdpt = read_climatology(dpt_climatology_file, 'td2m_clims')
climshu = read_climatology(shu_climatology_file, 'q2m_clims')
climvap = read_climatology(vap_climatology_file, 'e2m_clims')
climcrh = read_climatology(crh_climatology_file, 'rh2m_clims')
climcwb = read_climatology(cwb_climatology_file, 'tw2m_clims')
climdpd = read_climatology(dpd_climatology_file, 'dpd2m_clims')
    ## KW Added climatology read in files for SLP for calculating humidity variables if no SLP value exists
climslp = read_climatology(slp_climatology_file, 'p2m_clims')
# KW Note that if this points to OLD_SST_stdev_climatology then it is a 73,180,360 array whereas the SST_stdev_climatology file is just 180,360
sst_pentad_stdev = read_climatology(sst_stdev_climatology_file, 'sst')
sst_stdev_1 = read_climatology(sst_stdev_1_file, 'sst')
sst_stdev_2 = read_climatology(sst_stdev_2_file, 'sst')
sst_stdev_3 = read_climatology(sst_stdev_3_file, 'sst')
# KW added standard deviation files for AT and DPT - for MDSKate_buddy_check
at_pentad_stdev = read_climatology(at_stdev_climatology_file, 't2m_stdevs')
dpt_pentad_stdev = read_climatology(dpt_stdev_climatology_file, 'td2m_stdevs')
print 'Read climatology files'
tim00 = time.time()
for year, month in qc.year_month_gen(year1, month1, year2, month2):
tim0 = time.time()
print year, month
last_year, last_month = qc.last_month_was(year, month)
next_year, next_month = qc.next_month_is(year, month)
if last_year < 1850:
last_year = 1850 # KW don't understand why last year forced to be 1850 yet
last_month = 1
print last_year, last_month, next_year, next_month
reps = ex.Deck()
count = 0
# KW This takes a long time to read in each year/month and process
# For every candidate year/month the year/month before and after are also read in
# Can we store the candidate year/month and following year/month for the next loop?
# Hopefully there will be enough memory on spice
	# HOWEVER - IF WE RUN MANY YEARS IN PARALLEL THEN OK TO READ IN EACH TIME
for readyear, readmonth in qc.year_month_gen(last_year,
last_month,
next_year,
next_month):
print readyear, readmonth
syr = str(readyear)
smn = "%02d" % (readmonth)
# KW THIS BIT IS FOR 2.5.0/1
# filename = icoads_dir+'/R2.5.1.'+syr+'.'+smn+'.gz'
# KW FOUND A BUG - changed 'year' to 'readyear' below because it was trying to
# read R2.5.2.2007.12.gz because 'year'=2008, 'month'=1
# KW Now added a catch for 'recent' years - at present this is anything from 2015 onwards - data only available in IMMA (not IMMA2) format - no UID!
# if ((readyear > 2007) & (readyear < 2015)):
# filename = icoads_dir+'/R2.5.2.'+syr+'.'+smn+'.gz'
# if (readyear >= 2015):
# filename = recent_icoads_dir+'/IMMA.'+syr+'.'+smn+'.gz'
# KW THIS BIT IS FOR 3.0.0/1
filename = icoads_dir+'/IMMA1_R3.0.0_'+syr+'-'+smn+'.gz'
if (readyear >= 2015):
filename = recent_icoads_dir+'/IMMA1_R3.0.1_'+syr+'-'+smn+'.gz'
# KW Added a catch to stop the program trying to read in files that aren't yet there - if it can't find a file but its not THE candidate month then
# its ok to proceed - just won't be as good a buddy checker - a 'provisional' month!!!
# Requires import os.path so I have added this at the top
# Test if file is not there and its not a candidate file - then continue without reading in that file - or it will crash when it tries to read in the file that isn't there
# in which case - something is up!
if ((os.path.exists(filename) == False) & (readyear == next_year) & (readmonth == next_month)):
continue
icoads_file = gzip.open(filename,"r")
# KW Noted that this creates an object of whole month of IMMA data separated into all available parameters from all available attachments
# The rec.read bit later could be speeded up by ignoring the attachments we are not interested in in the first place?
# The rec object has a .data dictionary of all variables (see IMMA2.py for variable IDs/keys
rec = IMMA()
EOF = False
while not(EOF):
                #need to wrap the read in an exception catching thingy
                #because there are some IMMA records which contain control
#characters
try:
result = rec.read(icoads_file)
if result == None:
EOF = True
# KW are we sure this isn't doing anything silly later when rec is overwritten with a new rec - could
# this overwrite ids_to_exclude[0]?
rec.data['ID'] = ids_to_exclude[0]
except:
rec.data['ID'] = ids_to_exclude[0]
if not(rec.data['ID'] in ids_to_exclude):
                    # KW strip everything out of the IMMA record except what we (Kate, Robert and John) need
# KW this should work for both IMMA and IMMA1 e.g. C4 (IMMA) and C7 (IMMA1) use same 'key's so it 'should' find
# them because both are encoded in IMMA2.py
keys = []
for key in rec.data:
keys.append(key)
for key in keys:
# KW Added quite a few things in here - assume these don't have to be all from attachment 0 because UID isn't
# Assume they don't have to be in a particular order either
# I've put them in the order they appear in the attachments
# See: RequiredIMMAColumnsforHadISDH.xlsx
# Only a few of these will be written out but they are useful in the QC and bias adjustment process
# May remove some of these later if they are not useful - to save time/memory
# if not(key in ['YR','MO','DY','HR','LAT','LON',
# 'SST','AT','DCK','ID','PT','SI',
# 'SIM','DS','VS','SLP','UID','SID']):
if not(key in ['YR','MO','DY','HR','LAT','LON',
'DS','VS','II','ID','C1',
'DI','D','WI','W','VI','VV','SLP',
'IT','AT','WBTI','WBT','DPTI','DPT','SI','SST',
'DCK','SID','PT','DUPS',
'COR','TOB','TOT','EOT','TOH','EOH',
'SIM','LOV','HOP','HOT','HOB','HOA','SMF',
'UID']):
if key in rec.data: del rec.data[key]
# KW So I've noticed that if one of the listed keys above isn't in the ob then a data['key'] isn't
# set up (makes sense!) so when I come to print them later it all goes to pot
# So, I loop through the non-core0 keys here to add blank keys where they are missing
# KW Added 'UID' to this list because it is not present in the RECENT_ICOADS (2015+)
for inkey in ['DUPS','COR','TOB','TOT','EOT',
'TOH','EOH','SIM','LOV','HOP','HOT','HOB','HOA','SMF','UID']:
if not(inkey in keys):
#print("Missing key: ",inkey)
rec.data[inkey] = None
rep = ex.MarineReport(rec)
del rec
#************HadISDH ONLY*******************************
# KW Added a catch here to check the platform type and whether there is both a T (AT) and DPT present.
# Only keep the ob if it is from a ship (0,1,2,3,4,5) or moored platform/buoy (6,8,9,10,15) and has
# AT and DPT present.
# This may not be desirable for a full run but should save time/memory for HadISDH
# If HadISDHSwitch == True then the ob needs to pass the test else all obs are processed
# No QC performed yet so cannot call get_qc - qc.value_check returns 0 if present and 1 if noval
# Previously I had also pulled through PT=14 but this can be a coastal or island station - so not what we want.
                # KW Oct 2016 - I've now decided that future runs should NOT include any platforms. We don't have height
# info and they can vary from <10 to >200m so its just too screwy
# if (not (HadISDHSwitch)) | ((rep.data['PT'] in [0,1,2,3,4,5,6,8,9,10,15]) &
if (not (HadISDHSwitch)) | ((rep.data['PT'] in [0,1,2,3,4,5,6,8]) &
(qc.value_check(rep.getvar('AT')) == 0) &
(qc.value_check(rep.getvar('DPT')) == 0)):
# KW TESTED: WORKS IF VALUES ARE BLANK AT LEAST
# KW CHECK THAT THIS KICKS OUT OBS WITH REPORTED MISSING VALUES (e.g. -99.9 or 99.9) FOR AT or DPT
#*******************************************************
# KW Call my rep.setvar routine that I built into the MarineReport in Extended_IMMA.py
# Use this to add blank var containers for the humidity variables that are calculated
# later
rep.setvar(['SHU','VAP','CRH','CWB','DPD'])
# KW Get climatologies for slp to calculate humidity values if no good quality qc ob exists
rep_slp_clim = get_clim(rep, climslp)
#print('SLP: ',rep_slp_clim)
#if (count == 10):
# pdb.set_trace()
rep.add_climate_variable('SLP', rep_slp_clim)
# KW Calculate humidity variables here - so we can then kick out anything really silly e.g. RH>150
# Very silly values can cause longer line lengths at output which is an extra problem for post processing
# For the longer term these could be set to missing but we just want to focus on 'good' humidity obs for now
# Use my new routine as part of the Extended_IMMA MarineReport class rep.calcvar()
# This routine returns values as None if there is no climslp or if RH is < 0 or > 150.
rep.calcvar(['SHU','VAP','CRH','CWB','DPD'])
# Now we have the checker for very silly values - which will just break the loop
# No RH - means that there is either an AT or DPT missing
# RH must be between 0 and 150
# AT must be between -80 and 65
# DPT must be between -80 and 65
# SHU must be greater than 0.0
                    # Inadvertently, this kicks out any ob for which no climatology is available - the ones that would later fail pos or date checks
# Later on - we may change this to just set the humidity values to missing rather than delete the ob. SST might be ok after all.
if (rep.getvar('CRH') == None):
# print('Found a SILLINESS ',rep.getvar('AT'),rep.getvar('DPT'))
# pdb.set_trace()
# delete the rep to keep things tidy
del rep
# create a new rec because we're skipping the end of the WHILE loop
rec = IMMA()
continue
if ((rep.getvar('CRH') <= 0.0) | (rep.getvar('CRH') > 150.0)):
# print('Found a SILLINESS ',rep.getvar('AT'),rep.getvar('DPT'))
# pdb.set_trace()
# delete the rep to keep things tidy
del rep
# create a new rec because we're skipping the end of the WHILE loop
rec = IMMA()
continue
if ((rep.getvar('AT') < -80.) | (rep.getvar('AT') > 65.)):
# print('Found a SILLINESS ',rep.getvar('AT'),rep.getvar('DPT'))
# pdb.set_trace()
# delete the rep to keep things tidy
del rep
# create a new rec because we're skipping the end of the WHILE loop
rec = IMMA()
continue
if ((rep.getvar('DPT') < -80.) | (rep.getvar('DPT') > 65.)):
# print('Found a SILLINESS ',rep.getvar('AT'),rep.getvar('DPT'))
# pdb.set_trace()
# delete the rep to keep things tidy
del rep
# create a new rec because we're skipping the end of the WHILE loop
rec = IMMA()
continue
if (rep.getvar('SHU') <= 0.0):
# print('Found a SILLINESS ',rep.getvar('AT'),rep.getvar('DPT'))
# pdb.set_trace()
# delete the rep to keep things tidy
del rep
# create a new rec because we're skipping the end of the WHILE loop
rec = IMMA()
continue
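            # For illustration only (not used in this script), the gross-error
            # checks above amount to rejecting any ob where:
            #   (rep.getvar('CRH') == None) |
            #   (rep.getvar('CRH') <= 0.0) | (rep.getvar('CRH') > 150.0) |
            #   (rep.getvar('AT') < -80.) | (rep.getvar('AT') > 65.) |
            #   (rep.getvar('DPT') < -80.) | (rep.getvar('DPT') > 65.) |
            #   (rep.getvar('SHU') <= 0.0)
            # The checks are kept as separate blocks so each failure mode can be
            # inspected on its own while debugging.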
# Get climatologies for all variables (for outlier test and anomaly creation [done in buddy check and for final print out] - if AT or DPT are missing (None) then do not carry on processing that variable
# If we're using OBSclims then there are missing data which will be returned as None (NOT A STRING!!!)
# KW Added bit to find and store climatological stdev for AT and DPT - for outlier test
rep_sst_clim = get_clim(rep, climsst)
rep.add_climate_variable('SST', rep_sst_clim)
# KW Set to read in ERA (or OBS+ERA) clim file for AT (not NMAT)
# rep_mat_clim = get_clim(rep, climnmat)
rep_mat_clim = get_clim(rep, climat)
rep_mat_stdev = get_clim(rep, at_pentad_stdev)
#print(rep_mat_clim,rep_mat_stdev)
#pdb.set_trace()
## KW added to test clim value pulled out
# print(rep.getvar('UID'),rep.getvar('AT'),rep_mat_clim,rep.getnorm('AT'))
# print(rep.getvar('UID'),rep.getvar('AT'),rep_mat_stdev,rep.getstdev('AT'))
# if (count == 10):
# pdb.set_trace()
## KW This seems to be pulling out the correct climatological value
if ((rep_mat_clim == None) | (rep_mat_stdev == None)):
del rep
# create a new rec because we're skipping the end of the WHILE loop
rec = IMMA()
continue
else:
rep.add_climate_variable('AT', rep_mat_clim)
rep.add_stdev_variable('AT', rep_mat_stdev)
rep_dpt_clim = get_clim(rep, climdpt)
rep_dpt_stdev = get_clim(rep, dpt_pentad_stdev)
if ((rep_dpt_clim == None) | (rep_dpt_stdev == None)):
del rep
rec = IMMA()
continue
else:
rep.add_climate_variable('DPT', rep_dpt_clim)
rep.add_stdev_variable('DPT', rep_dpt_stdev)
rep_shu_clim = get_clim(rep, climshu)
            if (rep_shu_clim == None) : # if there is no SHU then either an AT or DPT would be missing I think so the loop should already have stopped
del rep
rec = IMMA()
continue
else:
rep.add_climate_variable('SHU', rep_shu_clim)
rep_vap_clim = get_clim(rep, climvap)
            if (rep_vap_clim == None) : # if there is no VAP then either an AT or DPT would be missing I think so the loop should already have stopped
del rep
rec = IMMA()
continue
else:
rep.add_climate_variable('VAP', rep_vap_clim)
rep_crh_clim = get_clim(rep, climcrh)
            if (rep_crh_clim == None) : # if there is no CRH then either an AT or DPT would be missing I think so the loop should already have stopped
del rep
rec = IMMA()
continue
else:
rep.add_climate_variable('CRH', rep_crh_clim)
rep_cwb_clim = get_clim(rep, climcwb)
            if (rep_cwb_clim == None) : # if there is no CWB then either an AT or DPT would be missing I think so the loop should already have stopped
del rep
rec = IMMA()
continue
else:
rep.add_climate_variable('CWB', rep_cwb_clim)
rep_dpd_clim = get_clim(rep, climdpd)
            if (rep_dpd_clim == None) : # if there is no DPD then either an AT or DPT would be missing I think so the loop should already have stopped
del rep
rec = IMMA()
continue
else:
rep.add_climate_variable('DPD', rep_dpd_clim)
#Deck 701 has a whole bunch of otherwise good obs with missing Hours.
#Set to 0000UTC and recalculate the ob time
if (rep.getvar('DCK') == 701 and
rep.getvar('YR') < 1860 and
rep.getvar('HR') == None):
rep.data['HR'] = 0
rep.calculate_dt()
# KW Added a HardLimit variable that has to be passed to the base_qc_report
#rep = base_qc_report(rep)
rep = base_qc_report(rep,HardLimit)
# print(rep.getvar('ID'),rep.getvar('AT'),rep.getvar('DPT'),rep.getvar('SHU'),rep.getvar('CRH'),rep.getvar('VAP'))
# pdb.set_trace()
reps.append(rep)
count += 1
rec = IMMA()
icoads_file.close()
tim1 = time.time()
print count, " obs read and base QC ", tim1-tim0
#filter the obs into passes and fails of basic positional QC
    # KW Notes that this uses the month before and after to apply track check - and so actually spends time applying
# track check to the month before and month after too, which will then be ignored and redone later, with its following month
# Is there scope to save effort here by only checking the candidate month while still passing the surrounding months for info
reps.sort()
filt = ex.QC_filter()
filt.add_qc_filter('POS', 'date', 0)
filt.add_qc_filter('POS', 'pos', 0)
filt.add_qc_filter('POS', 'blklst', 0)
passes, reps = filt.split_reports(reps)
passes.sort()
tim2 = time.time()
print "obs filtered and sorted in ", tim2-tim1, len(reps)+len(passes)
# KW So in here we could put some kind of parsing loop to say that if you are looping through more than one month
# then you could save the candidate and previous month
# KW ALSO NOW ONLY CARRY ON WITH THOSE OBS THAT PASS BASE QC (date, pos, blacklist)
# KW commented out the following:
##all fails pass track check
# reps.set_qc('POS', 'trk', 0)
# reps.set_qc('POS', 'few', 0)
# reps.set_qc('SST', 'rep', 0)
# reps.set_qc('AT', 'rep', 0)
## KW Added for DPT
# reps.set_qc('DPT', 'rep', 0)
# reps.set_qc('DPT', 'repsat', 0)
# KW End of commenting out
# KW now clear and reset reps so that it gets overwritten and filled with only passes
del reps
reps = ex.Deck()
#track check the passes one ship at a time
for one_ship in passes.get_one_ship_at_a_time():
one_ship.track_check()
# KW I don't think we need to spend time doing this for SST so have commented out
# one_ship.find_repeated_values(threshold=0.7, intype='SST')
        # KW For AT and DPT this procedure now also looks at the proportion of obs in a track (>20 obs - same as rep value check) that have .0 precision
# Where >=50% obs end in .0 the ATround or DPTround flag is set to 1
one_ship.find_repeated_values(threshold=0.7, intype='AT')
# KW Added for DPT
        # KW For DPT this QC procedure now also searches for persistent streaks of 100% RH (AT == DPT) and flags repsat
one_ship.find_repeated_values(threshold=0.7, intype='DPT')
for rep in one_ship.rep_feed():
rep.reset_ext()
reps.append(rep)
del passes
reps.sort()
tim3 = time.time()
print "obs track checked in ", tim3-tim2, len(reps)
#*******************************
# KW Commented out for now to save time on debug
##SST buddy check
    ## KW Notes that this uses the month before and after to apply track check - and so actually spends time applying
## track check to the month before and month after too, which will then be ignored and redone later, with its following month
## Is there scope to save effort here by only checking the candidate month while still passing the surrounding months for info
# filt = ex.QC_filter()
# filt.add_qc_filter('POS', 'date', 0)
# filt.add_qc_filter('POS', 'pos', 0)
# filt.add_qc_filter('POS', 'blklst', 0)
# filt.add_qc_filter('POS', 'trk', 0)
# filt.add_qc_filter('SST', 'noval', 0)
# filt.add_qc_filter('SST', 'freez', 0)
# filt.add_qc_filter('SST', 'clim', 0)
# filt.add_qc_filter('SST', 'nonorm', 0)
#
## KW Notes splitting marine obs into passes and fails
# passes, reps = filt.split_reports(reps)
#
## KW Thinks this only buddy checks those obs that pass the filter of QC above
# passes.bayesian_buddy_check('SST', sst_stdev_1, sst_stdev_2, sst_stdev_3)
# passes.mds_buddy_check('SST', sst_pentad_stdev)
#
#******************************************
    ## KW Thinks all fails obs that do not pass the QC filter above are not buddy checked - they are set to 0
## which means pass but should not be used later because they fail one of the other basic checks
# reps.set_qc('SST', 'bbud', 0)
# reps.set_qc('SST', 'bud', 0)
#****************************************
# KW Commented out to save time
# for i in range(0, len(passes)):
# rep = passes.pop(0)
# reps.append(rep)
#
# del passes
#
# reps.sort()
#****************************************
tim4 = time.time()
print "obs SST buddy checked in ", tim4-tim3, len(reps)
#NMAT buddy check
    # KW Notes that this uses the month before and after to apply track check - and so actually spends time applying
# track check to the month before and month after too, which will then be ignored and redone later, with its following month
# Is there scope to save effort here by only checking the candidate month while still passing the surrounding months for info?
    # For now I've made mdsKATE_buddy_check which only applies the actual check to the candidate month and year. It also uses the actual pentad
# for that time of year rather than the average pentad stdev.
filt = ex.QC_filter()
## KW Commented out date/pos/blklst as these have already been filtered out
# filt.add_qc_filter('POS', 'date', 0)
# filt.add_qc_filter('POS', 'pos', 0)
# filt.add_qc_filter('POS', 'blklst', 0)
filt.add_qc_filter('POS', 'trk', 0)
    # KW commented out because we want to try to use all obs for AT and DPT
# filt.add_qc_filter('POS', 'day', 0)
# KW Commented out because we've already filtered so that only present obs are retained
# filt.add_qc_filter('AT', 'noval', 0)
filt.add_qc_filter('AT', 'clim', 0)
filt.add_qc_filter('AT', 'nonorm', 0)
# KW Notes that 'reps' are those obs that have failed one of the tests in the filter above
passes, reps = filt.split_reports(reps)
    # KW Notes that passes is an object containing a month's worth of marine obs that pass (flag=0) for all above filters
# Both the bayesian buddy check and the mds buddy check test for distance to neighbours in space and time and flag
# with a 1 where it is too great/fails.
# KW NOT GOING TO APPLY BAYESIAN BUDDY CHECK BECAUSE WE CAN'T USE IT FOR DPT AND
# ITS EXPERIMENTAL???
# passes.bayesian_buddy_check('AT', sst_stdev_1, sst_stdev_2, sst_stdev_3)
# KW Commented out original mds_buddy_check to use mdsKATE_buddy_check instead (like DPT) which uses the seasonal stdev
# rather than the average and only applies buddy check to candidate month
# ALSO = we now use clim T stdevs from ERA (will eventually be obs+ERA combo?)
# passes.mds_buddy_check('AT', sst_pentad_stdev)
# KW Added a HardLimit variable that has to be passed to mdsKATE_buddy_check for the stdev multiplier
passes.mdsKATE_buddy_check('AT', at_pentad_stdev, year, month, HardLimit)
    # KW - all fails (reps) are set to have a flag of 0 which means to pass the buddy checks because there is no point in spending
# further time buddy checking them, same as for track checks
# KW NOT GOING TO APPLY BAYESIAN BUDDY CHECK BECAUSE WE CAN'T USE IT FOR DPT AND
# ITS EXPERIMENTAL???
# reps.set_qc('AT', 'bbud', 8)
reps.set_qc('AT', 'bud', 8)
for i in range(0, len(passes)):
rep = passes.pop(0)
reps.append(rep)
del passes
reps.sort()
tim5 = time.time()
print "obs MAT buddy checked in ", tim5-tim4, len(reps)
    # Don't think we need to set - if it's not set it will be 9!
## KW Added buddy check for DPT - NOT RUNNING BAYESIAN BECAUSE WE DON'T HAVE APPROPRIATE DATA - SET FLAG TO 8!
# reps.set_qc('DPT', 'bbud', 8)
#DPT buddy check
    # KW Notes that this uses the month before and after to apply track check - and so actually spends time applying
# track check to the month before and month after too, which will then be ignored and redone later, with its following month
# Is there scope to save effort here by only checking the candidate month while still passing the surrounding months for info
filt = ex.QC_filter()
# KW commented out date, pos, blklst because we've already got rid of those that fail these
# filt.add_qc_filter('POS', 'date', 0)
# filt.add_qc_filter('POS', 'pos', 0)
# filt.add_qc_filter('POS', 'blklst', 0)
filt.add_qc_filter('POS', 'trk', 0)
# KW Commented out day because we want to try to use all obs for DPT and AT
# filt.add_qc_filter('POS', 'day', 0) # Hmmm so only checking the nightime obs
# KW Commented out because we've already filtered so that only present obs are retained
# filt.add_qc_filter('DPT', 'noval', 0)
filt.add_qc_filter('DPT', 'clim', 0)
# KW commented out nonorm because there will always be a norm (if using ERA or combo ERA+obs)
# filt.add_qc_filter('DPT', 'nonorm', 0) # KW could change this to ERANorm when we have actual climatologies from data - more useful because there always will be a norm from ERA
# KW Notes that 'reps' are those obs that have failed one of the tests in the filter above
passes, reps = filt.split_reports(reps)
    # KW Notes that passes is an object containing a month's worth of marine obs that pass (flag=0) for all above filters
# Both the bayesian buddy check and the mds buddy check test for distance to neighbours in space and time and flag
# with a 1 where it is too great/fails.
# passes.bayesian_buddy_check('DPT', sst_stdev_1, sst_stdev_2, sst_stdev_3)
# passes.mds_buddy_check('DPT', dpt_pentad_stdev)
# KW Added a HardLimit variable that has to be passed to mdsKATE_buddy_check for the stdev multiplier
# KW Using Kate's version of MDS buddy check now which has a stdev for each pentad and only checks candidate month
passes.mdsKATE_buddy_check('DPT', dpt_pentad_stdev, year, month, HardLimit)
    # KW - all fails (reps) are set to have a flag of 0 which means to pass the buddy checks because there is no point in spending
# further time buddy checking them, same as for track checks
# reps.set_qc('DPT', 'bbud', 8)
reps.set_qc('DPT', 'bud', 8) # KW set as 8 for now
for i in range(0, len(passes)):
rep = passes.pop(0)
reps.append(rep)
del passes
reps.sort()
tim6 = time.time()
print "obs DPT buddy checked in ", tim6-tim5, len(reps)
syr = str(year)
smn = "%02d" % (month)
# KW changed outfile from icoards_dir to data_base_dir so that it writes to a different place to where the original
# data are stored - don't want to mess with John's working version.
outfile = open(data_base_dir+'/new_suite_'+syr+smn+'_'+output_suffix+'.txt', 'w')
for rep in reps.reps:
if rep.data['YR'] == year and rep.data['MO'] == month:
outfile.write(rep.print_report())
outfile.close()
del reps
tim11 = time.time()
print year, " so far in ", tim11-tim00
if __name__ == '__main__':
main(sys.argv[1:])
| cc0-1.0 | -7,735,936,863,802,000,000 | 46.289583 | 202 | 0.617296 | false |
maximeolivier/pyCAF | pycaf/importer/importNetwork/importSwitch/cisco_switch.py | 1 | 3141 | #| This file is part of pyCAF. |
#| |
#| pyCAF is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| pyCAF is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 6 14:08:02 2014
@author: thierry
"""
import os
from pycaf.importer.importNetwork import functions as nf
from pycaf.architecture.devices.switch import Switch
import pycaf.tools as tools
def Import_cisco_switch_file(filename, config):
""" Create a Server object from an extraction script result archive
"""
import time
logger = tools.create_logger(__name__, config)
switch_to_import = Switch()
startTime = time.time()
if not os.path.isfile(filename):
logger.error("Cisco switch import error, file not foud : " + str(filename))
return False
else:
switch_to_import.name = filename.split('/')[-1]
switch_to_import.manufacturer = "Cisco"
# Open the file and store lines in a list
file_switch = open(filename, 'rb')
file_content_lines = file_switch.readlines()
file_switch.seek(0, 0)
file_content_exclamation = file_switch.read().split('!\n')
file_switch.close()
nf.import_cisco_hostname(switch_to_import, file_content_lines, logger)
nf.import_cisco_osversion(switch_to_import, file_content_lines, logger)
nf.import_cisco_vlan(switch_to_import, file_content_exclamation, logger)
nf.import_cisco_interfaces_and_switchport(switch_to_import, file_content_exclamation, logger)
nf.import_cisco_route(switch_to_import, file_content_lines, logger)
nf.import_cisco_catalyst_acl_table(switch_to_import, file_content_lines, logger)
print switch_to_import
print switch_to_import.acl_table
print switch_to_import.vlan
print switch_to_import.interfaces
print switch_to_import.switchport
print switch_to_import.routes
# import_osname(server_to_import, xtract_dir, logger)
endTime = time.time()
logger.info("Cisco switch successfully imported. Time : {0:.2} secs\n".format(endTime - startTime))
return switch_to_import
| gpl-3.0 | 3,787,811,056,711,934,000 | 41.445946 | 107 | 0.584527 | false |
mdomke/signaling | docs/source/conf.py | 1 | 9322 | # -*- coding: utf-8 -*-
#
# signaling documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 24 11:07:31 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import signaling
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'signaling'
copyright = u'2016, Martin Domke'
author = u'Martin Domke'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = signaling.__version__
# The full version, including alpha/beta/rc tags.
# release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'signalingdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'signaling.tex', u'Signaling Documentation',
u'Martin Domke', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'signaling', u'Signaling Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'signaling', u'Signaling Documentation',
author, 'signaling', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | -5,357,621,713,983,783,000 | 31.144828 | 80 | 0.706393 | false |
evilncrazy/vake | vake.py | 1 | 2076 | import sys, os
import subprocess
import re
import select
"""
Runs an instance of make, echoing the stdout and storing the stderr
line by line.
"""
def run_make(args):
p = subprocess.Popen(["make"] + args,
stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stderr = []
while p.poll() == None:
reads = [p.stdout.fileno(), p.stderr.fileno()]
ret = select.select(reads, [], [])
for fd in ret[0]:
if fd == p.stdout.fileno():
read = p.stdout.readline()
sys.stdout.write(read)
if fd == p.stderr.fileno():
read = p.stderr.readline()
sys.stderr.write(read)
stderr.append(read)
return stderr
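# Illustrative call (hypothetical make targets): run_make(['-j4', 'all']) echoes
# make's stdout and stderr as they arrive and returns the captured stderr lines
# for parse_output() below.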
"""
Parse the output of a Make instance.
"""
def parse_output(stderr):
parsed_output = []
for line in stderr:
# Regex to extract file, line, column number and error message
m = re.search(r"(.*?):([0-9]+):([0-9]+):\s(error|warning):\s(.*)", line)
if m:
parsed_output.append(m.groups())
return parsed_output
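# Illustrative example (hypothetical diagnostic): a gcc/clang style line such as
#   main.c:12:5: error: expected ';' before 'return'
# is matched by the regex in parse_output() and yields the tuple
#   ('main.c', '12', '5', 'error', "expected ';' before 'return'")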
"""
Get the nth line of a file.
"""
def get_nth_line(file_name, n):
with open(file_name) as f:
for i, line in enumerate(f):
if i == n - 1:
return line
if __name__ == "__main__":
VAKE_HEADER = '\033[41m'
ENDC = '\033[0m'
parsed_output = parse_output(run_make(sys.argv[1:]))
if len(parsed_output) > 0:
# Give the user a choice of running vake or not
choice = raw_input(VAKE_HEADER + 'vake: ' + str(len(parsed_output)) + ' errors or warnings. Run vake? [Y/n]' + ENDC + ' ')
if (choice == "" or choice.lower() == 'y'):
# Print the instructions
print "<Enter> to edit. 'q' to skip."
for output in parsed_output:
# Print out the error message
file_name, line_no, col_no, errwarn, msg = output
print "{0}:{1}:{2} {3}".format(file_name, line_no, col_no, errwarn) + ':', msg
print ' ', get_nth_line(file_name, int(line_no)),
print ' ', ' ' * (int(col_no) - 1), '^'
cmd = raw_input(":")
subprocess.call(['vim', file_name,
'+call cursor({0}, {1})'.format(line_no, col_no), '+{0}'.format(cmd)]) | mit | -6,059,614,288,028,511,000 | 27.067568 | 124 | 0.587669 | false |
cloudera/recordservice | tests/query_test/test_partitioning.py | 1 | 4640 | # Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pytest
import shlex
import time
from tests.common.test_result_verifier import *
from subprocess import call
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.test_vector import *
from tests.common.test_dimensions import ALL_NODES_ONLY
from tests.common.impala_test_suite import *
from tests.common.skip import SkipIfS3, SkipIfIsilon, SkipIfLocal
# Tests to validate HDFS partitioning.
class TestPartitioning(ImpalaTestSuite):
TEST_DBS = ['hdfs_partitioning', 'bool_partitions']
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestPartitioning, cls).add_test_dimensions()
cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
# There is no reason to run these tests using all dimensions.
cls.TestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'text' and\
v.get_value('table_format').compression_codec == 'none')
@classmethod
def setup_class(cls):
super(TestPartitioning, cls).setup_class()
map(cls.cleanup_db, cls.TEST_DBS)
cls.hdfs_client.delete_file_dir("test-warehouse/all_insert_partition_col_types/",\
recursive=True)
@classmethod
def teardown_class(cls):
map(cls.cleanup_db, cls.TEST_DBS)
super(TestPartitioning, cls).teardown_class()
@SkipIfS3.insert
@SkipIfLocal.root_path
@pytest.mark.execute_serially
def test_partition_col_types(self, vector):
self.execute_query("create database hdfs_partitioning");
self.run_test_case('QueryTest/partition-col-types', vector,
use_db='hdfs_partitioning')
# Missing Coverage: Impala deals with boolean partitions created by Hive on a non-hdfs
# filesystem.
@SkipIfS3.hive
@SkipIfIsilon.hive
@pytest.mark.execute_serially
@SkipIfS3.insert
def test_boolean_partitions(self, vector):
# This test takes about a minute to complete due to the Hive commands that are
# executed. To cut down on runtime, limit the test to exhaustive exploration
# strategy.
if self.exploration_strategy() != 'exhaustive': pytest.skip()
db_name = 'bool_partitions'
tbl_name = 'tbl'
self.execute_query("create database " + db_name)
self.execute_query("use " + db_name)
self.execute_query("create table %s (i int) partitioned by (b boolean)" % tbl_name)
# Insert some data using Hive. Due to HIVE-6590, Hive may create multiple
# partitions, mapping to the same boolean literal value.
# For example, Hive may create partitions: /b=FALSE and /b=false, etc
call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=false) SELECT 1 from "\
"functional.alltypes limit 1" % (db_name, tbl_name)])
call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=FALSE) SELECT 2 from "\
"functional.alltypes limit 1" % (db_name, tbl_name)])
call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=true) SELECT 10 from "\
"functional.alltypes limit 1" % (db_name, tbl_name)])
# Update the Impala metadata
self.execute_query("refresh " + tbl_name)
# List the partitions. Show table stats returns 1 row for each partition + 1 summary
# row
result = self.execute_query("show table stats %s" % tbl_name)
assert len(result.data) == 3 + 1
# Verify Impala properly merges the results of the bad Hive metadata.
assert '13' == self.execute_scalar("select sum(i) from %s" % tbl_name);
assert '10' == self.execute_scalar("select sum(i) from %s where b=true" % tbl_name)
assert '3' == self.execute_scalar("select sum(i) from %s where b=false" % tbl_name)
# INSERT into a boolean column is disabled in Impala due to this Hive bug.
try:
self.execute_query("insert into %s partition(bool_col=true) select 1" % tbl_name)
except ImpalaBeeswaxException, e:
assert 'AnalysisException: INSERT into table with BOOLEAN partition column (%s) '\
'is not supported: %s.%s' % ('b', db_name, tbl_name) in str(e)
| apache-2.0 | 1,965,043,666,579,426,000 | 40.428571 | 89 | 0.708405 | false |
hickeroar/simplebayes | simplebayes/__init__.py | 1 | 10281 | # coding: utf-8
"""
The MIT License (MIT)
Copyright (c) 2015 Ryan Vennell
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from simplebayes.categories import BayesCategories
import pickle
import os
class SimpleBayes(object):
"""A memory-based, optional-persistence naïve bayesian text classifier."""
cache_file = '_simplebayes.pickle'
def __init__(self, tokenizer=None, cache_path='/tmp/'):
"""
:param tokenizer: A tokenizer override
:type tokenizer: function (optional)
:param cache_path: path to data storage
:type cache_path: str
"""
self.categories = BayesCategories()
self.tokenizer = tokenizer or SimpleBayes.tokenize_text
self.cache_path = cache_path
self.probabilities = {}
@classmethod
def tokenize_text(cls, text):
"""
Default tokenize method; can be overridden
:param text: the text we want to tokenize
:type text: str
:return: list of tokenized text
:rtype: list
"""
return [w for w in text.split() if len(w) > 2]
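        # Illustrative behaviour (words of two characters or fewer are dropped):
        #   SimpleBayes.tokenize_text('the cat sat on a mat')
        #   -> ['the', 'cat', 'sat', 'mat']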
@classmethod
def count_token_occurrences(cls, words):
"""
Creates a key/value set of word/count for a given sample of text
:param words: full list of all tokens, non-unique
:type words: list
:return: key/value pairs of words and their counts in the list
:rtype: dict
"""
counts = {}
for word in words:
if word in counts:
counts[word] += 1
else:
counts[word] = 1
return counts
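        # Illustrative behaviour:
        #   SimpleBayes.count_token_occurrences(['spam', 'ham', 'spam'])
        #   -> {'spam': 2, 'ham': 1}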
def flush(self):
"""
Deletes all tokens & categories
"""
self.categories = BayesCategories()
def calculate_category_probability(self):
"""
Caches the individual probabilities for each category
"""
total_tally = 0.0
probs = {}
for category, bayes_category in \
self.categories.get_categories().items():
count = bayes_category.get_tally()
total_tally += count
probs[category] = count
# Calculating the probability
for category, count in probs.items():
if total_tally > 0:
probs[category] = float(count)/float(total_tally)
else:
probs[category] = 0.0
for category, probability in probs.items():
self.probabilities[category] = {
# Probability that any given token is of this category
'prc': probability,
# Probability that any given token is not of this category
'prnc': sum(probs.values()) - probability
}
def train(self, category, text):
"""
Trains a category with a sample of text
:param category: the name of the category we want to train
:type category: str
:param text: the text we want to train the category with
:type text: str
"""
try:
bayes_category = self.categories.get_category(category)
except KeyError:
bayes_category = self.categories.add_category(category)
tokens = self.tokenizer(str(text))
occurrence_counts = self.count_token_occurrences(tokens)
for word, count in occurrence_counts.items():
bayes_category.train_token(word, count)
# Updating our per-category overall probabilities
self.calculate_category_probability()
def untrain(self, category, text):
"""
Untrains a category with a sample of text
:param category: the name of the category we want to train
:type category: str
:param text: the text we want to untrain the category with
:type text: str
"""
try:
bayes_category = self.categories.get_category(category)
except KeyError:
return
tokens = self.tokenizer(str(text))
        occurrence_counts = self.count_token_occurrences(tokens)
        for word, count in occurrence_counts.items():
bayes_category.untrain_token(word, count)
# Updating our per-category overall probabilities
self.calculate_category_probability()
def classify(self, text):
"""
Chooses the highest scoring category for a sample of text
:param text: sample text to classify
:type text: str
:return: the "winning" category
:rtype: str
"""
score = self.score(text)
if not score:
return None
return sorted(score.items(), key=lambda v: v[1])[-1][0]
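        # Typical usage (illustrative; category names and sample text are arbitrary):
        #   bayes = SimpleBayes()
        #   bayes.train('spam', 'cheap pills buy now')
        #   bayes.train('ham', 'minutes from the project meeting')
        #   bayes.classify('buy cheap pills')  # -> 'spam'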
def score(self, text):
"""
Scores a sample of text
:param text: sample text to score
:type text: str
:return: dict of scores per category
:rtype: dict
"""
occurs = self.count_token_occurrences(self.tokenizer(text))
scores = {}
for category in self.categories.get_categories().keys():
scores[category] = 0
categories = self.categories.get_categories().items()
for word, count in occurs.items():
token_scores = {}
# Adding up individual token scores
for category, bayes_category in categories:
token_scores[category] = \
float(bayes_category.get_token_count(word))
# We use this to get token-in-category probabilities
token_tally = sum(token_scores.values())
# If this token isn't found anywhere its probability is 0
if token_tally == 0.0:
continue
            # Calculating bayes probability for this token
# http://en.wikipedia.org/wiki/Naive_Bayes_spam_filtering
for category, token_score in token_scores.items():
                # Bayes probability * the number of occurrences of this token
scores[category] += count * \
self.calculate_bayesian_probability(
category,
token_score,
token_tally
)
# Removing empty categories from the results
final_scores = {}
for category, score in scores.items():
if score > 0:
final_scores[category] = score
return final_scores
def calculate_bayesian_probability(self, cat, token_score, token_tally):
"""
Calculates the bayesian probability for a given token/category
:param cat: The category we're scoring for this token
:type cat: str
:param token_score: The tally of this token for this category
:type token_score: float
:param token_tally: The tally total for this token from all categories
:type token_tally: float
:return: bayesian probability
:rtype: float
"""
# P that any given token IS in this category
prc = self.probabilities[cat]['prc']
# P that any given token is NOT in this category
prnc = self.probabilities[cat]['prnc']
# P that this token is NOT of this category
prtnc = (token_tally - token_score) / token_tally
# P that this token IS of this category
prtc = token_score / token_tally
# Assembling the parts of the bayes equation
numerator = (prtc * prc)
denominator = (numerator + (prtnc * prnc))
# Returning the calculated bayes probability unless the denom. is 0
return numerator / denominator if denominator != 0.0 else 0.0
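        # Worked example (illustrative numbers): with two equally likely
        # categories (prc = prnc = 0.5) and a token seen 3 times in this
        # category out of 4 occurrences overall, prtc = 0.75 and prtnc = 0.25,
        # so the result is (0.75 * 0.5) / (0.75 * 0.5 + 0.25 * 0.5) = 0.75.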
def tally(self, category):
"""
Gets the tally for a requested category
:param category: The category we want a tally for
:type category: str
:return: tally for a given category
:rtype: int
"""
try:
bayes_category = self.categories.get_category(category)
except KeyError:
return 0
return bayes_category.get_tally()
def get_cache_location(self):
"""
Gets the location of the cache file
:return: the location of the cache file
:rtype: string
"""
filename = self.cache_path if \
self.cache_path[-1:] == '/' else \
self.cache_path + '/'
filename += self.cache_file
return filename
def cache_persist(self):
"""
Saves the current trained data to the cache.
This is initiated by the program using this module
"""
filename = self.get_cache_location()
pickle.dump(self.categories, open(filename, 'wb'))
def cache_train(self):
"""
Loads the data for this classifier from a cache file
:return: whether or not we were successful
:rtype: bool
"""
filename = self.get_cache_location()
if not os.path.exists(filename):
return False
categories = pickle.load(open(filename, 'rb'))
assert isinstance(categories, BayesCategories), \
"Cache data is either corrupt or invalid"
self.categories = categories
# Updating our per-category overall probabilities
self.calculate_category_probability()
return True
| mit | -7,677,386,702,332,478,000 | 32.376623 | 78 | 0.603502 | false |
prasanna08/oppia | core/storage/topic/gae_models.py | 1 | 25011 | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for topics and related constructs."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from constants import constants
from core.platform import models
import feconf
import python_utils
from google.appengine.ext import ndb
(base_models, user_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.user])
class TopicSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a topic snapshot."""
pass
class TopicSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of a topic snapshot."""
pass
class TopicModel(base_models.VersionedModel):
"""Model for storing Topics.
This class should only be imported by the topic services file
and the topic model test file.
"""
SNAPSHOT_METADATA_CLASS = TopicSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = TopicSnapshotContentModel
ALLOW_REVERT = False
# The name of the topic.
name = ndb.StringProperty(required=True, indexed=True)
# The canonical name of the topic, created by making `name` lowercase.
canonical_name = ndb.StringProperty(required=True, indexed=True)
# The abbreviated name of the topic.
abbreviated_name = ndb.StringProperty(indexed=True, default='')
# The thumbnail filename of the topic.
thumbnail_filename = ndb.StringProperty(indexed=True)
# The thumbnail background color of the topic.
thumbnail_bg_color = ndb.StringProperty(indexed=True)
# The description of the topic.
description = ndb.TextProperty(indexed=False)
# This consists of the list of objects referencing canonical stories that
# are part of this topic.
canonical_story_references = ndb.JsonProperty(repeated=True, indexed=False)
# This consists of the list of objects referencing additional stories that
# are part of this topic.
additional_story_references = ndb.JsonProperty(repeated=True, indexed=False)
# The schema version for the story reference object on each of the above 2
# lists.
story_reference_schema_version = ndb.IntegerProperty(
required=True, indexed=True)
# This consists of the list of uncategorized skill ids that are not part of
# any subtopic.
uncategorized_skill_ids = ndb.StringProperty(repeated=True, indexed=True)
# The list of subtopics that are part of the topic.
subtopics = ndb.JsonProperty(repeated=True, indexed=False)
# The schema version of the subtopic dict.
subtopic_schema_version = ndb.IntegerProperty(required=True, indexed=True)
# The id for the next subtopic.
next_subtopic_id = ndb.IntegerProperty(required=True)
# The ISO 639-1 code for the language this topic is written in.
language_code = ndb.StringProperty(required=True, indexed=True)
# The url fragment of the topic.
url_fragment = ndb.StringProperty(required=True, indexed=True)
# Whether to show practice tab in the Topic viewer page.
practice_tab_is_displayed = ndb.BooleanProperty(
required=True, default=False)
# The content of the meta tag in the Topic viewer page.
meta_tag_content = ndb.StringProperty(indexed=True)
@staticmethod
def get_deletion_policy():
"""Topic should be kept if it is published."""
return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
@classmethod
def has_reference_to_user_id(cls, unused_user_id):
"""Check whether TopicModel snapshots references the given user.
Args:
unused_user_id: str. The ID of the user whose data should be
checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return False
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(TopicModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
topic_rights = TopicRightsModel.get_by_id(self.id)
if topic_rights.topic_is_published:
status = constants.ACTIVITY_STATUS_PUBLIC
else:
status = constants.ACTIVITY_STATUS_PRIVATE
topic_commit_log_entry = TopicCommitLogEntryModel.create(
self.id, self.version, committer_id, commit_type,
commit_message, commit_cmds, status, False
)
topic_commit_log_entry.topic_id = self.id
topic_commit_log_entry.put()
@classmethod
def get_by_name(cls, topic_name):
"""Gets TopicModel by topic_name. Returns None if the topic with
name topic_name doesn't exist.
Args:
topic_name: str. The name of the topic.
Returns:
TopicModel|None. The topic model of the topic or None if not
found.
"""
return TopicModel.query().filter(
cls.canonical_name == topic_name.lower()).filter(
cls.deleted == False).get() # pylint: disable=singleton-comparison
@classmethod
def get_by_url_fragment(cls, url_fragment):
"""Gets TopicModel by url_fragment. Returns None if the topic with
name url_fragment doesn't exist.
Args:
url_fragment: str. The url fragment of the topic.
Returns:
TopicModel|None. The topic model of the topic or None if not
found.
"""
# TODO(#10210): Make fetching by URL fragment faster.
return TopicModel.query().filter(
cls.url_fragment == url_fragment).filter(
cls.deleted == False).get() # pylint: disable=singleton-comparison
@classmethod
def get_export_policy(cls):
"""Model does not contain user data."""
return dict(super(cls, cls).get_export_policy(), **{
'name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'canonical_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'abbreviated_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thumbnail_filename': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thumbnail_bg_color': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'description': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'canonical_story_references':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'additional_story_references':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'story_reference_schema_version':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'uncategorized_skill_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'subtopics': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'subtopic_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'next_subtopic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'meta_tag_content': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'practice_tab_is_displayed':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE,
})
class TopicCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
"""Log of commits to topics.
A new instance of this model is created and saved every time a commit to
TopicModel occurs.
The id for this model is of the form 'topic-[topic_id]-[version]'.
"""
# The id of the topic being edited.
topic_id = ndb.StringProperty(indexed=True, required=True)
@staticmethod
def get_deletion_policy():
"""Topic commit log is deleted only if the correspondingm topic is not
public.
"""
return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
@classmethod
def _get_instance_id(cls, topic_id, version):
"""This function returns the generated id for the get_commit function
in the parent class.
Args:
topic_id: str. The id of the topic being edited.
version: int. The version number of the topic after the commit.
Returns:
str. The commit id with the topic id and version number.
"""
return 'topic-%s-%s' % (topic_id, version)
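        # For example (illustrative values), topic_id 'topic_abc' at version 2
        # maps to the commit log id 'topic-topic_abc-2'.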
@classmethod
def get_export_policy(cls):
"""This model is only stored for archive purposes. The commit log of
entities is not related to personal user data.
"""
return dict(super(cls, cls).get_export_policy(), **{
'topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
class TopicSummaryModel(base_models.BaseModel):
"""Summary model for an Oppia Topic.
This should be used whenever the content blob of the topic is not
needed (e.g. search results, etc).
A TopicSummaryModel instance stores the following information:
id, description, language_code, last_updated, created_on, version,
url_fragment.
The key of each instance is the topic id.
"""
# The name of the topic.
name = ndb.StringProperty(required=True, indexed=True)
# The canonical name of the topic, created by making `name` lowercase.
canonical_name = ndb.StringProperty(required=True, indexed=True)
# The ISO 639-1 code for the language this topic is written in.
language_code = ndb.StringProperty(required=True, indexed=True)
# The description of the topic.
description = ndb.TextProperty(indexed=False)
# The url fragment of the topic.
url_fragment = ndb.StringProperty(required=True, indexed=True)
# Time when the topic model was last updated (not to be
# confused with last_updated, which is the time when the
# topic *summary* model was last updated).
topic_model_last_updated = ndb.DateTimeProperty(required=True, indexed=True)
# Time when the topic model was created (not to be confused
# with created_on, which is the time when the topic *summary*
# model was created).
topic_model_created_on = ndb.DateTimeProperty(required=True, indexed=True)
# The number of canonical stories that are part of this topic.
canonical_story_count = ndb.IntegerProperty(required=True, indexed=True)
# The number of additional stories that are part of this topic.
additional_story_count = ndb.IntegerProperty(required=True, indexed=True)
# The total number of skills in the topic (including those that are
# uncategorized).
total_skill_count = ndb.IntegerProperty(required=True, indexed=True)
# The number of skills that are not part of any subtopic.
uncategorized_skill_count = ndb.IntegerProperty(required=True, indexed=True)
# The number of subtopics of the topic.
subtopic_count = ndb.IntegerProperty(required=True, indexed=True)
# The thumbnail filename of the topic.
thumbnail_filename = ndb.StringProperty(indexed=True)
# The thumbnail background color of the topic.
thumbnail_bg_color = ndb.StringProperty(indexed=True)
version = ndb.IntegerProperty(required=True)
@staticmethod
def get_deletion_policy():
"""Topic summary should be kept if associated topic is published."""
return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
@classmethod
def has_reference_to_user_id(cls, unused_user_id):
"""Check whether TopicSummaryModel references the given user.
Args:
unused_user_id: str. The (unused) ID of the user whose data should
be checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return False
@classmethod
def get_export_policy(cls):
"""Model does not contain user data."""
return dict(super(cls, cls).get_export_policy(), **{
'name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'canonical_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'description': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'topic_model_last_updated':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'topic_model_created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'canonical_story_count': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'additional_story_count': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'total_skill_count': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'uncategorized_skill_count':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'subtopic_count': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thumbnail_filename': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thumbnail_bg_color': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
class SubtopicPageSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a subtopic page snapshot."""
pass
class SubtopicPageSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of a subtopic page snapshot."""
pass
class SubtopicPageModel(base_models.VersionedModel):
"""Model for storing Subtopic pages.
This stores the HTML data for a subtopic page.
"""
SNAPSHOT_METADATA_CLASS = SubtopicPageSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = SubtopicPageSnapshotContentModel
ALLOW_REVERT = False
# The topic id that this subtopic is a part of.
topic_id = ndb.StringProperty(required=True, indexed=True)
# The json data of the subtopic consisting of subtitled_html,
# recorded_voiceovers and written_translations fields.
page_contents = ndb.JsonProperty(required=True)
# The schema version for the page_contents field.
page_contents_schema_version = ndb.IntegerProperty(
required=True, indexed=True)
# The ISO 639-1 code for the language this subtopic page is written in.
language_code = ndb.StringProperty(required=True, indexed=True)
@staticmethod
def get_deletion_policy():
"""Subtopic should be kept if associated topic is published."""
return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
@classmethod
def has_reference_to_user_id(cls, unused_user_id):
"""Check whether SubtopicPageModel snapshots references the given user.
Args:
unused_user_id: str. The ID of the user whose data should be
checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return False
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(SubtopicPageModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
subtopic_page_commit_log_entry = SubtopicPageCommitLogEntryModel.create(
self.id, self.version, committer_id, commit_type, commit_message,
commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False
)
subtopic_page_commit_log_entry.subtopic_page_id = self.id
subtopic_page_commit_log_entry.put()
@classmethod
def get_export_policy(cls):
"""Model does not contain user data."""
return dict(super(cls, cls).get_export_policy(), **{
'topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'page_contents': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'page_contents_schema_version':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
class SubtopicPageCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
"""Log of commits to subtopic pages.
A new instance of this model is created and saved every time a commit to
SubtopicPageModel occurs.
The id for this model is of the form
'subtopicpage-[subtopic_page_id]-[version]'.
"""
# The id of the subtopic page being edited.
subtopic_page_id = ndb.StringProperty(indexed=True, required=True)
@staticmethod
def get_deletion_policy():
"""Subtopic page commit log is deleted only if the corresponding
topic is not public.
"""
return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
@classmethod
def _get_instance_id(cls, subtopic_page_id, version):
"""This function returns the generated id for the get_commit function
in the parent class.
Args:
subtopic_page_id: str. The id of the subtopic page being edited.
version: int. The version number of the subtopic page after the
commit.
Returns:
str. The commit id with the subtopic page id and version number.
"""
return 'subtopicpage-%s-%s' % (subtopic_page_id, version)
@classmethod
def get_export_policy(cls):
"""This model is only stored for archive purposes. The commit log of
entities is not related to personal user data.
"""
return dict(super(cls, cls).get_export_policy(), **{
'subtopic_page_id': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
class TopicRightsSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a topic rights snapshot."""
pass
class TopicRightsSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of a topic rights snapshot."""
pass
class TopicRightsModel(base_models.VersionedModel):
"""Storage model for rights related to a topic.
The id of each instance is the id of the corresponding topic.
"""
SNAPSHOT_METADATA_CLASS = TopicRightsSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = TopicRightsSnapshotContentModel
ALLOW_REVERT = False
# The user_ids of the managers of this topic.
manager_ids = ndb.StringProperty(indexed=True, repeated=True)
# Whether this topic is published.
topic_is_published = ndb.BooleanProperty(
indexed=True, required=True, default=False)
@staticmethod
def get_deletion_policy():
"""Topic rights should be kept if associated topic is published."""
return base_models.DELETION_POLICY.KEEP_IF_PUBLIC
@classmethod
def has_reference_to_user_id(cls, user_id):
"""Check whether TopicRightsModel references user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return cls.query(
cls.manager_ids == user_id
).get(keys_only=True) is not None
@classmethod
def get_by_user(cls, user_id):
"""Retrieves the rights object for all topics assigned to given user
Args:
user_id: str. ID of user.
Returns:
list(TopicRightsModel). The list of TopicRightsModel objects in
which the given user is a manager.
"""
topic_rights_models = cls.query(
cls.manager_ids == user_id
)
return topic_rights_models
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(TopicRightsModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
topic_rights = TopicRightsModel.get_by_id(self.id)
if topic_rights.topic_is_published:
status = constants.ACTIVITY_STATUS_PUBLIC
else:
status = constants.ACTIVITY_STATUS_PRIVATE
TopicCommitLogEntryModel(
id=('rights-%s-%s' % (self.id, self.version)),
user_id=committer_id,
topic_id=self.id,
commit_type=commit_type,
commit_message=commit_message,
commit_cmds=commit_cmds,
version=None,
post_commit_status=status,
post_commit_community_owned=False,
post_commit_is_private=not topic_rights.topic_is_published
).put()
snapshot_metadata_model = self.SNAPSHOT_METADATA_CLASS.get(
self.get_snapshot_id(self.id, self.version))
snapshot_metadata_model.content_user_ids = list(sorted(set(
self.manager_ids)))
commit_cmds_user_ids = set()
for commit_cmd in commit_cmds:
user_id_attribute_names = python_utils.NEXT(
cmd['user_id_attribute_names']
for cmd in feconf.TOPIC_RIGHTS_CHANGE_ALLOWED_COMMANDS
if cmd['name'] == commit_cmd['cmd']
)
for user_id_attribute_name in user_id_attribute_names:
commit_cmds_user_ids.add(commit_cmd[user_id_attribute_name])
snapshot_metadata_model.commit_cmds_user_ids = list(
sorted(commit_cmds_user_ids))
snapshot_metadata_model.put()
@classmethod
def get_export_policy(cls):
"""Model contains user data."""
return dict(super(cls, cls).get_export_policy(), **{
'manager_ids': base_models.EXPORT_POLICY.EXPORTED,
'topic_is_published': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
@classmethod
def export_data(cls, user_id):
"""(Takeout) Export user-relevant properties of TopicRightsModel.
Args:
user_id: str. The user_id denotes which user's data to extract.
Returns:
dict. The user-relevant properties of TopicRightsModel in a dict
format. In this case, we are returning all the ids of the topics
this user manages.
"""
managed_topics = cls.get_all().filter(cls.manager_ids == user_id)
managed_topic_ids = [right.id for right in managed_topics]
return {
'managed_topic_ids': managed_topic_ids
}
| apache-2.0 | -248,851,925,992,345,250 | 38.7 | 82 | 0.65963 | false |
CentralLabFacilities/m3meka | python/m3/omnibase.py | 1 | 31978 | # -*- coding: utf-8 -*-
#M3 -- Meka Robotics Robot Components
#Copyright (c) 2010 Meka Robotics
#Author: [email protected] (Aaron Edsinger)
#M3 is free software: you can redistribute it and/or modify
#it under the terms of the GNU Lesser General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#M3 is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Lesser General Public License for more details.
#You should have received a copy of the GNU Lesser General Public License
#along with M3. If not, see <http://www.gnu.org/licenses/>.
from m3.vehicle import M3Vehicle
import m3.omnibase_pb2 as mob
from m3.component import M3Component
import yaml
import numpy as nu
import m3.toolbox as m3t
import time
from m3.unit_conversion import *
import math
class M3OmniBase(M3Vehicle):
"""
The M3OmniBase class has been designed as the principal interface for controlling an M3 omnidirectional mobile base.
It must be defined in the m3_config.yml file and can be created using the m3.component_factory module.
    The example below creates an interface for an M3OmniBase defined as M3OmniBase_mb0.
>>> import m3.omnibase as m3o
>>> omni = m3o.M3OmniBase('M3OmniBase_mb0') # creates M3OmniBase class
The M3OmniBase class can be used to send commands and retrieve status updates to and from the m3 realtime server. The
    example below configures the realtime proxy to update the M3OmniBase class with updates from the robot and to receive commands.
It also sets the omnibase controller to goal position control, subscribes to the power board to enable motor power,
and runs the wheel calibration routine if needed.
>>> import m3.rt_proxy as m3p
>>> proxy = m3p.M3RtProxy()
>>> proxy.start() # m3rt server must be running first
>>> proxy.make_operational_all()
>>> proxy.subscribe_status(omni)
>>> proxy.publish_command(omni)
>>> proxy.publish_param(omni)
>>> pwr_name=proxy.get_available_components('m3pwr')
>>> if len(pwr_name)>1:
pwr_name=m3t.user_select_components_interactive(pwr_name,single=True)
>>> pwr=m3f.create_component(pwr_name[0])
>>> proxy.subscribe_status(pwr)
>>> proxy.publish_command(pwr)
>>> pwr.set_motor_power_on()
>>> proxy.step()
>>> time.sleep(0.5)
>>> omni.calibrate(proxy)
>>> time.sleep(0.5)
>>> omni.set_local_position(0,0,0,proxy)
>>> omni.set_global_position(0,0,0,proxy)
>>> omni.set_max_linear_accel(0.3)
>>> omni.set_max_linear_velocity(0.3)
>>> omni.set_max_rotation_velocity(30)
>>> omni.set_max_rotation_accel(30)
>>> proxy.step()
>>> omni.set_mode_traj_goal()
>>> omni.set_traj_goal(0, 0, 0)
>>> proxy.step()
Now the M3OmniBase class can be used to issue global position commands and report our position:
>>> omni.set_traj_goal(2.0, 0, 180)
>>> proxy.step()
>>> print 'Position (x,y,yaw):', omni.get_global_position()
"""
def __init__(self,name):
M3Vehicle.__init__(self,name,type='m3omnibase')
self.status=mob.M3OmnibaseStatus()
self.command=mob.M3OmnibaseCommand()
self.param=mob.M3OmnibaseParam()
self.num_casters = 4
for i in range(3):
self.command.opspace_force_desired.append(0)
self.command.local_position_desired.append(0)
self.command.local_velocity_desired.append(0)
self.command.local_acceleration_desired.append(0)
self.command.global_position_desired.append(0)
self.command.global_velocity_desired.append(0)
self.command.global_acceleration_desired.append(0)
self.command.traj_goal.append(0)
self.command.local_position.append(0)
self.command.global_position.append(0)
for i in range(self.num_casters):
self.command.roll_torque_desired.append(0)
self.command.steer_torque_desired.append(0)
self.command.roll_velocity_desired.append(0)
self.command.steer_velocity_desired.append(0)
self.command.steer_theta_desired.append(0)
self.command.caster_mode.append(mob.OMNIBASE_CASTER_OFF)
self.param.enable_breakbeam.append(0)
self.vias=[]
self.via_idx=0
self.read_config()
def calibrate(self,proxy):
"""
Calibrates Omnibase casters if necessary.
:param proxy: running proxy
:type proxy: M3RtProxy
"""
need_to_calibrate = False
for i in range(self.num_casters):
if (not self.is_calibrated(i)):
need_to_calibrate = True
if need_to_calibrate:
print '------------------------------------------------'
            print 'Not all casters are calibrated. Do calibration [y]?'
if m3t.get_yes_no('y'):
print 'Note: Orientations are facing robot'
print "Turn power on to robot and press any key."
raw_input()
self.set_mode_caster()
proxy.step()
time.sleep(4)
caster_names=['FrontRight','RearRight','RearLeft','FrontLeft']
wiggle = [1,2,1,2]
last_calib = -1
repeat_calib = 0
while need_to_calibrate:
for i in [1,2,3,0]:
if (not self.is_calibrated(i)):
print '-------------------------------------------'
print 'Calibrating caster: ', caster_names[i], '..'
#print 'Manual assist required in CCW direction'
if i == last_calib:
repeat_calib += 1
if repeat_calib == 0:
wiggle = [1,2,1,2]
self.home(i,proxy, wiggle[i])
elif repeat_calib == 1:
wiggle = [3,0,3,0]
self.home(i,proxy, wiggle[i])
elif repeat_calib == 2:
wiggle = [2,3,0,1]
self.home(i,proxy, wiggle[i])
elif repeat_calib >= 3:
raise m3t.M3Exception('Error calibrating. Please reposition base and try again.')
last_calib = i
need_to_calibrate = False
for i in range(self.num_casters):
if (not self.is_calibrated(i)):
need_to_calibrate = True
self.set_mode_caster_off(range(self.num_casters))
self.set_mode_off()
else:
print "Skipping Calibration.."
def home(self, idx, proxy, idx_wiggle):
time_out = 20.0
caster_names=['FrontRight','RearRight','RearLeft','FrontLeft']
self.set_mode_caster_off(range(4))
#self.set_mode_caster_theta(idx)
#self.set_mode_caster_theta(idx_wiggle)
self.set_roll_torques(0.0, idx)
self.enable_breakbeam(idx)
#start_theta = self.get_steer_theta()[idx]
#print 'Start theta:', idx, start_theta
#start_theta_wiggle = self.get_steer_theta()[idx_wiggle]
#theta = 0
        #theta_cnt = 0
self.set_mode_caster_torque(idx)
ts = time.time()
proxy.step()
while (not self.is_calibrated(idx)):
#theta_des = start_theta + theta
#self.set_steer_theta(theta_des, idx )
#theta_wig = start_theta_wiggle + 30.0 * math.cos(deg2rad(4.0 * theta_cnt))
#torque_roll = 2.0 * math.cos(deg2rad(6.0 * theta_cnt))
#self.set_steer_theta(theta_wig, idx_wiggle )
#self.set_roll_torques(torque_roll, idx)
proxy.step()
#str_tqs = self.get_steer_torques()
#rol_tqs = self.get_roll_torques()
#print 'Steer Joint Tq at idx', idx, ':', str_tqs[idx]
#print 'Roll Joint Tq at idx', idx, ':', rol_tqs[idx]
#print 'Steer Tq at idx', idx_wiggle, ':', str_tqs[idx_wiggle]
#print '.'
self.set_steer_torques(10.0, idx)
#theta_step = 2.0
#theta_cnt += theta_step
#theta_err = theta_des - self.get_steer_theta()[idx]
#print 'theta err:', theta_err
#if theta_err < 40.0:
# theta += theta_step
if time.time() - ts > time_out:
self.disable_breakbeam(idx)
self.set_mode_caster_off(idx)
#self.set_mode_caster_off(idx_wiggle)
self.set_roll_torques(0.0, idx)
self.set_steer_torques(0.0, idx)
proxy.step()
return
time.sleep(0.1)
self.set_steer_torques(0.0, idx)
self.set_roll_torques(0.0, idx)
self.set_mode_caster_off(idx)
#self.set_mode_caster_off(idx_wiggle)
self.disable_breakbeam(idx)
proxy.step()
print "Caster: ", caster_names[idx], " Calibrated."
def enable_breakbeam(self,idx):
self.param.enable_breakbeam[idx] = 1
def disable_breakbeam(self,idx):
self.param.enable_breakbeam[idx] = 0
def is_calibrated(self,idx):
return self.status.calibrated[idx]
def set_ctrl_mode(self, mode):
self.command.ctrl_mode=mode
def set_traj_mode(self, mode):
self.command.traj_mode=mode
def set_mode_off(self):
"""
Sets all caster controller modes to off.
"""
self.command.ctrl_mode=mob.OMNIBASE_CTRL_OFF
def set_mode_cart_local(self):
self.command.ctrl_mode=mob.OMNIBASE_CTRL_CART_LOCAL
def set_mode_caster_velocity(self, caster_idx):
self.command.ctrl_mode=mob.OMNIBASE_CTRL_CASTER
M3Component.set_int_array(self,self.command.caster_mode,mob.OMNIBASE_CASTER_VELOCITY,caster_idx)
def set_mode_caster_theta(self, caster_idx):
self.command.ctrl_mode=mob.OMNIBASE_CTRL_CASTER
M3Component.set_int_array(self,self.command.caster_mode,mob.OMNIBASE_CASTER_THETA,caster_idx)
def set_mode_caster_torque(self,caster_idx):
"""
Allows specified caster to be controlled with torque commands and places omnibase in 'caster_mode'.
:param caster_idx: Index of caster.
:type caster_idx: array_like, shape < ncasters, optional
:See Also:
:meth:`M3OmniBase.set_mode_caster_off`
:meth:`M3OmniBase.set_mode_caster`
"""
self.command.ctrl_mode=mob.OMNIBASE_CTRL_CASTER
M3Component.set_int_array(self,self.command.caster_mode,mob.OMNIBASE_CASTER_TORQUE,caster_idx)
def set_mode_caster_off(self,caster_idx):
"""
Turns off controller for specified caster and places omnibase in 'caster_mode'.
:param caster_idx: Index of caster.
:type caster_idx: array_like, shape < ncasters, optional
:See Also:
:meth:`M3OmniBase.set_mode_caster_torque`
:meth:`M3OmniBase.set_mode_caster`
"""
self.command.ctrl_mode=mob.OMNIBASE_CTRL_CASTER
M3Component.set_int_array(self,self.command.caster_mode,mob.OMNIBASE_CASTER_OFF,caster_idx)
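    # NOTE: this two-argument set_mode_caster is shadowed by the zero-argument
    # set_mode_caster() defined further below in this class, so only the later
    # definition is visible on instances.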
def set_mode_caster(self,mode,caster_idx=None):
M3Component.set_int_array(self,self.command.caster_mode,mode, caster_idx)
def set_mode_traj_goal(self):
"""
Allows omnibase to be controlled by issuing a goal position in global cartesian space.
:See Also:
:meth:`M3OmniBase.is_traj_goal_reached`
:meth:`M3OmniBase.set_traj_goal`
:meth:`M3OmniBase.set_mode_off`
:meth:`M3OmniBase.set_mode_caster`
"""
self.command.traj_mode = mob.OMNIBASE_TRAJ_GOAL
self.command.ctrl_mode = mob.OMNIBASE_CTRL_OPSPACE_TRAJ
def set_mode_traj_via(self):
self.command.traj_mode = mob.OMNIBASE_TRAJ_VIAS
self.command.ctrl_mode = mob.OMNIBASE_CTRL_OPSPACE_TRAJ
def set_mode_joystick(self):
"""
Allows omnibase to be controlled by joystick commands.
:See Also:
:meth:`M3OmniBase.set_joystick_x`
:meth:`M3OmniBase.set_joystick_y`
:meth:`M3OmniBase.set_joystick_yaw`
:meth:`M3OmniBase.set_joystick_button`
"""
self.command.traj_mode = mob.OMNIBASE_TRAJ_JOYSTICK
self.command.ctrl_mode = mob.OMNIBASE_CTRL_OPSPACE_TRAJ
def set_mode_caster(self):
"""
Allows omnibase to be controlled at the caster level as opposed cartestian space.
Additional commands must be issued to set the control mode for each individual caster.
:See Also:
:meth:`M3OmniBase.set_mode_caster_torque`
:meth:`M3OmniBase.set_mode_caster_off`
"""
self.command.ctrl_mode=mob.OMNIBASE_CTRL_CASTER
def set_mode_op_space_force(self):
self.command.ctrl_mode=mob.OMNIBASE_CTRL_OPSPACE_FORCE
def set_traj_mode_off(self):
self.command.traj_mode=mob.OMNIBASE_TRAJ_OFF
def set_local_velocities(self,x_dot,y_dot,heading_dot):
self.command.local_velocity_desired[0] = x_dot
self.command.local_velocity_desired[1] = y_dot
self.command.local_velocity_desired[2] = heading_dot
def set_local_positions(self,x,y,heading):
self.command.local_position_desired[0] = x
self.command.local_position_desired[1] = y
self.command.local_position_desired[2] = heading
def set_local_accelerations(self,x_dotdot,y_dotdot,heading_dotdot):
self.command.local_acceleration_desired[0] = x_dotdot
self.command.local_acceleration_desired[1] = y_dotdot
self.command.local_acceleration_desired[2] = heading_dotdot
def set_roll_torques(self, tq, ind=None):
"""
Sets roll torque values for selected casters. A list of caster indexes can be supplied
to set specific caster torques, or the index
        can be omitted if the length of tq is equal to the number of casters.
:param tq: Roll torque values in Nm.
:type tq: array_like
:param ind: Index of casters.
:type ind: array_like, shape(len(tq)), optional
:See Also:
:meth:`M3OmniBase.set_mode_caster_torque`
:meth:`M3OmniBase.set_steer_torques`
"""
M3Component.set_float_array(self,self.command.roll_torque_desired,tq,ind)
def set_steer_torques(self, tq, ind=None):
"""
Sets steer torque values for selected casters. A list of caster indexes can be supplied
to set specific caster torques, or the index
        can be omitted if the length of tq is equal to the number of casters.
:param tq: Steer torque values in Nm.
:type tq: array_like
:param ind: Index of casters.
:type ind: array_like, shape(len(tq)), optional
:See Also:
:meth:`M3OmniBase.set_mode_caster_torque`
:meth:`M3OmniBase.set_roll_torques`
"""
M3Component.set_float_array(self,self.command.steer_torque_desired,tq,ind)
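    # Illustrative usage (not part of the original file; `omni` is an
    # M3OmniBase instance as in the class docstring): torques can be given for
    # every caster at once, or for a subset selected by index.
    #
    #   omni.set_roll_torques([0.5, 0.5, 0.5, 0.5])       # one value per caster
    #   omni.set_steer_torques([1.0, -1.0], ind=[0, 2])   # only casters 0 and 2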
def set_steer_theta(self, th, ind=None):
M3Component.set_float_array(self,self.command.steer_theta_desired,th,ind)
def set_steer_velocities(self, v, ind=None):
M3Component.set_float_array(self,self.command.steer_velocity_desired,v,ind)
def set_roll_velocities(self, v, ind=None):
M3Component.set_float_array(self,self.command.roll_velocity_desired,v,ind)
def set_max_linear_accel(self, x):
"""
Sets maximum linear acceleration of omnibase in m/s^2
:param x: Max linear acceleration in m/s^2
:type x: float
.. Note:: Omnibase acceleration is still upper limited by absolute values
defined by parameters in configuration file.
:See Also:
:meth:`M3OmniBase.set_max_linear_velocity`
:meth:`M3OmniBase.set_max_rotation_accel`
:meth:`M3OmniBase.set_max_rotation_velocity`
"""
self.command.max_linear_acceleration = x
def set_max_linear_velocity(self, x):
"""
Sets maximum linear velocity of omnibase in m/s
:param x: Max linear velocity in m/s
:type x: float
.. Note:: Omnibase velocity is still upper limited by absolute values
defined by parameters in configuration file.
:See Also:
:meth:`M3OmniBase.set_max_linear_accel`
:meth:`M3OmniBase.set_max_rotation_accel`
:meth:`M3OmniBase.set_max_rotation_velocity`
"""
self.command.max_linear_velocity = x
def set_max_rotation_accel(self, x):
"""
Sets maximum rotational acceleration of omnibase in deg/sec^2
:param x: Max rotational acceleration in deg/sec^2
:type x: float
.. Note:: Omnibase acceleration is still upper limited by absolute values
defined by parameters in configuration file.
:See Also:
:meth:`M3OmniBase.set_max_linear_accel`
:meth:`M3OmniBase.set_max_linear_velocity`
:meth:`M3OmniBase.set_max_rotation_velocity`
"""
self.command.max_rotation_acceleration = x
def set_max_rotation_velocity(self, x):
"""
Sets maximum rotational velocity of omnibase in deg/s
:param x: Max rotational velocity in deg/s
:type x: float
.. Note:: Omnibase velocity is still upper limited by absolute values
defined by parameters in configuration file.
:See Also:
:meth:`M3OmniBase.set_max_linear_accel`
:meth:`M3OmniBase.set_max_rotation_accel`
:meth:`M3OmniBase.set_max_linear_velocity`
"""
self.command.max_rotation_velocity = x
def set_joystick_x(self, x):
"""
Sets value of X-axis command from joystick.
:param x: X-axis joystick command.
:type x: float (-1.0 <-> 1.0)
:See Also:
:meth:`M3OmniBase.set_mode_joystick`
:meth:`M3OmniBase.set_joystick_y`
:meth:`M3OmniBase.set_joystick_yaw`
:meth:`M3OmniBase.set_joystick_button`
"""
self.command.joystick_x = x
def set_joystick_y(self,y):
"""
Sets value of Y-axis command from joystick.
:param y: Y-axis joystick command.
:type y: float (-1.0 <-> 1.0)
:See Also:
:meth:`M3OmniBase.set_mode_joystick`
:meth:`M3OmniBase.set_joystick_x`
:meth:`M3OmniBase.set_joystick_yaw`
:meth:`M3OmniBase.set_joystick_button`
"""
self.command.joystick_y = y
def set_joystick_yaw(self,yaw):
"""
Sets value of Yaw-axis command from joystick.
:param yaw: Yaw-axis joystick command.
:type yaw: float (-1.0 <-> 1.0)
:See Also:
:meth:`M3OmniBase.set_mode_joystick`
:meth:`M3OmniBase.set_joystick_x`
:meth:`M3OmniBase.set_joystick_y`
:meth:`M3OmniBase.set_joystick_button`
"""
self.command.joystick_yaw = yaw
def set_joystick_button(self,b):
"""
Sets value of joystick button command. Currently a value of -1 should be sent to disable joystick,
and a value of 0 should be sent to enable joystick in default mode.
:param b: joystick button command.
:type b: int [-1,0]
:See Also:
:meth:`M3OmniBase.set_mode_joystick`
:meth:`M3OmniBase.set_joystick_x`
:meth:`M3OmniBase.set_joystick_y`
:meth:`M3OmniBase.set_joystick_yaw`
"""
self.command.joystick_button = b
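    # Minimal joystick sketch (illustrative; assumes `omni` is this component,
    # `proxy` a running M3RtProxy, and joystick mode already selected with
    # set_mode_joystick(), as in the class docstring):
    #
    #   omni.set_joystick_button(0)     # 0 enables the joystick in default mode
    #   omni.set_joystick_x(0.2)        # commands are in the range -1.0 to 1.0
    #   omni.set_joystick_y(0.0)
    #   omni.set_joystick_yaw(0.0)
    #   proxy.step()
    #   ...
    #   omni.set_joystick_button(-1)    # -1 disables the joystick again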
def set_op_space_forces(self, x, y, torque):
self.command.opspace_force_desired[0] = x
self.command.opspace_force_desired[1] = y
self.command.opspace_force_desired[2] = torque
def get_global_position(self):
"""
Gets position of omnibase origin frame in the global frame.
:returns: position (x,y,yaw) in (m,m,deg)
:rtype: array, shape (3)
"""
return nu.array(self.status.global_position,float)
def get_motor_torques(self):
"""
Gets motor torque values at the actuator level (not joint/caster output).
:returns: torque values in Nm
:rtype: array, shape (ncasters*2)
:See Also:
:meth:`M3OmniBase.get_steer_torques`
:meth:`M3OmniBase.get_roll_torques`
"""
return nu.array(self.status.motor_torque_desired,float)
def get_steer_torques(self):
"""
Gets steer joint torque values at the caster level.
:returns: torque values in Nm
:rtype: array, shape (ncasters)
:See Also:
:meth:`M3OmniBase.get_motor_torques`
:meth:`M3OmniBase.get_roll_torques`
"""
return nu.array(self.status.steer_torque_desired,float)
def get_steer_theta(self):
"""
        Gets steer joint angles at the caster level.
        :returns: angle values
:rtype: array, shape (ncasters)
:See Also:
:meth:`M3OmniBase.get_motor_torques`
:meth:`M3OmniBase.get_roll_torques`
"""
return nu.array(self.status.steer_angle,float)
def get_roll_torques(self):
"""
Gets roll joint torque values at the caster level.
:returns: torque values in Nm
:rtype: array, shape (ncasters)
:See Also:
:meth:`M3OmniBase.get_steer_torques`
        :meth:`M3OmniBase.get_motor_torques`
"""
return nu.array(self.status.roll_torque_desired,float)
def get_local_position(self):
return nu.array(self.status.local_position,float)
def get_desired_position(self):
return nu.array(self.status.position_desired,float)
def get_desired_acceleration(self):
return nu.array(self.status.local_acceleration,float)
def get_bus_voltage(self):
"""
Gets bus voltage for motor power.
:returns: value in volts
:rtype: float
"""
return self.status.bus_voltage
def set_traj_goal(self, x, y, heading):
"""
Sets desired end location goal in global frame for trajectory controller.
:param x: desired X-axis value in global frame
:type x: float
:param y: desired Y-axis value in global frame
:type y: float
:param heading: desired Yaw-axis value in global frame
:type heading: float
:See Also:
:meth:`M3OmniBase.set_mode_traj_goal`
:meth:`M3OmniBase.is_traj_goal_reached`
"""
self.command.traj_goal[0] = x
self.command.traj_goal[1] = y
self.command.traj_goal[2] = heading
def is_traj_goal_reached(self):
"""
        Returns true or false depending on whether the active goal location has been
reached by the controller.
:returns: true/false
:rtype: bool
:See Also:
:meth:`M3OmniBase.set_traj_goal`
:meth:`M3OmniBase.set_mode_traj_goal`
"""
return self.status.traj_goal_reached
def set_local_position(self,x,y,yaw,proxy):
"""
Sets the current local position of the odometry system.
:param x: desired X-axis value in local frame
:type x: float
:param y: desired Y-axis value in local frame
:type y: float
:param yaw: desired Yaw-axis value in local frame
:type yaw: float
.. Note:: Should be set to zero after starting real-time server component
because of initial drift caused by non-zero encoder values.
:See Also:
:meth:`M3OmniBase.set_global_position`
"""
self.command.local_position[0] = x
self.command.local_position[1] = y
self.command.local_position[2] = yaw
self.command.adjust_local_position = 1
proxy.step()
time.sleep(0.1)
self.command.adjust_local_position = 0
proxy.step()
'''def set_local_zero(self):
self.command.local_position[0] = 0
self.command.local_position[1] = 0
self.command.local_position[2] = 0
self.command.adjust_local_position = 1'''
def set_global_position(self,x,y,yaw,proxy):
"""
Sets the current global position of the odometry system.
:param x: desired X-axis value in global frame
:type x: float
:param y: desired Y-axis value in global frame
:type y: float
:param yaw: desired Yaw-axis value in global frame
:type yaw: float
.. Note:: Should be set to zero after starting real-time server component
because of initial drift caused by non-zero encoder values.
:See Also:
:meth:`M3OmniBase.set_local_position`
"""
self.command.global_position[0] = x
self.command.global_position[1] = y
self.command.global_position[2] = yaw
self.command.adjust_global_position = 1
proxy.step()
time.sleep(0.1)
self.command.adjust_global_position = 0
proxy.step()
def add_via(self,x_des, y_des, yaw_des):
self.vias.append([[x_des, y_des, yaw_des], 0 , 0])
def load_command(self):
self.command.ClearField('vias')
nadd=min(20,len(self.vias)) #only add 20 per cycle to keep packet size down
for n in range(nadd):
self.via_idx=self.via_idx+1
pos_des=self.vias[n][0]
lin_vel_avg=self.vias[n][1]
ang_vel_avg=self.vias[n][2]
self.command.vias.add()
for i in range(3):
self.command.vias[-1].position_desired.append(pos_des[i])
self.command.vias[-1].lin_velocity_avg = lin_vel_avg
self.command.vias[-1].ang_velocity_avg = ang_vel_avg
self.command.vias[-1].idx=self.via_idx
print self.command.vias[-1]
self.vias=self.vias[nadd:]
| mit | 7,803,926,033,021,083,000 | 42.44837 | 150 | 0.492682 | false |
exu/poligon | python/python_koans/python2/koans/about_dice_project.py | 1 | 2001 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import random
class DiceSet(object):
def __init__(self):
self._values = []
@property
def values(self):
return self._values
def roll(self, n):
self._values = []
for i in range(0, n):
self._values.append(random.randint(1, 6))
# Needs implementing!
# Tip: random.randint(min, max) can be used to generate random numbers
class AboutDiceProject(Koan):
def test_can_create_a_dice_set(self):
dice = DiceSet()
self.assertTrue(dice)
def test_rolling_the_dice_returns_a_set_of_integers_between_1_and_6(self):
dice = DiceSet()
dice.roll(5)
self.assertTrue(isinstance(dice.values, list), "should be a list")
self.assertEqual(5, len(dice.values))
for value in dice.values:
self.assertTrue(
value >= 1 and value <= 6,
"value " + str(value) + " must be between 1 and 6")
def test_dice_values_do_not_change_unless_explicitly_rolled(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
second_time = dice.values
self.assertEqual(first_time, second_time)
def test_dice_values_should_change_between_rolls(self):
dice = DiceSet()
dice.roll(5)
first_time = list(dice.values)
dice.roll(5)
second_time = list(dice.values)
self.assertNotEqual(first_time, second_time, \
"Two rolls should not be equal")
# THINK ABOUT IT:
#
# If the rolls are random, then it is possible (although not
# likely) that two consecutive rolls are equal. What would be a
# better way to test this?
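    # One possibility (illustrative sketch, not the official koan answer):
    # re-roll a few times and only require that some roll differs, so a rare
    # coincidental match cannot fail the test.
    #
    #   dice.roll(5)
    #   first = list(dice.values)
    #   rolled_differently = False
    #   for _ in range(10):
    #       dice.roll(5)
    #       if list(dice.values) != first:
    #           rolled_differently = True
    #           break
    #   self.assertTrue(rolled_differently)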
def test_you_can_roll_different_numbers_of_dice(self):
dice = DiceSet()
dice.roll(3)
self.assertEqual(3, len(dice.values))
dice.roll(1)
self.assertEqual(1, len(dice.values))
| mit | -2,024,257,727,222,899,500 | 26.791667 | 78 | 0.587206 | false |
blckshrk/Weboob | modules/parolesmania/backend.py | 1 | 1697 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.lyrics import ICapLyrics, SongLyrics
from weboob.tools.backend import BaseBackend
from .browser import ParolesmaniaBrowser
from urllib import quote_plus
__all__ = ['ParolesmaniaBackend']
class ParolesmaniaBackend(BaseBackend, ICapLyrics):
NAME = 'parolesmania'
MAINTAINER = u'Julien Veyssier'
EMAIL = '[email protected]'
VERSION = '0.h'
DESCRIPTION = 'Paroles Mania lyrics website'
LICENSE = 'AGPLv3+'
BROWSER = ParolesmaniaBrowser
def get_lyrics(self, id):
return self.browser.get_lyrics(id)
def iter_lyrics(self, criteria, pattern):
return self.browser.iter_lyrics(criteria, quote_plus(pattern.encode('utf-8')))
def fill_songlyrics(self, songlyrics, fields):
if 'content' in fields:
sl = self.get_lyrics(songlyrics.id)
songlyrics.content = sl.content
return songlyrics
OBJECTS = {
SongLyrics: fill_songlyrics
}
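# Illustrative usage sketch (assumptions: a weboob environment where this
# backend has been instantiated by the core as `backend`, and a placeholder
# search pattern; fillobj is the standard BaseBackend helper that triggers
# fill_songlyrics above):
#
#   for songlyrics in backend.iter_lyrics('song', 'some title'):
#       backend.fillobj(songlyrics, ['content'])
#       print songlyrics.content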
| agpl-3.0 | 5,074,640,716,121,560,000 | 31.018868 | 86 | 0.714791 | false |
harisbal/pandas | pandas/tests/generic/test_generic.py | 1 | 36186 | # -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from copy import copy, deepcopy
from warnings import catch_warnings, simplefilter
import pytest
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas import (Series, DataFrame, Panel,
date_range, MultiIndex)
import pandas.io.formats.printing as printing
from pandas.compat import range, zip, PY3
from pandas.util.testing import (assert_raises_regex,
assert_series_equal,
assert_panel_equal,
assert_frame_equal)
import pandas.util.testing as tm
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if is_scalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
idx = list('ABCD')
# relabeling values passed into self.rename
args = [
str.lower,
{x: x.lower() for x in idx},
Series({x: x.lower() for x in idx}),
]
for axis in self._axes():
kwargs = {axis: idx}
obj = self._construct(4, **kwargs)
for arg in args:
# rename a single axis
result = obj.rename(**{axis: arg})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
        # _get_numeric_data includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
assert s.get(i) == d
assert s.get(i, d) == d
assert s.get(i, "z") == d
for other in others:
assert s.get(other, "z") == "z"
assert s.get(other, other) == other
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
pytest.raises(ValueError, lambda: bool(obj == 0))
pytest.raises(ValueError, lambda: bool(obj == 1))
pytest.raises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
pytest.raises(ValueError, lambda: bool(obj == 0))
pytest.raises(ValueError, lambda: bool(obj == 1))
pytest.raises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
pytest.raises(ValueError, lambda: bool(obj == 0))
pytest.raises(ValueError, lambda: bool(obj == 1))
pytest.raises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
pytest.raises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
printing.pprint_thing("this works and shouldn't")
pytest.raises(ValueError, f)
pytest.raises(ValueError, lambda: obj1 and obj2)
pytest.raises(ValueError, lambda: obj1 or obj2)
pytest.raises(ValueError, lambda: not obj1)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# see gh-5191
# Compound dtypes should raise NotImplementedError.
def f(dtype):
return self._construct(shape=3, value=1, dtype=dtype)
pytest.raises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
assert v is None
else:
assert v == getattr(y, m, None)
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is an a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
pytest.skip('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
        # Check for stability when it receives a seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
os1, os2 = [], []
for _ in range(2):
np.random.seed(test)
os1.append(o.sample(n=4))
os2.append(o.sample(frac=0.7))
self._compare(*os1)
self._compare(*os2)
# Check for error when random_state argument invalid.
with pytest.raises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with pytest.raises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with pytest.raises(ValueError):
o.sample(n=-3)
with pytest.raises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with pytest.raises(ValueError):
o.sample(n=3.2)
# Check lengths are right
        assert len(o.sample(n=4)) == 4
        assert len(o.sample(frac=0.34)) == 3
        assert len(o.sample(frac=0.36)) == 4
###
# Check weights
###
# Weight length must be right
with pytest.raises(ValueError):
o.sample(n=3, weights=[0, 1])
with pytest.raises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with pytest.raises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with pytest.raises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with pytest.raises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with pytest.raises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with pytest.raises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with pytest.raises(ValueError):
o.sample(n=3, weights=nan_weights)
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
assert o.size == np.prod(o.shape)
assert o.size == 10 ** len(o.axes)
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
assert len(np.array_split(o, 5)) == 5
assert len(np.array_split(o, 2)) == 2
def test_unexpected_keyword(self): # GH8597
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assert_raises_regex(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assert_raises_regex(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assert_raises_regex(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assert_raises_regex(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
# See gh-12301
def test_stat_unexpected_keyword(self):
obj = self._construct(5)
starwars = 'Star Wars'
errmsg = 'unexpected keyword'
with assert_raises_regex(TypeError, errmsg):
obj.max(epic=starwars) # stat_function
with assert_raises_regex(TypeError, errmsg):
obj.var(epic=starwars) # stat_function_ddof
with assert_raises_regex(TypeError, errmsg):
obj.sum(epic=starwars) # cum_function
with assert_raises_regex(TypeError, errmsg):
obj.any(epic=starwars) # logical_function
def test_api_compat(self):
# GH 12021
# compat for __name__, __qualname__
obj = self._construct(5)
for func in ['sum', 'cumsum', 'any', 'var']:
f = getattr(obj, func)
assert f.__name__ == func
if PY3:
assert f.__qualname__.endswith(func)
def test_stat_non_defaults_args(self):
obj = self._construct(5)
out = np.array([0])
errmsg = "the 'out' parameter is not supported"
with assert_raises_regex(ValueError, errmsg):
obj.max(out=out) # stat_function
with assert_raises_regex(ValueError, errmsg):
obj.var(out=out) # stat_function_ddof
with assert_raises_regex(ValueError, errmsg):
obj.sum(out=out) # cum_function
with assert_raises_regex(ValueError, errmsg):
obj.any(out=out) # logical_function
def test_truncate_out_of_bounds(self):
# GH11382
# small
shape = [int(2e3)] + ([1] * (self._ndim - 1))
small = self._construct(shape, dtype='int8', value=1)
self._compare(small.truncate(), small)
self._compare(small.truncate(before=0, after=3e3), small)
self._compare(small.truncate(before=-1, after=2e3), small)
# big
shape = [int(2e6)] + ([1] * (self._ndim - 1))
big = self._construct(shape, dtype='int8', value=1)
self._compare(big.truncate(), big)
self._compare(big.truncate(before=0, after=3e6), big)
self._compare(big.truncate(before=-1, after=2e6), big)
def test_validate_bool_args(self):
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
super(DataFrame, df).rename_axis(mapper={'a': 'x', 'b': 'y'},
axis=1, inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df).drop('a', axis=1, inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df).sort_index(inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df)._consolidate(inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df).fillna(value=0, inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df).replace(to_replace=1, value=7,
inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df).interpolate(inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df)._where(cond=df.a > 2, inplace=value)
with pytest.raises(ValueError):
super(DataFrame, df).mask(cond=df.a > 2, inplace=value)
def test_copy_and_deepcopy(self):
# GH 15444
for shape in [0, 1, 2]:
obj = self._construct(shape)
for func in [copy,
deepcopy,
lambda x: x.copy(deep=False),
lambda x: x.copy(deep=True)]:
obj_copy = func(obj)
assert obj_copy is not obj
self._compare(obj_copy, obj)
@pytest.mark.parametrize("periods,fill_method,limit,exp", [
(1, "ffill", None, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, 0]),
(1, "ffill", 1, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, np.nan]),
(1, "bfill", None, [np.nan, 0, 0, 1, 1, 1.5, np.nan, np.nan]),
(1, "bfill", 1, [np.nan, np.nan, 0, 1, 1, 1.5, np.nan, np.nan]),
(-1, "ffill", None, [np.nan, np.nan, -.5, -.5, -.6, 0, 0, np.nan]),
(-1, "ffill", 1, [np.nan, np.nan, -.5, -.5, -.6, 0, np.nan, np.nan]),
(-1, "bfill", None, [0, 0, -.5, -.5, -.6, np.nan, np.nan, np.nan]),
(-1, "bfill", 1, [np.nan, 0, -.5, -.5, -.6, np.nan, np.nan, np.nan])
])
def test_pct_change(self, periods, fill_method, limit, exp):
vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
obj = self._typ(vals)
func = getattr(obj, 'pct_change')
res = func(periods=periods, fill_method=fill_method, limit=limit)
if type(obj) is DataFrame:
tm.assert_frame_equal(res, DataFrame(exp))
else:
tm.assert_series_equal(res, Series(exp))
class TestNDFrame(object):
# tests that don't fit elsewhere
    def test_sample(self):
# Fixes issue: 2419
# additional specific object based tests
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with pytest.raises(ValueError):
s.sample(n=3, weights='weight_column')
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
panel = Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with pytest.raises(ValueError):
panel.sample(n=1, weights='weight_column')
with pytest.raises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with pytest.raises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
        # Check that sampling re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with pytest.raises(ValueError):
df.sample(n=1, axis=2)
with pytest.raises(ValueError):
df.sample(n=1, axis='not_a_name')
with pytest.raises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with pytest.raises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
p = Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have empty values to be filed with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with pytest.raises(ValueError):
df.sample(1, weights=s4)
def test_squeeze(self):
# noop
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries()]:
tm.assert_series_equal(s.squeeze(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.squeeze(), df)
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
for p in [tm.makePanel()]:
tm.assert_panel_equal(p.squeeze(), p)
# squeezing
df = tm.makeTimeDataFrame().reindex(columns=['A'])
tm.assert_series_equal(df.squeeze(), df['A'])
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
p = tm.makePanel().reindex(items=['ItemA'])
tm.assert_frame_equal(p.squeeze(), p['ItemA'])
p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A'])
tm.assert_series_equal(p.squeeze(), p.loc['ItemA', :, 'A'])
# don't fail with 0 length dimensions GH11229 & GH8999
empty_series = Series([], name='five')
empty_frame = DataFrame([empty_series])
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
empty_panel = Panel({'six': empty_frame})
[tm.assert_series_equal(empty_series, higher_dim.squeeze())
for higher_dim in [empty_series, empty_frame, empty_panel]]
# axis argument
df = tm.makeTimeDataFrame(nper=1).iloc[:, :1]
assert df.shape == (1, 1)
tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis='index'), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0])
tm.assert_series_equal(df.squeeze(axis='columns'), df.iloc[:, 0])
assert df.squeeze() == df.iloc[0, 0]
pytest.raises(ValueError, df.squeeze, axis=2)
pytest.raises(ValueError, df.squeeze, axis='x')
df = tm.makeTimeDataFrame(3)
tm.assert_frame_equal(df.squeeze(axis=0), df)
def test_numpy_squeeze(self):
s = tm.makeFloatSeries()
tm.assert_series_equal(np.squeeze(s), s)
df = tm.makeTimeDataFrame().reindex(columns=['A'])
tm.assert_series_equal(np.squeeze(df), df['A'])
def test_transpose(self):
msg = (r"transpose\(\) got multiple values for "
r"keyword argument 'axes'")
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries()]:
# calls implementation in pandas/core/base.py
tm.assert_series_equal(s.transpose(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.transpose().transpose(), df)
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
for p in [tm.makePanel()]:
tm.assert_panel_equal(p.transpose(2, 0, 1)
.transpose(1, 2, 0), p)
tm.assert_raises_regex(TypeError, msg, p.transpose,
2, 0, 1, axes=(2, 0, 1))
def test_numpy_transpose(self):
msg = "the 'axes' parameter is not supported"
s = tm.makeFloatSeries()
tm.assert_series_equal(
np.transpose(s), s)
tm.assert_raises_regex(ValueError, msg,
np.transpose, s, axes=1)
df = tm.makeTimeDataFrame()
tm.assert_frame_equal(np.transpose(
np.transpose(df)), df)
tm.assert_raises_regex(ValueError, msg,
np.transpose, df, axes=1)
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
p = tm.makePanel()
tm.assert_panel_equal(np.transpose(
np.transpose(p, axes=(2, 0, 1)),
axes=(1, 2, 0)), p)
def test_take(self):
indices = [1, 5, -2, 6, 3, -1]
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries()]:
out = s.take(indices)
expected = Series(data=s.values.take(indices),
index=s.index.take(indices), dtype=s.dtype)
tm.assert_series_equal(out, expected)
for df in [tm.makeTimeDataFrame()]:
out = df.take(indices)
expected = DataFrame(data=df.values.take(indices, axis=0),
index=df.index.take(indices),
columns=df.columns)
tm.assert_frame_equal(out, expected)
indices = [-3, 2, 0, 1]
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
for p in [tm.makePanel()]:
out = p.take(indices)
expected = Panel(data=p.values.take(indices, axis=0),
items=p.items.take(indices),
major_axis=p.major_axis,
minor_axis=p.minor_axis)
tm.assert_panel_equal(out, expected)
def test_take_invalid_kwargs(self):
indices = [-3, 2, 0, 1]
s = tm.makeFloatSeries()
df = tm.makeTimeDataFrame()
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
p = tm.makePanel()
for obj in (s, df, p):
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, obj.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, obj.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, obj.take,
indices, mode='clip')
def test_equals(self):
s1 = pd.Series([1, 2, 3], index=[0, 2, 1])
s2 = s1.copy()
assert s1.equals(s2)
s1[1] = 99
assert not s1.equals(s2)
# NaNs compare as equal
s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3])
s2 = s1.copy()
assert s1.equals(s2)
s2[0] = 9.9
assert not s1.equals(s2)
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s1 = Series([1, 2, np.nan], index=idx)
s2 = s1.copy()
assert s1.equals(s2)
# Add object dtype column with nans
index = np.random.random(10)
df1 = DataFrame(
np.random.random(10, ), index=index, columns=['floats'])
df1['text'] = 'the sky is so blue. we could use more chocolate.'.split(
)
df1['start'] = date_range('2000-1-1', periods=10, freq='T')
df1['end'] = date_range('2000-1-1', periods=10, freq='D')
df1['diff'] = df1['end'] - df1['start']
df1['bool'] = (np.arange(10) % 3 == 0)
df1.loc[::2] = np.nan
df2 = df1.copy()
assert df1['text'].equals(df2['text'])
assert df1['start'].equals(df2['start'])
assert df1['end'].equals(df2['end'])
assert df1['diff'].equals(df2['diff'])
assert df1['bool'].equals(df2['bool'])
assert df1.equals(df2)
assert not df1.equals(object)
# different dtype
different = df1.copy()
different['floats'] = different['floats'].astype('float32')
assert not df1.equals(different)
# different index
different_index = -index
different = df2.set_index(different_index)
assert not df1.equals(different)
# different columns
different = df2.copy()
different.columns = df2.columns[::-1]
assert not df1.equals(different)
# DatetimeIndex
index = pd.date_range('2000-1-1', periods=10, freq='T')
df1 = df1.set_index(index)
df2 = df1.copy()
assert df1.equals(df2)
# MultiIndex
df3 = df1.set_index(['text'], append=True)
df2 = df1.set_index(['text'], append=True)
assert df3.equals(df2)
df2 = df1.set_index(['floats'], append=True)
assert not df3.equals(df2)
# NaN in index
df3 = df1.set_index(['floats'], append=True)
df2 = df1.set_index(['floats'], append=True)
assert df3.equals(df2)
# GH 8437
a = pd.Series([False, np.nan])
b = pd.Series([False, np.nan])
c = pd.Series(index=range(2))
d = pd.Series(index=range(2))
e = pd.Series(index=range(2))
f = pd.Series(index=range(2))
c[:-1] = d[:-1] = e[0] = f[0] = False
assert a.equals(a)
assert a.equals(b)
assert a.equals(c)
assert a.equals(d)
assert a.equals(e)
assert e.equals(f)
def test_describe_raises(self):
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
with pytest.raises(NotImplementedError):
tm.makePanel().describe()
def test_pipe(self):
df = DataFrame({'A': [1, 2, 3]})
f = lambda x, y: x ** y
result = df.pipe(f, 2)
expected = DataFrame({'A': [1, 4, 9]})
assert_frame_equal(result, expected)
result = df.A.pipe(f, 2)
assert_series_equal(result, expected.A)
def test_pipe_tuple(self):
df = DataFrame({'A': [1, 2, 3]})
f = lambda x, y: y
result = df.pipe((f, 'y'), 0)
assert_frame_equal(result, df)
result = df.A.pipe((f, 'y'), 0)
assert_series_equal(result, df.A)
def test_pipe_tuple_error(self):
df = DataFrame({"A": [1, 2, 3]})
f = lambda x, y: y
with pytest.raises(ValueError):
df.pipe((f, 'y'), x=1, y=0)
with pytest.raises(ValueError):
df.A.pipe((f, 'y'), x=1, y=0)
def test_pipe_panel(self):
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
wp = Panel({'r1': DataFrame({"A": [1, 2, 3]})})
f = lambda x, y: x + y
result = wp.pipe(f, 2)
expected = wp + 2
assert_panel_equal(result, expected)
result = wp.pipe((f, 'y'), x=1)
expected = wp + 1
assert_panel_equal(result, expected)
with pytest.raises(ValueError):
result = wp.pipe((f, 'y'), x=1, y=1)
@pytest.mark.parametrize('box', [pd.Series, pd.DataFrame])
def test_axis_classmethods(self, box):
obj = box()
values = (list(box._AXIS_NAMES.keys()) +
list(box._AXIS_NUMBERS.keys()) +
list(box._AXIS_ALIASES.keys()))
for v in values:
assert obj._get_axis_number(v) == box._get_axis_number(v)
assert obj._get_axis_name(v) == box._get_axis_name(v)
assert obj._get_block_manager_axis(v) == \
box._get_block_manager_axis(v)
| bsd-3-clause | 2,502,932,496,458,174,000 | 34.03001 | 79 | 0.520063 | false |
tdjordan/tortoisegit | gitproc.py | 1 | 3594 | #
# front-end for TortoiseGit dialogs
#
# Copyright (C) 2007 TK Soh <[email protected]>
#
import os
import sys
from tortoisegit.tgitutil import get_prog_root
# always use the git exe installed with TortoiseGit
tgitdir = get_prog_root()
try:
os.environ['PATH'] = os.path.pathsep.join([tgitdir, os.environ['PATH']])
except KeyError:
os.environ['PATH'] = tgitdir
if not sys.stdin.isatty():
    try:
        import pywintypes
        import win32traceutil
    except ImportError:
        pass
    except pywintypes.error:
        pass
# Map gitproc commands to dialog modules in gitgtk/
from gitgtk import commit, status, addremove, tagadd, tags, history, merge
from gitgtk import diff, revisions, update, serve, clone, synch, gitcmd, about
from gitgtk import recovery, tgitconfig, datamine
_dialogs = { 'commit' : commit, 'status' : status, 'revert' : status,
'add' : addremove, 'remove' : addremove, 'tag' : tagadd,
'tags' : tags, 'log' : history, 'history': history,
'diff' : diff, 'merge' : merge, 'tip' : revisions,
'parents': revisions, 'heads' : revisions, 'update' : update,
'clone' : clone, 'serve' : serve, 'synch' : synch,
'about' : about, 'config' : tgitconfig, 'recovery': recovery,
'datamine': datamine }
def get_list_from_file(filename):
fd = open(filename, "r")
lines = [ x.replace("\n", "") for x in fd.readlines() ]
fd.close()
return lines
def get_option(args):
import getopt
long_opt_list = ('command=', 'exepath=', 'listfile=', 'root=', 'cwd=',
'deletelistfile', 'nogui')
opts, args = getopt.getopt(args, "c:e:l:dR:", long_opt_list)
# Set default options
options = {}
options['gitcmd'] = 'help'
options['cwd'] = os.getcwd()
options['files'] = []
options['gui'] = True
listfile = None
delfile = False
for o, a in opts:
if o in ("-c", "--command"):
options['gitcmd'] = a
elif o in ("-l", "--listfile"):
listfile = a
elif o in ("-d", "--deletelistfile"):
delfile = True
elif o in ("--nogui"):
options['gui'] = False
elif o in ("-R", "--root"):
options['root'] = a
elif o in ("--cwd"):
options['cwd'] = a
if listfile:
options['files'] = get_list_from_file(listfile)
if delfile:
os.unlink(listfile)
return (options, args)
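# Illustrative sketch (not part of the original script): for a command line
# like ['-c', 'commit', '--root', 'C:\\repo', '-l', 'C:\\temp\\files.txt'],
# get_option() would return roughly:
#   options = {'gitcmd': 'commit', 'cwd': os.getcwd(), 'gui': True,
#              'root': 'C:\\repo',
#              'files': <lines read from C:\\temp\\files.txt>}
#   args = []
# The list file itself is only deleted when '-d'/'--deletelistfile' is passed.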
def parse(args):
option, args = get_option(args)
cmdline = ['git', option['gitcmd']]
if 'root' in option:
cmdline.append('--repository')
cmdline.append(option['root'])
cmdline.extend(args)
cmdline.extend(option['files'])
option['cmdline'] = cmdline
global _dialogs
dialog = _dialogs.get(option['gitcmd'], gitcmd)
dialog.run(**option)
def run_trapped(args):
try:
        dlg = parse(args)
except:
import traceback
from gitgtk.dialog import error_dialog
tr = traceback.format_exc()
print tr
error_dialog(None, "Error executing gitproc", tr)
if __name__=='__main__':
#dlg = parse(['-c', 'help', '--', '-v'])
#dlg = parse(['-c', 'log', '--root', 'c:\git\h1', '--', '-l1'])
#dlg = parse(['-c', 'status', '--root', 'c:\hg\h1', ])
#dlg = parse(['-c', 'add', '--root', 'c:\hg\h1', '--listfile', 'c:\\hg\\h1\\f1', '--notify'])
#dlg = parse(['-c', 'rollback', '--root', 'c:\\hg\\h1'])
print "gitproc sys.argv =", sys.argv
dlg = run_trapped(sys.argv[1:])
| gpl-2.0 | -3,280,306,436,063,970,300 | 30.80531 | 97 | 0.553422 | false |
Guymer/PyGuymer | return_dict_of_ISO_subtitle_streams.py | 1 | 2124 | # -*- coding: utf-8 -*-
##############################################################################################
# This file is deprecated because Python 2.x is deprecated #
# A Python 3.x version of this file can be found at: #
# #
# https://github.com/Guymer/PyGuymer3/blob/master/return_dict_of_ISO_subtitle_streams.py #
##############################################################################################
def return_dict_of_ISO_subtitle_streams(fname, usr_track = -1):
# Import modules ...
import subprocess
import xml.etree.ElementTree
# Check input ...
if usr_track == -1:
raise Exception("no track was requested")
# Find track info ...
proc = subprocess.Popen(
[
"lsdvd",
"-x",
"-Ox",
fname
],
stderr = subprocess.PIPE,
stdout = subprocess.PIPE
)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise Exception(u"\"lsdvd\" command failed")
# Clean up ...
# NOTE: "lsdvd" sometimes returns invalid XML as it does not: escape
# characters; or remove invalid characters.
stdout = unicode(stdout, "utf-8", "ignore").replace(u"&", u"&")
# Loop over all tracks ...
for track in xml.etree.ElementTree.fromstring(stdout).findall("track"):
# Skip if this track is not the chosen one ...
if int(track.find("ix").text) != int(usr_track):
continue
# Create empty dictionary ...
ans = {}
# Loop over all subtitle channels in this track ...
for subp in track.findall("subp"):
# Append information ...
ans[subp.find("streamid").text] = {
"content" : subp.find("content").text,
"langcode" : subp.find("langcode").text,
"language" : subp.find("language").text
}
# Return dictionary ...
return ans
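# Minimal usage sketch ("movie.iso" and the track number are placeholders,
# and "lsdvd" must be installed for the call to succeed):
#   subs = return_dict_of_ISO_subtitle_streams("movie.iso", usr_track = 1)
#   # subs maps each subtitle stream id to its metadata, e.g.
#   # {"0x20": {"content": "Normal", "langcode": "en", "language": "English"}}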
| apache-2.0 | -7,274,281,633,515,023,000 | 35.62069 | 94 | 0.469397 | false |
hyperspy/hyperspy_gui_ipywidgets | hyperspy_gui_ipywidgets/tests/test_tools.py | 1 | 9853 | import numpy as np
import hyperspy.api as hs
from hyperspy_gui_ipywidgets.tests.utils import KWARGS
from hyperspy.signal_tools import (Signal1DCalibration, ImageContrastEditor,
EdgesRange)
class TestTools:
def setup_method(self, method):
self.s = hs.signals.Signal1D(1 + np.arange(100)**2)
self.s.change_dtype('float')
self.s.axes_manager[0].offset = 10
self.s.axes_manager[0].scale = 2
self.s.axes_manager[0].units = "m"
def test_calibrate(self):
s = self.s
cal = Signal1DCalibration(s)
cal.ss_left_value = 10
cal.ss_right_value = 30
wd = cal.gui(**KWARGS)["ipywidgets"]["wdict"]
wd["new_left"].value = 0
wd["new_right"].value = 10
wd["units"].value = "nm"
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
assert s.axes_manager[0].scale == 1
assert s.axes_manager[0].offset == 0
assert s.axes_manager[0].units == "nm"
def test_calibrate_from_s(self):
s = self.s
wd = s.calibrate(**KWARGS)["ipywidgets"]["wdict"]
wd["left"].value = 10
wd["right"].value = 30
wd["new_left"].value = 1
wd["new_right"].value = 11
wd["units"].value = "nm"
assert wd["offset"].value == 1
assert wd["scale"].value == 1
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
assert s.axes_manager[0].scale == 1
assert s.axes_manager[0].offset == 1
assert s.axes_manager[0].units == "nm"
def test_smooth_sg(self):
s = self.s
s.add_gaussian_noise(0.1)
s2 = s.deepcopy()
wd = s.smooth_savitzky_golay(**KWARGS)["ipywidgets"]["wdict"]
wd["window_length"].value = 11
wd["polynomial_order"].value = 5
wd["differential_order"].value = 1
wd["color"].value = "red"
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
s2.smooth_savitzky_golay(polynomial_order=5, window_length=11,
differential_order=1)
np.testing.assert_allclose(s.data, s2.data)
def test_smooth_lowess(self):
s = self.s
s.add_gaussian_noise(0.1)
s2 = s.deepcopy()
wd = s.smooth_lowess(**KWARGS)["ipywidgets"]["wdict"]
wd["smoothing_parameter"].value = 0.9
wd["number_of_iterations"].value = 3
wd["color"].value = "red"
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
s2.smooth_lowess(smoothing_parameter=0.9, number_of_iterations=3)
np.testing.assert_allclose(s.data, s2.data)
def test_smooth_tv(self):
s = self.s
s.add_gaussian_noise(0.1)
s2 = s.deepcopy()
wd = s.smooth_tv(**KWARGS)["ipywidgets"]["wdict"]
wd["smoothing_parameter"].value = 300
wd["color"].value = "red"
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
s2.smooth_tv(smoothing_parameter=300)
np.testing.assert_allclose(s.data, s2.data)
def test_filter_butterworth(self):
s = self.s
s.add_gaussian_noise(0.1)
s2 = s.deepcopy()
wd = s.filter_butterworth(**KWARGS)["ipywidgets"]["wdict"]
wd["cutoff"].value = 0.5
wd["order"].value = 3
wd["type"].value = "high"
wd["color"].value = "red"
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
s2.filter_butterworth(
cutoff_frequency_ratio=0.5,
order=3,
type="high")
np.testing.assert_allclose(s.data, s2.data)
def test_remove_background(self):
s = self.s
s.add_gaussian_noise(0.1)
s2 = s.remove_background(
signal_range=(15., 50.),
background_type='Polynomial',
polynomial_order=2,
fast=False,
zero_fill=True)
wd = s.remove_background(**KWARGS)["ipywidgets"]["wdict"]
assert wd["polynomial_order"].layout.display == "none" # not visible
wd["background_type"].value = "Polynomial"
assert wd["polynomial_order"].layout.display == "" # visible
wd["polynomial_order"].value = 2
wd["fast"].value = False
wd["zero_fill"] = True
wd["left"].value = 15.
wd["right"].value = 50.
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
np.testing.assert_allclose(s.data[2:], s2.data[2:])
np.testing.assert_allclose(np.zeros(2), s2.data[:2])
def test_spikes_removal_tool(self):
s = hs.signals.Signal1D(np.ones((2, 3, 30)))
# Add three spikes
s.data[1, 0, 1] += 2
s.data[0, 2, 29] += 1
s.data[1, 2, 14] += 1
wd = s.spikes_removal_tool(**KWARGS)["ipywidgets"]["wdict"]
def next():
wd["next_button"]._click_handlers(wd["next_button"])
def previous():
wd["previous_button"]._click_handlers(wd["previous_button"])
def remove():
wd["remove_button"]._click_handlers(wd["remove_button"])
wd["threshold"].value = 1.5
next()
assert s.axes_manager.indices == (0, 1)
wd["threshold"].value = 0.5
assert s.axes_manager.indices == (0, 0)
next()
assert s.axes_manager.indices == (2, 0)
next()
assert s.axes_manager.indices == (0, 1)
previous()
assert s.axes_manager.indices == (2, 0)
wd["add_noise"].value = False
remove()
assert s.data[0, 2, 29] == 1
assert s.axes_manager.indices == (0, 1)
remove()
assert s.data[1, 0, 1] == 1
assert s.axes_manager.indices == (2, 1)
np.random.seed(1)
wd["add_noise"].value = True
wd["interpolator_kind"].value = "Spline"
wd["spline_order"].value = 3
remove()
assert s.data[1, 2, 14] == 0
assert s.axes_manager.indices == (0, 0)
def test_constrast_editor(self):
        # To get this test to work, the matplotlib backend needs to be set to 'Agg'
np.random.seed(1)
im = hs.signals.Signal2D(np.random.random((32, 32)))
im.plot()
ceditor = ImageContrastEditor(im._plot.signal_plot)
ceditor.ax.figure.canvas.draw_idle()
wd = ceditor.gui(**KWARGS)["ipywidgets"]["wdict"]
assert wd["linthresh"].layout.display == "none" # not visible
assert wd["linscale"].layout.display == "none" # not visible
assert wd["gamma"].layout.display == "none" # not visible
wd["bins"].value = 50
assert ceditor.bins == 50
wd["norm"].value = 'Log'
assert ceditor.norm == 'Log'
assert wd["linthresh"].layout.display == "none" # not visible
assert wd["linscale"].layout.display == "none" # not visible
wd["norm"].value = 'Symlog'
assert ceditor.norm == 'Symlog'
assert wd["linthresh"].layout.display == "" # visible
assert wd["linscale"].layout.display == "" # visible
assert wd["linthresh"].value == 0.01 # default value
assert wd["linscale"].value == 0.1 # default value
wd["linthresh"].value = 0.1
assert ceditor.linthresh == 0.1
wd["linscale"].value = 0.2
assert ceditor.linscale == 0.2
wd["norm"].value = 'Linear'
percentile = [1.0, 99.0]
wd["percentile"].value = percentile
assert ceditor.vmin_percentile == percentile[0]
assert ceditor.vmax_percentile == percentile[1]
assert im._plot.signal_plot.vmin == f'{percentile[0]}th'
assert im._plot.signal_plot.vmax == f'{percentile[1]}th'
wd["norm"].value = 'Power'
assert ceditor.norm == 'Power'
assert wd["gamma"].layout.display == "" # visible
assert wd["gamma"].value == 1.0 # default value
wd["gamma"].value = 0.1
assert ceditor.gamma == 0.1
assert wd["auto"].value is True # default value
wd["auto"].value = False
assert ceditor.auto is False
wd["left"].value = 0.2
assert ceditor.ss_left_value == 0.2
wd["right"].value = 0.5
assert ceditor.ss_right_value == 0.5
# Setting the span selector programmatically from the widgets will
# need to be implemented properly
wd["apply_button"]._click_handlers(wd["apply_button"]) # Trigger it
# assert im._plot.signal_plot.vmin == 0.2
# assert im._plot.signal_plot.vmax == 0.5
# Reset to default values
wd["reset_button"]._click_handlers(wd["reset_button"]) # Trigger it
assert im._plot.signal_plot.vmin == '0.0th'
assert im._plot.signal_plot.vmax == '100.0th'
def test_eels_table_tool(self):
s = hs.datasets.artificial_data.get_core_loss_eels_line_scan_signal(True)
s.plot()
er = EdgesRange(s)
er.ss_left_value = 500
er.ss_right_value = 550
wd = er.gui(**KWARGS)["ipywidgets"]["wdict"]
wd["update"]._click_handlers(wd["update"]) # refresh the table
assert wd["units"].value == 'eV'
assert wd["left"].value == 500
assert wd["right"].value == 550
assert len(wd['gb'].children) == 36 # 9 edges displayed
wd['major'].value = True
wd["update"]._click_handlers(wd["update"]) # refresh the table
assert len(wd['gb'].children) == 24 # 6 edges displayed
assert wd['gb'].children[4].description == 'Sb_M4'
wd['order'].value = 'ascending'
wd["update"]._click_handlers(wd["update"]) # refresh the table
assert wd['gb'].children[4].description == 'V_L3'
wd["reset"]._click_handlers(wd["reset"]) # reset the selector
assert len(wd['gb'].children) == 4 # only header displayed
| gpl-3.0 | 6,585,736,890,534,469,000 | 37.944664 | 81 | 0.563077 | false |
google-research/language | language/nqg/model/parser/training/training_utils.py | 1 | 3956 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to define model training loop."""
from language.nqg.model.parser.training import forest_utils
import tensorflow as tf
def get_training_step(optimizer, model, verbose=False):
"""Get training step function."""
forest_score_function = forest_utils.get_forest_score_function(
verbose=verbose)
def training_step(inputs):
"""Executes a step of training."""
with tf.GradientTape() as tape:
loss = tf.constant(0.0, dtype=tf.float32)
application_scores_batch = model(inputs["wordpiece_ids"],
inputs["num_wordpieces"],
inputs["application_span_begin"],
inputs["application_span_end"],
inputs["application_rule_idx"])
nu_num_nodes_batch = tf.squeeze(inputs["nu_num_nodes"], 1)
de_num_nodes_batch = tf.squeeze(inputs["de_num_nodes"], 1)
with tf.name_scope("forest_score"):
# TODO(petershaw): Consider a batched implementation of
# forest_score_function to avoid iteration over examples in the batch.
for idx in tf.range(model.batch_size):
application_scores = application_scores_batch[idx]
nu_node_type = inputs["nu_node_type"][idx]
nu_node_1_idx = inputs["nu_node_1_idx"][idx]
nu_node_2_idx = inputs["nu_node_2_idx"][idx]
nu_application_idx = inputs["nu_application_idx"][idx]
nu_num_nodes = nu_num_nodes_batch[idx]
# Log score for numerator (sum over derivations of target).
nu_score = forest_score_function(application_scores, nu_num_nodes,
nu_node_type, nu_node_1_idx,
nu_node_2_idx, nu_application_idx)
de_node_type = inputs["de_node_type"][idx]
de_node_1_idx = inputs["de_node_1_idx"][idx]
de_node_2_idx = inputs["de_node_2_idx"][idx]
de_application_idx = inputs["de_application_idx"][idx]
de_num_nodes = de_num_nodes_batch[idx]
# Log score for denominator (partition function).
de_score = forest_score_function(application_scores, de_num_nodes,
de_node_type, de_node_1_idx,
de_node_2_idx, de_application_idx)
# -log(numerator/denominator) = log(denominator) - log(numerator)
example_loss = de_score - nu_score
loss += example_loss
loss /= tf.cast(model.batch_size, dtype=tf.float32)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return loss
return training_step
def get_train_for_n_steps_fn(strategy, optimizer, model):
"""Return train_for_n_steps_fn."""
training_step = get_training_step(optimizer, model)
@tf.function
def train_for_n_steps_fn(iterator, steps):
mean_loss = tf.constant(0.0, dtype=tf.float32)
for _ in tf.range(steps):
inputs = next(iterator)
loss = strategy.run(training_step, args=(inputs,))
mean_loss += strategy.reduce(tf.distribute.ReduceOp.MEAN, loss, axis=None)
mean_loss /= tf.cast(steps, dtype=tf.float32)
return mean_loss
return train_for_n_steps_fn
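# Minimal wiring sketch (illustrative only; `model`, `dataset` and the
# hyper-parameters below are placeholders, not taken from this codebase):
#   strategy = tf.distribute.MirroredStrategy()
#   with strategy.scope():
#     model = ...  # parser model exposing batch_size and trainable_variables
#     optimizer = tf.keras.optimizers.Adam(1e-4)
#   iterator = iter(strategy.experimental_distribute_dataset(dataset))
#   train_fn = get_train_for_n_steps_fn(strategy, optimizer, model)
#   mean_loss = train_fn(iterator, tf.constant(100))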
| apache-2.0 | 2,828,534,664,817,358,000 | 40.642105 | 80 | 0.623357 | false |
Nablaquabla/sns-analysis | sns-test.py | 1 | 2499 | import os
import time as tm
import sys
# Handles the creation of condor files for a given set of directories
# -----------------------------------------------------------------------------
def createCondorFile(dataDir,outDir,time):
# Condor submission file name convention: run-day-time.condor
with open('/home/bjs66/CondorFiles/test.condor','w') as f:
# Fixed program location'
f.write('Executable = /home/bjs66/GitHub/sns-analysis/sns-analysis-v4\n')
# Arguments passed to the exe:
# Set main run directory, e.g. Run-15-10-02-27-32-23/151002
# Set current time to be analzyed (w/o .zip extension!), e.g. 184502
# Set output directory, eg Output/ Run-15-10-02-27-32-23/151002
f.write('Arguments = \"1 %s %s %s 1\"\n'%(dataDir,time,outDir))
# Standard cluster universe
f.write('universe = vanilla\n')
f.write('getenv = true\n')
# Program needs at least 250 MB of free memory to hold unzipped data
f.write('request_memory = 400\n')
# Output, error and log name convention: run-day-time.log/out/err
f.write('log = ../../Logs/test.log\n')
f.write('Output = ../../Outs/test.out\n')
f.write('Error = ../../Errs/test.err\n')
# Do not write any emails
f.write('notification = never\n')
f.write('+Department = Physics\n')
f.write('should_transfer_files = NO\n')
# Add single job to queue
f.write('Queue')
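# For reference, the generated submission file looks roughly like this
# (dataDir/outDir/time are whatever the caller passes in):
#   Executable = /home/bjs66/GitHub/sns-analysis/sns-analysis-v4
#   Arguments = "1 <dataDir> <time> <outDir> 1"
#   universe = vanilla
#   getenv = true
#   request_memory = 400
#   log = ../../Logs/test.log
#   Output = ../../Outs/test.out
#   Error = ../../Errs/test.err
#   notification = never
#   +Department = Physics
#   should_transfer_files = NO
#   Queue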
# Main function handling all internals
# -----------------------------------------------------------------------------
def main():
# Choose main directory, i.e. ~/csi/beam_on_data/Run-15-06-25-xyz/
mainRunDir = '/var/phy/project/phil/grayson/COHERENT/CsI/'
# Choose output directory, i.e. ~/output/Run-15-06-25-xyz/
mainOutDir = '/var/phy/project/phil/grayson/COHERENT/CsI/bjs-analysis/test/'
pathToFile = 'beam_on_data/Run-15-09-21-20-58-01/150923/'
time = '065101'
dataRunDir = mainRunDir + pathToFile
createCondorFile(dataRunDir,mainOutDir,time)
cmd = 'condor_submit /home/bjs66/CondorFiles/test.condor'
os.system(cmd)
if __name__ == '__main__':
main()
| gpl-3.0 | -34,610,708,783,304,316 | 28.4 | 81 | 0.517407 | false |
seikichi/tuitwi | tuitwi/state.py | 1 | 11226 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import curses
import curses.ascii
class State(object):
    '''Base class for the State pattern.'''
def __init__(self, stdscr, form):
self._form = form
self._stdscr = stdscr
self._func = {}
self._func[curses.KEY_RESIZE] = self._resize
self._func['default'] = self._do_nothing
def _resize(self):
self._form.resize(self._stdscr)
self._form.controls['edit_line'].cur_set()
return self
def _do_nothing(self, ch):
return self
def execute(self, ch):
if self._func.get(ch):
return self._func[ch]()
else:
return self._func['default'](ch)
class ExitState(State):
    '''Ask the user to confirm quitting.'''
def __init__(self, stdscr, form, viewstate):
State.__init__(self, stdscr, form)
self._viewstate = viewstate
self._form.controls['status_line'].text = u'ほんとに終了する? (y/n)'
self._func[ord('y')] = self._quit
self._func[ord('n')] = self._cancel
def _quit(self):
return None
def _cancel(self):
self._form.controls['status_line'].text = u'TuiTwi ver 0.2'
return self._viewstate
class ConfirmDestroyMessageState(State):
    '''Ask the user to confirm deleting a post.'''
def __init__(self, stdscr, form, viewstate):
State.__init__(self, stdscr, form)
self._viewstate = viewstate
self._form.controls['status_line'].text = u'発言を削除しますか? (y/n)'
self._func[ord('y')] = self._yes
self._func[ord('n')] = self._no
def _yes(self):
i = self._form.controls['view_tab'].current_win.current_status().id
self._viewstate.queue.put(("DestroyStatus", i))
return self._viewstate
def _no(self):
self._form.controls['status_line'].text = u'TuiTwi ver 0.2'
return self._viewstate
class SearchInputState(State):
    '''Input a search term.'''
def __init__(self, stdscr, form, viewstate):
State.__init__(self, stdscr, form)
curses.curs_set(True)
self._viewstate = viewstate
self._form.controls['status_line'].text = u'検索語句を入力して下さい.無ければTABで戻れます.'
self._form.controls['search_line'].show()
self._form.controls['edit_line'].hide()
self._func[curses.ascii.TAB] = self._quit
self._func[curses.ascii.CR] = self._func[curses.ascii.LF] = self._update
self._func['default'] = self._edit
def _update(self):
text = self._form.controls['search_line'].text
self._viewstate.search_word = text
self._form.controls['fullstatus_area'].keyword = text
self._form.controls['search_word_line'].text = "search word: %s" % text
curses.curs_set(False)
return self._quit()
def _quit(self):
curses.curs_set(False)
self._form.controls['status_line'].text = u'TuiTwi ver 0.2'
self._form.controls['search_line'].hide()
self._form.controls['edit_line'].show()
return self._viewstate
def _edit(self, ch):
self._form.controls['search_line'].edit(ch)
return self
class HelpState(State):
    '''Display the help screen.'''
def __init__(self, stdscr, form, viewstate):
State.__init__(self, stdscr, form)
self._form.controls['help_area'].show()
self._form.controls['fullstatus_area'].hide()
self._form.controls['view_tab'].hide()
self._form.controls['status_line'].text = u"press 'q' to quit help."
self._viewstate = viewstate
self._func[ord('q')] = self._quit
def _quit(self):
self._form.controls['status_line'].text = u"TuiTwi ver 0.2"
self._viewstate.resume()
return self._viewstate
class EditState(State):
    '''Compose a new tweet.'''
def __init__(self, stdscr, form, viewstate):
State.__init__(self, stdscr, form)
curses.curs_set(True)
self._viewstate = viewstate
self._func[curses.ascii.TAB] = self._view
self._func[curses.ascii.CR] = self._func[curses.ascii.LF] = self._update
self._func['default'] = self._edit
def _update(self):
self._viewstate.queue.put(("PostUpdate", self._form.controls['edit_line'].text, self._viewstate.reply_id))
self._form.controls['edit_line'].clear()
return self._view()
def _view(self):
curses.curs_set(False)
return self._viewstate
def _edit(self, ch):
self._form.controls['edit_line'].edit(ch)
return self
class ViewState(State):
    '''Timeline viewing state.'''
def __init__(self, stdscr, form, queue, conf):
State.__init__(self, stdscr, form)
curses.curs_set(False)
self._form.controls['status_line'].text = u'TuiTwi ver 0.2'
self._form.controls['view_tab'].show()
self._form.controls['fullstatus_area'].show()
self._form.controls['help_area'].hide()
self._form.controls['search_line'].hide()
self._queue = queue
self._command = conf.get('options').get('browser_command')
self._search_word = ''
self._conf = conf
self.reply_id = None
self._func[ord('q')] = self._quit
self._func[ord('j')] = self._func[curses.KEY_DOWN] = self._next
self._func[ord('k')] = self._func[curses.KEY_UP] = self._prev
self._func[ord('g')] = self._top
self._func[ord('G')] = self._bottom
self._func[ord('r')] = self._update
self._func[ord('f')] = self._fav
self._func[ord('n')] = self._next_user_post
self._func[ord('p')] = self._prev_user_post
self._func[ord('P')] = self._move_to_reply_to
self._func[ord('N')] = self._move_to_reply_from
self._func[ord('h')] = self._func[curses.KEY_LEFT] = self._prev_tab
self._func[ord('l')] = self._func[curses.KEY_RIGHT] = self._next_tab
self._func[ord('o')] = self._open
self._func[ord('H')] = self._home
self._func[ord('R')] = self._rt
self._func[curses.ascii.DC2] = self._official_rt
self._func[ord('/')] = self._search_input
self._func[ord('d')] = self._delete
self._func[curses.ascii.SO] = self._search_next
self._func[curses.ascii.DLE] = self._search_prev
self._func[curses.ascii.CR] = self._func[curses.ascii.LF] = self._reply
self._func[curses.ascii.ACK] = self._func[ord(' ')] = self._scroll_down
self._func[curses.ascii.STX] = self._func[ord('-')] = self._scroll_up
self._func[ord('q')] = self._quit
self._func[curses.ascii.TAB] = self._edit
self._func[ord('?')] = self._help
def get_search_word(self): return self._search_word
def set_search_word(self, val): self._search_word = val
search_word = property(get_search_word, set_search_word)
@property
def queue(self): return self._queue
def resume(self):
self._form.controls['help_area'].hide()
self._form.controls['view_tab'].show()
self._form.controls['fullstatus_area'].show()
def execute(self, ch):
ret = State.execute(self, ch)
self._form.controls['fullstatus_area'].status = self._form.controls['view_tab'].current_win.current_status()
return ret
def _delete(self):
status = self._form.controls['view_tab'].current_win.current_status()
if status and self._conf['credential']['user'] == status.user.screen_name:
return ConfirmDestroyMessageState(self._stdscr, self._form, self)
else:
return self
def _search_input(self):
return SearchInputState(self._stdscr, self._form, self)
def _search_next(self):
self._form.controls['view_tab'].current_win.search_next_word(self._search_word)
return self
def _search_prev(self):
self._form.controls['view_tab'].current_win.search_prev_word(self._search_word)
return self
def _open(self):
        # TODO(seikichi): the URL construction here may be fragile
status = self._form.controls['view_tab'].current_win.current_status()
url = "http://twitter.com/%s/status/%d" % (status.user.screen_name, status.id)
os.system(self._command % url)
return self
def _home(self):
status = self._form.controls['view_tab'].current_win.current_status()
url = "http://twitter.com/%s" % status.user.screen_name
os.system(self._command % url)
return self
def _next_tab(self):
self._form.controls['view_tab'].next_tab()
return self
def _prev_tab(self):
self._form.controls['view_tab'].prev_tab()
return self
def _move_to_reply_from(self):
self._form.controls['view_tab'].current_win.move_to_reply_from()
return self
def _move_to_reply_to(self):
self._form.controls['view_tab'].current_win.move_to_reply_to()
return self
def _prev_user_post(self):
self._form.controls['view_tab'].current_win.prev_user_post()
return self
def _next_user_post(self):
self._form.controls['view_tab'].current_win.next_user_post()
return self
def _fav(self):
status = self._form.controls['view_tab'].current_win.current_status()
if not status.favorited:
self.queue.put(("CreateFavorite", status))
else:
self.queue.put(("DestroyFavorite", status))
return self
def _reply(self):
win = self._form.controls['view_tab'].current_win
if win.current_status() is not None:
self.reply_id = win.current_status().id
self._form.controls['edit_line'].insert_string(win.reply_string())
return EditState(self._stdscr, self._form, self)
def _official_rt(self):
status = self._form.controls['view_tab'].current_win.current_status()
if status is not None:
self._queue.put(('OfficialRT', status.id))
return self
def _rt(self):
status = self._form.controls['view_tab'].current_win.current_status()
if status is not None:
self._form.controls['edit_line'].insert_rt(status)
return EditState(self._stdscr, self._form, self)
def _update(self):
self._queue.put(('GetFriendsTimeline',))
return self
def _scroll_down(self):
self._form.controls['view_tab'].current_win.scroll_down()
return self
def _scroll_up(self):
self._form.controls['view_tab'].current_win.scroll_up()
return self
def _top(self):
self._form.controls['view_tab'].current_win.move_to_top()
return self
def _bottom(self):
self._form.controls['view_tab'].current_win.move_to_bottom()
return self
def _next(self):
self._form.controls['view_tab'].current_win.next()
return self
def _edit(self):
return EditState(self._stdscr, self._form, self)
def _prev(self):
self._form.controls['view_tab'].current_win.prev()
return self
def _help(self):
return HelpState(self._stdscr, self._form, self)
def _quit(self):
return ExitState(self._stdscr, self._form, self)
| mit | -3,273,382,893,051,582,000 | 32.550152 | 116 | 0.588965 | false |
knaggsy2000/stormforce-mq | plugins/plugin_core_serverdetails.py | 1 | 8457 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#########################################################################
# Copyright/License Notice (Modified BSD License) #
#########################################################################
#########################################################################
# Copyright (c) 2008-2012, 2014, 2016 Daniel Knaggs - 2E0DPK/M6DPK #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: - #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the author nor the names of its contributors #
# may be used to endorse or promote products derived from this #
# software without specific prior written permission. #
# #
# * This Software is not to be used for safety purposes. #
# #
# * You agree and abide the Disclaimer for your Boltek products. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#########################################################################
###################################################
# StormForce Server Details Plugin #
###################################################
from plugin_core_base import PluginBase
from smq_shared import MQ
###########
# Classes #
###########
class Plugin(PluginBase):
def __init__(self):
self.SERVER_COPYRIGHT = "(c)2008-2012, 2014, 2016 - Daniel Knaggs"
self.SERVER_NAME = "StormForce MQ"
self.SERVER_VERSION = "0.2.0"
self.STRIKE_COPYRIGHT = "Lightning Data (c) 2016 - Daniel Knaggs"
self.UPDATE_PERIOD = 1.
PluginBase.__init__(self)
self.MQ_ROUTING_KEY = "{0}.core.serverdetails".format(self.MQ_ROUTING_KEY)
self.MQ_RX_ENABLED = False
def getScriptPath(self):
return self.os.path.realpath(__file__)
def readXMLSettings(self):
PluginBase.readXMLSettings(self)
if self.os.path.exists(self.XML_SETTINGS_FILE):
xmldoc = self.minidom.parse(self.XML_SETTINGS_FILE)
myvars = xmldoc.getElementsByTagName("Setting")
for var in myvars:
for key in var.attributes.keys():
val = str(var.attributes[key].value)
if key == "Enabled":
self.ENABLED = self.cBool(val)
elif key == "StrikeCopyright":
self.STRIKE_COPYRIGHT = val
elif key == "UpdatePeriod":
self.UPDATE_PERIOD = float(val)
def run(self):
self.log.debug("Starting...")
time_wait = self.datetime.now() + self.timedelta(seconds = self.UPDATE_PERIOD)
while self.running:
t = self.datetime.now()
if t >= time_wait:
try:
myconn = []
self.db.connectToDatabase(myconn)
rows = self.db.executeSQLQuery("SELECT ServerStarted, DATE_PART('epoch', ServerStarted) AS ServerStartedUT, DATE_PART('epoch', LOCALTIMESTAMP) - DATE_PART('epoch', ServerStarted) AS ServerUptime, ServerApplication, ServerCopyright, ServerVersion, StrikeCopyright FROM tblServerDetails LIMIT 1", conn = myconn)
self.db.disconnectFromDatabase(myconn)
# Send out the server details
self.log.info("Sending out the server details...")
for row in rows:
m = self.constructMessage("ServerDetails", {"ServerStarted": str(row[0]), "ServerStartedUT": row[1], "ServerUptime": row[2], "ServerApplication": row[3], "ServerCopyright": row[4], "ServerVersion": row[5], "StrikeCopyright": row[6]})
self.mq.publishMessage(m[1], headers = m[0])
break
except Exception, ex:
self.log.error("An error occurred while running the current time.")
self.log.error(ex)
finally:
time_wait = self.datetime.now() + self.timedelta(seconds = self.UPDATE_PERIOD)
self.time.sleep(0.1)
def start(self, use_threading = True):
PluginBase.start(self, use_threading)
if self.ENABLED:
self.log.info("Starting server details...")
self.running = True
if use_threading:
t = self.threading.Thread(target = self.run)
t.setDaemon(1)
t.start()
else:
self.run()
def stop(self):
if self.ENABLED:
self.running = False
def updateDatabase(self):
PluginBase.updateDatabase(self)
myconn = []
self.db.connectToDatabase(myconn)
##########
# Tables #
##########
self.log.info("Creating tables...")
# tblServerDetails
self.log.debug("TABLE: tblServerDetails")
self.db.executeSQLCommand("DROP TABLE IF EXISTS tblServerDetails CASCADE", conn = myconn)
self.db.executeSQLCommand("CREATE TABLE tblServerDetails(ID bigserial PRIMARY KEY)", conn = myconn) # MEMORY
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN ServerStarted timestamp", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN ServerApplication varchar(20)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN ServerCopyright varchar(100)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN ServerVersion varchar(8)", conn = myconn)
self.db.executeSQLCommand("ALTER TABLE tblServerDetails ADD COLUMN StrikeCopyright varchar(100)", conn = myconn)
self.db.executeSQLCommand("INSERT INTO tblServerDetails(ServerStarted, ServerApplication, ServerCopyright, ServerVersion, StrikeCopyright) VALUES(LOCALTIMESTAMP, %(ServerApplication)s, %(ServerCopyright)s, %(ServerVersion)s, %(StrikeCopyright)s)", {"ServerApplication": self.SERVER_NAME, "ServerCopyright": self.SERVER_COPYRIGHT, "ServerVersion": self.SERVER_VERSION, "StrikeCopyright": self.STRIKE_COPYRIGHT}, myconn)
self.db.disconnectFromDatabase(myconn)
def writeXMLSettings(self):
PluginBase.writeXMLSettings(self)
if not self.os.path.exists(self.XML_SETTINGS_FILE):
xmldoc = self.minidom.Document()
settings = xmldoc.createElement("PluginServerDetails")
xmldoc.appendChild(settings)
var = xmldoc.createElement("Setting")
var.setAttribute("Enabled", str(self.ENABLED))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("StrikeCopyright", str(self.STRIKE_COPYRIGHT))
settings.appendChild(var)
var = xmldoc.createElement("Setting")
var.setAttribute("UpdatePeriod", str(self.UPDATE_PERIOD))
settings.appendChild(var)
xmloutput = file(self.XML_SETTINGS_FILE, "w")
xmloutput.write(xmldoc.toprettyxml())
xmloutput.close()
########
# Main #
########
if __name__ == "__main__":
try:
p = Plugin()
p.start(use_threading = False)
p = None
except Exception, ex:
print "Exception: {0}".format(ex)
| bsd-3-clause | 8,567,733,712,468,731,000 | 36.923767 | 420 | 0.601987 | false |
BIGBALLON/cifar-10-cnn | Tensorflow_version/vgg_19_pretrain.py | 1 | 12039 | # -*- coding:utf-8 -*-
# ========================================================== #
# File name: vgg_19.py
# Author: BIGBALLON
# Date created: 07/22/2017
# Python Version: 3.5.2
# Description: implement vgg19 network to train cifar10
# ========================================================== #
import tensorflow as tf
from data_utility import *
iterations = 200
batch_size = 250
total_epoch = 164
weight_decay = 0.0005 # change it for test
dropout_rate = 0.5
momentum_rate = 0.9
log_save_path = './pretrain_vgg_logs'
model_save_path = './model/'
# ========================================================== #
# ├─ bias_variable()
# ├─ conv2d() With Batch Normalization
# ├─ max_pool()
# └─ global_avg_pool()
# ========================================================== #
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape, dtype=tf.float32 )
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
def max_pool(input, k_size=1, stride=1, name=None):
return tf.nn.max_pool(input, ksize=[1, k_size, k_size, 1], strides=[1, stride, stride, 1], padding='SAME',name=name)
def batch_norm(input):
return tf.contrib.layers.batch_norm(input, decay=0.9, center=True, scale=True, epsilon=1e-3, is_training=train_flag, updates_collections=None)
# ========================================================== #
# ├─ _random_crop()
# ├─ _random_flip_leftright()
# ├─ data_augmentation()
# ├─ data_preprocessing()
# └─ learning_rate_schedule()
# ========================================================== #
def _random_crop(batch, crop_shape, padding=None):
oshape = np.shape(batch[0])
if padding:
oshape = (oshape[0] + 2*padding, oshape[1] + 2*padding)
new_batch = []
npad = ((padding, padding), (padding, padding), (0, 0))
for i in range(len(batch)):
new_batch.append(batch[i])
if padding:
new_batch[i] = np.lib.pad(batch[i], pad_width=npad,
mode='constant', constant_values=0)
nh = random.randint(0, oshape[0] - crop_shape[0])
nw = random.randint(0, oshape[1] - crop_shape[1])
new_batch[i] = new_batch[i][nh:nh + crop_shape[0],
nw:nw + crop_shape[1]]
return new_batch
def _random_flip_leftright(batch):
for i in range(len(batch)):
if bool(random.getrandbits(1)):
batch[i] = np.fliplr(batch[i])
return batch
def data_preprocessing(x_train,x_test):
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
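    # the constants below are the ImageNet per-channel (R, G, B) means that
    # the pretrained VGG weights expect to be subtracted from the input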
x_train[:,:,:,0] = (x_train[:,:,:,0]-123.680)
x_train[:,:,:,1] = (x_train[:,:,:,1]-116.779)
x_train[:,:,:,2] = (x_train[:,:,:,2]-103.939)
x_test[:,:,:,0] = (x_test[:,:,:,0]-123.680)
x_test[:,:,:,1] = (x_test[:,:,:,1]-116.779)
x_test[:,:,:,2] = (x_test[:,:,:,2]-103.939)
return x_train, x_test
def learning_rate_schedule(epoch_num):
if epoch_num < 81:
return 0.1
elif epoch_num < 121:
return 0.01
else:
return 0.001
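# Illustrative values of the schedule above:
#   learning_rate_schedule(1)   -> 0.1
#   learning_rate_schedule(100) -> 0.01
#   learning_rate_schedule(150) -> 0.001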
def data_augmentation(batch):
batch = _random_flip_leftright(batch)
batch = _random_crop(batch, [32,32], 4)
return batch
def run_testing(sess,ep):
acc = 0.0
loss = 0.0
pre_index = 0
add = 1000
for it in range(10):
batch_x = test_x[pre_index:pre_index+add]
batch_y = test_y[pre_index:pre_index+add]
pre_index = pre_index + add
loss_, acc_ = sess.run([cross_entropy,accuracy],feed_dict={x:batch_x, y_:batch_y, keep_prob: 1.0, train_flag: False})
loss += loss_ / 10.0
acc += acc_ / 10.0
summary = tf.Summary(value=[tf.Summary.Value(tag="test_loss", simple_value=loss),
tf.Summary.Value(tag="test_accuracy", simple_value=acc)])
return acc, loss, summary
# ========================================================== #
# ├─ main()
# Training and Testing
# Save train/teset loss and acc for visualization
# Save Model in ./model
# ========================================================== #
if __name__ == '__main__':
train_x, train_y, test_x, test_y = prepare_data()
train_x, test_x = data_preprocessing(train_x, test_x)
# load pretrained weight from vgg19.npy
params_dict = np.load('vgg19.npy',encoding='latin1').item()
# define placeholder x, y_ , keep_prob, learning_rate
x = tf.placeholder(tf.float32,[None, image_size, image_size, 3])
y_ = tf.placeholder(tf.float32, [None, class_num])
keep_prob = tf.placeholder(tf.float32)
learning_rate = tf.placeholder(tf.float32)
train_flag = tf.placeholder(tf.bool)
# build_network
W_conv1_1 = tf.Variable(params_dict['conv1_1'][0])
b_conv1_1 = tf.Variable(params_dict['conv1_1'][1])
output = tf.nn.relu( batch_norm(conv2d(x,W_conv1_1) + b_conv1_1))
W_conv1_2 = tf.Variable(params_dict['conv1_2'][0])
b_conv1_2 = tf.Variable(params_dict['conv1_2'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv1_2) + b_conv1_2))
output = max_pool(output, 2, 2, "pool1")
W_conv2_1 = tf.Variable(params_dict['conv2_1'][0])
    b_conv2_1 = tf.Variable(params_dict['conv2_1'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv2_1) + b_conv2_1))
W_conv2_2 = tf.Variable(params_dict['conv2_2'][0])
b_conv2_2 = tf.Variable(params_dict['conv2_2'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv2_2) + b_conv2_2))
output = max_pool(output, 2, 2, "pool2")
W_conv3_1 = tf.Variable(params_dict['conv3_1'][0])
b_conv3_1 = tf.Variable(params_dict['conv3_1'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv3_1) + b_conv3_1))
W_conv3_2 = tf.Variable(params_dict['conv3_2'][0])
b_conv3_2 = tf.Variable(params_dict['conv3_2'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv3_2) + b_conv3_2))
W_conv3_3 = tf.Variable(params_dict['conv3_3'][0])
b_conv3_3 = tf.Variable(params_dict['conv3_3'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv3_3) + b_conv3_3))
W_conv3_4 = tf.Variable(params_dict['conv3_4'][0])
b_conv3_4 = tf.Variable(params_dict['conv3_4'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv3_4) + b_conv3_4))
output = max_pool(output, 2, 2, "pool3")
W_conv4_1 = tf.Variable(params_dict['conv4_1'][0])
b_conv4_1 = tf.Variable(params_dict['conv4_1'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv4_1) + b_conv4_1))
W_conv4_2 = tf.Variable(params_dict['conv4_2'][0])
b_conv4_2 = tf.Variable(params_dict['conv4_2'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv4_2) + b_conv4_2))
W_conv4_3 = tf.Variable(params_dict['conv4_3'][0])
b_conv4_3 = tf.Variable(params_dict['conv4_3'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv4_3) + b_conv4_3))
W_conv4_4 = tf.Variable(params_dict['conv4_4'][0])
b_conv4_4 = tf.Variable(params_dict['conv4_4'][1])
    output = tf.nn.relu( batch_norm(conv2d(output,W_conv4_4) + b_conv4_4))
output = max_pool(output, 2, 2)
W_conv5_1 = tf.Variable(params_dict['conv5_1'][0])
b_conv5_1 = tf.Variable(params_dict['conv5_1'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv5_1) + b_conv5_1))
W_conv5_2 = tf.Variable(params_dict['conv5_2'][0])
b_conv5_2 = tf.Variable(params_dict['conv5_2'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv5_2) + b_conv5_2))
W_conv5_3 = tf.Variable(params_dict['conv5_3'][0])
b_conv5_3 = tf.Variable(params_dict['conv5_3'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv5_3) + b_conv5_3))
W_conv5_4 = tf.Variable(params_dict['conv5_4'][0])
b_conv5_4 = tf.Variable(params_dict['conv5_4'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv5_4) + b_conv5_4))
output = tf.reshape(output,[-1,2*2*512])
W_fc1 = tf.get_variable('fc1', shape=[2048,4096], initializer=tf.contrib.keras.initializers.he_normal())
b_fc1 = bias_variable([4096])
output = tf.nn.relu( batch_norm(tf.matmul(output,W_fc1) + b_fc1) )
output = tf.nn.dropout(output,keep_prob)
W_fc2 = tf.Variable(params_dict['fc7'][0])
b_fc2 = tf.Variable(params_dict['fc7'][1])
output = tf.nn.relu( batch_norm(tf.matmul(output,W_fc2) + b_fc2) )
output = tf.nn.dropout(output,keep_prob)
W_fc3 = tf.get_variable('fc3', shape=[4096,10], initializer=tf.contrib.keras.initializers.he_normal())
b_fc3 = bias_variable([10])
output = tf.nn.relu( batch_norm(tf.matmul(output,W_fc3) + b_fc3) )
# loss function: cross_entropy
# train_step: training operation
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=output))
l2 = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
train_step = tf.train.MomentumOptimizer(learning_rate, momentum_rate,use_nesterov=True).minimize(cross_entropy + l2 * weight_decay)
correct_prediction = tf.equal(tf.argmax(output,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
# initial an saver to save model
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(log_save_path,sess.graph)
# epoch = 164
        # make sure that batch_size * iterations == number of training samples
for ep in range(1,total_epoch+1):
lr = learning_rate_schedule(ep)
pre_index = 0
train_acc = 0.0
train_loss = 0.0
start_time = time.time()
print("\nepoch %d/%d:" %(ep,total_epoch))
for it in range(1,iterations+1):
batch_x = train_x[pre_index:pre_index+batch_size]
batch_y = train_y[pre_index:pre_index+batch_size]
batch_x = data_augmentation(batch_x)
_, batch_loss = sess.run([train_step, cross_entropy],feed_dict={x:batch_x, y_:batch_y, keep_prob: dropout_rate, learning_rate: lr, train_flag: True})
batch_acc = accuracy.eval(feed_dict={x:batch_x, y_:batch_y, keep_prob: 1.0, train_flag: True})
train_loss += batch_loss
train_acc += batch_acc
pre_index += batch_size
if it == iterations:
train_loss /= iterations
train_acc /= iterations
loss_, acc_ = sess.run([cross_entropy,accuracy],feed_dict={x:batch_x, y_:batch_y, keep_prob: 1.0, train_flag: True})
train_summary = tf.Summary(value=[tf.Summary.Value(tag="train_loss", simple_value=train_loss),
tf.Summary.Value(tag="train_accuracy", simple_value=train_acc)])
val_acc, val_loss, test_summary = run_testing(sess,ep)
summary_writer.add_summary(train_summary, ep)
summary_writer.add_summary(test_summary, ep)
summary_writer.flush()
print("iteration: %d/%d, cost_time: %ds, train_loss: %.4f, train_acc: %.4f, test_loss: %.4f, test_acc: %.4f" %(it, iterations, int(time.time()-start_time), train_loss, train_acc, val_loss, val_acc))
else:
print("iteration: %d/%d, train_loss: %.4f, train_acc: %.4f" %(it, iterations, train_loss / it, train_acc / it) , end='\r')
save_path = saver.save(sess, model_save_path)
print("Model saved in file: %s" % save_path)
| mit | 7,394,086,289,216,242,000 | 38.952218 | 218 | 0.553963 | false |
kaichogami/wavenet | model.py | 1 | 15261 | """Model for wavenet. Defines ops in tensorflow sense"""
import numpy as np
import tensorflow as tf
MIN_DIL = 2
MAX_DIL = 4096
def _dilated_convolution(X, filters, dilation, name):
"""Helper function to carry out dilated convolution
Parameters
==========
X : tf.Tensor of shape(batch, width, height, in_channels)
The input data
filters : tf.Tensor of shape(height, width, in_channels, out_channels)
The filter tensor
dilation : int
the dilation factor
"""
return tf.nn.atrous_conv2d(X, filters, dilation, "SAME", name)
def _create_variable(name, shape):
"""Helped function to create variables using xavier initialization
Parameters
==========
name : string
Then name of the variable
shape : tuple, list
The shape of the variable
"""
return tf.get_variable(name, shape=shape,
initializer=tf.contrib.layers.xavier_initializer())
class Wavenet:
"""Model for Wavenet.
Parameters
==========
    audio_frequency : int, Hz
        The sampling frequency of the audio
receptive_seconds : int, secs
The size of the receptive field in seconds.
filter_width : int,
Size of the filter.
residual_channels : int
No of filters to learn for residual block.
dilation_channels : int
No of filters to learn for dilation block.
skip_channels : int
No of filters to learn for skip block.
quantization_channels : int
No of channels to encode the audio with
"""
def __init__(self, audio_frequency, receptive_seconds,
filter_width,residual_channels,
dilation_channels, skip_channels, quantization_channels):
self.audio_frequency = audio_frequency
self.receptive_seconds = receptive_seconds
self.filter_width = filter_width
self.residual_channels = residual_channels
self.dilation_channels = dilation_channels
self.skip_channels = skip_channels
self.quantization_channels = quantization_channels
self.dilations = _get_dilations(audio_frequency, receptive_seconds)
self.variables = self._get_all_variables()
self.quantization_channels = quantization_channels
def _get_all_variables(self):
"""Helper function to create a dict of all variables
"""
variables = dict()
# first causal convolution
with tf.variable_scope("initial_causal_conv"):
variables['initial_filter'] = _create_variable("filter",
[1,1,
self.quantization_channels,
self.residual_channels])
variables['dilated_stack'] = list()
# Dilated stack dictionary with list of variables
with tf.variable_scope('dilated_stack'):
for i, _ in enumerate(self.dilations):
current = dict()
with tf.variable_scope("dilated_layer_{}".format(i)):
current['filter'] = _create_variable(
"filter", [1, self.filter_width,
self.residual_channels,
self.dilation_channels])
current['gate'] = _create_variable(
"gate", [1, self.filter_width,
self.residual_channels,
self.dilation_channels])
current['skip'] = _create_variable(
"skip", [1, self.filter_width,
self.dilation_channels,
self.skip_channels])
variables['dilated_stack'].append(current)
with tf.variable_scope('post_processing'):
variables['post_1'] = _create_variable(
"post_1", [1, 1, self.skip_channels, self.skip_channels])
variables['post_2'] = _create_variable(
"post_2", [1, 1, self.skip_channels,
self.quantization_channels])
return variables
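        # Resulting layout (for reference):
        #   variables['initial_filter']                      # (1, 1, quantization, residual)
        #   variables['dilated_stack'][i]['filter'|'gate'|'skip']
        #   variables['post_1'], variables['post_2']         # 1x1 post-processing filters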
def _dilated_stack(self, X, dilation, layer_index):
"""create dilation layer or use it again.
Parameters
==========
X : np.ndarray or tf.tensor of shape(batch_size, height, width,
in_channels)
Input to the dilation stack
dilation : int
The dilation rate.
layer_index : int
Index of layer. Used for defining scope.
Output
======
residual, skip: np.ndarray of shape(batch_size, height, width,
in_channels)
Output of the dilated stack
"""
with tf.variable_scope('dilated_layer_{}'.format(layer_index)):
var_dict = self.variables['dilated_stack'][layer_index]
conv_filter = _dilated_convolution(X, var_dict['filter'],
dilation, name="conv_filter")
conv_gate = _dilated_convolution(X, var_dict['gate'],
dilation, name="conv_gate")
# final output
# Question: should the final skip and residual convolution have
# different weight vector or same? here, the same is used.
out = tf.tanh(conv_filter) * tf.sigmoid(conv_gate)
out = tf.nn.conv2d(out, var_dict['skip'], padding="SAME", strides=[1,1,1,1])
# return residual and skip output
return out + X, out
def create_network(self, X):
"""Create the network, by using dilated stack, postprocessing.
Parameters
==========
X : np.ndarray, of shape(batch, height, width, in_channels)
The input data.
Output
======
conv2 : np.ndarray of shape(batch, height, width, in_channels)
The output of the total network, unnormalized
"""
with tf.variable_scope('initial_causal_conv'):
initial_conv_result = tf.nn.conv2d(X, self.variables[
'initial_filter'],
padding="SAME", strides=[1,1,1,1])
residual = initial_conv_result
# create dilated stack results
skip_list = list()
with tf.variable_scope("dilated_stack"):
for i, dilation in enumerate(self.dilations):
residual, skip_result = self._dilated_stack(residual, dilation,
i)
skip_list.append(skip_result)
# post-processing
# addition --> Relu --> convolution --> Relu --> convolution
with tf.variable_scope("post_processing"):
total_output = sum(skip_list)
relu1 = tf.nn.tanh(total_output)
conv1 = tf.nn.conv2d(relu1, self.variables['post_1'],
padding="SAME", strides=[1,1,1,1])
relu2 = tf.nn.tanh(conv1)
conv2 = tf.nn.conv2d(relu2, self.variables['post_2'],
padding="SAME", strides=[1,1,1,1])
return conv2
def loss(self, input_samples):
"""Generate the cross entropy loss and reduce mean between batches
Parameters
==========
input_samples : np.ndarray of shape(batch, height, width, in_channels)
The input samples
"""
with tf.variable_scope("loss"):
# flip the input samples so that convolution depends on previous
# samples
input_samples = tf.reverse(input_samples, [2])
input_samples = _mu_law_encode(input_samples,
self.quantization_channels)
encoded = self._one_hot(input_samples)
network_output = self.create_network(encoded)
network_output = tf.reshape(network_output,
[1, 1, -1,
self.quantization_channels])
            # slice the receptive field from the end (of the flipped audio
            # signal) to preserve causality
shape = network_output.shape
receptive_samples = _get_rounded_receptive_samples(self.audio_frequency,
self.receptive_seconds)
output_sliced = tf.slice(network_output, [0, 0, 0, 0],
[-1, -1, int(shape[2]-receptive_samples),
-1])
encoded_sliced = tf.slice(encoded, [0, 0, 0, 0],
[-1, -1, int(shape[2]-receptive_samples),
-1])
sliced_shape = encoded_sliced.shape
# shift the input by left(reversed audio)
encoded_shifted = tf.slice(tf.pad(encoded_sliced, [[0,0], [0,0], [1,0], [0,0]]),
[0,0,0,0], [-1,-1, int(sliced_shape[2]),
-1])
# reshape to find the cross entropy loss
output_sliced = tf.reshape(output_sliced, [-1, self.quantization_channels])
encoded_shifted = tf.reshape(encoded_shifted, [-1, self.quantization_channels])
loss = tf.nn.softmax_cross_entropy_with_logits(
logits = output_sliced,
labels = encoded_shifted)
average_loss = tf.reduce_mean(loss)
return average_loss
def _generate_next_sample(self, waveform):
"""Generate the probabilty distribution of the next sample,
based on current waveform.
Parameters
==========
waveform : np.ndarray of shape(batch, in_height, in_width,
quantization_channels)
reversed input waveform
Output
======
        next_sample : tf.Tensor of shape (1, 1, 1, 1), int32
            the quantized value predicted for the next sample
"""
with tf.variable_scope("Generate"):
encoded = self._one_hot(waveform)
network_output = self.create_network(encoded)
out = tf.reshape(network_output, [-1, self.quantization_channels])
prob = tf.nn.softmax(out)
# return index + 1 to get the quantization channel value
return tf.to_int32(tf.reshape(tf.argmax(prob, axis=1)[0], [1,1,1,1])) + 1
def generate(self, seconds, song):
"""Generate audio based on trained model.
Output
======
generated_audio : np.ndarray of shape(out_width)
"""
with tf.variable_scope("Generate"):
receptive_samples = _get_rounded_receptive_samples(self.audio_frequency,
self.receptive_seconds)
total_samples = _get_receptive_samples(self.audio_frequency,
seconds)
# randomly generate first samples
if len(song) < receptive_samples:
print(len(song), receptive_samples)
raise ValueError("enter longer song or shorter receptive field")
current = song[1000:receptive_samples+3000]
current = np.reshape(current, [1,1,current.shape[0], 1])
total_waveform = tf.to_int32(tf.reverse(np.copy(current), [2]))
current = tf.reverse(current, [2])
current = _mu_law_encode(current, self.quantization_channels)
for i in xrange(receptive_samples, total_samples):
next_sample = self._generate_next_sample(current)
total_waveform = tf.concat([next_sample, total_waveform], 2)
# insert the next sample at the beginning and pop the last element
current = tf.slice(current, [0,0,0,0], [-1,-1,int(current.shape[2]-1),-1])
current = tf.concat([next_sample, current], 2)
print(i)
return _mu_law_decode(tf.reverse(total_waveform, [2]),
self.quantization_channels)
def _one_hot(self, input_samples):
"""Helper function to one_hot input samples.
"""
encoded = tf.one_hot(input_samples, depth=self.quantization_channels,
dtype=tf.float32)
return tf.reshape(encoded, [1, 1, -1, self.quantization_channels])
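# Minimal usage sketch (hyper-parameters below are illustrative, not taken
# from the original repository):
#   net = Wavenet(audio_frequency=16000, receptive_seconds=1, filter_width=2,
#                 residual_channels=32, dilation_channels=32,
#                 skip_channels=256, quantization_channels=256)
#   samples = tf.placeholder(tf.float32, [1, 1, 16384, 1])  # raw audio in [-1, 1]
#   loss = net.loss(samples)
#   train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)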
def _get_receptive_samples(audio_frequency, receptive_field):
"""helper function to get receptive seconds"""
return audio_frequency * receptive_field
def _get_dilations(audio_frequency, receptive_field):
"""Create dilated factors list based on receiptive field
These dilated factors are in the power of 2, till a max limit
after which they start again.
Parameters
==========
    audio_frequency : int, in Hz
        Sampling frequency of the audio
receptive_field : int,
No of seconds to take into account
"""
receptive_samples = _get_rounded_receptive_samples(audio_frequency,
receptive_field)
limit = np.log2(receptive_samples)
dilated_list = list()
counter = 0
while True:
for j in xrange(int(np.log2(MIN_DIL)), int(np.log2(MAX_DIL)) + 1):
if counter == limit:
return dilated_list
dilated_list.append(2**j)
counter += 1
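# Illustrative example (assuming a 16 kHz signal and a 1 second receptive
# field): 16000 samples round down to 2**13 = 8192, so the limit is 13 and
#   _get_dilations(16000, 1)
# returns [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 2],
# i.e. powers of two from MIN_DIL to MAX_DIL, wrapping around as needed.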
def _get_rounded_receptive_samples(audio_frequency, receptive_field):
"""Get rounded receptive samples nearest to the power of 2
"""
receptive_samples = _get_receptive_samples(audio_frequency,
receptive_field)
return 2 ** int(np.floor(np.log2(receptive_samples)))
def _mu_law_encode(audio, quantization_channels):
'''Quantizes waveform amplitudes.'''
with tf.name_scope('encode'):
mu = tf.to_float(quantization_channels - 1)
# Perform mu-law companding transformation (ITU-T, 1988).
# Minimum operation is here to deal with rare large amplitudes caused
# by resampling.
safe_audio_abs = tf.minimum(tf.abs(audio), 1.0)
magnitude = tf.log1p(mu * safe_audio_abs) / tf.log1p(mu)
signal = tf.sign(audio) * magnitude
# Quantize signal to the specified number of levels.
return tf.to_int32((signal + 1) / 2 * mu + 0.5)
def _mu_law_decode(output, quantization_channels):
'''Recovers waveform from quantized values.'''
# copied from https://github.com/ibab/tensorflow-wavenet/blob/master/wavenet/ops.py
mu = quantization_channels - 1
# Map values back to [-1, 1].
signal = 2 * (tf.to_float(output) / mu) - 1
# Perform inverse of mu-law transformation.
magnitude = (1. / mu) * ((1 + mu)**abs(signal) - 1)
return tf.sign(signal) * magnitude
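# Sketch of the intended round trip (illustrative only, not part of the original
# module): with quantization_channels == 256, _mu_law_encode maps amplitudes in
# [-1, 1] to integer levels in [0, 255] and _mu_law_decode maps those levels
# back to [-1, 1] up to quantization error, e.g.
#   levels = _mu_law_encode(tf.constant([0.0, 0.5, -1.0]), 256)
#   approx = _mu_law_decode(levels, 256)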
| mit | -5,600,043,305,285,726,000 | 39.914209 | 92 | 0.539283 | false |
evernym/zeno | plenum/test/bls/test_state_proof.py | 2 | 7251 | from plenum.common.constants import ROOT_HASH, MULTI_SIGNATURE, PROOF_NODES, TXN_TYPE, DATA, TXN_TIME, STATE_PROOF, \
MULTI_SIGNATURE_VALUE, MULTI_SIGNATURE_PARTICIPANTS, MULTI_SIGNATURE_SIGNATURE, \
MULTI_SIGNATURE_VALUE_LEDGER_ID, \
MULTI_SIGNATURE_VALUE_STATE_ROOT, MULTI_SIGNATURE_VALUE_TXN_ROOT, MULTI_SIGNATURE_VALUE_POOL_STATE_ROOT, \
MULTI_SIGNATURE_VALUE_TIMESTAMP, DOMAIN_LEDGER_ID, CURRENT_PROTOCOL_VERSION
from plenum.common.plenum_protocol_version import PlenumProtocolVersion
from plenum.common.request import SafeRequest
from plenum.common.txn_util import get_type, get_from, get_req_id, get_seq_no, get_txn_time
from plenum.common.types import f
from plenum.common.util import get_utc_epoch
from plenum.test.bls.helper import validate_multi_signature, validate_proof_for_write, validate_proof_for_read
from plenum.test.buy_handler import BuyHandler
from plenum.test.constants import GET_BUY
from plenum.test.helper import wait_for_requests_ordered, \
randomOperation, sdk_send_random_requests, sdk_json_couples_to_request_list, sdk_send_random_and_check, \
sdk_json_to_request_object
nodeCount = 4
nodes_wth_bls = 4
def check_result(txnPoolNodeSet, req, should_have_proof):
for node in txnPoolNodeSet:
req_handler = node.read_manager.request_handlers[GET_BUY]
key = BuyHandler.prepare_buy_key(req.identifier, req.reqId)
_, _, _, proof = req_handler.lookup(key, with_proof=True)
txn_time = get_utc_epoch()
result = req_handler.make_result(req,
{TXN_TYPE: "buy"},
2,
txn_time,
proof)
assert result
assert result[DATA] == {TXN_TYPE: "buy"}
assert result[f.IDENTIFIER.nm] == req.identifier
assert result[f.REQ_ID.nm] == req.reqId
assert result[f.SEQ_NO.nm] == 2
assert result[TXN_TIME] == txn_time
if should_have_proof:
assert result[STATE_PROOF] == proof
assert validate_proof_for_read(result, req)
else:
assert STATE_PROOF not in result
def test_make_proof_bls_enabled(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client):
reqs = sdk_json_couples_to_request_list(
sdk_send_random_requests(
looper, sdk_pool_handle, sdk_wallet_client, 1))
wait_for_requests_ordered(looper, txnPoolNodeSet, reqs)
req = reqs[0]
for node in txnPoolNodeSet:
req_handler = node.read_manager.request_handlers[GET_BUY]
key = BuyHandler.prepare_buy_key(req.identifier, req.reqId)
_, _, _, proof = req_handler.lookup(key, with_proof=True)
assert proof
assert ROOT_HASH in proof
assert MULTI_SIGNATURE in proof
assert PROOF_NODES in proof
multi_sig = proof[MULTI_SIGNATURE]
assert MULTI_SIGNATURE_SIGNATURE in multi_sig
assert MULTI_SIGNATURE_PARTICIPANTS in multi_sig
assert MULTI_SIGNATURE_VALUE in multi_sig
multi_sig_value = multi_sig[MULTI_SIGNATURE_VALUE]
assert MULTI_SIGNATURE_VALUE_LEDGER_ID in multi_sig_value
assert MULTI_SIGNATURE_VALUE_STATE_ROOT in multi_sig_value
assert MULTI_SIGNATURE_VALUE_TXN_ROOT in multi_sig_value
assert MULTI_SIGNATURE_VALUE_POOL_STATE_ROOT in multi_sig_value
assert MULTI_SIGNATURE_VALUE_TIMESTAMP in multi_sig_value
# check that multi sig values are in order
value_keys = list(multi_sig_value.keys())
assert [MULTI_SIGNATURE_VALUE_LEDGER_ID,
MULTI_SIGNATURE_VALUE_POOL_STATE_ROOT,
MULTI_SIGNATURE_VALUE_STATE_ROOT,
MULTI_SIGNATURE_VALUE_TIMESTAMP,
MULTI_SIGNATURE_VALUE_TXN_ROOT] == value_keys
assert validate_multi_signature(proof, txnPoolNodeSet)
def test_make_result_bls_enabled(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client):
req_dict, _ = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)[0]
req = sdk_json_to_request_object(req_dict)
wait_for_requests_ordered(looper, txnPoolNodeSet, [req])
assert req.protocolVersion
assert req.protocolVersion >= PlenumProtocolVersion.STATE_PROOF_SUPPORT.value
check_result(txnPoolNodeSet, req, True)
def test_make_result_no_protocol_version(looper, txnPoolNodeSet):
request = SafeRequest(identifier="1" * 16,
reqId=1,
operation=randomOperation(),
signature="signature",
protocolVersion=CURRENT_PROTOCOL_VERSION)
request.protocolVersion = None
check_result(txnPoolNodeSet, request, False)
def test_make_result_protocol_version_less_than_state_proof(looper,
txnPoolNodeSet):
request = SafeRequest(identifier="1" * 16,
reqId=1,
operation=randomOperation(),
signature="signature",
protocolVersion=CURRENT_PROTOCOL_VERSION)
request.protocolVersion = 0
check_result(txnPoolNodeSet, request, False)
def test_proof_in_write_reply(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client):
resp = sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client, 1)
req = resp[0][0]
result = resp[0][1]['result']
assert result
assert get_type(result) == "buy"
assert get_from(result) == req[f.IDENTIFIER.nm]
assert get_req_id(result) == req[f.REQ_ID.nm]
assert get_seq_no(result)
assert get_txn_time(result)
assert STATE_PROOF in result
state_proof = result[STATE_PROOF]
assert ROOT_HASH in state_proof
assert MULTI_SIGNATURE in state_proof
assert PROOF_NODES in state_proof
multi_sig = state_proof[MULTI_SIGNATURE]
assert MULTI_SIGNATURE_SIGNATURE in multi_sig
assert MULTI_SIGNATURE_PARTICIPANTS in multi_sig
assert MULTI_SIGNATURE_VALUE in multi_sig
multi_sig_value = multi_sig[MULTI_SIGNATURE_VALUE]
assert MULTI_SIGNATURE_VALUE_LEDGER_ID in multi_sig_value
assert MULTI_SIGNATURE_VALUE_STATE_ROOT in multi_sig_value
assert MULTI_SIGNATURE_VALUE_TXN_ROOT in multi_sig_value
assert MULTI_SIGNATURE_VALUE_POOL_STATE_ROOT in multi_sig_value
assert MULTI_SIGNATURE_VALUE_TIMESTAMP in multi_sig_value
assert validate_multi_signature(state_proof, txnPoolNodeSet)
assert validate_proof_for_write(result)
def test_make_proof_committed_head_used(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client):
req_dict, _ = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)[0]
req = sdk_json_to_request_object(req_dict)
wait_for_requests_ordered(looper, txnPoolNodeSet, [req])
key = BuyHandler.prepare_buy_key(req.identifier, req.reqId)
for node in txnPoolNodeSet:
node.states[DOMAIN_LEDGER_ID].set(key, b'somevalue')
check_result(txnPoolNodeSet, req, True)
| apache-2.0 | -3,428,471,672,437,637,600 | 42.680723 | 117 | 0.654255 | false |
ping/instagram_private_api | instagram_private_api/endpoints/collections.py | 1 | 3411 | import json
from ..compatpatch import ClientCompatPatch
class CollectionsEndpointsMixin(object):
"""For endpoints in related to collections functionality."""
def list_collections(self):
return self._call_api('collections/list/')
def collection_feed(self, collection_id, **kwargs):
"""
Get the items in a collection.
:param collection_id: Collection ID
:return:
"""
endpoint = 'feed/collection/{collection_id!s}/'.format(**{'collection_id': collection_id})
res = self._call_api(endpoint, query=kwargs)
if self.auto_patch and res.get('items'):
[ClientCompatPatch.media(m['media'], drop_incompat_keys=self.drop_incompat_keys)
for m in res.get('items', []) if m.get('media')]
return res
def create_collection(self, name, added_media_ids=None):
"""
Create a new collection.
:param name: Name for the collection
:param added_media_ids: list of media_ids
:return:
.. code-block:: javascript
{
"status": "ok",
"collection_id": "1700000000123",
"cover_media": {
"media_type": 1,
"original_width": 1080,
"original_height": 1080,
"id": 1492726080000000,
"image_versions2": {
"candidates": [
{
"url": "http://scontent-xx4-1.cdninstagram.com/...123.jpg",
"width": 1080,
"height": 1080
},
...
]
}
},
"collection_name": "A Collection"
}
"""
params = {'name': name}
if added_media_ids and isinstance(added_media_ids, str):
added_media_ids = [added_media_ids]
if added_media_ids:
params['added_media_ids'] = json.dumps(added_media_ids, separators=(',', ':'))
params.update(self.authenticated_params)
return self._call_api('collections/create/', params=params)
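    # Illustrative usage (hypothetical client instance and media IDs, not part
    # of this mixin):
    #   api = Client(username, password)
    #   created = api.create_collection('Favorites', added_media_ids=['123_456'])
    #   api.collection_feed(created['collection_id'])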
def edit_collection(self, collection_id, added_media_ids):
"""
Add media IDs to an existing collection.
:param collection_id: Collection ID
:param added_media_ids: list of media IDs
:return: Returns same object as :meth:`create_collection`
"""
if isinstance(added_media_ids, str):
added_media_ids = [added_media_ids]
params = {
'added_media_ids': json.dumps(added_media_ids, separators=(',', ':'))
}
params.update(self.authenticated_params)
endpoint = 'collections/{collection_id!s}/edit/'.format(**{'collection_id': collection_id})
return self._call_api(endpoint, params=params)
def delete_collection(self, collection_id):
"""
Delete a collection.
:param collection_id: Collection ID
:return:
.. code-block:: javascript
{
"status": "ok"
}
"""
params = self.authenticated_params
endpoint = 'collections/{collection_id!s}/delete/'.format(**{'collection_id': collection_id})
return self._call_api(endpoint, params=params)
| mit | 5,582,873,594,909,179,000 | 34.905263 | 101 | 0.522427 | false |
sudhir-serpentcs/business-requirement | business_requirement_deliverable_project/models/project.py | 1 | 3606 | # -*- coding: utf-8 -*-
# © 2016 Elico Corp (https://www.elico-corp.com).
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, fields, models
from openerp.tools.translate import _
from openerp.exceptions import ValidationError
class Project(models.Model):
_inherit = "project.project"
origin = fields.Char('Source Document')
business_requirement_id = fields.Many2one(
'business.requirement',
string='Business Requirement',
help='Link the Project and the business requirement',
)
business_requirement_deliverable_id = fields.Many2one(
comodel_name='business.requirement.deliverable',
string='Business Requirement Deliverable',
help='Link the Project and the business requirement deliverable',
)
@api.multi
def generate_project_wizard(self):
br_ids = self.env.context.get('br_ids', False)
from_project = False
if not br_ids:
br_ids = self.br_ids
from_project = True
default_uom = self.env['project.config.settings'].\
get_default_time_unit('time_unit').get('time_unit', False)
if not default_uom:
raise ValidationError(
_("""Please set working time default unit in project
config settings"""))
lines = self.env['business.requirement.resource']
for br in br_ids:
if br.state not in ['stakeholder_approval', 'cancel', 'done']:
raise ValidationError(
_("All business requirements of the project should "
"be stakeholder_approval/canceled/done"))
for deliverables in br.deliverable_lines:
for line in deliverables.resource_ids:
if line.resource_type != 'task':
continue
generated = self.env['project.task'].search(
[('br_resource_id', '=', line.id)],
limit=1)
if generated:
continue
lines |= line
for resource_line in br.resource_lines.filtered(
lambda resource: resource.resource_type == 'task'):
generated = self.env['project.task'].search(
[('br_resource_id', '=', resource_line.id)],
limit=1)
if generated:
continue
lines |= resource_line
if not lines and not br.linked_project:
raise ValidationError(
_("""There is no available business requirement resource line
to generate task"""))
if from_project:
br_ids.filtered(lambda br_id: not br_id.parent_id)
vals = {
'partner_id': self.partner_id.id,
'project_id': self.id,
'br_ids': [(6, 0, br_ids.ids)]
}
wizard_obj = self.env['br.generate.projects']
wizard = wizard_obj.with_context(
default_uom=default_uom, br_ids=False).create(vals)
action = wizard.wizard_view()
return action
class ProjectTask(models.Model):
_inherit = "project.task"
business_requirement_id = fields.Many2one(
'business.requirement',
string='Business Requirement',
help='Link the task and the business requirement',
)
br_resource_id = fields.Many2one(
comodel_name='business.requirement.resource',
string='Business Requirement Resource',
ondelete='set null'
)
| agpl-3.0 | -5,362,266,994,235,058,000 | 36.947368 | 77 | 0.56699 | false |
Azure/azure-sdk-for-python | sdk/scheduler/azure-mgmt-scheduler/azure/mgmt/scheduler/aio/operations/_job_collections_operations.py | 1 | 32666 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class JobCollectionsOperations:
"""JobCollectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.scheduler.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_subscription(
self,
**kwargs
) -> AsyncIterable["models.JobCollectionListResult"]:
"""Gets all job collections under specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either JobCollectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.scheduler.models.JobCollectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.JobCollectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('JobCollectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Scheduler/jobCollections'} # type: ignore
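    # Illustrative paging usage (hypothetical client variable, not part of the
    # generated code): the AsyncItemPaged returned above is consumed lazily, e.g.
    #   async for job_collection in client.job_collections.list_by_subscription():
    #       print(job_collection.name)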
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.JobCollectionListResult"]:
"""Gets all job collections under specified resource group.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either JobCollectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.scheduler.models.JobCollectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.JobCollectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('JobCollectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections'} # type: ignore
async def get(
self,
resource_group_name: str,
job_collection_name: str,
**kwargs
) -> "models.JobCollectionDefinition":
"""Gets a job collection.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param job_collection_name: The job collection name.
:type job_collection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobCollectionDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.scheduler.models.JobCollectionDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.JobCollectionDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobCollectionDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
job_collection_name: str,
job_collection: "models.JobCollectionDefinition",
**kwargs
) -> "models.JobCollectionDefinition":
"""Provisions a new job collection or updates an existing job collection.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param job_collection_name: The job collection name.
:type job_collection_name: str
:param job_collection: The job collection definition.
:type job_collection: ~azure.mgmt.scheduler.models.JobCollectionDefinition
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobCollectionDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.scheduler.models.JobCollectionDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.JobCollectionDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(job_collection, 'JobCollectionDefinition')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('JobCollectionDefinition', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('JobCollectionDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}'} # type: ignore
async def patch(
self,
resource_group_name: str,
job_collection_name: str,
job_collection: "models.JobCollectionDefinition",
**kwargs
) -> "models.JobCollectionDefinition":
"""Patches an existing job collection.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param job_collection_name: The job collection name.
:type job_collection_name: str
:param job_collection: The job collection definition.
:type job_collection: ~azure.mgmt.scheduler.models.JobCollectionDefinition
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobCollectionDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.scheduler.models.JobCollectionDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.JobCollectionDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self.patch.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(job_collection, 'JobCollectionDefinition')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobCollectionDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
patch.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
job_collection_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
job_collection_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a job collection.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param job_collection_name: The job collection name.
:type job_collection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
job_collection_name=job_collection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}'} # type: ignore
async def _enable_initial(
self,
resource_group_name: str,
job_collection_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-01"
# Construct URL
url = self._enable_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_enable_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/enable'} # type: ignore
async def begin_enable(
self,
resource_group_name: str,
job_collection_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Enables all of the jobs in the job collection.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param job_collection_name: The job collection name.
:type job_collection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._enable_initial(
resource_group_name=resource_group_name,
job_collection_name=job_collection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_enable.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/enable'} # type: ignore
async def _disable_initial(
self,
resource_group_name: str,
job_collection_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-01"
# Construct URL
url = self._disable_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_disable_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/disable'} # type: ignore
async def begin_disable(
self,
resource_group_name: str,
job_collection_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Disables all of the jobs in the job collection.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param job_collection_name: The job collection name.
:type job_collection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._disable_initial(
resource_group_name=resource_group_name,
job_collection_name=job_collection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_disable.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/disable'} # type: ignore
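    # Illustrative long-running-operation usage (hypothetical names, not part of
    # the generated code):
    #   poller = await client.job_collections.begin_disable("my-rg", "my-jobs")
    #   await poller.result()  # blocks until the service reports completion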
| mit | 8,264,408,306,828,113,000 | 47.465875 | 198 | 0.644799 | false |
crowdworks/redash | redash/cli/groups.py | 1 | 3319 | from __future__ import print_function
from sys import exit
from sqlalchemy.orm.exc import NoResultFound
from flask.cli import AppGroup
from click import argument, option
from redash import models
manager = AppGroup(help="Groups management commands.")
@manager.command()
@argument('name')
@option('--org', 'organization', default='default',
help="The organization the user belongs to (leave blank for "
"'default').")
@option('--permissions', default=None,
help="Comma separated list of permissions ('create_dashboard',"
" 'create_query', 'edit_dashboard', 'edit_query', "
"'view_query', 'view_source', 'execute_query', 'list_users',"
" 'schedule_query', 'list_dashboards', 'list_alerts',"
" 'list_data_sources') (leave blank for default).")
def create(name, permissions=None, organization='default'):
print("Creating group (%s)..." % (name))
org = models.Organization.get_by_slug(organization)
permissions = extract_permissions_string(permissions)
print("permissions: [%s]" % ",".join(permissions))
try:
models.db.session.add(models.Group(
name=name, org=org,
permissions=permissions))
models.db.session.commit()
except Exception as e:
print("Failed create group: %s" % e.message)
exit(1)
@manager.command()
@argument('group_id')
@option('--permissions', default=None,
help="Comma separated list of permissions ('create_dashboard',"
" 'create_query', 'edit_dashboard', 'edit_query',"
" 'view_query', 'view_source', 'execute_query', 'list_users',"
" 'schedule_query', 'list_dashboards', 'list_alerts',"
" 'list_data_sources') (leave blank for default).")
def change_permissions(group_id, permissions=None):
print("Change permissions of group %s ..." % group_id)
try:
group = models.Group.query.get(group_id)
except NoResultFound:
print("User [%s] not found." % group_id)
exit(1)
permissions = extract_permissions_string(permissions)
print("current permissions [%s] will be modify to [%s]" % (
",".join(group.permissions), ",".join(permissions)))
group.permissions = permissions
try:
models.db.session.add(group)
models.db.session.commit()
except Exception as e:
print("Failed change permission: %s" % e.message)
exit(1)
def extract_permissions_string(permissions):
if permissions is None:
permissions = models.Group.DEFAULT_PERMISSIONS
else:
permissions = permissions.split(',')
permissions = [p.strip() for p in permissions]
return permissions
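# For example, extract_permissions_string("view_query, execute_query") returns
# ['view_query', 'execute_query'], while extract_permissions_string(None) falls
# back to models.Group.DEFAULT_PERMISSIONS.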
@manager.command()
@option('--org', 'organization', default=None,
help="The organization to limit to (leave blank for all).")
def list(organization=None):
"""List all groups"""
if organization:
org = models.Organization.get_by_slug(organization)
groups = models.Group.query.filter(models.Group.org == org)
else:
groups = models.Group.query
for i, group in enumerate(groups):
if i > 0:
print("-" * 20)
print("Id: {}\nName: {}\nType: {}\nOrganization: {}\nPermission: {}".format(
group.id, group.name, group.type, group.org.slug, ",".join(group.permissions)))
| bsd-2-clause | -6,601,441,871,846,674,000 | 32.525253 | 91 | 0.637843 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/StLa/PyScripts/Strangeland.py | 1 | 7705 |
import dsz.version.checks
import dsz.lp
import dsz.version
import dsz.ui
import dsz.path
import dsz.file
import dsz.control
import dsz.menu
import dsz.env
tool = 'StLa'
version = '1.2.0.1'
resDir = dsz.lp.GetResourcesDirectory()
logdir = dsz.lp.GetLogsDirectory()
STLA_PATH = ('%s%s' % (resDir, tool))
def stlaverify(input):
storageSuccessFlag = True
success = True
if dsz.file.Exists('tm154d.da', ('%s\\..\\temp' % systemPath)):
dsz.ui.Echo('tm154d.da dump file exists ... this should not be here', dsz.ERROR)
if dsz.file.Exists('tm154p.da', ('%s\\..\\temp' % systemPath)):
dsz.ui.Echo('tm154p.da overflow file exists ... log may be full', dsz.ERROR)
if dsz.file.Exists('tm154_.da', ('%s\\..\\temp' % systemPath)):
dsz.ui.Echo('tm154_.da config file exists ... ', dsz.GOOD)
if dsz.file.Exists('tm154o.da', ('%s\\..\\temp' % systemPath)):
dsz.ui.Echo('tm154o.da storage file exists ... SUCCESSFUL', dsz.GOOD)
else:
dsz.ui.Echo('tm154o.da storage file missing ... FAILED', dsz.ERROR)
storageSuccessFlag = False
if (storageSuccessFlag == True):
dsz.ui.Echo('STRANGELAND should be installed on target... only way to confirm is with DOUBLEFEATURE', dsz.GOOD)
else:
dsz.ui.Echo("STRANGELAND doesn't look like it is on target... only way to confirm is with DOUBLEFEATURE", dsz.ERROR)
success = False
return success
def dll_u(dllfile):
dsz.ui.Echo(('Executing %s via dllload -export dll_u' % dllfile))
dsz.control.echo.Off()
runsuccess = dsz.cmd.Run(('dllload -export dll_u -library "%s"' % dllfile))
dsz.control.echo.On()
if (not runsuccess):
dsz.ui.Echo(('Could not execute %s via dll_u' % dllfile), dsz.ERROR)
return False
dsz.ui.Echo(('Successfully executed %s via dll_u' % dllfile), dsz.GOOD)
return True
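# collectfiles() below retrieves the implant's temporary capture file ("Tprf3~")
# from the directory of the currently loaded module, deletes it on target, moves
# the local copy into the NOSEND log directory and hands it to parsefile() for
# decoding with SlDecoder.exe.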
def collectfiles():
dsz.control.echo.Off()
runsuccess = dsz.cmd.Run('processinfo -minimal', dsz.RUN_FLAG_RECORD)
dsz.control.echo.On()
(currentPath, file) = dsz.path.Split(dsz.cmd.data.Get('processinfo::modules::module::modulename', dsz.TYPE_STRING)[0])
dsz.ui.Echo(('Getting collection file, "%s\\Tprf3~"' % currentPath))
dsz.control.echo.Off()
runsuccess = dsz.cmd.Run(('get "%s\\Tprf3~"' % currentPath), dsz.RUN_FLAG_RECORD)
dsz.control.echo.On()
if (not runsuccess):
dsz.ui.Echo(('Could not get collection file, %s\\Tprf3~. You may need to collect and clean this manually.' % currentPath), dsz.ERROR)
return False
getfilename = dsz.cmd.data.Get('FileLocalName::localname', dsz.TYPE_STRING)[0]
dsz.ui.Echo(('Deleting collection file, %s\\Tprf3~' % currentPath))
dsz.control.echo.Off()
if (not dsz.cmd.Run(('delete "%s\\Tprf3~"' % currentPath))):
dsz.ui.Echo(('Could not delete collection file, "%s\\Tprf3~". You may need to clean this manually.' % currentPath), dsz.ERROR)
dsz.control.echo.On()
dsz.ui.Echo('Moving file to NOSEND directory...')
dsz.control.echo.Off()
dsz.cmd.Run(('local mkdir %s\\GetFiles\\NOSEND' % logdir))
dsz.cmd.Run(('local mkdir %s\\GetFiles\\STRANGELAND_Decrypted' % logdir))
if (not dsz.cmd.Run(('local move %s\\GetFiles\\%s %s\\GetFiles\\NOSEND\\%s' % (logdir, getfilename, logdir, getfilename)))):
dsz.ui.Echo('Failed to move files to NOSEND', dsz.ERROR)
dsz.control.echo.On()
return parsefile(('%s\\GetFiles\\NOSEND\\%s' % (logdir, getfilename)))
def parsefile(file):
(path, filename) = dsz.path.Split(file)
dsz.control.echo.Off()
runsuccess = dsz.cmd.Run(('local run -command "%s\\Tools\\i386-winnt\\SlDecoder.exe %s %s\\GetFiles\\STRANGELAND_Decrypted\\%s.xml"' % (STLA_PATH, file, logdir, filename)), dsz.RUN_FLAG_RECORD)
dsz.control.echo.On()
if (not runsuccess):
dsz.ui.Echo('There was an error parsing the collection', dsz.ERROR)
return runsuccess
def stlaparse(input):
fullpath = dsz.ui.GetString('Please enter the full path to the file you want parse: ', '')
if (fullpath == ''):
dsz.ui.Echo('No string entered', dsz.ERROR)
return False
return parsefile(fullpath)
def stlainstall(input):
if dsz.version.checks.IsOs64Bit():
dll_path = 'Uploads\\x64\\mssli64.dll'
else:
dll_path = 'Uploads\\i386\\mssli.dll'
return dll_u(('%s\\%s' % (STLA_PATH, dll_path)))
def stlacollect(input):
if dsz.version.checks.IsOs64Bit():
dll_path = 'Uploads\\x64\\mssld64.dll'
else:
dll_path = 'Uploads\\i386\\mssld.dll'
if dll_u(('%s\\%s' % (STLA_PATH, dll_path))):
return collectfiles()
return False
def stlauninstall(input):
if dsz.version.checks.IsOs64Bit():
dll_path = 'Uploads\\x64\\msslu64.dll'
else:
dll_path = 'Uploads\\i386\\msslu.dll'
if (not dll_u(('%s\\%s' % (STLA_PATH, dll_path)))):
dsz.ui.Echo('Failed to load the uninstaller. Process aborted.', dsz.ERROR)
return False
if (not collectfiles()):
dsz.ui.Echo('Failed to collect and parse file.', dsz.ERROR)
if dsz.file.Exists('tm154*.da', ('%s\\..\\temp' % systemPath)):
dsz.ui.Echo('tm154*.da files exist, deleting')
dsz.control.echo.Off()
if (not dsz.cmd.Run(('delete -mask tm154*.da -path "%s\\..\\temp" -max 1' % systemPath))):
dsz.ui.Echo('Failed to delete tm154*.da', dsz.ERROR)
dsz.control.echo.On()
return True
def main():
menuOption = 0
if dsz.version.checks.IsOs64Bit():
architecture = 'x64'
else:
architecture = 'x86'
if dsz.path.windows.GetSystemPath():
global systemPath
systemPath = dsz.path.windows.GetSystemPath()
else:
dsz.ui.Echo('Could not find system path', dsz.ERROR)
return 0
menu_list = list()
menu_list.append({dsz.menu.Name: 'Install', dsz.menu.Function: stlainstall})
menu_list.append({dsz.menu.Name: 'Uninstall', dsz.menu.Function: stlauninstall})
menu_list.append({dsz.menu.Name: 'Verify Install', dsz.menu.Function: stlaverify})
menu_list.append({dsz.menu.Name: 'Collect and Parse', dsz.menu.Function: stlacollect})
menu_list.append({dsz.menu.Name: 'Parse Local', dsz.menu.Function: stlaparse})
while (menuOption != (-1)):
(retvalue, menuOption) = dsz.menu.ExecuteSimpleMenu(('\n\n===============================\nSTRANGELAND v%s %s Menu\n===============================\n' % (version, architecture)), menu_list)
if (menuOption == 0):
if (retvalue == True):
dsz.lp.RecordToolUse(tool, version, 'DEPLOYED', 'Successful')
if (retvalue == False):
dsz.lp.RecordToolUse(tool, version, 'DEPLOYED', 'Unsuccessful')
elif (menuOption == 1):
if (retvalue == True):
dsz.lp.RecordToolUse(tool, version, 'DELETED', 'Successful')
if (retvalue == False):
dsz.lp.RecordToolUse(tool, version, 'DELETED', 'Unsuccessful')
elif (menuOption == 2):
if (retvalue == True):
dsz.lp.RecordToolUse(tool, version, 'EXERCISED', 'Successful')
if (retvalue == False):
dsz.lp.RecordToolUse(tool, version, 'EXERCISED', 'Unsuccessful')
elif (menuOption == 3):
if (retvalue == True):
dsz.lp.RecordToolUse(tool, version, 'EXERCISED', 'Successful')
if (retvalue == False):
dsz.lp.RecordToolUse(tool, version, 'EXERCISED', 'Unsuccessful')
dsz.ui.Echo('**********************************')
dsz.ui.Echo('* STRANGELAND script completed. *')
dsz.ui.Echo('**********************************')
return 0
if (__name__ == '__main__'):
main() | unlicense | -3,926,526,841,632,269,000 | 44.064327 | 197 | 0.615834 | false |
crickert1234/ParamAP | ParamAP.py | 1 | 51951 | #!/usr/bin/env python3
'''
ParamAP.py (parametrization of sinoatrial myocyte action potentials)
Copyright (C) 2018 Christian Rickert <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
# imports
# runtime
import fnmatch
import functools
import gc
import math
import os
import sys
# numpy
import numpy as np
# scipy
import scipy.signal as sp_sig
import scipy.spatial as sp_spat
import scipy.stats as sp_stat
# matplotlib
import matplotlib.backends.backend_pdf as mpbp
import matplotlib.pyplot as mpp
# variables
APEXT = 0.5 # margin of ap extension (%)
FFRAME = 0.5 # time interval for filtering (ms)
POLYNOM = 2 # polynomial order used for filtering
# functions
def askboolean(dlabel="custom boolean", dval=True):
"""Returns a boolean provided by the user."""
if dval: # True
dstr = "Y/n"
else: # False
dstr = "y/N"
while True:
uchoice = input(dlabel + " [" + dstr + "]: ") or dstr
if uchoice.lower().startswith("y") and not uchoice.endswith("N"):
print("True\n")
return True # break
elif (uchoice.endswith("N") and not uchoice.startswith("Y")) or uchoice.lower().startswith("n"):
print("False\n")
return False # break
else:
continue
def askext(dlabel="custom extension", dext='atf'):
"""Returns a file extention provided by the user."""
while True:
uext = str(input("Enter " + dlabel + " [" + dext + "]: ")).lower() or dext
if uext not in ["dat", "log", "pdf"] and len(uext) == 3:
print(uext + "\n")
return uext # break
else:
print("Invalid file extension!\n")
continue
def askunit(dlabel="custom unit", daxis='', dunit=''):
"""Returns a unit provided by the user."""
while True:
uunit = input("Enter " + dlabel + " [" + dunit + "]: ") or dunit
if daxis in ["x", "X"]:
if uunit in ["ms", "s"]:
print(uunit + "\n")
return uunit # break
else:
print("Invalid unit for X-axis!\n")
continue
elif daxis in ["y", "Y"]:
if uunit in ["mV", "V"]:
print(uunit + "\n")
return uunit # break
else:
print("Invalid unit for Y-axis!\n")
continue
def askvalue(dlabel="custom value", dval=1.0, dunit="", dtype="float"):
"""Returns a value provided by the user."""
while True:
try:
uval = float(input("Enter " + dlabel + " [" + str(dval) + "]" + dunit + ": ") or dval)
break
except ValueError:
print("Non-numerical input!\n")
continue
if dtype == "float": # default
pass
elif dtype == "int":
uval = int(round(uval))
print(str(uval) + "\n")
return uval
def getfiles(path='/home/user/', pattern='*'):
"""Returns all files in path matching the pattern."""
abspath = os.path.abspath(path)
for fileobject in os.listdir(abspath):
filename = os.path.join(abspath, fileobject)
if os.path.isfile(filename) and fnmatch.fnmatchcase(fileobject, pattern):
            yield filename
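# For example, getfiles(path=WORKDIR, pattern='*.atf') lazily yields the absolute
# path of every ATF file in the working directory, as done in the main routine
# below.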
def getneighbors(origin_i=np.empty(0), vicinity=np.empty(0), origin_x=np.empty(0), origin_y=np.empty(0), hwidth=float("inf"), fheight=0.0, limit=None, within=float("inf"), bads=False):
"""Returns all nearest-neighbors in ascending (i.e. increasing distance) order."""
neighbors = np.zeros(0)
badorigins = np.zeros(0)
vicinity_kdt = sp_spat.KDTree(list(zip(vicinity, np.zeros(vicinity.size)))) # KDTree for the nearest neighbors search
for origin in origin_i:
neighbor_left, neighbor_right = False, False
for position in vicinity_kdt.query([origin, 0.0], k=limit, distance_upper_bound=within)[1]: # return nearest neighbors in ascending order
if not neighbor_left or not neighbor_right:
neighbor = vicinity[position]
if (abs(origin_x[origin]-origin_x[neighbor]) <= hwidth) and (abs(origin_y[origin]-origin_y[neighbor]) >= fheight): # relative criteria for minima left and right of maximum
if not neighbor_left and (neighbor < origin): # criteria for minimum left of maximum only
neighbors = np.append(neighbors, neighbor)
neighbor_left = True
elif not neighbor_right and (neighbor > origin): # criteria for minimum right of maximum only
neighbors = np.append(neighbors, neighbor)
neighbor_right = True
else: # odd origins with missing neighbors
badorigins = np.append(badorigins, np.argwhere(origin == origin_i))
neighbors = np.sort(np.unique(neighbors)) # unique elements only
if neighbors.size <= 1: # missing neighbor
if neighbor_left:
neighbors = np.append(neighbors, 0.0) # append neighbor_right
if neighbor_right:
neighbors = np.insert(neighbors, 0, 0.0) # insert neighbor_left
badorigins = np.sort(np.unique(badorigins))
return (neighbors.astype(int), badorigins.astype(int)) if bads else (neighbors.astype(int))
def getrunavg(xdata=np.empty(0), xinterval=FFRAME, porder=POLYNOM):
"""Returns the running average count based on a given time interval."""
tmprun = int(round(xinterval/(xdata[1]-xdata[0])))
while tmprun <= porder: # prevents filtering
tmprun += 1
return (tmprun) if tmprun % 2 else (tmprun + 1) # odd number
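# For example, with a 0.1 ms sampling interval and the default FFRAME of 0.5 ms,
# int(round(0.5/0.1)) == 5, which is odd and larger than POLYNOM, so a 5-point
# window is returned.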
def getpartitions(pstart=0, pstop=100, pint=5, pmin=10):
"""Returns a partition list in percent to segment an interval."""
plist = []
for part_l in list(range(int(pstart), int(pstop)+int(pint), int(pint))):
for part_r in list(range(int(pstart), int(pstop)+int(pint), int(pint))):
if part_r > part_l and part_r-part_l >= int(pmin): # no duplication or empty partitions, minimum size
plist.append([part_l, part_r]) # return statement removes the outmost list
return plist
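# For example, getpartitions(pstart=0, pstop=100, pint=50, pmin=10) yields
# [[0, 50], [0, 100], [50, 100]], i.e. every ordered pair of grid points that is
# at least pmin percent wide.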
def getbestlinearfit(xaxis=np.empty(0), yaxis=np.empty(0), xmin=0.0, xmax=1.0, pstart=0, pstop=100, pint=1, pmin=10):
"""Returns the best linear fit from segments of an interval."""
bst_r = 0 # regression coefficient
seg_i = np.argwhere((xaxis >= xmin) & (xaxis <= xmax)).ravel() # analyzing partial segment only
seg_t = xaxis[seg_i[-1]]-xaxis[seg_i[0]] # full interval from partial segment
seg_m, seg_n, seg_r = 0.0, 0.0, 0.0
for partition in getpartitions(pstart, pstop, pint, pmin):
seg_i = np.argwhere((xaxis >= (avgfmin_x[0]+(seg_t*partition[0]/100))) & (xaxis <= (avgfmin_x[0]+(seg_t*partition[1]/100)))).ravel() # 'ravel()' required for 'sp_stat.linregress()'
seg_x = xaxis[seg_i]
seg_y = yaxis[seg_i]
seg_m, seg_n, seg_r = sp_stat.linregress(seg_x, seg_y)[0:3] # tuple unpacking and linear regression of partial ap segment
if math.pow(seg_r, 2.0) >= math.pow(bst_r, 2.0):
bst_m, bst_n, bst_r = seg_m, seg_n, seg_r
bst_i, bst_x, bst_y = seg_i, seg_x, seg_y
# print(partition[0], " - ", partition[1], " : ", str(partition[1]-partition[0]), " ~ ", str(math.pow(bst_r, 2.0))) # shows progress, but is slow!
return (bst_i, bst_x, bst_y, bst_m, bst_n, bst_r)
def mpp_setup(title='Plot title', xlabel='Time [ ]', ylabel='Voltage [ ]'):
"""Provides a title and axes labels to a Matplotlib plot."""
mpp.title(title)
mpp.xlabel(xlabel)
mpp.ylabel(ylabel)
def readfile(inputfile='name'):
"""Extracts the xy pairs from an ASCII raw file and stores its values into a numpy array."""
defux = ["ms", "s"]
defuy = ["mV", "V"]
inpunits = False
with open(inputfile, 'r') as datafile:
line = 1
inpuxy = [] # header might be missing
while line <= 25: # arbitrary Clampfit header limit for ATF
headerline = datafile.readline()
if headerline.startswith("\""):
inpuxy = str(headerline).split() # last line of header contains units
skipline = line
if not inpuxy:
skipline = 0
line += 1
try:
inpux = inpuxy[1][1:-2]
inpuy = inpuxy[4][1:-2]
except IndexError: # missing header
inpux, inpuy = str(defux)[1:-1], str(defuy)[1:-1]
else: # header found
if inpux in defux and inpuy in defuy:
inpunits = True
datafile.seek(0) # reset the file index to the first byte
inp_xy = np.loadtxt(datafile, dtype='float64', delimiter='\t', skiprows=skipline, unpack=True) # slower than np.genfromtxt or native python, but uses less main memory at peak
return inp_xy, inpunits, inpux, inpuy
# main routine
AUTHOR = "Copyright (C) 2018 Christian Rickert"
SEPARBOLD = 79*'='
SEPARNORM = 79*'-'
SOFTWARE = "ParamAP"
VERSION = "version 1.1," # (2018-03-10)
WORKDIR = SOFTWARE # working directory for parameterization
print('{0:^79}'.format(SEPARBOLD) + os.linesep)
GREETER = '{0:<{w0}}{1:<{w1}}{2:<{w2}}'.format(SOFTWARE, VERSION, AUTHOR, w0=len(SOFTWARE)+1, w1=len(VERSION)+1, w2=len(AUTHOR)+1)
INTERMEDIATELINE1 = '{0:}'.format("Laboratory of Cathy Proenza")
INTERMEDIATELINE2 = '{0:}'.format("Department of Physiology & Biophysics")
INTERMEDIATELINE3 = '{0:}'.format("University of Colorado, Anschutz Medical Campus")
DISCLAIMER = "ParamAP is distributed in the hope that it will be useful, but it comes without\nany guarantee or warranty. This program is free software; you can redistribute\nit and/or modify it under the terms of the GNU General Public License:"
URL = "https://www.gnu.org/licenses/gpl-2.0.en.html"
print('{0:^79}'.format(GREETER) + os.linesep)
print('{0:^79}'.format(INTERMEDIATELINE1))
print('{0:^79}'.format(INTERMEDIATELINE2))
print('{0:^79}'.format(INTERMEDIATELINE3) + os.linesep)
print('{0:^79}'.format(DISCLAIMER) + os.linesep)
print('{0:^79}'.format(URL) + os.linesep)
print('{0:^79}'.format(SEPARBOLD) + os.linesep)
# customize use case
AUTORUN = askboolean("Use automatic mode?", False)
SERIES = askboolean("Run time series analysis?", False)
APMODE = askboolean("Analyze action potentials?", True)
print('{0:^79}'.format(SEPARNORM))
# set up working directory
WORKPATH = os.path.abspath(WORKDIR)
if not os.path.exists(WORKPATH):
os.mkdir(WORKPATH)
print("FOLDER:\t" + WORKPATH + "\n")
FILE = 0 # file
EXTENSION = askext(dlabel="custom file type", dext='atf') # file extension used to filter files in working directory
if SERIES:
AVG_FRAME = askvalue(dlabel="analysis frame time", dval=5000.0, dunit=' ms') # time interval for series analysis (ms)
ATFFILES = getfiles(path=WORKDIR, pattern=("*." + EXTENSION))
for ATFFILE in ATFFILES: # iterate through files
name = os.path.splitext(os.path.split(ATFFILE)[1])[0]
print('{0:^79}'.format(SEPARNORM))
print("FILE:\t" + str(name) + os.linesep)
ap_amp = 50.0 # minimum acceptable ap amplitude (mV)
ap_hwd = 250.0 # maximum acceptable ap half width (ms)
ap_max = 50.0 # maximum acceptable ap value (mV)
ap_min = -10.0 # minimum acceptable ap value (mV)
mdp_max = -50.0 # maximum acceptable mdp value (mV)
mdp_min = -90.0 # minimum acceptable mdp value (mV)
wm_der = 1.0 # window multiplier for derivative filtering
wm_max = 4.0 # window multiplier for maximum detection
wm_min = 16.0 # window multiplier for minimum detection
# read file raw data
sys.stdout.write(">> READING... ")
sys.stdout.flush()
RAW_XY, UNITS, UNIT_X, UNIT_Y = readfile(ATFFILE)
if not UNITS: # missing or incomplete units from header
print("\n")
UNIT_X = askunit(dlabel="X-axis unit", daxis="X", dunit=UNIT_X)
UNIT_Y = askunit(dlabel="Y-axis unit", daxis="Y", dunit=UNIT_Y)
sys.stdout.write(1*"\t")
toms = 1000.0 if UNIT_X == "s" else 1.0
RAW_XY[0] *= toms # full X-axis, UNIT_X = "ms"
raw_x = RAW_XY[0] # partial X-axis for time series analysis
tomv = 1000.0 if UNIT_Y == "V" else 1.0
RAW_XY[1] *= tomv # full Y-axis, UNIT_Y = "mV"
raw_y = RAW_XY[1] # partial Y-axis for time series analysis
runavg = getrunavg(RAW_XY[0]) # used for filtering and peak detection
ipg_t = RAW_XY[0][1]-RAW_XY[0][0] # time increment for interpolation grid
if not APMODE: # avoid noise artifacts in beat detection mode
        runavg = 10*runavg+1 # keep the window size an odd integer
wm_max *= 1.5
wm_min = wm_max
avg_start = RAW_XY[0][0] # interval start for averaging
avg_stop = RAW_XY[0][-1] # interval stop for averaging
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
while True: # repeat data analysis for current file
startpdf = True # overwrite existing file
segment = 0.0
while True: # time series analysis
try:
# create raw data plot
sys.stdout.write(">> PLOTTING... ")
sys.stdout.flush()
mpp_setup(title="Raw data: " + name, xlabel='Time (ms)', ylabel='Voltage (mV)')
mpp.plot(raw_x, raw_y, '0.75') # raw data (grey line)
if startpdf:
pdf_file = mpbp.PdfPages(os.path.join(WORKDIR, name + ".pdf"), keep_empty=False) # multi-pdf file
startpdf = False # append existing file
mpp.tight_layout() # avoid label overlaps
if segment == 0.0:
mpp.savefig(pdf_file, format='pdf', dpi=600) # save before .show()!
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
if not AUTORUN:
mpp.show()
# set parameters for averaging
sys.stdout.write(">> SETTING... ")
sys.stdout.flush()
if not AUTORUN:
print("\n")
if segment == 0.0: # initialize values
avg_start = askvalue(dlabel="analysis start time", dval=avg_start, dunit=' ms')
avg_stop = askvalue(dlabel="analysis stop time", dval=avg_stop, dunit=' ms')
ap_max = askvalue(dlabel="upper limit for maxima", dval=ap_max, dunit=' mV')
ap_min = askvalue(dlabel="lower limit for maxima", dval=ap_min, dunit=' mV')
mdp_max = askvalue(dlabel="upper limit for minima", dval=mdp_max, dunit=' mV')
mdp_min = askvalue(dlabel="lower limit for minima", dval=mdp_min, dunit=' mV')
if APMODE:
ap_hwd = askvalue(dlabel="maximum peak half width", dval=ap_hwd, dunit=' ms')
ap_amp = askvalue(dlabel="minimum peak amplitude", dval=ap_amp, dunit=' mV')
runavg = askvalue(dlabel="running average window size", dval=runavg, dunit='', dtype='int')
wm_der = askvalue(dlabel="window multiplier for derivative", dval=wm_der, dunit='')
wm_max = askvalue(dlabel="window multiplier for maxima", dval=wm_max, dunit='')
wm_min = askvalue(dlabel="window multiplier for minima", dval=wm_min, dunit='')
mpp.clf() # clear canvas
if segment == 0.0: # set first frame
tmp_start = avg_start + (segment*AVG_FRAME if SERIES else 0.0)
tmp_stop = (tmp_start + AVG_FRAME) if SERIES else avg_stop
raw_i = np.argwhere((RAW_XY[0] >= tmp_start) & (RAW_XY[0] <= tmp_stop)).ravel()
raw_x = RAW_XY[0][raw_i[0]:raw_i[-1]+1]
raw_y = RAW_XY[1][raw_i[0]:raw_i[-1]+1]
sys.stdout.write(("" if AUTORUN else 1*"\t") + 8*"\t" + " [OK]\n")
sys.stdout.flush()
# filter noise of raw data with Savitzky-Golay
sys.stdout.write(">> FILTERING... ")
sys.stdout.flush()
rawf_y = sp_sig.savgol_filter(raw_y, runavg, POLYNOM, mode='nearest')
sys.stdout.write(7*"\t" + " [OK]\n")
sys.stdout.flush()
# detect extrema in filtered raw data
sys.stdout.write(">> SEARCHING... ")
sys.stdout.flush()
if AUTORUN: # use unrestricted dataset (slower)
# detect maxima in filtered raw data
tmpavg = int(round(wm_max*runavg)) if int(round(wm_max*runavg)) % 2 else int(round(wm_max*runavg))+1
rawfmax_iii = np.asarray(sp_sig.argrelmax(rawf_y, order=tmpavg)).ravel() # unfiltered maxima
rawfmax_x = raw_x[rawfmax_iii]
rawfmax_y = rawf_y[rawfmax_iii]
# detect minima in filtered raw data
tmpavg = int(round(wm_min*runavg)) if int(round(wm_min*runavg)) % 2 else int(round(wm_min*runavg))+1
rawfmin_iii = np.asarray(sp_sig.argrelmin(rawf_y, order=tmpavg)).ravel() # unfiltered minima
rawfmin_x = raw_x[rawfmin_iii]
rawfmin_y = rawf_y[rawfmin_iii]
sys.stdout.write(7*"\t" + " [OK]\n")
sys.stdout.flush()
else: # use restricted dataset (faster)
# detect maxima in filtered raw data
tmpmax_x = raw_x[np.intersect1d(np.argwhere(rawf_y >= ap_min), np.argwhere(rawf_y <= ap_max))]
tmpmax_y = rawf_y[np.intersect1d(np.argwhere(rawf_y >= ap_min), np.argwhere(rawf_y <= ap_max))]
tmpavg = int(round(wm_max*runavg)) if int(round(wm_max*runavg)) % 2 else int(round(wm_max*runavg))+1
rawfmax_iii = np.asarray(sp_sig.argrelmax(tmpmax_y, order=tmpavg)).ravel() # unfiltered maxima
rawfmax_ii = np.asarray(np.where(np.in1d(raw_x.ravel(), np.intersect1d(raw_x, tmpmax_x[rawfmax_iii]).ravel()).reshape(raw_x.shape))).ravel() # back to full dataset
rawfmax_x = raw_x[rawfmax_ii]
rawfmax_y = rawf_y[rawfmax_ii]
# detect minima in filtered raw data
tmpmin_x = raw_x[np.intersect1d(np.argwhere(rawf_y >= mdp_min), np.argwhere(rawf_y <= mdp_max))]
tmpmin_y = rawf_y[np.intersect1d(np.argwhere(rawf_y >= mdp_min), np.argwhere(rawf_y <= mdp_max))]
tmpavg = int(round(wm_min*runavg)) if int(round(wm_min*runavg)) % 2 else int(round(wm_min*runavg))+1
rawfmin_iii = np.asarray(sp_sig.argrelmin(tmpmin_y, order=tmpavg)).ravel() # unfiltered minima
rawfmin_ii = np.asarray(np.where(np.in1d(raw_x.ravel(), np.intersect1d(raw_x, tmpmin_x[rawfmin_iii]).ravel()).reshape(raw_x.shape))).ravel()
rawfmin_x = raw_x[rawfmin_ii]
rawfmin_y = rawf_y[rawfmin_ii]
sys.stdout.write(7*"\t" + " [OK]\n")
sys.stdout.flush()
# analyze and reduce extrema in filtered raw data
sys.stdout.write(">> REDUCING... ")
sys.stdout.flush()
rawfmax_m = np.mean(rawfmax_y) # rough estimate due to assignment errors
rawfmin_m = np.mean(rawfmin_y)
rawfmaxmin_m = (rawfmax_m + rawfmin_m) / 2.0 # center between unreduced maxima and minima within limits (may differ from average of AVGMAX and AVGMIN)
if AUTORUN: # estimate range for reduction of extrema
# reduce maxima from unrestricted dataset
rawfmax_ii = np.argwhere(rawfmax_y >= rawfmaxmin_m).ravel() # use center to discriminate between maxima and minima
rawfmax_x = rawfmax_x[rawfmax_ii]
rawfmax_y = rawfmax_y[rawfmax_ii]
rawfmax_std = np.std(rawfmax_y, ddof=1) # standard deviation from the (estimated) arithmetic mean
ap_max = np.mean(rawfmax_y) + 4.0 * rawfmax_std # 99% confidence interval
ap_min = np.mean(rawfmax_y) - 4.0 * rawfmax_std
rawfmax_ii = functools.reduce(np.intersect1d, (rawfmax_iii, np.argwhere(rawf_y >= ap_min), np.argwhere(rawf_y <= ap_max)))
rawfmax_x = raw_x[rawfmax_ii]
rawfmax_y = rawf_y[rawfmax_ii]
# reduce minima from unrestricted dataset
rawfmin_ii = np.argwhere(rawfmin_y <= rawfmaxmin_m)
rawfmin_x = rawfmin_x[rawfmin_ii].ravel()
rawfmin_y = rawfmin_y[rawfmin_ii].ravel()
rawfmin_std = np.std(rawfmin_y, ddof=1)
mdp_max = np.mean(rawfmin_y) + 4.0 * rawfmin_std
                    mdp_min = np.mean(rawfmin_y) - 4.0 * rawfmin_std
rawfmin_ii = functools.reduce(np.intersect1d, (rawfmin_iii, np.argwhere(rawf_y >= mdp_min), np.argwhere(rawf_y <= mdp_max)))
rawfmin_x = raw_x[rawfmin_ii]
rawfmin_y = rawf_y[rawfmin_ii]
if APMODE: # check extrema for consistency - reduce maxima
badmax_ii = np.zeros(0)
badmin_ii = np.zeros(0)
rawfmin_i, badmax_ii = getneighbors(rawfmax_ii, rawfmin_ii, raw_x, rawf_y, ap_hwd, ap_amp, bads=True)
rawfmax_i = np.delete(rawfmax_ii, badmax_ii)
rawfmin_i = rawfmin_i.astype(int) # casting required for indexing
# check extrema for boundary violations - reduce maxima and minima
while True: # rough check, assignment happens later
if rawfmax_i[0] < rawfmin_i[0]: # starts with a maximum
rawfmax_i = rawfmax_i[1:]
continue
elif rawfmin_i[1] < rawfmax_i[0]: # starts with two minima
rawfmin_i = rawfmin_i[1:]
continue
elif rawfmax_i[-1] > rawfmin_i[-1]: # ends with a maximum
rawfmax_i = rawfmax_i[0:-1]
continue
elif rawfmin_i[-2] > rawfmax_i[-1]: # ends with two minima
rawfmin_i = rawfmin_i[0:-1]
continue
else:
break
rawfmax_x = raw_x[rawfmax_i] # filtered and extracted maxima
rawfmax_y = rawf_y[rawfmax_i]
# assign minima to corresponding maxima - reduce minima
minmaxmin = np.asarray([3*[0] for i in range(rawfmax_i.size)]) # [[min_left_index, max_index, min_right_index], ...]
rawfmin_kdt = sp_spat.KDTree(list(zip(rawfmin_i, np.zeros(rawfmin_i.size))))
i = 0 # index
for max_i in rawfmax_i:
min_left, min_right = False, False
minmaxmin[i][1] = max_i
for order_i in rawfmin_kdt.query([max_i, 0.0], k=None)[1]:
min_i = rawfmin_i[order_i]
if not min_left and (min_i < max_i):
minmaxmin[i][0] = min_i
min_left = True
elif not min_right and (min_i > max_i):
minmaxmin[i][2] = min_i
min_right = True
i += 1
rawfmin_i = np.unique(minmaxmin[:, [0, 2]].ravel())
rawfmin_x = raw_x[rawfmin_i] # filtered and extracted minima
rawfmin_y = rawf_y[rawfmin_i]
# find largest distance between left minima and maxima
ipg_hwl, ipg_tmp = 0.0, 0.0
for min_l, max_c in minmaxmin[:, [0, 1]]:
ipg_tmp = raw_x[max_c] - raw_x[min_l]
if ipg_tmp > ipg_hwl:
ipg_hwl = ipg_tmp
# find largest distance between right minima and maxima
ipg_hwr, ipg_tmp = 0.0, 0.0
for max_c, min_r in minmaxmin[:, [1, 2]]:
ipg_tmp = raw_x[min_r] - raw_x[max_c]
if ipg_tmp > ipg_hwr:
ipg_hwr = ipg_tmp
else: # beating rate
rawfmax_x = raw_x[rawfmax_ii] # pre-filtered maxima
rawfmax_y = rawf_y[rawfmax_ii]
rawfmin_x = raw_x[rawfmin_ii] # pre-filtered minima
rawfmin_y = rawf_y[rawfmin_ii]
                rawfmax_m = np.mean(rawfmax_y) # refined estimate due to exclusion (ap_mode)
rawfmin_m = np.mean(rawfmin_y)
if rawfmax_y.size == 0: # no APs detected
raise Warning
                else: # at least one AP
frate = 60000.0*(rawfmax_y.size/(rawfmax_x[-1]-rawfmax_x[0])) if rawfmax_y.size > 1 else float('nan') # AP firing rate (FR) [1/min]
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
# create extrema plot
sys.stdout.write(">> PLOTTING... ")
sys.stdout.flush()
mpp_setup(title="Extrema: " + name, xlabel='Time (ms)', ylabel='Voltage (mV)')
mpp.plot([raw_x[0], raw_x[-1]], [0.0, 0.0], '0.85') # X-Axis (grey line)
mpp.plot([raw_x[0], raw_x[-1]], [rawfmaxmin_m, rawfmaxmin_m], 'k--') # center between unfiltered maxima and unfiltered minima, i.e. not between AVGMAX and AVGMIN (black dashed line)
mpp.plot(raw_x, raw_y, '0.50', raw_x, rawf_y, 'r') # raw data and averaged data (grey, red line)
                mpp.plot([raw_x[0], raw_x[-1]], [ap_max, ap_max], 'b') # upper limit for maxima (blue line)
mpp.plot([raw_x[0], raw_x[-1]], [ap_min, ap_min], 'b:') # lower limit for maxima (blue dotted line)
mpp.plot([rawfmax_x, rawfmax_x], [ap_min, ap_max], 'b') # accepted maxima (blue line)
mpp.plot([raw_x[0], raw_x[-1]], [mdp_min, mdp_min], 'g') # lower limit for minima (green line)
mpp.plot([raw_x[0], raw_x[-1]], [mdp_max, mdp_max], 'g:') # upper limit for minima (green dotted line)
mpp.plot([rawfmin_x, rawfmin_x], [mdp_min, mdp_max], 'g') # accepted minima (green line)
mpp.plot([rawfmax_x[0], rawfmax_x[-1]], [rawfmax_m, rawfmax_m], 'k') # average of maxima, time interval used for firing rate count (black line)
mpp.plot([rawfmin_x[0], rawfmin_x[-1]], [rawfmin_m, rawfmin_m], 'k') # average of minima (black line)
mpp.plot(raw_x[rawfmax_ii], rawf_y[rawfmax_ii], 'bo') # maxima (blue dots)
mpp.plot(raw_x[rawfmin_ii], rawf_y[rawfmin_ii], 'go') # minima (green dots)
mpp.figtext(0.12, 0.90, "{0:<s} {1:<.4G}".format("AVGMAX (mV):", rawfmax_m), ha='left', va='center')
mpp.figtext(0.12, 0.87, "{0:<s} {1:<.4G}".format("FR (AP/min):", frate), ha='left', va='center')
mpp.figtext(0.12, 0.84, "{0:<s} {1:<.4G}".format("AVGMIN (mV):", rawfmin_m), ha='left', va='center')
mpp.tight_layout()
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
sys.stdout.write(">> SAVING... ")
sys.stdout.flush()
mpp.savefig(pdf_file, format='pdf', dpi=600)
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
if not AUTORUN:
mpp.show()
mpp.clf()
if APMODE:
# slice raw data segments by minima and align by maxima
sys.stdout.write(">> AVERAGING... ")
sys.stdout.flush()
# align ap segments by maxima, extend and average ap segments
ipg_max = float((1.0+APEXT)*ipg_hwr)
ipg_min = -float((1.0+APEXT)*ipg_hwl)
avg_x = np.arange(ipg_min, ipg_max, ipg_t, dtype='float64') # interpolation grid
avgxsize = avg_x.size
avg_y = np.zeros(avgxsize, dtype='float64') # ap average array
mpp.subplot2grid((4, 1), (0, 0), rowspan=3) # upper subplot
timestamp = "[" + str(round(tmp_start, 2)) + "ms-" + str(round(tmp_stop, 2)) + "ms]"
mpp_setup(title='Analysis: ' + name + ' ' + timestamp, xlabel='Time (ms)', ylabel='Voltage (mV)')
mpp.plot([avg_x[0], avg_x[-1]], [0.0, 0.0], '0.85') # X-axis
n = 0 # current maximum
for min_l, max_c, min_r in minmaxmin: # slicing of ap segments, extend ap parts if possible
minext_l = int(min_l - APEXT*(max_c - min_l)) # use int for index slicing
minext_r = int(min_r + APEXT*(min_r - max_c))
# prepare ap segment
tmp_x = np.asarray(raw_x[:] - raw_x[max_c]) # align by maximum
tmp_y = np.interp(avg_x, tmp_x, raw_y[:])
# average ap segments
if n == 0: # first average
avg_y = np.copy(tmp_y)
else: # all other averages
i = 0 # array index
nw = (1.0/(n+1.0)) # new data weight
pw = (n/(n+1.0)) # previous data weight
for y in np.nditer(avg_y, op_flags=['readwrite']):
y[...] = pw*y + nw*tmp_y[i] # integrate raw data into averaged data
i += 1
n += 1
mpp.plot(avg_x, tmp_y, '0.75') # plot aligned raw data segments
sys.stdout.write("\t\t\t\t\t\t\t [OK]\n")
sys.stdout.flush()
# analyze AP parameters with given criteria
sys.stdout.write(">> ANALYZING... ")
sys.stdout.flush()
# filter noise of averaged data with Savitzky-Golay
avgf_y = sp_sig.savgol_filter(avg_y, runavg, POLYNOM, mode='nearest')
# detect "Peak potential: Maximum potential of AP" (PP) (mV)
avgfmax_i = np.argwhere(avg_x == 0.0) # data point for maximum centered
                    if avgfmax_i.size == 0: # data point for maximum left or right of center
tmpavg = int(round(wm_max*runavg)) if int(round(wm_max*runavg)) % 2 else int(round(wm_max*runavg))+1
avgfmax_ii = np.asarray(sp_sig.argrelmax(avgf_y, order=tmpavg)).ravel() # find all maxima
                        avgfmax_i = avgfmax_ii[np.argmin(np.abs(avg_x[avgfmax_ii] - 0.0))] # return the maximum closest to X = 0.0
avgfmax_x = avg_x[avgfmax_i]
avgfmax_y = avgf_y[avgfmax_i]
pp_y = float(avgfmax_y)
pp = pp_y
# detect and reduce (several) minima in filtered average data,
tmpavg = int(round(wm_min*runavg)) if int(round(wm_min*runavg)) % 2 else int(round(wm_min*runavg))+1
avgfmin_ii = np.asarray(sp_sig.argrelmin(avgf_y, order=tmpavg)).ravel() # find all minima
avgfmin_i = getneighbors(np.asarray([avgfmax_i]), avgfmin_ii, avg_x, avgf_y, ap_hwd, ap_amp)
avgfmin_x = avg_x[avgfmin_i]
avgfmin_y = avgf_y[avgfmin_i]
# determine "Maximum diastolic potential 1: Minimum potential preceding PP" (MDP1) (mV)
mdp1_i = avgfmin_i[0]
mdp1_x = avg_x[mdp1_i]
mdp1_y = avgf_y[mdp1_i]
mdp1 = mdp1_y
# determine "Maximum diastolic potential 2: Minimum potential following PP" (MDP2) (mV)
mdp2_i = avgfmin_i[-1]
mdp2_x = avg_x[mdp2_i]
mdp2_y = avgf_y[mdp2_i]
mdp2 = mdp2_y
# determine "Cycle length: Time interval MDP1-MDP2" (CL) (ms)
cl = float(mdp2_x - mdp1_x)
# determine "Action potential amplitude: Potential difference of PP minus MDP2" (APA) (mV)
apa = pp - mdp2
# determine "AP duration 50: Time interval at 50% of maximum repolarization" (APD50) (ms)
apd50_l = (pp - 0.50*apa) # threshold value
apd50_i = functools.reduce(np.intersect1d, (np.argwhere(avgf_y > apd50_l), np.argwhere(avg_x >= mdp1_x), np.argwhere(avg_x <= mdp2_x)))
apd50_x = (avg_x[apd50_i[0]-1], avg_x[apd50_i[-1]+1]) # equal or smaller than apd50_l
apd50_y = (avgf_y[apd50_i[0]-1], avgf_y[apd50_i[-1]+1])
apd50 = float(apd50_x[-1] - apd50_x[0])
# determine "AP duration 90: Time interval at 90% of maximum repolarization" (APD90) (ms)
apd90_l = pp - 0.90*apa
apd90_i = functools.reduce(np.intersect1d, (np.argwhere(avgf_y > apd90_l), np.argwhere(avg_x >= mdp1_x), np.argwhere(avg_x <= mdp2_x)))
apd90_x = (avg_x[apd90_i[0]-1], avg_x[apd90_i[-1]+1]) # equal or smaller than apd90_l
apd90_y = (avgf_y[apd90_i[0]-1], avgf_y[apd90_i[-1]+1])
apd90 = float(apd90_x[-1] - apd90_x[0])
# calculate derivative of averaged data (mV/ms)
avgfg_y = np.ediff1d(avgf_y) # dY/1, differences between values
avgfg_y = np.insert(avgfg_y, 0, avgfg_y[0]) # preserve array size
avgfg_y = avgfg_y / ipg_t # dY/dX, differences per increment
# filter derivative of averaged data
tmpavg = int(round(wm_der*runavg)) if int(round(wm_der*runavg)) % 2 else int(round(wm_der*runavg))+1
avgfgf_y = sp_sig.savgol_filter(avgfg_y, tmpavg, POLYNOM, mode='nearest')
# determine "Maximum upstroke velocity: Maximum of derivative between MDP1 and PP" (MUV) (mV/ms)
tmpavg = int(round(wm_max*runavg)) if int(round(wm_max*runavg)) % 2 else int(round(wm_max*runavg))+1
avgfgfmax_ii = functools.reduce(np.intersect1d, (sp_sig.argrelmax(avgfgf_y, order=tmpavg), np.argwhere(avg_x >= mdp1_x), np.argwhere(avg_x <= avgfmax_x)))
avgfgfmax_i = getneighbors(np.asarray([avgfmax_i]), avgfgfmax_ii, avg_x, avgfgf_y)[0] # avoid errors from large ap part extensions
avgfgfmax_x = avg_x[avgfgfmax_i]
avgfgfmax_y = avgfgf_y[avgfgfmax_i]
muv = float(avgfgfmax_y)
# determine "Maximum repolarization rate: Minimum of derivative between PP and MDP2" (MRR) (mV/ms)
tmpavg = int(round(wm_min*runavg)) if int(round(wm_min*runavg)) % 2 else int(round(wm_min*runavg))+1
avgfgfmin_ii = functools.reduce(np.intersect1d, (sp_sig.argrelmin(avgfgf_y, order=tmpavg), np.argwhere(avg_x >= avgfmax_x), np.argwhere(avg_x <= mdp2_x)))
avgfgfmin_i = getneighbors(np.asarray([apd90_i[-1]+1]), avgfgfmin_ii, avg_x, avgfgf_y)[0] # mrr or trr
avgfgfmin_i = np.append(avgfgfmin_i, getneighbors(np.asarray([avgfgfmax_i]), avgfgfmin_ii, avg_x, avgfgf_y)[1]) # trr only
if avgfgfmin_i[0] == avgfgfmin_i[1]: # no trr
trr = 0.0
else:
# determine "Transient repolarization rate: Second minimum of derivative between PP and MDP2 after PP, if distinct from MRR" (TRR) (mV/ms)
trr = float(avgfgf_y[avgfgfmin_i][1])
avgfgfmin_x = avg_x[avgfgfmin_i]
avgfgfmin_y = avgfgf_y[avgfgfmin_i]
mrr = float(avgfgf_y[avgfgfmin_i][0])
# approximate diastolic duration in filtered derivative
da_i, da_x, da_y, da_m, da_n, da_r = getbestlinearfit(avg_x, avgfgf_y, mdp1_x, apd90_x[0], 10, 90, 1, 40) # get a baseline for the derivative before exceeding the threshold
# determine "Threshold potential: Potential separating DD and APD." (THR) (mV)
thr_i = functools.reduce(np.intersect1d, (np.argwhere(avgfgf_y >= ((da_m*avg_x + da_n) + 0.5)), np.argwhere(avg_x >= avg_x[da_i[-1]]), np.argwhere(avg_x <= apd50_x[0])))[0].astype(int) # determine baseline-corrected threshold level
thr_x = avg_x[thr_i]
thr_y = avgf_y[thr_i]
thr = float(thr_y)
# determine "Early diastolic duration: Time from MDP1 to end of linear fit for DDR" (EDD) (ms)
edd_i, edd_x, edd_y, edd_m, edd_n, edd_r = getbestlinearfit(avg_x, avgf_y, mdp1_x, thr_x, 10, 50, 1, 20) # fit EDD within the threshold level determined earlier
edd = float(edd_x[-1]-mdp1_x)
# determine "Diastolic depolarization rate: Potential change rate at end of EDD" (DDR) (mV/ms)
ddr = float(edd_m) # or: np.mean(avgfgf_y[edd_i])
# determine "Diastolic duration: EDD plus LDD" (DD) (ms)
dd = float(thr_x - mdp1_x)
# determine "Late diastolic duration: Time from end of linear fit for DDR to THR" (LDD) (ms)
ldd = float(thr_x - edd_x[-1])
# determine "Action potential duration: Time between THR and MDP2" (APD) (ms)
apd = float(mdp2_x - thr_x)
sys.stdout.write("\t\t\t\t\t\t\t [OK]\n")
sys.stdout.flush()
# create analysis plot
sys.stdout.write(">> PLOTTING... ") # the X-axis and the individual segments are already plotted during averaging
sys.stdout.flush()
mpp.plot([mdp1_x, thr_x], [mdp1_y, mdp1_y], 'k-.') # DD (black dashed/dotted line)
mpp.plot([thr_x, mdp2_x], [mdp2_y, mdp2_y], 'k') # APD (black line)
mpp.plot([apd50_x[0], apd50_x[1]], [apd50_y[1], apd50_y[1]], 'k') # APD50 (black line)
mpp.plot([apd90_x[0], apd90_x[1]], [apd90_y[1], apd90_y[1]], 'k') # APD90 (black line)
mpp.plot([mdp1_x, mdp1_x], [mdp1_y, 0.0], 'k:') # MDP1 indicator (black dotted line)
mpp.plot([mdp2_x, mdp2_x], [mdp2_y, 0.0], 'k:') # MDP2 indicator (black dotted line)
mpp.plot([avgfgfmax_x, avgfgfmax_x], [mdp2_y, avgf_y[avgfgfmax_i]], 'k:') # MUV indicator (black dotted line)
mpp.plot([avgfgfmin_x[0], avgfgfmin_x[0]], [mdp2_y, avgf_y[avgfgfmin_i[0]]], 'k:') # MRR indicator (black dotted line)
if trr:
mpp.plot([avgfgfmin_x[1], avgfgfmin_x[1]], [mdp2_y, avgf_y[avgfgfmin_i[1]]], 'k:') # TRR indicator (black dotted line)
mpp.plot([edd_x[-1], edd_x[-1]], [mdp2_y, 0.0], 'k:') # EDD/LDD separator (black dashed line)
mpp.plot([thr_x, thr_x], [thr_y, 0.0], 'k:') # DD/APD upper separator (black dotted line)
mpp.plot([thr_x, thr_x], [mdp2_y, thr_y], 'k:') # DD/APD lower separator (black dotted line)
mpp.plot(avg_x, avg_y, 'k', avg_x, avgf_y, 'r') # averaged data and filtered averaged data (black, red lines)
mpp.plot(avg_x[edd_i], avgf_y[edd_i], 'g') # best linear fit segment for DDR (green line)
mpp.plot(avg_x, (edd_m*avg_x + edd_n), 'k--') # DDR (black dashed line)
mpp.plot([edd_x[-1]], [edd_y[-1]], 'ko') # EDD-LDD separator (black dot)
mpp.plot([apd50_x[1]], [apd50_y[1]], 'ko') # APD50 (black dots)
mpp.plot(apd90_x[1], apd90_y[1], 'ko') # APD90 (black dots)
mpp.plot(thr_x, avgf_y[thr_i], 'ro') # THR (red dot)
mpp.plot(avgfgfmax_x, avgf_y[avgfgfmax_i], 'wo') # MUV (white dot)
mpp.plot(avgfgfmin_x[0], avgf_y[avgfgfmin_i[0]], 'wo') # MRR (white dot)
if trr:
                        mpp.plot(avgfgfmin_x[1], avgf_y[avgfgfmin_i[1]], 'wo') # TRR (white dot)
mpp.plot(avgfmax_x, pp_y, 'bo') # PP (blue dot)
mpp.plot(avgfmin_x, avgfmin_y, 'go') # MDP1, MDP2 (green dots)
mpp.figtext(0.12, 0.90, "{0:<s} {1:<.4G}".format("APs (#):", rawfmax_y.size), ha='left', va='center')
mpp.figtext(0.12, 0.87, "{0:<s} {1:<.4G}".format("FR (AP/min):", frate), ha='left', va='center')
mpp.figtext(0.12, 0.84, "{0:<s} {1:<.4G}".format("CL (ms):", cl), ha='left', va='center')
mpp.figtext(0.12, 0.81, "{0:<s} {1:<.4G}".format("DD (ms):", dd), ha='left', va='center')
mpp.figtext(0.12, 0.78, "{0:<s} {1:<.4G}".format("EDD (ms):", edd), ha='left', va='center')
mpp.figtext(0.12, 0.75, "{0:<s} {1:<.4G}".format("LDD (ms):", ldd), ha='left', va='center')
mpp.figtext(0.12, 0.72, "{0:<s} {1:<.4G}".format("APD (ms):", apd), ha='left', va='center')
mpp.figtext(0.12, 0.69, "{0:<s} {1:<.4G}".format("APD50 (ms):", apd50), ha='left', va='center')
mpp.figtext(0.12, 0.66, "{0:<s} {1:<.4G}".format("APD90 (ms):", apd90), ha='left', va='center')
mpp.figtext(0.12, 0.63, "{0:<s} {1:<.4G}".format("MDP1 (mV):", mdp1), ha='left', va='center')
mpp.figtext(0.12, 0.60, "{0:<s} {1:<.4G}".format("MDP2 (mV):", mdp2), ha='left', va='center')
mpp.figtext(0.12, 0.57, "{0:<s} {1:<.4G}".format("THR (mV):", thr), ha='left', va='center')
mpp.figtext(0.12, 0.54, "{0:<s} {1:<.4G}".format("PP (mV):", pp), ha='left', va='center')
mpp.figtext(0.12, 0.51, "{0:<s} {1:<.4G}".format("APA (mV):", apa), ha='left', va='center')
mpp.figtext(0.12, 0.48, "{0:<s} {1:<.4G}".format("DDR (mV/ms):", ddr), ha='left', va='center')
mpp.figtext(0.12, 0.45, "{0:<s} {1:<.4G}".format("MUV (mV/ms):", muv), ha='left', va='center')
mpp.figtext(0.12, 0.42, "{0:<s} {1:<.4G}".format("TRR (mV/ms):", trr), ha='left', va='center')
mpp.figtext(0.12, 0.39, "{0:<s} {1:<.4G}".format("MRR (mV/ms):", mrr), ha='left', va='center')
mpp.subplot2grid((4, 1), (3, 0)) # lower subplot
mpp_setup(title="", xlabel='Time (ms)', ylabel='(mV/ms)')
mpp.plot([avg_x[0], avg_x[-1]], [0.0, 0.0], '0.85') # x axis
mpp.plot([avgfgfmin_x[0], avgfgfmin_x[0]], [avgfgfmin_y[0], avgfgfmax_y], 'k:') # MRR indicator (black dotted line)
if trr:
mpp.plot([avgfgfmin_x[1], avgfgfmin_x[1]], [avgfgfmin_y[1], avgfgfmax_y], 'k:') # TRR indicator (black dotted line)
mpp.plot([thr_x, thr_x], [avgfgf_y[thr_i], avgfgfmax_y], 'k:') # THR indicator (black dotted line)
mpp.plot(avg_x, avgfg_y, 'c', avg_x, avgfgf_y, 'm') # derivative and filtered derivative
mpp.plot(avg_x[da_i], avgfgf_y[da_i], 'g') # best linear fit segment for THR (green line)
mpp.plot(avg_x, (da_m*avg_x + da_n), 'k--') # best linear fit for THR (black dashed line)
mpp.plot(thr_x, avgfgf_y[thr_i], 'ro') # THR (red dot)
mpp.plot(avgfgfmax_x, avgfgfmax_y, 'bo') # derivative maximum (blue dot)
mpp.plot(avgfgfmin_x, avgfgfmin_y, 'go') # derivative minima (green dots)
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
# data summary
sys.stdout.write(">> SAVING... ")
sys.stdout.flush()
avg_file = os.path.join(WORKDIR, name + "_" + timestamp + "_avg.dat")
uheader = "" +\
"Analysis start time: " + 4*"\t" + str(tmp_start) + " ms\n" + \
"Analysis stop time:" + 4*"\t" + str(tmp_stop) + " ms\n" + \
"Upper limit for maxima:" + 3*"\t" + str(ap_max) + " mV\n" + \
"Lower limit for maxima:" + 3*"\t" + str(ap_min) + " mV\n" + \
"Upper limit for minima:" + 3*"\t" + str(mdp_max) + " mV\n" + \
"Lower limit for minima:" + 3*"\t" + str(mdp_min) + " mV\n" + \
"Maximum peak half width:" + 3*"\t" + str(ap_hwd) + " ms\n" + \
"Minimum peak amplitude:" + 3*"\t" + str(ap_amp) + " mV\n" + \
"Running average window size:" + 2*"\t" + str(runavg) + "\n" + \
"Window multiplier for derivative:" + "\t" + str(wm_der) + "\n" + \
"Window multiplier for maxima:" + 2*"\t" + str(wm_max) + "\n" + \
"Window multiplier for minima:" + 2*"\t" + str(wm_min) + "\n" + \
"Time (ms)" + "\t" + "Averaged signal (mV)" + "\t" + "Filtered average (mV)"
np.savetxt(avg_file, np.column_stack((avg_x, avg_y, avgf_y)), fmt='%e', delimiter='\t', header=uheader)
mpp.tight_layout()
mpp.savefig(pdf_file, format='pdf', dpi=600)
sum_file = os.path.join(WORKDIR, "ParamAP.log")
newfile = not bool(os.path.exists(sum_file))
with open(sum_file, 'a') as targetfile: # append file
if newfile: # write header
targetfile.write(
"{0:s}\t{1:s}\t{2:s}\t{3:s}\t{4:s}\t{5:s}\t{6:s}\t{7:s}\t{8:s}\t{9:s}\t{10:s}\t{11:s}\t{12:s}\t{13:s}\t{14:s}\t{15:s}\t{16:s}\t{17:s}\t{18:s}\t{19:s}\t{20:s}".format(
"File ( )", "Start (ms)", "Stop (ms)", "APs (#)", "FR (AP/min)", "CL (ms)", "DD (ms)", "EDD (ms)", "LDD (ms)", "APD (ms)", "APD50 (ms)", "APD90 (ms)", "MDP1 (mV)", "MDP2 (mV)", "THR (mV)", "PP (mV)", "APA (mV)", "DDR (mV/ms)", "MUV (mV/ms)", "TRR (mV/ms)", "MRR (mV/ms)") + "\n")
targetfile.write(
"{0:s}\t{1:4G}\t{2:4G}\t{3:4G}\t{4:4G}\t{5:4G}\t{6:4G}\t{7:4G}\t{8:4G}\t{9:4G}\t{10:4G}\t{11:4G}\t{12:4G}\t{13:4G}\t{14:4G}\t{15:4G}\t{16:4G}\t{17:4G}\t{18:4G}\t{19:4G}\t{20:4G}".format(
name, tmp_start, tmp_stop, rawfmax_y.size, frate, cl, dd, edd, ldd, apd, apd50, apd90, mdp1, mdp2, thr, pp, apa, ddr, muv, trr, mrr) + "\n")
targetfile.flush()
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
if not AUTORUN:
mpp.show()
except IndexError as ierr: # check running average and window multiplier
sys.stdout.write("\n" + 9*"\t" + " [ER]")
print("\r ## Run failed. Detection of extrema or threshold failed.")
except PermissionError as perr: # file already opened or storage read-only
sys.stdout.write("\n" + 9*"\t" + " [ER]")
print("\r ## Run failed. File access denied by system.")
except Warning as werr: # increase averaging window time
sys.stdout.write("\n" + 9*"\t" + " [ER]")
print("\r ## Run failed. Identification of action potentials failed.")
except Exception as uerr: # unknown
sys.stdout.write("\n" + 9*"\t" + " [UN]")
print("\r ## Run failed. Error was: {0}".format(uerr) + ".")
except KeyboardInterrupt as kerr: # user canceled this file
sys.stdout.write("\n" + 9*"\t" + " [KO]")
print("\r ## Run skipped. Canceled by user.")
if SERIES: # check for next frame
if tmp_stop + AVG_FRAME <= avg_stop:
segment += 1.0
tmp_start = avg_start + segment*AVG_FRAME # prepare next frame for preview
tmp_stop = tmp_start + AVG_FRAME
raw_i = np.argwhere((RAW_XY[0] >= tmp_start) & (RAW_XY[0] <= tmp_stop)).ravel()
raw_x = RAW_XY[0][raw_i[0]:raw_i[-1]+1]
raw_y = RAW_XY[1][raw_i[0]:raw_i[-1]+1]
print()
print("RUN:\t" + str(int(segment + 1)) + "/" + str(math.floor((avg_stop-avg_start)/AVG_FRAME)))
print()
else: # not enough data left in file
break
else: # no time series analysis
break
if not AUTORUN: # check for next file
print()
nextfile = askboolean("Continue with next file?", True)
if nextfile:
break
else: # re-run current file
raw_x = RAW_XY[0] # recover original rawdata
raw_y = RAW_XY[1]
continue
else: # autorun
break
# housekeeping after each file
FILE += 1
sys.stdout.write(">> CLEANING... ")
sys.stdout.flush()
pdf_file.close() # close multi-pdf file and remove if empty
mpp.clf() # clear canvas
gc.collect() # start garbage collection to prevent memory fragmentation
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
# print summary
print('{0:^79}'.format(SEPARBOLD))
SUMMARY = "End of run: " + str(FILE) + str(" files" if FILE != 1 else " file") + " processed."
print('{0:^79}'.format(SUMMARY))
print('{0:^79}'.format(SEPARBOLD) + os.linesep)
WAIT = input("Press ENTER to end this program.")
| gpl-2.0 | 1,966,777,943,420,654,000 | 57.713793 | 315 | 0.51666 | false |
baohaojun/dico | dicoweb/settings-sample.py | 1 | 2553 | # Django settings for Dicoweb project.
#
# This file is part of GNU Dico.
# Copyright (C) 2008-2010, 2012 Wojciech Polak
#
# GNU Dico is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Dico is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Dico. If not, see <http://www.gnu.org/licenses/>.
import os
SITE_ROOT = os.path.dirname (os.path.realpath (__file__))
DEBUG = True
TEMPLATE_DEBUG = True
ADMINS = (
('Your Name', 'Your e-mail address'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = ''
DATABASE_NAME = ''
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
SITE_ID = 1
USE_I18N = True
TIME_ZONE = 'Europe/Warsaw'
LANGUAGE_CODE = 'en-us'
LANGUAGE_COOKIE_NAME = 'dicoweb_lang'
SESSION_COOKIE_NAME = 'dicoweb_sid'
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Caching, see http://docs.djangoproject.com/en/dev/topics/cache/#topics-cache
CACHE_BACKEND = 'memcached://127.0.0.1:11211/'
# Absolute path to the directory that holds media/static files.
MEDIA_ROOT = os.path.join (SITE_ROOT, 'static')
# URL that handles the media served from MEDIA_ROOT.
MEDIA_URL = 'static'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'SET THIS TO A RANDOM STRING'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
)
ROOT_URLCONF = 'dicoweb.urls'
TEMPLATE_DIRS = (
os.path.join (SITE_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'dicoweb',
)
DICT_SERVERS = ('gnu.org.ua',)
DICT_TIMEOUT = 10
| gpl-3.0 | 7,779,301,981,850,678,000 | 27.685393 | 78 | 0.725421 | false |
freedomofpress/securethenews | api/tests.py | 1 | 3776 | """
Tests basic API operations against simple test data.
"""
from rest_framework.test import APITestCase
from rest_framework import status
from django.urls import reverse
from sites.models import Site, Scan
from urllib.parse import urljoin
urlroot = reverse('api-root-v1')
def create_site():
"""
Make an example site + scans
"""
site = Site.objects.create(
name='Secure the News', domain='securethe.news')
Scan.objects.create(site=site, live=True, defaults_to_https=False)
Scan.objects.create(site=site, live=True, defaults_to_https=True)
class APIDirectoryTests(APITestCase):
def test_get_directory(self):
"""
API root should return a directory of API operations
"""
response = self.client.get(urlroot, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
# We're deliberately just testing one key so the directory can be
# modified without breaking tests
self.assertIn('sites', response.data)
class APISiteTests(APITestCase):
def setUp(self):
create_site()
def test_get_sites(self):
"""
<api root>/sites should list sites/scan that have been created
"""
url = urljoin(urlroot, 'sites/')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('results', response.data)
self.assertEqual(len(response.data['results']), 1)
sitedata = response.data['results'][0]
self.assertEqual(sitedata['name'], 'Secure the News')
self.assertIn('latest_scan', sitedata)
self.assertIn('all_scans', sitedata)
self.assertTrue(sitedata['latest_scan']['live'])
class APISiteDetailTests(APITestCase):
def setUp(self):
create_site()
def test_get_site(self):
"""
<api root>/sites/securethe.news should return created site details
"""
url = urljoin(urlroot, 'sites/securethe.news/')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['name'], 'Secure the News')
class APISiteScansTests(APITestCase):
def setUp(self):
create_site()
def test_get_site_scans(self):
"""
<api root>/sites/securethe.news/scans should return two scans
"""
url = urljoin(urlroot, 'sites/securethe.news/scans/')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
# The API itself should return a result count
self.assertEqual(response.data['count'], 2)
self.assertTrue(response.data['results'][0]['live'])
class APIPermissionTests(APITestCase):
def setUp(self):
create_site()
def test_forbidden_actions(self):
"""
<api root>/sites/ should not permit POST, PUT or DELETE operations
"""
url = urljoin(urlroot, 'sites/securethe.news/')
response1 = self.client.post(
url, json={'name': 'Insecure the News?',
'domain': 'insecurethe.news'})
self.assertEqual(response1.status_code,
status.HTTP_405_METHOD_NOT_ALLOWED)
response2 = self.client.delete(url)
self.assertEqual(response2.status_code,
status.HTTP_405_METHOD_NOT_ALLOWED)
url = urljoin(urlroot, 'sites/insecurethe.news/')
response3 = self.client.put(
url, json={'name': 'Insecure the News?',
'domain': 'insecurethe.news'})
self.assertEqual(response3.status_code,
status.HTTP_405_METHOD_NOT_ALLOWED)
| agpl-3.0 | 1,047,488,480,759,842,800 | 32.714286 | 74 | 0.628708 | false |
danielktaylor/PyLimitBook | pylimitbook/book.py | 1 | 4650 | #!/usr/bin/python
from collections import deque
from pylimitbook.tick import Bid, Ask, Trade
from pylimitbook.tree import Tree
from builtins import input
from six.moves import cStringIO as StringIO
def parse_csv(columns, line):
"""
Parse a CSV line that has ',' as a separator.
Columns is a list of the column names, must match the number of
comma-separated values in the input line.
"""
data = {}
split = line.split(',')
for idx, name in enumerate(columns):
data[name] = split[idx]
return data
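# Illustrative example, not part of the original module (field values are
# made up; the column names match those used by Book.bid() below):
#   parse_csv(['event', 'symbol', 'exchange', 'id_num', 'qty', 'price', 'timestamp'],
#             'B,AAPL,NASDAQ,42,100,1250000,1374600000')
#   -> {'event': 'B', 'symbol': 'AAPL', 'exchange': 'NASDAQ', 'id_num': '42',
#       'qty': '100', 'price': '1250000', 'timestamp': '1374600000'}
# Note that every value is returned as a string.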
class Book(object):
def __init__(self):
self.trades = deque(maxlen=100) # Index [0] is most recent trade
self.bids = Tree()
self.asks = Tree()
self.last_tick = None
self.last_timestamp = 0
def process_bid_ask(self, tick):
"""
Generic method to process bid or ask.
"""
tree = self.asks
if tick.is_bid:
tree = self.bids
if tick.qty == 0:
# Quantity is zero -> remove the entry
tree.remove_order_by_id(tick.id_num)
else:
if tree.order_exists(tick.id_num):
tree.update_order(tick)
else:
# New order
tree.insert_tick(tick)
def bid(self, csv):
columns = ['event', 'symbol', 'exchange', 'id_num', 'qty', 'price', 'timestamp']
data = parse_csv(columns, csv)
bid = Bid(data)
if bid.timestamp > self.last_timestamp:
self.last_timestamp = bid.timestamp
self.last_tick = bid
self.process_bid_ask(bid)
return bid
def bid_split(self, symbol, id_num, qty, price, timestamp):
data = {
'timestamp': timestamp,
'qty': qty,
'price': price,
'id_num': id_num
}
bid = Bid(data)
if bid.timestamp > self.last_timestamp:
self.last_timestamp = bid.timestamp
self.last_tick = bid
self.process_bid_ask(bid)
return bid
def ask(self, csv):
columns = ['event', 'symbol', 'exchange', 'id_num', 'qty', 'price', 'timestamp']
data = parse_csv(columns, csv)
ask = Ask(data)
if ask.timestamp > self.last_timestamp:
self.last_timestamp = ask.timestamp
self.last_tick = ask
self.process_bid_ask(ask)
return ask
def ask_split(self, symbol, id_num, qty, price, timestamp):
data = {
'timestamp': timestamp,
'qty': qty,
'price': price,
'id_num': id_num
}
ask = Ask(data)
if ask.timestamp > self.last_timestamp:
self.last_timestamp = ask.timestamp
self.last_tick = ask
self.process_bid_ask(ask)
return ask
def trade(self, csv):
columns = ['event', 'symbol', 'exchange', 'id_num', 'qty', 'price', 'timestamp']
data = parse_csv(columns, csv)
data['id_num'] = 0
trade = Trade(data)
if trade.timestamp > self.last_timestamp:
self.last_timestamp = trade.timestamp
self.last_tick = trade
self.trades.appendleft(trade)
return trade
def trade_split(self, symbol, qty, price, timestamp):
data = {
'timestamp': timestamp,
'qty': qty,
'price': price,
'id_num': 0
}
trade = Trade(data)
if trade.timestamp > self.last_timestamp:
self.last_timestamp = trade.timestamp
self.last_tick = trade
self.trades.appendleft(trade)
return trade
def __str__(self):
# Efficient string concat
file_str = StringIO()
file_str.write("------ Bids -------\n")
        if self.bids is not None and len(self.bids) > 0:
for k, v in self.bids.price_tree.items(reverse=True):
file_str.write('%s' % v)
file_str.write("\n------ Asks -------\n")
        if self.asks is not None and len(self.asks) > 0:
for k, v in self.asks.price_tree.items():
file_str.write('%s' % v)
file_str.write("\n------ Trades ------\n")
        if self.trades is not None and len(self.trades) > 0:
num = 0
for entry in self.trades:
if num < 5:
file_str.write(str(entry.qty) + " @ " \
+ str(entry.price / 10000) \
+ " (" + str(entry.timestamp) + ")\n")
num += 1
else:
break
file_str.write("\n")
return file_str.getvalue()
| mit | -3,987,454,480,899,093,000 | 31.517483 | 88 | 0.517849 | false |
evernote/pootle | pootle/apps/pootle_store/views.py | 1 | 33949 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010-2013 Zuza Software Foundation
# Copyright 2013-2014 Evernote Corporation
#
# This file is part of Pootle.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import logging
from itertools import groupby
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db.models import Max, Q
from django.http import HttpResponse, Http404
from django.shortcuts import redirect, render
from django.template import loader, RequestContext
from django.utils.translation import to_locale, ugettext as _
from django.utils.translation.trans_real import parse_accept_lang_header
from django.utils import timezone
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_http_methods
from translate.filters.decorators import Category
from translate.lang import data
from pootle.core.decorators import (get_path_obj, get_resource,
permission_required)
from pootle.core.exceptions import Http400
from pootle.core.mixins.treeitem import CachedMethods
from pootle_app.models.permissions import check_user_permission
from pootle_misc.checks import check_names
from pootle_misc.forms import make_search_form
from pootle_misc.util import ajax_required, jsonify, to_int
from pootle_statistics.models import (Submission, SubmissionFields,
SubmissionTypes)
from .decorators import get_unit_context
from .fields import to_python
from .forms import (unit_comment_form_factory, unit_form_factory,
highlight_whitespace)
from .models import Unit, SuggestionStates
from .signals import translation_submitted
from .templatetags.store_tags import (highlight_diffs, pluralize_source,
pluralize_target)
from .util import (UNTRANSLATED, FUZZY, TRANSLATED, STATES_MAP,
find_altsrcs)
#: Mapping of allowed sorting criteria.
#: Keys are supported query strings, values are the field + order that
#: will be used against the DB.
ALLOWED_SORTS = {
'units': {
'oldest': 'mtime',
'newest': '-mtime',
},
'suggestions': {
'oldest': 'submission__suggestion__creation_time',
'newest': '-submission__suggestion__creation_time',
},
'submissions': {
'oldest': 'submission__creation_time',
'newest': '-submission__creation_time',
},
}
#: List of fields from `ALLOWED_SORTS` that can be sorted by simply using
#: `order_by(field)`
SIMPLY_SORTED = ['units']
def get_alt_src_langs(request, user, translation_project):
language = translation_project.language
project = translation_project.project
source_language = project.source_language
langs = user.alt_src_langs.exclude(
id__in=(language.id, source_language.id)
).filter(translationproject__project=project)
if not user.alt_src_langs.count():
from pootle_language.models import Language
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
continue
simplified = data.simplify_to_common(accept_lang)
normalized = to_locale(data.normalize_code(simplified))
code = to_locale(accept_lang)
if (normalized in
('en', 'en_US', source_language.code, language.code) or
code in ('en', 'en_US', source_language.code, language.code)):
continue
langs = Language.objects.filter(
code__in=(normalized, code),
translationproject__project=project,
)
if langs.count():
break
return langs
def get_search_query(form, units_queryset):
words = form.cleaned_data['search'].split()
result = units_queryset.none()
if 'source' in form.cleaned_data['sfields']:
subresult = units_queryset
for word in words:
subresult = subresult.filter(source_f__icontains=word)
result = result | subresult
if 'target' in form.cleaned_data['sfields']:
subresult = units_queryset
for word in words:
subresult = subresult.filter(target_f__icontains=word)
result = result | subresult
if 'notes' in form.cleaned_data['sfields']:
translator_subresult = units_queryset
developer_subresult = units_queryset
for word in words:
translator_subresult = translator_subresult.filter(
translator_comment__icontains=word,
)
developer_subresult = developer_subresult.filter(
developer_comment__icontains=word,
)
result = result | translator_subresult | developer_subresult
if 'locations' in form.cleaned_data['sfields']:
subresult = units_queryset
for word in words:
subresult = subresult.filter(locations__icontains=word)
result = result | subresult
return result
def get_search_exact_query(form, units_queryset):
phrase = form.cleaned_data['search']
result = units_queryset.none()
if 'source' in form.cleaned_data['sfields']:
subresult = units_queryset.filter(source_f__contains=phrase)
result = result | subresult
if 'target' in form.cleaned_data['sfields']:
subresult = units_queryset.filter(target_f__contains=phrase)
result = result | subresult
if 'notes' in form.cleaned_data['sfields']:
translator_subresult = units_queryset
developer_subresult = units_queryset
translator_subresult = translator_subresult.filter(
translator_comment__contains=phrase,
)
developer_subresult = developer_subresult.filter(
developer_comment__contains=phrase,
)
result = result | translator_subresult | developer_subresult
if 'locations' in form.cleaned_data['sfields']:
subresult = units_queryset.filter(locations__contains=phrase)
result = result | subresult
return result
def get_search_step_query(form, units_queryset):
"""Narrows down units query to units matching search string."""
if 'exact' in form.cleaned_data['soptions']:
logging.debug(u"Using exact database search")
return get_search_exact_query(form, units_queryset)
return get_search_query(form, units_queryset)
def get_step_query(request, units_queryset):
"""Narrows down unit query to units matching conditions in GET."""
if 'filter' in request.GET:
unit_filter = request.GET['filter']
username = request.GET.get('user', None)
sort_by_param = request.GET.get('sort', None)
sort_on = 'units'
user = request.profile
if username is not None:
User = get_user_model()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
pass
if unit_filter:
match_queryset = units_queryset.none()
if unit_filter == 'all':
match_queryset = units_queryset
elif unit_filter == 'translated':
match_queryset = units_queryset.filter(state=TRANSLATED)
elif unit_filter == 'untranslated':
match_queryset = units_queryset.filter(state=UNTRANSLATED)
elif unit_filter == 'fuzzy':
match_queryset = units_queryset.filter(state=FUZZY)
elif unit_filter == 'incomplete':
match_queryset = units_queryset.filter(
Q(state=UNTRANSLATED) | Q(state=FUZZY),
)
elif unit_filter == 'suggestions':
match_queryset = units_queryset.filter(
suggestion__state=SuggestionStates.PENDING
).distinct()
elif unit_filter in ('my-suggestions', 'user-suggestions'):
match_queryset = units_queryset.filter(
suggestion__state=SuggestionStates.PENDING,
suggestion__user=user,
).distinct()
sort_on = 'suggestions'
elif unit_filter == 'user-suggestions-accepted':
match_queryset = units_queryset.filter(
suggestion__state=SuggestionStates.ACCEPTED,
suggestion__user=user,
).distinct()
elif unit_filter == 'user-suggestions-rejected':
match_queryset = units_queryset.filter(
suggestion__state=SuggestionStates.REJECTED,
suggestion__user=user,
).distinct()
elif unit_filter in ('my-submissions', 'user-submissions'):
match_queryset = units_queryset.filter(
submission__submitter=user,
submission__type__in=SubmissionTypes.EDIT_TYPES,
).distinct()
sort_on = 'submissions'
elif (unit_filter in ('my-submissions-overwritten',
'user-submissions-overwritten')):
match_queryset = units_queryset.filter(
submission__submitter=user,
).exclude(submitted_by=user).distinct()
elif unit_filter == 'checks' and 'checks' in request.GET:
checks = request.GET['checks'].split(',')
if checks:
match_queryset = units_queryset.filter(
qualitycheck__false_positive=False,
qualitycheck__name__in=checks,
).distinct()
sort_by = ALLOWED_SORTS[sort_on].get(sort_by_param, None)
if sort_by is not None:
if sort_on in SIMPLY_SORTED:
match_queryset = match_queryset.order_by(sort_by)
else:
# It's necessary to use `Max()` here because we can't
# use `distinct()` and `order_by()` at the same time
# (unless PostreSQL is used and `distinct(field_name)`)
sort_by_max = '%s__max' % sort_by
# Omit leading `-` sign
max_field = sort_by[1:] if sort_by[0] == '-' else sort_by
match_queryset = match_queryset.annotate(Max(max_field)) \
.order_by(sort_by_max)
units_queryset = match_queryset
if 'search' in request.GET and 'sfields' in request.GET:
# Accept `sfields` to be a comma-separated string of fields (#46)
GET = request.GET.copy()
sfields = GET['sfields']
if isinstance(sfields, unicode) and u',' in sfields:
GET.setlist('sfields', sfields.split(u','))
# use the search form for validation only
search_form = make_search_form(GET)
if search_form.is_valid():
units_queryset = get_search_step_query(search_form, units_queryset)
return units_queryset
#
# Views used with XMLHttpRequest requests.
#
def _filter_ctx_units(units_qs, unit, how_many, gap=0):
"""Returns ``how_many``*2 units that are before and after ``index``."""
result = {'before': [], 'after': []}
if how_many and unit.index - gap > 0:
before = units_qs.filter(store=unit.store_id, index__lt=unit.index) \
.order_by('-index')[gap:how_many+gap]
result['before'] = _build_units_list(before, reverse=True)
result['before'].reverse()
#FIXME: can we avoid this query if length is known?
if how_many:
after = units_qs.filter(store=unit.store_id,
index__gt=unit.index)[gap:how_many+gap]
result['after'] = _build_units_list(after)
return result
def _prepare_unit(unit):
"""Constructs a dictionary with relevant `unit` data."""
return {
'id': unit.id,
'url': unit.get_translate_url(),
'isfuzzy': unit.isfuzzy(),
'source': [source[1] for source in pluralize_source(unit)],
'target': [target[1] for target in pluralize_target(unit)],
}
def _path_units_with_meta(path, units):
"""Constructs a dictionary which contains a list of `units`
corresponding to `path` as well as its metadata.
"""
meta = None
units_list = []
for unit in iter(units):
if meta is None:
# XXX: Watch out for the query count
store = unit.store
tp = store.translation_project
project = tp.project
meta = {
'source_lang': project.source_language.code,
'source_dir': project.source_language.direction,
'target_lang': tp.language.code,
'target_dir': tp.language.direction,
'project_code': project.code,
'project_style': project.checkstyle,
}
units_list.append(_prepare_unit(unit))
return {
path: {
'meta': meta,
'units': units_list,
},
}
def _build_units_list(units, reverse=False):
"""Given a list/queryset of units, builds a list with the unit data
contained in a dictionary ready to be returned as JSON.
    :return: A list of dictionaries holding the unit id, translate URL,
        fuzzy flag, and source and target texts (one entry per plural
        form where applicable).
"""
return_units = []
for unit in iter(units):
return_units.append(_prepare_unit(unit))
return return_units
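# Example of the list built above (illustrative values only; the exact URL
# format comes from Unit.get_translate_url()):
#   [{'id': 1, 'url': '/af/project/translate/#unit=1', 'isfuzzy': False,
#     'source': [u'Hello'], 'target': [u'Hola']}, ...]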
@ajax_required
def get_units(request):
"""Gets source and target texts and its metadata.
:return: A JSON-encoded string containing the source and target texts
grouped by the store they belong to.
The optional `count` GET parameter defines the chunk size to
consider. The user's preference will be used by default.
When the `initial` GET parameter is present, a sorted list of
the result set ids will be returned too.
"""
pootle_path = request.GET.get('path', None)
if pootle_path is None:
raise Http400(_('Arguments missing.'))
User = get_user_model()
request.profile = User.get(request.user)
limit = request.profile.get_unit_rows()
units_qs = Unit.objects.get_for_path(pootle_path, request.profile)
units_qs = units_qs.select_related(
'store__translation_project__project',
'store__translation_project__language',
)
step_queryset = get_step_query(request, units_qs)
is_initial_request = request.GET.get('initial', False)
chunk_size = request.GET.get('count', limit)
uids_param = filter(None, request.GET.get('uids', '').split(u','))
uids = filter(None, map(to_int, uids_param))
units = None
unit_groups = []
uid_list = []
if is_initial_request:
# Not using `values_list()` here because it doesn't know about all
# existing relations when `extra()` has been used before in the
# queryset. This affects annotated names such as those ending in
# `__max`, where Django thinks we're trying to lookup a field on a
# relationship field.
# https://code.djangoproject.com/ticket/19434
uid_list = [u.id for u in step_queryset]
if len(uids) == 1:
try:
uid = uids[0]
index = uid_list.index(uid)
begin = max(index - chunk_size, 0)
end = min(index + chunk_size + 1, len(uid_list))
uids = uid_list[begin:end]
except ValueError:
raise Http404 # `uid` not found in `uid_list`
else:
count = 2 * chunk_size
units = step_queryset[:count]
if units is None and uids:
units = step_queryset.filter(id__in=uids)
units_by_path = groupby(units, lambda x: x.store.pootle_path)
for pootle_path, units in units_by_path:
unit_groups.append(_path_units_with_meta(pootle_path, units))
response = {
'unitGroups': unit_groups,
}
if uid_list:
response['uIds'] = uid_list
return HttpResponse(jsonify(response), content_type="application/json")
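# Example response payload, illustrative only: unit groups are keyed by the
# store's pootle_path, and 'uIds' is included only for initial requests:
#   {'unitGroups': [{'/af/project/file.po': {'meta': {...}, 'units': [...]}}],
#    'uIds': [1, 2, 3]}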
def _is_filtered(request):
"""Checks if unit list is filtered."""
return ('filter' in request.GET or 'checks' in request.GET or
'user' in request.GET or
('search' in request.GET and 'sfields' in request.GET))
@ajax_required
@get_unit_context('view')
def get_more_context(request, unit):
"""Retrieves more context units.
:return: An object in JSON notation that contains the source and target
texts for units that are in the context of unit ``uid``.
"""
store = request.store
json = {}
gap = int(request.GET.get('gap', 0))
qty = int(request.GET.get('qty', 1))
json["ctx"] = _filter_ctx_units(store.units, unit, qty, gap)
rcode = 200
response = jsonify(json)
return HttpResponse(response, status=rcode, content_type="application/json")
@never_cache
@get_unit_context('view')
def timeline(request, unit):
"""Returns a JSON-encoded string including the changes to the unit
rendered in HTML.
"""
timeline = Submission.objects.filter(unit=unit, field__in=[
SubmissionFields.TARGET, SubmissionFields.STATE,
SubmissionFields.COMMENT, SubmissionFields.NONE
]).exclude(
field=SubmissionFields.COMMENT,
creation_time=unit.commented_on
).order_by("id")
timeline = timeline.select_related("submitter__user",
"translation_project__language")
User = get_user_model()
entries_group = []
context = {}
# Group by submitter id and creation_time because
# different submissions can have same creation time
for key, values in \
groupby(timeline,
key=lambda x: "%d\001%s" % (x.submitter.id, x.creation_time)):
entry_group = {
'entries': [],
}
for item in values:
# Only add creation_time information for the whole entry group once
entry_group['datetime'] = item.creation_time
# Only add submitter information for the whole entry group once
entry_group.setdefault('submitter', item.submitter)
context.setdefault('language', item.translation_project.language)
entry = {
'field': item.field,
'field_name': SubmissionFields.NAMES_MAP[item.field],
}
if item.field == SubmissionFields.STATE:
entry['old_value'] = STATES_MAP[int(to_python(item.old_value))]
entry['new_value'] = STATES_MAP[int(to_python(item.new_value))]
elif item.check:
entry.update({
'check_name': item.check.name,
'check_display_name': check_names[item.check.name],
'checks_url': reverse('pootle-staticpages-display',
args=['help/quality-checks']),
'action': {
SubmissionTypes.MUTE_CHECK: 'Muted',
SubmissionTypes.UNMUTE_CHECK: 'Unmuted'
}.get(item.type, '')
})
else:
entry['new_value'] = to_python(item.new_value)
entry_group['entries'].append(entry)
entries_group.append(entry_group)
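    # Mark the oldest entry as the unit's creation, or prepend a synthetic
    # 'created' entry attributed to the system user when the history doesn't
    # start at the unit's creation time.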
if (len(entries_group) > 0 and
entries_group[0]['datetime'] == unit.creation_time):
entries_group[0]['created'] = True
else:
created = {
'created': True,
'submitter': User.objects.get_system_user(),
}
if unit.creation_time:
created['datetime'] = unit.creation_time
entries_group[:0] = [created]
# Let's reverse the chronological order
entries_group.reverse()
context['entries_group'] = entries_group
if request.is_ajax():
# The client will want to confirm that the response is relevant for
# the unit on screen at the time of receiving this, so we add the uid.
json = {'uid': unit.id}
t = loader.get_template('editor/units/xhr_timeline.html')
c = RequestContext(request, context)
json['timeline'] = t.render(c).replace('\n', '')
response = jsonify(json)
return HttpResponse(response, content_type="application/json")
else:
return render(request, "editor/units/timeline.html", context)
@ajax_required
@require_http_methods(['POST', 'DELETE'])
@get_unit_context('translate')
def comment(request, unit):
"""Dispatches the comment action according to the HTTP verb."""
if request.method == 'DELETE':
return delete_comment(request, unit)
elif request.method == 'POST':
return save_comment(request, unit)
def delete_comment(request, unit):
"""Deletes a comment by blanking its contents and records a new
submission.
"""
unit.commented_by = None
unit.commented_on = None
language = request.translation_project.language
comment_form_class = unit_comment_form_factory(language)
form = comment_form_class({}, instance=unit, request=request)
if form.is_valid():
form.save()
json = {}
rcode = 200
else:
json = {'msg': _("Failed to remove comment.")}
rcode = 400
response = jsonify(json)
return HttpResponse(response, status=rcode, content_type="application/json")
def save_comment(request, unit):
"""Stores a new comment for the given ``unit``.
:return: If the form validates, the cleaned comment is returned.
An error message is returned otherwise.
"""
# Update current unit instance's attributes
unit.commented_by = request.profile
unit.commented_on = timezone.now()
language = request.translation_project.language
form = unit_comment_form_factory(language)(request.POST, instance=unit,
request=request)
if form.is_valid():
form.save()
context = {
'unit': unit,
'language': language,
}
t = loader.get_template('editor/units/xhr_comment.html')
c = RequestContext(request, context)
json = {'comment': t.render(c)}
rcode = 200
else:
json = {'msg': _("Comment submission failed.")}
rcode = 400
response = jsonify(json)
return HttpResponse(response, status=rcode, content_type="application/json")
@never_cache
@ajax_required
@get_unit_context('view')
def get_edit_unit(request, unit):
"""Given a store path ``pootle_path`` and unit id ``uid``, gathers all the
necessary information to build the editing widget.
:return: A templatised editing widget is returned within the ``editor``
variable and paging information is also returned if the page
number has changed.
"""
json = {}
translation_project = request.translation_project
language = translation_project.language
if unit.hasplural():
snplurals = len(unit.source.strings)
else:
snplurals = None
form_class = unit_form_factory(language, snplurals, request)
form = form_class(instance=unit, request=request)
comment_form_class = unit_comment_form_factory(language)
comment_form = comment_form_class({}, instance=unit, request=request)
store = unit.store
directory = store.parent
user = request.profile
alt_src_langs = get_alt_src_langs(request, user, translation_project)
project = translation_project.project
template_vars = {
'unit': unit,
'form': form,
'comment_form': comment_form,
'store': store,
'directory': directory,
'profile': user,
'user': request.user,
'project': project,
'language': language,
'source_language': translation_project.project.source_language,
'cantranslate': check_user_permission(user, "translate", directory),
'cansuggest': check_user_permission(user, "suggest", directory),
'canreview': check_user_permission(user, "review", directory),
'is_admin': check_user_permission(user, 'administrate', directory),
'altsrcs': find_altsrcs(unit, alt_src_langs, store=store,
project=project),
}
if translation_project.project.is_terminology or store.is_terminology:
t = loader.get_template('editor/units/term_edit.html')
else:
t = loader.get_template('editor/units/edit.html')
c = RequestContext(request, template_vars)
json['editor'] = t.render(c)
json['tm_suggestions'] = unit.get_tm_suggestions()
rcode = 200
# Return context rows if filtering is applied but
# don't return any if the user has asked not to have it
current_filter = request.GET.get('filter', 'all')
show_ctx = request.COOKIES.get('ctxShow', 'true')
if ((_is_filtered(request) or current_filter not in ('all',)) and
show_ctx == 'true'):
# TODO: review if this first 'if' branch makes sense
if translation_project.project.is_terminology or store.is_terminology:
json['ctx'] = _filter_ctx_units(store.units, unit, 0)
else:
ctx_qty = int(request.COOKIES.get('ctxQty', 1))
json['ctx'] = _filter_ctx_units(store.units, unit, ctx_qty)
response = jsonify(json)
return HttpResponse(response, status=rcode, content_type="application/json")
@get_unit_context('view')
def permalink_redirect(request, unit):
return redirect(request.build_absolute_uri(unit.get_translate_url()))
@ajax_required
@get_path_obj
@permission_required('view')
@get_resource
def get_qualitycheck_stats(request, *args, **kwargs):
failing_checks = request.resource_obj.get_checks()
response = jsonify(failing_checks)
    return HttpResponse(response, content_type="application/json")
@ajax_required
@get_path_obj
@permission_required('view')
@get_resource
def get_overview_stats(request, *args, **kwargs):
stats = request.resource_obj.get_stats()
response = jsonify(stats)
    return HttpResponse(response, content_type="application/json")
@ajax_required
@get_unit_context('translate')
def submit(request, unit):
"""Processes translation submissions and stores them in the database.
:return: An object in JSON notation that contains the previous and last
units for the unit next to unit ``uid``.
"""
json = {}
translation_project = request.translation_project
language = translation_project.language
if unit.hasplural():
snplurals = len(unit.source.strings)
else:
snplurals = None
# Store current time so that it is the same for all submissions
current_time = timezone.now()
form_class = unit_form_factory(language, snplurals, request)
form = form_class(request.POST, instance=unit, request=request)
if form.is_valid():
if form.updated_fields:
for field, old_value, new_value in form.updated_fields:
sub = Submission(
creation_time=current_time,
translation_project=translation_project,
submitter=request.profile,
unit=unit,
store=unit.store,
field=field,
type=SubmissionTypes.NORMAL,
old_value=old_value,
new_value=new_value,
similarity=form.cleaned_data['similarity'],
mt_similarity=form.cleaned_data['mt_similarity'],
)
sub.save()
# Update current unit instance's attributes
# important to set these attributes after saving Submission
# because we need to access the unit's state before it was saved
if SubmissionFields.TARGET in \
map(lambda x: x[0], form.updated_fields):
form.instance.submitted_by = request.profile
form.instance.submitted_on = current_time
form.instance.reviewed_by = None
form.instance.reviewed_on = None
form.instance._log_user = request.profile
form.save()
translation_submitted.send(
sender=translation_project,
unit=form.instance,
profile=request.profile,
)
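        # If the unit now has critical quality check failures, render the
        # checks block so the editor can surface them to reviewers.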
has_critical_checks = unit.qualitycheck_set.filter(
category=Category.CRITICAL
).exists()
if has_critical_checks:
can_review = check_user_permission(request.profile, 'review',
unit.store.parent)
ctx = {
'canreview': can_review,
'unit': unit
}
template = loader.get_template('editor/units/xhr_checks.html')
context = RequestContext(request, ctx)
json['checks'] = template.render(context)
rcode = 200
json['user_score'] = request.profile.public_score
else:
# Form failed
#FIXME: we should display validation errors here
rcode = 400
json["msg"] = _("Failed to process submission.")
response = jsonify(json)
return HttpResponse(response, status=rcode, content_type="application/json")
@ajax_required
@get_unit_context('suggest')
def suggest(request, unit):
"""Processes translation suggestions and stores them in the database.
:return: An object in JSON notation that contains the previous and last
units for the unit next to unit ``uid``.
"""
json = {}
translation_project = request.translation_project
language = translation_project.language
if unit.hasplural():
snplurals = len(unit.source.strings)
else:
snplurals = None
form_class = unit_form_factory(language, snplurals, request)
form = form_class(request.POST, instance=unit, request=request)
if form.is_valid():
if form.instance._target_updated:
# TODO: Review if this hackish method is still necessary
#HACKISH: django 1.2 stupidly modifies instance on
# model form validation, reload unit from db
unit = Unit.objects.get(id=unit.id)
unit.add_suggestion(
form.cleaned_data['target_f'],
user=request.profile,
similarity=form.cleaned_data['similarity'],
mt_similarity=form.cleaned_data['mt_similarity'],
)
json['user_score'] = request.profile.public_score
rcode = 200
else:
# Form failed
#FIXME: we should display validation errors here
rcode = 400
json["msg"] = _("Failed to process suggestion.")
response = jsonify(json)
return HttpResponse(response, status=rcode, content_type="application/json")
@ajax_required
@get_unit_context('review')
def reject_suggestion(request, unit, suggid):
json = {
'udbid': unit.id,
'sugid': suggid,
}
if request.POST.get('reject'):
try:
sugg = unit.suggestion_set.get(id=suggid)
except ObjectDoesNotExist:
raise Http404
unit.reject_suggestion(sugg, request.translation_project,
request.profile)
json['user_score'] = request.profile.public_score
response = jsonify(json)
return HttpResponse(response, content_type="application/json")
@ajax_required
@get_unit_context('review')
def accept_suggestion(request, unit, suggid):
json = {
'udbid': unit.id,
'sugid': suggid,
}
if request.POST.get('accept'):
try:
suggestion = unit.suggestion_set.get(id=suggid)
except ObjectDoesNotExist:
raise Http404
unit.accept_suggestion(suggestion, request.translation_project,
request.profile)
json['user_score'] = request.profile.public_score
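        # Return the updated target strings and diffs against the remaining
        # suggestions so the client can refresh its view.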
json['newtargets'] = [highlight_whitespace(target)
for target in unit.target.strings]
json['newdiffs'] = {}
for sugg in unit.get_suggestions():
json['newdiffs'][sugg.id] = \
[highlight_diffs(unit.target.strings[i], target)
for i, target in enumerate(sugg.target.strings)]
response = jsonify(json)
return HttpResponse(response, content_type="application/json")
@ajax_required
@get_unit_context('review')
def toggle_qualitycheck(request, unit, check_id):
json = {}
json["udbid"] = unit.id
json["checkid"] = check_id
try:
unit.toggle_qualitycheck(check_id,
bool(request.POST.get('mute')), request.profile)
except ObjectDoesNotExist:
raise Http404
response = jsonify(json)
return HttpResponse(response, content_type="application/json")
| gpl-2.0 | 359,415,182,582,297,860 | 34.474399 | 80 | 0.605732 | false |
wallstreetweb/django-time-metrics | runtests.py | 1 | 1187 | import sys
try:
from django.conf import settings
from django.test.utils import get_runner
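    # Configure a minimal standalone Django environment (SQLite test database,
    # required contrib apps) so the app's test suite can run on its own.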
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
ROOT_URLCONF="time_metrics.urls",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"time_metrics",
],
SITE_ID=1,
MIDDLEWARE_CLASSES=(),
)
try:
import django
setup = django.setup
except AttributeError:
pass
else:
setup()
except ImportError:
import traceback
traceback.print_exc()
msg = "To fix this error, run: pip install -r requirements_test.txt"
raise ImportError(msg)
def run_tests(*test_args):
if not test_args:
test_args = ['tests']
# Run tests
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(test_args)
if failures:
sys.exit(bool(failures))
if __name__ == '__main__':
run_tests(*sys.argv[1:])
| mit | 1,709,264,231,324,621,600 | 20.196429 | 72 | 0.555181 | false |
Andrew414/KForest | KForest/KForest/KForest.py | 1 | 4318 | import random
import pylab as pl
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.lda import LDA
from sklearn.qda import QDA
# Test mode: selects which CSVs are loaded as the training frame (`data`)
# and the evaluation frame (`test`).
mode = 3
if mode == 1:
test = pd.read_csv("test.csv")
data = pd.read_csv("train.csv")
elif mode == 2:
test = pd.read_csv("train.csv")
data = pd.read_csv("train2.csv")
else:
test = pd.read_csv("train2.csv")
data = pd.read_csv("train3.csv")
#tr_features = ['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f20', 'f21', 'f22', 'f23', 'f24', 'f25', 'f26', 'f27', 'f28', 'f29', 'f30', 'f31', 'f32', 'f33', 'f34', 'f35', 'f36', 'f37', 'f38', 'f39', 'f40', 'f41', 'f42', 'f43', 'f44', 'f45', 'f46', 'f47', 'f48', 'f49', 'f50', 'f51', 'f52', 'f53', 'f54']
#tr_features = ['f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f20', 'f21', 'f22', 'f23', 'f24', 'f25', 'f26', 'f27', 'f28', 'f29', 'f30', 'f31', 'f32', 'f33', 'f34', 'f35', 'f36', 'f37', 'f38', 'f39', 'f40', 'f41', 'f42', 'f43', 'f44', 'f45', 'f46', 'f47', 'f48', 'f49', 'f50', 'f51', 'f52', 'f53', 'f54']
tr_features = ['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10']
ts_features = 'class'
classifiers = [
KNeighborsClassifier(1), # 0 - !
KNeighborsClassifier(3), # 1
KNeighborsClassifier(6), # 2
SVC(kernel="linear", C=0.025), # 3
SVC(kernel="linear", C=1), # 4
SVC(kernel="linear", C=100), # 5
SVC(gamma=0.5, C=0.1), # 6
SVC(gamma=2, C=1), # 7
SVC(gamma=50, C=100), # 8
DecisionTreeClassifier(max_depth=5), # 9
DecisionTreeClassifier(max_depth=10), # 10 - !
SVC(gamma=2, C=1000), # 11
SVC(gamma=2, C=100), # 12
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1), # 13
RandomForestClassifier(max_depth=10, n_estimators=10, max_features=1), # 14 - !
RandomForestClassifier(max_depth=15, n_estimators=50, max_features=5), # 15 - !
AdaBoostClassifier(), # 16
GaussianNB(), # 17
MultinomialNB(), # 18
BernoulliNB(), # 19
LDA(), # 20
QDA() # 21
]
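# Flags (by index) selecting which of the classifiers above get trained and evaluated.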
#0 #1 #2 #3 #4 #5 #6 #7 #8 #9 #10 #11 #12 #13 #14 #15 #16 #17 #18 #19 #20 #21
needtocheck = [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0]
i = -1;
for fier in classifiers:
i += 1
if needtocheck[i] == 1:
clf = fier
clf.fit(data[tr_features], data[ts_features])
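        # Note: predictions (and the accuracy printed below) are computed on the
        # training frame `data`, not on `test`.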
preds = clf.predict(data[tr_features])
if mode != 1:
print i
accuracy = np.where(preds==data[ts_features], 1, 0).sum() / float(len(data))
print accuracy
if mode == 1:
print 'class'
for i in preds:
print i
| gpl-2.0 | -3,674,565,233,713,391,600 | 49.209302 | 384 | 0.432608 | false |
rgbkrk/binder | web/app.py | 1 | 4824 | import Queue
import json
import signal
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler
from tornado.httpserver import HTTPServer
from binder.service import Service
from binder.app import App
from .builder import Builder
# TODO move settings into a config file
PORT = 8080
NUM_WORKERS = 10
PRELOAD = True
QUEUE_SIZE = 50
ALLOW_ORIGIN = True
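# Pending build requests are queued here and consumed by the background Builder.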
build_queue = Queue.Queue(QUEUE_SIZE)
class BinderHandler(RequestHandler):
def get(self):
if ALLOW_ORIGIN:
self.set_header('Access-Control-Allow-Origin', '*')
def post(self):
if ALLOW_ORIGIN:
self.set_header('Access-Control-Allow-Origin', '*')
class BuildHandler(BinderHandler):
def _is_malformed(self, spec):
# by default, there aren't any required fields in an app specification
pass
def _write_build_state(self, app):
if app.build_state == App.BuildState.BUILDING:
self.write({"build_status": "building"})
elif app.build_state == App.BuildState.FAILED:
self.write({"build_status": "failed"})
elif app.build_state == App.BuildState.COMPLETED:
self.write({"build_status": "completed"})
else:
self.write({"build_status": "unknown"})
class GithubHandler(BuildHandler):
def _is_malformed(self, spec):
# in the GithubHandler, the repo field is inferred from organization/repo
return "repo" in spec
def _make_app_name(self, organization, repo):
return organization + "-" + repo
class GithubStatusHandler(GithubHandler):
def get(self, organization, repo):
super(GithubStatusHandler, self).get()
app_name = self._make_app_name(organization, repo)
app = App.get_app(app_name)
if not app:
self.set_status(404)
self.write({"error": "app does not exist"})
else:
self._write_build_state(app)
class GithubBuildHandler(GithubHandler):
@gen.coroutine
def get(self, organization, repo):
# if the app is still building, return an error. If the app is built, deploy it and return
# the redirect url
        super(GithubBuildHandler, self).get()
app_name = self._make_app_name(organization, repo)
app = App.get_app(app_name)
if app and app.build_state == App.BuildState.COMPLETED:
redirect_url = app.deploy("single-node")
self.write({"redirect_url": redirect_url})
else:
self.set_status(404)
self.write({"error": "no app available to deploy"})
def post(self, organization, repo):
# if the spec is properly formed, create/build the app
super(GithubBuildHandler, self).post()
print("request.body: {}".format(self.request.body))
spec = json.loads(self.request.body)
if self._is_malformed(spec):
self.set_status(400)
self.write({"error": "malformed app specification"})
else:
try:
spec["name"] = self._make_app_name(organization, repo)
spec["repo"] = "https://www.github.com/{0}/{1}".format(organization, repo)
build_queue.put(spec)
self.write({"success": "app submitted to build queue"})
except Queue.Full:
self.write({"error": "build queue full"})
class OtherSourceHandler(BuildHandler):
def get(self, app_id):
pass
def post(self, app_id):
pass
class ServicesHandler(BinderHandler):
def get(self):
super(ServicesHandler, self).get()
services = Service.get_service()
self.write({"services": [service.full_name for service in services]})
class AppsHandler(BinderHandler):
def get(self):
super(AppsHandler, self).get()
apps = App.get_app()
self.write({"apps": [app.name for app in apps]})
def sig_handler(sig, frame):
IOLoop.instance().add_callback(shutdown)
def shutdown():
print("Shutting down...")
IOLoop.instance().stop()
builder.stop()
def main():
application = Application([
(r"/apps/(?P<organization>.+)/(?P<repo>.+)/status", GithubStatusHandler),
(r"/apps/(?P<organization>.+)/(?P<repo>.+)", GithubBuildHandler),
(r"/apps/(?P<app_id>.+)", OtherSourceHandler),
(r"/services", ServicesHandler),
(r"/apps", AppsHandler)
], debug=True)
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler)
global builder
builder = Builder(build_queue, PRELOAD)
builder.start()
http_server = HTTPServer(application)
http_server.listen(PORT)
print("Binder API server running on port {}".format(PORT))
IOLoop.current().start()
if __name__ == "__main__":
main()
| apache-2.0 | 1,329,055,373,027,206,000 | 28.595092 | 98 | 0.623134 | false |
kzvyahin/cfme_tests | cfme/tests/infrastructure/test_host_analysis.py | 1 | 5050 | # -*- coding: utf-8 -*-
import pytest
from cfme import test_requirements
from cfme.configure.tasks import is_host_analysis_finished
from cfme.exceptions import ListAccordionLinkNotFound
from cfme.infrastructure import host
from cfme.web_ui import listaccordion as list_acc, toolbar, InfoBlock
from utils import conf
from utils import testgen
from utils import version
from utils.update import update
from utils.wait import wait_for
pytestmark = [
test_requirements.smartstate,
pytest.mark.tier(3)
]
HOST_TYPES = ('rhev', 'rhel', 'esx', 'esxi')
def pytest_generate_tests(metafunc):
argnames, argvalues, idlist = testgen.infra_providers(metafunc)
argnames = argnames + ['host_type', 'host_name']
new_argvalues = []
new_idlist = []
for i, argvalue_tuple in enumerate(argvalues):
args = dict(zip(argnames, argvalue_tuple))
prov_hosts = args['provider'].data['hosts']
if not prov_hosts:
continue
for test_host in prov_hosts:
if not test_host.get('test_fleece', False):
continue
assert test_host.get('type', None) in HOST_TYPES,\
'host type must be set to [{}] for smartstate analysis tests'\
.format('|'.join(HOST_TYPES))
argvalues[i] = argvalues[i] + [test_host['type'], test_host['name']]
test_id = '{}-{}-{}'.format(args['provider'].key, test_host['type'], test_host['name'])
new_argvalues.append(argvalues[i])
new_idlist.append(test_id)
testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
def get_host_data_by_name(provider_key, host_name):
for host_obj in conf.cfme_data.get('management_systems', {})[provider_key].get('hosts', []):
if host_name == host_obj['name']:
return host_obj
return None
@pytest.mark.uncollectif(
lambda provider: version.current_version() == version.UPSTREAM and provider.type == 'rhevm')
def test_run_host_analysis(request, setup_provider, provider, host_type, host_name, register_event,
soft_assert, bug):
""" Run host SmartState analysis
Metadata:
test_flag: host_analysis
"""
# Add credentials to host
host_data = get_host_data_by_name(provider.key, host_name)
test_host = host.Host(name=host_name)
wait_for(lambda: test_host.exists, delay=10, num_sec=120)
if not test_host.has_valid_credentials:
with update(test_host):
test_host.credentials = host.get_credentials_from_config(host_data['credentials'])
wait_for(lambda: test_host.has_valid_credentials, delay=10, num_sec=120)
# Remove creds after test
@request.addfinalizer
def _host_remove_creds():
with update(test_host):
test_host.credentials = host.Host.Credential(
principal="", secret="", verify_secret="")
register_event('Host', host_name, ["request_host_scan", "host_scan_complete"])
# Initiate analysis
test_host.run_smartstate_analysis()
wait_for(lambda: is_host_analysis_finished(host_name),
delay=15, timeout="10m", fail_func=lambda: toolbar.select('Reload'))
# Check results of the analysis
drift_history = test_host.get_detail('Relationships', 'Drift History')
soft_assert(drift_history != '0', 'No drift history change found')
if provider.type == "rhevm":
soft_assert(test_host.get_detail('Configuration', 'Services') != '0',
'No services found in host detail')
if host_type in ('rhel', 'rhev'):
soft_assert(InfoBlock.text('Security', 'Users') != '0',
'No users found in host detail')
soft_assert(InfoBlock.text('Security', 'Groups') != '0',
'No groups found in host detail')
soft_assert(InfoBlock.text('Security', 'SSH Root') != '',
                        'No SSH root information found in host detail')
soft_assert(InfoBlock.text('Configuration', 'Packages') != '0',
'No packages found in host detail')
soft_assert(InfoBlock.text('Configuration', 'Files') != '0',
'No files found in host detail')
soft_assert(InfoBlock.text('Security', 'Firewall Rules') != '0',
'No firewall rules found in host detail')
elif host_type in ('esx', 'esxi'):
soft_assert(InfoBlock.text('Configuration', 'Advanced Settings') != '0',
'No advanced settings found in host detail')
if not(provider.type == "virtualcenter" and provider.version < "5"):
# If the Firewall Rules are 0, the element can't be found (it's not a link)
try:
# This fails for vsphere4... https://bugzilla.redhat.com/show_bug.cgi?id=1055657
list_acc.select('Security', 'Show the firewall rules on this Host')
except ListAccordionLinkNotFound:
# py.test's .fail would wipe the soft_assert data
soft_assert(False, "No firewall rules found in host detail accordion")
| gpl-2.0 | 2,383,283,383,539,621,400 | 39.4 | 99 | 0.629703 | false |
blaze33/django | tests/modeltests/model_forms/tests.py | 1 | 66035 | from __future__ import absolute_import, unicode_literals
import datetime
import os
from decimal import Decimal
from django import forms
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import ValidationError
from django.db import connection
from django.db.models.query import EmptyQuerySet
from django.forms.models import model_to_dict
from django.utils._os import upath
from django.utils.unittest import skipUnless
from django.test import TestCase
from django.utils import six
from .models import (Article, ArticleStatus, BetterWriter, BigInt, Book,
Category, CommaSeparatedInteger, CustomFieldForExclusionModel, DerivedBook,
DerivedPost, ExplicitPK, FlexibleDatePost, ImprovedArticle,
ImprovedArticleWithParentLink, Inventory, Post, Price,
Product, TextFile, Writer, WriterProfile, test_images)
if test_images:
from .models import ImageFile, OptionalImageFile
class ImageFileForm(forms.ModelForm):
class Meta:
model = ImageFile
class OptionalImageFileForm(forms.ModelForm):
class Meta:
model = OptionalImageFile
class ProductForm(forms.ModelForm):
class Meta:
model = Product
class PriceForm(forms.ModelForm):
class Meta:
model = Price
class BookForm(forms.ModelForm):
class Meta:
model = Book
class DerivedBookForm(forms.ModelForm):
class Meta:
model = DerivedBook
class ExplicitPKForm(forms.ModelForm):
class Meta:
model = ExplicitPK
fields = ('key', 'desc',)
class PostForm(forms.ModelForm):
class Meta:
model = Post
class DerivedPostForm(forms.ModelForm):
class Meta:
model = DerivedPost
class CustomWriterForm(forms.ModelForm):
name = forms.CharField(required=False)
class Meta:
model = Writer
class FlexDatePostForm(forms.ModelForm):
class Meta:
model = FlexibleDatePost
class BaseCategoryForm(forms.ModelForm):
class Meta:
model = Category
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
class PartialArticleForm(forms.ModelForm):
class Meta:
model = Article
        fields = ('headline', 'pub_date')
class RoykoForm(forms.ModelForm):
class Meta:
model = Writer
class TestArticleForm(forms.ModelForm):
class Meta:
model = Article
class PartialArticleFormWithSlug(forms.ModelForm):
class Meta:
model = Article
        fields = ('headline', 'slug', 'pub_date')
class ArticleStatusForm(forms.ModelForm):
class Meta:
model = ArticleStatus
class InventoryForm(forms.ModelForm):
class Meta:
model = Inventory
class SelectInventoryForm(forms.Form):
items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
class CustomFieldForExclusionForm(forms.ModelForm):
class Meta:
model = CustomFieldForExclusionModel
fields = ['name', 'markup']
class ShortCategory(forms.ModelForm):
name = forms.CharField(max_length=5)
slug = forms.CharField(max_length=5)
url = forms.CharField(max_length=3)
class ImprovedArticleForm(forms.ModelForm):
class Meta:
model = ImprovedArticle
class ImprovedArticleWithParentLinkForm(forms.ModelForm):
class Meta:
model = ImprovedArticleWithParentLink
class BetterWriterForm(forms.ModelForm):
class Meta:
model = BetterWriter
class WriterProfileForm(forms.ModelForm):
class Meta:
model = WriterProfile
class TextFileForm(forms.ModelForm):
class Meta:
model = TextFile
class BigIntForm(forms.ModelForm):
class Meta:
model = BigInt
class ModelFormWithMedia(forms.ModelForm):
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
class Meta:
model = TextFile
class CommaSeparatedIntegerForm(forms.ModelForm):
class Meta:
model = CommaSeparatedInteger
class PriceFormWithoutQuantity(forms.ModelForm):
class Meta:
model = Price
exclude = ('quantity',)
class ModelFormBaseTest(TestCase):
def test_base_form(self):
self.assertEqual(list(BaseCategoryForm.base_fields),
['name', 'slug', 'url'])
def test_extra_fields(self):
class ExtraFields(BaseCategoryForm):
some_extra_field = forms.BooleanField()
self.assertEqual(list(ExtraFields.base_fields),
['name', 'slug', 'url', 'some_extra_field'])
def test_replace_field(self):
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
self.assertTrue(isinstance(ReplaceField.base_fields['url'],
forms.fields.BooleanField))
def test_override_field(self):
class WriterForm(forms.ModelForm):
book = forms.CharField(required=False)
class Meta:
model = Writer
wf = WriterForm({'name': 'Richard Lockridge'})
self.assertTrue(wf.is_valid())
def test_limit_fields(self):
class LimitFields(forms.ModelForm):
class Meta:
model = Category
fields = ['url']
self.assertEqual(list(LimitFields.base_fields),
['url'])
def test_exclude_fields(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ['url']
self.assertEqual(list(ExcludeFields.base_fields),
['name', 'slug'])
def test_confused_form(self):
class ConfusedForm(forms.ModelForm):
""" Using 'fields' *and* 'exclude'. Not sure why you'd want to do
this, but uh, "be liberal in what you accept" and all.
"""
class Meta:
model = Category
fields = ['name', 'url']
exclude = ['url']
self.assertEqual(list(ConfusedForm.base_fields),
['name'])
def test_mixmodel_form(self):
class MixModelForm(BaseCategoryForm):
""" Don't allow more than one 'model' definition in the
inheritance hierarchy. Technically, it would generate a valid
form, but the fact that the resulting save method won't deal with
multiple objects is likely to trip up people not familiar with the
mechanics.
"""
class Meta:
model = Article
# MixModelForm is now an Article-related thing, because MixModelForm.Meta
# overrides BaseCategoryForm.Meta.
self.assertEqual(
list(MixModelForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_article_form(self):
self.assertEqual(
list(ArticleForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_bad_form(self):
#First class with a Meta class wins...
class BadForm(ArticleForm, BaseCategoryForm):
pass
self.assertEqual(
list(BadForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_subcategory_form(self):
class SubCategoryForm(BaseCategoryForm):
""" Subclassing without specifying a Meta on the class will use
the parent's Meta (or the first parent in the MRO if there are
multiple parent classes).
"""
pass
self.assertEqual(list(SubCategoryForm.base_fields),
['name', 'slug', 'url'])
def test_subclassmeta_form(self):
class SomeCategoryForm(forms.ModelForm):
checkbox = forms.BooleanField()
class Meta:
model = Category
class SubclassMeta(SomeCategoryForm):
""" We can also subclass the Meta inner class to change the fields
list.
"""
class Meta(SomeCategoryForm.Meta):
exclude = ['url']
self.assertHTMLEqual(
str(SubclassMeta()),
"""<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th><td><input type="checkbox" name="checkbox" id="id_checkbox" /></td></tr>"""
)
def test_orderfields_form(self):
class OrderFields(forms.ModelForm):
class Meta:
model = Category
fields = ['url', 'name']
self.assertEqual(list(OrderFields.base_fields),
['url', 'name'])
self.assertHTMLEqual(
str(OrderFields()),
"""<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>"""
)
def test_orderfields2_form(self):
class OrderFields2(forms.ModelForm):
class Meta:
model = Category
fields = ['slug', 'url', 'name']
exclude = ['url']
self.assertEqual(list(OrderFields2.base_fields),
['slug', 'name'])
class TestWidgetForm(forms.ModelForm):
class Meta:
model = Category
fields = ['name', 'url', 'slug']
widgets = {
'name': forms.Textarea,
'url': forms.TextInput(attrs={'class': 'url'})
}
class TestWidgets(TestCase):
def test_base_widgets(self):
frm = TestWidgetForm()
self.assertHTMLEqual(
str(frm['name']),
'<textarea id="id_name" rows="10" cols="40" name="name"></textarea>'
)
self.assertHTMLEqual(
str(frm['url']),
'<input id="id_url" type="text" class="url" name="url" maxlength="40" />'
)
self.assertHTMLEqual(
str(frm['slug']),
'<input id="id_slug" type="text" name="slug" maxlength="20" />'
)
class IncompleteCategoryFormWithFields(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
fields = ('name', 'slug')
model = Category
class IncompleteCategoryFormWithExclude(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
exclude = ['url']
model = Category
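# Overriding a model field on the form should bypass that field's model-level
# validation (see the two Incomplete*Form classes above).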
class ValidationTest(TestCase):
def test_validates_with_replaced_field_not_specified(self):
form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'})
        self.assertTrue(form.is_valid())
def test_validates_with_replaced_field_excluded(self):
form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'})
        self.assertTrue(form.is_valid())
def test_notrequired_overrides_notblank(self):
form = CustomWriterForm({})
        self.assertTrue(form.is_valid())
# unique/unique_together validation
class UniqueTest(TestCase):
def setUp(self):
self.writer = Writer.objects.create(name='Mike Royko')
def test_simple_unique(self):
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertTrue(form.is_valid())
obj = form.save()
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Product with this Slug already exists.'])
form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj)
self.assertTrue(form.is_valid())
def test_unique_together(self):
"""ModelForm test of unique_together constraint"""
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertTrue(form.is_valid())
form.save()
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])
def test_unique_null(self):
title = 'I May Be Wrong But I Doubt It'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
def test_inherited_unique(self):
title = 'Boss'
Book.objects.create(title=title, author=self.writer, special_id=1)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': '1', 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['special_id'], ['Book with this Special id already exists.'])
def test_inherited_unique_together(self):
title = 'Boss'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
def test_abstract_inherited_unique(self):
title = 'Boss'
isbn = '12345'
dbook = DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'isbn': isbn})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['isbn'], ['Derived book with this Isbn already exists.'])
def test_abstract_inherited_unique_together(self):
title = 'Boss'
isbn = '12345'
dbook = DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({
'title': 'Other',
'author': self.writer.pk,
'isbn': '9876',
'suffix1': '0',
'suffix2': '0'
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'],
['Derived book with this Suffix1 and Suffix2 already exists.'])
def test_explicitpk_unspecified(self):
"""Test for primary_key being in the form and failing validation."""
        form = ExplicitPKForm({'key': '', 'desc': ''})
self.assertFalse(form.is_valid())
def test_explicitpk_unique(self):
"""Ensure keys and blank character strings are tested for uniqueness."""
form = ExplicitPKForm({'key': 'key1', 'desc': ''})
self.assertTrue(form.is_valid())
form.save()
form = ExplicitPKForm({'key': 'key1', 'desc': ''})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 3)
self.assertEqual(form.errors['__all__'], ['Explicit pk with this Key and Desc already exists.'])
self.assertEqual(form.errors['desc'], ['Explicit pk with this Desc already exists.'])
self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
def test_unique_for_date(self):
p = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
form = PostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released"})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['posted'], ['This field is required.'])
def test_inherited_unique_for_date(self):
p = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
form = DerivedPostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
self.assertTrue(form.is_valid())
def test_unique_for_date_with_nullable_date(self):
p = FlexibleDatePost.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = FlexDatePostForm({'title': "Django 1.0 is released"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'slug': "Django 1.0"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'subtitle': "Finally"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0"}, instance=p)
self.assertTrue(form.is_valid())
class ModelToDictTests(TestCase):
"""
Tests for forms.models.model_to_dict
"""
def test_model_to_dict_many_to_many(self):
categories=[
Category(name='TestName1', slug='TestName1', url='url1'),
Category(name='TestName2', slug='TestName2', url='url2'),
Category(name='TestName3', slug='TestName3', url='url3')
]
for c in categories:
c.save()
writer = Writer(name='Test writer')
writer.save()
art = Article(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=writer,
article='Hello.'
)
art.save()
for c in categories:
art.categories.add(c)
art.save()
with self.assertNumQueries(1):
d = model_to_dict(art)
#Ensure all many-to-many categories appear in model_to_dict
for c in categories:
self.assertIn(c.pk, d['categories'])
#Ensure many-to-many relation appears as a list
self.assertIsInstance(d['categories'], list)
class OldFormForXTests(TestCase):
def test_base_form(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm()
self.assertHTMLEqual(
str(f),
"""<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>"""
)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" /></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" /></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" /></li>"""
)
self.assertHTMLEqual(
str(f["name"]),
"""<input id="id_name" type="text" name="name" maxlength="20" />""")
def test_auto_id(self):
f = BaseCategoryForm(auto_id=False)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li>Name: <input type="text" name="name" maxlength="20" /></li>
<li>Slug: <input type="text" name="slug" maxlength="20" /></li>
<li>The URL: <input type="text" name="url" maxlength="40" /></li>"""
)
def test_with_data(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm({'name': 'Entertainment',
'slug': 'entertainment',
'url': 'entertainment'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], 'Entertainment')
self.assertEqual(f.cleaned_data['slug'], 'entertainment')
self.assertEqual(f.cleaned_data['url'], 'entertainment')
c1 = f.save()
        # Testing whether the same object is returned from the
# ORM... not the fastest way...
self.assertEqual(c1, Category.objects.all()[0])
self.assertEqual(c1.name, "Entertainment")
self.assertEqual(Category.objects.count(), 1)
f = BaseCategoryForm({'name': "It's a test",
'slug': 'its-test',
'url': 'test'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], "It's a test")
self.assertEqual(f.cleaned_data['slug'], 'its-test')
self.assertEqual(f.cleaned_data['url'], 'test')
c2 = f.save()
        # Testing whether the same object is returned from the
# ORM... not the fastest way...
self.assertEqual(c2, Category.objects.get(pk=c2.pk))
self.assertEqual(c2.name, "It's a test")
self.assertEqual(Category.objects.count(), 2)
# If you call save() with commit=False, then it will return an object that
# hasn't yet been saved to the database. In this case, it's up to you to call
# save() on the resulting model instance.
f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
        self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['url'], 'third')
self.assertEqual(f.cleaned_data['name'], 'Third test')
self.assertEqual(f.cleaned_data['slug'], 'third-test')
c3 = f.save(commit=False)
self.assertEqual(c3.name, "Third test")
self.assertEqual(Category.objects.count(), 2)
c3.save()
self.assertEqual(Category.objects.count(), 3)
# If you call save() with invalid data, you'll get a ValueError.
f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
self.assertEqual(f.errors['name'], ['This field is required.'])
self.assertEqual(f.errors['slug'], ["Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."])
self.assertEqual(f.cleaned_data, {'url': 'foo'})
with self.assertRaises(ValueError):
f.save()
f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'})
with self.assertRaises(ValueError):
f.save()
# Create a couple of Writers.
w_royko = Writer(name='Mike Royko')
w_royko.save()
w_woodward = Writer(name='Bob Woodward')
w_woodward.save()
# ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any
# fields with the 'choices' attribute are represented by a ChoiceField.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(six.text_type(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>
<tr><th>Writer:</th><td><select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article"></textarea></td></tr>
<tr><th>Categories:</th><td><select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select><br /><span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
# You can restrict a form to a subset of the complete list of fields
# by providing a 'fields' argument. If you try to save a
# model created with such a form, you need to ensure that the fields
# that are _not_ on the form have default values, or are allowed to have
# a value of None. If a field isn't specified on a form, the object created
# from the form can't provide a value for that field!
f = PartialArticleForm(auto_id=False)
self.assertHTMLEqual(six.text_type(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>''')
# When the ModelForm is passed an instance, that instance's current values are
# inserted as 'initial' data in each Field.
w = Writer.objects.get(name='Mike Royko')
f = RoykoForm(auto_id=False, instance=w)
self.assertHTMLEqual(six.text_type(f), '''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" /><br /><span class="helptext">Use both first and last names.</span></td></tr>''')
art = Article(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=w,
article='Hello.'
)
art.save()
art_id_1 = art.id
        self.assertIsNotNone(art_id_1)
f = TestArticleForm(auto_id=False, instance=art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
f = TestArticleForm({
'headline': 'Test headline',
'slug': 'test-headline',
'pub_date': '1984-02-06',
'writer': six.text_type(w_royko.pk),
'article': 'Hello.'
}, instance=art)
self.assertEqual(f.errors, {})
        self.assertTrue(f.is_valid())
        test_art = f.save()
        self.assertEqual(test_art.id, art_id_1)
test_art = Article.objects.get(id=art_id_1)
self.assertEqual(test_art.headline, 'Test headline')
# You can create a form over a subset of the available fields
# by specifying a 'fields' argument to form_for_instance.
f = PartialArticleFormWithSlug({
'headline': 'New headline',
'slug': 'new-headline',
'pub_date': '1988-01-04'
}, auto_id=False, instance=art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>''')
        self.assertTrue(f.is_valid())
        new_art = f.save()
        self.assertEqual(new_art.id, art_id_1)
new_art = Article.objects.get(id=art_id_1)
self.assertEqual(new_art.headline, 'New headline')
# Add some categories and test the many-to-many form output.
self.assertQuerysetEqual(new_art.categories.all(), [])
new_art.categories.add(Category.objects.get(name='Entertainment'))
self.assertQuerysetEqual(new_art.categories.all(), ["Entertainment"])
f = TestArticleForm(auto_id=False, instance=new_art)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
# Initial values can be provided for model forms
f = TestArticleForm(
auto_id=False,
initial={
'headline': 'Your headline here',
'categories': [str(c1.id), str(c2.id)]
})
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s" selected="selected">It's a test</option>
<option value="%s">Third test</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
f = TestArticleForm({
'headline': 'New headline',
'slug': 'new-headline',
'pub_date': '1988-01-04',
'writer': six.text_type(w_royko.pk),
'article': 'Hello.',
'categories': [six.text_type(c1.id), six.text_type(c2.id)]
}, instance=new_art)
new_art = f.save()
        self.assertEqual(new_art.id, art_id_1)
new_art = Article.objects.get(id=art_id_1)
self.assertQuerysetEqual(new_art.categories.order_by('name'),
["Entertainment", "It's a test"])
# Now, submit form data with no categories. This deletes the existing categories.
f = TestArticleForm({'headline': 'New headline', 'slug': 'new-headline', 'pub_date': '1988-01-04',
'writer': six.text_type(w_royko.pk), 'article': 'Hello.'}, instance=new_art)
new_art = f.save()
        self.assertEqual(new_art.id, art_id_1)
new_art = Article.objects.get(id=art_id_1)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Create a new article, with categories, via the form.
f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
'writer': six.text_type(w_royko.pk), 'article': 'Test.', 'categories': [six.text_type(c1.id), six.text_type(c2.id)]})
new_art = f.save()
art_id_2 = new_art.id
        self.assertNotIn(art_id_2, (None, art_id_1))
new_art = Article.objects.get(id=art_id_2)
self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"])
# Create a new article, with no categories, via the form.
f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
'writer': six.text_type(w_royko.pk), 'article': 'Test.'})
new_art = f.save()
art_id_3 = new_art.id
        self.assertNotIn(art_id_3, (None, art_id_1, art_id_2))
new_art = Article.objects.get(id=art_id_3)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Create a new article, with categories, via the form, but use commit=False.
# The m2m data won't be saved until save_m2m() is invoked on the form.
f = ArticleForm({'headline': 'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': '1967-11-01',
'writer': six.text_type(w_royko.pk), 'article': 'Test.', 'categories': [six.text_type(c1.id), six.text_type(c2.id)]})
new_art = f.save(commit=False)
# Manually save the instance
new_art.save()
art_id_4 = new_art.id
        self.assertNotIn(art_id_4, (None, art_id_1, art_id_2, art_id_3))
# The instance doesn't have m2m data yet
new_art = Article.objects.get(id=art_id_4)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Save the m2m data on the form
f.save_m2m()
self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"])
# Here, we define a custom ModelForm. Because it happens to have the same fields as
# the Category model, we can just call the form's save() to apply its changes to an
# existing Category instance.
cat = Category.objects.get(name='Third test')
self.assertEqual(cat.name, "Third test")
self.assertEqual(cat.id == c3.id, True)
form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat)
self.assertEqual(form.save().name, 'Third')
self.assertEqual(Category.objects.get(id=c3.id).name, 'Third')
# Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
# at runtime, based on the data in the database when the form is displayed, not
# the data in the database when the form is instantiated.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_royko.pk, c1.pk, c2.pk, c3.pk))
c4 = Category.objects.create(name='Fourth', url='4th')
self.assertEqual(c4.name, 'Fourth')
w_bernstein = Writer.objects.create(name='Carl Bernstein')
self.assertEqual(w_bernstein.name, 'Carl Bernstein')
self.assertHTMLEqual(f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third</option>
<option value="%s">Fourth</option>
</select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (w_woodward.pk, w_bernstein.pk, w_royko.pk, c1.pk, c2.pk, c3.pk, c4.pk))
# ModelChoiceField ############################################################
f = forms.ModelChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
('', '---------'),
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third'),
(c4.pk, 'Fourth')])
with self.assertRaises(ValidationError):
f.clean('')
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean(0)
self.assertEqual(f.clean(c3.id).name, 'Third')
self.assertEqual(f.clean(c2.id).name, "It's a test")
# Add a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
c5 = Category.objects.create(name='Fifth', url='5th')
self.assertEqual(c5.name, 'Fifth')
self.assertEqual(f.clean(c5.id).name, 'Fifth')
# Delete a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
Category.objects.get(url='5th').delete()
with self.assertRaises(ValidationError):
f.clean(c5.id)
f = forms.ModelChoiceField(Category.objects.filter(pk=c1.id), required=False)
self.assertEqual(f.clean(''), None)
f.clean('')
self.assertEqual(f.clean(str(c1.id)).name, "Entertainment")
with self.assertRaises(ValidationError):
f.clean('100')
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Fourth')
self.assertEqual(list(f.choices), [
('', '---------'),
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third')])
self.assertEqual(f.clean(c3.id).name, 'Third')
with self.assertRaises(ValidationError):
f.clean(c4.id)
# check that we can safely iterate choices repeatedly
gen_one = list(f.choices)
gen_two = f.choices
self.assertEqual(gen_one[2], (c2.pk, "It's a test"))
self.assertEqual(list(gen_two), [
('', '---------'),
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third')])
# check that we can override the label_from_instance method to print custom labels (#4620)
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "category " + str(obj)
self.assertEqual(list(f.choices), [
('', '---------'),
(c1.pk, 'category Entertainment'),
(c2.pk, "category It's a test"),
(c3.pk, 'category Third'),
(c4.pk, 'category Fourth')])
# ModelMultipleChoiceField ####################################################
f = forms.ModelMultipleChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third'),
(c4.pk, 'Fourth')])
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean([])
self.assertQuerysetEqual(f.clean([c1.id]), ["Entertainment"])
self.assertQuerysetEqual(f.clean([c2.id]), ["It's a test"])
self.assertQuerysetEqual(f.clean([str(c1.id)]), ["Entertainment"])
self.assertQuerysetEqual(f.clean([str(c1.id), str(c2.id)]), ["Entertainment", "It's a test"])
self.assertQuerysetEqual(f.clean([c1.id, str(c2.id)]), ["Entertainment", "It's a test"])
self.assertQuerysetEqual(f.clean((c1.id, str(c2.id))), ["Entertainment", "It's a test"])
with self.assertRaises(ValidationError):
f.clean(['100'])
with self.assertRaises(ValidationError):
f.clean('hello')
with self.assertRaises(ValidationError):
f.clean(['fail'])
# Add a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
# Note, we are using an id of 1006 here since tests that run before
# this may create categories with primary keys up to 6. Use
        # a number that will not conflict.
c6 = Category.objects.create(id=1006, name='Sixth', url='6th')
self.assertEqual(c6.name, 'Sixth')
self.assertQuerysetEqual(f.clean([c6.id]), ["Sixth"])
# Delete a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
Category.objects.get(url='6th').delete()
with self.assertRaises(ValidationError):
f.clean([c6.id])
f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
self.assertIsInstance(f.clean([]), EmptyQuerySet)
self.assertIsInstance(f.clean(()), EmptyQuerySet)
with self.assertRaises(ValidationError):
f.clean(['10'])
with self.assertRaises(ValidationError):
f.clean([str(c3.id), '10'])
with self.assertRaises(ValidationError):
f.clean([str(c1.id), '10'])
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Fourth')
self.assertEqual(list(f.choices), [
(c1.pk, 'Entertainment'),
(c2.pk, "It's a test"),
(c3.pk, 'Third')])
self.assertQuerysetEqual(f.clean([c3.id]), ["Third"])
with self.assertRaises(ValidationError):
f.clean([c4.id])
with self.assertRaises(ValidationError):
f.clean([str(c3.id), str(c4.id)])
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "multicategory " + str(obj)
self.assertEqual(list(f.choices), [
(c1.pk, 'multicategory Entertainment'),
(c2.pk, "multicategory It's a test"),
(c3.pk, 'multicategory Third'),
(c4.pk, 'multicategory Fourth')])
# OneToOneField ###############################################################
self.assertEqual(list(ImprovedArticleForm.base_fields), ['article'])
self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), [])
bw = BetterWriter(name='Joe Better', score=10)
bw.save()
self.assertEqual(sorted(model_to_dict(bw)),
['id', 'name', 'score', 'writer_ptr'])
form = BetterWriterForm({'name': 'Some Name', 'score': 12})
self.assertEqual(form.is_valid(), True)
bw2 = form.save()
bw2.delete()
form = WriterProfileForm()
self.assertHTMLEqual(form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Joe Better</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="text" name="age" id="id_age" /></p>''' % (w_woodward.pk, w_bernstein.pk, bw.pk, w_royko.pk))
data = {
'writer': six.text_type(w_woodward.pk),
'age': '65',
}
form = WriterProfileForm(data)
instance = form.save()
self.assertEqual(six.text_type(instance), 'Bob Woodward is 65')
form = WriterProfileForm(instance=instance)
self.assertHTMLEqual(form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="">---------</option>
<option value="%s" selected="selected">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Joe Better</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="text" name="age" value="65" id="id_age" /></p>''' % (w_woodward.pk, w_bernstein.pk, bw.pk, w_royko.pk))
def test_file_field(self):
# Test conditions when files is either not given or empty.
f = TextFileForm(data={'description': 'Assistance'})
self.assertEqual(f.is_valid(), False)
f = TextFileForm(data={'description': 'Assistance'}, files={})
self.assertEqual(f.is_valid(), False)
# Upload a file and ensure it all works as expected.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
instance.file.delete()
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Check if the max_length attribute has been inherited from the model.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test-maxlength.txt', b'hello world')})
self.assertEqual(f.is_valid(), False)
# Edit an instance that already has the file defined in the model. This will not
# save the file again, but leave it exactly as it is.
f = TextFileForm(
data={'description': 'Assistance'},
instance=instance)
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt')
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
# Override the file by uploading a new one.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test2.txt', b'hello world')}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test2.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test2.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test2.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
# Test the non-required FileField
f = TextFileForm(data={'description': 'Assistance'})
f.fields['file'].required = False
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, '')
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test3.txt', b'hello world')}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Instance can be edited w/out re-uploading the file and existing file should be preserved.
f = TextFileForm(
data={'description': 'New Description'},
instance=instance)
f.fields['file'].required = False
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.description, 'New Description')
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test3.txt', b'hello world')})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
def test_big_integer_field(self):
bif = BigIntForm({'biggie': '-9223372036854775808'})
self.assertEqual(bif.is_valid(), True)
bif = BigIntForm({'biggie': '-9223372036854775809'})
self.assertEqual(bif.is_valid(), False)
self.assertEqual(bif.errors, {'biggie': ['Ensure this value is greater than or equal to -9223372036854775808.']})
bif = BigIntForm({'biggie': '9223372036854775807'})
self.assertEqual(bif.is_valid(), True)
bif = BigIntForm({'biggie': '9223372036854775808'})
self.assertEqual(bif.is_valid(), False)
self.assertEqual(bif.errors, {'biggie': ['Ensure this value is less than or equal to 9223372036854775807.']})
@skipUnless(test_images, "PIL not installed")
def test_image_field(self):
        # ImageField and FileField are nearly identical, but they differ slightly when
# it comes to validation. This specifically tests that #6302 is fixed for
# both file fields and image fields.
with open(os.path.join(os.path.dirname(upath(__file__)), "test.png"), 'rb') as fp:
image_data = fp.read()
with open(os.path.join(os.path.dirname(upath(__file__)), "test2.png"), 'rb') as fp:
image_data2 = fp.read()
f = ImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
f = ImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)})
self.assertEqual(f.is_valid(), True)
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Edit an instance that already has the (required) image defined in the model. This will not
# save the image again, but leave it exactly as it is.
f = ImageFileForm(data={'description': 'Look, it changed'}, instance=instance)
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png')
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.height, 16)
self.assertEqual(instance.width, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
# Override the file by uploading a new one.
f = ImageFileForm(
data={'description': 'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
f = ImageFileForm(
data={'description': 'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
# Test the non-required ImageField
# Note: In Oracle, we expect a null ImageField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_imagefield_repr = ''
else:
expected_null_imagefield_repr = None
f = OptionalImageFileForm(data={'description': 'Test'})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, expected_null_imagefield_repr)
self.assertEqual(instance.width, None)
self.assertEqual(instance.height, None)
f = OptionalImageFileForm(
data={'description': 'And a final one'},
files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Editing the instance without re-uploading the image should not affect the image or its width/height properties
f = OptionalImageFileForm(
data={'description': 'New Description'},
instance=instance)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.description, 'New Description')
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django.
instance.image.delete()
instance.delete()
f = OptionalImageFileForm(
data={'description': 'And a final one'},
files={'image': SimpleUploadedFile('test4.png', image_data2)}
)
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test4.png')
self.assertEqual(instance.width, 48)
self.assertEqual(instance.height, 32)
instance.delete()
# Test callable upload_to behavior that's dependent on the value of another field in the model
f = ImageFileForm(
data={'description': 'And a final one', 'path': 'foo'},
files={'image': SimpleUploadedFile('test4.png', image_data)})
self.assertEqual(f.is_valid(), True)
instance = f.save()
self.assertEqual(instance.image.name, 'foo/test4.png')
instance.delete()
def test_media_on_modelform(self):
# Similar to a regular Form class you can define custom media to be used on
# the ModelForm.
f = ModelFormWithMedia()
self.assertHTMLEqual(six.text_type(f.media), '''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/form/javascript"></script>''')
f = CommaSeparatedIntegerForm({'field': '1,2,3'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': '1,2,3'})
f = CommaSeparatedIntegerForm({'field': '1a,2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': ',,,,'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': ',,,,'})
f = CommaSeparatedIntegerForm({'field': '1.2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': '1,a,2'})
self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
f = CommaSeparatedIntegerForm({'field': '1,,2'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': '1,,2'})
f = CommaSeparatedIntegerForm({'field': '1'})
self.assertEqual(f.is_valid(), True)
self.assertEqual(f.cleaned_data, {'field': '1'})
# This Price instance generated by this form is not valid because the quantity
# field is required, but the form is valid because the field is excluded from
# the form. This is for backwards compatibility.
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertEqual(form.is_valid(), True)
price = form.save(commit=False)
with self.assertRaises(ValidationError):
price.full_clean()
# The form should not validate fields that it doesn't contain even if they are
# specified using 'fields', not 'exclude'.
        class PriceFormWithoutQuantity(forms.ModelForm):
            class Meta:
                model = Price
                fields = ('price',)
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertEqual(form.is_valid(), True)
# The form should still have an instance of a model that is not complete and
# not saved into a DB yet.
self.assertEqual(form.instance.price, Decimal('6.00'))
self.assertEqual(form.instance.quantity is None, True)
self.assertEqual(form.instance.pk is None, True)
# Choices on CharField and IntegerField
f = ArticleForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('42')
f = ArticleStatusForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('z')
def test_foreignkeys_which_use_to_field(self):
apple = Inventory.objects.create(barcode=86, name='Apple')
pear = Inventory.objects.create(barcode=22, name='Pear')
core = Inventory.objects.create(barcode=87, name='Core', parent=apple)
field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), (
('', '---------'),
(86, 'Apple'),
(87, 'Core'),
(22, 'Pear')))
form = InventoryForm(instance=core)
self.assertHTMLEqual(six.text_type(form['parent']), '''<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected="selected">Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>''')
data = model_to_dict(core)
data['parent'] = '22'
form = InventoryForm(data=data, instance=core)
core = form.save()
self.assertEqual(core.parent.name, 'Pear')
class CategoryForm(forms.ModelForm):
description = forms.CharField()
class Meta:
model = Category
fields = ['description', 'url']
self.assertEqual(list(CategoryForm.base_fields),
['description', 'url'])
self.assertHTMLEqual(six.text_type(CategoryForm()), '''<tr><th><label for="id_description">Description:</label></th><td><input type="text" name="description" id="id_description" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>''')
# to_field_name should also work on ModelMultipleChoiceField ##################
field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), ((86, 'Apple'), (87, 'Core'), (22, 'Pear')))
self.assertQuerysetEqual(field.clean([86]), ['Apple'])
form = SelectInventoryForm({'items': [87, 22]})
self.assertEqual(form.is_valid(), True)
self.assertEqual(len(form.cleaned_data), 1)
self.assertQuerysetEqual(form.cleaned_data['items'], ['Core', 'Pear'])
def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
self.assertEqual(list(CustomFieldForExclusionForm.base_fields),
['name'])
self.assertHTMLEqual(six.text_type(CustomFieldForExclusionForm()),
'''<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="10" /></td></tr>''')
| bsd-3-clause | -1,320,706,784,182,750,000 | 42.529993 | 219 | 0.605936 | false |
dfreedman55/Python4NetworkEngineers | week2/exercise3.py | 1 | 1448 | #!/usr/bin/env python
import telnetlib
import time
import socket
import sys
class DefineNetworkDevice(object):
def __init__(self, ip, uname, pword):
'''
Object initialization
'''
self.ip = ip
self.uname = uname
self.pword = pword
self.TELNET_PORT = 23
self.TELNET_TIMEOUT = 6
def TelnetConnect(self):
'''
Connect, Receive Username Prompt, Send Username, Receive Password Prompt, Send Password, Receive Router Prompt
'''
self.connection = telnetlib.Telnet(self.ip, self.TELNET_PORT, self.TELNET_TIMEOUT)
# output = self.connection.read_until('sername:', TELNET_TIMEOUT)
# print output
self.connection.write(self.uname + '\n')
time.sleep(1)
# output = self.connection.read_until('assword:', TELNET_TIMEOUT)
# print output
self.connection.write(self.pword + '\n')
time.sleep(1)
# output = self.connection.read_very_eager()
# print output
def TelnetSendCommand(self, command):
'''
Send command to established telnet session
'''
self.connection.write(command + '\n')
time.sleep(1)
def TelnetReceiveData(self):
'''
		Receive command output from the established telnet session
'''
output = self.connection.read_very_eager()
print output
# if __name__ == '__main__':
rtr1 = DefineNetworkDevice('50.76.53.27', 'pyclass', '88newclass')
rtr1.TelnetConnect()
rtr1.TelnetSendCommand('terminal length 0')
rtr1.TelnetReceiveData()
rtr1.TelnetSendCommand('show version')
rtr1.TelnetReceiveData()
| gpl-2.0 | -9,119,668,020,921,804,000 | 24.403509 | 112 | 0.709945 | false |
lgarren/spack | var/spack/repos/builtin/packages/perl-file-pushd/package.py | 1 | 1587 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlFilePushd(PerlPackage):
"""Change directory temporarily for a limited scope"""
homepage = "http://search.cpan.org/~dagolden/File-pushd-1.014/lib/File/pushd.pm"
url = "http://search.cpan.org/CPAN/authors/id/D/DA/DAGOLDEN/File-pushd-1.014.tar.gz"
version('1.014', '09c03001fb653c35663842191e315f5f')
| lgpl-2.1 | 8,270,129,451,982,892,000 | 45.676471 | 93 | 0.680529 | false |
sirex/databot | databot/pipes.py | 1 | 26342 | import collections
import datetime
import itertools
import sqlalchemy as sa
import traceback
import tqdm
from databot.db.serializers import serrow, serkey
from databot.db.utils import strip_prefix, create_row, get_or_create, Row
from databot.db.windowedquery import windowed_query
from databot.db.models import Compression
from databot.handlers import download, html
from databot.bulkinsert import BulkInsert
from databot.exporters.services import export
from databot.expressions.base import Expression
from databot.tasks import Task
from databot.services import merge_rows
NONE = object()
def keyvalueitems(key, value=None):
if isinstance(key, tuple) and value is None and len(key) == 2:
return [key]
elif isinstance(key, collections.Iterable) and not isinstance(key, (str, bytes)):
items = iter(key)
else:
return [(key, value)]
try:
item = next(items)
except StopIteration:
return []
if isinstance(item, tuple):
return itertools.chain([item], items)
else:
return itertools.chain([(item, None)], ((k, None) for k in items))
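# Illustrative note (not part of the original module): keyvalueitems() normalizes
# the argument forms accepted by Pipe.append() into an iterable of (key, value)
# pairs, for example:
#   list(keyvalueitems('a'))                    -> [('a', None)]
#   list(keyvalueitems('a', 1))                 -> [('a', 1)]
#   list(keyvalueitems(('a', 1)))               -> [('a', 1)]
#   list(keyvalueitems(['a', 'b']))             -> [('a', None), ('b', None)]
#   list(keyvalueitems([('a', 1), ('b', 2)]))   -> [('a', 1), ('b', 2)]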
class ItemNotFound(Exception):
pass
class PipeErrors(Task):
def __init__(self, task):
super().__init__()
self.task = task
self.bot = task.bot
def __call__(self, key=None, reverse=False):
if self.task.source:
state = self.task.get_state()
error = self.task.target.models.errors.alias('error')
table = self.task.source.table.alias('table')
# Filter by key if provided
if key is not None:
row = self.task.source.last(key)
if row is None:
return
where = sa.and_(
error.c.state_id == state.id,
error.c.row_id == row.id,
)
else:
where = error.c.state_id == state.id
# Ordering
if reverse:
order_by = error.c.id.desc()
else:
order_by = error.c.id
# Query if all tables stored in same database
if self.task.target.samedb and self.task.source.samedb:
query = (
sa.select([error, table], use_labels=True).
select_from(
error.
join(table, error.c.row_id == table.c.id)
).
where(where).
order_by(order_by)
)
for row in windowed_query(self.task.target.engine, query, table.c.id):
item = strip_prefix(row, 'error_')
item['row'] = create_row(strip_prefix(row, 'table_'))
yield item
# Query if some tables are stored in external database
else:
query = error.select(where).order_by(order_by)
for err in windowed_query(self.task.target.engine, query, error.c.id):
query = table.select(table.c.id == err['row_id'])
row = self.task.source.engine.execute(query).first()
if row:
yield Row(err, row=create_row(row))
def last(self, key=None):
for err in self(key, reverse=True):
return err
def count(self):
if self.task.source:
error = self.task.target.models.errors
state = self.task.get_state()
return self.task.target.engine.execute(error.count(error.c.state_id == state.id)).scalar()
else:
return 0
def rows(self):
for error in self():
yield error.row
def items(self):
for row in self.rows():
yield row.key, row.value
def keys(self):
for row in self.rows():
yield row.key
def values(self):
for row in self.rows():
yield row.value
def report(self, error_or_row, message, bulk=None):
now = datetime.datetime.utcnow()
if 'retries' in error_or_row:
error = error_or_row
self.task.target.engine.execute(
self.bot.models.errors.update(sa.and_(
self.bot.models.errors.c.state_id == error.state_id,
self.bot.models.errors.c.row_id == error.row_id,
)).values(
retries=self.bot.models.errors.c.retries + 1,
traceback=message,
updated=datetime.datetime.utcnow(),
),
)
elif bulk:
row = error_or_row
state = self.task.get_state()
bulk.append(dict(
state_id=state.id,
row_id=row.id,
retries=0,
traceback=message,
created=now,
updated=now,
))
else:
row = error_or_row
state = self.task.get_state()
self.bot.engine.execute(
self.bot.models.errors.insert(),
state_id=state.id,
row_id=row.id,
retries=0,
traceback=message,
created=now,
updated=now,
)
def resolve(self, key=None):
if self.task.source:
state = self.task.get_state()
error = self.task.target.models.errors
table = self.task.source.table
if key is None:
self.task.target.engine.execute(error.delete(error.c.state_id == state.id))
elif self.task.target.samedb and self.task.source.samedb:
query = (
sa.select([error.c.id]).
select_from(table.join(error, table.c.id == error.c.row_id)).
where(sa.and_(error.c.state_id == state.id, table.c.key == serkey(key)))
)
if self.bot.engine.name == 'mysql':
# http://stackoverflow.com/a/45498/475477
query = sa.select([query.alias().c.id])
self.task.target.engine.execute(error.delete(error.c.id.in_(query)))
else:
query = table.select(table.c.key == serkey(key))
row_ids = {row['id'] for row in self.task.source.engine.execute(query)}
if row_ids:
query = error.delete(sa.and_(error.c.state_id == state.id, error.c.row_id.in_(row_ids)))
self.task.target.engine.execute(query)
class TaskPipe(Task):
def __init__(self, bot, source, target):
super().__init__()
self.bot = bot
self.source = source
self.target = target
self.errors = PipeErrors(self)
def __repr__(self):
return '<databot.pipes.TaskPipe(%r, %r) at 0x%x>' % (
self.source.name if self.source else None,
self.target.name,
id(self),
)
def get_state(self):
return get_or_create(self.target.engine, self.target.models.state, ['source_id', 'target_id'], dict(
source_id=(self.source.id if self.source else None),
target_id=self.target.id,
offset=0,
))
def is_filled(self):
if self.source:
table = self.source.table
state = self.get_state()
query = table.select(table.c.id > state.offset).limit(1)
return len(self.source.engine.execute(query).fetchall()) > 0
else:
return False
def reset(self):
engine = self.target.engine
models = self.target.models
state = self.get_state()
engine.execute(models.state.update(models.state.c.id == state.id), offset=0)
return self
def skip(self):
engine = self.target.engine
models = self.target.models
state = self.get_state()
source = self.source.table
query = sa.select([source.c.id]).order_by(source.c.id.desc()).limit(1)
offset = self.source.engine.execute(query).scalar()
if offset:
engine.execute(models.state.update(models.state.c.id == state.id), offset=offset)
return self
def offset(self, value=None):
"""Move cursor to the specified offset.
For example, let say you have 5 items in your pipe:
[-----]
Then you will get following state after calling offset:
offset(1) [*----]
offset(-1) [****-]
offset(3) [***--]
offset(10) [*****]
offset(0) [-----]
"""
state = self.get_state()
source = self.source.table
offset = None
if value:
query = sa.select([source.c.id])
if value > 0:
query = query.where(source.c.id > state.offset).order_by(source.c.id.asc())
else:
query = query.where(source.c.id < state.offset).order_by(source.c.id.desc())
query = query.limit(1).offset(abs(value) - 1)
offset = self.source.engine.execute(query).scalar()
if offset is None:
if value > 0:
return self.skip()
else:
return self.reset()
if offset is not None:
self.target.engine.execute(
self.target.models.state.update(self.target.models.state.c.id == state.id),
offset=offset,
)
return self
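    # Illustrative usage sketch (not from the original source; names are hypothetical):
    # given `task = target_pipe(source_pipe)`,
    #   task.offset(10)   # move the cursor forward past the next 10 source rows
    #   task.offset(-3)   # move the cursor back by 3 rows
    # A value that runs past either end falls back to skip() or reset().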
def count(self):
"""How much items left to process."""
if self.source:
state = self.get_state()
table = self.source.table
return self.source.engine.execute(table.count(table.c.id > state.offset)).scalar()
else:
return 0
def rows(self):
if self.source:
table = self.source.table
query = table.select(table.c.id > self.get_state().offset).order_by(table.c.id)
for row in windowed_query(self.source.engine, query, table.c.id):
yield create_row(row)
def items(self):
for row in self.rows():
yield row.key, row.value
def keys(self):
for row in self.rows():
yield row.key
def values(self):
for row in self.rows():
yield row.value
def _verbose_append(self, handler, row, bulk, append=True):
print('-' * 72, file=self.bot.output.output)
print('source: id=%d key=%r' % (row.id, row.key), file=self.bot.output.output)
for key, value in keyvalueitems(handler(row)):
if append:
self.target.append(key, value, bulk=bulk)
self.bot.output.key_value(key, value, short=True)
def call(self, handler, error_limit=NONE):
error_limit = self.bot.error_limit if error_limit is NONE else error_limit
state = self.get_state()
desc = '%s -> %s' % (self.source, self.target)
if self.bot.retry:
self.retry(handler)
if self.bot.verbosity == 1 and not self.bot.debug:
total = min(self.bot.limit, self.count()) if self.bot.limit else self.count()
rows = tqdm.tqdm(self.rows(), desc, total, leave=True)
else:
rows = self.rows()
def post_save():
if row:
engine = self.target.engine
models = self.target.models
engine.execute(models.state.update(models.state.c.id == state.id), offset=row.id)
pipe = BulkInsert(self.target.engine, self.target.table)
errors = BulkInsert(self.target.engine, self.target.models.errors)
if not self.bot.debug:
pipe.post_save(post_save)
n = 0
n_errors = 0
row = None
interrupt = None
last_row = None
for row in rows:
if self.bot.limit and n >= self.bot.limit:
row = last_row
break
if self.bot.debug:
self._verbose_append(handler, row, pipe, append=False)
else:
try:
if self.bot.verbosity > 1:
self._verbose_append(handler, row, pipe)
else:
self.target.append(handler(row), bulk=pipe)
except KeyboardInterrupt as e:
interrupt = e
break
except Exception as e:
n_errors += 1
if error_limit is not None and n_errors >= error_limit:
interrupt = e
if self.bot.verbosity > 0:
print('Interrupting bot because error limit of %d was reached.' % error_limit)
self.bot.output.key_value(row.key, row.value, short=True)
if error_limit > 0:
self.errors.report(row, traceback.format_exc(), errors)
row = last_row
break
else:
self.errors.report(row, traceback.format_exc(), errors)
n += 1
last_row = row
pipe.save(post_save=True)
errors.save()
if self.bot.verbosity > 1:
print('%s, rows processed: %d' % (desc, n))
if interrupt:
raise interrupt
return self
def retry(self, handler):
desc = '%s -> %s (retry)' % (self.source, self.target)
if self.bot.verbosity == 1 and not self.bot.debug:
total = min(self.bot.limit, self.errors.count()) if self.bot.limit else self.errors.count()
errors = tqdm.tqdm(self.errors(), desc, total, leave=True, file=self.bot.output.output)
else:
errors = self.errors()
def post_save():
nonlocal error_ids
if error_ids:
engine = self.target.engine
models = self.target.models
engine.execute(models.errors.delete(models.errors.c.id.in_(error_ids)))
error_ids = []
pipe = BulkInsert(self.target.engine, self.target.table)
pipe.post_save(post_save)
n = 0
interrupt = None
error_ids = []
for error in errors:
if self.bot.limit and n >= self.bot.limit:
break
if self.bot.debug:
self._verbose_append(handler, error.row, pipe, append=False)
error_ids.append(error.id)
else:
try:
if self.bot.verbosity > 1:
self._verbose_append(handler, error.row, pipe)
else:
self.target.append(handler(error.row), bulk=pipe)
except KeyboardInterrupt as e:
interrupt = e
break
except:
self.errors.report(error, traceback.format_exc())
else:
error_ids.append(error.id)
n += 1
pipe.save(post_save=True)
if self.bot.verbosity > 1:
print('%s, errors retried: %d' % (desc, n))
if interrupt:
raise interrupt
return self
def download(self, urls=None, **kwargs):
kwargs.setdefault('delay', self.bot.download_delay)
urls = urls or Expression().key
return self.call(download.download(self.bot.requests, urls, **kwargs))
def select(self, key, value=None, **kwargs):
return self.call(html.Select(key, value, **kwargs))
def dedup(self):
self.target.dedup()
def compact(self):
self.target.compact()
def age(self, key=None):
return self.target.age(key)
def max(self, expr):
row = max((row for row in self.source.rows()), key=expr._eval, default=None)
if row:
self.target.append(row.key, row.value)
return self
def min(self, expr):
row = min((row for row in self.source.rows()), key=expr._eval, default=None)
if row:
self.target.append(row.key, row.value)
return self
class Pipe(Task):
def __init__(self, bot, id, name, table, engine, samedb=True, compress=None):
"""
Parameters
----------
bot : databot.Bot
id : int
Primary key of this pipe from ``databot.db.models.pipes.id``.
name : str
Human readable pipe identifier.
table: sqlalchemy.Table
A table where data is stored.
engine : sqlalchemy.Engine
samedb : bool
            Identifies if this pipe is stored in the same database as other pipes of ``bot``.
            If a pipe is stored in an external database, some queries will be executed in a slightly different way.
compress : databot.db.models.Compression or bool, optional
Data compression algorithm.
"""
super().__init__()
self.bot = bot
self.id = id
self.name = name
self.table = table
self.models = bot.models
self.engine = engine
self.samedb = samedb
self.compression = Compression.gzip if compress is True else compress
self.tasks = {}
def __str__(self):
return self.name
def __repr__(self):
return '<databot.pipes.Pipe(%r) at ox%x>' % (self.name, id(self))
def __call__(self, source):
source_id = source.id if source else None
if source_id not in self.tasks:
self.tasks[source_id] = TaskPipe(self.bot, source, self)
return self.tasks[source_id]
def append(self, key, value=None, conn=None, bulk=None, only_missing=False, progress=None, total=-1):
"""Append data to the pipe
        You can call this method in the following ways::
append(key)
append(key, value)
append((key, value))
append([key, key, ...])
append([(key, value), (key, value), ...])
"""
conn = conn or self.engine
# Progress bar
rows = keyvalueitems(key, value)
if progress and self.bot.verbosity == 1 and not self.bot.debug:
rows = tqdm.tqdm(rows, progress, total, file=self.bot.output.output, leave=True)
# Bulk insert start
save_bulk = False
if bulk is None:
save_bulk = True
bulk = BulkInsert(conn, self.table)
# Append
for key, value in rows:
            # Skip items whose key is None and, when only_missing is set, keys that already exist
if key is not None and (not only_missing or not self.exists(key)):
now = datetime.datetime.utcnow()
bulk.append(serrow(key, value, created=now, compression=self.compression))
# Bulk insert finish
if save_bulk:
bulk.save()
return self
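    # Illustrative usage sketch (not from the original source; `pipe` is a hypothetical
    # Pipe instance):
    #   pipe.append('key')                      # single key, value defaults to None
    #   pipe.append('key', {'a': 1})            # key and value
    #   pipe.append([('k1', 1), ('k2', 2)])     # iterable of (key, value) pairs
    #   pipe.append(items, only_missing=True)   # skip keys that are already stored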
def clean(self, age=None, now=None, key=None):
if key is not None:
row = self.last(key)
if row is None:
raise ItemNotFound()
else:
query = self.table.delete(self.table.c.id == row.id)
elif age:
now = now or datetime.datetime.utcnow()
timestamp = now - age
query = self.table.delete(self.table.c.created <= timestamp)
else:
query = self.table.delete()
self.engine.execute(query)
return self
def dedup(self):
"""Delete all records with duplicate keys except ones created first."""
agg = (
sa.select([self.table.c.key, sa.func.min(self.table.c.id).label('id')]).
group_by(self.table.c.key).
having(sa.func.count(self.table.c.id) > 1).
alias()
)
query = (
sa.select([self.table.c.id]).
select_from(self.table.join(agg, sa.and_(
self.table.c.key == agg.c.key,
self.table.c.id != agg.c.id,
)))
)
if self.engine.name == 'mysql':
# http://stackoverflow.com/a/45498/475477
query = sa.select([query.alias().c.id])
self.engine.execute(self.table.delete(self.table.c.id.in_(query)))
return self
def compact(self):
"""Delete all records with duplicate keys except ones created last."""
agg = (
sa.select([self.table.c.key, sa.func.max(self.table.c.id).label('id')]).
group_by(self.table.c.key).
having(sa.func.count(self.table.c.id) > 1).
alias()
)
query = (
sa.select([self.table.c.id]).
select_from(self.table.join(agg, sa.and_(
self.table.c.key == agg.c.key,
self.table.c.id != agg.c.id,
)))
)
if self.engine.name == 'mysql':
# http://stackoverflow.com/a/45498/475477
query = sa.select([query.alias().c.id])
self.engine.execute(self.table.delete(self.table.c.id.in_(query)))
return self
def merge(self):
"""Merge all duplicate value, newer values overwrites older values.
Dicts will be merged recursively.
After merge, old values will be left as is, use compact to remove them.
"""
query = self.table.select().order_by(self.table.c.key, self.table.c.created)
rows = (create_row(row) for row in windowed_query(self.engine, query, self.table.c.id))
self.append(merge_rows((row.key, row.value) for row in rows))
return self
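    # Illustrative sketch (not from the original source), assuming merge_rows combines
    # dict values recursively as described above: given rows ('a', {'x': 1, 'y': 1}) and
    # a newer ('a', {'y': 2}), merge() appends a combined row ('a', {'x': 1, 'y': 2});
    # the older rows remain until compact() is called.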
def compress(self):
table = self.table
rows = self.rows()
if self.bot.verbosity == 1:
rows = tqdm.tqdm(rows, ('compress %s' % self.name), total=self.count(), file=self.bot.output.output)
for row in rows:
if row.compression != Compression.gzip:
data = serrow(row.key, row.value, created=row.created, compression=Compression.gzip)
self.engine.execute(table.update().where(table.c.id == row['id']).values(data))
def decompress(self):
table = self.table
rows = self.rows()
if self.bot.verbosity == 1:
rows = tqdm.tqdm(rows, ('decompress %s' % self.name), total=self.count(), file=self.bot.output.output)
for row in rows:
if row.compression is not None:
data = serrow(row.key, row.value, created=row.created, compression=None)
self.engine.execute(table.update().where(table.c.id == row['id']).values(data))
def last(self, key=None):
if key:
query = self.table.select().where(self.table.c.key == serkey(key)).order_by(self.table.c.id.desc())
else:
query = self.table.select().order_by(self.table.c.id.desc())
row = self.engine.execute(query).first()
return create_row(row) if row else None
def age(self, key=None):
row = self.last(key)
return (datetime.datetime.utcnow() - row.created) if row else datetime.timedelta.max
def count(self):
return self.engine.execute(self.table.count()).scalar()
def rows(self, desc=False):
order_by = self.table.c.id.desc() if desc else self.table.c.id
query = self.table.select().order_by(order_by)
for row in windowed_query(self.engine, query, self.table.c.id):
yield create_row(row)
def items(self):
for row in self.rows():
yield row.key, row.value
def keys(self):
for row in self.rows():
yield row.key
def values(self):
for row in self.rows():
yield row.value
def exists(self, key):
query = sa.select([sa.exists().where(self.table.c.key == serkey(key))])
return self.engine.execute(query).scalar()
def getall(self, key, reverse=False):
order_by = self.table.c.id.desc() if reverse else self.table.c.id
query = self.table.select().where(self.table.c.key == serkey(key)).order_by(order_by)
for row in windowed_query(self.engine, query, self.table.c.id):
yield create_row(row)
def get(self, key, default=Exception):
rows = self.getall(key)
try:
row = next(rows)
except StopIteration:
if default is Exception:
raise ValueError('%r not found.' % key)
else:
return default
try:
next(rows)
except StopIteration:
return row
else:
raise ValueError('%r returned more that one row.' % key)
def export(self, dest, **kwargs):
return export(self.rows(), dest, **kwargs)
def download(self, urls=None, **kwargs):
"""Download list of URLs and store downloaded content into a pipe.
Parameters
----------
urls : None or str or list or callable or databot.rowvalue.Row
List of URLs to download.
            URLs can be provided in the following ways:
- `str` - string containing single URL.
- `list` - list of strings where each string is a URL.
- `None` - takes URLs from connected pipe's key field.
- `databot.rowvalue.Row` - takes URLs from a specified location in a row.
              For example, the code below will take all rows from pipe `a` and read the URL from `a.row.value.url`,
              which is `http://example.com`.
.. code-block:: python
import databot
bot = databot.Bot()
a = bot.define('a').append([(1, {'url': 'http://example.com'})])
bot.define('b').download(a.row.value.url)
delay : int
Amount of seconds to delay between requests.
By default delay is `bot.download_delay`.
"""
kwargs.setdefault('delay', self.bot.download_delay)
urls = [urls] if isinstance(urls, str) else urls
fetch = download.download(self.bot.requests, urls, **kwargs)
for url in urls:
try:
self.append(fetch(url))
except KeyboardInterrupt:
raise
except Exception as e:
self.bot.output.key_value(url, None, short=True)
raise
return self
| agpl-3.0 | -4,314,377,841,758,033,000 | 33.033592 | 118 | 0.534242 | false |
wateraccounting/wa | Collect/ALEXI/monthly.py | 1 | 1291 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 28 07:54:17 2017
@author: tih
"""
import os
import sys
from DataAccess import DownloadData
def main(Dir, Startdate='', Enddate='', latlim=[-60, 70], lonlim=[-180, 180], Waitbar = 1):
"""
This function downloads monthly ALEXI data
Keyword arguments:
Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
latlim -- [ymin, ymax] (values must be between -60 and 70)
lonlim -- [xmin, xmax] (values must be between -180 and 180)
"""
print '\nDownload monthly ALEXI evapotranspiration data for the period %s till %s' %(Startdate, Enddate)
# Download data
DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar)
# Define directory
Dir_ALEXI_Weekly = os.path.join(Dir, 'Evaporation', 'ALEXI', 'Weekly')
Dir_ALEXI_Monthly = os.path.join(Dir, 'Evaporation', 'ALEXI', 'Monthly')
# Create output directory
if not os.path.exists(Dir_ALEXI_Monthly):
os.mkdir(Dir_ALEXI_Monthly)
# Create monthly maps based on weekly maps
import wa.Functions.Start.Weekly_to_monthly_flux as Week2month
Week2month.Nearest_Interpolate(Dir_ALEXI_Weekly, Startdate, Enddate, Dir_ALEXI_Monthly)
if __name__ == '__main__':
main(sys.argv)
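# Illustrative call (not part of the original module; the path and dates are hypothetical):
# main('C:/file/to/path/', Startdate='2005-01-01', Enddate='2005-12-31',
#      latlim=[-10, 30], lonlim=[-20, 120])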
| apache-2.0 | 8,970,401,376,894,572,000 | 31.275 | 108 | 0.656081 | false |
GStreamer/cerbero | recipes/custom.py | 1 | 6402 | # -*- Mode: Python -*- vi:si:et:sw=4:sts=4:ts=4:syntax=python
import os
from collections import defaultdict
from cerbero.build import recipe
from cerbero.build.source import SourceType
from cerbero.build.cookbook import CookBook
from cerbero.enums import License, FatalError
def running_on_cerbero_ci():
return os.environ.get('CI_PROJECT_NAME', '') == 'cerbero'
class GStreamer(recipe.Recipe):
licenses = [License.LGPLv2Plus]
version = '1.19.0.1'
tagged_for_release = False
# Decide what stype to use
use_git = True
if tagged_for_release:
# If we're using a manifest, that means we want to use the specified
# commits and remotes.
use_git = recipe.Recipe._using_manifest_force_git
# If we're tagged for release and we're running on Cerbero CI, we want
# to use the release tarballs even if a manifest is specified, because
# we want to test that the tarballs work.
if running_on_cerbero_ci():
use_git = False
if use_git:
stype = SourceType.GIT
remotes = {'origin': 'https://gitlab.freedesktop.org/gstreamer/%(name)s.git'}
if int(version.split('.')[1]) % 2 == 0:
# Even version, use the specific branch
commit = 'origin/' + '.'.join(version.split('.')[0:2])
else:
# Odd version, use git master
commit = 'origin/master'
else:
stype = SourceType.TARBALL
url = 'https://gstreamer.freedesktop.org/src/%(name)s/%(name)s-%(version)s.tar.xz'
tarball_dirname = '%(name)s-%(version)s'
def enable_plugin(self, plugin, category, variant, option=None, dep=None):
if option is None:
option = plugin
if getattr(self.config.variants, variant):
if dep is not None:
self.deps.append(dep)
plugin = 'lib/gstreamer-1.0/libgst' + plugin
if not hasattr(self, 'files_plugins_' + category):
setattr(self, 'files_plugins_' + category, [])
f = getattr(self, 'files_plugins_' + category)
f += [plugin + '%(mext)s']
if not hasattr(self, 'files_plugins_{}_devel'.format(category)):
setattr(self, 'files_plugins_{}_devel'.format(category), [])
d = getattr(self, 'files_plugins_{}_devel'.format(category))
d += [plugin + '.a', plugin + '.la']
self.meson_options[option] = 'enabled'
else:
self.meson_options[option] = 'disabled'
def _remove_files_category_entry(self, files_category, entry):
if hasattr(self, files_category):
fc = getattr(self, files_category)
if entry in fc:
fc.remove(entry)
return
platform_files_category = 'platform_' + files_category
if hasattr(self, platform_files_category):
pf = getattr(self, platform_files_category)
if self.config.target_platform not in pf:
raise FatalError('plugin {!r} not found in category {!r}'.format(entry, files_category))
pfc = getattr(self, platform_files_category)[self.config.target_platform]
if entry in pfc:
pfc.remove(entry)
return
raise FatalError('{} not found in category {}'.format(entry, files_category))
def _remove_plugin_file(self, plugin, category):
plugin = 'lib/gstreamer-1.0/libgst' + plugin
plugin_shared_lib = plugin + '%(mext)s'
plugin_static_lib = plugin + '.a'
plugin_libtool_lib = plugin + '.la'
self._remove_files_category_entry('files_plugins_' + category, plugin_shared_lib)
self._remove_files_category_entry('files_plugins_{}_devel'.format(category), plugin_static_lib)
self._remove_files_category_entry('files_plugins_{}_devel'.format(category), plugin_libtool_lib)
def disable_plugin(self, plugin, category, option=None, dep=None, library_name=None):
if option is None:
option = plugin
if dep is not None and dep in self.deps:
self.deps.remove(dep)
self._remove_plugin_file(plugin, category)
if library_name is not None:
library = 'libgst' + library_name + '-1.0'
self.files_libs.remove(library)
pcname = 'lib/pkgconfig/gstreamer-' + library_name + '-1.0.pc'
self.files_plugins_devel.remove(pcname)
includedir = 'include/gstreamer-1.0/gst/' + library_name
self.files_plugins_devel.remove(includedir)
libincdir = 'lib/gstreamer-1.0/include/gst/' + library_name
if libincdir in self.files_plugins_devel:
self.files_plugins_devel.remove(libincdir)
self.meson_options[option] = 'disabled'
def list_gstreamer_1_0_plugins_by_category(config):
cookbook = CookBook(config)
plugins = defaultdict(list)
for r in ['gstreamer-1.0', 'gst-plugins-base-1.0', 'gst-plugins-good-1.0',
'gst-plugins-bad-1.0', 'gst-plugins-ugly-1.0', 'libnice',
'gst-libav-1.0', 'gst-editing-services-1.0', 'gst-rtsp-server-1.0']:
r = cookbook.get_recipe(r)
for attr_name in dir(r):
if attr_name.startswith('files_plugins_'):
cat_name = attr_name[len('files_plugins_'):]
plugins_list = getattr(r, attr_name)
elif attr_name.startswith('platform_files_plugins_'):
cat_name = attr_name[len('platform_files_plugins_'):]
plugins_dict = getattr(r, attr_name)
plugins_list = plugins_dict.get(config.target_platform, [])
else:
continue
for e in plugins_list:
if not e.startswith('lib/gstreamer-'):
continue
c = e.split('/')
if len(c) != 3:
continue
e = c[2]
# we only care about files with the replaceable %(mext)s extension
if not e.endswith ('%(mext)s'):
continue
if e.startswith('libgst'):
e = e[6:-8]
else:
e = e[3:-8]
plugins[cat_name].append(e)
return plugins
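# Illustrative note (not part of the original recipe): the helper above returns a
# mapping from plugin-category suffix (e.g. 'core', 'playback') to bare plugin names
# with the 'libgst' prefix and '%(mext)s' extension stripped; the exact categories
# and plugin lists depend on the recipes and the active target platform/variants.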
| lgpl-2.1 | 1,029,590,351,478,202,200 | 44.728571 | 104 | 0.565761 | false |
rs2/pandas | pandas/tests/resample/test_datetime_index.py | 1 | 59222 | from datetime import datetime
from functools import partial
from io import StringIO
import numpy as np
import pytest
import pytz
from pandas._libs import lib
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Series, Timedelta, Timestamp, isna, notna
import pandas._testing as tm
from pandas.core.groupby.grouper import Grouper
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import Period, period_range
from pandas.core.resample import DatetimeIndex, _get_timestamp_range_edges
import pandas.tseries.offsets as offsets
from pandas.tseries.offsets import Minute
@pytest.fixture()
def _index_factory():
return date_range
@pytest.fixture
def _index_freq():
return "Min"
@pytest.fixture
def _static_values(index):
return np.random.rand(len(index))
def test_custom_grouper(index):
dti = index
s = Series(np.array([1] * len(dti)), index=dti, dtype="int64")
b = Grouper(freq=Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ["add", "mean", "prod", "ohlc", "min", "max", "var"]
for f in funcs:
g._cython_agg_general(f)
b = Grouper(freq=Minute(5), closed="right", label="right")
g = s.groupby(b)
# check all cython functions work
funcs = ["add", "mean", "prod", "ohlc", "min", "max", "var"]
for f in funcs:
g._cython_agg_general(f)
assert g.ngroups == 2593
assert notna(g.mean()).all()
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
idx = pd.DatetimeIndex(idx, freq="5T")
expect = Series(arr, index=idx)
# GH2763 - return in put dtype if we can
result = g.agg(np.sum)
tm.assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype="float64")
r = df.groupby(b).agg(np.sum)
assert len(r.columns) == 10
assert len(r.index) == 2593
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
@pytest.mark.parametrize(
"closed, expected",
[
(
"right",
lambda s: Series(
[s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range("1/1/2000", periods=4, freq="5min", name="index"),
),
),
(
"left",
lambda s: Series(
[s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range(
"1/1/2000 00:05", periods=3, freq="5min", name="index"
),
),
),
],
)
def test_resample_basic(series, closed, expected):
s = series
expected = expected(s)
result = s.resample("5min", closed=closed, label="right").mean()
tm.assert_series_equal(result, expected)
def test_resample_integerarray():
# GH 25580, resample on IntegerArray
ts = pd.Series(
range(9), index=pd.date_range("1/1/2000", periods=9, freq="T"), dtype="Int64"
)
result = ts.resample("3T").sum()
expected = Series(
[3, 12, 21],
index=pd.date_range("1/1/2000", periods=3, freq="3T"),
dtype="Int64",
)
tm.assert_series_equal(result, expected)
result = ts.resample("3T").mean()
expected = Series(
[1, 4, 7], index=pd.date_range("1/1/2000", periods=3, freq="3T"), dtype="Int64"
)
tm.assert_series_equal(result, expected)
def test_resample_basic_grouper(series):
s = series
result = s.resample("5Min").last()
grouper = Grouper(freq=Minute(5), closed="left", label="left")
expected = s.groupby(grouper).agg(lambda x: x[-1])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
@pytest.mark.parametrize(
"keyword,value",
[("label", "righttt"), ("closed", "righttt"), ("convention", "starttt")],
)
def test_resample_string_kwargs(series, keyword, value):
# see gh-19303
# Check that wrong keyword argument strings raise an error
msg = f"Unsupported value {value} for `{keyword}`"
with pytest.raises(ValueError, match=msg):
series.resample("5min", **({keyword: value}))
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
def test_resample_how(series, downsample_method):
if downsample_method == "ohlc":
pytest.skip("covered by test_resample_how_ohlc")
s = series
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
expected = s.groupby(grouplist).agg(downsample_method)
expected.index = date_range("1/1/2000", periods=4, freq="5min", name="index")
result = getattr(
s.resample("5min", closed="right", label="right"), downsample_method
)()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
def test_resample_how_ohlc(series):
s = series
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
expected = DataFrame(
s.groupby(grouplist).agg(_ohlc).values.tolist(),
index=date_range("1/1/2000", periods=4, freq="5min", name="index"),
columns=["open", "high", "low", "close"],
)
result = s.resample("5min", closed="right", label="right").ohlc()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max", "sum", "prod", "mean", "var", "std"])
def test_numpy_compat(func):
# see gh-12811
s = Series([1, 2, 3, 4, 5], index=date_range("20130101", periods=5, freq="s"))
r = s.resample("2s")
msg = "numpy operations are not valid with resample"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, func)(func, 1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, func)(axis=1)
def test_resample_how_callables():
# GH#7929
data = np.arange(5, dtype=np.int64)
ind = date_range(start="2014-01-01", periods=len(data), freq="d")
df = DataFrame({"A": data, "B": data}, index=ind)
def fn(x, a=1):
return str(type(x))
class FnClass:
def __call__(self, x):
return str(type(x))
df_standard = df.resample("M").apply(fn)
df_lambda = df.resample("M").apply(lambda x: str(type(x)))
df_partial = df.resample("M").apply(partial(fn))
df_partial2 = df.resample("M").apply(partial(fn, a=2))
df_class = df.resample("M").apply(FnClass())
tm.assert_frame_equal(df_standard, df_lambda)
tm.assert_frame_equal(df_standard, df_partial)
tm.assert_frame_equal(df_standard, df_partial2)
tm.assert_frame_equal(df_standard, df_class)
def test_resample_rounding():
# GH 8371
# odd results when rounding is needed
data = """date,time,value
11-08-2014,00:00:01.093,1
11-08-2014,00:00:02.159,1
11-08-2014,00:00:02.667,1
11-08-2014,00:00:03.175,1
11-08-2014,00:00:07.058,1
11-08-2014,00:00:07.362,1
11-08-2014,00:00:08.324,1
11-08-2014,00:00:08.830,1
11-08-2014,00:00:08.982,1
11-08-2014,00:00:09.815,1
11-08-2014,00:00:10.540,1
11-08-2014,00:00:11.061,1
11-08-2014,00:00:11.617,1
11-08-2014,00:00:13.607,1
11-08-2014,00:00:14.535,1
11-08-2014,00:00:15.525,1
11-08-2014,00:00:17.960,1
11-08-2014,00:00:20.674,1
11-08-2014,00:00:21.191,1"""
df = pd.read_csv(
StringIO(data),
parse_dates={"timestamp": ["date", "time"]},
index_col="timestamp",
)
df.index.name = None
result = df.resample("6s").sum()
expected = DataFrame(
{"value": [4, 9, 4, 2]}, index=date_range("2014-11-08", freq="6s", periods=4)
)
tm.assert_frame_equal(result, expected)
result = df.resample("7s").sum()
expected = DataFrame(
{"value": [4, 10, 4, 1]}, index=date_range("2014-11-08", freq="7s", periods=4)
)
tm.assert_frame_equal(result, expected)
result = df.resample("11s").sum()
expected = DataFrame(
{"value": [11, 8]}, index=date_range("2014-11-08", freq="11s", periods=2)
)
tm.assert_frame_equal(result, expected)
result = df.resample("13s").sum()
expected = DataFrame(
{"value": [13, 6]}, index=date_range("2014-11-08", freq="13s", periods=2)
)
tm.assert_frame_equal(result, expected)
result = df.resample("17s").sum()
expected = DataFrame(
{"value": [16, 3]}, index=date_range("2014-11-08", freq="17s", periods=2)
)
tm.assert_frame_equal(result, expected)
def test_resample_basic_from_daily():
# from daily
dti = date_range(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D", name="index"
)
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample("w-sun").last()
assert len(result) == 3
assert (result.index.dayofweek == [6, 6, 6]).all()
assert result.iloc[0] == s["1/2/2005"]
assert result.iloc[1] == s["1/9/2005"]
assert result.iloc[2] == s.iloc[-1]
result = s.resample("W-MON").last()
assert len(result) == 2
assert (result.index.dayofweek == [0, 0]).all()
assert result.iloc[0] == s["1/3/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-TUE").last()
assert len(result) == 2
assert (result.index.dayofweek == [1, 1]).all()
assert result.iloc[0] == s["1/4/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-WED").last()
assert len(result) == 2
assert (result.index.dayofweek == [2, 2]).all()
assert result.iloc[0] == s["1/5/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-THU").last()
assert len(result) == 2
assert (result.index.dayofweek == [3, 3]).all()
assert result.iloc[0] == s["1/6/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-FRI").last()
assert len(result) == 2
assert (result.index.dayofweek == [4, 4]).all()
assert result.iloc[0] == s["1/7/2005"]
assert result.iloc[1] == s["1/10/2005"]
# to biz day
result = s.resample("B").last()
assert len(result) == 7
assert (result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all()
assert result.iloc[0] == s["1/2/2005"]
assert result.iloc[1] == s["1/3/2005"]
assert result.iloc[5] == s["1/9/2005"]
assert result.index.name == "index"
def test_resample_upsampling_picked_but_not_correct():
# Test for issue #3020
dates = date_range("01-Jan-2014", "05-Jan-2014", freq="D")
series = Series(1, index=dates)
result = series.resample("D").mean()
assert result.index[0] == dates[0]
# GH 5955
    # incorrectly deciding to upsample when the axis frequency matches the
    # resample frequency
s = Series(
np.arange(1.0, 6), index=[datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
)
expected = Series(
np.arange(1.0, 6), index=date_range("19750101", periods=5, freq="D")
)
result = s.resample("D").count()
tm.assert_series_equal(result, Series(1, index=expected.index))
result1 = s.resample("D").sum()
result2 = s.resample("D").mean()
tm.assert_series_equal(result1, expected)
tm.assert_series_equal(result2, expected)
def test_resample_frame_basic():
df = tm.makeTimeDataFrame()
b = Grouper(freq="M")
g = df.groupby(b)
# check all cython functions work
funcs = ["add", "mean", "prod", "min", "max", "var"]
for f in funcs:
g._cython_agg_general(f)
result = df.resample("A").mean()
tm.assert_series_equal(result["A"], df["A"].resample("A").mean())
result = df.resample("M").mean()
tm.assert_series_equal(result["A"], df["A"].resample("M").mean())
df.resample("M", kind="period").mean()
df.resample("W-WED", kind="period").mean()
def test_resample_upsample():
# from daily
dti = date_range(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D", name="index"
)
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample("Min").pad()
assert len(result) == 12961
assert result[0] == s[0]
assert result[-1] == s[-1]
assert result.index.name == "index"
def test_resample_how_method():
# GH9915
s = Series(
[11, 22],
index=[
Timestamp("2015-03-31 21:48:52.672000"),
Timestamp("2015-03-31 21:49:52.739000"),
],
)
expected = Series(
[11, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 22],
index=pd.DatetimeIndex(
[
Timestamp("2015-03-31 21:48:50"),
Timestamp("2015-03-31 21:49:00"),
Timestamp("2015-03-31 21:49:10"),
Timestamp("2015-03-31 21:49:20"),
Timestamp("2015-03-31 21:49:30"),
Timestamp("2015-03-31 21:49:40"),
Timestamp("2015-03-31 21:49:50"),
],
freq="10s",
),
)
tm.assert_series_equal(s.resample("10S").mean(), expected)
def test_resample_extra_index_point():
# GH#9756
index = date_range(start="20150101", end="20150331", freq="BM")
expected = DataFrame({"A": Series([21, 41, 63], index=index)})
index = date_range(start="20150101", end="20150331", freq="B")
df = DataFrame({"A": Series(range(len(index)), index=index)}, dtype="int64")
result = df.resample("BM").last()
tm.assert_frame_equal(result, expected)
def test_upsample_with_limit():
rng = date_range("1/1/2000", periods=3, freq="5t")
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample("t").ffill(limit=2)
expected = ts.reindex(result.index, method="ffill", limit=2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("freq", ["5D", "10H", "5Min", "10S"])
@pytest.mark.parametrize("rule", ["Y", "3M", "15D", "30H", "15Min", "30S"])
def test_nearest_upsample_with_limit(tz_aware_fixture, freq, rule):
# GH 33939
rng = date_range("1/1/2000", periods=3, freq=freq, tz=tz_aware_fixture)
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample(rule).nearest(limit=2)
expected = ts.reindex(result.index, method="nearest", limit=2)
tm.assert_series_equal(result, expected)
def test_resample_ohlc(series):
s = series
grouper = Grouper(freq=Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample("5Min").ohlc()
assert len(result) == len(expect)
assert len(result.columns) == 4
xs = result.iloc[-2]
assert xs["open"] == s[-6]
assert xs["high"] == s[-6:-1].max()
assert xs["low"] == s[-6:-1].min()
assert xs["close"] == s[-2]
xs = result.iloc[0]
assert xs["open"] == s[0]
assert xs["high"] == s[:5].max()
assert xs["low"] == s[:5].min()
assert xs["close"] == s[4]
def test_resample_ohlc_result():
# GH 12332
index = pd.date_range("1-1-2000", "2-15-2000", freq="h")
index = index.union(pd.date_range("4-15-2000", "5-15-2000", freq="h"))
s = Series(range(len(index)), index=index)
a = s.loc[:"4-15-2000"].resample("30T").ohlc()
assert isinstance(a, DataFrame)
b = s.loc[:"4-14-2000"].resample("30T").ohlc()
assert isinstance(b, DataFrame)
# GH12348
# raising on odd period
rng = date_range("2013-12-30", "2014-01-07")
index = rng.drop(
[
Timestamp("2014-01-01"),
Timestamp("2013-12-31"),
Timestamp("2014-01-04"),
Timestamp("2014-01-05"),
]
)
df = DataFrame(data=np.arange(len(index)), index=index)
result = df.resample("B").mean()
expected = df.reindex(index=date_range(rng[0], rng[-1], freq="B"))
tm.assert_frame_equal(result, expected)
def test_resample_ohlc_dataframe():
df = (
DataFrame(
{
"PRICE": {
Timestamp("2011-01-06 10:59:05", tz=None): 24990,
Timestamp("2011-01-06 12:43:33", tz=None): 25499,
Timestamp("2011-01-06 12:54:09", tz=None): 25499,
},
"VOLUME": {
Timestamp("2011-01-06 10:59:05", tz=None): 1500000000,
Timestamp("2011-01-06 12:43:33", tz=None): 5000000000,
Timestamp("2011-01-06 12:54:09", tz=None): 100000000,
},
}
)
).reindex(["VOLUME", "PRICE"], axis=1)
res = df.resample("H").ohlc()
exp = pd.concat(
[df["VOLUME"].resample("H").ohlc(), df["PRICE"].resample("H").ohlc()],
axis=1,
keys=["VOLUME", "PRICE"],
)
tm.assert_frame_equal(exp, res)
df.columns = [["a", "b"], ["c", "d"]]
res = df.resample("H").ohlc()
exp.columns = pd.MultiIndex.from_tuples(
[
("a", "c", "open"),
("a", "c", "high"),
("a", "c", "low"),
("a", "c", "close"),
("b", "d", "open"),
("b", "d", "high"),
("b", "d", "low"),
("b", "d", "close"),
]
)
tm.assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index():
# GH 4812
# dup columns with resample raising
df = DataFrame(
np.random.randn(4, 12),
index=[2000, 2000, 2000, 2000],
columns=[Period(year=2000, month=i + 1, freq="M") for i in range(12)],
)
df.iloc[3, :] = np.nan
result = df.resample("Q", axis=1).mean()
expected = df.groupby(lambda x: int((x.month - 1) / 3), axis=1).mean()
expected.columns = [Period(year=2000, quarter=i + 1, freq="Q") for i in range(4)]
tm.assert_frame_equal(result, expected)
def test_resample_reresample():
dti = date_range(start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D")
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample("B", closed="right", label="right").mean()
result = bs.resample("8H").mean()
assert len(result) == 22
assert isinstance(result.index.freq, offsets.DateOffset)
assert result.index.freq == offsets.Hour(8)
def test_resample_timestamp_to_period(simple_date_range_series):
ts = simple_date_range_series("1/1/1990", "1/1/2000")
result = ts.resample("A-DEC", kind="period").mean()
expected = ts.resample("A-DEC").mean()
expected.index = period_range("1990", "2000", freq="a-dec")
tm.assert_series_equal(result, expected)
result = ts.resample("A-JUN", kind="period").mean()
expected = ts.resample("A-JUN").mean()
expected.index = period_range("1990", "2000", freq="a-jun")
tm.assert_series_equal(result, expected)
result = ts.resample("M", kind="period").mean()
expected = ts.resample("M").mean()
expected.index = period_range("1990-01", "2000-01", freq="M")
tm.assert_series_equal(result, expected)
def test_ohlc_5min():
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range("1/1/2000 00:00:00", "1/1/2000 5:59:50", freq="10s")
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample("5min", closed="right", label="right").ohlc()
assert (resampled.loc["1/1/2000 00:00"] == ts[0]).all()
exp = _ohlc(ts[1:31])
assert (resampled.loc["1/1/2000 00:05"] == exp).all()
exp = _ohlc(ts["1/1/2000 5:55:01":])
assert (resampled.loc["1/1/2000 6:00:00"] == exp).all()
def test_downsample_non_unique():
rng = date_range("1/1/2000", "2/29/2000")
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample("M").mean()
expected = ts.groupby(lambda x: x.month).mean()
assert len(result) == 2
tm.assert_almost_equal(result[0], expected[1])
tm.assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique():
# GH #1077
rng = date_range("1/1/2000", "2/29/2000")
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
ts.asfreq("B")
def test_resample_axis1():
rng = date_range("1/1/2000", "2/29/2000")
df = DataFrame(np.random.randn(3, len(rng)), columns=rng, index=["a", "b", "c"])
result = df.resample("M", axis=1).mean()
expected = df.T.resample("M").mean().T
tm.assert_frame_equal(result, expected)
def test_resample_anchored_ticks():
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the
# middle of a desired interval
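    # Illustration (not part of the original test): hourly points at 04:13,
    # 05:13 and 06:13 resampled with "4h" all land in the single
    # midnight-anchored bin labelled 04:00, not in a bin starting at 04:13.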
rng = date_range("1/1/2000 04:00:00", periods=86400, freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ["t", "5t", "15t", "30t", "4h", "12h"]
for freq in freqs:
result = ts[2:].resample(freq, closed="left", label="left").mean()
expected = ts.resample(freq, closed="left", label="left").mean()
tm.assert_series_equal(result, expected)
def test_resample_single_group():
mysum = lambda x: x.sum()
rng = date_range("2000-1-1", "2000-2-10", freq="D")
ts = Series(np.random.randn(len(rng)), index=rng)
tm.assert_series_equal(ts.resample("M").sum(), ts.resample("M").apply(mysum))
rng = date_range("2000-1-1", "2000-1-10", freq="D")
ts = Series(np.random.randn(len(rng)), index=rng)
tm.assert_series_equal(ts.resample("M").sum(), ts.resample("M").apply(mysum))
# GH 3849
s = Series(
[30.1, 31.6],
index=[Timestamp("20070915 15:30:00"), Timestamp("20070915 15:40:00")],
)
expected = Series([0.75], index=pd.DatetimeIndex([Timestamp("20070915")], freq="D"))
result = s.resample("D").apply(lambda x: np.std(x))
tm.assert_series_equal(result, expected)
def test_resample_offset():
# GH 31809
rng = date_range("1/1/2000 00:00:00", "1/1/2000 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample("5min", offset="2min").mean()
exp_rng = date_range("12/31/1999 23:57:00", "1/1/2000 01:57", freq="5min")
tm.assert_index_equal(resampled.index, exp_rng)
def test_resample_origin():
# GH 31809
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
exp_rng = date_range("1999-12-31 23:57:00", "2000-01-01 01:57", freq="5min")
resampled = ts.resample("5min", origin="1999-12-31 23:57:00").mean()
tm.assert_index_equal(resampled.index, exp_rng)
offset_timestamp = pd.Timestamp(0) + pd.Timedelta("2min")
resampled = ts.resample("5min", origin=offset_timestamp).mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("5min", origin="epoch", offset="2m").mean()
tm.assert_index_equal(resampled.index, exp_rng)
    # origin of '1999-12-31 12:02:00' should be equivalent for this case
resampled = ts.resample("5min", origin="1999-12-31 12:02:00").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("5min", offset="-3m").mean()
tm.assert_index_equal(resampled.index, exp_rng)
@pytest.mark.parametrize(
"origin", ["invalid_value", "epch", "startday", "startt", "2000-30-30", object()]
)
def test_resample_bad_origin(origin):
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
msg = (
"'origin' should be equal to 'epoch', 'start', 'start_day' or "
f"should be a Timestamp convertible type. Got '{origin}' instead."
)
with pytest.raises(ValueError, match=msg):
ts.resample("5min", origin=origin)
@pytest.mark.parametrize("offset", ["invalid_value", "12dayys", "2000-30-30", object()])
def test_resample_bad_offset(offset):
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
msg = f"'offset' should be a Timedelta convertible type. Got '{offset}' instead."
with pytest.raises(ValueError, match=msg):
ts.resample("5min", offset=offset)
def test_resample_origin_prime_freq():
# GH 31809
start, end = "2000-10-01 23:30:00", "2000-10-02 00:30:00"
rng = pd.date_range(start, end, freq="7min")
ts = Series(np.random.randn(len(rng)), index=rng)
exp_rng = date_range("2000-10-01 23:14:00", "2000-10-02 00:22:00", freq="17min")
resampled = ts.resample("17min").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("17min", origin="start_day").mean()
tm.assert_index_equal(resampled.index, exp_rng)
exp_rng = date_range("2000-10-01 23:30:00", "2000-10-02 00:21:00", freq="17min")
resampled = ts.resample("17min", origin="start").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("17min", offset="23h30min").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("17min", origin="start_day", offset="23h30min").mean()
tm.assert_index_equal(resampled.index, exp_rng)
exp_rng = date_range("2000-10-01 23:18:00", "2000-10-02 00:26:00", freq="17min")
resampled = ts.resample("17min", origin="epoch").mean()
tm.assert_index_equal(resampled.index, exp_rng)
exp_rng = date_range("2000-10-01 23:24:00", "2000-10-02 00:15:00", freq="17min")
resampled = ts.resample("17min", origin="2000-01-01").mean()
tm.assert_index_equal(resampled.index, exp_rng)
def test_resample_origin_with_tz():
# GH 31809
msg = "The origin must have the same timezone as the index."
tz = "Europe/Paris"
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s", tz=tz)
ts = Series(np.random.randn(len(rng)), index=rng)
exp_rng = date_range("1999-12-31 23:57:00", "2000-01-01 01:57", freq="5min", tz=tz)
resampled = ts.resample("5min", origin="1999-12-31 23:57:00+00:00").mean()
tm.assert_index_equal(resampled.index, exp_rng)
    # origin of '1999-12-31 12:02:00+03:00' should be equivalent for this case
resampled = ts.resample("5min", origin="1999-12-31 12:02:00+03:00").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("5min", origin="epoch", offset="2m").mean()
tm.assert_index_equal(resampled.index, exp_rng)
with pytest.raises(ValueError, match=msg):
ts.resample("5min", origin="12/31/1999 23:57:00").mean()
# if the series is not tz aware, origin should not be tz aware
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
with pytest.raises(ValueError, match=msg):
ts.resample("5min", origin="12/31/1999 23:57:00+03:00").mean()
def test_resample_origin_epoch_with_tz_day_vs_24h():
# GH 34474
start, end = "2000-10-01 23:30:00+0500", "2000-12-02 00:30:00+0500"
rng = pd.date_range(start, end, freq="7min")
random_values = np.random.randn(len(rng))
ts_1 = pd.Series(random_values, index=rng)
result_1 = ts_1.resample("D", origin="epoch").mean()
result_2 = ts_1.resample("24H", origin="epoch").mean()
tm.assert_series_equal(result_1, result_2)
# check that we have the same behavior with epoch even if we are not timezone aware
ts_no_tz = ts_1.tz_localize(None)
result_3 = ts_no_tz.resample("D", origin="epoch").mean()
result_4 = ts_no_tz.resample("24H", origin="epoch").mean()
tm.assert_series_equal(result_1, result_3.tz_localize(rng.tz), check_freq=False)
tm.assert_series_equal(result_1, result_4.tz_localize(rng.tz), check_freq=False)
    # check that we get similar results with two different timezones (+2H and +5H)
start, end = "2000-10-01 23:30:00+0200", "2000-12-02 00:30:00+0200"
rng = pd.date_range(start, end, freq="7min")
ts_2 = pd.Series(random_values, index=rng)
result_5 = ts_2.resample("D", origin="epoch").mean()
result_6 = ts_2.resample("24H", origin="epoch").mean()
tm.assert_series_equal(result_1.tz_localize(None), result_5.tz_localize(None))
tm.assert_series_equal(result_1.tz_localize(None), result_6.tz_localize(None))
def test_resample_origin_with_day_freq_on_dst():
# GH 31809
tz = "America/Chicago"
def _create_series(values, timestamps, freq="D"):
return pd.Series(
values,
index=pd.DatetimeIndex(
[Timestamp(t, tz=tz) for t in timestamps], freq=freq, ambiguous=True
),
)
# test classical behavior of origin in a DST context
start = pd.Timestamp("2013-11-02", tz=tz)
end = pd.Timestamp("2013-11-03 23:59", tz=tz)
rng = pd.date_range(start, end, freq="1h")
ts = pd.Series(np.ones(len(rng)), index=rng)
expected = _create_series([24.0, 25.0], ["2013-11-02", "2013-11-03"])
for origin in ["epoch", "start", "start_day", start, None]:
result = ts.resample("D", origin=origin).sum()
tm.assert_series_equal(result, expected)
# test complex behavior of origin/offset in a DST context
start = pd.Timestamp("2013-11-03", tz=tz)
end = pd.Timestamp("2013-11-03 23:59", tz=tz)
rng = pd.date_range(start, end, freq="1h")
ts = pd.Series(np.ones(len(rng)), index=rng)
expected_ts = ["2013-11-02 22:00-05:00", "2013-11-03 22:00-06:00"]
expected = _create_series([23.0, 2.0], expected_ts)
result = ts.resample("D", origin="start", offset="-2H").sum()
tm.assert_series_equal(result, expected)
expected_ts = ["2013-11-02 22:00-05:00", "2013-11-03 21:00-06:00"]
expected = _create_series([22.0, 3.0], expected_ts, freq="24H")
result = ts.resample("24H", origin="start", offset="-2H").sum()
tm.assert_series_equal(result, expected)
expected_ts = ["2013-11-02 02:00-05:00", "2013-11-03 02:00-06:00"]
expected = _create_series([3.0, 22.0], expected_ts)
result = ts.resample("D", origin="start", offset="2H").sum()
tm.assert_series_equal(result, expected)
expected_ts = ["2013-11-02 23:00-05:00", "2013-11-03 23:00-06:00"]
expected = _create_series([24.0, 1.0], expected_ts)
result = ts.resample("D", origin="start", offset="-1H").sum()
tm.assert_series_equal(result, expected)
expected_ts = ["2013-11-02 01:00-05:00", "2013-11-03 01:00:00-0500"]
expected = _create_series([1.0, 24.0], expected_ts)
result = ts.resample("D", origin="start", offset="1H").sum()
tm.assert_series_equal(result, expected)
def test_resample_daily_anchored():
rng = date_range("1/1/2000 0:00:00", periods=10000, freq="T")
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample("D", closed="left", label="left").mean()
expected = ts.resample("D", closed="left", label="left").mean()
tm.assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet():
# GH #1259
rng = date_range("1/1/2000", "12/31/2000")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample("M", kind="period").mean()
exp_index = period_range("Jan-2000", "Dec-2000", freq="M")
tm.assert_index_equal(result.index, exp_index)
def test_period_with_agg():
# aggregate a period resampler with a lambda
s2 = Series(
np.random.randint(0, 5, 50),
index=pd.period_range("2012-01-01", freq="H", periods=50),
dtype="float64",
)
expected = s2.to_timestamp().resample("D").mean().to_period()
result = s2.resample("D").agg(lambda x: x.mean())
tm.assert_series_equal(result, expected)
def test_resample_segfault():
# GH 8573
# segfaulting in older versions
all_wins_and_wagers = [
(1, datetime(2013, 10, 1, 16, 20), 1, 0),
(2, datetime(2013, 10, 1, 16, 10), 1, 0),
(2, datetime(2013, 10, 1, 18, 15), 1, 0),
(2, datetime(2013, 10, 1, 16, 10, 31), 1, 0),
]
df = DataFrame.from_records(
all_wins_and_wagers, columns=("ID", "timestamp", "A", "B")
).set_index("timestamp")
result = df.groupby("ID").resample("5min").sum()
expected = df.groupby("ID").apply(lambda x: x.resample("5min").sum())
tm.assert_frame_equal(result, expected)
def test_resample_dtype_preservation():
# GH 12202
# validation tests for dtype preservation
df = DataFrame(
{
"date": pd.date_range(start="2016-01-01", periods=4, freq="W"),
"group": [1, 1, 2, 2],
"val": Series([5, 6, 7, 8], dtype="int32"),
}
).set_index("date")
result = df.resample("1D").ffill()
assert result.val.dtype == np.int32
result = df.groupby("group").resample("1D").ffill()
assert result.val.dtype == np.int32
def test_resample_dtype_coercion():
pytest.importorskip("scipy.interpolate")
# GH 16361
df = {"a": [1, 3, 1, 4]}
df = DataFrame(df, index=pd.date_range("2017-01-01", "2017-01-04"))
expected = df.astype("float64").resample("H").mean()["a"].interpolate("cubic")
result = df.resample("H")["a"].mean().interpolate("cubic")
tm.assert_series_equal(result, expected)
result = df.resample("H").mean()["a"].interpolate("cubic")
tm.assert_series_equal(result, expected)
def test_weekly_resample_buglet():
# #1327
rng = date_range("1/1/2000", freq="B", periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample("W").mean()
expected = ts.resample("W-SUN").mean()
tm.assert_series_equal(resampled, expected)
def test_monthly_resample_error():
# #1451
dates = date_range("4/16/2012 20:00", periods=5000, freq="h")
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample("M")
def test_nanosecond_resample_error():
    # GH 12307 - value falls after the last bin when
    # resampling using pd.tseries.offsets.Nano as the period
start = 1443707890427
exp_start = 1443707890400
indx = pd.date_range(start=pd.to_datetime(start), periods=10, freq="100n")
ts = Series(range(len(indx)), index=indx)
r = ts.resample(pd.tseries.offsets.Nano(100))
result = r.agg("mean")
exp_indx = pd.date_range(start=pd.to_datetime(exp_start), periods=10, freq="100n")
exp = Series(range(len(exp_indx)), index=exp_indx)
tm.assert_series_equal(result, exp)
def test_resample_anchored_intraday(simple_date_range_series):
# #1471, #1458
rng = date_range("1/1/2012", "4/1/2012", freq="100min")
df = DataFrame(rng.month, index=rng)
result = df.resample("M").mean()
expected = df.resample("M", kind="period").mean().to_timestamp(how="end")
expected.index += Timedelta(1, "ns") - Timedelta(1, "D")
expected.index = expected.index._with_freq("infer")
assert expected.index.freq == "M"
tm.assert_frame_equal(result, expected)
result = df.resample("M", closed="left").mean()
exp = df.shift(1, freq="D").resample("M", kind="period").mean()
exp = exp.to_timestamp(how="end")
exp.index = exp.index + Timedelta(1, "ns") - Timedelta(1, "D")
exp.index = exp.index._with_freq("infer")
assert exp.index.freq == "M"
tm.assert_frame_equal(result, exp)
rng = date_range("1/1/2012", "4/1/2012", freq="100min")
df = DataFrame(rng.month, index=rng)
result = df.resample("Q").mean()
expected = df.resample("Q", kind="period").mean().to_timestamp(how="end")
expected.index += Timedelta(1, "ns") - Timedelta(1, "D")
expected.index._data.freq = "Q"
expected.index._freq = lib.no_default
tm.assert_frame_equal(result, expected)
result = df.resample("Q", closed="left").mean()
expected = df.shift(1, freq="D").resample("Q", kind="period", closed="left").mean()
expected = expected.to_timestamp(how="end")
expected.index += Timedelta(1, "ns") - Timedelta(1, "D")
expected.index._data.freq = "Q"
expected.index._freq = lib.no_default
tm.assert_frame_equal(result, expected)
ts = simple_date_range_series("2012-04-29 23:00", "2012-04-30 5:00", freq="h")
resampled = ts.resample("M").mean()
assert len(resampled) == 1
def test_resample_anchored_monthstart(simple_date_range_series):
ts = simple_date_range_series("1/1/2000", "12/31/2002")
freqs = ["MS", "BMS", "QS-MAR", "AS-DEC", "AS-JUN"]
for freq in freqs:
ts.resample(freq).mean()
def test_resample_anchored_multiday():
# When resampling a range spanning multiple days, ensure that the
# start date gets used to determine the offset. Fixes issue where
# a one day period is not a multiple of the frequency.
#
# See: https://github.com/pandas-dev/pandas/issues/8683
index = pd.date_range(
"2014-10-14 23:06:23.206", periods=3, freq="400L"
) | pd.date_range("2014-10-15 23:00:00", periods=2, freq="2200L")
s = Series(np.random.randn(5), index=index)
# Ensure left closing works
result = s.resample("2200L").mean()
assert result.index[-1] == Timestamp("2014-10-15 23:00:02.000")
# Ensure right closing works
result = s.resample("2200L", label="right").mean()
assert result.index[-1] == Timestamp("2014-10-15 23:00:04.200")
def test_corner_cases(simple_period_range_series, simple_date_range_series):
# miscellaneous test coverage
rng = date_range("1/1/2000", periods=12, freq="t")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample("5t", closed="right", label="left").mean()
ex_index = date_range("1999-12-31 23:55", periods=4, freq="5t")
tm.assert_index_equal(result.index, ex_index)
len0pts = simple_period_range_series("2007-01", "2010-05", freq="M")[:0]
# it works
result = len0pts.resample("A-DEC").mean()
assert len(result) == 0
# resample to periods
ts = simple_date_range_series("2000-04-28", "2000-04-30 11:00", freq="h")
result = ts.resample("M", kind="period").mean()
assert len(result) == 1
assert result.index[0] == Period("2000-04", freq="M")
def test_anchored_lowercase_buglet():
dates = date_range("4/16/2012 20:00", periods=50000, freq="s")
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample("d").mean()
def test_upsample_apply_functions():
# #1596
rng = pd.date_range("2012-06-12", periods=4, freq="h")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample("20min").aggregate(["mean", "sum"])
assert isinstance(result, DataFrame)
def test_resample_not_monotonic():
rng = pd.date_range("2012-06-12", periods=200, freq="h")
ts = Series(np.random.randn(len(rng)), index=rng)
ts = ts.take(np.random.permutation(len(ts)))
result = ts.resample("D").sum()
exp = ts.sort_index().resample("D").sum()
tm.assert_series_equal(result, exp)
def test_resample_median_bug_1688():
for dtype in ["int64", "int32", "float64", "float32"]:
df = DataFrame(
[1, 2],
index=[datetime(2012, 1, 1, 0, 0, 0), datetime(2012, 1, 1, 0, 5, 0)],
dtype=dtype,
)
result = df.resample("T").apply(lambda x: x.mean())
exp = df.asfreq("T")
tm.assert_frame_equal(result, exp)
result = df.resample("T").median()
exp = df.asfreq("T")
tm.assert_frame_equal(result, exp)
def test_how_lambda_functions(simple_date_range_series):
ts = simple_date_range_series("1/1/2000", "4/1/2000")
result = ts.resample("M").apply(lambda x: x.mean())
exp = ts.resample("M").mean()
tm.assert_series_equal(result, exp)
foo_exp = ts.resample("M").mean()
foo_exp.name = "foo"
bar_exp = ts.resample("M").std()
bar_exp.name = "bar"
result = ts.resample("M").apply([lambda x: x.mean(), lambda x: x.std(ddof=1)])
result.columns = ["foo", "bar"]
tm.assert_series_equal(result["foo"], foo_exp)
tm.assert_series_equal(result["bar"], bar_exp)
# this is a MI Series, so comparing the names of the results
# doesn't make sense
result = ts.resample("M").aggregate(
{"foo": lambda x: x.mean(), "bar": lambda x: x.std(ddof=1)}
)
tm.assert_series_equal(result["foo"], foo_exp, check_names=False)
tm.assert_series_equal(result["bar"], bar_exp, check_names=False)
def test_resample_unequal_times():
# #1772
start = datetime(1999, 3, 1, 5)
# end hour is less than start
end = datetime(2012, 7, 31, 4)
bad_ind = date_range(start, end, freq="30min")
df = DataFrame({"close": 1}, index=bad_ind)
# it works!
df.resample("AS").sum()
def test_resample_consistency():
# GH 6418
# resample with bfill / limit / reindex consistency
i30 = pd.date_range("2002-02-02", periods=4, freq="30T")
s = Series(np.arange(4.0), index=i30)
s[2] = np.NaN
# Upsample by factor 3 with reindex() and resample() methods:
i10 = pd.date_range(i30[0], i30[-1], freq="10T")
s10 = s.reindex(index=i10, method="bfill")
s10_2 = s.reindex(index=i10, method="bfill", limit=2)
rl = s.reindex_like(s10, method="bfill", limit=2)
r10_2 = s.resample("10Min").bfill(limit=2)
r10 = s.resample("10Min").bfill()
# s10_2, r10, r10_2, rl should all be equal
tm.assert_series_equal(s10_2, r10)
tm.assert_series_equal(s10_2, r10_2)
tm.assert_series_equal(s10_2, rl)
def test_resample_timegrouper():
# GH 7227
dates1 = [
datetime(2014, 10, 1),
datetime(2014, 9, 3),
datetime(2014, 11, 5),
datetime(2014, 9, 5),
datetime(2014, 10, 8),
datetime(2014, 7, 15),
]
dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:]
dates3 = [pd.NaT] + dates1 + [pd.NaT]
for dates in [dates1, dates2, dates3]:
df = DataFrame(dict(A=dates, B=np.arange(len(dates))))
result = df.set_index("A").resample("M").count()
exp_idx = pd.DatetimeIndex(
["2014-07-31", "2014-08-31", "2014-09-30", "2014-10-31", "2014-11-30"],
freq="M",
name="A",
)
expected = DataFrame({"B": [1, 0, 2, 2, 1]}, index=exp_idx)
if df["A"].isna().any():
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(result, expected)
result = df.groupby(pd.Grouper(freq="M", key="A")).count()
tm.assert_frame_equal(result, expected)
df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(len(dates))))
result = df.set_index("A").resample("M").count()
expected = DataFrame(
{"B": [1, 0, 2, 2, 1], "C": [1, 0, 2, 2, 1]},
index=exp_idx,
columns=["B", "C"],
)
if df["A"].isna().any():
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(result, expected)
result = df.groupby(pd.Grouper(freq="M", key="A")).count()
tm.assert_frame_equal(result, expected)
def test_resample_nunique():
# GH 12352
df = DataFrame(
{
"ID": {
Timestamp("2015-06-05 00:00:00"): "0010100903",
Timestamp("2015-06-08 00:00:00"): "0010150847",
},
"DATE": {
Timestamp("2015-06-05 00:00:00"): "2015-06-05",
Timestamp("2015-06-08 00:00:00"): "2015-06-08",
},
}
)
r = df.resample("D")
g = df.groupby(pd.Grouper(freq="D"))
expected = df.groupby(pd.Grouper(freq="D")).ID.apply(lambda x: x.nunique())
assert expected.name == "ID"
for t in [r, g]:
        result = t.ID.nunique()
tm.assert_series_equal(result, expected)
result = df.ID.resample("D").nunique()
tm.assert_series_equal(result, expected)
result = df.ID.groupby(pd.Grouper(freq="D")).nunique()
tm.assert_series_equal(result, expected)
def test_resample_nunique_preserves_column_level_names():
# see gh-23222
df = tm.makeTimeDataFrame(freq="1D").abs()
df.columns = pd.MultiIndex.from_arrays(
[df.columns.tolist()] * 2, names=["lev0", "lev1"]
)
result = df.resample("1h").nunique()
tm.assert_index_equal(df.columns, result.columns)
def test_resample_nunique_with_date_gap():
# GH 13453
index = pd.date_range("1-1-2000", "2-15-2000", freq="h")
index2 = pd.date_range("4-15-2000", "5-15-2000", freq="h")
index3 = index.append(index2)
s = Series(range(len(index3)), index=index3, dtype="int64")
r = s.resample("M")
# Since all elements are unique, these should all be the same
results = [r.count(), r.nunique(), r.agg(Series.nunique), r.agg("nunique")]
tm.assert_series_equal(results[0], results[1])
tm.assert_series_equal(results[0], results[2])
tm.assert_series_equal(results[0], results[3])
@pytest.mark.parametrize("n", [10000, 100000])
@pytest.mark.parametrize("k", [10, 100, 1000])
def test_resample_group_info(n, k):
# GH10914
# use a fixed seed to always have the same uniques
prng = np.random.RandomState(1234)
dr = date_range(start="2015-08-27", periods=n // 10, freq="T")
ts = Series(prng.randint(0, n // k, n).astype("int64"), index=prng.choice(dr, n))
left = ts.resample("30T").nunique()
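    # Recompute nunique per 30-minute bin by hand: bucket each timestamp with
    # searchsorted, sort by (bin, value), and use the mask below to count only
    # the first occurrence of each distinct value within a bin.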
ix = date_range(start=ts.index.min(), end=ts.index.max(), freq="30T")
vals = ts.values
bins = np.searchsorted(ix.values, ts.index, side="right")
sorter = np.lexsort((vals, bins))
vals, bins = vals[sorter], bins[sorter]
mask = np.r_[True, vals[1:] != vals[:-1]]
mask |= np.r_[True, bins[1:] != bins[:-1]]
arr = np.bincount(bins[mask] - 1, minlength=len(ix)).astype("int64", copy=False)
right = Series(arr, index=ix)
tm.assert_series_equal(left, right)
def test_resample_size():
n = 10000
dr = date_range("2015-09-19", periods=n, freq="T")
ts = Series(np.random.randn(n), index=np.random.choice(dr, n))
left = ts.resample("7T").size()
ix = date_range(start=left.index.min(), end=ts.index.max(), freq="7T")
bins = np.searchsorted(ix.values, ts.index.values, side="right")
val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype("int64", copy=False)
right = Series(val, index=ix)
tm.assert_series_equal(left, right)
def test_resample_across_dst():
# The test resamples a DatetimeIndex with values before and after a
# DST change
# Issue: 14682
# The DatetimeIndex we will start with
# (note that DST happens at 03:00+02:00 -> 02:00+01:00)
# 2016-10-30 02:23:00+02:00, 2016-10-30 02:23:00+01:00
df1 = DataFrame([1477786980, 1477790580], columns=["ts"])
dti1 = DatetimeIndex(
pd.to_datetime(df1.ts, unit="s")
.dt.tz_localize("UTC")
.dt.tz_convert("Europe/Madrid")
)
# The expected DatetimeIndex after resampling.
# 2016-10-30 02:00:00+02:00, 2016-10-30 02:00:00+01:00
df2 = DataFrame([1477785600, 1477789200], columns=["ts"])
dti2 = DatetimeIndex(
pd.to_datetime(df2.ts, unit="s")
.dt.tz_localize("UTC")
.dt.tz_convert("Europe/Madrid"),
freq="H",
)
df = DataFrame([5, 5], index=dti1)
result = df.resample(rule="H").sum()
expected = DataFrame([5, 5], index=dti2)
tm.assert_frame_equal(result, expected)
def test_groupby_with_dst_time_change():
# GH 24972
index = pd.DatetimeIndex(
[1478064900001000000, 1480037118776792000], tz="UTC"
).tz_convert("America/Chicago")
df = pd.DataFrame([1, 2], index=index)
result = df.groupby(pd.Grouper(freq="1d")).last()
expected_index_values = pd.date_range(
"2016-11-02", "2016-11-24", freq="d", tz="America/Chicago"
)
index = pd.DatetimeIndex(expected_index_values)
expected = pd.DataFrame([1.0] + ([np.nan] * 21) + [2.0], index=index)
tm.assert_frame_equal(result, expected)
def test_resample_dst_anchor():
# 5172
dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz="US/Eastern")
df = DataFrame([5], index=dti)
dti = DatetimeIndex(df.index.normalize(), freq="D")
expected = DataFrame([5], index=dti)
tm.assert_frame_equal(df.resample(rule="D").sum(), expected)
df.resample(rule="MS").sum()
tm.assert_frame_equal(
df.resample(rule="MS").sum(),
DataFrame(
[5],
index=DatetimeIndex([datetime(2012, 11, 1)], tz="US/Eastern", freq="MS"),
),
)
dti = date_range("2013-09-30", "2013-11-02", freq="30Min", tz="Europe/Paris")
values = range(dti.size)
df = DataFrame({"a": values, "b": values, "c": values}, index=dti, dtype="int64")
how = {"a": "min", "b": "max", "c": "count"}
tm.assert_frame_equal(
df.resample("W-MON").agg(how)[["a", "b", "c"]],
DataFrame(
{
"a": [0, 48, 384, 720, 1056, 1394],
"b": [47, 383, 719, 1055, 1393, 1586],
"c": [48, 336, 336, 336, 338, 193],
},
index=date_range("9/30/2013", "11/4/2013", freq="W-MON", tz="Europe/Paris"),
),
"W-MON Frequency",
)
tm.assert_frame_equal(
df.resample("2W-MON").agg(how)[["a", "b", "c"]],
DataFrame(
{
"a": [0, 48, 720, 1394],
"b": [47, 719, 1393, 1586],
"c": [48, 672, 674, 193],
},
index=date_range(
"9/30/2013", "11/11/2013", freq="2W-MON", tz="Europe/Paris"
),
),
"2W-MON Frequency",
)
tm.assert_frame_equal(
df.resample("MS").agg(how)[["a", "b", "c"]],
DataFrame(
{"a": [0, 48, 1538], "b": [47, 1537, 1586], "c": [48, 1490, 49]},
index=date_range("9/1/2013", "11/1/2013", freq="MS", tz="Europe/Paris"),
),
"MS Frequency",
)
tm.assert_frame_equal(
df.resample("2MS").agg(how)[["a", "b", "c"]],
DataFrame(
{"a": [0, 1538], "b": [1537, 1586], "c": [1538, 49]},
index=date_range("9/1/2013", "11/1/2013", freq="2MS", tz="Europe/Paris"),
),
"2MS Frequency",
)
df_daily = df["10/26/2013":"10/29/2013"]
tm.assert_frame_equal(
df_daily.resample("D").agg({"a": "min", "b": "max", "c": "count"})[
["a", "b", "c"]
],
DataFrame(
{
"a": [1248, 1296, 1346, 1394],
"b": [1295, 1345, 1393, 1441],
"c": [48, 50, 48, 48],
},
index=date_range("10/26/2013", "10/29/2013", freq="D", tz="Europe/Paris"),
),
"D Frequency",
)
def test_downsample_across_dst():
# GH 8531
tz = pytz.timezone("Europe/Berlin")
dt = datetime(2014, 10, 26)
dates = date_range(tz.localize(dt), periods=4, freq="2H")
result = Series(5, index=dates).resample("H").mean()
expected = Series(
[5.0, np.nan] * 3 + [5.0],
index=date_range(tz.localize(dt), periods=7, freq="H"),
)
tm.assert_series_equal(result, expected)
def test_downsample_across_dst_weekly():
# GH 9119, GH 21459
df = DataFrame(
index=DatetimeIndex(
["2017-03-25", "2017-03-26", "2017-03-27", "2017-03-28", "2017-03-29"],
tz="Europe/Amsterdam",
),
data=[11, 12, 13, 14, 15],
)
result = df.resample("1W").sum()
expected = DataFrame(
[23, 42],
index=pd.DatetimeIndex(
["2017-03-26", "2017-04-02"], tz="Europe/Amsterdam", freq="W"
),
)
tm.assert_frame_equal(result, expected)
idx = pd.date_range("2013-04-01", "2013-05-01", tz="Europe/London", freq="H")
s = Series(index=idx, dtype=np.float64)
result = s.resample("W").mean()
expected = Series(
index=pd.date_range("2013-04-07", freq="W", periods=5, tz="Europe/London"),
dtype=np.float64,
)
tm.assert_series_equal(result, expected)
def test_downsample_dst_at_midnight():
# GH 25758
start = datetime(2018, 11, 3, 12)
end = datetime(2018, 11, 5, 12)
index = pd.date_range(start, end, freq="1H")
index = index.tz_localize("UTC").tz_convert("America/Havana")
data = list(range(len(index)))
dataframe = pd.DataFrame(data, index=index)
result = dataframe.groupby(pd.Grouper(freq="1D")).mean()
dti = date_range("2018-11-03", periods=3).tz_localize(
"America/Havana", ambiguous=True
)
dti = pd.DatetimeIndex(dti, freq="D")
expected = DataFrame([7.5, 28.0, 44.5], index=dti)
tm.assert_frame_equal(result, expected)
def test_resample_with_nat():
# GH 13020
index = DatetimeIndex(
[
pd.NaT,
"1970-01-01 00:00:00",
pd.NaT,
"1970-01-01 00:00:01",
"1970-01-01 00:00:02",
]
)
frame = DataFrame([2, 3, 5, 7, 11], index=index)
index_1s = DatetimeIndex(
["1970-01-01 00:00:00", "1970-01-01 00:00:01", "1970-01-01 00:00:02"]
)
frame_1s = DataFrame([3, 7, 11], index=index_1s)
tm.assert_frame_equal(frame.resample("1s").mean(), frame_1s)
index_2s = DatetimeIndex(["1970-01-01 00:00:00", "1970-01-01 00:00:02"])
frame_2s = DataFrame([5, 11], index=index_2s)
tm.assert_frame_equal(frame.resample("2s").mean(), frame_2s)
index_3s = DatetimeIndex(["1970-01-01 00:00:00"])
frame_3s = DataFrame([7], index=index_3s)
tm.assert_frame_equal(frame.resample("3s").mean(), frame_3s)
tm.assert_frame_equal(frame.resample("60s").mean(), frame_3s)
def test_resample_datetime_values():
# GH 13119
# check that datetime dtype is preserved when NaT values are
# introduced by the resampling
dates = [datetime(2016, 1, 15), datetime(2016, 1, 19)]
df = DataFrame({"timestamp": dates}, index=dates)
exp = Series(
[datetime(2016, 1, 15), pd.NaT, datetime(2016, 1, 19)],
index=date_range("2016-01-15", periods=3, freq="2D"),
name="timestamp",
)
res = df.resample("2D").first()["timestamp"]
tm.assert_series_equal(res, exp)
res = df["timestamp"].resample("2D").first()
tm.assert_series_equal(res, exp)
def test_resample_apply_with_additional_args(series):
# GH 14615
def f(data, add_arg):
return np.mean(data) * add_arg
multiplier = 10
result = series.resample("D").apply(f, multiplier)
expected = series.resample("D").mean().multiply(multiplier)
tm.assert_series_equal(result, expected)
# Testing as kwarg
result = series.resample("D").apply(f, add_arg=multiplier)
expected = series.resample("D").mean().multiply(multiplier)
tm.assert_series_equal(result, expected)
# Testing dataframe
df = pd.DataFrame({"A": 1, "B": 2}, index=pd.date_range("2017", periods=10))
result = df.groupby("A").resample("D").agg(f, multiplier)
expected = df.groupby("A").resample("D").mean().multiply(multiplier)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("k", [1, 2, 3])
@pytest.mark.parametrize(
"n1, freq1, n2, freq2",
[
(30, "S", 0.5, "Min"),
(60, "S", 1, "Min"),
(3600, "S", 1, "H"),
(60, "Min", 1, "H"),
(21600, "S", 0.25, "D"),
(86400, "S", 1, "D"),
(43200, "S", 0.5, "D"),
(1440, "Min", 1, "D"),
(12, "H", 0.5, "D"),
(24, "H", 1, "D"),
],
)
def test_resample_equivalent_offsets(n1, freq1, n2, freq2, k):
# GH 24127
n1_ = n1 * k
n2_ = n2 * k
s = pd.Series(
0, index=pd.date_range("19910905 13:00", "19911005 07:00", freq=freq1)
)
s = s + range(len(s))
result1 = s.resample(str(n1_) + freq1).mean()
result2 = s.resample(str(n2_) + freq2).mean()
tm.assert_series_equal(result1, result2)
@pytest.mark.parametrize(
"first,last,freq,exp_first,exp_last",
[
("19910905", "19920406", "D", "19910905", "19920407"),
("19910905 00:00", "19920406 06:00", "D", "19910905", "19920407"),
("19910905 06:00", "19920406 06:00", "H", "19910905 06:00", "19920406 07:00"),
("19910906", "19920406", "M", "19910831", "19920430"),
("19910831", "19920430", "M", "19910831", "19920531"),
("1991-08", "1992-04", "M", "19910831", "19920531"),
],
)
def test_get_timestamp_range_edges(first, last, freq, exp_first, exp_last):
first = pd.Period(first)
first = first.to_timestamp(first.freq)
last = pd.Period(last)
last = last.to_timestamp(last.freq)
exp_first = pd.Timestamp(exp_first, freq=freq)
exp_last = pd.Timestamp(exp_last, freq=freq)
freq = pd.tseries.frequencies.to_offset(freq)
result = _get_timestamp_range_edges(first, last, freq)
expected = (exp_first, exp_last)
assert result == expected
def test_resample_apply_product():
# GH 5586
index = date_range(start="2012-01-31", freq="M", periods=12)
ts = Series(range(12), index=index)
df = DataFrame(dict(A=ts, B=ts + 2))
result = df.resample("Q").apply(np.product)
expected = DataFrame(
np.array([[0, 24], [60, 210], [336, 720], [990, 1716]], dtype=np.int64),
index=DatetimeIndex(
["2012-03-31", "2012-06-30", "2012-09-30", "2012-12-31"], freq="Q-DEC"
),
columns=["A", "B"],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"first,last,freq_in,freq_out,exp_last",
[
(
"2020-03-28",
"2020-03-31",
"D",
"24H",
"2020-03-30 01:00",
), # includes transition into DST
(
"2020-03-28",
"2020-10-27",
"D",
"24H",
"2020-10-27 00:00",
), # includes transition into and out of DST
(
"2020-10-25",
"2020-10-27",
"D",
"24H",
"2020-10-26 23:00",
), # includes transition out of DST
(
"2020-03-28",
"2020-03-31",
"24H",
"D",
"2020-03-30 00:00",
), # same as above, but from 24H to D
("2020-03-28", "2020-10-27", "24H", "D", "2020-10-27 00:00"),
("2020-10-25", "2020-10-27", "24H", "D", "2020-10-26 00:00"),
],
)
def test_resample_calendar_day_with_dst(
first: str, last: str, freq_in: str, freq_out: str, exp_last: str
):
# GH 35219
ts = pd.Series(1.0, pd.date_range(first, last, freq=freq_in, tz="Europe/Amsterdam"))
result = ts.resample(freq_out).pad()
expected = pd.Series(
1.0, pd.date_range(first, exp_last, freq=freq_out, tz="Europe/Amsterdam")
)
tm.assert_series_equal(result, expected)
| bsd-3-clause | -6,043,365,303,713,031,000 | 32.10341 | 88 | 0.59103 | false |
Gabotero/GNURadioNext | gr-vocoder/python/qa_alaw_vocoder.py | 1 | 1619 | #!/usr/bin/env python
#
# Copyright 2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import vocoder_swig as vocoder
import blocks_swig as blocks
class test_alaw_vocoder (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block()
def tearDown (self):
self.tb = None
def test001_module_load (self):
data = (8,24,40,56,72,88,104,120,136,152,168,184,
200,216,232,248,264,280,296,312,328,344)
src = blocks.vector_source_s(data)
enc = vocoder.alaw_encode_sb()
dec = vocoder.alaw_decode_bs()
snk = blocks.vector_sink_s()
self.tb.connect(src, enc, dec, snk)
self.tb.run()
actual_result = snk.data()
self.assertEqual(data, actual_result)
if __name__ == '__main__':
gr_unittest.run(test_alaw_vocoder, "test_alaw_vocoder.xml")
| gpl-3.0 | -122,898,077,962,167,760 | 32.040816 | 70 | 0.678196 | false |
emilroz/openmicroscopy | components/tools/OmeroPy/src/omero/util/populate_metadata.py | 1 | 21324 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Populate bulk metadata tables from delimited text files.
"""
#
# Copyright (C) 2011-2014 University of Dundee. All rights reserved.
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import tempfile
import logging
import time
import sys
import csv
import re
from threading import Thread
from StringIO import StringIO
from getpass import getpass
from getopt import getopt, GetoptError
from Queue import Queue
import omero.clients
from omero.rtypes import rdouble, rstring, rint
from omero.model import DatasetAnnotationLink, DatasetI, FileAnnotationI, \
OriginalFileI, PlateI, PlateAnnotationLinkI, ScreenI, \
ScreenAnnotationLinkI
from omero.grid import ImageColumn, LongColumn, PlateColumn, StringColumn, \
WellColumn
from omero.util.temp_files import create_path, remove_path
from omero import client
from populate_roi import ThreadPool
from xml.etree.cElementTree import XML, Element, SubElement, ElementTree, dump, iterparse
log = logging.getLogger("omero.util.populate_metadata")
def usage(error):
"""Prints usage so that we don't have to. :)"""
cmd = sys.argv[0]
print """%s
Usage: %s [options] <target_object> <file>
Runs metadata population code for a given object.
Options:
-s OMERO hostname to use [defaults to "localhost"]
-p OMERO port to use [defaults to 4064]
-u OMERO username to use
-w OMERO password
-k OMERO session key to use
-i Dump measurement information and exit (no population)
-d Print debug statements
Examples:
%s -s localhost -p 14064 -u bob Plate:6 metadata.csv
Report bugs to [email protected]""" % (error, cmd, cmd)
sys.exit(2)
# Global thread pool for use by workers
thread_pool = None
# Special column names we may add depending on the data type
PLATE_NAME_COLUMN = 'Plate Name'
WELL_NAME_COLUMN = 'Well Name'
class Skip(object):
"""Instance to denote a row skip request."""
pass
class MetadataError(Exception):
"""
Raised by the metadata parsing context when an error condition
is reached.
"""
pass
class HeaderResolver(object):
"""
Header resolver for known header names which is responsible for creating
the column set for the OMERO.tables instance.
"""
DEFAULT_COLUMN_SIZE = 1
plate_keys = {
'well': WellColumn,
'field': ImageColumn,
'row': LongColumn,
'column': LongColumn,
'wellsample': ImageColumn
}
screen_keys = dict({
'plate': PlateColumn,
}, **plate_keys)
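    # Illustrative sketch (hypothetical headers, not taken from this module):
    #
    #     HeaderResolver(screen, ['Plate', 'Well', 'Gene Symbol']).create_columns()
    #
    # would yield a PlateColumn, a WellColumn and a StringColumn for a Screen
    # target, after which the loops below append the derived
    # 'Plate Name' / 'Well Name' string columns.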
def __init__(self, target_object, headers):
self.target_object = target_object
self.headers = [v.replace('/', '\\') for v in headers]
self.headers_as_lower = [v.lower() for v in self.headers]
def create_columns(self):
target_class = self.target_object.__class__
target_id = self.target_object.id.val
if ScreenI is target_class:
log.debug('Creating columns for Screen:%d' % target_id)
return self.create_columns_screen()
if PlateI is target_class:
log.debug('Creating columns for Plate:%d' % target_id)
return self.create_columns_plate()
if DatasetI is target_class:
log.debug('Creating columns for Dataset:%d' % target_id)
return self.create_columns_dataset()
raise MetadataError('Unsupported target object class: %s' \
% target_class)
def create_columns_screen(self):
columns = list()
for i, header_as_lower in enumerate(self.headers_as_lower):
name = self.headers[i]
try:
column = self.screen_keys[header_as_lower](name, '', list())
except KeyError:
column = StringColumn(name, '', self.DEFAULT_COLUMN_SIZE,
list())
columns.append(column)
for column in columns:
if column.__class__ is PlateColumn:
columns.append(StringColumn(PLATE_NAME_COLUMN, '',
self.DEFAULT_COLUMN_SIZE, list()))
if column.__class__ is WellColumn:
columns.append(StringColumn(WELL_NAME_COLUMN, '',
self.DEFAULT_COLUMN_SIZE, list()))
return columns
def create_columns_plate(self):
columns = list()
for i, header_as_lower in enumerate(self.headers_as_lower):
name = self.headers[i]
try:
column = self.plate_keys[header_as_lower](name, '', list())
except KeyError:
column = StringColumn(name, '', self.DEFAULT_COLUMN_SIZE,
list())
columns.append(column)
for column in columns:
if column.__class__ is PlateColumn:
columns.append(StringColumn(PLATE_NAME_COLUMN, '',
self.DEFAULT_COLUMN_SIZE, list()))
if column.__class__ is WellColumn:
columns.append(StringColumn(WELL_NAME_COLUMN, '',
self.DEFAULT_COLUMN_SIZE, list()))
return columns
def create_columns_dataset(self):
raise Exception('To be implemented!')
class ValueResolver(object):
"""
Value resolver for column types which is responsible for filling up
non-metadata columns with their OMERO data model identifiers.
"""
AS_ALPHA = [chr(v) for v in range(97, 122 + 1)] # a-z
WELL_REGEX = re.compile(r'^([a-zA-Z]+)(\d+)$')
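    # WELL_REGEX splits a well label such as 'C11' into the row letter 'C' and
    # the 1-based column '11'; resolve() lower-cases the row and strips leading
    # zeros from the column so it matches the keys built by parse_plate().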
def __init__(self, client, target_object):
self.client = client
self.target_object = target_object
self.target_class = self.target_object.__class__
if PlateI is self.target_class:
return self.load_plate()
if DatasetI is self.target_class:
return self.load_dataset()
if ScreenI is self.target_class:
return self.load_screen()
raise MetadataError('Unsupported target object class: %s' \
                            % self.target_class)
def load_screen(self):
query_service = self.client.getSession().getQueryService()
parameters = omero.sys.ParametersI()
parameters.addId(self.target_object.id.val)
log.debug('Loading Screen:%d' % self.target_object.id.val)
self.target_object = query_service.findByQuery(
'select s from Screen as s '
'join fetch s.plateLinks as p_link '
'join fetch p_link.child as p '
'where s.id = :id', parameters, {'omero.group': '-1'})
if self.target_object is None:
raise MetadataError('Could not find target object!')
self.wells_by_location = dict()
self.wells_by_id = dict()
self.plates_by_name = dict()
self.plates_by_id = dict()
for plate in (l.child for l in self.target_object.copyPlateLinks()):
parameters = omero.sys.ParametersI()
parameters.addId(plate.id.val)
plate = query_service.findByQuery(
'select p from Plate as p '
'join fetch p.wells as w '
'join fetch w.wellSamples as ws '
'where p.id = :id', parameters, {'omero.group': '-1'})
self.plates_by_name[plate.name.val] = plate
self.plates_by_id[plate.id.val] = plate
wells_by_location = dict()
wells_by_id = dict()
self.wells_by_location[plate.name.val] = wells_by_location
self.wells_by_id[plate.id.val] = wells_by_id
self.parse_plate(plate, wells_by_location, wells_by_id)
def load_plate(self):
query_service = self.client.getSession().getQueryService()
parameters = omero.sys.ParametersI()
parameters.addId(self.target_object.id.val)
log.debug('Loading Plate:%d' % self.target_object.id.val)
self.target_object = query_service.findByQuery(
'select p from Plate as p '
'join fetch p.wells as w '
'join fetch w.wellSamples as ws '
'where p.id = :id', parameters, {'omero.group': '-1'})
if self.target_object is None:
raise MetadataError('Could not find target object!')
self.wells_by_location = dict()
self.wells_by_id = dict()
wells_by_location = dict()
wells_by_id = dict()
self.wells_by_location[self.target_object.name.val] = wells_by_location
self.wells_by_id[self.target_object.id.val] = wells_by_id
self.parse_plate(self.target_object, wells_by_location, wells_by_id)
def parse_plate(self, plate, wells_by_location, wells_by_id):
# TODO: This should use the PlateNamingConvention. We're assuming rows
# as alpha and columns as numeric.
for well in plate.copyWells():
wells_by_id[well.id.val] = well
row = well.row.val
# 0 offsetted is not what people use in reality
column = str(well.column.val + 1)
try:
columns = wells_by_location[self.AS_ALPHA[row]]
except KeyError:
wells_by_location[self.AS_ALPHA[row]] = columns = dict()
columns[column] = well
log.debug('Completed parsing plate: %s' % plate.name.val)
for row in wells_by_location:
log.debug('%s: %r' % (row, wells_by_location[row].keys()))
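    # Worked example of the mapping above (illustrative only, based on the
    # loop in parse_plate): a well with row=1 and column=2 is stored as
    # wells_by_location['b']['3'], because rows go through AS_ALPHA ('a'-'z')
    # and the 0-based column index is turned into the 1-based string that
    # people actually use.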
def load_dataset(self):
raise Exception('To be implemented!')
def resolve(self, column, value, row):
column_class = column.__class__
column_as_lower = column.name.lower()
if WellColumn is column_class:
m = self.WELL_REGEX.match(value)
if m is None or len(m.groups()) != 2:
raise MetadataError(
'Cannot parse well identifier "%s" from row: %r' % \
(value, [o[1] for o in row]))
plate_row = m.group(1).lower()
plate_column = str(long(m.group(2)))
if len(self.wells_by_location) == 1:
wells_by_location = self.wells_by_location.values()[0]
log.debug('Parsed "%s" row: %s column: %s' % \
(value, plate_row, plate_column))
else:
for column, plate in row:
if column.__class__ is PlateColumn:
wells_by_location = self.wells_by_location[plate]
log.debug('Parsed "%s" row: %s column: %s plate: %s' % \
(value, plate_row, plate_column, plate))
break
try:
return wells_by_location[plate_row][plate_column].id.val
except KeyError:
log.debug('Row: %s Column: %s not found!' % \
(plate_row, plate_column))
return -1L
if PlateColumn is column_class:
try:
return self.plates_by_name[value].id.val
except KeyError:
log.warn('Screen is missing plate: %s' % value)
return Skip()
if column_as_lower in ('row', 'column') \
and column_class is LongColumn:
try:
# The value is not 0 offsetted
return long(value) - 1
except ValueError:
return long(self.AS_ALPHA.index(value.lower()))
if StringColumn is column_class:
return value
raise MetadataError('Unsupported column class: %s' % column_class)
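    # Resolution sketch (values illustrative): for a WellColumn value "B12",
    # WELL_REGEX yields plate_row 'b' and plate_column '12', and the well id
    # is looked up as wells_by_location['b']['12'].id.val. Unknown well
    # locations resolve to -1 and unknown plate names resolve to Skip(), as
    # handled above.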
class ParsingContext(object):
"""Generic parsing context for CSV files."""
def __init__(self, client, target_object, file):
self.client = client
self.target_object = target_object
self.file = file
self.value_resolver = ValueResolver(self.client, self.target_object)
def create_annotation_link(self):
self.target_class = self.target_object.__class__
if ScreenI is self.target_class:
return ScreenAnnotationLinkI()
if PlateI is self.target_class:
return PlateAnnotationLinkI()
if DatasetI is self.target_class:
return DatasetAnnotationLinkI()
raise MetadataError('Unsupported target object class: %s' \
            % self.target_class)
def get_column_widths(self):
widths = list()
for column in self.columns:
try:
widths.append(column.size)
except AttributeError:
widths.append(None)
return widths
def parse_from_handle(self, data):
rows = list(csv.reader(data, delimiter=','))
log.debug('Header: %r' % rows[0])
header_resolver = HeaderResolver(self.target_object, rows[0])
self.columns = header_resolver.create_columns()
log.debug('Columns: %r' % self.columns)
self.populate(rows[1:])
self.post_process()
log.debug('Column widths: %r' % self.get_column_widths())
log.debug('Columns: %r' % \
[(o.name, len(o.values)) for o in self.columns])
# Paranoid debugging
#for i in range(len(self.columns[0].values)):
# values = list()
# for column in self.columns:
# values.append(column.values[i])
# log.debug('Row: %r' % values)
def parse(self):
data = open(self.file, 'U')
try:
return self.parse_from_handle(data)
finally:
data.close()
def populate(self, rows):
value = None
for row in rows:
values = list()
row = [(self.columns[i], value) for i, value in enumerate(row)]
for column, original_value in row:
value = self.value_resolver.resolve(column, original_value, row)
if value.__class__ is Skip:
break
values.append(value)
try:
if value.__class__ is not long:
column.size = max(column.size, len(value))
except TypeError:
log.error('Original value "%s" now "%s" of bad type!' % \
(original_value, value))
raise
if value.__class__ is not Skip:
values.reverse()
for column in self.columns:
if column.name in (PLATE_NAME_COLUMN, WELL_NAME_COLUMN):
continue
try:
column.values.append(values.pop())
except IndexError:
log.error('Column %s has no values to pop.' % \
column.name)
raise
def post_process(self):
columns_by_name = dict()
plate_column = None
well_column = None
well_name_column = None
plate_name_column = None
for column in self.columns:
columns_by_name[column.name] = column
if column.__class__ is PlateColumn:
plate_column = column
elif column.__class__ is WellColumn:
well_column = column
elif column.name == WELL_NAME_COLUMN:
well_name_column = column
elif column.name == PLATE_NAME_COLUMN:
plate_name_column = column
if well_name_column is None and plate_name_column is None:
            log.info('Nothing to do during post processing.')
            return
for i in range(0, len(self.columns[0].values)):
if well_name_column is not None:
if PlateI is self.value_resolver.target_class:
plate = self.value_resolver.target_object.id.val
elif ScreenI is self.value_resolver.target_class:
plate = columns_by_name['Plate'].values[i]
try:
well = self.value_resolver.wells_by_id[plate]
well = well[well_column.values[i]]
row = well.row.val
col = well.column.val
except KeyError:
log.error('Missing row or column for well name population!')
raise
row = self.value_resolver.AS_ALPHA[row]
v = '%s%d' % (row, col + 1)
well_name_column.size = max(well_name_column.size, len(v))
well_name_column.values.append(v)
else:
log.info('Missing well name column, skipping.')
if plate_name_column is not None:
plate = columns_by_name['Plate'].values[i]
plate = self.value_resolver.plates_by_id[plate]
v = plate.name.val
plate_name_column.size = max(plate_name_column.size, len(v))
plate_name_column.values.append(v)
else:
log.info('Missing plate name column, skipping.')
def write_to_omero(self):
sf = self.client.getSession()
group = str(self.value_resolver.target_object.details.group.id.val)
sr = sf.sharedResources()
update_service = sf.getUpdateService()
name = 'bulk_annotations'
table = sr.newTable(1, name, {'omero.group': group})
if table is None:
raise MetadataError(
"Unable to create table: %s" % name)
original_file = table.getOriginalFile()
log.info('Created new table OriginalFile:%d' % original_file.id.val)
table.initialize(self.columns)
log.info('Table initialized with %d columns.' % (len(self.columns)))
table.addData(self.columns)
log.info('Added data column data.')
table.close()
file_annotation = FileAnnotationI()
file_annotation.ns = \
rstring('openmicroscopy.org/omero/bulk_annotations')
file_annotation.description = rstring(name)
file_annotation.file = OriginalFileI(original_file.id.val, False)
link = self.create_annotation_link()
link.parent = self.target_object
link.child = file_annotation
update_service.saveObject(link, {'omero.group': group})
def parse_target_object(target_object):
type, id = target_object.split(':')
if 'Dataset' == type:
return DatasetI(long(id), False)
if 'Plate' == type:
return PlateI(long(id), False)
if 'Screen' == type:
return ScreenI(long(id), False)
raise ValueError('Unsupported target object: %s' % target_object)
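# Examples (illustrative): parse_target_object('Plate:27') returns
# PlateI(27L, False) and parse_target_object('Screen:3') returns
# ScreenI(3L, False); any other prefix raises ValueError.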
if __name__ == "__main__":
try:
options, args = getopt(sys.argv[1:], "s:p:u:w:k:id")
except GetoptError, (msg, opt):
usage(msg)
try:
target_object, file = args
target_object = parse_target_object(target_object)
except ValueError:
        usage('Target object and file must be specified!')
username = None
password = None
hostname = 'localhost'
port = 4064 # SSL
info = False
session_key = None
logging_level = logging.INFO
thread_count = 1
for option, argument in options:
if option == "-u":
username = argument
if option == "-w":
password = argument
if option == "-s":
hostname = argument
if option == "-p":
port = int(argument)
if option == "-i":
info = True
if option == "-k":
session_key = argument
if option == "-d":
logging_level = logging.DEBUG
if option == "-t":
thread_count = int(argument)
if session_key is None and username is None:
usage("Username must be specified!")
if session_key is None and hostname is None:
usage("Host name must be specified!")
if session_key is None and password is None:
password = getpass()
logging.basicConfig(level = logging_level)
client = client(hostname, port)
client.setAgent("OMERO.populate_metadata")
client.enableKeepAlive(60)
try:
if session_key is not None:
client.joinSession(session_key)
else:
client.createSession(username, password)
log.debug('Creating pool of %d threads' % thread_count)
thread_pool = ThreadPool(thread_count)
ctx = ParsingContext(client, target_object, file)
ctx.parse()
if not info:
ctx.write_to_omero()
finally:
pass
client.closeSession()
| gpl-2.0 | -6,591,823,371,403,086,000 | 38.343173 | 89 | 0.570718 | false |
indictools/grammar | common.py | 1 | 7812 | import sys,os
import time
from os import walk, path
from os.path import splitext, join
from json import dumps
from config import *
import signal
import subprocess
import re
import shutil
import glob
from flask import *
def wl_batchprocess(args, cmd, func):
wloads = args.get('wlnames').split(',')
print "In wl" + cmd
print dumps(wloads)
return (make_response(dumps(func(args))))
def urlize(pathsuffix, text = None, newtab = True):
tabclause = ""
if newtab:
tabclause = 'target="_blank"'
if not text:
text = pathsuffix
return '<a href="/workloads/taillog/15/' + pathsuffix + '" ' + tabclause + '>' + text + '</a>';
def get_all_jsons(path, pattern):
"""
path - where to begin folder scan
"""
pathprefix = repodir()
selected_files = []
print pathprefix
full_path=None
for root, dirs, files in os.walk(path, followlinks=True):
for f in files:
full_path = join(root, f)
ext = splitext(f)[1]
if ext != ".json" :
continue
wpath = full_path.replace(pathprefix + "/", "")
#print "wpath:",wpath
if pattern and not re.search(pattern, full_path):
continue
selected_files.append(wpath)
return selected_files
subprocs = set()
def signal_children(subprocs, signum):
sent_signal = False
for proc in subprocs:
if proc.poll() is None:
sent_signal = True
print "wlwizard child: Killing " + str(proc.pid)
try:
os.killpg(os.getpgid(proc.pid), signal.SIGINT)
except Exception as e:
print e
return False
#proc.send_signal(signum)
return sent_signal
def handle_signal(signum, frame):
print "wlwizard child: caught signal " + str(signum) + " .."
try:
while signal_children(subprocs, signum) == True:
print "wlwizard handler: sleeping"
time.sleep(10)
except Exception as e:
print "wlwizard handler: ", e
def fork_work(wloadname, cmdname, func, parms = {}):
#workload_dirpath = pubroot()+'/'
wdir = join(repodir(), wloadname)
createdir(wdir) #create workload-directory inside parsed folder
logfile = join(wdir, cmdname + "-log.txt")
pidfile = join(wdir, cmdname + ".pid")
print "pidfile:",pidfile
pid = os.fork()
if pid == 0:
# Child
os.setsid()
mypid = os.getpid()
# try:
# Flask(__name__).stop()
# except Exception as e:
# print "Error closing server socket:", e
with open(logfile, 'w', 1) as f:
# Redirect stdout and stderr to logfile
sys.stdout = sys.stderr = f
ret = 1
with open(pidfile, 'w') as f:
f.write(str(mypid))
try :
os.chdir(wdir)
ret = func(wdir, wloadname, cmdname, parms)
except Exception as e:
print "wlwizard fork_child: ", e
print "wlwizard fork_child: removing pid file" + join(wdir, cmdname + ".pid")
print "wlwizard: Workdir: ",wdir
os.remove(join(wdir, cmdname + ".pid"))
print "wlwizard: in child, exiting"
os._exit(ret)
# Parent
return 'Started. ' + urlize(join(wloadname, cmdname + "-log.txt"), \
"Click for details", True)
def dummy_work(wdir, wloadname, cmdname, parms):
# Do the work of the child process here
createdir(join(wdir, "parsed"))#creating directory called parsed
    print "IP-addrs:", parms.get('ipaddrs')
print "in child, sleeping"
time.sleep(10000)
return 0
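# Typical call of fork_work() above (sketch; the workload name and parms dict
# are hypothetical):
#
#     status = fork_work('wl1', 'parse', do_parse, {'compact': 'on'})
#
# The forked child chdirs into <repodir>/wl1 and runs do_parse() there while
# logging to parse-log.txt, and the parent immediately returns the
# "Started." HTML fragment produced by urlize().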
def do_externalcmd(cmd):
#subprocs = set()
cmdstr = " ".join(cmd)
print cmdstr
signal.signal(signal.SIGINT, handle_signal)
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
signal.signal(signal.SIGHUP, signal.SIG_IGN)
proc = subprocess.Popen(cmd,shell=False, \
preexec_fn=os.setsid, \
close_fds=True, stdout=sys.stdout, stderr=sys.stderr)
subprocs.add(proc)
while proc.poll() is None:
print "wlwizard child: awaiting subprocess to complete ..."
proc.wait()
#signal_children(subprocs, signal.SIGINT)
print "wlwizard child: subprocess ended..."
return 0
def do_parse(wdir, wloadname, cmdname, parms):
rawfiles = glob.glob("raw/*.raw")
cfgfiles = glob.glob("raw/*[pP]rofile*.txt")
objfiles = glob.glob("raw/*obj*graph*.txt")
#try:
cmd = [cmdpath("processdump.pl"), "-o", "."]
if parms.get('compact') == 'on':
cmd.append("-compact")
# if parms.get('nocharts') == 'on':
# cmd.append("-nographs")
# #os._exit(0)
if cfgfiles:
profile = ["-cfg", ','.join(cfgfiles)]
cmd.extend(profile)
elif objfiles:
objgraph = ["-obj", ','.join(objfiles)]
cmd.extend(objgraph)
cmd.extend(rawfiles)
return do_externalcmd(cmd);
def do_capture(wdir, wloadname, cmdname, parms):
createdir(join(wdir, "raw")) # creating raw directory so
# this workload gets listed
cmd = [cmdpath("wlcollect.pl"), "-o", "raw"]
cmd.extend(parms['ipaddrs'])
return do_externalcmd(cmd)
def wlparse(parms):
wloadnames = parms.get('wlnames').split(',')
print "inside wlparse " + ",".join(wloadnames)
response = []
for w in wloadnames:
wdir = join(repodir(), w)
pidfile = join(wdir, "parse.pid")
if os.path.exists(pidfile):
response.append({ "wlname" : w,
"status" : "Parsing in progress; skipped." });
else:
resp = fork_work(w, "parse", do_parse, parms)
print "return:",resp
response.append({ "wlname" : w,
"status" : resp});
return response
def do_stop(w, cmdname, sig=signal.SIGINT):
response = []
pidfile = join(join(repodir(), w), cmdname + ".pid")
print "pid:",pidfile
if os.path.exists(pidfile):
with open(pidfile) as f:
pid = int(f.read())
print "Stopping workload " + cmdname + " of " + w + " (pid " + str(pid) + ") ..."
try:
os.kill(pid, sig)
#os.remove(pidfile)
response.append({ "wlname" : w,
"status" : cmdname + " stopped (process id " + str(pid) + ")"
});
except Exception as e:
print "Error: ", e
print "pidfile path:",pidfile
os.remove(pidfile)
else:
response.append({ "wlname" : w,
"status" : cmdname + " not running." });
return response
def wlcstop(args):
wloadnames = args.get('wlnames').split(',')
print "inside wlstop " + ",".join(wloadnames)
response = []
for w in wloadnames:
response.extend(do_stop(w, "replay"))
response.extend(do_stop(w, "capture"))
response.extend(do_stop(w, "parse"))
#print dumps(response,indent=4)
return response
def wldelete(args):
wloadnames = args.get('wlnames').split(',')
wlcstop(args)
response = []
for w in wloadnames:
print "inside wldelete " + w
wdir = join(repodir(), w)
try:
if os.path.exists(wdir):
print "deleting " + wdir
shutil.rmtree(wdir)
response.append({ "wlname" : w,
"status" : "Success" })
except Exception as e:
print "Error in rmtree " + wdir + ": ", e
response.append({ "wlname" : w, "status" : "Failed: " + str(e) })
#print dumps(response, indent=4)
return response
| gpl-3.0 | -2,206,379,886,968,884,200 | 31.280992 | 99 | 0.549283 | false |
gongghy/checkio_python | Home/The_Most_Wanted_Letter.py | 1 | 1140 | def checkio(text):
text = text.lower()
text = [letter for letter in text if letter.isalpha()]
d = dict.fromkeys(text, 0)
for char in text:
d[char] += 1
value = 0
for item in d.items():
if item[1] > value:
value = item[1]
lesser_keys = []
for item in d.items():
if item[1] < value:
lesser_keys.append(item[0])
for char in lesser_keys:
d.pop(char)
max_keys = list(d.keys())
max_keys.sort()
# replace this for solution
return max_keys[0]
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
assert checkio("Hello World!") == "l", "Hello test"
assert checkio("How do you do?") == "o", "O is most wanted"
assert checkio("One") == "e", "All letter only once."
assert checkio("Oops!") == "o", "Don't forget about lower case."
assert checkio("AAaooo!!!!") == "a", "Only letters."
assert checkio("abe") == "a", "The First."
print("Start the long test")
assert checkio("a" * 9000 + "b" * 1000) == "a", "Long."
print("The local tests are done.")
| mit | 6,739,730,774,419,538,000 | 34.625 | 85 | 0.566667 | false |
sassoftware/rbuild | plugins/buildplatform.py | 1 | 1623 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from rbuild import pluginapi
from rbuild.productstore.decorators import requiresProduct
from rbuild.pluginapi import command
class BuildPlatformCommand(command.BaseCommand):
help = 'Create a platform usable by others from this product'
#pylint: disable-msg=R0201,R0903
# could be a function, and too few public methods
def runCommand(self, handle, _, args):
# no allowed parameters
self.requireParameters(args)
handle.BuildPlatform.buildPlatform()
class BuildPlatform(pluginapi.Plugin):
name = 'buildplatform'
def initialize(self):
self.handle.Commands.getCommandClass('build').registerSubCommand(
'platform', BuildPlatformCommand)
@requiresProduct
def buildPlatform(self):
conaryClient = self.handle.facade.conary._getConaryClient()
self.handle.product.savePlatformToRepository(conaryClient)
self.handle.productStore.checkoutPlatform()
self.handle.ui.info('New platform definition created.')
| apache-2.0 | 2,555,375,464,297,706,000 | 34.282609 | 74 | 0.726433 | false |
holinnn/lupin | lupin/validators_combination.py | 1 | 2335 | from .validators import Validator
from .errors import ValidationError
class ValidatorsNullCombination(object):
"""Used as a default validators combination when a field has no
validators.
"""
def __call__(self, *args, **kwargs):
"""Null combination does nothing"""
def __and__(self, other):
return ValidatorsAndCombination([other])
def __or__(self, other):
return ValidatorsOrCombination([other])
class ValidatorsAndCombination(object):
"""Represents an & combination of validators.
    It raises an error if at least one validator is invalid.
"""
def __init__(self, validators):
"""
Args:
            validators list<Validator>: a list of validators
"""
self._validators = validators
def __call__(self, *args, **kwargs):
for validator in self._validators:
validator(*args, **kwargs)
def __and__(self, other):
if isinstance(other, ValidatorsAndCombination):
self._validators.extend(other._validators)
return self
elif isinstance(other, Validator):
self._validators.append(other)
return self
return ValidatorsAndCombination([self, other])
def __or__(self, other):
return ValidatorsOrCombination([self, other])
class ValidatorsOrCombination(object):
"""Represents an | combination of validators.
    It raises an error only if all validators are invalid.
"""
def __init__(self, validators):
"""
Args:
            validators list<Validator>: a list of validators
"""
self._validators = validators
def __call__(self, *args, **kwargs):
error = None
for validator in self._validators:
try:
validator(*args, **kwargs)
return
except ValidationError as err:
error = err
raise error
def __and__(self, other):
return ValidatorsAndCombination([self, other])
def __or__(self, other):
if isinstance(other, ValidatorsOrCombination):
self._validators.extend(other._validators)
return self
elif isinstance(other, Validator):
self._validators.append(other)
return self
return ValidatorsOrCombination([self, other])
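# Usage sketch (required_validator and match_validator are hypothetical
# Validator instances; the arguments given to a combination are forwarded
# unchanged to every member):
#
#     both = ValidatorsAndCombination([required_validator, match_validator])
#     either = ValidatorsOrCombination([required_validator, match_validator])
#     both(value)    # raises ValidationError as soon as one member fails
#     either(value)  # raises only if every member fails
#
# The & and | operators on these classes build the same combinations
# incrementally.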
| mit | -41,909,928,816,466,660 | 27.47561 | 67 | 0.600428 | false |
cmars/pystdf | pystdf/IO.py | 1 | 7630 | #
# PySTDF - The Pythonic STDF Parser
# Copyright (C) 2006 Casey Marshall
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import sys
import struct
import re
from pystdf.Types import *
from pystdf import V4
from pystdf.Pipeline import DataSource
def appendFieldParser(fn, action):
"""Append a field parsing function to a record parsing function.
This is used to build record parsing functions based on the record type specification."""
def newRecordParser(*args):
fields = fn(*args)
try:
fields.append(action(*args))
except EndOfRecordException: pass
return fields
return newRecordParser
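# Composition sketch (parse_u4 and parse_cn stand for field actions such as
# those returned by Parser.getFieldParser below):
#
#     fn = lambda self, header, fields: fields
#     fn = appendFieldParser(fn, parse_u4)
#     fn = appendFieldParser(fn, parse_cn)
#
# Each wrapper appends one more decoded field to the list (stopping quietly
# on EndOfRecordException), so the final fn parses an entire record type,
# which is exactly how createRecordParser builds its parsers.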
class Parser(DataSource):
def readAndUnpack(self, header, fmt):
size = struct.calcsize(fmt)
if (size > header.len):
self.inp.read(header.len)
header.len = 0
raise EndOfRecordException()
buf = self.inp.read(size)
if len(buf) == 0:
self.eof = 1
raise EofException()
header.len -= len(buf)
val,=struct.unpack(self.endian + fmt, buf)
if isinstance(val,bytes):
return val.decode("ascii")
else:
return val
def readAndUnpackDirect(self, fmt):
size = struct.calcsize(fmt)
buf = self.inp.read(size)
if len(buf) == 0:
self.eof = 1
raise EofException()
val,=struct.unpack(self.endian + fmt, buf)
return val
def readField(self, header, stdfFmt):
return self.readAndUnpack(header, packFormatMap[stdfFmt])
def readFieldDirect(self, stdfFmt):
return self.readAndUnpackDirect(packFormatMap[stdfFmt])
def readCn(self, header):
if header.len == 0:
raise EndOfRecordException()
slen = self.readField(header, "U1")
if slen > header.len:
self.inp.read(header.len)
header.len = 0
raise EndOfRecordException()
if slen == 0:
return ""
buf = self.inp.read(slen);
if len(buf) == 0:
self.eof = 1
raise EofException()
header.len -= len(buf)
val,=struct.unpack(str(slen) + "s", buf)
return val.decode("ascii")
def readBn(self, header):
blen = self.readField(header, "U1")
bn = []
for i in range(0, blen):
bn.append(self.readField(header, "B1"))
return bn
def readDn(self, header):
dbitlen = self.readField(header, "U2")
dlen = dbitlen / 8
if dbitlen % 8 > 0:
dlen+=1
dn = []
for i in range(0, int(dlen)):
dn.append(self.readField(header, "B1"))
return dn
def readVn(self, header):
vlen = self.readField(header, "U2")
vn = []
for i in range(0, vlen):
fldtype = self.readField(header, "B1")
if fldtype in self.vnMap:
vn.append(self.vnMap[fldtype](header))
return vn
def readArray(self, header, indexValue, stdfFmt):
if (stdfFmt == 'N1'):
self.readArray(header, indexValue/2+indexValue%2, 'U1')
return
arr = []
for i in range(int(indexValue)):
arr.append(self.unpackMap[stdfFmt](header, stdfFmt))
return arr
def readHeader(self):
hdr = RecordHeader()
hdr.len = self.readFieldDirect("U2")
hdr.typ = self.readFieldDirect("U1")
hdr.sub = self.readFieldDirect("U1")
return hdr
def __detectEndian(self):
self.eof = 0
header = self.readHeader()
if header.typ != 0 and header.sub != 10:
raise InitialSequenceException()
cpuType = self.readFieldDirect("U1")
if self.reopen_fn:
self.inp = self.reopen_fn()
else:
self.inp.seek(0)
if cpuType == 2:
return '<'
else:
return '>'
def header(self, header): pass
def parse_records(self, count=0):
i = 0
self.eof = 0
fields = None
try:
while self.eof==0:
header = self.readHeader()
self.header(header)
if (header.typ, header.sub) in self.recordMap:
recType = self.recordMap[(header.typ, header.sub)]
recParser = self.recordParsers[(header.typ, header.sub)]
fields = recParser(self, header, [])
if len(fields) < len(recType.columnNames):
fields += [None] * (len(recType.columnNames) - len(fields))
self.send((recType, fields))
else:
self.inp.read(header.len)
if count:
i += 1
if i >= count: break
except EofException: pass
def auto_detect_endian(self):
if self.inp.tell() == 0:
self.endian = '@'
self.endian = self.__detectEndian()
def parse(self, count=0):
self.begin()
try:
self.auto_detect_endian()
self.parse_records(count)
self.complete()
except Exception as exception:
self.cancel(exception)
raise
def getFieldParser(self, fieldType):
if (fieldType.startswith("k")):
fieldIndex, arrayFmt = re.match('k(\d+)([A-Z][a-z0-9]+)', fieldType).groups()
return lambda self, header, fields: self.readArray(header, fields[int(fieldIndex)], arrayFmt)
else:
parseFn = self.unpackMap[fieldType]
return lambda self, header, fields: parseFn(header, fieldType)
def createRecordParser(self, recType):
fn = lambda self, header, fields: fields
for stdfType in recType.fieldStdfTypes:
fn = appendFieldParser(fn, self.getFieldParser(stdfType))
return fn
def __init__(self, recTypes=V4.records, inp=sys.stdin, reopen_fn=None, endian=None):
DataSource.__init__(self, ['header']);
self.eof = 1
self.recTypes = set(recTypes)
self.inp = inp
self.reopen_fn = reopen_fn
self.endian = endian
self.recordMap = dict(
[ ( (recType.typ, recType.sub), recType )
for recType in recTypes ])
self.unpackMap = {
"C1": self.readField,
"B1": self.readField,
"U1": self.readField,
"U2": self.readField,
"U4": self.readField,
"U8": self.readField,
"I1": self.readField,
"I2": self.readField,
"I4": self.readField,
"I8": self.readField,
"R4": self.readField,
"R8": self.readField,
"Cn": lambda header, fmt: self.readCn(header),
"Bn": lambda header, fmt: self.readBn(header),
"Dn": lambda header, fmt: self.readDn(header),
"Vn": lambda header, fmt: self.readVn(header)
}
self.recordParsers = dict(
[ ( (recType.typ, recType.sub), self.createRecordParser(recType) )
for recType in recTypes ])
self.vnMap = {
0: lambda header: self.inp.read(header, 1),
1: lambda header: self.readField(header, "U1"),
2: lambda header: self.readField(header, "U2"),
3: lambda header: self.readField(header, "U4"),
4: lambda header: self.readField(header, "I1"),
5: lambda header: self.readField(header, "I2"),
6: lambda header: self.readField(header, "I4"),
7: lambda header: self.readField(header, "R4"),
8: lambda header: self.readField(header, "R8"),
10: lambda header: self.readCn(header),
11: lambda header: self.readBn(header),
12: lambda header: self.readDn(header),
13: lambda header: self.readField(header, "U1")
}
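# Minimal usage sketch. This assumes the sink interface inherited from
# pystdf.Pipeline.DataSource (e.g. addSink()), which receives the
# (recType, fields) tuples sent by parse_records(); 'my_sink' is a
# hypothetical sink object.
#
#     with open('example.stdf', 'rb') as f:
#         parser = Parser(inp=f)
#         parser.addSink(my_sink)
#         parser.parse()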
| gpl-2.0 | -556,588,033,002,586,940 | 29.277778 | 99 | 0.634076 | false |
sebastic/NLExtract | bag/src/postgresdb.py | 1 | 6307 | __author__ = "Matthijs van der Deijl"
__date__ = "$Dec 09, 2009 00:00:01 AM$"
"""
Name: postgresdb.py
Description: Generic functions for database use within BAG Extract+
Author: Milo van der Linden, Just van den Broecke, Matthijs van der Deijl (original version)
Version: 1.0
- From this version on, this database class is specific to postgres/postgis
Date: 29 Dec 2011
"""
try:
import psycopg2
except ImportError:
raise
from log import Log
from bagconfig import BAGConfig
class Database:
def __init__(self):
        # Read the configuration from the global BAGConfig object
self.config = BAGConfig.config
self.connection = None
def initialiseer(self, bestand):
Log.log.info('Probeer te verbinden...')
self.verbind(True)
Log.log.info('database script uitvoeren...')
try:
script = open(bestand, 'r').read()
self.cursor.execute(script)
self.commit(True)
Log.log.info('script is uitgevoerd')
except psycopg2.DatabaseError as e:
Log.log.fatal("ik krijg deze fout '%s' uit het bestand '%s'" % (str(e), str(bestand)))
def verbind(self, initdb=False):
if self.connection and self.connection.closed == 0:
Log.log.debug("reusing db connection")
return
try:
# Connect using configured parameters
self.connection = psycopg2.connect(
database=self.config.database,
user=self.config.user,
host=self.config.host,
port=self.config.port,
password=self.config.password)
self.cursor = self.connection.cursor()
if initdb:
self.maak_schema()
self.zet_schema()
Log.log.debug("verbonden met de database '%s', schema '%s', connId=%d" % (self.config.database, self.config.schema, self.connection.fileno()))
except Exception as e:
raise (e)
def maak_schema(self):
# Public schema: no further action required
if self.config.schema != 'public':
# A specific schema is required create it and set the search path
self.uitvoeren('''DROP SCHEMA IF EXISTS %s CASCADE;''' % self.config.schema)
self.uitvoeren('''CREATE SCHEMA %s;''' % self.config.schema)
self.commit()
def zet_schema(self):
# Non-public schema set search path
if self.config.schema != 'public':
# Always set search path to our schema
self.uitvoeren('SET search_path TO %s,public' % self.config.schema)
# self.connection.close()
def has_log_actie(self, actie, bestand="n.v.t", error=False):
sql = "SELECT * FROM nlx_bag_log WHERE bestand = %s AND actie = %s AND error = %s"
parameters = (bestand, actie, error)
return self.tx_uitvoeren(sql, parameters)
def log_actie(self, actie, bestand="n.v.t", bericht='geen', error=False):
sql = "INSERT INTO nlx_bag_log(actie, bestand, error, bericht) VALUES (%s, %s, %s, %s)"
parameters = (actie, bestand, error, bericht)
self.tx_uitvoeren(sql, parameters)
def log_meta(self, sleutel, waarde, replace=True):
if replace:
sql = "DELETE FROM nlx_bag_info WHERE sleutel = '%s'" % sleutel
self.tx_uitvoeren(sql)
sql = "INSERT INTO nlx_bag_info(sleutel, waarde) VALUES (%s, %s)"
parameters = (sleutel, waarde)
self.tx_uitvoeren(sql, parameters)
def uitvoeren(self, sql, parameters=None):
try:
if parameters:
self.cursor.execute(sql, parameters)
else:
self.cursor.execute(sql)
# Log.log.debug(self.cursor.statusmessage)
except Exception as e:
Log.log.error("fout %s voor query: %s met parameters %s" % (str(e), str(sql), str(parameters)) )
self.log_actie("uitvoeren_db", "n.v.t", "fout=%s" % str(e), True)
raise
return self.cursor.rowcount
def select(self, sql):
self.verbind()
try:
self.cursor.execute(sql)
rows = self.cursor.fetchall()
self.connection.commit()
return rows
except (psycopg2.Error,), foutmelding:
Log.log.error("*** FOUT *** Kan SQL-statement '%s' niet uitvoeren:\n %s" %(sql, foutmelding))
return []
def file_uitvoeren(self, sqlfile):
self.e = None
try:
Log.log.info("SQL van file = %s uitvoeren..." % sqlfile)
self.verbind()
f = open(sqlfile, 'r')
sql = f.read()
self.uitvoeren(sql)
self.commit(True)
f.close()
Log.log.info("SQL uitgevoerd OK")
except Exception as e:
self.e = e
self.log_actie("uitvoeren_db_file", "n.v.t", "fout=%s" % str(e), True)
Log.log.fatal("ik kan dit script niet uitvoeren vanwege deze fout: %s" % (str(e)))
def tx_uitvoeren(self, sql, parameters=None):
self.e = None
try:
self.verbind()
self.uitvoeren(sql, parameters)
self.commit()
# Log.log.debug(self.cursor.statusmessage)
except Exception as e:
self.e = e
Log.log.error("fout %s voor tx_uitvoeren: %s met parameters %s" % (str(e), str(sql), str(parameters)))
self.close()
return self.cursor.rowcount
def commit(self, close=False):
try:
self.connection.commit()
Log.log.debug("database commit ok connId=%d" % self.connection.fileno())
except Exception as e:
self.e = e
Log.log.error("fout in commit aktie: %s" % str(e))
finally:
if close:
self.close()
def close(self):
try:
connId = self.connection.fileno()
self.connection.close()
Log.log.debug("database connectie %d gesloten" % connId)
except Exception as e:
Log.log.error("fout in close aktie: %s" % str(e))
finally:
self.cursor = None
self.connection = None
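# Usage sketch (assumes BAGConfig.config has been loaded elsewhere; the SQL
# statements are illustrative only):
#
#     db = Database()
#     db.verbind()
#     db.tx_uitvoeren("INSERT INTO nlx_bag_info(sleutel, waarde) VALUES (%s, %s)",
#                     ('voorbeeld', 'waarde'))
#     rows = db.select("SELECT sleutel, waarde FROM nlx_bag_info")
#     db.close()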
| gpl-3.0 | 7,929,668,450,521,569,000 | 35.04 | 154 | 0.562232 | false |
lig/django-actionviews | tests/base/test_views.py | 1 | 7169 | from django.conf.urls import patterns
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import resolve
import pytest
from actionviews.base import TemplateResponseMixin
from actionviews.decorators import require_method
@pytest.fixture(params=list(range(1)))
def TestView(request, monkeypatch):
from actionviews.base import View
class TestView(View):
def do_index(self:''):
return {'result': 'test'}
monkeypatch.setattr(
'django.core.urlresolvers.get_urlconf',
lambda: type(
'urlconf', (), {'urlpatterns': patterns('', *TestView.urls)}))
return [TestView][request.param]
@pytest.fixture
def django_request(request_factory):
return request_factory.get('/')
@pytest.fixture
def django_request_post(request_factory):
return request_factory.post('/')
@pytest.fixture
def django_request_options(request_factory):
return request_factory.options('/')
def test_view(TestView, django_request):
class TestGetView(TestView):
def get(self, request):
return self.action()
view = TestGetView.urls[0].callback
response = view(django_request)
assert response == {'result': 'test'}
def test_decorated_action_on_view(TestView, django_request):
def test_decorator(func):
func.is_decorated = True
return func
class TestGetView(TestView):
def get(self, request):
assert self.action.is_decorated
@test_decorator
def do_index(self):
return {'result': 'test'}
view = TestGetView.urls[0].callback
view(django_request)
def test_default_template_name(TestView, django_request):
class TestGetView(TestView, TemplateResponseMixin):
def get(self, request):
assert self.get_template_names() == ['TestGetView/index.html']
view = TestGetView.urls[0].callback
view(django_request)
def test_template_view(django_request, monkeypatch):
from actionviews.base import TemplateView
class TestTemplateView(TemplateView):
def do_index(self:''):
return {'result': 'test'}
monkeypatch.setattr(
'django.core.urlresolvers.get_urlconf',
lambda: type(
'urlconf', (), {
'urlpatterns': patterns('', *TestTemplateView.urls)}))
view = TestTemplateView.urls[0].callback
response = view(django_request)
assert response.rendered_content == 'test'
def test_method_allowed(TestView, django_request_post, monkeypatch):
from actionviews.base import TemplateView
class TestPostView(TemplateView):
@require_method('post')
def do_index(self:''):
return {'result': 'test'}
monkeypatch.setattr(
'django.core.urlresolvers.get_urlconf',
lambda: type(
'urlconf', (), {
'urlpatterns': patterns('', *TestPostView.urls)}))
view = TestPostView.urls[0].callback
response = view(django_request_post)
assert response.status_code == 200
assert response.rendered_content == 'test'
def test_method_not_allowed(django_request, monkeypatch):
from actionviews.base import TemplateView
class TestPostView(TemplateView):
@require_method('post')
def do_index(self:''):
return {'result': 'test'}
monkeypatch.setattr(
'django.core.urlresolvers.get_urlconf',
lambda: type(
'urlconf', (), {
'urlpatterns': patterns('', *TestPostView.urls)}))
view = TestPostView.urls[0].callback
response = view(django_request)
assert response.status_code == 405
def test_options_method(TestView, django_request_options):
view = TestView.urls[0].callback
response = view(django_request_options)
assert response.status_code == 200
assert response['Allow'] == 'OPTIONS'
assert response['Content-Length'] == '0'
def test_child(monkeypatch, django_request):
from actionviews.base import View, TemplateView
from actionviews.decorators import child_view
class ChildView(TemplateView):
def do_index(self:''):
return {'result': 'test'}
class ParentView(View):
@child_view(ChildView)
def do_index(self:''):
pass
monkeypatch.setattr(
'django.core.urlresolvers.get_urlconf',
lambda: type(
'urlconf', (), {
'urlpatterns': patterns('', *ParentView.urls)}))
view = resolve('/').func
response = view(django_request)
assert response.rendered_content == 'test'
def test_child_defaults_for_parent(monkeypatch, request_factory):
from actionviews.base import View, TemplateView
from actionviews.decorators import child_view
class ChildView(TemplateView):
def do_index(self):
return {}
class ParentView(View):
@child_view(ChildView)
def do_pindex(self, result='test'):
return {'result': result}
monkeypatch.setattr(
'django.core.urlresolvers.get_urlconf',
lambda: type(
'urlconf', (), {
'urlpatterns': patterns('', *ParentView.urls)}))
resolver_match = resolve('/pindex/result/test/index/')
response = resolver_match.func(
request_factory.get('/pindex/result/test/index/'),
**resolver_match.kwargs)
assert response.rendered_content == 'test'
def test_raise_response_from_action(django_request, monkeypatch):
from django.http.response import HttpResponse
from actionviews.base import TemplateView
from actionviews.exceptions import ActionResponse
class TestView(TemplateView):
def do_index(self:''):
raise ActionResponse(HttpResponse())
monkeypatch.setattr(
'django.core.urlresolvers.get_urlconf',
lambda: type(
'urlconf', (), {
'urlpatterns': patterns('', *TestView.urls)}))
view = resolve('/').func
response = view(django_request)
assert response.status_code == 200
def test_raise_non_response_from_action(django_request, monkeypatch):
from actionviews.base import TemplateView
from actionviews.exceptions import ActionResponse
class TestView(TemplateView):
def do_index(self:''):
raise ActionResponse({})
monkeypatch.setattr(
'django.core.urlresolvers.get_urlconf',
lambda: type(
'urlconf', (), {
'urlpatterns': patterns('', *TestView.urls)}))
view = resolve('/').func
with pytest.raises(ImproperlyConfigured):
view(django_request)
def test_return_response_from_action(django_request, monkeypatch):
from django.http.response import HttpResponse
from actionviews.base import TemplateView
class TestView(TemplateView):
def do_index(self:''):
return HttpResponse()
monkeypatch.setattr(
'django.core.urlresolvers.get_urlconf',
lambda: type(
'urlconf', (), {
'urlpatterns': patterns('', *TestView.urls)}))
view = resolve('/').func
response = view(django_request)
assert response.status_code == 200
| bsd-3-clause | -6,750,235,519,553,566,000 | 25.650558 | 74 | 0.644302 | false |
zstackio/zstack-woodpecker | integrationtest/vm/mini/multiclusters/paths/multi_path212.py | 1 | 2650 | import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[
[TestAction.create_mini_vm, 'vm1', 'cluster=cluster2'],
[TestAction.change_vm_ha, 'vm1'],
[TestAction.create_mini_vm, 'vm2', 'cluster=cluster1'],
[TestAction.create_volume, 'volume1', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm2', 'volume1'],
[TestAction.create_volume_backup, 'volume1', 'volume1-backup1'],
[TestAction.create_mini_vm, 'vm3', 'cluster=cluster2', 'flag=thin'],
[TestAction.resize_volume, 'vm1', 5*1024*1024],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.create_volume, 'volume2', 'cluster=cluster2', 'flag=scsi'],
[TestAction.attach_volume, 'vm3', 'volume2'],
[TestAction.create_volume, 'volume3', 'cluster=cluster1', 'flag=thin,scsi'],
[TestAction.add_image, 'image1', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.create_volume, 'volume4', 'cluster=cluster2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume4'],
[TestAction.create_volume_backup, 'volume4', 'volume4-backup2'],
[TestAction.delete_volume_backup, 'volume4-backup2'],
[TestAction.delete_image, 'image1'],
[TestAction.recover_image, 'image1'],
[TestAction.delete_image, 'image1'],
[TestAction.expunge_image, 'image1'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup3'],
[TestAction.create_mini_vm, 'vm4', 'cluster=cluster1'],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.resize_data_volume, 'volume3', 5*1024*1024],
[TestAction.detach_volume, 'volume1'],
[TestAction.delete_volume, 'volume3'],
[TestAction.recover_volume, 'volume3'],
[TestAction.delete_vm_backup, 'vm1-backup3'],
[TestAction.change_vm_ha, 'vm1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.delete_volume, 'volume4'],
[TestAction.expunge_volume, 'volume4'],
[TestAction.create_mini_vm, 'vm5', 'data_volume=true', 'cluster=cluster1'],
[TestAction.attach_volume, 'vm5', 'volume1'],
[TestAction.create_volume_backup, 'volume1', 'volume1-backup5'],
[TestAction.create_image_from_volume, 'vm2', 'vm2-image2'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.delete_volume_backup, 'volume1-backup5'],
])
'''
The final status:
Running:['vm5']
Stopped:['vm2', 'vm4', 'vm1', 'vm3']
Enabled:['volume1-backup1', 'vm2-image2']
attached:['volume2', 'auto-volume5', 'volume1']
Detached:['volume3']
Deleted:['volume4-backup2', 'vm1-backup3', 'volume4-backup3', 'volume1-backup5']
Expunged:['volume4', 'image1']
Ha:[]
Group:
''' | apache-2.0 | -6,065,373,242,281,944,000 | 41.758065 | 104 | 0.700377 | false |
pdl30/rnaseq_misc | discarded_splicing.py | 1 | 6194 | #!/usr/bin/python
########################################################################
# 12 Jan 2015
# Patrick Lombard, Centre for Stem Stem Research
# Core Bioinformatics Group
# University of Cambridge
# All right reserved.
########################################################################
import subprocess
import sys, re, os
import argparse
import ConfigParser
from multiprocessing import Pool
import itertools
def ConfigSectionMap(section, Config):
dict1 = {}
options = Config.options(section)
for option in options:
try:
dict1[option] = Config.get(section, option)
if dict1[option] == -1:
DebugPrint("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
def seqgsea_count(sample, path, gtf, paired, orientation):
bam_name = os.path.basename(sample)
output = re.sub(".bam$", "_seqgsea.count", bam_name)
if paired:
p = "yes"
else:
p = "no"
command = "python {} -b yes -p {} -s {} {} {} {}".format(path, p, orientation, gtf, sample, output)
subprocess.call(command.split())
def run_seqgsea(conditions, comp1, comp2):
#Current problem is that this takes fecking ages
rscript = 'library("SeqGSEA")\n'
rscript += "pdata <- read.table('tmp_design.txt', header=T)\n"
rscript += "counts1 <- pdata[which(pdata[,2] == '{}'),1]\n".format(comp1)
rscript += "counts2 <- pdata[which(pdata[,2] == '{}'),1]\n".format(comp2)
rscript += "RCS <- loadExonCountData(as.character(counts1), as.character(counts2))\n"
rscript += "RCS <- exonTestability(RCS, cutoff=5)\n"
rscript += "geneTestable <- geneTestability(RCS)\n"
rscript += "RCS <- subsetByGenes(RCS, unique(geneID(RCS))[ geneTestable ])\n"
rscript += "geneIDs <- unique(geneID(RCS))\n"
rscript += "RCS <- estiExonNBstat(RCS)\n"
rscript += "RCS <- estiGeneNBstat(RCS)\n"
rscript += "perm.times <- 1000\n"
rscript += "permuteMat <- genpermuteMat(RCS, times=perm.times)\n"
    rscript += "RCS <- DSpermute4GSEA(RCS, permuteMat)\n"
    return rscript
def seqgsea_count_fun(args):
return seqgsea_count(*args)
def spliceR(idir, gtf, genome):
#Uses cuffdiff directories. Not working currently, giving errors
rscript = "library(spliceR)\n"
    rscript += "cuffDB <- readCufflinks(dir='{}',gtf='{}',genome='{}')\n".format(idir, gtf, genome)
rscript += "cuffDB_spliceR <- prepareCuff(cuffDB)\n"
rscript += "myTranscripts <- transcripts(cuffDB_spliceR); myExons <- exons(cuffDB_spliceR); conditions(cuffDB_spliceR)\n"
#rscript += "cuffDB_spliceR_filtered <- preSpliceRFilter(cuffDB_spliceR,filters=c('expressedIso', 'isoOK', 'expressedGenes', 'geneOK'))\n"
rscript += "mySpliceRList <- spliceR(cuffDB_spliceR, compareTo='preTranscript', filters=c('expressedGenes','geneOK', 'isoOK', 'expressedIso', 'isoClass'))\n"
rscript += "ucscCDS <- getCDS(selectedGenome='hg19', repoName='UCSC'); require('BSgenome.Hsapiens.UCSC.hg19', character.only = TRUE)\n"
rscript += "PTCSpliceRList <- annotatePTC(cuffDB_spliceR, cds=ucscCDS, Hsapiens, PTCDistance=50)\n"
rscript += "generateGTF(mySpliceRList, filters=c('geneOK', 'isoOK', 'expressedGenes', 'expressedIso'), scoreMethod='local', useProgressBar=F)\n"
rscript += "mySpliceRList <- spliceRPlot(mySpliceRList, evaluate='nr_transcript_pr_gene')\n"
rscript += "mySpliceRList <- spliceRPlot(mySpliceRList, evaluate='mean_AS_transcript', asType='ESI')\n"
return rscript
def create_design_for_R(idict):
output = open("tmp_design.txt", "w")
output.write("sampleName\tfileName\tcondition\n"),
for key in sorted(idict.keys()):
bam_name = os.path.basename(sample)
name = re.sub(".bam$", "", bam_name)
count = re.sub(".bam$", "_dexseq.count", bam_name)
output.write("{}\t{}\t{}\n".format(name, count, idict[key]))
output.close()
def reverse_dict(idict):
inv_map = {}
for k, v in idict.iteritems():
inv_map[v] = inv_map.get(v, [])
inv_map[v].append(k)
return inv_map
def main():
parser = argparse.ArgumentParser(description='Overview of a few programs for Splicing analysis\n')
subparsers = parser.add_subparsers(help='Programs included',dest="subparser_name")
splice_parser = subparsers.add_parser('spliceR', help="Runs spliceR")
splice_parser.add_argument('-c','--config', help='Config file containing cuffdiff directories as keys', required=True)
splice_parser.add_argument('-g','--gtf', help='GTF file formatted by spliceR', required=True)
splice_parser.add_argument('-o','--output', help='Output directory', required=True)
seq_parser = subparsers.add_parser('seqGSEA', help="Runs seqGSEA")
seq_parser.add_argument('-c','--config', help='Config file containing bam files, please see documentation for usage!', required=True)
seq_parser.add_argument('-g','--gtf', help='GTF file formatted by prepare_exon_annotation_ensembl.py script', required=True)
seq_parser.add_argument('-t','--threads', help='threads, default=1', default=1, required=False)
seq_parser.add_argument('-p', action='store_true', help='Use if samples are paired end. Will find sd and insert size for bam files', required=False)
seq_parser.add_argument('-o','--orientation', help='Options are yes, no or reverse. Test First!!!', required=True)
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = vars(parser.parse_args())
Config = ConfigParser.ConfigParser()
Config.optionxform = str
Config.read(args["config"])
conditions = ConfigSectionMap("Conditions", Config)
if args["subparser_name"] == "spliceR": #Not even close to finished
for key in conditions:
spliceR(key, args["gtf"], args["output"])
elif args["subparser_name"] == "seqGSEA":
path = "/raid/home/patrick/R/x86_64-pc-linux-gnu-library/3.1/SeqGSEA/extscripts"
count_program = path + "/count_in_exons.py"
pool = Pool(int(args["threads"]))
pool.map(seqgsea_count_fun, itertools.izip(list(conditions.keys()), itertools.repeat(count_program), itertools.repeat(args["gtf"]), itertools.repeat(args["p"]),
itertools.repeat(args["orientation"]))) ##Running annotation in parallel
pool.close()
pool.join()
comparisons = ConfigSectionMap("Comparisons", Config)
for comp in comparisons:
c = comparisons[comp].split(",")
comps = [x.strip(' ') for x in c]
rscipt = run_seqgsea(conditions, comps[0], comps[1])
run_rcode(rscript, "dexseq.R")
main()
| gpl-2.0 | 4,559,426,243,221,062,700 | 43.884058 | 162 | 0.685986 | false |
dmsurti/mayavi | tvtk/pyface/ui/wx/decorated_scene.py | 1 | 11998 | #------------------------------------------------------------------------------
#
# Copyright (c) 2006, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Authors: Prabhu Ramachandran <[email protected]>,
# Dave Peterson <[email protected]>
#
#------------------------------------------------------------------------------
""" A VTK interactor scene which provides a convenient toolbar that allows the
user to set the camera view, turn on the axes indicator, etc.
"""
# System imports.
from os.path import dirname
import wx
# Enthought library imports.
from pyface.api import ImageResource, FileDialog, OK
from pyface.action.api import ToolBarManager, Group, Action
from tvtk.api import tvtk
from traits.api import Instance, false, List, Either
# Local imports.
from .scene import Scene
###########################################################################
# 'DecoratedScene' class
###########################################################################
class DecoratedScene(Scene):
"""A VTK interactor scene which provides a convenient toolbar that
allows the user to set the camera view, turn on the axes indicator
etc.
"""
#######################################################################
# Traits
#######################################################################
if hasattr(tvtk, 'OrientationMarkerWidget'):
# The tvtk orientation marker widget. This only exists in VTK
# 5.x.
marker = Instance(tvtk.OrientationMarkerWidget, ())
# The tvtk axes that will be shown for the orientation.
axes = Instance(tvtk.AxesActor, ())
else:
marker = None
axes = None
# Determine if the orientation axis is shown or not.
show_axes = false
# The list of actions represented in the toolbar
actions = List(Either(Action, Group))
##########################################################################
# `object` interface
##########################################################################
def __init__(self, parent, **traits):
super(DecoratedScene, self).__init__(parent, **traits)
self._setup_axes_marker()
def __get_pure_state__(self):
"""Allows us to pickle the scene."""
# The control attribute is not picklable since it is a VTK
# object so we remove it.
d = super(DecoratedScene, self).__get_pure_state__()
for x in ['_content', '_panel', '_sizer', '_tool_bar', 'actions']:
d.pop(x, None)
return d
##########################################################################
# Non-public interface.
##########################################################################
def _create_control(self, parent):
""" Create the toolkit-specific control that represents the widget.
Overridden to wrap the Scene control within a panel that
also contains a toolbar.
"""
# Create a panel as a wrapper of the scene toolkit control. This
# allows us to also add additional controls.
self._panel = wx.Panel(parent, -1, style=wx.CLIP_CHILDREN)
self._sizer = wx.BoxSizer(wx.VERTICAL)
self._panel.SetSizer(self._sizer)
# Add our toolbar to the panel.
tbm = self._get_tool_bar_manager()
self._tool_bar = tbm.create_tool_bar(self._panel)
self._sizer.Add(self._tool_bar, 0, wx.EXPAND)
# Create the actual scene content
self._content = super(DecoratedScene, self)._create_control(
self._panel)
self._sizer.Add(self._content, 1, wx.EXPAND)
# Ensure the child controls are laid-out.
self._sizer.Layout()
return self._panel
def _setup_axes_marker(self):
axes = self.axes
if axes is None:
# For VTK versions < 5.0.
return
axes.set(
normalized_tip_length=(0.4, 0.4, 0.4),
normalized_shaft_length=(0.6, 0.6, 0.6),
shaft_type='cylinder'
)
p = axes.x_axis_caption_actor2d.caption_text_property
axes.y_axis_caption_actor2d.caption_text_property = p
axes.z_axis_caption_actor2d.caption_text_property = p
p.set(color=(1,1,1), shadow=False, italic=False)
self._background_changed(self.background)
self.marker.set(key_press_activation=False)
self.marker.orientation_marker = axes
def _get_tool_bar_manager(self):
""" Returns the tool_bar_manager for this scene.
"""
tbm = ToolBarManager( *self.actions )
return tbm
def _get_image_path(self):
"""Returns the directory which contains the images used by the
toolbar."""
# So that we can find the images.
import tvtk.pyface.api
return dirname(tvtk.pyface.api.__file__)
def _toggle_projection(self):
""" Toggle between perspective and parallel projection, this
is used for the toolbar.
"""
if self._panel is not None:
self.parallel_projection = not self.parallel_projection
def _toggle_axes(self):
"""Used by the toolbar to turn on/off the axes indicator.
"""
if self._panel is not None:
self.show_axes = not self.show_axes
def _save_snapshot(self):
"""Invoked by the toolbar menu to save a snapshot of the scene
to an image. Note that the extension of the filename
determines what image type is saved. The default is PNG.
"""
if self._panel is not None:
wildcard = "PNG images (*.png)|*.png|Determine by extension (*.*)|*.*"
dialog = FileDialog(
parent = self._panel,
title = 'Save scene to image',
action = 'save as',
default_filename = "snapshot.png",
wildcard = wildcard
)
if dialog.open() == OK:
# The extension of the path will determine the actual
# image type saved.
self.save(dialog.path)
def _configure_scene(self):
"""Invoked when the toolbar icon for configuration is clicked.
"""
self.edit_traits()
######################################################################
# Trait handlers.
######################################################################
def _show_axes_changed(self):
marker = self.marker
if (self._vtk_control is not None) and (marker is not None):
if not self.show_axes:
marker.interactor = None
marker.enabled = False
else:
marker.interactor = self.interactor
marker.enabled = True
self.render()
def _background_changed(self, value):
# Depending on the background, this sets the axes text and
# outline color to something that should be visible.
axes = self.axes
if (self._vtk_control is not None) and (axes is not None):
p = self.axes.x_axis_caption_actor2d.caption_text_property
m = self.marker
s = value[0] + value[1] + value[2]
if s <= 1.0:
p.color = (1,1,1)
m.set_outline_color(1,1,1)
else:
p.color = (0,0,0)
m.set_outline_color(0,0,0)
self.render()
def _actions_default(self):
return [
Group(
Action(
image = ImageResource('16x16/x-axis',
search_path = [self._get_image_path()],
),
tooltip = "View along the -X axis",
on_perform = self.x_minus_view,
),
Action(
image = ImageResource('16x16/x-axis',
search_path = [self._get_image_path()],
),
tooltip = "View along the +X axis",
on_perform = self.x_plus_view,
),
Action(
image = ImageResource('16x16/y-axis',
search_path = [self._get_image_path()],
),
tooltip = "View along the -Y axis",
on_perform = self.y_minus_view,
),
Action(
image = ImageResource('16x16/y-axis',
search_path = [self._get_image_path()],
),
tooltip = "View along the +Y axis",
on_perform = self.y_plus_view,
),
Action(
image = ImageResource('16x16/z-axis',
search_path = [self._get_image_path()],
),
tooltip = "View along the -Z axis",
on_perform = self.z_minus_view,
),
Action(
image = ImageResource('16x16/z-axis',
search_path = [self._get_image_path()],
),
tooltip = "View along the +Z axis",
on_perform = self.z_plus_view,
),
Action(
image = ImageResource('16x16/isometric',
search_path = [self._get_image_path()],
),
tooltip = "Obtain an isometric view",
on_perform = self.isometric_view,
),
),
Group(
Action(
image = ImageResource('16x16/parallel',
search_path = [self._get_image_path()],
),
tooltip = 'Toggle parallel projection',
style="toggle",
on_perform = self._toggle_projection,
checked = self.parallel_projection,
),
Action(
image = ImageResource('16x16/origin_glyph',
search_path = [self._get_image_path()],
),
tooltip = 'Toggle axes indicator',
style="toggle",
enabled=(self.marker is not None),
on_perform = self._toggle_axes,
checked = self.show_axes,
),
Action(
image = ImageResource('16x16/fullscreen',
search_path = [self._get_image_path()],
),
tooltip = 'Full Screen (press "q" or "e" or ESC to exit fullscreen)',
style="push",
on_perform = self._full_screen_fired,
),
),
Group(
Action(
image = ImageResource('16x16/save',
search_path = [self._get_image_path()],
),
tooltip = "Save a snapshot of this scene",
on_perform = self._save_snapshot,
),
Action(
image = ImageResource('16x16/configure',
search_path = [self._get_image_path()],
),
tooltip = 'Configure the scene',
style="push",
on_perform = self._configure_scene,
),
),
]
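# Usage sketch ('parent' is a hypothetical wx window):
#
#     scene = DecoratedScene(parent)
#     scene.show_axes = True    # same effect as the toolbar toggle
#     scene.isometric_view()    # the view the isometric toolbar button sets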
| bsd-3-clause | 49,747,729,586,761,630 | 37.210191 | 89 | 0.471245 | false |
bakhtout/odoo-educ | addons/openeducat_erp/op_subject/__init__.py | 1 | 1083 | # -*- coding: utf-8 -*-
#/#############################################################################
#
# Tech-Receptives Solutions Pvt. Ltd.
# Copyright (C) 2004-TODAY Tech-Receptives(<http://www.tech-receptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#/#############################################################################
import op_subject
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,839,420,797,204,608,000 | 44.125 | 80 | 0.617729 | false |
yati-sagade/RyDyrect | settings.py | 1 | 1727 | # Initialize App Engine and import the default settings (DB backend, etc.).
# If you want to use a different backend you have to remove all occurrences
# of "djangoappengine" from this file.
from djangoappengine.settings_base import *
import os
import people
# Activate django-dbindexer for the default database
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}
AUTOLOAD_SITECONF = 'indexes'
#My conf
MY_EMAIL = '[email protected]'
MY_NAME = 'Yati Sagade'
#End my conf
SECRET_KEY = '=r-$b*8hglm+858&9t043hlm6-&6-3d3vfc4((7yd0dbrakhvi'
INSTALLED_APPS = (
# 'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'djangotoolbox',
'autoload',
'dbindexer',
'people',
# djangoappengine should come last, so it can override a few manage.py commands
'djangoappengine',
)
MIDDLEWARE_CLASSES = (
# This loads the index definitions, so it has to come first
'autoload.middleware.AutoloadMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'django.core.context_processors.media',
)
# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
ADMIN_MEDIA_PREFIX = '/media/admin/'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
ROOT_URLCONF = 'urls'
| bsd-3-clause | 59,960,683,604,811,500 | 31.584906 | 83 | 0.734221 | false |
avih/treeherder | deployment/update/update.py | 1 | 5356 | """
Deploy this project in stage/production.
Requires commander_ which is installed on the systems that need it.
.. _commander: https://github.com/oremj/commander
"""
import os
import requests
import sys
from commander.deploy import hostgroups, task
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import commander_settings as settings # noqa
env_file = os.path.join(settings.SRC_DIR, 'treeherder-env.sh')
th_service_src = os.path.join(settings.SRC_DIR, 'treeherder-service')
is_prod = 'treeherder.mozilla.org' in settings.SRC_DIR
def run_local_with_env(ctx, cmd):
# For commands run from the admin node, we have to manually set the environment
# variables, since the admin node is shared by both stage and prod.
ctx.local("source {} && {}".format(env_file, cmd))
@task
def pre_update(ctx, ref=settings.UPDATE_REF):
# Update the code to a specific git reference (branch/tag/sha) and write
# info about the current repository state to a publicly visible file.
with ctx.lcd(th_service_src):
ctx.local('git fetch --quiet origin %s' % ref)
ctx.local('git reset --hard FETCH_HEAD')
ctx.local('find . -type f -name "*.pyc" -delete')
ctx.local('git status -s')
@task
def update(ctx):
# Create/populate a virtualenv that will be rsynced later along with the source.
with ctx.lcd(settings.SRC_DIR):
activate_script = os.path.join(settings.SRC_DIR, 'venv', 'bin', 'activate_this.py')
# Peep doesn't yet cache downloaded files, so we reuse the virtualenv to speed up deploys.
if not os.path.exists(activate_script):
ctx.local('virtualenv --python=python2.7 venv')
# Activate virtualenv.
execfile(activate_script, dict(__file__=activate_script))
# Install requirements using peep, so hashes are verified.
with ctx.lcd(th_service_src):
ctx.local('python2.7 bin/peep.py install -r requirements/common.txt')
# Make the virtualenv relocatable since paths are hard-coded by default.
ctx.local('virtualenv --relocatable venv')
# Fix lib64 symlink to be relative instead of absolute.
with ctx.lcd('venv'):
ctx.local('rm -f lib64')
ctx.local('ln -s lib lib64')
with ctx.lcd(th_service_src):
# Install nodejs non-dev packages, needed for the grunt build.
ctx.local("npm install --production")
# Generate the UI assets in the `dist/` directory.
ctx.local("./node_modules/.bin/grunt build --production")
# Make the current Git revision accessible at <site-root>/revision.txt
ctx.local("git rev-parse HEAD > dist/revision.txt")
# Generate gzipped versions of files that would benefit from compression, that
# WhiteNoise can then serve in preference to the originals. This is required
# since WhiteNoise's Django storage backend only gzips assets handled by
# collectstatic, and so does not affect files in the `dist/` directory.
ctx.local("python2.7 -m whitenoise.gzip dist")
# Collect the static files (eg for the Persona or Django admin UI)
run_local_with_env(ctx, "python2.7 manage.py collectstatic --noinput")
# Update the database schema, if necessary.
run_local_with_env(ctx, "python2.7 manage.py migrate --noinput")
# Update reference data & tasks config from the in-repo fixtures.
run_local_with_env(ctx, "python2.7 manage.py load_initial_data")
# Populate the datasource table and create the connected databases.
run_local_with_env(ctx, "python2.7 manage.py init_datasources")
@task
def deploy(ctx):
# Use the local, IT-written deploy script to check in changes.
ctx.local(settings.DEPLOY_SCRIPT)
# Rsync the updated code to the nodes & restart processes. These are
# separated out into their own functions, since the IRC bot output includes
# the task function name which is useful given how long these steps take.
deploy_rabbit()
deploy_web_app()
deploy_etl()
deploy_log()
ping_newrelic()
@task
def deploy_rabbit(ctx):
deploy_nodes(ctx, settings.RABBIT_HOSTGROUP, 'rabbit')
@task
def deploy_web_app(ctx):
deploy_nodes(ctx, settings.WEB_HOSTGROUP, 'web')
@task
def deploy_etl(ctx):
deploy_nodes(ctx, settings.ETL_HOSTGROUP, 'etl')
@task
def deploy_log(ctx):
deploy_nodes(ctx, settings.LOG_HOSTGROUP, 'log')
def deploy_nodes(ctx, hostgroup, node_type):
# Run the remote update script on each node in the specified hostgroup.
@hostgroups(hostgroup, remote_kwargs={'ssh_key': settings.SSH_KEY})
def rsync_code(ctx):
ctx.remote(settings.REMOTE_UPDATE_SCRIPT)
rsync_code()
env_flag = '-p' if is_prod else '-s'
ctx.local('/root/bin/restart-jobs %s %s' % (env_flag, node_type))
@task
def ping_newrelic(ctx):
data = {
'deployment[application_id]': settings.NEW_RELIC_APP_ID,
'deployment[user]': 'Chief',
}
headers = {'x-api-key': settings.NEW_RELIC_API_KEY}
r = requests.post('https://api.newrelic.com/deployments.xml',
data=data, headers=headers, timeout=30)
try:
r.raise_for_status()
except requests.exceptions.HTTPError:
print("HTTPError {} notifying New Relic: {}".format(r.status_code, r.text))
raise
| mpl-2.0 | -1,774,610,298,137,748,000 | 36.985816 | 98 | 0.673824 | false |
Lysovenko/wxRays | addons.py | 1 | 11809 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"This is Plugin Manager"
# wxRays (C) 2013 Serhii Lysovenko
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import print_function
from sys import modules, path
from imp import find_module, load_module
from os import listdir
from os.path import dirname, realpath, split, splitext, join, isfile, \
isabs, normpath
import wx
from wx.lib.mixins.listctrl import CheckListCtrlMixin
from sys import maxint
import wx.lib.rcsizer as rcs
class Addons:
def __init__(self):
"searches and reads addons descriptions files"
pth1 = dirname(realpath(__file__))
path.append(pth1)
pth2 = APP_SETT.get_home()
adds = []
# find *.addon files
for pth in (pth1, pth2):
dir_lst = [i for i in listdir(pth) if i.endswith('.addon')]
dir_lst.sort()
appr = [join(pth, i) for i in dir_lst]
# ensure that path is not directory or broken link
adds += [i for i in appr if isfile(i)]
descrs = []
found_ids = set()
for adf in adds:
add_descr = {}
# scanning *.addon file
with open(adf) as fp:
for line in fp:
ls = line.split('=', 1)
if len(ls) != 2:
continue
add_descr[ls[0]] = unicode(ls[1].strip(), 'utf-8')
# validating the result of scanning
is_valid = True
for i in ('path', 'name', 'id'):
if i not in add_descr:
is_valid = False
break
if not is_valid:
continue
pth = add_descr['path']
if not isabs(pth):
pth = normpath(join(dirname(adf), pth))
add_descr['path'] = pth
if not isfile(pth):
continue
d_id = add_descr['id']
if d_id.isdigit():
d_id = int(d_id)
add_descr['id'] = d_id
if d_id in found_ids:
continue
add_descr['keys'] = set(add_descr.get('keys', '').split())
found_ids.add(d_id)
descrs.append(add_descr)
self.descriptions = descrs
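    # Illustrative sketch of a descriptor file as parsed above (the values are
    # hypothetical; only the required keys -- path, name, id -- and the optional
    # space-separated `keys` field come from the parser):
    #
    #   name=Example plugin
    #   id=42
    #   path=example_plugin.py
    #   keys=configurable
    #
    # `path` may be relative to the .addon file and `id` must be unique.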
def set_active(self, id_set=None):
if id_set is None:
id_set = APP_SETT.get("addons_ids", "set()")
id_set = eval(id_set)
for desc in self.descriptions:
desc['isactive'] = desc['id'] in id_set
def get_active(self, wgs=True):
id_set = set()
for desc in self.descriptions:
if desc['isactive']:
id_set.add(desc['id'])
if wgs:
APP_SETT.set("addons_ids", repr(id_set))
return id_set
def introduce(self, adds_dat):
"modules loader"
any_error = False
for desc in self.descriptions:
if desc['isactive'] and 'module' not in desc:
pth, nam = split(splitext(desc['path'])[0])
try:
fptr, pth, dsc = find_module(nam, [pth])
module = load_module(nam, fptr, pth, dsc)
except ImportError:
desc['isactive'] = False
any_error = True
print('ImportError: %s' % nam)
continue
if fptr:
fptr.close()
mdata = dict(adds_dat[' base '])
mdata['id'] = desc['id']
adds_dat[desc['id']] = mdata
if not hasattr(module, 'introduce') or \
module.introduce(mdata):
adds_dat.pop(desc['id'])
desc['isactive'] = False
any_error = True
print("Error: `%s' can't be introduced" % pth)
modules.pop(module.__name__)
continue
desc['module'] = module
if any_error:
self.get_active()
return any_error
def terminate(self, adds_dat, all=False):
"modules unloader"
id_off = []
for desc in self.descriptions:
if 'module' in desc and (all or not desc['isactive']):
module = desc.pop('module')
mdata = adds_dat.pop(desc['id'])
if hasattr(module, 'terminate'):
module.terminate(mdata)
modules.pop(module.__name__)
id_off.append(desc['id'])
return id_off
def mod_from_desc(desc, adds_dat):
"module loader"
desc['isactive'] = True
if 'module' not in desc:
pth, nam = split(splitext(desc['path'])[0])
try:
fptr, pth, dsc = find_module(nam, [pth])
except ImportError:
desc['isactive'] = False
print('ImportError: %s' % nam)
return
module = load_module(nam, fptr, pth, dsc)
if fptr:
fptr.close()
mdata = dict(adds_dat[' base '])
mdata['id'] = desc['id']
adds_dat[desc['id']] = mdata
if not hasattr(module, 'introduce') or \
module.introduce(mdata):
adds_dat.pop(desc['id'])
desc['isactive'] = False
print("Error: `%s' can't be introduced" % pth)
modules.pop(module.__name__)
return
desc['module'] = module
return module
return desc['module']
# ######################## GUI Part ###############################
class ChkListCtrl(wx.ListCtrl, CheckListCtrlMixin):
def __init__(self, parent, size):
wx.ListCtrl.__init__(self, parent, -1, style=wx.LC_REPORT |
wx.BORDER_NONE | wx.LC_SINGLE_SEL,
size=size)
CheckListCtrlMixin.__init__(self)
class AddonsListCtrlPanel(wx.Panel):
def __init__(self, parent, size=(-1, -1)):
wx.Panel.__init__(self, parent, -1, style=wx.WANTS_CHARS |
wx.SUNKEN_BORDER)
sizer = wx.BoxSizer(wx.VERTICAL)
self.list = ChkListCtrl(self, size)
sizer.Add(self.list, 1, wx.EXPAND)
self.SetSizer(sizer)
self.SetAutoLayout(True)
self.cfg = parent.cfg
self.cfg.Bind(wx.EVT_BUTTON, self.on_configure)
def PopulateList(self, descrs):
self.descrs = descrs
lstctr = self.list
lstctr.Freeze()
font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
_text = _("Use")
dc = wx.WindowDC(self)
dc.SetFont(font)
fc_wdth = int(dc.GetTextExtent(_text)[0] * 1.25)
lstctr.InsertColumn(0, _text, width=fc_wdth,
format=wx.LIST_FORMAT_CENTRE)
lwdth = lstctr.GetClientSizeTuple()[0]
lstctr.InsertColumn(1, _("Title"), width=lwdth - fc_wdth)
for data in descrs:
index = lstctr.InsertStringItem(maxint, '')
lstctr.SetStringItem(index, 1, data['name'])
lstctr.CheckItem(index, data['isactive'])
self.nitems = index + 1
lstctr.Thaw()
lstctr.Update()
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.on_item_selected, self.list)
def on_item_selected(self, event):
i = event.m_itemIndex
self.cur_descr = self.descrs[i]
self.cfg.Enable('configurable' in self.cur_descr['keys'] and
self.list.IsChecked(i))
def set_addons_active_state(self):
lst = self.list
for i in xrange(len(self.descrs)):
self.descrs[i]['isactive'] = lst.IsChecked(i)
def on_configure(self, evt):
curd = self.cur_descr
parent = self.GetParent()
add_dat = parent.GetParent().addons_data
module = mod_from_desc(curd, add_dat)
if module is None or not hasattr(module, 'Config'):
if 'configurable' in curd['keys']:
curd['keys'].remove('configurable')
return
cfg = module.Config(add_dat[curd['id']])
dlg = DlgAddonConfig(parent, curd, cfg.get_visual)
if dlg.ShowModal() == wx.ID_OK:
cfg.configure()
class DlgAddonMgr(wx.Dialog):
"Missing docstring"
def __init__(self, parent, descrs):
title = _("Addons")
wx.Dialog.__init__(self, parent, -1, title)
sizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
cfg = wx.Button(self, -1, _("Configure..."))
cfg.Enable(False)
is_ok = wx.Button(self, wx.ID_OK)
is_cancel = wx.Button(self, wx.ID_CANCEL)
for i in (cfg, is_cancel, is_ok):
hsizer.Add(i, 0, wx.ALIGN_CENTRE | wx.ALL | wx.EXPAND, 5)
hsms = hsizer.GetMinSize()
list_size = (max(350, hsms[0]), 420)
self.cfg = cfg
self.ulc = AddonsListCtrlPanel(self, list_size)
self.ulc.PopulateList(descrs)
sizer.Add(self.ulc, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
sizer.Add(hsizer, 0, wx.EXPAND | wx.ALL, 5)
sizer.Fit(self)
self.SetSizer(sizer)
def run_addons_dialog(event=None):
if event is None:
window = None
else:
# this may be called from menu only
window = event.GetEventObject().GetMenuBar().GetFrame()
adds = APP_SETT.addons
descrs = adds.descriptions
DLG = DlgAddonMgr(window, descrs)
if event is None:
DLG.CenterOnScreen()
if DLG.ShowModal() == wx.ID_OK and window:
DLG.ulc.set_addons_active_state()
adds.get_active()
add_dat = window.addons_data
adds.introduce(add_dat)
a_menu = window.a_menu
for i in adds.terminate(add_dat):
a_menu.remove_add_id(i)
a_menu.replay_actions()
class DlgAddonConfig(wx.Dialog):
"Missing docstring"
def __init__(self, parent, descr, get_vis):
title = _(descr['name'])
wx.Dialog.__init__(self, parent, -1, title)
sizer = wx.BoxSizer(wx.VERTICAL)
vp = get_vis(self)
sizer.Add(vp, 0, wx.ALL, 5)
line = wx.StaticLine(self, -1, size=(20, -1), style=wx.LI_HORIZONTAL)
sizer.Add(line, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.RIGHT |
wx.TOP, 5)
hsizer = rcs.RowColSizer()
is_ok = wx.Button(self, wx.ID_OK)
is_cancel = wx.Button(self, wx.ID_CANCEL)
hsizer.Add(is_cancel, 0, 0, 5, 0, 0)
hsizer.AddGrowableCol(1)
hsizer.Add(is_ok, 0, 0, 5, 0, 2)
sizer.Add(hsizer, 0, wx.EXPAND | wx.ALL, 5)
sizer.Fit(self)
self.SetSizer(sizer)
if __name__ == '__main__':
idict = {wx.ID_OK: "ID_OK", wx.ID_CANCEL: "ID_CANCEL"}
APP = wx.PySimpleApp()
from settings import prog_init
prog_init()
adds = APP_SETT.addons
descrs = adds.descriptions
adds.set_active(set([u'10', u'1', u'2', u'5', u'9', u'8']))
DLG = DlgAddonMgr(None, descrs)
DLG.CenterOnScreen()
VAL = DLG.ShowModal()
print(idict.get(VAL, VAL))
if VAL == wx.ID_OK:
DLG.ulc.set_addons_active_state()
print(descrs)
print(adds.get_active())
        adds.introduce({' base ': {}})  # load the active addon modules
| gpl-3.0 | 4,603,559,879,106,108,400 | 34.676737 | 78 | 0.538234 | false |
spivachuk/sovrin-node | indy_node/test/state_proof/test_state_proofs_for_get_requests.py | 1 | 12398 | import base64
import random
import time
import base58
import pytest
from common.serializers import serialization
from common.serializers.serialization import state_roots_serializer
from crypto.bls.bls_multi_signature import MultiSignature, MultiSignatureValue
from plenum.bls.bls_store import BlsStore
from plenum.common.constants import TXN_TYPE, TARGET_NYM, RAW, DATA, \
IDENTIFIER, NAME, VERSION, ROLE, VERKEY, KeyValueStorageType, \
STATE_PROOF, ROOT_HASH, MULTI_SIGNATURE, PROOF_NODES, TXN_TIME, CURRENT_PROTOCOL_VERSION, DOMAIN_LEDGER_ID
from plenum.common.txn_util import reqToTxn, append_txn_metadata, append_payload_metadata
from plenum.common.types import f
from indy_common.constants import \
ATTRIB, CLAIM_DEF, SCHEMA, CLAIM_DEF_FROM, CLAIM_DEF_SCHEMA_REF, CLAIM_DEF_SIGNATURE_TYPE, \
CLAIM_DEF_PUBLIC_KEYS, CLAIM_DEF_TAG, SCHEMA_NAME, SCHEMA_VERSION, SCHEMA_ATTR_NAMES
from indy_common.types import Request
from indy_node.persistence.attribute_store import AttributeStore
from indy_node.persistence.idr_cache import IdrCache
from indy_node.server.domain_req_handler import DomainReqHandler
from plenum.common.util import get_utc_epoch, friendlyToRaw, rawToFriendly, \
friendlyToHex, hexToFriendly
from state.pruning_state import PruningState
from storage.kv_in_memory import KeyValueStorageInMemory
from indy_common.state import domain
@pytest.fixture()
def bls_store():
return BlsStore(key_value_type=KeyValueStorageType.Memory,
data_location=None,
key_value_storage_name="BlsInMemoryStore",
serializer=serialization.multi_sig_store_serializer)
@pytest.fixture()
def request_handler(bls_store):
state = PruningState(KeyValueStorageInMemory())
cache = IdrCache('Cache', KeyValueStorageInMemory())
attr_store = AttributeStore(KeyValueStorageInMemory())
return DomainReqHandler(ledger=None,
state=state,
config=None,
requestProcessor=None,
idrCache=cache,
attributeStore=attr_store,
bls_store=bls_store,
ts_store=None)
def extract_proof(result, expected_multi_sig):
proof = result[STATE_PROOF]
assert proof
assert proof[ROOT_HASH]
assert proof[PROOF_NODES]
multi_sign = proof[MULTI_SIGNATURE]
assert multi_sign
assert multi_sign == expected_multi_sig
return proof
def save_multi_sig(request_handler):
multi_sig_value = MultiSignatureValue(ledger_id=DOMAIN_LEDGER_ID,
state_root_hash=state_roots_serializer.serialize(
bytes(request_handler.state.committedHeadHash)),
txn_root_hash='2' * 32,
pool_state_root_hash='1' * 32,
timestamp=get_utc_epoch())
multi_sig = MultiSignature('0' * 32, ['Alpha', 'Beta', 'Gamma'], multi_sig_value)
request_handler.bls_store.put(multi_sig)
return multi_sig.as_dict()
def is_proof_verified(request_handler,
proof, path,
value, seq_no, txn_time, ):
encoded_value = domain.encode_state_value(value, seq_no, txn_time)
proof_nodes = base64.b64decode(proof[PROOF_NODES])
root_hash = base58.b58decode(proof[ROOT_HASH])
verified = request_handler.state.verify_state_proof(
root_hash,
path,
encoded_value,
proof_nodes,
serialized=True
)
return verified
def test_state_proofs_for_get_attr(request_handler):
# Adding attribute
nym = 'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv'
attr_key = 'last_name'
raw_attribute = '{"last_name":"Anderson"}'
seq_no = 0
txn_time = int(time.time())
identifier = "6ouriXMZkLeHsuXrN1X1fd"
txn = {
TXN_TYPE: ATTRIB,
TARGET_NYM: nym,
RAW: raw_attribute,
}
txn = append_txn_metadata(reqToTxn(Request(operation=txn,
protocolVersion=CURRENT_PROTOCOL_VERSION,
identifier=identifier)),
seq_no=seq_no, txn_time=txn_time)
request_handler._addAttr(txn)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
# Getting attribute
get_request = Request(
operation={
TARGET_NYM: nym,
RAW: 'last_name'
},
signatures={},
protocolVersion=CURRENT_PROTOCOL_VERSION
)
result = request_handler.handleGetAttrsReq(get_request)
proof = extract_proof(result, multi_sig)
attr_value = result[DATA]
assert attr_value == raw_attribute
# Verifying signed state proof
path = domain.make_state_path_for_attr(nym, attr_key)
assert is_proof_verified(request_handler,
proof, path,
domain.hash_of(attr_value), seq_no, txn_time)
def test_state_proofs_for_get_claim_def(request_handler):
# Adding claim def
nym = 'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv'
seq_no = 0
txn_time = int(time.time())
identifier = "6ouriXMZkLeHsuXrN1X1fd"
schema_seqno = 0
signature_type = 'CL'
key_components = '{"key_components": []}'
tag = 'tag1'
txn = {
TXN_TYPE: CLAIM_DEF,
TARGET_NYM: nym,
CLAIM_DEF_SCHEMA_REF: schema_seqno,
CLAIM_DEF_PUBLIC_KEYS: key_components,
CLAIM_DEF_TAG: tag
}
txn = append_txn_metadata(reqToTxn(Request(operation=txn,
protocolVersion=CURRENT_PROTOCOL_VERSION,
identifier=identifier)),
seq_no=seq_no, txn_time=txn_time)
txn = append_payload_metadata(txn, frm=nym)
request_handler._addClaimDef(txn)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
# Getting claim def
request = Request(
operation={
IDENTIFIER: nym,
CLAIM_DEF_FROM: nym,
CLAIM_DEF_SCHEMA_REF: schema_seqno,
CLAIM_DEF_SIGNATURE_TYPE: signature_type,
CLAIM_DEF_TAG: tag
},
signatures={},
protocolVersion=CURRENT_PROTOCOL_VERSION
)
result = request_handler.handleGetClaimDefReq(request)
proof = extract_proof(result, multi_sig)
assert result[DATA] == key_components
# Verifying signed state proof
path = domain.make_state_path_for_claim_def(nym, schema_seqno,
signature_type, tag)
assert is_proof_verified(request_handler,
proof, path,
key_components, seq_no, txn_time)
def test_state_proofs_for_get_schema(request_handler):
# Adding schema
nym = 'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv'
seq_no = 0
txn_time = int(time.time())
identifier = "6ouriXMZkLeHsuXrN1X1fd"
schema_name = "schema_a"
schema_version = "1.0"
# data = '{"name": "schema_a", "version": "1.0"}'
schema_key = {SCHEMA_NAME: schema_name,
SCHEMA_VERSION: schema_version}
data = {**schema_key,
SCHEMA_ATTR_NAMES: ["Some_Attr", "Attr1"]}
txn = {
TXN_TYPE: SCHEMA,
DATA: data,
}
txn = append_txn_metadata(reqToTxn(Request(operation=txn,
protocolVersion=CURRENT_PROTOCOL_VERSION,
identifier=identifier)),
seq_no=seq_no, txn_time=txn_time)
txn = append_payload_metadata(txn, frm=nym)
request_handler._addSchema(txn)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
# Getting schema
request = Request(
operation={
TARGET_NYM: nym,
DATA: schema_key
},
signatures={},
protocolVersion=CURRENT_PROTOCOL_VERSION
)
result = request_handler.handleGetSchemaReq(request)
proof = extract_proof(result, multi_sig)
assert result[DATA] == data
data.pop(NAME)
data.pop(VERSION)
# Verifying signed state proof
path = domain.make_state_path_for_schema(nym, schema_name, schema_version)
assert is_proof_verified(request_handler,
proof, path,
data, seq_no, txn_time)
def prep_multi_sig(request_handler, nym, role, verkey, seq_no):
txn_time = int(time.time())
identifier = "6ouriXMZkLeHsuXrN1X1fd"
# Adding nym
data = {
f.IDENTIFIER.nm: nym,
ROLE: role,
VERKEY: verkey,
f.SEQ_NO.nm: seq_no,
TXN_TIME: txn_time,
}
txn = append_txn_metadata(reqToTxn(Request(operation=data,
protocolVersion=CURRENT_PROTOCOL_VERSION,
identifier=identifier)),
seq_no=seq_no, txn_time=txn_time)
txn = append_payload_metadata(txn, frm=nym)
request_handler.updateNym(nym, txn)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
return data, multi_sig
def get_nym_verify_proof(request_handler, nym, data, multi_sig):
request = Request(
operation={
TARGET_NYM: nym
},
signatures={},
protocolVersion=CURRENT_PROTOCOL_VERSION
)
result = request_handler.handleGetNymReq(request)
proof = extract_proof(result, multi_sig)
assert proof
if data:
assert result[DATA]
result_data = request_handler.stateSerializer.deserialize(result[DATA])
result_data.pop(TARGET_NYM, None)
assert result_data == data
# Verifying signed state proof
path = request_handler.nym_to_state_key(nym)
# If the value does not exist, serialisation should be null and
# verify_state_proof needs to be given null (None). This is done to
# differentiate between absence of value and presence of empty string value
serialised_value = request_handler.stateSerializer.serialize(data) if data else None
proof_nodes = base64.b64decode(proof[PROOF_NODES])
root_hash = base58.b58decode(proof[ROOT_HASH])
return request_handler.state.verify_state_proof(
root_hash,
path,
serialised_value,
proof_nodes,
serialized=True
)
def test_state_proofs_for_get_nym(request_handler):
nym = 'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv'
role = "2"
verkey = "~7TYfekw4GUagBnBVCqPjiC"
seq_no = 1
# Check for existing nym
data, multi_sig = prep_multi_sig(request_handler, nym, role, verkey, seq_no)
assert get_nym_verify_proof(request_handler, nym, data, multi_sig)
# Shuffle the bytes of nym
h = list(friendlyToHex(nym))
random.shuffle(h)
garbled_nym = hexToFriendly(bytes(h))
data[f.IDENTIFIER.nm] = garbled_nym
# `garbled_nym` does not exist, proof should verify but data is null
assert get_nym_verify_proof(request_handler, garbled_nym, None, multi_sig)
def test_no_state_proofs_if_protocol_version_less(request_handler):
nym = 'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv'
role = "2"
verkey = "~7TYfekw4GUagBnBVCqPjiC"
identifier = "6ouriXMZkLeHsuXrN1X1fd"
seq_no = 0
txn_time = int(time.time())
# Adding nym
data = {
f.IDENTIFIER.nm: nym,
ROLE: role,
VERKEY: verkey,
f.SEQ_NO.nm: seq_no,
TXN_TIME: txn_time,
}
txn = append_txn_metadata(reqToTxn(Request(operation=data,
protocolVersion=CURRENT_PROTOCOL_VERSION,
identifier=identifier)),
seq_no=seq_no, txn_time=txn_time)
txn = append_payload_metadata(txn, frm=nym)
request_handler.updateNym(nym, txn)
request_handler.state.commit()
multi_sig = save_multi_sig(request_handler)
# Getting nym
request = Request(
operation={
TARGET_NYM: nym
},
signatures={}
)
result = request_handler.handleGetNymReq(request)
assert STATE_PROOF not in result
| apache-2.0 | 3,612,552,977,990,105,600 | 34.022599 | 110 | 0.611228 | false |
hubert667/AIR | build/billiard/billiard/_reduction3.py | 2 | 7954 | #
# Module which deals with pickling of objects.
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
import copyreg
import functools
import io
import os
import pickle
import socket
import sys
__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump']
HAVE_SEND_HANDLE = (sys.platform == 'win32' or
(hasattr(socket, 'CMSG_LEN') and
hasattr(socket, 'SCM_RIGHTS') and
hasattr(socket.socket, 'sendmsg')))
#
# Pickler subclass
#
class ForkingPickler(pickle.Pickler):
'''Pickler subclass used by multiprocessing.'''
_extra_reducers = {}
_copyreg_dispatch_table = copyreg.dispatch_table
def __init__(self, *args):
super().__init__(*args)
self.dispatch_table = self._copyreg_dispatch_table.copy()
self.dispatch_table.update(self._extra_reducers)
@classmethod
def register(cls, type, reduce):
'''Register a reduce function for a type.'''
cls._extra_reducers[type] = reduce
@classmethod
def dumps(cls, obj, protocol=None):
buf = io.BytesIO()
cls(buf, protocol).dump(obj)
return buf.getbuffer()
loads = pickle.loads
register = ForkingPickler.register
def dump(obj, file, protocol=None):
'''Replacement for pickle.dump() using ForkingPickler.'''
ForkingPickler(file, protocol).dump(obj)
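# Usage sketch (illustrative, not part of the original module): round-trip an
# object with the multiprocessing-aware pickler; registered reducers apply.
#
#   buf = ForkingPickler.dumps({'answer': 42})
#   obj = ForkingPickler.loads(buf)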
#
# Platform specific definitions
#
if sys.platform == 'win32':
# Windows
__all__ += ['DupHandle', 'duplicate', 'steal_handle']
import _winapi
def duplicate(handle, target_process=None, inheritable=False):
'''Duplicate a handle. (target_process is a handle not a pid!)'''
if target_process is None:
target_process = _winapi.GetCurrentProcess()
return _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(), handle, target_process,
0, inheritable, _winapi.DUPLICATE_SAME_ACCESS)
def steal_handle(source_pid, handle):
'''Steal a handle from process identified by source_pid.'''
source_process_handle = _winapi.OpenProcess(
_winapi.PROCESS_DUP_HANDLE, False, source_pid)
try:
return _winapi.DuplicateHandle(
source_process_handle, handle,
_winapi.GetCurrentProcess(), 0, False,
_winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
finally:
_winapi.CloseHandle(source_process_handle)
def send_handle(conn, handle, destination_pid):
'''Send a handle over a local connection.'''
dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
conn.send(dh)
def recv_handle(conn):
'''Receive a handle over a local connection.'''
return conn.recv().detach()
class DupHandle(object):
'''Picklable wrapper for a handle.'''
def __init__(self, handle, access, pid=None):
if pid is None:
# We just duplicate the handle in the current process and
# let the receiving process steal the handle.
pid = os.getpid()
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
try:
self._handle = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(),
handle, proc, access, False, 0)
finally:
_winapi.CloseHandle(proc)
self._access = access
self._pid = pid
def detach(self):
'''Get the handle. This should only be called once.'''
# retrieve handle from process which currently owns it
if self._pid == os.getpid():
# The handle has already been duplicated for this process.
return self._handle
# We must steal the handle from the process whose pid is self._pid.
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
self._pid)
try:
return _winapi.DuplicateHandle(
proc, self._handle, _winapi.GetCurrentProcess(),
self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
finally:
_winapi.CloseHandle(proc)
else:
# Unix
__all__ += ['DupFd', 'sendfds', 'recvfds']
import array
# On MacOSX we should acknowledge receipt of fds -- see Issue14669
ACKNOWLEDGE = sys.platform == 'darwin'
def sendfds(sock, fds):
'''Send an array of fds over an AF_UNIX socket.'''
fds = array.array('i', fds)
msg = bytes([len(fds) % 256])
sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
if ACKNOWLEDGE and sock.recv(1) != b'A':
raise RuntimeError('did not receive acknowledgement of fd')
def recvfds(sock, size):
'''Receive an array of fds over an AF_UNIX socket.'''
a = array.array('i')
bytes_size = a.itemsize * size
msg, ancdata, flags, addr = sock.recvmsg(
1, socket.CMSG_LEN(bytes_size),
)
if not msg and not ancdata:
raise EOFError
try:
if ACKNOWLEDGE:
sock.send(b'A')
if len(ancdata) != 1:
raise RuntimeError(
'received %d items of ancdata' % len(ancdata),
)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
if len(cmsg_data) % a.itemsize != 0:
raise ValueError
a.frombytes(cmsg_data)
assert len(a) % 256 == msg[0]
return list(a)
except (ValueError, IndexError):
pass
raise RuntimeError('Invalid data received')
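    # Usage sketch (illustrative, not part of the original module): pass a file
    # descriptor between the two ends of an AF_UNIX socket pair.
    #
    #   import socket
    #   left, right = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    #   with open('example.txt', 'rb') as f:     # hypothetical file
    #       sendfds(left, [f.fileno()])
    #   fd, = recvfds(right, 1)
    #
    # On macOS the receiver also sends back b'A' (see ACKNOWLEDGE above).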
def send_handle(conn, handle, destination_pid): # noqa
'''Send a handle over a local connection.'''
fd = conn.fileno()
with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s:
sendfds(s, [handle])
def recv_handle(conn): # noqa
'''Receive a handle over a local connection.'''
fd = conn.fileno()
with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s:
return recvfds(s, 1)[0]
def DupFd(fd):
'''Return a wrapper for an fd.'''
from .forking import Popen
return Popen.duplicate_for_child(fd)
#
# Try making some callable types picklable
#
def _reduce_method(m):
if m.__self__ is None:
return getattr, (m.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
class _C:
def f(self):
pass
register(type(_C().f), _reduce_method)
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return functools.partial(func, *args, **keywords)
register(functools.partial, _reduce_partial)
#
# Make sockets picklable
#
if sys.platform == 'win32':
def _reduce_socket(s):
from .resource_sharer import DupSocket
return _rebuild_socket, (DupSocket(s),)
def _rebuild_socket(ds):
return ds.detach()
register(socket.socket, _reduce_socket)
else:
def _reduce_socket(s): # noqa
df = DupFd(s.fileno())
return _rebuild_socket, (df, s.family, s.type, s.proto)
def _rebuild_socket(df, family, type, proto): # noqa
fd = df.detach()
return socket.socket(family, type, proto, fileno=fd)
register(socket.socket, _reduce_socket)
| gpl-3.0 | 4,350,371,272,115,698,000 | 30.943775 | 79 | 0.587126 | false |
Afonasev/Blog | backend/posts/management/commands/fill_fake_data.py | 1 | 2311 | from random import choice, randint
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from faker import Faker
from backend.posts import models
fake = Faker()
def make_text(min_paragraphs, max_paragraphs):
return '\n'.join(fake.paragraphs(
nb=randint(min_paragraphs, max_paragraphs)
))
class Command(BaseCommand):
help = 'Fill fake data for dev server'
def handle(self, *args, **options):
admin = self._create_admin_user()
tag_ids = self._create_tags()
posts = self._create_posts(author=admin, tags=tag_ids)
self._create_comments(author=admin, posts=posts)
self.stdout.write(self.style.SUCCESS('Fake data filled!'))
@staticmethod
def _create_admin_user():
return get_user_model().objects.create_user(
username='admin',
password='password13',
is_staff=True,
is_superuser=True,
)
def _create_tags(self):
tag_names = set()
for _ in range(15):
tag_names.add(fake.word())
tag_ids = []
for name in tag_names:
tag = models.Tag(title=name)
tag.save()
tag_ids.append(tag.id)
return tag_ids
def _create_posts(self, author, tags):
posts = []
for _ in range(100):
post = models.Post(
author=author,
title=fake.sentence(nb_words=randint(3, 8)),
text=make_text(10, 30),
hidden=choice([True, False, False]),
)
post.save()
post_tags = set()
for _ in range(randint(3, 8)):
post_tags.add(choice(tags))
for tag in post_tags:
post.tags.add(tag)
post.save()
posts.append(post)
return posts
def _create_comments(self, author, posts):
for post in posts:
for _ in range(randint(5, 20)):
has_author = randint(1, 5) < 2
models.Comment(
post=post,
author=author if has_author else None,
username=fake.user_name() if not has_author else None,
text=make_text(1, 3),
).save()
| mit | -8,112,549,477,395,559,000 | 27.182927 | 74 | 0.536132 | false |
exaile/exaile | plugins/ipconsole/__init__.py | 1 | 8594 | # This plugin is adapted from the Python Console plugin and the IPython
# cookbook at:
# http://ipython.scipy.org/moin/Cookbook/EmbeddingInGTK
# Copyright (C) 2009-2010 Brian Parma
# Updated 2012 Brian Parma
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import logging
import sys
import site
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GLib
from xl.nls import gettext as _
from xl import event
from xl import settings as xl_settings
from xl import providers
from xlgui.widgets import menu
from xlgui import guiutil
from . import ipconsoleprefs
from . import ipython_view as ip
FONT = "Luxi Mono 10"
SETTINGS_STRING = 'plugin_ipconsole_option_set'
LOGGER = logging.getLogger(__name__)
class Quitter:
"""Simple class to handle exit, similar to Python 2.5's.
This Quitter is used to circumvent IPython's circumvention
    of the builtin Quitter, since it prevents exaile from closing."""
def __init__(self, exit_function, name):
self.exit_function = exit_function
self.name = name
def __repr__(self):
return 'Type %s() to exit.' % self.name
def __call__(self):
self.exit_function() # Passed in exit function
site.setquit() # Restore default builtins
exit() # Call builtin
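# Note (sketch, not what the plugin below does): a Quitter bound to the console
# window could be exposed to the embedded shell, e.g.
#   view.updateNamespace({'exit': Quitter(window.destroy, 'exit')})
# IPView instead maps both `exit` and `quit` to None in its __init__.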
class IPView(ip.IPythonView):
'''Extend IPythonView to support closing with Ctrl+D'''
__text_color = None
__background_color = None
__font = None
__css_provider = None
__text_color_str = None
__background_color_str = None
__font_str = None
__iptheme = None
def __init__(self, namespace):
ip.IPythonView.__init__(self)
event.add_ui_callback(self.__on_option_set, SETTINGS_STRING)
self.set_wrap_mode(Gtk.WrapMode.CHAR)
self.updateNamespace(namespace) # expose exaile (passed in)
# prevent exit and quit - freezes window? does bad things
self.updateNamespace({'exit': None, 'quit': None})
style_context = self.get_style_context()
self.__css_provider = Gtk.CssProvider()
style_context.add_provider(
self.__css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
# Trigger setup through options
for option in ('text_color', 'background_color', 'font'):
self.__on_option_set(
None, xl_settings, 'plugin/ipconsole/{option}'.format(option=option)
)
def __on_option_set(self, _event, settings, option):
if option == 'plugin/ipconsole/font':
pango_font_str = settings.get_option(option, FONT)
self.__font_str = guiutil.css_from_pango_font_description(pango_font_str)
GLib.idle_add(self.__update_css)
if option == 'plugin/ipconsole/text_color':
rgba_str = settings.get_option(option, 'lavender')
rgba = Gdk.RGBA()
rgba.parse(rgba_str)
self.__text_color_str = "color: " + guiutil.css_from_rgba_without_alpha(
rgba
)
GLib.idle_add(self.__update_css)
if option == 'plugin/ipconsole/background_color':
rgba_str = settings.get_option(option, 'black')
rgba = Gdk.RGBA()
rgba.parse(rgba_str)
self.__background_color_str = (
"background-color: " + guiutil.css_from_rgba_without_alpha(rgba)
)
GLib.idle_add(self.__update_css)
def __update_css(self):
if (
self.__text_color_str is None
or self.__background_color_str is None
or self.__font_str is None
):
# early initialization state: not all properties have been initialized yet
return False
data_str = "text {%s; %s;} textview {%s;}" % (
self.__background_color_str,
self.__text_color_str,
self.__font_str,
)
self.__css_provider.load_from_data(data_str.encode('utf-8'))
return False
def onKeyPressExtend(self, key_event):
if ip.IPythonView.onKeyPressExtend(self, key_event):
return True
if key_event.string == '\x04': # ctrl+d
self.destroy()
class IPythonConsoleWindow(Gtk.Window):
"""
A Gtk Window with an embedded IPython Console.
"""
__ipv = None
def __init__(self, namespace):
Gtk.Window.__init__(self)
self.set_title(_("IPython Console - Exaile"))
self.set_size_request(750, 550)
self.set_resizable(True)
self.__ipv = IPView(namespace)
self.__ipv.connect('destroy', lambda *_widget: self.destroy())
self.__ipv.updateNamespace({'self': self}) # Expose self to IPython
# make it scrollable
scrolled_window = Gtk.ScrolledWindow()
scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
scrolled_window.add(self.__ipv)
scrolled_window.show_all()
self.add(scrolled_window)
event.add_ui_callback(self.on_option_set, SETTINGS_STRING)
def on_option_set(self, _event, settings, option):
if option == 'plugin/ipconsole/opacity':
if sys.platform.startswith("win32"):
# Setting opacity on Windows crashes with segfault,
# see https://bugzilla.gnome.org/show_bug.cgi?id=674449
# Ignore this option.
return
value = settings.get_option(option, 80.0)
value = value / 100
if value > 1:
value = 1
self.set_opacity(value)
class IPConsolePlugin:
"""
This class holds the IPConsole plugin itself
"""
__console_window = None
__exaile = None
def enable(self, exaile):
"""
Called when plugin is enabled, or when exaile is loaded with the plugin
on by default.
"""
self.__exaile = exaile
def on_gui_loaded(self):
"""
Called when Exaile finished loading its GUI
"""
# Trigger initial setup through options:
if xl_settings.get_option('plugin/ipconsole/autostart', False):
self.__show_console()
# add menuitem to tools menu
item = menu.simple_menu_item(
'ipconsole',
['plugin-sep'],
_('Show _IPython Console'),
callback=lambda *_args: self.__show_console(),
)
providers.register('menubar-tools-menu', item)
def teardown(self, _exaile):
"""
Called when Exaile is shutting down
"""
# if window is open, kill it
if self.__console_window is not None:
self.__console_window.destroy()
def disable(self, exaile):
"""
Called when the plugin is disabled
"""
for item in providers.get('menubar-tools-menu'):
if item.name == 'ipconsole':
providers.unregister('menubar-tools-menu', item)
break
self.teardown(exaile)
def __show_console(self):
"""
Display window when the menu item is clicked.
"""
if self.__console_window is None:
import xl
import xlgui
self.__console_window = IPythonConsoleWindow(
{'exaile': self.__exaile, 'xl': xl, 'xlgui': xlgui}
)
self.__console_window.connect('destroy', self.__console_destroyed)
self.__console_window.present()
self.__console_window.on_option_set(
None, xl_settings, 'plugin/ipconsole/opacity'
)
def __console_destroyed(self, *_args):
"""
Called when the window is closed.
"""
self.__console_window = None
def get_preferences_pane(self):
"""
Called by Exaile when ipconsole preferences pane should be shown
"""
return ipconsoleprefs
plugin_class = IPConsolePlugin
| gpl-2.0 | 9,188,069,743,243,977,000 | 31.187266 | 86 | 0.603677 | false |
curtisalexander/learning | python/talk-python/jumpstart/file-search/program.py | 1 | 1784 | from collections import namedtuple
import os
SearchResult = namedtuple('SearchResult',
'file, line, text')
def main():
print_header()
folder = get_folder_from_user()
if not folder:
print("Sorry we can't search that location.")
return
text = get_search_text_from_user()
if not text:
print("We can't search for nothing!")
return
matches = search_folders(folder, text)
for m in matches:
print('------- MATCH -------')
print(f'file: {m.file}')
print(f'line: {m.line}')
print(f'match: {m.text.strip()}')
print()
def print_header():
print('-----------------')
print(' FILE SEARCH ')
print('-----------------')
print()
def get_folder_from_user():
folder = input('What folder do you want to search? ')
if not folder or not folder.strip():
return None
if not os.path.isdir(folder):
return None
return os.path.abspath(folder)
def get_search_text_from_user():
text = input('What are you searching for [single phrases only]? ')
return text
def search_folders(folder, text):
items = os.listdir(folder)
for item in items:
full_item = os.path.join(folder, item)
if os.path.isdir(full_item):
yield from search_folders(full_item, text)
else:
yield from search_file(full_item, text)
def search_file(filename, search_text):
with open(filename, 'r', encoding='utf-8') as fin:
line_num = 0
for line in fin:
line_num += 1
            if line.lower().find(search_text.lower()) >= 0:
m = SearchResult(line=line_num, file=filename, text=line)
yield m
if __name__ == '__main__':
main()
| mit | -3,788,486,095,612,305,400 | 22.473684 | 73 | 0.556054 | false |
lnls-sirius/dev-packages | siriuspy/siriuspy/clientconfigdb/configdb_client.py | 1 | 9599 |
"""Define a class to communicate with configuration database API."""
import json as _json
import datetime as _datetime
from urllib import parse as _parse
from urllib.request import Request as _Request, urlopen as _urlopen
from urllib.error import URLError as _URLError
import dateutil.parser
import numpy as _np
from .. import envars as _envars
from . import _templates
class ConfigDBClient:
"""Perform operation on configuration database."""
_TIMEOUT_DEFAULT = 2.0
_INVALID_CHARACTERS = '\\/:;,?!$'
def __init__(self, url=None, config_type=None):
"""Class constructor.
Parameters
----------
url : str | None
Configuration service host address. For default 'None' value
the URL defined in siripy.envars is used.
"""
self._url = url or _envars.SRVURL_CONFIGDB
self._config_type = config_type
@property
def config_type(self):
"""Type of configuration."""
return self._config_type
@config_type.setter
def config_type(self, name):
if isinstance(name, str):
self._config_type = name
@property
def url(self):
"""Server URL."""
return self._url
@property
def connected(self):
"""Return connection state."""
try:
self.get_dbsize()
except ConfigDBException as err:
return not err.server_code == -2
return True
def get_dbsize(self):
"""Return estimated size of configuration database."""
return self._make_request(stats=True)['size']
def get_nrconfigs(self):
"""Return estimated size of configuration database."""
return self._make_request(stats=True)['count']
def get_config_types(self):
"""Get configuration types existing as database entries."""
return self._make_request()
@staticmethod
def get_config_types_from_templates():
"""Return list of configuration types as defined in templates."""
return list(_templates.get_config_types())
def find_configs(self,
name=None,
begin=None,
end=None,
config_type=None,
discarded=False):
"""Find configurations matching search criteria.
Parameters
----------
discarded : True | False (default) | None
            If True, return only discarded configurations; if False, return
only configurations in use. If None, return all configurations
matching the other criteria.
"""
config_type = self._process_config_type(config_type)
# build search dictionary
find_dict = dict(config_type=config_type)
if name is not None:
find_dict['name'] = name
if begin is not None or end is not None:
find_dict['created'] = {}
if begin is not None:
find_dict['created']['$gte'] = begin
if end is not None:
find_dict['created']['$lte'] = end
return self._make_request(
config_type=config_type, discarded=discarded, data=find_dict)
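    # Usage sketch (the config type and field names are illustrative):
    #
    #   client = ConfigDBClient(config_type='example_type')
    #   infos = client.find_configs(discarded=False)
    #   value = client.get_config_value(infos[0]['name']) if infos else None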
def get_config_value(self, name, config_type=None, discarded=False):
"""Get value field of a given configuration."""
config_type = self._process_config_type(config_type)
return self._make_request(
config_type=config_type, name=name, discarded=discarded)['value']
def get_config_info(self, name, config_type=None, discarded=False):
"""Get information of a given configuration."""
config_type = self._process_config_type(config_type)
res = self.find_configs(
name=name, config_type=config_type, discarded=discarded)
if not res:
raise ConfigDBException(
{'code': 404, 'message': 'Configuration no found.'})
return res[0]
def rename_config(self, oldname, newname, config_type=None):
"""Rename configuration in database."""
config_type = self._process_config_type(config_type)
if not isinstance(newname, str):
raise TypeError(
'Config name must be str, not {}!'.format(type(newname)))
if not self.check_valid_configname(newname):
raise ValueError("There are invalid characters in config name!")
return self._make_request(
config_type=config_type, name=oldname, newname=newname,
method='POST')
def insert_config(self, name, value, config_type=None):
"""Insert configuration into database."""
config_type = self._process_config_type(config_type)
if not isinstance(name, str):
raise TypeError(
'Config name must be str, not {}!'.format(type(name)))
if not self.check_valid_configname(name):
raise ValueError("There are invalid characters in config name!")
if not self.check_valid_value(value, config_type=config_type):
raise TypeError('Incompatible configuration value!')
self._make_request(
config_type=config_type, name=name, method='POST', data=value)
def delete_config(self, name, config_type=None):
"""Mark a valid configuration as discarded."""
config_type = self._process_config_type(config_type)
return self._make_request(
config_type=config_type, name=name, method='DELETE')
def retrieve_config(self, name, config_type=None):
"""Mark a discarded configuration as valid."""
config_type = self._process_config_type(config_type)
return self._make_request(
config_type=config_type, name=name, discarded=True, method='POST')
def get_value_from_template(self, config_type=None):
"""Return value of a configuration type."""
config_type = self._process_config_type(config_type)
return _templates.get_template(config_type)
def check_valid_value(self, value, config_type=None):
"""Check whether values data corresponds to a configuration type."""
config_type = self._process_config_type(config_type)
return _templates.check_value(config_type, value)
@classmethod
def check_valid_configname(cls, name):
"Check if `name` is a valid name for configurations."
return not set(name) & set(cls._INVALID_CHARACTERS)
@staticmethod
def conv_timestamp_txt_2_flt(timestamp):
"""Convert timestamp format from text to float."""
return dateutil.parser.parse(timestamp).timestamp()
@staticmethod
def conv_timestamp_flt_2_txt(timestamp):
"""Convert timestamp format from float to text."""
return str(_datetime.datetime.fromtimestamp(timestamp))
# --- private methods ---
def _process_config_type(self, config_type):
config_type = config_type or self._config_type
if not config_type:
raise ValueError(
'You must define a `config_type` attribute or' +
' provide it in method call.')
return config_type
def _make_request(self, method='GET', data=None, **kwargs):
try:
return self._request(method, data, **kwargs)
except ConfigDBException as err:
if err.server_code == -2:
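                # A server_code of -2 marks a URLError (see _request below):
                # switch to the fallback ConfigDB URL and retry once.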
self._rotate_server_url()
return self._request(method, data, **kwargs)
else:
raise err
def _request(self, method='GET', data=None, **kwargs):
url = self._create_url(**kwargs)
if data is None:
request = _Request(url=url, method=method)
else:
request = _Request(
url=url, method=method,
headers={"Content-Type": "application/json"},
data=_json.dumps(data, default=_jsonify_numpy).encode())
try:
url_conn = _urlopen(
request, timeout=ConfigDBClient._TIMEOUT_DEFAULT)
response = _json.loads(url_conn.read().decode("utf-8"))
except _json.JSONDecodeError:
response = {"code": -1, "message": "JSON decode error"}
except _URLError as err:
response = {'code': -2, 'message': str(err)}
# print(response)
if response['code'] != 200:
raise ConfigDBException(response)
return response['result']
def _rotate_server_url(self):
if self._url != _envars.SRVURL_CONFIGDB_2:
self._url = _envars.SRVURL_CONFIGDB_2
else:
self._url = _envars.SRVURL_CONFIGDB
def _create_url(self, config_type=None, name=None, discarded=False,
stats=False, newname=None):
url = self.url
if stats:
return url + '/stats'
url += '/configs'
if newname:
url += '/rename'
if discarded:
url += '/discarded'
if config_type:
url += '/' + config_type
if name:
url += '/' + name
if newname:
url += '/' + newname
return _parse.quote(url, safe='/:')
class ConfigDBException(Exception):
"""Default exception raised for configDB server errors."""
def __init__(self, response):
"""."""
super().__init__('{code:d}: {message:s}.'.format(**response))
self.server_code = response['code']
self.server_message = response['message']
def _jsonify_numpy(obj):
if isinstance(obj, _np.ndarray):
return obj.tolist()
raise TypeError('Object is not JSON serializable.')
| gpl-3.0 | -8,357,856,691,299,465,000 | 34.420664 | 78 | 0.59277 | false |
ttreeagency/PootleTypo3Org | pootle/apps/pootle_store/util.py | 1 | 6417 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2012 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import copy
import os
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from pootle_misc.aggregate import sum_column
from pootle_misc.util import dictsum
# Unit States
#: Unit is no longer part of the store
OBSOLETE = -100
#: Empty unit
UNTRANSLATED = 0
#: Marked as fuzzy, typically means translation needs more work
FUZZY = 50
#: Unit is fully translated
TRANSLATED = 200
# Map for retrieving natural names for unit states
STATES_MAP = {
OBSOLETE: _("Obsolete"),
UNTRANSLATED: _("Untranslated"),
FUZZY: _("Needs work"),
TRANSLATED: _("Translated"),
}
def add_trailing_slash(path):
"""If path does not end with /, add it and return."""
if len(path) > 0 and path[-1] == os.sep:
return path
else:
return path + os.sep
def relative_real_path(p):
if p.startswith(settings.PODIRECTORY):
return p[len(add_trailing_slash(settings.PODIRECTORY)):]
else:
return p
def absolute_real_path(p):
if not p.startswith(settings.PODIRECTORY):
return os.path.join(settings.PODIRECTORY, p)
else:
return p
empty_quickstats = {'fuzzy': 0,
'fuzzysourcewords': 0,
'review': 0,
'total': 0,
'totalsourcewords': 0,
'translated': 0,
'translatedsourcewords': 0,
'translatedtargetwords': 0,
'untranslated': 0,
'untranslatedsourcewords': 0,
'errors': 0}
def statssum(queryset, empty_stats=empty_quickstats):
totals = empty_stats
for item in queryset:
try:
totals = dictsum(totals, item.getquickstats())
except:
totals['errors'] += 1
return totals
empty_completestats = {0: {u'isfuzzy': 0,
'errors': 0} }
def completestatssum(queryset, empty_stats=empty_completestats):
totals = copy.deepcopy(empty_stats)
for item in queryset:
try:
item_totals = item.getcompletestats()
for cat in set(item_totals) | set(totals):
totals[cat] = dictsum(totals.get(cat, {}),
item_totals.get(cat, {}))
except:
totals[0]['errors'] += 1
return totals
def calculate_stats(units):
"""Calculate translation statistics for a given `units` queryset."""
total = sum_column(units,
['source_wordcount'], count=True)
untranslated = sum_column(units.filter(state=UNTRANSLATED),
['source_wordcount'], count=True)
fuzzy = sum_column(units.filter(state=FUZZY),
['source_wordcount'], count=True)
translated = sum_column(units.filter(state=TRANSLATED),
['source_wordcount', 'target_wordcount'],
count=True)
result = {'errors': 0}
result['total'] = total['count']
if result['total'] == 0:
result['totalsourcewords'] = 0
else:
result['totalsourcewords'] = total['source_wordcount']
result['fuzzy'] = fuzzy['count']
if result['fuzzy'] == 0:
result['fuzzysourcewords'] = 0
else:
result['fuzzysourcewords'] = fuzzy['source_wordcount']
result['untranslated'] = untranslated['count']
if result['untranslated'] == 0:
result['untranslatedsourcewords'] = 0
else:
result['untranslatedsourcewords'] = untranslated['source_wordcount']
result['translated'] = translated['count']
if result['translated'] == 0:
result['translatedsourcewords'] = 0
result['translatedtargetwords'] = 0
else:
result['translatedsourcewords'] = translated['source_wordcount']
result['translatedtargetwords'] = translated['target_wordcount']
return result
def suggestions_sum(queryset):
total = 0
for item in queryset:
total += item.get_suggestion_count()
return total
def find_altsrcs(unit, alt_src_langs, store=None, project=None):
from pootle_store.models import Unit
store = store or unit.store
project = project or store.translation_project.project
altsrcs = Unit.objects.filter(
unitid_hash=unit.unitid_hash,
store__translation_project__project=project,
store__translation_project__language__in=alt_src_langs,
state=TRANSLATED) \
.select_related(
'store', 'store__translation_project',
'store__translation_project__language')
if project.get_treestyle() == 'nongnu':
altsrcs = altsrcs.filter(store__name=store.name)
return altsrcs
def get_sugg_list(unit):
"""Get suggested translations and rated scores for the given unit.
:return: List of tuples containing the suggestion and the score for
it in case it's a terminology project. Otherwise the score
part is filled with False values.
"""
sugg_list = []
scores = {}
suggestions = unit.get_suggestions()
# Avoid the votes query if we're not editing terminology
if (suggestions and (unit.store.is_terminology or
unit.store.translation_project.project.is_terminology)):
from voting.models import Vote
scores = Vote.objects.get_scores_in_bulk(suggestions)
for sugg in suggestions:
score = scores.get(sugg.id, False)
sugg_list.append((sugg, score))
return sugg_list
| gpl-2.0 | 8,377,143,370,565,134,000 | 30.455882 | 76 | 0.616332 | false |
steven-murray/pydftools | pydftools/model.py | 1 | 4956 | """
A module for defining generative distribution function models.
All models *must* be subclassed from :class:`~Model`, which provides the abstract base methods required to implement.
"""
import numpy as np
from .utils import numerical_jac, numerical_hess
class Model(object):
"""
Base class defining a generative distribution function model
All models *must* be subclassed from this, which provides the abstract base methods required to implement.
The primary method is :meth:`~gdf`, which defines the generative distribution, though the class also provides
information about the parameters and other useful things.
Parameters
----------
p0 : sequence
A vector of parameters to use as the default for any methods that require them.
Examples
--------
Evaluate and plot a Schechter function
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(7,11,100)
>>> mass = 10**x
>>> parameters = (-2,10,-1.5)
>>> model = Schechter(parameters)
>>> plt.plot(mass, model.gdf(x, parameters))
>>> plt.xscale('log')
>>> plt.yscale('log')
Any model can be inspected before instantiation. Its default parameters are (using Schechter as an example):
>>> Schechter._p0_default
Its equation is
>>> Schechter.gdf_equation
And the names of its parameters are
>>> Schechter.names_text
"""
"Latex-Equation for gdf"
gdf_equation = None
"Text-friendly parameter names"
names_text = None
"Latex-friendly parameters names"
names = None
"Default value for p0 for the model"
p0 = None
def __init__(self, p0=None):
if p0 is not None:
self.p0 = p0
if not hasattr(self, "n_param"):
if self.names is not None:
self.n_param = len(self.names)
elif self.names_text is not None:
                self.n_param = len(self.names_text)
else:
raise ValueError("Model has not specified the number of parameters")
def gdf(self, x, p):
"""
The generative distribution function.
Parameters
----------
x : array-like
The n-dimensional variate.
p : tuple
The parameters of the distribution.
Returns
-------
phi : array-like
Array of same size as `x`, with value at each point.
"""
pass
def gdf_jacobian(self, x, p):
"""
The jacobian of the GDF as a function of x at point p.
"""
jac = numerical_jac(lambda p: self.gdf(x, p), p)
return jac
def gdf_hessian(self, x, p):
"""
        The hessian of the GDF as a function of x at point p.
"""
return numerical_hess(lambda p: self.gdf(x, p), p)
class Schechter(Model):
"""
A Schechter function model.
"""
p0 = (-2.0, 11.0, -1.3)
names_text = ["log_10 (phi_star)", "log_10 (M_star)", "alpha"]
names = [r"$\log_{10} \phi_\star$", r"$\log_{10} M_\star$", r"$\alpha$"]
gdf_equation = r"$\frac{dN}{dVdx} = \log(10) \phi_\star \mu^{\alpha+1} \exp(-\mu)$, where $\mu = 10^{x - \log_{10} M_\star}$"
def gdf(self, x, p):
mu = 10 ** (x - p[1])
return np.log(10) * 10 ** p[0] * mu ** (p[2] + 1) * np.exp(-mu)
def gdf_jacobian(self, x, p):
g = self.gdf(x, p)
return (
np.log(10)
* g
* np.array([np.ones_like(x), (-p[2] - 1) + 10 ** (x - p[1]), (x - p[1])])
)
def gdf_hessian(self, x, p):
g = self.gdf(x, p)
jac = self.gdf_jacobian(x, p)
p00 = jac[0]
p01 = jac[1]
p02 = jac[2]
p22 = jac[2] * (x - p[1])
p11 = (
jac[1] * (-p[2] - 1)
- np.log(10) * 10 ** (x - p[1]) * g
+ 10 ** (x - p[1]) * jac[1]
)
p12 = jac[1] * x - g - p[1] * jac[1]
return np.log(10) * np.array(
[[p00, p01, p02], [p01, p11, p12], [p02, p12, p22]]
)
class MRP(Model):
"""
An MRP model (see Murray, Robotham, Power, 2017)
"""
p0 = (-2.0, 11.0, -1.0, 1)
names_text = ["log_10 (phi_star)", "log_10 (M_star)", "alpha", "beta"]
names = [r"$\log_{10} \phi_\star$", r"$\log_{10} M_\star$", r"$\alpha$", r"$\beta$"]
gdf_equation = r"$\frac{dN}{dVdx} = \log(10) \beta \phi_\star \mu^{\alpha+1} \exp(-\mu^\beta)$, where $\mu = 10^{x - \log_{10} M_\star}$"
def gdf(self, x, p):
mu = 10 ** (x - p[1])
return (
np.log(10) * p[3] * 10 ** p[0] * mu ** (p[2] + 1) * np.exp(-mu ** abs(p[3]))
)
class PL(Model):
"""
A power-law model.
"""
p0 = (2.0, -1.0)
names_text = ("log_10(A)", "alpha")
names = (r"$\log_{10}A$", r"$\alpha$")
gdf_equation = r"$\frac{dN}{dVdx} = A 10^{\alpha x}$"
def gdf(self, x, p):
return 10 ** p[0] * (10 ** (p[1] * x))
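if __name__ == "__main__":
    # Minimal self-check sketch (added for illustration; not part of the
    # original package API). Each bundled model is evaluated at its default
    # parameters over a small grid of log-masses, mirroring the plotting
    # example in the Model docstring. Run it via `python -m pydftools.model`
    # because of the relative import above.
    x = np.linspace(7.0, 12.0, 5)
    for cls in (Schechter, MRP, PL):
        model = cls()
        print(cls.__name__, model.gdf(x, model.p0))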
| mit | 2,237,806,698,947,694,600 | 25.934783 | 141 | 0.521186 | false |
ControCurator/controcurator | cronjobs/clusterComments.py | 1 | 25964 | import numpy as np
import pandas as pd
import nltk
from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from bs4 import BeautifulSoup
import re
import os
import codecs
from sklearn import feature_extraction
from sklearn.cluster import KMeans
from pprint import pprint
from sklearn.manifold import MDS
from sklearn.metrics.pairwise import cosine_similarity
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
from textwrap import wrap
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from scipy.spatial import distance
from elasticsearch import Elasticsearch
from elasticsearch_dsl.connections import connections
connections.create_connection(hosts=['http://controcurator.org:80/ess'])
es = Elasticsearch(
['http://controcurator.org/ess/'],
port=80)
'''
query = {
"query": {
"bool": {
"must": [
{
"match_all": {}
}
]
}
},
"from": 0,
"size": 400
}
response = es.search(index="controcurator", doc_type="article", body=query)
'''
#article = es.get(index="controcurator", doc_type="article",id="58ed3daee4b0e0ec04effff7")
stopwords = nltk.corpus.stopwords.words('english')
stemmer = SnowballStemmer("english")
#response['hits']['hits'] = [hit for hit in response['hits']['hits'] if 'comments' in hit['_source']]
#response['hits']['hits'].sort(key=lambda d: len(d['_source']['comments']), reverse=True)
def tokenize_and_stem(text):
# first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token
tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
stems = [stemmer.stem(t) for t in filtered_tokens]
return stems
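# Added illustrative comment: with the English Snowball stemmer configured above,
# tokenize_and_stem("Running dogs bark loudly!") returns roughly
# ['run', 'dog', 'bark', 'loud']; purely numeric or punctuation-only tokens are
# discarded by the re.search filter.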
tfidf_vectorizer = TfidfVectorizer(max_df=0.9, max_features=200000,
min_df=0.1, stop_words='english',
use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1,3))
fcluster, axcluster = plt.subplots(6, 8,figsize=(24, 16))
fsenti, axsenti = plt.subplots(6, 8,figsize=(24, 16))
ftype, axtype = plt.subplots(6, 8,figsize=(24, 16))
#fig, ax = plt.subplots(figsize=(20, 10)) # set size
col = 0
row = 0
cluster_colors = {0: '#1b9e77', 1: '#d95f02', 2: '#7570b3', 3: '#e7298a', 4: '#66a61e'}
type_colors = {'guardian': '#FF9900', 'twitter': '#000099'}
senti_colors = {'neg': '#CC0000', 'neu': '#CCCCCC', 'pos' : '#00CC00'}
def classifySentiment(score):
if score < 0:
return 'neg'
elif score > 0:
return 'pos'
else:
return 'neu'
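# Added illustrative comment: classifySentiment(-0.4) -> 'neg',
# classifySentiment(0.0) -> 'neu', classifySentiment(0.7) -> 'pos'.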
articles = ['https://www.theguardian.com/commentisfree/2017/apr/11/working-class-public-spaces-musee-d-orsay',
'https://www.theguardian.com/football/2017/apr/11/juventus-barcelona-champions-league-quarter-final-match-report',
'https://www.theguardian.com/world/2017/apr/11/us-defense-syria-chemical-weapons-attacks-assad-regime',
'https://www.theguardian.com/society/2017/apr/11/parents-fighting-to-keep-baby-charlie-gard-life-support-lose-high-court-battle',
'https://www.theguardian.com/football/2017/apr/11/borussia-dortmund-explosion-team-bus',
'https://www.theguardian.com/education/2017/apr/12/new-free-schools-despite-secondary-staff-cuts',
'https://www.theguardian.com/politics/2017/mar/21/martin-mcguinness-northern-ireland-former-deputy-first-minister-dies',
'https://www.theguardian.com/politics/2017/apr/12/foreign-states-may-have-interfered-in-brexit-vote-report-says',
'https://www.theguardian.com/us-news/2017/apr/11/homeland-security-searches-electronics-border',
'https://www.theguardian.com/environment/2017/mar/22/princess-anne-backs-gm-crops-livestock-unlike-prince-charles',
'https://www.theguardian.com/music/2017/apr/11/palestine-music-expo-pmx-musicians-shaking-up-the-occupied-territories',
'https://www.theguardian.com/world/2017/apr/11/g7-rejects-uk-call-for-sanctions-against-russia-and-syria',
'https://www.theguardian.com/commentisfree/2017/apr/11/frontline-brexit-culture-wars-ask-comedian-al-murray',
'https://www.theguardian.com/news/2017/apr/11/painting-a-new-picture-of-the-little-ice-age-weatherwatch',
'https://www.theguardian.com/us-news/2017/apr/11/detroit-michigan-500-dollar-house-rust-belt-america',
'https://www.theguardian.com/global-development/2017/apr/11/worrying-trend-as-aid-money-stays-in-wealthiest-countries',
'https://www.theguardian.com/society/2017/apr/11/recorded-childhood-cancers-rise-worldwide-world-health-organization',
'https://www.theguardian.com/commentisfree/2016/dec/08/modern-day-hermits-share-experiences',
'https://www.theguardian.com/football/2017/mar/22/ronnie-moran-liverpool-dies',
'https://www.theguardian.com/lifeandstyle/2017/apr/11/vision-thing-how-babies-colour-in-the-world',
'https://www.theguardian.com/world/2017/apr/11/nurses-grant-dying-man-final-wish-cigarette-glass-wine',
'https://www.theguardian.com/business/2017/apr/11/labour-declare-war-late-payers-marks-spencer-jeremy-corbyn',
'https://www.theguardian.com/science/2017/apr/12/scientists-unravel-mystery-of-the-loose-shoelace',
'https://www.theguardian.com/us-news/2017/apr/11/united-airlines-shares-plummet-passenger-removal-controversy',
'https://www.theguardian.com/business/2017/apr/11/judges-reject-us-bankers-claim-to-be-randy-work-genius-in-divorce-case',
'https://www.theguardian.com/business/2017/apr/12/tesco-profits-1bn-growth-supermarket',
'https://www.theguardian.com/money/2017/apr/11/probate-fees-plan-is-daft-as-well-as-devious',
'https://www.theguardian.com/commentisfree/2017/apr/11/donald-trump-russia-rex-tillersons-visit-syria',
'https://www.theguardian.com/environment/2017/apr/12/uk-butterflies-worst-hit-in-2016-with-70-of-species-in-decline-study-finds',
'https://www.theguardian.com/business/2017/apr/11/developing-countries-demands-for-better-life-must-be-met-says-world-bank-head',
'https://www.theguardian.com/politics/2017/apr/12/devon-and-cornwall-pcc-expenses-inquiry-prosecutors',
'https://www.theguardian.com/politics/shortcuts/2017/apr/11/deep-england-brexit-britain',
'https://www.theguardian.com/society/2017/apr/11/uk-supreme-court-denies-tobacco-firms-permission-for-plain-packaging-appeal',
'https://www.theguardian.com/society/2017/mar/21/dawn-butler-stood-up-for-deaf-people-but-we-need-more-than-gestures',
'https://www.theguardian.com/technology/2017/apr/11/gordon-ramsay-father-in-law-admits-hacking-company-computers',
'https://www.theguardian.com/tv-and-radio/2017/mar/20/richard-hammond-injured-in-grand-tour-crash-in-mozambique',
'https://www.theguardian.com/us-news/2017/apr/11/sean-spicer-hitler-chemical-weapons-holocaust-assad',
'https://www.theguardian.com/science/2017/mar/22/face-medieval-cambridge-man-emerges-700-years-after-death',
'https://www.theguardian.com/society/2017/mar/22/new-alzheimers-test-can-predict-age-when-disease-will-appear',
'https://www.theguardian.com/world/2017/apr/11/national-archives-mi5-file-new-zealand-diplomat-paddy-costello-kgb-spy',
'https://www.theguardian.com/australia-news/2017/mar/22/british-war-veteran-granted-permanent-residency-in-australia-ending-visa-drama',
'https://www.theguardian.com/books/2017/apr/11/x-men-illustrator-alleged-anti-christian-messages-marvel-ardian-syaf',
'https://www.theguardian.com/business/2017/apr/12/burger-king-ok-google-commercial',
'https://www.theguardian.com/business/2017/apr/12/edf-customers-price-rise-electricity-gas-energy',
'https://www.theguardian.com/business/2017/apr/12/ship-oil-rig-pioneer-spirit-shell-north-sea-decommissioning',
'https://www.theguardian.com/business/2017/mar/22/asian-shares-drop-investors-fear-trump-wont-deliver-promises',
'https://www.theguardian.com/football/2017/apr/11/tony-adams-vows-to-give-granada-players-a-kick-up-the-arse',
'https://www.theguardian.com/football/2017/mar/22/football-transfer-rumours-jermain-defoe-back-to-west-ham',
'https://www.theguardian.com/global-development/2017/apr/11/india-acts-to-help-acid-attack-victims',
'https://www.theguardian.com/money/2017/apr/11/student-loan-interest-rate-rise-uk-inflation-brexit',
'https://www.theguardian.com/uk-news/2017/mar/17/coroner-warns-of-dangers-after-man-electrocuted-in-bath-while-charging-phone',
'https://www.theguardian.com/business/2017/mar/22/london-taxi-company-coventry-electric-cabs-jobs-brexit',
'https://www.theguardian.com/commentisfree/2016/dec/14/experiences-accessing-mental-health-services-uk',
'https://www.theguardian.com/commentisfree/2017/apr/11/france-left-europe-jean-luc-melenchon-presidential-election',
'https://www.theguardian.com/commentisfree/2017/apr/11/sean-spicers-hitler-holocaust-speak-volumes',
'https://www.theguardian.com/commentisfree/2017/apr/11/united-airlines-flying-while-asian-fear',
'https://www.theguardian.com/environment/2017/mar/22/country-diary-long-mynd-shropshire-light-spout-waterfall',
'https://www.theguardian.com/football/2017/apr/11/borussia-dortmund-shock-team-bus-explosions',
'https://www.theguardian.com/football/2017/mar/17/stewart-downing-middlesbrough-karanka-row-agnew',
'https://www.theguardian.com/football/2017/mar/22/which-football-manager-has-been-sacked-by-one-club-the-most-times',
'https://www.theguardian.com/music/2017/mar/16/ed-sheeran-headline-sunday-night-glastonbury-2017',
'https://www.theguardian.com/sport/2017/apr/11/pennsylvania-woman-jail-threats-youth-football-league-officials',
'https://www.theguardian.com/sport/blog/2017/mar/22/talking-horses-best-wednesday-bets-for-warwick-and-newcastle',
'https://www.theguardian.com/technology/2017/mar/17/youtube-and-google-search-for-answers',
'https://www.theguardian.com/tv-and-radio/2017/mar/19/neighbours-tv-soap-could-disappear-from-british-screens',
'https://www.theguardian.com/uk-news/2017/apr/11/boris-johnson-full-support-failure-secure-sanctions-syria-russia',
'https://www.theguardian.com/world/2017/mar/22/brussels-unveil-terror-victims-memorial-one-year-after-attacks',
'https://www.theguardian.com/world/2017/mar/22/north-korea-missile-test-failure',
'https://www.theguardian.com/business/2017/mar/16/bank-of-england-uk-interest-rates-monetary-policy-committee',
'https://www.theguardian.com/business/2017/mar/21/inflation-uk-wages-lag-behind-prices-mark-carney',
'https://www.theguardian.com/business/2017/mar/22/nervous-markets-take-fright-at-prospect-of-trump-failing-to-deliver',
'https://www.theguardian.com/commentisfree/2016/dec/21/i-lost-my-mum-seven-weeks-ago-our-readers-on-coping-with-grief-at-christmas',
'https://www.theguardian.com/commentisfree/2017/jan/06/brexit-vote-have-you-applied-for-a-second-passport',
'https://www.theguardian.com/fashion/2017/mar/22/fiorucci-why-the-disco-friendly-label-is-perfect-for-2017',
'https://www.theguardian.com/film/2017/mar/17/from-the-corner-of-the-oval-obama-white-house-movie',
'https://www.theguardian.com/film/2017/mar/22/film-franchises-terminator-sequel-arnold-schwarzenegger-die-hard-alien',
'https://www.theguardian.com/law/2017/apr/12/judge-sacked-over-online-posts-calling-his-critics-donkeys',
'https://www.theguardian.com/lifeandstyle/2017/mar/17/monopoly-board-game-new-tokens-vote',
'https://www.theguardian.com/music/2017/mar/16/stormzy-condemns-nme-for-using-him-as-poster-boy-for-depression',
'https://www.theguardian.com/music/2017/mar/21/los-angeles-police-mistake-wyclef-jean-suspect-assault-case',
'https://www.theguardian.com/politics/2017/mar/22/uk-based-airlines-told-to-move-to-europe-after-brexit-or-lose-major-routes',
'https://www.theguardian.com/society/2017/apr/11/national-social-care-service-centralised-nhs',
'https://www.theguardian.com/sport/2017/mar/17/wales-france-six-nations-world-rankings',
'https://www.theguardian.com/tv-and-radio/2017/mar/22/n-word-taboo-tv-carmichael-show-atlanta-insecure-language',
'https://www.theguardian.com/uk-news/2017/mar/16/man-dies-explosion-former-petrol-station-highgate-north-london-swains-lane',
'https://www.theguardian.com/us-news/2017/mar/17/national-weather-service-forecasting-temperatures-storms',
'https://www.theguardian.com/us-news/2017/mar/22/fbi-muslim-employees-discrimination-religion-middle-east-travel',
'https://www.theguardian.com/us-news/2017/mar/22/zapier-pay-employees-move-silicon-valley-startup',
'https://www.theguardian.com/world/2017/mar/17/fleeing-from-dantes-hell-on-mount-etna',
'https://www.theguardian.com/world/2017/mar/22/gay-clergyman-jeffrey-johns-turned-down-welsh-bishop-twice-before-claims',
'https://www.theguardian.com/world/2017/mar/23/apple-paid-no-tax-in-new-zealand-for-at-least-a-decade-reports-say',
'https://www.theguardian.com/books/2017/mar/22/comics-chavez-redline-transformers-v-gi-joe',
'https://www.theguardian.com/business/2017/apr/11/uk-inflation-rate-stays-three-year-high',
'https://www.theguardian.com/commentisfree/2017/apr/12/charlie-gard-legal-aid',
'https://www.theguardian.com/commentisfree/2017/mar/22/rights-gig-economy-self-employed-worker',
'https://www.theguardian.com/media/2017/mar/14/face-off-mps-and-social-media-giants-online-hate-speech-facebook-twitter',
'https://www.theguardian.com/music/2017/apr/11/michael-buble-wife-says-son-noah-is-recovering-from-cancer',
'https://www.theguardian.com/society/2017/apr/11/bullying-and-violence-grip-out-of-control-guys-marsh-jail-dorset',
'https://www.theguardian.com/stage/2017/mar/22/trisha-brown-obituary',
'https://www.theguardian.com/travel/2017/mar/22/10-best-clubs-in-amsterdam-chosen-by-dj-experts',
'https://www.theguardian.com/us-news/2017/apr/11/us-universal-healthcare-single-payer-rallies',
'https://www.theguardian.com/us-news/2017/mar/22/us-border-agent-sexually-assaults-teenage-sisters-texas',
'https://www.theguardian.com/world/2017/apr/11/hundreds-of-refugees-missing-after-dunkirk-camp-fire',
'https://www.theguardian.com/world/2017/mar/22/unicef-condemns-sale-cambodian-breast-milk-us-mothers-firm-ambrosia-labs',
'https://www.theguardian.com/world/commentisfree/2017/mar/17/week-in-patriarchy-bbc-dad-jessica-valenti',
'https://www.theguardian.com/business/2017/mar/15/us-federal-reserve-raises-interest-rates-to-1',
'https://www.theguardian.com/business/2017/mar/21/london-cycle-courier-was-punished-for-refusing-work-after-eight-hours-in-cold',
'https://www.theguardian.com/football/2017/mar/17/tottenham-harry-kane-return-injury',
'https://www.theguardian.com/politics/2017/mar/15/browse-of-commons-explore-uk-parliament-with-first-virtual-tour',
'https://www.theguardian.com/politics/2017/mar/21/martin-mcguinness-sinn-fein-members-carry-coffin-home-in-derry',
'https://www.theguardian.com/sport/2017/mar/18/ireland-england-six-nations-dublin',
'https://www.theguardian.com/us-news/2017/mar/20/ivanka-trump-west-wing-office-security-clearance',
'https://www.theguardian.com/film/2017/mar/21/look-on-the-sweet-side-of-love-actually',
'https://www.theguardian.com/media/2017/mar/20/jamie-oliver-new-show-deal-channel-4-tv',
'https://www.theguardian.com/politics/2017/mar/16/theresa-may-vows-absolute-faith-in-hammond-after-u-turn',
'https://www.theguardian.com/politics/2017/mar/21/nicola-sturgeon-accused-of-hypocrisy-as-independence-debate-begins',
'https://www.theguardian.com/sport/2017/mar/17/jailed-transgender-fell-runner-thought-uk-athletics-was-trying-to-kill-her',
'https://www.theguardian.com/uk-news/2017/mar/16/former-marine-cleared-alexander-blackman-freed-immediately-ex-soldier-jail',
'https://www.theguardian.com/world/2017/mar/16/india-brexit-and-the-legacy-of-empire-in-africa',
'https://www.theguardian.com/world/2017/mar/18/a-good-looking-bird-the-bush-stone-curlew-that-loves-its-own-reflection',
'https://www.theguardian.com/world/2017/mar/21/electronics-ban-middle-east-flights-safety-hazards-airline-profit',
'https://www.theguardian.com/business/2017/mar/14/us-federal-reserve-interest-rates-janet-yellen-donald-trump',
'https://www.theguardian.com/business/2017/mar/16/rupert-murdoch-sky-bid-uk-ofcom',
'https://www.theguardian.com/business/2017/mar/20/us-forbids-devices-larger-cell-phones-flights-13-countries',
'https://www.theguardian.com/business/2017/mar/22/uk-ceos-national-living-wage-equality-trust-pay-gap',
'https://www.theguardian.com/football/2017/mar/17/arsene-wenger-granit-xhaka-referees',
'https://www.theguardian.com/lifeandstyle/2017/mar/17/chorizo-chicken-lemon-yoghurt-cavolo-nero-recipe-anna-hansen',
'https://www.theguardian.com/politics/2017/mar/17/george-osborne-london-evening-standard-editor-appointment-evgeny-lebedev',
'https://www.theguardian.com/uk-news/2017/mar/16/scotland-cannot-afford-to-ignore-its-deficit',
'https://www.theguardian.com/uk-news/2017/mar/17/prince-william-visits-paris-for-the-first-time-since-mother-dianas-death',
'https://www.theguardian.com/us-news/2017/mar/16/oc-actor-mischa-barton-speaks-out-sex-tapes-scandal',
'https://www.theguardian.com/world/2017/mar/15/uk-government-child-slavery-products-sold-britain-innovation-fund',
'https://www.theguardian.com/commentisfree/2017/mar/17/the-guardian-view-on-brexit-and-publishing-a-hardcore-problem',
'https://www.theguardian.com/politics/2017/mar/21/osborne-becomes-the-remainers-great-hope',
'https://www.theguardian.com/society/2017/mar/16/scotlands-exam-body-to-ensure-invigilators-get-living-wage',
'https://www.theguardian.com/society/2017/mar/18/rural-deprivation-and-ill-health-in-england-in-danger-of-being-overlooked',
'https://www.theguardian.com/sport/2017/mar/16/michael-oleary-team-not-ruling-out-return-mullins-yard-cheltenham-festival-horse-racing',
'https://www.theguardian.com/sport/2017/mar/17/ireland-v-england-lions-six-nations-rugby-union',
'https://www.theguardian.com/sport/2017/mar/18/this-is-your-night-conlans-dream-debut-wipes-out-nightmares-of-the-past',
'https://www.theguardian.com/sport/2017/mar/21/bha-dope-tests-horses-racecourse',
'https://www.theguardian.com/sport/2017/mar/21/donald-trump-colin-kaepernick-free-agent-anthem-protest',
'https://www.theguardian.com/uk-news/2017/mar/16/protect-survive-nuclear-war-republished-pamphlet',
'https://www.theguardian.com/uk-news/2017/mar/21/sisters-al-najjar-sue-cumberland-hotel-london-brutal-hammer-attack',
'https://www.theguardian.com/uk-news/2017/mar/22/what-support-does-your-employer-give-to-fathers',
'https://www.theguardian.com/artanddesign/2017/mar/21/winged-bull-and-giant-dollop-of-cream-to-adorn-trafalgar-squares-fourth-plinth',
'https://www.theguardian.com/books/2017/mar/17/the-bone-readers-jacob-ross-caribbean-thriller-jhalak-prize',
'https://www.theguardian.com/business/2017/mar/11/democrats-question-trump-conflict-of-interest-deutsche-bank-investigation-money-laundering',
'https://www.theguardian.com/business/2017/mar/17/barclays-bob-diamond-panmure-gordon',
'https://www.theguardian.com/commentisfree/2017/mar/15/brexit-was-an-english-vote-for-independence-you-cant-begrudge-the-scots-the-same',
'https://www.theguardian.com/environment/2017/mar/21/the-snow-buntings-drift-takes-them-much-further-than-somerset',
'https://www.theguardian.com/fashion/2017/mar/21/art-colour-victoria-beckham-van-gogh-fashion',
'https://www.theguardian.com/lifeandstyle/2017/mar/17/i-am-26-and-find-it-hard-to-meet-people-on-the-same-wavelength-as-me',
'https://www.theguardian.com/lifeandstyle/shortcuts/2017/mar/21/open-a-window-and-have-a-cold-shower-could-being-chilly-improve-your-health',
'https://www.theguardian.com/society/2017/mar/22/four-supersized-prisons-to-be-built-england-and-wales-elizabeth-truss-plan',
'https://www.theguardian.com/sport/2017/mar/17/ben-youngs-england-ireland-grand-slam-six-nations',
'https://www.theguardian.com/technology/2017/mar/17/google-ads-bike-helmets-adverts',
'https://www.theguardian.com/us-news/2017/mar/20/fbi-director-comey-confirms-investigation-trump-russia',
'https://www.theguardian.com/world/2017/mar/17/time-for-a-declaration-of-war-on-happiness']
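# Added note: the loop below only processes the first five URLs of this list
# (articles[0:5]); widen the slice to cluster more of the listed articles.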
# go through each file
for file in articles[0:5]:
query = {
"query": {
"constant_score": {
"filter": {
"term": {
"url": file
}
}
}
},
"from": 0,
"size": 1
}
response = es.search(index="controcurator", doc_type="article", body=query)
article = response['hits']['hits'][0]
print article['_source']['url']
print article['_id']
#for article in response['hits']['hits']:
if 'comments' not in article['_source']:
print "-- NO COMMENTS --"
continue
print len(article['_source']['comments'])
if len(article['_source']['comments']) > 500:
print "-- TOO MANY COMMENTS --"
continue
if len(article['_source']['comments']) < 50:
print "-- NOT ENOUGH COMMENTS --"
continue
# vectorization
tfidf_matrix = tfidf_vectorizer.fit_transform([c['text'] for c in article['_source']['comments']])
# clustering
num_clusters = 5
km = KMeans(n_clusters=num_clusters)
km.fit(tfidf_matrix)
centers = km.cluster_centers_
clusters = km.labels_.tolist()
# distances
similarity_distance = 1 - cosine_similarity(tfidf_matrix)
mds = MDS(n_components=2, dissimilarity="precomputed", random_state=1)
pos = mds.fit_transform(similarity_distance)
# save results to comments
for i, cluster in enumerate(clusters):
article['_source']['comments'][i]['cluster'] = cluster
article['_source']['comments'][i]['cluster_x'] = pos[i][0]
article['_source']['comments'][i]['cluster_y'] = pos[i][1]
#for comment in article['_source']['comments']:
# print comment['cluster'],',',comment['cluster_x'],',',comment['cluster_y'],',',comment['text'].encode('UTF-8')
for c in article['_source']['comments']:
if 'type' not in c:
c['type'] = 'guardian'
data = [{'x':c['cluster_x'], 'y':c['cluster_y'], 'label':c['cluster'], 'sentiment': classifySentiment(c['sentiment']['sentiment']), 'type':c['type'], 'title':c['text'].replace('\r', '').replace('\n', '')} for c in article['_source']['comments']]
#create data frame that has the result of the MDS plus the cluster numbers and titles
clustergroups = pd.DataFrame().from_dict(data).groupby('label')
typegroups = pd.DataFrame().from_dict(data).groupby('type')
sentigroups = pd.DataFrame().from_dict(data).groupby('sentiment')
#fig, ax = plt.subplots(figsize=(20, 10)) # set size
# ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
#iterate through groups to layer the plot
#note that I use the cluster_name and cluster_color dicts with the 'name' lookup to return the appropriate color/label
# ms: marker size
for name, group in clustergroups:
axcluster[row, col].plot(group.x, group.y, marker='o', linestyle='', ms=5, color=cluster_colors[name],
mec='none')
axcluster[row, col].set_aspect('auto')
axcluster[row, col].tick_params(\
axis= 'x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off')
axcluster[row, col].tick_params(\
axis= 'y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
        left='off',      # ticks along the left edge are off
top='off', # ticks along the top edge are off
labelleft='off')
axcluster[row, col].set_title("\n".join(wrap(article['_source']['document']['title'], 30)),fontsize=8)
for name, group in typegroups:
axtype[row, col].plot(group.x, group.y, marker='o', linestyle='', ms=5, color=type_colors[name],
mec='none')
axtype[row, col].set_aspect('auto')
axtype[row, col].tick_params(\
axis= 'x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off')
axtype[row, col].tick_params(\
axis= 'y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
        left='off',      # ticks along the left edge are off
top='off', # ticks along the top edge are off
labelleft='off')
axtype[row, col].set_title("\n".join(wrap(article['_source']['document']['title'], 30)),fontsize=8)
#title.set_y(1.05)
for name, group in sentigroups:
axsenti[row, col].plot(group.x, group.y, marker='o', linestyle='', ms=5, color=senti_colors[name],
mec='none')
axsenti[row, col].set_aspect('auto')
axsenti[row, col].tick_params(\
axis= 'x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off')
axsenti[row, col].tick_params(\
axis= 'y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
        left='off',      # ticks along the left edge are off
top='off', # ticks along the top edge are off
labelleft='off')
axsenti[row, col].set_title("\n".join(wrap(article['_source']['document']['title'], 30)),fontsize=8)
#ax.legend(numpoints=1) #show legend with only 1 point
#add label in x,y position with the label as the film title
# for i in range(len(df)):
# ax.text(df.ix[i]['x'], df.ix[i]['y'], df.ix[i]['title'], size=8)
col += 1
if col > 7:
col = 0
row += 1
if row > 5:
break
print article['_source']['document']['title'].encode('UTF-8')
for name, group in sentigroups:
avgx = group.x.mean()
avgy = group.y.mean()
group['dist'] = group.apply(lambda row: float(distance.pdist([(row['x'], row['y']), (avgx, avgy)])), axis=1)
print group
print "==="
# Fine-tune figure; hide x ticks for top plots and y ticks for right plots
#plt.setp([a.get_xticklabels() for a in axarr[:,-1]], visible=False)
#plt.setp([a.get_yticklabels() for a in axarr[0,:]], visible=False)
#plt.show() #show the plot
#fcluster.savefig('img/clusters.png', dpi=200)
#ftype.savefig('img/type.png', dpi=200)
#fsenti.savefig('img/sentiment.png', dpi=200)
| mit | 6,142,809,010,730,646,000 | 55.689956 | 246 | 0.749615 | false |
ngaut/vitess | test/utils.py | 1 | 20433 | #!/usr/bin/env python
import json
import logging
import optparse
import os
import shlex
import shutil
import signal
import socket
from subprocess import Popen, CalledProcessError, PIPE
import sys
import time
import unittest
import urllib2
import MySQLdb
import environment
from vtctl import vtctl_client
from mysql_flavor import set_mysql_flavor
from protocols_flavor import set_protocols_flavor, protocols_flavor
from topo_flavor.server import set_topo_server_flavor
options = None
devnull = open('/dev/null', 'w')
hostname = socket.gethostname()
class TestError(Exception):
pass
class Break(Exception):
pass
environment.setup()
class LoggingStream(object):
def __init__(self):
self.line = ""
def write(self, value):
if value == "\n":
# we already printed it
self.line = ""
return
self.line += value
logging.info("===== " + self.line)
if value.endswith("\n"):
self.line = ""
def writeln(self, value):
self.write(value)
self.line = ""
def flush(self):
pass
def add_options(parser):
parser.add_option('-d', '--debug', action='store_true',
help='utils.pause() statements will wait for user input')
parser.add_option('-k', '--keep-logs', action='store_true',
help="Don't delete log files on teardown.")
parser.add_option("-q", "--quiet", action="store_const", const=0, dest="verbose", default=1)
parser.add_option("-v", "--verbose", action="store_const", const=2, dest="verbose", default=1)
parser.add_option('--skip-teardown', action='store_true')
parser.add_option("--mysql-flavor")
parser.add_option("--protocols-flavor")
parser.add_option("--topo-server-flavor")
def set_options(opts):
global options
options = opts
set_mysql_flavor(options.mysql_flavor)
set_protocols_flavor(options.protocols_flavor)
set_topo_server_flavor(options.topo_server_flavor)
# main executes the test classes contained in the passed module, or
# __main__ if empty.
def main(mod=None):
if mod == None:
mod = sys.modules['__main__']
global options
parser = optparse.OptionParser(usage="usage: %prog [options] [test_names]")
add_options(parser)
(options, args) = parser.parse_args()
if options.verbose == 0:
level = logging.WARNING
elif options.verbose == 1:
level = logging.INFO
else:
level = logging.DEBUG
logging.getLogger().setLevel(level)
logging.basicConfig(format='-- %(asctime)s %(module)s:%(lineno)d %(levelname)s %(message)s')
set_options(options)
try:
suite = unittest.TestSuite()
if not args:
# this will run the setup and teardown
suite.addTests(unittest.TestLoader().loadTestsFromModule(mod))
else:
if args[0] == 'teardown':
mod.tearDownModule()
elif args[0] == 'setup':
mod.setUpModule()
else:
for arg in args:
# this will run the setup and teardown
suite.addTests(unittest.TestLoader().loadTestsFromName(arg, mod))
if suite.countTestCases() > 0:
logger = LoggingStream()
result = unittest.TextTestRunner(stream=logger, verbosity=options.verbose, failfast=True).run(suite)
if not result.wasSuccessful():
sys.exit(-1)
except KeyboardInterrupt:
logging.warning("======== Tests interrupted, cleaning up ========")
mod.tearDownModule()
# If you interrupt a test, you probably want to stop evaluating the rest.
sys.exit(1)
finally:
if options.keep_logs:
logging.warning("Leaving temporary files behind (--keep-logs), please "
"clean up before next run: " + os.environ["VTDATAROOT"])
def remove_tmp_files():
if options.keep_logs:
return
try:
shutil.rmtree(environment.tmproot)
except OSError as e:
logging.debug("remove_tmp_files: %s", str(e))
def pause(prompt):
if options.debug:
raw_input(prompt)
# sub-process management
pid_map = {}
already_killed = []
def _add_proc(proc):
pid_map[proc.pid] = proc
with open(environment.tmproot+'/test-pids', 'a') as f:
print >> f, proc.pid, os.path.basename(proc.args[0])
def kill_sub_processes():
for proc in pid_map.values():
if proc.pid and proc.returncode is None:
proc.kill()
if not os.path.exists(environment.tmproot+'/test-pids'):
return
with open(environment.tmproot+'/test-pids') as f:
for line in f:
try:
parts = line.strip().split()
pid = int(parts[0])
proc = pid_map.get(pid)
if not proc or (proc and proc.pid and proc.returncode is None):
if pid not in already_killed:
os.kill(pid, signal.SIGTERM)
except OSError as e:
logging.debug("kill_sub_processes: %s", str(e))
def kill_sub_process(proc, soft=False):
if proc is None:
return
pid = proc.pid
if soft:
proc.terminate()
else:
proc.kill()
if pid and pid in pid_map:
del pid_map[pid]
already_killed.append(pid)
# run in foreground, possibly capturing output
def run(cmd, trap_output=False, raise_on_error=True, **kargs):
if isinstance(cmd, str):
args = shlex.split(cmd)
else:
args = cmd
if trap_output:
kargs['stdout'] = PIPE
kargs['stderr'] = PIPE
logging.debug("run: %s %s", str(cmd), ', '.join('%s=%s' % x for x in kargs.iteritems()))
proc = Popen(args, **kargs)
proc.args = args
stdout, stderr = proc.communicate()
if proc.returncode:
if raise_on_error:
raise TestError('cmd fail:', args, stdout, stderr)
else:
logging.debug('cmd fail: %s %s %s', str(args), stdout, stderr)
return stdout, stderr
# run sub-process, expects failure
def run_fail(cmd, **kargs):
if isinstance(cmd, str):
args = shlex.split(cmd)
else:
args = cmd
kargs['stdout'] = PIPE
kargs['stderr'] = PIPE
if options.verbose == 2:
logging.debug("run: (expect fail) %s %s", cmd, ', '.join('%s=%s' % x for x in kargs.iteritems()))
proc = Popen(args, **kargs)
proc.args = args
stdout, stderr = proc.communicate()
if proc.returncode == 0:
logging.info("stdout:\n%sstderr:\n%s", stdout, stderr)
raise TestError('expected fail:', args, stdout, stderr)
return stdout, stderr
# run a daemon - kill when this script exits
def run_bg(cmd, **kargs):
if options.verbose == 2:
logging.debug("run: %s %s", cmd, ', '.join('%s=%s' % x for x in kargs.iteritems()))
if 'extra_env' in kargs:
kargs['env'] = os.environ.copy()
if kargs['extra_env']:
kargs['env'].update(kargs['extra_env'])
del(kargs['extra_env'])
if isinstance(cmd, str):
args = shlex.split(cmd)
else:
args = cmd
proc = Popen(args=args, **kargs)
proc.args = args
_add_proc(proc)
return proc
def wait_procs(proc_list, raise_on_error=True):
for proc in proc_list:
pid = proc.pid
if pid:
already_killed.append(pid)
for proc in proc_list:
proc.wait()
for proc in proc_list:
if proc.returncode:
if options.verbose >= 1 and proc.returncode not in (-9,):
sys.stderr.write("proc failed: %s %s\n" % (proc.returncode, proc.args))
if raise_on_error:
raise CalledProcessError(proc.returncode, ' '.join(proc.args))
def run_procs(cmds, raise_on_error=True):
procs = []
for cmd in cmds:
procs.append(run_bg(cmd))
wait_procs(procs, raise_on_error=raise_on_error)
def validate_topology(ping_tablets=False):
if ping_tablets:
run_vtctl(['Validate', '-ping-tablets'])
else:
run_vtctl(['Validate'])
def zk_ls(path):
out, err = run(environment.binary_argstr('zk')+' ls '+path, trap_output=True)
return sorted(out.splitlines())
def zk_cat(path):
out, err = run(environment.binary_argstr('zk')+' cat '+path, trap_output=True)
return out
def zk_cat_json(path):
data = zk_cat(path)
return json.loads(data)
# wait_step is a helper for looping until a condition is true.
# use as follow:
# timeout = 10
# while True:
# if done:
# break
# timeout = utils.wait_step('condition', timeout)
def wait_step(msg, timeout, sleep_time=1.0):
timeout -= sleep_time
if timeout <= 0:
raise TestError("timeout waiting for condition '%s'" % msg)
logging.debug("Sleeping for %f seconds waiting for condition '%s'" %
(sleep_time, msg))
time.sleep(sleep_time)
return timeout
# vars helpers
def get_vars(port):
"""
Returns the dict for vars, from a vtxxx process, or None
if we can't get them.
"""
try:
url = 'http://localhost:%u/debug/vars' % int(port)
f = urllib2.urlopen(url)
data = f.read()
f.close()
  except Exception:
return None
try:
return json.loads(data)
except ValueError:
print data
raise
# wait_for_vars will wait until we can actually get the vars from a process,
# and if var is specified, will wait until that var is in vars
def wait_for_vars(name, port, var=None):
timeout = 5.0
while True:
v = get_vars(port)
if v and (var is None or var in v):
break
timeout = wait_step('waiting for /debug/vars of %s' % name, timeout)
# zkocc helpers
def zkocc_start(cells=['test_nj'], extra_params=[]):
args = environment.binary_args('zkocc') + [
'-port', str(environment.topo_server().zkocc_port_base),
'-stderrthreshold=ERROR',
] + extra_params + cells
sp = run_bg(args)
wait_for_vars("zkocc", environment.topo_server().zkocc_port_base)
return sp
def zkocc_kill(sp):
kill_sub_process(sp)
sp.wait()
# vtgate helpers, assuming it always restarts on the same port
def vtgate_start(vtport=None, cell='test_nj', retry_delay=1, retry_count=1,
topo_impl=None, tablet_bson_encrypted=False, cache_ttl='1s',
auth=False, timeout="5s", cert=None, key=None, ca_cert=None,
socket_file=None, extra_args=None):
port = vtport or environment.reserve_ports(1)
secure_port = None
args = environment.binary_args('vtgate') + [
'-port', str(port),
'-cell', cell,
'-retry-delay', '%ss' % (str(retry_delay)),
'-retry-count', str(retry_count),
'-log_dir', environment.vtlogroot,
'-srv_topo_cache_ttl', cache_ttl,
'-timeout', timeout,
] + protocols_flavor().tabletconn_protocol_flags()
if topo_impl:
args.extend(['-topo_implementation', topo_impl])
else:
args.extend(environment.topo_server().flags())
if tablet_bson_encrypted:
args.append('-tablet-bson-encrypted')
if auth:
args.extend(['-auth-credentials', os.path.join(environment.vttop, 'test', 'test_data', 'authcredentials_test.json')])
if cert:
secure_port = environment.reserve_ports(1)
args.extend(['-secure-port', '%s' % secure_port,
'-cert', cert,
'-key', key])
if ca_cert:
args.extend(['-ca_cert', ca_cert])
if socket_file:
args.extend(['-socket_file', socket_file])
if extra_args:
args.extend(extra_args)
sp = run_bg(args)
if cert:
wait_for_vars("vtgate", port, "SecureConnections")
return sp, port, secure_port
else:
wait_for_vars("vtgate", port)
return sp, port
def vtgate_kill(sp):
if sp is None:
return
kill_sub_process(sp, soft=True)
sp.wait()
# vtctl helpers
# The modes are not all equivalent, and we don't really strive for full equivalence.
# If a client needs to rely on vtctl's command line behavior, make
# sure to use mode=utils.VTCTL_VTCTL
VTCTL_AUTO = 0
VTCTL_VTCTL = 1
VTCTL_VTCTLCLIENT = 2
VTCTL_RPC = 3
def run_vtctl(clargs, log_level='', auto_log=False, expect_fail=False,
mode=VTCTL_AUTO, **kwargs):
if mode == VTCTL_AUTO:
if not expect_fail and vtctld:
mode = VTCTL_RPC
else:
mode = VTCTL_VTCTL
if mode == VTCTL_VTCTL:
return run_vtctl_vtctl(clargs, log_level=log_level, auto_log=auto_log,
expect_fail=expect_fail, **kwargs)
elif mode == VTCTL_VTCTLCLIENT:
result = vtctld.vtctl_client(clargs)
return result, ""
elif mode == VTCTL_RPC:
logging.debug("vtctl: %s", " ".join(clargs))
result = vtctld_connection.execute_vtctl_command(clargs, info_to_debug=True, action_timeout=120)
return result, ""
raise Exception('Unknown mode: %s', mode)
def run_vtctl_vtctl(clargs, log_level='', auto_log=False, expect_fail=False,
**kwargs):
args = environment.binary_args('vtctl') + ['-log_dir', environment.vtlogroot]
args.extend(environment.topo_server().flags())
args.extend(protocols_flavor().tablet_manager_protocol_flags())
args.extend(protocols_flavor().tabletconn_protocol_flags())
if auto_log:
if options.verbose == 2:
log_level='INFO'
elif options.verbose == 1:
log_level='WARNING'
else:
log_level='ERROR'
if log_level:
args.append('--stderrthreshold=%s' % log_level)
if isinstance(clargs, str):
cmd = " ".join(args) + ' ' + clargs
else:
cmd = args + clargs
if expect_fail:
return run_fail(cmd, **kwargs)
return run(cmd, **kwargs)
# run_vtctl_json runs the provided vtctl command and returns the result
# parsed as json
def run_vtctl_json(clargs):
stdout, stderr = run_vtctl(clargs, trap_output=True, auto_log=True)
return json.loads(stdout)
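# Added illustrative comment (the tablet alias below is made up): a typical call
# is `run_vtctl_json(['GetTablet', 'test_nj-0000062344'])`, which returns the
# JSON that vtctl printed, parsed into a Python dict.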
# vtworker helpers
def run_vtworker(clargs, log_level='', auto_log=False, expect_fail=False, **kwargs):
args = environment.binary_args('vtworker') + [
'-log_dir', environment.vtlogroot,
'-port', str(environment.reserve_ports(1))]
args.extend(environment.topo_server().flags())
args.extend(protocols_flavor().tablet_manager_protocol_flags())
if auto_log:
if options.verbose == 2:
log_level='INFO'
elif options.verbose == 1:
log_level='WARNING'
else:
log_level='ERROR'
if log_level:
args.append('--stderrthreshold=%s' % log_level)
cmd = args + clargs
if expect_fail:
return run_fail(cmd, **kwargs)
return run(cmd, **kwargs)
# vtclient2 helpers
# driver is one of:
# - vttablet (default), vttablet-streaming
# - vtdb, vtdb-streaming (default topo server)
# - vtdb-zk, vtdb-zk-streaming (forced zk topo server)
# - vtdb-zkocc, vtdb-zkocc-streaming (forced zkocc topo server)
# path is either: keyspace/shard for vttablet* or zk path for vtdb*
def vtclient2(uid, path, query, bindvars=None, user=None, password=None, driver=None,
verbose=False, raise_on_error=True):
if (user is None) != (password is None):
raise TypeError("you should provide either both or none of user and password")
  # strip a leading '/' so ZK paths don't end up containing '//', which confuses things
if path.startswith('/'):
path = path[1:]
server = "localhost:%u/%s" % (uid, path)
cmdline = environment.binary_args('vtclient2') + ['-server', server]
cmdline += environment.topo_server().flags()
cmdline += protocols_flavor().tabletconn_protocol_flags()
if user is not None:
cmdline.extend(['-tablet-bson-username', user,
'-tablet-bson-password', password])
if bindvars:
cmdline.extend(['-bindvars', bindvars])
if driver:
cmdline.extend(['-driver', driver])
if verbose:
cmdline.extend(['-alsologtostderr', '-verbose'])
cmdline.append(query)
return run(cmdline, raise_on_error=raise_on_error, trap_output=True)
# mysql helpers
def mysql_query(uid, dbname, query):
conn = MySQLdb.Connect(user='vt_dba',
unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid),
db=dbname)
cursor = conn.cursor()
cursor.execute(query)
try:
return cursor.fetchall()
finally:
conn.close()
def mysql_write_query(uid, dbname, query):
conn = MySQLdb.Connect(user='vt_dba',
unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid),
db=dbname)
cursor = conn.cursor()
conn.begin()
cursor.execute(query)
conn.commit()
try:
return cursor.fetchall()
finally:
conn.close()
def check_db_var(uid, name, value):
conn = MySQLdb.Connect(user='vt_dba',
unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid))
cursor = conn.cursor()
cursor.execute("show variables like '%s'" % name)
row = cursor.fetchone()
if row != (name, value):
raise TestError('variable not set correctly', name, row)
conn.close()
def check_db_read_only(uid):
return check_db_var(uid, 'read_only', 'ON')
def check_db_read_write(uid):
return check_db_var(uid, 'read_only', 'OFF')
def wait_db_read_only(uid):
for x in xrange(3):
try:
check_db_read_only(uid)
return
except TestError as e:
logging.warning("wait_db_read_only: %s", str(e))
time.sleep(1.0)
raise e
def check_srv_keyspace(cell, keyspace, expected, keyspace_id_type='uint64'):
ks = run_vtctl_json(['GetSrvKeyspace', cell, keyspace])
result = ""
for tablet_type in sorted(ks['TabletTypes']):
result += "Partitions(%s):" % tablet_type
partition = ks['Partitions'][tablet_type]
for shard in partition['Shards']:
result = result + " %s-%s" % (shard['KeyRange']['Start'],
shard['KeyRange']['End'])
result += "\n"
result += "TabletTypes: " + ",".join(sorted(ks['TabletTypes']))
logging.debug("Cell %s keyspace %s has data:\n%s", cell, keyspace, result)
if expected != result:
raise Exception("Mismatch in srv keyspace for cell %s keyspace %s, expected:\n%s\ngot:\n%s" % (
cell, keyspace, expected, result))
if 'keyspace_id' != ks.get('ShardingColumnName'):
raise Exception("Got wrong ShardingColumnName in SrvKeyspace: %s" %
str(ks))
if keyspace_id_type != ks.get('ShardingColumnType'):
raise Exception("Got wrong ShardingColumnType in SrvKeyspace: %s" %
str(ks))
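# Added illustrative comment: `expected` must match a string assembled like
#   Partitions(master): -80 80-
#   Partitions(replica): -80 80-
#   TabletTypes: master,replica
# (the shard ranges and tablet types above are made-up examples).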
def get_status(port):
return urllib2.urlopen('http://localhost:%u%s' % (port, environment.status_url)).read()
def curl(url, background=False, **kwargs):
if background:
return run_bg([environment.curl_bin, '-s', '-N', '-L', url], **kwargs)
return run([environment.curl_bin, '-s', '-N', '-L', url], **kwargs)
class VtctldError(Exception): pass
# save the first running instance, and an RPC connection to it,
# so we can use it to run remote vtctl commands
vtctld = None
vtctld_connection = None
class Vtctld(object):
def __init__(self):
self.port = environment.reserve_ports(1)
def dbtopo(self):
data = json.load(urllib2.urlopen('http://localhost:%u/dbtopo?format=json' %
self.port))
if data["Error"]:
raise VtctldError(data)
return data["Topology"]
def serving_graph(self):
data = json.load(urllib2.urlopen('http://localhost:%u/serving_graph/test_nj?format=json' % self.port))
if data['Errors']:
raise VtctldError(data['Errors'])
return data["Keyspaces"]
def start(self):
args = environment.binary_args('vtctld') + [
'-debug',
'-templates', environment.vttop + '/go/cmd/vtctld/templates',
'-log_dir', environment.vtlogroot,
'-port', str(self.port),
] + \
environment.topo_server().flags() + \
protocols_flavor().tablet_manager_protocol_flags()
stderr_fd = open(os.path.join(environment.tmproot, "vtctld.stderr"), "w")
self.proc = run_bg(args, stderr=stderr_fd)
# wait for the process to listen to RPC
timeout = 30
while True:
v = get_vars(self.port)
if v:
break
timeout = wait_step('waiting for vtctld to start', timeout,
sleep_time=0.2)
# save the running instance so vtctl commands can be remote executed now
global vtctld, vtctld_connection
if not vtctld:
vtctld = self
vtctld_connection = vtctl_client.connect(
protocols_flavor().vtctl_client_protocol(), 'localhost:%u' % self.port, 30)
return self.proc
def process_args(self):
return ['-vtctld_addr', 'http://localhost:%u/' % self.port]
def vtctl_client(self, args):
if options.verbose == 2:
log_level='INFO'
elif options.verbose == 1:
log_level='WARNING'
else:
log_level='ERROR'
out, err = run(environment.binary_args('vtctlclient') +
['-vtctl_client_protocol',
protocols_flavor().vtctl_client_protocol(),
'-server', 'localhost:%u' % self.port,
'-stderrthreshold', log_level] + args,
trap_output=True)
return out
| bsd-3-clause | -868,375,695,319,744,000 | 30.053191 | 121 | 0.635687 | false |
perrette/iis | setup.py | 1 | 2732 | #!/usr/bin/env python
"""
"""
#from distutils.core import setup
import os, sys, re
from distutils.core import setup
import warnings
with open('README.md') as file:
long_description = file.read()
#
# Track version after pandas' setup.py
#
MAJOR = 0
MINOR = 0
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
FULLVERSION = VERSION
write_version = True
if not ISRELEASED:
import subprocess
FULLVERSION += '.dev'
pipe = None
for cmd in ['git','git.cmd']:
try:
pipe = subprocess.Popen([cmd, "describe", "--always", "--match", "v[0-9]*"],
stdout=subprocess.PIPE)
(so,serr) = pipe.communicate()
if pipe.returncode == 0:
break
except:
pass
if pipe is None or pipe.returncode != 0:
# no git, or not in git dir
if os.path.exists('iis/version.py'):
warnings.warn("WARNING: Couldn't get git revision, using existing iis/version.py")
write_version = False
else:
warnings.warn("WARNING: Couldn't get git revision, using generic version string")
else:
# have git, in git dir, but may have used a shallow clone (travis does this)
rev = so.strip()
# makes distutils blow up on Python 2.7
if sys.version_info[0] >= 3:
rev = rev.decode('ascii')
if not rev.startswith('v') and re.match("[a-zA-Z0-9]{7,9}",rev):
# partial clone, manually construct version string
# this is the format before we started using git-describe
# to get an ordering on dev version strings.
rev ="v%s.dev-%s" % (VERSION, rev)
        # Strip leading v from tags format "vx.y.z" to get the version string
FULLVERSION = rev.lstrip('v')
else:
FULLVERSION += QUALIFIER
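# Added note: after this block FULLVERSION is either "<version><qualifier>" for
# a release, "<version>.dev" when git information is unavailable, or a string
# derived from `git describe` (for example something like "0.0.0.dev-a1b2c3d"
# on a shallow clone); the exact suffix depends on the local git state.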
#
# Actually important part
#
setup(name='iis',
version=FULLVERSION,
author='Mahe Perrette',
author_email='[email protected]',
description='Iterative, bayesian methods to tune an ensemble of models',
keywords=('fortran','template','namelist'),
# basic stuff here
packages = ['iis'],
long_description=long_description,
url='https://github.com/perrette/iis',
license = "MIT",
)
def write_version_py(filename=None):
cnt = """\
version = '%s'
short_version = '%s'
"""
if not filename:
#filename = os.path.join(
# os.path.dirname(__file__), 'dimarray', 'version.py')
filename = os.path.join('iis', 'version.py')
with open(filename, 'w') as a:
a.write(cnt % (FULLVERSION, VERSION))
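# Added note: the generated iis/version.py contains just the two assignments
# from the template above, e.g.
#   version = '0.0.0.dev-a1b2c3d'
#   short_version = '0.0.0'
# (the dev hash shown is a made-up example).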
# Write version.py to dimarray
if write_version:
write_version_py()
| mit | 8,472,331,363,098,328,000 | 26.877551 | 94 | 0.598097 | false |
Azure/azure-sdk-for-python | sdk/purview/azure-mgmt-purview/azure/mgmt/purview/models/__init__.py | 1 | 5836 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AccessKeys
from ._models_py3 import Account
from ._models_py3 import AccountEndpoints
from ._models_py3 import AccountList
from ._models_py3 import AccountPropertiesEndpoints
from ._models_py3 import AccountPropertiesManagedResources
from ._models_py3 import AccountSku
from ._models_py3 import AccountUpdateParameters
from ._models_py3 import CheckNameAvailabilityRequest
from ._models_py3 import CheckNameAvailabilityResult
from ._models_py3 import CloudConnectors
from ._models_py3 import DefaultAccountPayload
from ._models_py3 import DeletedAccount
from ._models_py3 import DeletedAccountList
from ._models_py3 import DeletedAccountProperties
from ._models_py3 import DeletedAccountPropertiesAutoGenerated
from ._models_py3 import DimensionProperties
from ._models_py3 import ErrorModel
from ._models_py3 import ErrorResponseModel
from ._models_py3 import ErrorResponseModelError
from ._models_py3 import Identity
from ._models_py3 import ManagedResources
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationList
from ._models_py3 import OperationMetaLogSpecification
from ._models_py3 import OperationMetaMetricSpecification
from ._models_py3 import OperationMetaServiceSpecification
from ._models_py3 import PrivateEndpoint
from ._models_py3 import PrivateEndpointConnection
from ._models_py3 import PrivateEndpointConnectionList
from ._models_py3 import PrivateLinkResource
from ._models_py3 import PrivateLinkResourceList
from ._models_py3 import PrivateLinkServiceConnectionState
from ._models_py3 import ProxyResource
from ._models_py3 import TrackedResource
except (SyntaxError, ImportError):
from ._models import AccessKeys # type: ignore
from ._models import Account # type: ignore
from ._models import AccountEndpoints # type: ignore
from ._models import AccountList # type: ignore
from ._models import AccountPropertiesEndpoints # type: ignore
from ._models import AccountPropertiesManagedResources # type: ignore
from ._models import AccountSku # type: ignore
from ._models import AccountUpdateParameters # type: ignore
from ._models import CheckNameAvailabilityRequest # type: ignore
from ._models import CheckNameAvailabilityResult # type: ignore
from ._models import CloudConnectors # type: ignore
from ._models import DefaultAccountPayload # type: ignore
from ._models import DeletedAccount # type: ignore
from ._models import DeletedAccountList # type: ignore
from ._models import DeletedAccountProperties # type: ignore
from ._models import DeletedAccountPropertiesAutoGenerated # type: ignore
from ._models import DimensionProperties # type: ignore
from ._models import ErrorModel # type: ignore
from ._models import ErrorResponseModel # type: ignore
from ._models import ErrorResponseModelError # type: ignore
from ._models import Identity # type: ignore
from ._models import ManagedResources # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationList # type: ignore
from ._models import OperationMetaLogSpecification # type: ignore
from ._models import OperationMetaMetricSpecification # type: ignore
from ._models import OperationMetaServiceSpecification # type: ignore
from ._models import PrivateEndpoint # type: ignore
from ._models import PrivateEndpointConnection # type: ignore
from ._models import PrivateEndpointConnectionList # type: ignore
from ._models import PrivateLinkResource # type: ignore
from ._models import PrivateLinkResourceList # type: ignore
from ._models import PrivateLinkServiceConnectionState # type: ignore
from ._models import ProxyResource # type: ignore
from ._models import TrackedResource # type: ignore
from ._purview_management_client_enums import (
Name,
ProvisioningState,
PublicNetworkAccess,
Reason,
ScopeType,
Status,
Type,
)
__all__ = [
'AccessKeys',
'Account',
'AccountEndpoints',
'AccountList',
'AccountPropertiesEndpoints',
'AccountPropertiesManagedResources',
'AccountSku',
'AccountUpdateParameters',
'CheckNameAvailabilityRequest',
'CheckNameAvailabilityResult',
'CloudConnectors',
'DefaultAccountPayload',
'DeletedAccount',
'DeletedAccountList',
'DeletedAccountProperties',
'DeletedAccountPropertiesAutoGenerated',
'DimensionProperties',
'ErrorModel',
'ErrorResponseModel',
'ErrorResponseModelError',
'Identity',
'ManagedResources',
'Operation',
'OperationDisplay',
'OperationList',
'OperationMetaLogSpecification',
'OperationMetaMetricSpecification',
'OperationMetaServiceSpecification',
'PrivateEndpoint',
'PrivateEndpointConnection',
'PrivateEndpointConnectionList',
'PrivateLinkResource',
'PrivateLinkResourceList',
'PrivateLinkServiceConnectionState',
'ProxyResource',
'TrackedResource',
'Name',
'ProvisioningState',
'PublicNetworkAccess',
'Reason',
'ScopeType',
'Status',
'Type',
]
| mit | 2,823,532,459,695,357,400 | 41.289855 | 94 | 0.723441 | false |
vpelletier/neoppod | neo/tests/zodb/testReadOnly.py | 1 | 1032 | #
# Copyright (C) 2009-2016 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ZODB.tests.ReadOnlyStorage import ReadOnlyStorage
from ZODB.tests.StorageTestBase import StorageTestBase
from . import ZODBTestCase
class ReadOnlyTests(ZODBTestCase, StorageTestBase, ReadOnlyStorage):
pass
if __name__ == "__main__":
suite = unittest.makeSuite(ReadOnlyTests, 'check')
unittest.main(defaultTest='suite')
| gpl-2.0 | -3,604,143,858,798,651,000 | 34.586207 | 71 | 0.763566 | false |
sonofeft/XYmath | xymath/gui/pagedata.py | 1 | 14524 | from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import time
from tkinter import *
from xymath.gui.double_wt_entrygrid import EntryGrid
from numpy import array, double
def get_time_str( tstamp, label ):
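    '''Return label followed by a formatted time stamp string for tstamp.'''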
#return label + str( tstamp )
return label + time.strftime(" TimeStamp: %m/%d/%Y %H:%M:%S", time.localtime(tstamp))
class PageData(object):
def leavePageCallback(self):
'''When leaving page, tidy up any XYjob issues.'''
#print 'Leaving PageData'
self.put_entry_values_on_plot()
#self.guiObj.see_me() # force focus back
def selectPageCallback(self):
'''When entering page, do a little setup'''
#self.eg.focus_on(0,0)
if not self.block_entry_update:
self.place_xyjob_data()
def page_callback(self, i, j):
        '''Called from EntryGrid whenever one of its Entry widgets'
           StringVar items changes.'''
if self.mode_place_xyjob:
return # ignore callbacks if in mode_place_xyjob
#print '___in page_callback i=%s, j=%s'%(i, j)
if hasattr(self,'eg'):
self.show_editor_timestamp()
#if self.eg.is_a_good_row( i ):
# self.UpdatePlotButton.configure(state=NORMAL)
# self.put_entry_values_on_plot()
#self.guiObj.master.deiconify()
#self.guiObj.master.lift()
#self.guiObj.master.focus_set()
#self.guiObj.master.grab_set()
#self.guiObj.master.lift()
#self.eg.focus_on(i,j)
return
def put_entry_values_on_plot(self):
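        '''Copy the current entry-grid values into the XYjob dataset and redraw the data plot.'''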
self.place_entries_into_dataset()
XY = self.guiObj.XYjob
self.guiObj.PlotWin.make_new_plot(dataset=XY.dataset, curveL=[],
title_str='Data')
def show_data_timestamp(self):
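        '''Display the dataset time stamp in the Data label.'''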
self.Data_TimeStamp_Label.configure(text=get_time_str( self.guiObj.XYjob.dataset.timeStamp,' Data' ))
def show_editor_timestamp(self):
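        '''Update the editor time stamp to the current time and display it in the Editor label.'''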
self.eg.timeStamp = time.time()
self.Editor_TimeStamp_Label.configure(text=get_time_str( self.eg.timeStamp, 'Editor' ))
def place_entries_into_dataset(self):
'''Put entry data into XYjob dataset'''
xL = []
yL = []
wL = []
for i in range(self.eg.Nrows):
if self.eg.is_a_good_row( i ):
xL.append( self.eg.entryL[i][0].get_float_val() )
yL.append( self.eg.entryL[i][1].get_float_val() )
if self.eg.num_active_wtfactors:
wL.append( self.eg.entryL[i][2].get_wt_val() )
print('adding',xL[-1],yL[-1],wL[-1])
#else:
# print 'adding',xL[-1],yL[-1]
if len(xL)>0:
XY = self.guiObj.XYjob
if self.eg.num_active_wtfactors:
wtArr = array(wL, dtype=double)
else:
wtArr = None
print('place_entries_into_dataset with wtArr = None')
XY.define_dataset( array(xL, dtype=double), array(yL, dtype=double), wtArr=wtArr,
xName=self.Xname_Entry_StringVar.get(), yName=self.Yname_Entry_StringVar.get(),
xUnits=self.Xunits_Entry_StringVar.get(), yUnits=self.Yunits_Entry_StringVar.get(),
timeStamp=self.eg.timeStamp)
XY.dataset.sort_by_x()
def place_xyjob_data(self):
'''Put data from XYjob into PageData'''
XY = self.guiObj.XYjob
if not XY.dataset:
return
self.mode_place_xyjob = 1
self.eg.timeStamp = XY.dataset.timeStamp
self.show_data_timestamp()
self.Xname_Entry_StringVar.set(XY.dataset.xName)
self.Xunits_Entry_StringVar.set(XY.dataset.xUnits)
self.Yname_Entry_StringVar.set(XY.dataset.yName)
self.Yunits_Entry_StringVar.set(XY.dataset.yUnits)
# put data into entry grid
self.eg.focus_on(0,0)
N = int( XY.dataset.N )
# Add enough rows to hold data, if required
if self.eg.Nrows <= N:
for i in range( self.eg.Nrows, N+2):
self.eg.add_a_row()
num_active_wtfactors = 0
for i in range(self.eg.Nrows):
# clear all the entry locations
if i < N: # only inserts value into entry for existing values
self.eg.entryL[i][0].set_float_val(XY.dataset.xArr[i])
self.eg.entryL[i][1].set_float_val(XY.dataset.yArr[i])
if XY.dataset.wtArr is None:
#self.eg.entryL[i][2].set_float_val( 1.0 )
self.eg.update_num_active_wtfactors(i, 1.0)
else:
#self.eg.entryL[i][2].set_float_val( XY.dataset.wtArr[i] )
self.eg.update_num_active_wtfactors(i, XY.dataset.wtArr[i])
if abs(1.0 - XY.dataset.wtArr[i]) > 0.001:
num_active_wtfactors += 1
else:
self.eg.entryL[i][0].set_float_val('')
self.eg.entryL[i][1].set_float_val('')
self.eg.entryL[i][2].set_float_val( 1.0 ) # do not impact num_active_wtfactors
self.eg.num_active_wtfactors = num_active_wtfactors
# Now show data points in plot
self.put_entry_values_on_plot()
        # disable plot button
#self.UpdatePlotButton.configure(state=DISABLED)
# change mode flag back to 0
self.mode_place_xyjob = 0
def clear_all_data(self):
'''Clear All PageData'''
self.mode_place_xyjob = 1
self.eg.timeStamp = time.time()
self.Data_TimeStamp_Label.configure(text='')
self.Editor_TimeStamp_Label.configure(text='')
self.Xname_Entry_StringVar.set('x')
self.Xunits_Entry_StringVar.set('')
self.Yname_Entry_StringVar.set('y')
self.Yunits_Entry_StringVar.set('')
# put data into entry grid
self.eg.focus_on(0,0)
self.eg.num_active_wtfactors = 0
for i in range(self.eg.Nrows):
# clear all the entry locations
if 1: #i < N: # only inserts value into entry for existing values
self.eg.entryL[i][0].set_float_val('')
self.eg.entryL[i][1].set_float_val('')
self.eg.entryL[i][2].set_float_val( 1.0 ) # do not impact num_active_wtfactors
# change mode flag back to 0
self.mode_place_xyjob = 0
def __init__(self, guiObj, pageFrame):
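        '''Build the data-entry page: entry grid, X/Y name and units entries,
           time stamp labels, plot buttons, and the messages text area.'''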
self.mode_place_xyjob = 0
self.block_entry_update = 0
self.guiObj = guiObj
self.pageFrame = pageFrame
self.eg = EntryGrid(pageFrame, self.page_callback,
charWidthL=[12,12], labelL=['x-data','y-data'],
Nrows=15, Ncols=2, horiz_scroll=0)
self.eg.pack(anchor=NW, side=LEFT, expand=True,fill=BOTH)
self.eg.timeStamp = time.time()
self.iframe = Frame(pageFrame)
xframe = LabelFrame(self.iframe, text="", relief="groove")
yframe = LabelFrame(self.iframe, text="", relief="groove")
self.Data_TimeStamp_Label = Label(self.iframe,text="")
self.Editor_TimeStamp_Label = Label(self.iframe,text="")
self.Xname_Entry = Entry(xframe,width="15")
self.Xname_Entry.grid(row=1, column=1, sticky=W)
self.Xname_Entry_StringVar = StringVar()
self.Xname_Entry.configure(textvariable=self.Xname_Entry_StringVar)
self.Xname_Entry_StringVar.set("x")
self.Xname_Entry_StringVar_traceName = \
self.Xname_Entry_StringVar.trace_variable("w", self.Xname_Entry_StringVar_Callback)
self.Xunits_Entry = Entry(xframe,width="15")
self.Xunits_Entry.grid(row=2, column=1, sticky=W)
self.Xunits_Entry_StringVar = StringVar()
self.Xunits_Entry.configure(textvariable=self.Xunits_Entry_StringVar)
self.Xunits_Entry_StringVar.set("")
self.Xunits_Entry_StringVar_traceName = \
self.Xunits_Entry_StringVar.trace_variable("w", self.Xunits_Entry_StringVar_Callback)
self.Xname_Label = Label(xframe,text="X Name")
self.Xname_Label.grid(row=1, column=0, sticky=W)
self.Xunits_Label = Label(xframe,text="X Units")
self.Xunits_Label.grid(row=2, column=0, sticky=W)
self.Yname_Entry = Entry(yframe,width="15")
self.Yname_Entry.grid(row=1, column=1, sticky=W)
self.Yname_Entry_StringVar = StringVar()
self.Yname_Entry.configure(textvariable=self.Yname_Entry_StringVar)
self.Yname_Entry_StringVar.set("y")
self.Yname_Entry_StringVar_traceName = \
self.Yname_Entry_StringVar.trace_variable("w", self.Yname_Entry_StringVar_Callback)
self.Yunits_Entry = Entry(yframe,width="15")
self.Yunits_Entry.grid(row=2, column=1, sticky=W)
self.Yunits_Entry_StringVar = StringVar()
self.Yunits_Entry.configure(textvariable=self.Yunits_Entry_StringVar)
self.Yunits_Entry_StringVar.set("")
self.Yunits_Entry_StringVar_traceName = \
self.Yunits_Entry_StringVar.trace_variable("w", self.Yunits_Entry_StringVar_Callback)
self.Yname_Label = Label(yframe,text="Y Name")
self.Yname_Label.grid(row=1, column=0, sticky=W)
self.Yunits_Label = Label(yframe,text="Y Units")
self.Yunits_Label.grid(row=2, column=0, sticky=W)
xframe.pack(anchor=NW, side=TOP)
yframe.pack(anchor=NW, side=TOP)
self.Data_TimeStamp_Label.pack(anchor=NW, side=TOP)
self.Editor_TimeStamp_Label.pack(anchor=NW, side=TOP)
self.btn_frame = Frame(self.iframe)
self.UpdatePlotButton = Button(self.btn_frame,text="Update Plot", width="15")
self.UpdatePlotButton.bind("<ButtonRelease-1>", self.UpdatePlotButton_Click)
self.UpdatePlotButton.pack(anchor=NW, side=LEFT)
self.SwapXYButton = Button(self.btn_frame,text="Swap X and Y", width="15")
self.SwapXYButton.bind("<ButtonRelease-1>", self.SwapXYButton_Click)
self.SwapXYButton.pack(anchor=NW, side=LEFT)
self.Btn_Space = Label(self.btn_frame,text=" ")
self.Btn_Space.pack(anchor=NW, side=LEFT, fill=X, expand=1)
#self.ShowHelpButton = Button(self.btn_frame,text="Show Help", width="15")
        #self.ShowHelpButton.bind("<ButtonRelease-1>", self.ShowHelp_Button_Click)
#self.ShowHelpButton.pack(anchor=NE, side=LEFT)
self.btn_frame.pack(anchor=NW, side=TOP, fill=X, expand=1)
self.Label_Space = Label(self.iframe,text=" ")
self.Label_Space.pack(anchor=NW, side=TOP)
        # make text results area
lbframe = Frame( self.iframe )
self.Messages_Text_frame = lbframe
scrollbar = Scrollbar(lbframe, orient=VERTICAL)
self.Messages_Text = Text(lbframe, yscrollcommand=scrollbar.set)
scrollbar.config(command=self.Messages_Text.yview)
scrollbar.pack(side=RIGHT, fill=Y)
self.Messages_Text.pack(side=LEFT, fill=BOTH, expand=1)
self.Messages_Text_frame.pack(anchor=NW, side=TOP, fill=BOTH, expand=1)
self.iframe.pack(anchor=NW, side=LEFT, expand=True,fill=BOTH)
slab = Label(pageFrame,text=" "*200) # pin xframe and yframe to the left
slab.pack(anchor=E, side=LEFT, expand=True,fill=BOTH)
self.ShowHelp_Button_Click(None)
def ShowHelp_Button_Click(self, event):
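        '''Display usage help for the data page in the messages area.'''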
#print 'Pressed ShowHelp Button'
self.new_message('''Enter X,Y data pairs into entry boxes
at left. The boxes can be navigated with
the mouse, the return key or arrow keys.
All of the curve fitting options will use
these data.
To make curves go nearer certain points,
click the "weight" button next to that
point's entry boxes and enter a weight
greater than 1.
If names and units are entered for X
and Y, they will appear on plots.
Any edits will appear on plots when
the "Update Plot" button is pressed, or
when another tabbed page is selected.
''')
def UpdatePlotButton_Click(self, event):
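        '''Redraw the plot from the current entry-grid values.'''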
if hasattr(self,'eg'):
self.block_entry_update = 1
self.put_entry_values_on_plot()
self.block_entry_update = 0
def SwapXYButton_Click(self, event):
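        '''Swap X and Y in the dataset and refresh the page display.'''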
if hasattr(self,'eg'):
self.guiObj.XYjob.dataset.swap_x_and_y()
self.place_xyjob_data()
self.show_editor_timestamp()
#self.block_entry_update = 1
#self.put_entry_values_on_plot()
#self.block_entry_update = 0
def Xname_Entry_StringVar_Callback(self, varName, index, mode):
pass
#print "Xname_Entry_StringVar_Callback varName, index, mode",varName, index, mode
#print " new StringVar value =",self.Xname_Entry_StringVar.get()
def Xunits_Entry_StringVar_Callback(self, varName, index, mode):
pass
#print "Xunits_Entry_StringVar_Callback varName, index, mode",varName, index, mode
#print " new StringVar value =",self.Xunits_Entry_StringVar.get()
def Yname_Entry_StringVar_Callback(self, varName, index, mode):
pass
#print "Yname_Entry_StringVar_Callback varName, index, mode",varName, index, mode
#print " new StringVar value =",self.Yname_Entry_StringVar.get()
def Yunits_Entry_StringVar_Callback(self, varName, index, mode):
pass
#print "Yunits_Entry_StringVar_Callback varName, index, mode",varName, index, mode
#print " new StringVar value =",self.Yunits_Entry_StringVar.get()
def clear_messages(self):
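        '''Clear the messages text area.'''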
self.Messages_Text.delete(1.0, END)
self.Messages_Text.update_idletasks()
def add_to_messages(self, s):
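        '''Append s to the messages text area.'''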
self.Messages_Text.insert(END, s)
self.Messages_Text.update_idletasks()
def new_message(self, s):
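        '''Replace the contents of the messages text area with s.'''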
self.clear_messages()
self.Messages_Text.insert(END, s)
self.Messages_Text.update_idletasks()
| gpl-3.0 | -422,575,960,700,958,340 | 38.363144 | 110 | 0.594327 | false |