repo_name (string, len 6-61) | path (string, len 4-230) | copies (string, len 1-3) | size (string, len 4-6) | text (string, len 1.01k-850k) | license (string, 15 classes) | hash (int64) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
YJoe/SpaceShips | Desktop/Python/space_scroll/Start_up.py | 1 | 1590 | import pygame
import random
import math
from Settings import Settings
pygame.init()
clock = pygame.time.Clock()
def deg_to_rad(degrees):
return degrees / 180.0 * math.pi
# variables to control game states
home_state = 1
info_state = 2
game_state = 3
pause_state = 4
shop_state = 5
settings_state = 6
quit_state = 7
game_over_state = 8
reset_game_state = 9
# colours used
main_theme = (0, 100, 100)
red = (100, 0, 0)
white = (255, 255, 255)
# game settings
settings = Settings()
show_notes = True
star_chance = 10 # chance of a star is 1 in 10 every update
initial_stars = 100 # create 100 stars to fill the screen
package_chance = 10 # chance of a package drop is 1 in 10 every enemy kill
# if your game runs slow, set particle count low and star chance higher
# all particles, stars, enemies and bullets are removed when they leave the screen
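# A minimal sketch (not part of the original game code) of how a "1 in N" chance such
# as star_chance or package_chance above could be rolled each update, using the random
# module imported at the top of this file; the helper name is an assumption.
def one_in(n):
    # True roughly once in every n calls
    return random.randint(1, n) == 1
# e.g. "if one_in(star_chance):" would spawn a star on roughly 1 in 10 updates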
width = 800
height = 500
main_s = pygame.display.set_mode((width, height))
main_s.fill((255, 255, 255))
pygame.display.set_caption("SPACE SHIPS")
pygame.display.set_icon(pygame.image.load("Images/Icon.png"))
font = pygame.font.Font("./Font/tall bolder.ttf", 15)
menu_font = pygame.font.Font("./Font/tall bolder.ttf", 25)
title_font = pygame.font.Font("./Font/tall bolder.ttf", 45)
screen_rect = pygame.sprite.Sprite()
# screen_rect is slightly bigger than the screen so that objects do not get removed
# when created off screen. Many objects are checked as still on screen; if not, they
# are removed from the list they are in.
screen_rect.rect = pygame.Surface((width + 30, height + 30)).get_rect()
screen_rect.rect.x = 0
screen_rect.rect.y = 0
| gpl-3.0 | -1,097,112,209,323,025,900 | 29 | 83 | 0.723899 | false | 3.154762 | false | false | false |
polarise/RP-python | compute_rho.py | 1 | 1038 | #!/home/paulk/software/bin/python
from __future__ import division
from sys import argv,exit,stderr
import rpy2.robjects as R
from rpy2.robjects.packages import importr
qvalue = importr('qvalue')
cor = R.r['cor']
qunif = R.r['qunif']
runif = R.r['runif']
sort = R.r['sort']
try:
prefix = argv[1]
except IndexError:
print >> stderr,"Usage: %s [core|all]" % argv[0]
exit(1)
#assert prefix == 'core' or prefix == 'all'
if prefix == 'core': quniform = sort(qunif(runif(123266)))
elif prefix == 'full': quniform = sort(qunif(runif(131997)))
import fnmatch
import os
for file in os.listdir('permutation_tests'):
#if fnmatch.fnmatch(file,'%s_norm_ps.[0-9]*' % prefix):
if fnmatch.fnmatch(file,'%s_sanitised.paired.[0-9]*.out.tests' % prefix):
f = open('permutation_tests/'+file)
data = list()
data = [row.strip().split('\t')[3] for row in f]
data.sort()
data = R.FloatVector(map(float,data))
q = qvalue.qvalue(data)
f.close()
print file+"\t"+str(q[1][0])
# print file+"\t"+str(cor(data,quniform,method="pearson")[0])
| gpl-2.0 | 1,321,716,852,320,594,700 | 25.615385 | 74 | 0.663776 | false | 2.575682 | false | false | false |
gklyne/annalist | src/annalist_root/annalist/models/objectcache.py | 1 | 10794 | from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
This module provides an object caching framework for arbitrary Python values.
The intent is that all cache logic can be isolated, and may be re-implemented
using a network cache facility such as MemCache or Redis.
The present implementation assumes a single-process, multi-threaded environment
and interlocks cache accesses to avoid possible cache-related race conditions.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2019, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import sys
import traceback
import threading
import contextlib
from annalist.exceptions import Annalist_Error
# ===================================================================
#
# Error class
#
# ===================================================================
class Cache_Error(Annalist_Error):
"""
Class for errors raised by cache methods.
"""
def __init__(self, value=None, msg="Cache_error (objectcache)"):
super(Cache_Error, self).__init__(value, msg)
return
# ===================================================================
#
# Cache creation and discovery
#
# ===================================================================
globalcachelock = threading.Lock() # Used to interlock creation/deletion of caches
objectcache_dict = {} # Initial empty set of object caches
objectcache_tb = {}
def get_cache(cachekey):
"""
This function locates or creates an object cache.
cachekey is a hashable value that uniquely identifies the required cache
(e.g. a string or URI).
Returns the requested cache object, which may be created on-the-fly.
"""
with globalcachelock:
if cachekey not in objectcache_dict:
objectcache_dict[cachekey] = ObjectCache(cachekey)
objectcache = objectcache_dict[cachekey] # Copy value while lock acquired
return objectcache
def remove_cache(cachekey):
"""
This function removes a cache from the set of object caches
cachekey is a hashable value that uniquely identifies the required cache
(e.g. a string or URI).
"""
# log.debug("objectcache.remove_cache %r"%(cachekey,))
objectcache = None
with globalcachelock:
if cachekey in objectcache_dict:
objectcache = objectcache_dict[cachekey]
del objectcache_dict[cachekey]
# Defer operations that acquire the cache local lock until
# the global lock is released
if objectcache:
objectcache.close()
return
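# A minimal usage sketch of the cache API defined above, assuming the single-process,
# multi-threaded environment described in the module docstring. The cache key
# "example" and the stored values are illustrative assumptions; this helper is not
# called anywhere in the module.
def _example_usage():
    cache = get_cache("example")        # created on first use
    cache.set("key1", 41)               # store a value under "key1"
    value = cache.get("key1", None)     # read it back (41)
    with cache.access("key1") as vals:  # interlocked read/update of cached entries
        vals["key1"] = value + 1        # written back to the cache on exit
    remove_cache("example")             # drop the whole cache again
    return value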
# ===== @@@Unused functions follow; eventually, remove these? ====
def make_cache_unused_(cachekey):
"""
This function creates an object cache.
cachekey is a hashable value that uniquely identifies the required cache
(e.g. a string or URI).
Returns the created cache object.
"""
with globalcachelock:
try:
if cachekey in objectcache_dict:
raise Cache_Error(cachekey, msg="Cache already exists")
objectcache_dict[cachekey] = ObjectCache(cachekey)
objectcache_tb[cachekey] = traceback.extract_stack()
objectcache = objectcache_dict[cachekey] # Copy value while lock acquired
except Exception as e:
print("@@@@ Cache already exists", file=sys.stderr)
print("".join(traceback.format_list(objectcache_tb[cachekey])), file=sys.stderr)
print("@@@@", file=sys.stderr)
raise
return objectcache
def remove_all_caches_unused_():
"""
This function removes all caches from the set of object caches
"""
# log.debug("@@@@ remove_all_caches")
objectcaches = []
with globalcachelock:
for cachekey in objectcache_dict.keys():
# log.debug("@@@@ remove_all_caches %r"%(cachekey,))
objectcaches.append(objectcache_dict[cachekey])
del objectcache_dict[cachekey]
# Defer operations that acquire the cache local lock until
# the global lock is released
for objectcache in objectcaches:
objectcache.close()
return
def remove_matching_caches_unused_(match_fn):
"""
This function removes all caches whose cache key is matched by the provided
function from the set of object caches.
match_fn is a function that tests a supplied cache key, and returns
True if the corresponding cache is to be removed.
"""
objectcaches = []
with globalcachelock:
for cachekey in _find_matching_cache_keys(match_fn):
objectcaches.append(objectcache_dict[cachekey])
del objectcache_dict[cachekey]
# Defer operations that acquire the cache local lock until
# the global lock is released
for objectcache in objectcaches:
objectcache.close()
return
def _find_matching_cache_keys_unused_(match_fn):
"""
A generator that returns all cache keys matching the supplied function.
match_fn is a function that tests a supplied cache key, and returns
    True if it matches some criterion.
"""
for cachekey in objectcache_dict.keys():
if match_fn(cachekey):
yield cachekey
return
# ===================================================================
#
# Object cache class
#
# ===================================================================
class ObjectCache(object):
"""
A class for caching objects of some type.
The cache is identified by is cache key value that is used to distinguish
a particular object cache from all others (see also `getCache`)
"""
def __init__(self, cachekey):
# log.debug("ObjectCache.__init__: cachekey %r"%(cachekey,))
self._cachekey = cachekey
self._cachelock = threading.Lock() # Allocate a lock object for this cache
self._cache = {} # Initial empty set of values
self._opened = traceback.extract_stack()
self._closed = None
return
def cache_key(self):
"""
Return cache key (e.g. for use with 'remove_cache')
"""
return self._cachekey
def flush(self):
"""
Remove all objects from cache.
"""
# log.debug("ObjectCache.flush: cachekey %r"%(self._cachekey,))
with self._cachelock:
            for key in list(self._cache.keys()):
del self._cache[key]
return self
def close(self):
"""
Close down this cache object. Once closed, it cannot be used again.
"""
# log.debug("ObjectCache.close: cachekey %r"%(self._cachekey,))
self.flush()
self._cachelock = None # Discard lock object
self._closed = traceback.extract_stack()
return
def set(self, key, value):
"""
Save object value in cache (overwriting any existing value for the key).
key is a hashable value that uniquely identifies the required cache
(e.g. a string or URI).
value is a (new) value that is to be associated with the key.
"""
with self._cachelock:
self._cache[key] = value
return value
def get(self, key, default=None):
"""
Retrieve object value from cache, or return default value
"""
if self._cachelock is None:
msg = "Access after cache closed (%r, %s)"%(self._cachekey, key)
log.error(msg)
log.debug("---- closed at:")
log.debug("".join(traceback.format_list(self._closed)))
log.debug("----")
raise Exception(msg)
# print("@@@@ self._cachelock %r, self._cachekey %r"%(self._cachelock, self._cachekey))
with self._cachelock:
value = self._cache.get(key, default)
return value
def pop(self, key, default=None):
"""
Remove object value from cache, return that or default value
"""
with self._cachelock:
value = self._cache.pop(key, default)
return value
@contextlib.contextmanager
def access(self, *keys):
"""
A context manager for interlocked access to a cached value.
The value bound by the context manager (for a 'with ... as' assignment) is a
        dictionary that has entries for each of the supplied keys
for which there is a previously cached value.
On exit from the context manager, if the value under any of the given keys has
been changed, or if any new entries have been added, they are used to update the
cached values before the interlock is released.
Use like this:
with cacheobject.access("key1", "key2", ...) as value:
# value is dict of cached values for given keys
# interlocked processing code here
                # updates to value are written back to cache on leaving the context
See: https://docs.python.org/2/library/contextlib.html
"""
with self._cachelock:
value_dict = {}
for key in keys:
if key in self._cache:
value_dict[key] = self._cache[key]
yield value_dict
for key in value_dict:
self._cache[key] = value_dict[key]
# If needed: this logic removes keys deleted by yield code...
# for key in keys:
# if key not in value_dict:
# del self._cache[key]
return
# ===== @@@ UNUSED - remove this in due course =====
def find_unused_(self, key, load_fn, seed_value):
"""
Returns cached value for key, or calls the supplied function to obtain a
value, and caches and returns that value.
If a previously-cached value is present, the value returned is:
(False, old-value)
If a previously-cached value is not present, the function is called with
the supplied "seed_value" as a parameter, the resuling value is cached under
the supplied key, and the return value is:
(True, new-value)
"""
# log.debug("ObjectCache.find: cachekey %r, key %r"%(self._cachekey, key))
with self._cachelock:
if key in self._cache:
old_value = self._cache[key]
result = (False, old_value)
else:
new_value = load_fn(seed_value)
self._cache[key] = new_value
result = (True, new_value)
return result
# End.
| mit | 7,360,627,060,750,772,000 | 34.159609 | 95 | 0.588475 | false | 4.432854 | false | false | false |
BorisNikulin/kevin-bacon-py | AdjacencyList.py | 1 | 2440 | from queue import Queue
class ALGraph:
def __init__(self):
self.__graph = {}
def add_vertex_edge(self, v1, e, v2):
        if v1 not in self.__graph:
            self.__graph[v1] = []
        # ensure the adjacency list exists, then record the edge
        self.__graph[v1].append((v2, e))
def add_vertex_edge_undirected(self, v1, e, v2):
self.add_vertex_edge(v1, e, v2)
self.add_vertex_edge(v2, e, v1)
def get_adjacent(self, v):
return self.__graph[v]
def __iter__(self):
return iter(self.__graph.items())
def vertices(self):
return iter(self.__graph)
def __getitem__(self, item):
return self.get_adjacent(item)
def __len__(self):
return len(self.__graph)
def read_csv(self, file_path, vertex_class, edge_class, sep=','):
""" Reads a csv file with 3 columns (not checked)
that has a vertex, then a an edge label,
and a float weight"""
with open(file_path, 'r') as file:
movie_to_actors = {}
file.readline() # skip header
for line in file:
vertex_edge_weight = line.strip().split(sep)
vertex = vertex_class(vertex_edge_weight[0])
edge = edge_class(vertex_edge_weight[1], float(vertex_edge_weight[2]))
if edge not in movie_to_actors:
movie_to_actors[edge] = [vertex]
else:
movie_to_actors[edge].append(vertex)
for edge in movie_to_actors:
actors = movie_to_actors[edge]
for i in range(len(actors)):
for j in range(i + 1, len(actors)):
self.add_vertex_edge_undirected(actors[i], edge, actors[j])
def bfs(self, start, goal):
frontier = Queue()
frontier.put(start)
# v -> (pred, edge)
came_from = {}
while not frontier.empty():
current = frontier.get()
if current == goal:
break
for v, e in self.get_adjacent(current):
if v not in came_from:
frontier.put(v)
came_from[v] = (current, e)
result = []
current = goal
while current != start:
current, adj_edge = came_from[current]
result.append((current, adj_edge[current]))
result.reverse()
return start, result
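# A minimal usage sketch of ALGraph built by hand rather than via read_csv(); the actor
# names and the small Edge helper are illustrative assumptions only, and nothing here
# is called by the module itself. Note that bfs() indexes each edge with a vertex
# (adj_edge[current]), so edge objects are assumed to support item lookup.
def _example_usage():
    class Edge(dict):
        # maps actor -> movie title, so bfs() can report the connecting film
        pass
    graph = ALGraph()
    apollo_13 = Edge({"Kevin Bacon": "Apollo 13", "Tom Hanks": "Apollo 13"})
    graph.add_vertex_edge_undirected("Kevin Bacon", apollo_13, "Tom Hanks")
    # returns ("Kevin Bacon", [("Kevin Bacon", "Apollo 13")]): the start vertex and
    # the path of (vertex, movie) steps leading to the goal
    return graph.bfs("Kevin Bacon", "Tom Hanks")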
| mit | 1,085,735,054,346,800,500 | 28.756098 | 86 | 0.505328 | false | 3.818466 | false | false | false |
Rzaaeeff/FacebookLikeGrabber | FacebookLikeGrabber.py | 1 | 1711 | __author__ = 'rzaaeeff'
#Dependencies start
from requests import get
import json
#Finish
class FacebookLikeGrabber:
def __init__(self, access_token):
self.access_token_str = '?access_token=%s' % access_token
def get_likes_by_post_id(self, post_id):
"""
        Facebook uses paging for the like list.
        That's why I used a special algorithm
        to grab all users who like the post.
"""
ended = True
post_likes = {}
response = get("https://graph.facebook.com/" + post_id + "/likes/" + self.access_token_str + '&limit=1000') #making query to graph.facebook.com | &limit=1000 is the number of maximum likes for each page
raw = json.loads(response.text) #response.text will give us string, but we need json
for item in raw['data']:
post_likes[item['name']] = item['id']
if 'next' in raw['paging']: #checking if there is next page
ended = False
while not ended:
response = get(raw['paging']['next'])
raw = json.loads(response.text)
for x in raw['data']:
post_likes[x['name']] = x['id']
if 'next' in raw['paging']:
ended = False
else:
ended = True
return post_likes
def get_posts_from_page(self, page_id, count):
post_ids = []
response = get("https://graph.facebook.com/" + page_id + "/statuses/" + self.access_token_str + '&limit=' + str(count))
raw = json.loads(response.text)
for post in raw['data']:
post_ids.append(str(post['id']))
return post_ids
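# A minimal usage sketch, with a placeholder access token and page id; the token, page
# name and post count are assumptions for illustration, not real identifiers, and the
# helper is not called anywhere in this module.
def _example_usage():
    grabber = FacebookLikeGrabber("YOUR_ACCESS_TOKEN")
    post_ids = grabber.get_posts_from_page("somepage", 5)   # last 5 statuses
    likes_by_post = {}
    for post_id in post_ids:
        # maps user name -> user id for everyone who liked the post
        likes_by_post[post_id] = grabber.get_likes_by_post_id(post_id)
    return likes_by_post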
| gpl-2.0 | -3,517,868,380,791,561,000 | 30.283019 | 210 | 0.540035 | false | 3.97907 | false | false | false |
peeringdb/peeringdb-py | peeringdb/_tasks_sequential.py | 1 | 1651 | """
Wrapper module for sequential task (continuation) handling.
Use as fallback for _tasks_async when async/await is not available.
"""
from types import GeneratorType
class UpdateTask:
"Wrap a generator in a task-like interface"
def __init__(self, gen, desc):
"""
Arguments:
- gen: generator object
- desc<tuple>: (Resource, key)
"""
assert isinstance(gen, GeneratorType)
self._gen = gen
self._desc = desc
def __repr__(self):
res, pk = self._desc
return "<UpdateTask: ({}, {})>".format(res.tag, pk)
def cancel(self):
pass
def __iter__(self):
return self._gen
def send(self, x):
return self._gen.send(x)
def gather(jobs):
"Aggregate and collect jobs"
for job in jobs:
yield from job
def wrap_generator(func):
"Identity decorator (only needed for async compatibility)"
return func
def _consume_task_or_generator(item):
if isinstance(item, (GeneratorType, UpdateTask)):
return _consume_task(item)
else:
return item
def _consume_task(gen):
r, ret = None, []
while True:
try:
item = gen.send(r)
except StopIteration:
break
r = _consume_task_or_generator(item)
ret.append(r)
if len(ret) == 1:
return ret.pop()
return ret
def run_task(func):
"""
Decorator to collect and return generator results, returning a list
if there are multiple results
"""
def _wrapped(*a, **k):
gen = func(*a, **k)
return _consume_task(gen)
return _wrapped
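# A minimal sketch of how the pieces above fit together: a wrapped generator delegates
# to gather(), which chains sub-generators, and run_task() consumes every yielded item
# into a list. The generator bodies and values are illustrative assumptions only.
@run_task
def _example_usage():
    def step(n):
        yield n        # each plain value yielded here becomes one collected result
        yield n + 1
    yield from gather([step(1), step(10)])   # _example_usage() returns [1, 2, 10, 11]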
| apache-2.0 | 6,169,636,268,537,176,000 | 20.166667 | 71 | 0.579043 | false | 3.978313 | false | false | false |
tadamic/sokoenginepy | src/sokoenginepy/manager/sokoban_plus.py | 1 | 10316 | from .. import utilities
from .piece import DEFAULT_PIECE_ID
class SokobanPlusDataError(ValueError):
pass
class SokobanPlus:
"""
Manages Sokoban+ data for game board.
**Sokoban+ rules**
In this variant of game rules, each box and each goal on board get number tag
(color). Game objective changes slightly: board is considered solved only when
each goal is filled with box of the same tag. So, for example goal tagged with
number 9 must be filled with any box tagged with number 9.
Multiple boxes and goals may share same plus id, but the number of boxes with one
plus id must be equal to number of goals with that same plus id. There is also
default plus id that represents non tagged boxes and goals.
Sokoban+ ids for given board are defined by two strings called goalorder and
boxorder. For example, boxorder "13 24 3 122 1" would give plus_id = 13 to box id
= 1, plus_id = 24 to box ID = 2, etc...
**Valid Sokoban+ id sequences**
Boxorder and goalorder must define ids for equal number of boxes and goals. This
means that in case of boxorder assigning plus id "42" to two boxes, goalorder
must also contain number 42 twice.
Sokoban+ data parser accepts any positive integer as plus id.
Attributes:
DEFAULT_PLUS_ID: Sokoban+ ID for pieces that don't have one or when
Sokoban+ is disabled.
Original Sokoban+ implementation used number 99 for default plus ID. As
there can be more than 99 boxes on board, sokoenginepy changes this
detail and uses :const:`DEFAULT_PLUS_ID` as default plus ID. When loading
older puzzles with Sokoban+, legacy default value is converted
transparently.
Args:
boxorder (str): Space separated integers describing Sokoban+ IDs for boxes
goalorder (str): Space separated integers describing Sokoban+ IDs for goals
pieces_count (int): Total count of boxes/goals on board
"""
_LEGACY_DEFAULT_PLUS_ID = 99
DEFAULT_PLUS_ID = 0
def __init__(self, pieces_count, boxorder=None, goalorder=None):
self._is_enabled = False
self._is_validated = False
self.errors = []
self._pieces_count = pieces_count
self._box_plus_ids = None
self._goal_plus_ids = None
self._boxorder = None
self._goalorder = None
self.boxorder = boxorder or ""
self.goalorder = goalorder or ""
@classmethod
def is_sokoban_plus_string(cls, line):
return utilities.contains_only_digits_and_spaces(
line
) and not utilities.is_blank(line)
@classmethod
def is_valid_plus_id(cls, plus_id):
return isinstance(plus_id, int) and plus_id >= cls.DEFAULT_PLUS_ID
@property
def pieces_count(self):
return self._pieces_count
@pieces_count.setter
def pieces_count(self, rv):
if rv != self._pieces_count:
self.is_enabled = False
self._is_validated = False
self._pieces_count = int(rv)
@property
def boxorder(self):
if self.is_enabled and self.is_valid:
return self._rstrip_default_plus_ids(
" ".join(str(i) for i in self._box_plus_ids.values())
)
else:
return self._boxorder
@boxorder.setter
def boxorder(self, rv):
if rv != self._boxorder:
self.is_enabled = False
self._is_validated = False
self._boxorder = rv or ""
@property
def goalorder(self):
if self.is_enabled and self.is_valid:
return self._rstrip_default_plus_ids(
" ".join(str(i) for i in self._goal_plus_ids.values())
)
else:
return self._goalorder
@goalorder.setter
def goalorder(self, rv):
if rv != self._goalorder:
self.is_enabled = False
self._is_validated = False
self._goalorder = rv or ""
@property
def is_valid(self):
if self._is_validated:
return not self.errors
self.errors = []
try:
self._box_plus_ids = self._parse_and_clean_ids_string(self._boxorder)
self._goal_plus_ids = self._parse_and_clean_ids_string(self._goalorder)
except ValueError as exc:
self.errors.append(str(exc))
self._validate_plus_ids(self._box_plus_ids)
self._validate_plus_ids(self._goal_plus_ids)
self._validate_piece_count()
self._validate_ids_counts()
self._validate_id_sets_equality()
self._is_validated = True
return not self.errors
@property
def is_enabled(self):
return self._is_enabled
@is_enabled.setter
def is_enabled(self, value):
"""
Raises:
:exc:`SokobanPlusDataError`: Trying to enable invalid Sokoban+
"""
if value:
if not self.is_valid:
raise SokobanPlusDataError(self.errors)
self._is_enabled = value
def box_plus_id(self, for_box_id):
"""
Get Sokoban+ ID for box.
Args:
for_box_id (int): box ID
Returns:
int: If Sokoban+ is enabled returns Sokoban+ ID of a box. If not, returns
:const:`DEFAULT_PLUS_ID`
Raises:
:exc:`KeyError`: No box with ID ``for_box_id``, but only if i Sokoban+ is
enabled
"""
try:
return self._get_plus_id(for_box_id, from_where=self._box_plus_ids)
except KeyError:
raise KeyError("No box with ID: {0}".format(for_box_id))
def goal_plus_id(self, for_goal_id):
"""
Get Sokoban+ ID for goal.
Args:
for_goal_id (int): goal ID
Returns:
int: If Sokoban+ is enabled returns Sokoban+ ID of a goal. If not,
returns :const:`DEFAULT_PLUS_ID`
Raises:
:exc:`KeyError`: No goal with ID ``for_goal_id``, but only if Sokoban+ is
enabled
"""
try:
return self._get_plus_id(for_goal_id, from_where=self._goal_plus_ids)
except KeyError:
raise KeyError("No goal with ID: {0}".format(for_goal_id))
def _rstrip_default_plus_ids(self, plus_ids_str):
# TODO: Might not work correctly for "3 5 4 6 2 19" or "3 5 4 6 2 10"
if self.pieces_count < self._LEGACY_DEFAULT_PLUS_ID:
return plus_ids_str.rstrip(
str(self.DEFAULT_PLUS_ID) + " " + str(self._LEGACY_DEFAULT_PLUS_ID)
)
else:
return plus_ids_str.rstrip(str(self.DEFAULT_PLUS_ID) + " ")
def _get_plus_id(self, for_id, from_where):
if not self.is_enabled:
return self.DEFAULT_PLUS_ID
else:
return from_where[for_id]
def _parse_and_clean_ids_string(self, plus_ids_str):
"""
Safely replaces legacy default plus ids with default ones.
Returns:
dict: dict that maps piece IDs to piece Sokoban+ IDs
"""
def convert_or_raise(id_str):
try:
return int(id_str)
except ValueError:
raise SokobanPlusDataError(
"Can't parse Sokoban+ string! Illegal characters found. "
"Only digits and spaces allowed."
)
trimmed = [
convert_or_raise(id_str)
for id_str in self._rstrip_default_plus_ids(plus_ids_str).split()
]
cleaned = [
self.DEFAULT_PLUS_ID
if (
i == self._LEGACY_DEFAULT_PLUS_ID
and self.pieces_count < self._LEGACY_DEFAULT_PLUS_ID
)
else i
for i in trimmed
]
expanded = cleaned + [self.DEFAULT_PLUS_ID] * (self.pieces_count - len(cleaned))
retv = dict()
for index, plus_id in enumerate(expanded):
retv[DEFAULT_PIECE_ID + index] = plus_id
return retv
def _validate_plus_ids(self, ids):
if ids:
for i in ids.values():
if not self.is_valid_plus_id(i):
self.errors.append("Invalid Sokoban+ ID: {0}".format(i))
def _validate_piece_count(self):
if self.pieces_count < 0:
self.errors.append("Sokoban+ can't be applied to zero pieces count.")
def _validate_ids_counts(self):
if self._box_plus_ids and len(self._box_plus_ids) != self.pieces_count:
self.errors.append(
"Sokoban+ boxorder data doesn't contain same amount of IDs "
+ "as there are pieces on board! (pieces_count: {0})".format(
self.pieces_count
)
)
if self._goal_plus_ids and len(self._goal_plus_ids) != self.pieces_count:
self.errors.append(
"Sokoban+ goalorder data doesn't contain same amount of IDs "
+ "as there are pieces on board! (pieces_count: {0})".format(
self.pieces_count
)
)
def _validate_id_sets_equality(self):
if self._box_plus_ids:
boxes = set(
pid
for pid in self._box_plus_ids.values()
if pid != self.DEFAULT_PLUS_ID
)
else:
boxes = set()
if self._goal_plus_ids:
goals = set(
pid
for pid in self._goal_plus_ids.values()
if pid != self.DEFAULT_PLUS_ID
)
else:
goals = set()
if boxes != goals:
self.errors.append(
"Sokoban+ data doesn't define equal sets of IDs for "
+ "boxes and goals"
)
def __repr__(self):
return (
"SokobanPlus("
+ "pieces_count={0}, boxorder='{1}', goalorder='{2}')".format(
self.pieces_count, self.boxorder, self.goalorder
)
)
def __str__(self):
return (
"SokobanPlus("
+ "pieces_count={0}, boxorder='{1}', goalorder='{2}')".format(
self.pieces_count, self.boxorder, self.goalorder
)
)
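# A minimal usage sketch with made-up boxorder/goalorder strings for a board that has
# three boxes and three goals; the values are illustrative assumptions only and the
# helper is not used elsewhere in this module.
def _example_usage():
    sokoban_plus = SokobanPlus(pieces_count=3, boxorder="1 2 1", goalorder="2 1 1")
    sokoban_plus.is_enabled = True  # raises SokobanPlusDataError if the data is invalid
    # plus ids for the three boxes, i.e. [1, 2, 1] for the boxorder above
    return [
        sokoban_plus.box_plus_id(box_id)
        for box_id in range(DEFAULT_PIECE_ID, DEFAULT_PIECE_ID + 3)
    ]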
| gpl-3.0 | 4,151,498,084,753,524,700 | 31.338558 | 88 | 0.559035 | false | 3.884036 | false | false | false |
JesGor/test_rest | apprest/views.py | 1 | 1691 | from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from rest_framework import status
from .models import Empresa, Calificacion
from .serializers import EmpresaSerializer, CalificacionSerializer
from django.shortcuts import render
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
def index(request):
return render(request, 'apprest/index.html')
@csrf_exempt
def lista_empresas(request):
"""
    List the companies (empresas), or create one
"""
if request.method == 'GET':
empresas = Empresa.objects.all()
serializer = EmpresaSerializer(empresas, many=True)
return JSONResponse(serializer.data)
elif request.method == 'POST':
data = JSONParser().parse(request)
serializer = EmpresaSerializer(data=data)
if serializer.is_valid():
serializer.save()
return JSONResponse(serializer.data, status=201)
return JSONResponse(serializer.errors, status=400)
@csrf_exempt
def empresa(request, pk):
try:
empresa = Empresa.objects.get(pk=pk)
except Empresa.DoesNotExist:
return JSONResponse(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
calificaciones = empresa.calificacion_set.all()
serializer = CalificacionSerializer(calificaciones, many=True)
return JSONResponse(serializer.data)
elif request.method == 'DELETE':
empresa.delete()
return JSONResponse(status=status.HTTP_204_NO_CONTENT)
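# Illustrative requests for the two views above. The URL patterns are assumptions
# (the project's urls.py is not shown here), used only to make the behaviour concrete:
#   GET    /empresas/     -> JSON list of all Empresa records
#   POST   /empresas/     -> create an Empresa from the JSON request body
#   GET    /empresas/3/   -> JSON list of Calificacion records for Empresa 3
#   DELETE /empresas/3/   -> delete Empresa 3 and return 204 No Content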
| gpl-2.0 | 972,403,972,535,338,800 | 28.666667 | 66 | 0.760497 | false | 3.264479 | false | false | false |
morevnaproject/RenderChan | renderchan/httpserver.py | 2 | 4162 | __author__ = 'Ivan Mahonin'
from gettext import gettext as _
from argparse import ArgumentParser
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import parse_qs
from urllib.parse import unquote
from urllib.parse import urlparse
import os.path
import json
from renderchan.core import RenderChan
class RenderChanHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
parsed_url = urlparse(self.path)
args = parse_qs(parsed_url.query)
for key in args.keys():
args[key] = args[key][-1]
parsed_url_path = unquote(parsed_url.path)
while len(parsed_url_path) and (parsed_url_path[0] == '/' or parsed_url_path[0] == '\\'):
parsed_url_path = parsed_url_path[1:]
filename = os.path.abspath(os.path.join(self.server.renderchan_rootdir, parsed_url_path))
renderchan = RenderChan()
renderchan.datadir = self.server.renderchan_datadir
renderchan.track = True
renderchan.dry_run = True
if "dryRun" in args:
renderchan.dry_run = bool(args["dryRun"])
if "profile" in args:
renderchan.setProfile(str(args["profile"]))
if "renderfarmType" in args and str(args["renderfarmType"]) in renderchan.available_renderfarm_engines:
renderchan.renderfarm_engine = str(args["renderfarmType"])
if "host" in args:
if renderchan.renderfarm_engine in ("puli"):
renderchan.setHost(str(args["host"]))
else:
print("WARNING: The --host parameter cannot be set for this type of renderfarm.")
if "port" in args:
if renderchan.renderfarm_engine in ("puli"):
renderchan.setPort(int(args["port"]))
else:
print("WARNING: The --port parameter cannot be set for this type of renderfarm.")
if "cgru_location" in args:
renderchan.cgru_location = str(args["cgru_location"])
if "snapshot_to" in args:
renderchan.snapshot_path = str(args["snapshot_to"])
if "force" in args:
renderchan.force = bool(args["force"])
if "force_proxy" in args:
renderchan.force_proxy = bool(args["force_proxy"])
error = renderchan.submit('render', filename, bool(args.get("dependenciesOnly")), bool(args.get("allocateOnly")), str(args.get("stereo")))
reply = {}
if error:
reply["error"] = error
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
reply["files"] = [];
for file in renderchan.trackedFiles.values():
if file["source"][0:len(self.server.renderchan_rootdir)] == self.server.renderchan_rootdir:
file["source"] = file["source"][len(self.server.renderchan_rootdir):]
reply["files"].append( file )
self.wfile.write(bytes(json.dumps(reply, self.wfile), "UTF-8"))
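# An illustrative request against the handler above; the path and profile name are
# assumptions for the example only:
#   GET /project/scene.sif?profile=hd&force=1
# submits <root>/project/scene.sif to RenderChan with the "hd" profile (a dry run by
# default in this handler) and returns JSON with a "files" list plus an "error" key
# when submission fails.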
def process_args():
parser = ArgumentParser(description=_("Run RenderChan HTTP-server."),
epilog=_("For more information about RenderChan, visit https://morevnaproject.org/renderchan/"))
parser.add_argument("--host", dest="host",
action="store",
default="",
help=_("Set HTTP-server host."))
parser.add_argument("--port", dest="port",
type=int,
action="store",
default=80,
help=_("Set HTTP-server port."))
parser.add_argument("--root", dest="root",
action="store",
default=".",
help=_("Set HTTP-server root directory."))
return parser.parse_args()
def main(datadir, argv):
args = process_args()
server = HTTPServer((args.host, args.port), RenderChanHTTPRequestHandler)
server.renderchan_datadir = datadir
server.renderchan_rootdir = os.path.abspath(args.root)
print("Starting RenderChan HTTP-server at " + args.host + ":" + str(args.port))
server.serve_forever()
| bsd-3-clause | -1,991,866,624,910,789,600 | 37.537037 | 146 | 0.596588 | false | 4.048638 | false | false | false |
daevaorn/sentry | tests/sentry/api/endpoints/test_group_details.py | 2 | 5852 | from __future__ import absolute_import, print_function
from datetime import timedelta
from django.utils import timezone
from sentry.models import (
Activity, Group, GroupAssignee, GroupBookmark, GroupSeen, GroupSnooze,
GroupStatus, GroupTagValue, Release
)
from sentry.testutils import APITestCase
class GroupDetailsTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
group = self.create_group()
url = '/api/0/issues/{}/'.format(group.id)
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert response.data['id'] == str(group.id)
assert response.data['firstRelease'] is None
def test_with_first_release(self):
self.login_as(user=self.user)
group = self.create_group()
release = Release.objects.create(
project=group.project,
version='1.0',
)
GroupTagValue.objects.create(
group=group,
project=group.project,
key='sentry:release',
value=release.version,
)
url = '/api/0/issues/{}/'.format(group.id)
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert response.data['id'] == str(group.id)
assert response.data['firstRelease']['version'] == release.version
class GroupUpdateTest(APITestCase):
def test_resolve(self):
self.login_as(user=self.user)
group = self.create_group()
url = '/api/0/issues/{}/'.format(group.id)
response = self.client.put(url, data={
'status': 'resolved',
}, format='json')
assert response.status_code == 200, response.content
group = Group.objects.get(
id=group.id,
project=group.project.id,
)
assert group.status == GroupStatus.RESOLVED
def test_snooze_duration(self):
group = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
self.login_as(user=self.user)
url = '/api/0/issues/{}/'.format(group.id)
response = self.client.put(url, data={
'status': 'muted',
'snoozeDuration': 30,
}, format='json')
assert response.status_code == 200
snooze = GroupSnooze.objects.get(group=group)
assert snooze.until > timezone.now() + timedelta(minutes=29)
assert snooze.until < timezone.now() + timedelta(minutes=31)
assert response.data['statusDetails']['snoozeUntil'] == snooze.until
group = Group.objects.get(id=group.id)
assert group.get_status() == GroupStatus.MUTED
def test_bookmark(self):
self.login_as(user=self.user)
group = self.create_group()
url = '/api/0/issues/{}/'.format(group.id)
response = self.client.put(url, data={
'isBookmarked': '1',
}, format='json')
assert response.status_code == 200, response.content
# ensure we've created the bookmark
assert GroupBookmark.objects.filter(
group=group, user=self.user).exists()
def test_assign(self):
self.login_as(user=self.user)
group = self.create_group()
url = '/api/0/issues/{}/'.format(group.id)
response = self.client.put(url, data={
'assignedTo': self.user.username,
}, format='json')
assert response.status_code == 200, response.content
assert GroupAssignee.objects.filter(
group=group, user=self.user
).exists()
assert Activity.objects.filter(
group=group, user=self.user, type=Activity.ASSIGNED,
).count() == 1
response = self.client.put(url, format='json')
assert response.status_code == 200, response.content
assert GroupAssignee.objects.filter(
group=group, user=self.user
).exists()
response = self.client.put(url, data={
'assignedTo': '',
}, format='json')
assert response.status_code == 200, response.content
assert not GroupAssignee.objects.filter(
group=group, user=self.user
).exists()
def test_mark_seen(self):
self.login_as(user=self.user)
group = self.create_group()
url = '/api/0/issues/{}/'.format(group.id)
response = self.client.put(url, data={
'hasSeen': '1',
}, format='json')
assert response.status_code == 200, response.content
assert GroupSeen.objects.filter(
group=group, user=self.user).exists()
response = self.client.put(url, data={
'hasSeen': '0',
}, format='json')
assert response.status_code == 200, response.content
assert not GroupSeen.objects.filter(
group=group, user=self.user).exists()
def test_mark_seen_as_non_member(self):
user = self.create_user('[email protected]', is_superuser=True)
self.login_as(user=user)
group = self.create_group()
url = '/api/0/issues/{}/'.format(group.id)
response = self.client.put(url, data={
'hasSeen': '1',
}, format='json')
assert response.status_code == 200, response.content
assert not GroupSeen.objects.filter(
group=group, user=self.user).exists()
class GroupDeleteTest(APITestCase):
def test_delete(self):
self.login_as(user=self.user)
group = self.create_group()
url = '/api/0/issues/{}/'.format(group.id)
with self.tasks():
response = self.client.delete(url, format='json')
assert response.status_code == 202, response.content
group = Group.objects.filter(id=group.id).exists()
assert not group
| bsd-3-clause | -3,845,956,371,487,922,700 | 27.407767 | 81 | 0.595523 | false | 3.817352 | true | false | false |
dc3-plaso/plaso | tests/parsers/utmpx.py | 1 | 2564 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for UTMPX file parser."""
import unittest
from plaso.formatters import utmpx # pylint: disable=unused-import
from plaso.lib import timelib
from plaso.parsers import utmpx
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
class UtmpxParserTest(test_lib.ParserTestCase):
"""Tests for utmpx file parser."""
@shared_test_lib.skipUnlessHasTestFile([u'utmpx_mac'])
def testParse(self):
"""Tests the Parse function."""
parser_object = utmpx.UtmpxParser()
storage_writer = self._ParseFile([u'utmpx_mac'], parser_object)
self.assertEqual(len(storage_writer.events), 6)
event_object = storage_writer.events[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-11-13 17:52:34')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_message = (
u'User: N/A Status: BOOT_TIME '
u'Computer Name: localhost Terminal: N/A')
expected_short_message = u'User: N/A'
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
event_object = storage_writer.events[1]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-11-13 17:52:41.736713')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.user, u'moxilo')
self.assertEqual(event_object.terminal, u'console', )
self.assertEqual(event_object.status_type, 7)
self.assertEqual(event_object.computer_name, u'localhost')
expected_message = (
u'User: moxilo Status: '
u'USER_PROCESS '
u'Computer Name: localhost '
u'Terminal: console')
expected_short_message = u'User: moxilo'
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
event_object = storage_writer.events[4]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-11-14 04:32:56.641464')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.user, u'moxilo')
self.assertEqual(event_object.terminal, u'ttys002')
self.assertEqual(event_object.status_type, 8)
expected_message = (
u'User: moxilo Status: '
u'DEAD_PROCESS '
u'Computer Name: localhost '
u'Terminal: ttys002')
expected_short_message = u'User: moxilo'
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 8,200,209,274,617,385,000 | 30.268293 | 67 | 0.691108 | false | 3.460189 | true | false | false |
UpSea/midProjects | BasicOperations/02_Datetime/Datatime To str.py | 1 | 1246 | import time as time
import datetime as dt
dateEnd = dt.datetime.now()
strEnd = dateEnd.strftime('%Y-%m-%d %H:%M:%S')
print('now:'+strEnd)
'''mid
The following code splits a fixed period, given as date strings, into several equal
segments and then prints each segment as strings.
'''
timeFrom = '2016-05-20 00:00:00'
timeTo = '2016-05-30 00:00:00'
phases = 3
print timeFrom,timeTo
#mid 1)str to pyTimeStamp
timeStampFrom = int(time.mktime(time.strptime(timeFrom, "%Y-%m-%d %H:%M:%S")))
timeStampTo = int(time.mktime(time.strptime(timeTo, "%Y-%m-%d %H:%M:%S")))
#mid 2)str to datetime
timeFrom = dt.datetime.strptime(timeFrom,'%Y-%m-%d %H:%M:%S')
timeTo = dt.datetime.strptime(timeTo,'%Y-%m-%d %H:%M:%S')
interval = (timeStampTo - timeStampFrom)/phases
startTimeStamp = timeStampFrom
for index in range(phases):
endTimeStamp = startTimeStamp + interval
#mid 3)pyTimeStamp to datetime
timeFromDatetime = dt.datetime.utcfromtimestamp(startTimeStamp)
timeToDatetime = dt.datetime.utcfromtimestamp(endTimeStamp)
#mid 4)datetime to str
strTimeFrom = timeFromDatetime.strftime("%Y-%m-%d %H:%M:%S")
strTimeTo = timeToDatetime.strftime("%Y-%m-%d %H:%M:%S")
print '------',strTimeFrom,strTimeTo
startTimeStamp = endTimeStamp
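#mid An equivalent sketch using only datetime arithmetic (no time-module timestamps);
#mid the function name is an assumption and it is not called above.
def split_period(time_from_str, time_to_str, phases):
    fmt = '%Y-%m-%d %H:%M:%S'
    start = dt.datetime.strptime(time_from_str, fmt)
    end = dt.datetime.strptime(time_to_str, fmt)
    step = (end - start) // phases
    # list of (phase start, phase end) datetime pairs covering the whole period
    return [(start + i * step, start + (i + 1) * step) for i in range(phases)]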
| mit | 8,901,578,867,318,427,000 | 30.131579 | 80 | 0.684433 | false | 2.723502 | false | false | false |
saimn/astropy | astropy/coordinates/calculation.py | 3 | 7008 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Standard library
import re
import textwrap
import warnings
from datetime import datetime
from urllib.request import urlopen, Request
# Third-party
from astropy import time as atime
from astropy.utils.console import color_print, _color_text
from . import get_sun
__all__ = []
class HumanError(ValueError):
pass
class CelestialError(ValueError):
pass
def get_sign(dt):
"""
"""
if ((int(dt.month) == 12 and int(dt.day) >= 22)or(int(dt.month) == 1 and int(dt.day) <= 19)):
zodiac_sign = "capricorn"
elif ((int(dt.month) == 1 and int(dt.day) >= 20)or(int(dt.month) == 2 and int(dt.day) <= 17)):
zodiac_sign = "aquarius"
elif ((int(dt.month) == 2 and int(dt.day) >= 18)or(int(dt.month) == 3 and int(dt.day) <= 19)):
zodiac_sign = "pisces"
elif ((int(dt.month) == 3 and int(dt.day) >= 20)or(int(dt.month) == 4 and int(dt.day) <= 19)):
zodiac_sign = "aries"
elif ((int(dt.month) == 4 and int(dt.day) >= 20)or(int(dt.month) == 5 and int(dt.day) <= 20)):
zodiac_sign = "taurus"
elif ((int(dt.month) == 5 and int(dt.day) >= 21)or(int(dt.month) == 6 and int(dt.day) <= 20)):
zodiac_sign = "gemini"
elif ((int(dt.month) == 6 and int(dt.day) >= 21)or(int(dt.month) == 7 and int(dt.day) <= 22)):
zodiac_sign = "cancer"
elif ((int(dt.month) == 7 and int(dt.day) >= 23)or(int(dt.month) == 8 and int(dt.day) <= 22)):
zodiac_sign = "leo"
elif ((int(dt.month) == 8 and int(dt.day) >= 23)or(int(dt.month) == 9 and int(dt.day) <= 22)):
zodiac_sign = "virgo"
elif ((int(dt.month) == 9 and int(dt.day) >= 23)or(int(dt.month) == 10 and int(dt.day) <= 22)):
zodiac_sign = "libra"
elif ((int(dt.month) == 10 and int(dt.day) >= 23)or(int(dt.month) == 11 and int(dt.day) <= 21)):
zodiac_sign = "scorpio"
elif ((int(dt.month) == 11 and int(dt.day) >= 22)or(int(dt.month) == 12 and int(dt.day) <= 21)):
zodiac_sign = "sagittarius"
return zodiac_sign
_VALID_SIGNS = ["capricorn", "aquarius", "pisces", "aries", "taurus", "gemini",
"cancer", "leo", "virgo", "libra", "scorpio", "sagittarius"]
# Some of the constellation names map to different astrological "sign names".
# Astrologers really need to talk to the IAU...
_CONST_TO_SIGNS = {'capricornus': 'capricorn', 'scorpius': 'scorpio'}
_ZODIAC = ((1900, "rat"), (1901, "ox"), (1902, "tiger"),
(1903, "rabbit"), (1904, "dragon"), (1905, "snake"),
(1906, "horse"), (1907, "goat"), (1908, "monkey"),
(1909, "rooster"), (1910, "dog"), (1911, "pig"))
# https://stackoverflow.com/questions/12791871/chinese-zodiac-python-program
def _get_zodiac(yr):
return _ZODIAC[(yr - _ZODIAC[0][0]) % 12][1]
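# Worked example of the index arithmetic above (illustration only, not astropy code):
# for yr = 1998, (1998 - 1900) % 12 = 98 % 12 = 2, and _ZODIAC[2] is (1902, "tiger"),
# so _get_zodiac(1998) returns "tiger".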
def horoscope(birthday, corrected=True, chinese=False):
"""
Enter your birthday as an `astropy.time.Time` object and
receive a mystical horoscope about things to come.
    Parameters
    ----------
birthday : `astropy.time.Time` or str
Your birthday as a `datetime.datetime` or `astropy.time.Time` object
or "YYYY-MM-DD"string.
corrected : bool
Whether to account for the precession of the Earth instead of using the
ancient Greek dates for the signs. After all, you do want your *real*
horoscope, not a cheap inaccurate approximation, right?
chinese : bool
Chinese annual zodiac wisdom instead of Western one.
Returns
-------
Infinite wisdom, condensed into astrologically precise prose.
Notes
-----
This function was implemented on April 1. Take note of that date.
"""
from bs4 import BeautifulSoup
today = datetime.now()
err_msg = "Invalid response from celestial gods (failed to load horoscope)."
headers = {'User-Agent': 'foo/bar'}
special_words = {
'([sS]tar[s^ ]*)': 'yellow',
'([yY]ou[^ ]*)': 'magenta',
'([pP]lay[^ ]*)': 'blue',
'([hH]eart)': 'red',
'([fF]ate)': 'lightgreen',
}
if isinstance(birthday, str):
birthday = datetime.strptime(birthday, '%Y-%m-%d')
if chinese:
# TODO: Make this more accurate by using the actual date, not just year
# Might need third-party tool like https://pypi.org/project/lunardate
zodiac_sign = _get_zodiac(birthday.year)
url = ('https://www.horoscope.com/us/horoscopes/yearly/'
'{}-chinese-horoscope-{}.aspx'.format(today.year, zodiac_sign))
summ_title_sfx = f'in {today.year}'
try:
res = Request(url, headers=headers)
with urlopen(res) as f:
try:
doc = BeautifulSoup(f, 'html.parser')
# TODO: Also include Love, Family & Friends, Work, Money, More?
item = doc.find(id='overview')
desc = item.getText()
except Exception:
raise CelestialError(err_msg)
except Exception:
raise CelestialError(err_msg)
else:
birthday = atime.Time(birthday)
if corrected:
with warnings.catch_warnings():
warnings.simplefilter('ignore') # Ignore ErfaWarning
zodiac_sign = get_sun(birthday).get_constellation().lower()
zodiac_sign = _CONST_TO_SIGNS.get(zodiac_sign, zodiac_sign)
if zodiac_sign not in _VALID_SIGNS:
raise HumanError('On your birthday the sun was in {}, which is not '
'a sign of the zodiac. You must not exist. Or '
'maybe you can settle for '
'corrected=False.'.format(zodiac_sign.title()))
else:
zodiac_sign = get_sign(birthday.to_datetime())
url = f"http://www.astrology.com/us/horoscope/daily-overview.aspx?sign={zodiac_sign}"
summ_title_sfx = f"on {today.strftime('%Y-%m-%d')}"
res = Request(url, headers=headers)
with urlopen(res) as f:
try:
doc = BeautifulSoup(f, 'html.parser')
item = doc.find('span', {'class': 'date'})
desc = item.parent.getText()
except Exception:
raise CelestialError(err_msg)
print("*"*79)
color_print(f"Horoscope for {zodiac_sign.capitalize()} {summ_title_sfx}:",
'green')
print("*"*79)
for block in textwrap.wrap(desc, 79):
split_block = block.split()
for i, word in enumerate(split_block):
for re_word in special_words.keys():
match = re.search(re_word, word)
if match is None:
continue
split_block[i] = _color_text(match.groups()[0], special_words[re_word])
print(" ".join(split_block))
def inject_horoscope():
import astropy
astropy._yourfuture = horoscope
inject_horoscope()
| bsd-3-clause | 1,458,674,728,459,648,800 | 36.475936 | 100 | 0.572203 | false | 3.159603 | false | false | false |
jcmcclurg/serverpower | profiling/generateRampRateTestPlots.py | 1 | 1388 | verbose = True
saveFig = True
showFig = True
if verbose:
print "Loading general modules..."
import numpy as np
import sys
if verbose:
print "Loading matplotlib module..."
import matplotlib.pyplot as plt
if verbose:
print "Loading custom functions..."
from defaultGetPower import readPowerFile
if verbose:
print " Reading power file..."
littleFile = 'rampRateTest/powerlog1.log'
#bigFile = 'experiments/cgroups/1456424681.514702900/powerlog.log'
bigFile = 'experiments/signal_insert_delays/1452732970.201413700/powerlog.log'
#type = 'big'
type = 'little'
if type == 'little':
file = littleFile
else:
file = bigFile
powerData = readPowerFile(file,1000,verbose)
#powerData = powerData[:, [0,4,5,6,7] ]
powerData = powerData[:, [0,4,5,6,7] ]
ignoreTime = 10
if showFig:
if type == 'little':
time = np.linspace(0,powerData[-1,0]-powerData[0,0],powerData.shape[0])
data = np.zeros(time.shape[0])
data[:] = np.sum(powerData[:,1:],axis=1)
else:
start = 30000+2950
len = 150
#start = 0
#len=10000
# The data is ten samples separated.
data = np.zeros(len)
for i in range(4):
data += powerData[start+10*i:start+10*i+len,i+1]
time = np.linspace(0,powerData[start+len-1,0]-powerData[start,0],len)
plt.plot(time,data)
plt.title('Example of fast ramp rate for four-server cluster')
plt.xlabel('Time (seconds)')
plt.ylabel('Cluster power (W)')
plt.show()
| gpl-2.0 | 1,362,174,705,579,429,400 | 22.931034 | 78 | 0.70389 | false | 2.732283 | false | false | false |
jupyter/nbgrader | nbgrader/tests/apps/test_api.py | 3 | 36897 | import pytest
import sys
import os
import shutil
import filecmp
from os.path import join
from traitlets.config import Config
from datetime import datetime
from ...apps.api import NbGraderAPI
from ...coursedir import CourseDirectory
from ...utils import rmtree, get_username, parse_utc
from .. import run_nbgrader
from .base import BaseTestApp
from .conftest import notwindows, windows
@pytest.fixture
def api(request, course_dir, db, exchange, cache):
config = Config()
config.CourseDirectory.course_id = "abc101"
config.Exchange.root = exchange
config.Exchange.cache = cache
config.CourseDirectory.root = course_dir
config.CourseDirectory.db_url = db
coursedir = CourseDirectory(config=config)
api = NbGraderAPI(coursedir, config=config)
return api
class TestNbGraderAPI(BaseTestApp):
if sys.platform == 'win32':
tz = "Coordinated Universal Time"
else:
tz = "UTC"
def test_get_source_assignments(self, api, course_dir):
assert api.get_source_assignments() == set([])
self._empty_notebook(join(course_dir, "source", "ps1", "problem1.ipynb"))
self._empty_notebook(join(course_dir, "source", "ps2", "problem1.ipynb"))
self._make_file(join(course_dir, "source", "blah"))
assert api.get_source_assignments() == {"ps1", "ps2"}
@notwindows
def test_get_released_assignments(self, api, exchange, course_dir):
assert api.get_released_assignments() == set([])
self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb"))
run_nbgrader(["release_assignment", "ps1", "--course", "abc101", "--Exchange.root={}".format(exchange)])
assert api.get_released_assignments() == {"ps1"}
api.course_id = None
assert api.get_released_assignments() == set([])
@windows
def test_get_released_assignments_windows(self, api, exchange, course_dir):
assert api.get_released_assignments() == set([])
api.course_id = 'abc101'
assert api.get_released_assignments() == set([])
def test_get_submitted_students(self, api, course_dir):
assert api.get_submitted_students("ps1") == set([])
self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "problem1.ipynb"))
self._empty_notebook(join(course_dir, "submitted", "bar", "ps1", "problem1.ipynb"))
self._make_file(join(course_dir, "submitted", "blah"))
assert api.get_submitted_students("ps1") == {"foo", "bar"}
assert api.get_submitted_students("*") == {"foo", "bar"}
def test_get_submitted_timestamp(self, api, course_dir):
assert api.get_submitted_timestamp("ps1", "foo") is None
self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "problem1.ipynb"))
assert api.get_submitted_timestamp("ps1", "foo") is None
timestamp = datetime.now()
self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat())
assert api.get_submitted_timestamp("ps1", "foo") == timestamp
def test_get_autograded_students(self, api, course_dir, db):
self._empty_notebook(join(course_dir, "source", "ps1", "problem1.ipynb"))
run_nbgrader(["generate_assignment", "ps1", "--db", db])
# submitted and autograded exist, but not in the database
self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "problem1.ipynb"))
timestamp = datetime.now()
self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat())
self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "problem1.ipynb"))
self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat())
assert api.get_autograded_students("ps1") == set([])
# run autograde so things are consistent
run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
assert api.get_autograded_students("ps1") == {"foo"}
# updated submission
timestamp = datetime.now()
self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat())
assert api.get_autograded_students("ps1") == set([])
def test_get_autograded_students_no_timestamps(self, api, course_dir, db):
self._empty_notebook(join(course_dir, "source", "ps1", "problem1.ipynb"))
run_nbgrader(["generate_assignment", "ps1", "--db", db])
# submitted and autograded exist, but not in the database
self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "problem1.ipynb"))
self._empty_notebook(join(course_dir, "autograded", "foo", "ps1", "problem1.ipynb"))
assert api.get_autograded_students("ps1") == set([])
# run autograde so things are consistent
run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
assert api.get_autograded_students("ps1") == {"foo"}
# updated submission
timestamp = datetime.now()
self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat())
assert api.get_autograded_students("ps1") == set([])
def test_get_assignment(self, api, course_dir, db, exchange):
keys = set([
'average_code_score', 'average_score', 'average_written_score',
'duedate', 'name', 'num_submissions', 'release_path', 'releaseable',
'source_path', 'status', 'id', 'max_code_score', 'max_score',
'max_written_score', 'display_duedate', 'duedate_timezone',
'duedate_notimezone',
'max_task_score', 'average_task_score'])
default = {
"average_code_score": 0,
"average_score": 0,
"average_written_score": 0,
"average_task_score": 0,
"duedate": None,
"display_duedate": None,
"duedate_timezone": "+0000",
"duedate_notimezone": None,
"name": "ps1",
"num_submissions": 0,
"release_path": None,
"releaseable": True if sys.platform != 'win32' else False,
"source_path": join("source", "ps1"),
"status": "draft",
"id": None,
"max_code_score": 0,
"max_score": 0,
"max_written_score": 0,
"max_task_score": 0
}
# check that return value is None when there is no assignment
a = api.get_assignment("ps1")
assert a is None
# check the values when the source assignment exists, but hasn't been
# released yet
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
a = api.get_assignment("ps1")
assert set(a.keys()) == keys
target = default.copy()
assert a == target
# check that it is not releasable if the course id isn't set
api.course_id = None
a = api.get_assignment("ps1")
assert set(a.keys()) == keys
target = default.copy()
target["releaseable"] = False
assert a == target
# check the values once the student version of the assignment has been created
api.course_id = "abc101"
run_nbgrader(["generate_assignment", "ps1", "--db", db])
a = api.get_assignment("ps1")
assert set(a.keys()) == keys
target = default.copy()
target["release_path"] = join("release", "ps1")
target["id"] = a["id"]
target["max_code_score"] = 5
target["max_score"] = 6
target["max_written_score"] = 1
target["max_task_score"] = 1
assert a == target
# check that timestamps are handled correctly
with api.gradebook as gb:
assignment = gb.find_assignment("ps1")
assignment.duedate = parse_utc("2017-07-05 12:22:08 UTC")
gb.db.commit()
a = api.get_assignment("ps1")
default["duedate"] = "2017-07-05T12:22:08"
default["display_duedate"] = "2017-07-05 12:22:08 {}".format(self.tz)
default["duedate_notimezone"] = "2017-07-05T12:22:08"
assert a["duedate"] == default["duedate"]
assert a["display_duedate"] == default["display_duedate"]
assert a["duedate_notimezone"] == default["duedate_notimezone"]
assert a["duedate_timezone"] == default["duedate_timezone"]
# check the values once the assignment has been released and unreleased
if sys.platform != "win32":
run_nbgrader(["release_assignment", "ps1", "--course", "abc101", "--Exchange.root={}".format(exchange)])
a = api.get_assignment("ps1")
assert set(a.keys()) == keys
target = default.copy()
target["release_path"] = join("release", "ps1")
target["id"] = a["id"]
target["max_code_score"] = 5
target["max_score"] = 6
target["max_written_score"] = 1
target["max_task_score"] = 1
target["releaseable"] = True
target["status"] = "released"
assert a == target
run_nbgrader(["list", "ps1", "--course", "abc101", "--Exchange.root={}".format(exchange), "--remove"])
a = api.get_assignment("ps1")
assert set(a.keys()) == keys
target = default.copy()
target["release_path"] = join("release", "ps1")
target["id"] = a["id"]
target["max_code_score"] = 5
target["max_score"] = 6
target["max_written_score"] = 1
target["max_task_score"] = 1
assert a == target
# check the values once there are submissions as well
self._empty_notebook(join(course_dir, "submitted", "foo", "ps1", "problem1.ipynb"))
self._empty_notebook(join(course_dir, "submitted", "bar", "ps1", "problem1.ipynb"))
a = api.get_assignment("ps1")
assert set(a.keys()) == keys
target = default.copy()
target["release_path"] = join("release", "ps1")
target["id"] = a["id"]
target["max_code_score"] = 5
target["max_score"] = 6
target["max_written_score"] = 1
target["max_task_score"] = 1
target["num_submissions"] = 2
assert a == target
def test_get_assignments(self, api, course_dir):
assert api.get_assignments() == []
self._empty_notebook(join(course_dir, "source", "ps1", "problem1.ipynb"))
self._empty_notebook(join(course_dir, "source", "ps2", "problem1.ipynb"))
a = api.get_assignments()
assert len(a) == 2
assert a[0] == api.get_assignment("ps1")
assert a[1] == api.get_assignment("ps2")
def test_get_notebooks(self, api, course_dir, db):
keys = set([
'average_code_score', 'average_score', 'average_written_score',
'name', 'id', 'max_code_score', 'max_score', 'max_written_score',
'max_task_score', 'average_task_score',
'needs_manual_grade', 'num_submissions'])
default = {
"name": "p1",
"id": None,
"average_code_score": 0,
"max_code_score": 0,
"average_score": 0,
"max_score": 0,
"average_written_score": 0,
"max_written_score": 0,
"average_task_score": 0,
"max_task_score": 0,
"needs_manual_grade": False,
"num_submissions": 0
}
# check that return value is None when there is no assignment
n = api.get_notebooks("ps1")
assert n == []
# check values before nbgrader generate_assignment is run
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
n1, = api.get_notebooks("ps1")
assert set(n1.keys()) == keys
assert n1 == default.copy()
# add it to the database (but don't assign yet)
with api.gradebook as gb:
gb.update_or_create_assignment("ps1")
n1, = api.get_notebooks("ps1")
assert set(n1.keys()) == keys
assert n1 == default.copy()
# check values after nbgrader generate_assignment is run
run_nbgrader(["generate_assignment", "ps1", "--db", db, "--force"])
n1, = api.get_notebooks("ps1")
assert set(n1.keys()) == keys
target = default.copy()
target["id"] = n1["id"]
target["max_code_score"] = 5
target["max_score"] = 6
target["max_written_score"] = 1
assert n1 == target
def test_get_submission(self, api, course_dir, db):
keys = set([
"id", "name", "student", "last_name", "first_name", "score",
"max_score", "code_score", "max_code_score", "written_score",
"max_written_score", "task_score", "max_task_score", "needs_manual_grade", "autograded",
"timestamp", "submitted", "display_timestamp"])
default = {
"id": None,
"name": "ps1",
"student": "foo",
"last_name": None,
"first_name": None,
"score": 0,
"max_score": 0,
"code_score": 0,
"max_code_score": 0,
"written_score": 0,
"max_written_score": 0,
"task_score": 0,
"max_task_score": 0,
"needs_manual_grade": False,
"autograded": False,
"timestamp": None,
"display_timestamp": None,
"submitted": False
}
s = api.get_submission("ps1", "foo")
assert s == default.copy()
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
run_nbgrader(["generate_assignment", "ps1", "--db", db])
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents="2017-07-05T12:32:56.123456")
s = api.get_submission("ps1", "foo")
assert set(s.keys()) == keys
target = default.copy()
target["submitted"] = True
target["timestamp"] = "2017-07-05T12:32:56.123456"
target["display_timestamp"] = "2017-07-05 12:32:56 {}".format(self.tz)
assert s == target
run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
s = api.get_submission("ps1", "foo")
target = default.copy()
target["id"] = s["id"]
target["autograded"] = True
target["submitted"] = True
target["timestamp"] = "2017-07-05T12:32:56.123456"
target["display_timestamp"] = "2017-07-05 12:32:56 {}".format(self.tz)
target["code_score"] = 2
target["max_code_score"] = 5
target["score"] = 2
target["max_score"] = 7
target["written_score"] = 0
target["max_written_score"] = 2
target["needs_manual_grade"] = True
assert s == target
def test_get_submission_no_timestamp(self, api, course_dir, db):
keys = set([
"id", "name", "student", "last_name", "first_name", "score",
"max_score", "code_score", "max_code_score", "written_score",
"max_written_score", "task_score", "max_task_score", "needs_manual_grade", "autograded",
"timestamp", "submitted", "display_timestamp"])
default = {
"id": None,
"name": "ps1",
"student": "foo",
"last_name": None,
"first_name": None,
"score": 0,
"max_score": 0,
"code_score": 0,
"max_code_score": 0,
"written_score": 0,
"max_written_score": 0,
"task_score": 0,
"max_task_score": 0,
"needs_manual_grade": False,
"autograded": False,
"timestamp": None,
"display_timestamp": None,
"submitted": False
}
s = api.get_submission("ps1", "foo")
assert s == default.copy()
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
run_nbgrader(["generate_assignment", "ps1", "--db", db])
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
s = api.get_submission("ps1", "foo")
assert set(s.keys()) == keys
target = default.copy()
target["submitted"] = True
assert s == target
run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
s = api.get_submission("ps1", "foo")
target = default.copy()
target["id"] = s["id"]
target["autograded"] = True
target["submitted"] = True
target["code_score"] = 2
target["max_code_score"] = 5
target["score"] = 2
target["max_score"] = 7
target["written_score"] = 0
target["max_written_score"] = 2
target["needs_manual_grade"] = True
assert s == target
def test_get_submissions(self, api, course_dir, db):
assert api.get_submissions("ps1") == []
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
run_nbgrader(["generate_assignment", "ps1", "--db", db])
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
timestamp = datetime.now()
self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat())
s1, = api.get_submissions("ps1")
assert s1 == api.get_submission("ps1", "foo")
run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
s1, = api.get_submissions("ps1")
assert s1 == api.get_submission("ps1", "foo")
def test_filter_existing_notebooks(self, api, course_dir, db):
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
run_nbgrader(["generate_assignment", "ps1", "--db", db])
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
with api.gradebook as gb:
notebooks = gb.notebook_submissions("p1", "ps1")
s = api._filter_existing_notebooks("ps1", notebooks)
assert s == notebooks
notebooks = gb.notebook_submissions("p2", "ps1")
s = api._filter_existing_notebooks("ps1", notebooks)
assert s == []
@notwindows
def test_filter_existing_notebooks_strict(self, api, course_dir, db):
api.config.ExchangeSubmit.strict = True
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
run_nbgrader(["generate_assignment", "ps1", "--db", db])
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
with api.gradebook as gb:
notebooks = gb.notebook_submissions("p1", "ps1")
s = api._filter_existing_notebooks("ps1", notebooks)
assert s == notebooks
notebooks = gb.notebook_submissions("p2", "ps1")
s = api._filter_existing_notebooks("ps1", notebooks)
assert s == notebooks
def test_get_notebook_submission_indices(self, api, course_dir, db):
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
run_nbgrader(["generate_assignment", "ps1", "--db", db])
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
with api.gradebook as gb:
notebooks = gb.notebook_submissions("p1", "ps1")
notebooks.sort(key=lambda x: x.id)
idx = api.get_notebook_submission_indices("ps1", "p1")
assert idx[notebooks[0].id] == 0
assert idx[notebooks[1].id] == 1
def test_get_notebook_submissions(self, api, course_dir, db):
assert api.get_notebook_submissions("ps1", "p1") == []
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
run_nbgrader(["generate_assignment", "ps1", "--db", db])
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "baz", "ps1", "p1.ipynb"))
s = api.get_notebook_submissions("ps1", "p1")
assert len(s) == 2
with api.gradebook as gb:
notebooks = gb.notebook_submissions("p1", "ps1")
notebooks.sort(key=lambda x: x.id)
notebooks = [x.to_dict() for x in notebooks]
for i in range(2):
notebooks[i]["index"] = i
assert s[i] == notebooks[i]
def test_get_student(self, api, course_dir, db):
assert api.get_student("foo") is None
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
assert api.get_student("foo") == {
"id": "foo",
"last_name": None,
"first_name": None,
"email": None,
"lms_user_id": None,
"max_score": 0,
"score": 0
}
rmtree(join(course_dir, "submitted", "foo"))
with api.gradebook as gb:
gb.add_student("foo")
assert api.get_student("foo") == {
"id": "foo",
"last_name": None,
"first_name": None,
"email": None,
"lms_user_id": None,
"max_score": 0,
"score": 0
}
gb.update_or_create_student("foo", last_name="Foo", first_name="A", email="[email protected]", lms_user_id="230")
assert api.get_student("foo") == {
"id": "foo",
"last_name": "Foo",
"first_name": "A",
"email": "[email protected]",
"lms_user_id": "230",
"max_score": 0,
"score": 0
}
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
run_nbgrader(["generate_assignment", "ps1", "--db", db])
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
assert api.get_student("foo") == {
"id": "foo",
"last_name": "Foo",
"first_name": "A",
"email": "[email protected]",
"lms_user_id": "230",
"max_score": 7,
"score": 2
}
def test_get_students(self, api, course_dir):
assert api.get_students() == []
with api.gradebook as gb:
gb.update_or_create_student("foo", last_name="Foo", first_name="A", email="[email protected]", lms_user_id=None)
s1 = {
"id": "foo",
"last_name": "Foo",
"first_name": "A",
"email": "[email protected]",
"lms_user_id": None,
"max_score": 0,
"score": 0
}
assert api.get_students() == [s1]
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
s2 = {
"id": "bar",
"last_name": None,
"first_name": None,
"email": None,
"lms_user_id": None,
"max_score": 0,
"score": 0
}
assert api.get_students() == [s1, s2]
def test_get_student_submissions(self, api, course_dir, db):
assert api.get_student_submissions("foo") == []
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
run_nbgrader(["generate_assignment", "ps1", "--db", db])
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
timestamp = datetime.now()
self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat())
run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
assert api.get_student_submissions("foo") == [api.get_submission("ps1", "foo")]
def test_get_student_notebook_submissions(self, api, course_dir, db):
assert api.get_student_notebook_submissions("foo", "ps1") == []
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
run_nbgrader(["generate_assignment", "ps1", "--db", db])
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
run_nbgrader(["autograde", "ps1", "--no-execute", "--force", "--db", db])
s_p1, s_p2 = api.get_student_notebook_submissions("foo", "ps1")
p1, = api.get_notebook_submissions("ps1", "p1")
del p1["index"]
assert s_p1 == p1
assert s_p2 == {
"id": None,
"name": "p2",
"student": "foo",
"last_name": None,
"first_name": None,
"score": 0,
"max_score": 7,
"code_score": 0,
"max_code_score": 5,
"written_score": 0,
"max_written_score": 2,
"task_score": 0,
"max_task_score": 0,
"needs_manual_grade": False,
"failed_tests": False,
"flagged": False
}
def test_deprecation(self, api, course_dir, db):
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
result = api.generate_assignment("ps1")
assert result["success"]
assert os.path.exists(join(course_dir, "release", "ps1", "p1.ipynb"))
os.makedirs(join(course_dir, "source", "ps2"))
result = api.assign("ps2")
assert not result["success"]
def test_generate_assignment(self, api, course_dir, db):
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
result = api.generate_assignment("ps1")
assert result["success"]
assert os.path.exists(join(course_dir, "release", "ps1", "p1.ipynb"))
os.makedirs(join(course_dir, "source", "ps2"))
result = api.generate_assignment("ps2")
assert not result["success"]
@notwindows
def test_release_deprecated(self, api, course_dir, db, exchange):
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
result = api.generate_assignment("ps1")
result = api.release("ps1")
assert result["success"]
assert os.path.exists(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
@notwindows
def test_release_and_unrelease(self, api, course_dir, db, exchange):
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
result = api.generate_assignment("ps1")
result = api.release_assignment("ps1")
assert result["success"]
assert os.path.exists(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
result = api.release_assignment("ps1")
assert not result["success"]
result = api.unrelease("ps1")
assert result["success"]
assert not os.path.exists(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
@notwindows
def test_collect(self, api, course_dir, db, exchange):
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
result = api.generate_assignment("ps1")
result = api.release_assignment("ps1")
result = api.collect("ps1")
assert result["success"]
assert "No submissions" in result["log"]
run_nbgrader(["fetch_assignment", "ps1", "--course", "abc101", "--Exchange.root={}".format(exchange)])
run_nbgrader(["submit", "ps1", "--course", "abc101", "--Exchange.root={}".format(exchange)])
username = get_username()
result = api.collect("ps1")
assert result["success"]
assert "Collecting submission" in result["log"]
assert os.path.exists(join(course_dir, "submitted", username, "ps1", "p1.ipynb"))
run_nbgrader(["submit", "ps1", "--course", "abc101", "--Exchange.root={}".format(exchange)])
result = api.collect("ps1")
assert result["success"]
assert "Updating submission" in result["log"]
assert os.path.exists(join(course_dir, "submitted", username, "ps1", "p1.ipynb"))
@notwindows
def test_autograde(self, api, course_dir, db):
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
api.generate_assignment("ps1")
result = api.autograde("ps1", "foo")
assert not result["success"]
assert "No notebooks were matched" in result["log"]
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
result = api.autograde("ps1", "foo")
assert result["success"]
assert os.path.exists(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
result = api.autograde("ps1", "foo")
assert result["success"]
def test_generate_feedback(self, api, course_dir, db):
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
api.generate_assignment("ps1")
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
api.autograde("ps1", "foo")
result = api.generate_feedback("ps1", "foo")
assert result["success"]
assert os.path.exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
contents = open(join(course_dir, "feedback", "foo", "ps1", "p1.html"), "r").read()
# update the grade
with api.gradebook as gb:
nb = gb.find_submission_notebook("p1", "ps1", "foo")
nb.grades[0].manual_score = 123
gb.db.commit()
# contents shouldn't have changed, because force=False
result = api.generate_feedback("ps1", "foo", force=False)
assert result["success"]
assert os.path.exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
new_contents = open(join(course_dir, "feedback", "foo", "ps1", "p1.html"), "r").read()
assert new_contents == contents
# contents should now have changed, because force=True
result = api.generate_feedback("ps1", "foo", force=True)
assert result["success"]
assert os.path.exists(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
new_contents = open(join(course_dir, "feedback", "foo", "ps1", "p1.html"), "r").read()
assert new_contents != contents
# should not work for an empty submission
os.makedirs(join(course_dir, "submitted", "foo", "ps2"))
result = api.generate_feedback("ps2", "foo")
assert not result["success"]
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps2", "p2.ipynb"))
api.generate_assignment("ps2")
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps2", "p2.ipynb"))
api.autograde("ps2", "foo")
result = api.generate_feedback("ps2", "foo")
assert result["success"]
@notwindows
def test_release_feedback(self, api, course_dir, db, exchange):
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
api.generate_assignment("ps1")
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
self._copy_file(join("files", "timestamp.txt"), join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"))
api.autograde("ps1", "foo")
api.generate_feedback("ps1", "foo")
result = api.release_feedback("ps1", "foo")
assert result["success"]
assert os.path.isdir(join(exchange, "abc101", "feedback"))
assert os.path.exists(join(exchange, "abc101", "feedback", "c600ef68c434c3d136bb5e68ea874169.html"))
# add another assignment
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps2", "p2.ipynb"))
api.generate_assignment("ps2")
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps2", "p2.ipynb"))
self._copy_file(join("files", "timestamp.txt"), join(course_dir, "submitted", "foo", "ps2", "timestamp.txt"))
api.autograde("ps2", "foo")
api.generate_feedback("ps2", "foo")
result = api.release_feedback("ps2", "foo")
assert result["success"]
assert os.path.exists(join(exchange, "abc101", "feedback", "e190e1f234b633832f2069f4f8a3a680.html"))
@notwindows
def test_fetch_feedback(self, api, course_dir, db, cache):
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
api.generate_assignment("ps1")
timestamp = open(os.path.join(os.path.dirname(__file__), "files", "timestamp.txt")).read()
cachepath = join(cache, "abc101", "foo+ps1+{}".format(timestamp))
self._copy_file(join("files", "submitted-changed.ipynb"), join(cachepath, "p1.ipynb"))
self._copy_file(join("files", "timestamp.txt"), join(cachepath, "timestamp.txt"))
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
self._copy_file(join("files", "timestamp.txt"), join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"))
api.autograde("ps1", "foo")
api.generate_feedback("ps1", "foo")
api.release_feedback("ps1", "foo")
result = api.fetch_feedback("ps1", "foo")
assert result["success"]
assert os.path.isdir(join("ps1", "feedback"))
assert os.path.exists(join("ps1", "feedback", timestamp, "p1.html"))
# add another assignment
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps2", "p2.ipynb"))
api.generate_assignment("ps2")
cachepath = join(cache, "abc101", "foo+ps2+{}".format(timestamp))
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(cachepath, "p2.ipynb"))
self._copy_file(join("files", "timestamp.txt"), join(cachepath, "timestamp.txt"))
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps2", "p2.ipynb"))
self._copy_file(join("files", "timestamp.txt"), join(course_dir, "submitted", "foo", "ps2", "timestamp.txt"))
api.autograde("ps2", "foo")
api.generate_feedback("ps2", "foo")
api.release_feedback("ps2", "foo")
result = api.fetch_feedback("ps2", "foo")
assert result["success"]
assert os.path.exists(join("ps2", "feedback", timestamp, "p2.html"))
| bsd-3-clause | 2,897,264,296,471,364,600 | 44.106357 | 124 | 0.567499 | false | 3.430046 | true | false | false |
crs4/omero.biobank | examples/create_markers_sets.py | 1 | 4865 | # BEGIN_COPYRIGHT
# END_COPYRIGHT
# pylint: disable=W0105, C0103
""" ..
This example shows how to handle genetic marker data.
**NOTE:** the example assumes that the KB already contains all objects
created by the example on importing individuals.
Suppose you have run a series of genotyping assays where the DNA
sample in each well of a titer plate has been associated with a
collection of genotyping values. To import this information into the
KB, we define a collection of marker data:
"""
import sys, os, uuid
import numpy as np
from bl.vl.kb import KnowledgeBase as KB
OME_HOST = os.getenv('OME_HOST', 'localhost')
OME_USER = os.getenv('OME_USER', 'test')
OME_PASSWD = os.getenv('OME_PASSWD', 'test')
STUDY_LABEL = 'KB_EXAMPLES'
MSET_LABEL = 'DUMMY_MS'
REF_GENOME = 'DUMMY_GENOME'
kb = KB(driver='omero')(OME_HOST, OME_USER, OME_PASSWD)
marker_defs = [
# label mask index allele_flip
('A001', 'TCACTTCTTCAAAGCT[A/G]AGCTACAAGCATTATT', 0, False),
('A002', 'GGAAGGAAGAAATAAA[C/G]CAGCACTATGTCTGGC', 1, False),
('A003', 'CCGACCTAGTAGGCAA[A/G]TAGACACTGAGGCTGA', 2, False),
('A004', 'AGGTCTATGTTAATAC[A/G]GAATCAGTTTCTCACC', 3, True),
('A005', 'AGATTACCATGCAGGA[A/T]CTGTTCTGAGATTAGC', 4, False),
('A006', 'TCTACCTCTGTGACTA[C/G]AAGTGTTCTTTTATTT', 5, True),
('A007', 'AAGGCAATACTGTTCA[C/T]ATTGTATGGAAAGAAG', 6, True),
]
""" ..
See the :ref:`import tool documentation <import_tool>` for
details on the mask, index and allele flip fields. Now we have to
import the above definitions into the KB:
"""
study = kb.get_study(STUDY_LABEL)
if study is None:
sys.exit("ERROR: study '%s' not found" % STUDY_LABEL)
action = kb.create_an_action(study)
maker, model, release = (uuid.uuid4().hex for _ in xrange(3))
N, stream = len(marker_defs), iter(marker_defs)
mset = kb.create_snp_markers_set(
MSET_LABEL, maker, model, release, N, stream, action
)
""" ..
If markers have been aligned to a reference genome, we can store the
alignment information. This information must be provided in the form
of a stream of tuples that contain the marker's id within the KB
(called *vid*), chromosome number, position within the chromosome,
strand info and number of copies. Again, the :ref:`import tool docs
<import_tool>` provide more details on this matter. In this case, we
will auto-generate dummy alignment info for all markers in the set:
"""
mset.load_markers()
aligns = [(m['vid'], i+1, (i+1)*1000, True, 'A' if (i%2)== 0 else 'B', 1)
for i, m in enumerate(mset.markers)]
kb.align_snp_markers_set(mset, REF_GENOME, iter(aligns), action)
""" ...
In OMERO.biobank, genotyping data is represented by a pair of arrays:
* a 2 X N array where each column represents the probabilities of
being homozygous for allele A and B, respectively;
* a 1 X N array where each element represents a degree of confidence
related to the corresponding probabilities in the above array;
where N is the number of markers in the reference set. The following
snippet generates dummy genotyping data for all individuals enrolled
in the study:
"""
def make_dummy_data(ms):
n = len(ms)
probabilities = 0.5 * np.cast[np.float32](np.random.random((2, n)))
confidence_values = np.cast[np.float32](np.random.random(n))
return probabilities, confidence_values
data_sample_list = []
for i, ind in enumerate(kb.get_individuals(study)):
action = kb.create_an_action(study, target=ind)
config = {
'label' : uuid.uuid4().hex,
'status' : kb.DataSampleStatus.USABLE,
'action' : action,
'snpMarkersSet' : mset
}
data_sample = kb.factory.create(kb.GenotypeDataSample, config).save()
probs, confs = make_dummy_data(mset)
do = kb.add_gdo_data_object(action, data_sample, probs, confs)
data_sample_list.append(data_sample)
""" ..
Data samples keep track of the existence of genotyping data defined on
a given marker set, while data objects model actual data containers
such as files or OMERO table rows. Multiple data objects can refer to
the same data sample when they contain the same data, but encoded in
different formats, or stored in distinct copies of the same file.
For simplicity, we have defined an action that directly links each
data sample to an individual. While this approach can be used when no
information is available on the steps that led to the production of
the data sample, the KB allows to keep track of several intermediate
objects such as blood samples, dna samples, titer plates, plate wells,
etc. To iterate over the data objects we have just stored, we can do
the following:
"""
np.set_printoptions(precision=3)
print "marker set id: %s" % mset.id
for gdo in kb.get_gdo_iterator(mset, data_samples=data_sample_list):
print gdo['probs']
print gdo['confidence']
print
| gpl-2.0 | -8,684,772,627,203,433,000 | 34.772059 | 73 | 0.710791 | false | 3.181818 | false | false | false |
abitofalchemy/ScientificImpactPrediction | json_dataset_tograph.py | 1 | 2398 | __author__ = 'saguinag'+'@'+'nd.edu'
__version__ = "0.1.0"
##
## json_dataset_tograph = convert twitter (json format) dataset to a graph object
## arguments: input file (json)
##
## VersionLog:
# 0.0.1 Initial commit
#
import argparse,traceback,optparse
import urllib, json
import sys
def json_load_byteified(file_handle):
return _byteify(
json.load(file_handle, object_hook=_byteify),
ignore_dicts=True
)
def json_loads_byteified(json_text):
return _byteify(
json.loads(json_text, object_hook=_byteify),
ignore_dicts=True
)
def _byteify(data, ignore_dicts = False):
# if this is a unicode string, return its string representation
if isinstance(data, unicode):
return data.encode('utf-8')
# if this is a list of values, return list of byteified values
if isinstance(data, list):
return [ _byteify(item, ignore_dicts=True) for item in data ]
# if this is a dictionary, return dictionary of byteified keys and values
# but only if we haven't already byteified it
if isinstance(data, dict) and not ignore_dicts:
return {
_byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
for key, value in data.iteritems()
}
# if it's anything else, return it in its original form
return data
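# Illustration (hypothetical input): on Python 2, plain json.loads() returns unicode
# strings, while the helpers above yield byte strings, e.g.
#   json_loads_byteified('{"title": "paper accepted"}') -> {'title': 'paper accepted'}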
def get_parser():
parser = argparse.ArgumentParser(description='query twitter and output to file')
# parser.add_argument('jsonfile', metavar='JSONFILE', help='Quoted query')
parser.add_argument('--version', action='version', version=__version__)
return parser
def main():
parser = get_parser()
args = vars(parser.parse_args())
#print args
#url = "http://apollo.cse.nd.edu/datasets/paris_shooting.txt"
# url = "http://dsg1.crc.nd.edu/~saguinag/paper_accepted.json"
# response = urllib.urlopen(url)
# data = json.loads(response.read())
# data = response.read()
# for line in data:
infile = "paper_accepted.json"
infile = "datasets/network_models_evolution.json"
data = []
with open(infile) as data_file:
lines = data_file.readlines()
print len(lines)
for l in lines:
d = json.dumps(l)
print type(d)
# print json_loads_byteified(l)
break
# with open(infile) as f:
# ldict = json.load(f)
# print type(ldict)
# # break
if __name__ == '__main__':
main()
sys.exit(0)
| mit | 3,842,952,083,991,179,300 | 26.883721 | 82 | 0.650542 | false | 3.358543 | false | false | false |
sawankh/MMOTracker | src/dataAnalysis/dsl/grammar/behaviour/readFile.py | 2 | 1546 | #!/usr/bin/python
# Title: readFile.py
# Description: Contains the grammar and methods for reading a file
# Author: Sawan J. Kapai Harpalani
# Date: 2016-06-26
# Version: 0.1
# Usage: python readFile.py
# Notes:
# python_version: 2.6.6
# License: Copyright 200X Sawan J. Kapai Harpalani
# This file is part of MMOTracker. MMOTracker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
# MMOTracker is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the GNU General Public License along with MMOTracker.
# If not, see http://www.gnu.org/licenses/.
#==============================================================================
from pyparsing import *
# Rules
reservedWordReadFile = Suppress(Keyword("readFile"))
fileName = QuotedString('"', escChar = "\\")
leftBracket = Suppress(Literal("("))
rightBracket = Suppress(Literal(")"))
readFileExpr = reservedWordReadFile + leftBracket + fileName.setResultsName("fileName") + rightBracket
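# Illustrative parse (assumes "data/example.txt" exists when the parse action runs):
#   readFileExpr.parseString('readFile("data/example.txt")')
# returns the file contents wrapped in double quotes via the parse action set below.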
# Reads file and returns quoted string to be possible to assign variable
def readFile(fileN):
fp = open(fileN, 'r')
return "\"" + fp.read() + "\""
readFileExpr.setParseAction(lambda tokens: readFile(tokens.fileName)) | gpl-3.0 | -6,108,897,773,729,414,000 | 44.5 | 120 | 0.717335 | false | 3.743341 | false | false | false |
WaldurChatbot/Waldur-Chatbot | common/offline_graphs/totalcosts_offline.py | 1 | 16907 | import json
import collections
import matplotlib
matplotlib.use('Agg')  # non-interactive backend, so the chart can be rendered without a display
import matplotlib.pyplot as plt
import numpy as np
from textwrap import wrap
myinput = """[
{
"url":"https://api.etais.ee/api/invoices/9e67980771a94de3bd0075fe84522b05/",
"uuid":"9e67980771a94de3bd0075fe84522b05",
"number":100151,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"87.7300000",
"tax":"0.0000000",
"total":"87.7300000",
"state":"pending",
"year":2018,
"month":1,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"[email protected]",
"bank":"Estonian Bank"
},
"invoice_date":null,
"due_date":null,
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":87.73,
"tax":"0.0000000",
"total":"87.7300000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-12-01T00:00:00Z",
"end":"2017-12-31T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":31,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/9e67980771a94de3bd0075fe84522b05/",
"uuid":"9e67980771a94de3bd0075fe84522b05",
"number":100151,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"87.7300000",
"tax":"0.0000000",
"total":"87.7300000",
"state":"pending",
"year":2017,
"month":12,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"[email protected]",
"bank":"Estonian Bank"
},
"invoice_date":null,
"due_date":null,
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":87.73,
"tax":"0.0000000",
"total":"87.7300000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-12-01T00:00:00Z",
"end":"2017-12-31T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":31,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/59fd12a0d3e34f829d6a0eefd2e5ee41/",
"uuid":"59fd12a0d3e34f829d6a0eefd2e5ee41",
"number":100156,
"customer":"https://api.etais.ee/api/customers/0d689685ab3444bbb592338e24613f03/",
"price":"87.7300000",
"tax":"0.0000000",
"total":"87.7300000",
"state":"pending",
"year":2017,
"month":12,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"[email protected]",
"bank":"Estonian Bank"
},
"invoice_date":null,
"due_date":null,
"customer_details":null,
"openstack_items":[
{
"name":"Waldur Maie cloud (Small / Generic)",
"price":87.73,
"tax":"0.0000000",
"total":"87.7300000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-12-01T00:00:00Z",
"end":"2017-12-31T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"W-M project",
"project_uuid":"26fc83e64ea0473fb9f57f0ae978b396",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"1571bca1f6594ad3bede4d2c8d64755a",
"package":"https://api.etais.ee/api/openstack-packages/81e93543103b4cf8a5d3658e026e98f3/",
"tenant_name":"Waldur Maie cloud",
"tenant_uuid":"1571bca1f6594ad3bede4d2c8d64755a",
"usage_days":31,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/bb6f38e908e7493791c65b26e88e1619/",
"uuid":"bb6f38e908e7493791c65b26e88e1619",
"number":100121,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"84.9000000",
"tax":"0.0000000",
"total":"84.9000000",
"state":"created",
"year":2017,
"month":11,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"[email protected]",
"bank":"Estonian Bank"
},
"invoice_date":"2017-12-01",
"due_date":"2017-12-31",
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":84.9,
"tax":"0.0000000",
"total":"84.9000000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-11-01T00:00:00Z",
"end":"2017-11-30T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":30,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/d13cdd4ef4d2478e8e0cf0961d20e6f2/",
"uuid":"d13cdd4ef4d2478e8e0cf0961d20e6f2",
"number":100129,
"customer":"https://api.etais.ee/api/customers/0d689685ab3444bbb592338e24613f03/",
"price":"53.7700000",
"tax":"0.0000000",
"total":"53.7700000",
"state":"created",
"year":2017,
"month":11,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"[email protected]",
"bank":"Estonian Bank"
},
"invoice_date":"2017-12-01",
"due_date":"2017-12-31",
"customer_details":null,
"openstack_items":[
{
"name":"Waldur Maie cloud (Small / Generic)",
"price":53.77,
"tax":"0.0000000",
"total":"53.7700000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-11-12T11:29:21.522230Z",
"end":"2017-11-30T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"W-M project",
"project_uuid":"26fc83e64ea0473fb9f57f0ae978b396",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"1571bca1f6594ad3bede4d2c8d64755a",
"package":"https://api.etais.ee/api/openstack-packages/81e93543103b4cf8a5d3658e026e98f3/",
"tenant_name":"Waldur Maie cloud",
"tenant_uuid":"1571bca1f6594ad3bede4d2c8d64755a",
"usage_days":19,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/b094173f50a848e19d3362c84eabebc4/",
"uuid":"b094173f50a848e19d3362c84eabebc4",
"number":100096,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"87.7300000",
"tax":"0.0000000",
"total":"87.7300000",
"state":"created",
"year":2017,
"month":10,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"[email protected]",
"bank":"Estonian Bank"
},
"invoice_date":"2017-11-01",
"due_date":"2017-12-01",
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":87.73,
"tax":"0.0000000",
"total":"87.7300000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-10-01T00:00:00Z",
"end":"2017-10-31T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":31,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/b636ee1236e0486994cdd1ffda4c7e1d/",
"uuid":"b636ee1236e0486994cdd1ffda4c7e1d",
"number":100076,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"11.3200000",
"tax":"0.0000000",
"total":"11.3200000",
"state":"created",
"year":2017,
"month":9,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"[email protected]",
"bank":"Estonian Bank"
},
"invoice_date":"2017-10-01",
"due_date":"2017-10-31",
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":11.32,
"tax":"0.0000000",
"total":"11.3200000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-09-27T13:53:31.425080Z",
"end":"2017-09-30T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":4,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
}
]"""
data = json.loads(myinput)
num_to_monthdict = {
1:'Jan',
2:'Feb',
3:'Mar',
4:'Apr',
5:'May',
6:'Jun',
7:'Jul',
8:'Aug',
9:'Sep',
10:'Oct',
11:'Nov',
12:'Dec'
}
plotx = []
ploty = []
uuid = '5991d0c109df4e8cab4f9dd660295517'
customer = 'https://api.etais.ee/api/customers/' + uuid + '/'
newlist = []
print(type(data))
print(type(data[0]))
for i in range((len(data)-1), -1, -1):
if data[i]['customer'] == customer:
newlist.append(data[i])
plotx.append(num_to_monthdict[data[i]['month']] + " " + str(data[i]['year']))
ploty.append(float(data[i]['total']))
print("### " + str(len(newlist)))
'''
result = collections.OrderedDict()
for i in range(len(plotx)):
result[plotx[i]] = float(ploty[i])
'''
print(plotx)
print(ploty)
N = len(ploty)
ind = np.arange(N)
width = 0.35
fig, ax = plt.subplots()
rects1 = ax.bar(ind, ploty, width, color='#75ad58')
ax.set_xlabel('Months')
ax.set_ylabel('Total costs')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(plotx)
title = ax.set_title("\n".join(wrap('Last ' + str(N) + 'month total costs but then everytime the title gets longer '
'omg like wtf when does it stop OMG HELP well okay'
'let me tell you a story all about how'
'my life got turned upside down'
'so id like to take a moment just sit right there', 60)))
def autolabel(rects, ax):
# Get y-axis height to calculate label position from.
(y_bottom, y_top) = ax.get_ylim()
y_height = y_top - y_bottom
for rect in rects:
height = rect.get_height()
label_position = height + (y_height * 0.01)
ax.text(rect.get_x() + rect.get_width()/2., label_position,
'%d' % int(height),
ha='center', va='bottom')
autolabel(rects1, ax)
print()
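# Recolour the bar of the most recent month (the still-pending invoice) to mark it as
# an estimation: the bar rectangles come first in ax.get_children() here, so the N-th
# child is the last (newest) bar.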
counter = 1
for child in ax.get_children():
if counter == N:
child.set_color('#2388d6')
print("HERE:" + str(child))
else:
print(child)
counter += 1
real_invoice = matplotlib.patches.Patch(color='#75ad58', label='Invoice')
estimate_invoice = matplotlib.patches.Patch(color='#2388d6', label='Estimation')
plt.legend(handles=[real_invoice, estimate_invoice])
fig.tight_layout()
title.set_y(1.05)
fig.subplots_adjust(top=0.8)
#plt.show()
fig.savefig('foo.png')
| mit | 2,200,156,060,488,851,500 | 30.251386 | 116 | 0.528538 | false | 3.155468 | false | false | false |
lltk/lltk | lltk/caching.py | 1 | 5363 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
__all__ = ['register', 'enable', 'disable', 'get', 'put', 'exists', 'cached', 'Cache']
from functools import wraps
import lltk.config as config
from lltk.helpers import debug, warning
from lltk.exceptions import CacheFatalError
caches = {}
def register(cache):
''' Registers a cache. '''
global caches
name = cache().name
if not caches.has_key(name):
caches[name] = cache
def enable(identifier = None, *args, **kwargs):
	''' Enables a specific cache for the current session. Remember that it has to be registered. '''
global cache
if not identifier:
for item in (config['default-caches'] + ['NoCache']):
if caches.has_key(item):
debug('Enabling default cache %s...' % (item,))
cache = caches[item](*args, **kwargs)
if not cache.status():
warning('%s could not be loaded. Is the backend running (%s:%d)?' % (item, cache.server, cache.port))
continue
# This means that the cache backend was set up successfully
break
else:
debug('Cache backend %s is not registered. Are all requirements satisfied?' % (item,))
elif caches.has_key(identifier):
debug('Enabling cache %s...' % (identifier,))
previouscache = cache
cache = caches[identifier](*args, **kwargs)
if not cache.status():
warning('%s could not be loaded. Is the backend running (%s:%d)?' % (identifier, cache.server, cache.port))
cache = previouscache
else:
debug('Cache backend %s is not registered. Are all requirements satisfied?' % (identifier,))
def disable():
''' Disables the cache for the current session. '''
global cache
cache = NoCache()
def connect():
''' Establishes the connection to the backend. '''
return cache.connect()
def status():
''' Returns True if connection can be established, False otherwise. '''
return cache.status()
def exists(key):
''' Checks if a document is cached. '''
return cache.exists(key)
def get(key):
''' Retrieves a document from the the currently activated cache (by unique identifier). '''
return cache.get(key)
def put(key, value, extradata = {}):
''' Caches a document using the currently activated cache. '''
return cache.put(key, value, extradata)
def delete(key):
''' Remove a document from the cache (by unique identifier). '''
return cache.delete(key)
def commit():
''' Ensures that all changes are committed to disc. '''
return cache.commit()
def cached(key = None, extradata = {}):
''' Decorator used for caching. '''
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
uid = key
if not uid:
from hashlib import md5
arguments = list(args) + [(a, kwargs[a]) for a in sorted(kwargs.keys())]
uid = md5(str(arguments)).hexdigest()
if exists(uid):
debug('Item \'%s\' is cached (%s).' % (uid, cache))
return get(uid)
else:
debug('Item \'%s\' is not cached (%s).' % (uid, cache))
result = f(*args, **kwargs)
debug('Caching result \'%s\' as \'%s\' (%s)...' % (result, uid, cache))
debug('Extra data: ' + (str(extradata) or 'None'))
put(uid, result, extradata)
return result
return wrapper
return decorator
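# Example usage (illustrative only, not part of the lltk API): decorating a function
# caches its return value; without an explicit key, an md5 hash of the call arguments
# is used as the cache key.
@cached(extradata={'source': 'example'})
def _example_cached_lookup(word):
    ''' Stand-in for an expensive lookup whose result is worth caching. '''
    return {'word': word, 'length': len(word)}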
class GenericCache(object):
''' Generic cache class that all custom caches should be derived from. '''
def __init__(self, *args, **kwargs):
self.name = 'Unkown'
self.connection = False
self.server = None
self.port = None
self.user = None
self.database = None
self.filename = None
if kwargs.has_key('server'):
self.server = kwargs['server']
if kwargs.has_key('port'):
self.port = kwargs['port']
if kwargs.has_key('user'):
self.user = kwargs['user']
if kwargs.has_key('database'):
self.database = kwargs['database']
if kwargs.has_key('filename'):
self.filename = kwargs['filename']
def __del__(self):
# self.commit()
pass
def __str__(self):
return '%s cache backend' % (self.name)
@classmethod
def needsconnection(self, f):
''' Decorator used to make sure that the connection has been established. '''
@wraps(f)
def wrapper(self, *args, **kwargs):
if not self.connection:
self.connect()
return f(self, *args, **kwargs)
return wrapper
def setup(self):
''' Runs the initial setup for the cache. '''
pass
def connect(self):
''' Establishes the connection to the backend. '''
pass
def status(self):
''' Returns True if connection can be established, False otherwise. '''
try:
self.connect()
except CacheFatalError:
return False
return True
def exists(self, key):
''' Checks if a document is cached. '''
pass
def get(self, key):
''' Retrieves a document from the cache (by unique identifier). '''
pass
def put(self, key, value, extradata = {}):
''' Caches a document. '''
pass
def delete(self, key):
''' Remove a document from the cache (by unique identifier). '''
pass
def commit(self):
''' Ensures that all changes are committed to disc. '''
pass
class NoCache(GenericCache):
''' Pseudo-class implementing no caching at all. '''
def __init__(self, *args, **kwargs):
super(NoCache, self).__init__()
self.name = 'NoCache'
def exists(self, key):
''' Checks if a document is cached. '''
return False
register(NoCache)
# Setup the NoCache() cache for now...
cache = NoCache()
# Import and register all available caches...
import lltk.caches
# Enable default caches...
enable()
del lltk
| lgpl-3.0 | -7,534,432,656,292,242,000 | 24.660287 | 110 | 0.658214 | false | 3.358172 | false | false | false |
mineo/mpd_pydb | setup.py | 1 | 1278 | #!/usr/bin/env python2
from __future__ import print_function
from setuptools import setup
from sys import version_info
if version_info < (3, 5):
requirements = ["pathlib"]
else:
requirements = []
setup(name="mpd_pydb",
author="Wieland Hoffmann",
author_email="[email protected]",
packages=["mpd_pydb"],
package_dir={"mpd_pydb": "mpd_pydb"},
download_url="https://github.com/mineo/mpd_pydb/tarball/master",
url="http://github.com/mineo/mpd_pydb",
license="MIT",
classifiers=["Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",],
description="Module for reading an MPD database",
long_description=open("README.rst").read(),
setup_requires=["setuptools_scm", "pytest-runner"],
use_scm_version={"write_to": "mpd_pydb/version.py"},
install_requires=requirements,
extras_require={
'docs': ['sphinx']
},
tests_require=["pytest"],
)
| mit | 2,467,657,736,107,854,300 | 35.514286 | 70 | 0.579812 | false | 3.920245 | false | false | false |
fw4spl-org/fw4spl-git | hooks/check_commit.py | 1 | 3013 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
import re
import common
class Types(object):
def __iter__(self):
return (x for x in ['feat', 'fix', 'perf', 'revert', 'docs', 'chore', 'style', 'refactor', 'test', 'merge'])
TYPES = Types()
TITLE_PATTERN_REGEX = r'(?P<type>' + '|'.join(TYPES) + ')\((?P<scope>\S+)\): (?P<subject>[a-z].*)'
# return all unpushed commit message
def unpushed_commit_message():
command_result = common.execute_command('git log --branches --not --remotes --pretty=format:%h:%aE:%s')
if command_result.status != 0:
return []
else:
return command_result.out.decode().split('\n')
def commit_in_path(old_path=None, new_path=None):
git_command = 'git log --first-parent --pretty=format:%h:%aE:%s'
if old_path is not None and len(old_path) > 0:
git_command += ' ' + old_path
if new_path is not None and len(new_path) > 0:
git_command += '..' + new_path
command_result = common.execute_command(git_command)
if command_result.status != 0:
return []
else:
return command_result.out.decode().split('\n')
# check the title conformance against commitizen/angularjs/... rules
def __check_commit_title(commit_hash, commit_title):
# Test the title against regex
title_pattern = re.compile(TITLE_PATTERN_REGEX)
title_match = title_pattern.match(commit_title)
# Convert into a boolean
title_have_not_matched = title_match is None
if title_have_not_matched is True:
common.error(
"Commit '"
+ commit_hash
+ "' with title '"
+ commit_title
+ "' does not follow Sheldon rules: '<" + "|".join(TYPES) + ">(<scope>): <subject>'.")
return title_have_not_matched
# check that the author is not anonymous
def __check_commit_author(commit_hash, commit_author):
# Test the title against regex
author_pattern = re.compile(r'\.*anonymous\.*')
author_match = author_pattern.match(commit_author.lower())
# Convert into a boolean
author_have_matched = author_match is not None
if author_have_matched is True:
common.error(
"Commit '"
+ commit_hash
+ "' has anonymous author.")
return author_have_matched
def check_commit_messages(commit_messages):
results = [False]
for commit_message in commit_messages:
# Split commit message according to "--pretty=format:%h:%aE:%s"
split_message = commit_message.split(':', 2)
if len(split_message) == 3:
# Extract the type
commit_hash = split_message[0]
commit_author = split_message[1]
commit_title = split_message[2]
results.append(__check_commit_title(commit_hash, commit_title))
results.append(__check_commit_author(commit_hash, commit_author))
common.note('%d commit(s) checked, %d error(s) found.' % (len(commit_messages), results.count(True)))
return results
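# Minimal usage sketch (assumption: run as a standalone check; the actual hook wiring
# lives elsewhere in the repository): verify all unpushed commits and exit non-zero if
# any of them violates the rules above.
if __name__ == '__main__':
    import sys
    sys.exit(1 if any(check_commit_messages(unpushed_commit_message())) else 0)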
| lgpl-3.0 | -865,163,373,709,589,600 | 28.539216 | 116 | 0.609691 | false | 3.61271 | false | false | false |
mitnk/notes | index.py | 1 | 1208 | #!/usr/bin/env python
import os
import yaml
NO_INDEX = set(['PL', 'Others', 'Topics', 'Roadmap'])
def load_mkdocs():
filename = 'mkdocs.yml'
with open(filename) as f:
return yaml.load(f.read())
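# Expected mkdocs.yml shape (illustrative example, not taken from this repository):
#
#   pages:
#     - Python:
#         - 'Intro': 'python/index.md'
#         - 'Chapter 1': 'python/ch1.md'
#
# Topics listed in NO_INDEX, or whose value is not a list, are skipped by make_index().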
def make_index(docs):
groups = docs['pages']
for group in groups:
topic = group.keys()[0]
_pages = group[topic]
if topic not in NO_INDEX and isinstance(_pages, list):
pages = []
for _page in _pages:
page = _page.items()[0]
if 'index.md' not in page[1]:
path = page[1]
new_page = (page[0], path.split('/')[1])
pages.append(new_page)
write_index(topic, pages)
def write_index(topic, pages):
index = os.path.join('docs', topic.lower(), 'index.md')
title = '### **%s**' % topic
contents = '\n'.join(map(map_page, pages))
document = '\n\n'.join([title, contents])
with open(index, 'w') as f:
f.write(document)
def map_page(page):
"""
('Chapter 1. Title', 'foo/ch1.md') => '[Chapter 1. Title](foo/ch1.md)'
"""
return '* [%s](%s)' % (page[0], page[1])
docs = load_mkdocs()
make_index(docs)
| mit | 4,829,976,709,307,048,000 | 24.702128 | 74 | 0.519868 | false | 3.238606 | false | false | false |
gridsim/gridsim | test/test_DLF2.py | 1 | 3003 | # This is a test program for the DirectLoadFlowCalculator present in the
# gridsim.electricalnetwork module. It computes the example given in
# http://home.eng.iastate.edu/~jdm/ee553/DCPowerFlowEquations.pdf pp. 10-15 and
# compare results with those of the reference.
import unittest
import numpy as np
from gridsim.electrical.loadflow import DirectLoadFlowCalculator
class TestDLF2(unittest.TestCase):
def test_reference(self):
# network set-up
# ----------------
# boolean array specifying which bus is a PV bus
# has to be a one-dimensional boolean numpy array
is_PV = np.array([False, True, False, True])
# array giving from-bus and to-bus ids for each branch
# b12, b13, b14, b23, b34
b = np.array([[0, 1], [0, 2], [0, 3], [1, 2], [2, 3]])
# array containing branch admittances
Yb = np.zeros((5, 4), dtype=complex)
yT = [1j * (-10.), 1j * (-10.), 1j * (-10.), 1j * (-10.), 1j * (-10.)]
for i_branch in range(0, 5):
Yb[i_branch, 0] = yT[i_branch]
Yb[i_branch, 1] = yT[i_branch]
Yb[i_branch, 2] = yT[i_branch]
Yb[i_branch, 3] = yT[i_branch]
# calculator initialization
# --------------------------
s_base = 1.0
v_base = 1.0
dlf = DirectLoadFlowCalculator()
# dlf = NewtonRaphsonLoadFlowCalculator()
dlf.update(s_base, v_base, is_PV, b, Yb)
# input buses electrical values
# ------------------------------
# P, Q, V, Th can be either numpy 1-D arrays or 2-D arrays with 1 row,
# respectively 1 column
# P1,P2,P3,P4, slack power can be set to any value, e.g. float('NaN')
P = np.array([float('NaN'), 2. - 1., -4., 1.])
Q = np.array([float('NaN'), 0., 0., 0.])
# mutable variable is needed
V = np.ones([4])
# mutable variable is needed
Th = np.zeros([4])
# compute buses other electrical values
# --------------------------------------
[P, Q, V, Th] = dlf.calculate(P, Q, V, Th, True)
# check results against reference values
p_slack = P[0]
# print "P_slack ="
# print p_slack
refslack = 2.
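        # Lossless DC load flow: the slack bus balances the remaining injections,
        # so P_slack = -(P2 + P3 + P4) = -((2 - 1) + (-4) + 1) = 2.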
self.assertEqual(p_slack, refslack, "The power of the slack bus is "
+ str(p_slack) + " instead of " + str(refslack))
# print "Th = "
# print Th
ref_Th = np.array([0., -0.025, -0.15, -0.025]) # Th1,Th2,Th3,Th4
self.assertTrue(np.allclose(Th, ref_Th))
# get branch currents
# ---------------------------------
[Pij, Qij, Pji, Qji] = dlf.get_branch_power_flows(True)
# check results against reference values
# print "Pij = "
# print Pij
# P12,P13,P14, P23,P34
ref_Pij = np.array([0.25, 1.5, 0.25, 1.25, -1.25])
self.assertTrue(np.allclose(Pij, ref_Pij))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -2,668,466,233,681,159,700 | 33.918605 | 79 | 0.525808 | false | 3.307269 | true | false | false |
knoppo/pi3bar | pi3bar/plugins/clock.py | 1 | 1269 | from pi3bar.plugins.base import Plugin
import datetime
class Clock(Plugin):
"""
:class:`pi3bar.app.Pi3Bar` plugin to show the current date and time.
:param full_format: :class:`str` - :meth:`datetime.datetime.strftime` argument
:param short_format: :class:`str` - :meth:`datetime.datetime.strftime` argument, short alternative
    :param timezone: :class:`datetime.tzinfo` - timezone to use, passed directly to :meth:`datetime.datetime.now`. E.g. ``pytz.timezone('US/Pacific')`` or ``pytz.timezone('Europe/Berlin')``
Examples:
.. code-block:: python
# default formats:
Clock(full_format='%Y-%m-%d %H:%M:%S', short_format='%d. %H:%M:%S')
#
Clock(full_format='%d.%m.%Y %H:%M:%S'), # other format example
        # pass a timezone (a tzinfo instance, e.g. from pytz)
        Clock(timezone=pytz.timezone('US/Eastern'))
"""
#: Refresh every second
ticks = 1
def __init__(self, full_format='%Y-%m-%d %H:%M:%S', short_format='%d. %H:%M', timezone=None, **kwargs):
self.full_format = full_format
self.short_format = short_format
self.timezone = timezone
self.instance = timezone
super(Clock, self).__init__(**kwargs)
def cycle(self):
now = datetime.datetime.now(self.timezone)
self.full_text = now.strftime(self.full_format)
self.short_text = now.strftime(self.short_format)
| mit | 7,539,013,647,937,396,000 | 30.725 | 107 | 0.611505 | false | 3.515235 | false | false | false |
floatec/ProsDataBase | ProsDataBase/database/tests/grouptest.py | 1 | 3530 | from django.test import TestCase
from ..models import *
from ..tests.factory import *
from django.test.client import Client
from ..views.api import *
# everything works so far
class GroupTest(TestCase):
def test_serializeAll(self):
# =================================================================
# tests the api showAllGroups
# =================================================================
self.maxDiff = None
group1 = UserFactory.createGroup(10)
group2 = UserFactory.createGroup(10)
result = GroupSerializer.serializeAll()
groupMembers1 = list()
for m1 in Membership.objects.filter(group=group1):
groupMembers1.append(m1.user.username)
groupMembers2 = list()
for m2 in Membership.objects.filter(group=group2):
groupMembers2.append(m2.user.username)
# =================================================================
# test the name of the group is the same groupname in the result
# =================================================================
self.assertTrue(group1.name in [group["name"] for group in result["groups"]])
self.assertTrue(group2.name in [group["name"] for group in result["groups"]])
# =================================================================
# test the users in the group are the same users in the result
# =================================================================
for group in result["groups"]:
if group["name"] == group1.name:
self.assertEquals(groupMembers1, group["users"])
break
elif group["name"] == group2.name:
self.assertEquals(groupMembers2, group["users"])
# =================================================================
# test the quantity of the result is correct
# =================================================================
length = 0
for array in [group["users"] for group in result["groups"]]:
length += len(array)
self.assertEquals(length, 20)
# =================================================================
# test the tableCreator and groupCreator are False
# =================================================================
for group in result["groups"]:
if group["name"] == group1.name:
self.assertFalse(group["tableCreator"])
elif group["name"] == group2.name:
self.assertFalse(group["tableCreator"])
def test_serializeOne(self):
group = UserFactory.createGroup(10)
result = GroupSerializer.serializeOne(group)
# =================================================================
# tests the users in the result are the same users in the group
# =================================================================
groupMember = list()
for m in Membership.objects.filter(group=group):
groupMember.append(m.user.username)
#for user in groupMember:
# self.assertTrue(user in result["users"])
# =================================================================
# test the quantity of the result is correct
# =================================================================
self.assertEquals(len(result["users"]), 10)
def test_groups(self):
group = UserFactory.createGroup(10)
c = Client()
| bsd-2-clause | -1,643,750,423,225,071,600 | 41.53012 | 85 | 0.436261 | false | 5.711974 | true | false | false |
koobonil/Boss2D | Boss2D/addon/_old/webrtc-qt5.11.2_for_boss/modules/audio_processing/test/py_quality_assessment/quality_assessment/echo_path_simulation_unittest.py | 9 | 3035 | # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Unit tests for the echo path simulation module.
"""
import shutil
import os
import tempfile
import unittest
import pydub
from . import echo_path_simulation
from . import echo_path_simulation_factory
from . import signal_processing
class TestEchoPathSimulators(unittest.TestCase):
"""Unit tests for the eval_scores module.
"""
def setUp(self):
"""Creates temporary data."""
self._tmp_path = tempfile.mkdtemp()
# Create and save white noise.
silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000)
white_noise = signal_processing.SignalProcessingUtils.GenerateWhiteNoise(
silence)
self._audio_track_num_samples = (
signal_processing.SignalProcessingUtils.CountSamples(white_noise))
self._audio_track_filepath = os.path.join(self._tmp_path, 'white_noise.wav')
signal_processing.SignalProcessingUtils.SaveWav(
self._audio_track_filepath, white_noise)
# Make a copy the white noise audio track file; it will be used by
# echo_path_simulation.RecordedEchoPathSimulator.
shutil.copy(self._audio_track_filepath, os.path.join(
self._tmp_path, 'white_noise_echo.wav'))
def tearDown(self):
"""Recursively deletes temporary folders."""
shutil.rmtree(self._tmp_path)
def testRegisteredClasses(self):
# Check that there is at least one registered echo path simulator.
registered_classes = (
echo_path_simulation.EchoPathSimulator.REGISTERED_CLASSES)
self.assertIsInstance(registered_classes, dict)
self.assertGreater(len(registered_classes), 0)
# Instance factory.
factory = echo_path_simulation_factory.EchoPathSimulatorFactory()
# Try each registered echo path simulator.
for echo_path_simulator_name in registered_classes:
simulator = factory.GetInstance(
echo_path_simulator_class=registered_classes[
echo_path_simulator_name],
render_input_filepath=self._audio_track_filepath)
echo_filepath = simulator.Simulate(self._tmp_path)
if echo_filepath is None:
self.assertEqual(echo_path_simulation.NoEchoPathSimulator.NAME,
echo_path_simulator_name)
# No other tests in this case.
continue
# Check that the echo audio track file exists and its length is greater or
# equal to that of the render audio track.
self.assertTrue(os.path.exists(echo_filepath))
echo = signal_processing.SignalProcessingUtils.LoadWav(echo_filepath)
self.assertGreaterEqual(
signal_processing.SignalProcessingUtils.CountSamples(echo),
self._audio_track_num_samples)
| mit | -6,845,322,949,738,666,000 | 36.469136 | 80 | 0.719275 | false | 4.084791 | true | false | false |
jmtoivan/apparatus | src/bmgraph/file.py | 2 | 9308 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import sys
import traceback
import StringIO
import logging
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("bmgraph.file")
class NotImplementedError(Exception):
pass
class mdict(dict):
"""This class implements a multi-value dictionary."""
def __setitem__(self, key, value):
self.setdefault(key, []).append(value)
def __delitem__(self, key):
raise NotImplementedError("del not supported for mdict, use .delete(k, v) instead")
def delete(self, key, value):
self[key].remove(value)
def __str__(self):
return unicode(self).encode('ASCII', 'backslashreplace')
def __unicode__(self):
pairs = []
for key, values in self.items():
for value in values:
pairs.append("%s=%s" % (key, value.replace(" ", "+")))
return u" ".join(pairs)
class GraphSink(object):
def special_node_read(self, node_name, node_type):
pass
def edge_read(self, node1_name, node1_type,
node2_name, node2_type, type, attribute_dict):
pass
def node_attributes_read(self, node_name, node_type, attribute_dict):
pass
def comment_read(self, type, value):
pass
class GraphObjectSink(GraphSink):
'''Sink for an in-memory Graph object. If passed a graph object as the kw
param graph, append to that Graph.'''
def __init__(self, graph=None):
if graph != None:
self.graph = graph
else:
self.graph = Graph()
def special_node_read(self, node_name, node_type):
self.graph.get_node(node_name, node_type).special_node = True
def edge_read(self, node1_name, node1_type, node2_name, node2_type,
type, attribute_dict):
n1 = self.graph.get_node(node1_name, node1_type)
n2 = self.graph.get_node(node2_name, node2_type)
e = n1.add_edge(n2)
e.type = type
for k, v in attribute_dict.iteritems():
e.attributes[k] = v
def node_attributes_read(self, node_name, node_type, attribute_dict):
n = self.graph.get_node(node_name, node_type)
for k, v in attribute_dict.iteritems():
n.attributes[k] = v
def get_object(self):
return self.graph
class Graph(object):
def __init__(self):
self.attributes = mdict()
self.nodes = {}
self.comments = []
def add_node(self, node):
self.nodes[node.name] = node
def del_node(self, node):
del self.nodes[node.name]
def get_node(self, name, type):
if self.nodes.has_key(name):
return self.nodes[name]
else:
n = Node(self, name, type)
return n
def __str__(self):
print "called"
return unicode(self).encode('ASCII', 'backslashreplace')
def __unicode__(self):
ret = []
for node in self.nodes.values():
if node.special_node:
ret.append(unicode(node))
specials_written = True
for comment in self.comments:
ret.append(u"# %s" % unicode(comment))
comments_written = True
written_edges = set([])
for node in self.nodes.values():
for edge in node.edges:
if unicode(edge) in written_edges:
continue
ret.append(unicode(edge))
written_edges.add(unicode(edge))
for node in self.nodes.values():
if len(node.attributes.keys()) == 0:
continue
ret.append(u"# _attributes %s %s" % (unicode(node), unicode(node.attributes)))
ret.append(u'')
return u'\n'.join(ret)
class Edge(object):
def __init__(self, n1, n2):
self.attributes = mdict()
self.n1 = n1
self.n2 = n2
def other(self, node):
if node == self.n1:
return self.n2
return self.n1
def __cmp__(self, other):
        return cmp(str(self), str(other))
def __str__(self):
return unicode(self).encode('ASCII', 'backslashreplace')
def __unicode__(self):
return u"%s %s %s %s" % (self.n1, self.n2, self.type, self.attributes)
def __repr__(self):
return "<Edge %s>" % str(self)
class Node(object):
def __init__(self, graph, name, type):
self.graph = graph
self.attributes = mdict()
self.name = name
self.type = type
self.special_node = False
self.edges = []
self.graph.add_node(self)
def add_edge(self, other):
e = Edge(self, other)
self.edges.append(e)
other.edges.append(e)
return e
def remove_edge(self, edge):
self.edges.remove(edge)
def delete(self):
self.graph.del_node(self)
for edge in self.edges:
other = edge.other(self)
other.remove_edge(edge)
def __cmp__(self, other):
        return cmp(str(self), str(other))
def __str__(self):
return unicode(self).encode('ASCII', 'backslashreplace')
def __unicode__(self):
if self.type:
return u"%s_%s" % (self.type, self.name)
return self.name
def __repr__(self):
return "<Node %s>" % str(self)
def read_file(stream, sink):
lines_read = 0
for line in stream:
lines_read += 1
if logger.isEnabledFor(logging.INFO):
if lines_read % 10000 == 0:
logger.info("Read %i lines..." % lines_read)
else:
logger.debug("Read %i lines..." % lines_read)
if len(line) < 1:
continue
# Decode early
try:
pass
# line = line.decode('utf-8', 'replace')
except Exception, e:
print lines_read, line.replace("\n", "")
traceback.print_exc()
raise e
if line[0] == '#':
comment_type, value = line[2:].split(" ", 1)
# only handles node attributes atm...
if comment_type == "_attributes":
node, attributes = value.split(" ", 1)
parts = node.split('_', 1)
if len(parts) == 1:
node_name = parts[0]
node_type = None
else:
node_name = parts[1]
node_type = parts[0]
attributes = attributes.split(" ")
attr_dict = {}
for attribute in attributes:
try:
key, value = attribute.split("=", 1)
attr_dict[key] = value.replace("\n", "").replace("+", " ")
except ValueError, ve:
logger.warning("Line %i: error parsing attribute %s" % (lines_read, attribute))
logger.warning(traceback.format_exc())
sink.node_attributes_read(node_name, node_type, attr_dict)
else:
sink.comment_read(comment_type, value.replace("\n", "").replace("+", " "))
else:
parts = line.split(" ", 2)
if len(parts) == 1:
if parts[0].strip() == "":
continue
parts = parts[0].replace("\n", "").split("_", 1)
if len(parts) == 1:
sink.special_node_read(parts[0], None)
else:
sink.special_node_read(parts[1], parts[0])
if len(parts) == 3:
attr_dict = {}
edge_attributes = parts[2].replace("\n", "").split(" ")
type = edge_attributes[0]
if len(edge_attributes) > 0:
for attr in edge_attributes[1:]:
try:
key, value = attr.split("=", 1)
attr_dict[key] = value.replace("+", " ")
except ValueError, ve:
logger.warning("Line %i: error parsing attribute %s" % (lines_read, attr))
logger.warning(traceback.format_exc())
n1_parts = parts[0].split('_', 1)
if len(n1_parts) == 1:
n1_name = n1_parts[0]
n1_type = None
else:
n1_name = n1_parts[1]
n1_type = n1_parts[0]
n2_parts = parts[1].split('_', 1)
if len(n2_parts) == 1:
n2_name = n2_parts[0]
n2_type = None
else:
n2_name = n2_parts[1]
n2_type = n2_parts[0]
sink.edge_read(n1_name, n1_type, n2_name, n2_type,
type, attr_dict)
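# Example input accepted by read_file (illustrative only, inferred from the
# parsing rules above; '+' encodes a space inside attribute values):
#
#   special_node1
#   gene_BRCA1 protein_P38398 codes_for confidence=0.9
#   # _attributes gene_BRCA1 organism=human name=Breast+cancer+1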
def read_string(string, sink):
return read_file(StringIO.StringIO(string), sink)
def main(args):
if len(args) > 0:
        s = GraphObjectSink()
        for arg in args:
            try:
                with open(arg) as stream:
                    read_file(stream, s)
            except:
                traceback.print_exc()
print s.get_object()
else:
print "Please run test.py to run tests."
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-2.0 | 3,065,605,060,408,539,600 | 30.130435 | 103 | 0.502578 | false | 3.909282 | false | false | false |
rimpybharot/CMPE273 | mid-term/encoder_pb2_grpc.py | 1 | 2101 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import encoder_pb2 as encoder__pb2
class EncoderStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.encode = channel.unary_unary(
'/Encoder/encode',
request_serializer=encoder__pb2.EncodeRequest.SerializeToString,
response_deserializer=encoder__pb2.EncodeResponse.FromString,
)
self.decode = channel.unary_unary(
'/Encoder/decode',
request_serializer=encoder__pb2.DecodeRequest.SerializeToString,
response_deserializer=encoder__pb2.DecodeResponse.FromString,
)
class EncoderServicer(object):
# missing associated documentation comment in .proto file
pass
def encode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def decode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_EncoderServicer_to_server(servicer, server):
rpc_method_handlers = {
'encode': grpc.unary_unary_rpc_method_handler(
servicer.encode,
request_deserializer=encoder__pb2.EncodeRequest.FromString,
response_serializer=encoder__pb2.EncodeResponse.SerializeToString,
),
'decode': grpc.unary_unary_rpc_method_handler(
servicer.decode,
request_deserializer=encoder__pb2.DecodeRequest.FromString,
response_serializer=encoder__pb2.DecodeResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Encoder', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| mit | -8,531,388,553,193,744,000 | 32.349206 | 76 | 0.71109 | false | 4.314168 | false | false | false |
GaelMagnan/PyGitHook | src/Hooks/Hook.py | 1 | 8074 | """
Git Hook. The Simple python way to code Hooks
Hook is the base class for every Hook.
AUTHOR:
Gael Magnan de bornier
"""
import sys
import os
import re
from tempfile import NamedTemporaryFile
from contextlib import contextmanager
from src.Utils import Bash
from src.Tasks import HookTask
class Hook(object):
def __init__(self, tasks=None, conf_location="", exclude=None):
if tasks is None:
tasks = []
if exclude is None:
exclude = []
self.exclude = exclude
self.tasks = tasks
self.conf_location = conf_location
def main_process(self):
"""Main function"""
kwargs = self.get_exec_params()
result = self.process(**kwargs)
if result:
sys.exit(0)
sys.exit(1)
def get_script_params(self, **kwargs):
return {'${0}'.format(i): arg for i, arg in enumerate(sys.argv)}
def get_line_params(self, **kwargs):
return {}
def get_files_params(self, **kwargs):
return self.get_files_grouped_by_change(**kwargs)
def get_exec_params(self):
"""Reads the inputs to get execution parameters"""
params = {'conf_location': self.conf_location}
params.update(self.get_script_params(**params))
params.update(self.get_line_params(**params))
params.update(self.get_files_params(**params))
return params
def process(self, **kwargs):
"""Main treatment, execute the tasks return False if any task fails,
true otherwise"""
try:
tasks = self.get_tasks_group_by_type()
return self.execute_tasks_group_by_type(*tasks, **kwargs)
except Exception as e:
print("An error occured during the runing of the script, "
"please report this following message to you administrator.")
print(e)
return False
def get_tasks_group_by_type(self):
""" This method return the tasks group by execution context,
the groups should be the ones used by execute_tasks_group_by_type"""
general_tasks = []
new_file_task = []
modified_file_task = []
deleted_file_task = []
for task in self.tasks:
if issubclass(task, HookTask.HookNewOrModifiedFileTask):
new_file_task.append(task)
modified_file_task.append(task)
elif issubclass(task, HookTask.HookNewFileTask):
new_file_task.append(task)
elif issubclass(task, HookTask.HookModifiedFileTask):
modified_file_task.append(task)
elif issubclass(task, HookTask.HookDeletedFileTask):
deleted_file_task.append(task)
elif issubclass(task, HookTask.HookFileTask):
new_file_task.append(task)
modified_file_task.append(task)
deleted_file_task.append(task)
else:
general_tasks.append(task)
return (general_tasks, new_file_task, modified_file_task,
deleted_file_task)
def execute_tasks_group_by_type(self, general_tasks, new_file_task,
modified_file_task, deleted_file_task,
**kwargs):
"""The tasks are executed with different context depending on their type
The HookFileTasks are executed on specific files depending on the
changes the file encountered
Other tasks are executing as general statements"""
for task in general_tasks:
if not task().execute(**kwargs):
return False
if new_file_task or modified_file_task or deleted_file_task:
for file_type in ['new_files', 'modified_files', 'deleted_files']:
files_to_check = kwargs[file_type]
for filename in files_to_check:
exclusion_matchs = [ x for x in self.exclude if re.match(x, filename)]
if exclusion_matchs:
print( "{0} ignored because it matches: {1}".format( filename, exclusion_matchs ) )
continue
if(file_type != "deleted_files" and
len(new_file_task) + len(modified_file_task) > 0):
try:
file_val = self.get_file(filename, **kwargs)
except:
print("Could not read %s" % filename)
return False
with self.get_temp_file() as tmp:
try:
self.write_file_value_in_file(file_val, tmp)
except:
print("Could not write %s " % filename)
return False
if file_type == "new_files":
for task in new_file_task:
if not task().execute(file_desc=tmp,
filename=filename,
file_value=file_val,
**kwargs):
return False
elif file_type == "modified_files":
for task in modified_file_task:
if not task().execute(file_desc=tmp,
filename=filename,
file_value=file_val,
**kwargs):
return False
else:
for task in deleted_file_task:
if not task().execute(filename=filename, **kwargs):
return False
return True
def get_file(self, filename, **kwargs):
pass
def get_file_diffs(self, **kwargs):
pass
@contextmanager
def get_temp_file(self, mode="r+"):
f = NamedTemporaryFile(mode=mode, delete=False)
try:
yield f
finally:
try:
os.unlink(f.name)
except OSError:
pass
def write_file_value_in_file(self, file_value, file_desc):
if file_value:
file_desc.write("\n".join(file_value))
file_desc.flush()
else:
raise Exception()
def get_files_grouped_by_change(self, **kwargs):
added = []
modified = []
deleted = []
file_diffs = self.get_file_diffs(**kwargs)
for line in file_diffs:
if len(line) < 3:
continue
mode, filename = self.get_mode_and_filname(line)
if mode == "A":
added.append(filename)
elif mode == "M":
modified.append(filename)
elif mode == "D":
deleted.append(filename)
return {'new_files': added,
'modified_files': modified,
'deleted_files': deleted}
def get_mode_and_filname(self, line):
try:
mode, filename = line.split()
return mode, filename
except:
line_splited = line.split()
if len(line_splited) > 2:
mode = line_splited[0]
filename = line.replace(mode, "", 1)
return mode, filename
else:
print("An error occured while trying to split:{0}"
" Please warn and adminitrator ".format(line))
def main(_klass, tasks=None, conf_location="", exclude=None):
if issubclass(_klass, Hook):
hook = _klass(tasks, conf_location, exclude)
hook.main_process()
else:
print("Not a valid class, should inherit from Hook")
sys.exit(1)
sys.exit(0)
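# Illustrative wiring (hypothetical subclass and task names): a git hook script
# would subclass Hook, override the file accessors and call main() with the
# HookTask subclasses it wants to run.
#
#   class PreCommitHook(Hook):
#       def get_file(self, filename, **kwargs):
#           ...  # return the staged content of `filename` as a list of lines
#       def get_file_diffs(self, **kwargs):
#           ...  # return "git diff --name-status"-style lines, e.g. "M path"
#
#   if __name__ == '__main__':
#       main(PreCommitHook, tasks=[SomeHookTask], exclude=[r'.*\.min\.js$'])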
| gpl-2.0 | -6,777,289,918,627,628,000 | 34.725664 | 107 | 0.502353 | false | 4.691458 | false | false | false |
dmpiergiacomo/scion | python/path_server/base.py | 1 | 20649 | # Copyright 2014 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`base` --- Base path server
================================
"""
# Stdlib
import logging
import random
import threading
from collections import defaultdict, deque
from abc import ABCMeta, abstractmethod
from threading import Lock
# External packages
from external.expiring_dict import ExpiringDict
# SCION
from lib.crypto.hash_tree import ConnectedHashTree
from lib.crypto.symcrypto import crypto_hash
from lib.defines import (
HASHTREE_EPOCH_TIME,
HASHTREE_TTL,
PATH_SERVICE,
)
from lib.log import add_formatter, Rfc3339Formatter
from lib.path_seg_meta import PathSegMeta
from lib.packet.path_mgmt.rev_info import RevocationInfo
from lib.packet.path_mgmt.seg_recs import PathRecordsReply, PathSegmentRecords
from lib.packet.scmp.types import SCMPClass, SCMPPathClass
from lib.packet.svc import SVCType
from lib.path_db import DBResult, PathSegmentDB
from lib.rev_cache import RevCache
from lib.thread import thread_safety_net
from lib.types import (
CertMgmtType,
PathMgmtType as PMT,
PathSegmentType as PST,
PayloadClass,
)
from lib.util import SCIONTime, sleep_interval
from lib.zk.cache import ZkSharedCache
from lib.zk.errors import ZkNoConnection
from lib.zk.id import ZkID
from lib.zk.zk import ZK_LOCK_SUCCESS, Zookeeper
from scion_elem.scion_elem import SCIONElement
class PathServer(SCIONElement, metaclass=ABCMeta):
"""
The SCION Path Server.
"""
SERVICE_TYPE = PATH_SERVICE
MAX_SEG_NO = 5 # TODO: replace by config variable.
# ZK path for incoming PATHs
ZK_PATH_CACHE_PATH = "path_cache"
# ZK path for incoming REVs
ZK_REV_CACHE_PATH = "rev_cache"
# Max number of segments per propagation packet
PROP_LIMIT = 5
# Max number of segments per ZK cache entry
ZK_SHARE_LIMIT = 10
# Time to store revocations in zookeeper
ZK_REV_OBJ_MAX_AGE = HASHTREE_EPOCH_TIME
def __init__(self, server_id, conf_dir):
"""
:param str server_id: server identifier.
:param str conf_dir: configuration directory.
"""
super().__init__(server_id, conf_dir)
self.down_segments = PathSegmentDB(max_res_no=self.MAX_SEG_NO)
self.core_segments = PathSegmentDB(max_res_no=self.MAX_SEG_NO)
self.pending_req = defaultdict(list) # Dict of pending requests.
self.pen_req_lock = threading.Lock()
self._request_logger = None
# Used when l/cPS doesn't have up/dw-path.
self.waiting_targets = defaultdict(list)
self.revocations = RevCache()
# A mapping from (hash tree root of AS, IFID) to segments
self.htroot_if2seg = ExpiringDict(1000, HASHTREE_TTL)
self.htroot_if2seglock = Lock()
self.CTRL_PLD_CLASS_MAP = {
PayloadClass.PATH: {
PMT.REQUEST: self.path_resolution,
PMT.REPLY: self.handle_path_segment_record,
PMT.REG: self.handle_path_segment_record,
PMT.REVOCATION: self._handle_revocation,
PMT.SYNC: self.handle_path_segment_record,
},
PayloadClass.CERT: {
CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
CertMgmtType.TRC_REPLY: self.process_trc_reply,
CertMgmtType.TRC_REQ: self.process_trc_request,
},
}
self.SCMP_PLD_CLASS_MAP = {
SCMPClass.PATH: {
SCMPPathClass.REVOKED_IF: self._handle_scmp_revocation,
},
}
self._segs_to_zk = deque()
self._revs_to_zk = deque()
self._zkid = ZkID.from_values(self.addr.isd_as, self.id,
[(self.addr.host, self._port)])
self.zk = Zookeeper(self.topology.isd_as, PATH_SERVICE,
self._zkid.copy().pack(), self.topology.zookeepers)
self.zk.retry("Joining party", self.zk.party_setup)
self.path_cache = ZkSharedCache(self.zk, self.ZK_PATH_CACHE_PATH,
self._handle_paths_from_zk)
self.rev_cache = ZkSharedCache(self.zk, self.ZK_REV_CACHE_PATH,
self._rev_entries_handler)
self._init_request_logger()
def worker(self):
"""
Worker thread that takes care of reading shared paths from ZK, and
handling master election for core servers.
"""
worker_cycle = 1.0
start = SCIONTime.get_time()
while self.run_flag.is_set():
sleep_interval(start, worker_cycle, "cPS.worker cycle",
self._quiet_startup())
start = SCIONTime.get_time()
try:
self.zk.wait_connected()
self.path_cache.process()
self.rev_cache.process()
# Try to become a master.
ret = self.zk.get_lock(lock_timeout=0, conn_timeout=0)
if ret: # Either got the lock, or already had it.
if ret == ZK_LOCK_SUCCESS:
logging.info("Became master")
self.path_cache.expire(self.config.propagation_time * 10)
self.rev_cache.expire(self.ZK_REV_OBJ_MAX_AGE)
except ZkNoConnection:
logging.warning('worker(): ZkNoConnection')
pass
self._update_master()
self._propagate_and_sync()
self._handle_pending_requests()
def _update_master(self):
pass
def _rev_entries_handler(self, raw_entries):
for raw in raw_entries:
rev_info = RevocationInfo.from_raw(raw)
self._remove_revoked_segments(rev_info)
def _add_rev_mappings(self, pcb):
"""
Add if revocation token to segment ID mappings.
"""
segment_id = pcb.get_hops_hash()
with self.htroot_if2seglock:
for asm in pcb.iter_asms():
hof = asm.pcbm(0).hof()
egress_h = (asm.p.hashTreeRoot, hof.egress_if)
self.htroot_if2seg.setdefault(egress_h, set()).add(segment_id)
ingress_h = (asm.p.hashTreeRoot, hof.ingress_if)
self.htroot_if2seg.setdefault(ingress_h, set()).add(segment_id)
@abstractmethod
def _handle_up_segment_record(self, pcb, **kwargs):
raise NotImplementedError
@abstractmethod
def _handle_down_segment_record(self, pcb, **kwargs):
raise NotImplementedError
@abstractmethod
def _handle_core_segment_record(self, pcb, **kwargs):
raise NotImplementedError
def _add_segment(self, pcb, seg_db, name, reverse=False):
res = seg_db.update(pcb, reverse=reverse)
if res == DBResult.ENTRY_ADDED:
self._add_rev_mappings(pcb)
logging.info("%s-Segment registered: %s", name, pcb.short_id())
return True
elif res == DBResult.ENTRY_UPDATED:
self._add_rev_mappings(pcb)
logging.debug("%s-Segment updated: %s", name, pcb.short_id())
return False
def _handle_scmp_revocation(self, pld, meta):
rev_info = RevocationInfo.from_raw(pld.info.rev_info)
self._handle_revocation(rev_info, meta)
def _handle_revocation(self, rev_info, meta):
"""
Handles a revocation of a segment, interface or hop.
:param rev_info: The RevocationInfo object.
"""
assert isinstance(rev_info, RevocationInfo)
if not self._validate_revocation(rev_info):
return
if meta.ia[0] != self.addr.isd_as[0]:
logging.info("Dropping revocation received from a different ISD. Src: %s RevInfo: %s" %
(meta, rev_info.short_desc()))
return
if rev_info in self.revocations:
return False
self.revocations.add(rev_info)
logging.debug("Received revocation from %s: %s", meta, rev_info.short_desc())
self._revs_to_zk.append(rev_info.copy().pack()) # have to pack copy
# Remove segments that contain the revoked interface.
self._remove_revoked_segments(rev_info)
# Forward revocation to other path servers.
self._forward_revocation(rev_info, meta)
def _remove_revoked_segments(self, rev_info):
"""
        Try the previous and next hashes as possible AS tokens,
and delete any segment that matches
:param rev_info: The revocation info
:type rev_info: RevocationInfo
"""
if not ConnectedHashTree.verify_epoch(rev_info.p.epoch):
return
(hash01, hash12) = ConnectedHashTree.get_possible_hashes(rev_info)
if_id = rev_info.p.ifID
with self.htroot_if2seglock:
down_segs_removed = 0
core_segs_removed = 0
up_segs_removed = 0
for h in (hash01, hash12):
for sid in self.htroot_if2seg.pop((h, if_id), []):
if self.down_segments.delete(sid) == DBResult.ENTRY_DELETED:
down_segs_removed += 1
if self.core_segments.delete(sid) == DBResult.ENTRY_DELETED:
core_segs_removed += 1
if not self.topology.is_core_as:
if (self.up_segments.delete(sid) ==
DBResult.ENTRY_DELETED):
up_segs_removed += 1
logging.debug("Removed segments revoked by [%s]: UP: %d DOWN: %d CORE: %d" %
(rev_info.short_desc(), up_segs_removed, down_segs_removed,
core_segs_removed))
@abstractmethod
def _forward_revocation(self, rev_info, meta):
"""
Forwards a revocation to other path servers that need to be notified.
:param rev_info: The RevInfo object.
:param meta: The MessageMeta object.
"""
raise NotImplementedError
def _send_path_segments(self, req, meta, logger, up=None, core=None, down=None):
"""
Sends path-segments to requester (depending on Path Server's location).
"""
up = up or set()
core = core or set()
down = down or set()
all_segs = up | core | down
if not all_segs:
logger.warning("No segments to send for request: %s from: %s" %
(req.short_desc(), meta))
return
revs_to_add = self._peer_revs_for_segs(all_segs)
pld = PathRecordsReply.from_values(
{PST.UP: up, PST.CORE: core, PST.DOWN: down},
revs_to_add
)
self.send_meta(pld, meta)
logger.info("Sending PATH_REPLY with %d segment(s).", len(all_segs))
def _peer_revs_for_segs(self, segs):
"""Returns a list of peer revocations for segments in 'segs'."""
def _handle_one_seg(seg):
for asm in seg.iter_asms():
for pcbm in asm.iter_pcbms(1):
hof = pcbm.hof()
for if_id in [hof.ingress_if, hof.egress_if]:
rev_info = self.revocations.get((asm.isd_as(), if_id))
if rev_info:
revs_to_add.add(rev_info.copy())
return
revs_to_add = set()
for seg in segs:
_handle_one_seg(seg)
return list(revs_to_add)
def _handle_pending_requests(self):
rem_keys = []
# Serve pending requests.
with self.pen_req_lock:
for key in self.pending_req:
to_remove = []
for req, meta, logger in self.pending_req[key]:
if self.path_resolution(req, meta, new_request=False, logger=logger):
meta.close()
to_remove.append((req, meta, logger))
# Clean state.
for req_meta in to_remove:
self.pending_req[key].remove(req_meta)
if not self.pending_req[key]:
rem_keys.append(key)
for key in rem_keys:
del self.pending_req[key]
def _handle_paths_from_zk(self, raw_entries):
"""
Handles cached paths through ZK, passed as a list.
"""
for raw in raw_entries:
recs = PathSegmentRecords.from_raw(raw)
for type_, pcb in recs.iter_pcbs():
seg_meta = PathSegMeta(pcb, self.continue_seg_processing,
type_=type_, params={'from_zk': True})
self._process_path_seg(seg_meta)
if raw_entries:
logging.debug("Processed %s segments from ZK", len(raw_entries))
def handle_path_segment_record(self, seg_recs, meta):
"""
Handles paths received from the network.
"""
params = self._dispatch_params(seg_recs, meta)
# Add revocations for peer interfaces included in the path segments.
for rev_info in seg_recs.iter_rev_infos():
self.revocations.add(rev_info)
# Verify pcbs and process them
for type_, pcb in seg_recs.iter_pcbs():
seg_meta = PathSegMeta(pcb, self.continue_seg_processing, meta,
type_, params)
self._process_path_seg(seg_meta)
def continue_seg_processing(self, seg_meta):
"""
        For every path segment (that can be verified) received from the network
or ZK this function gets called to continue the processing for the
segment.
The segment is added to pathdb and pending requests are checked.
"""
pcb = seg_meta.seg
logging.debug("Successfully verified PCB %s" % pcb.short_id())
type_ = seg_meta.type
params = seg_meta.params
self.handle_ext(pcb)
self._dispatch_segment_record(type_, pcb, **params)
self._handle_pending_requests()
def handle_ext(self, pcb):
"""
Handle beacon extensions.
"""
# Handle PCB extensions:
if pcb.is_sibra():
# TODO(Sezer): Implement sibra extension handling
logging.debug("%s", pcb.sibra_ext)
for asm in pcb.iter_asms():
pol = asm.routing_pol_ext()
if pol:
self.handle_routing_pol_ext(pol)
def handle_routing_pol_ext(self, ext):
# TODO(Sezer): Implement extension handling
logging.debug("Routing policy extension: %s" % ext)
def _dispatch_segment_record(self, type_, seg, **kwargs):
# Check that segment does not contain a revoked interface.
if not self._validate_segment(seg):
return
handle_map = {
PST.UP: self._handle_up_segment_record,
PST.CORE: self._handle_core_segment_record,
PST.DOWN: self._handle_down_segment_record,
}
handle_map[type_](seg, **kwargs)
def _validate_segment(self, seg):
"""
Check segment for revoked upstream/downstream interfaces.
:param seg: The PathSegment object.
:return: False, if the path segment contains a revoked upstream/
downstream interface (not peer). True otherwise.
"""
for asm in seg.iter_asms():
pcbm = asm.pcbm(0)
for if_id in [pcbm.p.inIF, pcbm.p.outIF]:
rev_info = self.revocations.get((asm.isd_as(), if_id))
if rev_info:
logging.debug("Found revoked interface (%d, %s) in segment %s." %
(rev_info.p.ifID, rev_info.isd_as(), seg.short_desc()))
return False
return True
def _dispatch_params(self, pld, meta):
return {}
def _propagate_and_sync(self):
self._share_via_zk()
self._share_revs_via_zk()
def _gen_prop_recs(self, queue, limit=PROP_LIMIT):
count = 0
pcbs = defaultdict(list)
while queue:
count += 1
type_, pcb = queue.popleft()
pcbs[type_].append(pcb.copy())
if count >= limit:
yield(pcbs)
count = 0
pcbs = defaultdict(list)
if pcbs:
yield(pcbs)
@abstractmethod
def path_resolution(self, path_request, meta, new_request=True, logger=None):
"""
Handles all types of path request.
"""
raise NotImplementedError
def _handle_waiting_targets(self, pcb):
"""
Handle any queries that are waiting for a path to any core AS in an ISD.
"""
dst_ia = pcb.first_ia()
if not self.is_core_as(dst_ia):
logging.warning("Invalid waiting target, not a core AS: %s", dst_ia)
return
self._send_waiting_queries(dst_ia[0], pcb)
def _send_waiting_queries(self, dst_isd, pcb):
targets = self.waiting_targets[dst_isd]
if not targets:
return
path = pcb.get_path(reverse_direction=True)
src_ia = pcb.first_ia()
while targets:
(seg_req, logger) = targets.pop(0)
meta = self._build_meta(ia=src_ia, path=path, host=SVCType.PS_A, reuse=True)
self.send_meta(seg_req, meta)
logger.info("Waiting request (%s) sent to %s via %s",
seg_req.short_desc(), meta, pcb.short_desc())
def _share_via_zk(self):
if not self._segs_to_zk:
return
logging.info("Sharing %d segment(s) via ZK", len(self._segs_to_zk))
for pcb_dict in self._gen_prop_recs(self._segs_to_zk,
limit=self.ZK_SHARE_LIMIT):
seg_recs = PathSegmentRecords.from_values(pcb_dict)
self._zk_write(seg_recs.pack())
def _share_revs_via_zk(self):
if not self._revs_to_zk:
return
logging.info("Sharing %d revocation(s) via ZK", len(self._revs_to_zk))
while self._revs_to_zk:
self._zk_write_rev(self._revs_to_zk.popleft())
def _zk_write(self, data):
hash_ = crypto_hash(data).hex()
try:
self.path_cache.store("%s-%s" % (hash_, SCIONTime.get_time()), data)
except ZkNoConnection:
logging.warning("Unable to store segment(s) in shared path: "
"no connection to ZK")
def _zk_write_rev(self, data):
hash_ = crypto_hash(data).hex()
try:
self.rev_cache.store("%s-%s" % (hash_, SCIONTime.get_time()), data)
except ZkNoConnection:
logging.warning("Unable to store revocation(s) in shared path: "
"no connection to ZK")
def _init_request_logger(self):
"""
Initializes the request logger.
"""
self._request_logger = logging.getLogger("RequestLogger")
# Create new formatter to include the random request id and the request in the log.
        formatter = Rfc3339Formatter(
"%(asctime)s [%(levelname)s] (%(threadName)s) %(message)s "
"{id=%(id)s, req=%(req)s, from=%(from)s}")
add_formatter('RequestLogger', formatter)
def get_request_logger(self, req, meta):
"""
Returns a logger adapter for 'req'.
"""
# Random ID to relate log entries for a request.
req_id = "%08x" % random.randint(0, 2**32 - 1)
# Create a logger for the request to log with context.
return logging.LoggerAdapter(
self._request_logger, {"id": req_id, "req": req.short_desc(), "from": str(meta)})
def run(self):
"""
Run an instance of the Path Server.
"""
threading.Thread(
target=thread_safety_net, args=(self.worker,),
name="PS.worker", daemon=True).start()
threading.Thread(
target=thread_safety_net, args=(self._check_trc_cert_reqs,),
name="Elem.check_trc_cert_reqs", daemon=True).start()
super().run()
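# PathServer is abstract; a concrete server is expected to subclass it and
# implement the abstract handlers (illustrative sketch, class name hypothetical):
#
#   class CorePathServer(PathServer):
#       def _handle_up_segment_record(self, pcb, **kwargs): ...
#       def _handle_down_segment_record(self, pcb, **kwargs): ...
#       def _handle_core_segment_record(self, pcb, **kwargs): ...
#       def _forward_revocation(self, rev_info, meta): ...
#       def path_resolution(self, path_request, meta, new_request=True, logger=None): ...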
| apache-2.0 | -3,079,775,288,533,546,000 | 38.256654 | 99 | 0.573926 | false | 3.778408 | false | false | false |
olamotte/sbblocator | utils.py | 1 | 3069 | import time
from functools import wraps
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff."""
"""Copyright (c) 2013, SaltyCrane
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
* Neither the name of the SaltyCrane nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: logger to use. If None, print
:type logger: logging.Logger instance
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
if logger:
logger.warning(msg)
else:
print (msg)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry
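# Illustrative usage: retry a flaky call up to 4 attempts, waiting 3 s, 6 s and
# 12 s between them (function body is hypothetical).
#
#   @retry(IOError, tries=4, delay=3, backoff=2)
#   def fetch_page():
#       ...  # some flaky network call that may raise IOError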
| unlicense | 6,351,903,952,804,055,000 | 39.92 | 77 | 0.688498 | false | 4.628959 | false | false | false |
verifiedpixel/superdesk | server/apps/planning.py | 10 | 1989 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from superdesk.notification import push_notification
from superdesk.resource import Resource
from apps.archive.common import on_create_item
from superdesk.services import BaseService
import superdesk
def init_app(app):
endpoint_name = 'planning'
service = PlanningService(endpoint_name, backend=superdesk.get_backend())
PlanningResource(endpoint_name, app=app, service=service)
class PlanningResource(Resource):
schema = {
'guid': {
'type': 'string',
'unique': True
},
'language': {
'type': 'string'
},
'headline': {
'type': 'string'
},
'slugline': {
'type': 'string'
},
'description_text': {
'type': 'string',
'nullable': True
},
'firstcreated': {
'type': 'datetime'
},
'urgency': {
'type': 'integer'
},
'desk': Resource.rel('desks', True)
}
item_url = 'regex("[\w,.:-]+")'
datasource = {'search_backend': 'elastic'}
resource_methods = ['GET', 'POST']
privileges = {'POST': 'planning', 'PATCH': 'planning'}
class PlanningService(BaseService):
def on_create(self, docs):
on_create_item(docs)
def on_created(self, docs):
push_notification('planning', created=1)
def on_updated(self, updates, original):
push_notification('planning', updated=1)
def on_deleted(self, doc):
push_notification('planning', deleted=1)
superdesk.privilege(name='planning',
label='Planning Management',
description='User can plan and cover.')
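# Illustrative POST payload matching the schema above (all values hypothetical;
# "desk" must reference an existing desks document):
#
#   {
#       "guid": "urn:newsml:localhost:2015-01-01T00:00:00.0:abc",
#       "headline": "Mayor press conference",
#       "slugline": "mayor-presser",
#       "description_text": "Coverage of the mayor's press conference",
#       "urgency": 3,
#       "desk": "54c9cbf9d7da3d0c1f6587ab"
#   }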
| agpl-3.0 | 2,288,588,184,138,790,400 | 25.878378 | 77 | 0.589744 | false | 3.954274 | false | false | false |
yuraic/koza4ok | examples/draw.py | 2 | 4678 | # Author: Yuriy Ilchenko ([email protected])
# Compare two ROC curves from scikit-learn and from TMVA (using skTMVA converter)
import os
import sys
if os.environ.get('TERM') == 'xterm':
os.environ['TERM'] = 'vt100'
# Now it's OK to import readline :)
# Import ROOT libraries
import ROOT
import array
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import roc_curve
from sklearn import tree
import cPickle
import numpy as np
from numpy.random import RandomState
RNG = RandomState(45)
# Construct an example dataset for binary classification
n_vars = 2
n_events = 300
signal = RNG.multivariate_normal(
np.ones(n_vars), np.diag(np.ones(n_vars)), n_events)
background = RNG.multivariate_normal(
np.ones(n_vars) * -1, np.diag(np.ones(n_vars)), n_events)
X = np.concatenate([signal, background])
y = np.ones(X.shape[0])
w = RNG.randint(1, 10, n_events * 2)
y[signal.shape[0]:] *= -1
permute = RNG.permutation(y.shape[0])
X = X[permute]
y = y[permute]
# Some print-out
print "Event numbers total:", 2 * n_events
# Plot the testing points
c1 = ROOT.TCanvas("c1","Testing Dataset",200,10,700,500)
c1.cd()
plot_colors = (ROOT.kRed, ROOT.kBlue)
mg = ROOT.TMultiGraph()
for i, n, c in zip([-1, 1], ('Class A', 'Class B'), plot_colors):
idx = np.where(y == i)
n = len(idx[0])
g = ROOT.TGraph(n,X[idx, 0][0],X[idx, 1][0])
g.SetMarkerColor(c)
g.SetMarkerStyle(8)
g.SetMarkerSize(0.5)
mg.Add(g)
mg.Draw("ap p")
mg.SetTitle("Testing dataset")
mg.GetXaxis().SetTitle("var1")
mg.GetYaxis().SetTitle("var2")
c1.Update()
c1.Modified()
# Use all dataset for testing
X_test, y_test, w_test = X, y, w
# sklearn, get BDT from pickle file
fid = open('bdt_sklearn_to_tmva_example.pkl', 'rb')
bdt = cPickle.load(fid)
# create TMVA reader
reader = ROOT.TMVA.Reader()
var1 = array.array('f',[0.])
reader.AddVariable("var1", var1)
var2 = array.array('f',[0.])
reader.AddVariable("var2", var2)
# TMVA, get BDT from the xml file
reader.BookMVA("BDT", "bdt_sklearn_to_tmva_example.xml")
# List for numpy arrays
sk_y_predicted =[]
tmva_y_predicted =[]
# Number of events
n = X.shape[0]
# Iterate over events
# Note: this is not the fastest way for sklearn
# but most representative, I believe
for i in xrange(n):
if (i % 100 == 0) and (i != 0):
print "Event %i" % i
var1[0] = X.item((i,0))
var2[0] = X.item((i,1))
# sklearn score
score = bdt.decision_function([var1[0], var2[0]]).item(0)
# calculate the value of the classifier with TMVA/TskMVA
bdtOutput = reader.EvaluateMVA("BDT")
# save skleanr and TMVA BDT output scores
sk_y_predicted.append(score)
tmva_y_predicted.append(bdtOutput)
# Convert arrays to numpy arrays
sk_y_predicted = np.array(sk_y_predicted)
tmva_y_predicted = np.array(tmva_y_predicted)
# Calculate ROC curves
fpr_sk, tpr_sk, _ = roc_curve(y_test, sk_y_predicted)
fpr_tmva, tpr_tmva, _ = roc_curve(y_test, tmva_y_predicted)
# Derive signal efficiencies and background rejections
# for sklearn and TMVA
sig_eff_sk = array.array('f', [rate for rate in tpr_sk])
bkg_rej_sk = array.array('f',[ (1-rate) for rate in fpr_sk])
sig_eff_tmva = array.array('f', [rate for rate in tpr_tmva])
bkg_rej_tmva = array.array('f',[ (1-rate) for rate in fpr_tmva])
# Stack for keeping plots
#plots = []
c2 = ROOT.TCanvas("c2","A Simple Graph Example",200,10,700,500)
c2.cd()
# Draw ROC-curve for sklearn
g1 = ROOT.TGraph(len(sig_eff_sk), sig_eff_sk, bkg_rej_sk)
g1.GetXaxis().SetRangeUser(0.0,1.0)
g1.GetYaxis().SetRangeUser(0.0,1.0)
g1.SetName("g1")
g1.SetTitle("ROC curve")
g1.SetLineStyle(3)
g1.SetLineColor(ROOT.kBlue)
g1.Draw("AL") # draw TGraph with no marker dots
# Draw ROC-curve for skTMVA
g2 = ROOT.TGraph(len(fpr_tmva), sig_eff_tmva, bkg_rej_tmva)
g2.GetXaxis().SetRangeUser(0.0,1.0)
g2.GetYaxis().SetRangeUser(0.0,1.0)
g2.SetName("g2")
g2.SetTitle("ROC curve")
g2.SetLineStyle(7)
g2.SetLineColor(ROOT.kRed)
g2.Draw("SAME") # draw TGraph with no marker dots
leg = ROOT.TLegend(0.4,0.35,0.7,0.2)
#leg.SetHeader("ROC curve")
leg.AddEntry("g1","sklearn","l")
leg.AddEntry("g2","skTMVA","l")
leg.Draw()
c2.Update()
c2.Modified()
## Draw ROC curves
#plt.figure()
#
#plt.plot(fpr_sk, tpr_sk, 'b-', label='scikit-learn bdt.predict()')
#plt.plot(fpr_tmva, tpr_tmva, 'r--', label='TMVA reader.EvaluateMVA("BDT")')
#
#plt.plot([0, 1], [0, 1], 'k--')
#plt.xlim([0.0, 1.0])
#plt.ylim([0.0, 1.05])
#plt.xlabel('False Positive Rate')
#plt.ylabel('True Positive Rate')
#plt.title('Simple ROC-curve comparison')
#
#plt.legend(loc="lower right")
#
#plt.savefig("roc_bdt_curves.png", dpi=96)
| mit | -3,488,663,077,255,613,400 | 23.621053 | 81 | 0.678068 | false | 2.523193 | true | false | false |
yidawang/brainiak | examples/utils/fmrisim_example.py | 2 | 7403 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""fMRI Simulator example script
Example script to generate a run of a participant's data. This generates
data representing a pair of conditions that are then combined
Authors: Cameron Ellis (Princeton) 2016
"""
import logging
import numpy as np
from brainiak.utils import fmrisim as sim
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401
import nibabel
logger = logging.getLogger(__name__)
# Inputs for generate_signal
dimensions = np.array([64, 64, 36]) # What is the size of the brain
feature_size = [9, 4, 9, 9]
feature_type = ['loop', 'cube', 'cavity', 'sphere']
coordinates_A = np.array(
[[32, 32, 18], [26, 32, 18], [32, 26, 18], [32, 32, 12]])
coordinates_B = np.array(
[[32, 32, 18], [38, 32, 18], [32, 38, 18], [32, 32, 24]])
signal_magnitude = [1, 0.5, 0.25, -1] # In percent signal change
# Inputs for generate_stimfunction
onsets_A = [10, 30, 50, 70, 90]
onsets_B = [0, 20, 40, 60, 80]
event_durations = [6]
tr_duration = 2
temporal_res = 1000.0 # How many elements per second are there
duration = 100
# Specify a name to save this generated volume.
savename = 'examples/utils/example.nii'
# Generate a volume representing the location and quality of the signal
volume_signal_A = sim.generate_signal(dimensions=dimensions,
feature_coordinates=coordinates_A,
feature_type=feature_type,
feature_size=feature_size,
signal_magnitude=signal_magnitude,
)
volume_signal_B = sim.generate_signal(dimensions=dimensions,
feature_coordinates=coordinates_B,
feature_type=feature_type,
feature_size=feature_size,
signal_magnitude=signal_magnitude,
)
# Visualize the signal that was generated for condition A
fig = plt.figure()
sim.plot_brain(fig,
volume_signal_A)
plt.show()
# Create the time course for the signal to be generated
stimfunction_A = sim.generate_stimfunction(onsets=onsets_A,
event_durations=event_durations,
total_time=duration,
temporal_resolution=temporal_res,
)
stimfunction_B = sim.generate_stimfunction(onsets=onsets_B,
event_durations=event_durations,
total_time=duration,
temporal_resolution=temporal_res,
)
# Convolve the HRF with the stimulus sequence
signal_function_A = sim.convolve_hrf(stimfunction=stimfunction_A,
tr_duration=tr_duration,
temporal_resolution=temporal_res,
)
signal_function_B = sim.convolve_hrf(stimfunction=stimfunction_B,
tr_duration=tr_duration,
temporal_resolution=temporal_res,
)
# Multiply the HRF timecourse with the signal
signal_A = sim.apply_signal(signal_function=signal_function_A,
volume_signal=volume_signal_A,
)
signal_B = sim.apply_signal(signal_function=signal_function_B,
volume_signal=volume_signal_B,
)
# Combine the signals from the two conditions
signal = signal_A + signal_B
# Combine the stim functions
stimfunction = list(np.add(stimfunction_A, stimfunction_B))
stimfunction_tr = stimfunction[::int(tr_duration * temporal_res)]
# Generate the mask of the signal
mask, template = sim.mask_brain(signal, mask_threshold=0.2)
# Mask the signal to the shape of a brain (attenuates signal according to grey
# matter likelihood)
signal *= mask.reshape(dimensions[0], dimensions[1], dimensions[2], 1)
# Generate original noise dict for comparison later
orig_noise_dict = sim._noise_dict_update({})
# Create the noise volumes (using the default parameters
noise = sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
mask=mask,
template=template,
noise_dict=orig_noise_dict,
)
# Standardize the signal activity to make it percent signal change
mean_act = (mask * orig_noise_dict['max_activity']).sum() / (mask > 0).sum()
signal = signal * mean_act / 100
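# The division by 100 converts the percent-signal-change units used above into
# raw activity units: a signal value of 1 corresponds to 1% of the mean brain
# activity within the mask.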
# Combine the signal and the noise
brain = signal + noise
# Display the brain
fig = plt.figure()
for tr_counter in list(range(0, brain.shape[3])):
# Get the axis to be plotted
ax = sim.plot_brain(fig,
brain[:, :, :, tr_counter],
mask=mask,
percentile=99.9)
# Wait for an input
logging.info(tr_counter)
plt.pause(0.5)
# Save the volume
affine_matrix = np.diag([-1, 1, 1, 1]) # LR gets flipped
brain_nifti = nibabel.Nifti1Image(brain, affine_matrix) # Create a nifti brain
nibabel.save(brain_nifti, savename)
# Load in the test dataset and generate a random volume based on it
# Pull out the data and associated data
volume = nibabel.load(savename).get_data()
dimensions = volume.shape[0:3]
total_time = volume.shape[3] * tr_duration
stimfunction = sim.generate_stimfunction(onsets=[],
event_durations=[0],
total_time=total_time,
)
stimfunction_tr = stimfunction[::int(tr_duration * temporal_res)]
# Calculate the mask
mask, template = sim.mask_brain(volume=volume,
mask_self=True,
)
# Calculate the noise parameters
noise_dict = sim.calc_noise(volume=volume,
mask=mask,
)
# Create the noise volumes (using the default parameters
noise = sim.generate_noise(dimensions=dimensions,
tr_duration=tr_duration,
stimfunction_tr=stimfunction_tr,
template=template,
mask=mask,
noise_dict=noise_dict,
)
# Create a nifti brain
brain_noise = nibabel.Nifti1Image(noise, affine_matrix)
nibabel.save(brain_noise, 'examples/utils/example2.nii') # Save
| apache-2.0 | 7,489,342,609,411,777,000 | 37.759162 | 79 | 0.574767 | false | 4.138066 | false | false | false |
Klaudit/livestreamer | src/livestreamer/plugins/rtve.py | 32 | 1661 | import re
from livestreamer.plugin import Plugin, PluginError
from livestreamer.plugin.api import http
from livestreamer.stream import HLSStream
# The last four channel_paths respond with 301 and provide
# a redirect location that corresponds to a channel_path above.
_url_re = re.compile(r"""
https?://www\.rtve\.es/
(?P<channel_path>
directo/la-1|
directo/la-2|
directo/teledeporte|
directo/canal-24h|
noticias/directo-la-1|
television/la-2-directo|
deportes/directo/teledeporte|
noticias/directo/canal-24h
)
/?
""", re.VERBOSE)
_id_map = {
"directo/la-1": "LA1",
"directo/la-2": "LA2",
"directo/teledeporte": "TDP",
"directo/canal-24h": "24H",
"noticias/directo-la-1": "LA1",
"television/la-2-directo": "LA2",
"deportes/directo/teledeporte": "TDP",
"noticias/directo/canal-24h": "24H",
}
class Rtve(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def __init__(self, url):
Plugin.__init__(self, url)
match = _url_re.match(url).groupdict()
self.channel_path = match["channel_path"]
def _get_streams(self):
stream_id = _id_map[self.channel_path]
hls_url = "http://iphonelive.rtve.es/{0}_LV3_IPH/{0}_LV3_IPH.m3u8".format(stream_id)
# Check if the stream is available
res = http.head(hls_url, raise_for_status=False)
if res.status_code == 404:
raise PluginError("The program is not available due to rights restrictions")
return HLSStream.parse_variant_playlist(self.session, hls_url)
__plugin__ = Rtve
| bsd-2-clause | -4,746,790,248,737,518,000 | 27.637931 | 92 | 0.629741 | false | 2.971377 | false | false | false |
terbolous/SickRage | lib/guessit/rules/properties/episodes.py | 18 | 21850 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
episode, season, episode_count, season_count and episode_details properties
"""
import copy
from collections import defaultdict
from rebulk import Rebulk, RemoveMatch, Rule, AppendMatch, RenameMatch
from rebulk.match import Match
from rebulk.remodule import re
from rebulk.utils import is_iterable
from .title import TitleFromPosition
from ..common import dash, alt_dash, seps
from ..common.formatters import strip
from ..common.numeral import numeral, parse_numeral
from ..common.validators import compose, seps_surround, seps_before, int_coercable
from ...reutils import build_or_pattern
def episodes():
"""
Builder for rebulk object.
:return: Created Rebulk object
:rtype: Rebulk
"""
# pylint: disable=too-many-branches,too-many-statements,too-many-locals
rebulk = Rebulk()
rebulk.regex_defaults(flags=re.IGNORECASE).string_defaults(ignore_case=True)
rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator'])
def season_episode_conflict_solver(match, other):
"""
Conflict solver for episode/season patterns
:param match:
:param other:
:return:
"""
if match.name in ['season', 'episode'] and other.name in ['screen_size', 'video_codec',
'audio_codec', 'audio_channels',
'container', 'date']:
return match
elif match.name in ['season', 'episode'] and other.name in ['season', 'episode'] \
and match.initiator != other.initiator:
if 'weak-episode' in match.tags:
return match
if 'weak-episode' in other.tags:
return other
if 'x' in match.initiator.raw.lower():
return match
if 'x' in other.initiator.raw.lower():
return other
return '__default__'
season_episode_seps = []
season_episode_seps.extend(seps)
season_episode_seps.extend(['x', 'X', 'e', 'E'])
season_words = ['season', 'saison', 'serie', 'seasons', 'saisons', 'series']
episode_words = ['episode', 'episodes', 'ep']
of_words = ['of', 'sur']
all_words = ['All']
season_markers = ["S"]
season_ep_markers = ["x"]
episode_markers = ["xE", "Ex", "EP", "E", "x"]
range_separators = ['-', '~', 'to', 'a']
weak_discrete_separators = list(sep for sep in seps if sep not in range_separators)
strong_discrete_separators = ['+', '&', 'and', 'et']
discrete_separators = strong_discrete_separators + weak_discrete_separators
def ordering_validator(match):
"""
Validator for season list. They should be in natural order to be validated.
episode/season separated by a weak discrete separator should be consecutive, unless a strong discrete separator
or a range separator is present in the chain (1.3&5 is valid, but 1.3-5 is not valid and 1.3.5 is not valid)
"""
values = match.children.to_dict(implicit=True)
if 'season' in values and is_iterable(values['season']):
# Season numbers must be in natural order to be validated.
if not list(sorted(values['season'])) == values['season']:
return False
if 'episode' in values and is_iterable(values['episode']):
# Season numbers must be in natural order to be validated.
if not list(sorted(values['episode'])) == values['episode']:
return False
def is_consecutive(property_name):
"""
Check if the property season or episode has valid consecutive values.
:param property_name:
:type property_name:
:return:
:rtype:
"""
previous_match = None
valid = True
for current_match in match.children.named(property_name):
if previous_match:
match.children.previous(current_match,
lambda m: m.name == property_name + 'Separator')
separator = match.children.previous(current_match,
lambda m: m.name == property_name + 'Separator', 0)
if separator.raw not in range_separators and separator.raw in weak_discrete_separators:
if not current_match.value - previous_match.value == 1:
valid = False
if separator.raw in strong_discrete_separators:
valid = True
break
previous_match = current_match
return valid
return is_consecutive('episode') and is_consecutive('season')
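    # Added clarification (not in the original guessit source): in the docstring
    # example above, "1.3&5" passes because the strong separator '&' ends the
    # consecutiveness check, while "1.3.5" fails because '.' is a weak separator
    # and 3 -> 5 is not consecutive; "1.3-5" also fails because the weak '.'
    # already broke the chain before the range separator is reached.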
# S01E02, 01x02, S01S02S03
rebulk.chain(formatter={'season': int, 'episode': int},
tags=['SxxExx'],
abbreviations=[alt_dash],
children=True,
private_parent=True,
validate_all=True,
validator={'__parent__': ordering_validator},
conflict_solver=season_episode_conflict_solver) \
.regex(build_or_pattern(season_markers) + r'(?P<season>\d+)@?' +
build_or_pattern(episode_markers) + r'@?(?P<episode>\d+)',
validate_all=True,
validator={'__parent__': seps_before}).repeater('+') \
.regex(build_or_pattern(episode_markers + discrete_separators + range_separators,
name='episodeSeparator',
escape=True) +
r'(?P<episode>\d+)').repeater('*') \
.chain() \
.regex(r'(?P<season>\d+)@?' +
build_or_pattern(season_ep_markers) +
r'@?(?P<episode>\d+)',
validate_all=True,
validator={'__parent__': seps_before}) \
.chain() \
.regex(r'(?P<season>\d+)@?' +
build_or_pattern(season_ep_markers) +
r'@?(?P<episode>\d+)',
validate_all=True,
validator={'__parent__': seps_before}) \
.regex(build_or_pattern(season_ep_markers + discrete_separators + range_separators,
name='episodeSeparator',
escape=True) +
r'(?P<episode>\d+)').repeater('*') \
.chain() \
.regex(build_or_pattern(season_markers) + r'(?P<season>\d+)',
validate_all=True,
validator={'__parent__': seps_before}) \
.regex(build_or_pattern(season_markers + discrete_separators + range_separators,
name='seasonSeparator',
escape=True) +
r'(?P<season>\d+)').repeater('*')
# episode_details property
for episode_detail in ('Special', 'Bonus', 'Omake', 'Ova', 'Oav', 'Pilot', 'Unaired'):
rebulk.string(episode_detail, value=episode_detail, name='episode_details')
rebulk.regex(r'Extras?', name='episode_details', value='Extras')
rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator'],
validate_all=True, validator={'__parent__': seps_surround}, children=True, private_parent=True)
def validate_roman(match):
"""
Validate a roman match if surrounded by separators
:param match:
:type match:
:return:
:rtype:
"""
if int_coercable(match.raw):
return True
return seps_surround(match)
rebulk.chain(abbreviations=[alt_dash],
formatter={'season': parse_numeral, 'count': parse_numeral},
validator={'__parent__': compose(seps_surround, ordering_validator),
'season': validate_roman,
'count': validate_roman}) \
.defaults(validator=None) \
.regex(build_or_pattern(season_words) + '@?(?P<season>' + numeral + ')') \
.regex(r'' + build_or_pattern(of_words) + '@?(?P<count>' + numeral + ')').repeater('?') \
.regex(r'@?(?P<seasonSeparator>' +
build_or_pattern(range_separators + discrete_separators + ['@'], escape=True) +
r')@?(?P<season>\d+)').repeater('*')
rebulk.regex(build_or_pattern(episode_words) + r'-?(?P<episode>\d+)' +
r'(?:v(?P<version>\d+))?' +
r'(?:-?' + build_or_pattern(of_words) + r'-?(?P<count>\d+))?', # Episode 4
abbreviations=[dash], formatter=int,
disabled=lambda context: context.get('type') == 'episode')
rebulk.regex(build_or_pattern(episode_words) + r'-?(?P<episode>' + numeral + ')' +
r'(?:v(?P<version>\d+))?' +
r'(?:-?' + build_or_pattern(of_words) + r'-?(?P<count>\d+))?', # Episode 4
abbreviations=[dash],
validator={'episode': validate_roman},
formatter={'episode': parse_numeral, 'version': int, 'count': int},
disabled=lambda context: context.get('type') != 'episode')
rebulk.regex(r'S?(?P<season>\d+)-?(?:xE|Ex|E|x)-?(?P<other>' + build_or_pattern(all_words) + ')',
tags=['SxxExx'],
abbreviations=[dash],
validator=None,
formatter={'season': int, 'other': lambda match: 'Complete'})
rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator'], validate_all=True,
validator={'__parent__': seps_surround}, children=True, private_parent=True)
# 12, 13
rebulk.chain(tags=['bonus-conflict', 'weak-movie', 'weak-episode'], formatter={'episode': int, 'version': int}) \
.defaults(validator=None) \
.regex(r'(?P<episode>\d{2})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{2})').repeater('*')
# 012, 013
rebulk.chain(tags=['bonus-conflict', 'weak-movie', 'weak-episode'], formatter={'episode': int, 'version': int}) \
.defaults(validator=None) \
.regex(r'0(?P<episode>\d{1,2})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>[x-])0(?P<episode>\d{1,2})').repeater('*')
# 112, 113
rebulk.chain(tags=['bonus-conflict', 'weak-movie', 'weak-episode'], formatter={'episode': int, 'version': int},
disabled=lambda context: not context.get('episode_prefer_number', False)) \
.defaults(validator=None) \
.regex(r'(?P<episode>\d{3,4})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{3,4})').repeater('*')
# 1, 2, 3
rebulk.chain(tags=['bonus-conflict', 'weak-movie', 'weak-episode'], formatter={'episode': int, 'version': int},
disabled=lambda context: context.get('type') != 'episode') \
.defaults(validator=None) \
.regex(r'(?P<episode>\d)') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{1,2})').repeater('*')
# e112, e113
# TODO: Enhance rebulk for validator to be used globally (season_episode_validator)
rebulk.chain(formatter={'episode': int, 'version': int}) \
.defaults(validator=None) \
.regex(r'e(?P<episode>\d{1,4})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>e|x|-)(?P<episode>\d{1,4})').repeater('*')
# ep 112, ep113, ep112, ep113
rebulk.chain(abbreviations=[dash], formatter={'episode': int, 'version': int}) \
.defaults(validator=None) \
.regex(r'ep-?(?P<episode>\d{1,4})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>ep|e|x|-)(?P<episode>\d{1,4})').repeater('*')
# 102, 0102
rebulk.chain(tags=['bonus-conflict', 'weak-movie', 'weak-episode', 'weak-duplicate'],
formatter={'season': int, 'episode': int, 'version': int},
conflict_solver=lambda match, other: match if other.name == 'year' else '__default__',
disabled=lambda context: context.get('episode_prefer_number', False)) \
.defaults(validator=None) \
.regex(r'(?P<season>\d{1,2})(?P<episode>\d{2})') \
.regex(r'v(?P<version>\d+)').repeater('?') \
.regex(r'(?P<episodeSeparator>x|-)(?P<episode>\d{2})').repeater('*')
rebulk.regex(r'v(?P<version>\d+)', children=True, private_parent=True, formatter=int)
rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator'])
# TODO: List of words
# detached of X count (season/episode)
rebulk.regex(r'(?P<episode>\d+)?-?' + build_or_pattern(of_words) +
r'-?(?P<count>\d+)-?' + build_or_pattern(episode_words) + '?',
abbreviations=[dash], children=True, private_parent=True, formatter=int)
rebulk.regex(r'Minisodes?', name='episode_format', value="Minisode")
    # Hardcoded movie to disable weak season/episodes
rebulk.regex('OSS-?117',
abbreviations=[dash], name="hardcoded-movies", marker=True,
conflict_solver=lambda match, other: None)
rebulk.rules(EpisodeNumberSeparatorRange(range_separators),
SeasonSeparatorRange(range_separators), RemoveWeakIfMovie, RemoveWeakIfSxxExx,
RemoveWeakDuplicate, EpisodeDetailValidator, RemoveDetachedEpisodeNumber, VersionValidator,
CountValidator, EpisodeSingleDigitValidator)
return rebulk
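# Illustrative usage sketch (added note, not part of guessit itself): the Rebulk
# object built above is normally merged into guessit's full rule set, but,
# assuming the standard Rebulk API, it can also be exercised on its own, e.g.:
#
#     matches = episodes().matches('Show.Name.S02E05.720p.mkv', {})
#     # each match exposes .name ('season'/'episode') and .value (2, 5)
#
# The filename and the empty context dict are made-up examples.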
class CountValidator(Rule):
"""
Validate count property and rename it
"""
priority = 64
consequence = [RemoveMatch, RenameMatch('episode_count'), RenameMatch('season_count')]
properties = {'episode_count': [None], 'season_count': [None]}
def when(self, matches, context):
to_remove = []
episode_count = []
season_count = []
for count in matches.named('count'):
previous = matches.previous(count, lambda match: match.name in ['episode', 'season'], 0)
if previous:
if previous.name == 'episode':
episode_count.append(count)
elif previous.name == 'season':
season_count.append(count)
else:
to_remove.append(count)
return to_remove, episode_count, season_count
class AbstractSeparatorRange(Rule):
"""
    Remove separator matches and create matches for the value range of the given property.
"""
priority = 128
consequence = [RemoveMatch, AppendMatch]
def __init__(self, range_separators, property_name):
super(AbstractSeparatorRange, self).__init__()
self.range_separators = range_separators
self.property_name = property_name
def when(self, matches, context):
to_remove = []
to_append = []
for separator in matches.named(self.property_name + 'Separator'):
previous_match = matches.previous(separator, lambda match: match.name == self.property_name, 0)
next_match = matches.next(separator, lambda match: match.name == self.property_name, 0)
if previous_match and next_match and separator.value in self.range_separators:
for episode_number in range(previous_match.value + 1, next_match.value):
match = copy.copy(next_match)
match.value = episode_number
to_append.append(match)
to_remove.append(separator)
previous_match = None
for next_match in matches.named(self.property_name):
if previous_match:
separator = matches.input_string[previous_match.initiator.end:next_match.initiator.start]
if separator not in self.range_separators:
separator = strip(separator)
if separator in self.range_separators:
for episode_number in range(previous_match.value + 1, next_match.value):
match = copy.copy(next_match)
match.value = episode_number
to_append.append(match)
to_append.append(Match(previous_match.end, next_match.start - 1,
name=self.property_name + 'Separator',
private=True,
input_string=matches.input_string))
to_remove.append(next_match) # Remove and append match to support proper ordering
to_append.append(next_match)
previous_match = next_match
return to_remove, to_append
class EpisodeNumberSeparatorRange(AbstractSeparatorRange):
"""
    Remove separator matches and create matches for episode number range.
"""
priority = 128
consequence = [RemoveMatch, AppendMatch]
def __init__(self, range_separators):
super(EpisodeNumberSeparatorRange, self).__init__(range_separators, "episode")
class SeasonSeparatorRange(AbstractSeparatorRange):
"""
Remove separator matches and create matches for season range.
"""
priority = 128
consequence = [RemoveMatch, AppendMatch]
def __init__(self, range_separators):
super(SeasonSeparatorRange, self).__init__(range_separators, "season")
class RemoveWeakIfMovie(Rule):
"""
Remove weak-movie tagged matches if it seems to be a movie.
"""
priority = 64
consequence = RemoveMatch
def when(self, matches, context):
if matches.named('year') or matches.markers.named('hardcoded-movies'):
return matches.tagged('weak-movie')
class RemoveWeakIfSxxExx(Rule):
"""
Remove weak-movie tagged matches if SxxExx pattern is matched.
"""
priority = 64
consequence = RemoveMatch
def when(self, matches, context):
if matches.tagged('SxxExx', lambda match: not match.private):
return matches.tagged('weak-movie')
class RemoveWeakDuplicate(Rule):
"""
    Remove weak-duplicate tagged matches if duplicate patterns are found, for example The 100.109
"""
priority = 64
consequence = RemoveMatch
def when(self, matches, context):
to_remove = []
for filepart in matches.markers.named('path'):
patterns = defaultdict(list)
for match in reversed(matches.range(filepart.start, filepart.end,
predicate=lambda match: 'weak-duplicate' in match.tags)):
if match.pattern in patterns[match.name]:
to_remove.append(match)
else:
patterns[match.name].append(match.pattern)
return to_remove
class EpisodeDetailValidator(Rule):
"""
Validate episode_details if they are detached or next to season or episode.
"""
priority = 64
consequence = RemoveMatch
def when(self, matches, context):
ret = []
for detail in matches.named('episode_details'):
if not seps_surround(detail) \
and not matches.previous(detail, lambda match: match.name in ['season', 'episode']) \
and not matches.next(detail, lambda match: match.name in ['season', 'episode']):
ret.append(detail)
return ret
class RemoveDetachedEpisodeNumber(Rule):
"""
    If multiple episodes are found, remove those that are not detached from a range and are less than 10.
Fairy Tail 2 - 16-20, 2 should be removed.
"""
priority = 64
consequence = RemoveMatch
dependency = [RemoveWeakIfSxxExx, RemoveWeakDuplicate]
def when(self, matches, context):
ret = []
episode_numbers = []
episode_values = set()
for match in matches.named('episode', lambda match: not match.private and 'weak-movie' in match.tags):
if match.value not in episode_values:
episode_numbers.append(match)
episode_values.add(match.value)
episode_numbers = list(sorted(episode_numbers, key=lambda match: match.value))
if len(episode_numbers) > 1 and \
episode_numbers[0].value < 10 and \
episode_numbers[1].value - episode_numbers[0].value != 1:
parent = episode_numbers[0]
while parent: # TODO: Add a feature in rebulk to avoid this ...
ret.append(parent)
parent = parent.parent
return ret
class VersionValidator(Rule):
"""
Validate version if previous match is episode or if surrounded by separators.
"""
priority = 64
dependency = [RemoveWeakIfMovie, RemoveWeakIfSxxExx]
consequence = RemoveMatch
def when(self, matches, context):
ret = []
for version in matches.named('version'):
episode_number = matches.previous(version, lambda match: match.name == 'episode', 0)
if not episode_number and not seps_surround(version.initiator):
ret.append(version)
return ret
class EpisodeSingleDigitValidator(Rule):
"""
    Remove single-digit episode when inside a group that doesn't own the title.
"""
dependency = [TitleFromPosition]
consequence = RemoveMatch
def when(self, matches, context):
ret = []
for episode in matches.named('episode', lambda match: len(match.initiator) == 1):
group = matches.markers.at_match(episode, lambda marker: marker.name == 'group', index=0)
if group:
if not matches.range(*group.span, predicate=lambda match: match.name == 'title'):
ret.append(episode)
return ret
| gpl-3.0 | -3,951,014,639,551,786,000 | 41.344961 | 119 | 0.573272 | false | 4.062093 | false | false | false |
sdykes3/myrtleApp | server/server.py | 1 | 6757 | #!/usr/bin/env python
import web
import json
urls = (
'/checkIngredients', 'checkIngredients',
'/markOut/(.*)', 'markOut',
'/markIn/(.*)', 'markIn'
)
jsonString = '''[
{
"type": "liquor",
"id" : "brandy",
"name": "Brandy",
"inStock": true
},
{
"type": "liquor",
"id" : "bourbon",
"name": "Bourbon",
"inStock": true
},
{
"type": "liquor",
"id" : "dark-rum",
"name": "Dark Rum",
"inStock": false
},
{
"type": "liquor",
"id" : "gin",
"name": "Gin",
"inStock": false
},
{
"type": "liquor",
"id" : "light-rum",
"name": "Light Rum",
"inStock": false
},
{
"type": "liquor",
"id" : "tequila",
"name": "Tequila",
"inStock": true
},
{
"type": "liquor",
"id" : "vodka",
"name": "Vodka",
"inStock": true
},
{
"type": "liquor",
"id" : "whiskey",
"name": "Whiskey",
"inStock": true
},
{
"type": "mixer",
"id" : "amaretto",
"name": "Amaretto",
"inStock": true
},
{
"type": "mixer",
"id" : "bitters",
"name": "Bitters",
"inStock": true
},
{
"type": "mixer",
"id" : "blue",
"name": "Blue Curacao",
"inStock": true
},
{
"type": "mixer",
"id" : "champagne",
"name": "Champagne",
"inStock": true
},
{
"type": "mixer",
"id" : "club-soda",
"name": "Club Soda",
"inStock": true
},
{
"type": "mixer",
"id" : "creme-de-cacao",
"name": "Creme de Cacao",
"inStock": true
},
{
"type": "mixer",
"id" : "creme-de-menth",
"name": "Creme de Menthe",
"inStock": true
},
{
"type": "mixer",
"id" : "dry-vermouth",
"name": "Dry Vermouth",
"inStock": true
},
{
"type": "mixer",
"id" : "grenadine",
"name": "Grenadine",
"inStock": true
},
{
"type": "mixer",
"id" : "ginger-ale",
"name": "Ginger Ale",
"inStock": true
},
{
"type": "mixer",
"id" : "ginger-beer",
"name": "Ginger Beer",
"inStock": true
},
{
"type": "mixer",
"id" : "irish-cream",
"name": "Irish Cream",
"inStock": true
},
{
"type": "mixer",
"id" : "apple-juice",
"name": "Juice - Apple",
"inStock": true
},
{
"type": "mixer",
"id" : "cranberry-juice",
"name": "Juice - Cranberry",
"inStock": true
},
{
"type": "mixer",
"id" : "grapefruit-juice",
"name": "Juice - Grapefruit",
"inStock": true
},
{
"type": "mixer",
"id" : "lemon-juice",
"name": "Juice - Lemon",
"inStock": true
},
{
"type": "mixer",
"id" : "lime-juice",
"name": "Juice - Lime",
"inStock": true
},
{
"type": "mixer",
"id" : "mango-juice",
"name": "Juice - Mango",
"inStock": true
},
{
"type": "mixer",
"id" : "orange-juice",
"name": "Juice - Orange",
"inStock": true
},
{
"type": "mixer",
"id" : "peach-juice",
"name": "Juice - Peach",
"inStock": true
},
{
"type": "mixer",
"id" : "pineapple-juice",
"name": "Juice - Pineapple",
"inStock": true
},
{
"type": "mixer",
"id" : "kahlua",
"name": "Kahlua",
"inStock": true
},
{
"type": "mixer",
"id" : "melon-liqueur",
"name": "Melon Liqueur",
"inStock": true
},
{
"type": "mixer",
"id" : "orange-schnapps",
"name": "Orange Schnapps",
"inStock": true
},
{
"type": "mixer",
"id" : "peach-schnapps",
"name": "Peach Schnapps",
"inStock": true
},
{
"type": "mixer",
"id" : "simple-syrup",
"name": "Simple Syrup",
"inStock": true
},
{
"type": "mixer",
"id" : "cola",
"name": "Soda - Cola",
"inStock": true
},
{
"type": "mixer",
"id" : "sprite",
"name": "Soda - Sprite",
"inStock": true
},
{
"type": "mixer",
"id" : "sour-mix",
"name": "Sour Mix",
"inStock": true
},
{
"type": "mixer",
"id" : "so-co",
"name": "Southern Comfort",
"inStock": true
},
{
"type": "mixer",
"id" : "sweet-lime",
"name": "Sweet Lime",
"inStock": true
},
{
"type": "mixer",
"id" : "sweet-sour-mix",
"name": "Sweet & Sour Mix",
"inStock": true
},
{
"type": "mixer",
"id" : "sweet-vermouth",
"name": "Sweet Vermouth",
"inStock": true
},
{
"type": "mixer",
"id" : "triple-sec",
"name": "Triple Sec",
"inStock": true
},
{
"type": "mixer",
"id" : "wine",
"name": "Wine",
"inStock": true
},
{
"type": "other",
"id" : "cinnamon",
"name": "Cinnamon",
"inStock": true
},
{
"type": "other",
"id" : "cream",
"name": "Cream",
"inStock": true
},
{
"type": "other",
"id" : "lime",
"name": "Lime",
"inStock": true
},
{
"type": "other",
"id" : "lemon",
"name": "Lemon",
"inStock": true
},
{
"type": "other",
"id" : "cherry",
"name": "Maraschino Cherry",
"inStock": true
},
{
"type": "other",
"id" : "milk",
"name": "Milk",
"inStock": true
},
{
"type": "other",
"id" : "mint",
"name": "Mint",
"inStock": true
},
{
"type": "other",
"id" : "strawberry",
"name": "Strawberry",
"inStock": true
},
{
"type": "other",
"id" : "sugar",
"name": "Sugar",
"inStock": true
}
]'''
ingredients = json.loads(jsonString)
class checkIngredients:
def GET(self):
web.header('Access-Control-Allow-Origin', '*')
web.header('Access-Control-Allow-Credentials', 'true')
return json.dumps(ingredients)
class markOut:
def GET(self, drink):
web.header('Access-Control-Allow-Origin', '*')
web.header('Access-Control-Allow-Credentials', 'true')
for ing in ingredients:
if drink == ing['id']:
ing['inStock'] = False
return 'Marked ' + str(drink) + ' as out of stock'
return 'Couldn\'t find ' + str(drink)
class markIn:
def GET(self, drink):
web.header('Access-Control-Allow-Origin', '*')
web.header('Access-Control-Allow-Credentials', 'true')
for ing in ingredients:
if drink == ing['id']:
ing['inStock'] = True
return 'Marked ' + str(drink) + ' as in stock'
return 'Couldn\'t find ' + str(drink)
class MyApplication(web.application):
def run(self, port=8080, *middleware):
func = self.wsgifunc(*middleware)
return web.httpserver.runsimple(func, ('0.0.0.0', port))
if __name__ == "__main__":
app = MyApplication(urls, globals())
app.run(port=8888)
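# Example requests (added for illustration; localhost and the ingredient id are
# assumptions, the port comes from app.run above):
# curl http://localhost:8888/checkIngredients   -> JSON list of all ingredients
# curl http://localhost:8888/markOut/gin        -> "Marked gin as out of stock"
# curl http://localhost:8888/markIn/gin         -> "Marked gin as in stock"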
| mit | -7,296,965,727,733,815,000 | 14.569124 | 66 | 0.462335 | false | 2.726796 | false | false | false |
msurkovsky/kaira | gui/settings.py | 6 | 2850 |
import gtk
class SettingsWidget(gtk.Notebook):
def __init__(self, app):
gtk.Notebook.__init__(self)
self.app = app
self.set_tab_pos(gtk.POS_LEFT)
self.append_page(self._generic_settings(), gtk.Label("Generic"))
self.append_page(self._completion_settings(), gtk.Label("Completion"))
self.show_all()
def _generic_settings(self):
def set(section, name, value):
self.app.settings.set(section, name, str(value))
self.app.save_settings()
def settings_button(section, name, descritpion):
button = gtk.CheckButton(descritpion)
button.set_active(self.app.settings.getboolean(section, name))
button.connect("toggled",
lambda w: set(section, name, w.get_active()))
vbox.pack_start(button, False, False)
vbox = gtk.VBox()
settings_button("main", "save-before-build", "Save project before build")
settings_button("main", "ptp-debug", "PTP debugging")
return vbox
def _completion_settings(self):
def set(section, name, value):
self.app.settings.set(section, name, str(value))
self.app.save_settings()
def add_check_button(section, name, description):
button = gtk.CheckButton(description)
button.set_active(self.app.settings.getboolean(section,name))
button.connect("toggled",
lambda w: set(section, name, button.get_active()))
vbox.pack_start(button, False, False)
def add_spin_box(section,name,description, numeric = False, digits = 1, range = (1, 12)):
hbox = gtk.HBox()
spin = gtk.SpinButton()
spin.set_digits(digits)
spin.set_increments(1,2)
spin.set_range(range[0], range[1])
spin.set_numeric(numeric)
spin.set_value(self.app.settings.getfloat("code_completion", name))
spin.connect("value-changed", lambda w: set(section, name, str(spin.get_value())))
hbox.pack_start(gtk.Label(description), False, False)
hbox.pack_start(spin, False, False)
vbox.pack_start(hbox, False, False)
vbox = gtk.VBox()
add_check_button("code_completion","enable_highlight_current_line","Highlight current line")
add_check_button("code_completion", "enable_show_line_numbers", "Show line numbers")
add_spin_box("code_completion", "tab_width", "Tab size", numeric = True)
add_check_button("code_completion","enable_info_box","Show info box")
add_spin_box("code_completion", "delay_info_box", "Delay for info box in ms",
numeric = True,
digits = 0,
range = (0, 3000))
return vbox | gpl-3.0 | -8,428,830,736,328,680,000 | 42.861538 | 100 | 0.584912 | false | 3.914835 | false | false | false |
sghai/robottelo | robottelo/cli/auth.py | 3 | 1157 | # -*- encoding: utf-8 -*-
"""
Usage::
hammer auth [OPTIONS] SUBCOMMAND [ARG] ...
Parameters::
SUBCOMMAND subcommand
[ARG] ... subcommand arguments
Subcommands::
login Set credentials
logout Wipe your credentials
status Information about current connections
"""
from robottelo.cli.base import Base
class Auth(Base):
""" Authenticates Foreman users """
command_base = 'auth'
@classmethod
def login(cls, options=None):
"""Set credentials"""
cls.command_sub = 'login'
return cls.execute(
cls._construct_command(options), output_format='csv')
@classmethod
def logout(cls, options=None):
"""Wipe credentials"""
cls.command_sub = 'logout'
return cls.execute(
cls._construct_command(options), output_format='csv')
@classmethod
def status(cls, options=None):
"""Show login status"""
cls.command_sub = 'status'
return cls.execute(
cls._construct_command(options), output_format='csv')
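# Illustrative usage sketch (added; the option names are assumptions about the
# hammer CLI, not taken from this module):
#
# Auth.login({'username': 'admin', 'password': 'changeme'})
# Auth.status()
# Auth.logout()
#
# Each call shells out to "hammer auth <subcommand>" through Base.execute and
# returns the parsed CSV output.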
| gpl-3.0 | -7,402,639,921,279,210,000 | 25.906977 | 71 | 0.560933 | false | 4.537255 | false | false | false |
nmc-probe/emulab-nome | protogeni/test/version1/tuntest.py | 1 | 11262 | #! /usr/bin/env python
#
# Copyright (c) 2008-2010 University of Utah and the Flux Group.
#
# {{{GENIPUBLIC-LICENSE
#
# GENI Public License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#
# }}}
#
#
#
import sys
import pwd
import getopt
import os
import re
import xmlrpclib
import urllib
from xml.sax.handler import ContentHandler
import xml.sax
import string
from M2Crypto import X509
ACCEPTSLICENAME=1
def Usage():
print "usage: " + sys.argv[ 0 ] + " [option...] \
[component-manager-1 component-manager-2]"
print """Options:
-c file, --credentials=file read self-credentials from file
[default: query from SA]
-d, --debug be verbose about XML methods invoked
-f file, --certificate=file read SSL certificate from file
[default: ~/.ssl/encrypted.pem]
-h, --help show options and usage
-n name, --slicename=name specify human-readable name of slice
[default: mytestslice]
-p file, --passphrase=file read passphrase from file
[default: ~/.ssl/password]
-r file, --read-commands=file specify additional configuration file
-s file, --slicecredentials=file read slice credentials from file
[default: query from SA]"""
execfile( "test-common.py" )
if len( args ) == 2:
managers = ( args[ 0 ], args[ 1 ] )
elif len( args ):
Usage()
sys.exit( 1 )
else:
managers = None
class findElement(ContentHandler):
name = None
value = None
string = None
attributes = None
data = None
def __init__(self, name, stuff):
self.name = name
xml.sax.parseString(stuff, self)
pass
def startElement(self, name, attrs):
if self.name == name:
self.data = []
self.attributes = attrs
elif self.data != None:
self.data.append("<" + name + ">")
pass
pass
def characters(self, content):
if self.data != None:
self.data.append(content)
pass
pass
def endElement(self, name):
if self.name == name:
self.value = string.join(self.data, "");
self.string = "<" + name + ">" + self.value + "</" + name + ">"
self.data = None;
elif self.data != None:
self.data.append("</" + name + ">")
pass
pass
pass
#
# Get a credential for myself, that allows me to do things at the SA.
#
mycredential = get_self_credential()
print "Got my SA credential"
#
# Lookup slice.
#
params = {}
params["credential"] = mycredential
params["type"] = "Slice"
params["hrn"] = SLICENAME
rval,response = do_method("sa", "Resolve", params)
if rval:
#
# Create a slice.
#
print "Creating new slice called " + SLICENAME
params = {}
params["credential"] = mycredential
params["type"] = "Slice"
params["hrn"] = SLICENAME
rval,response = do_method("sa", "Register", params)
if rval:
Fatal("Could not create new slice")
pass
myslice = response["value"]
print "New slice created"
pass
else:
#
# Get the slice credential.
#
print "Asking for slice credential for " + SLICENAME
myslice = response["value"]
myslice = get_slice_credential( myslice, mycredential )
print "Got the slice credential"
pass
#
# Ask the clearinghouse for a list of component managers.
#
params = {}
params["credential"] = mycredential
rval,response = do_method("ch", "ListComponents", params)
if rval:
Fatal("Could not get a list of components from the ClearingHouse")
pass
components = response["value"];
if managers:
def FindCM( name, cmlist ):
for cm in cmlist:
hrn = cm[ "hrn" ]
if hrn == name or hrn == name + ".cm":
return cm[ "url" ]
Fatal( "Could not find component manager " + name )
url1 = FindCM( managers[ 0 ], components )
url2 = FindCM( managers[ 1 ], components )
else:
url1 = components[0]["url"]
url2 = components[1]["url"]
#url1 = "https://boss.emulab.net/protogeni/stoller/xmlrpc/cm"
#url2 = "https://myboss.myelab.testbed.emulab.net/protogeni/xmlrpc/cm"
#
# Get a ticket for a node on a CM.
#
rspec1 = "<rspec xmlns=\"http://protogeni.net/resources/rspec/0.1\"> " +\
" <node virtual_id=\"geni1\" "+\
" virtualization_type=\"emulab-vnode\"> " +\
" </node>" +\
"</rspec>"
print "Asking for a ticket from CM1 ..."
params = {}
params["credential"] = myslice
params["rspec"] = rspec1
rval,response = do_method(None, "GetTicket", params, URI=url1)
if rval:
if response and response["value"]:
print >> sys.stderr, ""
print >> sys.stderr, str(response["value"])
print >> sys.stderr, ""
pass
Fatal("Could not get ticket")
pass
ticket1 = response["value"]
print "Got a ticket from CM1, asking for a ticket from CM2 ..."
#
# Get the uuid of the node assigned so we can specify it in the tunnel.
#
ticket_element = findElement("ticket", ticket1)
node_element = findElement("node", str(ticket_element.string))
node1_rspec = str(node_element.string);
#
# Get a ticket for a node on another CM.
#
rspec2 = "<rspec xmlns=\"http://protogeni.net/resources/rspec/0.1\"> " +\
" <node virtual_id=\"geni2\" "+\
" virtualization_type=\"emulab-vnode\"> " +\
" </node>" +\
"</rspec>"
params = {}
params["credential"] = myslice
params["rspec"] = rspec2
rval,response = do_method(None, "GetTicket", params, URI=url2)
if rval:
if response and response["value"]:
print >> sys.stderr, ""
print >> sys.stderr, str(response["value"])
print >> sys.stderr, ""
pass
Fatal("Could not get ticket")
pass
ticket2 = response["value"]
print "Got a ticket from CM2, redeeming ticket on CM1 ..."
#
# Get the uuid of the node assigned so we can specify it in the tunnel.
#
ticket_element = findElement("ticket", ticket2)
node_element = findElement("node", str(ticket_element.string))
node2_rspec = str(node_element.string);
#
# Create the slivers.
#
params = {}
params["credential"] = myslice
params["ticket"] = ticket1
rval,response = do_method(None, "RedeemTicket", params, url1)
if rval:
Fatal("Could not redeem ticket on CM1")
pass
sliver1,manifest1 = response["value"]
print "Created a sliver on CM1, redeeming ticket on CM2 ..."
print str(manifest1);
params = {}
params["credential"] = myslice
params["ticket"] = ticket2
rval,response = do_method(None, "RedeemTicket", params, url2)
if rval:
Fatal("Could not redeem ticket on CM2")
pass
sliver2,manifest2 = response["value"]
print "Created a sliver on CM2"
print str(manifest2)
#
# Now add the tunnel part since we have the uuids for the two nodes.
#
rspec = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" +\
"<rspec xmlns=\"http://www.protogeni.net/resources/rspec/0.1\" " +\
" type=\"request\"> " + node1_rspec + " " + node2_rspec + " " +\
" <link virtual_id=\"link0\" link_type=\"tunnel\"> " +\
" <interface_ref virtual_node_id=\"geni1\" " +\
" virtual_interface_id=\"virt0\" "+\
" tunnel_ip=\"192.168.1.1\" />" +\
" <interface_ref virtual_node_id=\"geni2\" " +\
" virtual_interface_id=\"virt0\" "+\
" tunnel_ip=\"192.168.1.2\" />" +\
" </link> " +\
"</rspec>"
#print str(rspec)
print "Updating ticket on CM1 with tunnel stuff ..."
params = {}
params["credential"] = myslice
params["ticket"] = ticket1
params["rspec"] = rspec
rval,response = do_method(None, "UpdateTicket", params, url1)
if rval:
Fatal("Could not update ticket on CM1")
pass
ticket1 = response["value"]
print "Updated ticket on CM1. Updating ticket on CM2 with tunnel stuff ..."
#
# And again for the second ticket.
#
params = {}
params["credential"] = myslice
params["ticket"] = ticket2
params["rspec"] = rspec
rval,response = do_method(None, "UpdateTicket", params, url2)
if rval:
Fatal("Could not update ticket on CM2")
pass
ticket2 = response["value"]
print "Updated ticket on CM2. Updating sliver on CM1 with new ticket ..."
#
# Update the slivers with the new tickets, to create the tunnels
#
params = {}
params["credential"] = sliver1
params["ticket"] = ticket1
rval,response = do_method(None, "UpdateSliver", params, url1)
if rval:
Fatal("Could not update sliver on CM1")
pass
manifest1 = response["value"]
print "Updated sliver on CM1. Updating sliver on CM2 with new ticket ..."
#print str(manifest1);
params = {}
params["credential"] = sliver2
params["ticket"] = ticket2
rval,response = do_method(None, "UpdateSliver", params, url2)
if rval:
Fatal("Could not start sliver on CM2")
pass
manifest2 = response["value"]
print "Updated sliver on CM2. Starting sliver on CM1 ..."
#print str(manifest1);
#
# Start the slivers.
#
params = {}
params["credential"] = sliver1
rval,response = do_method(None, "StartSliver", params, url1)
if rval:
Fatal("Could not start sliver on CM1")
pass
print "Started sliver on CM1. Starting sliver on CM2 ..."
params = {}
params["credential"] = sliver2
rval,response = do_method(None, "StartSliver", params, url2)
if rval:
Fatal("Could not start sliver on CM2")
pass
print "Slivers have been started, waiting for input to delete it"
print "You should be able to log into the sliver after a little bit"
sys.stdin.readline();
#
# Delete the slivers.
#
print "Deleting sliver1 now"
params = {}
params["credential"] = sliver1
rval,response = do_method(None, "DeleteSliver", params, url1)
if rval:
Fatal("Could not delete sliver on CM1")
pass
print "Sliver1 has been deleted"
print "Deleting sliver2 now"
params = {}
params["credential"] = sliver2
rval,response = do_method(None, "DeleteSliver", params, url2)
if rval:
Fatal("Could not delete sliver on CM2")
pass
print "Sliver2 has been deleted"
| agpl-3.0 | -5,964,417,454,744,307,000 | 28.872679 | 79 | 0.621826 | false | 3.465231 | false | false | false |
AntonioQu/pythontest | get_city.py | 1 | 1595 | # pip install XlsxWriter
# pip install requests beautifulsoup4
import requests
import bs4
import xlsxwriter
import sys
def progress_bar(percent, bar_length=30):
hashes = '#' * int(round(percent * bar_length))
spaces = '-' * (bar_length - len(hashes))
sys.stdout.write("\rPercent: [{0}] {1}%".format(hashes + spaces, int(round(percent * 100))))
sys.stdout.flush()
name_workbook = 'cities_mexico_1.xlsx'
workbook = xlsxwriter.Workbook(name_workbook)
worksheet = workbook.add_worksheet('states')
url = 'http://micodigopostal.org'
response = requests.get(url)
soup = bs4.BeautifulSoup(response.text, "html.parser")
table = soup.find('table')
states = []
states_link =[]
tds = table.find_all('td')
for td in tds:
href = td.find('a')
span = td.find('span')
states_link.append(href['href'])
states.append(span.text)
lenth_states = len(states)
worksheet.write_column('A1', states)
for i in range(0,lenth_states):
worksheet = workbook.add_worksheet(states[i])
url_second = url + states_link[i]
response = requests.get(url_second)
soup = bs4.BeautifulSoup(response.text, "html.parser")
table = soup.find('table')
cities = []
cities_link = []
tds = table.find_all('td')
for td in tds:
href = td.find('a')
span = td.find('span')
if span is not None and href is not None:
cities_link.append(href['href'])
cities.append(span.text)
lenth_cities = len(cities)
worksheet.write_column('A1', cities)
percent = (i+1) / lenth_states
progress_bar(percent)
workbook.close()
print('finished')
| apache-2.0 | 635,753,228,543,512,700 | 25.982456 | 100 | 0.660188 | false | 2.889493 | false | false | false |
poschi3/AdventOfCode2015 | day14/day14.py | 1 | 1337 | totalflight = 2503
kmmax = 0
winner = None
reindeers = list()
class Reindeer:
def __init__(self, name, speed, duration, pause):
self.name = name
self.speed = speed
self.duration = duration
self.pause = pause
self.points = 0
def distanceAt(self, flight):
iteration = self.duration + self.pause
km = flight // iteration * self.duration * self.speed
rest = flight % iteration
km += min(rest, self.duration) * self.speed
return km
def __unicode__(self):
return self.name + " " + str(self.points)
with open('input.txt') as f:
for line in f.readlines():
s = line.split(' ')
r = Reindeer(s[0], int(s[3]), int(s[6]), int(s[13]))
reindeers.append(r)
km = r.distanceAt(totalflight)
if(kmmax <= km):
winner = r
kmmax = km
print(winner.name, kmmax, "\n")
for i in range(1, totalflight +1):
roundpoints = dict()
maxpoint = 0
for r in reindeers:
points = r.distanceAt(i)
roundpoints[r] = points
maxpoint = max(maxpoint, points)
for r, point in roundpoints.items():
if point == maxpoint:
r.points += 1
reindeers.sort(key=lambda r: r.points, reverse=True)
print(reindeers[0].name, reindeers[0].points)
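# Worked example (added note): with the reindeer from the puzzle statement,
# Comet (14 km/s for 10 s, then 127 s rest) and Dancer (16 km/s for 11 s, then
# 162 s rest), distanceAt(1000) yields 1120 km for Comet and 1056 km for
# Dancer, so Comet wins the distance race over a 1000 second flight.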
| mit | -6,517,702,240,340,848,000 | 19.890625 | 61 | 0.567689 | false | 3.293103 | false | false | false |
perfectsearch/sandman | code/buildscripts/textui/getch.py | 1 | 2670 | #!/usr/bin/env python
#
# $Id: sadm_getch.py 9424 2011-06-13 18:42:04Z ahartvigsen $
#
# Proprietary and confidential.
# Copyright $Date:: 2011#$ Perfect Search Corporation.
# All rights reserved.
#
def getch():
"""Gets a single character from stdin. Does not echo to the screen."""
return _impl()
class _getchUnix:
def __init__(self):
import tty, sys, termios # import termios now or else you'll get the Unix version on the Mac
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _getchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
class _getchMacCarbon:
"""
A function which returns the current ASCII key that is down;
if no ASCII key is down, the null string is returned. The
page http://www.mactech.com/macintosh-c/chap02-1.html was
very helpful in figuring out how to do this.
"""
def __init__(self):
# Depending on which version of python we have, and which
# version of OSX, this implementation may or may not be
# available. The Unix impl appears to work on the mac,
# in my testing, so we can fall back to that one if need be.
import Carbon
Carbon.Evt #see if it has this (in Unix, it doesn't)
def __call__(self):
import Carbon
if Carbon.Evt.EventAvail(0x0008)[0]==0: # 0x0008 is the keyDownMask
return ''
else:
#
# The event contains the following info:
# (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
#
# The message (msg) contains the ASCII char which is
# extracted with the 0x000000FF charCodeMask; this
# number is converted to an ASCII character with chr() and
# returned
#
(what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
return chr(msg & 0x000000FF)
try:
_impl = _getchWindows()
except ImportError:
_impl = None
try:
_impl = _getchMacCarbon()
except AttributeError:
pass
except ImportError:
pass
if not _impl:
_impl = _getchUnix()
if __name__ == '__main__': # a little test
print 'Press a key'
while True:
k=getch()
if k <> '':
break
print 'you pressed ',str(ord(k))
| mit | 7,634,219,220,338,520,000 | 29.689655 | 100 | 0.586517 | false | 3.734266 | false | false | false |
fireflame241/Jelly-Explainer | named_parsing.py | 1 | 6859 | from jelly import *
# NOTE: This still has indentation issues
from wikis import quicks_wiki, atoms_wiki, quicks_tail
#quicks_tail to be used to format quicks like so:
# Ternary if
# <condition>
# ...
# <if-clause>
# ...
# <else-clause>
# ...
for k in quicks:
quicks[k].token = k
for k in hypers:
hypers[k].token = k
for k in atoms:
atoms[k].token = k
def is_litlist(literal):
return re.match(str_litlist+"$",literal)!=None and re.match(str_literal+"$",literal)==None
def literal_type(literal):
    out = ""
    lit = literal
    if lit[0] == "“":
        if '“' in lit[1:]:
            out = ["list", " of %ss "]
        else:
            out = ["%s"]
        if lit[-1] == '”':
            out[-1] = out[-1] % "string"
        elif lit[-1] == '»':
            out[-1] = out[-1] % 'dictionary-compressed string'
        elif lit[-1] == '‘':
            out[-1] = out[-1] % 'code-page index list'
        elif lit[-1] == '’':
            out[-1] = out[-1] % 'literal'
    elif lit[0] == '⁽':
        out = ["integer"]
    elif lit[0] == '⁾':
        out = ["2-char string"]
    elif lit[0] == '”':
        out = ["char"]
    else:
        out = ["integer"]
    return out
def literal_title(literal):
if is_litlist(literal):
equiv = "[" + ','.join(map(mono_literal_equivalent,literal.split(","))) + "]"
name = map(literal_type,literal.split(","))
for k in name:
first = k
break
if all(item == first for item in name):
first.insert(1,"s")
name = "list of "+ ''.join(first)
else:
name = "list"
else:
equiv = mono_literal_equivalent(literal)
name = ''.join(literal_type(literal))
return "The literal "+name+" "+equiv
def mono_literal_equivalent(mono_literal):
evaled = jelly_eval(mono_literal,[])
if type(evaled) == list:
if type(evaled[0]) == list:
if type(evaled[0][0]) == str:
evaled = [''.join(k) for k in evaled]
elif type(evaled[0]) == str:
evaled = ''.join(evaled)
if type(evaled) == str:
evaled = "'" + evaled + "'"
return str(evaled)
def chainsep_title(token):
assert token in chain_separators.keys()
value = chain_separators[token]
return "Start a new "+['nil','mon','dy'][value[0]]+"adic chain"+(" with reversed arguments" if not value[1] else "")
def name(token):
if len(token) == 0:
return ""
elif token in atoms_wiki:
return atoms_wiki[token]
elif token in quicks_wiki:
return quicks_wiki[token]
elif token in str_arities:
return chainsep_title(token)
else:
return literal_title(token)
def token_attrdict(ls):
assert type(ls) in [str,list,attrdict]
if type(ls) == str:
if ls in quicks:
return quicks[ls]
elif ls in atoms:
return atoms[ls]
elif ls in hypers:
return hypers[ls]
else:
return create_literal(regex_liter.sub(parse_literal, ls))
elif type(ls) == list:
return [token_attrdict(k) for k in ls]
elif type(ls) == attrdict:
return token_attrdict(ls.token)
def indent_deepest(ls):
if type(ls) == list:
return [indent_deepest(k) for k in ls]
ls.indentation += 2
return ls
# structure derived from Jelly's parse_code() function.
regex_token_sep = re.compile(str_nonlits + "|" + str_litlist + "|[" + str_arities +"]|")
def parse_code_named(code):
lines_match = regex_flink.finditer(code)
lines = list(lines_match)
lines_str = [line.group(0) for line in lines]
lines_match = regex_flink.finditer(code)
links = [[] for line in lines]
for index, line_match in enumerate(lines_match):
line = line_match.group(0)
chains = links[index]
for word_match in regex_chain.finditer(line):
word = word_match.group(0)
chain = []
for match in regex_token_sep.finditer(word):
token = match.group(0)
token_span = attrdict(token=token, span=match.span(), word_start=word_match.start(), line_len = len(line), name=name(token), indentation=0)
if not len(token):
break;
if token in atoms:
chain.append(token_span)
elif token in quicks:
popped = []
while not quicks[token].condition([token_attrdict(k) for k in popped]) and (chain or chains):
popped.insert(0, chain.pop() if chain else chains.pop())
popped = indent_deepest(popped)
#token_span = indent_deepest(token_span)
chain.append([popped, token_span])
elif token in hypers:
chain.append(token_span)
else:
chain.append(token_span)
chains.append(chain)
return (links, lines_str)
def order(tokens):
if type(tokens) in (list,tuple):
# system to order more naturally e.g. ABC? -> ?CAB [if C then A else B].
# Improve based on quick? Future difficulty: "/" could have two definitions
if len(tokens)==0:
return []
if len(tokens)==1:
return [order(tokens[~0])]
if len(tokens)==2:
return [order(tokens[~0]),order(tokens[~1])]
if len(tokens)==3:
return [order(tokens[~0]),order(tokens[~2]),order(tokens[~1])]
elif type(tokens) == attrdict:
return tokens
else:
return tokens
def order_ranking(ranking):
out = []
for link in ranking:
p = []
for chain in link:
o = []
for token_seq in chain:
ordered = order(token_seq)
if type(ordered) == attrdict:
o.append(ordered)
else:
for k in order(token_seq):
o.append(k)
p.append(o)
out.append(p)
return out
def explain_token(token):
assert type(token) in [str, list, tuple, attrdict]
if type(token) == str:
return [token, name(token)]
elif type(token) == attrdict:
return token
elif type(token) in [list, tuple]:
o = []
for tok in token:
e = explain_token(tok)
o+=[e]
return o
def filter_out(ls, element):
if type(ls) == list:
return [filter_out(k, element) for k in ls if k!=element]
else:
return ls
def form_neat(ranking):
if type(ranking) == attrdict:
return "name: "+ranking.name
else:
return [form_neat(k) for k in ranking]
def explain(code):
ranking, lines = parse_code_named(code)
print("RANKING: ",ranking)
ranking = filter_out(ranking, [])
ranking = order_ranking(ranking)
print("RANKING: ",ranking)
explanation = []
# Iteration form not pythonic but necessary to append lines from parse_code_named. Maybe interleave instead?
for line_num in range(len(ranking)):
line = ranking[line_num]
explanation.append(lines[line_num])
for chain in line:
explanation.append(explain_token(chain))
return render(explanation)
def render(ordered,join="\n\n"):
assert type(ordered) in [str, list, attrdict]
if type(ordered) == list:
# this looks and is horrible. TODO:Change
lines = ["\n".join( [a for a in render(k,"\n").split("\n")] ) for k in ordered]
o = join.join(lines)
return re.sub(r"(\n\n[^\n]+\n)\n",r"\1",o)
elif type(ordered) == str:
return ordered
elif type(ordered) == attrdict:
start = ordered.span[0]+ordered.word_start
return " "*(start)+ordered.token+" "*(ordered.line_len-start-len(ordered.token))+" "*ordered.indentation+" "+ordered.name
test_string = """3,µ+5µ7C
01P?2S?+3
5Ç+5©
P01?
3
1+2+3+4+5
1+2µ3+45
CN$
+5/
+/
SƤ
S€"""
print(explain(test_string))
k = attrdict(a=5, b=3, c=1)
| mit | -8,745,419,046,236,431,000 | 25.488372 | 143 | 0.641498 | false | 2.702254 | false | false | false |
RedHenLab/ControversyDetection | src/controversy_scoring.py | 1 | 2071 | #!/usr/bin/env python3
import sqlite3
import pandas as pd
from scipy.stats import chi2_contingency
cnx_lda = sqlite3.connect("1_31_LDA.db")
cnx_sentiment = sqlite3.connect("2016-01_sentiments_annotated.db")
# get topic distribution over stories
_ = pd.read_sql("SELECT * FROM [1_31_LDA]", cnx_lda)
topics = [str(i) for i in range(100)]
df_lda = _[topics]
topics_lemmas = _.loc[_.index[-1]][topics]
df_lda.index = _['story_id']
df_lda = df_lda[:-1]
# get emotion vectors
_ = pd.read_sql("SELECT * FROM [2016-01_sentiments_annotated.db]", cnx_sentiment)
df_emotions = _[['negative', 'ambiguous', 'positive']]
df_emotions.index = _['story_id']
def controversy(topic, cutoff_topic=.1, df_emotions=df_emotions, df_lda=df_lda):
# retrieve all relevant story ids for given topic
story_ids = list()
for row in df_lda.iterrows():
if row[1][topic] is not None:
if float(row[1][topic]) > cutoff_topic:
story_ids.append(row[0])
story_ids = set(story_ids)
# retrieve all emotions vectors for relevant stories
emotion_vectors = list()
for row in df_emotions.iterrows():
if str(row[0]) in story_ids:
if row[1].values.sum() > 0:
emotion_vectors.append(list(row[1].values))
# calculate divergence
if len(emotion_vectors) > 2:
_, p, _, _ = chi2_contingency(emotion_vectors)
print("topic " + topic + ": controversy score: " + str(1 - p))
return (1 - p), story_ids
else:
print("topic " + topic + ": not enough stories with emotions vectors in that topic")
return 0, story_ids
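# Added note: chi2_contingency comes from scipy.stats and returns
# (chi2, p, dof, expected); the score above is 1 - p, so topics whose relevant
# stories have very different negative/ambiguous/positive profiles score close
# to 1. For instance chi2_contingency([[10, 2, 1], [1, 2, 10]]) gives a small p
# and hence a score near 1 (the figures are illustrative).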
# evaluate for each topic
stories = list()
controversy_scores = list()
for topic in topics:
score, ids = controversy(topic)
controversy_scores.append(score)
stories.append(ids)
df_topic_controversy = pd.DataFrame(index=topics)
df_topic_controversy['controversy'] = controversy_scores
df_topic_controversy['lemmas'] = topics_lemmas
df_topic_controversy['story_ids'] = stories
df_topic_controversy.to_csv("January_controversy_scores.csv")
| gpl-2.0 | -3,052,488,400,251,001,000 | 30.861538 | 92 | 0.657653 | false | 2.954351 | false | false | false |
Silianbo/salary-prediction-with-machine-learning | DataCrawer.py | 3 | 1399 | #encoding: utf-8
"""
Nothing to say
"""
from __future__ import print_function
import re
import requests
from BeautifulSoup import BeautifulSoup as soup
from sqlalchemy.orm import sessionmaker
import sys
from Tasks import getContent
import DataModel
from DataModel import Salary
import datetime
import time
LAGOU_JOB_URL = 'http://www.lagou.com/jobs/{0}.html'
def getLagou(i,p={'job_description':('dd',{'class':'job_bt'}),
'job_request':('dd',{'class':'job_request'})}):
return getContent(LAGOU_JOB_URL.format(i),patterns=p,ignore_pattern=['div',{'class':'position_del'}])
#DBSession = sessionmaker(DataModel.engine)
get_salary = r'(?P<low>[\d]{,3})k-(?P<high>[\d]{,3})k'
salary_reg = re.compile(get_salary)
SAVE_CSV = './save_file_' + str(datetime.datetime.fromtimestamp(time.time())) + '.csv'
def saveLagou(job):
res = re.match(salary_reg,job['job_request'])
try:
with open(SAVE_CSV,'a+') as f:
res = res.groupdict()
salary = (int(res['low']) + int(res['high'])) / 2
jd = job['job_description']
f.write('{0},{1}\n'.format(salary,jd.encode('utf8')))
except Exception,e:
print(e)
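# Worked example (added note; the figures are illustrative): for a job_request
# such as u'10k-20k ...', the regex captures low=10 and high=20, so
# salary = (10 + 20) / 2 = 15 under Python 2 integer division, and the line
# "15,<job description>" is appended to the CSV file.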
if __name__=='__main__':
for i in range(0,999999):
try:
saveLagou(getLagou(i))
print('\r {0} id Finished\r'.format(i),file=sys.stdout)
except Exception,e:
pass
| gpl-2.0 | -3,625,143,950,399,576,000 | 29.413043 | 105 | 0.616869 | false | 3.179545 | false | false | false |
nikhilgarg459/bcs | bcs_server/account.py | 2 | 1958 | #!usr/bin/env python
# -*-coding:utf8-*-
from datetime import datetime
from server_logger import log
__doc__ = """
* Account class
"""
class Account:
def __init__(self, name, email, password, account_type):
self.name = name
self.email = email
self.password = password
self.type = account_type
self.money = 0
self.passbook = []
def datetime_now(self):
"""
For format reference of datetime, refer to table at
the end of following page:
https://docs.python.org/2/library/datetime.html
"""
datetime_now = datetime.now().strftime('%b %d, %Y %I:%M %p')
return datetime_now
def deposit(self, amount):
self.money += int(amount)
tx = (self.datetime_now(), amount, 0)
self.passbook.append(tx)
return "Money deposit Succesfull! New Balance Rs. " + str(self.money)
def withDraw(self, amount):
if int(amount) <= self.money:
self.money -= int(amount)
tx = (self.datetime_now(), 0, amount)
self.passbook.append(tx)
return "Money withdraw Succesfull! New Balance Rs. " + str(self.money)
return "Sorry you can withdraw max of Rs. " + str(self.money)
def login(self, password):
if password == self.password:
return "Login Successful", self.type
return "Login Unsuccessful", "Invalid"
def changePassword(self, password):
self.password = password
return "Password change Successfully"
def getPassbook(self):
if len(self.passbook) == 0:
return 'No transactions'
return '\n'.join(['%s : %s : %s' % (tx[0], tx[1], tx[2])
for tx in self.passbook])
def __str__(self):
return '%-15s %-20s %-15s %-10s' % (self.name, self.email,
self.password, self.type)
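# Minimal usage sketch (added for illustration; the account details and amounts
# are made up, and real callers go through the bcs_server request handlers):
if __name__ == '__main__':
    acc = Account('Alice', '[email protected]', 'secret', 'savings')
    print(acc.deposit(1000))   # "Money deposit Successful! New Balance Rs. 1000"
    print(acc.withDraw(400))   # "Money withdraw Successful! New Balance Rs. 600"
    print(acc.getPassbook())   # one "<time> : <deposited> : <withdrawn>" line per tx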
| mit | 8,810,352,944,762,448,000 | 29.59375 | 82 | 0.551583 | false | 3.787234 | false | false | false |
captiosus/treadmill | tests/appcfg/abort_test.py | 1 | 3800 | """Unit test for treadmill.appcfg.abort
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import json
import os
import shutil
import tempfile
import unittest
import kazoo
import mock
import treadmill
from treadmill import appenv
from treadmill import context
from treadmill import fs
from treadmill.apptrace import events
from treadmill.appcfg import abort as app_abort
class AppCfgAbortTest(unittest.TestCase):
"""Tests for teadmill.appcfg.abort"""
def setUp(self):
self.root = tempfile.mkdtemp()
self.tm_env = appenv.AppEnvironment(root=self.root)
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('treadmill.appcfg.abort.flag_aborted', mock.Mock())
@mock.patch('treadmill.supervisor.control_service', mock.Mock())
def test_abort(self):
"""Tests abort sequence."""
container_dir = os.path.join(self.root, 'apps', 'proid.myapp#001',
'data')
fs.mkdir_safe(container_dir)
app_abort.abort(container_dir,
why=app_abort.AbortedReason.INVALID_TYPE,
payload='test')
treadmill.appcfg.abort.flag_aborted.assert_called_with(
container_dir,
app_abort.AbortedReason.INVALID_TYPE,
'test'
)
treadmill.supervisor.control_service.assert_called_with(
os.path.join(self.root, 'apps', 'proid.myapp#001'),
treadmill.supervisor.ServiceControlAction.kill
)
def test_flag_aborted(self):
"""Tests flag abort sequence."""
container_dir = os.path.join(self.root, 'apps', 'proid.myapp#001',
'data')
fs.mkdir_safe(container_dir)
app_abort.flag_aborted(container_dir,
why=app_abort.AbortedReason.INVALID_TYPE,
payload='test')
aborted_file = os.path.join(container_dir, 'aborted')
with io.open(aborted_file) as f:
aborted = json.load(f)
self.assertEqual('invalid_type', aborted.get('why'))
self.assertEqual('test', aborted.get('payload'))
@mock.patch('kazoo.client.KazooClient.exists', mock.Mock())
@mock.patch('kazoo.client.KazooClient.create', mock.Mock())
@mock.patch('kazoo.client.KazooClient.delete', mock.Mock())
@mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())
@mock.patch('treadmill.appevents.post', mock.Mock())
@mock.patch('treadmill.sysinfo.hostname',
mock.Mock(return_value='xxx.xx.com'))
@mock.patch('treadmill.zkutils.connect', mock.Mock())
@mock.patch('treadmill.zkutils.put', mock.Mock())
def test_report_aborted(self):
"""Tests report abort sequence."""
context.GLOBAL.zk.url = 'zookeeper://xxx@hhh:123/treadmill/mycell'
treadmill.zkutils.connect.return_value = kazoo.client.KazooClient()
kazoo.client.KazooClient.get_children.return_value = []
kazoo.client.KazooClient.exists.return_value = True
kazoo.client.KazooClient.create.reset()
kazoo.client.KazooClient.delete.reset()
app_abort.report_aborted(self.tm_env, 'proid.myapp#001',
why=app_abort.AbortedReason.TICKETS,
payload='test')
treadmill.appevents.post.assert_called_with(
mock.ANY,
events.AbortedTraceEvent(
instanceid='proid.myapp#001',
why='tickets',
payload='test',
),
)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,938,947,858,906,630,000 | 33.234234 | 75 | 0.614211 | false | 3.525046 | true | false | false |
slashdd/sos | sos/report/plugins/ds.py | 1 | 3536 | # Copyright (C) 2007 Red Hat, Inc., Kent Lamb <[email protected]>
# Copyright (C) 2014 Red Hat, Inc., Bryn M. Reeves <[email protected]>
# Copyright (C) 2021 Red Hat, Inc., Mark Reynolds <[email protected]>
#
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, RedHatPlugin
import os
class DirectoryServer(Plugin, RedHatPlugin):
short_desc = 'Directory Server'
plugin_name = 'ds'
profiles = ('identity',)
files = ('/etc/dirsrv', '/opt/redhat-ds')
packages = ('redhat-ds-base', 'redhat-ds-7')
def check_version(self):
if self.is_installed("redhat-ds-base") or \
os.path.exists("/etc/dirsrv"):
return "ds8"
elif self.is_installed("redhat-ds-7") or \
os.path.exists("/opt/redhat-ds"):
return "ds7"
return False
def setup(self):
self.add_forbidden_path([
"/etc/dirsrv/slapd*/pin.txt",
"/etc/dirsrv/slapd*/key3.db",
"/etc/dirsrv/slapd*/pwfile.txt",
"/etc/dirsrv/slapd*/*passw*",
"/etc/dirsrv/admin-serv/key[3-4].db",
"/etc/dirsrv/admin-serv/admpw",
"/etc/dirsrv/admin-serv/password.conf"
])
try:
for d in os.listdir("/etc/dirsrv"):
if d[0:5] == 'slapd':
certpath = os.path.join("/etc/dirsrv", d)
self.add_cmd_output("certutil -L -d %s" % certpath)
self.add_cmd_output("dsctl %s healthcheck" % d)
except OSError:
self._log_warn("could not list /etc/dirsrv")
if not self.check_version():
self.add_alert("Directory Server not found.")
elif "ds8" in self.check_version():
self.add_copy_spec([
"/etc/dirsrv/slapd*/cert8.db",
"/etc/dirsrv/slapd*/certmap.conf",
"/etc/dirsrv/slapd*/dse.ldif",
"/etc/dirsrv/slapd*/dse.ldif.startOK",
"/etc/dirsrv/slapd*/secmod.db",
"/etc/dirsrv/slapd*/schema/*.ldif",
"/etc/dirsrv/admin-serv",
"/var/log/dirsrv/*"
])
elif "ds7" in self.check_version():
self.add_copy_spec([
"/opt/redhat-ds/slapd-*/config",
"/opt/redhat-ds/slapd-*/logs"
])
self.add_cmd_output("ls -l /var/lib/dirsrv/slapd-*/db/*")
def postproc(self):
# Example for scrubbing rootpw hash
#
# nsslapd-rootpw: AAAAB3NzaC1yc2EAAAADAQABAAABAQDeXYA3juyPqaUuyfWV2HuIM
# v3gebb/5cvx9ehEAFF2yIKvsQN2EJGTV+hBM1DEOB4eyy/H11NqcNwm/2QsagDB3PVwYp
# 9VKN3BdhQjlhuoYKhLwgtYUMiGL8AX5g1qxjirIkTRJwjbXkSNuQaXig7wVjmvXnB2o7B
# zLtu99DiL1AizfVeZTYA+OVowYKYaXYljVmVKS+g3t29Obaom54ZLpfuoGMmyO64AJrWs
#
# to
#
# nsslapd-rootpw:********
regexppass = r"(nsslapd-rootpw(\s)*:(\s)*)(\S+)([\r\n]\s.*)*\n"
regexpkey = r"(nsSymmetricKey(\s)*::(\s)*)(\S+)([\r\n]\s.*)*\n"
repl = r"\1********\n"
self.do_path_regex_sub('/etc/dirsrv/*', regexppass, repl)
self.do_path_regex_sub('/etc/dirsrv/*', regexpkey, repl)
# vim: set et ts=4 sw=4 :
| gpl-2.0 | 3,939,228,121,040,642,600 | 36.221053 | 79 | 0.568156 | false | 2.966443 | false | false | false |
avastjohn/maventy_new | search/templatetags/searchtags.py | 1 | 6107 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.template import TemplateSyntaxError, TemplateDoesNotExist
from django.template.loader import get_template
from django.utils.safestring import mark_safe
from itertools import ifilter
from ragendja.template import Library
import re
whitespace = re.compile(r'[\s\-\'\"]+')
register = Library()
@register.filter
def searchexcerpt(text, query, context_words=10, startswith=True):
if not isinstance(query, (list, tuple, set)):
query = set(whitespace.split(query))
query = [re.escape(q) for q in query if q]
exprs = [re.compile(r"^%s$" % p, re.IGNORECASE) for p in query]
re_template = startswith and r"\b(%s)" or r"\b(%s)\b"
pieces = re.compile(re_template % "|".join(query), re.IGNORECASE).split(text)
matches = {}
word_lists = []
index = {}
for i, piece in enumerate(pieces):
word_lists.append(whitespace.split(piece))
if i % 2:
index[i] = expr = ifilter(lambda e: e.match(piece), exprs).next()
matches.setdefault(expr, []).append(i)
def merge(lists):
merged = []
for words in lists:
if not words:
continue
if merged:
merged[-1] += words[0]
del words[0]
merged.extend(words)
return merged
i = 0
merged = []
for j in map(min, matches.itervalues()):
merged.append(merge(word_lists[i:j]))
merged.append(word_lists[j])
i = j + 1
merged.append(merge(word_lists[i:]))
output = []
for i, words in enumerate(merged):
omit = None
if i == len(merged) - 1:
omit = slice(max(1, 2 - i) * context_words + 1, None)
elif i == 0:
omit = slice(-context_words - 1)
elif not i % 2:
omit = slice(context_words + 1, -context_words - 1)
if omit and words[omit]:
words[omit] = ["..."]
output.append(" ".join(words))
return ''.join(output)
@register.filter
def highlightedexcerpt(text, query, context_words=10, startswith=True, class_name='highlight'):
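    """Like ``searchexcerpt``, but additionally wraps each matched term in a
    ``<span class="...">`` element (``class_name``) and marks the result safe
    for template output."""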
if not isinstance(query, (list, tuple, set)):
query = set(whitespace.split(query))
text = searchexcerpt(text, query, context_words=context_words, startswith=startswith)
query = [re.escape(q) for q in query if q]
re_template = startswith and r"\b(%s)" or r"\b(%s)\b"
expr = re.compile(re_template % "|".join(query), re.IGNORECASE)
template = '<span class="%s">%%s</span>' % class_name
matches = []
def replace(match):
matches.append(match)
return template % match.group(0)
return mark_safe(expr.sub(replace, text))
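# Template usage sketch (illustrative only; the variable names are assumed,
# not taken from the original project):
#   {{ result.text|searchexcerpt:query }}
#   {{ result.text|highlightedexcerpt:query }}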
@register.context_tag
def global_search_form(context, url, label='Search'):
request = context['request']
    form_module, _sep, class_name = getattr(settings, 'GLOBAL_SEARCH_FORM',
        'search.forms.SearchForm').rpartition('.')
form_class = getattr(__import__(form_module, {}, {}, ['']), class_name)
html = '<form action="%(url)s" method="get">%(input)s<input type="submit" value="%(label)s" /></form>'
if request.path == url:
form = form_class(request.GET, auto_id='global_search_%s')
else:
form = form_class(auto_id='global_search_%s')
return html % {'url': url, 'input': form['query'], 'label': label}
@register.context_tag
def load_object_list(context):
"""
Loads search__object_list for iteration and applies the converter to it.
"""
name = context['template_object_name'] + '_list'
object_list = context[name]
converter = context.get('search__converter')
if converter:
object_list = converter(object_list)
context['search__object_list'] = object_list
return ''
@register.context_tag
def display_in_list(context, item):
template_name = '%s/%s_in_list.html' % (
item._meta.app_label, item._meta.object_name.lower())
context.push()
context[ context['template_object_name'] ] = item
try:
output = get_template(template_name).render(context)
except (TemplateSyntaxError, TemplateDoesNotExist), e:
if settings.TEMPLATE_DEBUG:
raise
output = ''
except:
output = '' # Fail silently for invalid included templates.
context.pop()
return output
@register.filter
def resultsformat(hits, results_format):
if not hits:
format = results_format[0]
elif hits == 1:
format = results_format[1]
elif hits <= 300:
format = results_format[2]
else:
format = results_format[3]
hits -= 1
return format % {'hits': hits}
@register.inclusion_tag('search/pagenav.html', takes_context=True)
def pagenav(context, adjacent_pages=3):
"""
To be used in conjunction with the object_list generic view.
Adds pagination context variables for use in displaying first, adjacent and
last page links in addition to those created by the object_list generic
view.
"""
page = context['page']
pages = context['pages']
if page < adjacent_pages:
page_range = range(1, 2 * adjacent_pages)
elif pages - page + 1 < adjacent_pages:
page_range = range(pages - 2 * adjacent_pages + 2, pages + 1)
else:
page_range = range(page - adjacent_pages, page + adjacent_pages + 1)
page_range = [n for n in page_range if n >= 1 and n <= pages]
if pages not in page_range and pages - 1 in page_range:
page_range.append(pages)
if 1 not in page_range and 2 in page_range:
page_range.insert(0, 1)
return {
'hits': context['hits'],
'results_per_page': context['results_per_page'],
'page': page,
'pages': pages,
'page_range': page_range,
'next': context['next'],
'previous': context['previous'],
'has_next': context['has_next'],
'has_previous': context['has_previous'],
'show_first': 1 not in page_range,
'show_last': pages not in page_range,
'base_url': context['base_url'],
}
| bsd-3-clause | -2,424,585,837,204,797,000 | 33.308989 | 106 | 0.604716 | false | 3.674489 | false | false | false |
karlnapf/kameleon-mcmc | kameleon_mcmc/tools/MatrixTools.py | 1 | 1967 | """
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Written (W) 2013 Heiko Strathmann
Written (W) 2013 Dino Sejdinovic
"""
from numpy import zeros, cos, sin, sqrt
from numpy.lib.twodim_base import diag
from numpy.linalg import svd
from scipy.linalg.basic import solve_triangular
class MatrixTools(object):
@staticmethod
def rotation_matrix(theta):
"""
Returns a 2d rotation matrix where theta is in radiants
"""
R = zeros((2, 2))
R[0, 0] = cos(theta)
R[0, 1] = -sin(theta)
R[1, 0] = sin(theta)
R[1, 1] = cos(theta)
return R
@staticmethod
def low_rank_approx(K, d):
"""
Returns a low rank approximation factor L of the given psd matrix such that
LL^T \approx K with a given number of principal components to use
K - psd matrix to compute low-rank approximation of
d - number of principal components to use
returns (L, s, V) where
L - LL^T \approx K
s - 1D vector of Eigenvalues
V - matrix containing Eigen-row-vectors
"""
# perform SVD and only use first d components. Note that U^T=V if K psd and
# rows of V are Eigenvectors of K
U, s, V = svd(K)
U = U[:, 0:d]
V = V[0:d, :]
s = s[0:d]
S = diag(s)
# K \approx=U.dot(S.dot(V))
L = sqrt(S).dot(V)
# LL^T \approx K
return (L, s, V)
@staticmethod
def cholesky_solve(L, x):
"""
Solves X^-1 x = (LL^T) ^-1 x = L^-T L ^-1 * x for a given Cholesky
X=LL^T
"""
x = solve_triangular(L, x.T, lower=True)
x = solve_triangular(L.T, x, lower=False)
return x
| bsd-2-clause | -7,081,884,426,567,236,000 | 28.358209 | 83 | 0.567361 | false | 3.531418 | false | false | false |
jmbeuken/abinit | scripts/post_processing/ElectronPhononCoupling/ElectronPhononCoupling/data/LiF_g2/__init__.py | 2 | 1772 |
import os
from os.path import join as pjoin
dirname = os.path.dirname(__file__)
nqpt = 3
wtq = [0.125, 0.5, 0.375]
DDB_fnames = [ pjoin(dirname, fname) for fname in """
odat_calc_DS5_DDB.nc
odat_calc_DS9_DDB.nc
odat_calc_DS13_DDB.nc
""".split() ]
EIG_fnames = [ pjoin(dirname, fname) for fname in """
odat_calc_DS6_EIG.nc
odat_calc_DS10_EIG.nc
odat_calc_DS14_EIG.nc
""".split() ]
EIGR2D_fnames = [ pjoin(dirname, fname) for fname in """
odat_calc_DS7_EIGR2D.nc
odat_calc_DS11_EIGR2D.nc
odat_calc_DS15_EIGR2D.nc
""".split() ]
EIGI2D_fnames = [ pjoin(dirname, fname) for fname in """
odat_calc_DS7_EIGI2D.nc
odat_calc_DS11_EIGI2D.nc
odat_calc_DS15_EIGI2D.nc
""".split() ]
FAN_fnames = [ pjoin(dirname, fname) for fname in """
odat_calc_DS7_FAN.nc
odat_calc_DS11_FAN.nc
odat_calc_DS15_FAN.nc
""".split() ]
GKK_fnames = [ pjoin(dirname, fname) for fname in """
odat_calc_DS7_GKK.nc
odat_calc_DS11_GKK.nc
odat_calc_DS15_GKK.nc
""".split() ]
EIG0_fname = pjoin(dirname, 'odat_calc_DS3_EIG.nc')
fnames = dict(
eigk_fname=EIG0_fname,
eigq_fnames=EIG_fnames,
ddb_fnames=DDB_fnames,
eigr2d_fnames=EIGR2D_fnames,
eigi2d_fnames=EIGI2D_fnames,
#fan_fnames=FAN_fnames,
gkk_fnames=GKK_fnames,
)
refdir = pjoin(dirname, 'epc_outputs')
| gpl-3.0 | 5,406,779,415,015,076,000 | 29.033898 | 56 | 0.485327 | false | 2.928926 | false | false | false |
cpina/science-cruise-data-management | ScienceCruiseDataManagement/main/management/commands/updateeventlocations.py | 1 | 4254 | from django.core.management.base import BaseCommand, CommandError
from main.models import EventAction, PositionUncertainty, PositionSource
from django.conf import settings
from main import utils
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina ([email protected]) and Jen Thomas ([email protected]), 2016-2017.
class Command(BaseCommand):
help = 'Updates locations of latitude longitude of Event Actions'
def __init__(self):
self._dry_run = True
self._position_source_object = None
self._position_uncertainty_object = None
self._force_update = False
def add_arguments(self, parser):
parser.add_argument('action', help="[update|dry-run|force-update]", type=str)
def handle(self, *args, **options):
if options['action'] == "dry-run":
self._dry_run = True
elif options['action'] == "update" or options['action'] == "force-update":
self._dry_run = False
self._force_update = (options['action'] == "force-update")
else:
print("Unknown action, should be dry-run, update or force-update")
exit(1)
self._update_locations()
def _update_locations(self):
event_actions = EventAction.objects.order_by('time')
for event_action in event_actions:
if event_action.position_depends_on_time(self._force_update):
self._update(event_action)
def _update(self, event_action):
ship_location = utils.ship_location(event_action.time)
action_text_before=""
if ship_location.latitude is not None and ship_location.longitude is not None:
if self._dry_run:
action_text = "Should update"
else:
action_text_before = "(Previously: Latitude: {} Longitude: {})".format(event_action.latitude, event_action.longitude)
if event_action.latitude == float("{:.4f}".format(ship_location.latitude)) and event_action.longitude == float("{:.4f}".format(ship_location.longitude)):
print("Was going to update {} but it's the same than before, skips".format(event_action))
return
event_action.latitude = "{:.4f}".format(ship_location.latitude)
event_action.longitude = "{:.4f}".format(ship_location.longitude)
event_action.position_source = self._position_source()
event_action.position_uncertainty = self._position_uncertainty()
action_text = "Updated"
event_action.save()
print("{} event_action: {}\t{} {:.4f} {:.4f} {}".format(action_text, event_action.id, event_action.time,
ship_location.latitude, ship_location.longitude, action_text_before))
elif not ship_location.is_valid:
print("Event action {} location in the database is invalid. Date time: {}".format(event_action.id, event_action.time))
print("In the event action is: Latitude: {} Longitude: {}".format(event_action.latitude, event_action.longitude))
else:
print("Missing information for event action ID: {} Time: {}".format(event_action.id, event_action.time))
def _position_uncertainty(self):
if self._position_uncertainty_object is None:
self._position_uncertainty_object = PositionUncertainty.objects.get(name=settings.UPDATE_LOCATION_POSITION_UNCERTAINTY_NAME)
return self._position_uncertainty_object
def _position_source(self):
if self._position_source_object is None:
self._position_source_object = PositionSource.objects.get(name=settings.UPDATE_LOCATION_POSITION_SOURCE_NAME)
return self._position_source_object | mit | -4,826,362,323,577,263,000 | 46.277778 | 169 | 0.647626 | false | 4.009425 | false | false | false |
signalfx/signalfx-python | signalfx/signalflow/errors.py | 1 | 1840 | # Copyright (C) 2016 SignalFx, Inc. All rights reserved.
class SignalFlowException(Exception):
"""A generic error encountered when interacting with the SignalFx
SignalFlow API."""
def __init__(self, code, message=None, error_type=None):
self._code = code
self._message = message
self._error_type = error_type
@property
def code(self):
"""Returns the HTTP error code."""
return self._code
@property
def message(self):
"""Returns an optional error message attached to this error."""
return self._message
@property
def error_type(self):
"""Returns an optional error type attached to this error."""
return self._error_type
def __str__(self):
err = self._code
if self._error_type:
err = '{0} ({1})'.format(self._code, self._error_type)
if self._message:
return '{0}: {1}'.format(err, self._message)
return 'Error {0}'.format(err)
class ComputationAborted(Exception):
"""Exception thrown if the computation is aborted during its execution."""
def __init__(self, abort_info):
self._state = abort_info['sf_job_abortState']
self._reason = abort_info['sf_job_abortReason']
@property
def state(self):
return self._state
@property
def reason(self):
return self._reason
def __str__(self):
return 'Computation {0}: {1}'.format(
self._state.lower(), self._reason)
class ComputationFailed(Exception):
"""Exception thrown when the computation failed after being started."""
def __init__(self, errors):
self._errors = errors
@property
def errors(self):
return self._errors
def __str__(self):
return 'Computation failed ({0})'.format(self._errors)
| apache-2.0 | 436,478,070,167,203,840 | 25.666667 | 78 | 0.603261 | false | 4.220183 | false | false | false |
ebottabi/mmakinde-ebot | flask_react/server/app.py | 1 | 1514 | import os
from flask import request, jsonify, abort
from flask_app import create_app, db
from flask_app.models import User, Role, Document
config_name = os.getenv('FLASK_CONFIG')
app = create_app(config_name)
@app.route('/')
def index():
return jsonify({ "message": "Welcome to Document Manager"})
@app.route('/users', methods=['POST'])
def create_user():
user = User(
email=str(request.form.get('email')),
full_name=str(request.form.get('full_name')),
password=str(request.form.get('password')))
db.session.add(user)
db.session.commit()
response = jsonify({ "success": "user details saved successfully" })
response.status_code = 201
return response
@app.route('/documents', methods=['POST'])
def create_document():
document = Document(
title=str(request.form.get('title')),
access=str(request.form.get('access')),
content=str(request.form.get('content')),
roleId=(request.form.get('roleId')),
ownerId=(request.form.get('ownerId')))
db.session.add(document)
db.session.commit()
response = jsonify({ "success": "document saved successfully" })
response.status_code = 201
return response
@app.route('/roles', methods=['POST'])
def create_role():
role = Role(
title=str(request.form.get('title')))
print "role"
db.session.add(role)
db.session.commit()
response = jsonify({
'status': 'Role created successfully',
'role': role.title
})
response.status_code = 201
return response
if __name__ == '__main__':
app.run()
| apache-2.0 | -2,344,024,401,241,587,700 | 23.031746 | 70 | 0.67041 | false | 3.496536 | false | false | false |
kaazoo/DrQueueIPython | etc/mentalray_sg.py | 2 | 3737 | # -*- coding: utf-8 -*-
"""
DrQueue render template for Mental Ray
Copyright (C) 2011 Andreas Schroeder
This file is part of DrQueue.
Licensed under GNU General Public License version 3. See LICENSE for details.
"""
import os
import string
import DrQueue
from DrQueue import engine_helpers
def run_renderer(env_dict):
# define external variables as global
globals().update(env_dict)
global DRQUEUE_OS
global DRQUEUE_ETC
global DRQUEUE_SCENEFILE
global DRQUEUE_FRAME
global DRQUEUE_BLOCKSIZE
global DRQUEUE_ENDFRAME
global DRQUEUE_RENDERDIR
global DRQUEUE_IMAGE
global DRQUEUE_CAMERA
global DRQUEUE_RESX
global DRQUEUE_RESY
global DRQUEUE_FILEFORMAT
global DRQUEUE_RENDERTYPE
global DRQUEUE_LOGFILE
# initialize helper object
helper = engine_helpers.Helper(env_dict['DRQUEUE_LOGFILE'])
# range to render
block = helper.calc_block(DRQUEUE_FRAME, DRQUEUE_ENDFRAME, DRQUEUE_BLOCKSIZE)
# renderer path/executable
engine_path = "ray"
# replace paths on Windows
if DRQUEUE_OS in ["Windows", "Win32"]:
DRQUEUE_SCENEFILE = helper.replace_stdpath_with_driveletter(DRQUEUE_SCENEFILE, 'n:')
DRQUEUE_RENDERDIR = helper.replace_stdpath_with_driveletter(DRQUEUE_RENDERDIR, 'n:')
if ("DRQUEUE_IMAGEFILE" in globals()) and (DRQUEUE_IMAGEFILE != ""):
image_args = "-im " + DRQUEUE_IMAGEFILE
else:
image_args = ""
if ("DRQUEUE_CAMERA" in globals()) and (DRQUEUE_CAMERA != ""):
camera_args = "-cam " + DRQUEUE_CAMERA
else:
camera_args=""
if ("DRQUEUE_RESX" in globals()) and ("DRQUEUE_RESX" in globals()) and (int(DRQUEUE_RESX) > 0) and (int(DRQUEUE_RESY) > 0):
res_args = "-x " + DRQUEUE_RESX + " -y " + DRQUEUE_RESY
else:
res_args = ""
if ("DRQUEUE_FILEFORMAT" in globals()) and (DRQUEUE_FILEFORMAT != ""):
format_args = "-of " + DRQUEUE_FILEFORMAT
else:
format_args = ""
if ("DRQUEUE_RENDERDIR" in globals()) and (DRQUEUE_RENDERDIR != ""):
os.chdir(DRQUEUE_RENDERDIR)
# extra stuff for rendering single images in a couple of parts
if DRQUEUE_RENDERTYPE == "single image":
# calculate parts to render
for line in open(DRQUEUE_SCENEFILE):
if "resolution" in line:
res_arr = line.split()
if res_arr[0] == "resolution":
                    scene_height = int(res_arr[2])
                    scene_width = int(res_arr[1])
        part_height = scene_height / (int(DRQUEUE_ENDFRAME) + 1)
        height_high = scene_height - (int(DRQUEUE_FRAME) * part_height)
        height_low = height_high - part_height
        print("rendering dimensions: 0 " + str(height_low) + " " + str(scene_width) + " " + str(height_high))
# generate frame filename
for line in open(DRQUEUE_SCENEFILE):
if "resolution" in line:
if "." in line:
res_arr = line.split()
outputname = string.replace(res_arr[3], "\"", "")
basename, extension = os.path.splitext(outputname)
                    framename = basename + "_" + string.zfill(DRQUEUE_FRAME, 4) + extension
command = engine_path + " -window 0 " + str(height_low) + " " + str(scene_width) + " " + str(height_high) + " " + DRQUEUE_SCENEFILE + " -file_name " + framename
else:
command = engine_path + " " + DRQUEUE_SCENEFILE + " -render " + str(DRQUEUE_FRAME) + " " + str(block)
# log command line
helper.log_write(command + "\n")
# check scenefile
helper.check_scenefile(DRQUEUE_SCENEFILE)
# run renderer and wait for finish
ret = helper.run_command(command)
# return exit status to IPython
return helper.return_to_ipython(ret)
| gpl-3.0 | 5,246,072,343,090,412,000 | 32.070796 | 169 | 0.619213 | false | 3.4506 | false | false | false |
tjtrebat/algorithms | algorithms/sorting/quicksort/quicksort.py | 1 | 2252 | """
quicksort.py -- sorts an array of integers using the Quicksort sorting algorithm. When executed, uses a randomized
version of quicksort to obtain a good average-case performance over all inputs.
"""
__author__ = 'Tom'
import random
def sort(data, p, r):
""" Sorts the data from p to r
Attributes:
data -- an array of elements
p -- a starting index
r -- an end index
"""
if p < r:
q = partition(data, p, r)
sort(data, p, q - 1)
sort(data, q + 1, r)
return data
def partition(data, p, r):
""" Partitions the subarray data[p..r] around a pivot element, data[r], moving it into its place in
the array.
Attributes:
data -- an array of elements
p -- a starting index
r -- an end index
"""
x = data[r]
i = p - 1
for j in range(p, r):
if data[j] <= x:
i += 1
data[i], data[j] = data[j], data[i]
data[i + 1], data[r] = data[r], data[i + 1]
return i + 1
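# Worked example (illustrative, not part of the original module):
#   partition([2, 8, 7, 1, 3, 5, 6, 4], 0, 7) pivots on the last element (4),
#   rearranges the list in place to [2, 1, 3, 4, 7, 5, 6, 8] and returns 3,
#   the pivot's final index.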
def randomized_sort(data, p, r):
""" Sorts the data from p to r using a randomized partition.
"""
if p < r:
q = randomized_partition(data, p, r)
randomized_sort(data, p, q - 1)
randomized_sort(data, q + 1, r)
return data
def randomized_partition(data, p, r):
""" Partitions the subarray data[p..r] around a randomly-chosen pivot element.
"""
i = random.randint(p, r)
data[r], data[i] = data[i], data[r]
return partition(data, p, r)
if __name__ == "__main__":
import argparse
# create the top-level parser
parser = argparse.ArgumentParser(description='Sort integers using quicksort')
# add arguments
parser.add_argument('integers', metavar='N', type=int, nargs='+', help='an integer for the sort')
parser.add_argument('--begin', metavar='P', type=int, default=0, help='an integer for the start index')
parser.add_argument('--end', metavar='R', type=int, help='an integer for the end index')
# parse arguments
args = parser.parse_args()
# populates end index if it is None
if args.end is None:
args.end = len(args.integers) - 1
# print sorted array
print randomized_sort(args.integers, args.begin, args.end) | gpl-2.0 | 5,903,304,838,772,438,000 | 29.445946 | 114 | 0.596803 | false | 3.491473 | false | false | false |
1flow/1flow | oneflow/core/admin/language.py | 2 | 2912 | # -*- coding: utf-8 -*-
u"""
Copyright 2013-2014 Olivier Cortès <[email protected]>.
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
import logging
from django.contrib import admin
# from django.utils.translation import ugettext_lazy as _
# from django_object_actions import DjangoObjectActions
# from ..models.common import DUPLICATE_STATUS
from ..models.reldb import ( # NOQA
Language,
abstract_replace_duplicate_task
)
LOGGER = logging.getLogger(__name__)
class LanguageAdmin(admin.ModelAdmin):
""" Language admin class. """
list_display = (
'id', 'name', 'dj_code',
'iso639_1', 'iso639_2', 'iso639_3',
'parent',
'duplicate_of',
'duplicate_status',
)
list_display_links = ('id', 'name', )
list_filter = ('parent', 'duplicate_status', )
ordering = ('dj_code', )
change_list_template = "admin/change_list_filter_sidebar.html"
change_list_filter_template = "admin/filter_listing.html"
search_fields = ('name', 'dj_code', 'iso639_1', 'iso639_2', 'iso639_3', )
# def get_object_actions(self, request, context, **kwargs):
# """ Get object actions. """
# objectactions = []
# # Actions cannot be applied to new objects (i.e. Using "add" new obj)
# if 'original' in context:
# # The obj to perform checks against to determine
# # object actions you want to support.
# obj = context['original']
# LOGGER.warning('YALLAAA')
# if obj.duplicate_of:
# if obj.duplicate_status in (DUPLICATE_STATUS.NOT_REPLACED,
# DUPLICATE_STATUS.FAILED, ):
# objectactions.append('replace_duplicate_again')
# return objectactions
# def replace_duplicate_again(self, request, obj):
# """ Re-run the replace_duplicate() task. """
# abstract_replace_duplicate_task.delay(obj._meta.app_label,
# obj._meta.object_name,
# obj.id,
# obj.duplicate_of.id)
# replace_duplicate_again.label = _(u'Replace again')
# replace_duplicate_again.short_description = \
# _(u'Re-run the replace_duplicate() task.')
| agpl-3.0 | -5,439,481,210,857,402,000 | 32.848837 | 79 | 0.620749 | false | 3.83531 | false | false | false |
mrts/foodbank-campaign | src/foodbank/settings.py | 1 | 4616 | """
Django settings for foodbank project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import platform
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# -> this *MUST BE OVERRIDEN* with settings_local in production
SECRET_KEY = 'e6#83hi)*reeq2lk1v9y59u(z@i7(wto-ter#q&3ii8f6t8n2x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'127.0.0.1',
'localhost',
'osale.toidupank.ee',
'test-osale.toidupank.ee',
'uus-osale.toidupank.ee',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tinymce',
'nested_admin',
'locations',
'campaigns',
'coordinators',
'volunteers',
'auditlog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'foodbank.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'foodbank.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'et-ee'
TIME_ZONE = 'Europe/Tallinn'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
HTDOCS = os.path.join(BASE_DIR, '..', 'htdocs')
STATIC_URL = '/static/media/'
STATIC_ROOT = os.path.join(HTDOCS, 'static', 'media')
MEDIA_URL = '/static/uploads/'
MEDIA_ROOT = os.path.join(HTDOCS, 'static', 'uploads')
# Rich-text editor
TINYMCE_DEFAULT_CONFIG = {
'plugins': 'link lists code',
'menubar': 'edit format',
'toolbar': 'undo redo | styleselect | bold italic | removeformat | link | bullist numlist | code',
'width': 500,
'height': 400,
}
# Fix Estonian date formatting
FORMAT_MODULE_PATH = 'foodbank.formats'
EMAIL_BACKEND = 'django_sendmail_backend.backends.EmailBackend'
# For testing email:
# EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
# EMAIL_FILE_PATH = '/tmp/toidupank-email-messages'
ADMIN_URL_PREFIX = 'haldus/'
# Mandatory settings override in production environment
PRODUCTION_HOSTNAME = 'atria.elkdata.ee'
IS_PRODUCTION_ENV = False
if platform.node() == PRODUCTION_HOSTNAME and 'live-osale' in BASE_DIR:
IS_PRODUCTION_ENV = True
from .settings_live import *
else:
# Optional local settings override, especially useful during development
try:
from .settings_local import *
except ImportError:
pass
| mit | 7,900,825,950,640,479,000 | 25.528736 | 102 | 0.686092 | false | 3.364431 | false | false | false |
chiara-paci/dinosaurus | dinosaurus_lib/config.py | 1 | 4742 | # -*- coding: utf-8 -*-
import datetime
PAGINATION=250
PRIMARY_MASTER="rvvmdns03pl.server.intra.rve."
EMAIL_ADMIN="nsmaster.regione.veneto.it."
BASE_DIR="/home/chiara/dinosaurus"
VERSION_FILE="/home/chiara/dinosaurus/VERSION"
fd=open(VERSION_FILE,'r')
VERSION="".join(fd.readlines()).strip()
fd.close()
THEMES_DIR=BASE_DIR+"/share/themes"
TEMPLATES_HTML=BASE_DIR+"/var/templates/html"
TEMPLATES_CSS=BASE_DIR+"/var/templates/css"
TEMPLATES_IMG=BASE_DIR+"/var/templates/img"
TEMPLATES_JS=BASE_DIR+"/var/templates/js"
TEMPLATES_FONTS=BASE_DIR+"/var/templates/fonts"
t=datetime.datetime.today()
SERIAL_NUMBER="%4.4d%2.2d%2.2d%2.2d" % (t.year,t.month,t.day,00)
HTML_BASE_VAR = { "VERSION": VERSION,
"TIMESTAMP": t.strftime("%A, %d %B %Y - %H:%M") }
DEFAULTS={
"ttl": [86400],
"nx_ttl": [3600],
"refresh": [86400,900],
"retry": [1800,600],
"expiry": [2592000,86400],
}
def get_non_default(data,defaults):
""" Ritorna gli elementi che non hanno un valore di default, o il primo dei valori di default.
:param data: Array di valori. Gli elementi uguali a "_" vengono ignorati.
:param defaults: Array di valori di default.
:return: * Se nessun elemento di data ha un valore non di default, ritorna il primo elemento di defaults.
* Se un solo elemento ha un valore non di default, ritorna quell'elemento.
* Altrimenti ritorna un array con gli elementi non di default.
"""
defaults=map(str,defaults)
if type(data)!=list:
return str(data)
L=filter(lambda x: x!="_",data)
L=filter(lambda x: x not in defaults,L)
if len(L)==0: return defaults[0]
if len(L)==1: return L[0]
return L
def max_or_default(data,defaults):
""" Ritorna il maggiore degli elementi che non hanno un valore di default, o il primo dei valori di default.
:param data: Array di valori. Gli elementi uguali a "_" vengono ignorati.
:param defaults: Array di valori di default.
:return: * Se nessun elemento di data ha un valore non di default, ritorna il primo elemento di defaults.
* Altrimenti ritorna il maggiore degli elementi non di default, trasfromato in stringa.
"""
x=get_non_default(data,defaults)
if not x: return defaults[0]
if type(x)!=list: return str(x)
return str( max(map(int,x)) )
def min_or_default(data,defaults):
""" Ritorna il minore degli elementi che non hanno un valore di default, o il primo dei valori di default.
:param data: Array di valori. Gli elementi uguali a "_" vengono ignorati.
:param defaults: Array di valori di default.
:return: * Se nessun elemento di data ha un valore non di default, ritorna il primo elemento di defaults.
* Altrimenti ritorna il minore degli elementi non di default, trasformato in stringa.
"""
x=get_non_default(data,defaults)
if not x: return defaults[0]
if type(x)!=list: return str(x)
return str( min(map(int,x)) )
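# Illustrative examples (added for clarity, not in the original source):
#   get_non_default(["_", "3600"], [86400])        -> "3600"
#   get_non_default(["_", "86400"], [86400])       -> "86400"  (only defaults left)
#   max_or_default(["900", "1800"], [86400, 900])  -> "1800"
#   min_or_default("600", [1800, 600])             -> "600"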
class GetSequence(object):
""" Oggetti callable che generano numeri in sequenza. """
def __init__(self):
self.val=-1
def __call__(self):
self.val+=1
return self.val
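# Illustrative use: seq = GetSequence(); seq() -> 0, seq() -> 1, seq() -> 2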
def ip_cmp(x,y):
"""
Confronto tra due ip.
:param x: ip
:param y: ip
:return: * 1 se x>y;
* 0 se x==y;
* -1 se x<y.
"""
if x==y: return 0
if y=="::1": return 1
if x=="::1": return -1
try:
x_t=map(int,x.split(".")[:4])
except ValueError, e:
return -1
try:
y_t=map(int,y.split(".")[:4])
except ValueError, e:
return 1
if (x_t[0]==127) and (y_t[0]!=127):
return -1
if (x_t[0]!=127) and (y_t[0]==127):
return 1
if (x_t[0] in [ 127,0,10 ]) and (y_t[0] not in [ 127,0,10 ]):
return -1
if (x_t[0] not in [ 127,0,10 ]) and (y_t[0] in [ 127,0,10 ]):
return 1
if (x_t[0]==172) and (x_t[1] in range(16,32)):
if (y_t[0]!=172): return -1
if (y_t[1] not in range(16,32)): return -1
if (y_t[0]==172) and (y_t[1] in range(16,32)):
if (x_t[0]!=172): return 1
if (x_t[1] not in range(16,32)): return 1
if (x_t[0]==192) and (x_t[1]==168):
if (y_t[0]!=192): return -1
if (y_t[1]!=168): return -1
if (y_t[0]==192) and (y_t[1]==168):
if (x_t[0]!=192): return 1
if (x_t[1]!=168): return 1
if (x_t[0]==169) and (x_t[1]==254):
if (y_t[0]!=169): return -1
if (y_t[1]!=254): return -1
if (y_t[0]==169) and (y_t[1]==254):
if (x_t[0]!=169): return 1
if (x_t[1]!=254): return 1
for n in range(0,4):
if x_t[n]<y_t[n]: return -1
if x_t[n]>y_t[n]: return 1
return 0
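# Illustrative use (Python 2 cmp-style comparator; example not in the original file):
#   sorted(["8.8.8.8", "192.168.1.10", "127.0.0.1", "::1"], cmp=ip_cmp)
#   -> ["::1", "127.0.0.1", "192.168.1.10", "8.8.8.8"]
# i.e. "::1", loopback and private ranges sort before public addresses, which
# are then compared octet by octet.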
| gpl-3.0 | 4,065,850,318,391,263,000 | 30.825503 | 113 | 0.589836 | false | 2.76986 | false | false | false |
tuxite/pharmaship | pharmaship/inventory/export.py | 1 | 16202 | # -*- coding: utf-8; -*-
"""Export methods for Inventory application."""
import tarfile
import time
import io
import hashlib
from pathlib import PurePath
from yaml import dump
try:
from yaml import CDumper as Dumper
except ImportError:
from yaml import Dumper
from django.core import serializers
from django.conf import settings
from django.utils.text import slugify
from rest_framework.renderers import JSONRenderer
import pharmaship.inventory.models as models
import pharmaship.inventory.serializers
from pharmaship.core.utils import remove_yaml_pk, get_content_types
from pharmaship.core.utils import log, query_count_all
def serialize_allowance(allowance, content_types):
"""Export an allowance using the YAML format.
To have an usable export, the user needs:
- the :mod:`pharmaship.inventory.models.Allowance` selected instance,
And related to this instance:
- the :mod:`pharmaship.inventory.models.Molecule` objects list,
- the :mod:`pharmaship.inventory.models.Equipment` objects list,
- the :mod:`pharmaship.inventory.models.MoleculeReqQty` objects list,
- the :mod:`pharmaship.inventory.models.EquipmentReqQty` objects list,
- the :mod:`pharmaship.inventory.models.RescueBagReqQty` objects list,
- the :mod:`pharmaship.inventory.models.FirstAidKitReqQty` objects list,
- the :mod:`pharmaship.inventory.models.TelemedicalReqQty` objects list,
- the :mod:`pharmaship.inventory.models.LaboratoryReqQty` objects list.
This function grabs all these together in a list of tuples::
[('filename.yaml', <yaml content string>)]
In addition, it returns the Equipment and Molecule lists.
:param pharmaship.inventory.models.Allowance allowance: Allowance to \
serialize.
:return: List of tuples filenames and streams
:rtype: tuple(list(tuple(str, str)), django.db.models.query.QuerySet, \
django.db.models.query.QuerySet)
"""
log.debug("Start serialize")
renderer = JSONRenderer()
# Molecules used by the allowance
molecule_id_list = []
equipment_id_list = []
# Required quantities for molecules
molecule_reqqty_list = models.MoleculeReqQty.objects.filter(allowance__in=[allowance]).prefetch_related("base")
molecule_id_list += molecule_reqqty_list.values_list("base_id", flat=True)
serialized = pharmaship.inventory.serializers.MoleculeReqQtySerializer(molecule_reqqty_list, many=True)
molecule_reqqty_data = renderer.render(
data=serialized.data,
accepted_media_type='application/json; indent=2'
)
query_count_all()
# Required quantities for equipments
equipment_reqqty_list = models.EquipmentReqQty.objects.filter(allowance__in=[allowance]).prefetch_related("base")
equipment_id_list += equipment_reqqty_list.values_list("base_id", flat=True)
serialized = pharmaship.inventory.serializers.EquipmentReqQtySerializer(equipment_reqqty_list, many=True)
equipment_reqqty_data = renderer.render(
data=serialized.data,
accepted_media_type='application/json; indent=2'
)
query_count_all()
# Required quantities for Laboratory
laboratory_reqqty_list = models.LaboratoryReqQty.objects.filter(allowance__in=[allowance]).prefetch_related("base")
equipment_id_list += laboratory_reqqty_list.values_list("base_id", flat=True)
serialized = pharmaship.inventory.serializers.LaboratoryReqQtySerializer(laboratory_reqqty_list, many=True)
laboratory_reqqty_data = renderer.render(
data=serialized.data,
accepted_media_type='application/json; indent=2'
)
query_count_all()
# Required quantities for Telemedical
telemedical_reqqty_list = models.TelemedicalReqQty.objects.filter(allowance__in=[allowance]).prefetch_related("base")
equipment_id_list += telemedical_reqqty_list.values_list("base_id", flat=True)
serialized = pharmaship.inventory.serializers.TelemedicalReqQtySerializer(telemedical_reqqty_list, many=True)
telemedical_reqqty_data = renderer.render(
data=serialized.data,
accepted_media_type='application/json; indent=2'
)
query_count_all()
# Required quantities for First Aid Kit
first_aid_kit_reqqty_list = models.FirstAidKitReqQty.objects.filter(allowance__in=[allowance]).prefetch_related("base")
molecule_id_list += first_aid_kit_reqqty_list.filter(
content_type_id=content_types["molecule"]
).values_list("object_id", flat=True)
equipment_id_list += first_aid_kit_reqqty_list.filter(
content_type_id=content_types["equipment"]
).values_list("object_id", flat=True)
serialized = pharmaship.inventory.serializers.FirstAidKitReqQtySerializer(first_aid_kit_reqqty_list, many=True)
first_aid_kit_reqqty_data = renderer.render(
data=serialized.data,
accepted_media_type='application/json; indent=2'
)
query_count_all()
# Required quantities for Rescue Bag
rescue_bag_reqqty_list = models.RescueBagReqQty.objects.filter(allowance__in=[allowance]).prefetch_related("base")
molecule_id_list += rescue_bag_reqqty_list.filter(
content_type_id=content_types["molecule"]
).values_list("object_id", flat=True)
equipment_id_list += rescue_bag_reqqty_list.filter(
content_type_id=content_types["equipment"]
).values_list("object_id", flat=True)
serialized = pharmaship.inventory.serializers.RescueBagReqQtySerializer(rescue_bag_reqqty_list, many=True)
rescue_bag_reqqty_data = renderer.render(
data=serialized.data,
accepted_media_type='application/json; indent=2'
)
query_count_all()
# Equipment used by the allowance
equipment_list = models.Equipment.objects.filter(id__in=equipment_id_list).prefetch_related("group")
equipment_data = serializers.serialize(
"yaml",
equipment_list,
use_natural_foreign_keys=True,
fields=(
"name_en",
"packaging_en",
"remark_en",
"consumable",
"perishable",
"picture",
"group",
)
)
log.debug("Equipment")
query_count_all()
# Molecule used by the allowance
molecule_list = models.Molecule.objects.filter(id__in=molecule_id_list).prefetch_related("group")
molecule_data = serializers.serialize(
"yaml",
molecule_list,
use_natural_foreign_keys=True,
fields=(
"name_en",
"composition_en",
"remark_en",
"roa",
"dosage_form",
"medicine_list",
"group",
)
)
log.debug("Molecule")
query_count_all()
# Allowance record
allowance_data = serializers.serialize(
"yaml",
(allowance,),
fields=('name', 'author', 'version', 'date', 'additional'),
use_natural_foreign_keys=True
)
log.debug("Allowance")
query_count_all()
log.debug("End serialize")
# Returning a list with tuples: (filename, data)
return ([
('inventory/molecule_obj.yaml', remove_yaml_pk(molecule_data)),
('inventory/equipment_obj.yaml', remove_yaml_pk(equipment_data)),
('inventory/molecule_reqqty.json', molecule_reqqty_data),
('inventory/equipment_reqqty.json', equipment_reqqty_data),
('inventory/laboratory_reqqty.json', laboratory_reqqty_data),
('inventory/telemedical_reqqty.json', telemedical_reqqty_data),
('inventory/first_aid_kit_reqqty.json', first_aid_kit_reqqty_data),
('inventory/rescue_bag_reqqty.json', rescue_bag_reqqty_data),
('inventory/allowance.yaml', remove_yaml_pk(allowance_data)),
], equipment_list, molecule_list)
def get_pictures(equipment_list):
"""Return a list of picture paths to include in the archive.
:param equipment_list: List of equipment for serialized allowance.
:type equipment_list: django.db.models.query.QuerySet
:return: List of pictures filenames.
:rtype: list
"""
# Pictures attached to equipments
pictures = equipment_list.exclude(picture='').values_list(
'picture', flat=True)
return pictures
def get_hash(name, content=None, filename=None):
"""Return sha256 hash and filename for MANIFEST file.
:param str name: Name of the file to hash.
:param content: Content of the file to hash.
:type content: bytes or str
:param str filename: Path to the file to hash.
:return: Name and file hash in hexadecimal string.
:rtype: tuple(str, str)
"""
if content is None and filename is None:
return None
m = hashlib.sha256()
if content:
if isinstance(content, bytes):
m.update(content)
else:
m.update(bytes(content, "utf-8"))
elif filename:
try:
with open(filename, 'rb') as fdesc:
m.update(fdesc.read())
except IOError as error:
log.error("File %s not readable. %s", filename, error)
return None
return (name, m.hexdigest())
def create_tarinfo(name, content):
"""Return a the TarInfo for a virtual file.
:param str name: Name of the file
:param content: Content of the file to add to the tar file.
:type content: bytes or str
:return: :class:`tarfile.TarInfo` and :class:`io.BytesIO` instance of the
file content.
:rtype: tuple
"""
if isinstance(content, bytes):
f = io.BytesIO(content)
else:
f = io.BytesIO(bytes(content, "utf-8"))
info = tarfile.TarInfo()
info.name = name
info.type = tarfile.REGTYPE
info.uid = info.gid = 0
info.uname = info.gname = "root"
info.mtime = time.time()
info.size = len(f.getvalue())
return (info, f)
def create_manifest(items):
"""Create the data to write into the MANIFEST file.
:param list(tuple) items: list of files with their hash.
:return: Formatted string
:rtype: str
"""
content = ""
for item in items:
content += "{1} {0}\n".format(item[0], item[1])
return content
def create_package_yaml(allowance):
"""Export package info in YAML string.
:param allowance: Allowance instance to export
:type allowance: pharmaship.inventory.models.Allowance
:return: YAML string containing Allowance data.
:rtype: str
"""
content = {
"info": {
"author": allowance.author,
"date": allowance.date,
"version": allowance.version
},
"modules": {
"inventory": {
"install_file": False
}
}
}
content_string = dump(content, Dumper=Dumper)
return content_string
def create_pot(allowance):
"""Create of PO template file for Equipment & Molecule strings."""
# Get serialized Allowance data
content_types = get_content_types()
_data, equipment_list, molecule_list = serialize_allowance(allowance, content_types)
strings = []
for item in equipment_list:
strings.append(item.name)
strings.append(item.packaging)
strings.append(item.remark)
for item in molecule_list:
strings.append(item.name)
strings.append(item.remark)
# Remove empty strings
strings = list(filter(None, strings))
# Remove duplicates
strings = list(set(strings))
# Sort for easier translation
strings.sort()
# Create POT file
result = """msgid ""
msgstr ""
"Project-Id-Version: Pharmaship export\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8bit\\n"\n
"""
for item in strings:
result += "msgid \"{0}\"\n".format(item.replace("\"", "\\\""))
result += "msgstr \"\"\n\n"
return result
def create_po(allowance, lang_code):
# Get serialized Allowance data
content_types = get_content_types()
_data, equipment_list, molecule_list = serialize_allowance(allowance, content_types)
strings = {}
for item in equipment_list:
strings[item.name_en] = getattr(item, "name_{0}".format(lang_code))
strings[item.packaging_en] = getattr(item, "packaging_{0}".format(lang_code))
strings[item.remark_en] = getattr(item, "remark_{0}".format(lang_code))
for item in molecule_list:
strings[item.name_en] = getattr(item, "name_{0}".format(lang_code))
strings[item.composition_en] = getattr(item, "composition_{0}".format(lang_code))
strings[item.remark_en] = getattr(item, "remark_{0}".format(lang_code))
# Create PO file
result = """msgid ""
msgstr ""
"Project-Id-Version: Pharmaship export\\n"
"MIME-Version: 1.0\\n"
"Language: {0}\\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8bit\\n"\n
""".format(lang_code.lower())
for item in strings:
if not item:
continue
result += "msgid \"{0}\"\n".format(item.replace("\"", "\\\""))
result += "msgstr \"{0}\"\n\n".format(strings[item].replace("\"", "\\\""))
return result
def create_archive(allowance, file_obj):
"""Create an archive from the given `Allowance` instance.
The response is a tar.gz file containing YAML files generated by the
function `serialize_allowance`.
Pictures are added if any.
The package description file (``package.yaml``) and the ``MANIFEST`` file
are created at the end.
:param allowance: Allowance instance to export
:type allowance: pharmaship.inventory.models.Allowance
:param file_obj: Destination file object
:type file_obj: argparse.FileType or any compatible file object
:return: ``True`` if success
:rtype: bool
"""
# Creating a tar.gz archive
hashes = []
serialized_data, equipment_list, molecule_list = serialize_allowance(
allowance=allowance,
content_types=get_content_types()
)
with tarfile.open(fileobj=file_obj, mode='w') as tar:
# Processing the database
for item in serialized_data:
info, f = create_tarinfo(item[0], item[1])
tar.addfile(info, f)
hashes.append(get_hash(info.name, content=item[1]))
# Adding the pictures of Equipment
for item in get_pictures(equipment_list):
picture_filename = settings.PICTURES_FOLDER / item
log.debug(picture_filename)
try:
tar.add(picture_filename, arcname=PurePath("pictures", item))
# TODO: Detail Exception
except Exception as error:
log.error("Error: %s", error)
hashes.append(
get_hash(PurePath("pictures", item), filename=picture_filename)
)
# Adding the translation files if any
# TODO: Generate MO if only PO is found...
mo_filename = "{0}.mo".format(slugify(allowance.name))
for item in settings.TRANSLATIONS_FOLDER.glob("*/LC_MESSAGES/{0}".format(mo_filename)):
log.debug(item)
relative_path = PurePath("locale", item.relative_to(settings.TRANSLATIONS_FOLDER))
tar.add(item, arcname=relative_path)
hashes.append(get_hash(relative_path, filename=item))
# Try to get also the PO file
po_filename = item.with_suffix(".po")
if po_filename.exists():
log.debug(po_filename)
relative_path = PurePath("locale", po_filename.relative_to(settings.TRANSLATIONS_FOLDER))
tar.add(po_filename, arcname=relative_path)
hashes.append(get_hash(relative_path, filename=po_filename))
# Add the package description file
package_content = create_package_yaml(allowance)
info, f = create_tarinfo("package.yaml", package_content)
tar.addfile(info, f)
hashes.append(get_hash("package.yaml", content=package_content))
# Add the MANIFEST
manifest_content = create_manifest(hashes)
info, f = create_tarinfo("MANIFEST", manifest_content)
tar.addfile(info, f)
return True
| agpl-3.0 | 3,850,545,553,416,638,500 | 33.2537 | 123 | 0.650846 | false | 3.685623 | false | false | false |
RedHatInsights/insights-core | insights/parsers/iptables.py | 1 | 8316 | """
IPTables configuration
======================
Module for processing output of the ``iptables-save`` and ``ip6tables-save``
commands. Parsers included are:
IPTables - command ``iptables-save``
------------------------------------
IP6Tables - command ``ip6tables-save``
--------------------------------------
IPTabPermanent - file ``/etc/sysconfig/iptables``
-------------------------------------------------
IP6TabPermanent - file ``/etc/sysconfig/ip6tables``
---------------------------------------------------
Sample input data looks like::
# Generated by iptables-save v1.4.7 on Tue Aug 16 10:18:43 2016
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [769:196899]
:REJECT-LOG - [0:0]
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -s 192.168.0.0/24 -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A REJECT-LOG -p tcp -j REJECT --reject-with tcp-reset
COMMIT
# Completed on Tue Aug 16 10:18:43 2016
# Generated by iptables-save v1.4.7 on Tue Aug 16 10:18:43 2016
*mangle
:PREROUTING ACCEPT [451:22060]
:INPUT ACCEPT [451:22060]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [594:47151]
:POSTROUTING ACCEPT [594:47151]
COMMIT
# Completed on Tue Aug 16 10:18:43 2016
# Generated by iptables-save v1.4.7 on Tue Aug 16 10:18:43 2016
*nat
:PREROUTING ACCEPT [0:0]
:POSTROUTING ACCEPT [3:450]
:OUTPUT ACCEPT [3:450]
COMMIT
# Completed on Tue Aug 16 10:18:43 2016
* Each table of iptables starts with a ``# Generated by ...`` line.
* Each table starts with ``*<table-name>``, for example ``*filter``.
* Each chain specifications starts with a ``:`` sign.
* A chain specification looks like ``:<chain-name> <chain-policy> [<packet-counter>:<byte-counter>]``
* The chain-name may be for example ``INPUT``.
* Each ``iptables`` rule starts with a `-` sign.
Examples:
>>> ipt.rules[0] == {'target': 'ACCEPT', 'chain': 'INPUT', 'rule': '-m state --state RELATED,ESTABLISHED -j ACCEPT', 'table': 'filter', 'target_options': None, 'target_action': 'jump', 'constraints': '-m state --state RELATED,ESTABLISHED'}
True
>>> ipt.get_chain('INPUT')[1] == {'target': 'ACCEPT', 'chain': 'INPUT', 'rule': '-s 192.168.0.0/24 -j ACCEPT', 'table': 'filter', 'target_options': None, 'target_action': 'jump', 'constraints': '-s 192.168.0.0/24'}
True
>>> ipt.table_chains('mangle') == {'FORWARD': [], 'INPUT': [], 'POSTROUTING': [], 'PREROUTING': [], 'OUTPUT': []}
True
>>> ipt.get_table('nat')[-1] == {'policy': 'ACCEPT', 'table': 'nat', 'byte_counter': 450, 'name': 'OUTPUT', 'packet_counter': 3}
True
"""
from .. import Parser, parser, get_active_lines, CommandParser
from insights.specs import Specs
class IPTablesConfiguration(Parser):
"""
A general class for parsing iptables configuration in the
``iptables-save``-like format.
"""
def parse_content(self, content):
self.chains = []
self.rules = []
current_table = None
for line in get_active_lines(content):
if line.startswith("*"):
current_table = line[1:].strip()
elif line.startswith(":"):
name, policy, counter = line[1:].split()
packet_counter, byte_counter = counter.strip("[]").split(":")
self.chains.append({
"policy": policy if policy != "-" else None,
"table": current_table,
"name": name,
"packet_counter": int(packet_counter),
"byte_counter": int(byte_counter),
})
elif line.startswith("-"):
line_spl = line[3:].split(None, 1)
if not line_spl:
continue
chain_name = line_spl[0]
rule = line_spl[1] if len(line_spl) == 2 else ''
target_option = [i for i in (' -j', '-j ', ' -g', '-g ') if i in rule]
if target_option:
constraints, target = [i.strip() for i in rule.split(target_option[-1])]
if " " in target:
target, target_options = target.split(None, 1)
else:
target_options = None
self.rules.append({
"table": current_table,
"chain": chain_name,
"rule": rule,
"target_action": "jump" if target_option[-1].strip() == "-j" else "goto",
"constraints": constraints,
"target": target,
"target_options": target_options
})
else:
self.rules.append({
"table": current_table,
"chain": chain_name,
"rule": rule
})
def get_chain(self, name, table="filter"):
"""
Get the list of rules for a particular chain. Chain order is kept intact.
Args:
name (str): chain name, e.g. ``
table (str): table name, defaults to ``filter``
Returns:
list: rules
"""
return [r for r in self.rules if r["table"] == table and r["chain"] == name]
def get_table(self, name="filter"):
"""
Get the list of chains for a particular table.
Args:
name (str): table name, defaults to ``filter``
Returns:
list: chains
"""
return [c for c in self.chains if c["table"] == name]
def table_chains(self, table="filter"):
"""
Get a dict where the keys are all the chains for the given table
and each value is the set of rules defined for the given chain.
Args:
table (str): table name, defaults to ``filter``
Returns:
dict: chains with set of defined rules
"""
return dict((c["name"], self.get_chain(c["name"], table)) for c in self.get_table(table))
def get_rule(self, s):
"""
Get the list of rules that contain the given string.
Args:
s (str): string to look for in iptables rules
Returns:
list: rules containing given string
"""
return [r for r in self.rules if s in r["rule"]]
def __contains__(self, s):
return any(s in r["rule"] for r in self.rules)
@parser(Specs.iptables)
class IPTables(CommandParser, IPTablesConfiguration):
"""
Process output of the ``iptables-save`` command.
See the :py:class:`insights.parsers.iptables.IPTablesConfiguration` base
class for additional information.
"""
pass
@parser(Specs.ip6tables)
class IP6Tables(CommandParser, IPTablesConfiguration):
"""
Process output of the ``ip6tables-save`` command.
See the :py:class:`insights.parsers.iptables.IPTablesConfiguration` base
class for additional information.
"""
pass
@parser(Specs.iptables_permanent)
class IPTabPermanent(IPTablesConfiguration):
"""
Process ``iptables`` configuration saved in file ``/etc/sysconfig/iptables``.
The configuration in this file is loaded by the ``iptables`` service when the system boots.
New configuration is saved by using the ``service iptables save`` command. This configuration
file is not available on a system with ``firewalld`` service.
See the :py:class:`insights.parsers.iptables.IPTablesConfiguration` base
class for additional information.
"""
pass
@parser(Specs.ip6tables_permanent)
class IP6TabPermanent(IPTablesConfiguration):
"""
Process ``ip6tables`` configuration saved in file ``/etc/sysconfig/ip6tables``.
The configuration in this file is loaded by the ``ip6tables`` service when the system boots.
New configuration is saved by using the ``service ip6tables save`` command. This configuration
file is not available on a system with ``firewalld`` service.
See the :py:class:`insights.parsers.iptables.IPTablesConfiguration` base
class for additional information.
"""
pass
| apache-2.0 | -8,611,245,743,958,177,000 | 35.156522 | 243 | 0.567701 | false | 3.986577 | true | false | false |
GENI-NSF/gram | juno/install/Keystone.py | 1 | 5642 | #----------------------------------------------------------------------
# Copyright (c) 2013 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
from GenericInstaller import GenericInstaller
from gram.am.gram import config
class Keystone(GenericInstaller):
# Return a list of command strings for installing this component
def installCommands(self):
self.comment("*** Keystone Install ***")
self.add("rm -f /var/lib/keystone/keystone.db")
# Set the SQL connection in /etc/keystone/conf
self.comment("Step 2. Edit /etc/keystone/keystone.conf")
keystone_user = config.keystone_user
keystone_password = config.keystone_password
keystone_conf_filename = '/etc/keystone/keystone.conf'
saved_keystone_conf_filename = '/home/gram/gram/juno/install/control_files/keystone.conf'
os_password = config.os_password
os_region_name = config.os_region_name
service_token = config.service_token
backup_directory = config.backup_directory
connection_command = "connection = mysql:\/\/" + \
keystone_user + ":" + keystone_password + \
"@" + config.control_host + "\/keystone"
self.backup("/etc/keystone", backup_directory, "keystone.conf")
self.add("TMPTOKEN=`openssl rand -hex 10`")
self.add("cp " + saved_keystone_conf_filename + " " + keystone_conf_filename)
self.sed("s/^connection =.*/"+connection_command+"/",
keystone_conf_filename)
self.sed("s/^admin_token=.*/admin_token=${TMPTOKEN}/", keystone_conf_filename)
# Restart keystone and create the database tables
self.comment("Step 3. Restart Keystone and create DB tables")
self.add("su -s /bin/sh -c \"keystone-manage db_sync\" keystone")
self.add("service keystone restart")
self.add("sleep 5")
#Start a cron job that purges expired tokens hourly
cron_cmd = "(crontab -l -u keystone 2>&1 | grep -q token_flush) || " + \
"echo '@hourly /usr/bin/keystone-manage token_flush >/var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/crontabs/keystone"
self.add(cron_cmd)
# Install data and enpoints
self.comment("Step 4. Download data script")
saved_data_script_filename = '/home/gram/gram/juno/install/control_files/keystone_basic.sh'
data_script_filename = 'keystone_basic.sh'
self.add("rm -f " + data_script_filename)
self.add("cp " + saved_data_script_filename + " " + data_script_filename)
self.sed("s/CONTROL_HOST=.*/CONTROL_HOST=" + config.control_host + "/",data_script_filename)
self.sed("s/OS_SERVICE_TOKEN=.*/OS_SERVICE_TOKEN=${TMPTOKEN}/", data_script_filename)
self.sed("s/OS_PASSWORD=.*/OS_PASSWORD=" + config.os_password + "/",data_script_filename)
self.sed("s/OS_EMAIL=.*/OS_EMAIL=" + config.control_email_addr + "/",data_script_filename)
self.sed("s/OS_SERVICE_PASSWORD=.*/OS_SERVICE_PASSWORD=" + config.service_password + "/",data_script_filename)
self.add("chmod a+x ./" + data_script_filename)
self.add("./" + data_script_filename)
# Create the novarc file
self.comment("Step 5. Create novarc file")
novarc_file = "/etc/novarc"
self.backup("/etc", backup_directory, "novarc")
self.writeToFile("export OS_TENANT_NAME=admin", novarc_file)
self.appendToFile("export OS_USERNAME=admin", novarc_file)
self.appendToFile("export OS_PASSWORD=" + config.os_password , novarc_file)
self.appendToFile("export OS_AUTH_URL=http://" + config.control_host + ":35357/v2.0", novarc_file)
#self.appendToFile("export OS_NO_CACHE=" + str(config.os_no_cache), novarc_file)
#self.appendToFile("export OS_REGION_NAME=" + config.os_region_name, novarc_file)
#self.appendToFile("export SERVICE_TOKEN=" + config.service_token, novarc_file)
#self.appendToFile("export SERVICE_ENDPOINT=" + config.service_endpoint, novarc_file)
self.add("sleep 5")
self.add("source " + novarc_file)
# Return a list of command strings for uninstalling this component
def uninstallCommands(self):
mysql_password = config.mysql_password
backup_directory = config.backup_directory
self.comment("*** Keystone Uninstall ***")
self.restore("/etc/keystone", backup_directory, "keystone.conf")
self.restore("/etc/keystone", backup_directory, "logging.conf")
self.restore("/etc", backup_directory, "novarc")
| mit | 3,796,973,718,651,089,400 | 52.733333 | 149 | 0.655441 | false | 3.801887 | true | false | false |
wasit7/visionmarker | alpha/visionmarker/app/migrations/0001_initial.py | 1 | 3783 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-21 19:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Batch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_time', models.DateTimeField(auto_now_add=True)),
('updated_time', models.DateTimeField(auto_now=True)),
('status', models.CharField(choices=[('1', 'To do'), ('2', 'To be labelled'), ('3', 'To be reviewed'), ('4', 'Done')], default='1', max_length=1)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_time', models.DateTimeField(auto_now_add=True)),
('message', models.CharField(max_length=20)),
('batch', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='app.Batch')),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('src_path', models.CharField(max_length=50)),
('raw_path', models.CharField(max_length=20)),
('batch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Batch')),
],
),
migrations.CreateModel(
name='Label',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('x', models.PositiveSmallIntegerField()),
('y', models.PositiveSmallIntegerField()),
('width', models.PositiveSmallIntegerField()),
('height', models.PositiveSmallIntegerField()),
('brand', models.CharField(max_length=20)),
('model', models.CharField(max_length=20)),
('color', models.CharField(max_length=10)),
('nickname', models.CharField(blank=True, max_length=20)),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Image')),
],
),
migrations.CreateModel(
name='MyUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('isreviewer', models.BooleanField(default=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='app.MyUser'),
),
migrations.AddField(
model_name='batch',
name='labeller',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='labeller', to='app.MyUser'),
),
migrations.AddField(
model_name='batch',
name='reviewer',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='reviewer', to='app.MyUser'),
),
]
| mit | 2,221,291,614,650,264,800 | 44.035714 | 163 | 0.567803 | false | 4.303754 | false | false | false |
edoburu/django-any-urlfield | setup.py | 1 | 2594 | #!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
import codecs
import os
import re
import sys
# When creating the sdist, make sure the django.mo file also exists:
if 'sdist' in sys.argv or 'develop' in sys.argv:
os.chdir('any_urlfield')
try:
from django.core import management
management.call_command('compilemessages', stdout=sys.stderr, verbosity=1)
except ImportError:
if 'sdist' in sys.argv:
raise
finally:
os.chdir('..')
def read(*parts):
file_path = path.join(path.dirname(__file__), *parts)
return codecs.open(file_path, encoding='utf-8').read()
def find_version(*parts):
version_file = read(*parts)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
setup(
name='django-any-urlfield',
version=find_version('any_urlfield', '__init__.py'),
license='Apache 2.0',
requires=[
'Django (>=1.8)',
],
description='An improved URL selector to choose between internal models and external URLs',
long_description=read('README.rst'),
author='Diederik van der Boor',
author_email='[email protected]',
url='https://github.com/edoburu/django-any-urlfield',
download_url='https://github.com/edoburu/django-any-urlfield/zipball/master',
packages=find_packages(exclude=('example*',)),
include_package_data=True,
test_suite = 'runtests',
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Framework :: Django',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| apache-2.0 | 2,856,539,361,783,235,000 | 30.634146 | 95 | 0.615266 | false | 3.912519 | false | false | false |
Nrikolo/X8_AutoPilot | fsm/scripts/fsm_states.py | 1 | 15703 | #!/usr/bin/env python
#General Imports
import roslib; #roslib.load_manifest('smach_tutorials')
roslib.load_manifest('fsm')
import rospy
import time
import random
import math
import numpy
import smach
import smach_ros
#For dealing with msgs
from std_msgs.msg import Float32
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
# The Listener object used in the FSM
#from ListenerClass import ListenerClass
#from beginner_tutorials.srv import *
from Utility import *
from quadrotor_input.srv import *
# define state MANUAL
class MANUAL(smach.State):
def __init__(self,flightStatus):
smach.State.__init__(self, outcomes=['Finish',
'Monitor',
'TOAUTONOMOUS'])
self.flightStatus = flightStatus
def execute(self, userdata):
## rospy.loginfo('Executing state MANUAL')
rospy.sleep(self.flightStatus.sleepTime)
## print "\nstable:", self.flightStatus.getStable()
if ( self.flightStatus.IsBatteryOK() ) and ( self.flightStatus.listener.AutoPilotSwitch == True ) and self.flightStatus.getStable() == True :
print ("AutoPilot switch is ON, there is enough battery ---->>> Transfering control to PC ")
return 'TOAUTONOMOUS'
if self.flightStatus.IsTimeExceeded() :
print ("Mission Duration Exceeded - Finish")
return 'Finish'
else:
return 'Monitor'
class AUTONOMOUS_INIT(smach.State):
def __init__(self,flightStatus):
smach.State.__init__(self,outcomes=['ToIdle',
'ToHover',
'ToLand',
'Failure'])
## input_keys=['AutoInit_mission_stage_in'],
## output_keys=['AutoInit_mission_stage_out'])
self.flightStatus = flightStatus
def execute(self, userdata):
## rospy.loginfo('Executing state AUTONOMOUS_INIT')
rospy.sleep(self.flightStatus.sleepTime)
if self.flightStatus.IsBatteryOK() and self.flightStatus.listener.AutoPilotSwitch == True :
z = self.flightStatus.getCurrentAltitude()
if z - self.flightStatus.getSafeAltitude() > self.flightStatus.tolerance: #Safe
print ("Vehicle above minimal safe altitude - goto HOVER")
return 'ToHover'
elif z - self.flightStatus.getGroundLevel() < self.flightStatus.tolerance: #On the GROUND
print ("Vehicle seems to be still on the ground - goto IDLE")
return 'ToIdle'
else :
print ("Vehicle in intermediate altitude - goto LAND")
return 'ToLand' #Intermediate altitude - LAND!
else:
return 'Failure'
# define state TAKEOFF
class TAKEOFF(smach.State):
def __init__(self,flightStatus):
smach.State.__init__(self, outcomes=['Success',
'Aborted_NoBatt',
'Aborted_Diverge',
'Maintain'])
## input_keys = ['TakeOff_mission_stage_in'],
## output_keys = ['TakeOff_mission_stage_out'])
self.flightStatus = flightStatus
def execute(self, userdata):
## rospy.loginfo('Executing state TAKEOFF')
rospy.sleep(self.flightStatus.sleepTime)
if ( self.flightStatus.listener.AutoPilotSwitch == False ) or ( self.flightStatus.PositionErrorDiverge()== True ):
print ("Either pilot wants control back or vehicle is unstable - goto MANUAL")
return 'Aborted_Diverge'
elif (self.flightStatus.listener.MissionGoSwitch == False ) or ( self.flightStatus.IsBatteryOK()== False ):
print ("Either pilot wants vehicle to come home or there is no Batt - goto LAND")#Later should be mapped to GOHOME state
return 'Aborted_NoBatt'
if self.flightStatus.PositionErrorConverge()==True:
print ("Takeoff complete and succeful - goto HOVER")
return 'Success'
print ("TakingOff...")#Later should be mapped to GOHOME state
return 'Maintain'
# define state HOVER
class HOVER(smach.State):
def __init__(self,flightStatus):
smach.State.__init__(self, outcomes=['Aborted_NoBatt',
'Aborted_Diverge',
'Maintain',
'GoHome',
'FollowTraj'])
## input_keys = ['Hover_mission_stage_in'],
## output_keys = ['Hover_mission_stage_out'])
self.flightStatus = flightStatus
def execute(self, userdata):
## rospy.loginfo('Executing state HOVER')
rospy.sleep(self.flightStatus.sleepTime)
if self.flightStatus.PositionErrorDiverge() or (self.flightStatus.listener.AutoPilotSwitch is False) :
print ("Either pilot wants control back or vehicle is unstable - goto MANUAL")
return 'Aborted_Diverge' #->Manual!
if ( not self.flightStatus.IsBatteryOK() ) or (self.flightStatus.listener.MissionGoSwitch is False) or self.flightStatus.IsTimeExceeded() :
print ("Either pilot wants vehicle to come home, duration exceeded or there is no Battery")#Later should be mapped to GOHOME state
#print "Battery Voltage Level: " ,self.flightStatus.getCurrentBatteryVoltage()
#print "MissionGo Switch: " ,self.flightStatus.listener.MissionGoSwitch
if self.flightStatus.IsHome():
return 'Aborted_NoBatt' #->Vehicle should LAND
else:
return 'GoHome' #->Vehicle should return home
#print "self.flightStatus.DistanceToTarget(3)", self.flightStatus.DistanceToTarget(3)
if self.flightStatus.DistanceToTarget(3) > 15 * self.flightStatus.tolerance:
print("Far away from target, should generate a trajectry to go there")
return 'FollowTraj'
print("Hovering....")
return 'Maintain'
# define state LAND
class LAND(smach.State):
def __init__(self,flightStatus):
smach.State.__init__(self, outcomes=['Success',
'Failure',
'Maintain'])
self.flightStatus = flightStatus
def execute(self, userdata):
## rospy.loginfo('Executing state LAND')
rospy.sleep(self.flightStatus.sleepTime)
if self.flightStatus.PositionErrorDiverge() or self.flightStatus.listener.AutoPilotSwitch==False:
print ("Vehicle is unstable - goto MANUAL")
return 'Failure' #->Manual!
if self.flightStatus.PositionErrorConverge():
print ("Vehicle has landed - goto IDLE")
return 'Success' #->Idle
print ("Landing...")
return 'Maintain' #Remain in Land!
# define state IDLE
class IDLE(smach.State):
def __init__(self,flightStatus):
smach.State.__init__(self, outcomes=['Finish',
'Start',
'Maintain'])
## input_keys = ['Idle_mission_stage_in'],
## output_keys = ['Idle_mission_stage_out'])
self.flightStatus = flightStatus
def execute(self, userdata):
## rospy.loginfo('Executing state IDLE')
rospy.sleep(self.flightStatus.sleepTime)
if self.flightStatus.listener.AutoPilotSwitch == False or not self.flightStatus.IsBatteryOK():
print ('All Controllers turned off - we are DONE')
#Waited for a while in idle or one of the switches is OFF
print ("AutoPilot is OFF --->>> goto MANUAL")
return 'Finish' #- to manual
elif self.flightStatus.IsThrottleUp() and self.flightStatus.IsBatteryOK() and self.flightStatus.listener.MissionGoSwitch == True :
#Throttle is up and there is enough battery
print ("Seems like pilot wants to take off and there's enough battery --->>> goto TAKEOFF")
return 'Start' #- to takeoff
print("Idle...")
return 'Maintain'
# define state FOLLOW_TRAJECTORY (this state is a template for GO_HOME or any other trajectory-following state; the only difference is which trajectory is followed)
class FOLLOW_TRAJECTORY(smach.State):
def __init__(self,flightStatus,str_ParentStateName):
smach.State.__init__(self, outcomes=['Arrived',
'Aborted_Diverge',
'Maintain'])
## input_keys = ['TrajFol_mission_stage_in'],
## output_keys = ['TrajFol_mission_stage_out'])
self.flightStatus = flightStatus
self.str_ParentStateName = str_ParentStateName
def execute(self, userdata):
## rospy.loginfo('Executing state FOLLOW_TRAJECTORY inside %s', self.str_ParentStateName )
rospy.sleep(self.flightStatus.sleepTime)
        # TODO: if already following a trajectory and the target pose is the home pose, simply maintain.
if self.flightStatus.PositionErrorDiverge() or self.flightStatus.listener.AutoPilotSwitch == False:
print ("Either pilot wants control back or vehicle is unstable --->>> goto MANUAL")
return 'Aborted_Diverge' #--->>>Manual!
if self.flightStatus.PositionErrorConverge() : #Regardless of parent container, if arrived at destination, should goto HOVER
print ("Seems like vehicle arrived at destination --->>> goto HOVER")
return 'Arrived'
for case in switch(self.str_ParentStateName):
if case('GO_HOME'):
if self.flightStatus.IsBatteryOK() and not self.flightStatus.IsTimeExceeded() and self.flightStatus.listener.MissionGoSwitch == True :
return 'Arrived' #->Vehicle should go to HOVER
else:
print ("Vehicle returning home...")
return 'Maintain' #->Vehicle should continue going home
break
if case('FOLLOW_TRAJ'):
if self.flightStatus.listener.MissionGoSwitch == False or not self.flightStatus.IsBatteryOK() or self.flightStatus.IsTimeExceeded() :
return 'Arrived' #->Vehicle should go to HOVER
break
print("Following Trajectory...")
return 'Maintain'
# define state CONTROLLER_INIT
class CONTROLLER_INIT(smach.State):
def __init__(self, flightStatus, controlManagement , str_ParentStateName):
smach.State.__init__(self, outcomes=['Success','Failure'])
self.flightStatus = flightStatus
self.controlManagement = controlManagement
self.str_ParentStateName = str_ParentStateName #Used to indicate which controller should be turned ON
self.dict = {'True': 'ON', 'False':'OFF'};
#self.stateDictionary = {'IDLE': 1, 'HOVER':2,'LAND':3,'TAKEOFF':4,'GO_HOME':5};
def execute(self,userdata):
## rospy.loginfo('Executing state ControllerInit in %s' , self.str_ParentStateName )
rospy.sleep(self.flightStatus.sleepTime)
# Create a service client
Service_in = CommandControllerRequest()
        # Designates whether there are controller gains to be adjusted (default)
#Service_in.input_gain_flag = True
# Controller PID gains (regarded only if flag is TRUE, Default)
#Service_in.gains = [1.0,2.0,3.0]
# Default - Controller should turn ON
Service_in.running = True
if self.str_ParentStateName is 'IDLE':
## print "SwitchCase IDLE"
if self.flightStatus.listener.AutoPilotSwitch == False or self.flightStatus.listener.MissionGoSwitch == False or not self.flightStatus.IsBatteryOK():
print ('All Controllers should be turned off...')
                # Indicate that the controller should turn OFF
Service_in.running = False
else:
print("Getting ready to start mission...")
print ("Creating a TargetPose to be used as a constant ref signal for the controller")
self.flightStatus.setTargetPose(self.flightStatus.getCurrentPose().position)
self.flightStatus._targetPose.position.z = self.flightStatus.getCurrentAltitude()
else:
for case in switch(self.str_ParentStateName):
if case('HOVER'):
## print "SwitchCase HOVER"
## print("Starting Controller for HOVER")
print ("Creating a StampedPose to be used as a constant ref signal for the controller")
self.flightStatus.setTargetPose(self.flightStatus.getCurrentPose().position)
break
## Service_in.path.poses.append(self.flightStatus.getCurrentPoseStamped())
if case('LAND'):
## print "SwitchCase LAND"
self.flightStatus.setTargetPose(self.flightStatus.getCurrentPose().position)
self.flightStatus._targetPose.position.z = self.flightStatus.getGroundLevel()
print 'Generating trajectory for LAND'
break
if case('TAKEOFF'):
## print "SwitchCase TAKEOFF"
self.flightStatus.setTargetPose(self.flightStatus.getCurrentPose().position)
self.flightStatus._targetPose.position.z = self.flightStatus.getSafeAltitude() + 0.1 #MOdify the z value of the private targetPose attribute
print 'Generating trajectory for TAKEOFF'
break
if case('GO_HOME'):
## print "SwitchCase GOHOME"
self.flightStatus.setTargetPose(self.flightStatus.getHomePose().position)
## print 'GO_HOME'
## print 'GO_HOME'
break
## print "Prior to generating a trajectory"
## print "Current:" , self.flightStatus.getCurrentPose()
## print "Target:", self.flightStatus.getTargetPose()
        # Call a function that generates a trajectory for the controller to follow - - >>>> SHOULD BE A SERVICE PROVIDER
Service_in.path.poses = getTrajectory(self.flightStatus.getCurrentPose(),
self.flightStatus.getTargetPose())
if self.controlManagement.ControllerClient(Service_in):
print("Controller SUCCEEDED to turn " + self.dict[str(Service_in.running)] )
return 'Success'
else:
print("Controller FAILED to turn " + self.dict[str(Service_in.running)])
return 'Failure'
| gpl-2.0 | 3,482,930,527,921,378,300 | 51.518395 | 162 | 0.568554 | false | 4.386313 | false | false | false |
Evnsan/MAC0448 | ep3/porta.py | 1 | 1653 | #!/usr/bin/python
from datagrama import Datagrama
class Porta(object):
def __init__(self):
        # definition of the fields of a segment
self.enlace = None
self.ip = None
self.tamanhoBuffer = None
self.buffer = []
self.modoVerboso = False
super(Porta, self).__init__()
def setEnlace(self,enlace):
self.enlace = enlace
def getEnlace(self):
return self.enlace
def setIp(self,ip):
self.ip = ip
def getIp(self):
return self.ip
def setTamanhoBuffer(self,tamanhoBuffer):
self.tamanhoBuffer = tamanhoBuffer
    def getTamanhoBuffer(self):
        return self.tamanhoBuffer
def addNoBuffer(self,pacote):
if len(self.buffer) < self.tamanhoBuffer:
self.buffer.append(pacote)
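        # Note: when the buffer is already full, the datagram is silently dropped
        # (there is no else branch and no return value to signal the drop).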
def bufferEstaVazio(self):
try:
if self.buffer[0]:
return False
except IndexError:
return True
def getDoBuffer(self):
if self.buffer[0]:
topoBuffer = self.buffer[0]
del self.buffer[0]
return topoBuffer
def printBuffer(self):
print self.buffer
print str(self)
# for d in self.buffer:
# print d
def receber(self, datagrama):
if self.modoVerboso:
print "PORTA: vai receber " + str(self)
self.addNoBuffer(datagrama)
if self.modoVerboso:
print self.buffer
def enviar(self, router, datagrama):
if self.modoVerboso:
print str(self) + ": " + str(datagrama)
self.enlace.enviar(self, datagrama)
| gpl-2.0 | 4,058,502,042,517,190,000 | 21.337838 | 51 | 0.562613 | false | 3.539615 | false | false | false |
junishikawa911/CAR-E | pytodb.py | 1 | 1153 | from pymongo import MongoClient
#Step 1: Connect to MongoDB - Note: Change connection string as needed
client = MongoClient("mongodb://jun_ishikawa:[email protected]:27017,car-e-mainframe-shard-00-01-tppcz.mongodb.net:27017,car-e-mainframe-shard-00-02-tppcz.mongodb.net:27017/<DATABASE>?ssl=true&replicaSet=Car-E-Mainframe-shard-0&authSource=admin")
db=client.database
#Step 2: Create sample data
names = ['Avijoy Haldar','Rajat Saxena','Divyansh Agrawal']
car_number = ['WB069102','DL011303','TN021011']
passkey = ['010111', '100101', '111001']
for x in xrange(0, 3):
database = {
'name' : names[x],'car number' : car_number[x],'pass code' : passkey[x]
}
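    # For illustration, the first pass through this loop builds the document
    #   {'name': 'Avijoy Haldar', 'car number': 'WB069102', 'pass code': '010111'}
    # before it is handed to insert_one below.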
#Step 3: Insert business object directly into MongoDB via insert_one
result=db.database.insert_one(database)
#Step 4: Print to the console the ObjectID of the new document
    print('Created {0} of 3 as {1}'.format(x+1,result.inserted_id))
#Step 5: Insert one more record, then tell us that you are done
insert_result=db.database.insert_one({'name' : 'Zayn Malik','car number' : 'DL012201','pass code':'110011'})
print('finished creating 3 user records') | mit | 3,231,768,840,390,031,000 | 59.736842 | 291 | 0.725932 | false | 2.875312 | false | false | false |
enguy/FAST-iCLIP | bin/oldscripts/fastclip.py | 2 | 65904 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import os
import cmath
import math
import sys
import numpy as np
import glob
import subprocess
import re
from matplotlib_venn import venn2
import pandas as pd
from collections import defaultdict
from operator import itemgetter
import matplotlib as mpl
import matplotlib.pyplot as plt
import shutil
from optparse import OptionParser
mpl.rcParams['savefig.dpi'] = 2 * mpl.rcParams['savefig.dpi']
# <codecell>
global sampleName
global outfilepath
global logFile
global logOpen
### File name ###
sampleName=sys.argv[1]
infilepath=os.getcwd() + '/' + 'rawdata/'
outfilepath=os.getcwd() + '/results/%s/'%sampleName
# <codecell>
# Create log and start pipeline
logFile=outfilepath + "runLog"
logOpen=open(logFile, 'w')
# <codecell>
### Parameters ###
iCLIP3pBarcode='AGATCGGAAGAGCGGTTCAGCAGGAATGCCGAGACCGATCTCGTATGCCGTCTTCTGCTTG' # Barcode sequence to trim from reads.
q=25 # Minimum quality score to keep during filtering.
p=80 # Percentage of bases that must have quality > q during filtering.
iCLIP5pBasesToTrim=13 # Number of bases to trim from the 5' end of each read.
k='1' # k=N distinct, valid alignments for each read in bt2 mapping.
threshold=3 # Sum of RT stops (for both replicates) required to keep file.
expand=15 # Bases to expand around RT position after RT stops are merged.
repeat_index=os.getcwd() + '/docs/repeat/rep' # bt2 index for repeat RNA.
repeatGenomeBuild=os.getcwd()+'/docs/repeat/repeatRNA.fa' # Sequence of repeat index.
repeatAnnotation=os.getcwd()+'/docs/repeat/Hs_repeatIndex_positions.txt' # Repeat annotation file.
start18s=3657
end18s=5527
start5s=6623
end5s=6779
start28s=7935
end28s=12969
rRNAend=13314
threshold_rep=1 # RT stop threshold for repeat index.
index=os.getcwd() + '/docs/hg19/hg19' # bt2 index for mapping.
index_tag='hg19' # Name of bt2 index.
genomeFile=os.getcwd()+'/docs/human.hg19.genome' # Genome file for bedGraph, etc.
genomeForCLIPper='-shg19' # Parameter for CLIPper.
blacklistregions=os.getcwd()+'/docs/wgEncodeDukeMapabilityRegionsExcludable.bed' # Blacklist masker.
repeatregions=os.getcwd()+'/docs/repeat_masker.bed' # Repeat masker.
geneAnnot=glob.glob(os.getcwd()+'/docs/genes_types/*') # List of genes by type.
snoRNAmasker=os.getcwd()+'/docs/snoRNA_reference/snoRNAmasker_formatted_5pExtend.bed' # snoRNA masker file.
miRNAmasker=os.getcwd()+'/docs/miR_sort_clean.bed' # miRNA masker file.
fivePUTRBed=os.getcwd()+'/docs/5pUTRs_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
threePUTRBed=os.getcwd()+'/docs/3pUTRs_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
cdsBed=os.getcwd()+'/docs/Exons_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
utrFile=os.getcwd()+'/docs/hg19_ensembl_UTR_annotation.txt' # UTR annotation file.
genesFile=os.getcwd()+'/docs/hg19_ensembl_genes.txt' # Gene annotation file.
sizesFile=os.getcwd()+'/docs/hg19.sizes' # Genome sizes file.
snoRNAindex=os.getcwd()+'/docs/snoRNA_reference/sno_coordinates_hg19_formatted.bed' # snoRNA coordinate file.
CLIPPERoutNameDelim='_' # Delimiter used to split the gene name out of cluster names in the CLIPper windows file.
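# For illustration (hypothetical name; see modCLIPPERout below): with the '_' delimiter a
# CLIPper window named 'ENSG00000100320_12_57' would be split into the gene ID
# 'ENSG00000100320' (field 0) and the per-cluster read count 57 (field 2). Other CLIPper
# versions emit a different layout, which is why the delimiter is kept configurable here.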
# <codecell>
import datetime
now=datetime.datetime.now()
logOpen.write("Timestamp:%s\n"%str(now))
logOpen.write("\n###Parameters used###\n")
logOpen.write("3' barcode:%s\n'"%iCLIP3pBarcode)
logOpen.write("Minimum quality score (q):%s\n"%q)
logOpen.write("Percentage of bases with > q:%s\n"%p)
logOpen.write("5' bases to trim:%s\n'"%iCLIP5pBasesToTrim)
logOpen.write("k distinct, valid alignments for each read in bt2 mapping:%s\n"%k)
logOpen.write("Threshold for minimum number of RT stops:%s\n"%threshold)
logOpen.write("Bases for expansion around conserved RT stops:%s\n"%expand)
logOpen.write("\n\n\n")
# <codecell>
print "Processing sample %s" %(sampleName)
logOpen.write("Processing sample: "+sampleName+'\n')
read1=infilepath+sampleName+'_R1.fastq'
read2=infilepath+sampleName+'_R2.fastq'
unzippedreads=[read1,read2]
# <codecell>
def trimReads3p(unzippedreads,adapter3p):
# Usage: Trims a specified adapter sequence from the 3p end of the reads.
# Input: List of fastq files.
# Output: List of 3p trimmed files.
trimparam='-a'+adapter3p # Adapter string
trimmedReads=[]
try:
for inread in unzippedreads:
outread=inread.replace("rawdata/", "results/%s/"%sampleName)
outread=outread.replace(".fastq", "_3ptrimmed.fastq")
process=subprocess.Popen(['fastx_clipper',trimparam,'-n','-l33','-Q33','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
logOpen.write("Trim 3p end of reads.\n")
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
trimmedReads=trimmedReads+[outread]
return trimmedReads
except:
logOpen.write("Problem with 3p trimming.\n")
print "Problem with 3p trimming."
print "Trim 3p adapter from reads."
trimmedReads3p=trimReads3p(unzippedreads,iCLIP3pBarcode)
# <codecell>
def qualityFilter(trim3pReads,q,p):
# Usage: Filters reads based upon quality score.
    # Input: List of fastq file names as well as the quality parameters p and q.
# Output: List of modified fastq file names.
qualityparam='-q'+str(q)
percentrageparam='-p'+str(p)
filteredReads=[]
try:
for inread in trim3pReads:
outread=inread.replace(".fastq", "_filter.fastq")
process=subprocess.Popen(['fastq_quality_filter',qualityparam,percentrageparam,'-Q33','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout, stderr=process.communicate()
logOpen.write("Perform quality filtering.\n")
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
filteredReads=filteredReads+[outread]
return filteredReads
except:
logOpen.write("Problem with quality filter.\n")
print "Problem with quality filter."
print "Perform quality filtering."
filteredReads=qualityFilter(trimmedReads3p,q,p)
# <codecell>
def dupRemoval(filteredReads):
# Usage: Removes duplicate reads.
# Input: List of fastq file names.
# Output: List of reads in FASTA format.
program=os.getcwd() + '/bin/fasta_to_fastq.pl'
noDupes=[]
try:
for inread in filteredReads:
outread=inread.replace(".fastq","_nodupe.fasta")
process=subprocess.Popen(['fastx_collapser','-Q33','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout, stderr=process.communicate()
logOpen.write("Perform duplicate removal.\n")
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
fastqOut=outread.replace('.fasta', '.fastq') # fastx_collapser returns fasta files, which are then converted to fastq.
outfh=open(fastqOut, 'w')
process=subprocess.Popen(['perl',program,outread],stdout=outfh)
process.communicate() # Wait for the process to complete.
os.remove(outread) # Remove the remaining .fasta file.
noDupes=noDupes+[fastqOut]
return noDupes
except:
logOpen.write("Problem with duplicate removal.\n")
print "Problem with duplicate removal."
print "Perform duplicate removal."
nodupReads=dupRemoval(filteredReads)
# <codecell>
def trimReads5p(nodupes,n):
# Usage: Trims a specified number of bases from the 5' end of each read.
# Input: List of fastq files.
# Output: List of 5p trimmed files.
trimparam='-f'+str(n)
trimmedReads=[]
try:
for inread in nodupes:
outread=inread.replace(".fastq", "_5ptrimmed.fastq")
process=subprocess.Popen(['fastx_trimmer', trimparam, '-Q33', '-i', inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout, stderr=process.communicate()
logOpen.write("Perform 5' barcode trimming.\n")
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
trimmedReads=trimmedReads+[outread]
return trimmedReads
except:
logOpen.write("Problem with 5' barcode trimming.\n")
print "Problem with 5' barcode trimming."
print "Perform 5' barcode trimming."
trimmedReads5p=trimReads5p(nodupReads,iCLIP5pBasesToTrim)
# <codecell>
def runBowtie(fastqFiles,index,index_tag):
# Usage: Read mapping to reference.
# Input: Fastq files of replicate trimmed read files.
# Output: Path to samfile for each read.
program='bowtie2'
mappedReads=[]
unMappedReads=[]
try:
for infastq in fastqFiles:
outfile=infastq.replace(".fastq","_mappedTo%s.sam"%index_tag)
unmapped=infastq.replace(".fastq","_notMappedTo%s.fastq"%index_tag)
process=subprocess.Popen([program,'-x',index,'-k',k,'-U',infastq,'--un',unmapped,'-S',outfile],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout,stderr=process.communicate()
logOpen.write("Perform mapping to %s index.\n"%index_tag)
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
mappedReads = mappedReads + [outfile]
unMappedReads = unMappedReads + [unmapped]
return (mappedReads,unMappedReads)
except:
logOpen.write("Problem with mapping.\n")
print "Problem with mapping."
print "Run mapping to repeat index."
mappedReads_rep,unmappedReads_rep=runBowtie(trimmedReads5p,repeat_index,'repeat')
# <codecell>
def runSamtools(samfiles):
# Usage: Samfile processing.
# Input: Sam files from Bowtie mapping.
# Output: Sorted bedFiles.
program = 'samtools'
program2 = 'bamToBed'
outBedFiles=[]
try:
for samfile in samfiles:
bamfile = samfile.replace('.sam','.bam')
proc = subprocess.Popen( [program,'view','-bS','-o', bamfile, samfile])
proc.communicate()
bamfile_sort = bamfile.replace('.bam','_sorted')
proc2 = subprocess.Popen([program,'sort',bamfile, bamfile_sort])
proc2.communicate()
bedFile = bamfile_sort.replace('_sorted', '_withDupes.bed')
outfh = open(bedFile,'w')
proc3 = subprocess.Popen( [program2,'-i', bamfile_sort+'.bam'],stdout=outfh)
proc3.communicate()
outBedFiles=outBedFiles+[bedFile]
return outBedFiles
except:
logOpen.write("Problem with samtools.\n")
print "Problem with samtools."
print "Run samtools."
logOpen.write("Run samtools.\n")
mappedBedFiles_rep=runSamtools(mappedReads_rep)
# <codecell>
def seperateStrands(mappedReads):
    # Usage: Separate positive and negative strands.
# Input: Paths to two bed files from Samtools.
# Output: Paths to bed files isolated by strand.
negativeStrand=[]
positiveStrand=[]
for mapFile in mappedReads:
with open(mapFile, 'r') as infile:
neg_strand=mapFile.replace('.bed','_neg.bed')
pos_strand=mapFile.replace('.bed','_pos.bed')
neg = open(neg_strand, 'w')
pos = open(pos_strand, 'w')
negativeStrand=negativeStrand+[neg_strand]
positiveStrand=positiveStrand+[pos_strand]
for line in infile:
if str(line.strip().split('\t')[5]) == '-':
neg.write(line)
elif str(line.strip().split('\t')[5]) == '+':
pos.write(line)
return (negativeStrand,positiveStrand)
def modifyNegativeStrand(negativeStrandReads):
# Usage: For negative stranded reads, ensure 5' position (RT stop) is listed first.
# Input: Bed file paths to all negative stranded.
# Output: Paths to modified bed files.
negativeStrandEdit=[]
for negativeRead in negativeStrandReads:
neg_strand_edited=negativeRead.replace('_neg.bed','_negEdit.bed')
negativeStrandEdit=negativeStrandEdit+[neg_strand_edited]
neg_edit = open(neg_strand_edited, 'w')
with open(negativeRead, 'r') as infile:
for line in infile:
chrom,start,end,name,quality,strand=line.strip().split('\t')
neg_edit.write('\t'.join((chrom,end,str(int(end)+30),name,quality,strand))+'\n')
return negativeStrandEdit
def isolate5prime(strandedReads):
# Usage: Isolate only the Chr, 5' position (RT stop), and strand.
    # Input: Bed file paths to strand-separated reads.
    # Output: Paths to RT stop files.
RTstops=[]
for reads in strandedReads:
RTstop=reads.replace('.bed','_RTstop.bed')
f = open(RTstop, 'w')
with open(reads, 'r') as infile:
RTstops=RTstops+[RTstop]
for line in infile:
chrom,start,end,name,quality,strand=line.strip().split('\t')
f.write('\t'.join((chrom,start,strand))+'\n')
return RTstops
print "RT stop isolation (repeat)."
logOpen.write("RT stop isolation (repeat).\n")
readsByStrand_rep=seperateStrands(mappedBedFiles_rep)
negativeRTstop_rep=isolate5prime(modifyNegativeStrand(readsByStrand_rep[0]))
positiveRTstop_rep=isolate5prime(readsByStrand_rep[1])
# <codecell>
def fileCat(destinationFile,fileList):
f = open(destinationFile, "w")
for tempfile in fileList:
readfile = open(tempfile, "r")
f.write(readfile.read())
readfile.close()
f.close()
def RTcounts(RTfile):
posRT_R1=pd.DataFrame(pd.read_table(RTfile,index_col=None,header=None,sep='\t'))
posRT_R1.columns=['Chr','Start','Strand']
cts=posRT_R1.groupby(['Chr','Start']).size()
return cts
def mergeRT(RTstopFiles,outfilename,threshold,expand,strand):
# Usage: Merge RT stops between replicates and keep only those positions that exceed threshold.
# Input: Files with RT stops for each replicate, outfile, threshold, strand, and bases to expand around RT stop.
# Output: None. Writes merged RT stop file.
cts_R1=RTcounts(RTstopFiles[0])
cts_R2=RTcounts(RTstopFiles[1])
m=pd.concat([cts_R1,cts_R2],axis=1,join='inner')
m.columns=['Rep_1','Rep_2']
m['Sum']=m['Rep_1']+m['Rep_2']
m_filter=m[m['Sum']>threshold]
f = open(outfilename, 'w')
for i in m_filter.index:
chrom=i[0]
RT=i[1]
count=m_filter.loc[i,'Sum']
if RT > expand:
read='\t'.join((chrom,str(int(RT)-expand),str(int(RT)+expand),'CLIPread','255',strand))+'\n'
else:
read='\t'.join((chrom,str(int(RT)),str(int(RT)+expand),'CLIPread','255',strand))+'\n'
f.write(read*(count))
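# Illustrative example of the merge rule above: if replicate 1 has 2 RT stops at chr1:1000
# and replicate 2 has 3 RT stops at the same position, Sum=5 exceeds a threshold of 3, so
# five identical intervals chr1:985-1015 (the RT stop expanded by +/- expand=15 bases) are
# written to the merged file.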
print "Merge RT stops."
logOpen.write("Merge RT stops.\n")
posMerged=outfilepath+sampleName+'_repeat_positivereads.mergedRT'
strand='+'
mergeRT(positiveRTstop_rep,posMerged,threshold_rep,expand,strand)
negMerged=outfilepath+sampleName+'_repeat_negativereads.mergedRT'
strand='-'
mergeRT(negativeRTstop_rep,negMerged,threshold_rep,expand,strand)
negAndPosMerged=outfilepath+sampleName+'_threshold=%s'%threshold_rep+'_repeat_allreads.mergedRT.bed'
fileCat(negAndPosMerged,[posMerged,negMerged])
# <codecell>
print "Run mapping to %s."%index_tag
mappedReads,unmappedReads=runBowtie(unmappedReads_rep,index,index_tag)
# <codecell>
print "Run samtools."
logOpen.write("Run samtools.\n")
mappedBedFiles=runSamtools(mappedReads)
# <codecell>
def runRepeatMask(mappedReads,repeatregions):
# Usage: Remove repeat regions from bedfile following mapping.
    # Input: .bed file after mapping (duplicates removed by samtools) with blacklist regions removed.
# Output: Bedfile with repeat regions removed.
program='intersectBed'
masked=[]
try:
for bedIn in mappedReads:
noRepeat=bedIn.replace('.bed','_noRepeat.bed')
outfh=open(noRepeat, 'w')
proc=subprocess.Popen([program,'-a',bedIn,'-b',repeatregions,'-v','-s'],stdout=outfh)
proc.communicate()
outfh.close()
masked=masked+[noRepeat]
return (masked)
except:
print "Problem with repeat masking."
logOpen.write("Problem with repeat masking.\n")
def runBlacklistRegions(mappedReads,blacklistregions):
# Usage: Remove blacklisted regions from bedfile following mapping.
# Input: .bed file after mapping (duplicates removed by samtools).
# Output: Bedfile with blacklisted regions removed.
program='intersectBed'
blackListed=[]
try:
for bedIn in mappedReads:
noBlacklist=bedIn.replace('.bed','_noBlacklist.bed')
outfh=open(noBlacklist, 'w')
proc=subprocess.Popen([program,'-a',bedIn,'-b',blacklistregions,'-v'],stdout=outfh)
proc.communicate()
outfh.close()
blackListed=blackListed+[noBlacklist]
return (blackListed)
except:
print "Problem with blacklist."
logOpen.write("Problem with blacklist.\n")
print "Run repeat and blacklist region masker."
logOpen.write("Run repeat and blacklist masker.\n")
blacklistedBedFiles=runBlacklistRegions(mappedBedFiles,blacklistregions)
maskedBedFiles=runRepeatMask(blacklistedBedFiles,repeatregions)
# <codecell>
print "RT stop isolation."
logOpen.write("RT stop isolation.\n")
readsByStrand=seperateStrands(maskedBedFiles)
negativeRTstop=isolate5prime(modifyNegativeStrand(readsByStrand[0]))
positiveRTstop=isolate5prime(readsByStrand[1])
print "Merge RT stops."
logOpen.write("Merge RT stops.\n")
posMerged=outfilepath+sampleName+'_%s_positivereads.mergedRT'%index_tag
strand='+'
mergeRT(positiveRTstop,posMerged,threshold,expand,strand)
negMerged=outfilepath+sampleName+'_%s_negativereads.mergedRT'%index_tag
strand='-'
mergeRT(negativeRTstop,negMerged,threshold,expand,strand)
negAndPosMerged=outfilepath+sampleName+'_threshold=%s'%threshold+'_%s_allreads.mergedRT.bed'%index_tag
fileCat(negAndPosMerged,[posMerged,negMerged])
# <codecell>
def runCLIPPER(RTclusterfile,genome,genomeFile):
    # Usage: Process the mergedRT file and pass it through the CLIPper FDR script.
# Input: Merged RT file.
# Output: CLIPper input (.bed) file and output file.
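    # Rough flow, for orientation: bedToBam -> samtools sort / flagstat / index -> bamToBed
    # (to regenerate a cleaned bed used as the CLIPper input) -> clipper run on the sorted
    # bam with the genome flag defined in the parameters block above.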
program='bedToBam'
program2='samtools'
program3='bamToBed'
program4='clipper'
bamfile=RTclusterfile.replace('.bed','.bam')
outfh=open(bamfile, 'w')
proc=subprocess.Popen([program,'-i',RTclusterfile,'-g',genomeFile],stdout=outfh)
proc.communicate()
bamfile_sort=bamfile.replace('.bam','.srt')
proc2=subprocess.Popen([program2,'sort',bamfile,bamfile_sort])
proc2.communicate()
bamfile_sorted=bamfile_sort+'.bam'
mapStats=bamfile_sorted.replace('.srt.bam','.mapStats.txt')
outfh=open(mapStats, 'w')
proc3=subprocess.Popen([program2,'flagstat',bamfile_sorted],stdout=outfh)
proc3.communicate()
proc4=subprocess.Popen([program2,'index',bamfile_sorted])
proc4.communicate()
CLIPPERin=bamfile_sorted.replace('.srt.bam','_CLIPPERin.bed')
outfh=open(CLIPPERin, 'w')
proc5=subprocess.Popen([program3,'-i',bamfile_sorted],stdout=outfh)
proc5.communicate()
CLIPPERout=CLIPPERin.replace('_CLIPPERin.bed','_CLIP_clusters')
proc6=subprocess.Popen([program4,'--bam',bamfile_sorted,genome,'--outfile=%s'%CLIPPERout],)
proc6.communicate()
outfh.close()
return (CLIPPERin,CLIPPERout)
def makeGeneNameDict(fi):
# Usage: Make a dictionary that maps RT stop to gene name.
# Input: File path to intersected CLIPper windows and input RT stop coordinates.
    # Output: Dictionary mapping RT stop to gene name.
nameDict={}
with open(fi, 'r') as infile:
for read in infile:
elementList=read.strip().split('\t')
RT_id='_'.join((elementList[0],elementList[1],elementList[2],elementList[5]))
if RT_id not in nameDict:
geneName=elementList[9].strip().split(CLIPPERoutNameDelim)[0]
nameDict[RT_id]=geneName
return nameDict
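# For illustration: an intersected line whose first six fields are
# 'chr1 1000 1030 CLIPread 255 +' and whose tenth field is a window name such as
# 'GENE_3_21' (hypothetical) yields nameDict['chr1_1000_1030_+'] = 'GENE'.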
def modCLIPPERout(CLIPPERin,CLIPPERout):
# Usage: Process the CLIPper output and isolate lowFDR reads based upon CLIPper windows.
# Input: .bed file passed into CLIPper and the CLIPper windows file.
    # Output: Low FDR reads recovered using the CLIPper windows file, genes per cluster, gene list of CLIPper clusters, and CLIPper windows as .bed.
program='intersectBed'
CLIPperOutBed=CLIPPERout+'.bed'
CLIPpeReadsPerCluster=CLIPPERout+'.readsPerCluster'
CLIPpeGeneList=CLIPPERout+'.geneNames'
f = open(CLIPperOutBed,'w')
g = open(CLIPpeReadsPerCluster,'w')
h = open(CLIPpeGeneList,'w')
with open(CLIPPERout,'r') as infile:
for line in infile:
try:
                # Note that different versions of CLIPper report the gene name differently, so we must handle this.
chrom,start,end,name,stats,strand,start_2,end_2 = line.strip().split('\t')
if CLIPPERoutNameDelim=='_':
readPerCluster=name.strip().split(CLIPPERoutNameDelim)[2]
else:
readPerCluster=(name.strip().split(CLIPPERoutNameDelim)[1]).split('_')[2]
geneName=name.strip().split(CLIPPERoutNameDelim)[0]
f.write('\t'.join((chrom,start,end,name,stats,strand))+'\n')
g.write((readPerCluster+'\n'))
h.write((geneName+'\n'))
except:
print ""
f.close()
g.close()
h.close()
    # Intersect input reads with the CLIPper windows: report the full entry for both files (-wa -wb) and require matching strand (-s). A read that overlaps several windows appears once per window here.
clusterWindowInt=CLIPperOutBed.replace('.bed','_fullClusterWindow.bed')
outfh=open(clusterWindowInt,'w')
proc=subprocess.Popen([program,'-a',CLIPPERin,'-b',CLIPperOutBed,'-wa','-wb','-s'],stdout=outfh)
proc.communicate()
outfh.close()
# Use the full window intersection to make a dictionary mapping RTstop to gene name.
nameDict=makeGeneNameDict(clusterWindowInt)
# Intersect input reads with CLIPper windows, but only report one intersection per read (as reads can overlap with multiple windows).
clusterWindowIntUniq=CLIPperOutBed.replace('.bed','_oneIntPerRead.bed')
outfh=open(clusterWindowIntUniq,'w')
proc=subprocess.Popen([program,'-a',CLIPPERin,'-b',CLIPperOutBed,'-wa','-s','-u'],stdout=outfh)
proc.communicate()
outfh.close()
    # Process the uniquely intersected RT stops by adding gene name.
CLIPPERlowFDR=CLIPperOutBed.replace('.bed','_lowFDRreads.bed')
outfh=open(CLIPPERlowFDR,'w')
with open(clusterWindowIntUniq, 'r') as infile:
for read in infile:
bed=read.strip().split('\t')
RT_id='_'.join((bed[0],bed[1],bed[2],bed[5]))
geneName=nameDict[RT_id]
outfh.write('\t'.join((bed[0],bed[1],bed[2],geneName,bed[4],bed[5],'\n')))
outfh.close()
infile.close()
return (CLIPPERlowFDR,CLIPpeReadsPerCluster,CLIPpeGeneList,CLIPperOutBed)
print "Run CLIPper."
logOpen.write("Run CLIPper.\n")
CLIPPERio=runCLIPPER(negAndPosMerged,genomeForCLIPper,genomeFile)
CLIPPERin=CLIPPERio[0]
CLIPPERout=CLIPPERio[1]
clipperStats=modCLIPPERout(CLIPPERin,CLIPPERout)
CLIPPERlowFDR=clipperStats[0] # Low FDR reads returned filtred through CLIPper windows
CLIPpeReadsPerCluster=clipperStats[1] # Number of reads per CLIPper cluster
CLIPpeGeneList=clipperStats[2] # Gene names returned from the CLIPper file
CLIPperOutBed=clipperStats[3] # CLIPper windows as a bed file
# <codecell>
def getBedCenterPoints(inBed):
    # Usage: Obtain center coordinates of bedFile.
    # Input: BedFile.
    # Output: Center coordinates returned.
outBed=inBed.replace('.bed','_centerCoord.bed')
f=open(outBed, 'w')
with open(inBed, 'r') as infile:
for line in infile:
elementList=line.strip().split('\t')
f.write('\t'.join((elementList[0],str(int(elementList[1])+expand),str(int(elementList[1])+expand+1),elementList[3],elementList[4],elementList[5],'\n')))
f.close()
return outBed
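# Note on getBedCenterPoints: because each merged read spans RT stop +/- expand, taking
# start + expand recovers the original RT stop coordinate (written as a 1-bp interval)
# rather than the literal midpoint of the input feature.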
def cleanBedFile(inBed):
# Usage: Sort and recover only first 6 fields from a bed file.
# Input: BedFile.
# Output: Sorted bedFile with correct number of fields.
program='sortBed'
CLIPperOutBed=inBed.replace('.bed','_cleaned.bed')
sortedBed=CLIPperOutBed.replace('_cleaned.bed','_cleaned_sorted.bed')
f=open(CLIPperOutBed, 'w')
with open(inBed, 'r') as infile:
for line in infile:
elementList=line.strip().split('\t')
f.write('\t'.join((elementList[0],elementList[1],elementList[2],elementList[3],elementList[4],elementList[5],'\n')))
f.close()
outfh=open(sortedBed, 'w')
proc=subprocess.Popen([program, '-i', CLIPperOutBed],stdout=outfh)
proc.communicate()
outfh.close()
return sortedBed
def makeBedGraph(lowFDRreads,sizesFile):
# Usage: From a bedFile, generate a bedGraph and bigWig.
# Input: BedFile.
# Output: BedGraph file.
program='genomeCoverageBed'
program2=os.getcwd() + '/bin/bedGraphToBigWig'
cleanBed=cleanBedFile(lowFDRreads)
outname=cleanBed.replace('.bed','.bedgraph')
outname2=cleanBed.replace('.bed','.bw')
outfh=open(outname,'w')
proc=subprocess.Popen([program,'-bg','-split','-i',cleanBed,'-g',sizesFile],stdout=outfh)
proc.communicate()
outfh2=open(outname2,'w')
proc2=subprocess.Popen([program2,outname,sizesFile,outname2],stdout=subprocess.PIPE)
proc2.communicate()
return outname
print "Make bedGraph"
logOpen.write("Make bedGraph.\n")
bedGraphCLIPout=makeBedGraph(CLIPPERlowFDR,genomeFile)
CLIPPERlowFDRcenters=getBedCenterPoints(CLIPPERlowFDR)
allLowFDRCentersBedGraph=makeBedGraph(CLIPPERlowFDRcenters,genomeFile)
# <codecell>
def filterSnoRNAs(proteinCodingReads,snoRNAmasker,miRNAmasker):
# Usage: Filter snoRNA and miRNAs from protein coding reads.
# Input: .bed file with protein coding reads.
# Output: snoRNA and miR filtered .bed file.
program='intersectBed'
proteinWithoutsnoRNAs=proteinCodingReads.replace('.bed','_snoRNAremoved.bed')
proteinWithoutmiRNAs=proteinWithoutsnoRNAs.replace('.bed','_miRNAremoved.bed')
outfh=open(proteinWithoutsnoRNAs, 'w')
proc=subprocess.Popen([program,'-a',proteinCodingReads,'-b',snoRNAmasker,'-v','-s'],stdout=outfh)
proc.communicate()
outfh.close()
outfh=open(proteinWithoutmiRNAs, 'w')
proc=subprocess.Popen([program,'-a',proteinWithoutsnoRNAs,'-b',miRNAmasker,'-v','-s'],stdout=outfh)
proc.communicate()
outfh.close()
return (proteinWithoutmiRNAs)
def getLowFDRReadTypes(CLIPPERlowFDR,pathToGeneLists):
# Usage: Given a list of genes, return all reads for the associated genes.
# Input: Gene list and the path to lowFDR read file.
    # Output: List of reads associated with the given genes.
lowFDRgenelist=[]
for path in pathToGeneLists:
outfile=path+'_LowFDRreads.bed'
proc=subprocess.Popen('grep -F -f %s %s > %s'%(path,CLIPPERlowFDR,outfile),shell=True)
proc.communicate()
return_code=proc.wait() # *** Remove later. ***
lowFDRgenelist=lowFDRgenelist+[outfile]
return lowFDRgenelist
def compareLists(list1,list2,outname):
# Usage: Compare gene lists and output matches to the file.
# Input: Two gene lists.
    # Output: Path to the file containing the matching genes.
f=open(list1,'r')
g=open(list2,'r')
commonGenes=set(f.readlines()) & set(g.readlines())
geneCategory=outname.split('.')[1]
outputName=outfilepath+'clipGenes_'+geneCategory
outfh=open(outputName,'w')
for gene in commonGenes:
outfh.write(gene)
outfh.close()
return outputName
def getLowFDRGeneTypes(CLIPpeGeneList,geneAnnot):
# Usage: Get all genes listed under each type, compare to CLIPper targets.
    # Input: The CLIPper gene list and the per-type gene annotation files.
# Output: Path to file containing all CLIPper genes of each type.
geneTypes=[]
for genepath in geneAnnot:
lowFDRgenes=compareLists(CLIPpeGeneList,genepath,os.path.split(genepath)[1])
geneTypes=geneTypes+[lowFDRgenes]
return geneTypes
print "Partition reads by type."
logOpen.write("Partition reads by type.\n")
pathToGeneLists=getLowFDRGeneTypes(CLIPpeGeneList,geneAnnot)
pathToReadLists=getLowFDRReadTypes(CLIPPERlowFDR,pathToGeneLists)
proteinCodingReads=outfilepath+'clipGenes_proteinCoding_LowFDRreads.bed'
proteinBedGraph=makeBedGraph(proteinCodingReads,genomeFile)
filteredProteinCodingCenters=filterSnoRNAs(getBedCenterPoints(proteinCodingReads),snoRNAmasker,miRNAmasker)
filteredProteinCentersBedGraph=makeBedGraph(filteredProteinCodingCenters,genomeFile)
lincRNAReads=outfilepath+'clipGenes_lincRNA_LowFDRreads.bed'
filteredLincRNACenters=filterSnoRNAs(getBedCenterPoints(lincRNAReads),snoRNAmasker,miRNAmasker)
# <codecell>
# --- #
# <codecell>
def sortFilteredBed(bedFile):
bf=pd.DataFrame(pd.read_table(bedFile,header=None))
bf.columns=['Chr','Start','Stop','CLIPper_name','Q','Strand']
geneCounts=countHitsPerGene(bf)
return geneCounts
def countHitsPerGene(bf):
# *** THIS MAY DEPEND UPON THE VERSION OF CLIPPER USED ***
bf['geneName']=bf['CLIPper_name'].apply(lambda x: x.split('_')[0])
geneCounts=bf.groupby('geneName').size()
geneCounts.sort(ascending=False)
return geneCounts
def getSnoRNAreads(CLIPPERlowFDRcenters,snoRNAindex):
program='intersectBed'
bedFile=outfilepath+'clipGenes_snoRNA_LowFDRreads.bed'
outfh=open(bedFile, 'w')
proc=subprocess.Popen([program,'-a',CLIPPERlowFDRcenters,'-b',snoRNAindex,'-s','-wa','-wb'],stdout=outfh)
proc.communicate()
outfh.close()
return bedFile
def countSnoRNAs(bedFile_sno):
bf=pd.DataFrame(pd.read_table(bedFile_sno,header=None))
bf.columns=['Chr','Start','End','CLIPper_name','Q','Strand','Chr_snoRNA','Start_snoRNA','Stop_snoRNA','name_snoRNA','Type','strand_snoRNA']
geneCounts=bf.groupby('name_snoRNA').size()
geneCounts.sort(ascending=False)
return geneCounts
def countRemainingGeneTypes(remaining):
for bedFile in remaining:
try:
bf=pd.DataFrame(pd.read_table(bedFile,header=None))
bf.columns=['Chr','Start','End','ReadName','Q','Strand','CLIPper_winChr','CLIPper_winStart','CLIPper_winEmd','CLIPper_winaName','CLIPper_winP','CLIPper_winStrand']
# *** THIS MAY DEPEND UPON THE VERSION OF CLIPPER USED ***
bf['geneName']=bf['CLIPper_winaName'].apply(lambda x: x.split('_')[0])
geneCounts=bf.groupby('geneName').size()
geneCounts.sort(ascending=False)
head,fname=os.path.split(bedFile)
geneType=fname.split("_")[1]
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_%s'%geneType
geneCounts.to_csv(outfilepathToSave)
except ValueError:
print "No reads in %s"%bedFile
print "Generate sorted gene lists by gene type."
logOpen.write("Generate sorted gene lists by gene type.\n")
bedFile_pc=outfilepath+"clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
geneCounts_pc=sortFilteredBed(bedFile_pc)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_proteinCoding'
geneCounts_pc.to_csv(outfilepathToSave)
bedFile_linc=outfilepath+"clipGenes_lincRNA_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
geneCounts_linc=sortFilteredBed(bedFile_linc)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_lincRNA'
geneCounts_linc.to_csv(outfilepathToSave)
CLIPPERlowFDRcenters=getBedCenterPoints(CLIPPERlowFDR)
allLowFDRCentersBedGraph=makeBedGraph(CLIPPERlowFDRcenters,genomeFile)
bedFile_sno=getSnoRNAreads(CLIPPERlowFDRcenters,snoRNAindex)
geneCounts_sno=countSnoRNAs(bedFile_sno)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_snoRNA'
geneCounts_sno.to_csv(outfilepathToSave)
remaining=[f for f in glob.glob(outfilepath+"*_LowFDRreads.bed") if 'lincRNA' not in f and 'proteinCoding' not in f and 'snoRNA' not in f]
countRemainingGeneTypes(remaining)
# <codecell>
def makeClusterCenter(windowsFile):
# Usage: Generate a file of cluster centers.
# Input: Raw CLIPper output file.
# Output: File with coordinates for the center of each CLIPper cluster.
cleanBed = cleanBedFile(windowsFile)
centers=cleanBed.replace('.bed','.clusterCenter')
f = open(centers, 'w')
with open(cleanBed, 'r') as infile:
for line in infile:
elementList = line.strip().split('\t')
diff=abs(int((int(elementList[1])-int(elementList[2]))/2))
f.write(elementList[0]+'\t'+str(int(elementList[1])+diff)+'\t'+str(int(elementList[1])+diff+1)+'\n')
f.close()
return centers
def getClusterIntensity(bedGraph,centerCoordinates):
    # Usage: Generate a matrix of read intensity values around CLIPper cluster center.
# Input: BedGraph and cluster center file.
# Output: Generates a matrix, which is passed into R.
program=os.getcwd() + '/bin/grep_chip-seq_intensity.pl'
program2='wait'
proc=subprocess.Popen(['perl',program, centerCoordinates, bedGraph],)
proc.communicate()
logOpen.write("Waiting for Cluster Intensity file completion...\n")
proc2=subprocess.Popen(program2,shell=True)
proc2.communicate()
print "Get binding intensity around cluster centers."
logOpen.write("Get binding intensity around cluster centers.\n")
bedGraphCLIPin=makeBedGraph(CLIPPERin,genomeFile)
centerCoordinates=makeClusterCenter(CLIPperOutBed)
getClusterIntensity(bedGraphCLIPin,centerCoordinates)
# <codecell>
def partitionReadsByUTR(infile,UTRmask,utrReads,notutrReads):
program = 'intersectBed'
outfh = open(utrReads,'w')
proc = subprocess.Popen([program,'-a',infile,'-b',UTRmask,'-u','-s'],stdout=outfh)
proc.communicate()
outfh.close()
outfh = open(notutrReads,'w')
proc = subprocess.Popen([program,'-a',infile,'-b',UTRmask,'-v','-s'],stdout=outfh)
proc.communicate()
outfh.close()
def extractUTRs(bedIn,fivePUTRBed,threePUTRBed,cdsBed):
# Usage: Extract all UTR specific reads from the input file.
# Input: .bed file
# Output: Mutually exclusive partitions of the input file.
fivePreads = bedIn.replace('.bed', '_5p.bed')
notFivePreads = bedIn.replace('.bed', '_NOT5p.bed')
partitionReadsByUTR(bedIn,fivePUTRBed,fivePreads,notFivePreads)
threePreads = bedIn.replace('.bed', '_3p.bed')
notThreePreads = bedIn.replace('.bed', '_NOT3p.bed')
partitionReadsByUTR(notFivePreads,threePUTRBed,threePreads,notThreePreads)
CDSreads = bedIn.replace('.bed', '_cds.bed')
notCDSreads = bedIn.replace('.bed', '_NOTcds.bed')
partitionReadsByUTR(notThreePreads,cdsBed,CDSreads,notCDSreads)
return (fivePreads,notFivePreads,CDSreads,notCDSreads,threePreads,notThreePreads)
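# The partitioning above is sequential and mutually exclusive: reads are assigned to the
# 5'UTR first, then the remainder to the 3'UTR, then to CDS exons; whatever is left in
# notCDSreads falls outside these annotations (roughly the intronic portion of the
# protein-coding reads).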
print "Intron and UTR analysis."
logOpen.write("Intron and UTR analysis.\n")
fivePreads,notFivePreads,CDSreads,notCDSreads,threePreads,notThreePreads=extractUTRs(filteredProteinCodingCenters,fivePUTRBed,threePUTRBed,cdsBed)
geneCounts_5p=sortFilteredBed(fivePreads)
geneCounts_3p=sortFilteredBed(threePreads)
geneCounts_cds=sortFilteredBed(CDSreads)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_5pUTR'
geneCounts_5p.to_csv(outfilepathToSave)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_3pUTR'
geneCounts_3p.to_csv(outfilepathToSave)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_CDS'
geneCounts_cds.to_csv(outfilepathToSave)
# <codecell>
def makeTab(bedGraph,genesFile,sizesFile):
program = os.getcwd() + '/bin/bedGraph2tab.pl'
program2 = 'wait'
outfile=bedGraph.replace('.bedgraph','.tab')
proc = subprocess.Popen(['perl',program,genesFile,sizesFile,bedGraph,outfile],)
proc.communicate()
proc2 = subprocess.Popen(program2,shell=True)
proc2.communicate()
return outfile
def makeAvgGraph(bedGraph,utrFile,genesFile,sizesFile):
    # Usage: Generate a matrix of read intensity values across the gene body.
    # Input: BedGraph.
    # Output: Generates two matrices.
program= os.getcwd() + '/bin/averageGraph_scaled_tab.pl'
program2 = 'wait'
tabFile=makeTab(bedGraph,genesFile,sizesFile)
outhandle=tabFile.replace('.tab','_UTRs')
proc = subprocess.Popen(['perl',program,utrFile,tabFile,tabFile,outhandle],)
proc.communicate()
proc2 = subprocess.Popen(program2,shell=True)
proc2.communicate()
print "Gene body analysis."
logOpen.write("Gene body analysis.\n")
bedGraphProtein=makeBedGraph(bedFile_pc,genomeFile)
makeAvgGraph(bedGraphProtein,utrFile,genesFile,sizesFile)
# <codecell>
def getGeneStartStop(bedFile,geneRef):
try:
bf=pd.DataFrame(pd.read_table(bedFile,header=None))
bf.columns=['Chr','Start','End','ReadName','Q','Strand','CLIPper_winChr','CLIPper_winStart','CLIPper_winEmd','CLIPper_winaName','CLIPper_winP','CLIPper_winStrand']
bf['geneName']=bf['CLIPper_winaName'].apply(lambda x: x.split('_')[0])
merge=pd.merge(geneRef,bf,left_on='Ensembl Gene ID',right_on='geneName')
ncRNA_startStop=merge[['Ensembl Gene ID','Gene Start (bp)','Gene End (bp)','Start','End','Strand']]
outfilepathToSave=bedFile.replace(".bed",".geneStartStop")
ncRNA_startStop.to_csv(outfilepathToSave)
except ValueError:
print "No reads in %s"%bedFile
print "ncRNA gene body anaysis."
geneStartStopRepo=os.getcwd()+'/docs/all_genes.txt'
geneRef=pd.DataFrame(pd.read_table(geneStartStopRepo))
remaining=[f for f in glob.glob(outfilepath+"*_LowFDRreads.bed") if 'lincRNA' not in f and 'proteinCoding' not in f and 'snoRNA' not in f]
for bedFile in remaining:
st_stop=getGeneStartStop(bedFile,geneRef)
# lincRNA file processing
bedFile_linc=outfilepath+"clipGenes_lincRNA_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
bf=pd.DataFrame(pd.read_table(bedFile_linc,header=None))
bf.columns=['Chr','Start','Stop','CLIPper_name','Q','Strand']
bf['geneName']=bf['CLIPper_name'].apply(lambda x: x.split('_')[0])
merge=pd.merge(geneRef,bf,left_on='Ensembl Gene ID',right_on='geneName')
ncRNA_startStop=merge[['Ensembl Gene ID','Gene Start (bp)','Gene End (bp)','Start','Stop','Strand']]
outfilepathToSave=bedFile_linc.replace(".bed",".geneStartStop")
ncRNA_startStop.to_csv(outfilepathToSave)
# <codecell>
def makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation):
repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype='string')
repeat_genome_bases=repeat_genome[1]
repeat_genome_size=len(repeat_genome[1])
repeatAnnotDF=pd.DataFrame(pd.read_table(repeatAnnotation,header=None))
repeatAnnotDF.columns=['Name','Length','IndexStart','IndexEnd']
repeatAnnotDF['End_for_extraction']=repeatAnnotDF['IndexEnd']+1 # Python list extraction is not end index inclusive; to extract sequence, use end + 1.
return (repeat_genome_bases,repeatAnnotDF)
def readBed(path):
bedFile = pd.read_table(path,dtype=str,header=None)
bedFile.columns=['Index','Start','Stop','Name','QS','Strand']
bedFile['Start']=bedFile['Start'].astype(int)
return bedFile
print "Record repeat RNA."
repeat_genome_bases,repeatAnnotDF=makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation)
repeatAnnotDF.set_index('Name',inplace=True,drop=False)
# Get merged data for repeat index.
repeatMerged=glob.glob(outfilepath+"*repeat_allreads.mergedRT.bed")
rep=pd.read_table(repeatMerged[0],dtype=str,header=None)
rep.columns=['Rep_index','Start','Stop','Read_name','Q','Strand']
rep['RT_stop']=rep['Start'].astype(int)+expand
for ix in repeatAnnotDF.index:
end=repeatAnnotDF.loc[ix,'IndexEnd']
repName=repeatAnnotDF.loc[ix,'Name']
gene_hits=rep[(rep['RT_stop']<int(repeatAnnotDF.loc[ix,'IndexEnd']))&(rep['RT_stop']>int(repeatAnnotDF.loc[ix,'IndexStart']))]
gene_hits['Repeat_End']=repeatAnnotDF.loc[ix,'IndexEnd']
gene_hits['Repeat_Start']=repeatAnnotDF.loc[ix,'IndexStart']
outfilepathToSave=outfilepath + '/PlotData_RepeatRNAreads_%s'%repName
gene_hits.to_csv(outfilepathToSave)
# <codecell>
def makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation):
repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype='string')
repeat_genome_bases=repeat_genome[1]
repeat_genome_size=len(repeat_genome[1])
repeatAnnotDF=pd.DataFrame(pd.read_table(repeatAnnotation,header=None))
repeatAnnotDF.columns=['Name','Length','IndexStart','IndexEnd']
repeatAnnotDF['End_for_extraction']=repeatAnnotDF['IndexEnd']+1 # Python list extraction is not end index inclusive; to extract sequence, use end + 1.
return (repeat_genome_bases,repeatAnnotDF)
repeat_genome_bases,repeatAnnotDF=makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation)
# <codecell>
def lineCount(filename):
i=0
with open(filename) as f:
for i,l in enumerate(f):
pass
return i+1
def plot_ReadAccounting(outfilepath,sampleName):
rawRead1=infilepath+sampleName+'_R1.fastq'
rawRead2=infilepath+sampleName+'_R2.fastq'
reads3pTrim=[outfilepath+sampleName+'_R1_3ptrimmed.fastq',outfilepath+sampleName+'_R2_3ptrimmed.fastq']
readsFilter=[outfilepath+sampleName+'_R1_3ptrimmed_filter.fastq',outfilepath+sampleName+'_R2_3ptrimmed_filter.fastq']
readsNoDupes=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe.fastq',outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe.fastq']
readsMappedReapeat=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_mappedTorepeat_withDupes.bed',outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_mappedTorepeat_withDupes.bed']
readsMappedHg19=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag]
readsMappedBlacklist=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag]
readsMappedRepeatMask=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes_noBlacklist_noRepeat.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes_noBlacklist_noRepeat.bed'%index_tag]
clipperIN=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIPPERin.bed'%(threshold,index_tag)
clipperOUT=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters_lowFDRreads.bed'%(threshold,index_tag)
fileNames=['Raw (R1)','Raw (R2)','3p Trim (R1)','3p Trim (R2)','Filter (R1)','Filter (R2)','No dupes (R1)','No dupes (R2)','RepeatMapped (R1)','RepeatMapped (R2)','Hg19Mapped (R1)','Hg19Mapped (R2)','Blacklist (R1)','Blacklist (R2)','RepeatMask (R1)','RepeatMask (R2)','ClipperIn','ClipperOut']
filesToCount=[rawRead1,rawRead2,reads3pTrim[0],reads3pTrim[1],readsFilter[0],readsFilter[1],readsNoDupes[0],readsNoDupes[1],readsMappedReapeat[0],readsMappedReapeat[1],readsMappedHg19[0],readsMappedHg19[1],readsMappedBlacklist[0],readsMappedBlacklist[1],readsMappedRepeatMask[0],readsMappedRepeatMask[1],clipperIN,clipperOUT]
counts=[]
counter=0
for fileString in filesToCount:
temp=lineCount(fileString)
if counter < 8:
temp=temp/4 # Fastq files
counts=counts+[temp]
counter += 1
ind = np.arange(len(counts)) + 0.5
plt.barh(ind,list(reversed(np.log10(np.array(counts)))),align='center',color='blue')
plt.xlabel('log10(Counts per file)',fontsize=5)
locs,pltlabels = plt.xticks(fontsize=5)
plt.setp(pltlabels, rotation=90, fontsize=5)
plt.yticks(ind,list(reversed(fileNames)),fontsize=5)
plt.tick_params(axis='yticks',labelsize=5)
ax=plt.gca()
for line in ax.get_yticklines():
line.set_markersize(0)
plt.title('Read counts',fontsize=5)
readDF=pd.DataFrame()
readDF['File_name']=fileNames
readDF['Reads_per_file']=counts
outfilepathToSave=outfilepath + '/PlotData_ReadsPerPipeFile'
readDF.to_csv(outfilepathToSave)
plt.subplot(2,3,1)
plot_ReadAccounting(outfilepath,sampleName)
# <codecell>
def plot_BoundGeneTypes(outfilepath,sampleName):
record=pd.DataFrame()
# Exclude specific files (e.g., UTR-specific reads).
geneListToPlot=[f for f in glob.glob(outfilepath+'PlotData_ReadsPerGene_*') if '5pUTR' not in f and '3pUTR' not in f and 'CDS' not in f]
for boundGenes in geneListToPlot:
glist=pd.read_csv(boundGenes,header=None)
glist.columns=['GeneName','Count']
gName=boundGenes.split('_')[-1]
record.loc[gName,'genesBound']=glist.shape[0]
record.loc[gName,'totalReads']=glist['Count'].sum()
record.sort('genesBound',inplace=True)
outfilepathToSave=outfilepath + '/PlotData_ReadAndGeneCountsPerGenetype'
record.to_csv(outfilepathToSave)
ind = np.arange(record.shape[0]) + 0.5
plt.bar(ind,record['genesBound'],align='center',color='blue')
locs,pltlabels = plt.yticks(fontsize=5)
locs,pltlabels = plt.xticks(ind,record.index,fontsize=5)
plt.setp(pltlabels, rotation=90, fontsize=5)
plt.tick_params(axis='xticks',labelsize=5)
ax=plt.gca()
for line in ax.get_xticklines():
line.set_markersize(0)
plt.ylabel('Number of genes bound',fontsize=5)
plt.tick_params(axis='yticks',labelsize=5)
plt.title('Bound genes by class',fontsize=5)
plt.subplot(2,3,6)
plot_BoundGeneTypes(outfilepath,sampleName)
# <codecell>
def plot_ReadsPerCluster(outfilepath,sampleName):
readPerCluster=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters.readsPerCluster'%(threshold,index_tag)
clust=pd.DataFrame(pd.read_table(readPerCluster,header=None))
clust.columns=['ReadsPerCluster']
clust=clust['ReadsPerCluster']
interval=10
bins=range(min(clust)-10,max(clust)+10,interval)
hist,bins=np.histogram(clust,bins=bins)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
plt.bar(center, hist,align='center',width=width)
locs,pltlabels = plt.yticks(fontsize=5)
locs,pltlabels = plt.xticks(center,center,fontsize=5)
plt.setp(pltlabels, rotation=90, fontsize=3.5)
plt.tick_params(axis='yticks',labelsize=5)
plt.xlabel('Reads per cluster (bin=%s)'%interval,fontsize=5)
plt.ylabel('Frequency (RT stop count)',fontsize=5)
plt.title('Reads per cluster',fontsize=5)
plt.xlim(0,100) # Make the histogram easy to view.
# plt.xlim(-interval,np.max(center)+interval)
plt.subplot(2,3,2)
plot_ReadsPerCluster(outfilepath,sampleName)
# <codecell>
def plot_ClusterSizes(outfilepath,sampleName):
clipClusters=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters'%(threshold,index_tag)
clust=pd.DataFrame(pd.read_table(clipClusters,header=None,skiprows=1))
clust.columns=['chr','start','end','name','score','strand','m1','m2']
clust['clusterSize']=clust['start']-clust['end']
clust['clusterSize']=clust['clusterSize'].apply(lambda x: math.fabs(x))
plt.boxplot(clust['clusterSize'])
plt.tick_params(axis='x',labelbottom='off')
ax=plt.gca()
for line in ax.get_xticklines():
line.set_markersize(0)
plt.ylabel('Cluster length (bases)',fontsize=5)
locs,pltlabels = plt.yticks(fontsize=5)
plt.title('Cluster size',fontsize=5)
plt.subplot(2,3,3)
plot_ClusterSizes(outfilepath,sampleName)
# <codecell>
def plot_clusterBindingIntensity(outfilepath,sampleName):
clusterCenterHeatmap=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters_cleaned_sorted.clusterCenter_heatmap.txt'%(threshold,index_tag)
hmap=pd.DataFrame(pd.read_table(clusterCenterHeatmap,header=None,skiprows=1))
hmap_vals=hmap.ix[:,1:]
sums=hmap_vals.sum(axis=1)
hmap_vals=hmap_vals.loc[np.argsort(sums),:]
plt.ylim(0,hmap_vals.shape[0])
p=plt.pcolormesh(np.array(hmap_vals),cmap='Blues')
plt.tick_params(axis='x',labelbottom='off')
plt.xlabel('Cluster position',fontsize=5)
locs,pltlabels = plt.yticks(fontsize=5)
plt.ylabel('Cluster number',fontsize=5)
plt.title('Read distribution',fontsize=5)
plt.subplot(2,3,4)
plot_clusterBindingIntensity(outfilepath,sampleName)
# <codecell>
def readUTRfile(path):
geneCounts=pd.read_csv(path,header=None)
geneCounts.columns=['Gene_name','Count']
return geneCounts
def plot_readsBymRNAregion(outfilepath,sampleName):
pc_5pReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_5pUTR')['Count'].sum()
pc_3pReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_3pUTR')['Count'].sum()
pc_CDSReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_CDS')['Count'].sum()
non_intronic=pc_5pReads+pc_3pReads+pc_CDSReads
allProteinCoding=outfilepath +'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed'
all_pc=pd.DataFrame(pd.read_table(allProteinCoding,header=None))
pc_allReads=all_pc.shape[0]
v=[float(pc_allReads-non_intronic)/pc_allReads,float(pc_5pReads)/pc_allReads,float(pc_CDSReads)/pc_allReads,float(pc_3pReads)/pc_allReads]
pie_wedges=ax.pie(v,labels=["Intronic","5p UTR","CDS","3pUTR"],labeldistance=1.1,autopct='%1.1f%%')
plt.rcParams['font.size']=5
for wedge in pie_wedges[0]:
wedge.set_edgecolor('black')
wedge.set_lw(1)
ax=plt.subplot(2,3,5)
plot_readsBymRNAregion(outfilepath,sampleName)
# <codecell>
fig1=plt.figure(1)
plt.subplot(2,3,1)
plot_ReadAccounting(outfilepath,sampleName)
plt.subplot(2,3,2)
plot_ReadsPerCluster(outfilepath,sampleName)
plt.subplot(2,3,3)
plot_ClusterSizes(outfilepath,sampleName)
plt.subplot(2,3,4)
plot_clusterBindingIntensity(outfilepath,sampleName)
ax=plt.subplot(2,3,5)
plot_readsBymRNAregion(outfilepath,sampleName)
plt.subplot(2,3,6)
plot_BoundGeneTypes(outfilepath,sampleName)
fig1.tight_layout()
fig1.savefig(outfilepath+'Figure1.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig1.savefig(outfilepath+'Figure1.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_mRNAgeneBodyDist(outfilepath,sampleName):
averageGraph=outfilepath+'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved_cleaned_sorted_UTRs_scaled_cds200_abt0_averageGraph.txt'
hmap=pd.DataFrame(pd.read_table(averageGraph,header=None,skiprows=1))
hmap=hmap.set_index(0)
avgTrace=hmap.loc['treat',:]
plt.plot(avgTrace,color='blue',linewidth='2')
plt.vlines(200,0,np.max(avgTrace),linestyles='dashed')
plt.vlines(400,0,np.max(avgTrace),linestyles='dashed')
plt.ylim(0,np.max(avgTrace))
plt.tick_params(axis='x',labelbottom='off')
plt.xlabel('mRNA gene body (5pUTR, CDS, 3pUTR)')
plt.ylabel('Read density')
plt.tick_params(axis='y',labelsize=5)
plt.title('CLIP signal across average mRNA transcript.',fontsize=5)
plt.subplot2grid((2,3),(0,0),colspan=3)
plot_mRNAgeneBodyDist(outfilepath,sampleName)
# <codecell>
def convertENBLids(enst_name):
ensg_name=ensemblGeneAnnot.loc[enst_name,'name2']
return ensg_name
def getUTRbindingProfile(utr,hmap_m):
if utr=='5p':
ix=(hmap_m[range(201,601)].sum(axis=1)==0)&(hmap_m[range(1,201)].sum(axis=1)>0)
screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_5pUTR')
elif utr=='3p':
ix=(hmap_m[range(1,401)].sum(axis=1)==0)&(hmap_m[range(401,601)].sum(axis=1)>0)
screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_3pUTR')
else:
ix=(hmap_m[range(1,201)].sum(axis=1)==0)&(hmap_m[range(401,601)].sum(axis=1)==0)&(hmap_m[range(201,401)].sum(axis=1)>0)
screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_CDS')
# Ensure all genes are also identified in pre-allocated gene lists.
hmap_m_utrSpec=hmap_m.ix[ix,:]
hmap_m_utrSpec_filter=pd.merge(hmap_m_utrSpec,screen,left_on='ENSG_ID',right_on='Gene_name',how='inner')
sums=hmap_m_utrSpec_filter[range(1,601)].sum(axis=1)
hmap_m_utrSpec_filter=hmap_m_utrSpec_filter.loc[np.argsort(sums),:]
return hmap_m_utrSpec_filter
def plot_geneBodyPartition(outfilepath,sampleName):
treatMatrix=outfilepath+'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved_cleaned_sorted_UTRs_scaled_cds200_abt0_treatmatrix.txt'
hmap=pd.DataFrame(pd.read_table(treatMatrix,header=None,skiprows=1))
    # Ensure genes recovered from this analysis are independently identified using partitioning of CLIPper cluster data.
hmap['ENSG_ID']=hmap.ix[:,0].apply(convertENBLids)
bound_pc = outfilepath+'clipGenes_proteinCoding'
pc_genes=pd.DataFrame(pd.read_table(bound_pc,header=None,))
pc_genes.columns=['ENSG_ID']
hmap_m=pd.merge(hmap,pc_genes,left_on='ENSG_ID',right_on='ENSG_ID',how='inner')
# Isolate intronic bound genes.
tosave=outfilepath+'PlotData_ExclusiveBound_Intronic'
intronicBoundGenes=list(set(pc_genes['ENSG_ID'])-set(hmap_m['ENSG_ID']))
np.savetxt(tosave,np.array(intronicBoundGenes),fmt="%s")
# UTR specific genes.
geneTypes=['5p','cds','3p']
depth=50
for i in range(0,3):
utrMatrix=getUTRbindingProfile(geneTypes[i],hmap_m)
tosave=outfilepath+'PlotData_ExclusiveBound_%s'%geneTypes[i]
np.savetxt(tosave,utrMatrix['ENSG_ID'],fmt="%s")
plt.subplot2grid((2,3),(1,i),colspan=1)
dataToPlot=utrMatrix[range(1,601)]
p=plt.pcolormesh(np.array(dataToPlot)[-depth:-1,:],cmap='Blues')
plt.title(geneTypes[i],fontsize=5)
plt.vlines(200,0,depth,linestyles='dashed')
plt.vlines(400,0,depth,linestyles='dashed')
plt.tick_params(axis='x',labelbottom='off')
plt.tick_params(axis='y',labelleft='off')
plt.ylim(0,depth)
plt.ylabel('Ranked genes (highest on bottom)',fontsize=5)
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.title('%s specific genes: %s'%(geneTypes[i],np.unique(utrMatrix['ENSG_ID']).shape[0]),fontsize=5)
ensemblGeneAnnot=pd.DataFrame(pd.read_table(genesFile))
ensemblGeneAnnot=ensemblGeneAnnot.set_index('name') # Make ENST the index
plot_geneBodyPartition(outfilepath,sampleName)
# <codecell>
fig2=plt.figure(2)
plt.subplot2grid((2,3),(0,0),colspan=3)
plot_mRNAgeneBodyDist(outfilepath,sampleName)
plot_geneBodyPartition(outfilepath,sampleName)
fig2.tight_layout()
fig2.savefig(outfilepath+'Figure2.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig2.savefig(outfilepath+'Figure2.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_repeatRNA(outfilepath,sampleName):
repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype='string')
repeat_genome_bases=repeat_genome[1]
repFiles=glob.glob(outfilepath + '/PlotData_RepeatRNAreads_*')
repFiles=[repFile for repFile in repFiles if 'rDNA' not in repFile]
plotDim=math.ceil(math.sqrt(len(repFiles)))
i=0
for path in repFiles:
name=path.split('RepeatRNAreads_')[-1]
try:
# Read in each RT stop file
hits_per_rep=pd.read_csv(path)
RTpositions=hits_per_rep['RT_stop']
start=hits_per_rep.loc[0,'Repeat_Start']
end=hits_per_rep.loc[0,'Repeat_End']
# Histogram of RT stops across gene body
bins=range(start,end+2,1)
hist,bins=np.histogram(RTpositions,bins=bins)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
# Normalize
histPlot=np.array(hist,dtype=float)
histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
# Subplot
plt.subplot(plotDim,plotDim,i+1)
plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
plt.tick_params(axis='x',labelsize=2.5)
plt.tick_params(axis='y',labelsize=2.5)
plt.title('RT stops for %s: %s'%(name,len(RTpositions)),fontsize=5)
plt.xlim(start,end)
# Record data
storageDF=pd.DataFrame()
sequence=repeat_genome_bases[start:end+1]
storageDF['Sequence']=pd.Series(list(sequence))
readsPerBase=np.array(list(hist))
readsPerBaseNorm=np.array(list(histPlot))
storageDF['RT_stops']=readsPerBase
storageDF['RT_stops_norm']=readsPerBaseNorm
outfilepathToSave=outfilepath +'/PlotData_RepeatRNAHist_%s'%name
storageDF.to_csv(outfilepathToSave)
i+=1
except:
print "No reads for repeatRNA %s"%name
plt.tight_layout()
fig3=plt.figure(3)
plot_repeatRNA(outfilepath,sampleName)
fig3.tight_layout()
fig3.savefig(outfilepath+'Figure3.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig3.savefig(outfilepath+'Figure3.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_rDNA(outfilepath,sampleName):
plt.subplot2grid((3,3),(0,0),colspan=3)
name='rDNA'
rDNA=glob.glob(outfilepath + 'PlotData_RepeatRNAreads_rDNA')
hits_per_rep=pd.read_csv(rDNA[0])
RTpositions=hits_per_rep['RT_stop']
start=hits_per_rep.loc[0,'Repeat_Start']
end=hits_per_rep.loc[0,'Repeat_End']
bins=range(start,end+2,1)
hist,bins=np.histogram(RTpositions,bins=bins)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1]+bins[1:])/2
histPlot=np.array(hist,dtype=float)
histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
plt.tick_params(axis='x',labelsize=2.5)
plt.tick_params(axis='y',labelsize=2.5)
plt.title('RT stops for %s: %s'%(name,len(RTpositions)),fontsize=5)
plt.xlim(start,end)
# Record data
storageDF=pd.DataFrame()
sequence=repeat_genome_bases[start:end+1]
storageDF['Sequence']=pd.Series(list(sequence))
readsPerBase=np.array(list(hist))
readsPerBaseNorm=np.array(list(histPlot))
storageDF['RT_stops']=readsPerBase
storageDF['RT_stops_norm']=readsPerBaseNorm
outfilepathToSave=outfilepath +'/PlotData_RepeatRNAHist_%s'%name
storageDF.to_csv(outfilepathToSave)
# Features of rDNA with respect to start of the bowtie index (index=0)
rRNAstart=start
plt.axvspan(start18s+rRNAstart,end18s+rRNAstart,facecolor='g',alpha=0.5)
plt.axvspan(start5s+rRNAstart,end5s+rRNAstart,facecolor='r',alpha=0.5)
plt.axvspan(start28s+rRNAstart,end28s+rRNAstart,facecolor='b',alpha=0.5)
# Generate histogram for transcribed region
plt.subplot2grid((3,3),(1,0),colspan=3)
datarDNAOnly=RTpositions-start
bins=range((start-start),(end-start+2),1)
hist,bins=np.histogram(datarDNAOnly,bins=bins)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
histPlot=np.array(hist,dtype=float)
histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
plt.tick_params(axis='x',labelsize=2.5)
plt.tick_params(axis='y',labelsize=2.5)
plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
plt.ylabel('Normalized RT stop / bin',fontsize=2.5)
plt.axvspan(start18s,end18s,facecolor='g',alpha=0.5)
plt.axvspan(start5s,end5s,facecolor='r',alpha=0.5)
plt.axvspan(start28s,end28s,facecolor='b',alpha=0.5)
plt.xlim(0,rRNAend)
# Individual regions
plt.subplot2grid((3,3),(2,0),colspan=1)
plt.bar(center,histPlot,align='center',width=width,color='green',alpha=0.75)
plt.xlim(start18s,end18s)
plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
plt.ylabel('Normalized RT stop / bin',fontsize=2.5)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.title('18s Region',fontsize=5)
plt.subplot2grid((3,3),(2,1),colspan=1)
plt.bar(center,histPlot,align='center',width=width,color='red',alpha=0.75)
plt.xlim(start5s,end5s)
plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.title('5.8s Region',fontsize=5)
plt.subplot2grid((3,3),(2,2),colspan=1)
plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.75)
plt.xlim(start28s,end28s)
plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.title('28s Region',fontsize=5)
plt.tight_layout()
fig4=plt.figure(4)
plot_rDNA(outfilepath,sampleName)
fig4.tight_layout()
fig4.savefig(outfilepath+'Figure4.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig4.savefig(outfilepath+'Figure4.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def getBindingFrac(type_specific):
# 5' position on the negative strand is snoRNA stop coordinate.
neg_data=type_specific[type_specific['strand_snoRNA']=='-']
neg_data['diff']=np.abs(neg_data['Stop_snoRNA']-neg_data['Start'])
neg_data['frac']=neg_data['diff']/(neg_data['Stop_snoRNA']-neg_data['Start_snoRNA'])
# 5' position on the positive strand is snoRNA start coordinate.
pos_data=type_specific[type_specific['strand_snoRNA']=='+']
pos_data['diff']=np.abs(pos_data['Start_snoRNA']-pos_data['Start'])
pos_data['frac']=pos_data['diff']/(pos_data['Stop_snoRNA']-pos_data['Start_snoRNA'])
DF_snoProfile=pd.concat([neg_data,pos_data])
return DF_snoProfile
print "snoRNA gene body anaysis."
# logOpen.write("Gene body analysis.\n")
bf_sno=pd.read_table(outfilepath+"clipGenes_snoRNA_LowFDRreads.bed",header=None)
bf_sno.columns=['Chr','Start','End','CLIPper_name','Q','Strand','Chr_snoRNA','Start_snoRNA','Stop_snoRNA','name_snoRNA','Type','strand_snoRNA']
snoTypes=pd.DataFrame(bf_sno.groupby('Type').size())
snoTypes.columns=['Reads']
snoTypes['Fraction']=snoTypes['Reads']/snoTypes['Reads'].sum(axis=1)
outfilepathToSave=outfilepath+'/PlotData_readsPerSnoRNAType'
snoTypes.to_csv(outfilepathToSave)
snoTypesAndGenes=pd.DataFrame(bf_sno.groupby(['Type','name_snoRNA']).size())
snoTypesAndGenes.columns=['Count_per_gene']
outfilepathToSave=outfilepath+'/PlotData_geneStatsPerSnoRNAType'
snoTypesAndGenes.to_csv(outfilepathToSave)
fig5=plt.figure(5)
ax=plt.subplot(2,2,1)
pie_wedges=ax.pie(snoTypes['Fraction'],labels=snoTypes.index,labeldistance=1.1,autopct='%1.1f%%')
plt.rcParams['font.size']=5
for wedge in pie_wedges[0]:
wedge.set_edgecolor('black')
wedge.set_lw(1)
i=2
for sType in set(bf_sno['Type']):
type_specific=bf_sno[bf_sno['Type']==sType]
sno_profile=getBindingFrac(type_specific)
if sType=='C':
title="C/D_box"
elif sType=='H':
title="H/ACA_box"
else:
title="scaRNA"
outfilepathToSave=outfilepath+'/PlotData_snoRNAReadDist_%s'%sType
sno_profile.to_csv(outfilepathToSave)
plt.subplot(2,2,i)
bins=np.arange(0,1,0.01)
hist,bins=np.histogram(sno_profile['frac'],bins=bins)
hist=np.array(hist/float(sno_profile['frac'].shape[0]),dtype=float)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
plt.bar(center,hist,align='center',width=width,color='blue',alpha=0.75)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.xlabel('Fraction of gene body (5p - 3p)',fontsize=5)
plt.title('Binding profile for %s'%title,fontsize=5)
plt.xlim([0,1])
# Record data
storageDF=pd.DataFrame()
storageDF['bins']=pd.Series(bins)
storageDF['hist']=pd.Series(hist)
outfilepathToSave=outfilepath+'/PlotData_snoRNAhistogram_%s'%sType
storageDF.to_csv(outfilepathToSave)
i+=1
fig5.tight_layout()
fig5.savefig(outfilepath+'Figure5.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig5.savefig(outfilepath+'Figure5.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def getncRNABindingFrac(type_specific):
    # 5' position on the negative strand is the gene stop coordinate.
neg_data=type_specific[type_specific['Strand']=='-']
neg_data['diff']=np.abs(neg_data['Gene End (bp)']-neg_data['RT_stop'])
neg_data['frac']=neg_data['diff']/(neg_data['Gene End (bp)']-neg_data['Gene Start (bp)'])
    # 5' position on the positive strand is the gene start coordinate.
pos_data=type_specific[type_specific['Strand']=='+']
pos_data['diff']=np.abs(pos_data['Gene Start (bp)']-pos_data['RT_stop'])
pos_data['frac']=pos_data['diff']/(pos_data['Gene End (bp)']-pos_data['Gene Start (bp)'])
DF_ncRNAProfile=pd.concat([neg_data,pos_data])
return DF_ncRNAProfile
print "ncRNA gene body anaysis."
st_stopFiles=glob.glob(outfilepath+"*.geneStartStop")
st_stopFiles=[f for f in st_stopFiles if 'rRNA' not in f]
fig6=plt.figure(6)
plotDim=math.ceil(math.sqrt(len(st_stopFiles)))
i=1
for st_file in st_stopFiles:
name=st_file.split('clipGenes_')[1].split('_LowFDRreads')[0]
tmp=pd.read_csv(st_file)
tmp['RT_stop']=tmp['Start']+expand
tmp_profile=getncRNABindingFrac(tmp)
plt.subplot(plotDim,plotDim,i)
bins=np.arange(0,1,0.01)
hist,bins=np.histogram(tmp_profile['frac'],bins=bins)
hist=np.array(hist/float(tmp_profile['frac'].shape[0]),dtype=float)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
plt.bar(center,hist,align='center',width=width,color='blue',alpha=0.75)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.xlabel('Fraction of gene body (5p - 3p)',fontsize=5)
plt.title('Binding profile for %s'%name,fontsize=5)
i+=1
fig6.tight_layout()
fig6.savefig(outfilepath+'Figure6.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig6.savefig(outfilepath+'Figure6.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
logOpen.close()
# <codecell>
| gpl-2.0 | 3,409,307,261,350,917,000 | 42.13089 | 329 | 0.702674 | false | 3.036631 | false | false | false |
yrakcaz/music-style-classifier | src/ai.py | 1 | 1667 | from songmodel import SongModel
from extractor import Extractor
from sklearn import svm, multiclass, neighbors
import subprocess, math
class AI:
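    """Genre classifier: extracts tempo, spectral rolloff, zero-crossing rate and
    duration features from a song and predicts its genre (labels read from
    training/Tracks/genres.txt) with either k-NN or a linear SVM."""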
def __init__(self, song):
self.song = song
self.model = SongModel()
self.extractor = Extractor()
self.tempo = 0
self.rolloffmoy = 0.0
self.rolloffect = 0.0
self.zcrmoy = 0.0
self.zcrect = 0.0
self.duration = 0.0
self.genre = []
for l in open("training/Tracks/genres.txt"):
self.genre.append(l.replace('\n',''))
def get_song_datas(self):
self.extractor.set_song(self.song)
self.tempo = self.extractor.get_tempo()
self.rolloffmoy = self.extractor.get_rolloff_moy()
self.rolloffect = self.extractor.get_rolloff_ect()
self.zcrmoy = self.extractor.get_zcr_moy()
self.zcrect = self.extractor.get_zcr_ect()
self.duration = self.extractor.get_duration()
def classify_with_knn(self):
vect, mat = self.model.get_datas()
clf = neighbors.KNeighborsClassifier()
clf.fit(mat, vect)
self.get_song_datas()
l = [[self.tempo, self.rolloffmoy, self.rolloffect, self.zcrmoy, self.zcrect, self.duration]]
ret = clf.predict(l)
print(self.genre[ret[0]])
def classify_with_svm(self):
vect, mat = self.model.get_datas()
clf = svm.SVC(class_weight='auto', kernel='linear')
clf.fit(mat, vect)
self.get_song_datas()
l = [[self.tempo, self.rolloffmoy, self.rolloffect, self.zcrmoy, self.zcrect, self.duration]]
ret = clf.predict(l)
print(self.genre[int(ret[0])])
| gpl-2.0 | 2,355,641,711,767,800,000 | 35.23913 | 101 | 0.610078 | false | 3.218147 | false | false | false |
MostlyOpen/odoo_addons_jcafb | myo_survey_cst/wizard/survey_file_arquive_wizard.py | 1 | 2349 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import api, fields, models
import logging
import shutil
_logger = logging.getLogger(__name__)
class SurveyFileArquiveWizard(models.TransientModel):
_name = 'myo.survey.file.arquive.wizard'
def _default_survey_file_ids(self):
return self._context.get('active_ids')
survey_file_ids = fields.Many2many(
'myo.survey.file',
string='Survey Files',
default=_default_survey_file_ids)
dir_path = fields.Char(
'Directory Path',
required=True,
help="Directory Path",
default='/opt/openerp/mostlyopen_clvhealth_jcafb/survey_files/input'
)
arquive_dir_path = fields.Char(
'Arquive Directory Path',
required=True,
help="Arquive Directory Path",
default='/opt/openerp/mostlyopen_clvhealth_jcafb/survey_files/arquive'
)
@api.multi
def do_survey_file_arquive(self):
self.ensure_one()
for survey_file_reg in self.survey_file_ids:
filepath = self.dir_path + '/' + survey_file_reg.name
arquive_filepath = self.arquive_dir_path + '/' + survey_file_reg.name
print '>>>>>', filepath, survey_file_reg.state
# print '>>>>>', survey_file_reg.survey_id.description
if survey_file_reg.state == 'imported':
shutil.move(filepath, arquive_filepath)
survey_file_reg.state = 'arquived'
return True
| agpl-3.0 | 1,237,427,965,669,874,700 | 32.084507 | 81 | 0.61771 | false | 3.934673 | false | false | false |
YuHongJun/python-training | work_two_Crawler/catch_mzui.py | 1 | 3114 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# http://cuiqingcai.com/3179.html
__author__ = 'Demi Yu'
from bs4 import BeautifulSoup
import os
from Download import request
class mzitu():
def all_url(self, url):
        html = request.get(url,3) ## call request() with the index URL; it returns a response
        all_a = BeautifulSoup(html.text, 'lxml').find('div', class_='all').find_all('a')
        retval = os.getcwd() # remember the current working directory, e.g. '/Users/yuhongjun/Python/python-training/work_two_Crawler'
        for a in all_a:
            title = a.get_text()
            print(u'Saving:', title) ## progress message so the run is not silent
            path = str(title).replace("?", '_') ## some titles contain '?', which Windows cannot use in folder names, so replace it
            self.mkdir(path) ## create a folder named after the album title
            href = a['href']
            self.html(href) ## process this album URL
            os.chdir(retval) ## switch back to the original working directory
    def html(self, href): ## from an album URL, collect the per-image page URLs
        html = request.get(href,3)
        max_span = BeautifulSoup(html.text, 'lxml').find('div', class_='pagenavi').find_all('span')[-2].get_text()
        # for page in range(1, int(max_span) + 1):
        for page in range(1, 2):
            page_url = href + '/' + str(page)
            self.img(page_url) ## process each image page
    def img(self, page_url): ## from an image page, get the actual image URL
        img_html = request.get(page_url,3)
        img_url = BeautifulSoup(img_html.text, 'lxml').find('div', class_='main-image').find('img')['src']
        self.save(img_url)
    def save(self, img_url): ## download and save the image
name = img_url[-9:-4]
img = request.get(img_url,3)
f = open(name + '.jpg', 'ab')
f.write(img.content)
f.close()
    def mkdir(self, path): ## create the target folder and switch into it
        path = path.strip()
        macPath="Pic/"
        isExists = os.path.exists(os.path.join(macPath, path))
        if not isExists:
            print(u'Created a folder named', path)
            os.makedirs(os.path.join(macPath, path))
            os.chdir(os.path.join(macPath, path)) ## switch into the new directory
            return True
        else:
            print(u'Folder', path, u'already exists!')
            return False
    # def request(self, url): ## fetch the page and return the response
# headers = {
# 'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}
# content = requests.get(url, headers=headers)
# return content
Mzitu = mzitu() ## instantiate the crawler
Mzitu.all_url('http://www.mzitu.com/all') ## entry point: start crawling from the index page
| mit | 686,732,049,385,913,300 | 37.727273 | 136 | 0.58216 | false | 2.160609 | false | false | false |
ajyoon/brown | examples/feldman_projections_2/score.py | 1 | 5909 | from typing import Union
from brown import constants
from brown.core import brown
from brown.core.font import Font
from brown.core.music_font import MusicFont
from brown.core.object_group import ObjectGroup
from brown.core.path import Path
from brown.core.pen import Pen
from brown.core.pen_pattern import PenPattern
from brown.core.staff import Staff
from brown.utils.units import GraphicUnit
from examples.feldman_projections_2.glyph_name import GlyphName
from examples.feldman_projections_2.grid_unit import GridUnit
from examples.feldman_projections_2.instrument_data import InstrumentData
from examples.feldman_projections_2.measure import Measure
from examples.feldman_projections_2.music_text_event import MusicTextEvent
from examples.feldman_projections_2.text_event import TextEvent
class Score(ObjectGroup):
_TEXT_FONT_SIZE = GraphicUnit(GridUnit(0.6)).value
_MUSIC_FONT_SIZE = Staff._make_unit_class(GridUnit(0.5))
_bar_line_pen = Pen(thickness=GridUnit(0.05), pattern=PenPattern.DOT)
_instrument_divider_pen = Pen(thickness=GridUnit(0.05))
def __init__(self, pos, instruments, parent):
super().__init__(pos, parent)
self.events = []
self.text_font = Font.deriving(
brown.default_font,
size=Score._TEXT_FONT_SIZE,
weight=60)
self.music_font = MusicFont(
constants.DEFAULT_MUSIC_FONT_NAME,
Score._MUSIC_FONT_SIZE)
self.instruments = instruments
for i, instrument in enumerate(instruments):
for event_data in instrument.event_data:
self.events.append(self._create_event(i, event_data))
self.draw_instrument_dividers()
self.draw_bar_lines()
def _create_event(self, instrument_index, event_data):
if isinstance(event_data.text, GlyphName):
return self._create_music_text_event(instrument_index, event_data)
return self._create_text_event(instrument_index, event_data)
def _create_text_event(self, instrument_index, event_data):
return TextEvent(
(event_data.pos_x, (Score._instrument_pos_y(instrument_index)
+ event_data.register.value)),
self,
event_data.length,
event_data.text,
self.text_font)
def _create_music_text_event(self, instrument_index, event_data):
return MusicTextEvent(
(event_data.pos_x, (Score._instrument_pos_y(instrument_index)
+ event_data.register.value)),
self,
event_data.length,
event_data.text,
self.music_font)
@property
def measure_count(self):
return max(max(int(Measure(e.pos_x).value)
for e in i.event_data)
for i in self.instruments) + 1
@staticmethod
def _instrument_pos_y(instrument_index):
return GridUnit(3 * instrument_index)
@staticmethod
def _divider_pos_y(divider_index):
return GridUnit(3 * divider_index)
@staticmethod
def _divider_visible(instrument_above: Union[InstrumentData, None],
instrument_below: Union[InstrumentData, None],
measure_num: int) -> bool:
return ((instrument_above is not None
and instrument_above.measure_has_events(measure_num))
or (instrument_below is not None
and instrument_below.measure_has_events(measure_num)))
def _bar_line_extends_below(self,
measure_num: int,
divider_num: int) -> bool:
if divider_num >= len(self.instruments):
return False
instrument = self.instruments[divider_num]
return (instrument.measure_has_events(measure_num - 1)
or instrument.measure_has_events(measure_num))
def draw_instrument_dividers(self):
for divider in range(len(self.instruments) + 1):
current_path = Path((Measure(0), Score._divider_pos_y(divider)),
pen=Score._instrument_divider_pen,
parent=self)
instrument_above = (self.instruments[divider - 1]
if divider > 0 else None)
instrument_below = (self.instruments[divider]
if divider < len(self.instruments) else None)
drawing = False
for measure_num in range(self.measure_count + 1):
if Score._divider_visible(
instrument_above, instrument_below, measure_num):
if not drawing:
current_path.move_to(Measure(measure_num), GridUnit(0))
drawing = True
else:
if drawing:
current_path.line_to(Measure(measure_num), GridUnit(0))
drawing = False
def draw_bar_lines(self):
for measure_num in range(self.measure_count + 1):
current_path = Path((Measure(measure_num), GridUnit(0)),
pen=Score._bar_line_pen,
parent=self)
drawing = False
for divider_num in range(len(self.instruments) + 1):
if self._bar_line_extends_below(measure_num, divider_num):
if not drawing:
current_path.move_to(
GridUnit(0),
Score._instrument_pos_y(divider_num))
drawing = True
else:
if drawing:
current_path.line_to(
GridUnit(0),
Score._instrument_pos_y(divider_num))
drawing = False
| gpl-3.0 | -1,098,191,962,342,558,000 | 40.034722 | 79 | 0.575055 | false | 4.094941 | false | false | false |
rizar/actor-critic-public | bin/pack_to_hdf5.py | 1 | 2224 | #!/usr/bin/env python
import h5py
import numpy
import argparse
import cPickle
from fuel.datasets.hdf5 import H5PYDataset
def pack(f, name, dataset_pathes):
datasets = [cPickle.load(open(path)) for path in dataset_pathes]
data = sum(datasets, [])
dtype = h5py.special_dtype(vlen=numpy.dtype('int32'))
table = f.create_dataset(name, (len(data),), dtype=dtype)
for i, example in enumerate(data):
table[i] = example
return numpy.array([len(d) for d in datasets])
if __name__ == '__main__':
parser = argparse.ArgumentParser("Pack data to HDF5")
parser.add_argument('-s', dest='sources', nargs='*', help="Source datasets")
parser.add_argument('-t', dest='targets', nargs='*', help="Target datasets")
parser.add_argument('-n', dest='names', nargs='*', help="Dataset names")
parser.add_argument('-i', dest='add_ids',
action='store_true', default=False,
help="Add integer IDs")
parser.add_argument('dest', help="Destination")
args = parser.parse_args()
assert len(args.sources) == len(args.targets)
assert len(args.sources) == len(args.names)
with h5py.File(args.dest, mode='w') as f:
lengths = pack(f, "sources", args.sources)
assert numpy.all(lengths == pack(f, "targets", args.targets))
offsets = [0] + list(lengths.cumsum())
total_len = offsets[-1]
if args.add_ids:
id_table = f.create_dataset('ids',
data=numpy.arange(total_len,
dtype='int32'))
split_dict = {
args.names[i]:
{'sources': (offsets[i], offsets[i + 1]),
'targets': (offsets[i], offsets[i + 1]),
'ids': (offsets[i], offsets[i + 1])}
for i in range(len(args.names))}
else:
split_dict = {
args.names[i]:
{'sources': (offsets[i], offsets[i + 1]),
'targets': (offsets[i], offsets[i + 1])}
for i in range(len(args.names))}
f.attrs['split'] = H5PYDataset.create_split_array(split_dict)
| mit | 2,333,511,898,382,353,400 | 38.017544 | 80 | 0.540468 | false | 3.756757 | false | false | false |
grantSwalwell/Machine-Learning | Embedding/PreEmbed.py | 1 | 1338 | import os
import numpy
import word2toke
def getCommonVecs(num=10000, d=100, path="C:/Users/Grant/PycharmProjects/Machine-Learning/Embedding/"):
fname = os.path.join(path, "glove.6B." + str(d) + "d.txt")
f = open(fname, 'r', encoding="utf-8")
dic = {}
for step, line, in zip(range(num), f):
entry = line.split()
word, vec = entry[0], numpy.array(entry[1:], dtype=float).reshape((d, 1))
dic[word] = vec
return dic
def getVecsInVocab(vocab, path="C:/Users/Grant/PycharmProjects/Machine-Learning/Embedding/", d=100, steps=100000):
fname = os.path.join(path, "glove.6B." + str(d) + "d.txt")
f = open(fname, 'r', encoding="utf-8")
dic, out = {}, {}
    # initialize every vocab word with a small random vector
    for word in vocab:
        out[word] = (numpy.random.rand(d, 1) - 0.5) / float(int(d) + 1)
    # read up to `steps` lines of the GloVe file into a word -> vector lookup
    for step, line, in zip(range(steps), f):
        entry = line.split()
        word, vec = entry[0], numpy.array(entry[1:], dtype=float).reshape((d,))
        dic[word] = vec
    # replace the random vector with the pretrained GloVe vector when the word is covered
    for word in out:
        if word in dic:
            out[word] = dic[word]
return out
def getWeights(vocab, w2int, dic, d=100):
n = len(vocab)
W = numpy.zeros((n, d))
for word in vocab:
W[w2int[word]] = dic[word].reshape((d))
return W
#def vec2embedding | mit | 6,394,171,580,117,049,000 | 26.326531 | 114 | 0.586697 | false | 2.902386 | false | false | false |
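# Illustrative usage sketch (not part of the original module): build an embedding
# matrix for a small vocabulary. Assumes the glove.6B.*d.txt files exist at the
# default path; the vocabulary and word-to-index map below are made up.
#
#     vocab = ["the", "cat", "sat"]
#     w2int = {w: i for i, w in enumerate(vocab)}
#     dic = getVecsInVocab(vocab, d=100)        # GloVe vector where available, random otherwise
#     W = getWeights(vocab, w2int, dic, d=100)  # (len(vocab), 100) weight matrix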
david618/Simulator | pythonScripts/reset_counts.py | 1 | 2071 | #!/usr/bin/python2
# requires install python-httplib2
# apt-get install python-httplib2
# Run using python2
# python2 get_counts.py
# The sha-bang is set for ubuntu 16.04
# Windows
# pip isntall httplib2
import httplib2
import sys
import json
source_hosts = []
sink_hosts = []
args = sys.argv
numargs = len(args)
if numargs != 3:
raise Exception("reset_counts2.py source-name sink-name")
source_name = args[1]
sink_name = args[2]
try:
# Try to get tasks for source
conn = httplib2.Http(timeout=1)
resp, resp_body = conn.request("http://master.mesos:8080/v2/apps/" + source_name)
data = json.loads(resp_body)
tasks = data['app']['tasks']
for task in tasks:
source_hosts.append(task['host'] + ":" + str(task['ports'][0]))
except Exception as e:
print("Failed to connect")
try:
    # Try to get tasks for sink
conn = httplib2.Http(timeout=1)
resp, resp_body = conn.request("http://master.mesos:8080/v2/apps/" + sink_name)
data = json.loads(resp_body)
tasks = data['app']['tasks']
for task in tasks:
sink_hosts.append(task['host'] + ":" + str(task['ports'][0]))
except Exception as e:
print("Failed to connect")
print("Sources")
for host in source_hosts:
print(host)
try:
conn = httplib2.Http(timeout=1)
resp, resp_body = conn.request("http://" + host + "/reset")
data = json.loads(resp_body)
print(data)
except Exception as e:
#print(e.message)
print("Failed to connect")
print
print("Sinks")
for host in sink_hosts:
print(host)
try:
conn = httplib2.Http(timeout=1)
resp, resp_body = conn.request("http://" + host + "/reset")
data = json.loads(resp_body)
print(data)
except Exception as e:
#print(e.message)
print("Failed to connect")
| apache-2.0 | 2,931,455,239,615,329,300 | 19.919192 | 95 | 0.557219 | false | 3.601739 | false | false | false |
stricklin/Gambot | MoveGenerator.py | 1 | 5742 | from Move import Move
class MoveGenerator:
def __init__(self, board):
"""
makes all legal moves for the side on move
:param board: the board to make moves for
"""
self.board = board
self.pieces = self.get_all_pieces()
self.moves = []
self.get_moves(self.pieces)
def get_all_pieces(self):
"""
:return: the pieces of the side on move
"""
if self.board.whites_turn:
return self.board.white_piece_list.get_pieces()
else:
return self.board.black_piece_list.get_pieces()
def get_all_moves(self):
"""
makes all moves and adds them to self.moves
:return: None
"""
self.get_moves(self.get_all_pieces())
def get_moves(self, pieces):
"""
        makes all moves for the pieces specified and adds the moves to self.moves
:param pieces: the pieces specified
:return: None
"""
for piece in pieces:
self.find_pieces_moves(piece)
def find_pieces_moves(self, src):
"""
        makes all moves for the piece specified and adds the moves to self.moves
:param src: the piece
:return: None
"""
piece_type = src.piece.upper()
# add the moves based on what kind of src it is
if piece_type == "P":
# set the forward direction
if self.board.whites_turn:
f = -1
else:
f = 1
self.scan(src, src, f, -1, True, True, True)
self.scan(src, src, f, 0, True, False, False)
self.scan(src, src, f, 1, True, True, True)
elif piece_type == "N":
self.symscan(src, src, 2, 1, True, True)
self.symscan(src, src, 1, 2, True, True)
elif piece_type == "B":
self.symscan(src, src, 1, 1, False, True)
self.symscan(src, src, 0, 1, True, False)
elif piece_type == "R":
self.symscan(src, src, 0, 1, False, True)
elif piece_type == "Q":
self.symscan(src, src, 0, 1, False, True)
self.symscan(src, src, 1, 1, False, True)
elif piece_type == "K":
self.symscan(src, src, 0, 1, True, True)
self.symscan(src, src, 1, 1, True, True)
def scan(self, src, intermediate, row_change, col_change, short, can_capture, must_capture=False):
"""
looks at all the squares projected in the direction to search for valid moves
        valid moves are added to self.moves
:param src: the starting square, never changes because moves produced need a valid starting square
:param intermediate: a square used to walk the board
:param row_change: the change in rows
:param col_change: the change in cols
:param short: if the scan should continue in the direction
:param can_capture: if the scan allows capturing moves
:param must_capture: if the scan requires capturing moves
:return: None
"""
# make sure that the intermediate square is on the board
if not self.check_bounds(intermediate, row_change, col_change):
return
dest_cords = (intermediate.row + row_change, intermediate.col + col_change)
dest = self.board.dict_board[dest_cords]
if dest.is_empty() and not must_capture:
self.moves.append(Move(src, dest))
else:
# if the square is occupied the scan can stop
short = True
if not dest.is_empty():
# if the dest has a enemy piece
if src.piece.isupper() != dest.piece.isupper():
if can_capture:
                    # if this scan allows capturing, add this move
self.moves.append(Move(src, dest))
if not short:
# recurse if scan not over
self.scan(src, dest, row_change, col_change, short, can_capture, must_capture)
def check_bounds(self, src, row_change, col_change):
"""
checks if a square is on the board
:param src: the starting square
:param row_change: the change in rows
:param col_change: the change in columns
:return: True if square on board, False otherwise
"""
r = src.row + row_change
c = src.col + col_change
if r < 0 or r >= self.board.row_count:
return False
if c < 0 or c >= self.board.col_count:
return False
return True
def symscan(self, src, intermediate, row_change, col_change, short, can_capture, must_capture=False):
"""
looks at all the squares projected in 4 directions to search for valid moves
        valid moves are added to self.moves
:param src: the starting square, never changes because moves produced need a valid starting square
:param intermediate: a square used to walk the board
:param row_change: the change in rows
:param col_change: the change in cols
:param short: if the scan should continue in the direction
:param can_capture: if the scan allows capturing moves
:param must_capture: if the scan requires capturing moves
:return: None
"""
# row_change and col_change are swapped and negated to get 4 directions
self.scan(src, intermediate, row_change, col_change, short, can_capture, must_capture)
self.scan(src, intermediate, -col_change, row_change, short, can_capture, must_capture)
self.scan(src, intermediate, -row_change, -col_change, short, can_capture, must_capture)
self.scan(src, intermediate, col_change, -row_change, short, can_capture, must_capture)
| gpl-3.0 | 6,963,039,227,782,971,000 | 38.875 | 106 | 0.587078 | false | 4.015385 | false | false | false |
osaddon/cimi | cimi/cimiapp/machineimage.py | 1 | 6370 | # Copyright (c) 2012 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common import log as logging
from webob import Request, Response
import json
import copy
from cimibase import Controller, Consts
from cimibase import make_response_data
from cimiutils import concat, match_up, remove_member
from cimiutils import map_image_state
LOG = logging.getLogger(__name__)
class MachineImageCtrler(Controller):
"""
Handles machine image request.
"""
def __init__(self, conf, app, req, tenant_id, *args):
super(MachineImageCtrler, self).__init__(conf, app, req, tenant_id,
*args)
self.os_path = '/%s/images' % (tenant_id)
self.image_id = args[0] if len(args) > 0 else ''
self.entity_uri = 'MachineImage'
self.metadata = Consts.MACHINEIMAGE_METADATA
    # Use GET to handle all machine image read related operations.
def GET(self, req, *parts):
"""
        Handle GET MachineImage request
"""
env = self._fresh_env(req)
env['PATH_INFO'] = '/'.join([self.os_path, self.image_id])
new_req = Request(env)
res = new_req.get_response(self.app)
if res.status_int == 200:
image = json.loads(res.body).get('image')
if image:
body = {}
body['type'] = 'IMAGE'
body['id'] = '/'.join([self.tenant_id, self.entity_uri,
self.image_id])
match_up(body, image, 'name', 'name')
match_up(body, image, 'created', 'created')
match_up(body, image, 'updated', 'updated')
body['state'] = map_image_state(image['status'])
body['imageLocation'] = body['id']
if self.res_content_type == 'application/xml':
response_data = {self.entity_uri: body}
else:
body['resourceURI'] = '/'.join([self.uri_prefix,
self.entity_uri])
response_data = body
new_content = make_response_data(response_data,
self.res_content_type,
self.metadata,
self.uri_prefix)
resp = Response()
self._fixup_cimi_header(resp)
resp.headers['Content-Type'] = self.res_content_type
resp.status = 200
resp.body = new_content
return resp
else:
return res
return res
class MachineImageColCtrler(Controller):
"""
Handles machine image collection request.
"""
def __init__(self, conf, app, req, tenant_id, *args):
super(MachineImageColCtrler, self).__init__(conf, app, req, tenant_id,
*args)
self.os_path = '/%s/images/detail' % (tenant_id)
self.entity_uri = 'MachineImageCollection'
self.metadata = Consts.MACHINEIMAGE_COL_METADATA
    # Use GET to handle all machine image collection read related operations.
def GET(self, req, *parts):
"""
        Handle GET MachineImageCollection request
"""
env = copy.copy(req.environ)
env['SCRIPT_NAME'] = self.os_version
env['PATH_INFO'] = self.os_path
# we will always use json format to get Nova information
env['HTTP_ACCEPT'] = 'application/json'
# need to remove this header, otherwise, it will always take the
# original request accept content type
if env.has_key('nova.best_content_type'):
env.pop('nova.best_content_type')
new_req = Request(env)
res = new_req.get_response(self.app)
if res.status_int == 200:
content = json.loads(res.body)
body = {}
body['id'] = '/'.join([self.tenant_id, self.entity_uri])
body['machineImages'] = []
images = content.get('images', [])
for image in images:
entry = {}
entry['resourceURI'] = '/'.join([self.uri_prefix,
'MachineImage'])
entry['id'] = '/'.join([self.tenant_id,
'MachineImage',
image['id']])
entry['type'] = 'IMAGE'
entry['name'] = image['name']
entry['created'] = image['created']
entry['updated'] = image['updated']
entry['state'] = map_image_state(image['status'])
entry['imageLocation'] = entry['id']
body['machineImages'].append(entry)
body['count'] = len(body['machineImages'])
if self.res_content_type == 'application/xml':
remove_member(body, 'resourceURI')
body['resourceURI'] = '/'.join([self.uri_prefix,
self.entity_uri])
response_data = {'Collection': body}
else:
body['resourceURI'] = '/'.join([self.uri_prefix,
self.entity_uri])
response_data = body
new_content = make_response_data(response_data,
self.res_content_type,
self.metadata,
self.uri_prefix)
resp = Response()
self._fixup_cimi_header(resp)
resp.headers['Content-Type'] = self.res_content_type
resp.status = 200
resp.body = new_content
return resp
else:
return res
| apache-2.0 | -7,873,371,230,516,636,000 | 37.841463 | 78 | 0.513972 | false | 4.420541 | false | false | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.2/Lib/tokenize.py | 1 | 10661 | """Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
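# Illustrative example (not part of the original module): print each token's type
# name and text for a source file; assumes 'example.py' exists in the current directory.
#
#     import tokenize
#     f = open('example.py')
#     for tok_type, tok_string, start, end, line in tokenize.generate_tokens(f.readline):
#         print tokenize.tok_name[tok_type], repr(tok_string)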
from __future__ import generators
__author__ = 'Ka-Ping Yee <[email protected]>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from token import *
import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize", "NL"]
del token
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
N_TOKENS += 2
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return apply(group, choices) + '*'
def maybe(*choices): return apply(group, choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
'r': None, 'R': None, 'u': None, 'U': None}
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
print "%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token))
def tokenize(readline, tokeneater=printtoken):
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
apply(tokeneater, token_info)
def generate_tokens(readline):
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
line = readline()
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError, ("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError, ("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
yield (parenlev > 0 and NL or NEWLINE,
token, spos, epos, line)
elif initial == '#':
yield (COMMENT, token, spos, epos, line)
elif token in ("'''", '"""', # triple-quoted
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""'):
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in ("'", '"') or \
token[:2] in ("r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"') or \
token[:3] in ("ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"' ):
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
| mit | -5,903,353,024,651,132,000 | 39.846743 | 80 | 0.477441 | false | 3.755195 | false | false | false |
mgiannikouris/vi-firmware | script/make_trips.py | 3 | 3361 | #!/usr/bin/env python
import sys
from xml.etree import ElementTree as ET
import json
import re
import datetime
class DestFileCreator:
nextTrip = 1
def __init__(self, firstTrip):
self.nextTrip = firstTrip
def next_dest(self, data_file):
newName = "Trip" + str(self.nextTrip).zfill(3) + "-" + data_file
self.nextTrip = self.nextTrip + 1
print "Starting ",
print newName
return open(newName, "w")
def get_next_file(trace_file):
numbers_str = re.findall(r'[0-9]+', trace_file)
numbers_int = map(int, numbers_str)
oldFile = datetime.datetime(numbers_int[0], numbers_int[1], numbers_int[2], numbers_int[3])
dtime = datetime.timedelta(hours=1)
oldFile = oldFile + dtime
filename = str(oldFile.date())
filename += "-"
if(oldFile.hour < 10):
filename += "0"
filename += str(oldFile.hour)
filename += ".json"
return filename
def compile_trip(trace_file, tripNum):
dataFileValid = True
lastTimeStamp = 0.0
currentTimeStamp = 0
destFileGen = DestFileCreator(tripNum)
errorCount = 0
lineCount = 0
destinationFile = destFileGen.next_dest(trace_file)
while dataFileValid is True:
try:
currentTraceFile = open(trace_file, "r")
except IOError, e:
print e
dataFileValid = False
destinationFile.close()
break
else:
print 'Opened %s' % trace_file
for line in currentTraceFile:
try:
lineCount = lineCount + 1
timestamp, data = line.split(':', 1)
record = json.loads(data)
except ValueError:
sys.stderr.write("Skipping line: %s" % data)
print " "
errorCount = errorCount + 1
continue
                if lastTimeStamp != 0.0:
if (float(timestamp) - lastTimeStamp) > 600.00: # Time is in seconds
print "Found a gap of ",
print (float(timestamp) - lastTimeStamp),
print " seconds. Creating new Trip file."
destinationFile.close()
lastTimeStamp = 0.0
destinationFile = destFileGen.next_dest(trace_file)
elif (float(timestamp) - lastTimeStamp) > 1.00: # Time is in seconds
print "Momentary dropout of ",
print (float(timestamp) - lastTimeStamp),
print " seconds. Ignoring."
lastTimeStamp = float(timestamp)
destinationFile.write(line)
if dataFileValid is True:
currentTraceFile.close()
trace_file = get_next_file(trace_file)
percentBad = 100.0 * errorCount / lineCount
print "Parsed",
print lineCount,
print "lines."
print "Detected",
print errorCount,
print "errors."
print percentBad,
print "% bad data."
if __name__ == '__main__':
if len(sys.argv) is not 3:
print "Must provide the path to the first trace file in a trip and the trip number."
sys.exit(1)
compile_trip(sys.argv[1], int(sys.argv[2]))
| bsd-3-clause | 3,610,042,837,803,529,000 | 29.554545 | 95 | 0.540315 | false | 4.232997 | false | false | false |
mesosphere/dcos-kafka-service | frameworks/kafka/tests/test_active_directory_zookeeper_auth.py | 1 | 5966 | """
This module tests the interaction of Kafka with Zookeeper with authentication enabled
"""
import logging
import uuid
import pytest
import sdk_cmd
import sdk_install
import sdk_marathon
import sdk_utils
from tests import active_directory
from tests import auth
from tests import config
from tests import test_utils
pytestmark = pytest.mark.skipif(
not active_directory.is_active_directory_enabled(),
reason="This test requires TESTING_ACTIVE_DIRECTORY_SERVER to be set",
)
log = logging.getLogger(__name__)
@pytest.fixture(scope="module", autouse=True)
def kerberos(configure_security):
try:
kerberos_env = active_directory.ActiveDirectoryKerberos()
yield kerberos_env
finally:
kerberos_env.cleanup()
@pytest.fixture(scope="module")
def zookeeper_server(kerberos):
service_kerberos_options = {
"service": {
"name": config.ZOOKEEPER_SERVICE_NAME,
"security": {
"kerberos": {
"enabled": True,
"kdc": {"hostname": kerberos.get_host(), "port": int(kerberos.get_port())},
"realm": kerberos.get_realm(),
"keytab_secret": kerberos.get_keytab_path(),
}
},
}
}
try:
sdk_install.uninstall(config.ZOOKEEPER_PACKAGE_NAME, config.ZOOKEEPER_SERVICE_NAME)
sdk_install.install(
config.ZOOKEEPER_PACKAGE_NAME,
config.ZOOKEEPER_SERVICE_NAME,
config.ZOOKEEPER_TASK_COUNT,
package_version=config.ZOOKEEPER_PACKAGE_VERSION,
additional_options=service_kerberos_options,
timeout_seconds=30 * 60,
)
yield {**service_kerberos_options, **{"package_name": config.ZOOKEEPER_PACKAGE_NAME}}
finally:
sdk_install.uninstall(config.ZOOKEEPER_PACKAGE_NAME, config.ZOOKEEPER_SERVICE_NAME)
@pytest.fixture(scope="module", autouse=True)
def kafka_server(kerberos, zookeeper_server):
# Get the zookeeper DNS values
zookeeper_dns = sdk_cmd.svc_cli(
zookeeper_server["package_name"],
zookeeper_server["service"]["name"],
"endpoint clientport",
parse_json=True,
)[1]["dns"]
service_kerberos_options = {
"service": {
"name": config.SERVICE_NAME,
"security": {
"kerberos": {
"enabled": True,
"enabled_for_zookeeper": True,
"kdc": {"hostname": kerberos.get_host(), "port": int(kerberos.get_port())},
"realm": kerberos.get_realm(),
"keytab_secret": kerberos.get_keytab_path(),
}
},
},
"kafka": {"kafka_zookeeper_uri": ",".join(zookeeper_dns)},
}
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
try:
sdk_install.install(
config.PACKAGE_NAME,
config.SERVICE_NAME,
config.DEFAULT_BROKER_COUNT,
additional_options=service_kerberos_options,
timeout_seconds=30 * 60,
)
yield {**service_kerberos_options, **{"package_name": config.PACKAGE_NAME}}
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.fixture(scope="module", autouse=True)
def kafka_client(kerberos, kafka_server):
brokers = sdk_cmd.svc_cli(
kafka_server["package_name"], kafka_server["service"]["name"], "endpoint broker", parse_json=True
)[1]["dns"]
try:
client_id = "kafka-client"
client = {
"id": client_id,
"mem": 512,
"container": {
"type": "MESOS",
"docker": {"image": "elezar/kafka-client:4b9c060", "forcePullImage": True},
"volumes": [
{
"containerPath": "/tmp/kafkaconfig/kafka-client.keytab",
"secret": "kafka_keytab",
}
],
},
"secrets": {"kafka_keytab": {"source": kerberos.get_keytab_path()}},
"networks": [{"mode": "host"}],
"env": {
"JVM_MaxHeapSize": "512",
"KAFKA_CLIENT_MODE": "test",
"KAFKA_TOPIC": "securetest",
"KAFKA_BROKER_LIST": ",".join(brokers),
},
}
sdk_marathon.install_app(client)
yield {**client, **{"brokers": list(map(lambda x: x.split(":")[0], brokers))}}
finally:
sdk_marathon.destroy_app(client_id)
@pytest.mark.dcos_min_version("1.10")
@sdk_utils.dcos_ee_only
@pytest.mark.sanity
def test_client_can_read_and_write(kafka_client, kafka_server, kerberos):
client_id = kafka_client["id"]
auth.wait_for_brokers(kafka_client["id"], kafka_client["brokers"])
topic_name = "authn.test"
sdk_cmd.svc_cli(
kafka_server["package_name"],
kafka_server["service"]["name"],
"topic create {}".format(topic_name),
parse_json=True,
)
test_utils.wait_for_topic(
kafka_server["package_name"], kafka_server["service"]["name"], topic_name
)
message = str(uuid.uuid4())
assert write_to_topic("client", client_id, topic_name, message, kerberos)
assert message in read_from_topic("client", client_id, topic_name, 1, kerberos)
def write_to_topic(cn: str, task: str, topic: str, message: str, krb5: object) -> bool:
return auth.write_to_topic(
cn,
task,
topic,
message,
auth.get_kerberos_client_properties(ssl_enabled=False),
auth.setup_krb5_env(cn, task, krb5),
)
def read_from_topic(cn: str, task: str, topic: str, message: str, krb5: object) -> str:
return auth.read_from_topic(
cn,
task,
topic,
message,
auth.get_kerberos_client_properties(ssl_enabled=False),
auth.setup_krb5_env(cn, task, krb5),
)
| apache-2.0 | -6,718,657,523,241,052,000 | 28.83 | 105 | 0.572746 | false | 3.642247 | true | false | false |
cc-archive/jtoolkit | jToolkit/widgets/grid.py | 1 | 14502 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""grid module: responsible for building up grid widgets"""
# Copyright 2002, 2003 St James Software
#
# This file is part of jToolkit.
#
# jToolkit is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# jToolkit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jToolkit; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from jToolkit.widgets import widgets
from jToolkit.widgets import table
from jToolkit import cidict
from jToolkit.data import dates
import types
class GridCategory:
def __init__(self, name, title, display, storageformat, displayformat, col, pctwidth, mergecells, mainarea, attribs = {}):
self.name = name
self.title = title
self.display = display
self.mainarea = mainarea
self.storageformat = storageformat
self.displayformat = displayformat
self.col = col
self.pctwidth = pctwidth
self.width = None
self.mergecells = mergecells
self.attribs = attribs
def gettitlecellwidget(self):
"""returns the title cell for this category, using the self.width"""
titlestyle = {'font-weight':'bold'}
return table.TableCell(self.title, newattribs={'width':'%f%%' % self.width,'style':titlestyle})
def valuetostring(self, value):
"""based on the category, returns value as a string that can be displayed"""
# TODO: look at combining this and other methods with FormCategory...
if self.storageformat in ['TEXT','STRING']:
if value is None:
return ''
else:
return value
elif self.storageformat in ['INTEGER', 'DECIMAL']:
return str(value)
elif self.storageformat == 'DATETIME':
return dates.formatdate(value, self.displayformat)
elif value is None:
return ''
else:
return value
def getcellwidget(self, obj, style, href, hreftarget):
"""returns a widget for a cell"""
text = self.valuetostring(obj)
if href is None:
contents = text
else:
contents = widgets.Link(href, text, {'target':hreftarget,'style':style})
self.attribs.update({'valign':'top','style':style})
return table.TableCell(contents, newattribs=self.attribs)
def canmerge(self, widget1, widget2):
"""checks whether we can merge these cells..."""
if not self.mergecells:
return False
if type(widget1) == type(widget2):
if type(widget1) == types.InstanceType:
if widget1.__class__ == widget2.__class__:
# now we can compare
if isinstance(widget1, table.TableCell):
# match cells if the contents match and the attributes match
return self.canmerge(widget1.contents, widget2.contents) and widget1.attribs == widget2.attribs
elif isinstance(widget1, widgets.Link):
# merge if contents are the same, even if links are different
# the links are usable from another row
return self.canmerge(widget1.contents, widget2.contents)
else:
# unknown class...
return 0
else:
# mismatched class
return 0
elif isinstance(widget1, basestring):
return widget1 == widget2
else:
# unknown type
return 0
else:
# mismatched types
return 0
def gethtmlstyle(self, textcolor, backcolor, font):
style = {}
style['color'] = widgets.getrgb(textcolor, '&H000000')
style['background-color'] = widgets.getrgb(backcolor, '&HFFFFFF')
if font is not None:
fontstyles = font.lower().split()
for fontstyle in fontstyles:
if fontstyle == 'bold': style['font-weight'] = 'bold'
if fontstyle == 'italic': style['font-style'] = 'italic'
return style
class GridDivision(widgets.Division):
"""this division class is used to do layout tricks to be wider than the screen"""
def __init__(self, gridwidget):
widthstr = '%d%%' % gridwidget.pctwidth
widgets.Division.__init__(self, newattribs={'style':{'width':widthstr}}, contents=gridwidget)
class Grid(table.TableLayout):
def __init__(self, categories, fillincolumn=None):
table.TableLayout.__init__(self, newattribs = {'width':'100%','border':'1','cellspacing':'0'})
self.pctwidth = 100
self.categories = categories
self.fillincolumn = fillincolumn
self.balancewidths()
def addtitlerow(self):
"""Add the title row cells"""
for category in self.categories:
if category.display:
titlecell = category.gettitlecellwidget()
self.addcell(0, category, titlecell)
def balancewidths(self):
"""calculate the width of each of the columns"""
# summarize widths...
mainfixedpctwidth = 0
suppfixedpctwidth = 0
for category in self.categories:
if category.display:
if category.pctwidth == 0: category.pctwidth = 10
if category.mainarea:
mainfixedpctwidth += category.pctwidth
else:
suppfixedpctwidth += category.pctwidth
extrapctwidth = 100 - mainfixedpctwidth
if extrapctwidth >= 0:
totalpctwidth = 100 + suppfixedpctwidth
else:
totalpctwidth = 100 + suppfixedpctwidth - extrapctwidth
extrapctwidth = 0
self.pctwidth = totalpctwidth
# add the title cells...
for category in self.categories:
if category.display:
if category.name == self.fillincolumn:
category.width = (category.pctwidth + extrapctwidth) * 100.0 / totalpctwidth
else:
category.width = category.pctwidth * 100.0 / totalpctwidth
def addcell(self, rownum, category, value):
"""adds a cell to the grid"""
# see if this should be merged with the above cell...
if self.hascell(rownum-1, category.col):
cellabove = self.getcell(rownum-1, category.col)
if category.canmerge(value, cellabove):
# at the moment, duplicate objects, later, replace them...
value = cellabove
table.TableLayout.setcell(self, rownum, category.col, value)
class SimpleGridCategory(GridCategory):
"""a GridCategory that handles coloring..."""
def __init__(self, name, title, tooltip, colnum, colordict, isheading, display, storageformat='TEXT'):
if storageformat == 'DATETIME':
displayformat = '%y-%m-%d %H:%M:%S'
else:
displayformat = ''
GridCategory.__init__(self, name, title, display=display, storageformat=storageformat, displayformat=displayformat,
col=colnum, pctwidth=10, mergecells=0, mainarea=1)
self.tooltip = tooltip
self.colorcategory = None
self.colordict = colordict
if isheading:
self.backcolor = '&HD0D0D0'
self.font = 'bold'
else:
self.backcolor = '&HFFFFFF'
self.font = ''
def setcolorcategory(self, colorcategory):
"""sets which category the color of this one will be based on"""
self.colorcategory = colorcategory
def gettitlecellwidget(self):
"""returns the title cell for this category, using the given width"""
titlestyle = self.gethtmlstyle('&H660000', self.backcolor, 'bold')
titlelink=widgets.Tooltip(self.tooltip, self.title)
attribs={'width': '%f%%' % self.width, 'style': titlestyle,'valign': 'top'}
return table.TableCell(titlelink, newattribs=attribs)
def gettextcolor(self, row):
"""returns textcolor based on the value of this category in given row"""
obj = row[self.name]
return self.colordict.get(str(obj),None)
def getwidget(self, row, href, hreftarget):
"""simply returns a widget for this category"""
obj = row[self.name]
obj = self.valuetostring(obj)
if obj is None:
text = ''
elif isinstance(obj, unicode):
text = obj.encode('utf8')
else:
text = str(obj)
if text == '':
text = ' '
if self.colorcategory is not None:
textcolor = self.colorcategory.gettextcolor(row)
else:
textcolor = '&H000000'
style = self.gethtmlstyle(textcolor, self.backcolor, self.font)
style['text-decoration'] = 'none'
# we need to set the style in both objects otherwise the link style can override it
if href is None:
contents = text
else:
contents = widgets.Link(href, text, {'target':hreftarget,'style':style})
return table.TableCell(contents, newattribs={'valign':'top','style':style})
class SimpleGrid(Grid):
"""a grid with common methods for config pages"""
def __init__(self, gridtable, columnlist, hrefbase=None, hreftarget='', colordefs={}, colordeps={}, \
headingcolumns=(), hidecolumns=(), filter=None, gridcategory=SimpleGridCategory,newattribs={}):
self.hrefbase = hrefbase
self.hreftarget = hreftarget
self.gridtable = gridtable
self.columnlist = columnlist
self.colordefs = colordefs
self.colordeps = colordeps
self.headingcolumns = headingcolumns
self.hidecolumns = hidecolumns
self.filter = filter
self.gridcategory = gridcategory
Grid.__init__(self, self.getcolumns())
self.overrideattribs(newattribs)
self.makegrid()
enabled, disabled = '&H000000', '&H808080'
booleancolors = cidict.cidict({'false':disabled, 'true':enabled})
def getcolumns(self):
"""gets the columns for the grid (columns of categoryconf)..."""
columns = []
columndict = {}
colnum = 0
for name, title, tooltip in self.columnlist:
colnum += 1
colordict = self.colordefs.get(name,{})
isheading = name in self.headingcolumns
display = name not in self.hidecolumns
storageformat = self.gridtable.columntypes.get(name, 'TEXT').upper()
column = self.gridcategory(name, title, tooltip, colnum, colordict, isheading, display, storageformat)
columns.append(column)
columndict[name] = column
for colorcolumnname, depcolumnnames in self.colordeps.iteritems():
colorcolumn = columndict[colorcolumnname]
for depcolumnname in depcolumnnames:
depcolumn = columndict[depcolumnname]
depcolumn.setcolorcategory(colorcolumn)
return columns
def makegrid(self):
"""makes up the grid - retrieves rows, adds them, and adjusts the grid"""
self.addtitlerow()
self.addrows()
self.shrinkrange()
# in case we missed any out...
self.fillemptycells()
self.mergeduplicates([0])
self.hidecoveredcells()
self.calcweights()
def addrows(self):
"""gets all the database rows from the table and adds cells for each one to the grid"""
self.getrows()
for row in self.tablerows:
self.addrow(row)
def addrow(self, row):
"""adds all the cells for a row to the grid"""
href = None
if self.hrefbase is not None:
rowid = self.gridtable.getrowid(row)
href = self.hrefbase
if '?' in href:
href += "&"
else:
href += "?"
href += 'action=view&' + self.gridtable.rowidparamstring(rowid)
if self.page:
href += '&page=%d' % self.page
rownum = self.maxrownum() + 1
for category in self.categories:
if not category.display: continue
widgetcell = category.getwidget(row, href, self.hreftarget)
self.addcell(rownum, category, widgetcell)
def handlepageargs(self):
"""handle arguments that select the current page"""
page = self.attribs.get('page', 0)
try:
page = int(page)
except:
if page.lower() == 'all':
page = 0
else:
page = 1
self.page = page
self.numrowsinpage = self.attribs.get('numrowsinpage',20)
def getrows(self):
"""retrieves the appropriate rows from the database"""
self.handlepageargs()
if self.page == 0:
minrow, maxrow = None, None
else:
minrow, maxrow = (self.page-1)*self.numrowsinpage, self.page*self.numrowsinpage
if minrow < 0: minrow = 0
# if maxrow > len(alllogrows): maxrow = len(alllogrows)
if minrow != None and maxrow != None:
self.tablerows = self.gridtable.getsometablerows(minrow,maxrow,self.filter)
if len(self.tablerows) < maxrow-minrow: #End of table
self.tableEnded = 1
else:
self.tableEnded = 0
else:
self.tablerows = self.gridtable.gettablerows(self.filter)
def getpagelink(self, pagehref, pagenum, pagetext=None):
"""returns a widget that links to a particular page"""
if pagetext is None:
pagetext = "Page %d" % pagenum
if pagenum == self.page:
pagetext = "<font color='red'><b>%s</b></font>" % pagetext
return widgets.Link(pagehref+"&page=%d" % pagenum, pagetext)
def getpagelinks(self):
"""returns a widget that links to other pages of this grid"""
self.numpages = (self.gridtable.countrows(self.filter) + self.numrowsinpage - 1) / self.numrowsinpage
currentpage = "Page %d of %d. " % (self.page, self.numpages)
pagehref = "?"
if self.filter is not None:
pagehref += self.filter.getfilterparams()
first, last, next, previous = "First", "Last", "Next", "Previous"
if self.page == 0:
currentpage = "Showing all records"
pages = [(1, first), (self.numpages, last)]
elif self.numpages > 1:
pages = []
if 1 == self.page-1:
pages.append((1, first+"/"+previous))
else:
pages.append((1, first))
if self.page-1 > 1:
pages.append((self.page-1, previous))
if self.page+1 == self.numpages:
pages.append((self.page+1, next+"/"+last))
else:
if self.page+1 < self.numpages:
pages.append((self.page+1, next))
pages.append((self.numpages, last))
else:
pages = []
pagelinks = [self.getpagelink(pagehref, pagenum, pagetext) for pagenum, pagetext in pages]
return widgets.Division(contents=widgets.Paragraph(contents=[currentpage, pagelinks]), id="toolbar")
class NumberGridCategory(SimpleGridCategory):
"""A grid category that handles numbers"""
def valuetostring(self, value):
if self.storageformat in ['DECIMAL', 'DOUBLE'] and isinstance(value, (int, float)):
return "%.3f" % value
else:
return SimpleGridCategory.valuetostring(self, value)
| gpl-2.0 | -4,524,484,315,021,323,300 | 36.275064 | 124 | 0.658944 | false | 3.77853 | false | false | false |
lsst-sqre/zenodio | setup.py | 1 | 1113 | from setuptools import setup, find_packages
import os
packagename = 'zenodio'
description = 'I/O with Zenodo.'
author = 'Jonathan Sick'
author_email = '[email protected]'
license = 'MIT'
url = 'https://github.com/lsst-sqre/zenodio'
version = '0.1.1.dev0'
def read(filename):
full_filename = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
filename)
return open(full_filename, mode='r', encoding='utf8').read()
long_description = read('README.rst')
setup(
name=packagename,
version=version,
description=description,
long_description=long_description,
url=url,
author=author,
author_email=author_email,
license=license,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='aas',
packages=find_packages(exclude=['docs', 'tests*', 'data', 'notebooks']),
install_requires=['future', 'requests', 'xmltodict'],
tests_require=['pytest'],
# package_data={},
)
| mit | -2,961,864,877,985,767,400 | 24.883721 | 76 | 0.638814 | false | 3.456522 | false | false | false |
wkww/art-c | web/scalica/micro/views.py | 2 | 4598 | from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import HttpResponse
from django.shortcuts import render
from django.utils import timezone
from django.contrib.gis.geos import Point
from .models import TopPost, Following, Post, FollowingForm, PostForm, MyUserCreationForm
# Anonymous views
#################
def index(request):
# if request.user.is_authenticated():
return home(request)
# else:
# return anon_home(request)
def anon_home(request):
return render(request, 'micro/public.html')
def search(request):
return render(request, 'micro/search.html')
def stream(request, user_id):
# See if to present a 'follow' button
form = None
if request.user.is_authenticated() and request.user.id != int(user_id):
try:
f = Following.objects.get(follower_id=request.user.id,
followee_id=user_id)
except Following.DoesNotExist:
form = FollowingForm
user = User.objects.get(pk=user_id)
post_list = Post.objects.filter(user_id=user_id).order_by('-pub_date')
paginator = Paginator(post_list, 10)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
posts = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
posts = paginator.page(paginator.num_pages)
context = {
'posts' : posts,
'stream_user' : user,
'form' : form,
}
return render(request, 'micro/stream.html', context)
def getpost(request, post_id):
if request.user.is_authenticated():
post = Post.objects.get(
id = post_id)
print(post.text)
print(post.url)
print(post.mpoint)
context = {
'post': post,
}
return render(request, 'micro/postindiv.html', context)
# return render(request, 'micro/postindiv.html', context)
def register(request):
if request.method == 'POST':
form = MyUserCreationForm(request.POST)
new_user = form.save(commit=True)
# Log in that user.
user = authenticate(username=new_user.username,
password=form.clean_password2())
if user is not None:
login(request, user)
else:
raise Exception
return home(request)
else:
form = MyUserCreationForm
return render(request, 'micro/register.html', {'form' : form})
# Authenticated views
#####################
# @login_required
def home(request):
# '''List of recent posts by people I follow'''
# try:
# my_post = Post.objects.filter(user=request.user).order_by('-pub_date')[0]
# except IndexError:
# my_post = None
# follows = [o.followee_id for o in Following.objects.filter(
# follower_id=request.user.id)]
global logedIn
if request.user.is_authenticated():
logedIn = True
else:
logedIn = False
print(logedIn)
# user = User.objects.get(pk=user_id)
post_list = Post.objects.all()
context = {
'post_list': post_list,
# 'user': user,
# 'my_post' : my_post,
# 'post_form' : PostForm
'logedIn': logedIn
}
return render(request, 'micro/home.html', context)
def toppost(request):
global logedIn
if request.user.is_authenticated():
logedIn = True
else:
logedIn = False
print(logedIn)
# user = User.objects.get(pk=user_id)
post_list = TopPost.objects.all()
context = {
'post_list': post_list,
# 'user': user,
# 'my_post' : my_post,
# 'post_form' : PostForm
'logedIn': logedIn
}
return render(request, 'micro/home.html', context)
# Allows to post something and shows my most recent posts.
@login_required
def post(request):
if request.method == 'POST':
form = PostForm(request.POST)
new_post = form.save(commit=False)
new_post.user = request.user
new_post.pub_date = timezone.now()
new_post.mpoint = Point(new_post.longtitude, new_post.latitude)
new_post.save()
return home(request)
else:
form = PostForm
return render(request, 'micro/post.html', {'form' : form})
@login_required
def follow(request):
if request.method == 'POST':
form = FollowingForm(request.POST)
new_follow = form.save(commit=False)
new_follow.follower = request.user
new_follow.follow_date = timezone.now()
new_follow.save()
return home(request)
else:
form = FollowingForm
return render(request, 'micro/follow.html', {'form' : form})
| mit | -3,015,600,426,929,801,700 | 28.101266 | 89 | 0.665289 | false | 3.385862 | false | false | false |
ministryofjustice/money-to-prisoners-transaction-uploader | main.py | 1 | 2665 | import logging
import logging.config
import os
import sys
import sentry_sdk
from mtp_transaction_uploader import settings
from mtp_transaction_uploader.upload import main as transaction_uploader
def setup_monitoring():
"""
Setup logging and exception reporting
"""
logging_conf = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'simple': {
'format': '%(asctime)s [%(levelname)s] %(message)s',
'datefmt': '%Y-%m-%dT%H:%M:%S',
},
'elk': {
'()': 'mtp_common.logging.ELKFormatter'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple' if settings.ENVIRONMENT == 'local' else 'elk',
},
},
'root': {
'level': 'WARNING',
'handlers': ['console'],
},
'loggers': {
'mtp': {
'level': 'INFO',
'handlers': ['console'],
'propagate': False,
},
},
}
sentry_enabled = False
if os.environ.get('SENTRY_DSN'):
sentry_sdk.init(
dsn=settings.SENTRY_DSN,
environment=settings.ENVIRONMENT,
release=settings.APP_GIT_COMMIT,
send_default_pii=False,
request_bodies='never',
)
sentry_enabled = True
logging.config.dictConfig(logging_conf)
logger = logging.getLogger('mtp')
return logger, sentry_enabled
def main():
logger, sentry_enabled = setup_monitoring()
if settings.UPLOADER_DISABLED:
logger.info('Transaction uploader is disabled')
sys.exit(0)
# ensure all required parameters are set
missing_params = []
required_params = {'SFTP_HOST', 'SFTP_USER', 'SFTP_PRIVATE_KEY', 'ACCOUNT_CODE',
'API_URL', 'API_CLIENT_ID', 'API_CLIENT_SECRET',
'API_USERNAME', 'API_PASSWORD'}
for param in dir(settings):
if param in required_params and not getattr(settings, param):
missing_params.append(param)
if missing_params:
logger.error('Missing environment variables: ' +
', '.join(missing_params))
sys.exit(1)
try:
# run the transaction uploader
transaction_uploader()
except Exception as e:
if sentry_enabled:
sentry_sdk.capture_exception(e)
else:
logger.exception('Unhandled error')
sys.exit(2)
if __name__ == '__main__':
main()
| mit | 938,083,083,143,458,300 | 27.052632 | 84 | 0.526829 | false | 4.190252 | false | false | false |
viswimmer1/PythonGenerator | data/python_files/33075525/generator.py | 1 | 4072 | import os
import re
import cache
from fields import RequiresField, ModulesField, PackagesField
from template import template, show_field
import jinja2
import simplejson as json
from wtforms import form, fields
import logging
from uuid import uuid4 as uuid
log = logging.getLogger(__name__)
safe_string = jinja2.Markup
SETUP_PY_TEMPLATE = 'setup_py.tpl'
python_file_pattern = re.compile(r'(.*)\.(py|pyc|pyo)$', re.I)
readme_file_pattern = re.compile(r'readme(\..*)?$', re.I)
from trove import all_classifiers
license_choices = \
[('', '')] + \
[tuple([c.split(' :: ')[-1]] * 2) for c in all_classifiers
if c.startswith('License :: ')]
classifier_choices = [tuple([c] * 2) for c in all_classifiers
if not c.startswith('License :: ')]
def create_setup(client=None):
"""
Use the file list from the source control client to
instantiate a new Setup object.
"""
setup = SetupDistutils()
packages = []
modules = []
readme = None
if client:
packages = [os.path.dirname(f)
for f in client.files if '__init__.' in f]
# look for files not in a package to add to py_modules in setup
# find README.* files, first one wins
modules = []
for filename in client.files:
match = re.match(python_file_pattern, filename)
if match:
package = os.path.dirname(filename)
module = match.groups()[0]
if not module.endswith('setup') and package not in packages:
modules.append(module.replace('/', '.'))
if not readme:
match = re.match(readme_file_pattern, filename)
if match:
readme = filename
setup.process(None, **client.discovered)
setup.readme.data = readme
setup.py_modules.data = ' '.join(modules)
setup.packages.data = ' '.join(packages)
return setup
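# Editor-added illustration of how create_setup() consumes a source-control
# client; FakeClient is a made-up stand-in -- any object exposing `files` and
# `discovered` attributes in these shapes would do.
#
#   class FakeClient(object):
#       files = ['pkg/__init__.py', 'pkg/mod.py', 'tool.py', 'README.rst']
#       discovered = {'name': 'pkg', 'version': '0.1'}
#
#   setup = create_setup(FakeClient())
#   # setup.packages.data == 'pkg'
#   # setup.py_modules.data == 'tool'
#   # setup.readme.data == 'README.rst'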
class Setup(form.Form):
author = fields.TextField()
author_email = fields.TextField()
name = fields.TextField()
description = fields.TextField()
version = fields.TextField()
long_description = fields.TextAreaField()
url = fields.TextField()
license = fields.SelectField(choices=license_choices)
classifiers = fields.SelectMultipleField(choices=classifier_choices)
readme = fields.HiddenField()
# lists
py_modules = ModulesField()
packages = PackagesField()
requires = RequiresField()
def __init__(self, *args, **kwargs):
super(Setup, self).__init__(*args, **kwargs)
self.cache_key = str(uuid()).replace('-', '')
for field in [self.license, self.classifiers]:
if field.data == 'None':
field.data = None
def process(self, formdata=None, obj=None, **kwargs):
super(Setup, self).process(formdata=formdata, obj=obj, **kwargs)
def cache(self):
data = dict(self.data)
data['cache_key'] = self.cache_key
cache.set(self.cache_key, json.dumps(data))
def visible_fields(self):
return [f for f in self if not isinstance(f, fields.HiddenField)]
class SetupDistutils(Setup):
def generate(self, executable=False, under_test=False):
try:
indent = ' '
args = ''
for field in self.visible_fields():
# don't show field at all if executable is on
if not field.data and executable:
continue
args += u'{}{}\n'.format(
indent,
show_field(field, self, executable))
return safe_string(template(SETUP_PY_TEMPLATE,
setup=self,
executable=executable,
setup_arguments=args,
under_test=under_test))
except Exception:
log.exception('Failed to generate setup.py')
return 'Error generating setup.py'
| gpl-2.0 | 4,055,651,554,083,751,400 | 30.083969 | 76 | 0.574902 | false | 4.219689 | false | false | false |
tsuru/rpaas | tests/test_nginx.py | 1 | 8607 | # Copyright 2016 rpaas authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
import mock
from rpaas.nginx import Nginx, NginxError
class NginxTestCase(unittest.TestCase):
def setUp(self):
self.cache_headers = [{'Accept-Encoding': 'gzip'}, {'Accept-Encoding': 'identity'}]
def test_init_default(self):
nginx = Nginx()
self.assertEqual(nginx.nginx_manage_port, '8089')
self.assertEqual(nginx.nginx_purge_path, '/purge')
self.assertEqual(nginx.nginx_healthcheck_path, '/healthcheck')
def test_init_config(self):
nginx = Nginx({
'NGINX_PURGE_PATH': '/2',
'NGINX_MANAGE_PORT': '4',
'NGINX_LOCATION_TEMPLATE_DEFAULT_TXT': '5',
'NGINX_LOCATION_TEMPLATE_ROUTER_TXT': '6',
'NGINX_HEALTHCHECK_PATH': '7',
})
self.assertEqual(nginx.nginx_purge_path, '/2')
self.assertEqual(nginx.nginx_manage_port, '4')
self.assertEqual(nginx.config_manager.location_template_default, '5')
self.assertEqual(nginx.config_manager.location_template_router, '6')
self.assertEqual(nginx.nginx_healthcheck_path, '7')
@mock.patch('rpaas.nginx.requests')
def test_init_config_location_url(self, requests):
def mocked_requests_get(*args, **kwargs):
class MockResponse:
def __init__(self, text, status_code):
self.text = text
self.status_code = status_code
if args[0] == 'http://my.com/default':
return MockResponse("my result default", 200)
elif args[0] == 'http://my.com/router':
return MockResponse("my result router", 200)
with mock.patch('rpaas.nginx.requests.get', side_effect=mocked_requests_get) as requests_get:
nginx = Nginx({
'NGINX_LOCATION_TEMPLATE_DEFAULT_URL': 'http://my.com/default',
'NGINX_LOCATION_TEMPLATE_ROUTER_URL': 'http://my.com/router',
})
self.assertEqual(nginx.config_manager.location_template_default, 'my result default')
self.assertEqual(nginx.config_manager.location_template_router, 'my result router')
expected_calls = [mock.call('http://my.com/default'),
mock.call('http://my.com/router')]
requests_get.assert_has_calls(expected_calls)
@mock.patch('rpaas.nginx.requests')
def test_purge_location_successfully(self, requests):
nginx = Nginx()
response = mock.Mock()
response.status_code = 200
response.text = 'purged'
side_effect = mock.Mock()
side_effect.status_code = 404
side_effect.text = "Not Found"
requests.request.side_effect = [response, side_effect, response, side_effect]
purged = nginx.purge_location('myhost', '/foo/bar')
self.assertTrue(purged)
self.assertEqual(requests.request.call_count, 4)
expec_responses = []
for scheme in ['http', 'https']:
for header in self.cache_headers:
expec_responses.append(mock.call('get', 'http://myhost:8089/purge/{}/foo/bar'.format(scheme),
headers=header, timeout=2))
requests.request.assert_has_calls(expec_responses)
@mock.patch('rpaas.nginx.requests')
def test_purge_location_preserve_path_successfully(self, requests):
nginx = Nginx()
response = mock.Mock()
response.status_code = 200
response.text = 'purged'
requests.request.side_effect = [response]
purged = nginx.purge_location('myhost', 'http://example.com/foo/bar', True)
self.assertTrue(purged)
self.assertEqual(requests.request.call_count, 2)
expected_responses = []
for header in self.cache_headers:
expected_responses.append(mock.call('get', 'http://myhost:8089/purge/http://example.com/foo/bar',
headers=header, timeout=2))
requests.request.assert_has_calls(expected_responses)
@mock.patch('rpaas.nginx.requests')
def test_purge_location_not_found(self, requests):
nginx = Nginx()
response = mock.Mock()
response.status_code = 404
response.text = 'Not Found'
requests.request.side_effect = [response, response, response, response]
purged = nginx.purge_location('myhost', '/foo/bar')
self.assertFalse(purged)
self.assertEqual(requests.request.call_count, 4)
expec_responses = []
for scheme in ['http', 'https']:
for header in self.cache_headers:
expec_responses.append(mock.call('get', 'http://myhost:8089/purge/{}/foo/bar'.format(scheme),
headers=header, timeout=2))
requests.request.assert_has_calls(expec_responses)
@mock.patch('rpaas.nginx.requests')
def test_wait_healthcheck(self, requests):
nginx = Nginx()
count = [0]
response = mock.Mock()
response.status_code = 200
response.text = 'WORKING'
def side_effect(method, url, timeout, **params):
count[0] += 1
if count[0] < 2:
raise Exception('some error')
return response
requests.request.side_effect = side_effect
nginx.wait_healthcheck('myhost.com', timeout=5)
self.assertEqual(requests.request.call_count, 2)
requests.request.assert_called_with('get', 'http://myhost.com:8089/healthcheck', timeout=2)
@mock.patch('rpaas.nginx.requests')
def test_wait_app_healthcheck(self, requests):
nginx = Nginx()
count = [0]
response = mock.Mock()
response.status_code = 200
response.text = '\n\nWORKING'
def side_effect(method, url, timeout, **params):
count[0] += 1
if count[0] < 2:
raise Exception('some error')
return response
requests.request.side_effect = side_effect
nginx.wait_healthcheck('myhost.com', timeout=5, manage_healthcheck=False)
self.assertEqual(requests.request.call_count, 2)
requests.request.assert_called_with('get', 'http://myhost.com:8080/_nginx_healthcheck/', timeout=2)
@mock.patch('rpaas.nginx.requests')
def test_wait_app_healthcheck_invalid_response(self, requests):
nginx = Nginx()
count = [0]
response = mock.Mock()
response.status_code = 200
response.text = '\nFAIL\n'
def side_effect(method, url, timeout, **params):
count[0] += 1
if count[0] < 2:
raise Exception('some error')
return response
requests.request.side_effect = side_effect
with self.assertRaises(NginxError):
nginx.wait_healthcheck('myhost.com', timeout=5, manage_healthcheck=False)
self.assertEqual(requests.request.call_count, 6)
requests.request.assert_called_with('get', 'http://myhost.com:8080/_nginx_healthcheck/', timeout=2)
@mock.patch('rpaas.nginx.requests')
def test_wait_healthcheck_timeout(self, requests):
nginx = Nginx()
def side_effect(method, url, timeout, **params):
raise Exception('some error')
requests.request.side_effect = side_effect
with self.assertRaises(Exception):
nginx.wait_healthcheck('myhost.com', timeout=2)
self.assertGreaterEqual(requests.request.call_count, 2)
requests.request.assert_called_with('get', 'http://myhost.com:8089/healthcheck', timeout=2)
@mock.patch('os.path')
@mock.patch('rpaas.nginx.requests')
def test_add_session_ticket_success(self, requests, os_path):
nginx = Nginx({'CA_CERT': 'cert data'})
os_path.exists.return_value = True
response = mock.Mock()
response.status_code = 200
response.text = '\n\nticket was succsessfully added'
requests.request.return_value = response
nginx.add_session_ticket('host-1', 'random data', timeout=2)
requests.request.assert_called_once_with('post', 'https://host-1:8090/session_ticket', timeout=2,
data='random data', verify='/tmp/rpaas_ca.pem')
@mock.patch('rpaas.nginx.requests')
def test_missing_ca_cert(self, requests):
nginx = Nginx()
with self.assertRaises(NginxError):
nginx.add_session_ticket('host-1', 'random data', timeout=2)
| bsd-3-clause | -1,738,867,843,030,581,200 | 40.379808 | 109 | 0.609272 | false | 3.796648 | true | false | false |
axbaretto/beam | sdks/python/apache_beam/runners/portability/local_job_service_main.py | 5 | 5787 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
"""Starts a service for running portable beam pipelines.
The basic usage is simply
python -m apache_beam.runners.portability.local_job_service_main
Many other options are also supported, such as starting in the background or
passing in a lockfile to ensure that only one copy of the service is running
at a time. Pass --help to see them all.
"""
import argparse
import logging
import os
import pathlib
import signal
import subprocess
import sys
import time
from apache_beam.runners.portability import local_job_service
_LOGGER = logging.getLogger(__name__)
def run(argv):
if argv[0] == __file__:
argv = argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument(
'-p',
'--port',
'--job_port',
type=int,
default=0,
help='port on which to serve the job api')
parser.add_argument('--staging_dir')
parser.add_argument(
'--pid_file', help='File in which to store the process id of the server.')
parser.add_argument(
'--port_file', help='File in which to store the port of the server.')
parser.add_argument(
'--background',
action='store_true',
help='Start the server up as a background process.'
' Will fail if pid_file already exists, unless --stop is also specified.')
parser.add_argument(
'--stderr_file',
help='Where to write stderr (if not specified, merged with stdout).')
parser.add_argument(
'--stdout_file', help='Where to write stdout for background job service.')
parser.add_argument(
'--stop',
action='store_true',
help='Stop the existing process, if any, specified in pid_file.'
' Will not start up a new service unless --background is specified.')
options = parser.parse_args(argv)
if options.stop:
if not options.pid_file:
raise RuntimeError('--pid_file must be specified with --stop')
if os.path.exists(options.pid_file):
with open(options.pid_file) as fin:
pid = int(fin.read())
print('Killing process at', pid)
try:
os.kill(pid, signal.SIGTERM)
except Exception:
print('Process', pid, 'already killed.')
os.unlink(options.pid_file)
else:
print('Process id file', options.pid_file, 'already removed.')
if not options.background:
return
if options.background:
if not options.pid_file:
raise RuntimeError('--pid_file must be specified with --start')
if options.stop:
argv.remove('--stop')
argv.remove('--background')
if not options.port_file:
options.port_file = os.path.splitext(options.pid_file)[0] + '.port'
argv.append('--port_file')
argv.append(options.port_file)
if not options.stdout_file:
raise RuntimeError('--stdout_file must be specified with --background')
os.makedirs(pathlib.PurePath(options.stdout_file).parent, exist_ok=True)
stdout_dest = open(options.stdout_file, mode='w')
if options.stderr_file:
os.makedirs(pathlib.PurePath(options.stderr_file).parent, exist_ok=True)
stderr_dest = open(options.stderr_file, mode='w')
else:
stderr_dest = subprocess.STDOUT
subprocess.Popen([
sys.executable,
'-m',
'apache_beam.runners.portability.local_job_service_main'
] + argv,
stderr=stderr_dest,
stdout=stdout_dest)
print('Waiting for server to start up...')
while not os.path.exists(options.port_file):
time.sleep(.1)
with open(options.port_file) as fin:
port = fin.read()
print('Server started at port', port)
return
if options.pid_file:
print('Writing process id to', options.pid_file)
os.makedirs(pathlib.PurePath(options.pid_file).parent, exist_ok=True)
fd = os.open(options.pid_file, os.O_CREAT | os.O_EXCL | os.O_RDWR)
with os.fdopen(fd, 'w') as fout:
fout.write(str(os.getpid()))
try:
job_servicer = local_job_service.LocalJobServicer(options.staging_dir)
port = job_servicer.start_grpc_server(options.port)
try:
if options.port_file:
print('Writing port to', options.port_file)
os.makedirs(pathlib.PurePath(options.port_file).parent, exist_ok=True)
with open(options.port_file + '.tmp', 'w') as fout:
fout.write(str(port))
os.rename(options.port_file + '.tmp', options.port_file)
serve("Listening for beam jobs on port %d." % port, job_servicer)
finally:
job_servicer.stop()
finally:
if options.pid_file and os.path.exists(options.pid_file):
os.unlink(options.pid_file)
if options.port_file and os.path.exists(options.port_file):
os.unlink(options.port_file)
def serve(msg, job_servicer):
logging_delay = 30
while True:
_LOGGER.info(msg)
time.sleep(logging_delay)
logging_delay *= 1.25
if __name__ == '__main__':
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
run(sys.argv)
| apache-2.0 | 7,439,806,534,146,865,000 | 33.041176 | 80 | 0.672542 | false | 3.709615 | false | false | false |
openstack/vitrage | vitrage/graph/driver/graph.py | 1 | 12340 | # Copyright 2016 - Alcatel-Lucent
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for Graph access and manipulation
Functions in this module are imported into the vitrage.graph namespace.
Call these functions from vitrage.graph namespace and not the
vitrage.graph.driver namespace.
"""
import abc
import copy
from vitrage.graph.driver.elements import Edge
from vitrage.graph.driver.elements import Vertex
from vitrage.graph.driver.notifier import Notifier
class Direction(object):
OUT = 1
IN = 2
BOTH = 3
class Graph(object, metaclass=abc.ABCMeta):
def __init__(self, name, graph_type, vertices=None, edges=None):
"""Create a Graph instance
:type name: str
:type graph_type: str
:type vertices: list of Vertex
:type edges: list of Edge
:rtype: Graph
"""
self.name = name
self.graph_type = graph_type
self.notifier = Notifier()
def subscribe(self, function, finalization=False):
"""Subscribe to graph changes
:param function: function will be called after each graph change
        :param finalization: function will be called after all non-finalization functions
Usage Example:
graph = NXGraph()
graph.subscribe(foo1, finalization=True)
graph.subscribe(foo2, finalization=False)
graph.subscribe(foo3, finalization=False)
        The order of the calls in this example will be:
1. foo2
2. foo3
3. foo1
foo1 is called last because it subscribed as a finalization function
"""
self.notifier.subscribe(function, finalization)
def is_subscribed(self):
return self.notifier.is_subscribed()
def get_item(self, item):
if isinstance(item, Edge):
return self.get_edge(item.source_id, item.target_id, item.label)
if isinstance(item, Vertex):
return self.get_vertex(item.vertex_id)
@property
def algo(self):
"""Get graph algorithms
:rtype: GraphAlgorithm
"""
return None
@abc.abstractmethod
def copy(self):
"""Create a copy of the graph
:return: A copy of the graph
:rtype: Graph
"""
pass
@abc.abstractmethod
def num_vertices(self):
"""Number of vertices in the graph
:return:
:rtype: int
"""
pass
@abc.abstractmethod
def num_edges(self):
"""Number of edges in the graph
:return:
:rtype: int
"""
pass
@abc.abstractmethod
def add_vertex(self, v):
"""Add a vertex to the graph
A copy of Vertex v will be added to the graph.
Example:
--------
graph = Graph()
v = Vertex(vertex_id=1, properties={prop_key:prop_value})
graph.add_vertex(v)
:param v: the vertex to add
:type v: Vertex
"""
pass
def add_vertices(self, vertices):
"""Add a list of vertices to the graph
Uses add_vertex to add each vertex
:param vertices:
:type vertices:list of Vertex
"""
if not vertices:
return
for v in vertices:
self.add_vertex(v)
@abc.abstractmethod
def add_edge(self, e):
"""Add an edge to the graph
A copy of Edge e will be added to the graph.
Example:
--------
graph = Graph()
v1_prop = {'prop_key':'some value for my first vertex'}
v2_prop = {'prop_key':'another value for my second vertex'}
v1 = Vertex(vertex_id=1, properties=v1_prop)
v2 = Vertex(vertex_id=2, properties=v2_prop)
graph.add_vertex(v1)
graph.add_vertex(v2)
e_prop = {'edge_prop':'and here is my edge property value'}
e = Edge(source_id=v1.vertex_id, target_id=v2.vertex_id,
label='BELONGS', properties=e_prop)
graph.add_edge(e)
:param e: the edge to add
:type e: Edge
"""
pass
def add_edges(self, edges):
"""Add a list of edges to the graph
Uses add_edge to add each edge
:param edges:
:type edges:list of Edge
"""
if not edges:
return
for e in edges:
self.add_edge(e)
@abc.abstractmethod
def get_vertex(self, v_id):
"""Fetch a vertex from the graph
:param v_id: vertex id
:type v_id: str
:return: the vertex or None if it does not exist
:rtype: Vertex
"""
pass
@abc.abstractmethod
def get_edge(self, source_id, target_id, label):
"""Fetch an edge from the graph,
Fetch an edge from the graph, according to its two vertices and label
:param source_id: vertex id of the source vertex
:type source_id: str or None
:param target_id: vertex id of the target vertex
:type target_id: str
:param label: the label property of the edge
:type label: str or None
:return: The edge between the two vertices or None
:rtype: Edge
"""
pass
@abc.abstractmethod
def get_edges(self,
v1_id,
v2_id=None,
direction=Direction.BOTH,
attr_filter=None):
"""Fetch multiple edges from the graph,
Fetch all edges from the graph, according to its two vertices.
If only one vertex id is given it finds all the edges from this vertex
to all other vertices.
If two vertices ids are given it finds all the edges between those two
vertices.
EXAMPLE
-------
v2_edges1 = g.get_edges(
v_id=v2.vertex_id,
attr_filter={'LABEL': 'ON'})
v2_edges2 = g.get_edges(
v_id=v2.vertex_id,
attr_filter={'LABEL': ['ON', 'WITH']})
:param v1_id: first vertex id of vertex
:type v1_id: str
:param v2_id: second vertex id of vertex
:type v2_id: str
:param direction: specify In/Out/Both for edge direction
:type direction: int
:param attr_filter: expected keys and values
:type attr_filter: dict
:return: All edges matching the requirements
:rtype: set of Edge
"""
pass
@abc.abstractmethod
def update_vertex(self, v, overwrite=True):
"""Update the vertex properties
Update an existing vertex and create it if non existing.
:param v: the vertex with the new data
:type v: Vertex
:param overwrite: whether to overwrite existing properties
:type overwrite: Boolean
"""
pass
def update_vertices(self, vertices):
"""For each vertex, update its properties
For each existing vertex, update its properties and create it if
non existing.
:param vertices: the vertex with the new data
:type vertices: List
"""
for v in vertices:
self.update_vertex(v)
@abc.abstractmethod
def update_edge(self, e):
"""Update the edge properties
Update an existing edge and create it if non existing.
:param e: the edge with the new data
:type e: Edge
"""
pass
@staticmethod
def _merged_properties(base_props, updated_props, overwrite):
if base_props is None:
return copy.copy(updated_props)
else:
# Return all updated properties if overwrite is true, or only the
# new properties otherwise
return {k: v for k, v in updated_props.items()
if overwrite or k not in base_props}
@abc.abstractmethod
def remove_vertex(self, v):
"""Remove Vertex v and its edges from the graph
:type v: Vertex
"""
pass
@abc.abstractmethod
def remove_edge(self, e):
"""Remove an edge from the graph
:type e: Edge
"""
pass
@abc.abstractmethod
def get_vertices(self,
vertex_attr_filter=None,
query_dict=None):
"""Get vertices list with an optional match filter
To filter the vertices, specify property values for
the vertices
Example:
--------
graph = Graph()
v1_prop = {'prop_key':'some value for my first vertex'}
v2_prop = {'prop_key':'another value for my second vertex'}
v3_prop = {'prop_key':'YES'}
v1 = Vertex(vertex_id=1, properties=v1_prop)
v2 = Vertex(vertex_id=2, properties=v2_prop)
v3 = Vertex(vertex_id=3, properties=v3_prop)
graph.add_vertex(v1)
graph.add_vertex(v2)
graph.add_vertex(v3)
all_vertices = graph.get_vertices()
for v in all_vertices:
do something with v
filtered_vertices_list = graph.get_vertices(
vertex_attr_filter={'prop_key':['YES']})
:param vertex_attr_filter: expected keys and values
:type vertex_attr_filter dict
:param query_dict: expected query
:type query_dict dict
:return: A list of vertices that match the requested query
:rtype: list of Vertex
"""
pass
@abc.abstractmethod
def get_vertices_by_key(self,
key_values_hash):
"""Get vertices list according to their hash key
The hash key is derived from their properties :
See processor_utils - get_defining_properties
:param key_values_hash: hash key
:type key_values_hash str
"""
pass
@abc.abstractmethod
def neighbors(self, v_id, vertex_attr_filter=None,
edge_attr_filter=None, direction=Direction.BOTH):
"""Get vertices that are neighboring to v_id vertex
To filter the neighboring vertices, specify property values for
the vertices or for the edges connecting them.
Example:
--------
graph = Graph()
v1_prop = {'prop_key':'some value for my first vertex'}
v2_prop = {'prop_key':'another value for my second vertex'}
v3_prop = {'prop_key':'YES'}
v1 = Vertex(vertex_id=1, properties=v1_prop)
v2 = Vertex(vertex_id=2, properties=v2_prop)
v3 = Vertex(vertex_id=3, properties=v3_prop)
graph.add_vertex(v1)
graph.add_vertex(v2)
graph.add_vertex(v3)
e_prop = {'edge_prop':'and here is my edge property value'}
e1 = Edge(source_id=v1.vertex_id, target_id=v2.vertex_id,
label='BELONGS', properties=e_prop)
e2 = Edge(source_id=v1.vertex_id, target_id=v3.vertex_id,
label='ON', properties=e_prop)
graph.add_edge(e1)
graph.add_edge(e2)
vertices_list1 = graph.neighbors(v_id=v1.vertex_id,
vertex_attr_filter={'prop_key':'YES'},
                               edge_attr_filter={'LABEL':'ON'})
vertices_list2 = graph.neighbors(v_id=v1.vertex_id,
vertex_attr_filter={'prop_key':['YES', 'CAT']},
edge_attr_filter={'LABEL':['ON', 'WITH']})
:param direction:
:param v_id: vertex id
:type v_id: str
:param vertex_attr_filter: expected keys and values
:type vertex_attr_filter dict
:param edge_attr_filter: expected keys and values
:type edge_attr_filter: dict
:return: A list of vertices that match the requested query
:rtype: list of Vertex
"""
pass
@abc.abstractmethod
def json_output_graph(self, **kwargs):
pass
@abc.abstractmethod
def union(self, other_graph):
pass
| apache-2.0 | -3,534,215,445,248,439,000 | 27.764569 | 79 | 0.582334 | false | 4.118825 | false | false | false |
wdv4758h/Yeppp | library/sources/core/kernels/binop/binop_IVV_IV.py | 1 | 5017 | from peachpy.x86_64 import *
from peachpy import *
import common.YepStatus as YepStatus
from common.pipeline import software_pipelined_loop
from common.instruction_selection import *
def binop_IVV_IV(arg_x, arg_y, arg_n, op, isa_ext):
# First we set some constants based on the input/output types
# so that we can use the same code for any input/output
# type combination
input_type = arg_x.c_type.base
output_type = arg_x.c_type.base
input_type_size = arg_x.c_type.base.size
output_type_size = arg_x.c_type.base.size
unroll_factor = 5
simd_register_size = { "AVX2": YMMRegister.size,
"AVX" : YMMRegister.size,
"SSE" : XMMRegister.size }[isa_ext]
SCALAR_LOAD, SCALAR_OP, SCALAR_STORE = scalar_instruction_select(input_type, output_type, op, isa_ext)
SIMD_LOAD, SIMD_OP, SIMD_STORE = vector_instruction_select(input_type, output_type, op, isa_ext)
reg_x_scalar, reg_y_scalar = scalar_reg_select(output_type, isa_ext)
simd_accs, simd_ops = vector_reg_select(isa_ext, unroll_factor)
ret_ok = Label()
ret_null_pointer = Label()
ret_misaligned_pointer = Label()
# Load args and test for null pointers and invalid arguments
reg_length = GeneralPurposeRegister64() # Keeps track of how many elements are left to process
LOAD.ARGUMENT(reg_length, arg_n)
TEST(reg_length, reg_length)
JZ(ret_ok) # Check there is at least 1 element to process
reg_x_addr = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_x_addr, arg_x)
TEST(reg_x_addr, reg_x_addr) # Make sure arg_x is not null
JZ(ret_null_pointer)
TEST(reg_x_addr, output_type_size - 1) # Check that our output arr is aligned
JNZ(ret_misaligned_pointer)
reg_y_addr = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_y_addr, arg_y)
TEST(reg_y_addr, reg_y_addr) # Make sure arg_y is not null
JZ(ret_null_pointer)
align_loop = Loop() # Loop to align one of the addresses
scalar_loop = Loop() # Processes remainder elements (if n % 8 != 0)
# Aligning on X addr
    # Process elements 1 at a time until x is aligned on YMMRegister.size boundary
TEST(reg_x_addr, simd_register_size - 1) # Check if already aligned
JZ(align_loop.end) # If so, skip this loop entirely
with align_loop:
SCALAR_LOAD(reg_x_scalar, [reg_x_addr])
SCALAR_LOAD(reg_y_scalar, [reg_y_addr])
SCALAR_OP(reg_x_scalar, reg_x_scalar, reg_y_scalar)
SCALAR_STORE([reg_x_addr], reg_x_scalar)
ADD(reg_x_addr, output_type_size)
ADD(reg_y_addr, output_type_size)
SUB(reg_length, 1)
JZ(ret_ok)
TEST(reg_x_addr, simd_register_size - 1)
JNZ(align_loop.begin)
reg_x_addr_out = GeneralPurposeRegister64()
MOV(reg_x_addr_out, reg_x_addr)
# Batch loop for processing the rest of the array in a pipelined loop
instruction_columns = [InstructionStream(), InstructionStream(), InstructionStream(), InstructionStream()]
instruction_offsets = (0, 1, 2, 3)
for i in range(unroll_factor):
with instruction_columns[0]:
SIMD_LOAD(simd_accs[i], [reg_x_addr + i * simd_register_size * input_type_size / output_type_size])
with instruction_columns[1]:
SIMD_LOAD(simd_ops[i], [reg_y_addr + i * simd_register_size * input_type_size / output_type_size])
with instruction_columns[2]:
SIMD_OP(simd_accs[i], simd_accs[i], simd_ops[i])
with instruction_columns[3]:
SIMD_STORE([reg_x_addr_out + i * simd_register_size], simd_accs[i])
with instruction_columns[0]:
ADD(reg_x_addr, simd_register_size * unroll_factor * input_type_size / output_type_size)
with instruction_columns[1]:
ADD(reg_y_addr, simd_register_size * unroll_factor * input_type_size / output_type_size)
with instruction_columns[3]:
ADD(reg_x_addr_out, simd_register_size * unroll_factor * input_type_size / output_type_size)
software_pipelined_loop(reg_length, unroll_factor * simd_register_size / output_type_size, instruction_columns, instruction_offsets)
# Check if there are leftover elements that were not processed in the pipelined loop
# This loop should iterate at most #(elems processed per iteration in the batch loop) - 1 times
TEST(reg_length, reg_length)
JZ(scalar_loop.end)
with scalar_loop: # Process the remaining elements
SCALAR_LOAD(reg_x_scalar, [reg_x_addr])
SCALAR_LOAD(reg_y_scalar, [reg_y_addr])
SCALAR_OP(reg_x_scalar, reg_x_scalar, reg_y_scalar)
SCALAR_STORE([reg_x_addr], reg_x_scalar)
ADD(reg_x_addr, output_type_size)
ADD(reg_y_addr, output_type_size)
SUB(reg_length, 1)
JNZ(scalar_loop.begin)
with LABEL(ret_ok):
RETURN(YepStatus.YepStatusOk)
with LABEL(ret_null_pointer):
RETURN(YepStatus.YepStatusNullPointer)
with LABEL(ret_misaligned_pointer):
RETURN(YepStatus.YepStatusMisalignedPointer)
| bsd-3-clause | 3,629,260,896,634,825,700 | 42.25 | 136 | 0.665139 | false | 3.104579 | true | false | false |
WmHHooper/aima-python | submissions/Thompson/vacuum2.py | 1 | 2607 | import agents as ag
# HW2 vacuum agent: bump-driven sweep controlled by program.counter
def HW2Agent() -> object:
def program(percept):
bump, status = percept
if status == 'Dirty':
action = 'Suck'
else:
lastBump, lastStatus, = program.oldPercepts[-1]
lastAction = program.oldActions[-1]
if program.counter == 0:
if bump == 'Bump':
program.counter += 1
action = 'Right'
else:
action = 'Down'
elif program.counter == 1:
if bump == 'Bump':
program.counter = 6
action = 'Up'
else:
program.counter += 1
action = 'Up'
elif program.counter == 2:
if bump == 'Up':
program.counter += 1
action = 'Up'
else:
action = 'Up'
elif program.counter == 3:
if bump == 'Bump':
program.counter = 7
action = 'Left'
else:
program.counter = 0
action = 'Down'
            # Skipping 4 and 5 because they're similar to 1 and 3
elif program.counter == 6:
if bump == 'Bump':
program.counter += 1
action = 'Left'
else:
action = 'Up'
elif program.counter == 7:
if bump == 'Bump':
program.counter = 3
action = 'Right'
else:
program.counter += 1
action = 'Down'
elif program.counter == 8:
if bump == 'Bump':
program.counter += 1
action = 'Left'
else:
action = 'Down'
elif program.counter == 9:
if bump == 'Bump':
program.counter = 1
action = 'Right'
else:
program.counter = 6
action = 'Up'
program.oldPercepts.append(percept)
program.oldActions.append(action)
return action
# assign static variables here
program.oldPercepts = [('None', 'Clean')]
program.oldActions = ['Left', 'Right']
program.counter = 0
# program.lastWall = ['None', 'Down']
agt = ag.Agent(program)
# assign class attributes here:
# agt.direction = ag.Direction('left')
return agt
| mit | 2,600,238,648,749,636,600 | 31.5875 | 61 | 0.410433 | false | 4.918868 | false | false | false |
pykit/pykit-cpy | pykit_cpy/lower/lower_convert.py | 1 | 1746 | # -*- coding: utf-8 -*-
"""
Lower all conversions between native values <-> objects to calls to a
runtime.conversion module set in the environment.
"""
from __future__ import print_function, division, absolute_import
from pykit import types
from pykit.ir import transform, GlobalValue, Module, Builder
def build_conversion_table(convertable=types.scalar_set):
"""Returns { (from_type, to_type) -> funcname }"""
table = {}
for type in convertable:
typename = types.typename(type).lower()
table[(type, types.Object)] = "object_from_%s" % typename
table[(types.Object, type)] = "%s_from_object" % typename
return table
def conversion_runtime(convertable=types.scalar_set):
"""Returns a Module with declared external runtime conversion functions"""
table = build_conversion_table(convertable)
mod = Module()
for (from_type, to_type), funcname in table.iteritems():
signature = types.Function(to_type, [from_type])
gv = GlobalValue(funcname, signature, external=True, address=0)
mod.add_global(gv)
return mod
class LowerConversions(object):
def __init__(self, func, conversion_table):
self.conversion_table = conversion_table
self.builder = Builder(func)
def op_convert(self, op):
arg = op.args[0]
if (op.type, arg.type) in self.conversion_table:
funcname = self.conversion_table[op.type, arg.type]
return self.builder.gen_call_external(funcname, [arg])
def run(func, env):
if not env.get("runtime.conversion"):
env["runtime.conversion"] = conversion_runtime()
func.module.link(env["runtime.conversion"])
transform(LowerConversions(func, build_conversion_table()), func) | bsd-3-clause | -5,180,342,390,082,493,000 | 35.395833 | 78 | 0.672394 | false | 3.837363 | false | false | false |
niktre/espressopp | contrib/mpi4py/mpi4py-2.0.0/conf/epydocify.py | 11 | 3073 | #!/usr/bin/env python
# --------------------------------------------------------------------
from mpi4py import MPI
try:
from signal import signal, SIGPIPE, SIG_IGN
signal(SIGPIPE, SIG_IGN)
except ImportError:
pass
# --------------------------------------------------------------------
try:
from docutils.nodes import NodeVisitor
NodeVisitor.unknown_visit = lambda self, node: None
NodeVisitor.unknown_departure = lambda self, node: None
except ImportError:
pass
try: # epydoc 3.0.1 + docutils 0.6
from docutils.nodes import Text
try:
from collections import UserString
except ImportError:
from UserString import UserString
if not isinstance(Text, UserString):
def Text_get_data(s):
try:
return s._data
except AttributeError:
return s.astext()
def Text_set_data(s, d):
s.astext = lambda: d
s._data = d
Text.data = property(Text_get_data, Text_set_data)
except ImportError:
pass
# --------------------------------------------------------------------
from epydoc.docwriter import dotgraph
import re
dotgraph._DOT_VERSION_RE = \
re.compile(r'dot (?:- Graphviz )version ([\d\.]+)')
try:
dotgraph.DotGraph.DEFAULT_HTML_IMAGE_FORMAT
dotgraph.DotGraph.DEFAULT_HTML_IMAGE_FORMAT = 'png'
except AttributeError:
DotGraph_to_html = dotgraph.DotGraph.to_html
DotGraph_run_dot = dotgraph.DotGraph._run_dot
def to_html(self, image_file, image_url, center=True):
if image_file[-4:] == '.gif':
image_file = image_file[:-4] + '.png'
if image_url[-4:] == '.gif':
image_url = image_url[:-4] + '.png'
return DotGraph_to_html(self, image_file, image_url)
def _run_dot(self, *options):
if '-Tgif' in options:
opts = list(options)
for i, o in enumerate(opts):
if o == '-Tgif': opts[i] = '-Tpng'
options = type(options)(opts)
return DotGraph_run_dot(self, *options)
dotgraph.DotGraph.to_html = to_html
dotgraph.DotGraph._run_dot = _run_dot
# --------------------------------------------------------------------
import re
_SIGNATURE_RE = re.compile(
# Class name (for builtin methods)
r'^\s*((?P<class>\w+)\.)?' +
# The function name
r'(?P<func>\w+)' +
# The parameters
r'\(((?P<self>(?:self|cls|mcs)),?)?(?P<params>.*)\)' +
# The return value (optional)
r'(\s*(->)\s*(?P<return>\S.*?))?'+
# The end marker
r'\s*(\n|\s+(--|<=+>)\s+|$|\.\s+|\.\n)')
from epydoc import docstringparser as dsp
dsp._SIGNATURE_RE = _SIGNATURE_RE
# --------------------------------------------------------------------
import sys, os
import epydoc.cli
def epydocify():
dirname = os.path.dirname(__file__)
config = os.path.join(dirname, 'epydoc.cfg')
sys.argv.append('--config=' + config)
epydoc.cli.cli()
if __name__ == '__main__':
epydocify()
# --------------------------------------------------------------------
| gpl-3.0 | 4,019,386,524,033,540,600 | 26.936364 | 70 | 0.509925 | false | 3.71584 | false | false | false |
the-engine-room/crowdata | crowdataapp/urls.py | 1 | 2375 | from django.conf.urls import patterns, url
from crowdataapp import views
urlpatterns = patterns('crowdataapp.views',
url(r'^$',
'document_set_index',
name='document_set_index'),
url(r'^pleaselogin$',
'login',
name='login_page'),
url(r'^logout$',
'logout',
name='logout_page'),
url(r'^afterlogin$',
'after_login',
name='after_login'),
url(r'^profile$',
'edit_profile',
name='edit_profile'),
url(r'^(?P<document_set>[\w-]+)$',
'document_set_view',
name='document_set_view'),
url(r'^(?P<document_set>[\w-]+)/new_transcription$',
'transcription_new',
name='new_transcription'),
url(r'^(?P<document_set>[\w-]+)/(?P<document_id>[\w-]+)$',
'show_document',
name='show_document'),
url(r'^(?P<document_set>[\w-]+)/autocomplete/(?P<field_name>[\w-]+)$',
'autocomplete_field',
name='autocomplete_field'),
url(r'crowdata/form/(?P<slug>[\w-]+)',
'form_detail',
name='crowdata_form_detail'),
url(r'^(?P<document_set>[\w-]+)/ranking/(?P<ranking_id>[\w-]+)$',
'ranking_all',
name='ranking_all'),
url(r'^(?P<document_set>[\w-]+)/users/(?P<username>[\w-]+)$',
'user_profile',
name='user_profile'),
url(r'^(?P<document_set>[\w-]+)/all/users$',
'users_all',
name='users_all'),
url(r'^(?P<document_set>[\w-]+)/(?P<field_id>[\w-]+)/(?P<canon_id>[\w-]+)$',
'documents_by_entry_value',
name='documents_by_entry_value'),
)
| mit | 2,779,071,065,911,481,000 | 49.531915 | 99 | 0.347789 | false | 4.979036 | false | true | false |
aurora-pro/apex-sigma | sigma/plugins/administration/evaluate.py | 1 | 1210 | from config import permitted_id
import discord
import inspect
async def evaluate(cmd, message, args):
if message.author.id in permitted_id:
if not args:
await message.channel.send(cmd.help())
else:
try:
execution = " ".join(args)
output = eval(execution)
if inspect.isawaitable(output):
output = await output
status = discord.Embed(title='✅ Executed', color=0x66CC66)
if output:
try:
status.add_field(name='Results', value='\n```\n' + str(output) + '\n```')
except:
pass
except Exception as e:
cmd.log.error(e)
status = discord.Embed(color=0xDB0000, title='❗ Error')
status.add_field(name='Execution Failed', value=str(e))
await message.channel.send(None, embed=status)
else:
status = discord.Embed(type='rich', color=0xDB0000,
title='⛔ Insufficient Permissions. Bot Owner or Server Admin Only.')
await message.channel.send(None, embed=status)
| gpl-3.0 | 6,171,629,511,094,829,000 | 39.066667 | 99 | 0.524126 | false | 4.262411 | false | false | false |
migueldvb/piernik | make_test.py | 1 | 1835 | #!/usr/bin/env python
import sys
import getopt
import os
import subprocess as sp
import shutil as sh
import xmlrpclib
class MakeTest(object):
def __init__(self,test):
self.initpath = os.getcwd()
self.runpath = os.path.join(self.initpath,'runs',test)
self.test = test
os.chdir(self.runpath)
retcode = sp.call(["mpiexec","./piernik"])
if retcode != 0:
sys.exit(retcode)
self.runtest(test)
os.chdir(self.initpath)
def put_png(self):
server = xmlrpclib.ServerProxy("http://piernik:p1ern1k@hum/piernik/login/xmlrpc")
for file in os.listdir(self.runpath):
if file.find('png') != -1:
server.wiki.putAttachment(self.test+'/'+file, xmlrpclib.Binary(open(self.runpath+'/'+file).read()))
def testJeans (self):
sp.call(["gnuplot","verify.gpl"])
self.put_png()
def testMaclaurin (self):
from maclaurin import Maclaurin_test
Maclaurin_test(self.runpath+'/maclaurin_sph_0001.h5')
self.put_png()
def testSedov (self):
print "test not implemented"
def output(self):
print self.initpath
print self.runpath
def runtest(self,test):
tests = { "jeans": self.testJeans,
"maclaurin": self.testMaclaurin,
"sedov": self.testSedov}[test]()
#tests.get(test)
def usage():
print __doc__
def main(argv):
try:
        opts, args = getopt.getopt(argv, "ht:", ["help", "test="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-t", "--test"):
test=arg
# add piernik modules
sys.path.append(sys.path[0]+'/python')
t = MakeTest(test)
t.output()
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-3.0 | -3,363,016,374,321,181,000 | 23.797297 | 111 | 0.590191 | false | 3.230634 | true | false | false |
pystruct/pystruct | doc/conf.py | 2 | 10691 | # -*- coding: utf-8 -*-
#
# pystruct documentation build configuration file, created by
# sphinx-quickstart on Fri May 3 17:14:50 2013.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_bootstrap_theme
import pystruct
#import sphinx_gallery
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('sphinxext'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.doctest', 'sphinx.ext.pngmath',
'sphinx.ext.viewcode', 'numpy_ext.numpydoc', 'sphinx_gallery.gen_gallery']
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# generate autosummary even if no references
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pystruct'
copyright = u'2013, Andreas Mueller'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pystruct.__version__
# The full version, including alpha/beta/rc tags.
release = pystruct.__version__
sphinx_gallery_conf = {
'reference_url': {
# The module you locally document uses a None
'pystruct': None,
# External python modules use their documentation websites
'sklearn': 'http://scikit-learn.org/stable',
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1'}}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_templates', '_themes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'pystruct'
html_theme = 'bootstrap'
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_themes']
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pystructdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'pystruct.tex', u'pystruct Documentation',
u'Andreas Mueller', 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pystruct', u'pystruct Documentation',
[u'Andreas Mueller'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pystruct', u'pystruct Documentation', u'Andreas Mueller',
'pystruct', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Theme options are theme-specific and customize the look and feel of a
# theme further.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "PyStruct",
# Tab name for entire site. (Default: "Site")
#'navbar_site_name': "Site",
    # A list of tuples containing pages to link to. The value should
# be in the form [(name, page), ..]
'navbar_links': [
('Start', 'index'),
('Installation', 'installation'),
('Introduction', 'intro'),
('User Guide', 'user_guide'),
('Examples', 'auto_examples/index'),
('API', 'references'),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': False,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 0,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "false",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "None",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': "cerulean",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
| bsd-2-clause | -1,833,842,543,130,741,800 | 31.39697 | 88 | 0.682069 | false | 3.713442 | true | false | false |
10c8/pixelbot | PluginAPI.py | 1 | 5326 | #: vim set encoding=utf-8 :
##
# PixelBot Plugin API
# Tools for creating plugins for the bot
#
# version 0.2
# author William F.
# copyright MIT
##
# Imports
import asyncio
import logging
# Main class
class Plugin(object):
def __init__(self, bot, name, description, version, author, data):
self.bot = bot
self.name = name
self.desc = description
self.version = version
self.author = author
self.data = {}
self.default_data = data
self.tasks = {}
self.cmds = {}
self.mod_cmds = {}
# Fetch plugin data
if not self.name in self.bot.data['plugins']:
self.bot.data['plugins'][self.name] = self.default_data
self.data = self.bot.data['plugins'][self.name]
# Register tasks and commands
logging.info('Registering plugin "{}".'.format(self.name))
for method in dir(self):
if callable(getattr(self, method)):
call = getattr(self, method)
if method.startswith('cmd_'):
call(None, None, None)
elif method.startswith('task_'):
call(None)
def init(plugin, bot):
"""Used to inherit from PluginAPI without calling the
'bloated' super method"""
default_data = {}
if 'default_data' in plugin.__dir__():
default_data = plugin.default_data
super(type(plugin), plugin).__init__(bot,
name=plugin.name,
description=plugin.description,
version=plugin.version,
author=plugin.author,
data=default_data)
# Utils
def saveData(self):
self.bot.data['plugins'][self.name] = self.data
self.bot.saveData()
def getConfig(self, key):
return self.bot.cfg.get(key, section=self.name)
def log(self, message):
logging.info('[{}][INFO] {}'.format(self.name, message))
def warning(self, message):
logging.critical('[{}][WARN] {}'.format(self.name, message))
def critical(self, message):
logging.critical('[{}][FAIL] {}'.format(self.name, message))
def generateHelp(self, mod=False):
info = (
'**{name}**\n'
'*{desc}*\n\n'
'Version: {version}\n'
'Commands:```{cmds}```'
).format(
name=self.name,
desc=self.description,
version=self.version,
cmds='...'
)
return info
# Methods
async def on_ready(self, client):
pass
async def on_message(self, client, msg):
pass
async def on_member_join(self, client, user):
pass
async def on_member_remove(self, client, user):
pass
async def on_member_update(self, client, before, after):
pass
# User API
class _UserAPI(object):
def __init__(self):
pass
def is_mod(self, plugin, user):
"""Returns True if the user is a mod (has any 'mod_roles' role)."""
try:
for role in user.roles:
if role.name in plugin.bot.settings['discord']['mod_roles']:
return True
return False
except:
return False
User = _UserAPI()
# Task class
class Task(object):
def __init__(self, owner, name, func, interval, alive=True):
self.owner = owner
self.name = name
self.func = func
self.interval = interval
self.alive = alive
async def run(self, client):
while self.alive:
await self.func(self.owner, client)
await asyncio.sleep(self.interval)
def kill(self):
logging.info('[{}] Task "{}" killed.'.format(self.owner.name,
self.name))
self.alive = False
def revive(self):
logging.info('[{}] Task "{}" revived.'.format(self.owner.name,
self.name))
self.alive = True
self.owner.bot.client.loop.create_task(self.run(self.owner.bot.client))
# Task decorator
def task(name, interval, alive=True):
"""Make the function a bot task."""
def wrapper(func):
def wrapped(*args):
this = Task(args[0], name, func, interval)
this.alive = alive
args[0].tasks[name] = this
if args[1]:
func(*args)
return wrapped
return wrapper
# Command decorator
def command(name, mod=False, ns=None):
def wrapper(func):
def wrapped(*args):
cmd = {
'type': 'cmd',
'func': func,
'mod': mod
}
if ns is None:
args[0].cmds[name] = cmd
else:
if ns not in args[0].cmds.keys():
args[0].cmds[ns] = {
'type': 'ns',
name: cmd
}
else:
args[0].cmds[ns][name] = cmd
if args[1]:
func(*args)
return wrapped
return wrapper
| mit | -335,620,228,300,371,700 | 25.89899 | 79 | 0.495869 | false | 4.3021 | false | false | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/application_gateway_frontend_ip_configuration_py3.py | 1 | 3140 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayFrontendIPConfiguration(SubResource):
"""Frontend IP configuration of an application gateway.
:param id: Resource ID.
:type id: str
:param private_ip_address: PrivateIPAddress of the network interface IP
Configuration.
:type private_ip_address: str
:param private_ip_allocation_method: PrivateIP allocation method. Possible
values include: 'Static', 'Dynamic'
:type private_ip_allocation_method: str or
~azure.mgmt.network.v2017_09_01.models.IPAllocationMethod
:param subnet: Reference of the subnet resource.
:type subnet: ~azure.mgmt.network.v2017_09_01.models.SubResource
:param public_ip_address: Reference of the PublicIP resource.
:type public_ip_address:
~azure.mgmt.network.v2017_09_01.models.SubResource
:param provisioning_state: Provisioning state of the public IP resource.
Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, id: str=None, private_ip_address: str=None, private_ip_allocation_method=None, subnet=None, public_ip_address=None, provisioning_state: str=None, name: str=None, etag: str=None, type: str=None, **kwargs) -> None:
super(ApplicationGatewayFrontendIPConfiguration, self).__init__(id=id, **kwargs)
self.private_ip_address = private_ip_address
self.private_ip_allocation_method = private_ip_allocation_method
self.subnet = subnet
self.public_ip_address = public_ip_address
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.type = type
| mit | 6,454,073,461,142,669,000 | 46.575758 | 238 | 0.644586 | false | 4.015345 | false | false | false |