repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
amdegroot/ssd.pytorch | layers/box_utils.py | 1 | 9435 | # -*- coding: utf-8 -*-
import torch
def point_form(boxes):
""" Convert prior_boxes to (xmin, ymin, xmax, ymax)
representation for comparison to point form ground truth data.
Args:
boxes: (tensor) center-size default boxes from priorbox layers.
Return:
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
"""
return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin
boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax
def center_size(boxes):
""" Convert prior_boxes to (cx, cy, w, h)
representation for comparison to center-size form ground truth data.
Args:
boxes: (tensor) point_form boxes
Return:
        boxes: (tensor) Converted (cx, cy, w, h) form of boxes.
"""
    return torch.cat(((boxes[:, 2:] + boxes[:, :2])/2,  # cx, cy
                      boxes[:, 2:] - boxes[:, :2]), 1)  # w, h
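# Illustrative helper added for clarity (not part of the original ssd.pytorch file):
# a minimal sanity check that point_form() and center_size() invert each other.
def _demo_box_form_round_trip():
    center_boxes = torch.Tensor([[0.5, 0.5, 0.2, 0.4]])   # cx, cy, w, h
    corner_boxes = point_form(center_boxes)               # [[0.4, 0.3, 0.6, 0.7]]
    recovered = center_size(corner_boxes)
    assert float((recovered - center_boxes).abs().max()) < 1e-6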
def intersect(box_a, box_b):
""" We resize both tensors to [A,B,2] without new malloc:
[A,2] -> [A,1,2] -> [A,B,2]
[B,2] -> [1,B,2] -> [A,B,2]
Then we compute the area of intersect between box_a and box_b.
Args:
box_a: (tensor) bounding boxes, Shape: [A,4].
box_b: (tensor) bounding boxes, Shape: [B,4].
Return:
(tensor) intersection area, Shape: [A,B].
"""
A = box_a.size(0)
B = box_b.size(0)
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
box_b[:, :2].unsqueeze(0).expand(A, B, 2))
inter = torch.clamp((max_xy - min_xy), min=0)
return inter[:, :, 0] * inter[:, :, 1]
def jaccard(box_a, box_b):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes. Here we operate on
ground truth boxes and default boxes.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
Return:
jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
"""
inter = intersect(box_a, box_b)
area_a = ((box_a[:, 2]-box_a[:, 0]) *
(box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]
area_b = ((box_b[:, 2]-box_b[:, 0]) *
(box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]
union = area_a + area_b - inter
return inter / union # [A,B]
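# Illustrative helper added for clarity (not part of the original file): a
# hand-checkable IoU value, two 2x2 boxes sharing a 1x1 corner give 1 / (4 + 4 - 1).
def _demo_jaccard():
    box_a = torch.Tensor([[0., 0., 2., 2.]])   # area 4
    box_b = torch.Tensor([[1., 1., 3., 3.]])   # area 4, 1x1 overlap with box_a
    iou = jaccard(box_a, box_b)
    assert abs(float(iou[0, 0]) - 1.0 / 7.0) < 1e-6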
def match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):
"""Match each prior box with the ground truth box of the highest jaccard
overlap, encode the bounding boxes, then return the matched indices
corresponding to both confidence and location preds.
Args:
        threshold: (float) The overlap threshold used when matching boxes.
        truths: (tensor) Ground truth boxes, Shape: [num_obj, 4].
priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
variances: (tensor) Variances corresponding to each prior coord,
Shape: [num_priors, 4].
labels: (tensor) All the class labels for the image, Shape: [num_obj].
        loc_t: (tensor) Tensor to be filled w/ encoded location targets.
conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
idx: (int) current batch index
Return:
The matched indices corresponding to 1)location and 2)confidence preds.
"""
# jaccard index
overlaps = jaccard(
truths,
point_form(priors)
)
# (Bipartite Matching)
    # [num_objects,1] best prior for each ground truth
best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)
# [1,num_priors] best ground truth for each prior
best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
best_truth_idx.squeeze_(0)
best_truth_overlap.squeeze_(0)
best_prior_idx.squeeze_(1)
best_prior_overlap.squeeze_(1)
best_truth_overlap.index_fill_(0, best_prior_idx, 2) # ensure best prior
# TODO refactor: index best_prior_idx with long tensor
# ensure every gt matches with its prior of max overlap
for j in range(best_prior_idx.size(0)):
best_truth_idx[best_prior_idx[j]] = j
matches = truths[best_truth_idx] # Shape: [num_priors,4]
conf = labels[best_truth_idx] + 1 # Shape: [num_priors]
conf[best_truth_overlap < threshold] = 0 # label as background
loc = encode(matches, priors, variances)
loc_t[idx] = loc # [num_priors,4] encoded offsets to learn
conf_t[idx] = conf # [num_priors] top class label for each prior
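# Illustrative helper added for clarity (not part of the original file): a tiny
# end-to-end run of match() with one ground truth box and two priors. The box
# values below are hypothetical, chosen so prior 0 matches the ground truth
# perfectly and prior 1 has no overlap at all.
def _demo_match():
    truths = torch.Tensor([[0.4, 0.3, 0.6, 0.7]])        # ground truth, point form
    labels = torch.Tensor([0])                           # one class-0 object
    priors = torch.Tensor([[0.5, 0.5, 0.2, 0.4],         # same box in center-size form
                           [0.1, 0.1, 0.2, 0.2]])        # no overlap with the truth
    loc_t = torch.Tensor(1, 2, 4)
    conf_t = torch.LongTensor(1, 2)
    match(0.5, truths, priors, [0.1, 0.2], labels, loc_t, conf_t, 0)
    assert conf_t[0].tolist() == [1, 0]   # foreground (label + 1) vs. background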
def encode(matched, priors, variances):
"""Encode the variances from the priorbox layers into the ground truth boxes
we have matched (based on jaccard overlap) with the prior boxes.
Args:
matched: (tensor) Coords of ground truth for each prior in point-form
Shape: [num_priors, 4].
priors: (tensor) Prior boxes in center-offset form
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
encoded boxes (tensor), Shape: [num_priors, 4]
"""
# dist b/t match center and prior's center
g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
# encode variance
g_cxcy /= (variances[0] * priors[:, 2:])
# match wh / prior wh
g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
g_wh = torch.log(g_wh) / variances[1]
# return target for smooth_l1_loss
return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]
# Adapted from https://github.com/Hakuyume/chainer-ssd
def decode(loc, priors, variances):
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
loc (tensor): location predictions for loc layers,
Shape: [num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
"""
boxes = torch.cat((
priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
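# Illustrative helper added for clarity (not part of the original file):
# encode() and decode() are inverses for a given prior and variance setting.
def _demo_encode_decode_round_trip():
    matched = torch.Tensor([[0.4, 0.3, 0.6, 0.7]])   # ground truth, point form
    priors = torch.Tensor([[0.5, 0.5, 0.2, 0.4]])    # prior, center-size form
    variances = [0.1, 0.2]
    loc = encode(matched, priors, variances)
    restored = decode(loc, priors, variances)
    assert float((restored - matched).abs().max()) < 1e-5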
def log_sum_exp(x):
"""Utility function for computing log_sum_exp while determining
This will be used to determine unaveraged confidence loss across
all examples in a batch.
Args:
x (Variable(tensor)): conf_preds from conf layers
"""
x_max = x.data.max()
return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max
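# Illustrative helper added for clarity (not part of the original file): the
# max-subtraction trick keeps log_sum_exp finite where a naive exp() would overflow.
def _demo_log_sum_exp():
    x = torch.Tensor([[1000.0, 1000.0]])
    out = log_sum_exp(x)                  # exact value is 1000 + log(2)
    assert abs(float(out[0, 0]) - 1000.6931) < 1e-3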
# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
def nms(boxes, scores, overlap=0.5, top_k=200):
"""Apply non-maximum suppression at test time to avoid detecting too many
overlapping bounding boxes for a given object.
Args:
boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class prediction scores for the img, Shape: [num_priors].
overlap: (float) The overlap thresh for suppressing unnecessary boxes.
top_k: (int) The Maximum number of box preds to consider.
Return:
The indices of the kept boxes with respect to num_priors.
"""
keep = scores.new(scores.size(0)).zero_().long()
if boxes.numel() == 0:
return keep
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = torch.mul(x2 - x1, y2 - y1)
v, idx = scores.sort(0) # sort in ascending order
# I = I[v >= 0.01]
idx = idx[-top_k:] # indices of the top-k largest vals
xx1 = boxes.new()
yy1 = boxes.new()
xx2 = boxes.new()
yy2 = boxes.new()
w = boxes.new()
h = boxes.new()
# keep = torch.Tensor()
count = 0
while idx.numel() > 0:
i = idx[-1] # index of current largest val
# keep.append(i)
keep[count] = i
count += 1
if idx.size(0) == 1:
break
idx = idx[:-1] # remove kept element from view
# load bboxes of next highest vals
torch.index_select(x1, 0, idx, out=xx1)
torch.index_select(y1, 0, idx, out=yy1)
torch.index_select(x2, 0, idx, out=xx2)
torch.index_select(y2, 0, idx, out=yy2)
# store element-wise max with next highest score
xx1 = torch.clamp(xx1, min=x1[i])
yy1 = torch.clamp(yy1, min=y1[i])
xx2 = torch.clamp(xx2, max=x2[i])
yy2 = torch.clamp(yy2, max=y2[i])
w.resize_as_(xx2)
h.resize_as_(yy2)
w = xx2 - xx1
h = yy2 - yy1
# check sizes of xx1 and xx2.. after each iteration
w = torch.clamp(w, min=0.0)
h = torch.clamp(h, min=0.0)
inter = w*h
# IoU = i / (area(a) + area(b) - i)
        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas
union = (rem_areas - inter) + area[i]
IoU = inter/union # store result in iou
# keep only elements with an IoU <= overlap
idx = idx[IoU.le(overlap)]
return keep, count
| mit | 5,635,319,637,972,045,000 | 38.443515 | 80 | 0.594463 | false |
opendatatrentino/ckan-api-client | ckan_api_client/tests/unit/test_utils_diff.py | 1 | 1092 | from ckan_api_client.tests.utils.diff import diff_mappings, diff_sequences
def test_diff_dicts():
dct1 = {
'one': 'VAL-1',
'two': 'VAL-2',
'three': 'VAL-3',
'four': 'VAL-4',
'five': 'VAL-5',
}
dct2 = {
'three': 'VAL-3',
'four': 'VAL-4-2',
'five': 'VAL-5-2',
'six': 'VAL-6',
'seven': 'VAL-7',
'eight': 'VAL-8',
}
diff = diff_mappings(dct1, dct2)
assert diff['common'] == set(['three', 'four', 'five'])
assert diff['left'] == set(['one', 'two'])
assert diff['right'] == set(['six', 'seven', 'eight'])
assert diff['differing'] == set(['four', 'five'])
def test_diff_sequences():
diff = diff_sequences([1, 2, 3], [1, 2, 9])
assert diff['length_match'] is True
assert diff['differing'] == set([2])
diff = diff_sequences([1, 2], [])
assert diff['length_match'] is False
assert diff['differing'] == set()
diff = diff_sequences([0, 0, 0, 0], [0, 1, 0, 1])
assert diff['length_match'] is True
assert diff['differing'] == set([1, 3])
| bsd-2-clause | 2,361,998,526,642,072,000 | 26.3 | 74 | 0.508242 | false |
hyperreal/GanjaBot | magnet_utils.py | 1 | 4242 | #
# This file is part of Magnet2.
# Copyright (c) 2011 Grom PE
#
# Magnet2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Magnet2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Magnet2. If not, see <http://www.gnu.org/licenses/>.
#
import xmpp, os, time, cPickle
try:
from HTMLParser import HTMLParser
htmlparser_available = True
except:
htmlparser_available = False
def iq_set_affiliation(room, nick, affiliation, reason=None):
iq = xmpp.Iq('set', xmpp.NS_MUC_ADMIN, {}, room)
item = iq.getTag('query').setTag('item')
item.setAttr('nick', nick)
item.setAttr('affiliation', affiliation)
if reason: item.addChild('reason', {}, reason)
return iq
def iq_set_role(room, nick, role, reason=None):
iq = xmpp.Iq('set', xmpp.NS_MUC_ADMIN, {}, room)
item = iq.getTag('query').setTag('item')
item.setAttr('nick', nick)
item.setAttr('role', role)
if reason: item.addChild('reason', {}, reason)
return iq
def serialize(fname, data):
f = open(fname, 'wb')
result = cPickle.dump(data, f, 2)
f.close()
return result
def unserialize(fname):
if not os.path.exists(fname): return False
f = open(fname, 'rb')
result = cPickle.load(f)
f.close()
return result
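# Illustrative helper added for clarity (not part of the original bot):
# serialize()/unserialize() round-trip a value through a pickle file.
def _demo_serialize_round_trip():
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'state.pkl')
    serialize(path, {'greeting': 'hi', 'count': 3})
    assert unserialize(path) == {'greeting': 'hi', 'count': 3}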
def writelog(filename, text):
s = '[%s] %s\n'%(time.strftime('%d %b %Y %H:%M:%S'), text)
f = open(filename, 'a')
f.write(s.encode('utf-8'))
f.close()
def hasbadwords(text):
badwords = ['palenie.cz', 'paleniecz', 'fakeweed', 'gbac', 'odczynnikchemiczne', 'rcsafe', 'GRC', 'genuine research chemicals', 'genuine', 'geniue', 'genuineresearchchemicals', 'genuine-rc.nl', 'befree', 'befreerc', 'b3', 'officialbenzofury', 'ucygana', 'rcwolf.nl', 'rcwolf', 'black-chem', 'blackchem', 'ksero24', 'pv8.nl', 'brainwasher', 'r-c.com', 'gamma cleaner', 'gammacleaner', 'eurochemicalsco', 'hajsenberg', 'topone', 'chemiczni.eu', 'how-high', 'legalchem', 'legalchem.pl', 'designerchemical', 'odczynniki.cz', 'legalne-ziola', 'synthetics.pl', 'coolchem', 'rcforyou.net', 'rc4you', 'rcforyou', 'rcchemicals', 'mefedron.pl', 'bazarr.nl', 'bazarr', 'fakehash.pl', 'stymulab', 'paularc', 'fakeshop', 'get-rc', 'peakowski', 'r-c', 'rc.pl', 'giene', 'gienk', 'kolekcjoner.nl', 'kolekcjonernl', 'gblchrom']
textl = text.replace(u'\xad', '').lower()
for word in badwords:
if word in textl: return True
return False
def unhtml(content):
if htmlparser_available:
return HTMLParser().unescape(content)
    content = content.replace('&lt;', '<')
    content = content.replace('&gt;', '>')
    content = content.replace('&quot;', '"')
    content = content.replace('&#39;', "'")
    return content.replace('&amp;', '&')
def timeformat(s):
s = s//1 # Rounding
days = s//86400
s -= days*86400
hours = s//3600
s -= hours*3600
minutes = s//60
s -= minutes*60
result = ''
limit = 0
if days>0:
result += ' %d day%s'%(days, ('', 's')[days>1])
limit = 2
if hours>0:
result += ' %d hour%s'%(hours, ('', 's')[hours>1])
limit += 1
if limit<2 and minutes>0:
result += ' %d minute%s'%(minutes, ('', 's')[minutes>1])
if limit<1 and s>0:
result += ' %d second%s'%(s, ('', 's')[s>1])
return result[1:]
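# Illustrative helper added for clarity (not part of the original bot):
# timeformat() reports at most the two largest non-zero units.
def _demo_timeformat():
    assert timeformat(93784) == '1 day 2 hours'   # 1d 2h 3m 4s, capped at two units
    assert timeformat(59) == '59 seconds'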
def separate_target_reason(bot, room, parameters):
target = parameters
reason = None
if not target in bot.roster[room]:
p = len(parameters)
while True:
p = parameters.rfind(' ', 0, p)
if p == -1:
if parameters.find(' ') != -1:
(target, reason) = parameters.split(' ', 1)
break
if parameters[:p] in bot.roster[room]:
target = parameters[:p]
reason = parameters[p+1:]
break
return (target, reason)
def force_directory(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname, 0755)
if __name__ == "__main__":
pass
| gpl-3.0 | -5,035,402,395,565,944,000 | 34.057851 | 813 | 0.635078 | false |
map0logo/hmm_tagging | bigram_tagging.py | 1 | 10176 | """
Implementation of bigram part-of speech (POS) tagger based on first-order hidden
Markov models from scratch.
"""
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import pandas as pd
import codecs
POS_UNIVERSAL = ('VERB', 'NOUN', 'PRON', 'ADJ', 'ADV', 'ADP',
'CONJ', 'DET', 'NUM', 'PRT', 'X', '.')
POS_STATES = np.array(POS_UNIVERSAL)
def viterbi(i_obs, i_states, lstart_p, ltrans_p, lemit_p):
"""
Return the best path, given an HMM model and a sequence of observations
:param i_obs: index of observations in obs_states
:param i_states: index of states
    :param lstart_p: 2D array of log initial probabilities (requires explicit reshape)
    :param ltrans_p: 2D array of log transition probabilities
    :param lemit_p: 2D array of log emission probabilities
:return:
best_path: 1D array best corresponding hidden states to observations
(internal, published for debugging)
path: 2D array of best state for each step and hidden state
logV: 2D array of best log probability for each step and state
"""
""""""
n_obs = i_obs.size
n_states = i_states.size # number of states
logV = np.zeros((n_states, n_obs)) # initialise viterbi table
path = np.zeros((n_states, n_obs), dtype=np.int) # initialise the best path table
best_path = np.zeros(n_obs, dtype=np.int) # this will be your output
# B- base case
logV[:, [0]] = lstart_p + lemit_p[:, [i_obs[0]]]
path[:, 0] = i_states
# C- Inductive case
for t in xrange(1, n_obs): # loop through time
for s in xrange(0, n_states): # loop through the states @(t-1)
tp = logV[:, t-1] + ltrans_p[:, s] + lemit_p[s, i_obs[t]]
path[s, t], logV[s, t] = tp.argmax(), tp.max()
# D - Backpoint
best_path[n_obs - 1] = logV[:, n_obs - 1].argmax() # last state
for t in xrange(n_obs - 1, 0, -1): # states of (last-1)th to 0th time step
best_path[t - 1] = path[best_path[t], t]
return best_path, path, logV
def read_corpus(file_id):
"""
Read a corpus in a CLL file format with "words" and "pos" columns
:param file_id:
:return:
"""
f = open(file_id)
lines = f.readlines()
f.close()
words = [] # List of words in corpus
tags = [] # List of tags corresponding to each word
n_sents = 0 # Sentences are separated by a empty string
sents = [[]] # List of sentences. Each sentence is a list of words
t_sents = [[]] # List of corresponding tags for each word in sentences.
for line in lines:
split = line.split()
if len(split) == 2:
words.append(split[0])
tags.append(split[1])
sents[n_sents].append(split[0])
t_sents[n_sents].append(split[1])
else:
if sents[n_sents] != []:
n_sents += 1
sents.append([])
t_sents.append([])
words = np.array(words)
tags = np.array(tags)
if sents[-1] == []:
sents = sents[:-1]
t_sents = t_sents[:-1]
sents = np.array(sents)
t_sents = np.array(t_sents)
return words, tags, sents, t_sents
def read_words(file_id):
"""
Read a corpus in a CLL file format with only "words" column
:param file_id:
:return:
"""
f = open(file_id)
lines = f.readlines()
f.close()
words = []
n_sents = 0
sents = [[]]
for line in lines:
line = line.strip()
if line:
words.append(line)
sents[n_sents].append(line)
else:
if sents[n_sents] != []:
n_sents += 1
sents.append([])
words = np.array(words)
if sents[-1] == []:
sents = sents[:-1]
sents = np.array(sents)
return words, sents
def write_corpus(file_id, sents, t_sents):
"""
Writes a Corpus in CLL file format, with "words" and "pos" columns.
Inserts a empty line between sentences.
:param file_id:
:return:
"""
f = codecs.open(file_id, "w", encoding='utf-8')
for i, sent in enumerate(sents):
for j, word in enumerate(sent):
f.write("{}\t{}\n".format(word.decode('utf-8'), t_sents[i][j]))
f.write("\n")
f.close()
def where_in_states(values, states):
"""
Return a flat array of indexes of occurrences of values array in
states array.
:param values:
:param states:
:return:
"""
return np.array([np.where(states == i) for i in values]).flatten()
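# Illustrative helper added for clarity (not part of the original script):
# each observation is mapped to its index in the state alphabet.
def _demo_where_in_states():
    states = np.array(['A', 'C', 'G', 'T'])
    assert where_in_states(np.array(['C', 'A']), states).tolist() == [1, 0]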
def testing_viterbi():
"""
Example taken from Borodovsky & Ekisheva (2006), pp 80-81
:return:
"""
states = np.array(['H', 'L'])
i_states = np.arange(0, states.size)
obs = np.array(['G', 'G', 'C', 'A', 'C', 'T', 'G', 'A', 'A'])
obs_states = np.array(['A', 'C', 'G', 'T'])
i_obs = where_in_states(obs, obs_states)
start_p = np.array([0.5, 0.5]).reshape((states.size, 1))
trans_p = np.array([[0.5, 0.5],
[0.4, 0.6]])
emit_p = np.array([[0.2, 0.3, 0.3, 0.2],
[0.3, 0.2, 0.2, 0.3]])
lstart_p = np.log(start_p)
ltrans_p = np.log(trans_p)
lemit_p = np.log(emit_p)
best_path, path, logV = viterbi(i_obs, i_states, lstart_p, ltrans_p, lemit_p)
print(states[best_path])
print(states[path])
print(logV)
def bigrams(array):
"""
Returns an array of bigrams given a 1D array of words or tags.
:param array:
:return:
"""
return np.array([(array[i:i+2]) for i in xrange(len(array) - 1)])
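# Illustrative helper added for clarity (not part of the original script):
# bigrams() yields every pair of consecutive items.
def _demo_bigrams():
    tags = np.array(['DET', 'NOUN', 'VERB'])
    assert bigrams(tags).tolist() == [['DET', 'NOUN'], ['NOUN', 'VERB']]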
def train(file_id):
"""
Estimate HMM model parameters using maximum likelihood method, i.e.
Calculating relative frequency distributions.
:param file_id: tagged corpus file in CLL format
:return:
start_p: frequency of tags of first word in each sentence.
array POS_STATES.size
trans_p: frequency of tags from one state to another for each bigram.
matrix POS_STATES.size x POS_STATES.size
emit_p: frequency of words for each tag.
matrix POS_STATES.size x unique_words.size
unique_words: array of unique words in corpus
"""
# read corpus data
words, tags, sents, t_sents = read_corpus(file_id)
t_bigrams = bigrams(tags)
# Calculate frequency of tags of first word in each sentence.
t_first = [t_sent[0] for t_sent in t_sents]
start_f = np.zeros(POS_STATES.size, dtype=np.int)
start_f = pd.DataFrame(start_f)
start_f.index = POS_STATES
for tag in t_first:
start_f.loc[tag, 0] += 1
# Calculate frequency between states in bigrams
trans_f = np.zeros((POS_STATES.size, POS_STATES.size), dtype=np.int)
trans_f = pd.DataFrame(trans_f)
trans_f.index = POS_STATES
trans_f.columns = POS_STATES
for i, j in t_bigrams:
trans_f.loc[i, j] += 1
# Calculate frequency of each word by tag
unique_words = np.unique(words)
emit_f = np.zeros((POS_STATES.size, unique_words.size), dtype=np.int)
emit_f = pd.DataFrame(emit_f)
emit_f.index = POS_STATES
emit_f.columns = unique_words
for tag, word in zip(tags, words):
emit_f.loc[tag, word] += 1
return start_f.values, trans_f.values, emit_f.values, unique_words
def freq2prob(start_f, trans_f, emit_f):
"""
Convert frequencies in probabilities
:param start_f:
:param trans_f:
:param emit_f:
:return:
"""
start_p = np.zeros(start_f.shape)
start_p = start_f / sum(start_f)
trans_p = np.zeros(trans_f.shape)
for i in xrange(POS_STATES.size):
trans_p[i, :] = trans_f[i, :] / np.sum(trans_f[i, :])
emit_p = np.zeros(emit_f.shape)
for i in xrange(POS_STATES.size):
emit_p[i, :] = emit_f[i, :] / np.sum(emit_f[i, :])
return start_p, trans_p, emit_p
def generate_model(file_id, model_id):
"""
Estimate model form data given in file_id, and save parameters in
model_id file.
:return:
"""
start_f, trans_f, emit_f, obs_states = train(file_id)
np.savez(model_id, start_f=start_f, trans_f=trans_f,
emit_f=emit_f, states=POS_STATES, obs_states=obs_states)
def add_one_smoothing(emit_f, obs_states, words):
"""
Assign frequency of one to each new word that doesn't appeared on train
data.
:param emit_p:
:param obs_states:
:param: words
:return:
"""
new_words = []
for word in words:
if not(word in obs_states) and not(word in new_words):
new_words.append(word)
obs_states = np.append(obs_states, new_words)
new_words_f = np.zeros((emit_f.shape[0], len(new_words)))
emit_f = np.append(emit_f, new_words_f, axis=1)
emit_f += 1 # Add one!
return emit_f, obs_states
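# Illustrative helper added for clarity (not part of the original script):
# unseen words get one extra column and every count is incremented by one.
def _demo_add_one_smoothing():
    emit_f = np.array([[2, 0], [0, 3]])
    obs_states = np.array(['cat', 'dog'])
    words = np.array(['cat', 'fish'])
    new_emit_f, new_obs = add_one_smoothing(emit_f, obs_states, words)
    assert new_obs.tolist() == ['cat', 'dog', 'fish']
    assert new_emit_f.tolist() == [[3, 1, 1], [1, 4, 1]]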
def load_model(model_id):
"""
:param model_id:
:return:
"""
model = np.load("{}.npz".format(model_id))
start_f = model["start_f"]
trans_f = model["trans_f"]
emit_f = model["emit_f"]
obs_states = model["obs_states"]
return start_f, trans_f, emit_f, obs_states
def evaluate_model(file_id, start_f, trans_f, emit_f, obs_states, smooth):
"""
Evaluate model in model_id for corpus given in file_id and generate
output_id file of ConLL file format.
:param file_id: eval corpus file in CLL format, without tags
:param model_id: hmm model in npz format
:param output_id: result corpus file in CLL format
:return:
Generate new corpus file output_id in CLL format.
"""
words, sents = read_words(file_id)
i_states = np.arange(0, POS_STATES.size)
emit_f, obs_states = smooth(emit_f, obs_states, words)
start_p, trans_p, emit_p = freq2prob(start_f, trans_f, emit_f)
lstart_p = np.log(start_p.reshape((start_p.size, 1)))
ltrans_p = np.log(trans_p)
lemit_p = np.log(emit_p)
# For each sentence as observations, obtain tags using viterbi
t_sents = []
for sent in sents:
i_obs = where_in_states(sent, obs_states)
best_path, path, logV = viterbi(i_obs, i_states,
lstart_p, ltrans_p, lemit_p)
t_sents.append(POS_STATES[best_path].tolist())
return sents, t_sents
| gpl-2.0 | -3,219,382,188,687,192,000 | 30.214724 | 86 | 0.592571 | false |
TomAugspurger/pandas | pandas/tests/arrays/boolean/test_construction.py | 1 | 12938 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import BooleanArray
from pandas.core.arrays.boolean import coerce_to_array
@pytest.fixture
def data():
return pd.array(
[True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],
dtype="boolean",
)
def test_boolean_array_constructor():
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(values, mask)
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
with pytest.raises(TypeError, match="values should be boolean numpy array"):
BooleanArray(values.tolist(), mask)
with pytest.raises(TypeError, match="mask should be boolean numpy array"):
BooleanArray(values, mask.tolist())
with pytest.raises(TypeError, match="values should be boolean numpy array"):
BooleanArray(values.astype(int), mask)
with pytest.raises(TypeError, match="mask should be boolean numpy array"):
BooleanArray(values, None)
with pytest.raises(ValueError, match="values must be a 1D array"):
BooleanArray(values.reshape(1, -1), mask)
with pytest.raises(ValueError, match="mask must be a 1D array"):
BooleanArray(values, mask.reshape(1, -1))
def test_boolean_array_constructor_copy():
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(values, mask)
assert result._data is values
assert result._mask is mask
result = BooleanArray(values, mask, copy=True)
assert result._data is not values
assert result._mask is not mask
def test_to_boolean_array():
expected = BooleanArray(
np.array([True, False, True]), np.array([False, False, False])
)
result = pd.array([True, False, True], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([True, False, True]), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([True, False, True], dtype=object), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
# with missing values
expected = BooleanArray(
np.array([True, False, True]), np.array([False, False, True])
)
result = pd.array([True, False, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([True, False, None], dtype=object), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_all_none():
expected = BooleanArray(np.array([True, True, True]), np.array([True, True, True]))
result = pd.array([None, None, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([None, None, None], dtype=object), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"a, b",
[
([True, False, None, np.nan, pd.NA], [True, False, None, None, None]),
([True, np.nan], [True, None]),
([True, pd.NA], [True, None]),
([np.nan, np.nan], [None, None]),
(np.array([np.nan, np.nan], dtype=float), [None, None]),
],
)
def test_to_boolean_array_missing_indicators(a, b):
result = pd.array(a, dtype="boolean")
expected = pd.array(b, dtype="boolean")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"values",
[
["foo", "bar"],
["1", "2"],
# "foo",
[1, 2],
[1.0, 2.0],
pd.date_range("20130101", periods=2),
np.array(["foo"]),
np.array([1, 2]),
np.array([1.0, 2.0]),
[np.nan, {"a": 1}],
],
)
def test_to_boolean_array_error(values):
# error in converting existing arrays to BooleanArray
msg = "Need to pass bool-like value"
with pytest.raises(TypeError, match=msg):
pd.array(values, dtype="boolean")
def test_to_boolean_array_from_integer_array():
result = pd.array(np.array([1, 0, 1, 0]), dtype="boolean")
expected = pd.array([True, False, True, False], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
# with missing values
result = pd.array(np.array([1, 0, 1, None]), dtype="boolean")
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_from_float_array():
result = pd.array(np.array([1.0, 0.0, 1.0, 0.0]), dtype="boolean")
expected = pd.array([True, False, True, False], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
# with missing values
result = pd.array(np.array([1.0, 0.0, 1.0, np.nan]), dtype="boolean")
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_integer_like():
# integers of 0's and 1's
result = pd.array([1, 0, 1, 0], dtype="boolean")
expected = pd.array([True, False, True, False], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
# with missing values
result = pd.array([1, 0, 1, None], dtype="boolean")
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_coerce_to_array():
# TODO this is currently not public API
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(*coerce_to_array(values, mask=mask))
expected = BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
assert result._data is values
assert result._mask is mask
result = BooleanArray(*coerce_to_array(values, mask=mask, copy=True))
expected = BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
assert result._data is not values
assert result._mask is not mask
# mixed missing from values and mask
values = [True, False, None, False]
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(*coerce_to_array(values, mask=mask))
expected = BooleanArray(
np.array([True, False, True, True]), np.array([False, False, True, True])
)
tm.assert_extension_array_equal(result, expected)
result = BooleanArray(*coerce_to_array(np.array(values, dtype=object), mask=mask))
tm.assert_extension_array_equal(result, expected)
result = BooleanArray(*coerce_to_array(values, mask=mask.tolist()))
tm.assert_extension_array_equal(result, expected)
# raise errors for wrong dimension
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
with pytest.raises(ValueError, match="values must be a 1D list-like"):
coerce_to_array(values.reshape(1, -1))
with pytest.raises(ValueError, match="mask must be a 1D list-like"):
coerce_to_array(values, mask=mask.reshape(1, -1))
def test_coerce_to_array_from_boolean_array():
# passing BooleanArray to coerce_to_array
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
arr = BooleanArray(values, mask)
result = BooleanArray(*coerce_to_array(arr))
tm.assert_extension_array_equal(result, arr)
# no copy
assert result._data is arr._data
assert result._mask is arr._mask
result = BooleanArray(*coerce_to_array(arr), copy=True)
tm.assert_extension_array_equal(result, arr)
assert result._data is not arr._data
assert result._mask is not arr._mask
with pytest.raises(ValueError, match="cannot pass mask for BooleanArray input"):
coerce_to_array(arr, mask=mask)
def test_coerce_to_numpy_array():
# with missing values -> object dtype
arr = pd.array([True, False, None], dtype="boolean")
result = np.array(arr)
expected = np.array([True, False, pd.NA], dtype="object")
tm.assert_numpy_array_equal(result, expected)
# also with no missing values -> object dtype
arr = pd.array([True, False, True], dtype="boolean")
result = np.array(arr)
expected = np.array([True, False, True], dtype="object")
tm.assert_numpy_array_equal(result, expected)
# force bool dtype
result = np.array(arr, dtype="bool")
expected = np.array([True, False, True], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
# with missing values will raise error
arr = pd.array([True, False, None], dtype="boolean")
msg = (
"cannot convert to 'bool'-dtype NumPy array with missing values. "
"Specify an appropriate 'na_value' for this dtype."
)
with pytest.raises(ValueError, match=msg):
np.array(arr, dtype="bool")
def test_to_boolean_array_from_strings():
result = BooleanArray._from_sequence_of_strings(
np.array(["True", "False", np.nan], dtype=object)
)
expected = BooleanArray(
np.array([True, False, False]), np.array([False, False, True])
)
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_from_strings_invalid_string():
with pytest.raises(ValueError, match="cannot be cast"):
BooleanArray._from_sequence_of_strings(["donkey"])
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
def test_to_numpy(box):
con = pd.Series if box else pd.array
# default (with or without missing values) -> object dtype
arr = con([True, False, True], dtype="boolean")
result = arr.to_numpy()
expected = np.array([True, False, True], dtype="object")
tm.assert_numpy_array_equal(result, expected)
arr = con([True, False, None], dtype="boolean")
result = arr.to_numpy()
expected = np.array([True, False, pd.NA], dtype="object")
tm.assert_numpy_array_equal(result, expected)
arr = con([True, False, None], dtype="boolean")
result = arr.to_numpy(dtype="str")
expected = np.array([True, False, pd.NA], dtype="<U5")
tm.assert_numpy_array_equal(result, expected)
# no missing values -> can convert to bool, otherwise raises
arr = con([True, False, True], dtype="boolean")
result = arr.to_numpy(dtype="bool")
expected = np.array([True, False, True], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
arr = con([True, False, None], dtype="boolean")
with pytest.raises(ValueError, match="cannot convert to 'bool'-dtype"):
result = arr.to_numpy(dtype="bool")
# specify dtype and na_value
arr = con([True, False, None], dtype="boolean")
result = arr.to_numpy(dtype=object, na_value=None)
expected = np.array([True, False, None], dtype="object")
tm.assert_numpy_array_equal(result, expected)
result = arr.to_numpy(dtype=bool, na_value=False)
expected = np.array([True, False, False], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
result = arr.to_numpy(dtype="int64", na_value=-99)
expected = np.array([1, 0, -99], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = arr.to_numpy(dtype="float64", na_value=np.nan)
expected = np.array([1, 0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# converting to int or float without specifying na_value raises
with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype"):
arr.to_numpy(dtype="int64")
with pytest.raises(ValueError, match="cannot convert to 'float64'-dtype"):
arr.to_numpy(dtype="float64")
def test_to_numpy_copy():
# to_numpy can be zero-copy if no missing values
arr = pd.array([True, False, True], dtype="boolean")
result = arr.to_numpy(dtype=bool)
result[0] = False
tm.assert_extension_array_equal(
arr, pd.array([False, False, True], dtype="boolean")
)
arr = pd.array([True, False, True], dtype="boolean")
result = arr.to_numpy(dtype=bool, copy=True)
result[0] = False
tm.assert_extension_array_equal(arr, pd.array([True, False, True], dtype="boolean"))
# FIXME: don't leave commented out
# TODO when BooleanArray coerces to object dtype numpy array, need to do conversion
# manually in the indexing code
# def test_indexing_boolean_mask():
# arr = pd.array([1, 2, 3, 4], dtype="Int64")
# mask = pd.array([True, False, True, False], dtype="boolean")
# result = arr[mask]
# expected = pd.array([1, 3], dtype="Int64")
# tm.assert_extension_array_equal(result, expected)
# # missing values -> error
# mask = pd.array([True, False, True, None], dtype="boolean")
# with pytest.raises(IndexError):
# result = arr[mask]
| bsd-3-clause | -719,118,255,221,504,500 | 36.393064 | 88 | 0.655588 | false |
YongseopKim/crosswalk-test-suite | wrt/wrt-packertool2-android-tests/packertool2/projectonlytest.py | 1 | 3008 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Hongjuan, Wang<[email protected]>
import unittest
import os, sys, commands
import comm
class TestPackertoolsFunctions(unittest.TestCase):
def test_projectonly1(self):
comm.setUp()
os.chdir("testapp/example")
cmd = "python %smake_apk.py --package=org.xwalk.example --arch=%s --mode=%s --manifest=./manifest.json --project-only" % \
(comm.Pck_Tools, comm.ARCH, comm.MODE)
packstatus = commands.getstatusoutput(cmd)
errormsg = "--project-only must be used with --project-dir"
self.assertNotEqual(packstatus[0] ,0)
self.assertIn(errormsg, packstatus[1])
def test_projectonly2(self):
comm.setUp()
cmd = "python %smake_apk.py --package=org.xwalk.example --arch=%s --mode=%s --manifest=./manifest.json --project-only --project-dir=example" % \
(comm.Pck_Tools, comm.ARCH, comm.MODE)
packstatus = commands.getstatusoutput(cmd)
self.assertEqual(packstatus[0] ,0)
resultstatus = commands.getstatusoutput("ls")
self.assertNotIn("Example.apk", resultstatus[1])
self.assertIn("example", resultstatus[1])
if os.path.exists(comm.ConstPath + "/../testapp/example/example"):
try:
shutil.rmtree(comm.ConstPath + "/../testapp/example/example")
except Exception,e:
os.system("rm -rf " + comm.ConstPath + "/../testapp/example/example")
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -4,399,023,799,283,816,400 | 47.516129 | 152 | 0.702793 | false |
suminb/translator | tests/test_translate_api.py | 1 | 4303 | # -*- coding: utf-8 -*-
from translator.api import translate, HTTPException
import pytest
import json
def test_translate_1():
"""Tests translation where source language and target language are
identical."""
actual = translate('This is a test', '1', 'en', 'en')['translated_text']
expected = 'This is a test'
assert actual == expected
def test_translate_2():
"""Tests translation where an empty text is given."""
with pytest.raises(HTTPException):
translate('', '1', 'en', 'ko')
def test_translate_3():
"""Tests translation where an invalid source language is given."""
with pytest.raises(HTTPException):
translate('The cake was a lie', '1', 'unknown', 'ko')
def test_translate_4():
"""Tests translation where an invalid target language is given."""
with pytest.raises(HTTPException):
translate('The cake was a lie', '1', 'en', 'unknown')
def _test_translate_v1_1(testapp):
params = dict(
t=u'도요타는 일본의 자동차 제조사이다.',
m=1, sl='ko', tl='en')
resp = testapp.post('/v1.1/translate', data=params)
assert resp.status_code == 200
t = json.loads(resp.get_data(as_text=True))
assert 'Toyota' in t['translated_text']
assert 'Japan' in t['translated_text']
def _test_translate_6(testapp):
params = dict(
t=u'구글은 세계 정복을 꿈꾸고 있다.',
m=1, sl='ko', tl='en')
req = testapp.post('/v1.2/translate', data=params)
assert req.status_code == 200
t = json.loads(req.get_data(as_text=True))
tt = t['translated_text']
assert ('Google' in tt) or ('We' in tt) or ('I' in tt)
assert 'dream' in tt
assert 'world' in tt
def _test_translate_7(testapp):
params = dict(
t=u'Coca Cola is one of the most widely known brand names.',
m=2, sl='en', tl='ko')
resp = testapp.post('/v1.2/translate', data=params)
assert resp.status_code == 200
data = json.loads(resp.get_data(as_text=True))
tt = data['translated_text']
assert u'코카콜라' in tt
assert u'가장' in tt
assert u'브랜드' in tt
def test_translate_v1_3_1(testapp):
params = {
'text': 'Python can be easy to pick up whether you\'re a first time '
'programmer or you\'re experienced with other languages.',
'source': 'en',
'target': 'es',
}
resp = testapp.post('/api/v1.3/translate', data=params)
assert resp.status_code == 200
resp_data = json.loads(resp.get_data(as_text=True))
assert resp_data['sentences']
sentences = ' '.join([x['trans'] for x in resp_data['sentences']])
assert 'Python' in sentences
assert 'programador' in sentences
assert 'experiencia' in sentences
assert 'otros' in sentences
def test_translate_v1_4_1(testapp):
text = '''
AdSense publishers may not display Google ads on pages with content protected
by copyright law unless they have the necessary legal rights to display that
content. This includes pages that display copyrighted material, pages hosting
copyrighted files, or pages that provide links driving traffic to pages that
contain copyrighted material.
It is our policy to respond to notices of alleged infringement that comply with
the Digital Millennium Copyright Act (DMCA). For AdSense publishers, if we
receive a notice or otherwise have reason to believe that your page is
infringing, we may terminate your participation in the program. You can file a
counter-notification via this form. More information about our DMCA process is
available in this blog post.
If you believe that a page which is participating in the AdSense program is
displaying your copyrighted material without the rights to do so, please report
it using this form or by clicking on the AdChoices icon AdChoices icon.
'''
params = {
'text': text,
'source': 'en',
'target': 'fr',
}
resp = testapp.post('/api/v1.4/translate', data=params)
assert resp.status_code == 200
resp_data = resp.data.decode('utf-8')
assert 'AdSense' in resp_data
assert 'AdChoices' in resp_data
assert 'Digital Millennium Copyright' in resp_data
assert 'diteurs' in resp_data
assert 'politique' in resp_data
assert 'programme' in resp_data
| gpl-3.0 | -1,596,437,705,747,291,600 | 29.207143 | 79 | 0.665878 | false |
alvinwan/tex2py | setup.py | 1 | 1510 | import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
install_requires = ['TexSoup==0.1.4', 'pptree==2.0']
tests_require = ['pytest', 'pytest-cov==2.5.1', 'coverage == 3.7.1', 'coveralls == 1.1']
# hack
install_requires = install_requires + tests_require
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
VERSION = '0.0.5'
setup(
name = "tex2py",
version = VERSION,
author = "Alvin Wan",
author_email = '[email protected]',
description = ("utility converting latex into parse tree in Python"),
license = "BSD",
url = "http://github.com/alvinwan/tex2py",
packages = ['tex2py'],
cmdclass = {'test': PyTest},
tests_require = tests_require,
install_requires = install_requires + tests_require,
download_url = 'https://github.com/alvinwan/tex2py/archive/%s.zip' % VERSION,
classifiers = [
"Topic :: Utilities",
"Topic :: Utilities",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries",
],
)
| bsd-2-clause | -4,880,245,095,279,423,000 | 29.816327 | 88 | 0.635762 | false |
pepetreshere/odoo | addons/website/models/website_visitor.py | 2 | 15826 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
import uuid
import pytz
from odoo import fields, models, api, _
from odoo.addons.base.models.res_partner import _tz_get
from odoo.exceptions import UserError
from odoo.tools.misc import _format_time_ago
from odoo.http import request
from odoo.osv import expression
class WebsiteTrack(models.Model):
_name = 'website.track'
_description = 'Visited Pages'
_order = 'visit_datetime DESC'
_log_access = False
visitor_id = fields.Many2one('website.visitor', ondelete="cascade", index=True, required=True, readonly=True)
page_id = fields.Many2one('website.page', index=True, ondelete='cascade', readonly=True)
url = fields.Text('Url', index=True)
visit_datetime = fields.Datetime('Visit Date', default=fields.Datetime.now, required=True, readonly=True)
class WebsiteVisitor(models.Model):
_name = 'website.visitor'
_description = 'Website Visitor'
_order = 'last_connection_datetime DESC'
name = fields.Char('Name')
access_token = fields.Char(required=True, default=lambda x: uuid.uuid4().hex, index=False, copy=False, groups='base.group_website_publisher')
active = fields.Boolean('Active', default=True)
website_id = fields.Many2one('website', "Website", readonly=True)
partner_id = fields.Many2one('res.partner', string="Linked Partner", help="Partner of the last logged in user.")
partner_image = fields.Binary(related='partner_id.image_1920')
# localisation and info
country_id = fields.Many2one('res.country', 'Country', readonly=True)
country_flag = fields.Char(related="country_id.image_url", string="Country Flag")
lang_id = fields.Many2one('res.lang', string='Language', help="Language from the website when visitor has been created")
timezone = fields.Selection(_tz_get, string='Timezone')
email = fields.Char(string='Email', compute='_compute_email_phone')
mobile = fields.Char(string='Mobile Phone', compute='_compute_email_phone')
# Visit fields
visit_count = fields.Integer('Number of visits', default=1, readonly=True, help="A new visit is considered if last connection was more than 8 hours ago.")
website_track_ids = fields.One2many('website.track', 'visitor_id', string='Visited Pages History', readonly=True)
visitor_page_count = fields.Integer('Page Views', compute="_compute_page_statistics", help="Total number of visits on tracked pages")
page_ids = fields.Many2many('website.page', string="Visited Pages", compute="_compute_page_statistics")
page_count = fields.Integer('# Visited Pages', compute="_compute_page_statistics", help="Total number of tracked page visited")
last_visited_page_id = fields.Many2one('website.page', string="Last Visited Page", compute="_compute_last_visited_page_id")
# Time fields
create_date = fields.Datetime('First connection date', readonly=True)
last_connection_datetime = fields.Datetime('Last Connection', default=fields.Datetime.now, help="Last page view date", readonly=True)
time_since_last_action = fields.Char('Last action', compute="_compute_time_statistics", help='Time since last page view. E.g.: 2 minutes ago')
is_connected = fields.Boolean('Is connected ?', compute='_compute_time_statistics', help='A visitor is considered as connected if his last page view was within the last 5 minutes.')
_sql_constraints = [
('access_token_unique', 'unique(access_token)', 'Access token should be unique.'),
('partner_uniq', 'unique(partner_id)', 'A partner is linked to only one visitor.'),
]
@api.depends('name')
def name_get(self):
return [(
record.id,
(record.name or _('Website Visitor #%s', record.id))
) for record in self]
@api.depends('partner_id.email_normalized', 'partner_id.mobile', 'partner_id.phone')
def _compute_email_phone(self):
results = self.env['res.partner'].search_read(
[('id', 'in', self.partner_id.ids)],
['id', 'email_normalized', 'mobile', 'phone'],
)
mapped_data = {
result['id']: {
'email_normalized': result['email_normalized'],
'mobile': result['mobile'] if result['mobile'] else result['phone']
} for result in results
}
for visitor in self:
visitor.email = mapped_data.get(visitor.partner_id.id, {}).get('email_normalized')
visitor.mobile = mapped_data.get(visitor.partner_id.id, {}).get('mobile')
@api.depends('website_track_ids')
def _compute_page_statistics(self):
results = self.env['website.track'].read_group(
[('visitor_id', 'in', self.ids), ('url', '!=', False)], ['visitor_id', 'page_id', 'url'], ['visitor_id', 'page_id', 'url'], lazy=False)
mapped_data = {}
for result in results:
visitor_info = mapped_data.get(result['visitor_id'][0], {'page_count': 0, 'visitor_page_count': 0, 'page_ids': set()})
visitor_info['visitor_page_count'] += result['__count']
visitor_info['page_count'] += 1
if result['page_id']:
visitor_info['page_ids'].add(result['page_id'][0])
mapped_data[result['visitor_id'][0]] = visitor_info
for visitor in self:
visitor_info = mapped_data.get(visitor.id, {'page_count': 0, 'visitor_page_count': 0, 'page_ids': set()})
visitor.page_ids = [(6, 0, visitor_info['page_ids'])]
visitor.visitor_page_count = visitor_info['visitor_page_count']
visitor.page_count = visitor_info['page_count']
@api.depends('website_track_ids.page_id')
def _compute_last_visited_page_id(self):
results = self.env['website.track'].read_group([('visitor_id', 'in', self.ids)],
['visitor_id', 'page_id', 'visit_datetime:max'],
['visitor_id', 'page_id'], lazy=False)
mapped_data = {result['visitor_id'][0]: result['page_id'][0] for result in results if result['page_id']}
for visitor in self:
visitor.last_visited_page_id = mapped_data.get(visitor.id, False)
@api.depends('last_connection_datetime')
def _compute_time_statistics(self):
for visitor in self:
visitor.time_since_last_action = _format_time_ago(self.env, (datetime.now() - visitor.last_connection_datetime))
visitor.is_connected = (datetime.now() - visitor.last_connection_datetime) < timedelta(minutes=5)
def _check_for_message_composer(self):
""" Purpose of this method is to actualize visitor model prior to contacting
him. Used notably for inheritance purpose, when dealing with leads that
could update the visitor model. """
return bool(self.partner_id and self.partner_id.email)
def _prepare_message_composer_context(self):
return {
'default_model': 'res.partner',
'default_res_id': self.partner_id.id,
'default_partner_ids': [self.partner_id.id],
}
def action_send_mail(self):
self.ensure_one()
if not self._check_for_message_composer():
raise UserError(_("There are no contact and/or no email linked to this visitor."))
visitor_composer_ctx = self._prepare_message_composer_context()
compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)
compose_ctx = dict(
default_use_template=False,
default_composition_mode='comment',
)
compose_ctx.update(**visitor_composer_ctx)
return {
'name': _('Contact Visitor'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form.id, 'form')],
'view_id': compose_form.id,
'target': 'new',
'context': compose_ctx,
}
def _get_visitor_from_request(self, force_create=False):
""" Return the visitor as sudo from the request if there is a visitor_uuid cookie.
It is possible that the partner has changed or has disconnected.
In that case the cookie is still referencing the old visitor and need to be replaced
with the one of the visitor returned !!!. """
# This function can be called in json with mobile app.
# In case of mobile app, no uid is set on the jsonRequest env.
# In case of multi db, _env is None on request, and request.env unbound.
if not request:
return None
Visitor = self.env['website.visitor'].sudo()
visitor = Visitor
access_token = request.httprequest.cookies.get('visitor_uuid')
if access_token:
visitor = Visitor.with_context(active_test=False).search([('access_token', '=', access_token)])
# Prefetch access_token and other fields. Since access_token has a restricted group and we access
# a non restricted field (partner_id) first it is not fetched and will require an additional query to be retrieved.
visitor.access_token
if not self.env.user._is_public():
partner_id = self.env.user.partner_id
if not visitor or visitor.partner_id and visitor.partner_id != partner_id:
# Partner and no cookie or wrong cookie
visitor = Visitor.with_context(active_test=False).search([('partner_id', '=', partner_id.id)])
elif visitor and visitor.partner_id:
# Cookie associated to a Partner
visitor = Visitor
if visitor and not visitor.timezone:
tz = self._get_visitor_timezone()
if tz:
visitor.timezone = tz
if not visitor and force_create:
visitor = self._create_visitor()
return visitor
def _handle_webpage_dispatch(self, response, website_page):
# get visitor. Done here to avoid having to do it multiple times in case of override.
visitor_sudo = self._get_visitor_from_request(force_create=True)
if request.httprequest.cookies.get('visitor_uuid', '') != visitor_sudo.access_token:
expiration_date = datetime.now() + timedelta(days=365)
response.set_cookie('visitor_uuid', visitor_sudo.access_token, expires=expiration_date)
self._handle_website_page_visit(website_page, visitor_sudo)
def _handle_website_page_visit(self, website_page, visitor_sudo):
""" Called on dispatch. This will create a website.visitor if the http request object
is a tracked website page or a tracked view. Only on tracked elements to avoid having
too much operations done on every page or other http requests.
Note: The side effect is that the last_connection_datetime is updated ONLY on tracked elements."""
url = request.httprequest.url
website_track_values = {
'url': url,
'visit_datetime': datetime.now(),
}
if website_page:
website_track_values['page_id'] = website_page.id
domain = [('page_id', '=', website_page.id)]
else:
domain = [('url', '=', url)]
visitor_sudo._add_tracking(domain, website_track_values)
if visitor_sudo.lang_id.id != request.lang.id:
visitor_sudo.write({'lang_id': request.lang.id})
def _add_tracking(self, domain, website_track_values):
""" Add the track and update the visitor"""
domain = expression.AND([domain, [('visitor_id', '=', self.id)]])
last_view = self.env['website.track'].sudo().search(domain, limit=1)
if not last_view or last_view.visit_datetime < datetime.now() - timedelta(minutes=30):
website_track_values['visitor_id'] = self.id
self.env['website.track'].create(website_track_values)
self._update_visitor_last_visit()
def _create_visitor(self):
""" Create a visitor. Tracking is added after the visitor has been created."""
country_code = request.session.get('geoip', {}).get('country_code', False)
country_id = request.env['res.country'].sudo().search([('code', '=', country_code)], limit=1).id if country_code else False
vals = {
'lang_id': request.lang.id,
'country_id': country_id,
'website_id': request.website.id,
}
tz = self._get_visitor_timezone()
if tz:
vals['timezone'] = tz
if not self.env.user._is_public():
vals['partner_id'] = self.env.user.partner_id.id
vals['name'] = self.env.user.partner_id.name
return self.sudo().create(vals)
def _link_to_partner(self, partner, update_values=None):
""" Link visitors to a partner. This method is meant to be overridden in
order to propagate, if necessary, partner information to sub records.
:param partner: partner used to link sub records;
:param update_values: optional values to update visitors to link;
"""
vals = {'name': partner.name}
if update_values:
vals.update(update_values)
self.write(vals)
def _link_to_visitor(self, target, keep_unique=True):
""" Link visitors to target visitors, because they are linked to the
same identity. Purpose is mainly to propagate partner identity to sub
records to ease database update and decide what to do with "duplicated".
THis method is meant to be overridden in order to implement some specific
behavior linked to sub records of duplicate management.
:param target: main visitor, target of link process;
:param keep_unique: if True, find a way to make target unique;
"""
# Link sub records of self to target partner
if target.partner_id:
self._link_to_partner(target.partner_id)
# Link sub records of self to target visitor
self.website_track_ids.write({'visitor_id': target.id})
if keep_unique:
self.unlink()
return target
def _cron_archive_visitors(self):
delay_days = int(self.env['ir.config_parameter'].sudo().get_param('website.visitor.live.days', 30))
deadline = datetime.now() - timedelta(days=delay_days)
visitors_to_archive = self.env['website.visitor'].sudo().search([('last_connection_datetime', '<', deadline)])
visitors_to_archive.write({'active': False})
def _update_visitor_last_visit(self):
""" We need to do this part here to avoid concurrent updates error. """
try:
with self.env.cr.savepoint():
query_lock = "SELECT * FROM website_visitor where id = %s FOR NO KEY UPDATE NOWAIT"
self.env.cr.execute(query_lock, (self.id,), log_exceptions=False)
date_now = datetime.now()
query = "UPDATE website_visitor SET "
if self.last_connection_datetime < (date_now - timedelta(hours=8)):
query += "visit_count = visit_count + 1,"
query += """
active = True,
last_connection_datetime = %s
WHERE id = %s
"""
self.env.cr.execute(query, (date_now, self.id), log_exceptions=False)
except Exception:
pass
def _get_visitor_timezone(self):
tz = request.httprequest.cookies.get('tz') if request else None
if tz in pytz.all_timezones:
return tz
elif not self.env.user._is_public():
return self.env.user.tz
else:
return None
| agpl-3.0 | -45,463,958,355,934,510 | 48.611285 | 185 | 0.623784 | false |
marco-mariotti/selenoprofiles | libraries/networkx/algorithms/flow/mincost.py | 1 | 26221 | # -*- coding: utf-8 -*-
"""
Minimum cost flow algorithms on directed connected graphs.
"""
__author__ = """Loïc Séguin-C. <[email protected]>"""
# Copyright (C) 2010 Loïc Séguin-C. <[email protected]>
# All rights reserved.
# BSD license.
__all__ = ['network_simplex',
'min_cost_flow_cost',
'min_cost_flow',
'cost_of_flow',
'max_flow_min_cost']
import networkx as nx
def _initial_tree_solution(G, r, demand = 'demand', weight = 'weight'):
"""Find a initial tree solution rooted at r.
The initial tree solution is obtained by considering edges (r, v)
for all nodes v with non-negative demand and (v, r) for all nodes
with negative demand. If these edges do not exist, we add them to
the graph and call them artificial edges.
"""
H = nx.DiGraph(G)
T = nx.DiGraph()
y = {r: 0}
artificialEdges = []
flowCost = 0
n = G.number_of_nodes()
try:
maxWeight = max(abs(d[weight]) for u, v, d in G.edges(data = True)
if weight in d)
except ValueError:
maxWeight = 0
hugeWeight = 1 + n * maxWeight
for v, d in G.nodes(data = True)[1:]:
vDemand = d.get(demand, 0)
if vDemand >= 0:
if not (r, v) in G.edges():
H.add_edge(r, v, {weight: hugeWeight, 'flow': vDemand})
artificialEdges.append((r, v))
else: # (r, v) in G.edges()
H[r][v]['flow'] = vDemand
y[v] = H[r][v].get(weight, 0)
T.add_edge(r, v)
flowCost += vDemand * H[r][v].get(weight, 0)
else: # vDemand < 0
if not (v, r) in G.edges():
H.add_edge(v, r, {weight: hugeWeight, 'flow': -vDemand})
artificialEdges.append((v, r))
else:
H[v][r]['flow'] = -vDemand
y[v] = -H[v][r].get(weight, 0)
T.add_edge(v, r)
flowCost += -vDemand * H[v][r].get(weight, 0)
return H, T, y, artificialEdges, flowCost
def _find_entering_edge(H, c, capacity = 'capacity'):
"""Find an edge which creates a negative cost cycle in the actual
tree solution.
The reduced cost of every edge gives the value of the cycle
obtained by adding that edge to the tree solution. If that value is
negative, we will augment the flow in the direction indicated by
the edge. Otherwise, we will augment the flow in the reverse
direction.
    If no edge is found, return an empty tuple. This will cause the
main loop of the algorithm to terminate.
"""
newEdge = ()
for u, v, d in H.edges_iter(data = True):
if d.get('flow', 0) == 0:
if c[(u, v)] < 0:
newEdge = (u, v)
break
else:
if capacity in d:
if (d.get('flow', 0) == d[capacity]
and c[(u, v)] > 0):
newEdge = (u, v)
break
return newEdge
def _find_leaving_edge(H, T, cycle, newEdge, capacity = 'capacity'):
"""Find an edge that will leave the basis and the value by which we
can increase or decrease the flow on that edge.
The leaving arc rule is used to prevent cycling.
If cycle has no reverse edge and no forward edge of finite
capacity, it means that cycle is a negative cost infinite capacity
cycle. This implies that the cost of a flow satisfying all demands
is unbounded below. An exception is raised in this case.
"""
eps = False
leavingEdge = ()
# Find the forward edge with the minimum value for capacity - 'flow'
# and the reverse edge with the minimum value for 'flow'.
for index, u in enumerate(cycle[:-1]):
edgeCapacity = False
edge = ()
v = cycle[index + 1]
if (u, v) in T.edges() + [newEdge]: #forward edge
if capacity in H[u][v]: # edge (u, v) has finite capacity
edgeCapacity = H[u][v][capacity] - H[u][v].get('flow', 0)
edge = (u, v)
else: #reverse edge
edgeCapacity = H[v][u].get('flow', 0)
edge = (v, u)
# Determine if edge might be the leaving edge.
if edge:
if leavingEdge:
if edgeCapacity < eps:
eps = edgeCapacity
leavingEdge = edge
else:
eps = edgeCapacity
leavingEdge = edge
if not leavingEdge:
raise nx.NetworkXUnbounded(
"Negative cost cycle of infinite capacity found. "
+ "Min cost flow unbounded below.")
return leavingEdge, eps
def _create_flow_dict(G):
"""Creates the flow dict of dicts of graph G."""
flowDict = {}
for u in G.nodes_iter():
if not u in flowDict:
flowDict[u] = {}
for v in G.neighbors(u):
flowDict[u][v] = G[u][v].get('flow', 0)
return flowDict
def network_simplex(G, demand = 'demand', capacity = 'capacity',
weight = 'weight'):
"""Find a minimum cost flow satisfying all demands in digraph G.
This is a primal network simplex algorithm that uses the leaving
arc rule to prevent cycling.
G is a digraph with edge costs and capacities and in which nodes
have demand, i.e., they want to send or receive some amount of
flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node wants to receive flow. A flow on
    the digraph G satisfies all demands if the net flow into each node
is equal to the demand of that node.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
demand: string
Nodes of the graph G are expected to have an attribute demand
that indicates how much flow a node wants to send (negative
demand) or receive (positive demand). Note that the sum of the
        demands should be 0, otherwise the problem is not feasible. If
this attribute is not present, a node is considered to have 0
demand. Default value: 'demand'.
capacity: string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
Returns
-------
flowCost: integer, float
Cost of a minimum cost flow satisfying all demands.
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
flowDict[u][v] is the flow edge (u, v).
Raises
------
NetworkXError
This exception is raised if the input graph is not directed or
not connected.
NetworkXUnfeasible
This exception is raised in the following situations:
* The sum of the demands is not zero. Then, there is no
flow satisfying all demands.
        * There is no flow satisfying all demands.
NetworkXUnbounded
This exception is raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
satisfying all demands is unbounded below.
See also
--------
cost_of_flow, max_flow_min_cost, min_cost_flow, min_cost_flow_cost
Examples
--------
A simple example of a min cost flow problem.
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('a', demand = -5)
>>> G.add_node('d', demand = 5)
>>> G.add_edge('a', 'b', weight = 3, capacity = 4)
>>> G.add_edge('a', 'c', weight = 6, capacity = 10)
>>> G.add_edge('b', 'd', weight = 1, capacity = 9)
>>> G.add_edge('c', 'd', weight = 2, capacity = 5)
>>> flowCost, flowDict = nx.network_simplex(G)
>>> flowCost
24
>>> flowDict
{'a': {'c': 1, 'b': 4}, 'c': {'d': 1}, 'b': {'d': 4}, 'd': {}}
The mincost flow algorithm can also be used to solve shortest path
problems. To find the shortest path between two nodes u and v,
give all edges an infinite capacity, give node u a demand of -1 and
    node v a demand of 1. Then run the network simplex. The value of a
min cost flow will be the distance between u and v and edges
carrying positive flow will indicate the path.
>>> G=nx.DiGraph()
>>> G.add_weighted_edges_from([('s','u',10), ('s','x',5),
... ('u','v',1), ('u','x',2),
... ('v','y',1), ('x','u',3),
... ('x','v',5), ('x','y',2),
... ('y','s',7), ('y','v',6)])
>>> G.add_node('s', demand = -1)
>>> G.add_node('v', demand = 1)
>>> flowCost, flowDict = nx.network_simplex(G)
>>> flowCost == nx.shortest_path_length(G, 's', 'v', weighted = True)
True
>>> [(u, v) for u in flowDict for v in flowDict[u] if flowDict[u][v] > 0]
[('x', 'u'), ('s', 'x'), ('u', 'v')]
>>> nx.shortest_path(G, 's', 'v', weighted = True)
['s', 'x', 'u', 'v']
It is possible to change the name of the attributes used for the
algorithm.
>>> G = nx.DiGraph()
>>> G.add_node('p', spam = -4)
>>> G.add_node('q', spam = 2)
>>> G.add_node('a', spam = -2)
>>> G.add_node('d', spam = -1)
>>> G.add_node('t', spam = 2)
>>> G.add_node('w', spam = 3)
>>> G.add_edge('p', 'q', cost = 7, vacancies = 5)
>>> G.add_edge('p', 'a', cost = 1, vacancies = 4)
>>> G.add_edge('q', 'd', cost = 2, vacancies = 3)
>>> G.add_edge('t', 'q', cost = 1, vacancies = 2)
>>> G.add_edge('a', 't', cost = 2, vacancies = 4)
>>> G.add_edge('d', 'w', cost = 3, vacancies = 4)
>>> G.add_edge('t', 'w', cost = 4, vacancies = 1)
>>> flowCost, flowDict = nx.network_simplex(G, demand = 'spam',
... capacity = 'vacancies',
... weight = 'cost')
>>> flowCost
37
>>> flowDict
{'a': {'t': 4}, 'd': {'w': 2}, 'q': {'d': 1}, 'p': {'q': 2, 'a': 2}, 't': {'q': 1, 'w': 1}, 'w': {}}
References
----------
W. J. Cook, W. H. Cunningham, W. R. Pulleyblank and A. Schrijver.
Combinatorial Optimization. Wiley-Interscience, 1998.
"""
if not G.is_directed():
raise nx.NetworkXError("Undirected graph not supported (yet).")
if not nx.is_connected(G.to_undirected()):
raise nx.NetworkXError("Not connected graph not supported (yet).")
if sum(d[demand] for v, d in G.nodes(data = True)
if demand in d) != 0:
raise nx.NetworkXUnfeasible("Sum of the demands should be 0.")
# Fix an arbitrarily chosen root node and find an initial tree solution.
r = G.nodes()[0]
H, T, y, artificialEdges, flowCost = \
_initial_tree_solution(G, r, demand = demand, weight = weight)
# Initialize the reduced costs.
c = {}
for u, v, d in H.edges_iter(data = True):
c[(u, v)] = d.get(weight, 0) + y[u] - y[v]
# Print stuff for debugging.
# print('-' * 78)
# nbIter = 0
# print('Iteration %d' % nbIter)
# nbIter += 1
# print('Tree solution: %s' % T.edges())
# print(' Edge %11s%10s' % ('Flow', 'Red Cost'))
# for u, v, d in H.edges(data = True):
# flag = ''
# if (u, v) in artificialEdges:
# flag = '*'
# print('(%s, %s)%1s%10d%10d' % (u, v, flag, d.get('flow', 0),
# c[(u, v)]))
# print('Distances: %s' % y)
# Main loop.
while True:
newEdge = _find_entering_edge(H, c, capacity = capacity)
if not newEdge:
break # Optimal basis found. Main loop is over.
cycleCost = abs(c[newEdge])
# Find the cycle created by adding newEdge to T.
path1 = nx.shortest_path(T.to_undirected(), r, newEdge[0])
path2 = nx.shortest_path(T.to_undirected(), r, newEdge[1])
join = r
for index, node in enumerate(path1[1:]):
if index + 1 < len(path2) and node == path2[index + 1]:
join = node
else:
break
path1 = path1[path1.index(join):]
path2 = path2[path2.index(join):]
cycle = []
if H[newEdge[0]][newEdge[1]].get('flow', 0) == 0:
path2.reverse()
cycle = path1 + path2
else: # newEdge is at capacity
path1.reverse()
cycle = path2 + path1
# Find the leaving edge. Will stop here if cycle is an infinite
# capacity negative cost cycle.
leavingEdge, eps = _find_leaving_edge(H, T, cycle, newEdge,
capacity = capacity)
# Actual augmentation happens here. If eps = 0, don't bother.
if eps:
flowCost -= cycleCost * eps
for index, u in enumerate(cycle[:-1]):
v = cycle[index + 1]
if (u, v) in T.edges() + [newEdge]:
H[u][v]['flow'] = H[u][v].get('flow', 0) + eps
else: # (v, u) in T.edges():
H[v][u]['flow'] -= eps
# Update tree solution.
T.add_edge(*newEdge)
T.remove_edge(*leavingEdge)
# Update distances and reduced costs.
if newEdge != leavingEdge:
forest = nx.DiGraph(T)
forest.remove_edge(*newEdge)
R, notR = nx.connected_component_subgraphs(forest.to_undirected())
if r in notR.nodes(): # make sure r is in R
R, notR = notR, R
if newEdge[0] in R.nodes():
for v in notR.nodes():
y[v] += c[newEdge]
else:
for v in notR.nodes():
y[v] -= c[newEdge]
for u, v in H.edges():
if u in notR.nodes() or v in notR.nodes():
c[(u, v)] = H[u][v].get(weight, 0) + y[u] - y[v]
# Print stuff for debugging.
# print('-' * 78)
# print('Iteration %d' % nbIter)
# nbIter += 1
# print('Tree solution: %s' % T.edges())
# print('New edge: (%s, %s)' % (newEdge[0], newEdge[1]))
# print('Leaving edge: (%s, %s)' % (leavingEdge[0], leavingEdge[1]))
# print('Cycle: %s' % cycle)
# print(' Edge %11s%10s' % ('Flow', 'Red Cost'))
# for u, v, d in H.edges(data = True):
# flag = ''
# if (u, v) in artificialEdges:
# flag = '*'
# print('(%s, %s)%1s%10d%10d' % (u, v, flag, d.get('flow', 0),
# c[(u, v)]))
# print('Distances: %s' % y)
# If an artificial edge has positive flow, the initial problem was
# not feasible.
for u, v in artificialEdges:
if H[u][v]['flow'] != 0:
raise nx.NetworkXUnfeasible("No flow satisfying all demands.")
H.remove_edge(u, v)
flowDict = _create_flow_dict(H)
return flowCost, flowDict
def min_cost_flow_cost(G, demand = 'demand', capacity = 'capacity',
weight = 'weight'):
"""Find the cost of a minimum cost flow satisfying all demands in digraph G.
G is a digraph with edge costs and capacities and in which nodes
have demand, i.e., they want to send or receive some amount of
flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node wants to receive flow. A flow on
    the digraph G satisfies all demands if the net flow into each node
is equal to the demand of that node.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
demand: string
Nodes of the graph G are expected to have an attribute demand
that indicates how much flow a node wants to send (negative
demand) or receive (positive demand). Note that the sum of the
        demands should be 0, otherwise the problem is not feasible. If
this attribute is not present, a node is considered to have 0
demand. Default value: 'demand'.
capacity: string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
Returns
-------
flowCost: integer, float
Cost of a minimum cost flow satisfying all demands.
Raises
------
NetworkXError
This exception is raised if the input graph is not directed or
not connected.
NetworkXUnfeasible
This exception is raised in the following situations:
* The sum of the demands is not zero. Then, there is no
flow satisfying all demands.
        * There is no flow satisfying all demands.
NetworkXUnbounded
This exception is raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
satisfying all demands is unbounded below.
See also
--------
cost_of_flow, max_flow_min_cost, min_cost_flow, network_simplex
Examples
--------
A simple example of a min cost flow problem.
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('a', demand = -5)
>>> G.add_node('d', demand = 5)
>>> G.add_edge('a', 'b', weight = 3, capacity = 4)
>>> G.add_edge('a', 'c', weight = 6, capacity = 10)
>>> G.add_edge('b', 'd', weight = 1, capacity = 9)
>>> G.add_edge('c', 'd', weight = 2, capacity = 5)
>>> flowCost = nx.min_cost_flow_cost(G)
>>> flowCost
24
"""
return network_simplex(G, demand = demand, capacity = capacity,
weight = weight)[0]
def min_cost_flow(G, demand = 'demand', capacity = 'capacity',
weight = 'weight'):
"""Return a minimum cost flow satisfying all demands in digraph G.
G is a digraph with edge costs and capacities and in which nodes
have demand, i.e., they want to send or receive some amount of
flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node wants to receive flow. A flow on
    the digraph G satisfies all demands if the net flow into each node
is equal to the demand of that node.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
demand: string
Nodes of the graph G are expected to have an attribute demand
that indicates how much flow a node wants to send (negative
demand) or receive (positive demand). Note that the sum of the
        demands should be 0, otherwise the problem is not feasible. If
this attribute is not present, a node is considered to have 0
demand. Default value: 'demand'.
capacity: string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
Returns
-------
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
flowDict[u][v] is the flow edge (u, v).
Raises
------
NetworkXError
This exception is raised if the input graph is not directed or
not connected.
NetworkXUnfeasible
This exception is raised in the following situations:
* The sum of the demands is not zero. Then, there is no
flow satisfying all demands.
        * There is no flow satisfying all demands.
NetworkXUnbounded
This exception is raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
satisfying all demands is unbounded below.
See also
--------
cost_of_flow, max_flow_min_cost, min_cost_flow_cost, network_simplex
Examples
--------
A simple example of a min cost flow problem.
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('a', demand = -5)
>>> G.add_node('d', demand = 5)
>>> G.add_edge('a', 'b', weight = 3, capacity = 4)
>>> G.add_edge('a', 'c', weight = 6, capacity = 10)
>>> G.add_edge('b', 'd', weight = 1, capacity = 9)
>>> G.add_edge('c', 'd', weight = 2, capacity = 5)
>>> flowDict = nx.min_cost_flow(G)
>>> flowDict
{'a': {'c': 1, 'b': 4}, 'c': {'d': 1}, 'b': {'d': 4}, 'd': {}}
"""
return network_simplex(G, demand = demand, capacity = capacity,
weight = weight)[1]
def cost_of_flow(G, flowDict, weight = 'weight'):
"""Compute the cost of the flow given by flowDict on graph G.
Note that this function does not check for the validity of the
flow flowDict. This function will fail if the graph G and the
flow don't have the same edge set.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
flowDict[u][v] is the flow edge (u, v).
Returns
-------
cost: Integer, float
The total cost of the flow. This is given by the sum over all
edges of the product of the edge's flow and the edge's weight.
See also
--------
max_flow_min_cost, min_cost_flow, min_cost_flow_cost, network_simplex
"""
return sum((flowDict[u][v] * d.get(weight, 0)
for u, v, d in G.edges_iter(data = True)))
def max_flow_min_cost(G, s, t, capacity = 'capacity', weight = 'weight'):
"""Return a maximum (s, t)-flow of minimum cost.
G is a digraph with edge costs and capacities. There is a source
node s and a sink node t. This function finds a maximum flow from
s to t whose total cost is minimized.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
s: node label
Source of the flow.
t: node label
Destination of the flow.
capacity: string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
Returns
-------
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
flowDict[u][v] is the flow edge (u, v).
Raises
------
NetworkXError
This exception is raised if the input graph is not directed or
not connected.
NetworkXUnbounded
This exception is raised if there is an infinite capacity path
from s to t in G. In this case there is no maximum flow. This
exception is also raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
is unbounded below.
See also
--------
cost_of_flow, ford_fulkerson, min_cost_flow, min_cost_flow_cost,
network_simplex
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_edges_from([(1, 2, {'capacity': 12, 'weight': 4}),
... (1, 3, {'capacity': 20, 'weight': 6}),
... (2, 3, {'capacity': 6, 'weight': -3}),
... (2, 6, {'capacity': 14, 'weight': 1}),
... (3, 4, {'weight': 9}),
... (3, 5, {'capacity': 10, 'weight': 5}),
... (4, 2, {'capacity': 19, 'weight': 13}),
... (4, 5, {'capacity': 4, 'weight': 0}),
... (5, 7, {'capacity': 28, 'weight': 2}),
... (6, 5, {'capacity': 11, 'weight': 1}),
... (6, 7, {'weight': 8}),
... (7, 4, {'capacity': 6, 'weight': 6})])
>>> mincostFlow = nx.max_flow_min_cost(G, 1, 7)
>>> nx.cost_of_flow(G, mincostFlow)
373
>>> maxFlow = nx.ford_fulkerson_flow(G, 1, 7)
>>> nx.cost_of_flow(G, maxFlow)
428
>>> mincostFlowValue = (sum((mincostFlow[u][7] for u in G.predecessors(7)))
... - sum((mincostFlow[7][v] for v in G.successors(7))))
>>> mincostFlowValue == nx.max_flow(G, 1, 7)
True
"""
maxFlow = nx.max_flow(G, s, t, capacity = capacity)
H = nx.DiGraph(G)
H.add_node(s, demand = -maxFlow)
H.add_node(t, demand = maxFlow)
return min_cost_flow(H, capacity = capacity, weight = weight)
| gpl-2.0 | 4,067,730,387,553,061,400 | 35.821629 | 104 | 0.56265 | false |
DavidPowell/OpenModes | openmodes/helpers.py | 1 | 6055 | # -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# OpenModes - An eigenmode solver for open electromagnetic resonantors
# Copyright (C) 2013 David Powell
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-----------------------------------------------------------------------------
import functools
import uuid
import weakref
import numpy as np
import numbers
from collections import defaultdict
import six
def inc_slice(s, inc):
"""Increment a slice so that it starts at the current stop, and the current
stop is incremented by some amount"""
return slice(s.stop, s.stop+inc)
class cached_property(object):
"""
A property that is only computed once per instance and then replaces itself
with an ordinary attribute. Deleting the attribute resets the property.
Taken from https://github.com/pydanny/cached-property/blob/master/cached_property.py
Original source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
Copyright under MIT License
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
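# Added usage sketch, not part of the original module; the class `_Circle`
# below is a hypothetical example.  The decorated method runs once per
# instance, after which the result lives in the instance __dict__ and is
# returned by plain attribute lookup.
def _example_cached_property_usage():
    class _Circle(object):
        def __init__(self, radius):
            self.radius = radius
        @cached_property
        def area(self):
            return 3.14159265 * self.radius ** 2
    c = _Circle(2.0)
    first = c.area   # computed by the property and cached in __dict__
    second = c.area  # plain attribute access, no recomputation
    del c.area       # deleting the attribute resets the cached value
    return first == second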
class MeshError(Exception):
"An exeception indicating a failure generating or reading the mesh"
pass
class Identified(object):
"""An object which can be uniquely identified by an id number. It is
assumed that any object which subclasses Identified is immutable, so that
its id can be used for caching complex results which depend on this object.
"""
def __init__(self):
self.id = uuid.uuid4()
def __hash__(self):
return self.id.__hash__()
def __eq__(self, other):
return hasattr(other, 'id') and (self.id == other.id)
def __repr__(self):
"Represent the object by its id, in addition to its memory address"
return ("<%s at 0x%08x with id %s>" % (str(self.__class__)[8:-2],
id(self),
str(self.id)))
class PicklableRef(object):
"""A weak reference which can be pickled. This is achieved by
creating a strong reference to the object at pickling time, then restoring
the weak reference when unpickling. Note that unless the object being
referenced is also pickled and referenced after unpickling, the weak
reference will be dead after unpickling.
"""
def __init__(self, obj, callback=None):
self.ref = weakref.ref(obj, callback)
def __call__(self):
return self.ref()
def __getstate__(self):
return {'ref': self.ref()}
def __setstate__(self, state):
self.ref = weakref.ref(state['ref'])
def memoize(obj):
"""A simple decorator to memoize function calls. Pays particular attention
to numpy arrays and objects which are subclasses of Identified. It is
assumed that in such cases, the object does not change if its `id` is the
same"""
cache = obj.cache = {}
def get_key(item):
if isinstance(item, (six.string_types, numbers.Number)):
return item
elif isinstance(item, Identified):
return str(item.id)
elif isinstance(item, np.ndarray):
return item.tostring()
else:
return str(item)
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key_arg = tuple(get_key(arg) for arg in args)
key_kwarg = tuple((kw, get_key(arg)) for (kw, arg)
in kwargs.items())
key = (key_arg, key_kwarg)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
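# Added usage sketch, not part of the original module; `_slow_norm` is a
# hypothetical function.  numpy array arguments are keyed by their raw bytes,
# so a second call with an identical array is answered from the cache.
def _example_memoize_usage():
    @memoize
    def _slow_norm(arr):
        return np.sqrt((arr ** 2).sum())
    data = np.arange(10.0)
    first = _slow_norm(data)   # computed and stored in the cache
    second = _slow_norm(data)  # same key, served from the cache
    return first == second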
def equivalence(relations):
"""Determine the equivalence classes between objects
Following numerical recipes section 8.6
Parameters
----------
relations: list
Each element of the list is a tuple containing the identities of two
equivalent items. Each item can be any hashable type
Returns
-------
    class_items: list of set
        Each set contains the items belonging to one equivalence class
"""
# first put each item in its own equivalence class
classes = {}
for j, k in relations:
classes[j] = j
classes[k] = k
for relation in relations:
j, k = relation
        # track the ancestor of each
while classes[j] != j:
j = classes[j]
while classes[k] != k:
k = classes[k]
# if not already related, then relate items
if j != k:
classes[j] = k
# The final sweep
for j in classes.keys():
while classes[j] != classes[classes[j]]:
classes[j] = classes[classes[j]]
# Now reverse the storage arrangement, so that all items of the same
# class are grouped together into a set
classes_reverse = defaultdict(set)
for item, item_class in classes.items():
classes_reverse[item_class].add(item)
# the class names are arbitrary, so just return the list of sets
return list(classes_reverse.values())
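# Added usage sketch, not part of the original module; the relations below are
# hypothetical.  The pairs (1, 2), (2, 3) and (4, 5) collapse into the two
# equivalence classes {1, 2, 3} and {4, 5}.
def _example_equivalence_usage():
    classes = equivalence([(1, 2), (2, 3), (4, 5)])
    return sorted(sorted(c) for c in classes)  # [[1, 2, 3], [4, 5]]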
def wrap_if_constant(func):
"""If passed a constant, wrap it in a function. If passed a function, just
return it as is"""
if hasattr(func, '__call__'):
return func
else:
return lambda x: func
| gpl-3.0 | -6,340,580,178,052,489,000 | 30.536458 | 103 | 0.61635 | false |
drewcsillag/skunkweb | pylibs/skunkdoc/scanners/common.py | 1 | 1465 | #
# Copyright (C) 2001 Andrew T. Csillag <[email protected]>
#
# You may distribute under the terms of either the GNU General
# Public License or the SkunkWeb License, as specified in the
# README file.
#
import sys
import string
import ParseSkunkDoc
def doDocString(s):
"""**if <code>s</code> starts with '**', it contains xml markup, so don't
    do anything to it (except trim off the '**'); otherwise, xml escape it and
    return it"""
if s is None:
return ""
# Made ** able to occur after whitespace at start of docstring
s = string.strip(s)
if s[:2] == '**':
s = s[2:]
try:
ParseSkunkDoc.parseString(s)
except:
sys.stderr.write('error parsing XML doc string %s, treating '
'as plaintext\n' % s)
s = '<pre>%s</pre>' % string.replace(plainEscape(s), '&', '&')
else:
s = '<pre>%s</pre>' % string.replace(plainEscape(s), '&', '&')
return '%s' % s
def plainEscape( s ):
'''**xml escape the string <code>s</code>'''
ns = []
for c in s:
if c == '&': ns.append('&')
elif c == '<': ns.append('<')
elif c == '>': ns.append('>')
elif c in ('\n', '\r', '\t'): ns.append(c)
elif c == '"': ns.append('"')
elif ord(c) < 32 or c > 'z': ns.append('&#%d;' % ord(c))
else: ns.append(c)
return string.join(ns, '')
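# Added usage note, not part of the original module: for example,
# plainEscape('R&D <b>"bold"</b>') returns
# 'R&amp;D &lt;b&gt;&quot;bold&quot;&lt;/b&gt;'.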
| gpl-2.0 | -243,526,687,297,408,400 | 30.847826 | 78 | 0.526962 | false |
Hossein-Noroozpour/PyHDM | hml/testunits/test_001.py | 1 | 2306 | #!/usr/bin/python3.3
__author__ = 'Hossein Noroozpour Thany Abady'
from hml.core.HDataEncoderM2 import EncM2
from sklearn.preprocessing import scale
from sklearn.preprocessing import normalize
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import KFold
def test001(arg_list):
e = EncM2('/run/media/thany/AE1247021246CF51/Users/Thany/Documents/Lessons' +
'/DataMining/second/DMC_task_2014/orders_train.txt')
print("Encoding done.")
if arg_list[0] == 'std':
preprocess = scale
elif arg_list[0] == 'nrm':
preprocess = normalize
else:
print('Error in command line arguments.')
        raise ValueError('Error in command line arguments.')
pca = PCA(int(arg_list[1]))
if arg_list[2] == 'decision-tree':
classifier = DecisionTreeClassifier()
elif arg_list[2] == 'SVM':
classifier = SVC()
elif arg_list[2] == 'naive-bayes':
if arg_list[3] == 'GNB':
classifier = GaussianNB()
elif arg_list[3] == 'MNB':
classifier = MultinomialNB()
elif arg_list[3] == 'BNB':
classifier = BernoulliNB()
else:
print('Error in command line arguments.')
            raise ValueError('Error in command line arguments.')
elif arg_list[2] == 'KNN':
#arg_list[4] := 'uniform' or 'distance'
classifier = KNeighborsClassifier(int(arg_list[3]), arg_list[4])
else:
print('Error in command line arguments.')
        raise ValueError('Error in command line arguments.')
selection = KFold(e.datlen)
print(arg_list)
for train_index, test_index in selection:
x_train = e.data[train_index]
y_train = e.target[train_index]
x_test = e.data[test_index]
y_test = e.target[test_index]
print("Selection is done.")
x_train = preprocess(x_train)
x_test = preprocess(x_test)
print("Pre-processing done.")
x_train = pca.fit_transform(x_train)
x_test = pca.fit_transform(x_test)
print('PCA done.')
classifier.fit(x_train, y_train)
print('Score for model is :', classifier.score(x_test, y_test)) | mit | -5,941,045,483,948,872,000 | 35.619048 | 81 | 0.634432 | false |
AlertaDengue/AlertaDengue | AlertaDengue/dbf/tests/test_validation.py | 1 | 3682 | from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.files import File
from django.test import TestCase
from datetime import date
import unittest
# local
from dbf.models import DBF
from dbf.validation import is_valid_dbf
import datetime
import os
__all__ = ["DBFValidationTest"]
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data/")
@unittest.skip("reason='Issue #416'")
class DBFValidationTest(TestCase):
fixtures = ['users']
def _get_file_from_filename(self, filename):
with open(os.path.join(TEST_DATA_DIR, filename), "rb") as fp:
dbf = DBF.objects.create(
uploaded_by=User.objects.all()[0],
file=File(fp, name=filename),
export_date=date.today(),
notification_year=date.today().year,
)
return dbf.file
def test_valid_dbf_returns_true(self):
valid_file = self._get_file_from_filename("simple.dbf")
self.assertTrue(is_valid_dbf(valid_file, 2016))
def test_invalid_dbf_raises_ValidationError(self):
"""If the file is not in dbf format, we should raise an error."""
invalid_file = self._get_file_from_filename("invalid.dbf")
with self.assertRaises(ValidationError):
is_valid_dbf(invalid_file, 2016)
def test_dbf_without_an_expected_column_raises_ValidationError(self):
"""
If the file does not have all columns we expect in a SINAN file, we
should raise an error.
"""
missing_column_file = self._get_file_from_filename(
"missing_nu_ano.dbf"
)
with self.assertRaises(ValidationError):
is_valid_dbf(missing_column_file, 2016)
def test_ID_MUNICIP_can_be_called_ID_MN_RESI(self):
"""
The ID_MUNICIP collumn can also be named ID_MN_RESI
"""
mn_resid_file = self._get_file_from_filename("id_mn_resi.dbf")
self.assertTrue(is_valid_dbf(mn_resid_file, 2016))
def test_can_receive_file_with_name_that_does_not_exist(self):
"""
This is a regression test. We had an error because we were testing this
function with data that was already saved to disk. In the upload
process, what happens is more similiar to what is happening in this
test: the instance exists, but was never saved to disk, so calling
`dbf.file.path` would return a path that had no file there (because the
save process was not complete yet).
"""
inexistent_filename = "{}.dbf".format(datetime.datetime.now())
with open(os.path.join(TEST_DATA_DIR, "simple.dbf"), "rb") as fp:
# Instead of using ".objects.create()" we only instantiate the file
# This will trigger the error when calling dbf.clean() on an
# unsaved instance.
dbf = DBF(
uploaded_by=User.objects.all()[0],
file=File(fp, name=inexistent_filename),
export_date=date.today(),
notification_year=date.today().year,
)
self.assertTrue(is_valid_dbf(dbf.file, dbf.notification_year))
def test_dbf_with_wrong_date_datatypes_raises_ValidationError(self):
"""
The notification year validation should be triggered even if some of
the data is poiting to the correct year
"""
wrong_data_type_file = self._get_file_from_filename(
"wrong_date_datatype.dbf"
)
notification_year = 2015
with self.assertRaises(ValidationError):
is_valid_dbf(wrong_data_type_file, notification_year)
| gpl-3.0 | -1,538,261,545,143,088,000 | 35.82 | 79 | 0.630092 | false |
rave-engine/rave | modules/opengl/core3/texture.py | 1 | 3655 | from OpenGL import GL
import numpy
import ctypes
from . import shaders
class Texture:
__slots__ = ('width', 'height', 'data', 'texture')
def __init__(self, width, height, data):
self.width = width
self.height = height
self.data = data
self.texture = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture)
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA8, self.width, self.height, 0, GL.GL_BGRA, GL.GL_UNSIGNED_INT_8_8_8_8, ctypes.cast(self.data, ctypes.c_void_p))
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP_TO_EDGE)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
def bind(self):
GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture)
def unbind(self):
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
class Image:
FRAGMENT = """
#version 330 core
in vec2 v_texcoord;
out vec4 o_color;
uniform sampler2D u_tex;
void main(void) {
o_color = texture(u_tex, v_texcoord);
}
""".strip()
VERTEX = """
#version 330 core
in vec2 a_vertex;
in vec2 a_texcoord;
out vec2 v_texcoord;
void main(void) {
gl_Position = vec4(a_vertex, 0.0, 1.0);
v_texcoord = a_texcoord;
v_texcoord.y = -v_texcoord.y;
}""".strip()
def __init__(self, tex):
self.vertexes = numpy.array([
1000.0 , -1800.0,
-1000.0 , -1800.0,
1000.0 , 1800.0,
-1000.0 ,1800.0,
-1000.0 , -1800.0,
1000.0 , 1800.0,
], dtype='float32')
self.texcoords = numpy.array([
1280.0 , -720.0,
-1280.0 , -720.0,
1280.0 , 720.0,
-1280.0 , 720.0,
-1280.0 , -720.0,
1280.0 , 720.0,
], dtype='float32')
self.tex = tex
self.program = shaders.ShaderProgram(fragment=self.FRAGMENT, vertex=self.VERTEX)
self.program.compile()
self.vao = GL.glGenVertexArrays(1)
self.vertex_vbo, self.texcoords_vbo = GL.glGenBuffers(2)
self.program.use()
GL.glBindVertexArray(self.vao)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vertex_vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(self.vertexes) * 2 * 4, self.vertexes, GL.GL_STATIC_DRAW)
GL.glEnableVertexAttribArray(self.program.get_index('a_vertex'))
GL.glVertexAttribPointer(self.program.get_index('a_vertex'), 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.texcoords_vbo)
GL.glBufferData(GL.GL_ARRAY_BUFFER, len(self.texcoords) * 2 * 4, self.texcoords, GL.GL_STATIC_DRAW)
GL.glEnableVertexAttribArray(self.program.get_index('a_texcoord'))
GL.glVertexAttribPointer(self.program.get_index('a_texcoord'), 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
GL.glBindVertexArray(0)
def render(self, target):
self.program.use()
self.tex.bind()
        GL.glEnable(GL.GL_BLEND)
        GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
GL.glUniform1i(self.program.get_index('u_tex'), 0)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.tex.texture)
GL.glBindVertexArray(self.vao)
GL.glDrawArrays(GL.GL_TRIANGLES, 0, 6)
#GL.glBindVertexArray(0)
#GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
| bsd-2-clause | 4,593,637,646,586,049,500 | 33.158879 | 166 | 0.59617 | false |
puolival/multipy | multipy/scripts/analyze_data.py | 1 | 1952 | # -*- encoding: utf-8 -*-
"""Script for analyzing data from the simulated primary and follow-up
experiments."""
# Allow importing modules from parent directory.
import sys
sys.path.append('..')
from fdr import lsu, tst, qvalue
from fwer import bonferroni, sidak, hochberg, holm_bonferroni
from permutation import tfr_permutation_test
import numpy as np
from repeat import fwer_replicability as repl
from util import grid_model_counts as counts
"""Load the simulated datasets."""
fpath = '/home/puolival/multipy_data'
fname_primary = fpath + '/primary.npy'
fname_followup = fname_primary.replace('primary', 'follow-up')
print('Loading simulated datasets ..')
primary_data, followup_data = (np.load(fname_primary),
np.load(fname_followup))
print('Done.')
# Extract p-values
pvals_pri, pvals_fol = (primary_data.flat[0]['pvals'],
followup_data.flat[0]['pvals'])
# Extract raw data for permutation testing
rvs_a_pri, rvs_b_pri = (primary_data.flat[0]['rvs_a'],
primary_data.flat[0]['rvs_b'])
rvs_a_fol, rvs_b_fol = (followup_data.flat[0]['rvs_a'],
followup_data.flat[0]['rvs_b'])
"""Define analysis parameters."""
n_iterations, n_effect_sizes, nl, _ = np.shape(pvals_pri)
emph_primary = 0.1
alpha = 0.05
method = qvalue
sl = 30 # TODO: save to .npy file.
"""Compute reproducibility rates."""
rr = np.zeros([n_iterations, n_effect_sizes])
for ind in np.ndindex(n_iterations, n_effect_sizes):
print('Analysis iteration %3d' % (1+ind[0]))
replicable = repl(pvals_pri[ind].flatten(), pvals_fol[ind].flatten(),
emph_primary, method, alpha)
replicable = np.reshape(replicable, [nl, nl])
rr[ind] = counts(replicable, nl, sl)[0]
"""Save data to disk."""
output_fpath = fpath
output_fname = output_fpath + ('/result-%s.npy' % method.__name__)
np.save(output_fname, {'rr': rr})
print('Results saved to disk.')
| bsd-3-clause | -2,987,823,949,373,830,000 | 30.483871 | 73 | 0.656762 | false |
aESeguridad/GERE | venv/lib/python2.7/site-packages/weasyprint/layout/markers.py | 1 | 2049 | # coding: utf-8
"""
weasyprint.layout.markers
-------------------------
Layout for list markers (for ``display: list-item``).
:copyright: Copyright 2011-2014 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
from .percentages import resolve_percentages
from .replaced import image_marker_layout
from ..text import split_first_line
from ..formatting_structure import boxes
def list_marker_layout(context, box):
"""Lay out the list markers of ``box``."""
# List markers can be either 'inside' or 'outside'.
    # Inside markers are laid out just like normal inline content, but
# outside markers need specific layout.
# TODO: implement outside markers in terms of absolute positioning,
# see CSS3 lists.
marker = getattr(box, 'outside_list_marker', None)
if marker:
resolve_percentages(marker, containing_block=box)
if isinstance(marker, boxes.TextBox):
(marker.pango_layout, _, _, marker.width, marker.height,
marker.baseline) = split_first_line(
marker.text, marker.style, context.enable_hinting,
max_width=None, line_width=None)
else:
# Image marker
image_marker_layout(marker)
# Align the top of the marker box with the top of its list-item’s
# content-box.
# TODO: align the baselines of the first lines instead?
marker.position_y = box.content_box_y()
# ... and its right with the left of its list-item’s padding box.
# (Swap left and right for right-to-left text.)
marker.position_x = box.border_box_x()
half_em = 0.5 * box.style.font_size
direction = box.style.direction
if direction == 'ltr':
marker.margin_right = half_em
marker.position_x -= marker.margin_width()
else:
marker.margin_left = half_em
marker.position_x += box.border_width()
| gpl-3.0 | 6,302,328,897,577,311,000 | 36.181818 | 78 | 0.627384 | false |
jjhelmus/berryconda | tools/find_outdated_packages_pypi.py | 1 | 3975 | #! /usr/bin/env python
""" Find conda packages which are out of date compared to PyPI. """
import argparse
import json
import xmlrpc.client as xmlrpclib
import conda.api as api
try:
from packaging.version import parse as parse_version
except ImportError:
from pip._vendor.packaging.version import parse as parse_version
def find_latest_pypi_version(client, package_name):
"""
Return the latest non-prerelease from PyPI.
None is returned if there are no releases or the package does not exist on
PyPI.
"""
all_releases = client.package_releases(package_name, True)
versions = [parse_version(s) for s in all_releases]
filtered = [v for v in versions if not v.is_prerelease]
if len(filtered) == 0:
return None
return max(filtered)
def find_latest_conda_version(index, package_name):
""" Return the latest version of a package from a conda channel index. """
valid = [v for v in index.values() if v['name'] == package_name]
versions = [parse_version(v['version']) for v in valid]
return max(versions)
def parse_arguments():
""" Parse command line arguments. """
parser = argparse.ArgumentParser(
description="Find conda packages which are out of date with PyPI")
parser.add_argument(
'packages', nargs='*',
help=('Name of packages to check, leave blank to check all packages '
'on the channel'))
parser.add_argument(
'--skip', '-s', action='store', help=(
'file containing list of packages to skip when checking against '
'PyPI'))
parser.add_argument(
'--verb', '-v', action='store_true', help='verbose output')
parser.add_argument(
'--channel', '-c', action='store', default='rpi',
help='Conda channel to check. Default is rpi')
parser.add_argument(
'--json', action='store', help='Save outdated packages to json file.')
return parser.parse_args()
def find_outdated_packages(index, package_names, verbose):
""" Return a list of out-of-date packages. """
client = xmlrpclib.ServerProxy('https://pypi.python.org/pypi')
outdated_packages = []
for package_name in sorted(package_names):
pypi_latest_version = find_latest_pypi_version(client, package_name)
conda_latest_version = find_latest_conda_version(index, package_name)
if pypi_latest_version is None:
if verbose:
print(package_name, "not found on PyPI or does",
"not have any non-prerelease versions")
continue
if pypi_latest_version > conda_latest_version:
print(package_name, "appears out of date", pypi_latest_version,
'vs', conda_latest_version)
pkg = {'name': package_name,
'pypi_version': pypi_latest_version.base_version,
'conda_version': conda_latest_version.base_version}
outdated_packages.append(pkg)
elif verbose:
print(package_name, "appears up to date")
return outdated_packages
def main():
args = parse_arguments()
# determine package names to check
index = api.get_index(
channel_urls=[args.channel], prepend=False, use_cache=False)
package_names = set(args.packages)
if len(package_names) == 0: # no package names given on command line
package_names = {v['name'] for k, v in index.items()}
# remove skipped packages
if args.skip is not None:
with open(args.skip) as f:
pkgs_to_skip = [line.strip() for line in f]
package_names = [p for p in package_names if p not in pkgs_to_skip]
outdated_packages = find_outdated_packages(index, package_names, args.verb)
# save outdated_packages to json formatted file is specified
if args.json is not None:
with open(args.json, 'w') as f:
json.dump(outdated_packages, f)
if __name__ == "__main__":
main()
| bsd-3-clause | -6,458,492,364,811,764,000 | 33.868421 | 79 | 0.636226 | false |
am0d/bit | bit/utils.py | 1 | 1479 | # Utility functions for various actions
import os
import sys
import time
import shutil
import hashlib
from bit.instance import bit
from bit.cprint import error, warning
def hash(file_name):
try:
with open(file_name, 'rb') as hashable:
algo = hashlib.new(bit.options.hash_type)
algo.update(hashable.read())
return algo.hexdigest()
except IOError:
error('Could not hash: {0}'.format(file_name))
def is_exe(filepath):
return os.path.exists(filepath) and os.access(filepath, os.X_OK)
def which(program_name):
if sys.platform == 'win32':
program_name = '{0}.exe'.format(program_name)
filepath = os.path.split(program_name)[0]
if filepath:
if is_exe(program_name):
return program_name
else:
for path in os.environ['PATH'].split(os.pathsep):
exe_file = os.path.join(path, program_name)
if is_exe(exe_file):
return exe_file
raise Exception('Could not find {0} on the system path'.format(program_name))
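# Added usage note, not part of the original module: on a POSIX system with gcc
# on the PATH, which('gcc') would typically return '/usr/bin/gcc'; on win32 a
# '.exe' suffix is appended before searching.  A program that cannot be found
# raises the Exception above.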
def flatten(list_name, containers=(list, tuple)):
if isinstance(list_name, containers):
if len(list_name) < 1:
return []
else:
return reduce(lambda x, y : x + y, map(flatten, list_name))
else:
return [list_name]
def fix_strings(file_list):
if sys.platform == 'win32':
file_list = [item.replace('\\', '/') for item in file_list]
return file_list
| bsd-3-clause | 7,232,347,731,415,690,000 | 28.58 | 82 | 0.615957 | false |
Schibum/naclports | lib/naclports/pkg_info.py | 1 | 3150 | # Copyright 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shlex
import string
from naclports.error import PkgFormatError
VALID_KEYS = ['NAME', 'VERSION', 'URL', 'ARCHIVE_ROOT', 'LICENSE', 'DEPENDS',
'MIN_SDK_VERSION', 'LIBC', 'DISABLED_LIBC', 'ARCH', 'CONFLICTS',
'DISABLED_ARCH', 'URL_FILENAME', 'BUILD_OS', 'SHA1', 'DISABLED',
'DISABLED_TOOLCHAIN']
REQUIRED_KEYS = ['NAME', 'VERSION']
def ParsePkgInfo(contents, filename, valid_keys=None, required_keys=None):
"""Parse a string contains the contents of a pkg_info file.
Args:
contents: pkg_info contents as a string.
filename: name of file to use in error messages.
valid_keys: list of keys that are valid in the file.
required_keys: list of keys that are required in the file.
Returns:
A dictionary of the key, value pairs contained in the pkg_info file.
Raises:
PkgFormatError: if file is malformed, contains invalid keys, or does not
contain all required keys.
"""
rtn = {}
if valid_keys is None:
valid_keys = VALID_KEYS
if required_keys is None:
required_keys = REQUIRED_KEYS
def ParsePkgInfoLine(line, line_no):
if '=' not in line:
raise PkgFormatError('Invalid info line %s:%d' % (filename, line_no))
key, value = line.split('=', 1)
key = key.strip()
if key not in valid_keys:
raise PkgFormatError("Invalid key '%s' in info file %s:%d" % (key,
filename,
line_no))
value = value.strip()
if value[0] == '(':
if value[-1] != ')':
raise PkgFormatError('Error parsing %s:%d: %s (%s)' % (filename,
line_no,
key,
value))
value = value[1:-1].split()
else:
value = shlex.split(value)[0]
return (key, value)
def ExpandVars(value, substitutions):
if type(value) == str:
return string.Template(value).substitute(substitutions)
else:
return [string.Template(v).substitute(substitutions) for v in value]
for i, line in enumerate(contents.splitlines()):
if not line or line[0] == '#':
continue
key, raw_value = ParsePkgInfoLine(line, i+1)
if key in rtn:
raise PkgFormatError('Error parsing %s:%d: duplicate key (%s)' %
(filename, i+1, key))
rtn[key] = ExpandVars(raw_value, rtn)
for required_key in required_keys:
if required_key not in rtn:
raise PkgFormatError("Required key '%s' missing from info file: '%s'" %
(required_key, filename))
return rtn
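# Added usage sketch, not part of the original module; the pkg_info text below
# is hypothetical but only uses keys from VALID_KEYS.
def _ExamplePkgInfoUsage():
  contents = 'NAME=zlib\nVERSION=1.2.8\nDEPENDS=(zlib glibc-compat)\n'
  info = ParsePkgInfo(contents, 'pkg_info (in-memory example)')
  # info == {'NAME': 'zlib', 'VERSION': '1.2.8',
  #          'DEPENDS': ['zlib', 'glibc-compat']}
  return info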
def ParsePkgInfoFile(filename, valid_keys=None, required_keys=None):
"""Parse pkg_info from a file on disk."""
with open(filename) as f:
return ParsePkgInfo(f.read(), filename, valid_keys, required_keys)
| bsd-3-clause | -8,466,547,247,953,835,000 | 35.627907 | 78 | 0.578413 | false |
Azure/azure-sdk-for-python | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2017_10_01/operations/_operations.py | 1 | 4791 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2017_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OperationListResult"]
"""Lists all of the available Azure Container Registry REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerregistry.v2017_10_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.ContainerRegistry/operations'} # type: ignore
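# Added usage note, not part of the generated file: with a management client
# that exposes this operation group as `.operations` (the exact client class
# and credential setup are assumptions and are not shown here), the available
# REST operations could be listed with something like:
#     for op in client.operations.list():
#         print(op.name)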
| mit | 7,840,496,075,989,891,000 | 42.954128 | 133 | 0.643498 | false |
romain-fontugne/RTTanalysis | dpgmm.py | 1 | 8500 | import numpy as np
import glob
import dpcluster as dpc
import pandas as pd
import os
import sys
try:
import matplotlib.pylab as plt
import matplotlib as mpl
except Exception, e:
sys.stderr.write("Matplotlib is not available!")
def loadData(filename, format="rttEstimate"):
"""Load a csv file in memory.
:returns: pandas DataFrame with the file data
"""
if format=="rttEstimate":
df = pd.read_csv(filename, sep=",", header=None, names=["ip", "peer", "rtt", "dstMac"])
elif format=="thomas":
# the filename is a directory containing several RTT measurements
# ..../ipSrc/ipDst/flowID/hour
data = []
for fi in glob.glob(filename):
tmp = pd.read_csv(fi, sep="\t", comment="s", header=None,
names=["rtt", "start_sec", "start_msec", "end_sec", "end_msec"],
usecols=["rtt","start_sec"])
val = fi.split("/")
tmp["ip"] = "{0}->{1}".format(val[-4], val[-3])
data.append(tmp)
df = pd.concat(data)
# The ip addresses become the index
df = df.set_index("ip")
return df
def clusterRTToverTime(rttEstimates, timeBin=60, outputDirectory="./rttDistributions/",
minEstimates=10, plot=True, logNormal=True):
"""For each IP address, find the different RTT distributions for each time
bin and plot the average value of each distribution.
"""
# for each IP in the traffic
ips = rttEstimates.index.unique()
for ip in ips:
start = rttEstimates[rttEstimates.index == ip].start_sec.min()
end = rttEstimates[rttEstimates.index == ip].start_sec.max()
dataIP = rttEstimates[rttEstimates.index == ip]
x = []
y = []
z = []
i = 0
for ts in range(start,end,timeBin):
if logNormal:
data = np.log10(dataIP[(dataIP.start_sec>=ts) & (dataIP.start_sec<ts+timeBin)].rtt)
else:
data = dataIP[(dataIP.start_sec>=ts) & (dataIP.start_sec<ts+timeBin)].rtt
# Look only at flows containing a certain number of RTT estimates
if len(data) < minEstimates:
sys.stderr("Ignoring data!! not enough samples!")
continue
# Cluster the data
vdp = dpgmm(data)
if vdp is None:
continue
params = NIWparam2Nparam(vdp)
if logNormal:
                mean, std = logNormalMeanStdDev(params[:, 0], params[:, 1])
            else:
                mean = params[:, 0]
                std = params[:, 1]
for mu, sig in zip(mean, std):
y.append(mu)
z.append(sig)
x.append(ts)
# Plot the clusters characteristics in a file
plt.figure()
plt.errorbar(x,y,yerr=z,fmt="o")
plt.grid(True)
if logNormal:
plt.savefig("{0}/{1}_timeBin{2}sec_logNormal.eps".format(outputDirectory, ip, timeBin))
else:
plt.savefig("{0}/{1}_timeBin{2}sec_normal.eps".format(outputDirectory, ip, timeBin))
def clusterRttPerIP(rttEstimates, outputDirectory="./rttDistributions/", minEstimates=10, plot=True, logNormal=False):
"""For each IP address, find the different RTT distributions and write
their mean and standard deviation in files.
"""
# for each IP in the traffic
ips = rttEstimates.index.unique()
for ip in ips:
if logNormal:
data = np.log10(rttEstimates[rttEstimates.index == ip].rtt)
else:
data = rttEstimates[rttEstimates.index == ip].rtt
# Look only at flows containing a certain number of RTT estimates
if len(data) < minEstimates:
continue
# Cluster the data
vdp = dpgmm(data)
if vdp is None:
continue
# Write the clusters characteristics in a file
fi = open("{0}/{1}.csv".format(outputDirectory, ip), "w")
params = NIWparam2Nparam(vdp)
if logNormal:
            mean, std = logNormalMeanStdDev(params[:, 0], params[:, 1])
        else:
            mean = params[:, 0]
            std = params[:, 1]
for mu, sig in zip(mean, std):
fi.write("{0},{1}\n".format(mu, sig))
if plot:
plotRttDistribution(rttEstimates, ip, "{0}/{1}.eps".format(outputDirectory, ip))
def NIWparam2Nparam(vdp, minClusterIPRatio=0.05):
"""
Convert Gaussian Normal-Inverse-Wishart parameters to the usual Gaussian
parameters (i.e. mean, standard deviation)
:vdp: Variational Dirichlet Process obtained from dpgmm
:minClusterIPRatio: Ignore distributions standing for a ratio of IPs lower
than minClusterIPRatio
"""
nbIPs = float(np.sum(vdp.cluster_sizes()))
mus, Sgs, k, nu = vdp.distr.prior.nat2usual(vdp.cluster_parameters()[
vdp.cluster_sizes() > (minClusterIPRatio * nbIPs), :])[0]
Sgs = Sgs / (k + 1 + 1)[:, np.newaxis, np.newaxis]
res = np.zeros( (len(mus), 2) )
for i, (mu, Sg) in enumerate(zip(mus, Sgs)):
w, V = np.linalg.eig(Sg)
V = np.array(np.matrix(V) * np.matrix(np.diag(np.sqrt(w))))
V = V[0]
res[i] = (mu[0], V[0])
return res
def logNormalMeanStdDev(loc, scale):
"""Compute the mean and standard deviation from the location and scale
parameter of a lognormal distribution.
:loc: location parameter of a lognormal distribution
:scale: scale parameter of a lognmormal distribution
:return: (mean,stdDev) the mean and standard deviation of the distribution
"""
mu = 10 ** (loc + ((scale ** 2) / 2.0))
var = (10 ** (scale ** 2) -1) * 10 ** (2 * loc + scale ** 2)
return mu, np.sqrt(var)
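# Added note, not part of the original module: the conversion above applies the
# standard lognormal moment formulas with base 10 in place of e, i.e.
#   mean = 10**(loc + scale**2 / 2)
#   var  = (10**(scale**2) - 1) * 10**(2*loc + scale**2)
# and returns (mean, sqrt(var)).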
def dpgmm(data, priorWeight=0.1, maxClusters=32, thresh=1e-3, maxIter=10000):
"""
Compute the Variational Inference for Dirichlet Process Mixtures
on the given data.
    :data: 1D array containing the data to cluster
    :priorWeight: prior weight (w parameter) passed to the VDP
    :maxClusters: Maximum number of clusters
    :thresh: convergence threshold for the variational inference
    :maxIter: maximum number of iterations
"""
data = np.array(data).reshape(-1, 1)
vdp = dpc.VDP(dpc.distributions.GaussianNIW(1), w=priorWeight, k=maxClusters, tol=thresh, max_iters=maxIter)
stats = vdp.distr.sufficient_stats(data)
vdp.batch_learn(stats)
return vdp
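# Example (added sketch; assumes the dpcluster package is importable as dpc and
# that "192.0.2.1" stands in for a real IP present in the loaded data):
#   vdp = dpgmm(rttEstimates[rttEstimates.index == "192.0.2.1"].rtt)
#   if vdp is not None:
#       print(NIWparam2Nparam(vdp))   # one (mean, stdDev) pair per retained cluster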
def plotRttDistribution(rttEstimates, ip, filename, nbBins=500, logscale=False):
"""Plot the RTT distribution of an IP address
:rttEstimates: pandas DataFrame containing the RTT estimations
:ip: IP address to plot
:filename: Filename for the plot
:nbBins: Number of bins in the histogram
:logscale: Plot RTTs in logscale if set to True
:returns: None
"""
if logscale:
data = np.log10(rttEstimates[rttEstimates.index == ip].rtt)
else:
data = rttEstimates[rttEstimates.index == ip].rtt
    h, b = np.histogram(data, nbBins, normed=True)
plt.figure(1, figsize=(9, 3))
plt.clf()
ax = plt.subplot()
x = b[:-1]
ax.plot(x, h, "k")
ax.grid(True)
plt.title("%s (%s RTTs)" % (ip, len(data)))
if logscale:
plt.xlabel("log10(RTT)")
else:
plt.xlabel("RTT")
plt.ylabel("pdf")
minorLocator = mpl.ticker.MultipleLocator(10)
ax.xaxis.set_minor_locator(minorLocator)
plt.tight_layout()
plt.savefig(filename)
if __name__ == "__main__":
if len(sys.argv) < 2:
print("usage: python {0} rtt.csv [outputDirectory]".format(sys.argv[0]))
filename = sys.argv[1]
if len(sys.argv) > 2:
outputDirectory = sys.argv[2]
# Create the output directory if it doesn't exist
if not os.path.exists(outputDirectory):
os.mkdir(outputDirectory)
if filename.endswith(".csv"):
# Get RTT data from given file
rtt = loadData(filename, format="rttEstimate")
# Sample RTT estimates: samplingRate=0.1 means that 10% of the
# estimates will be used
samplingRate = 0.1
if samplingRate:
rtt = rtt.sample(frac=samplingRate)
# Find RTT distributions for each IP address
clusterRttPerIP(rtt, outputDirectory, logNormal=False)
else:
# Get RTT data from given file
rtt = loadData(filename, format="thomas")
# Find RTT distributions over time
clusterRTToverTime(rtt, 600, outputDirectory, logNormal=False)
#clusterRttPerIP(rtt, outputDirectory)
| gpl-2.0 | -7,810,559,939,886,170,000 | 31.319392 | 118 | 0.601765 | false |
WuShichao/computational-physics | 2/figure_2_2/figure_2_2.py | 1 | 1143 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 10 15:52:36 2016
Source program for Figure 2.2 on p. 23
@author: nightwing
"""
import matplotlib.pyplot as plt
DENSITY = 1.29     #air density (kg/m^3)
C = 1.0            #drag coefficient
A = 0.33           #frontal area (m^2)
M = 70.0           #mass of rider and bicycle (kg)
v1 = 4.0           #velocity without air resistance (m/s)
v2 = 4.0           #velocity with air resistance (m/s)
P = 400.0          #power (W)
t = 0              #initial time
t_max = 200        #end time (s)
dt = 0.1           #time step (s)
time = []          #list storing time values
velocity1 = []     #list storing velocities without air resistance
velocity2 = []     #list storing velocities with air resistance
#---Euler method: compute the bicycle's velocity---
while t <= t_max:
velocity1.append(v1)
velocity2.append(v2)
time.append(t)
v1 += P/(M*v1)*dt
v2 += P/(M*v2)*dt-C*DENSITY*A*v2**2/(2*M)*dt
t += dt
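# Sanity check (added note): with quadratic drag the analytic terminal velocity is
# v_t = (2*P/(DENSITY*C*A))**(1.0/3) ~ 12.3 m/s for these parameters, so the
# velocity2 curve should level off near that value while velocity1 keeps growing.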
#------------plotting---------------
plt.title("Bicycling simulation: velocity vs. time")
plt.xlabel("time (s)")
plt.ylabel("velocity (m/s)")
plt.plot(time,velocity1,"k-",label="No air resistence")
plt.plot(time,velocity2,"k--",label="With air resistence")
plt.legend(loc=2)
plt.show() | gpl-3.0 | -5,247,181,144,916,056,000 | 22.8 | 59 | 0.560463 | false |
owen-chen/wireless-testing-platform | wtp/DeviceUtils.py | 1 | 3223 | # -*- coding: utf-8 -*-
'''
Created on May 21, 2015
@author: chenchen
'''
import os
from CommonLib import callCommand
class DeviceUtils:
processlock = '/sdcard/processlock.pid'
""" 根据手机序列号获取手机产品型号 """
@staticmethod
def getProductBySerial(serial):
return callCommand("adb -s %s shell getprop ro.product.model" % serial)[0].strip()
""" 获取手机分辨率 """
@staticmethod
def getResolutionBySerial(serial):
resolution_cmd = 'adb -s %s shell dumpsys display | grep DisplayDeviceInfo' % serial
rlt = callCommand(resolution_cmd)[0].strip()
return rlt[rlt.find(':') + 1:rlt.find('}')].split(',')[0].strip()
""" 获取手机安卓版本信息 """
@staticmethod
def getEditionBySerial(serial):
return callCommand('adb -s %s shell getprop ro.build.version.release' % serial)[0].strip()
""" 获取手机内存信息,返回内存大小和可用内存大小 """
@staticmethod
def getMemoryParameterBySerial(serial):
memory_result = callCommand('adb -s %s shell df | grep data' % serial)[0].strip().split()
return memory_result[1], memory_result[3]
""" 判断手机是否插入sim卡,主要根据imsi号进行判断 """
@staticmethod
def getSimStateBySerial(serial):
service_state = callCommand('adb -s %s shell dumpsys telephony.registry | grep mServiceState' % serial)[0].strip().split()[0].split('=')[1]
return int(service_state) == 1;
""" 将手机中的文件保存至电脑中 """
@staticmethod
def pullFileFromDevice(serial, source, target):
callCommand('adb -s %s pull %s %s' % (serial, source, target))
""" 将源文件拷贝至指定手机上的目标路径下 """
@staticmethod
def pushFileToTargetPath(serial, source, target):
callCommand('adb -s %s push %s %s' % (serial, source, target))
""" 创建文件到指定手机上的目标路径下 """
@staticmethod
def lockDevice(serial):
callCommand('adb -s %s shell touch %s' % (serial, DeviceUtils.processlock))
""" 在指定手机上的目标路径下删除文件 """
@staticmethod
def unlockDevice(serial):
callCommand('adb -s %s shell rm %s' % (serial, DeviceUtils.processlock))
""" 判断指定手机上的目标路径的指定文件是否存在 """
@staticmethod
def isDeviceLocked(serial):
processlock = DeviceUtils.processlock
return callCommand('adb -s %s shell ls %s | grep %s' % (serial, processlock[0:processlock.rindex('/') + 1], processlock[processlock.rindex('/') + 1:]))
""" 将本地文件夹传入手机中对应的文件夹,且按照本地文件夹的结构传入新文件夹 """
@staticmethod
def pushFolderToDevice(serial, source, target):
file_list = os.listdir(source)
for sub_file in file_list:
local_file = os.path.join(source, sub_file)
if os.path.isfile(local_file):
DeviceUtils.pushFileToTargetPath(serial, local_file, target + '/' + sub_file)
else:
DeviceUtils.pushFolderToDevice(serial, local_file, target + '/' + sub_file) | gpl-2.0 | -2,518,703,167,617,819,000 | 34.949367 | 159 | 0.63156 | false |
antoinedube/django-spine-news-display | Server/RadioCanada/infrastructure.py | 1 | 1266 | from django.utils import timezone
from urllib.request import urlopen
import xml.etree.ElementTree as et
import json
from django.db import IntegrityError
from RadioCanada.models import NewsItem
class Downloader:
def __init__(self):
pass
def fetch(self):
f = urlopen('http://rss.radio-canada.ca/fils/nouvelles/nouvelles.xml')
page_content = f.read().decode("utf-8")
return self.parse_xml(page_content)
def parse_xml(self,xml_string):
tree = et.fromstring(xml_string)
elements = []
for child in tree.iter('item'):
fetch_time = timezone.now()
title = child.find('title').text
link = child.find('link').text
description = child.find('description').text
image_link = child.find('enclosure').attrib['url']
news_item = dict({
'fetch_time': fetch_time,
'title': title,
'link': link,
'image_link': image_link,
'description': description
})
new_element = NewsItem(**news_item)
elements.append(new_element)
try:
new_element.save()
except IntegrityError as e:
print(e)
return elements
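# Example usage (added sketch; assumes a configured Django environment and a
# reachable RSS feed):
#   downloader = Downloader()
#   items = downloader.fetch()    # parses the feed and saves new NewsItem rows
#   print(items[0].title, items[0].link)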
| gpl-3.0 | 1,697,606,006,735,934,000 | 26.521739 | 78 | 0.548973 | false |
weltliteratur/vossanto | theof/graph.py | 1 | 3086 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Reads source-modifier pairs and outputs a graphviz .dot file.
#
# Usage:
#   python graph.py <source-modifier pairs file (TSV)> > graph.dot
#
# Author: rja
#
# Changes:
# 2018-08-17 (rja)
# - initial version
import fileinput
import math
import random
from collections import Counter
def filter_graph(sources, modifiers, edges):
filtered_sources = dict()
filtered_modifiers = dict()
filtered_edges = dict()
for source, modifier in edges:
# filter edges
if sources[source] > 14 and modifiers[modifier] > 0:
filtered_edges[(source, modifier)] = edges[(source, modifier)]
filtered_sources[source] = sources[source]
filtered_modifiers[modifier] = modifiers[modifier]
return filtered_sources, filtered_modifiers, filtered_edges
def build_graph(input):
sources = Counter()
modifiers = Counter()
edges = Counter()
for line in input:
parts = line.strip().split('\t')
if len(parts) == 2:
source, modifier = parts
# count
sources[source] += 1
modifiers[modifier] += 1
# add edge
edges[(source, modifier)] += 1
return sources, modifiers, edges
def escape_label(s):
return s.replace('"', '\\"')
# see https://stackoverflow.com/questions/28999287/generate-random-colors-rgb/28999469
def random_color():
levels = range(128,256,16)
return "#" + "".join(["{:02x}".format(random.choice(levels)) for _ in range(3)])
def print_graph(sources, modifiers, edges):
print("digraph D {")
# config
print(' graph [outputorder="edgesfirst",overlap=false,sep="+10,10"];')
print(' node [fontname="Arial",style=filled];')
# vertices
vertices = dict()
vid = 0
colormap = dict()
for source in sources:
vid += 1
# store vertex
vertices["s_" + source] = vid
# store color
color = random_color()
colormap[vid] = color
# attributes
weight = sources[source]
fs = max(weight, 10)
print(vid, '[label="' + escape_label(source) + '",width=' + str(math.log(weight) + 1) + ',fontsize=' + str(fs) + ',color="' + color + '"];')
for modifier in modifiers:
vid += 1
vertices["m_" + modifier] = vid
weight = modifiers[modifier]
fs = max(weight, 10)
print(vid, '[label="' + escape_label(modifier) + '", color="yellow", width=' + str(math.log(weight) + 1) + ',fontsize=' + str(fs) + '];')
# edges
for source, modifier in edges:
sid = vertices["s_" + source]
mid = vertices["m_" + modifier]
weight = edges[(source, modifier)]
print(sid, "->", mid, '[weight=' + str(weight) + ',penwidth=' + str(weight + 1) + ',color="' + colormap[sid] + '"];')
print("}")
if __name__ == '__main__':
# build graph
sources, modifiers, edges = build_graph(fileinput.input())
# filter graph
sources, modifiers, edges = filter_graph(sources, modifiers, edges)
# print graph
print_graph(sources, modifiers, edges)
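# Rendering note (added): the emitted graph sets layout attributes such as
# "overlap" and "sep", so a force-directed Graphviz engine is a natural choice,
# e.g. (assuming Graphviz is installed; "pairs.tsv" is a placeholder file name):
#   python graph.py pairs.tsv > graph.dot && neato -Tpdf graph.dot -o graph.pdf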
| gpl-3.0 | 2,003,152,647,947,842,300 | 29.254902 | 148 | 0.579391 | false |
dmitriy0611/django | django/db/migrations/executor.py | 1 | 10302 | from __future__ import unicode_literals
from django.apps.registry import apps as global_apps
from django.db import migrations
from .loader import MigrationLoader
from .recorder import MigrationRecorder
from .state import ProjectState
class MigrationExecutor(object):
"""
End-to-end migration execution - loads migrations, and runs them
up or down to a specified set of targets.
"""
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets, clean_start=False):
"""
Given a set of targets, returns a list of (Migration instance, backwards?).
"""
plan = []
if clean_start:
applied = set()
else:
applied = set(self.loader.applied_migrations)
for target in targets:
# If the target is (app_label, None), that means unmigrate everything
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
# If the migration is already applied, do backwards mode,
# otherwise do forwards mode.
elif target in applied:
# Don't migrate backwards all the way to the target node (that
# may roll back dependencies in other apps that don't need to
# be rolled back); instead roll back through target's immediate
# child(ren) in the same app, and no further.
next_in_app = sorted(
n for n in
self.loader.graph.node_map[target].children
if n[0] == target[0]
)
for node in next_in_app:
for migration in self.loader.graph.backwards_plan(node):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied.add(migration)
return plan
def migrate(self, targets, plan=None, fake=False, fake_initial=False):
"""
Migrates the database up to the given targets.
Django first needs to create all project states before a migration is
(un)applied and in a second step run all the database operations.
"""
if plan is None:
plan = self.migration_plan(targets)
migrations_to_run = {m[0] for m in plan}
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)
# Holds all states right before a migration is applied
# if the migration is being run.
states = {}
state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
if self.progress_callback:
self.progress_callback("render_start")
# Phase 1 -- Store all project states of migrations right before they
# are applied. The first migration that will be applied in phase 2 will
# trigger the rendering of the initial project state. From this time on
# models will be recursively reloaded as explained in
# `django.db.migrations.state.get_related_models_recursive()`.
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration whose state was already computed
# from the set below (`migrations_to_run.remove(migration)`).
# If no states for migrations must be computed, we can exit
# this loop. Migrations that occur after the latest migration
# that is about to be applied would only trigger unneeded
# mutate_state() calls.
break
do_run = migration in migrations_to_run
if do_run:
if 'apps' not in state.__dict__:
state.apps # Render all real_apps -- performance critical
states[migration] = state.clone()
migrations_to_run.remove(migration)
# Only preserve the state if the migration is being run later
state = migration.mutate_state(state, preserve=do_run)
if self.progress_callback:
self.progress_callback("render_success")
# Phase 2 -- Run the migrations
for migration, backwards in plan:
if not backwards:
self.apply_migration(states[migration], migration, fake=fake, fake_initial=fake_initial)
else:
self.unapply_migration(states[migration], migration, fake=fake)
self.check_replacements()
def collect_sql(self, plan):
"""
Takes a migration plan and returns a list of collected SQL
statements that represent the best-efforts version of that plan.
"""
statements = []
state = None
for migration, backwards in plan:
with self.connection.schema_editor(collect_sql=True) as schema_editor:
if state is None:
state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
if not backwards:
state = migration.apply(state, schema_editor, collect_sql=True)
else:
state = migration.unapply(state, schema_editor, collect_sql=True)
statements.extend(schema_editor.collected_sql)
return statements
def apply_migration(self, state, migration, fake=False, fake_initial=False):
"""
Runs a migration forwards.
"""
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
if fake_initial:
# Test to see if this is an already-applied initial migration
applied, state = self.detect_soft_applied(state, migration)
if applied:
fake = True
if not fake:
# Alright, do it normally
with self.connection.schema_editor() as schema_editor:
state = migration.apply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_applied(app_label, name)
else:
self.recorder.record_applied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("apply_success", migration, fake)
return state
def unapply_migration(self, state, migration, fake=False):
"""
Runs a migration backwards.
"""
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor() as schema_editor:
state = migration.unapply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_unapplied(app_label, name)
else:
self.recorder.record_unapplied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
return state
def check_replacements(self):
"""
Mark replacement migrations applied if their replaced set all are.
"""
applied = self.recorder.applied_migrations()
for key, migration in self.loader.replacements.items():
all_applied = all(m in applied for m in migration.replaces)
if all_applied and key not in applied:
self.recorder.record_applied(*key)
def detect_soft_applied(self, project_state, migration):
"""
Tests whether a migration has been implicitly applied - that the
tables it would create exist. This is intended only for use
on initial migrations (as it only looks for CreateModel).
"""
# Bail if the migration isn't the first one in its app
if [name for app, name in migration.dependencies if app == migration.app_label]:
return False, project_state
if project_state is None:
after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True)
else:
after_state = migration.mutate_state(project_state)
apps = after_state.apps
found_create_migration = False
# Make sure all create model are done
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if model._meta.db_table not in self.connection.introspection.table_names(self.connection.cursor()):
return False, project_state
found_create_migration = True
# If we get this far and we found at least one CreateModel migration,
# the migration is considered implicitly applied.
return found_create_migration, after_state
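# Example usage (added sketch; assumes a configured Django project):
#   from django.db import connection
#   executor = MigrationExecutor(connection)
#   targets = executor.loader.graph.leaf_nodes()   # latest migration of every app
#   plan = executor.migration_plan(targets)
#   executor.migrate(targets, plan)                # apply any outstanding migrations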
| bsd-3-clause | 917,353,421,160,323,000 | 46.256881 | 115 | 0.599981 | false |
psychopy/versions | psychopy/core.py | 1 | 5931 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Basic functions, including timing, rush (imported), quit
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from __future__ import absolute_import, division, print_function
from builtins import object
import sys
import threading
import subprocess
import shlex
import locale
# some things are imported just to be accessible within core's namespace
from psychopy.clock import (MonotonicClock, Clock, CountdownTimer,
wait, monotonicClock, getAbsTime,
StaticPeriod) # pylint: disable=W0611
# always safe to call rush, even if its not going to do anything for a
# particular OS
from psychopy.platform_specific import rush # pylint: disable=W0611
from psychopy import logging
from psychopy.constants import STARTED, NOT_STARTED, FINISHED, PY3
try:
import pyglet
havePyglet = True
# may not want to check, to preserve terminal window focus
checkPygletDuringWait = True
except ImportError:
havePyglet = False
checkPygletDuringWait = False
try:
import glfw
haveGLFW = True
except ImportError:
haveGLFW = False
runningThreads = [] # just for backwards compatibility?
openWindows = [] # visual.Window updates this, event.py and clock.py use it
# Set getTime in core to == the monotonicClock instance created in the
# clockModule.
# The logging module sets the defaultClock to == clock.monotonicClock,
# so by default the core.getTime() and logging.defaultClock.getTime()
# functions return the 'same' timebase.
#
# This way 'all' OSs have a core.getTime() timebase that starts at 0.0 when
# the experiment is launched, instead of it being this way on Windows only
# (which was also a discrepancy between OSs when win32 was using time.clock).
def getTime(applyZero = True):
"""Get the current time since psychopy.core was loaded.
Version Notes: Note that prior to PsychoPy 1.77.00 the behaviour of
getTime() was platform dependent (on OSX and linux it was equivalent to
:func:`psychopy.core.getAbsTime`
whereas on windows it returned time since loading of the module, as now)
"""
return monotonicClock.getTime(applyZero)
def quit():
"""Close everything and exit nicely (ending the experiment)
"""
# pygame.quit() # safe even if pygame was never initialised
logging.flush()
for thisThread in threading.enumerate():
if hasattr(thisThread, 'stop') and hasattr(thisThread, 'running'):
# this is one of our event threads - kill it and wait for success
thisThread.stop()
while thisThread.running == 0:
pass # wait until it has properly finished polling
sys.exit(0) # quits the python session entirely
def shellCall(shellCmd, stdin='', stderr=False, env=None, encoding=None):
"""Call a single system command with arguments, return its stdout.
Returns stdout, stderr if stderr is True.
Handles simple pipes, passing stdin to shellCmd (pipes are untested
on windows) can accept string or list as the first argument
Parameters
----------
shellCmd : str, or iterable
The command to execute, and its respective arguments.
stdin : str, or None
Input to pass to the command.
stderr : bool
Whether to return the standard error output once execution is finished.
env : dict
The environment variables to set during execution.
encoding : str
The encoding to use for communication with the executed command.
This argument will be ignored on Python 2.7.
Notes
-----
We use ``subprocess.Popen`` to execute the command and establish
`stdin` and `stdout` pipes.
Python 2.7 always opens the pipes in text mode; however,
Python 3 defaults to binary mode, unless an encoding is specified.
To unify pipe communication across Python 2 and 3, we now provide an
`encoding` parameter, enforcing `utf-8` text mode by default.
This parameter is present from Python 3.6 onwards; using an older
Python 3 version will raise an exception. The parameter will be ignored
when running Python 2.7.
"""
if encoding is None:
encoding = locale.getpreferredencoding()
if type(shellCmd) == str:
# safely split into cmd+list-of-args, no pipes here
shellCmdList = shlex.split(shellCmd)
elif type(shellCmd) == bytes:
# safely split into cmd+list-of-args, no pipes here
shellCmdList = shlex.split(shellCmd.decode('utf-8'))
elif type(shellCmd) in (list, tuple): # handles whitespace in filenames
shellCmdList = shellCmd
else:
msg = 'shellCmd requires a string or iterable.'
raise TypeError(msg)
cmdObjects = []
for obj in shellCmdList:
if type(obj) != bytes:
cmdObjects.append(obj)
else:
cmdObjects.append(obj.decode('utf-8'))
# Since Python 3.6, we can use the `encoding` parameter.
if PY3:
if sys.version_info.minor >= 6:
proc = subprocess.Popen(cmdObjects, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding=encoding, env=env)
else:
msg = 'shellCall() requires Python 2.7, or 3.6 and newer.'
raise RuntimeError(msg)
else:
proc = subprocess.Popen(cmdObjects, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
stdoutData, stderrData = proc.communicate(stdin)
del proc
if stderr:
return stdoutData.strip(), stderrData.strip()
else:
return stdoutData.strip()
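# Example usage (added sketch):
#   out = shellCall('uname -a')                       # stdout as a stripped string
#   out, err = shellCall(['ls', '/tmp'], stderr=True)  # also capture stderr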
| gpl-3.0 | 8,450,671,426,704,044,000 | 34.303571 | 79 | 0.669364 | false |
HITGmbH/py-convergent-encryption | tests/test_crypto.py | 1 | 8807 | # Copyright (c) 2011, HIT Information-Control GmbH
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# * Neither the name of the HIT Information-Control GmbH nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL HIT Information-Control GmbH BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import tempfile
import shutil
import os
import hashlib
import logging
import unittest
from convergent import crypto
log = logging.getLogger("convergent.test_hashes")
# some generic strings
STRINGS = ("\x00", "\x01", "\xFF", "\x00"*15,
"\x00\xFF"*20, "test"*1024, "\xFF"*23)
class SHA256dHashTestCase(unittest.TestCase):
def setUp(self):
self.hex = "f5a1f608f4cd6abaf52e716739a68bc83b0e91872c1f70916e59756ea122f047"
def test_sha256d_wo_initial_data(self):
h = crypto.SHA256d()
h.update("test test 123")
h.update("test test 345")
self.assertEqual(h.hexdigest(), self.hex)
def test_sha256d_with_initial_data(self):
h = crypto.SHA256d("test test 123")
h.update("test test 345")
self.assertEqual(h.hexdigest(), self.hex)
def test_sha256d_cache(self):
h = crypto.SHA256d("test test 123test test 345")
void = h.digest() #@UnusedVariable
self.assertEqual(h.hexdigest(), self.hex)
class CounterTestCase(unittest.TestCase):
def test_Counter(self):
c = crypto.Counter()
for x in range(300):
c()
self.assertEqual(c(), "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01,")
class AESTestCase(unittest.TestCase):
#
# NIST TESTVEKTOREN AES256CTR (SP800-38A)
#
key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe\x2b\x73\xae\xf0\x85\x7d\x77\x81\x1f\x35\x2c\x07\x3b\x61\x08\xd7\x2d\x98\x10\xa3\x09\x14\xdf\xf4"
c = (0xf0f1f2f3, 0xf4f5f6f7, 0xf8f9fafb, 0xfcfdfeff)
plain = ("\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17\xad\x2b\x41\x7b\xe6\x6c\x37\x10")
cipher = ("`\x1e\xc3\x13wW\x89\xa5\xb7\xa7\xf5\x04\xbb\xf3\xd2(\xf4C\xe3"
"\xcaMb\xb5\x9a\xca\x84\xe9\x90\xca\xca\xf5\xc5+\t0\xda\xa2="
"\xe9L\xe8p\x17\xba-\x84\x98\x8d\xdf\xc9\xc5\x8d\xb6z\xad\xa6"
"\x13\xc2\xdd\x08EyA\xa6")
def test_1_nist_crypto(self):
""" 1.: testing PyCrypto, using the NIST test vectors."""
# test using Crypto.Cipher.AES
counter = crypto.Counter(*self.c)
from Crypto.Cipher import AES as aes_cc
crypto.AES = aes_cc
self.assertEquals(self.cipher, crypto.aes(self.key, self.plain,
counter=counter))
#
# we can't test using pycryptopp.cipher.aes
# since the counter of PyCryptoPP can't be set
#
def test_2_self_cryptopp(self):
""" 2.: testing CryptoPP using PyCrypto..."""
# generate a cipher string using nist plaintext BUT counter start at "\x00"*16
# Crypto.Cipher.AES shoud work okay, since tested with NIST test vectors
from Crypto.Cipher import AES as aes_cc
crypto.AES = aes_cc
counter = crypto.Counter()
cipher0 = crypto.aes(self.key, self.plain, counter=counter)
# testing PyCryptoPP, not as well as Crypto.Cipher.AES
        # but well... better than not at all I guess
from pycryptopp.cipher.aes import AES as aes_pp #@UnresolvedImport
crypto.AES = aes_pp
self.assertEquals(cipher0, crypto.aes(self.key, self.plain))
def test_3_padding_and_compatibility(self):
""" 3.: en- and decryption using CryptoPP and PyCrypto with """
strings = STRINGS
from pycryptopp.cipher.aes import AES as aes_pp #@UnresolvedImport
from Crypto.Cipher import AES as aes_cc
for s in strings:
# testing encryption
#
crypto.AES = aes_cc # monkey-patch Crypto.Cipher.AES
counter = crypto.Counter()
cipher0 = crypto.aes(self.key, s, counter=counter)
# using pycryptopp.cipher.aes
crypto.AES = aes_pp # monkey-patch pycryptopp.cipher.aes.AES
cipher1 = crypto.aes(self.key, s)
self.assertEquals(cipher0, cipher1)
#
# testing decryption
plain1 = crypto.aes(self.key, cipher0) # still using pycryptopp.cipher.aes.AES
crypto.AES = aes_cc # monkey-patch Crypto.Cipher.AES
counter = crypto.Counter()
plain0 = crypto.aes(self.key, cipher0, counter=counter)
self.assertEquals(plain0, plain1)
class ConvergentEncryptionTestBase(crypto.ConvergentEncryption):
def test_set_convergence_secret(self):
c1 = self.encrypt("test123")
self.set_convergence_secret("B"*5)
c2 = self.encrypt("test123")
self.assertNotEqual(c1, c2)
def test_encrypt_decrypt(self):
for data in self.strings:
skey, pkey, crypted = self.encrypt(data)
self.assertNotEquals(data, crypted)
plain = self.decrypt(skey, crypted, verify=True)
self.assertEqual(data, plain)
def test_encrypt_error(self):
for data in self.strangelings:
self.assertRaises(AssertionError, self.encrypt, data)
def test_encrypt_decrypt_with_convergence(self):
for plaintext in self.strings:
self.set_convergence_secret("B"*5)
skey, pkey, crypted = self.encrypt(plaintext)
self.assertNotEquals(plaintext, crypted)
decrypted = self.decrypt(skey, crypted, verify=True)
self.assertEqual(plaintext, decrypted)
def test_convergence(self):
without_convergence = [] # (key, cyphertext), ...
for plaintext in self.strings:
skey, pkey, crypted = self.encrypt(plaintext)
without_convergence.append((skey, crypted))
self.set_convergence_secret("B"*5)
with_convergence = [] # (key, cyphertext), ...
for plaintext in self.strings:
skey, pkey, crypted = self.encrypt(plaintext)
with_convergence.append((skey, crypted))
for w, wo in zip(with_convergence, without_convergence):
self.assertTrue(w[0] != wo[0]) # key must not be equal
self.assertTrue(w[1] != wo[1]) # cyphertext must not be equal
def test_process_key(self):
cyphertext = crypto.encrypt_key("test123", "nounce", "my_sec.key")
plaintext = crypto.encrypt_key("test123", "nounce", cyphertext)
self.assertEqual(plaintext, "my_sec.key")
from pycryptopp.cipher.aes import AES as aes_pp
crypto.AES = aes_pp
class ConvergentEncryptionPyCryptoPPTestCase(unittest.TestCase, ConvergentEncryptionTestBase):
"""ConvergentEncryption TestCase using pycryptopp."""
def setUp(self):
self.strings = STRINGS
self.strangelings = (1, 0x12, False)
from Crypto.Cipher import AES as aes_cc
crypto.AES = aes_cc
class ConvergentEncryptionPyCryptoTestCase(unittest.TestCase, ConvergentEncryptionTestBase):
"""ConvergentEncryption TestCase using PyCrypto"""
def setUp(self):
self.strings = STRINGS
self.strangelings = (1, 0x12, False)
| bsd-3-clause | -2,827,714,815,581,517,000 | 39.773148 | 140 | 0.647212 | false |
ianblenke/awsebcli | ebcli/bundled/botocore/utils.py | 1 | 18227 | # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import datetime
import hashlib
import math
import binascii
from six import string_types, text_type
import dateutil.parser
from dateutil.tz import tzlocal, tzutc
from botocore.exceptions import InvalidExpressionError, ConfigNotFound
from botocore.compat import json, quote, zip_longest
from botocore.vendored import requests
from botocore.compat import OrderedDict
logger = logging.getLogger(__name__)
DEFAULT_METADATA_SERVICE_TIMEOUT = 1
METADATA_SECURITY_CREDENTIALS_URL = (
'http://169.254.169.254/latest/meta-data/iam/security-credentials/'
)
# These are chars that do not need to be urlencoded.
# Based on rfc2986, section 2.3
SAFE_CHARS = '-._~'
class _RetriesExceededError(Exception):
"""Internal exception used when the number of retries are exceeded."""
pass
def normalize_url_path(path):
if not path:
return '/'
return remove_dot_segments(path)
def remove_dot_segments(url):
# RFC 2986, section 5.2.4 "Remove Dot Segments"
output = []
while url:
if url.startswith('../'):
url = url[3:]
elif url.startswith('./'):
url = url[2:]
elif url.startswith('/./'):
url = '/' + url[3:]
elif url.startswith('/../'):
url = '/' + url[4:]
if output:
output.pop()
elif url.startswith('/..'):
url = '/' + url[3:]
if output:
output.pop()
elif url.startswith('/.'):
url = '/' + url[2:]
elif url == '.' or url == '..':
url = ''
elif url.startswith('//'):
# As far as I can tell, this is not in the RFC,
# but AWS auth services require consecutive
# slashes are removed.
url = url[1:]
else:
if url[0] == '/':
next_slash = url.find('/', 1)
else:
next_slash = url.find('/', 0)
if next_slash == -1:
output.append(url)
url = ''
else:
output.append(url[:next_slash])
url = url[next_slash:]
return ''.join(output)
def validate_jmespath_for_set(expression):
# Validates a limited jmespath expression to determine if we can set a value
# based on it. Only works with dotted paths.
if not expression or expression == '.':
raise InvalidExpressionError(expression=expression)
for invalid in ['[', ']', '*']:
if invalid in expression:
raise InvalidExpressionError(expression=expression)
def set_value_from_jmespath(source, expression, value, is_first=True):
# This takes a (limited) jmespath-like expression & can set a value based
# on it.
# Limitations:
# * Only handles dotted lookups
# * No offsets/wildcards/slices/etc.
if is_first:
validate_jmespath_for_set(expression)
bits = expression.split('.', 1)
current_key, remainder = bits[0], bits[1] if len(bits) > 1 else ''
if not current_key:
raise InvalidExpressionError(expression=expression)
if remainder:
if not current_key in source:
# We've got something in the expression that's not present in the
# source (new key). If there's any more bits, we'll set the key with
# an empty dictionary.
source[current_key] = {}
return set_value_from_jmespath(
source[current_key],
remainder,
value,
is_first=False
)
# If we're down to a single key, set it.
source[current_key] = value
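# Example (added): given doc = {}, set_value_from_jmespath(doc, 'foo.bar', 42)
# leaves doc == {'foo': {'bar': 42}}.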
class InstanceMetadataFetcher(object):
def __init__(self, timeout=DEFAULT_METADATA_SERVICE_TIMEOUT,
num_attempts=1, url=METADATA_SECURITY_CREDENTIALS_URL):
self._timeout = timeout
self._num_attempts = num_attempts
self._url = url
def _get_request(self, url, timeout, num_attempts=1):
for i in range(num_attempts):
try:
response = requests.get(url, timeout=timeout)
except (requests.Timeout, requests.ConnectionError) as e:
logger.debug("Caught exception while trying to retrieve "
"credentials: %s", e, exc_info=True)
else:
if response.status_code == 200:
return response
raise _RetriesExceededError()
    def retrieve_iam_role_credentials(self, url=None, timeout=None,
                                      num_attempts=None):
        # The recursive call below passes explicit values; the top-level call
        # falls back to the values this fetcher was configured with.
        data = {}
        url = self._url if url is None else url
        timeout = self._timeout if timeout is None else timeout
        num_attempts = self._num_attempts if num_attempts is None else num_attempts
try:
r = self._get_request(url, timeout, num_attempts)
if r.content:
fields = r.content.decode('utf-8').split('\n')
for field in fields:
if field.endswith('/'):
data[field[0:-1]] = self.retrieve_iam_role_credentials(
url + field, timeout, num_attempts)
else:
val = self._get_request(
url + field,
timeout=timeout,
num_attempts=num_attempts).content.decode('utf-8')
if val[0] == '{':
val = json.loads(val)
data[field] = val
else:
logger.debug("Metadata service returned non 200 status code "
"of %s for url: %s, content body: %s",
r.status_code, url, r.content)
except _RetriesExceededError:
logger.debug("Max number of attempts exceeded (%s) when "
"attempting to retrieve data from metadata service.",
num_attempts)
# We sort for stable ordering. In practice, this should only consist
# of one role, but may need revisiting if this expands in the future.
final_data = {}
for role_name in sorted(data):
final_data = {
'role_name': role_name,
'access_key': data[role_name]['AccessKeyId'],
'secret_key': data[role_name]['SecretAccessKey'],
'token': data[role_name]['Token'],
'expiry_time': data[role_name]['Expiration'],
}
return final_data
def merge_dicts(dict1, dict2):
"""Given two dict, merge the second dict into the first.
The dicts can have arbitrary nesting.
"""
for key in dict2:
if isinstance(dict2[key], dict):
if key in dict1 and key in dict2:
merge_dicts(dict1[key], dict2[key])
else:
dict1[key] = dict2[key]
else:
# At scalar types, we iterate and merge the
# current dict that we're on.
dict1[key] = dict2[key]
def parse_key_val_file(filename, _open=open):
try:
with _open(filename) as f:
contents = f.read()
return parse_key_val_file_contents(contents)
except OSError as e:
raise ConfigNotFound(path=filename)
def parse_key_val_file_contents(contents):
# This was originally extracted from the EC2 credential provider, which was
# fairly lenient in its parsing. We only try to parse key/val pairs if
# there's a '=' in the line.
final = {}
for line in contents.splitlines():
if '=' not in line:
continue
key, val = line.split('=', 1)
key = key.strip()
val = val.strip()
final[key] = val
return final
def percent_encode_sequence(mapping, safe=SAFE_CHARS):
"""Urlencode a dict or list into a string.
This is similar to urllib.urlencode except that:
* It uses quote, and not quote_plus
* It has a default list of safe chars that don't need
to be encoded, which matches what AWS services expect.
This function should be preferred over the stdlib
``urlencode()`` function.
:param mapping: Either a dict to urlencode or a list of
``(key, value)`` pairs.
"""
encoded_pairs = []
if hasattr(mapping, 'items'):
pairs = mapping.items()
else:
pairs = mapping
for key, value in pairs:
encoded_pairs.append('%s=%s' % (percent_encode(key),
percent_encode(value)))
return '&'.join(encoded_pairs)
def percent_encode(input_str, safe=SAFE_CHARS):
"""Urlencodes a string.
Whereas percent_encode_sequence handles taking a dict/sequence and
producing a percent encoded string, this function deals only with
taking a string (not a dict/sequence) and percent encoding it.
"""
if not isinstance(input_str, string_types):
input_str = text_type(input_str)
return quote(text_type(input_str).encode('utf-8'), safe=safe)
def parse_timestamp(value):
"""Parse a timestamp into a datetime object.
Supported formats:
* iso8601
* rfc822
* epoch (value is an integer)
This will return a ``datetime.datetime`` object.
"""
if isinstance(value, (int, float)):
# Possibly an epoch time.
return datetime.datetime.fromtimestamp(value, tzlocal())
else:
try:
return datetime.datetime.fromtimestamp(float(value), tzlocal())
except (TypeError, ValueError):
pass
try:
return dateutil.parser.parse(value)
except (TypeError, ValueError) as e:
raise ValueError('Invalid timestamp "%s": %s' % (value, e))
def parse_to_aware_datetime(value):
"""Converted the passed in value to a datetime object with tzinfo.
This function can be used to normalize all timestamp inputs. This
function accepts a number of different types of inputs, but
will always return a datetime.datetime object with time zone
information.
The input param ``value`` can be one of several types:
* A datetime object (both naive and aware)
* An integer representing the epoch time (can also be a string
of the integer, i.e '0', instead of 0). The epoch time is
considered to be UTC.
* An iso8601 formatted timestamp. This does not need to be
a complete timestamp, it can contain just the date portion
without the time component.
The returned value will be a datetime object that will have tzinfo.
If no timezone info was provided in the input value, then UTC is
assumed, not local time.
"""
# This is a general purpose method that handles several cases of
# converting the provided value to a string timestamp suitable to be
# serialized to an http request. It can handle:
# 1) A datetime.datetime object.
if isinstance(value, datetime.datetime):
datetime_obj = value
else:
# 2) A string object that's formatted as a timestamp.
# We document this as being an iso8601 timestamp, although
# parse_timestamp is a bit more flexible.
datetime_obj = parse_timestamp(value)
if datetime_obj.tzinfo is None:
        # I think a case could be made that if no time zone is provided,
# we should use the local time. However, to restore backwards
# compat, the previous behavior was to assume UTC, which is
# what we're going to do here.
datetime_obj = datetime_obj.replace(tzinfo=tzutc())
else:
datetime_obj = datetime_obj.astimezone(tzutc())
return datetime_obj
def calculate_sha256(body, as_hex=False):
"""Calculate a sha256 checksum.
This method will calculate the sha256 checksum of a file like
object. Note that this method will iterate through the entire
file contents. The caller is responsible for ensuring the proper
starting position of the file and ``seek()``'ing the file back
to its starting location if other consumers need to read from
the file like object.
:param body: Any file like object. The file must be opened
in binary mode such that a ``.read()`` call returns bytes.
:param as_hex: If True, then the hex digest is returned.
If False, then the digest (as binary bytes) is returned.
:returns: The sha256 checksum
"""
checksum = hashlib.sha256()
for chunk in iter(lambda: body.read(1024 * 1024), b''):
checksum.update(chunk)
if as_hex:
return checksum.hexdigest()
else:
return checksum.digest()
def calculate_tree_hash(body):
"""Calculate a tree hash checksum.
For more information see:
http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html
:param body: Any file like object. This has the same constraints as
the ``body`` param in calculate_sha256
:rtype: str
:returns: The hex version of the calculated tree hash
"""
chunks = []
required_chunk_size = 1024 * 1024
sha256 = hashlib.sha256
for chunk in iter(lambda: body.read(required_chunk_size), b''):
chunks.append(sha256(chunk).digest())
if not chunks:
return sha256(b'').hexdigest()
while len(chunks) > 1:
new_chunks = []
for first, second in _in_pairs(chunks):
if second is not None:
new_chunks.append(sha256(first + second).digest())
else:
# We're at the end of the list and there's no pair left.
new_chunks.append(first)
chunks = new_chunks
return binascii.hexlify(chunks[0]).decode('ascii')
def _in_pairs(iterable):
# Creates iterator that iterates over the list in pairs:
# for a, b in _in_pairs([0, 1, 2, 3, 4]):
# print(a, b)
#
# will print:
# 0, 1
# 2, 3
# 4, None
shared_iter = iter(iterable)
# Note that zip_longest is a compat import that uses
# the itertools izip_longest. This creates an iterator,
# this call below does _not_ immediately create the list
# of pairs.
return zip_longest(shared_iter, shared_iter)
class CachedProperty(object):
"""A read only property that caches the initially computed value.
This descriptor will only call the provided ``fget`` function once.
Subsequent access to this property will return the cached value.
"""
def __init__(self, fget):
self._fget = fget
def __get__(self, obj, cls):
if obj is None:
return self
else:
computed_value = self._fget(obj)
obj.__dict__[self._fget.__name__] = computed_value
return computed_value
class ArgumentGenerator(object):
"""Generate sample input based on a shape model.
This class contains a ``generate_skeleton`` method that will take
an input shape (created from ``botocore.model``) and generate
a sample dictionary corresponding to the input shape.
The specific values used are place holder values. For strings an
empty string is used, for numbers 0 or 0.0 is used. The intended
usage of this class is to generate the *shape* of the input structure.
This can be useful for operations that have complex input shapes.
This allows a user to just fill in the necessary data instead of
worrying about the specific structure of the input arguments.
Example usage::
s = botocore.session.get_session()
ddb = s.get_service_model('dynamodb')
arg_gen = ArgumentGenerator()
sample_input = arg_gen.generate_skeleton(
ddb.operation_model('CreateTable').input_shape)
print("Sample input for dynamodb.CreateTable: %s" % sample_input)
"""
def __init__(self):
pass
def generate_skeleton(self, shape):
"""Generate a sample input.
:type shape: ``botocore.model.Shape``
:param shape: The input shape.
:return: The generated skeleton input corresponding to the
provided input shape.
"""
stack = []
return self._generate_skeleton(shape, stack)
def _generate_skeleton(self, shape, stack):
stack.append(shape.name)
try:
if shape.type_name == 'structure':
return self._generate_type_structure(shape, stack)
elif shape.type_name == 'list':
return self._generate_type_list(shape, stack)
elif shape.type_name == 'map':
return self._generate_type_map(shape, stack)
elif shape.type_name == 'string':
return ''
elif shape.type_name in ['integer', 'long']:
return 0
elif shape.type_name == 'float':
return 0.0
elif shape.type_name == 'boolean':
return True
finally:
stack.pop()
def _generate_type_structure(self, shape, stack):
if stack.count(shape.name) > 1:
return {}
skeleton = OrderedDict()
for member_name, member_shape in shape.members.items():
skeleton[member_name] = self._generate_skeleton(member_shape,
stack)
return skeleton
def _generate_type_list(self, shape, stack):
# For list elements we've arbitrarily decided to
# return two elements for the skeleton list.
return [
self._generate_skeleton(shape.member, stack),
]
def _generate_type_map(self, shape, stack):
key_shape = shape.key
value_shape = shape.value
assert key_shape.type_name == 'string'
return OrderedDict([
('KeyName', self._generate_skeleton(value_shape, stack)),
])
| apache-2.0 | 4,503,787,993,518,301,700 | 33.390566 | 82 | 0.600208 | false |
jepler/linuxcnc-mirror | tests/tool-info/random-no-startup-tool/test-ui.py | 1 | 8177 | #!/usr/bin/env python
import linuxcnc
import hal
import math
import time
import sys
import subprocess
import os
import signal
import glob
import re
def wait_for_linuxcnc_startup(status, timeout=10.0):
"""Poll the Status buffer waiting for it to look initialized,
rather than just allocated (all-zero). Returns on success, throws
RuntimeError on failure."""
start_time = time.time()
while time.time() - start_time < timeout:
status.poll()
if (status.angular_units == 0.0) \
or (status.axes == 0) \
or (status.axis_mask == 0) \
or (status.cycle_time == 0.0) \
or (status.exec_state != linuxcnc.EXEC_DONE) \
or (status.interp_state != linuxcnc.INTERP_IDLE) \
or (status.inpos == False) \
or (status.linear_units == 0.0) \
or (status.max_acceleration == 0.0) \
or (status.max_velocity == 0.0) \
or (status.program_units == 0.0) \
or (status.rapidrate == 0.0) \
or (status.state != linuxcnc.STATE_ESTOP) \
or (status.task_state != linuxcnc.STATE_ESTOP):
time.sleep(0.1)
else:
# looks good
return
# timeout, throw an exception
raise RuntimeError
def verify_interp_vars(state, current_tool, current_pocket, selected_tool, selected_pocket):
c.mdi('(debug,current_tool=#<_current_tool> current_pocket=#<_current_pocket> selected_tool=#<_selected_tool> selected_pocket=#<_selected_pocket>)')
c.wait_complete()
expected = "current_tool=%.6f current_pocket=%.6f selected_tool=%.6f selected_pocket=%.6f" % (current_tool, current_pocket, selected_tool, selected_pocket)
while True:
result = e.poll()
        if result is None:
print "nothing from polling error channel"
sys.exit(1)
(type, msg) = result
if type == linuxcnc.OPERATOR_DISPLAY:
if msg == expected:
# success!
break
print "state='%s', unexpected interp variables" % state
print "result:", msg
print "expected:", expected
sys.exit(1)
else:
print "state='%s', ignoring unexpected error type %d: %s" % (state, type, msg)
print "state='%s', got expected interp variables:" % state
print " current_tool=%.6f" % current_tool
print " current_pocket=%.6f" % current_pocket
print " selected_tool=%.6f" % selected_tool
print " selected_pocket=%.6f" % selected_pocket
def verify_io_pins(state, tool_number, tool_prep_number, tool_prep_pocket):
if h['tool-number'] != tool_number:
print "state=%s, expected io.tool-number=%d, got %d" % (state, tool_number, h['tool-number'])
sys.exit(1)
if h['tool-prep-number'] != tool_prep_number:
print "state=%s, expected io.tool-prep-number=%d, got %d" % (state, tool_prep_number, h['tool-prep-number'])
sys.exit(1)
if h['tool-prep-pocket'] != tool_prep_pocket:
print "state=%s, expected io.tool-prep-pocket=%d, got %d" % (state, tool_prep_pocket, h['tool-prep-pocket'])
sys.exit(1)
print "state='%s', got expected io pins:" % state
print " tool-number=%d" % tool_number
print " tool-prep-number=%d" % tool_prep_number
print " tool-prep-pocket=%d" % tool_prep_pocket
def verify_status_buffer(state, tool_in_spindle):
s.poll()
if s.tool_in_spindle != tool_in_spindle:
print "state=%s, expected status.tool_in_spindle=%d, got %d" % (state, tool_in_spindle, s.tool_in_spindle)
sys.exit(1)
print "state='%s', got expected status buffer fields:" % state
print " tool_in_spindle=%d" % tool_in_spindle
def wait_for_hal_pin(pin_name, value, timeout=10):
start = time.time()
while time.time() < (start + timeout):
if h[pin_name] == value:
return
time.sleep(0.1)
print "timeout waiting for hal pin %s to go to %s!" % (pin_name, value)
sys.exit(1)
c = linuxcnc.command()
s = linuxcnc.stat()
e = linuxcnc.error_channel()
h = hal.component("test-ui")
h.newpin("tool-number", hal.HAL_S32, hal.HAL_IN)
h.newpin("tool-prep-number", hal.HAL_S32, hal.HAL_IN)
h.newpin("tool-prep-pocket", hal.HAL_S32, hal.HAL_IN)
h.newpin("tool-prepare", hal.HAL_BIT, hal.HAL_IN)
h.newpin("tool-prepared", hal.HAL_BIT, hal.HAL_OUT)
h.newpin("tool-change", hal.HAL_BIT, hal.HAL_IN)
h.newpin("tool-changed", hal.HAL_BIT, hal.HAL_OUT)
h['tool-prepared'] = False
h['tool-changed'] = False
h.ready()
hal.connect('test-ui.tool-number', 'tool-number')
hal.connect('test-ui.tool-prep-number', 'tool-prep-number')
hal.connect('test-ui.tool-prep-pocket', 'tool-prep-pocket')
hal.connect('test-ui.tool-prepare', 'tool-prepare')
hal.connect('test-ui.tool-prepared', 'tool-prepared')
hal.connect('test-ui.tool-change', 'tool-change')
hal.connect('test-ui.tool-changed', 'tool-changed')
# Wait for LinuxCNC to initialize itself so the Status buffer stabilizes.
wait_for_linuxcnc_startup(s)
c.state(linuxcnc.STATE_ESTOP_RESET)
c.state(linuxcnc.STATE_ON)
c.home(-1)
c.wait_complete()
c.mode(linuxcnc.MODE_MDI)
#
# Starting state should be sane.
#
verify_status_buffer(state='init', tool_in_spindle=-1)
verify_io_pins(state='init', tool_number=-1, tool_prep_number=0, tool_prep_pocket=0)
verify_interp_vars(state='init', current_tool=-1, current_pocket=2, selected_tool=0, selected_pocket=-1)
#
# After "T1" prepares the tool.
#
c.mdi('T1')
wait_for_hal_pin('tool-prepare', True)
h['tool-prepared'] = True
wait_for_hal_pin('tool-prepare', False)
h['tool-prepared'] = False
verify_status_buffer(state='after T1', tool_in_spindle=-1)
verify_io_pins(state='after T1', tool_number=-1, tool_prep_number=1, tool_prep_pocket=1)
verify_interp_vars(state='after T1', current_tool=-1, current_pocket=2, selected_tool=1, selected_pocket=1)
#
# After "M6" changes to the prepared tool.
#
c.mdi('M6')
wait_for_hal_pin('tool-change', True)
h['tool-changed'] = True
wait_for_hal_pin('tool-change', False)
h['tool-changed'] = False
verify_status_buffer(state='after T1 M6', tool_in_spindle=1)
verify_io_pins(state='after T1 M6', tool_number=1, tool_prep_number=0, tool_prep_pocket=0)
verify_interp_vars(state='after T1 M6', current_tool=1, current_pocket=0, selected_tool=1, selected_pocket=-1)
#
# After "T10" prepares the tool.
#
c.mdi('T10')
wait_for_hal_pin('tool-prepare', True)
h['tool-prepared'] = True
wait_for_hal_pin('tool-prepare', False)
h['tool-prepared'] = False
verify_status_buffer(state='after T10', tool_in_spindle=1)
verify_io_pins(state='after T10', tool_number=1, tool_prep_number=10, tool_prep_pocket=3)
verify_interp_vars(state='after T10', current_tool=1, current_pocket=0, selected_tool=10, selected_pocket=3)
#
# After "M6" changes to the prepared tool.
#
c.mdi('M6')
wait_for_hal_pin('tool-change', True)
h['tool-changed'] = True
wait_for_hal_pin('tool-change', False)
h['tool-changed'] = False
verify_status_buffer(state='after T10 M6', tool_in_spindle=10)
verify_io_pins(state='after T10 M6', tool_number=10, tool_prep_number=0, tool_prep_pocket=0)
verify_interp_vars(state='after T10 M6', current_tool=10, current_pocket=0, selected_tool=10, selected_pocket=-1)
#
# After "T99999" prepares a tool.
#
c.mdi('T99999')
wait_for_hal_pin('tool-prepare', True)
h['tool-prepared'] = True
wait_for_hal_pin('tool-prepare', False)
h['tool-prepared'] = False
verify_status_buffer(state='after T99999', tool_in_spindle=10)
verify_io_pins(state='after T99999', tool_number=10, tool_prep_number=99999, tool_prep_pocket=50)
verify_interp_vars(state='after T99999', current_tool=10, current_pocket=0, selected_tool=99999, selected_pocket=50)
#
# After "M6" changes to the prepared tool.
#
c.mdi('M6')
wait_for_hal_pin('tool-change', True)
h['tool-changed'] = True
wait_for_hal_pin('tool-change', False)
h['tool-changed'] = False
verify_status_buffer(state='after T99999 M6', tool_in_spindle=99999)
verify_io_pins(state='after T99999 M6', tool_number=99999, tool_prep_number=0, tool_prep_pocket=0)
verify_interp_vars(state='after T99999 M6', current_tool=99999, current_pocket=0, selected_tool=99999, selected_pocket=-1)
sys.exit(0)
| lgpl-2.1 | -7,770,383,162,234,791,000 | 30.817121 | 159 | 0.660756 | false |
fluo-io/fluo-deploy | lib/tests/ec2/test_config.py | 1 | 6124 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from muchos.config import Ec2DeployConfig
def test_ec2_cluster():
c = Ec2DeployConfig(
"muchos",
"../conf/muchos.props.example",
"../conf/hosts/example/example_cluster",
"../conf/checksums",
"../conf/templates",
"mycluster",
)
assert c.checksum_ver("accumulo", "1.9.0") == (
"sha256:"
"f68a6145029a9ea843b0305c90a7f5f0334d8a8ceeea94734267ec36421fe7fe"
)
assert c.checksum("accumulo") == (
"sha256:"
"df172111698c7a73aa031de09bd5589263a6b824482fbb9b4f0440a16602ed47"
)
assert c.get("ec2", "default_instance_type") == "m5d.large"
assert c.get("ec2", "worker_instance_type") == "m5d.large"
assert c.get("ec2", "aws_ami") == "ami-9887c6e7"
assert c.user_home() == "/home/centos"
assert c.max_ephemeral() == 1
assert c.mount_root() == "/media/ephemeral"
assert c.fstype() == "ext3"
assert c.force_format() == "no"
assert c.worker_data_dirs() == ["/media/ephemeral0"]
assert c.default_data_dirs() == ["/media/ephemeral0"]
assert c.metrics_drive_ids() == ["media-ephemeral0"]
assert c.shutdown_delay_minutes() == "0"
assert c.mounts(2) == ["/media/ephemeral0", "/media/ephemeral1"]
assert c.node_type_map() == {
"default": {
"mounts": ["/media/ephemeral0"],
"devices": ["/dev/nvme1n1"],
},
"worker": {
"mounts": ["/media/ephemeral0"],
"devices": ["/dev/nvme1n1"],
},
}
assert c.node_type("worker1") == "worker"
assert c.node_type("leader1") == "default"
assert not c.has_option("ec2", "vpc_id")
assert not c.has_option("ec2", "subnet_id")
assert c.get("ec2", "key_name") == "my_aws_key"
assert c.instance_tags() == {}
assert len(c.nodes()) == 6
assert c.get_node("leader1") == [
"namenode",
"resourcemanager",
"accumulomaster",
"zookeeper",
]
assert c.get_node("leader2") == ["metrics"]
assert c.get_node("worker1") == ["worker", "swarmmanager"]
assert c.get_node("worker2") == ["worker"]
assert c.get_node("worker3") == ["worker"]
assert c.has_service("accumulomaster")
assert not c.has_service("fluo")
assert c.get_service_hostnames("worker") == [
"worker1",
"worker2",
"worker3",
"worker4",
]
assert c.get_service_hostnames("zookeeper") == ["leader1"]
assert c.get_hosts() == {
"leader2": ("10.0.0.1", None),
"leader1": ("10.0.0.0", "23.0.0.0"),
"worker1": ("10.0.0.2", None),
"worker3": ("10.0.0.4", None),
"worker2": ("10.0.0.3", None),
"worker4": ("10.0.0.5", None),
}
assert c.get_public_ip("leader1") == "23.0.0.0"
assert c.get_private_ip("leader1") == "10.0.0.0"
assert c.cluster_name == "mycluster"
assert c.get_cluster_type() == "ec2"
assert c.version("accumulo").startswith("2.")
assert c.version("fluo").startswith("1.")
assert c.version("hadoop").startswith("3.")
assert c.version("zookeeper").startswith("3.")
assert c.get_service_private_ips("worker") == [
"10.0.0.2",
"10.0.0.3",
"10.0.0.4",
"10.0.0.5",
]
assert c.get("general", "proxy_hostname") == "leader1"
assert c.proxy_public_ip() == "23.0.0.0"
assert c.proxy_private_ip() == "10.0.0.0"
assert c.get("general", "cluster_user") == "centos"
assert c.get("general", "cluster_group") == "centos"
assert c.get_non_proxy() == [
("10.0.0.1", "leader2"),
("10.0.0.2", "worker1"),
("10.0.0.3", "worker2"),
("10.0.0.4", "worker3"),
("10.0.0.5", "worker4"),
]
assert c.get_host_services() == [
("leader1", "namenode resourcemanager accumulomaster zookeeper"),
("leader2", "metrics"),
("worker1", "worker swarmmanager"),
("worker2", "worker"),
("worker3", "worker"),
("worker4", "worker"),
]
def test_case_sensitive():
c = Ec2DeployConfig(
"muchos",
"../conf/muchos.props.example",
"../conf/hosts/example/example_cluster",
"../conf/checksums",
"../conf/templates",
"mycluster",
)
assert c.has_option("ec2", "default_instance_type")
assert not c.has_option("ec2", "Default_instance_type")
c.set("nodes", "CamelCaseWorker", "worker,fluo")
c.init_nodes()
assert c.get_node("CamelCaseWorker") == ["worker", "fluo"]
def test_ec2_cluster_template():
c = Ec2DeployConfig(
"muchos",
"../conf/muchos.props.example",
"../conf/hosts/example/example_cluster",
"../conf/checksums",
"../conf/templates",
"mycluster",
)
c.set("ec2", "cluster_template", "example")
c.init_template("../conf/templates")
# init_template already calls validate_template, so just ensure that
# we've loaded all the expected dictionary items from the example
assert "accumulomaster" in c.cluster_template_d
assert "client" in c.cluster_template_d
assert "metrics" in c.cluster_template_d
assert "namenode" in c.cluster_template_d
assert "resourcemanager" in c.cluster_template_d
assert "worker" in c.cluster_template_d
assert "zookeeper" in c.cluster_template_d
assert "devices" in c.cluster_template_d
| apache-2.0 | -129,113,717,068,885,630 | 35.452381 | 74 | 0.595526 | false |
bradmwalker/wanmap | sandbox/run.py | 1 | 8687 | #!/usr/bin/python3
"""
Create a network and inject scanner agents.
"""
from ipaddress import ip_interface
import logging
from pathlib import Path
from signal import signal, SIGINT, SIGTERM
import subprocess
import sys
from threading import Thread
from time import sleep
from typing import Sequence
import libvirt
import pexpect
CONSOLE_IP = '10.1.0.10/24'
HERE = Path(__file__).resolve().parent
def main():
logging.basicConfig(level=logging.INFO)
hypervisor = libvirt.open('qemu:///system')
vwan = VirtualWAN(hypervisor)
for bridge in ('dc-to-branch', 'dc-to-dmz', 'dc-to-external'):
vwan.add_bridge(bridge)
for bridge in ('branch', 'dmz'):
vwan.add_bridge(bridge)
dc_subnets = [f'dc{i:02d}' for i in range(16)]
for bridge in dc_subnets:
vwan.add_bridge(bridge)
vwan.add_router('dc', ['dc-to-external', 'dc-to-branch', 'dc-to-dmz', *dc_subnets])
vwan.add_router('branch', ['dc-to-branch', 'branch'])
vwan.add_router('dmz', ['dc-to-dmz', 'dmz'])
vwan.add_anchor('dc00', CONSOLE_IP)
vwan.add_scanner('scanner1', 'dc00', '10.1.0.254/20')
vwan.add_scanner('scanner2', 'branch', '10.2.0.254/20')
vwan.add_scanner('dmzscanner', 'dmz', '203.0.113.254/24')
vwan.add_scanner('external', 'dc-to-external', '198.51.100.2/30')
vwan.run()
while True:
sleep(1)
class VirtualWAN:
def __init__(self, hypervisor: libvirt.virConnect):
self._hypervisor = hypervisor
self._anchor = None
self._bridges = {}
self._routers = {}
self._scanners = {}
signal(SIGINT, self.signalled_exit)
signal(SIGTERM, self.signalled_exit)
def add_anchor(self, bridge: str, ip_address: str):
self._anchor = Anchor(self._bridges[bridge], ip_address)
def add_bridge(self, name: str):
self._bridges[name] = Bridge(name)
def add_router(self, name: str, bridges: Sequence[str] = ()):
bridges = [self._bridges[bridge] for bridge in bridges]
self._routers[name] = Router(name, bridges)
def add_scanner(self, name: str, bridge: str, ip_address: str):
bridge = self._bridges[bridge]
self._scanners[name] = Scanner(name, bridge, ip_address)
def run(self):
for bridge in self._bridges.values():
bridge.start(self._hypervisor)
if self._anchor is not None:
self._anchor.start()
for scanner in self._scanners.values():
scanner.start()
configuration_threads = []
for router in self._routers.values():
router.start(self._hypervisor)
thread = Thread(target=router.configure)
thread.start()
configuration_threads.append(thread)
for thread in configuration_threads:
thread.join()
logging.info('Virtual WAN initialization complete')
def cleanup(self):
for router in self._routers.values():
router.stop()
for scanner in self._scanners.values():
scanner.stop()
if self._anchor is not None:
self._anchor.stop()
for bridge in self._bridges.values():
bridge.stop()
def signalled_exit(self, signum, frame):
assert signum in (SIGINT, SIGTERM)
self.cleanup()
sys.exit(0)
class Bridge:
def __init__(self, name):
self.name = name
def start(self, hypervisor: libvirt.virConnect):
self._network = hypervisor.networkCreateXML(self.xml)
# Allow LLDP traffic on bridge
subprocess.call(
f'echo 16384 > /sys/class/net/{self.name}/bridge/group_fwd_mask',
shell=True)
def stop(self):
self._network.destroy()
@property
def xml(self) -> str:
return f'''<network>
<name>{self.name}</name>
<bridge name="{self.name}"/>
</network>
'''
class Anchor:
def __init__(self, bridge: Bridge, ip_address: str):
self._bridge = bridge
self._ip_address = ip_interface(ip_address)
def start(self):
subprocess.call(
'ip link add dev anchor type veth peer name rohcna', shell=True)
subprocess.call(
f'ip link set dev rohcna master {self._bridge.name}', shell=True)
subprocess.call(
f'ip addr add {self._ip_address} dev anchor', shell=True)
subprocess.call('ip link set dev anchor up', shell=True)
subprocess.call('ip link set dev rohcna up', shell=True)
gateway = next(self._ip_address.network.hosts())
subprocess.call(f'ip route add default via {gateway}', shell=True)
def stop(self):
subprocess.call('ip route del default', shell=True)
subprocess.call('ip link set dev anchor down', shell=True)
subprocess.call('ip link set dev rohcna down', shell=True)
subprocess.call('ip link del dev anchor', shell=True)
class Router:
def __init__(self, name: str, bridges: Sequence[Bridge] = ()):
self.name = name
self._bridges = bridges
def start(self, hypervisor: libvirt.virConnect):
logging.info(f'Starting router {self.name}')
self._guest = hypervisor.createXML(self.xml, 0)
def stop(self):
self._guest.destroy()
@property
def xml(self) -> str:
return f'''<domain type='kvm'>
<name>{self.name}</name>
<memory>524288</memory>
<vcpu>1</vcpu>
<os>
<type arch='x86_64' machine='pc'>hvm</type>
<boot dev='hd'/>
<boot dev='cdrom'/>
</os>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64-spice</emulator>
<disk type='file' device='cdrom'>
<source file='/vyos-rolling-latest.iso'/>
<target dev='hdc' bus='ide'/>
</disk>
''' + self._interface_xml + '''
<serial type='pty'/>
<graphics type='vnc' port='-1' listen='127.0.0.1'/>
</devices>
</domain>
'''
@property
def _interface_xml(self) -> str:
return ''.join(
f'''<interface type='bridge'>
<source bridge='{bridge.name}'/>
</interface>'''
for bridge in self._bridges)
def configure(self):
console = pexpect.spawn(
f'virsh -c qemu:///system console {self.name}',
# Allow bootup time
timeout=60)
console.sendline('')
console.expect('.+ login: ')
logging.info(f'Router {self.name} serial console is available')
console.sendline('vyos')
console.expect('Password: ')
console.sendline('vyos')
console.expect(r'vyos@.+:~\$ ')
console.sendline('cat > vyos.config')
logging.info(f'Configuring router {self.name}')
with open(HERE / f'{self.name}.config', 'rt') as file_:
for line in file_.readlines():
console.send(line)
console.sendeof()
console.sendline('configure')
console.expect('vyos@.+# ')
console.sendline('load vyos.config')
console.expect('vyos@.+# ')
console.sendline('commit')
console.expect('vyos@.+# ')
console.sendline('exit')
console.expect(r'vyos@.+:~\$ ')
console.sendline('exit')
console.sendline('exit')
console.close()
logging.info(f'Configured router {self.name}')
class Scanner:
def __init__(self, name: str, bridge: Bridge, ip_address: str):
self.name = name
self._bridge = bridge
self._ip_address = ip_interface(ip_address)
def start(self):
self._guest = guest = pexpect.spawn('unshare -nu', timeout=None)
logging.info('Running scanner %s with pid %d', self.name, self._guest.pid)
host = pexpect.spawn('bash')
host.sendline(
f'ip link add dev eth0 netns {guest.pid} type veth '
f'peer name {self.name}')
host.sendline(
f'ip link set dev {self.name} master {self._bridge.name}')
host.sendline(f'ip link set dev {self.name} up')
guest.sendline(f'ip addr add dev eth0 {self._ip_address}')
guest.sendline(f'ip link set dev eth0 up')
gateway = next(self._ip_address.network.hosts())
guest.sendline(f'ip route add default via {gateway}')
host.terminate()
self.configure()
def configure(self):
self._guest.sendline(f'hostname {self.name}')
# TODO: source file
with open(HERE / f'{self.name}.sh', 'rt') as file_:
for line in file_.readlines():
self._guest.send(line)
def stop(self):
self._guest.sendintr()
self._guest.terminate()
if __name__ == '__main__':
main()
| mit | 868,238,204,597,778,200 | 30.589091 | 87 | 0.596063 | false |
jabesq/home-assistant | homeassistant/components/amcrest/camera.py | 1 | 17637 | """Support for Amcrest IP cameras."""
import asyncio
from datetime import timedelta
import logging
from urllib3.exceptions import HTTPError
from amcrest import AmcrestError
import voluptuous as vol
from homeassistant.components.camera import (
Camera, CAMERA_SERVICE_SCHEMA, SUPPORT_ON_OFF, SUPPORT_STREAM)
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import (
CONF_NAME, STATE_ON, STATE_OFF)
from homeassistant.helpers.aiohttp_client import (
async_aiohttp_proxy_stream, async_aiohttp_proxy_web,
async_get_clientsession)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import (
CAMERA_WEB_SESSION_TIMEOUT, CAMERAS, DATA_AMCREST, DEVICES, SERVICE_UPDATE)
from .helpers import log_update_error, service_signal
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=15)
STREAM_SOURCE_LIST = [
'snapshot',
'mjpeg',
'rtsp',
]
_SRV_EN_REC = 'enable_recording'
_SRV_DS_REC = 'disable_recording'
_SRV_EN_AUD = 'enable_audio'
_SRV_DS_AUD = 'disable_audio'
_SRV_EN_MOT_REC = 'enable_motion_recording'
_SRV_DS_MOT_REC = 'disable_motion_recording'
_SRV_GOTO = 'goto_preset'
_SRV_CBW = 'set_color_bw'
_SRV_TOUR_ON = 'start_tour'
_SRV_TOUR_OFF = 'stop_tour'
_ATTR_PRESET = 'preset'
_ATTR_COLOR_BW = 'color_bw'
_CBW_COLOR = 'color'
_CBW_AUTO = 'auto'
_CBW_BW = 'bw'
_CBW = [_CBW_COLOR, _CBW_AUTO, _CBW_BW]
_SRV_GOTO_SCHEMA = CAMERA_SERVICE_SCHEMA.extend({
vol.Required(_ATTR_PRESET): vol.All(vol.Coerce(int), vol.Range(min=1)),
})
_SRV_CBW_SCHEMA = CAMERA_SERVICE_SCHEMA.extend({
vol.Required(_ATTR_COLOR_BW): vol.In(_CBW),
})
CAMERA_SERVICES = {
_SRV_EN_REC: (CAMERA_SERVICE_SCHEMA, 'async_enable_recording', ()),
_SRV_DS_REC: (CAMERA_SERVICE_SCHEMA, 'async_disable_recording', ()),
_SRV_EN_AUD: (CAMERA_SERVICE_SCHEMA, 'async_enable_audio', ()),
_SRV_DS_AUD: (CAMERA_SERVICE_SCHEMA, 'async_disable_audio', ()),
_SRV_EN_MOT_REC: (
CAMERA_SERVICE_SCHEMA, 'async_enable_motion_recording', ()),
_SRV_DS_MOT_REC: (
CAMERA_SERVICE_SCHEMA, 'async_disable_motion_recording', ()),
_SRV_GOTO: (_SRV_GOTO_SCHEMA, 'async_goto_preset', (_ATTR_PRESET,)),
_SRV_CBW: (_SRV_CBW_SCHEMA, 'async_set_color_bw', (_ATTR_COLOR_BW,)),
_SRV_TOUR_ON: (CAMERA_SERVICE_SCHEMA, 'async_start_tour', ()),
_SRV_TOUR_OFF: (CAMERA_SERVICE_SCHEMA, 'async_stop_tour', ()),
}
_BOOL_TO_STATE = {True: STATE_ON, False: STATE_OFF}
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up an Amcrest IP Camera."""
if discovery_info is None:
return
name = discovery_info[CONF_NAME]
device = hass.data[DATA_AMCREST][DEVICES][name]
async_add_entities([
AmcrestCam(name, device, hass.data[DATA_FFMPEG])], True)
class AmcrestCam(Camera):
"""An implementation of an Amcrest IP camera."""
def __init__(self, name, device, ffmpeg):
"""Initialize an Amcrest camera."""
super().__init__()
self._name = name
self._api = device.api
self._ffmpeg = ffmpeg
self._ffmpeg_arguments = device.ffmpeg_arguments
self._stream_source = device.stream_source
self._resolution = device.resolution
self._token = self._auth = device.authentication
self._control_light = device.control_light
self._is_recording = False
self._motion_detection_enabled = None
self._brand = None
self._model = None
self._audio_enabled = None
self._motion_recording_enabled = None
self._color_bw = None
self._rtsp_url = None
self._snapshot_lock = asyncio.Lock()
self._unsub_dispatcher = []
self._update_succeeded = False
async def async_camera_image(self):
"""Return a still image response from the camera."""
available = self.available
if not available or not self.is_on:
_LOGGER.warning(
                'Attempt to take snapshot when %s camera is %s', self.name,
'offline' if not available else 'off')
return None
async with self._snapshot_lock:
try:
# Send the request to snap a picture and return raw jpg data
response = await self.hass.async_add_executor_job(
self._api.snapshot)
return response.data
except (AmcrestError, HTTPError) as error:
log_update_error(
_LOGGER, 'get image from', self.name, 'camera', error)
return None
async def handle_async_mjpeg_stream(self, request):
"""Return an MJPEG stream."""
# The snapshot implementation is handled by the parent class
if self._stream_source == 'snapshot':
return await super().handle_async_mjpeg_stream(request)
if not self.available:
_LOGGER.warning(
'Attempt to stream %s when %s camera is offline',
self._stream_source, self.name)
return None
if self._stream_source == 'mjpeg':
# stream an MJPEG image stream directly from the camera
websession = async_get_clientsession(self.hass)
streaming_url = self._api.mjpeg_url(typeno=self._resolution)
stream_coro = websession.get(
streaming_url, auth=self._token,
timeout=CAMERA_WEB_SESSION_TIMEOUT)
return await async_aiohttp_proxy_web(
self.hass, request, stream_coro)
# streaming via ffmpeg
from haffmpeg.camera import CameraMjpeg
streaming_url = self._rtsp_url
stream = CameraMjpeg(self._ffmpeg.binary, loop=self.hass.loop)
await stream.open_camera(
streaming_url, extra_cmd=self._ffmpeg_arguments)
try:
stream_reader = await stream.get_reader()
return await async_aiohttp_proxy_stream(
self.hass, request, stream_reader,
self._ffmpeg.ffmpeg_stream_content_type)
finally:
await stream.close()
# Entity property overrides
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return True
@property
def name(self):
"""Return the name of this camera."""
return self._name
@property
def device_state_attributes(self):
"""Return the Amcrest-specific camera state attributes."""
attr = {}
if self._audio_enabled is not None:
attr['audio'] = _BOOL_TO_STATE.get(self._audio_enabled)
if self._motion_recording_enabled is not None:
attr['motion_recording'] = _BOOL_TO_STATE.get(
self._motion_recording_enabled)
if self._color_bw is not None:
attr[_ATTR_COLOR_BW] = self._color_bw
return attr
@property
def available(self):
"""Return True if entity is available."""
return self._api.available
@property
def supported_features(self):
"""Return supported features."""
return SUPPORT_ON_OFF | SUPPORT_STREAM
# Camera property overrides
@property
def is_recording(self):
"""Return true if the device is recording."""
return self._is_recording
@property
def brand(self):
"""Return the camera brand."""
return self._brand
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return self._motion_detection_enabled
@property
def model(self):
"""Return the camera model."""
return self._model
async def stream_source(self):
"""Return the source of the stream."""
return self._rtsp_url
@property
def is_on(self):
"""Return true if on."""
return self.is_streaming
# Other Entity method overrides
async def async_on_demand_update(self):
"""Update state."""
self.async_schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Subscribe to signals and add camera to list."""
for service, params in CAMERA_SERVICES.items():
self._unsub_dispatcher.append(async_dispatcher_connect(
self.hass,
service_signal(service, self.entity_id),
getattr(self, params[1])))
self._unsub_dispatcher.append(async_dispatcher_connect(
self.hass, service_signal(SERVICE_UPDATE, self._name),
self.async_on_demand_update))
self.hass.data[DATA_AMCREST][CAMERAS].append(self.entity_id)
async def async_will_remove_from_hass(self):
"""Remove camera from list and disconnect from signals."""
self.hass.data[DATA_AMCREST][CAMERAS].remove(self.entity_id)
for unsub_dispatcher in self._unsub_dispatcher:
unsub_dispatcher()
def update(self):
"""Update entity status."""
if not self.available or self._update_succeeded:
if not self.available:
self._update_succeeded = False
return
_LOGGER.debug('Updating %s camera', self.name)
try:
if self._brand is None:
resp = self._api.vendor_information.strip()
if resp.startswith('vendor='):
self._brand = resp.split('=')[-1]
else:
self._brand = 'unknown'
if self._model is None:
resp = self._api.device_type.strip()
if resp.startswith('type='):
self._model = resp.split('=')[-1]
else:
self._model = 'unknown'
self.is_streaming = self._api.video_enabled
self._is_recording = self._api.record_mode == 'Manual'
self._motion_detection_enabled = (
self._api.is_motion_detector_on())
self._audio_enabled = self._api.audio_enabled
self._motion_recording_enabled = (
self._api.is_record_on_motion_detection())
self._color_bw = _CBW[self._api.day_night_color]
self._rtsp_url = self._api.rtsp_url(typeno=self._resolution)
except AmcrestError as error:
log_update_error(
_LOGGER, 'get', self.name, 'camera attributes', error)
self._update_succeeded = False
else:
self._update_succeeded = True
# Other Camera method overrides
def turn_off(self):
"""Turn off camera."""
self._enable_video_stream(False)
def turn_on(self):
"""Turn on camera."""
self._enable_video_stream(True)
def enable_motion_detection(self):
"""Enable motion detection in the camera."""
self._enable_motion_detection(True)
def disable_motion_detection(self):
"""Disable motion detection in camera."""
self._enable_motion_detection(False)
# Additional Amcrest Camera service methods
async def async_enable_recording(self):
"""Call the job and enable recording."""
await self.hass.async_add_executor_job(self._enable_recording, True)
async def async_disable_recording(self):
"""Call the job and disable recording."""
await self.hass.async_add_executor_job(self._enable_recording, False)
async def async_enable_audio(self):
"""Call the job and enable audio."""
await self.hass.async_add_executor_job(self._enable_audio, True)
async def async_disable_audio(self):
"""Call the job and disable audio."""
await self.hass.async_add_executor_job(self._enable_audio, False)
async def async_enable_motion_recording(self):
"""Call the job and enable motion recording."""
await self.hass.async_add_executor_job(self._enable_motion_recording,
True)
async def async_disable_motion_recording(self):
"""Call the job and disable motion recording."""
await self.hass.async_add_executor_job(self._enable_motion_recording,
False)
async def async_goto_preset(self, preset):
"""Call the job and move camera to preset position."""
await self.hass.async_add_executor_job(self._goto_preset, preset)
async def async_set_color_bw(self, color_bw):
"""Call the job and set camera color mode."""
await self.hass.async_add_executor_job(self._set_color_bw, color_bw)
async def async_start_tour(self):
"""Call the job and start camera tour."""
await self.hass.async_add_executor_job(self._start_tour, True)
async def async_stop_tour(self):
"""Call the job and stop camera tour."""
await self.hass.async_add_executor_job(self._start_tour, False)
# Methods to send commands to Amcrest camera and handle errors
def _enable_video_stream(self, enable):
"""Enable or disable camera video stream."""
# Given the way the camera's state is determined by
# is_streaming and is_recording, we can't leave
# recording on if video stream is being turned off.
if self.is_recording and not enable:
self._enable_recording(False)
try:
self._api.video_enabled = enable
except AmcrestError as error:
log_update_error(
_LOGGER, 'enable' if enable else 'disable', self.name,
'camera video stream', error)
else:
self.is_streaming = enable
self.schedule_update_ha_state()
if self._control_light:
self._enable_light(self._audio_enabled or self.is_streaming)
def _enable_recording(self, enable):
"""Turn recording on or off."""
# Given the way the camera's state is determined by
# is_streaming and is_recording, we can't leave
# video stream off if recording is being turned on.
if not self.is_streaming and enable:
self._enable_video_stream(True)
rec_mode = {'Automatic': 0, 'Manual': 1}
try:
self._api.record_mode = rec_mode[
'Manual' if enable else 'Automatic']
except AmcrestError as error:
log_update_error(
_LOGGER, 'enable' if enable else 'disable', self.name,
'camera recording', error)
else:
self._is_recording = enable
self.schedule_update_ha_state()
def _enable_motion_detection(self, enable):
"""Enable or disable motion detection."""
try:
self._api.motion_detection = str(enable).lower()
except AmcrestError as error:
log_update_error(
_LOGGER, 'enable' if enable else 'disable', self.name,
'camera motion detection', error)
else:
self._motion_detection_enabled = enable
self.schedule_update_ha_state()
def _enable_audio(self, enable):
"""Enable or disable audio stream."""
try:
self._api.audio_enabled = enable
except AmcrestError as error:
log_update_error(
_LOGGER, 'enable' if enable else 'disable', self.name,
'camera audio stream', error)
else:
self._audio_enabled = enable
self.schedule_update_ha_state()
if self._control_light:
self._enable_light(self._audio_enabled or self.is_streaming)
def _enable_light(self, enable):
"""Enable or disable indicator light."""
try:
self._api.command(
'configManager.cgi?action=setConfig&LightGlobal[0].Enable={}'
.format(str(enable).lower()))
except AmcrestError as error:
log_update_error(
_LOGGER, 'enable' if enable else 'disable', self.name,
'indicator light', error)
def _enable_motion_recording(self, enable):
"""Enable or disable motion recording."""
try:
self._api.motion_recording = str(enable).lower()
except AmcrestError as error:
log_update_error(
_LOGGER, 'enable' if enable else 'disable', self.name,
'camera motion recording', error)
else:
self._motion_recording_enabled = enable
self.schedule_update_ha_state()
def _goto_preset(self, preset):
"""Move camera position and zoom to preset."""
try:
self._api.go_to_preset(
action='start', preset_point_number=preset)
except AmcrestError as error:
log_update_error(
_LOGGER, 'move', self.name,
'camera to preset {}'.format(preset), error)
def _set_color_bw(self, cbw):
"""Set camera color mode."""
try:
self._api.day_night_color = _CBW.index(cbw)
except AmcrestError as error:
log_update_error(
_LOGGER, 'set', self.name,
'camera color mode to {}'.format(cbw), error)
else:
self._color_bw = cbw
self.schedule_update_ha_state()
def _start_tour(self, start):
"""Start camera tour."""
try:
self._api.tour(start=start)
except AmcrestError as error:
log_update_error(
_LOGGER, 'start' if start else 'stop', self.name,
'camera tour', error)
| apache-2.0 | 6,563,436,508,412,655,000 | 35.515528 | 79 | 0.594829 | false |
IskyN/submeter-bill-generator | get_submeter_data.py | 1 | 6582 | from sys import stdout
from os import makedirs
from os.path import exists, abspath
from requests import Session
from datetime import datetime, timedelta
from getpass import getpass
periods_path = abspath(__file__ + "/../periods.txt")
site_url = "http://meterdata.submetersolutions.com"
login_url = "/login.php"
file_url = "/consumption_csv.php"
terminal = stdout.isatty() # not all functions work on PyCharm
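# Expected periods.txt layout (illustrative dates only, parsed by the __main__
# block below): one whitespace-separated start/end pair per line in YYYY-MM-DD
# format with start < end; lines beginning with '#' are skipped, e.g.
#   2016-01-01 2016-02-01
#   2016-02-01 2016-03-01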
def get_data(site_id, site_name, period=None):
"""
Access the online submeter database to download and save
data for a given (or asked) period.
Requires authentication.
:param str site_id: the looked-up "SiteID" param in the data query string
:param str site_name: the "SiteName" param in the data query string
:param str|List period: the month(s) to get data for (or formatted periods)
:return:
"""
# Get period to process (if not given)
if not period or not isinstance(period, list):
period = period or input("Enter a period to get data for: ")
periods = []
months = 0
try:
if len(period) == 7: # one month
start = datetime.strptime(period, "%b%Y")
end = last_day_of_month(start)
periods.append((start, end))
months += 1
else: # a period
first = datetime.strptime(period[:7], "%b%Y")
last = datetime.strptime(period[-7:], "%b%Y")
months += (last.year - first.year)*12 + \
(last.month - first.month + 1)
start = first
for _ in range(months):
end = last_day_of_month(start)
periods.append((start, end))
start = next_month(start)
except ValueError as e:
raise Exception("Incorrect period format. Accepted formats:\n"
"\tJan2016 (single month)\n"
"\tJan2016-Feb2017 (range of months)") from e
else: # properly formatted list
periods = period
months = len(periods)
# print(*periods, sep="\n")
if not exists("Data"):
makedirs("Data")
username = input("Username: ")
password = getpass() if terminal else input("Password: ")
# (Thanks to tigerFinch @ http://stackoverflow.com/a/17633072)
# Fill in your details here to be posted to the login form.
login_payload = {"txtUserName": username,
"txtPassword": password,
"btnLogin": "Login"}
query_string = {"SiteID": site_id,
"SiteName": site_name}
# print(query_string)
# Use 'with' to ensure the session context is closed after use.
with Session() as session:
response = session.post(site_url + login_url, data=login_payload)
assert response.status_code == 200, "Error from data server"
# print("url: {}".format(response.url))
assert response.url == site_url + "/propertylist.php", \
"Incorrect username/password"
update_progress_bar(0) # start progress bar
for idx, (start, end) in enumerate(periods):
if end - start > timedelta(days=55): # more than 1 month
x = start + timedelta(days=3) # actual month
y = end - timedelta(days=3) # actual month
                period = "Data/{}-{}_data.csv".format(x.strftime("%b%Y"),
                                                      y.strftime("%b%Y"))
else:
period = midpoint_day(start, end).strftime("Data/%b%Y_data.csv")
# Submeter Solutions uses inclusive dates, but City doesn't, so exclude "ToDate":
end = end - timedelta(days=1)
query_string["FromDate"] = start.strftime("%m/%d/%Y")
query_string["ToDate"] = end.strftime("%m/%d/%Y")
# print(period, ':',
# query_string["FromDate"], '-', query_string["ToDate"])
# An authorised request.
response = session.get(site_url + file_url, params=query_string)
assert response.status_code == 200, "Error from data server"
with open(period, 'xb') as f:
f.write(response.content)
update_progress_bar((idx+1) / months)
print("Data download complete. See 'Data' folder for files.")
def next_month(date):
month_after = date.replace(day=28) + timedelta(days=4) # never fails
return month_after.replace(day=1)
def last_day_of_month(date):
"""
Return the last day of the given month (leap year-sensitive),
with date unchanged.
Thanks to Augusto Men: http://stackoverflow.com/a/13565185
:param datetime date: the first day of the given month
:return: datetime
>>> d = datetime(2012, 2, 1)
>>> last_day_of_month(d)
datetime.datetime(2012, 2, 29, 0, 0)
>>> d.day == 1
True
"""
month_after = next_month(date)
return month_after - timedelta(days=month_after.day)
def midpoint_day(date1, date2):
"""
Finds the midpoint between two dates. (Rounds down.)
:type date1: datetime
:type date2: datetime
:return: datetime
>>> d1 = datetime(2016, 1, 1)
>>> d2 = datetime(2016, 1, 6)
>>> midpoint_day(d1, d2)
datetime.datetime(2016, 1, 3, 0, 0)
"""
if date1 > date2:
date1, date2 = date2, date1
return (date1 + (date2 - date1) / 2).replace(hour=0)
def update_progress_bar(percent: float):
if not terminal: # because PyCharm doesn't treat '\r' well
print("[{}{}]".format('#' * int(percent * 20),
' ' * (20 - int(percent * 20))))
elif percent == 1:
print("Progress: {:3.1%}".format(percent))
else:
print("Progress: {:3.1%}\r".format(percent), end="")
if __name__ == "__main__":
if not terminal:
print("WARNING: This is not a TTY/terminal. "
"Passwords will not be hidden.")
if periods_path and exists(periods_path):
p = []
with open(periods_path, 'r') as pf:
for line in pf:
if line[0] != '#': # skip comment lines
top, pot = line.split()[:2] # ignore inline comments
top = datetime.strptime(top, "%Y-%m-%d")
pot = datetime.strptime(pot, "%Y-%m-%d")
assert top < pot, "Improper period range (start !< end)"
p.append((top, pot))
get_data("128", "Brimley Plaza", p)
else:
get_data("128", "Brimley Plaza")
| apache-2.0 | 4,653,421,852,300,319,000 | 36.827586 | 93 | 0.558037 | false |
Glottotopia/aagd | moin/local/moin/MoinMoin/macro/_tests/test_Hits.py | 1 | 3829 | # -*- coding: iso-8859-1 -*-
"""
MoinMoin - MoinMoin.macro Hits tested
@copyright: 2007-2008 MoinMoin:ReimarBauer
@license: GNU GPL, see COPYING for details.
"""
import os
from MoinMoin import caching, macro
from MoinMoin.logfile import eventlog
from MoinMoin.PageEditor import PageEditor
from MoinMoin.Page import Page
from MoinMoin._tests import become_trusted, create_page, make_macro, nuke_eventlog, nuke_page
class TestHits:
"""Hits: testing Hits macro """
pagename = u'AutoCreatedMoinMoinTemporaryTestPageForHits'
def setup_class(self):
request = self.request
become_trusted(request)
self.page = create_page(request, self.pagename, u"Foo!")
# for that test eventlog needs to be empty
nuke_eventlog(request)
# hits is based on hitcounts which reads the cache
caching.CacheEntry(request, 'charts', 'hitcounts', scope='wiki').remove()
def teardown_class(self):
nuke_page(self.request, self.pagename)
def _test_macro(self, name, args):
m = make_macro(self.request, self.page)
return m.execute(name, args)
def _cleanStats(self):
# cleans all involved cache and log files
nuke_eventlog(self.request)
# hits is based on hitcounts which reads the cache
caching.CacheEntry(self.request, 'charts', 'hitcounts', scope='wiki').remove()
arena = Page(self.request, self.pagename)
caching.CacheEntry(self.request, arena, 'hitcounts', scope='item').remove()
def testHitsNoArg(self):
""" macro Hits test: 'no args for Hits (Hits is executed on current page) """
# <count> log entries for the current page and one for WikiSandBox simulating viewing
count = 3
eventlog.EventLog(self.request).add(self.request, 'VIEWPAGE', {'pagename': 'WikiSandBox'})
for i in range(count):
eventlog.EventLog(self.request).add(self.request, 'VIEWPAGE', {'pagename': self.pagename})
result = self._test_macro(u'Hits', u'')
self._cleanStats()
assert result == str(count)
def testHitsForAll(self):
""" macro Hits test: 'all=True' for Hits (all pages are counted for VIEWPAGE) """
# <count> * <num_pages> log entries for simulating viewing
pagenames = ['WikiSandBox', self.pagename]
num_pages = len(pagenames)
count = 2
for i in range(count):
for pagename in pagenames:
eventlog.EventLog(self.request).add(self.request, 'VIEWPAGE', {'pagename': pagename})
result = self._test_macro(u'Hits', u'all=True')
self._cleanStats()
assert result == str(count * num_pages)
def testHitsForFilter(self):
""" macro Hits test: 'event_type=SAVEPAGE' for Hits (SAVEPAGE counted for current page)"""
eventlog.EventLog(self.request).add(self.request, 'SAVEPAGE', {'pagename': self.pagename})
        # simulate a log entry SAVEPAGE for WikiSandBox to distinguish it from the current page
eventlog.EventLog(self.request).add(self.request, 'SAVEPAGE', {'pagename': 'WikiSandBox'})
result = self._test_macro(u'Hits', u'event_type=SAVEPAGE')
self._cleanStats()
assert result == "1"
def testHitsForAllAndFilter(self):
""" macro test: 'all=True, event_type=SAVEPAGE' for Hits (all pages are counted for SAVEPAGE)"""
eventlog.EventLog(self.request).add(self.request, 'SAVEPAGE', {'pagename': 'WikiSandBox'})
eventlog.EventLog(self.request).add(self.request, 'SAVEPAGE', {'pagename': self.pagename})
result = self._test_macro(u'Hits', u'all=True, event_type=SAVEPAGE')
self._cleanStats()
assert result == "2"
coverage_modules = ['MoinMoin.macro.Hits']
| mit | 7,299,168,824,747,804,000 | 43.047059 | 104 | 0.64116 | false |
briennakh/BIOF509 | Wk02/genetic_algorithm_optimizer.py | 1 | 5552 | """Module to calculate best path between multiple points, using genetic algorithm
Functions:
new_path -- path altering function that creates a new path
distance -- cost function that calculates distance as the cost of a path
select_best -- function that selects the best paths in a population
recombine -- path altering function that returns a child path recombined from two parent paths
genetic_algorithm_optimizer -- objective function that implements the genetic algorithm
"""
import random
def new_path(existing_path):
"""Switch two random consecutive points on a path
Arguments received:
existing_path -- list of coordinates, e.g. [(0, 0), (10, 5), (10, 10)], representing a path
Arguments returned:
path -- list of coordinates representing the mutated path
"""
path = existing_path[:]
point = random.randint(0, len(path)-2) # randomly choose a point between 1st and 2nd-to-last points on path
path[point+1], path[point] = path[point], path[point+1] # switch this point with the next point
return path
def distance(coords):
"""Calculate the distance of a path between multiple points
Arguments received:
    coords -- list of coordinates representing a path
Arguments returned:
distance -- total distance as a float
"""
distance = 0
for p1, p2 in zip(coords[:-1], coords[1:]):
distance += ((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2) ** 0.5
return distance
def select_best(population, cost_func, num_to_keep):
"""Select a given number of paths with the lowest cost (the best paths)
Arguments received:
population -- an array of lists of coordinates representing paths
cost_func -- function to calculate cost of a path
num_to_keep -- number of paths to select
Arguments returned:
[i[0] for i in scored_population[:num_to_keep]] -- an array of lists of coordinates representing the best paths
"""
scored_population = [(i, cost_func(i)) for i in population] # create a list of tuples: (path, cost)
scored_population.sort(key=lambda x: x[1]) # sort list by cost, lowest to highest
return [i[0] for i in scored_population[:num_to_keep]] # return num_to_keep paths with the lowest cost
def recombine(population):
"""Cross over two parent paths and return the resulting child path
Arguments received:
population -- an array of lists of coordinates representing paths
Arguments returned:
child -- list of coordinates representing a recombined child path
"""
# Randomly choose two parents
    options = list(range(len(population))) # candidate parent indices: 0 to len(population) - 1
random.shuffle(options)
partner1 = options[0]
partner2 = options[1]
# Choose a split point, take the first parent's order to that split point,
# then the second parent's order for all remaining points
split_point = random.randint(0, len(population[0])-1)
child = population[partner1][:split_point]
for point in population[partner2]:
if point not in child:
child.append(point)
return child
# Our genetic algorithm function currently only uses recombination. As we saw from the simulated
# annealing approach mutation is also a powerful tool in locating the optimal solution.
# Add mutation to the genetic algorithm function using the new_path function we created.
def genetic_algorithm_optimizer(starting_path, cost_func, new_path_func, pop_size, generations):
"""Calculate the best path between multiple points using a genetic algorithm
The genetic algorithm begins with a given path, which it shuffles to create a starting population of a given
size. Once the population is generated, the cost of each path is evaluated. The top 25 percent then are sent
through recombination, then mutation -- to hopefully generate 'better' paths -- to form a new population.
    Arguments received:
starting_path -- list of coordinates representing a path
cost_func -- function to calculate cost of a path
new_path_func -- function to generate a new path with two random consecutive points switched
pop_size -- population size, or amount of paths in one generation
generations -- number of iterations
Arguments returned:
population[0] -- list of coordinates representing the best path
cost_func(population[0]) -- cost of the best path
history -- an array of objects, each object containing information about each tested path
"""
    # Create a starting population of pop_size paths by randomly shuffling the points
population = []
for i in range(pop_size):
new_path = starting_path[:]
random.shuffle(new_path)
population.append(new_path)
history = []
# Take the top 25% of routes and recombine to create new routes, repeating for generations
for i in range(generations):
pop_best = select_best(population, cost_func, int(pop_size / 4))
new_population = []
mutated_population = []
        for j in range(pop_size):
            new_population.append(recombine(pop_best))
            if (random.random() <= 1/len(new_population[j])): # mutation probability, 1/path length
                mutated_population.append(new_path_func(new_population[j])) # mutate
            else:
                mutated_population.append(new_population[j]) # don't mutate
population = mutated_population
record = {'generation': i, 'current_cost': cost_func(population[0]), }
history.append(record)
return (population[0], cost_func(population[0]), history)
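# Minimal usage sketch (added for illustration; the coordinates below are made
# up and not part of the original module). `distance` is the cost function and
# `new_path` is the mutation function defined above.
if __name__ == '__main__':
    example_points = [(0, 0), (10, 5), (10, 10), (5, 20), (0, 15), (12, 18)]
    best_path, best_cost, history = genetic_algorithm_optimizer(
        example_points, distance, new_path, pop_size=100, generations=25)
    print(best_path)
    print(best_cost)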
| mit | 957,501,307,047,593,300 | 42.359375 | 115 | 0.698018 | false |
amkahn/event-extraction | extract_events.py | 1 | 12233 | #!/usr/bin/python
# Written by Andrea Kahn
# Last updated Aug. 29, 2014
'''
This script takes as input:
1) A path to a file containing patients' clinic notes, each line having the format: MRN [tab] date [tab] description [tab] note (one note per line; date must be in format YYYY-MM-DD, YYYY-MM, or YYYY)
2) A path to the file containing keywords to search on, each line having the format: keyword [tab] position ([tab] window size), where position is PRE-DATE or POST-DATE and the parenthesized content is optional (default window size = 100) (NB: casing of keywords is ignored)
3) Optionally, a float corresponding to the minimum score a date candidate must have in order to be output (default = 0.0)
4) Optionally, an int corresponding to the minimum number of dates to be output, regardless of whether they all have the minimum score (NB: if the number of date candidates extracted is lower than this int, only the number of date candidates extracted will be output) (default = 0)
It then extracts dates correlated with the keywords from the patients' clinic notes and prints to standard out lines in the following format (one line per patient):
MRN [tab] date1 [tab] score1 [tab] date2 [tab] score2 ...
...where MRNs are sorted alphabetically, and dates for a particular patient appear in descending order by score.
To switch to verbose output (lists of supporting snippets are printed after scores), comment out the default print_output call at the end of main() and uncomment the verbose print_output call directly below it.
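Example input lines (hypothetical values; [tab] marks a tab character):
notes file:    12345678 [tab] 2013-05-02 [tab] progress note [tab] Patient was diagnosed on 04/17/2013 with ...
keywords file: diagnosed [tab] POST-DATE [tab] 120
               diagnosis: [tab] PRE-DATE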
'''
from sys import argv
import logging
from date import *
from date_candidate import *
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.WARNING)
def main():
logging.basicConfig()
notes_filename = argv[1]
keywords_filename = argv[2]
if len(argv) > 3:
        filter = float(argv[3])
if len(argv) > 4:
            n = int(argv[4])
else:
n = 0
else:
filter = 0.0
n = 0
notes_file = open(notes_filename)
notes_dict = get_notes_dict(notes_file)
notes_file.close()
LOG.debug("Here is the notes dictionary: %s" % notes_dict)
keywords_file = open(keywords_filename)
keywords_list = get_keywords_list(keywords_file)
keywords_file.close()
LOG.debug("Here is the keywords list: %s" % keywords_list)
extracted = {}
for MRN in notes_dict:
extracted[MRN] = extract_events(notes_dict[MRN], keywords_list, filter, n)
print_output(extracted)
# print_output(extracted, True)
class ClinicNote(object):
'''
A ClinicNote has attributes 'date' (a Date object corresponding to the document creation date), 'desc' (a string corresponding to the description of the clinic note), and 'text' (a text blob corresponding to the contents of the note).
'''
def __init__(self, date, desc, text):
self.date = date
self.desc = desc
self.text = text
def __repr__(self):
return "date: %s; desc: %s" % (self.date, self.desc)
# return "date: %s; desc: %s; text: %s" % (self.date, self.desc, self.text)
class Keyword(object):
'''
A Keyword has 'text' (the keyword itself), 'position' (the string 'PRE-DATE' or 'POST-DATE'), an int 'window' (the number of characters before or after the keyword in which to look for a date). The last attribute, if not passed into the __init__ method, defaults to 100.
'''
def __init__(self, text, position, window=100):
if position not in ['PRE-DATE', 'POST-DATE']:
            LOG.warning("Bad position value %s; setting position to None" % str(position))
            position = None
self.text = text
self.position = position
self.window = int(window)
def __repr__(self):
return "(%s, %s, %s)" % (self.text, self.position, str(self.window))
def get_notes_dict(file):
'''
This method takes as input an open file object and returns a dictionary of MRNs mapped to lists of ClinicNote objects corresponding to the clinic notes for that patient.
'''
notes_dict = {}
for line in file:
line_elements = line.strip().split('\t')
if len(line_elements) not in [3, 4]:
LOG.warning("Bad notes file line format; skipping: %s" % line)
else:
if len(line_elements) == 3:
note = ClinicNote(line_elements[1], line_elements[2], '')
else:
note = ClinicNote(line_elements[1], line_elements[2], line_elements[3])
if notes_dict.get(line_elements[0]):
notes_dict[line_elements[0]].append(note)
else:
notes_dict[line_elements[0]] = [note]
return notes_dict
def get_keywords_list(file):
'''
This method takes as input an open file object and returns a list of Keyword objects.
'''
keywords = []
for line in file:
line_elements = line.strip().split('\t')
if len(line_elements) not in [2, 3]:
LOG.warning("Bad keywords file line format; skipping: %s" % line)
else:
text = line_elements[0]
position = line_elements[1]
if len(line_elements) == 3:
keyword = Keyword(text, position, line_elements[2])
else:
keyword = Keyword(text, position)
keywords.append(keyword)
return keywords
def extract_events(notes_list, keywords_list, filter=0.0, n=0):
'''
This function takes as input a list of ClinicNote objects, a list of Keyword objects, an optional minimum confidence score (float; default = 0.0), and an optional int 'n' referring to the minimum number of candidate dates to be returned (default = 0), and returns a list of DateCandidate objects corresponding with date expressions that the system has identified in the patient's clinic notes based on Keyword objects.
'''
extracted = get_date_candidates(notes_list, keywords_list)
rerank_candidates(extracted, filter, n)
return extracted
def naive_extract_events(notes):
'''
This function takes as input a list of ClinicNote objects and returns a list of DateCandidate objects corresponding with ALL date expressions that the system has identified in the patient's clinic notes. (Not called in current code, it is intended to be used to establish a recall ceiling for evaluation -- i.e., to see how many of the gold dates actually appear in the notes at all.)
'''
candidates = []
for note in notes:
dates = [x[0] for x in extract_dates_and_char_indices(note.text)]
for d in dates:
date_candidate = DateCandidate(d, [note.text])
candidates.append(date_candidate)
rerank_candidates(candidates)
return candidates
def get_date_candidates(notes, keywords):
'''
This method takes as input a list of ClinicNote objects and a list of Keyword objects. It then returns a list of DateCandidate objects representing dates that appear in the clinic notes correlated with the input keywords.
'''
candidates = []
pre_date_keywords = filter(lambda x: x.position=='PRE-DATE', keywords)
post_date_keywords = filter(lambda x: x.position=='POST-DATE', keywords)
LOG.debug("Here are the pre-date keywords: %s" % pre_date_keywords)
LOG.debug("Here are the post-date keywords: %s" % post_date_keywords)
# Store the window sizes in a dictionary that maps (keyword text, position) tuples to window sizes
window_sizes = {}
for keyword in keywords:
window_sizes[(keyword.text.lower(), keyword.position)] = keyword.window
if pre_date_keywords:
# pre_date_regex = re.compile('|'.join(['['+keyword[0].upper()+keyword[0]+']'+keyword[1:] for keyword in pre_date_keywords]))
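        # (Added note) Each keyword is expanded character by character into a
        # case-insensitive pattern, e.g. 'biopsy' -> '[Bb][Ii][Oo][Pp][Ss][Yy]',
        # and the per-keyword patterns are OR-ed together into a single regex.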
pre_date_keywords = map(lambda w: ''.join(map(lambda x: '[' + x.upper() + x + ']', w.text)), pre_date_keywords)
pre_date_regex = re.compile('|'.join(pre_date_keywords))
if post_date_keywords:
# post_date_regex = re.compile('|'.join(['['+keyword[0].upper()+keyword[0]+']'+keyword[1:] for keyword in post_date_keywords]))
post_date_keywords = map(lambda w: ''.join(map(lambda x: '[' + x.upper() + x + ']', w.text)), post_date_keywords)
post_date_regex = re.compile('|'.join(post_date_keywords))
for note in notes:
if pre_date_keywords:
pre_date_matches = pre_date_regex.finditer(note.text)
for match in pre_date_matches:
LOG.debug("Found pre-date keyword match: %s" % match.group(0))
window_size = window_sizes[(match.group(0).lower(), 'PRE-DATE')]
# Set the window beginning at the start of the match to pre_date_window_size characters or all remaining characters, whichever is less
window = note.text[match.start(0):(match.end(0)+window_size)]
# Look for first date in window -- do not pass a period or the end of the text
snippet = re.split('[.]|[a-z],|dmitted|:.*:', window)[0]
LOG.debug("Looking for date in: %s" % snippet)
event_date_str = extract_date(snippet, 'first')
LOG.debug("Extracted: %s" % event_date_str)
if event_date_str:
LOG.debug("Found date expression: %s" % event_date_str)
event_dates = make_date(event_date_str)
# FIXME: Consider alternatives that keep coordinated dates together (or throw them out entirely)
if event_dates:
for event_date in event_dates:
date_candidate = DateCandidate(event_date, [snippet])
candidates.append(date_candidate)
else:
LOG.debug("No date expression found")
if post_date_keywords:
LOG.debug("Looking for postdate matches")
post_date_matches = post_date_regex.finditer(note.text)
for match in post_date_matches:
LOG.debug("Found post-date keyword match: %s" % match.group(0))
window_size = window_sizes[(match.group(0).lower(), 'POST-DATE')]
# Set the window to include the event expression and the prewindow_size characters before the event expression or all preceding characters, whichever is less
window = note.text[(match.start(0)-window_size):match.end(0)]
# Look for the last date in the window -- do not pass a period
snippet = re.split('[.]|[a-z],|<%END%>|ischarge|dmitted.{20}', window)[-1]
LOG.debug("Looking for date in: %s" % snippet)
event_date_str = extract_date(snippet, 'last')
LOG.debug("Extracted: %s" % event_date_str)
if event_date_str:
LOG.debug("Found date expression: %s" % event_date_str)
event_dates = make_date(event_date_str)
if event_dates:
for event_date in event_dates:
date_candidate = DateCandidate(event_date, [snippet])
candidates.append(date_candidate)
return candidates
def print_output(output_dict, verbose=False):
'''
This method takes as input a hash of MRNs mapped to lists of DateCandidate objects and a boolean True or False specifying whether or not supporting snippets should be printed (default: False), and prints to standard out lines in the following format: MRN [tab] date1 [tab] score1 [tab] (snippets_list1 [tab]) date2 [tab] score2 (snippets_list2 [tab])... , where dates appear in descending order by score.
'''
for MRN in output_dict:
sorted_candidates = sorted(output_dict[MRN], key=lambda candidate: candidate.score, reverse=True)
if verbose:
print MRN+'\t'+'\t'.join([c.date.make_date_expression()+'\t'+str(c.score)+'\t'+str(c.snippets) for c in sorted_candidates])
else:
print MRN+'\t'+'\t'.join([c.date.make_date_expression()+'\t'+str(c.score) for c in sorted_candidates])
if __name__=='__main__':
main() | mit | 6,596,118,473,499,229,000 | 45.340909 | 422 | 0.622578 | false |
opentrials/opentrials-airflow | dags/pubmed.py | 1 | 1359 | import datetime
from airflow.models import DAG
from airflow.operators.latest_only_operator import LatestOnlyOperator
import utils.helpers as helpers
args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime.datetime(2017, 4, 1),
'retries': 1,
}
dag = DAG(
dag_id='pubmed',
default_args=args,
max_active_runs=1,
schedule_interval='@monthly'
)
latest_only_task = LatestOnlyOperator(
task_id='latest_only',
dag=dag,
)
collector_task = helpers.create_collector_task(
name='pubmed',
dag=dag,
command='make start pubmed 1900-01-01 2100-01-01'
)
unregistered_trials_task = helpers.create_processor_task(
name='pubmed_unregistered_trials',
dag=dag
)
trials_remover_task = helpers.create_processor_task(
name='trial_remover',
dag=dag
)
pubmed_publications_task = helpers.create_processor_task(
name='pubmed_publications',
dag=dag
)
merge_identifiers_and_reindex_task = helpers.create_trigger_subdag_task(
trigger_dag_id='merge_identifiers_and_reindex',
dag=dag
)
collector_task.set_upstream(latest_only_task)
unregistered_trials_task.set_upstream(collector_task)
trials_remover_task.set_upstream(unregistered_trials_task)
pubmed_publications_task.set_upstream(trials_remover_task)
merge_identifiers_and_reindex_task.set_upstream(pubmed_publications_task)
| mpl-2.0 | 7,137,762,957,153,894,000 | 23.709091 | 73 | 0.729948 | false |
evfredericksen/gmapsbounds | gmapsbounds/reader.py | 1 | 6819 | from gmapsbounds import utils
from gmapsbounds import constants
from gmapsbounds import llpx
from gmapsbounds import polygon
def get_nodes(rgb_image):
nodes = []
width, height = rgb_image.size
do_not_check = set()
menu_borders = utils.get_menu_borders(rgb_image)
for x in range(width):
for y in range(height):
if ((y in range(menu_borders[0][0], menu_borders[0][1] + 1) and
x in range (menu_borders[1][0], menu_borders[1][1] + 1)) or
(x, y) in do_not_check):
continue
r, g, b = rgb_image.getpixel((x, y))
if [r, g, b] == constants.INVALID[2]:
exclude_surrounding_nodes(x, y, do_not_check, rgb_image)
if valid_color(r, g, b) and no_invalid_adjacent(x, y, rgb_image):
nodes.append(Node(x, y))
if not nodes:
raise RuntimeError('Could not detect a boundary around this location')
nodes[0].visited = True
return nodes
def prune_extra_nodes(polygons):
pruned_polygons = []
for poly in polygons:
if len(poly.nodes) < constants.MINIMUM_PRUNING_SIZE:
assert len(poly.nodes) > 2
pruned_polygons.append(poly)
continue
pruned = polygon.Polygon(poly.nodes[:2])
for node in poly.nodes[2:]:
if utils.get_distance(pruned.nodes[-1], node) <= 1:
continue
end_node = None
if utils.same_line(pruned.nodes[-2], pruned.nodes[-1], node):
end_node = node
else:
if end_node is None:
end_node = node
pruned.nodes.append(end_node)
if len(pruned.nodes) > 2:
pruned_polygons.append(pruned)
return pruned_polygons
def exclude_surrounding_nodes(x, y, nodes_to_exclude, rgb_im, depth=5):
for i in range(-depth, depth + 1):
for j in range(-depth, depth + 1):
            try:
                r, g, b = rgb_im.getpixel((x+i, y+j))
            except IndexError:
                # Neighbor pixel falls outside the image; nothing to exclude there
                continue
            if [r, g, b] != constants.INVALID[2] or (i == 0 and j == 0):
                nodes_to_exclude.add((x+i, y+j))
def valid_color(r, g, b):
if ([r, g, b] in constants.VALID or
(r in constants.ALLOWABLE_RED and g in constants.ALLOWABLE_GREEN and
b in constants.ALLOWABLE_BLUE and abs(g - b) < constants.MAX_BLUE_GREEN_DIFFERENCE)):
return True
return False
def no_invalid_adjacent(x, y, image):
for i in range(-2, 3):
for j in range(-2, 3):
try:
r, g, b = image.getpixel((x + i, y + j))
if [r, g, b] in constants.INVALID or (r in constants.ALLOWABLE_RED and 100 > g == b):
return False
except IndexError:
return False
return True
def get_polygons(nodes, rgb_im):
polygons = []
unvisited = [node for node in nodes if node.visited is False]
while unvisited:
poly = polygon.Polygon()
current = unvisited[0]
current.visited = True
closest, distance = get_closest_unvisited_node(current, nodes, rgb_im)
if distance is not None and distance > constants.MAX_NODE_DIFFERENCE:
unvisited = unvisited[1:]
continue
while closest is not None:
poly.nodes.append(current)
current = closest
current.visited = True
closest, distance = get_closest_unvisited_node(current, nodes, rgb_im)
if closest is None:
break
i = -1
while distance > constants.MAX_NODE_DIFFERENCE:
if (current is poly.nodes[0] or i < -constants.MAX_NODE_BACKTRACK
or (utils.get_distance(poly.nodes[0], current) < constants.MAX_NODE_DIFFERENCE)):
closest = None
break
current = poly.nodes[i]
closest, distance = get_closest_unvisited_node(current, unvisited, rgb_im)
i -= 1
if len(poly.nodes) > 2:
polygons.append(poly)
unvisited = [node for node in nodes if node.visited is False]
return polygons
def prune_overlapping_nodes(polygons):
assert polygons
polygons = utils.sort_by_polygon_length(polygons)
polygons.reverse()
exterior_polygons = [polygons[0]]
for test_polygon in polygons[1:]:
starting_count = len(test_polygon.nodes)
for exterior_polygon in exterior_polygons:
exterior_nodes = test_polygon.get_exterior_nodes(exterior_polygon)
if not exterior_nodes:
if len(test_polygon.nodes) == starting_count:
exterior_polygon.inner = test_polygon
elif (exterior_polygon is exterior_polygons[-1] and
len(exterior_nodes) > 2 and
utils.get_distance(exterior_nodes[0], exterior_nodes[-1]) <=
constants.MAX_NODE_DIFFERENCE):
test_polygon.nodes = exterior_nodes
exterior_polygons.append(test_polygon)
break
return exterior_polygons
def get_closest_unvisited_node(current, nodes, rgb_im):
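    # Note (added): this bidirectional scan relies on `nodes` being ordered by x
    # (get_nodes appends nodes in x-major order), so once the x-gap alone exceeds
    # the best distance found, that direction can be abandoned via go_up/go_down.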
closest_node = None
shortest_distance = None
pos = nodes.index(current)
i = 1
go_up = True
go_down = True
while (0 <= pos - i or len(nodes) > pos + i) and (go_up or go_down):
for sign in [-1, 1]:
if sign == -1 and not go_down or sign == 1 and not go_up:
continue
index = pos + i*sign
if not 0 <= index < len(nodes):
continue
node = nodes[index]
if closest_node is not None:
if sign == -1 and shortest_distance < current.x - node.x:
go_down = False
elif sign == 1 and shortest_distance < node.x - current.x:
go_up = False
if node.visited:
continue
distance = utils.get_distance(nodes[pos], node)
distance *= utils.get_water_multiplier(current, node, rgb_im)
if shortest_distance is None or distance < shortest_distance:
closest_node = node
shortest_distance = distance
i += 1
return closest_node, shortest_distance
class Node:
def __init__(self, x, y):
self.x = x
self.y = y
self.location = None
self.visited = False
def get_lat_lng(self):
return llpx.pixels_to_lat_lng(self.location.offset[0] - self.location.pixcenter[0] + self.x,
self.location.offset[1] - self.location.pixcenter[1] + self.y, self.location.zoom)
def __str__(self):
return '<Node at {}, {}>'.format(self.x, self.y)
def __repr__(self):
return self.__str__() | mit | 9,046,952,827,354,978,000 | 37.971429 | 101 | 0.555947 | false |
arunchandramouli/fanofpython | code/features/datatypes/lists1.py | 1 | 3368 |
'''
Aim :: To demonstrate the use of a list
Define a simple list , add values to it and iterate and print it
A list consists of comma separated values which could be of any type
and is represented as [,,,,] .. all values are enclosed between '[' and ']'
** A list object is a mutable datatype which means it cannot be hashed
Anything that can be hashed can be set as a dictionary key **
Modifying an existing list will not result in a new list object,
and the memory address will not change either.
There are 2 scenarios of modification;
-> Edit the existing item
-> Both Mutable and Immutable datatypes can be edited, memory location not changed
-> Replace the existing item
-> Both mutable and immutable can be replaced
'''
'''
Empty Mutable Types ...
'''
list1 = []
dict1 = {}
set1 = set()
'''
Empty Immutable Types ...
'''
tuple1 = ()
str1 = ""
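'''
Added demo of the hashing note in the header docstring ::
an immutable tuple can be a dictionary key, a mutable list cannot.
'''
dict1[tuple1] = "an empty tuple hashes fine"
print "dict1 with a tuple key = %s "%(dict1),'\n\n'
try:
    dict1[list1] = "an empty list does not"
except TypeError as hashing_error:
    print "Lists are unhashable :: %s "%(hashing_error),'\n\n'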
'''
Define a simple list with multiple datatypes
'''
def_list = [1,2,"1","100","Python","Anne","A!@345<>_()",True,False,{1:100,2:200,3:300},range(10)]
'''
Now create a variable
'''
vara = def_list
'''
Modification of vara will result in modifying def_list
'''
vara.append("Hero")
print "Address of vara and def_list %s and %s "%(id(vara),id(def_list)),'\n\n'
print "vara = %s "%(vara),'\n\n'
print "def_list = %s "%(def_list),'\n\n'
'''
Now creating a Partial Slice ...
When a slice is created partially, we are actually breaking the container
into pieces; the slice is a new object at a new memory location.
Hence modifying it will not affect the original container
'''
getmeasliceofit = def_list[3:]
print "Address of getmeasliceofit and def_list %s and %s "%(id(getmeasliceofit),id(def_list)),'\n\n'
print "getmeasliceofit = %s "%(getmeasliceofit),'\n\n'
print "def_list = %s "%(def_list),'\n\n'
'''
Now creating a Full Slice ...
A full slice expression such as def_list[:] copies the values into a brand
new list object, so editing that copy does not touch def_list.
Slice assignment, on the other hand, edits def_list in place:
for eg ::
if I edit as def_list[0:] = range(5) , def_list will also get modified
Meanwhile also If I edit as def_list[3:] = range(5), def_list will get modified
But If I edit getmeasliceofit, def_list will not get modified
'''
getmeasliceofit = def_list[:]
print "Address == ",id(def_list),'\n',id(def_list[3:]),'\n',id(getmeasliceofit),'\n',id(def_list[::]),'\n',id(def_list[0:]),'\n',id(def_list[:]),'\n'
'''
Modifying def_list[3:] will affect def_list, but modifying getmeasliceofit doesn't.
This is because getmeasliceofit resides at a different memory location.
'''
print '\n\n' , def_list , '\n\n'
def_list[3:] = range(50)
getmeasliceofit = None
print def_list , '\n\n\n',def_list[3:],'\n\n' , getmeasliceofit,'\n\n\n'
print 'Analyze memory locations of mutables examples ... ... ','\n\n'
sayx = [1,2,3,4,5]
print id(sayx),'\n'
sayx = [4,5,6,7,8]
print id(sayx),'\n'
x = range(10)
print id(x),'\n'
x = range(10,50)
print id(x),'\n'
print 'Modify a mutable it shall still refer same location ... ... ','\n\n'
''' A Simple list '''
sayx = [1,2,3,4,5]
print id(sayx),'\n'
''' A Simple list modified - change element @ position 4 '''
sayx[4] = range(10)
print id(sayx),'\n' | gpl-3.0 | 158,409,647,628,030,750 | 19.542683 | 149 | 0.663005 | false |
deepmind/spriteworld | spriteworld/configs/cobra/exploration.py | 1 | 2320 | # Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Exploration task used in COBRA.
There is no reward for this task, as it is used for task-free curiosity-driven
exploration.
Episodes last 10 steps, and each is initialized with 1-6 sprites of random
shape, color, and position.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from spriteworld import factor_distributions as distribs
from spriteworld import sprite_generators
from spriteworld import tasks
from spriteworld.configs.cobra import common
def get_config(mode=None):
"""Generate environment config.
Args:
mode: Unused.
Returns:
config: Dictionary defining task/environment configuration. Can be fed as
kwargs to environment.Environment.
"""
del mode # No train/test split for pure exploration
factors = distribs.Product([
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
distribs.Discrete('shape', ['square', 'triangle', 'circle']),
distribs.Discrete('scale', [0.13]),
distribs.Continuous('c0', 0., 1.),
distribs.Continuous('c1', 0.3, 1.),
distribs.Continuous('c2', 0.9, 1.),
])
num_sprites = lambda: np.random.randint(1, 7)
sprite_gen = sprite_generators.generate_sprites(
factors, num_sprites=num_sprites)
task = tasks.NoReward()
config = {
'task': task,
'action_space': common.action_space(),
'renderers': common.renderers(),
'init_sprites': sprite_gen,
'max_episode_length': 10,
'metadata': {
'name': os.path.basename(__file__)
}
}
return config
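# Example usage (illustrative sketch; assumes the spriteworld Environment API):
#   from spriteworld import environment
#   env = environment.Environment(**get_config())
#   timestep = env.reset()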
| apache-2.0 | -3,151,919,387,541,338,600 | 30.780822 | 78 | 0.677155 | false |
IntersectAustralia/dc2c | mecat/rifcs/publishservice.py | 1 | 2428 | from tardis.tardis_portal.publish.publishservice import PublishService
PARTY_RIFCS_FILENAME = "MyTARDIS-party-%s.xml"
COLLECTION_RIFCS_FILENAME = "MyTARDIS-%s-dataset-%s.xml"
class PartyPublishService(PublishService):
def get_template(self, type):
return self.provider.get_template(type=type)
def _remove_rifcs_from_oai_dir(self, oaipath):
#owner = self.experiment.created_by
#import os
#filename = os.path.join(oaipath, PARTY_RIFCS_FILENAME % owner.id)
#if os.path.exists(filename):
# os.remove(filename)
return
def _write_rifcs_to_oai_dir(self, oaipath):
from tardis.tardis_portal.xmlwriter import XMLWriter
xmlwriter = XMLWriter()
owner = self.experiment.created_by
xmlwriter.write_template_to_dir(oaipath, PARTY_RIFCS_FILENAME % owner.id,
self.get_template(type="party"), self.get_context())
class CollectionPublishService(PublishService):
def get_template(self, type):
return self.provider.get_template(type=type)
def remove_specific_rifcs(self, oaipath, dataset_id):
import os
filename = os.path.join(oaipath, COLLECTION_RIFCS_FILENAME % (self.experiment.id, dataset_id) )
if os.path.exists(filename):
os.remove(filename)
def _remove_rifcs_from_oai_dir(self, oaipath):
import os
datasets = self.experiment.dataset_set
for dataset_vals in datasets.values():
dataset_id = dataset_vals['id']
filename = os.path.join(oaipath, COLLECTION_RIFCS_FILENAME % (self.experiment.id, dataset_id) )
if os.path.exists(filename):
os.remove(filename)
def _write_rifcs_to_oai_dir(self, oaipath):
from tardis.tardis_portal.xmlwriter import XMLWriter
xmlwriter = XMLWriter()
datasets = self.experiment.dataset_set
for dataset_vals in datasets.values():
dataset_id = dataset_vals['id']
self.provider.set_dataset_id(dataset_id)
xmlwriter.write_template_to_dir(oaipath, COLLECTION_RIFCS_FILENAME %
(self.experiment.id, dataset_id),
self.get_template(type="dataset"),
self.get_context()) | gpl-3.0 | -6,296,705,034,827,789,000 | 43.163636 | 107 | 0.601318 | false |
CSIS/proccer | src/proccer/t/test_periodic.py | 1 | 1030 | from __future__ import with_statement
from datetime import datetime, timedelta
from mock import patch
from proccer.database import Job
from proccer.periodic import main
from proccer.t.testing import setup_module
def test_periodic():
still_bad_job = Job.create(session, 'foo', 'bar', 'baz')
still_bad_job.last_seen = still_bad_job.last_stamp = datetime(1979, 7, 7)
still_bad_job.state = 'error'
still_bad_job.warn_after = timedelta(seconds=1)
silent_bad_job = Job.create(session, 'foo', 'bar', 'baz')
silent_bad_job.last_seen = silent_bad_job.last_stamp = datetime(1979, 7, 7)
silent_bad_job.state = 'error'
silent_bad_job.warn_after = None
still_late_job = Job.create(session, 'foo', 'bar', 'baz')
still_late_job.last_seen = still_late_job.last_stamp = datetime(1979, 7, 7)
still_late_job.state = 'error'
still_late_job.warn_after = timedelta(seconds=1)
session.flush()
# FIXME - This needs real tests!
with patch('proccer.notifications.smtplib'):
main()
| mit | -5,977,464,837,307,305,000 | 33.333333 | 79 | 0.683495 | false |
houqp/floyd-cli | floyd/cli/experiment.py | 1 | 11976 | import click
from tabulate import tabulate
from time import sleep
import webbrowser
import sys
from shutil import copyfile
import os
import floyd
from floyd.cli.utils import (
get_module_task_instance_id,
normalize_job_name,
get_namespace_from_name
)
from floyd.client.experiment import ExperimentClient
from floyd.client.module import ModuleClient
from floyd.client.project import ProjectClient
from floyd.client.resource import ResourceClient
from floyd.client.task_instance import TaskInstanceClient
from floyd.exceptions import FloydException
from floyd.manager.experiment_config import ExperimentConfigManager
from floyd.manager.floyd_ignore import FloydIgnoreManager
from floyd.model.experiment_config import ExperimentConfig
from floyd.log import logger as floyd_logger
from floyd.cli.utils import read_yaml_config
# Log output which defines the exit status of the job
SUCCESS_OUTPUT = "[success] Finished execution"
FAILURE_OUTPUT = "[failed] Task execution failed"
SHUTDOWN_OUTPUT = "[shutdown] Task execution cancelled"
TIMEOUT_OUTPUT = "[timeout] Task execution cancelled"
TERMINATION_OUTPUT_LIST = [SUCCESS_OUTPUT,
FAILURE_OUTPUT,
SHUTDOWN_OUTPUT,
TIMEOUT_OUTPUT]
@click.command()
@click.argument('project_name', nargs=1)
def init(project_name):
"""
Initialize new project at the current path.
After this you can run other FloydHub commands like status and run.
"""
project_obj = ProjectClient().get_by_name(project_name)
if not project_obj:
namespace, name = get_namespace_from_name(project_name)
create_project_base_url = "{}/projects/create".format(floyd.floyd_web_host)
create_project_url = "{}?name={}&namespace={}".format(create_project_base_url, name, namespace)
floyd_logger.info(('Project name does not yet exist on floydhub.com. '
'Create your new project on floydhub.com:\n\t%s'),
create_project_base_url)
webbrowser.open(create_project_url)
name = click.prompt('Press ENTER to use project name "%s" or enter a different name' % project_name, default=project_name, show_default=False)
project_name = name.strip() or project_name
project_obj = ProjectClient().get_by_name(project_name)
if not project_obj:
raise FloydException('Project "%s" does not exist on floydhub.com. Ensure it exists before continuing.' % project_name)
namespace, name = get_namespace_from_name(project_name)
experiment_config = ExperimentConfig(name=name,
namespace=namespace,
family_id=project_obj.id)
ExperimentConfigManager.set_config(experiment_config)
FloydIgnoreManager.init()
yaml_config = read_yaml_config()
if not yaml_config:
copyfile(os.path.join(os.path.dirname(__file__), 'default_floyd.yml'), 'floyd.yml')
floyd_logger.info("Project \"%s\" initialized in current directory", project_name)
@click.command()
@click.argument('id', required=False, nargs=1)
def status(id):
"""
View status of all jobs in a project.
The command also accepts a specific job name.
"""
if id:
try:
experiment = ExperimentClient().get(normalize_job_name(id))
except FloydException:
experiment = ExperimentClient().get(id)
print_experiments([experiment])
else:
experiments = ExperimentClient().get_all()
print_experiments(experiments)
def print_experiments(experiments):
"""
Prints job details in a table. Includes urls and mode parameters
"""
headers = ["JOB NAME", "CREATED", "STATUS", "DURATION(s)", "INSTANCE", "DESCRIPTION", "METRICS"]
expt_list = []
for experiment in experiments:
expt_list.append([normalize_job_name(experiment.name),
experiment.created_pretty, experiment.state,
experiment.duration_rounded, experiment.instance_type_trimmed,
experiment.description, format_metrics(experiment.latest_metrics)])
floyd_logger.info(tabulate(expt_list, headers=headers))
def format_metrics(latest_metrics):
return ', '.join(
["%s=%s" % (k, latest_metrics[k]) for k in sorted(latest_metrics.keys())]
) if latest_metrics else ''
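# Illustrative examples (added):
#   format_metrics({'loss': 0.25, 'accuracy': 0.9}) == 'accuracy=0.9, loss=0.25'
#   format_metrics(None) == ''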
@click.command()
@click.argument('id', nargs=1)
@click.option('--path', '-p',
help='Download files in a specific path from a job')
def clone(id, path):
"""
- Download all files from a job
Eg: alice/projects/mnist/1/
Note: This will download the files that were originally uploaded at
the start of the job.
- Download files in a specific path from a job
Specify the path to a directory and download all its files and subdirectories.
Eg: --path models/checkpoint1
"""
try:
experiment = ExperimentClient().get(normalize_job_name(id, use_config=False))
except FloydException:
experiment = ExperimentClient().get(id)
task_instance_id = get_module_task_instance_id(experiment.task_instances)
task_instance = TaskInstanceClient().get(task_instance_id) if task_instance_id else None
if not task_instance:
sys.exit("Cannot clone this version of the job. Try a different version.")
module = ModuleClient().get(task_instance.module_id) if task_instance else None
if path:
# Download a directory from Code
code_url = "{}/api/v1/download/artifacts/code/{}?is_dir=true&path={}".format(floyd.floyd_host,
experiment.id,
path)
else:
# Download the full Code
code_url = "{}/api/v1/resources/{}?content=true&download=true".format(floyd.floyd_host,
module.resource_id)
ExperimentClient().download_tar(url=code_url,
untar=True,
delete_after_untar=True)
@click.command()
@click.argument('job_name_or_id', nargs=1, required=False)
def info(job_name_or_id):
"""
View detailed information of a job.
"""
try:
experiment = ExperimentClient().get(normalize_job_name(job_name_or_id))
except FloydException:
experiment = ExperimentClient().get(job_name_or_id)
task_instance_id = get_module_task_instance_id(experiment.task_instances)
task_instance = TaskInstanceClient().get(task_instance_id) if task_instance_id else None
normalized_job_name = normalize_job_name(experiment.name)
table = [["Job name", normalized_job_name],
["Created", experiment.created_pretty],
["Status", experiment.state], ["Duration(s)", experiment.duration_rounded],
["Instance", experiment.instance_type_trimmed],
["Description", experiment.description],
["Metrics", format_metrics(experiment.latest_metrics)]]
if task_instance and task_instance.mode in ['jupyter', 'serving']:
table.append(["Mode", task_instance.mode])
table.append(["Url", experiment.service_url])
if experiment.tensorboard_url:
table.append(["TensorBoard", experiment.tensorboard_url])
floyd_logger.info(tabulate(table))
def get_log_id(job_id):
log_msg_printed = False
while True:
try:
experiment = ExperimentClient().get(normalize_job_name(job_id))
except FloydException:
experiment = ExperimentClient().get(job_id)
instance_log_id = experiment.instance_log_id
if instance_log_id:
break
elif not log_msg_printed:
floyd_logger.info("Waiting for logs ...\n")
log_msg_printed = True
sleep(1)
return instance_log_id
def follow_logs(instance_log_id, sleep_duration=1):
"""
Follow the logs until Job termination.
"""
cur_idx = 0
job_terminated = False
while not job_terminated:
# Get the logs in a loop and log the new lines
log_file_contents = ResourceClient().get_content(instance_log_id)
print_output = log_file_contents[cur_idx:]
# Get the status of the Job from the current log line
job_terminated = any(terminal_output in print_output for terminal_output in TERMINATION_OUTPUT_LIST)
cur_idx += len(print_output)
sys.stdout.write(print_output)
sleep(sleep_duration)
@click.command()
@click.option('-u', '--url', is_flag=True, default=False, help='Only print url for accessing logs')
@click.option('-f', '--follow', is_flag=True, default=False, help='Keep streaming the logs in real time')
@click.argument('id', nargs=1, required=False)
def logs(id, url, follow, sleep_duration=1):
"""
View the logs of a job.
To follow along a job in real time, use the --follow flag
"""
instance_log_id = get_log_id(id)
if url:
log_url = "{}/api/v1/resources/{}?content=true".format(
floyd.floyd_host, instance_log_id)
floyd_logger.info(log_url)
return
if follow:
floyd_logger.info("Launching job ...")
follow_logs(instance_log_id, sleep_duration)
else:
log_file_contents = ResourceClient().get_content(instance_log_id)
if len(log_file_contents.strip()):
floyd_logger.info(log_file_contents.rstrip())
else:
floyd_logger.info("Launching job now. Try after a few seconds.")
@click.command()
@click.option('-u', '--url', is_flag=True, default=False, help='Only print url for accessing logs')
@click.argument('id', nargs=1, required=False)
def output(id, url):
"""
View the files from a job.
"""
try:
experiment = ExperimentClient().get(normalize_job_name(id))
except FloydException:
experiment = ExperimentClient().get(id)
output_dir_url = "%s/%s/files" % (floyd.floyd_web_host, experiment.name)
if url:
floyd_logger.info(output_dir_url)
else:
floyd_logger.info("Opening output path in your browser ...")
webbrowser.open(output_dir_url)
@click.command()
@click.argument('id', nargs=1, required=False)
def stop(id):
"""
Stop a running job.
"""
try:
experiment = ExperimentClient().get(normalize_job_name(id))
except FloydException:
experiment = ExperimentClient().get(id)
if experiment.state not in ["queued", "queue_scheduled", "running"]:
floyd_logger.info("Job in {} state cannot be stopped".format(experiment.state))
sys.exit(1)
if not ExperimentClient().stop(experiment.id):
floyd_logger.error("Failed to stop job")
sys.exit(1)
floyd_logger.info("Experiment shutdown request submitted. Check status to confirm shutdown")
@click.command()
@click.argument('names', nargs=-1)
@click.option('-y', '--yes', is_flag=True, default=False, help='Skip confirmation')
def delete(names, yes):
"""
Delete a training job.
"""
failures = False
for name in names:
try:
experiment = ExperimentClient().get(normalize_job_name(name))
except FloydException:
experiment = ExperimentClient().get(name)
if not experiment:
failures = True
continue
if not yes and not click.confirm("Delete Job: {}?".format(experiment.name),
abort=False,
default=False):
floyd_logger.info("Job {}: Skipped.".format(experiment.name))
continue
if not ExperimentClient().delete(experiment.id):
failures = True
else:
floyd_logger.info("Job %s Deleted", experiment.name)
if failures:
sys.exit(1)
| apache-2.0 | -4,036,297,198,941,373,000 | 35.181269 | 150 | 0.632933 | false |
raphaelgyory/django-rest-messaging-centrifugo | runtests.py | 1 | 2355 | #! /usr/bin/env python
from __future__ import print_function
import pytest
import sys
import os
import subprocess
PYTEST_ARGS = {
'default': ['tests'],
'fast': ['tests', '-q'],
}
FLAKE8_ARGS = ['rest_messaging_centrifugo', 'tests', '--ignore=E501']
sys.path.append(os.path.dirname(__file__))
def exit_on_failure(ret, message=None):
if ret:
sys.exit(ret)
def flake8_main(args):
print('Running flake8 code linting')
ret = subprocess.call(['flake8'] + args)
print('flake8 failed' if ret else 'flake8 passed')
return ret
def split_class_and_function(string):
class_string, function_string = string.split('.', 1)
return "%s and %s" % (class_string, function_string)
def is_function(string):
# `True` if it looks like a test function is included in the string.
return string.startswith('test_') or '.test_' in string
def is_class(string):
# `True` if first character is uppercase - assume it's a class name.
return string[0] == string[0].upper()
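# Illustrative examples (added):
#   split_class_and_function('TestCase.test_foo') -> 'TestCase and test_foo'
#   is_function('test_foo') -> True
#   is_class('TestCase') -> True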
if __name__ == "__main__":
try:
sys.argv.remove('--nolint')
except ValueError:
run_flake8 = True
else:
run_flake8 = False
try:
sys.argv.remove('--lintonly')
except ValueError:
run_tests = True
else:
run_tests = False
try:
sys.argv.remove('--fast')
except ValueError:
style = 'default'
else:
style = 'fast'
run_flake8 = False
if len(sys.argv) > 1:
pytest_args = sys.argv[1:]
first_arg = pytest_args[0]
if first_arg.startswith('-'):
# `runtests.py [flags]`
pytest_args = ['tests'] + pytest_args
elif is_class(first_arg) and is_function(first_arg):
# `runtests.py TestCase.test_function [flags]`
expression = split_class_and_function(first_arg)
pytest_args = ['tests', '-k', expression] + pytest_args[1:]
elif is_class(first_arg) or is_function(first_arg):
# `runtests.py TestCase [flags]`
# `runtests.py test_function [flags]`
pytest_args = ['tests', '-k', pytest_args[0]] + pytest_args[1:]
else:
pytest_args = PYTEST_ARGS[style]
if run_tests:
exit_on_failure(pytest.main(pytest_args))
if run_flake8:
exit_on_failure(flake8_main(FLAKE8_ARGS))
| isc | -397,090,748,982,040,600 | 24.879121 | 75 | 0.592357 | false |
arthurdejong/python-stdnum | stdnum/eu/vat.py | 1 | 5505 | # vat.py - functions for handling European VAT numbers
# coding: utf-8
#
# Copyright (C) 2012-2021 Arthur de Jong
# Copyright (C) 2015 Lionel Elie Mamane
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""VAT (European Union VAT number).
The European Union VAT number consists of a 2 letter country code (ISO
3166-1, except Greece which uses EL) followed by a number that is
allocated per country.
The exact format of the numbers varies per country and a country-specific
check is performed on the number using the VAT module that is relevant for
that country.
>>> compact('ATU 57194903')
'ATU57194903'
>>> validate('BE697449992')
'BE0697449992'
>>> validate('FR 61 954 506 077')
'FR61954506077'
>>> guess_country('00449544B01')
['nl']
"""
from stdnum.exceptions import *
from stdnum.util import clean, get_cc_module, get_soap_client
MEMBER_STATES = set([
'at', 'be', 'bg', 'cy', 'cz', 'de', 'dk', 'ee', 'es', 'fi', 'fr', 'gr',
'hr', 'hu', 'ie', 'it', 'lt', 'lu', 'lv', 'mt', 'nl', 'pl', 'pt', 'ro',
'se', 'si', 'sk', 'xi',
])
"""The collection of country codes that are queried. Greece is listed with a
country code of gr while for VAT purposes el is used instead. Northern
Ireland numbers are prefixed with xi but are validated as United Kingdom numbers."""
_country_modules = dict()
vies_wsdl = 'https://ec.europa.eu/taxation_customs/vies/checkVatService.wsdl'
"""The WSDL URL of the VAT Information Exchange System (VIES)."""
def _get_cc_module(cc):
"""Get the VAT number module based on the country code."""
# Greece uses a "wrong" country code
cc = cc.lower()
if cc == 'el':
cc = 'gr'
if cc not in MEMBER_STATES:
return
if cc == 'xi':
cc = 'gb'
if cc not in _country_modules:
_country_modules[cc] = get_cc_module(cc, 'vat')
return _country_modules[cc]
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, '').upper().strip()
cc = number[:2]
module = _get_cc_module(cc)
if not module:
raise InvalidComponent()
number = module.compact(number)
if not number.startswith(cc):
number = cc + number
return number
def validate(number):
"""Check if the number is a valid VAT number. This performs the
country-specific check for the number."""
number = clean(number, '').upper().strip()
cc = number[:2]
module = _get_cc_module(cc)
if not module:
raise InvalidComponent()
number = module.validate(number)
if not number.startswith(cc):
number = cc + number
return number
def is_valid(number):
"""Check if the number is a valid VAT number. This performs the
country-specific check for the number."""
try:
return bool(validate(number))
except ValidationError:
return False
def guess_country(number):
"""Guess the country code based on the number. This checks the number
against each of the validation routines and returns the list of countries
for which it is valid. This returns lower case codes and returns gr (not
el) for Greece."""
return [cc
for cc in MEMBER_STATES
if _get_cc_module(cc).is_valid(number)]
def check_vies(number, timeout=30): # pragma: no cover (not part of normal test suite)
"""Query the online European Commission VAT Information Exchange System
(VIES) for validity of the provided number. Note that the service has
usage limitations (see the VIES website for details). The timeout is in
seconds. This returns a dict-like object."""
# this function isn't automatically tested because it would require
# network access for the tests and unnecessarily load the VIES website
number = compact(number)
client = get_soap_client(vies_wsdl, timeout)
return client.checkVat(number[:2], number[2:])
def check_vies_approx(number, requester, timeout=30): # pragma: no cover
"""Query the online European Commission VAT Information Exchange System
(VIES) for validity of the provided number, providing a validity
certificate as proof. You will need to give your own VAT number for this
to work. Note that the service has usage limitations (see the VIES
website for details). The timeout is in seconds. This returns a dict-like
object."""
# this function isn't automatically tested because it would require
# network access for the tests and unnecessarily load the VIES website
number = compact(number)
requester = compact(requester)
client = get_soap_client(vies_wsdl, timeout)
return client.checkVatApprox(
countryCode=number[:2], vatNumber=number[2:],
requesterCountryCode=requester[:2], requesterVatNumber=requester[2:])
| lgpl-2.1 | -3,631,167,909,865,830,000 | 35.946309 | 87 | 0.695186 | false |
tinloaf/home-assistant | homeassistant/components/homekit_controller/__init__.py | 1 | 10546 | """
Support for Homekit device discovery.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/homekit_controller/
"""
import json
import logging
import os
from homeassistant.components.discovery import SERVICE_HOMEKIT
from homeassistant.helpers import discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import call_later
REQUIREMENTS = ['homekit==0.12.0']
DOMAIN = 'homekit_controller'
HOMEKIT_DIR = '.homekit'
# Mapping from Homekit type to component.
HOMEKIT_ACCESSORY_DISPATCH = {
'lightbulb': 'light',
'outlet': 'switch',
'switch': 'switch',
'thermostat': 'climate',
}
HOMEKIT_IGNORE = [
'BSB002',
'Home Assistant Bridge',
'TRADFRI gateway'
]
KNOWN_ACCESSORIES = "{}-accessories".format(DOMAIN)
KNOWN_DEVICES = "{}-devices".format(DOMAIN)
CONTROLLER = "{}-controller".format(DOMAIN)
_LOGGER = logging.getLogger(__name__)
REQUEST_TIMEOUT = 5 # seconds
RETRY_INTERVAL = 60 # seconds
class HomeKitConnectionError(ConnectionError):
"""Raised when unable to connect to target device."""
def get_serial(accessory):
"""Obtain the serial number of a HomeKit device."""
# pylint: disable=import-error
from homekit.model.services import ServicesTypes
from homekit.model.characteristics import CharacteristicsTypes
for service in accessory['services']:
if ServicesTypes.get_short(service['type']) != \
'accessory-information':
continue
for characteristic in service['characteristics']:
ctype = CharacteristicsTypes.get_short(
characteristic['type'])
if ctype != 'serial-number':
continue
return characteristic['value']
return None
class HKDevice():
"""HomeKit device."""
def __init__(self, hass, host, port, model, hkid, config_num, config):
"""Initialise a generic HomeKit device."""
_LOGGER.info("Setting up Homekit device %s", model)
self.hass = hass
self.controller = hass.data[CONTROLLER]
self.host = host
self.port = port
self.model = model
self.hkid = hkid
self.config_num = config_num
self.config = config
self.configurator = hass.components.configurator
self._connection_warning_logged = False
self.pairing = self.controller.pairings.get(hkid)
if self.pairing is not None:
self.accessory_setup()
else:
self.configure()
def accessory_setup(self):
"""Handle setup of a HomeKit accessory."""
# pylint: disable=import-error
from homekit.model.services import ServicesTypes
self.pairing.pairing_data['AccessoryIP'] = self.host
self.pairing.pairing_data['AccessoryPort'] = self.port
try:
data = self.pairing.list_accessories_and_characteristics()
except HomeKitConnectionError:
call_later(
self.hass, RETRY_INTERVAL, lambda _: self.accessory_setup())
return
for accessory in data:
serial = get_serial(accessory)
if serial in self.hass.data[KNOWN_ACCESSORIES]:
continue
self.hass.data[KNOWN_ACCESSORIES][serial] = self
aid = accessory['aid']
for service in accessory['services']:
service_info = {'serial': serial,
'aid': aid,
'iid': service['iid']}
devtype = ServicesTypes.get_short(service['type'])
_LOGGER.debug("Found %s", devtype)
component = HOMEKIT_ACCESSORY_DISPATCH.get(devtype, None)
if component is not None:
discovery.load_platform(self.hass, component, DOMAIN,
service_info, self.config)
def device_config_callback(self, callback_data):
"""Handle initial pairing."""
import homekit # pylint: disable=import-error
code = callback_data.get('code').strip()
try:
self.controller.perform_pairing(self.hkid, self.hkid, code)
except homekit.UnavailableError:
error_msg = "This accessory is already paired to another device. \
Please reset the accessory and try again."
_configurator = self.hass.data[DOMAIN+self.hkid]
self.configurator.notify_errors(_configurator, error_msg)
return
except homekit.AuthenticationError:
error_msg = "Incorrect HomeKit code for {}. Please check it and \
try again.".format(self.model)
_configurator = self.hass.data[DOMAIN+self.hkid]
self.configurator.notify_errors(_configurator, error_msg)
return
except homekit.UnknownError:
error_msg = "Received an unknown error. Please file a bug."
_configurator = self.hass.data[DOMAIN+self.hkid]
self.configurator.notify_errors(_configurator, error_msg)
raise
self.pairing = self.controller.pairings.get(self.hkid)
if self.pairing is not None:
pairing_file = os.path.join(
self.hass.config.path(),
HOMEKIT_DIR,
'pairing.json'
)
self.controller.save_data(pairing_file)
_configurator = self.hass.data[DOMAIN+self.hkid]
self.configurator.request_done(_configurator)
self.accessory_setup()
else:
error_msg = "Unable to pair, please try again"
_configurator = self.hass.data[DOMAIN+self.hkid]
self.configurator.notify_errors(_configurator, error_msg)
def configure(self):
"""Obtain the pairing code for a HomeKit device."""
description = "Please enter the HomeKit code for your {}".format(
self.model)
self.hass.data[DOMAIN+self.hkid] = \
self.configurator.request_config(self.model,
self.device_config_callback,
description=description,
submit_caption="submit",
fields=[{'id': 'code',
'name': 'HomeKit code',
'type': 'string'}])
class HomeKitEntity(Entity):
"""Representation of a Home Assistant HomeKit device."""
def __init__(self, accessory, devinfo):
"""Initialise a generic HomeKit device."""
self._name = accessory.model
self._accessory = accessory
self._aid = devinfo['aid']
self._iid = devinfo['iid']
self._address = "homekit-{}-{}".format(devinfo['serial'], self._iid)
self._features = 0
self._chars = {}
def update(self):
"""Obtain a HomeKit device's state."""
try:
pairing = self._accessory.pairing
data = pairing.list_accessories_and_characteristics()
except HomeKitConnectionError:
return
for accessory in data:
if accessory['aid'] != self._aid:
continue
for service in accessory['services']:
if service['iid'] != self._iid:
continue
self.update_characteristics(service['characteristics'])
break
@property
def unique_id(self):
"""Return the ID of this device."""
return self._address
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._accessory.pairing is not None
def update_characteristics(self, characteristics):
"""Synchronise a HomeKit device state with Home Assistant."""
raise NotImplementedError
def put_characteristics(self, characteristics):
"""Control a HomeKit device state from Home Assistant."""
chars = []
for row in characteristics:
chars.append((
row['aid'],
row['iid'],
row['value'],
))
self._accessory.pairing.put_characteristics(chars)
def setup(hass, config):
"""Set up for Homekit devices."""
# pylint: disable=import-error
import homekit
from homekit.controller import Pairing
hass.data[CONTROLLER] = controller = homekit.Controller()
data_dir = os.path.join(hass.config.path(), HOMEKIT_DIR)
if not os.path.isdir(data_dir):
os.mkdir(data_dir)
pairing_file = os.path.join(data_dir, 'pairings.json')
if os.path.exists(pairing_file):
controller.load_data(pairing_file)
# Migrate any existing pairings to the new internal homekit_python format
for device in os.listdir(data_dir):
if not device.startswith('hk-'):
continue
alias = device[3:]
if alias in controller.pairings:
continue
with open(os.path.join(data_dir, device)) as pairing_data_fp:
pairing_data = json.load(pairing_data_fp)
controller.pairings[alias] = Pairing(pairing_data)
controller.save_data(pairing_file)
def discovery_dispatch(service, discovery_info):
"""Dispatcher for Homekit discovery events."""
# model, id
host = discovery_info['host']
port = discovery_info['port']
model = discovery_info['properties']['md']
hkid = discovery_info['properties']['id']
config_num = int(discovery_info['properties']['c#'])
if model in HOMEKIT_IGNORE:
return
# Only register a device once, but rescan if the config has changed
if hkid in hass.data[KNOWN_DEVICES]:
device = hass.data[KNOWN_DEVICES][hkid]
if config_num > device.config_num and \
device.pairing_info is not None:
device.accessory_setup()
return
_LOGGER.debug('Discovered unique device %s', hkid)
device = HKDevice(hass, host, port, model, hkid, config_num, config)
hass.data[KNOWN_DEVICES][hkid] = device
hass.data[KNOWN_ACCESSORIES] = {}
hass.data[KNOWN_DEVICES] = {}
discovery.listen(hass, SERVICE_HOMEKIT, discovery_dispatch)
return True
| apache-2.0 | 554,708,964,111,326,800 | 34.870748 | 78 | 0.591125 | false |
williamroot/opps | opps/channels/admin.py | 1 | 3732 | # -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from mptt.admin import MPTTModelAdmin
from .models import Channel
from .forms import ChannelAdminForm
from opps.core.admin import PublishableAdmin
from opps.core.admin import apply_opps_rules
from opps.core.permissions.admin import AdminViewPermission
from opps.core.utils import get_template_path
import json
@apply_opps_rules('channels')
class ChannelAdmin(PublishableAdmin, MPTTModelAdmin, AdminViewPermission):
prepopulated_fields = {"slug": ("name",)}
list_display = ['name', 'show_channel_path', 'get_parent', 'site',
'date_available', 'homepage', 'order', 'show_in_menu',
'published']
list_filter = ['date_available', 'published', 'site', 'homepage', 'parent',
'show_in_menu']
search_fields = ['name', 'slug', 'long_slug', 'description']
exclude = ('user', 'long_slug')
raw_id_fields = ['parent', 'main_image']
form = ChannelAdminForm
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'parent', 'name', 'slug', 'layout', 'hat',
'description', 'main_image',
'order', ('show_in_menu', 'menu_url_target'),
'include_in_main_rss', 'homepage', 'group',
'paginate_by')}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('published', 'date_available')}),
)
def get_parent(self, obj):
if obj.parent_id:
long_slug, slug = obj.long_slug.rsplit("/", 1)
return long_slug
get_parent.admin_order_field = "parent"
get_parent.short_description = "Parent"
def show_channel_path(self, obj):
return unicode(obj)
show_channel_path.short_description = _(u'Channel Path')
def save_model(self, request, obj, form, change):
long_slug = u"{0}".format(obj.slug)
if obj.parent:
long_slug = u"{0}/{1}".format(obj.parent.slug, obj.slug)
obj.long_slug = long_slug
super(ChannelAdmin, self).save_model(request, obj, form, change)
def get_form(self, request, obj=None, **kwargs):
form = super(ChannelAdmin, self).get_form(request, obj, **kwargs)
channel_json = []
def _get_template_path(_path):
template = get_template_path(_path)
with open(template) as f:
_jsonData = f.read().replace('\n', '')
return json.loads(_jsonData)
def _get_json_channel(_obj):
return _get_template_path(
u'containers/{0}/channel.json'.format(_obj.long_slug))
def _get_json_channel_recursivelly(_obj):
channel_json = []
try:
channel_json = _get_json_channel(_obj)
except:
_is_root = _obj.is_root_node()
if not _is_root:
channel_json = _get_json_channel_recursivelly(_obj.parent)
elif _is_root:
try:
channel_json = _get_template_path(
u'containers/channel.json')
except:
pass
finally:
return channel_json
channel_json = _get_json_channel_recursivelly(obj)
if u'layout' in channel_json:
layout_list = ['default'] + [l for l in channel_json['layout']]
layout_choices = (
(n, n.title()) for n in layout_list)
form.base_fields['layout'].choices = layout_choices
return form
admin.site.register(Channel, ChannelAdmin)
| mit | -3,664,716,400,936,687,000 | 34.542857 | 79 | 0.556002 | false |
foursquare/pants | tests/python/pants_test/engine/scheduler_test_base.py | 1 | 3930 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
from builtins import object
from pants.base.file_system_project_tree import FileSystemProjectTree
from pants.engine.nodes import Return, Throw
from pants.engine.scheduler import Scheduler
from pants.option.global_options import DEFAULT_EXECUTION_OPTIONS
from pants.util.contextutil import temporary_file_path
from pants.util.dirutil import safe_mkdtemp, safe_rmtree
from pants_test.engine.util import init_native
class SchedulerTestBase(object):
"""A mixin for classes (tests, presumably) which need to create temporary schedulers.
TODO: In the medium term, this should be part of pants_test.base_test.BaseTest.
"""
_native = init_native()
def _create_work_dir(self):
work_dir = safe_mkdtemp()
self.addCleanup(safe_rmtree, work_dir)
return work_dir
def mk_fs_tree(self, build_root_src=None, ignore_patterns=None, work_dir=None):
"""Create a temporary FilesystemProjectTree.
:param build_root_src: Optional directory to pre-populate from; otherwise, empty.
:returns: A FilesystemProjectTree.
"""
work_dir = work_dir or self._create_work_dir()
build_root = os.path.join(work_dir, 'build_root')
if build_root_src is not None:
shutil.copytree(build_root_src, build_root, symlinks=True)
else:
os.makedirs(build_root)
return FileSystemProjectTree(build_root, ignore_patterns=ignore_patterns)
def mk_scheduler(self,
rules=None,
project_tree=None,
work_dir=None,
include_trace_on_error=True):
"""Creates a SchedulerSession for a Scheduler with the given Rules installed."""
rules = rules or []
work_dir = work_dir or self._create_work_dir()
project_tree = project_tree or self.mk_fs_tree(work_dir=work_dir)
scheduler = Scheduler(self._native,
project_tree,
work_dir,
rules,
DEFAULT_EXECUTION_OPTIONS,
include_trace_on_error=include_trace_on_error)
return scheduler.new_session()
def context_with_scheduler(self, scheduler, *args, **kwargs):
return self.context(*args, scheduler=scheduler, **kwargs)
def execute(self, scheduler, product, *subjects):
"""Runs an ExecutionRequest for the given product and subjects, and returns the result value."""
request = scheduler.execution_request([product], subjects)
return self.execute_literal(scheduler, request)
def execute_literal(self, scheduler, execution_request):
result = scheduler.execute(execution_request)
if result.error:
raise result.error
states = [state for _, state in result.root_products]
if any(type(state) is not Return for state in states):
with temporary_file_path(cleanup=False, suffix='.dot') as dot_file:
scheduler.visualize_graph_to_file(dot_file)
raise ValueError('At least one root failed: {}. Visualized as {}'.format(states, dot_file))
return list(state.value for state in states)
def execute_expecting_one_result(self, scheduler, product, subject):
request = scheduler.execution_request([product], [subject])
result = scheduler.execute(request)
if result.error:
raise result.error
states = [state for _, state in result.root_products]
self.assertEqual(len(states), 1)
state = states[0]
if isinstance(state, Throw):
raise state.exc
return state
def execute_raising_throw(self, scheduler, product, subject):
resulting_value = self.execute_expecting_one_result(scheduler, product, subject)
self.assertTrue(type(resulting_value) is Throw)
raise resulting_value.exc
| apache-2.0 | 5,303,034,757,333,562,000 | 37.529412 | 100 | 0.692366 | false |
ptrsxu/snippetpy | ds/ringbuffer.py | 1 | 1268 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""from python cookbook 2nd edition."""
class RingBuffer(object):
""" a ringbuffer not filled """
def __init__(self, size_max):
self.max = size_max
self.data = []
class __Full(object):
""" a ringbuffer filled """
def append(self, x):
self.data[self.cur] = x
self.cur = (self.cur + 1) % self.max
def tolist(self):
""" return the list with real order """
return self.data[self.cur:] + self.data[:self.cur]
def append(self, x):
""" add an element at the end of the buffer """
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
            # change the state of the instance to "FULL" forever
self.__class__ = self.__Full
def tolist(self):
""" return the list with real order """
return self.data
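# Design note (added): once the buffer fills up, __class__ is switched to
# __Full, so subsequent append() calls skip the length check entirely and
# tolist() returns the elements rotated into their true insertion order.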
def main():
x = RingBuffer(5)
x.append(1)
x.append(2)
x.append(3)
x.append(4)
print x.__class__, x.tolist()
x.append(5)
x.append(6)
x.append(7)
print x.__class__, x.tolist()
x.append(8)
x.append(9)
x.append(10)
print x.__class__, x.tolist()
if __name__ == "__main__":
main()
| mit | 683,121,483,345,865,500 | 22.924528 | 63 | 0.518927 | false |
TheProjecter/jxtl | test/test.py | 1 | 1738 | #
# $Id$
#
# Description
# Runs the same tests, but does it by using the Python language bindings.
# The Python bindings need to be be built and installed to run this.
#
# Copyright 2010 Dan Rinehimer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import libjxtl;
import glob;
import os.path;
import filecmp;
def format_case( value, format, context ):
if ( format == "upper" ):
return value.upper();
elif ( format == "lower" ):
return value.lower();
else:
return value;
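# Illustrative example (added): format_case('Ale', 'upper', None) returns 'ALE';
# the context argument is unused by this formatter.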
def compare( file1, file2 ):
if ( filecmp.cmp( file1, file2 ) == False ):
print "Failed test in " + os.path.dirname( file1 );
else:
os.remove( file2 );
inputs = glob.glob( "./t*/input" );
beers_xml = libjxtl.xml_to_dict( "t.xml" );
beers_json = libjxtl.json_to_dict( "t.json" );
t = libjxtl.Template();
for input in inputs:
dir = os.path.dirname( input );
t.load( input );
t.register_format( "upper", format_case );
t.register_format( "lower", format_case );
t.expand_to_file( dir + "/test.output", beers_xml );
compare( dir + "/output", dir + "/test.output" );
t.expand_to_file( dir + "/test.output", beers_json );
compare( dir + "/output", dir + "/test.output" );
| apache-2.0 | -1,674,219,676,627,167,200 | 30.6 | 75 | 0.659379 | false |
FourthLion/pydatasentry | pydatasentry/capture.py | 1 | 1585 | #!/usr/bin/env python
import uuid
import inspect
import json
import os, sys
import copy
from .config import get_config
from .helpers import dumper, merge
from .process import summarize_run
def capture_input(args, kwargs, metadata):
"""
Capture the function parameters for the functions that have been instrumented
"""
formula = kwargs.get('formula', None)
data = kwargs.get('data', None)
sentryopts = kwargs.pop('sentryopts', {})
# Inspect and get the source files...
curframe = inspect.currentframe()
calframes = inspect.getouterframes(curframe, 3)
filename = os.path.realpath(calframes[2][1])
lineno = calframes[2][2]
snippet = calframes[2][4]
uid = str(uuid.uuid1())
params = {
'uuid': uid,
'source': {
'filename': filename,
'lineno': lineno,
'snippet': snippet
},
'model': {
'library': {
'module': metadata['modname'],
'function': metadata['funcname']
},
'parameters': {
'formula': formula,
'data': data
},
}
#'other parameters': {
# 'args': args,
# 'kwargs': kwargs
#},
}
run = get_config()
merge(run, params)
merge(run, sentryopts)
return run
def capture_output(run, result):
"""
Capture the results of the instrumented function
"""
run['model']['result'] = result
summarize_run(run)
return
| mit | 5,243,514,757,579,922,000 | 23.384615 | 81 | 0.536278 | false |
mrakitin/sirepo | sirepo/srdb.py | 1 | 1714 | # -*- coding: utf-8 -*-
u"""db configuration
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkconfig
from pykern import pkinspect
from pykern import pkio
from pykern.pkdebug import pkdc, pkdexc, pkdlog, pkdp
import os.path
import sys
#: Relative to current directory only in dev mode
_DEFAULT_ROOT = 'run'
#: Configured root either by server_set_root or cfg
_root = None
def root():
return _root or _init_root()
@pkconfig.parse_none
def _cfg_root(value):
"""Config value or root package's parent or cwd with `_DEFAULT_ROOT`"""
return value
def _init_root():
global cfg, _root
cfg = pkconfig.init(
root=(None, _cfg_root, 'where database resides'),
)
v = cfg.root
if v:
assert os.path.isabs(v), \
'{}: SIREPO_SRDB_ROOT must be absolute'.format(v)
assert os.path.isdir(v), \
'{}: SIREPO_SRDB_ROOT must be a directory and exist'.format(v)
v = pkio.py_path(v)
else:
assert pkconfig.channel_in('dev'), \
'SIREPO_SRDB_ROOT must be configured except in DEV'
fn = sys.modules[pkinspect.root_package(_init_root)].__file__
root = pkio.py_path(pkio.py_path(pkio.py_path(fn).dirname).dirname)
# Check to see if we are in our dev directory. This is a hack,
# but should be reliable.
if not root.join('requirements.txt').check():
# Don't run from an install directory
root = pkio.py_path('.')
v = pkio.mkdir_parent(root.join(_DEFAULT_ROOT))
_root = v
return v
| apache-2.0 | 8,552,786,995,130,998,000 | 29.070175 | 75 | 0.63769 | false |
StichtingOpenGeo/transitpubsub | src/zmq_network.py | 1 | 1444 | from consts import ZMQ_SERVER_NETWORK, ZMQ_PUBSUB_KV17
from network import network
from helpers import serialize
import zmq
import sys
# Initialize the cached network
sys.stderr.write('Caching networkgraph...')
net = network()
sys.stderr.write('Done!\n')
# Initialize a zeromq context
context = zmq.Context()
# Set up a channel to receive network requests
sys.stderr.write('Setting up a ZeroMQ REP: %s\n' % (ZMQ_SERVER_NETWORK))
client = context.socket(zmq.REP)
client.bind(ZMQ_SERVER_NETWORK)
# Set up a channel to receive KV17 requests
sys.stderr.write('Setting up a ZeroMQ SUB: %s\n' % (ZMQ_PUBSUB_KV17))
subscribe_kv17 = context.socket(zmq.SUB)
subscribe_kv17.connect(ZMQ_PUBSUB_KV17)
subscribe_kv17.setsockopt(zmq.SUBSCRIBE, '')
# Set up a poller
poller = zmq.Poller()
poller.register(client, zmq.POLLIN)
poller.register(subscribe_kv17, zmq.POLLIN)
sys.stderr.write('Ready.\n')
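# Request format handled below (added note): a REQ client sends a comma-separated
# string, either "j,<arg>" (journey pattern lookup) or "p,<arg1>,...,<arg6>"
# (passed lookup); anything else gets an empty reply.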
while True:
socks = dict(poller.poll())
if socks.get(client) == zmq.POLLIN:
arguments = client.recv().split(',')
if arguments[0] == 'j' and len(arguments) == 2:
client.send(serialize(net.journeypatterncode(arguments[1])))
elif arguments[0] == 'p' and len(arguments) == 7:
client.send(serialize(net.passed(arguments[1], arguments[2], arguments[3], arguments[4], arguments[5], arguments[6])))
else:
client.send('')
elif socks.get(subscribe_kv17) == zmq.POLLIN:
pass
| agpl-3.0 | 4,215,510,292,400,556,500 | 30.391304 | 130 | 0.693906 | false |
maxalbert/colormap-selector | mapping_3d_to_2d_test.py | 1 | 1798 | import numpy as np
from cross_section import Plane
from mapping_3d_to_2d import *
def test_initialise_mapping_3d_to_2d_simple():
"""
Check that for a plane orthogonal to the x-axis the transformation
simply drops the constant x-coordinate.
"""
plane1 = Plane([50, 0, 0], n=[1, 0, 0])
f1 = Mapping3Dto2D(plane1)
# Check the 3d -> 2d transformation
assert np.allclose(f1.apply([50, -1, 4]), [-1, 4])
assert np.allclose(f1.apply([50, 3, 7]), [3, 7])
# Check the 2d -> 3d transformation
assert np.allclose(f1.apply_inv([-1, 4]), [50, -1, 4])
assert np.allclose(f1.apply_inv([3, 7]), [50, 3, 7])
assert f1.apply_inv([-1, 4]).ndim == 1
assert f1.apply_inv([[-1, 4]]).ndim == 2
assert np.allclose(f1.apply_inv([[-1, 4], [3, 7]]), [[50, -1, 4], [50, 3, 7]])
# Regression test: check that applying the transformation does not
# change the shape/dimension of the input array.
pt1 = np.array([2., 6., 4.])
pt2 = np.array([[2., 6., 4.]])
_ = f1.apply(pt1)
_ = f1.apply(pt2)
assert pt1.shape == (3,)
assert pt2.shape == (1, 3)
plane2 = Plane([0, 30, 0], n=[0, 1, 0])
f2 = Mapping3Dto2D(plane2)
# Check the 3d -> 2d transformation
assert np.allclose(f2.apply([-1, 30, 4]), [-1, 4])
assert np.allclose(f2.apply([3, 30, 7]), [3, 7])
# Check the 2d -> 3d transformation
assert np.allclose(f2.apply_inv([-1, 4]), [-1, 30, 4])
assert np.allclose(f2.apply_inv([3, 7]), [3, 30, 7])
# Regression test: check that applying the inverse transformation
# does not change the shape/dimension of the input array.
pt1 = np.array([2., 6.])
pt2 = np.array([[2., 6.]])
_ = f1.apply_inv(pt1)
_ = f1.apply_inv(pt2)
assert pt1.shape == (2,)
assert pt2.shape == (1, 2)
| mit | 6,102,046,278,074,886,000 | 33.576923 | 82 | 0.585095 | false |
bram85/topydo | test/test_view.py | 1 | 1542 | # Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from topydo.lib import Filter
from topydo.lib.Sorter import Sorter
from topydo.lib.TodoFile import TodoFile
from topydo.lib.TodoList import TodoList
from .facilities import load_file, print_view, todolist_to_string
from .topydo_testcase import TopydoTest
class ViewTest(TopydoTest):
def test_view(self):
""" Check filters and printer for views. """
todofile = TodoFile('test/data/FilterTest1.txt')
ref = load_file('test/data/ViewTest1-result.txt')
todolist = TodoList(todofile.read())
sorter = Sorter('text')
todofilter = Filter.GrepFilter('+Project')
view = todolist.view(sorter, [todofilter])
self.assertEqual(print_view(view), todolist_to_string(ref))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 3,500,970,238,699,097,000 | 35.714286 | 71 | 0.723735 | false |
thoreg/suds | suds/client.py | 1 | 25571 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{2nd generation} service proxy provides access to web services.
See I{README.txt}
"""
import suds
import suds.metrics as metrics
from cookielib import CookieJar
from suds import *
from suds.reader import DefinitionsReader
from suds.transport import TransportError, Request
from suds.transport.https import HttpAuthenticated
from suds.servicedefinition import ServiceDefinition
from suds import sudsobject
from sudsobject import Factory as InstFactory
from suds.resolver import PathResolver
from suds.builder import Builder
from suds.wsdl import Definitions
from suds.cache import ObjectCache
from suds.sax.parser import Parser
from suds.options import Options
from suds.properties import Unskin
from copy import deepcopy
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Client(object):
"""
A lightweight web services client.
I{(2nd generation)} API.
@ivar wsdl: The WSDL object.
@type wsdl:L{Definitions}
@ivar service: The service proxy used to invoke operations.
@type service: L{Service}
@ivar factory: The factory used to create objects.
@type factory: L{Factory}
@ivar sd: The service definition
@type sd: L{ServiceDefinition}
@ivar messages: The last sent/received messages.
@type messages: str[2]
"""
@classmethod
def items(cls, sobject):
"""
Extract the I{items} from a suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
return sudsobject.items(sobject)
@classmethod
def dict(cls, sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return sudsobject.asdict(sobject)
@classmethod
def metadata(cls, sobject):
"""
Extract the metadata from a suds object.
@param sobject: A suds object
@type sobject: L{Object}
@return: The object's metadata
@rtype: L{sudsobject.Metadata}
"""
return sobject.__metadata__
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
"""
options = Options()
options.transport = HttpAuthenticated()
self.options = options
options.cache = ObjectCache(days=1)
self.set_options(**kwargs)
reader = DefinitionsReader(options, Definitions)
self.wsdl = reader.open(url)
plugins = PluginContainer(options.plugins)
plugins.init.initialized(wsdl=self.wsdl)
self.factory = Factory(self.wsdl)
self.service = ServiceSelector(self, self.wsdl.services)
self.sd = []
for s in self.wsdl.services:
sd = ServiceDefinition(self.wsdl, s)
self.sd.append(sd)
self.messages = dict(tx=None, rx=None)
def set_options(self, **kwargs):
"""
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
"""
p = Unskin(self.options)
p.update(kwargs)
def add_prefix(self, prefix, uri):
"""
Add I{static} mapping of an XML namespace prefix to a namespace.
This is useful for cases when a wsdl and referenced schemas make heavy
        use of namespaces and those namespaces are subject to change.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: when prefix is already mapped.
"""
root = self.wsdl.root
mapped = root.resolvePrefix(prefix, None)
if mapped is None:
root.addPrefix(prefix, uri)
return
if mapped[1] != uri:
raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
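    # Illustrative example (added; prefix and URI are placeholders):
    #   client.add_prefix('ns0', 'http://example.com/schema')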
def last_sent(self):
"""
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('tx')
def last_received(self):
"""
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('rx')
def clone(self):
"""
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are
unique to the cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
"""
class Uninitialized(Client):
def __init__(self):
pass
clone = Uninitialized()
clone.options = Options()
cp = Unskin(clone.options)
mp = Unskin(self.options)
cp.update(deepcopy(mp))
clone.wsdl = self.wsdl
clone.factory = self.factory
clone.service = ServiceSelector(clone, self.wsdl.services)
clone.sd = self.sd
clone.messages = dict(tx=None, rx=None)
return clone
def __str__(self):
return unicode(self)
def __unicode__(self):
s = ['\n']
build = suds.__build__.split()
s.append('Suds ( https://fedorahosted.org/suds/ )')
s.append(' version: %s' % suds.__version__)
s.append(' %s build: %s' % (build[0], build[1]))
for sd in self.sd:
s.append('\n\n%s' % unicode(sd))
return ''.join(s)
class Factory:
"""
A factory for instantiating types defined in the wsdl
@ivar resolver: A schema type resolver.
@type resolver: L{PathResolver}
@ivar builder: A schema object builder.
@type builder: L{Builder}
"""
def __init__(self, wsdl):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.resolver = PathResolver(wsdl)
self.builder = Builder(self.resolver)
def create(self, name):
"""
create a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = InstFactory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception, e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug('%s created: %s', name, timer)
return result
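    # Illustrative example (added; 'Person' is a placeholder type name):
    #   person = client.factory.create('Person')
    #   person.name = 'Jeff'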
def separator(self, ps):
"""
Set the path separator.
@param ps: The new path separator.
@type ps: char
"""
self.resolver = PathResolver(self.wsdl, ps)
class ServiceSelector:
"""
The B{service} selector is used to select a web service.
In most cases, the wsdl only defines (1) service in which access
by subscript is passed through to a L{PortSelector}. This is also the
behavior when a I{default} service has been specified. In cases
where multiple services have been defined and no default has been
specified, the service is found by name (or index) and a L{PortSelector}
for the service is returned. In all cases, attribute access is
forwarded to the L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __services: A list of I{wsdl} services.
@type __services: list
"""
def __init__(self, client, services):
"""
@param client: A suds client.
@type client: L{Client}
@param services: A list of I{wsdl} services.
@type services: list
"""
self.__client = client
self.__services = services
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@param name: The name of a method.
@type name: str
@return: A L{PortSelector}.
@rtype: L{PortSelector}.
"""
default = self.__ds()
if default is None:
port = self.__find(0)
else:
port = default
return getattr(port, name)
def __getitem__(self, name):
"""
Provides selection of the I{service} by name (string) or
index (integer). In cases where only (1) service is defined
or a I{default} has been specified, the request is forwarded
to the L{PortSelector}.
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the specified service.
@rtype: L{PortSelector}.
"""
if len(self.__services) == 1:
port = self.__find(0)
return port[name]
default = self.__ds()
if default is not None:
port = default
return port[name]
return self.__find(name)
def __find(self, name):
"""
Find a I{service} by name (string) or index (integer).
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the found service.
@rtype: L{PortSelector}.
"""
service = None
if not len(self.__services):
raise Exception('No services defined')
if isinstance(name, int):
try:
service = self.__services[name]
name = service.name
except IndexError:
raise ServiceNotFound('at [%d]' % name)
else:
for s in self.__services:
if name == s.name:
service = s
break
if service is None:
raise ServiceNotFound(name)
return PortSelector(self.__client, service.ports, name)
def __ds(self):
"""
Get the I{default} service if defined in the I{options}.
@return: A L{PortSelector} for the I{default} service.
@rtype: L{PortSelector}.
"""
ds = self.__client.options.service
if ds is None:
return None
else:
return self.__find(ds)
class PortSelector:
"""
The B{port} selector is used to select a I{web service} B{port}.
In cases where multiple ports have been defined and no default has been
specified, the port is found by name (or index) and a L{MethodSelector}
for the port is returned. In all cases, attribute access is
forwarded to the L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __ports: A list of I{service} ports.
@type __ports: list
@ivar __qn: The I{qualified} name of the port (used for logging).
@type __qn: str
"""
def __init__(self, client, ports, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param ports: A list of I{service} ports.
@type ports: list
@param qn: The name of the service.
@type qn: str
"""
self.__client = client
self.__ports = ports
self.__qn = qn
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@param name: The name of a method.
@type name: str
@return: A L{MethodSelector}.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
m = self.__find(0)
else:
m = default
return getattr(m, name)
def __getitem__(self, name):
"""
Provides selection of the I{port} by name (string) or
index (integer). In cases where only (1) port is defined
or a I{default} has been specified, the request is forwarded
to the L{MethodSelector}.
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the specified port.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
return self.__find(name)
else:
return default
def __find(self, name):
"""
Find a I{port} by name (string) or index (integer).
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the found port.
@rtype: L{MethodSelector}.
"""
port = None
if not len(self.__ports):
raise Exception('No ports defined: %s' % self.__qn)
if isinstance(name, int):
qn = '%s[%d]' % (self.__qn, name)
try:
port = self.__ports[name]
except IndexError:
raise PortNotFound(qn)
else:
qn = '.'.join((self.__qn, name))
for p in self.__ports:
if name == p.name:
port = p
break
if port is None:
raise PortNotFound(qn)
qn = '.'.join((self.__qn, port.name))
return MethodSelector(self.__client, port.methods, qn)
def __dp(self):
"""
Get the I{default} port if defined in the I{options}.
@return: A L{MethodSelector} for the I{default} port.
@rtype: L{MethodSelector}.
"""
dp = self.__client.options.port
if dp is None:
return None
else:
return self.__find(dp)
class MethodSelector:
"""
The B{method} selector is used to select a B{method} by name.
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __methods: A dictionary of methods.
@type __methods: dict
@ivar __qn: The I{qualified} name of the method (used for logging).
@type __qn: str
"""
def __init__(self, client, methods, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param methods: A dictionary of methods.
@type methods: dict
@param qn: The I{qualified} name of the port.
@type qn: str
"""
self.__client = client
self.__methods = methods
self.__qn = qn
def __getattr__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
return self[name]
def __getitem__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
m = self.__methods.get(name)
if m is None:
qn = '.'.join((self.__qn, name))
raise MethodNotFound(qn)
return Method(self.__client, m)
class Method:
"""
The I{method} (namespace) object.
@ivar client: A client object.
@type client: L{Client}
@ivar method: A I{wsdl} method.
    @type method: I{wsdl} Method.
"""
def __init__(self, client, method):
"""
@param client: A client object.
@type client: L{Client}
@param method: A I{raw} method.
        @type method: I{raw} Method.
"""
self.client = client
self.method = method
def __call__(self, *args, **kwargs):
"""
Invoke the method.
"""
clientclass = self.clientclass(kwargs)
client = clientclass(self.client, self.method)
if not self.faults():
try:
return client.invoke(args, kwargs)
except WebFault, e:
return (500, e)
else:
return client.invoke(args, kwargs)
def faults(self):
""" get faults option """
return self.client.options.faults
def clientclass(self, kwargs):
""" get soap client class """
if SimClient.simulation(kwargs):
return SimClient
else:
return SoapClient
class SoapClient:
"""
A lightweight soap based web client B{**not intended for external use}
@ivar service: The target method.
@type service: L{Service}
@ivar method: A target method.
@type method: L{Method}
    @ivar options: A dictionary of options.
@type options: dict
@ivar cookiejar: A cookie jar.
@type cookiejar: libcookie.CookieJar
"""
def __init__(self, client, method):
"""
@param client: A suds client.
@type client: L{Client}
@param method: A target method.
@type method: L{Method}
"""
self.client = client
self.method = method
self.options = client.options
self.cookiejar = CookieJar()
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin}|I{subclass of} L{Object}
"""
timer = metrics.Timer()
timer.start()
result = None
binding = self.method.binding.input
soapenv = binding.get_message(self.method, args, kwargs)
timer.stop()
metrics.log.debug("message for '%s' created: %s", self.method.name, timer)
timer.start()
result = self.send(soapenv)
timer.stop()
metrics.log.debug("method '%s' invoked: %s", self.method.name, timer)
return result
def send(self, soapenv):
"""
Send soap message.
@param soapenv: A soap envelope to send.
@type soapenv: L{Document}
@return: The reply to the sent message.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
result = None
location = self.location()
binding = self.method.binding.input
transport = self.options.transport
retxml = self.options.retxml
prettyxml = self.options.prettyxml
log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
try:
self.last_sent(soapenv)
plugins = PluginContainer(self.options.plugins)
plugins.message.marshalled(envelope=soapenv.root())
if prettyxml:
soapenv = soapenv.str()
else:
soapenv = soapenv.plain()
soapenv = soapenv.encode('utf-8')
plugins.message.sending(envelope=soapenv)
request = Request(location, soapenv)
request.headers = self.headers()
reply = transport.send(request)
ctx = plugins.message.received(reply=reply.message)
reply.message = ctx.reply
if retxml:
result = reply.message
else:
result = self.succeeded(binding, reply.message)
except TransportError, e:
if e.httpcode in (202, 204):
result = None
else:
log.error(self.last_sent())
result = self.failed(binding, e)
return result
def headers(self):
"""
        Get http headers for the http/https request.
@return: A dictionary of header/values.
@rtype: dict
"""
action = self.method.soap.action
stock = {'Content-Type': 'text/xml; charset=utf-8', 'SOAPAction': action}
result = dict(stock, **self.options.headers)
log.debug('headers = %s', result)
return result
def succeeded(self, binding, reply):
"""
Request succeeded, process the reply
@param binding: The binding to be used to process the reply.
@type binding: L{bindings.binding.Binding}
@param reply: The raw reply text.
@type reply: str
@return: The method result.
@rtype: I{builtin}, L{Object}
@raise WebFault: On server.
"""
log.debug('http succeeded:\n%s', reply)
plugins = PluginContainer(self.options.plugins)
if len(reply) > 0:
reply, result = binding.get_reply(self.method, reply)
self.last_received(reply)
else:
result = None
ctx = plugins.message.unmarshalled(reply=result)
result = ctx.reply
if self.options.faults:
return result
else:
return (200, result)
def failed(self, binding, error):
"""
Request failed, process reply based on reason
@param binding: The binding to be used to process the reply.
@type binding: L{suds.bindings.binding.Binding}
@param error: The http error message
@type error: L{transport.TransportError}
"""
status, reason = (error.httpcode, tostr(error))
reply = error.fp.read()
log.debug('http failed:\n%s', reply)
if status == 500:
if len(reply) > 0:
r, p = binding.get_fault(reply)
self.last_received(r)
return (status, p)
else:
return (status, None)
if self.options.faults:
raise Exception((status, reason))
else:
return (status, None)
def location(self):
p = Unskin(self.options)
return p.get('location', self.method.location)
def last_sent(self, d=None):
key = 'tx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
def last_received(self, d=None):
key = 'rx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
class SimClient(SoapClient):
"""
Loopback client used for message/reply simulation.
"""
injkey = '__inject'
@classmethod
def simulation(cls, kwargs):
""" get whether loopback has been specified in the I{kwargs}. """
return kwargs.has_key(SimClient.injkey)
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
simulation = kwargs[self.injkey]
msg = simulation.get('msg')
reply = simulation.get('reply')
fault = simulation.get('fault')
if msg is None:
if reply is not None:
return self.__reply(reply, args, kwargs)
if fault is not None:
return self.__fault(fault)
raise Exception('(reply|fault) expected when msg=None')
sax = Parser()
msg = sax.parse(string=msg)
return self.send(msg)
def __reply(self, reply, args, kwargs):
""" simulate the reply """
binding = self.method.binding.input
msg = binding.get_message(self.method, args, kwargs)
log.debug('inject (simulated) send message:\n%s', msg)
binding = self.method.binding.output
return self.succeeded(binding, reply)
def __fault(self, reply):
""" simulate the (fault) reply """
binding = self.method.binding.output
if self.options.faults:
r, p = binding.get_fault(reply)
self.last_received(r)
return (500, p)
else:
return (500, None)
| lgpl-3.0 | 7,985,433,385,657,277,000 | 31.95232 | 82 | 0.573032 | false |
liamcurry/py3kwarn | py3kwarn2to3/pgen2/pgen.py | 1 | 13781 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Pgen imports
from __future__ import print_function
from . import grammar, token, tokenize
class PgenGrammar(grammar.Grammar):
pass
class ParserGenerator(object):
def __init__(self, filename, stream=None):
close_stream = None
if stream is None:
stream = open(filename)
close_stream = stream.close
self.filename = filename
self.stream = stream
self.generator = tokenize.generate_tokens(stream.readline)
self.gettoken() # Initialize lookahead
self.dfas, self.startsymbol = self.parse()
if close_stream is not None:
close_stream()
self.first = {} # map from symbol name to set of tokens
self.addfirstsets()
def make_grammar(self):
c = PgenGrammar()
names = sorted(self.dfas.keys())
names.remove(self.startsymbol)
names.insert(0, self.startsymbol)
for name in names:
i = 256 + len(c.symbol2number)
c.symbol2number[name] = i
c.number2symbol[i] = name
for name in names:
dfa = self.dfas[name]
states = []
for state in dfa:
arcs = []
for label, next in state.arcs.items():
arcs.append((self.make_label(c, label), dfa.index(next)))
if state.isfinal:
arcs.append((0, dfa.index(state)))
states.append(arcs)
c.states.append(states)
c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
c.start = c.symbol2number[self.startsymbol]
return c
def make_first(self, c, name):
rawfirst = self.first[name]
first = {}
for label in rawfirst:
ilabel = self.make_label(c, label)
##assert ilabel not in first # XXX failed on <> ... !=
first[ilabel] = 1
return first
def make_label(self, c, label):
# XXX Maybe this should be a method on a subclass of converter?
ilabel = len(c.labels)
if label[0].isalpha():
# Either a symbol name or a named token
if label in c.symbol2number:
# A symbol name (a non-terminal)
if label in c.symbol2label:
return c.symbol2label[label]
else:
c.labels.append((c.symbol2number[label], None))
c.symbol2label[label] = ilabel
return ilabel
else:
# A named token (NAME, NUMBER, STRING)
itoken = getattr(token, label, None)
assert isinstance(itoken, int), label
assert itoken in token.tok_name, label
if itoken in c.tokens:
return c.tokens[itoken]
else:
c.labels.append((itoken, None))
c.tokens[itoken] = ilabel
return ilabel
else:
# Either a keyword or an operator
assert label[0] in ('"', "'"), label
value = eval(label)
if value[0].isalpha():
# A keyword
if value in c.keywords:
return c.keywords[value]
else:
c.labels.append((token.NAME, value))
c.keywords[value] = ilabel
return ilabel
else:
# An operator (any non-numeric token)
itoken = grammar.opmap[value] # Fails if unknown token
if itoken in c.tokens:
return c.tokens[itoken]
else:
c.labels.append((itoken, None))
c.tokens[itoken] = ilabel
return ilabel
def addfirstsets(self):
names = sorted(self.dfas.keys())
for name in names:
if name not in self.first:
self.calcfirst(name)
#print name, self.first[name].keys()
def calcfirst(self, name):
dfa = self.dfas[name]
self.first[name] = None # dummy to detect left recursion
state = dfa[0]
totalset = {}
overlapcheck = {}
for label, next in state.arcs.items():
if label in self.dfas:
if label in self.first:
fset = self.first[label]
if fset is None:
raise ValueError("recursion for rule %r" % name)
else:
self.calcfirst(label)
fset = self.first[label]
totalset.update(fset)
overlapcheck[label] = fset
else:
totalset[label] = 1
overlapcheck[label] = {label: 1}
inverse = {}
for label, itsfirst in overlapcheck.items():
for symbol in itsfirst:
if symbol in inverse:
raise ValueError("rule %s is ambiguous; %s is in the"
" first sets of %s as well as %s" %
(name, symbol, label, inverse[symbol]))
inverse[symbol] = label
self.first[name] = totalset
def parse(self):
dfas = {}
startsymbol = None
# MSTART: (NEWLINE | RULE)* ENDMARKER
while self.type != token.ENDMARKER:
while self.type == token.NEWLINE:
self.gettoken()
# RULE: NAME ':' RHS NEWLINE
name = self.expect(token.NAME)
self.expect(token.OP, ":")
a, z = self.parse_rhs()
self.expect(token.NEWLINE)
#self.dump_nfa(name, a, z)
dfa = self.make_dfa(a, z)
#self.dump_dfa(name, dfa)
oldlen = len(dfa)
self.simplify_dfa(dfa)
newlen = len(dfa)
dfas[name] = dfa
#print name, oldlen, newlen
if startsymbol is None:
startsymbol = name
return dfas, startsymbol
def make_dfa(self, start, finish):
# To turn an NFA into a DFA, we define the states of the DFA
# to correspond to *sets* of states of the NFA. Then do some
# state reduction. Let's represent sets as dicts with 1 for
# values.
assert isinstance(start, NFAState)
assert isinstance(finish, NFAState)
def closure(state):
base = {}
addclosure(state, base)
return base
def addclosure(state, base):
assert isinstance(state, NFAState)
if state in base:
return
base[state] = 1
for label, next in state.arcs:
if label is None:
addclosure(next, base)
states = [DFAState(closure(start), finish)]
for state in states: # NB states grows while we're iterating
arcs = {}
for nfastate in state.nfaset:
for label, next in nfastate.arcs:
if label is not None:
addclosure(next, arcs.setdefault(label, {}))
for label, nfaset in arcs.items():
for st in states:
if st.nfaset == nfaset:
break
else:
st = DFAState(nfaset, finish)
states.append(st)
state.addarc(st, label)
return states # List of DFAState instances; first one is start
def dump_nfa(self, name, start, finish):
print("Dump of NFA for", name)
todo = [start]
for i, state in enumerate(todo):
print(" State", i, state is finish and "(final)" or "")
for label, next in state.arcs:
if next in todo:
j = todo.index(next)
else:
j = len(todo)
todo.append(next)
if label is None:
print(" -> %d" % j)
else:
print(" %s -> %d" % (label, j))
def dump_dfa(self, name, dfa):
print("Dump of DFA for", name)
for i, state in enumerate(dfa):
print(" State", i, state.isfinal and "(final)" or "")
for label, next in state.arcs.items():
print(" %s -> %d" % (label, dfa.index(next)))
def simplify_dfa(self, dfa):
# This is not theoretically optimal, but works well enough.
# Algorithm: repeatedly look for two states that have the same
# set of arcs (same labels pointing to the same nodes) and
# unify them, until things stop changing.
# dfa is a list of DFAState instances
changes = True
while changes:
changes = False
for i, state_i in enumerate(dfa):
for j in range(i+1, len(dfa)):
state_j = dfa[j]
if state_i == state_j:
#print " unify", i, j
del dfa[j]
for state in dfa:
state.unifystate(state_j, state_i)
changes = True
break
def parse_rhs(self):
# RHS: ALT ('|' ALT)*
a, z = self.parse_alt()
if self.value != "|":
return a, z
else:
aa = NFAState()
zz = NFAState()
aa.addarc(a)
z.addarc(zz)
while self.value == "|":
self.gettoken()
a, z = self.parse_alt()
aa.addarc(a)
z.addarc(zz)
return aa, zz
def parse_alt(self):
# ALT: ITEM+
a, b = self.parse_item()
while (self.value in ("(", "[") or
self.type in (token.NAME, token.STRING)):
c, d = self.parse_item()
b.addarc(c)
b = d
return a, b
def parse_item(self):
# ITEM: '[' RHS ']' | ATOM ['+' | '*']
if self.value == "[":
self.gettoken()
a, z = self.parse_rhs()
self.expect(token.OP, "]")
a.addarc(z)
return a, z
else:
a, z = self.parse_atom()
value = self.value
if value not in ("+", "*"):
return a, z
self.gettoken()
z.addarc(a)
if value == "+":
return a, z
else:
return a, a
def parse_atom(self):
# ATOM: '(' RHS ')' | NAME | STRING
if self.value == "(":
self.gettoken()
a, z = self.parse_rhs()
self.expect(token.OP, ")")
return a, z
elif self.type in (token.NAME, token.STRING):
a = NFAState()
z = NFAState()
a.addarc(z, self.value)
self.gettoken()
return a, z
else:
self.raise_error("expected (...) or NAME or STRING, got %s/%s",
self.type, self.value)
def expect(self, type, value=None):
if self.type != type or (value is not None and self.value != value):
self.raise_error("expected %s/%s, got %s/%s",
type, value, self.type, self.value)
value = self.value
self.gettoken()
return value
def gettoken(self):
tup = next(self.generator)
while tup[0] in (tokenize.COMMENT, tokenize.NL):
tup = next(self.generator)
self.type, self.value, self.begin, self.end, self.line = tup
#print token.tok_name[self.type], repr(self.value)
def raise_error(self, msg, *args):
if args:
try:
msg = msg % args
except:
msg = " ".join([msg] + list(map(str, args)))
raise SyntaxError(msg, (self.filename, self.end[0],
self.end[1], self.line))
class NFAState(object):
def __init__(self):
self.arcs = [] # list of (label, NFAState) pairs
def addarc(self, next, label=None):
assert label is None or isinstance(label, str)
assert isinstance(next, NFAState)
self.arcs.append((label, next))
class DFAState(object):
def __init__(self, nfaset, final):
assert isinstance(nfaset, dict)
assert isinstance(next(iter(nfaset)), NFAState)
assert isinstance(final, NFAState)
self.nfaset = nfaset
self.isfinal = final in nfaset
self.arcs = {} # map from label to DFAState
def addarc(self, next, label):
assert isinstance(label, str)
assert label not in self.arcs
assert isinstance(next, DFAState)
self.arcs[label] = next
def unifystate(self, old, new):
for label, next in self.arcs.items():
if next is old:
self.arcs[label] = new
def __eq__(self, other):
# Equality test -- ignore the nfaset instance variable
assert isinstance(other, DFAState)
if self.isfinal != other.isfinal:
return False
# Can't just return self.arcs == other.arcs, because that
# would invoke this method recursively, with cycles...
if len(self.arcs) != len(other.arcs):
return False
for label, next in self.arcs.items():
if next is not other.arcs.get(label):
return False
return True
__hash__ = None # For Py3 compatibility.
def generate_grammar(filename="Grammar.txt"):
p = ParserGenerator(filename)
return p.make_grammar()
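# Illustrative use (assumes a pgen-format grammar file is available):
#     g = generate_grammar("Grammar.txt")
#     # non-terminal symbols are numbered from 256 upwards
#     print(g.number2symbol[g.start])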
| mit | -1,833,032,193,287,119,000 | 34.702073 | 78 | 0.495682 | false |
JNU-Include/CNN | Test/lab-11-4-mnist_cnn_ensemble2.py | 1 | 2134 | # Lab 11 MNIST and Deep learning CNN
import tensorflow as tf
from lib.ensemble.ensemble_core import EnsembleCore
from lib.ensemble.mnist_core import MnistCore
from lib.ensemble.cnn_core import CNNCore
class MyCNN (CNNCore):
def init_network(self):
self.set_placeholder(784, 10, 28, 28)
self.DO = tf.placeholder(tf.float32)
L1 = self.convolution_layer(self.X_2d, 3, 3, 1, 32, 1, 1)
L1 = self.relu(L1)
L1_maxpool = self.max_pool(L1, 2, 2, 2, 2)
L1_maxpool = self.dropout(L1_maxpool)
L2 = self.convolution_layer(L1_maxpool, 3, 3, 32, 64, 1, 1)
L2 = self.relu(L2)
L2_maxpool = self.max_pool(L2, 2, 2, 2, 2)
L2_maxpool = self.dropout(L2_maxpool)
L3 = self.convolution_layer(L2_maxpool, 3, 3, 64, 128, 1, 1)
L3 = self.relu(L3)
L3_maxpool = self.max_pool(L3, 2, 2, 2, 2)
L3_maxpool = self.dropout(L3_maxpool)
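        # The 28x28 input is halved by each of the three stride-2 max-pools
        # (28 -> 14 -> 7 -> 4, assuming SAME padding), which is why the flattened
        # fully-connected input below has 128 * 4 * 4 features.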
# L4 FC 4x4x128 inputs -> 625 outputs
reshaped = tf.reshape(L3_maxpool, [-1, 128 * 4 * 4])
L4 = self.fully_connected_layer(reshaped, 128 * 4 * 4, 625, 'W4')
L4 = self.relu(L4)
L4 = self.dropout(L4)
self.logit = self.fully_connected_layer(L4, 625, 10, 'W5')
self.set_hypothesis(self.logit)
self.set_cost_function()
self.set_optimizer(0.001)
class MyEnsemble (EnsembleCore):
mnist = MnistCore()
def load_db(self):
self.mnist.load_mnist()
def set_networks(self, sess, num_of_network):
self.create_networks(sess, MyCNN, 'network_name', 7)
def get_number_of_segment(self, seg_size):
return self.mnist.get_number_of_segment(seg_size)
def get_next_segment(self, seg_size):
return self.mnist.get_next_segment(seg_size)
def get_test_data(self):
return self.mnist.get_test_x_data(), self.mnist.get_test_y_data()
gildong = MyEnsemble()
gildong.learn_ensemble(7, 15, 100)
gildong.evaluate_all_models()
'''
0 Accuracy: 0.9933
1 Accuracy: 0.9946
2 Accuracy: 0.9934
3 Accuracy: 0.9935
4 Accuracy: 0.9935
5 Accuracy: 0.9949
6 Accuracy: 0.9941
Ensemble accuracy: 0.9952
'''
| mit | 3,337,229,261,010,518,000 | 26.358974 | 73 | 0.628397 | false |
compas-dev/compas | src/compas_rhino/objects/inspectors/meshinspector.py | 1 | 3026 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from System.Collections.Generic import List
from System.Drawing.Color import FromArgb
from Rhino.Geometry import Point3d
from Rhino.Geometry import Line
from compas_rhino.conduits import BaseConduit
from compas_rhino.ui import Mouse
from compas.geometry import length_vector
from compas.geometry import cross_vectors
from compas.geometry import subtract_vectors
__all__ = ['MeshVertexInspector']
class MeshVertexInspector(BaseConduit):
"""Inspect mesh topology at the vertices.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
tol: float, optional
dotcolor: rgb-tuple, optional
textcolor: rgb-tuple, optional
linecolor: rgb-tuple, optional
"""
def __init__(self, mesh, tol=0.1, dotcolor=None, textcolor=None, linecolor=None, **kwargs):
super(MeshVertexInspector, self).__init__(**kwargs)
self._vertex_xyz = None
dotcolor = dotcolor or (255, 255, 0)
textcolor = textcolor or (0, 0, 0)
linecolor = linecolor or (255, 255, 0)
self.mesh = mesh
self.tol = tol
self.dotcolor = FromArgb(*dotcolor)
self.textcolor = FromArgb(*textcolor)
self.linecolor = FromArgb(*linecolor)
self.mouse = Mouse(self)
self.vertex_nbr = {
vertex: [(vertex, nbr) if mesh.has_edge((vertex, nbr)) else (nbr, vertex) for nbr in mesh.vertex_neighbors(vertex)]
for vertex in mesh.vertices()
}
@property
def vertex_xyz(self):
if not self._vertex_xyz:
self._vertex_xyz = {vertex: self.mesh.vertex_attributes(vertex, 'xyz') for vertex in self.mesh.vertices()}
return self._vertex_xyz
@vertex_xyz.setter
def vertex_xyz(self, vertex_xyz):
self._vertex_xyz = vertex_xyz
def enable(self):
"""Enable the conduit."""
self.mouse.Enabled = True
self.Enabled = True
def disable(self):
"""Disable the conduit."""
self.mouse.Enabled = False
self.Enabled = False
def DrawForeground(self, e):
draw_dot = e.Display.DrawDot
draw_arrows = e.Display.DrawArrows
a = self.mouse.p1
b = self.mouse.p2
ab = subtract_vectors(b, a)
Lab = length_vector(ab)
if not Lab:
return
for index, vertex in enumerate(self.vertex_xyz):
c = self.vertex_xyz[vertex]
D = length_vector(cross_vectors(subtract_vectors(a, c), subtract_vectors(b, c)))
if D / Lab < self.tol:
point = Point3d(*c)
draw_dot(point, str(index), self.dotcolor, self.textcolor)
lines = List[Line](len(self.vertex_nbr[vertex]))
for u, v in self.vertex_nbr[vertex]:
lines.Add(Line(Point3d(* self.vertex_xyz[u]), Point3d(* self.vertex_xyz[v])))
draw_arrows(lines, self.linecolor)
break
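# Illustrative use inside Rhino (assuming ``mesh`` is a compas Mesh):
#     inspector = MeshVertexInspector(mesh)
#     inspector.enable()   # draw vertex index and edge arrows under the mouse
#     ...
#     inspector.disable()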
| mit | 1,955,176,110,724,072,700 | 33 | 127 | 0.618638 | false |
dls-controls/pymalcolm | tests/test_profiler.py | 1 | 2778 | import ast
import logging
import time
import unittest
from malcolm.profiler import Profiler
# https://github.com/bdarnell/plop/blob/master/plop/test/collector_test.py
class ProfilerTest(unittest.TestCase):
def filter_stacks(self, results):
# Kind of hacky, but this is the simplest way to keep the tests
# working after the internals of the collector changed to support
# multiple formatters.
stack_counts = ast.literal_eval(results)
counts = {}
for stack, count in stack_counts.items():
filtered_stack = [
frame[2] for frame in stack if frame[0].endswith("test_profiler.py")
]
if filtered_stack:
counts[tuple(filtered_stack)] = count
return counts
def check_counts(self, counts, expected):
failed = False
output = []
for stack, count in expected.items():
# every expected frame should appear in the data, but
# the inverse is not true if the signal catches us between
# calls.
self.assertTrue(stack in counts)
ratio = float(counts[stack]) / float(count)
output.append(
"%s: expected %s, got %s (%s)" % (stack, count, counts[stack], ratio)
)
if not (0.70 <= ratio <= 1.25):
failed = True
if failed:
for line in output:
logging.warning(line)
for key in set(counts.keys()) - set(expected.keys()):
logging.warning("unexpected key: %s: got %s" % (key, counts[key]))
self.fail("collected data did not meet expectations")
def test_collector(self):
start = time.time()
def a(end):
while time.time() < end:
pass
c(time.time() + 0.1)
def b(end):
while time.time() < end:
pass
c(time.time() + 0.1)
def c(end):
while time.time() < end:
pass
profiler = Profiler("/tmp")
profiler.start(interval=0.01)
a(time.time() + 0.1)
b(time.time() + 0.2)
c(time.time() + 0.3)
end = time.time()
profiler.stop("profiler_test.plop")
elapsed = end - start
self.assertTrue(0.8 < elapsed < 0.9, elapsed)
with open("/tmp/profiler_test.plop") as f:
results = f.read()
counts = self.filter_stacks(results)
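        # At a 0.01 s sampling interval, each 0.1 s of busy-waiting should yield
        # roughly 10 samples for the corresponding stack (hence 10/20/30 below).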
expected = {
("a", "test_collector"): 10,
("c", "a", "test_collector"): 10,
("b", "test_collector"): 20,
("c", "b", "test_collector"): 10,
("c", "test_collector"): 30,
}
self.check_counts(counts, expected)
| apache-2.0 | -715,875,177,703,284,500 | 31.682353 | 85 | 0.527358 | false |
mysociety/pombola | pombola/core/kenya_import_scripts/import_contacts_from_tuples.py | 1 | 1646 | #!/usr/bin/env python
import os
import sys
# Horrible boilerplate - there must be a better way :)
sys.path.append(
os.path.abspath(
os.path.dirname(__file__) + '../../..'
)
)
from pombola.core import models
from django.contrib.contenttypes.models import ContentType
import mp_contacts
phone_kind = models.ContactKind.objects.get(slug='phone')
email_kind = models.ContactKind.objects.get(slug='email')
for row in mp_contacts.entries:
(name, phone, email) = row
if not (phone or email):
continue
# code needs reworking now that the name structure of the database has changed
matches = models.Person.objects.all().is_politician().name_matches( name )
if matches.count() == 0:
print " no match for '%s', '%s', '%s'" % (name, phone, email)
continue
if matches.count() > 1:
print " several matches for %s" % name
continue
mp = matches[0]
# print "%s -> %s" % ( name, mp.name )
content_type = ContentType.objects.get_for_model(mp)
source = "SUNY Kenya spreadsheet entry for '%s'" % name
if phone:
models.Contact.objects.get_or_create(
content_type=content_type,
object_id=mp.id,
value=phone,
kind=phone_kind,
defaults = {
"source":source,
}
)
if email:
models.Contact.objects.get_or_create(
content_type=content_type,
object_id=mp.id,
value=email,
kind=email_kind,
defaults = {
"source":source,
}
)
| agpl-3.0 | -652,200,642,074,956,900 | 22.514286 | 82 | 0.565006 | false |
linuxscout/tashaphyne | tashaphyne/stemming.py | 1 | 47427 | # -*- coding: UTF-8 -*-
"""
Arabic Light Stemmer
A class which provides a configurable stemmer
and segmentor for arabic text.
Features:
=========
- Arabic word Light Stemming.
- Root Extraction.
- Word Segmentation
- Word normalization
- Default Arabic Affixes list.
- An customizable Light stemmer: possibility of change
stemmer options and data.
- Data independent stemmer
@author: Taha Zerrouki <taha_zerrouki at gmail dot com>
@author: Taha Zerrouki
@contact: taha dot zerrouki at gmail dot com
@copyright: Arabtechies, Arabeyes, Taha Zerrouki
@license: GPL
@date:2017/02/15
@version:0.3
"""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
division,
)
import re
import sys
sys.path.append('../support/')
import pyarabic.araby as araby
if __name__ == "__main__":
sys.path.append('../')
import normalize
import stem_const
import affix_const
import roots_const
import verb_stamp_const
import arabicstopwords
else:
from . import normalize
from . import stem_const
from . import affix_const
from . import roots_const
from . import verb_stamp_const
from . import arabicstopwords
class ArabicLightStemmer:
"""
    ArabicLightStemmer: a class which provides a configurable stemmer
and segmentor for arabic text.
Features:
=========
- Arabic word Light Stemming.
- Root Extraction.
- Word Segmentation
- Word normalization
- Default Arabic Affixes list.
        - A customizable light stemmer: possibility of changing
        stemmer options and data.
- Data independent stemmer
@author: Taha Zerrouki <taha_zerrouki at gmail dot com>
@author: Taha Zerrouki
@contact: taha dot zerrouki at gmail dot com
@copyright: Arabtechies, Arabeyes, Taha Zerrouki
@license: GPL
@date:2017/02/15
@version:0.3
"""
def __init__(self):
#load affix information
# pass
self.prefix_letters = stem_const.DEFAULT_PREFIX_LETTERS
self.suffix_letters = stem_const.DEFAULT_SUFFIX_LETTERS
self.infix_letters = stem_const.DEFAULT_INFIX_LETTERS
self.max_prefix_length = stem_const.DEFAULT_MAX_PREFIX
self.max_suffix_length = stem_const.DEFAULT_MAX_SUFFIX
self.min_stem_length = stem_const.DEFAULT_MIN_STEM
self.joker = stem_const.DEFAULT_JOKER
self.prefix_list = stem_const.DEFAULT_PREFIX_LIST
self.suffix_list = stem_const.DEFAULT_SUFFIX_LIST
# root dictionary
self.root_list = roots_const.ROOTS
# lists used to validate affixation
#~ self.valid_affixes_list = []
self.valid_affixes_list = set(list(affix_const.VERB_AFFIX_LIST) + list(affix_const.NOUN_AFFIX_LIST))
self.word = u""
self.unvocalized = u""
self.normalized = u""
self.starword = u""
self.root = u""
self.left = 0
self.right = 0
self.segment_list = []
#token pattern
# letters and harakat
self.token_pat = re.compile(u"[^\w\u064b-\u0652']+", re.UNICODE)
self.prefixes_tree = self._create_prefix_tree(self.prefix_list)
self.suffixes_tree = self._create_suffix_tree(self.suffix_list)
######################################################################
    #{ Attribute Functions
######################################################################
def get_prefix_letters(self, ):
""" return the prefixation letters.
        This constant takes DEFAULT_PREFIX_LETTERS by default.
        @return: the prefixation letters.
@rtype: unicode.
"""
return self.prefix_letters
def set_prefix_letters(self, new_prefix_letters):
""" set the prefixation letters.
        This constant takes DEFAULT_PREFIX_LETTERS by default.
        @param new_prefix_letters: letters to be stripped from the beginning of a word,
        e.g. new_prefix_letters = u"وف".
@type new_prefix_letters: unicode.
"""
self.prefix_letters = new_prefix_letters
def get_suffix_letters(self, ):
""" return the suffixation letters.
        This constant takes DEFAULT_SUFFIX_LETTERS by default.
        @return: the suffixation letters.
@rtype: unicode.
"""
return self.suffix_letters
def set_suffix_letters(self, new_suffix_letters):
""" set the suffixation letters.
        This constant takes DEFAULT_SUFFIX_LETTERS by default.
        @param new_suffix_letters: letters to be stripped from the end of a word,
        e.g. new_suffix_letters = u"ةون".
@type new_suffix_letters: unicode.
"""
self.suffix_letters = new_suffix_letters
def get_infix_letters(self, ):
""" get the inffixation letters.
This constant take DEFAULT_INFIX_LETTERS by default.
@return: infixes letters.
@rtype: unicode.
"""
return self.infix_letters
def set_infix_letters(self, new_infix_letters):
""" set the inffixation letters.
This constant take DEFAULT_INFIX_LETTERS by default.
@param new_infix_letters: letters to be striped from the middle
of a word, e.g.new_infix_letters = u"أوي":.
@type new_infix_letters: unicode.
"""
self.infix_letters = new_infix_letters
def get_joker(self, ):
""" get the joker letter.
        This constant takes DEFAULT_JOKER by default.
@return: joker letter.
@rtype: unicode.
"""
return self.joker
def set_joker(self, new_joker):
""" set the joker letter.
        This constant takes DEFAULT_JOKER by default.
@param new_joker: joker letter.
@type new_joker: unicode.
"""
if len(new_joker) > 1:
new_joker = new_joker[0]
self.joker = new_joker
def get_max_prefix_length(self, ):
""" return the constant of max length of the prefix used by the stemmer.
        This constant takes DEFAULT_MAX_PREFIX_LENGTH by default.
@return: return a number.
@rtype: integer.
"""
return self.max_prefix_length
def set_max_prefix_length(self, new_max_prefix_length):
""" Set the constant of max length of the prefix used by the stemmer.
        This constant takes DEFAULT_MAX_PREFIX_LENGTH by default.
@param new_max_prefix_length: the new max prefix length constant.
@type new_max_prefix_length: integer.
"""
self.max_prefix_length = new_max_prefix_length
def get_max_suffix_length(self, ):
""" return the constant of max length of the suffix used by the stemmer.
        This constant takes DEFAULT_MAX_SUFFIX_LENGTH by default.
@return: return a number.
@rtype: integer.
"""
return self.max_suffix_length
def set_max_suffix_length(self, new_max_suffix_length):
""" Set the constant of max length of the suffix used by the stemmer.
        This constant takes DEFAULT_MAX_SUFFIX_LENGTH by default.
@param new_max_suffix_length: the new max suffix length constant.
@type new_max_suffix_length: integer.
"""
self.max_suffix_length = new_max_suffix_length
def get_min_stem_length(self, ):
""" return the constant of min length of the stem used by the stemmer.
        This constant takes DEFAULT_MIN_STEM_LENGTH by default.
@return: return a number.
@rtype: integer.
"""
return self.min_stem_length
def set_min_stem_length(self, new_min_stem_length):
""" Set the constant of min length of the stem used by the stemmer.
        This constant takes DEFAULT_MIN_STEM_LENGTH by default.
@param new_min_stem_length: the min stem length constant.
@type new_min_stem_length: integer.
"""
self.min_stem_length = new_min_stem_length
def get_prefix_list(self, ):
""" return the prefixes list used by the stemmer.
        This constant takes DEFAULT_PREFIX_LIST by default.
@return: prefixes list.
@rtype: set().
"""
return self.prefix_list
def set_prefix_list(self, new_prefix_list):
""" Set prefixes list used by the stemmer.
        This constant takes DEFAULT_PREFIX_LIST by default.
@param new_prefix_list: a set of prefixes.
@type new_prefix_list: set of unicode string.
"""
self.prefix_list = new_prefix_list
self._create_prefix_tree(self.prefix_list)
def get_suffix_list(self, ):
""" return the suffixes list used by the stemmer.
        This constant takes DEFAULT_SUFFIX_LIST by default.
@return: suffixes list.
@rtype: set().
"""
return self.suffix_list
def set_suffix_list(self, new_suffix_list):
""" Set suffixes list used by the stemmer.
        This constant takes DEFAULT_SUFFIX_LIST by default.
@param new_suffix_list: a set of suffixes.
@type new_suffix_list: set of unicode string.
"""
self.suffix_list = new_suffix_list
self._create_suffix_tree(self.suffix_list)
    def get_roots_list(self, ):
        """ return the roots list used by the stemmer to validate roots.
        This constant takes roots_const.ROOTS by default.
        @return: roots list.
        @rtype: set().
        """
        # use the same attribute name as __init__ (root_list)
        return self.root_list
    def set_roots_list(self, new_roots_list):
        """ Set the roots list used by the stemmer to validate roots.
        This constant takes roots_const.ROOTS by default.
        @param new_roots_list: a set of roots.
        @type new_roots_list: set of unicode string.
        """
        self.root_list = new_roots_list
def get_valid_affixes_list(self, ):
""" return the valid_affixes list used by the stemmer to validate affixes.
        By default this is built from the verb and noun affix lists in affix_const.
@return: valid_affixes list.
@rtype: set().
"""
return self.valid_affixes_list
def set_valid_affixes_list(self, new_valid_affixes_list):
""" Set valid_affixes list used by the stemmer to validate affixes..
        By default this is built from the verb and noun affix lists in affix_const.
@param new_valid_affixes_list: a set of valid_affixes.
@type new_valid_affixes_list: set of unicode string.
"""
self.valid_affixes_list = new_valid_affixes_list
def set_word(self, new_word):
""" Set the word to treat by the stemmer.
@param new_word: the new word.
@type new_word: unicode.
"""
self.word = new_word
def get_word(self):
""" return the last word treated by the stemmer.
@return: word.
@rtype: unicode.
"""
return self.word
#########################################################
    #{ Calculated Attribute Functions
#########################################################
def get_starword(self):
""" return the starlike word treated by the stemmer.
All non affix letters are converted to a joker.
        The joker is DEFAULT_JOKER = "*" by default.
        Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتصربونني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_starword()
أفت***ونني
@return: word.
@rtype: unicode.
"""
return self.starword
def get_root(self, prefix_index=-1, suffix_index=-1):
""" return the root of the treated word by the stemmer.
All non affix letters are converted to a joker.
All letters in the joker places are part of root.
        The joker is DEFAULT_JOKER = "*" by default.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتصربونني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_starword()
أفت***ونني
>>> print ArListem.get_root()
ضرب
        @param prefix_index: indicates the left stemming position;
        if = -1: not considered, and takes the default word prefix length.
        @type prefix_index: integer.
        @param suffix_index: indicates the right stemming position;
        if = -1: not considered, and takes the default word suffix position.
        @type suffix_index: integer.
@return: root.
@rtype: unicode.
"""
# extract a root for a specific stem
if prefix_index >= 0 or suffix_index >= 0:
self.extract_root(prefix_index, suffix_index)
else:
self.root = self._choose_root()
return self.root
def _choose_root(self,):
""" choose a root for the given word """
if arabicstopwords.is_stop(self.word):
return arabicstopwords.stop_root(self.word)
if not self.segment_list:
self.segment(self.word)
affix_list = self.get_affix_list()
roots = [d['root'] for d in affix_list]
# filter by length
roots_tmp = roots
accepted = list(filter(self.is_root_length_valid, roots_tmp))
if accepted: # avoid empty list
roots_tmp = accepted
# filter by dictionary
accepted = list(filter(self.is_root, roots_tmp) )
if accepted: # avoid empty list
roots_tmp = accepted
# choose the most frequent root
accepted_root = self.most_common(roots_tmp)
return accepted_root
def _choose_stem(self,):
""" choose a stem for the given word """
# if word is stop word
if arabicstopwords.is_stop(self.word):
return arabicstopwords.stop_stem(self.word)
if not self.segment_list:
self.segment(self.word)
seg_list = self.segment_list
# verify affix against an affix list
seg_list = [(x,y) for (x,y) in seg_list if self._verify_affix(x,y)]
# choose the shortest stem
if not seg_list: # if empty
left = 0
right = len(self.word)
else:
left, right = self.get_left_right(seg_list)
return self.unvocalized[left:right]
def get_normalized(self):
""" return the normalized form of the treated word by the stemmer.
Some letters are converted into normal form like Hamzat.
Example:
>>> word = u"استؤجرُ"
>>> ArListem = ArabicLightStemmer()
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_normalized()
استءجر
@return: normalized word.
@rtype: unicode.
"""
return self.normalized
def get_unvocalized(self):
""" return the unvocalized form of the treated word by the stemmer.
Harakat are striped.
Example:
>>> word = u"الْعَرَبِيّةُ"
>>> ArListem = ArabicLightStemmer()
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_unvocalized()
العربية
@return: unvocalized word.
@rtype: unicode.
"""
return self.unvocalized
def get_left(self):
""" return the the left position of stemming
(prefixe end position )in the word treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتصربونني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_starword()
أفت***ونني
>>> print ArListem.get_left()
3
@return: the left position of stemming.
@rtype: integer.
"""
return self.left
def get_right(self):
""" return the the right position of stemming
(suffixe start position )in the word treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتصربونني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_starword()
أفت***ونني
>>> print ArListem.get_right()
6
@return: the right position of stemming.
@rtype: integer.
"""
return self.right
def get_stem(self, prefix_index=-1, suffix_index=-1):
""" return the stem of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتكاتبانني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_stem()
كاتب
        @param prefix_index: indicates the left stemming position;
        if = -1: not considered, and takes the default word prefix length.
        @type prefix_index: integer.
        @param suffix_index: indicates the right stemming position;
        if = -1: not considered, and takes the default word suffix position.
        @type suffix_index: integer.
@return: stem.
@rtype: unicode.
"""
#~ # ask for default stem
#~ if prefix_index < 0 and suffix_index < 0:
#~ return self._choose_stem()
if prefix_index >= 0 or suffix_index >= 0:
if prefix_index < 0:
left = self.stem_left
#~ left = self.left
else:
left = prefix_index
if suffix_index < 0:
right = self.stem_right
#~ right = self.right
else:
right = suffix_index
return self.unvocalized[left:right]
else:
stem = self._choose_stem()
return stem
def _handle_teh_infix(self, starword, left, right):
"""
Handle case of Teh as infix.
The Teh can be Dal after Zain, and Tah after Dhad
"""
newstarstem = starword
# case of Teh marbuta
key_stem = newstarstem.replace(araby.TEH_MARBUTA,'')
if len(key_stem) != 4:
            # the Teh/Tah/Dal handling below applies only to 4-letter stems; otherwise joker them all
newstarstem = re.sub(u"[%s%s%s]"%(araby.TEH, araby.TAH, araby.DAL), self.joker, newstarstem)
return newstarstem
        # substitute Teh in infixes; the Teh must be in the first
        # or second place, all others are converted
newstarstem = newstarstem[:2]+re.sub(araby.TEH, self.joker, newstarstem[2:])
# Tah طاء is infix if it's preceded by DHAD only
if self.word[left:right].startswith(u"ضط"):
newstarstem = newstarstem[:2]+re.sub(araby.TAH, self.joker, newstarstem[2:])
else:
newstarstem = re.sub(araby.TAH, self.joker, newstarstem)
# DAL دال is infix if it's preceded by زاي only
if self.word[left:right].startswith(u"زد"):
newstarstem = newstarstem[:2]+re.sub(araby.DAL, self.joker, newstarstem[2:])
else:
newstarstem = re.sub(araby.DAL, self.joker, newstarstem)
return newstarstem
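    # For example, in ازدهر the Teh of the افتعل pattern surfaces as Dal after Zain,
    # and in اضطرب it surfaces as Tah after Dhad, which is why Dal and Tah are only
    # treated as pattern letters in those contexts.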
def get_starstem(self, prefix_index=-1, suffix_index=-1):
""" return the star form stem of the treated word by the stemmer.
All non affix letters are converted to a joker.
        The joker is DEFAULT_JOKER = "*" by default.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتكاتبانني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_stem()
كاتب
>>> print ArListem.get_starstem()
*ات*
        @param prefix_index: indicates the left stemming position;
        if = -1: not considered, and takes the default word prefix length.
        @type prefix_index: integer.
        @param suffix_index: indicates the right stemming position;
        if = -1: not considered, and takes the default word suffix position.
        @type suffix_index: integer.
@return: stared form of stem.
@rtype: unicode.
"""
#~ starword = self.starword
starword = self.word
if prefix_index < 0 and suffix_index < 0:
return starword[self.left:self.right]
else:
left = self.left
right = self.right
if prefix_index >= 0:
left = prefix_index
if suffix_index >= 0:
right = suffix_index
if self.infix_letters != "":
newstarstem = re.sub(u"[^%s%s]"%(self.infix_letters, araby.TEH_MARBUTA), \
self.joker, starword[left:right])
                # substitute Teh in infixes; the Teh must be in the first
                # or second place, all others are converted
newstarstem = self._handle_teh_infix(newstarstem, left, right)
else:
newstarstem = self.joker*len(starword[left:right])
#~ print("star word", starword, newstarstem)
return newstarstem
def get_prefix(self, prefix_index=-1):
""" return the prefix of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتكاتبانني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_prefix()
أفت
        @param prefix_index: indicates the left stemming position;
        if = -1: not considered, and takes the default word prefix length.
        @type prefix_index: integer.
        @return: prefix.
@rtype: unicode.
"""
if prefix_index < 0:
return self.unvocalized[:self.left]
else:
return self.unvocalized[:prefix_index]
def get_suffix(self, suffix_index=-1):
""" return the suffix of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتكاتبانني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_suffix()
انني
        @param suffix_index: indicates the right stemming position;
        if = -1: not considered, and takes the default word suffix position.
        @type suffix_index: integer.
        @return: suffix.
@rtype: unicode.
"""
if suffix_index < 0:
return self.unvocalized[self.right:]
else:
return self.unvocalized[suffix_index:]
def get_affix(self, prefix_index=-1, suffix_index=-1):
""" return the affix of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتكاتبانني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_affix()
أفت-انني
        @param prefix_index: indicates the left stemming position;
        if = -1: not considered, and takes the default word prefix length.
        @type prefix_index: integer.
        @param suffix_index: indicates the right stemming position;
        if = -1: not considered, and takes the default word suffix position.
        @type suffix_index: integer.
        @return: affix string (prefix-suffix).
@rtype: unicode.
"""
return u"-".join([self.get_prefix(prefix_index), \
self.get_suffix(suffix_index)])
    def get_affix_tuple(self, prefix_index=-1, suffix_index=-1):
""" return the affix tuple of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتضاربانني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_affix_tuple()
{'prefix': u'أفت', 'root': u'ضرب', 'suffix': u'انني', 'stem': u'ضارب'}
        @param prefix_index: indicates the left stemming position;
        if = -1: not considered, and takes the default word prefix length.
        @type prefix_index: integer.
        @param suffix_index: indicates the right stemming position;
        if = -1: not considered, and takes the default word suffix position.
        @type suffix_index: integer.
@return: affix tuple.
@rtype: dict.
"""
return {
'prefix':self.get_prefix(prefix_index),
'suffix':self.get_suffix(suffix_index),
'stem':self.get_stem(prefix_index, suffix_index),
'starstem':self.get_starstem(prefix_index, suffix_index),
'root':self.get_root(prefix_index, suffix_index),
}
#########################################################
#{ Stemming Functions
#########################################################
def light_stem(self, word):
u"""
Stemming function, stem an arabic word, and return a stem.
        This function stores the stemming positions (left, right) in the
        instance, so other calculated attributes such as the stem, prefix,
        suffix and root can be retrieved afterwards.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتضاربانني'
>>> stem = ArListem.light_stem(word)
>>> print ArListem.get_stem()
ضارب
>>> print ArListem.get_starstem()
*ا**
>>> print ArListem.get_left()
3
>>> print ArListem.get_right()
6
>>> print ArListem.get_root()
ضرب
@param word: the input word.
@type word: unicode.
@return: stem.
@rtype: unicode.
"""
if word == u'':
return u''
#~ starword, left, right = self.transform2stars(word)
self.transform2stars(word)
# segment
self.segment(word)
#constitute the root
#~ self.extract_root()
return self.get_stem()
def transform2stars(self, word):
"""
        Transform all non-affixation letters into a star.
        The star is a joker (by default '*')
        which indicates that the corresponding letter is an original one.
        This function is used by the stemmer to identify original letters,
        and returns a starred form and the stemming positions (left, right).
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتضاربانني'
            >>> starword, left, right = ArListem.transform2stars(word)
(أفت*ا**انني, 3, 6)
@param word: the input word.
@type word: unicode
@return: (starword, left, right):
- starword: all original letters converted into a star
- left: the greater possible left stemming position.
- right: the greater possible right stemming position.
@rtype: tuple.
"""
self.word = word
word = araby.strip_tashkeel(word)
# word, harakat = araby.separate(word)
self.unvocalized = word
word = re.sub(u"[%s]"%(araby.ALEF_MADDA), araby.HAMZA+araby.ALEF, word)
#~ word = re.sub(u"[^%s%s%s]"%(self.prefix_letters, self.suffix_letters, self.infix_letters), \
word = re.sub(u"[^%s%s]"%(self.prefix_letters, self.suffix_letters), \
self.joker, word)
#~ ln = len(word)
left = word.find(self.joker)
right = word.rfind(self.joker)
if left >= 0:
left = min(left, self.max_prefix_length-1)
right = max(right+1, len(word)-self.max_suffix_length)
prefix = word[:left]
            # stem: slice of the original word; all letters except infixes will be jokered
stem = self.word[left:right]
suffix = word[right:]
prefix = re.sub(u"[^%s]"%self.prefix_letters, self.joker, prefix)
# avoid null infixes
if self.infix_letters:
stem = re.sub(u"[^%s]"%self.infix_letters, self.joker, stem)
suffix = re.sub(u"[^%s]"%self.suffix_letters, self.joker, suffix)
word = prefix+stem+suffix
left = word.find(self.joker)
right = word.rfind(self.joker)
# prefix_list = self.PREFIX_LIST
# suffix_list = self.SUFFIX_LIST
if left < 0:
left = min(self.max_prefix_length, len(word)-2)
if left >= 0:
prefix = word[:left]
while prefix != "" and prefix not in self.prefix_list:
prefix = prefix[:-1]
if right < 0:
right = max(len(prefix), len(word)-self.max_suffix_length)
suffix = word[right:]
while suffix and suffix not in self.suffix_list:
suffix = suffix[1:]
left = len(prefix)
right = len(word)-len(suffix)
        # stem: slice of the original word; all letters except infixes will be jokered
stem = self.word[left:right]
# convert stem into stars.
        # a stem must start or end with alef;
        # any other infix letter is not an infix
        # at the border of the stem.
        # substitute all non infix letters
if self.infix_letters:
stem = re.sub(u"[^%s]"%self.infix_letters, self.joker, stem)
word = prefix+stem+suffix
# store result
self.stem_left = left
self.stem_right = right
self.starword = word
#~ self.extract_root()
# return starword, left, right position of stem
return (word, left, right)
def extract_root(self, prefix_index=-1, suffix_index=-1):
""" return the root of the treated word by the stemmer.
All non affix letters are converted to a joker.
All letters in the joker places are part of root.
        The joker is DEFAULT_JOKER = "*" by default.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'أفتصربونني'
>>> stem = ArListem .light_stem(word)
>>> print ArListem.get_starword()
أفت***ونني
>>> print ArListem.get_root()
ضرب
        @param prefix_index: indicates the left stemming position;
        if = -1: not considered, and takes the default
        word prefix length.
        @type prefix_index: integer.
        @param suffix_index: indicates the right stemming position;
        if = -1: not considered, and takes the default word suffix position.
@type suffix_index: integer.
@return: root.
@rtype: unicode.
"""
stem = self.get_stem(prefix_index, suffix_index)
root = u""
# if the stem has 3 letters it can be the root directly
if len(stem) == 3:
self.root = self._ajust_root(root, stem)
return self.root
starstem = self.get_starstem(prefix_index, suffix_index)
root = u""
if len(starstem) == len(stem):
for i, char in enumerate(stem):
if starstem[i] == self.joker:
root += char
else:
root = stem
# normalize root
root = self.normalize_root(root)
#controls on root letters and length
#~ if not self.is_root_length_valid(root):
#~ root = ""
if len(root) == 2:
root = self._ajust_root(root, starstem)
self.root = root
return root
def _ajust_root(self, root, starstem):
"""
        If the root has only two or three letters, complete it with another letter.
"""
if not starstem:
return root
if len(starstem) == 3:
starstem = starstem.replace(araby.ALEF, araby.WAW)
starstem = starstem.replace(araby.ALEF_MAKSURA, araby.YEH)
return starstem
        # The starstem can start with a joker (*) or an infix letter;
        # add a letter at the beginning
first = starstem[0]
last = starstem[-1:]
if first in (araby.ALEF, araby.WAW):
root = araby.WAW + root
elif first == araby.YEH:
root = araby.YEH + root
elif first == self.joker and last in (araby.ALEF, araby.WAW):
root += araby.WAW
elif first == self.joker and last in (araby.ALEF_MAKSURA, araby.YEH):
root += araby.WAW
elif first == self.joker and last == self.joker:
            # if length == 2, it is a doubled (geminated) root
if len(starstem) == 2:
root += root[-1]
else:
# I choose WAW because it's frequent
root = root[0]+ araby.WAW+ root[1]
return root
def _create_prefix_tree(self, prefixes):
"""
Create a prefixes tree from given prefixes list
@param prefixes: list of prefixes
@type prefixes: list of unicode
@return: prefixes tree
        @rtype: Tree structure
"""
prefixestree = {}
for prefix in prefixes:
# print prefix.encode('utf8')
branch = prefixestree
for char in prefix:
if char not in branch:
branch[char] = {}
branch = branch[char]
            # branch['#'] = '#' # the hash # as an end position
if '#' in branch:
branch['#'][prefix] = "#"
else:
branch['#'] = {prefix:"#", }
self.prefixes_tree = prefixestree
return self.prefixes_tree
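    # Illustrative note (added sketch, not part of the original code): for a
    # hypothetical prefix list such as [u"ال", u"و"], _create_prefix_tree builds
    # a character-keyed nested dict, roughly
    #   {u"ا": {u"ل": {"#": {u"ال": "#"}}}, u"و": {"#": {u"و": "#"}}}
    # so lookup_prefixes() can walk a word character by character and stop at
    # every complete prefix marked with "#".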
def _create_suffix_tree(self, suffixes):
"""
        Create a suffix tree from the given suffix list
        @param suffixes: list of suffixes
        @type suffixes: list of unicode
        @return: suffixes tree
        @rtype: Tree structure
"""
suffixestree = {}
for suffix in suffixes:
# print (u"'%s'"%suffix).encode('utf8')
branch = suffixestree
#reverse a string
for char in suffix[::-1]:
if char not in branch:
branch[char] = {}
branch = branch[char]
            # branch['#'] = '#' # the hash # as an end position
if "#" in branch:
branch['#'][suffix] = "#"
else:
branch['#'] = {suffix:"#", }
self.suffixes_tree = suffixestree
return self.suffixes_tree
def lookup_prefixes(self, word):
"""
        look up prefixes in the word
        @param word: the given word
        @type word: unicode
        @return: list of prefix start positions
        @rtype: list of int
"""
branch = self.prefixes_tree
lefts = [0, ]
i = 0
while i < len(word) and word[i] in branch:
if "#" in branch:
# if branch['#'].has_key(word[:i]):
lefts.append(i)
if word[i] in branch:
branch = branch[word[i]]
else:
# i += 1
break
i += 1
if i < len(word) and "#" in branch:
lefts.append(i)
return lefts
def lookup_suffixes(self, word):
"""
        look up suffixes in the word
        @param word: the given word
        @type word: unicode
        @return: list of suffix start positions
        @rtype: list of int
"""
branch = self.suffixes_tree
suffix = ''
# rights = [len(word)-1, ]
rights = []
i = len(word)-1
while i >= 0 and word[i] in branch:
suffix = word[i]+suffix
if '#' in branch:
# if branch['#'].has_key(word[i:]):
# rights.append(i)
rights.append(i+1)
if word[i] in branch:
branch = branch[word[i]]
else:
# i -= 1
break
i -= 1
if i >= 0 and "#" in branch:#and branch['#'].has_key(word[i+1:]):
rights.append(i+1)
return rights
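    # Usage sketch (behaviour summarised from the two methods above):
    # lookup_prefixes() returns the candidate left cut positions (0 is always
    # included) where a known prefix ends, and lookup_suffixes() returns the
    # candidate right cut positions where a known suffix starts; segment()
    # below combines both lists into (left, right) pairs.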
#########################################################
#{ Segmentation Functions
#########################################################
def segment(self, word):
""" generate a list of all possible segmentation positions
        (left, right) of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'فتضربين'
>>> print ArListem.segment(word)
        set([(1, 5), (2, 5), (0, 7)])
@return: List of segmentation
@rtype: set of tuple of integer.
"""
self.word = word
self.unvocalized = araby.strip_tashkeel(word)
# word, harakat = araby.separate(word)
word = re.sub(u"[%s]"%(araby.ALEF_MADDA), araby.HAMZA+araby.ALEF, word)
# get all lefts position of prefixes
lefts = self.lookup_prefixes(word)
# get all rights position of suffixes
rights = self.lookup_suffixes(word)
if lefts:
self.left = max(lefts)
else:
self.left = -1
if rights:
self.right = min(rights)
else:
self.right = -1
#~ ln = len(word)
self.segment_list = set([(0, len(word))])
# print lefts, rights
for i in lefts:
for j in rights:
if j >= i+2 :
self.segment_list.add((i, j))
# filter segment according to valid affixes list
self.left, self.right = self.get_left_right(self.segment_list)
return self.segment_list
# #########################################################
# #{ Segmentation Functions
# #########################################################
def get_segment_list(self):
""" return a list of segmentation positions (left, right)
of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'فتضربين'
>>> ArListem.segment(word)
>>> print ArListem.get_segment_list()
        set([(1, 5), (2, 5), (0, 7)])
@return: List of segmentation
@rtype: set of tuple of integer.
"""
return self.segment_list
def get_affix_list(self, seg_list=[]):
u""" return a list of affix tuple of the treated word by the stemmer.
Example:
>>> ArListem = ArabicLightStemmer()
>>> word = u'فتضربين'
>>> ArListem.segment(word)
>>> print ArListem.get_affix_list()
[{'prefix': u'ف', 'root': u'ضرب', 'suffix': u'\u064aن', 'stem': u'تضرب'},
{'prefix': u'فت', 'root': u'ضرب', 'suffix': u'\u064aن', 'stem': u'ضرب'},
{'prefix': u'', 'root': u'فضربن', 'suffix': u'', 'stem': u'فتضرب\u064aن'}]
@return: List of Affixes tuple
@rtype: list of dict.
"""
if not seg_list:
seg_list = self.segment_list
affix_list = []
for left,right in seg_list:
affix_list.append(self.get_affix_tuple(left, right))
return affix_list
def _valid_stem(self, stem, tag="noun", prefix=""):
""" Test if the stem is accepted"""
if not stem:
return False
# valid stems for verbs
if tag == "verb":
# verb has length <= 6
if len(stem) > 6 or len(stem) < 2:
return False
# forbidden letters in verbs like Teh Marbuta
elif araby.TEH_MARBUTA in stem:
return False
            # a 6-letter stem must start with ALEF
elif len(stem) == 6 and not stem.startswith(araby.ALEF):
return False
            # a 5-letter stem must start with ALEF/TEH or SEEN (a 6-letter verb stripped of its initial ALEF)
            # a five-letter stem may itself be a quinqueliteral verb,
            # so it must begin with TEH or ALEF;
            # but if it comes from a six-letter verb,
            # such as استغفر or افرنقع,
            # it must be preceded by a letter that drops the conjunctive ALEF (alef al-wasl)
elif len(stem) == 5 and not stem[0] in (araby.ALEF, araby.TEH):
if prefix[-1:] in (araby.YEH, araby.TEH, araby.NOON, araby.ALEF_HAMZA_ABOVE):
return False
            # an ALEF is not accepted after an imperfective (present-tense) prefix letter
elif stem.startswith(araby.ALEF) and prefix[-1:] in (araby.YEH, araby.NOON, araby.TEH, araby.ALEF_HAMZA_ABOVE, araby.ALEF):
return False
## lookup for stamp
if not verb_stamp_const.is_verb_stamp(stem):
return False
elif tag == "noun":
if len(stem) >= 8 :
return False
return True
return True
def _verify_affix(self, prefix_index=-1, suffix_index=-1):
"""
validate affixes against a list of valid affixes
"""
prefix = self.get_prefix(prefix_index)
suffix = self.get_suffix(suffix_index)
TAG = True
if TAG:
affix = prefix+'-'+suffix
stem = self.get_stem(prefix_index, suffix_index)
if affix in affix_const.VERB_AFFIX_LIST and self._valid_stem(stem,"verb", prefix):
# is a valid verb stem
if affix in affix_const.NOUN_AFFIX_LIST and self._valid_stem(stem,"noun"):
# is also a noun stem
return True # TAG VN
else:
return True # TAG V
else:
if affix in affix_const.NOUN_AFFIX_LIST and self._valid_stem(stem,"noun"):
return True # TAG N
else:
return False # not a valid verb or not a noun
return True
if self.valid_affixes_list :
affix = prefix+'-'+suffix
return affix in self.valid_affixes_list
else:
            # simplified review (heuristic checks)
            # the definite article AL together with an attached pronoun suffix
if ((u"ال" in prefix or u"لل" in prefix) and
(u'ه' in suffix or u'ك' in suffix)
):
return False
            # TEH MARBUTA together with imperfective prefix letters
if ((u"ي" in prefix or u"يس" in prefix or u"نس" in prefix
or u"تس" in prefix or u"سي" in prefix or u"سأ" in prefix) and
(u'ة' in suffix)
):
return False
            # the subject TEH (vowelled TEH) together with imperfective prefix letters
if ((u"ي" in prefix or u"يس" in prefix or u"نس" in prefix
or u"تس" in prefix or u"سي" in prefix or u"سأ" in prefix) and
(u'تم' in suffix or u'تن' in suffix )
):
return False
            # preposition prefixes together with the WAW of the sound masculine plural
            # or with the nominative dual suffix
if ((u"ك" in prefix or u"ب" in prefix or u"لل" in prefix) and
(u'و' in suffix or u'ان' in suffix)
):
return False
return True
###############################################################
#{ General Functions
###############################################################
def normalize(self, word=u""):
"""
Normalize a word.
Convert some leters forms into unified form.
@param word: the input word, if word is empty,
the word member of the class is normalized.
@type word: unicode.
@return: normalized word.
@rtype: unicode.
"""
if word == u'' and self.word == u"":
return u""
elif word != u'':
self.word = word
else:
word = self.word
self.normalized = normalize.normalize_searchtext(word)
return self.normalized
def tokenize(self, text=u""):
"""
Tokenize text into words
@param text: the input text.
@type text: unicode.
@return: list of words.
@rtype: list.
"""
if not text:
return []
else:
mylist = self.token_pat.split(text)
if u'' in mylist:
mylist.remove(u'')
return mylist
@staticmethod
def normalize_root(word):
""" test if word is a root"""
# change alef madda to hamza + ALEF
word = word.replace(araby.ALEF_MADDA, araby.HAMZA+ araby.ALEF)
word = word.replace(araby.TEH_MARBUTA, '')
word = word.replace(araby.ALEF_MAKSURA, araby.YEH)
return araby.normalize_hamza(word)
@staticmethod
def is_root_length_valid(root):
return (len(root) >= 2 and len(root)<=4)
@staticmethod
def most_common(lst):
triroots = [x for x in lst if len(x) == 3]
if triroots:
lst = triroots
return max(set(lst), key=lst.count)
def is_root(self, word):
""" test if word is a root"""
return word in self.root_list
@staticmethod
def get_left_right(ls):
"""
get the max left and the min right
"""
if not ls:
return -1,-1
l,_= max(ls)
r = min([y for (x,y) in ls if x==l])
return l, r
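        # Example (values chosen only for illustration): for the segment set
        # {(0, 7), (1, 5), (2, 5)}, max() picks the largest left, 2, and the
        # smallest right among pairs starting at 2 is 5, so the result is (2, 5).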
if __name__ == "__main__":
#~ from pyarabic.arabrepr import arepr as repr
ARLISTEM = ArabicLightStemmer()
wordlist =[u'أفتضاربانني',
u'بالمكتبة',
u'مزدهرة',
u'كاتب',
u'مضروب',
u'مضارب',
u"مردود",
u"مطلوب",
u"مشتت",
u'مزتهرة',
u'مضطرب',
u'بالمكتبة',
u'مالبدرسمه',
u"مكتوب",
u"الآجال",
u"بالبلدان",
u"وفيهما",
u"1245",
u"Taha",
u"@",
]
for word in wordlist:
# stemming word
ARLISTEM.light_stem(word)
# extract stem
print("stem", ARLISTEM.get_stem())
print(ARLISTEM.infix_letters)
# extract root
print("root:", ARLISTEM.get_root())
# get prefix position index
print("left",ARLISTEM.get_left())
print("left stem",ARLISTEM.stem_left)
# get prefix
print(ARLISTEM.get_prefix())
# get prefix with a specific index
print(ARLISTEM.get_prefix(2))
# get suffix position index
print("right",ARLISTEM.get_right())
print("right_stem",ARLISTEM.stem_right)
# get suffix
print("suffix", ARLISTEM.get_suffix())
# get suffix with a specific index
print(ARLISTEM.get_suffix(10))
# get affix tuple
print(ARLISTEM.get_affix_tuple())
# star words
print("starword", ARLISTEM.get_starword())
# get star stem
print("starstem",ARLISTEM.get_starstem())
# get normalized word
print("normalize", ARLISTEM.get_normalized())
# get unvocalized word
print("unvocalized",ARLISTEM.get_unvocalized())
# Detect all possible segmentation
print(ARLISTEM.segment(word))
print(ARLISTEM.get_segment_list())
# get affix list
print(repr(ARLISTEM.get_affix_list()))
| gpl-3.0 | -4,222,205,376,867,603,500 | 34.792945 | 136 | 0.546386 | false |
fpsw/Servo | servo/forms/devices.py | 1 | 2204 | # -*- coding: utf-8 -*-
from django import forms
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from servo.models import Tag, Device, Customer
from servo.forms import DatepickerInput, AutocompleteCharField
product_lines = [(k, x['name']) for k, x in Device.PRODUCT_LINES.items()]
class DeviceSearchForm(forms.Form):
product_line = forms.MultipleChoiceField(
choices=product_lines,
required=False
)
warranty_status = forms.MultipleChoiceField(
choices=Device.WARRANTY_CHOICES,
required=False,
)
date_start = forms.DateField(
required=False,
label=_('Created between'),
widget=DatepickerInput(attrs={'class': 'input-small'})
)
date_end = forms.DateField(
required=False,
label=mark_safe(' '),
widget=DatepickerInput(attrs={'class': 'input-small'})
)
sn = forms.CharField(required=False, label=_('Serial number contains'))
def __init__(self, *args, **kwargs):
super(DeviceSearchForm, self).__init__(*args, **kwargs)
self.fields['description'] = AutocompleteCharField('/api/device_models/',
max_length=128,
required=False,
label=_('Description contains')
)
class DeviceForm(forms.ModelForm):
"""The form for editing devices in the /devices view"""
"""
tags = forms.ModelMultipleChoiceField(
queryset=Tag.objects.filter(type='device'),
required=False
)
"""
class Meta:
model = Device
exclude = ('spec', 'customers', 'files', 'image_url',
'exploded_view_url', 'manual_url', )
widgets = {'purchased_on': DatepickerInput()}
class DeviceUploadForm(forms.Form):
datafile = forms.FileField(
help_text=_('Device data in Excel format (.xls or .xlsx)')
)
customer = forms.IntegerField(
required=False,
widget=forms.HiddenInput,
)
do_warranty_check = forms.BooleanField(
required=False,
initial=True,
help_text=_('Perform warranty check on uploaded serial numbers')
)
class DiagnosticsForm(forms.Form):
pass
| bsd-2-clause | -8,518,432,263,230,593,000 | 28.783784 | 81 | 0.632033 | false |
kevinselwyn/pokestop | api.py | 1 | 5080 | #!/usr/bin/python
# coding=utf-8
"""Pokéstop API"""
import sys
import argparse
import json
from pokestop import Pokestop
from flask import Flask, jsonify
from flask_restful import Api, reqparse, Resource
#----------------------------------------------------------------#
# Constants
HOSTNAME = '0.0.0.0'
PORT = 5000
MIME = 'application/json'
#----------------------------------------------------------------#
# Utilities
def custom_error(status_code=404, message=''):
"""Returns custom JSON error"""
response = jsonify({
'status': status_code,
'message': message
})
response.status_code = status_code
response.content_type = MIME
return response
def get_args(variables=None):
"""Parses data or header arguments"""
parser = reqparse.RequestParser()
for variable, val in variables.items():
parser.add_argument(variable)
args = parser.parse_args()
output = {}
for key, val in args.items():
output[key] = val
for key, val in variables.items():
if not key in output or not output[key]:
output[key] = val
return output
def make_response(output):
response = API.make_response(output, 200)
response.headers['X-Best-Team'] = 'Team Mystic'
return response
#----------------------------------------------------------------#
# App
APP = Flask(__name__)
API = Api(APP)
#----------------------------------------------------------------#
# Errors
@APP.errorhandler(404)
def page_not_found(error):
return custom_error(404, 'Invalid endpoint')
#----------------------------------------------------------------#
# Nearby
class NearbyEndpoint(Resource):
"""Nearby endpoint"""
    routes = [
        '/nearby',
    ]
@classmethod
def get(cls):
"""Gets nearby"""
args = get_args({
'SACSID': '<SACSID cookie>',
'csrftoken': '<csrftoken cookie>',
'latitude': '',
'longitude': '',
'minimum': 0,
'maximum': 1000,
'order': 'ASC',
'limit': 1000
})
if not 'SACSID' in args or not args['SACSID'] or not 'csrftoken' in args or not args['csrftoken']:
return custom_error(401, 'Unauthorized request')
if not 'latitude' in args or not args['latitude'] or not 'longitude' in args or not args['longitude']:
return custom_error(404, 'Missing latitude and longitude')
pokestop = Pokestop(args)
output = pokestop.entities()
response = make_response(json.loads(output))
return response
@classmethod
def post(cls):
"""Gets nearby by post"""
return cls.get()
API.add_resource(NearbyEndpoint, *NearbyEndpoint.routes)
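# Usage sketch (cookie and coordinate values below are placeholders, not real data):
# curl "http://localhost:5000/nearby?SACSID=<cookie>&csrftoken=<cookie>&latitude=40.7&longitude=-74.0"
# The two Niantic session cookies are required; latitude/longitude locate the scan.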
#----------------------------------------------------------------#
# Pokéstop
class PokestopEndpoint(Resource):
"""Pokéstop endpoint"""
    routes = [
        '/pokestop',
    ]
@classmethod
def get(cls):
"""Gets Pokéstop"""
args = get_args({
'SACSID': '<SACSID cookie>',
'csrftoken': '<csrftoken cookie>',
'guid': '',
'latitude': '',
'longitude': '',
'minimum': 0,
'maximum': 1000,
'order': 'ASC',
'limit': 1000
})
if not 'SACSID' in args or not args['SACSID'] or not 'csrftoken' in args or not args['csrftoken']:
return custom_error(401, 'Unauthorized request')
if not 'guid' in args or not args['guid']:
return custom_error(404, 'Missing Pokéstop GUID')
pokestop = Pokestop(args)
output = pokestop.entity(args['guid'])
response = make_response(json.loads(output))
return response
@classmethod
def post(cls):
"""Gets Pokéstop by post"""
return cls.get()
API.add_resource(PokestopEndpoint, *PokestopEndpoint.routes)
#----------------------------------------------------------------#
# Main
def main(argc=0, argv=None):
"""Main function"""
parser = argparse.ArgumentParser()
flags = [
{'short': '-n', 'long': '--host'},
{'short': '-p', 'long': '--port'},
{'short': '-d', 'long': '--debug'}
]
arguments = [
{
'help': 'Host',
'required': False,
'action': 'store',
'default': HOSTNAME
},
{
'help': 'Port',
'required': False,
'action': 'store',
'default': PORT
},
{
'help': 'Debugging',
'required': False,
'action': 'store_true',
'default': False
}
]
for i in range(0, len(flags)):
parser.add_argument(flags[i]['short'], flags[i]['long'], **arguments[i])
args = parser.parse_args(argv[1:argc])
APP.run(host=args.host, port=args.port, debug=args.debug)
if __name__ == '__main__':
main(len(sys.argv), sys.argv)
| gpl-3.0 | 4,443,108,776,689,280,500 | 23.394231 | 110 | 0.493102 | false |
Jamonek/Robinhood | docs/conf.py | 1 | 2026 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
project = "pyrh"
copyright = "2020, Unofficial Robinhood Python API"
author = "Unofficial Robinhood Python API"
master_doc = "index"
exclude_patterns = ["stubs/*"] # ignore stubs from checks
# The full version, including alpha/beta/rc tags
release = "2.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"autodocsumm",
"sphinx_autodoc_typehints",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# source_suffix = '.rst'
source_suffix = [".rst"]
# intersphinx
intersphinx_mapping = {
"requests": ("https://requests.readthedocs.io/en/master/", None),
}
# Autodoc
autodoc_default_flags = ["members"]
autosummary_generate = True
| mit | 3,453,407,910,459,892,000 | 31.15873 | 78 | 0.671273 | false |
nke001/attention-lvcsr | libs/Theano/theano/sandbox/cuda/cula.py | 1 | 3988 | import pkg_resources
import theano
from theano.sandbox.cuda.type import CudaNdarrayType
from theano.sandbox.cuda import GpuOp
from theano.sandbox.cuda.basic_ops import as_cuda_ndarray_variable
try:
from theano.sandbox.cuda import cuda_ndarray
dimshuffle = cuda_ndarray.cuda_ndarray.dimshuffle
except ImportError:
pass
cula_available = False
try:
from scikits.cuda import cula
cula_available = True
except (ImportError, OSError, pkg_resources.DistributionNotFound):
pass
cula_initialized = False
class GpuSolve(GpuOp):
"""
CULA GPU solver OP.
:param trans: Whether to take the transpose of the input matrix
or not.
"""
__props__ = ('trans',)
def __init__(self, trans='N'):
self.trans = trans
super(GpuSolve, self).__init__()
def output_type(self, inp):
return CudaNdarrayType(broadcastable=[False] * inp.type.ndim)
def make_node(self, inp1, inp2):
inp1 = as_cuda_ndarray_variable(inp1)
inp2 = as_cuda_ndarray_variable(inp2)
assert inp1.ndim == 2
assert inp2.ndim == 2
return theano.Apply(self, [inp1, inp2], [self.output_type(inp1)()])
def make_thunk(self,
node,
storage_map, _,
no_recycling=[]):
# Initialize CULA the first time it is needed
global cula_initialized
if not cula_available:
raise RuntimeError('Cula is not available and '
'GpuSolve Op can not be constructed.')
if not cula_initialized:
cula.culaInitialize()
cula_initialized = True
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
def thunk():
# size of the matrices to invert
z = outputs[0]
# Matrix
A = inputs[0][0]
# Solution vectors
b = inputs[1][0]
# A is not explicitly converted between C and F order, instead we
# switch the "transpose" flag
if self.trans in ('T', 'C'):
trans = 'N'
else:
trans = 'T'
# Convert b to F-order from c-order.
b_cpy = dimshuffle(b, (1, 0)).reshape((b.shape[0], b.shape[1]))
# This copy forces allocation of a new C-contiguous buffer
# and returns it.
A_cpy = A.copy()
b_cpy = b_cpy.copy()
def cula_gpu_solve(A_, b_, trans='T'):
A_shape = A_.shape
b_shape = b_.shape
assert(len(A_shape) == 2)
assert(len(b_shape) == 2)
if trans in ['T', 'C']:
l, n = A_shape
k, m = b_shape
if n != k:
raise ValueError('A and b must be aligned.')
elif trans in ['N']:
n, l = A_shape
k, m = b_shape
if l != m:
raise ValueError('A and b must be aligned.')
else:
raise ValueError('Invalid value for trans')
lda = max(1, n)
ldb = max(1, n, l)
# construct pointer arrays needed for culaDeviceSgels
# Cula requires you to pass a pointer for A and b.
A_ptr = A_.gpudata
b_ptr = b_.gpudata
cula.culaDeviceSgels(trans, n, l, m, A_ptr, lda, b_ptr, ldb)
return A_, b_
A_pycuda, b_pycuda = cula_gpu_solve(A_cpy, b_cpy, trans)
# Convert b to F-order from c-order and assign it to output:
b_cpy = b_cpy.reshape(b.shape[::-1])
b_cpy = dimshuffle(b_cpy, (1, 0))
z[0] = b_cpy
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
gpu_solve = GpuSolve()
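# Minimal usage sketch (assumptions: a CUDA device, scikits.cuda.cula installed,
# and Theano configured with device=gpu; this is not part of the original module):
# import theano
# import theano.tensor as T
# A = T.fmatrix('A')
# b = T.fmatrix('b')
# solve = theano.function([A, b], gpu_solve(A, b))  # least-squares solve of A x = b on the GPU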
| mit | 737,977,525,252,001,200 | 27.898551 | 77 | 0.515547 | false |
tkln/HelvarNet | http_gateway.py | 1 | 1266 | #!/usr/bin/python3
import http.server
import socketserver
import helvar
helvarNet = helvar.HelvarNet('10.254.1.2', 50000)
leds = [helvar.LedUnit(helvarNet, '1.2.1.1'),
helvar.LedUnit(helvarNet, '1.2.1.2'),
helvar.LedUnit(helvarNet, '1.2.1.3'),
helvar.LedUnit(helvarNet, '1.2.1.4'),
helvar.LedUnit(helvarNet, '1.2.1.5')]
class Handler(http.server.BaseHTTPRequestHandler):
def __parse_url(self):
parts = self.path.split('/')
print(self.path)
return {'base' : parts[1],
'id' : int(parts[2]),
'level' : int(parts[3]),
'fade_time' : int(parts[4])}
def do_HEAD(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_GET(self):
req = self.__parse_url()
if (req['base'] == 'lamp'):
leds[req['id']].set(req['level'], req['fade_time'])
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
#self.wfile.close()
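# Request format handled by Handler.do_GET (sketch): GET /lamp/<unit index>/<level>/<fade time>
# e.g. http://<gateway host>:8002/lamp/2/80/500 sets leds[2] to level 80 with fade time 500
# (the unit of the fade time depends on helvar.LedUnit.set, which is not shown here).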
PORT = 8002
socketserver.TCPServer.allow_reuse_address = True
httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()
| mit | 3,007,065,090,095,122,400 | 28.44186 | 63 | 0.578989 | false |
ddico/account-financial-tools | account_reversal/tests/test_account_reversal.py | 1 | 4593 | # -*- coding: utf-8 -*-
# Copyright 2014 Stéphane Bidoul <[email protected]>
# Copyright 2016 Antonio Espinosa <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
import random
class TestAccountReversal(TransactionCase):
def setUp(self):
super(TestAccountReversal, self).setUp()
self.move_obj = self.env['account.move']
self.move_line_obj = self.env['account.move.line']
self.company_id = self.env.ref('base.main_company').id
self.partner = self.env['res.partner'].create({
'name': 'Test partner',
})
self.journal = self.env['account.journal'].create({
'name': 'Test journal',
'code': 'COD',
'type': 'sale',
'company_id': self.company_id
})
type_revenue = self.env.ref('account.data_account_type_revenue')
type_payable = self.env.ref('account.data_account_type_payable')
self.account_sale = self.env['account.account'].create({
'name': 'Test sale',
'code': 'XX_700',
'user_type_id': type_revenue.id,
})
self.account_customer = self.env['account.account'].create({
'name': 'Test customer',
'code': 'XX_430',
'user_type_id': type_payable.id,
'reconcile': True,
})
def _create_move(self, with_partner=True, amount=100):
move_vals = {
'journal_id': self.journal.id,
'company_id': self.company_id,
'line_ids': [(0, 0, {
'name': '/',
'debit': amount,
'credit': 0,
'account_id': self.account_customer.id,
'company_id': self.company_id,
'partner_id': with_partner and self.partner.id
}), (0, 0, {
'name': '/',
'debit': 0,
'credit': amount,
'company_id': self.company_id,
'account_id': self.account_sale.id,
})]
}
return self.move_obj.create(move_vals)
def _move_str(self, move):
return ''.join(['%.2f%.2f%s' % (
x.debit, x.credit, x.account_id == self.account_sale and
':SALE_' or ':CUSTOMER_')
for x in move.line_ids.sorted(key=lambda r: r.account_id.id)])
def test_reverse(self):
move = self._create_move()
self.assertEqual(
self._move_str(move), '0.00100.00:SALE_100.000.00:CUSTOMER_')
move_prefix = 'REV_TEST_MOVE:'
line_prefix = 'REV_TEST_LINE:'
wizard = self.env['account.move.reverse'].with_context(
active_ids=move.ids
).create({
'move_prefix': move_prefix,
'line_prefix': line_prefix
})
self.assertEqual(wizard.date, move.date)
res = wizard.action_reverse()
rev = self.env['account.move'].browse(res['res_id'])
self.assertEqual(len(rev), 1)
self.assertEqual(rev.state, 'posted')
self.assertEqual(
self._move_str(rev), '100.000.00:SALE_0.00100.00:CUSTOMER_')
self.assertEqual(rev.ref[0:len(move_prefix)], move_prefix)
for line in rev.line_ids:
self.assertEqual(line.name[0:len(line_prefix)], line_prefix)
if line.account_id.reconcile:
self.assertTrue(line.reconciled)
def test_reverse_huge_move(self):
move = self._create_move()
for x in range(1, 100):
amount = random.randint(10, 100) * x
move.write({
'line_ids': [(0, 0, {
'name': '/',
'debit': amount,
'credit': 0,
'account_id': self.account_customer.id,
'company_id': self.company_id,
'partner_id': self.partner.id
}), (0, 0, {
'name': '/',
'debit': 0,
'credit': amount,
'company_id': self.company_id,
'account_id': self.account_sale.id,
})]
})
self.assertEqual(len(move.line_ids), 200)
move_prefix = 'REV_TEST_MOVE:'
line_prefix = 'REV_TEST_LINE:'
rev = move.create_reversals(move_prefix=move_prefix,
line_prefix=line_prefix, reconcile=True)
self.assertEqual(len(rev.line_ids), 200)
self.assertEqual(rev.state, 'posted')
| agpl-3.0 | -5,675,361,029,071,802,000 | 36.333333 | 76 | 0.514155 | false |
levilucio/SyVOLT | UMLRT2Kiltera_MM/Properties/from_thesis/HMM1_then1_ConnectedLHS.py | 1 | 2650 | from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM1_then1_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMM1_then1_ConnectedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMM1_then1_ConnectedLHS, self).__init__(name='HMM1_then1_ConnectedLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MM1_then1')
# Set the node attributes
# Nodes that represent the edges of the property.
# Add the edges
self.add_edges([
])
# Add the attribute equations
self["equations"] = []
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
| mit | 8,863,518,393,962,690,000 | 42.442623 | 125 | 0.47434 | false |
digris/openbroadcast.org | website/apps/alibrary/models/artistmodels.py | 1 | 13676 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import os
import uuid
import arating
import tagging
from alibrary.models import MigrationMixin, Relation, Profession
from alibrary.util.slug import unique_slugify
from alibrary.util.storage import get_dir_for_object, OverwriteStorage
from base.cacheops_extra import cached_uuid_aware
from base.mixins import TimestampedModelMixin
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db import models
from django.db.models import Q
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.utils.encoding import python_2_unicode_compatible
from django.utils import translation
from django_date_extensions.fields import ApproximateDateField
from django_extensions.db.fields import AutoSlugField
from l10n.models import Country
from tagging.registry import register as tagging_register
from .mediamodels import MediaArtists, MediaExtraartists, Media
from .releasemodels import Release
log = logging.getLogger(__name__)
LOOKUP_PROVIDERS = (("discogs", _("Discogs")), ("musicbrainz", _("Musicbrainz")))
def upload_image_to(instance, filename):
filename, extension = os.path.splitext(filename)
return os.path.join(get_dir_for_object(instance), "image%s" % extension.lower())
@python_2_unicode_compatible
class NameVariation(models.Model):
name = models.CharField(max_length=250, db_index=True)
artist = models.ForeignKey(
"Artist",
related_name="namevariations",
on_delete=models.CASCADE,
null=True,
blank=True,
)
class Meta:
app_label = "alibrary"
verbose_name = _("Name variation")
verbose_name_plural = _("Name variation")
ordering = ("name",)
def __str__(self):
return self.name
class ArtistManager(models.Manager):
def listed(self):
return self.get_queryset().filter(listed=True, priority__gt=0)
@python_2_unicode_compatible
class Artist(MigrationMixin, TimestampedModelMixin, models.Model):
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
name = models.CharField(max_length=250, db_index=True)
slug = AutoSlugField(
populate_from="name", editable=True, blank=True, overwrite=True, db_index=True
)
TYPE_CHOICES = (
("person", _("Person")),
("group", _("Group")),
("orchestra", _("Orchestra")),
("other", _("Other")),
)
type = models.CharField(
verbose_name="Artist type",
max_length=128,
blank=True,
null=True,
choices=TYPE_CHOICES,
)
main_image = models.ImageField(
verbose_name=_("Image"),
upload_to=upload_image_to,
storage=OverwriteStorage(),
null=True,
blank=True,
)
real_name = models.CharField(max_length=250, blank=True, null=True)
disambiguation = models.CharField(max_length=256, blank=True, null=True)
country = models.ForeignKey(Country, blank=True, null=True)
booking_contact = models.CharField(
verbose_name=_("Booking"), max_length=256, blank=True, null=True
)
email = models.EmailField(
verbose_name=_("E-Mail"), max_length=256, blank=True, null=True
)
date_start = ApproximateDateField(
verbose_name=_("Begin"),
blank=True,
null=True,
help_text=_("date of formation / date of birth"),
)
date_end = ApproximateDateField(
verbose_name=_("End"),
blank=True,
null=True,
help_text=_("date of breakup / date of death"),
)
# properties to create 'special' objects. (like 'Unknown')
listed = models.BooleanField(
verbose_name="Include in listings",
default=True,
help_text=_("Should this Artist be shown on the default Artist-list?"),
)
disable_link = models.BooleanField(
verbose_name="Disable Link",
default=False,
help_text=_('Disable Linking. Useful e.g. for "Varius Artists"'),
)
disable_editing = models.BooleanField(
verbose_name="Disable Editing",
default=False,
help_text=_('Disable Editing. Useful e.g. for "Unknown Artist"'),
)
excerpt = models.TextField(blank=True, null=True)
biography = models.TextField(blank=True, null=True)
members = models.ManyToManyField(
"self",
through="ArtistMembership",
symmetrical=False,
)
aliases = models.ManyToManyField(
"self",
through="ArtistAlias",
related_name="artist_aliases",
blank=True,
symmetrical=False,
)
# relations a.k.a. links
relations = GenericRelation(Relation)
# tagging (d_tags = "display tags")
d_tags = tagging.fields.TagField(
max_length=1024,
verbose_name="Tags",
blank=True,
null=True,
)
professions = models.ManyToManyField(
Profession,
through="ArtistProfessions",
)
# user relations
owner = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
related_name="artists_owner",
on_delete=models.SET_NULL,
)
creator = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
related_name="artists_creator",
on_delete=models.SET_NULL,
)
last_editor = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
related_name="artists_last_editor",
on_delete=models.SET_NULL,
)
publisher = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
related_name="artists_publisher",
on_delete=models.SET_NULL,
)
# identifiers
ipi_code = models.CharField(
verbose_name=_("IPI Code"), max_length=32, blank=True, null=True
)
isni_code = models.CharField(
verbose_name=_("ISNI Code"), max_length=32, blank=True, null=True
)
objects = ArtistManager()
class Meta:
app_label = "alibrary"
verbose_name = _("Artist")
verbose_name_plural = _("Artists")
ordering = ("name",)
def __str__(self):
return self.name
@property
def classname(self):
return self.__class__.__name__
def get_ct(self):
return "{}.{}".format(self._meta.app_label, self.__class__.__name__).lower()
def get_absolute_url(self):
if self.disable_link:
return None
return reverse("alibrary-artist-detail", kwargs={"uuid": str(self.uuid)})
def get_edit_url(self):
return reverse("alibrary-artist-edit", args=(self.pk,))
def get_admin_url(self):
return reverse("admin:alibrary_artist_change", args=(self.pk,))
def get_api_url(self):
return (
reverse(
"api_dispatch_detail",
kwargs={
"api_name": "v1",
"resource_name": "library/artist",
"pk": self.pk,
},
)
+ ""
)
@property
def description(self):
"""mapping to generic field"""
return self.biography
@cached_property
def get_membership(self):
""" get artists group/band membership """
return [m.parent for m in ArtistMembership.objects.filter(child=self)]
def get_alias_ids(self, exclude=None):
""" get ids of artists aliases """
exclude = exclude or []
alias_ids = []
parent_alias_ids = (
ArtistAlias.objects.filter(child__pk=self.pk)
.values_list("parent__pk", flat=True)
.distinct()
)
child_alias_ids = (
ArtistAlias.objects.filter(parent__pk=self.pk)
.values_list("child__pk", flat=True)
.distinct()
)
alias_ids.extend(parent_alias_ids)
alias_ids.extend(child_alias_ids)
for alias_id in alias_ids:
if not alias_id == self.pk and not alias_id in exclude:
exclude.append(alias_id)
alias_ids.extend(
Artist.objects.get(pk=alias_id).get_alias_ids(exclude=exclude)
)
return alias_ids
def get_aliases(self):
""" get artists aliases """
return (
Artist.objects.filter(pk__in=self.get_alias_ids([]))
.exclude(pk=self.pk)
.distinct()
)
###################################################################
# TODO: look for a better (=faster) way to get appearances!
###################################################################
@cached_uuid_aware(timeout=60 * 60 * 24)
def get_releases(self):
""" get releases where artist appears """
media_ids = []
qs_a = Media.objects.filter(artist=self)
qs_mediaartist = MediaArtists.objects.filter(artist=self)
media_ids += qs_a.values_list("id", flat=True)
media_ids += qs_mediaartist.values_list("media_id", flat=True)
return Release.objects.filter(
Q(media_release__pk__in=media_ids) | Q(album_artists__pk=self.pk)
).distinct()
@cached_uuid_aware(timeout=60 * 60 * 24)
def get_media(self):
""" get tracks where artist appears """
media_ids = []
qs_a = Media.objects.filter(artist=self)
qs_mediaartist = MediaArtists.objects.filter(artist=self)
qs_credited = MediaExtraartists.objects.filter(artist=self)
media_ids += qs_a.values_list("id", flat=True)
media_ids += qs_mediaartist.values_list("media_id", flat=True)
media_ids += qs_credited.values_list("media_id", flat=True)
return Media.objects.filter(pk__in=list(set(media_ids)))
def appearances(self):
""" get artists appearances (releases/tracks) """
try:
num_releases = self.get_releases().count()
except:
num_releases = 0
try:
num_media = self.get_media().count()
except:
num_media = 0
appearances = {"num_releases": num_releases, "num_media": num_media}
return appearances
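        # Illustrative return value (the counts are hypothetical):
        #   artist.appearances() -> {'num_releases': 3, 'num_media': 27}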
def get_lookup_providers(self):
providers = []
for key, name in LOOKUP_PROVIDERS:
relations = self.relations.filter(service=key)
relation = None
if relations.exists():
relation = relations[0]
providers.append({"key": key, "name": name, "relation": relation})
return providers
def save(self, *args, **kwargs):
unique_slugify(self, self.name)
if self.type:
self.type = self.type.lower()
"""
        TODO: implement this differently.
        There is a special-case artist called "Various Artists" that should only exist once.
        In case - for whatever unplanned reason - a duplicate comes in, we
        add a counter to the name to ensure uniqueness.
"""
if self.name == "Various Artists" and self.pk is None:
log.warning('attempt to create "Various Artists"')
original_name = self.name
i = 1
while Artist.objects.filter(name=self.name).count() > 0:
self.name = "%s %s" % (original_name, i)
i += 1
super(Artist, self).save(*args, **kwargs)
tagging_register(Artist)
arating.enable_voting_on(Artist)
# @receiver(post_save, sender=Artist)
# def action_handler(sender, instance, created, **kwargs):
# try:
# action_handler_task.delay(instance, created)
# except:
# pass
#
# @task
# def action_handler_task(instance, created):
# if created and instance.creator:
# action.send(instance.creator, verb=_('created'), target=instance)
#
# elif instance.last_editor:
# action.send(instance.last_editor, verb=_('updated'), target=instance)
@python_2_unicode_compatible
class ArtistMembership(models.Model):
parent = models.ForeignKey(
Artist, related_name="artist_parent", blank=True, null=True
)
child = models.ForeignKey(
Artist, related_name="artist_child", blank=True, null=True
)
profession = models.ForeignKey(
Profession, related_name="artist_membership_profession", blank=True, null=True
)
class Meta:
app_label = "alibrary"
verbose_name = _("Membersip")
verbose_name_plural = _("Membersips")
def __str__(self):
return '"%s" <> "%s"' % (self.parent.name, self.child.name)
def save(self, *args, **kwargs):
if not self.child or not self.parent:
self.delete()
super(ArtistMembership, self).save(*args, **kwargs)
@python_2_unicode_compatible
class ArtistAlias(models.Model):
parent = models.ForeignKey(Artist, related_name="alias_parent")
child = models.ForeignKey(Artist, related_name="alias_child")
class Meta:
app_label = "alibrary"
verbose_name = _("Alias")
verbose_name_plural = _("Aliases")
def __str__(self):
return '"%s" <> "%s"' % (self.parent.name, self.child.name)
@python_2_unicode_compatible
class ArtistProfessions(models.Model):
artist = models.ForeignKey("Artist")
profession = models.ForeignKey("Profession")
class Meta:
app_label = "alibrary"
verbose_name = _("Profession")
verbose_name_plural = _("Professions")
def __str__(self):
return '"%s" : "%s"' % (self.artist.name, self.profession.name)
| gpl-3.0 | -6,412,760,050,981,160,000 | 29.663677 | 92 | 0.602369 | false |
justinvanwinkle/wextracto | tests/test_etree.py | 1 | 7886 | from __future__ import unicode_literals, print_function
from six import BytesIO
from lxml import html
from operator import itemgetter
from wex.cache import Cache
from wex.response import Response, parse_headers
from wex import etree as e
from wex.iterable import first, flatten
example = b"""HTTP/1.1 200 OK
X-wex-request-url: http://some.com/
<html>
<head>
<base href="http://base.com/">
</head>
<body>
<h1>hi</h1>
<div id="div1">
<a href="/1"></a>
<a href=" /2 "></a>
<a></a>
</div>
<img src="http://other.com/src" />
<div id="links">
<a href="/1"></a>
<a href="http://subdomain.base.com/2"></a>
<a href="http://other.com/"></a>
</div>
<div id="iter_text">This is <span>some </span>text.</div>
<div id="nbsp"> </div>
<div id="br">oh<br>my</div>
<ul><li> 1</li><li></li><li>2 </li></ul>
<div class="thing">First <span>one thing</span></div>
<div class="thing">then <span>another thing</span>.</div>
<div id="drop-tree">Drop this<script>javascript</script> please.</div>
</body>
</html>
"""
example_with_dodgy_url = b"""HTTP/1.1 200 OK
X-wex-request-url: http://foo.com/bar[]/baz/
<html>
<body>
<a href="/1"></a>
</body>
</html>
"""
item0 = itemgetter(0)
def create_response(data):
return Response.from_readable(BytesIO(data))
def create_html_parser(monkeypatch, content_type):
class HTMLParser(object):
def __init__(self, **kw):
self.kw = kw
monkeypatch.setattr(e, 'HTMLParser', HTMLParser)
lines = [content_type, b'', b'']
CRLF = b'\r\n'
headers = parse_headers(BytesIO(CRLF.join(lines)))
return e.create_html_parser(headers)
def test_create_html_parser(monkeypatch):
content_type = b'Content-Type:text/html;charset=ISO8859-1'
parser = create_html_parser(monkeypatch, content_type)
assert parser.kw == {'encoding': 'windows-1252'}
def test_create_html_parser_charset_lookup_error(monkeypatch):
content_type = b'Content-Type:text/html;charset=wtf-123'
parser = create_html_parser(monkeypatch, content_type)
assert parser.kw == {'encoding': 'wtf-123'}
def test_parse():
etree = e.parse(create_response(example))
assert etree.xpath('//h1/text()') == ['hi']
def test_parse_unreadable():
obj = object()
assert e.parse(obj) is obj
def test_parse_ioerror():
class ProblemResponse(object):
def __init__(self):
self.headers = parse_headers(BytesIO())
self.url = None
def read(self, *args):
raise IOError
response = ProblemResponse()
etree = e.parse(response)
assert etree.getroot() is e.UNPARSEABLE
def test_xpath():
f = e.xpath('//h1/text()') | list
assert f(create_response(example)) == ['hi']
def test_xpath_re():
f = e.xpath('//*[re:test(text(), "SOME", "i")]/text()') | list
assert f(create_response(example)) == ['some ']
def test_xpath_re_match():
f = (e.xpath('re:match(//body, "\s+is\s+(some)\s+text", "gi")/text()') |
list)
assert f(create_response(example)) == ['some']
def test_css():
f = e.css('h1')
response = create_response(example)
res = f(response)
assert isinstance(res, list)
assert [elem.tag for elem in res] == ['h1']
def test_css_called_twice():
f = e.css('h1')
response = create_response(example)
with Cache():
assert f(response)== f(response)
def test_attrib():
f = e.css('#div1 a') | e.attrib('href') | list
r = create_response(example)
assert f(r) == ['/1', ' /2 ', None]
def test_attrib_default():
f = e.css('#div1 a') | e.attrib('nosuch', '') | list
assert f(create_response(example)) == ['', '', '']
def test_img_src():
f = e.css('img') | e.src_url
res = f(create_response(example))
assert hasattr(res, '__iter__')
assert not isinstance(res, list)
assert list(res) == ['http://other.com/src']
def test_get_base_url():
response = create_response(example)
tree = e.parse(response)
base_url = e.get_base_url(tree)
assert base_url == 'http://base.com/'
def test_href_url():
f = e.css('#links a') | e.href_url
res = f(create_response(example))
# we want the result to be an iterable, but not a list
assert hasattr(res, '__iter__')
assert not isinstance(res, list)
assert list(res) == ['http://base.com/1']
def test_href_url_same_suffix():
f = e.css('#links a') | e.href_url_same_suffix
res = f(create_response(example))
# we want the result to be an iterable, but not a list
assert hasattr(res, '__iter__')
assert not isinstance(res, list)
assert list(res) == ['http://base.com/1', 'http://subdomain.base.com/2']
def test_href_any_url():
f = e.css('#links a') | e.href_any_url
res = f(create_response(example))
# we want the result to be an iterable, but not a list
assert hasattr(res, '__iter__')
assert not isinstance(res, list)
assert list(res) == ['http://base.com/1',
'http://subdomain.base.com/2',
'http://other.com/']
def test_href_url_single():
f = e.css('#div1 a') | item0 | e.href_url
assert f(create_response(example)) == 'http://base.com/1'
def test_href_empty():
f = e.css('#nosuch') | e.href_url | list
assert f(create_response(example)) == []
def test_same_suffix():
f = e.same_suffix
base = 'http://example.net'
assert f((None, None)) == None
assert f(('', None)) == None
assert f(('com', None)) == None
assert f((base, None)) == None
assert f((base, 'http://example.net')) == 'http://example.net'
assert f((base, 'http://www.example.net')) == 'http://www.example.net'
assert f((base, 'javascript:alert("hi")')) == None
def test_same_domain():
base = 'http://example.net'
f = e.same_domain
assert f((None, None)) == None
assert f(('', None)) == None
assert f(('com', None)) == None
assert f((base, None)) == None
assert f((base, 'http://example.net')) == 'http://example.net'
assert f((base, 'http://www.example.net')) == None
assert f((base, 'javascript:alert("hi")')) == None
def test_text():
f = e.css('h1') | e.text | list
assert f(create_response(example)) == ['hi']
def test_nbsp():
func = e.css('#nbsp') | e.itertext() | list
assert func(create_response(example)) == [u'\xa0\xa0']
def test_text_content_with_br():
f = e.css('#br') | e.text_content
assert f(create_response(example)) == ['oh\nmy']
def test_text_html_comment():
tree = html.fromstring('<html><!-- comment --></html>')
assert [t for t in e.text(tree)] == []
def test_list_text_content():
func = e.css('ul li') | e.text_content
assert func(create_response(example)) == [' 1', '', '2 ']
def test_list_normalize_space():
func = e.css('ul li') | e.normalize_space
assert func(create_response(example)) == ['1', '', '2']
def test_href_when_url_contains_dodgy_characters():
f = e.css('a') | e.href_url | list
r = create_response(example_with_dodgy_url)
# This will fail if we don't quote/unquote the base_url
assert f(r) == ['http://foo.com/1']
def test_itertext():
f = e.css('.thing') | e.itertext() | flatten | list
expected = ['First ', 'one thing', 'then ', 'another thing', '.']
assert f(create_response(example)) == expected
def test_itertext_elem():
f = e.css('.thing') | first | e.itertext() | list
expected = ['First ', 'one thing']
assert f(create_response(example)) == expected
def test_normalize_space_nbsp():
f = e.css('#nbsp') | e.normalize_space
assert f(create_response(example)) == ['']
def test_drop_tree():
f = (e.xpath('//*[@id="drop-tree"]') |
e.drop_tree(e.css('script')) |
e.xpath('string()'))
assert f(create_response(example)) == ['Drop this please.']
| bsd-3-clause | 1,704,647,111,772,380,400 | 27.064057 | 76 | 0.593584 | false |
heLomaN/NetCrawler | nga_hot.py | 1 | 1579 | #!/usr/bin/env python
# coding=utf-8
import requests as rq
import random as rd
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
headers = {
'Connection': 'keep-alive',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.107 Safari/537.36',
}
page_idx = 1
query_dict = {'fid':'-7', 'page':str(page_idx)}
url_head = 'http://bbs.ngacn.cc/thread.php'
r = rq.get(url_head, params = query_dict)
#r = rq.get(url_head, params = query_dict, headers = headers)
print 'First init OK.'
r.encoding = 'gbk'
f = open('test_first.html', 'w')
f.write(r.text)
f.close()
headers = {
'Host': 'bbs.ngacn.cc',
'Connection': 'keep-alive',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.107 Safari/537.36',
'Referer': 'http://bbs.ngacn.cc/thread.php?fid=-7&page=1',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8'
}
rand = rd.randint(1, 999)
query_dict = {'fid':-7, 'page':page_idx, 'rand': rand}
#query_dict = {'fid':-7, 'page':page_idx, 'lite':'xml', 'rand': rand}
cookies = r.cookies
print cookies
#headers = r.headers
print headers
r = rq.get(url_head, params = query_dict, headers = headers, cookies = cookies)
print r.url
r.encoding = 'gbk'
#print r.text
f = open('test.html', 'w')
f.write(r.text)
f.close()
#print r.json()
| apache-2.0 | -929,872,826,803,928,700 | 25.762712 | 126 | 0.670044 | false |
lynchnf/maneki-neko-web | socialmedia/tests.py | 1 | 1601 | from django.test import TestCase
from cms.api import add_plugin
from cms.models import Placeholder
from socialmedia.cms_plugins import SocialLinkPlugin
from socialmedia.models import ICON_CHOICES
class SocialLinkPluginTest(TestCase):
def test_plugin_context(self):
placeholder = Placeholder.objects.create(slot='test')
model_instance = add_plugin(
placeholder,
SocialLinkPlugin,
'en',
icon = ICON_CHOICES[17][0],
size = 1,
url = "http://mimi-the-maneki-neko.tumblr.com/"
)
plugin_instance = model_instance.get_plugin_class_instance()
context = plugin_instance.render({}, model_instance, None)
model = context['instance']
self.assertEqual(model.url, "http://mimi-the-maneki-neko.tumblr.com/")
self.assertIn('title', context)
self.assertEqual(context['title'], 'Tumblr')
self.assertIn('styleClass', context)
self.assertEqual(context['styleClass'], 'fa fa-tumblr-square fa-lg')
def test_plugin_html(self):
placeholder = Placeholder.objects.create(slot='test')
model_instance = add_plugin(
placeholder,
SocialLinkPlugin,
'en',
icon = ICON_CHOICES[17][0],
size = 1,
url = "http://mimi-the-maneki-neko.tumblr.com/"
)
html = model_instance.render_plugin({})
self.assertEqual(html, '<a href="http://mimi-the-maneki-neko.tumblr.com/" title="Tumblr" target="_blank"><i class="fa fa-tumblr-square fa-lg"></i></a>') | mit | -7,077,663,203,663,897,000 | 38.073171 | 160 | 0.613991 | false |
smartdong/PythonPractise | Chapter 04/BombCatcher.py | 1 | 1793 | import sys, random, time, pygame
from pygame.locals import *
def print_text(font, x, y, text, color=(255,255,255)):
imgText = font.render(text, True, color)
screen.blit(imgText, (x,y))
pygame.init()
screen = pygame.display.set_mode((600,500))
pygame.display.set_caption("Bomb Catching Game")
font1 = pygame.font.Font(None, 24)
pygame.mouse.set_visible(False)
white = 255,255,255
red = 220, 50, 50
yellow = 230,230,50
black = 0,0,0
lives = 3
score = 0
clock_start = 0
game_over = True
mouse_x = mouse_y = 0
pos_x = 300
pos_y = 460
bomb_x = random.randint(0,500)
bomb_y = -50
vel_y = 7
while True:
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
elif event.type == MOUSEMOTION:
mouse_x,mouse_y = event.pos
move_x,move_y = event.rel
elif event.type == MOUSEBUTTONUP:
if game_over:
game_over = False
lives = 3
score = 0
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]:
sys.exit()
screen.fill((0,0,100))
if game_over:
print_text(font1, 100, 200, "<CLICK TO PLAY>")
else:
bomb_y += vel_y
if bomb_y > 500:
bomb_x = random.randint(0, 500)
bomb_y = -50
lives -= 1
if lives == 0:
game_over = True
elif bomb_y > pos_y:
if bomb_x > pos_x and bomb_x < pos_x + 120:
score += 10
bomb_x = random.randint(0, 500)
bomb_y = -50
pygame.draw.circle(screen, black, (bomb_x-4,int(bomb_y)-4), 30, 0)
pygame.draw.circle(screen, yellow, (bomb_x,int(bomb_y)), 30, 0)
pos_x = mouse_x
if pos_x < 0:
pos_x = 0
elif pos_x > 500:
pos_x = 500
pygame.draw.rect(screen, black, (pos_x-4,pos_y-4,120,40), 0)
pygame.draw.rect(screen, red, (pos_x,pos_y,120,40), 0)
print_text(font1, 0, 0, "LIVES: " + str(lives))
print_text(font1, 500, 0, "SCORE: " + str(score))
pygame.display.update()
| mit | -304,384,762,807,505,800 | 20.105882 | 68 | 0.625767 | false |
PHLF/rasa_nlu | _pytest/test_sanity.py | 1 | 2984 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import importlib
import pkgutil
from collections import defaultdict
import pytest
from multiprocessing import Queue, Process
from six import PY2
def import_submodules(package_name, skip_list):
""" Import all submodules of a module, recursively, including subpackages.
`skip_list` denotes packages that should be skipped during the import"""
package = importlib.import_module(package_name)
results = []
for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
full_name = package.__name__ + '.' + name
if full_name not in skip_list:
imported_module = importlib.import_module(full_name)
if PY2:
reload(imported_module)
else:
importlib.reload(imported_module)
results.append(full_name)
if is_pkg:
results += import_submodules(full_name, skip_list)
return results
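# Usage sketch (the module name in skip_list is hypothetical):
# import_submodules("rasa_nlu", skip_list={"rasa_nlu.some_optional_module"})
# returns the fully qualified names of every submodule that was (re)imported.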
@pytest.mark.parametrize("banned_package", ["spacy", "mitie", "sklearn", "duckling"])
def test_no_global_imports_of_banned_package(banned_package):
"""This test ensures that neither of the banned packages are imported module wise in any of our code files.
If one of the dependencies is needed, they should be imported within a function."""
q = Queue()
p = Process(target=get_tracked_imports, args=(q,))
p.start()
tracked_imports = q.get()
p.join()
def find_modules_importing(name):
return {v for k, vs in tracked_imports.items() if k.startswith(name) for v in vs}
assert not find_modules_importing(banned_package), \
"No module should import {} globally. Found in {}".format(
banned_package, ", ".join(find_modules_importing(banned_package)))
def get_tracked_imports(q):
import inspect
    # To track imports across modules, we will replace the default import function
try:
# noinspection PyCompatibility
import __builtin__
original_import_function = __builtin__.__import__
except ImportError:
# noinspection PyCompatibility
import builtins
original_import_function = builtins.__import__
tracked_imports = defaultdict(list)
def import_tracking(name, *x, **xs):
caller = inspect.currentframe().f_back
caller_name = caller.f_globals.get('__name__')
tracked_imports[name].append(caller_name)
return original_import_function(name, *x, **xs)
if PY2:
__builtin__.__import__ = import_tracking
else:
builtins.__import__ = import_tracking
# import all available modules and track imports on the way
import_submodules("rasa_nlu", skip_list={})
if PY2:
__builtin__.__import__ = original_import_function
else:
builtins.__import__ = original_import_function
q.put(tracked_imports)
| apache-2.0 | -1,151,380,040,605,416,700 | 32.155556 | 111 | 0.656836 | false |
eduardoklosowski/ergo-notes | ergonotes/migrations/0001_initial.py | 1 | 1776 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('priority', models.SmallIntegerField(choices=[(1, 'Alta'), (0, 'Normal'), (-1, 'Baixa')], default=0, verbose_name='prioridade')),
('title', models.CharField(max_length=32, verbose_name='título')),
('show_on_home', models.BooleanField(default=False, verbose_name='mostrar no home')),
('create_on', models.DateTimeField(auto_now_add=True, verbose_name='criado em')),
('modify_on', models.DateTimeField(auto_now=True, verbose_name='atualizado em')),
('markup', models.CharField(choices=[('txt', 'Texto'), ('html', 'HTML'), ('rst', 'reStructuredText'), ('mk', 'Markdown'), ('textile', 'Textile')], default='txt', verbose_name='markup', max_length=8)),
('text', models.TextField(verbose_name='texto', blank=True)),
('user', models.ForeignKey(verbose_name='usuário', related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'notas',
'verbose_name': 'nota',
'ordering': ('user', '-priority', 'title'),
},
),
migrations.AlterUniqueTogether(
name='note',
unique_together=set([('user', 'title')]),
),
]
| agpl-3.0 | -3,621,076,773,664,285,700 | 45.684211 | 216 | 0.56708 | false |
asimshankar/tensorflow | tensorflow/python/keras/integration_test.py | 1 | 13458 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.layers import core as tf_core_layers
from tensorflow.python.ops import nn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.platform import test
class KerasIntegrationTest(test.TestCase):
def test_version(self):
self.assertTrue(keras.__version__.endswith('-tf'))
@test_util.run_v1_only('b/120545219')
def test_vector_classification_sequential(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10,),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential([
keras.layers.Dense(16,
activation='relu',
input_shape=x_train.shape[1:]),
keras.layers.Dropout(0.1),
keras.layers.Dense(y_train.shape[-1], activation='softmax')
])
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_deprecated_v1
def test_vector_classification_functional(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(20,),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
inputs = keras.layers.Input(shape=x_train.shape[1:])
x = keras.layers.Dense(16, activation='relu')(inputs)
x = keras.layers.Dropout(0.1)(x)
outputs = keras.layers.Dense(y_train.shape[-1], activation='softmax')(x)
model = keras.models.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_deprecated_v1
def test_temporal_classification_sequential(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(4, 10),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(keras.layers.LSTM(5, return_sequences=True,
input_shape=x_train.shape[1:]))
model.add(keras.layers.GRU(y_train.shape[-1], activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=15, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_deprecated_v1
def test_temporal_classification_sequential_tf_rnn(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(4, 10),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(keras.layers.RNN(rnn_cell.LSTMCell(5), return_sequences=True,
input_shape=x_train.shape[1:]))
model.add(keras.layers.RNN(rnn_cell.GRUCell(y_train.shape[-1],
activation='softmax',
dtype=dtypes.float32)))
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=15, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
def test_image_classification_sequential(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(12, 12, 3),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(
4, 3,
padding='same',
activation='relu',
input_shape=x_train.shape[1:]))
model.add(keras.layers.Conv2D(
8, 3,
padding='same',
activation='relu'))
model.add(keras.layers.Conv2D(
16, 3,
padding='same',
activation='relu'))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(y_train.shape[-1], activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.8),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_v1_only('b/120545219')
def test_video_classification_functional(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(4, 8, 8, 3),
num_classes=3)
y_train = keras.utils.to_categorical(y_train)
inputs = keras.layers.Input(shape=x_train.shape[1:])
x = keras.layers.TimeDistributed(
keras.layers.Conv2D(4, 3, activation='relu'))(inputs)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.TimeDistributed(keras.layers.GlobalMaxPooling2D())(x)
x = keras.layers.Conv1D(8, 3, activation='relu')(x)
x = keras.layers.Flatten()(x)
outputs = keras.layers.Dense(y_train.shape[-1], activation='softmax')(x)
model = keras.models.Model(inputs, outputs)
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.8),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_v1_only('b/120545219')
def test_vector_classification_shared_sequential(self):
# Test that Sequential models that feature internal updates
# and internal losses can be shared.
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10,),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
base_model = keras.models.Sequential([
keras.layers.Dense(16,
activation='relu',
kernel_regularizer=keras.regularizers.l2(1e-5),
bias_regularizer=keras.regularizers.l2(1e-5),
input_shape=x_train.shape[1:]),
keras.layers.BatchNormalization(),
])
x = keras.layers.Input(x_train.shape[1:])
y = base_model(x)
y = keras.layers.Dense(y_train.shape[-1], activation='softmax')(y)
model = keras.models.Model(x, y)
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
self.assertEqual(len(model.losses), 2)
self.assertEqual(len(model.updates), 2)
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
@test_util.run_v1_only('b/120545219')
def test_vector_classification_shared_model(self):
# Test that functional models that feature internal updates
# and internal losses can be shared.
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10,),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
inputs = keras.layers.Input(x_train.shape[1:])
x = keras.layers.Dense(16,
activation='relu',
kernel_regularizer=keras.regularizers.l2(1e-5),
bias_regularizer=keras.regularizers.l2(1e-5),
input_shape=x_train.shape[1:])(inputs)
x = keras.layers.BatchNormalization()(x)
base_model = keras.models.Model(inputs, x)
x = keras.layers.Input(x_train.shape[1:])
y = base_model(x)
y = keras.layers.Dense(y_train.shape[-1], activation='softmax')(y)
model = keras.models.Model(x, y)
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=2)
self.assertGreater(history.history['val_acc'][-1], 0.7)
def test_embedding_with_clipnorm(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Embedding(input_dim=1, output_dim=1))
model.compile(optimizer=keras.optimizers.SGD(clipnorm=0.1), loss='mse')
model.fit(np.array([[0]]), np.array([[[0.5]]]), epochs=1)
def test_using_tf_layers_in_keras_sequential_model(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10,),
num_classes=2)
model = keras.models.Sequential()
model.add(tf_core_layers.Dense(32, activation=nn.relu, input_shape=(10,)))
model.add(tf_core_layers.Dense(2, activation=nn.softmax))
model.summary()
y_train = keras.utils.to_categorical(y_train)
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=0)
self.assertGreater(history.history['val_acc'][-1], 0.7)
def test_using_tf_layers_in_keras_functional_model(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=0,
input_shape=(10,),
num_classes=2)
y_train = keras.utils.to_categorical(y_train)
inputs = keras.Input(shape=(10,))
x = tf_core_layers.Dense(32, activation=nn.relu)(inputs)
outputs = tf_core_layers.Dense(2, activation=nn.softmax)(x)
model = keras.Model(inputs, outputs)
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=16,
validation_data=(x_train, y_train),
verbose=0)
self.assertGreater(history.history['val_acc'][-1], 0.7)
if __name__ == '__main__':
test.main()
| apache-2.0 | -2,980,240,026,002,302,500 | 40.409231 | 80 | 0.588572 | false |
g2p/SimpleTAL | examples/elementtree-example/basic-example.py | 1 | 2377 | #!/usr/bin/python
""" Example TAL program
Copyright (c) 2009 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
As simple as it gets:
1 - Create a context
2 - Compile a template
3 - Expand the template
Module Dependencies: simpleTAL, simpleTALES
"""
from simpletal import simpleTAL, simpleTALES, simpleElementTree
import sys, logging
logging.basicConfig()
xmlTree = simpleElementTree.parseFile (file="input.xml")
# Create the context that is used by the template
context = simpleTALES.Context(allowPythonPath=1)
# Add the XML element tree to the context
context.addGlobal ("input", xmlTree)
# Open the template file
templateFile = open ("basic.xml", 'rb')
# Compile a template
template = simpleTAL.compileXMLTemplate (templateFile)
# Close the template file
templateFile.close()
# Expand the template as HTML using this context
template.expand (context, sys.stdout, "utf-8")
| bsd-3-clause | 628,443,351,640,474,600 | 37.967213 | 75 | 0.766933 | false |
techtonik/warehouse | tests/accounts/test_db.py | 1 | 1989 | # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import datetime
import mock
from warehouse.accounts.tables import users, emails
def test_get_user(dbapp):
dbapp.engine.execute(users.insert().values(
password="!",
username="test-user",
name="Test User",
last_login=datetime.datetime.utcnow(),
is_active=True,
is_superuser=False,
is_staff=False,
))
assert {
"date_joined": mock.ANY,
"email": None,
"name": "Test User",
"username": "test-user",
} == dbapp.db.accounts.get_user("test-user")
def test_get_user_with_email(dbapp):
dbapp.engine.execute(users.insert().values(
id=1,
password="!",
username="test-user",
name="Test User",
last_login=datetime.datetime.utcnow(),
is_active=True,
is_superuser=False,
is_staff=False,
))
dbapp.engine.execute(emails.insert().values(
user_id=1,
email="[email protected]",
primary=True,
verified=True,
))
assert {
"date_joined": mock.ANY,
"email": "[email protected]",
"name": "Test User",
"username": "test-user",
} == dbapp.db.accounts.get_user("test-user")
def test_get_user_missing(dbapp):
assert dbapp.db.accounts.get_user("test-user") is None
| apache-2.0 | 6,569,239,910,698,849,000 | 27.414286 | 74 | 0.641528 | false |
CiscoSystems/nova | nova/tests/test_notifications.py | 1 | 13358 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for common notifications."""
import copy
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova.network import api as network_api
from nova import notifications
from nova import test
from nova.tests import fake_network
from nova.tests import fake_notifier
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
class NotificationsTestCase(test.TestCase):
def setUp(self):
super(NotificationsTestCase, self).setUp()
self.net_info = fake_network.fake_get_instance_nw_info(self.stubs, 1,
1)
def fake_get_nw_info(cls, ctxt, instance):
self.assertTrue(ctxt.is_admin)
return self.net_info
self.stubs.Set(network_api.API, 'get_instance_nw_info',
fake_get_nw_info)
fake_network.set_stub_network_methods(self.stubs)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
self.flags(compute_driver='nova.virt.fake.FakeDriver',
network_manager='nova.network.manager.FlatManager',
notify_on_state_change="vm_and_task_state",
host='testhost')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.instance = self._wrapped_create()
def _wrapped_create(self, params=None):
instance_type = flavors.get_flavor_by_name('m1.tiny')
sys_meta = flavors.save_flavor_info({}, instance_type)
inst = {}
inst['image_ref'] = 1
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['instance_type_id'] = instance_type['id']
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['access_ip_v4'] = '1.2.3.4'
inst['access_ip_v6'] = 'feed:5eed'
inst['display_name'] = 'test_instance'
inst['hostname'] = 'test_instance_hostname'
inst['node'] = 'test_instance_node'
inst['system_metadata'] = sys_meta
if params:
inst.update(params)
return db.instance_create(self.context, inst)
def test_send_api_fault_disabled(self):
self.flags(notify_api_faults=False)
notifications.send_api_fault("http://example.com/foo", 500, None)
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
def test_send_api_fault(self):
self.flags(notify_api_faults=True)
exception = None
try:
# Get a real exception with a call stack.
raise test.TestingException("junk")
except test.TestingException as e:
exception = e
notifications.send_api_fault("http://example.com/foo", 500, exception)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
n = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(n.priority, 'ERROR')
self.assertEqual(n.event_type, 'api.fault')
self.assertEqual(n.payload['url'], 'http://example.com/foo')
self.assertEqual(n.payload['status'], 500)
self.assertIsNotNone(n.payload['exception'])
def test_notif_disabled(self):
# test config disable of the notifications
self.flags(notify_on_state_change=None)
old = copy.copy(self.instance)
self.instance["vm_state"] = vm_states.ACTIVE
old_vm_state = old['vm_state']
new_vm_state = self.instance["vm_state"]
old_task_state = old['task_state']
new_task_state = self.instance["task_state"]
notifications.send_update_with_states(self.context, self.instance,
old_vm_state, new_vm_state, old_task_state, new_task_state,
verify_states=True)
notifications.send_update(self.context, old, self.instance)
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
def test_task_notif(self):
# test config disable of just the task state notifications
self.flags(notify_on_state_change="vm_state")
        # we should not get a notification on task state change now
old = copy.copy(self.instance)
self.instance["task_state"] = task_states.SPAWNING
old_vm_state = old['vm_state']
new_vm_state = self.instance["vm_state"]
old_task_state = old['task_state']
new_task_state = self.instance["task_state"]
notifications.send_update_with_states(self.context, self.instance,
old_vm_state, new_vm_state, old_task_state, new_task_state,
verify_states=True)
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
# ok now enable task state notifications and re-try
self.flags(notify_on_state_change="vm_and_task_state")
notifications.send_update(self.context, old, self.instance)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
def test_send_no_notif(self):
# test notification on send no initial vm state:
old_vm_state = self.instance['vm_state']
new_vm_state = self.instance['vm_state']
old_task_state = self.instance['task_state']
new_task_state = self.instance['task_state']
notifications.send_update_with_states(self.context, self.instance,
old_vm_state, new_vm_state, old_task_state, new_task_state,
service="compute", host=None, verify_states=True)
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
def test_send_on_vm_change(self):
# pretend we just transitioned to ACTIVE:
params = {"vm_state": vm_states.ACTIVE}
(old_ref, new_ref) = db.instance_update_and_get_original(self.context,
self.instance['uuid'], params)
notifications.send_update(self.context, old_ref, new_ref)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
def test_send_on_task_change(self):
# pretend we just transitioned to task SPAWNING:
params = {"task_state": task_states.SPAWNING}
(old_ref, new_ref) = db.instance_update_and_get_original(self.context,
self.instance['uuid'], params)
notifications.send_update(self.context, old_ref, new_ref)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
def test_no_update_with_states(self):
notifications.send_update_with_states(self.context, self.instance,
vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
task_states.SPAWNING, verify_states=True)
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
def test_vm_update_with_states(self):
notifications.send_update_with_states(self.context, self.instance,
vm_states.BUILDING, vm_states.ACTIVE, task_states.SPAWNING,
task_states.SPAWNING, verify_states=True)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
notif = fake_notifier.NOTIFICATIONS[0]
payload = notif.payload
access_ip_v4 = self.instance["access_ip_v4"]
access_ip_v6 = self.instance["access_ip_v6"]
display_name = self.instance["display_name"]
hostname = self.instance["hostname"]
node = self.instance["node"]
self.assertEqual(vm_states.BUILDING, payload["old_state"])
self.assertEqual(vm_states.ACTIVE, payload["state"])
self.assertEqual(task_states.SPAWNING, payload["old_task_state"])
self.assertEqual(task_states.SPAWNING, payload["new_task_state"])
self.assertEqual(payload["access_ip_v4"], access_ip_v4)
self.assertEqual(payload["access_ip_v6"], access_ip_v6)
self.assertEqual(payload["display_name"], display_name)
self.assertEqual(payload["hostname"], hostname)
self.assertEqual(payload["node"], node)
def test_task_update_with_states(self):
self.flags(notify_on_state_change="vm_and_task_state")
notifications.send_update_with_states(self.context, self.instance,
vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
None, verify_states=True)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
notif = fake_notifier.NOTIFICATIONS[0]
payload = notif.payload
access_ip_v4 = self.instance["access_ip_v4"]
access_ip_v6 = self.instance["access_ip_v6"]
display_name = self.instance["display_name"]
hostname = self.instance["hostname"]
self.assertEqual(vm_states.BUILDING, payload["old_state"])
self.assertEqual(vm_states.BUILDING, payload["state"])
self.assertEqual(task_states.SPAWNING, payload["old_task_state"])
self.assertIsNone(payload["new_task_state"])
self.assertEqual(payload["access_ip_v4"], access_ip_v4)
self.assertEqual(payload["access_ip_v6"], access_ip_v6)
self.assertEqual(payload["display_name"], display_name)
self.assertEqual(payload["hostname"], hostname)
def test_update_no_service_name(self):
notifications.send_update_with_states(self.context, self.instance,
vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
None)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
# service name should default to 'compute'
notif = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('compute.testhost', notif.publisher_id)
def test_update_with_service_name(self):
notifications.send_update_with_states(self.context, self.instance,
vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
None, service="testservice")
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
        # service name should be the one passed in ('testservice')
notif = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('testservice.testhost', notif.publisher_id)
def test_update_with_host_name(self):
notifications.send_update_with_states(self.context, self.instance,
vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
None, host="someotherhost")
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
        # host should be the one passed in ('someotherhost')
notif = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('compute.someotherhost', notif.publisher_id)
def test_payload_has_fixed_ip_labels(self):
info = notifications.info_from_instance(self.context, self.instance,
self.net_info, None)
self.assertIn("fixed_ips", info)
self.assertEqual(info["fixed_ips"][0]["label"], "test1")
def test_send_access_ip_update(self):
notifications.send_update(self.context, self.instance, self.instance)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
notif = fake_notifier.NOTIFICATIONS[0]
payload = notif.payload
access_ip_v4 = self.instance["access_ip_v4"]
access_ip_v6 = self.instance["access_ip_v6"]
self.assertEqual(payload["access_ip_v4"], access_ip_v4)
self.assertEqual(payload["access_ip_v6"], access_ip_v6)
def test_send_name_update(self):
param = {"display_name": "new_display_name"}
new_name_inst = self._wrapped_create(params=param)
notifications.send_update(self.context, self.instance, new_name_inst)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
notif = fake_notifier.NOTIFICATIONS[0]
payload = notif.payload
old_display_name = self.instance["display_name"]
new_display_name = new_name_inst["display_name"]
self.assertEqual(payload["old_display_name"], old_display_name)
self.assertEqual(payload["display_name"], new_display_name)
def test_send_no_state_change(self):
called = [False]
def sending_no_state_change(context, instance, **kwargs):
called[0] = True
self.stubs.Set(notifications, '_send_instance_update_notification',
sending_no_state_change)
notifications.send_update(self.context, self.instance, self.instance)
self.assertTrue(called[0])
def test_fail_sending_update(self):
def fail_sending(context, instance, **kwargs):
raise Exception('failed to notify')
self.stubs.Set(notifications, '_send_instance_update_notification',
fail_sending)
notifications.send_update(self.context, self.instance, self.instance)
self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
| apache-2.0 | -8,300,314,221,059,833,000 | 40.484472 | 78 | 0.64613 | false |
DataDog/integrations-extras | storm/tests/conftest.py | 1 | 1254 | # (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import os
import socket
import pytest
from datadog_checks.dev import docker_run, get_here, run_command
from datadog_checks.dev.conditions import WaitFor
from .common import HOST, INSTANCE
def wait_for_thrift():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, 6627))
sock.close()
@pytest.fixture(scope='session')
def dd_environment():
compose_file = os.path.join(get_here(), 'compose', 'docker-compose.yaml')
# Build the topology jar to use in the environment
with docker_run(compose_file, build=True, service_name='topology-maker', sleep=15):
run_command(['docker', 'cp', 'topology-build:/topology.jar', os.path.join(get_here(), 'compose')])
nimbus_condition = WaitFor(wait_for_thrift)
with docker_run(compose_file, service_name='storm-nimbus', conditions=[nimbus_condition]):
with docker_run(compose_file, service_name='storm-ui', log_patterns=[r'org.apache.storm.ui.core']):
with docker_run(
compose_file, service_name='topology', log_patterns=['Finished submitting topology: topology']
):
yield INSTANCE
| bsd-3-clause | -365,325,387,029,298,400 | 35.882353 | 110 | 0.69059 | false |
Suwmlee/XX-Net | Python3/lib/socket.py | 1 | 27859 | # Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
IntEnum constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Integer constants:
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io, selectors
from enum import IntEnum
try:
import errno
except ImportError:
errno = None
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["fromfd", "getfqdn", "create_connection",
"AddressFamily", "SocketKind"]
__all__.extend(os._get_exports_list(_socket))
# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for
# nicer string representations.
# Note that _socket only knows about the integer values. The public interface
# in this module understands the enums and translates them back from integers
# where needed (e.g. .family property of a socket object).
IntEnum._convert(
'AddressFamily',
__name__,
lambda C: C.isupper() and C.startswith('AF_'))
IntEnum._convert(
'SocketKind',
__name__,
lambda C: C.isupper() and C.startswith('SOCK_'))
_LOCALHOST = '127.0.0.1'
_LOCALHOST_V6 = '::1'
def _intenum_converter(value, enum_klass):
"""Convert a numeric family value to an IntEnum member.
If it's not a known member, return the numeric value itself.
"""
try:
return enum_klass(value)
except ValueError:
return value
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
errorTab = {}
errorTab[10004] = "The operation was interrupted."
errorTab[10009] = "A bad file handle was passed."
errorTab[10013] = "Permission denied."
errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
errorTab[10022] = "An invalid operation was attempted."
errorTab[10035] = "The socket operation would block"
errorTab[10036] = "A blocking operation is already in progress."
errorTab[10048] = "The network address is in use."
errorTab[10054] = "The connection has been reset."
errorTab[10058] = "The network has been shut down."
errorTab[10060] = "The operation timed out."
errorTab[10061] = "Connection refused."
errorTab[10063] = "The name is too long."
errorTab[10064] = "The host is down."
errorTab[10065] = "The host is unreachable."
__all__.append("errorTab")
class _GiveupOnSendfile(Exception): pass
class socket(_socket.socket):
"""A subclass of _socket.socket adding the makefile() method."""
__slots__ = ["__weakref__", "_io_refs", "_closed"]
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
# For user code address family and type values are IntEnum members, but
# for the underlying _socket.socket they're just integers. The
# constructor of _socket.socket converts the given argument to an
# integer automatically.
_socket.socket.__init__(self, family, type, proto, fileno)
self._io_refs = 0
self._closed = False
def __enter__(self):
return self
def __exit__(self, *args):
if not self._closed:
self.close()
def __repr__(self):
"""Wrap __repr__() to reveal the real class name and socket
address(es).
"""
closed = getattr(self, '_closed', False)
s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \
% (self.__class__.__module__,
self.__class__.__qualname__,
" [closed]" if closed else "",
self.fileno(),
self.family,
self.type,
self.proto)
if not closed:
try:
laddr = self.getsockname()
if laddr:
s += ", laddr=%s" % str(laddr)
except error:
pass
try:
raddr = self.getpeername()
if raddr:
s += ", raddr=%s" % str(raddr)
except error:
pass
s += '>'
return s
def __getstate__(self):
raise TypeError("Cannot serialize socket object")
def dup(self):
"""dup() -> socket object
Duplicate the socket. Return a new socket object connected to the same
system resource. The new socket is non-inheritable.
"""
fd = dup(self.fileno())
sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
sock.settimeout(self.gettimeout())
return sock
def accept(self):
"""accept() -> (socket object, address info)
Wait for an incoming connection. Return a new socket
representing the connection, and the address of the client.
For IP sockets, the address info is a pair (hostaddr, port).
"""
fd, addr = self._accept()
# If our type has the SOCK_NONBLOCK flag, we shouldn't pass it onto the
# new socket. We do not currently allow passing SOCK_NONBLOCK to
# accept4, so the returned socket is always blocking.
type = self.type & ~globals().get("SOCK_NONBLOCK", 0)
sock = socket(self.family, type, self.proto, fileno=fd)
# Issue #7995: if no default timeout is set and the listening
# socket had a (non-zero) timeout, force the new socket in blocking
# mode to override platform-specific socket flags inheritance.
if getdefaulttimeout() is None and self.gettimeout():
sock.setblocking(True)
return sock, addr
def makefile(self, mode="r", buffering=None, *,
encoding=None, errors=None, newline=None):
"""makefile(...) -> an I/O stream connected to the socket
The arguments are as for io.open() after the filename,
except the only mode characters supported are 'r', 'w' and 'b'.
The semantics are similar too. (XXX refactor to share code?)
"""
if not set(mode) <= {"r", "w", "b"}:
raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = SocketIO(self, rawmode)
self._io_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
if hasattr(os, 'sendfile'):
def _sendfile_use_sendfile(self, file, offset=0, count=None):
self._check_sendfile_params(file, offset, count)
sockno = self.fileno()
try:
fileno = file.fileno()
except (AttributeError, io.UnsupportedOperation) as err:
raise _GiveupOnSendfile(err) # not a regular file
try:
fsize = os.fstat(fileno).st_size
            except OSError as err:
                raise _GiveupOnSendfile(err)  # not a regular file
if not fsize:
return 0 # empty file
blocksize = fsize if not count else count
timeout = self.gettimeout()
if timeout == 0:
raise ValueError("non-blocking sockets are not supported")
# poll/select have the advantage of not requiring any
# extra file descriptor, contrarily to epoll/kqueue
# (also, they require a single syscall).
if hasattr(selectors, 'PollSelector'):
selector = selectors.PollSelector()
else:
selector = selectors.SelectSelector()
selector.register(sockno, selectors.EVENT_WRITE)
total_sent = 0
# localize variable access to minimize overhead
selector_select = selector.select
os_sendfile = os.sendfile
try:
while True:
if timeout and not selector_select(timeout):
raise _socket.timeout('timed out')
if count:
blocksize = count - total_sent
if blocksize <= 0:
break
try:
sent = os_sendfile(sockno, fileno, offset, blocksize)
except BlockingIOError:
if not timeout:
# Block until the socket is ready to send some
# data; avoids hogging CPU resources.
selector_select()
continue
except OSError as err:
if total_sent == 0:
# We can get here for different reasons, the main
# one being 'file' is not a regular mmap(2)-like
# file, in which case we'll fall back on using
# plain send().
raise _GiveupOnSendfile(err)
raise err from None
else:
if sent == 0:
break # EOF
offset += sent
total_sent += sent
return total_sent
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset)
else:
def _sendfile_use_sendfile(self, file, offset=0, count=None):
raise _GiveupOnSendfile(
"os.sendfile() not available on this platform")
def _sendfile_use_send(self, file, offset=0, count=None):
self._check_sendfile_params(file, offset, count)
if self.gettimeout() == 0:
raise ValueError("non-blocking sockets are not supported")
if offset:
file.seek(offset)
blocksize = min(count, 8192) if count else 8192
total_sent = 0
# localize variable access to minimize overhead
file_read = file.read
sock_send = self.send
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
break
data = memoryview(file_read(blocksize))
if not data:
break # EOF
while True:
try:
sent = sock_send(data)
except BlockingIOError:
continue
else:
total_sent += sent
if sent < len(data):
data = data[sent:]
else:
break
return total_sent
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
def _check_sendfile_params(self, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not self.type & SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
def sendfile(self, file, offset=0, count=None):
"""sendfile(file[, offset[, count]]) -> sent
Send a file until EOF is reached by using high-performance
os.sendfile() and return the total number of bytes which
were sent.
*file* must be a regular file object opened in binary mode.
If os.sendfile() is not available (e.g. Windows) or file is
not a regular file socket.send() will be used instead.
*offset* tells from where to start reading the file.
If specified, *count* is the total number of bytes to transmit
as opposed to sending the file until EOF is reached.
File position is updated on return or also in case of error in
which case file.tell() can be used to figure out the number of
bytes which were sent.
The socket must be of SOCK_STREAM type.
Non-blocking sockets are not supported.
"""
try:
return self._sendfile_use_sendfile(file, offset, count)
except _GiveupOnSendfile:
return self._sendfile_use_send(file, offset, count)
def _decref_socketios(self):
if self._io_refs > 0:
self._io_refs -= 1
if self._closed:
self.close()
def _real_close(self, _ss=_socket.socket):
# This function should not reference any globals. See issue #808164.
_ss.close(self)
def close(self):
# This function should not reference any globals. See issue #808164.
self._closed = True
if self._io_refs <= 0:
self._real_close()
def detach(self):
"""detach() -> file descriptor
Close the socket object without closing the underlying file descriptor.
The object cannot be used after this call, but the file descriptor
can be reused for other purposes. The file descriptor is returned.
"""
self._closed = True
return super().detach()
@property
def family(self):
"""Read-only access to the address family for this socket.
"""
return _intenum_converter(super().family, AddressFamily)
@property
def type(self):
"""Read-only access to the socket type.
"""
return _intenum_converter(super().type, SocketKind)
if os.name == 'nt':
def get_inheritable(self):
return os.get_handle_inheritable(self.fileno())
def set_inheritable(self, inheritable):
os.set_handle_inheritable(self.fileno(), inheritable)
else:
def get_inheritable(self):
return os.get_inheritable(self.fileno())
def set_inheritable(self, inheritable):
os.set_inheritable(self.fileno(), inheritable)
get_inheritable.__doc__ = "Get the inheritable flag of the socket"
set_inheritable.__doc__ = "Set the inheritable flag of the socket"
def fromfd(fd, family, type, proto=0):
""" fromfd(fd, family, type[, proto]) -> socket object
Create a socket object from a duplicate of the given file
descriptor. The remaining arguments are the same as for socket().
"""
nfd = dup(fd)
return socket(family, type, proto, nfd)
if hasattr(_socket.socket, "share"):
def fromshare(info):
""" fromshare(info) -> socket object
Create a socket object from the bytes object returned by
socket.share(pid).
"""
return socket(0, 0, 0, info)
__all__.append("fromshare")
if hasattr(_socket, "socketpair"):
def socketpair(family=None, type=SOCK_STREAM, proto=0):
"""socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is
AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
"""
if family is None:
try:
family = AF_UNIX
except NameError:
family = AF_INET
a, b = _socket.socketpair(family, type, proto)
a = socket(family, type, proto, a.detach())
b = socket(family, type, proto, b.detach())
return a, b
else:
# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain.
def socketpair(family=AF_INET, type=SOCK_STREAM, proto=0):
if family == AF_INET:
host = _LOCALHOST
elif family == AF_INET6:
host = _LOCALHOST_V6
else:
raise ValueError("Only AF_INET and AF_INET6 socket address families "
"are supported")
if type != SOCK_STREAM:
raise ValueError("Only SOCK_STREAM socket type is supported")
if proto != 0:
raise ValueError("Only protocol zero is supported")
# We create a connected TCP socket. Note the trick with
# setblocking(False) that prevents us from having to create a thread.
lsock = socket(family, type, proto)
try:
lsock.bind((host, 0))
lsock.listen()
# On IPv6, ignore flow_info and scope_id
addr, port = lsock.getsockname()[:2]
csock = socket(family, type, proto)
try:
csock.setblocking(False)
try:
csock.connect((addr, port))
except (BlockingIOError, InterruptedError):
pass
csock.setblocking(True)
ssock, _ = lsock.accept()
except:
csock.close()
raise
finally:
lsock.close()
return (ssock, csock)
socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is AF_UNIX
if defined on the platform; otherwise, the default is AF_INET.
"""
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):
"""Raw I/O implementation for stream sockets.
This class supports the makefile() method on sockets. It provides
the raw I/O interface on top of a socket object.
"""
# One might wonder why not let FileIO do the job instead. There are two
# main reasons why FileIO is not adapted:
# - it wouldn't work under Windows (where you can't used read() and
# write() on a socket handle)
# - it wouldn't work with socket timeouts (FileIO would ignore the
# timeout and consider the socket non-blocking)
# XXX More docs
def __init__(self, sock, mode):
if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
raise ValueError("invalid mode: %r" % mode)
io.RawIOBase.__init__(self)
self._sock = sock
if "b" not in mode:
mode += "b"
self._mode = mode
self._reading = "r" in mode
self._writing = "w" in mode
self._timeout_occurred = False
def readinto(self, b):
"""Read up to len(b) bytes into the writable buffer *b* and return
the number of bytes read. If the socket is non-blocking and no bytes
are available, None is returned.
If *b* is non-empty, a 0 return value indicates that the connection
was shutdown at the other end.
"""
self._checkClosed()
self._checkReadable()
if self._timeout_occurred:
raise OSError("cannot read from timed out object")
while True:
try:
return self._sock.recv_into(b)
except timeout:
self._timeout_occurred = True
raise
except error as e:
if e.args[0] in _blocking_errnos:
return None
raise
def write(self, b):
"""Write the given bytes or bytearray object *b* to the socket
and return the number of bytes written. This can be less than
len(b) if not all data could be written. If the socket is
non-blocking and no bytes could be written None is returned.
"""
self._checkClosed()
self._checkWritable()
try:
return self._sock.send(b)
except error as e:
# XXX what about EINTR?
if e.args[0] in _blocking_errnos:
return None
raise
def readable(self):
"""True if the SocketIO is open for reading.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._reading
def writable(self):
"""True if the SocketIO is open for writing.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._writing
def seekable(self):
"""True if the SocketIO is open for seeking.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return super().seekable()
def fileno(self):
"""Return the file descriptor of the underlying socket.
"""
self._checkClosed()
return self._sock.fileno()
@property
def name(self):
if not self.closed:
return self.fileno()
else:
return -1
@property
def mode(self):
return self._mode
def close(self):
"""Close the SocketIO object. This doesn't close the underlying
socket, except if all references to it have disappeared.
"""
if self.closed:
return
io.RawIOBase.close(self)
self._sock._decref_socketios()
self._sock = None
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise error("getaddrinfo returns an empty list")
def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
"""Resolve host and port into list of address info entries.
Translate the host/port argument into a sequence of 5-tuples that contain
all the necessary arguments for creating a socket connected to that service.
host is a domain name, a string representation of an IPv4/v6 address or
None. port is a string service name such as 'http', a numeric port number or
None. By passing None as the value of host and port, you can pass NULL to
the underlying C API.
The family, type and proto arguments can be optionally specified in order to
narrow the list of addresses returned. Passing zero as a value for each of
these arguments selects the full range of results.
"""
# We override this function since we want to translate the numeric family
# and socket type values to enum constants.
addrlist = []
for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
af, socktype, proto, canonname, sa = res
addrlist.append((_intenum_converter(af, AddressFamily),
_intenum_converter(socktype, SocketKind),
proto, canonname, sa))
return addrlist
| bsd-2-clause | -947,895,180,707,628,000 | 35.800543 | 95 | 0.56786 | false |
Heappl/scripts | context.py | 1 | 3041 | #!/usr/bin/python3
def parse_commandline_options():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-s", "--stack", action='store_true', dest="stack", help="produces stack trace for each running component")
parser.add_option("-l", "--last_line", type='int', dest="line", help="prints n last lines for each running component")
return parser.parse_args()
(options, args) = parse_commandline_options()
def isStartingEvent(event):
return event in ["fnen", "cost"]
def isEndingEvent(event):
return event in ["fnlv", "cofi"]
def interesting(line):
tokens = line.split("|")
if (len(tokens) < 2):
return False
return isStartingEvent(tokens[1]) or isEndingEvent(tokens[1])
def getEndingFor(event):
events = {"fnen" : "fnlv", "cost" : "cofi"}
return events.get(event, "invalid")
def getStackDescriptionFor(event):
events = {"fnlv" : "->", "cofi" : "component"}
return events.get(event, "")
def getEvent(line):
tokens = line.split("|")
return tokens[1]
def getEventData(line):
tokens = line.split("|")
if (len(tokens) < 2):
return (None, None)
if (tokens[1] in ["fnen", "fnlv"]):
return (tokens[1], tokens[3].split("(")[0])
return (tokens[1], tokens[2].split("=")[0])
def getThreadName(line):
tokens = line.split("|")
if (len(tokens) < 3):
return ""
threadTokens = tokens[2].split("=")
if (threadTokens[0] == "?"):
return threadTokens[1]
return threadTokens[0]
def splitPerThread(content):
ret = {}
for line in content:
name = getThreadName(line)
if (len(name) == 0):
continue
threadLog = ret.get(name, [])
threadLog.append(line)
ret[name] = threadLog
return ret
def generateStackForSingleThread(threadName, logs):
logs = [line for line in logs if interesting(line)]
stack = []
for line in logs:
(event, ident) = getEventData(line)
if isEndingEvent(event):
(topEvent, topIdent) = stack.pop()
if (topEvent != event) or (topIdent != ident):
print("ERROR: wrong ending event encountered (expected:{" + topEvent + "," + topIdent + "}" +
", seen:{" + event + "," + ident + "})")
else:
stack.append((getEndingFor(event), ident))
if (len(stack) > 0):
for (event, name) in stack:
print(getStackDescriptionFor(event), name)
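# Illustrative sketch, not part of the original script.  The exact log format
# is an assumption inferred from getEventData()/getThreadName():
#   <prefix>|<event>|<thread>=<id>|<function>(<args>)
# with fnen/fnlv marking function entry/leave and cost/cofi marking component
# start/finish.  The helper below is never called.
def _example_stack_trace():
    sample = [
        "0001|cost|worker=1",
        "0002|fnen|worker=1|do_work(42)",
        "0003|fnlv|worker=1|do_work(42)",
    ]
    # Prints "component worker": the component started but never finished.
    generateStackForSingleThread("worker", sample)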
for filepath in args:
perThreadLogs = splitPerThread(open(filepath).read().split("\n")[:-1])
if (options.stack):
for key in perThreadLogs.keys():
generateStackForSingleThread(key, perThreadLogs[key])
if (options.line):
for key in perThreadLogs.keys():
if getEvent(perThreadLogs[key][-1]) == 'cofi':
continue
for i in range(1, options.line + 1):
if len(perThreadLogs[key]) >= i:
print(perThreadLogs[key][-i])
print("\n")
| gpl-2.0 | 6,712,782,048,356,157,000 | 32.788889 | 130 | 0.585334 | false |
yiwen-luo/LeetCode | Python/design-log-storage-system.py | 1 | 1067 | # Time: put: O(1)
# retrieve: O(n + dlogd), n is the size of the total logs
# , d is the size of the found logs
# Space: O(n)
class LogSystem(object):
def __init__(self):
self.__logs = []
self.__granularity = {'Year': 4, 'Month': 7, 'Day': 10, \
'Hour': 13, 'Minute': 16, 'Second': 19}
def put(self, id, timestamp):
"""
:type id: int
:type timestamp: str
:rtype: void
"""
self.__logs.append((id, timestamp))
def retrieve(self, s, e, gra):
"""
:type s: str
:type e: str
:type gra: str
:rtype: List[int]
"""
i = self.__granularity[gra]
begin = s[:i]
end = e[:i]
return sorted(id for id, timestamp in self.__logs \
if begin <= timestamp[:i] <= end)
# Your LogSystem object will be instantiated and called as such:
# obj = LogSystem()
# obj.put(id,timestamp)
# param_2 = obj.retrieve(s,e,gra)
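# An additional, illustrative walk-through (the timestamps below are made up):
# obj = LogSystem()
# obj.put(1, "2017:01:01:23:59:59")
# obj.put(2, "2017:01:02:23:59:59")
# obj.retrieve("2017:01:01:00:00:00", "2017:01:02:23:59:59", "Year")  # -> [1, 2]
# obj.retrieve("2017:01:01:00:00:00", "2017:01:01:23:59:59", "Day")   # -> [1]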
| mit | 8,674,042,364,039,677,000 | 25.675 | 69 | 0.469541 | false |
lutris/website | scripts/import_steam_linux_games.py | 1 | 2485 | # pylint: disable=missing-docstring
import logging
import requests
from games.models import Game, Genre
from games.util.steam import get_store_info, create_steam_installer
from platforms.models import Platform
from common.util import slugify
LOGGER = logging.getLogger(__name__)
def run():
response = requests.get(
"https://raw.githubusercontent.com/SteamDatabase/SteamLinux/master/GAMES.json"
)
linux_games = response.json()
for game_id in linux_games:
if linux_games[game_id] is not True:
LOGGER.debug(
"Game %s likely has problems, skipping. "
"This game should be added manually if appropriate.",
game_id
)
continue
if Game.objects.filter(steamid=game_id).count():
# LOGGER.debug("Game %s is already in Lutris", game_id)
continue
store_info = get_store_info(game_id)
if not store_info:
LOGGER.warning("No store info for game %s", game_id)
continue
if store_info["type"] != "game":
LOGGER.warning("%s: %s is not a game (type: %s)",
game_id, store_info["name"], store_info["type"])
continue
slug = slugify(store_info["name"])
if Game.objects.filter(slug=slug).count():
LOGGER.warning("Game %s already in Lutris but does not have a Steam ID", game_id)
continue
game = Game.objects.create(
name=store_info["name"],
slug=slug,
steamid=game_id,
description=store_info["short_description"],
website=store_info["website"] or "",
is_public=True,
)
game.set_logo_from_steam()
LOGGER.debug("%s created", game)
if store_info["platforms"]["linux"]:
platform = Platform.objects.get(slug='linux')
LOGGER.info("Creating installer for %s", game)
create_steam_installer(game)
else:
platform = Platform.objects.get(slug='windows')
game.platforms.add(platform)
for steam_genre in store_info["genres"]:
genre, created = Genre.objects.get_or_create(slug=slugify(steam_genre["description"]))
if created:
genre.name = steam_genre["description"]
LOGGER.info("Created genre %s", genre.name)
genre.save()
game.genres.add(genre)
game.save()
| agpl-3.0 | 2,682,142,902,040,070,000 | 36.651515 | 98 | 0.57666 | false |
pelme/vasa | vasa/http/endpoints.py | 1 | 1052 | import asyncio
import mimetypes
from pathlib import Path
from .response import DataResponse, ResponseNotFound
@asyncio.coroutine
def index(request, writer, settings):
full_path = (Path(settings.webapp_root) / 'index.html').resolve()
with full_path.open('rb') as f:
return DataResponse(writer, data=f.read(), content_type='text/html')
@asyncio.coroutine
def webapp_files(request, writer, settings, path):
try:
full_path = (Path(settings.webapp_root) / path).resolve()
except FileNotFoundError:
return ResponseNotFound(writer)
if not str(full_path).startswith(settings.webapp_root):
return ResponseNotFound(writer)
try:
with full_path.open('rb') as f:
contents = f.read()
except FileNotFoundError:
return ResponseNotFound(writer)
else:
(content_type, encoding) = mimetypes.guess_type(str(full_path))
content_type = content_type or 'application/octet-stream'
return DataResponse(writer, data=contents, content_type=content_type)
| mit | -6,921,504,578,001,710,000 | 28.222222 | 77 | 0.689163 | false |
ezralanglois/arachnid | arachnid/core/parallel/process_tasks.py | 1 | 12031 | ''' Common parallel/serial design patterns
This module defines a set of common tasks that can be performed in parallel or serial.
.. Created on Jun 23, 2012
.. codeauthor:: Robert Langlois <[email protected]>
'''
import process_queue
import logging
import numpy.ctypeslib
import multiprocessing.sharedctypes
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
def process_mp(process, vals, worker_count, init_process=None, ignored_errors=None, **extra):
''' Generator that runs a process functor in parallel (or serial if worker_count
is less than 2) over a list of given data values and returns the result
:Parameters:
process : function
Functor to be run in parallel (or serial if worker_count is less than 2)
vals : list
List of items to process in parallel
worker_count : int
Number of processes to run in parallel
init_process : function
        Initialize the parameters for the child process
ignored_errors : list
Single element list with counter for ignored errors
extra : dict
Unused keyword arguments
:Returns:
val : object
Return value of process functor
'''
#_logger.error("worker_count1=%d"%worker_count)
if len(vals) < worker_count: worker_count = len(vals)
#_logger.error("worker_count2=%d"%worker_count)
if worker_count > 1:
def process_helper(val, **extra):
try:
return process(val, **extra)
except:
if ignored_errors is not None and len(ignored_errors) > 0:ignored_errors[0]+=1
if _logger.getEffectiveLevel()==logging.DEBUG or 1 == 1:
_logger.exception("Unexpected error in process - report this problem to the developer")
else:
_logger.warn("nexpected error in process - report this problem to the developer")
return extra.get('process_number', 0), val
qout = process_queue.start_workers_with_output(vals, process_helper, worker_count, init_process, ignore_error=True, **extra)
index = 0
while index < len(vals):
val = process_queue.safe_get(qout.get)
if isinstance(val, process_queue.ProcessException):
index = 0
while index < worker_count:
if process_queue.safe_get(qout.get) is None:
index += 1;
raise val
if val is None: continue
index += 1
yield val
else:
#_logger.error("worker_count3=%d"%worker_count)
logging.debug("Running with single process: %d"%len(vals))
for i, val in enumerate(vals):
try:
f = process(val, **extra)
except:
if ignored_errors is not None and len(ignored_errors) > 0: ignored_errors[0]+=1
if _logger.getEffectiveLevel()==logging.DEBUG or 1 == 1:
_logger.exception("Unexpected error in process - report this problem to the developer")
else:
_logger.warn("nexpected error in process - report this problem to the developer")
yield i, val
continue
yield i, f
def iterate_map(for_func, worker, thread_count, queue_limit=None, **extra):
    ''' Iterate over the input values and map the worker over them in parallel
'''
if thread_count < 2:
for val in worker(enumerate(for_func), process_number=0, **extra):
yield val
return
def queue_iterator(qin, process_number):
try:
while True:
val = process_queue.safe_get(qin.get)
if val is None: break
yield val
finally: pass
#_logger.error("queue-done")
def iterate_map_worker(qin, qout, process_number, process_limit, extra):
val = None
try:
val = worker(queue_iterator(qin, process_number), process_number=process_number, **extra)
except:
_logger.exception("Error in child process")
while True:
val = process_queue.safe_get(qin.get)
if val is None: break
finally:
qout.put(val)
#process_queue.safe_get(qin.get)
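    # queue_limit bounds how many pending items may sit in the input queue ahead of the workers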
if queue_limit is None: queue_limit = thread_count*8
else: queue_limit *= thread_count
qin, qout = process_queue.start_raw_enum_workers(iterate_map_worker, thread_count, queue_limit, 1, extra)
try:
for val in enumerate(for_func):
qin.put(val)
except:
_logger.error("for_func=%s"%str(for_func))
raise
for i in xrange(thread_count): qin.put(None)
#qin.join()
for i in xrange(thread_count):
val = process_queue.safe_get(qout.get)
#qin.put(None)
if val is None: raise ValueError, "Exception in child process"
yield val
def iterate_reduce(for_func, worker, thread_count, queue_limit=None, shmem_array_info=None, **extra):
''' Iterate over the input value and reduce after finished processing
'''
if thread_count < 2:
yield worker(enumerate(for_func), process_number=0, **extra)
return
shmem_map=None
shmem_map_base=None
if shmem_array_info is not None:
shmem_map=[]
shmem_map_base=[]
for i in xrange(thread_count):
base = {}
arr = {}
for key in shmem_array_info.iterkeys():
ar = shmem_array_info[key]
if ar.dtype.str[1]=='c':
typestr = ar.dtype.str[0]+'f'+str(int(ar.dtype.str[2:])/2)
ar = ar.view(numpy.dtype(typestr))
if ar.dtype == numpy.dtype(numpy.float64):
typecode="d"
elif ar.dtype == numpy.dtype(numpy.float32):
typecode="f"
else: raise ValueError, "dtype not supported: %s"%str(ar.dtype)
base[key] = multiprocessing.sharedctypes.RawArray(typecode, ar.ravel().shape[0])
arr[key] = numpy.ctypeslib.as_array(base[key])
arr[key] = arr[key].view(shmem_array_info[key].dtype).reshape(shmem_array_info[key].shape)
shmem_map.append(arr)
shmem_map_base.append(base)
del shmem_array_info
def queue_iterator(qin, process_number):
try:
while True:
val = process_queue.safe_get(qin.get)
if val is None: break
yield val
finally: pass
def iterate_reduce_worker(qin, qout, process_number, process_limit, extra, shmem_map_base=None):#=shmem_map):
val = None
try:
if shmem_map_base is not None:
ar = shmem_map_base[process_number]
ar_map={}
for key in ar.iterkeys():
ar_map[key] = numpy.ctypeslib.as_array(ar[key])
ar_map[key] = ar_map[key].view(shmem_map[process_number][key].dtype).reshape(shmem_map[process_number][key].shape)
extra.update(ar_map)
val = worker(queue_iterator(qin, process_number), process_number=process_number, **extra)
except:
_logger.exception("Error in child process")
while True:
val = process_queue.safe_get(qin.get)
if val is None: break
finally:
if shmem_map_base is not None:
qout.put(process_number)
else:
qout.put(val)
if queue_limit is None: queue_limit = thread_count*8
else: queue_limit *= thread_count
qin, qout = process_queue.start_raw_enum_workers(iterate_reduce_worker, thread_count, queue_limit, 1, extra, shmem_map_base)
try:
for val in enumerate(for_func):
qin.put(val)
except:
_logger.error("for_func=%s"%str(for_func))
raise
for i in xrange(thread_count): qin.put(None)
#qin.join()
for i in xrange(thread_count):
val = process_queue.safe_get(qout.get)
if shmem_map is not None:
val = shmem_map[val]
#qin.put(None)
if val is None: raise ValueError, "Exception in child process"
yield val
def for_process_mp(for_func, worker, shape, thread_count=0, queue_limit=None, **extra):
''' Generator to process collection of arrays in parallel
:Parameters:
for_func : func
Generate a list of data
    worker : function
Function to preprocess the images
thread_count : int
Number of threads
shape : int
Shape of worker result array
extra : dict
Unused keyword arguments
:Returns:
index : int
Yields index of output array
out : array
Yields output array of worker
'''
if thread_count < 2:
for i, val in enumerate(for_func):
res = worker(val, i, **extra)
yield i, res
else:
if queue_limit is None: queue_limit = thread_count*8
else: queue_limit *= thread_count
qin, qout = process_queue.start_raw_enum_workers(process_worker2, thread_count, queue_limit, -1, worker, extra)
try:
total = 0
for i, val in enumerate(for_func):
if i >= thread_count:
pos = process_queue.safe_get(qout.get) #if i > thread_count else i
if pos is None or pos == -1: raise ValueError, "Error occured in process: %d"%pos
res, idx = pos
yield idx, res
else:
pos = i
total += 1
qin.put((val,i))
for i in xrange(total):
pos = process_queue.safe_get(qout.get)
if pos is None or pos == -1: raise ValueError, "Error occured in process: %d"%pos
res, idx = pos
yield idx, res
finally:
#_logger.error("Terminating %d workers"%(thread_count))
for i in xrange(thread_count):
qin.put((-1, -1))
pos = process_queue.safe_get(qout.get)
if pos != -1:
_logger.error("Wrong return value: %s"%str(pos))
assert(pos==-1)
raise StopIteration
def process_worker2(qin, qout, process_number, process_limit, worker, extra):
''' Worker in each process that preprocesses the images
:Parameters:
qin : multiprocessing.Queue
Queue with index for input images in shared array
qout : multiprocessing.Queue
Queue with index and offset for the output images in shared array
process_number : int
Process number
process_limit : int
Number of processes
worker : function
Function to preprocess the images
extra : dict
Keyword arguments
'''
_logger.debug("Worker %d of %d - started"%(process_number, process_limit))
try:
while True:
pos = process_queue.safe_get(qin.get)
if pos is None or not hasattr(pos[0], 'ndim'): break
res, idx = pos
val = worker(res, idx, **extra)
qout.put((val, idx))
_logger.debug("Worker %d of %d - ending ..."%(process_number, process_limit))
qout.put(-1)
except:
_logger.exception("Finished with error")
qout.put(None)
else:
_logger.debug("Worker %d of %d - finished"%(process_number, process_limit))
| gpl-2.0 | 7,339,000,769,296,700,000 | 36.596875 | 134 | 0.555232 | false |
penguintutor/networking-quiz | src/quizstrings.py | 1 | 2597 | # Text has been moved to this class, potential to add different languages in future
# Note that this is not complete, some text (eg. buttons) has not been changed
# The corresponding json file, must be consistant by having all entries for all pages
# If an entry is not required (or is updated using a different method - eg. quiz options)
# then it should be added as empty quotes ""
import json
class QuizStrings():
filename = "quizstrings.json"
# pages contains a dict (indexed by page / screen name), then includes a dictionary which may contain lists (eg. details)
pages = {}
# Returns as a hash dictionary - useful for a full page update
def getPage(self, page_name):
return self.pages[page_name]
def getTitle(self):
return self.title
def load(self):
##todo - possibly add error checking - Not so important as it should
        # fail anyway and not necessarily in a user friendly way (user should not be editing
# the strings file and if it's missing then it's as bad as a missing .py file
with open(self.filename) as json_file:
json_data = json.load(json_file)
# Get title of the app from the root key
root_keys = list(json_data.keys())
self.title = root_keys[0]
# Json file is then broken down into relevant screens (referred to as pages)
for this_page in json_data[self.title]:
page = list(this_page.keys())[0]
page_title = this_page[page]["title"]
page_details = [
this_page[page]["details1"],
this_page[page]["details2"],
this_page[page]["details3"],
this_page[page]["details4"],
this_page[page]["details5"],
this_page[page]["details6"]
]
page_options = [
this_page[page]["option1"],
this_page[page]["option2"],
this_page[page]["option3"],
this_page[page]["option4"]
]
page_image = this_page[page]["image"]
page_left_button = this_page[page]["left_button"]
page_right_button = this_page[page]["right_button"]
self.pages[page]={"title" : page_title, "details": page_details, "options" : page_options, "image" : page_image, "left_button" : page_left_button, "right_button" : page_right_button}
| gpl-3.0 | 1,300,464,390,233,187,300 | 42.3 | 199 | 0.562187 | false |
RoboJackets/robocup-software | soccer/gameplay/tactics/positions/celebration.py | 1 | 2691 | import behavior
import robocup
import constants
import single_robot_composite_behavior
import main
import enum
import skills
import random
import time
class Celebration(
single_robot_composite_behavior.SingleRobotCompositeBehavior):
MaxSpinAngle = 360
SpinPerTick = 1
class State(enum.Enum):
run_around = 0
spin = 1
def __init__(self):
super().__init__(continuous=True)
for s in Celebration.State:
self.add_state(s, behavior.Behavior.State.running)
self.add_transition(behavior.Behavior.State.start,
Celebration.State.run_around, lambda: True,
'immediately')
self.add_transition(Celebration.State.run_around,
Celebration.State.spin, lambda: self.spin_time,
'time to yeet')
self.add_transition(
Celebration.State.spin,
Celebration.State.run_around, lambda: self.im_dizzy(),
'time to go running')
self.spin_angle = 0
self.spin_time = False
r = constants.Robot.Radius
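        # Four field corners, inset by one robot radius, plus the field centre as running targets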
self.corners = [
robocup.Point(-constants.Field.Width / 2 + r, r),
robocup.Point(constants.Field.Width / 2 - r, r), robocup.Point(
constants.Field.Width / 2 - r, constants.Field.Length - r),
robocup.Point(-constants.Field.Width / 2 + r,
constants.Field.Length - r),
robocup.Point(0, constants.Field.Length / 2)
]
self.current_corner = 0
self.start_time = time.time()
def on_enter_run_around(self):
self.current_corner = random.randint(0, 4)
        self.robot.move_to_direct(self.corners[self.current_corner])
def execute_run_around(self):
if (self.robot.pos - self.corners[self.current_corner]).mag() <= .05:
if (self.current_corner == 4):
self.spin_time = True
self.current_corner = random.randint(0, 3)
else:
self.current_corner = random.randint(0, 4)
if (self.current_corner < 5):
self.robot.move_to_direct(self.corners[self.current_corner])
    def on_enter_spin(self):
self.start_time = time.time()
def execute_spin(self):
angle = self.robot.angle
facing_point = robocup.Point.direction(angle) + self.robot.pos
facing_point.rotate(self.robot.pos, Celebration.SpinPerTick)
self.spin_angle += Celebration.SpinPerTick
self.robot.face(facing_point)
def im_dizzy(self):
return time.time() - self.start_time > 8 and time.time(
) - self.start_time < 20
| apache-2.0 | -893,343,985,385,578,400 | 31.421687 | 77 | 0.586771 | false |
ncadou/proctor | proctor/tor.py | 1 | 10805 | from datetime import datetime
from itertools import chain, cycle
from os import path
from threading import Event, Lock, Thread
from time import sleep
import socks
from desub import desub
from proctor.socket import InstrumentedSocket
import logging
log = logging.getLogger(__name__)
class TorProcess(Thread):
""" Runs and manages a Tor process in a thread.
This class takes care of starting and stopping a Tor process, as well as
monitoring connection times and the error rate and restarting the process
when unhealthy.
"""
def __init__(self, name, socks_port, control_port, base_work_dir,
boot_time_max=30, errors_max=10, conn_time_avg_max=2,
grace_time=30, sockets_max=None, resurrections_max=10):
super(TorProcess, self).__init__()
self.name = name
self.socks_port = socks_port
self.control_port = control_port
self.base_work_dir = base_work_dir
self.boot_time_max = boot_time_max
self.errors_max = errors_max
self.conn_time_avg_max = conn_time_avg_max
self.grace_time = grace_time
self.sockets_max = sockets_max
self.resurrections_max = resurrections_max
self._connected = Event()
self._exclusive_access = Lock()
self._ref_count = 0
self._ref_count_lock = Lock()
self._socket_count = 0
self._socket_count_lock = Lock()
self._stats_lock = Lock()
self._stats_window = 200
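        # Keep a sliding window of stats over the most recent 200 connections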
self._stoprequest = Event()
self._terminated = False
def run(self):
""" Run and supervise the Tor process. """
args = dict(CookieAuthentication=0, HashedControlPassword='',
ControlPort=self.control_port, PidFile=self.pid_file,
SocksPort=self.socks_port, DataDirectory=self.work_dir)
args = map(str, chain(*(('--' + k, v) for k, v in args.iteritems())))
tor = desub.join(['tor'] + args)
self._start(tor)
resurrections = 0
while not self._stoprequest.is_set():
if not tor.is_running():
if resurrections >= self.resurrections_max:
log.error('Resurrected %s %s times, giving up.'
% (self.name, resurrections))
self._terminated = True
break
resurrections += 1
self._restart(tor, died=True)
else:
log.info('Started %s' % self.name)
self.monitor(tor)
def monitor(self, tor):
""" Make sure Tor starts and stops when appropriate. """
while tor.is_running():
# Stop nicely when asked nicely.
if self._stoprequest.wait(1):
tor.stop()
log.debug('Stopped %s' % self.name)
# Check health and restart when appropriate.
elif self._connected.is_set():
errors, timing_avg, samples = self.get_stats()
too_many_errors = errors > self.errors_max
too_slow = timing_avg > self.conn_time_avg_max
max_use_reached = (self.sockets_max
and self._socket_count >= self.sockets_max)
needs_restart = too_many_errors or too_slow or max_use_reached
if self.age > self.grace_time and needs_restart:
self._restart(tor)
else:
out = tor.stdout.read()
# Check for successful connection.
if 'Bootstrapped 100%: Done.' in out:
self._connected.set()
log.info('%s is connected' % self.name)
self._start_time = datetime.utcnow()
else:
# Check if initialization takes too long.
if self.time_since_boot > self.boot_time_max:
self._restart(tor, failed_boot=True)
# Check for socket binding failures.
else:
for port in [self.socks_port, self.control_port]:
if 'Could not bind to 127.0.0.1:%s' % port in out:
error = ('Could not bind %s to 127.0.0.1:%s'
% (self.name, port))
log.warn(error)
self._terminated = True
break
def stop(self):
""" Signal the thread to stop itself. """
self._stoprequest.set()
@property
def work_dir(self):
return path.join(self.base_work_dir, self.name)
@property
def pid_file(self):
return path.join(self.work_dir, 'pid')
@property
def connected(self):
return self._connected.is_set()
@property
def age(self):
""" Return the number of seconds since the Tor circuit is usable. """
return (datetime.utcnow() - self._start_time).total_seconds()
@property
def terminated(self):
return self._terminated
@property
def time_since_boot(self):
""" Return the number of seconds since the last Tor process start. """
return (datetime.utcnow() - self._boot_time).total_seconds()
def _start(self, tor):
""" Start a Tor process. """
with self._stats_lock:
self._boot_time = datetime.utcnow()
self._socket_count = 0
self._stats_errors = list()
self._stats_timing = list()
tor.start()
def _restart(self, tor, failed_boot=False, died=False):
""" Safely replace a Tor instance with a fresh one. """
with self._exclusive_access: # Prevent creating sockets.
# Wait until all sockets have finished.
wait_start = datetime.utcnow()
while self._ref_count > 0:
if (datetime.utcnow() - wait_start).total_seconds() > 30:
log.error('Likely got a ref_count accounting error in %s'
% self.name)
self._ref_count = 0
break
sleep(1)
self._connected.clear()
if failed_boot:
log.warn('Restarting %s (did not initialize in time)'
% self.name)
elif died:
log.warn('Resurrected %s' % self.name)
else:
errors, timing_avg, samples = self.get_stats()
log.warn(('Restarting %s '
'(errors: %s, avg time: %s, count: %s, age: %s)')
% (self.name, errors, timing_avg, self._socket_count,
int(self.age)))
tor.stop()
self._start(tor)
def _inc_socket_count(self):
""" Increment the internal socket counter. """
with self._socket_count_lock:
self._socket_count += 1
def _inc_ref_count(self):
""" Increment the internal reference counter. """
with self._ref_count_lock:
self._ref_count += 1
def _dec_ref_count(self):
""" Decrement the internal reference counter. """
with self._ref_count_lock:
self._ref_count -= 1
def _receive_stats(self, timing, errors):
""" Maintain connection statistics over time. """
with self._stats_lock:
self._stats_errors.append(errors)
self._stats_timing.append(timing)
if len(self._stats_errors) > self._stats_window:
self._stats_errors = self._stats_errors[-self._stats_window:]
self._stats_timing = self._stats_timing[-self._stats_window:]
# We consider the socket at end of life when it sends the stats.
self._dec_ref_count()
def get_stats(self):
""" Return current statistics. """
with self._stats_lock:
samples = len(self._stats_timing)
errors = sum(self._stats_errors)
timing_avg = sum(self._stats_timing) / (samples or 1)
return errors, timing_avg, samples
def create_socket(self, suppress_errors=False, *args, **kwargs):
""" Return an InstrumentedSocket that will connect through Tor. """
if self.connected:
if not self._exclusive_access.acquire(False):
return None
try:
sock = InstrumentedSocket(self._receive_stats, *args, **kwargs)
args = (socks.PROXY_TYPE_SOCKS4, 'localhost', self.socks_port,
True, None, None) # rdns, username, password
sock.setproxy(*args)
# Keep track of how many sockets are using this Tor instance.
self._inc_ref_count()
self._inc_socket_count()
return sock
finally:
self._exclusive_access.release()
elif suppress_errors:
sleep(0.1) # Prevent fast spinning in (the proxy code) caused by
# a race condition when Tor restarts.
return None
else:
raise RuntimeError('%s not yet connected.' % self.name)
class TorSwarm(object):
""" Manages a number of Tor processes. """
def __init__(self, base_socks_port, base_control_port, work_dir,
sockets_max, **kwargs):
self.base_socks_port = base_socks_port
self.base_control_port = base_control_port
self.work_dir = work_dir
self.sockets_max = sockets_max
self.kwargs = kwargs
self._instances = list()
def instances(self):
""" Return an infinite generator cycling through Tor instances. """
for instance in cycle(self._instances):
if instance.terminated:
alive = list(i for i in self._instances if not i.terminated)
if len(alive) == 0:
log.critical('No alive Tor instance left. Bailing out.')
return
yield instance
def start(self, num_instances):
""" Start and return the Tor processes. """
log.info('Starting Tor swarm with %d instances...' % num_instances)
self._instances = list()
for i in range(num_instances):
tor = TorProcess('tor-%d' % i, self.base_socks_port + i,
self.base_control_port + i, self.work_dir,
sockets_max=self.sockets_max, **self.kwargs)
self._instances.append(tor)
tor.start()
sleep(0.1)
return self._instances
def stop(self):
""" Stop the Tor processes and wait for their completion. """
for tor in self._instances:
tor.stop()
tor.join()
| bsd-3-clause | -6,406,841,245,967,044,000 | 38.870849 | 79 | 0.537436 | false |
qiyuangong/Basic_Mondrian | basic_mondrain_test.py | 1 | 1879 | import unittest
from mondrian import mondrian
# from utils.read_data import read_data, read_tree
from models.gentree import GenTree
from models.numrange import NumRange
import random
import pdb
# Build a GenTree object
ATT_TREE = []
def init():
global ATT_TREE
ATT_TREE = []
tree_temp = {}
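    # Build a two-level generalization hierarchy: * -> {1,5} / {6,10} -> leaf values 1..10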
tree = GenTree('*')
tree_temp['*'] = tree
lt = GenTree('1,5', tree)
tree_temp['1,5'] = lt
rt = GenTree('6,10', tree)
tree_temp['6,10'] = rt
for i in range(1, 11):
if i <= 5:
t = GenTree(str(i), lt, True)
else:
t = GenTree(str(i), rt, True)
tree_temp[str(i)] = t
numrange = NumRange(['1', '2', '3', '4', '5',
'6', '7', '8', '9', '10'], dict())
ATT_TREE.append(tree_temp)
ATT_TREE.append(numrange)
class functionTest(unittest.TestCase):
def test1_mondrian(self):
init()
data = [['6', '1', 'haha'],
['6', '1', 'test'],
['8', '2', 'haha'],
['8', '2', 'test'],
['4', '1', 'hha'],
['4', '2', 'hha'],
['4', '3', 'hha'],
['4', '4', 'hha']]
result, eval_r = mondrian(ATT_TREE, data, 2)
# print result
# print eval_r
self.assertTrue(abs(eval_r[0] - 100.0 / 36) < 0.05)
def test2_mondrian(self):
init()
data = [['6', '1', 'haha'],
['6', '1', 'test'],
['8', '2', 'haha'],
['8', '2', 'test'],
['4', '1', 'hha'],
['4', '1', 'hha'],
['1', '1', 'hha'],
['2', '1', 'hha']]
result, eval_r = mondrian(ATT_TREE, data, 2)
# print result
# print eval_r
self.assertTrue(abs(eval_r[0] - 100.0 / 8) < 0.05)
if __name__ == '__main__':
unittest.main()
| mit | 86,076,437,654,644,900 | 26.632353 | 59 | 0.432145 | false |
KanoComputing/terminal-quest | linux_story/story/challenges/challenge_10.py | 1 | 6406 | # challenge_10.py
#
# Copyright (C) 2014-2016 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# A chapter of the story
from linux_story.StepTemplate import StepTemplate
from linux_story.step_helper_functions import unblock_commands_with_cd_hint
from linux_story.story.terminals.terminal_cd import TerminalCd
class StepTemplateCd(StepTemplate):
TerminalClass = TerminalCd
# ----------------------------------------------------------------------------------------
class Step1(StepTemplateCd):
story = [
_("You're in your house. You appear to be alone."),
_("Use {{yb:cat}} to {{lb:examine}} some of the objects around you.\n")
]
allowed_commands = [
"cat banana",
"cat cake",
"cat croissant",
"cat grapes",
"cat milk",
"cat newspaper",
"cat oven",
"cat pie",
"cat sandwich",
"cat table"
]
start_dir = "~/my-house/kitchen"
end_dir = "~/my-house/kitchen"
counter = 0
deleted_items = ["~/my-house/kitchen/note"]
file_list = [
{"path": "~/town/.hidden-shelter/Eleanor"},
{"path": "~/town/.hidden-shelter/Edward"},
{"path": "~/town/.hidden-shelter/Edith"},
{"path": "~/town/.hidden-shelter/apple"},
{"path": "~/town/.hidden-shelter/dog"},
{"path": "~/town/.hidden-shelter/basket/empty-bottle"},
{"path": "~/town/.hidden-shelter/.tiny-chest/MV"},
]
first_time = True
def check_command(self, line):
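        # Each allowed examination bumps the counter; two examined objects complete the step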
if line in self.allowed_commands:
self.counter += 1
self.allowed_commands.remove(line)
hint = _("{{gb:Well done! Just look at one more item.}}")
else:
if self.first_time:
hint = _("{{rb:Use}} {{yb:cat}} {{rb:to look at two of the " +\
"objects around you.}}")
else:
hint = _("{{rb:Use the command}} {{yb:%s}} {{rb:to progress.}}")\
% self.allowed_commands[0]
level_up = (self.counter >= 2)
if not level_up:
self.send_hint(hint)
self.first_time = False
else:
return level_up
def next(self):
return 10, 2
class Step2(StepTemplateCd):
story = [
_("There doesn't seem to be anything here but loads of food."),
_("See if you can find something back in {{bb:town}}.\n"),
_("First, use {{yb:cd ..}} to {{lb:leave}} the {{bb:kitchen}}.\n")
]
start_dir = "~/my-house/kitchen"
end_dir = "~/town"
commands = [
"cd ~/town",
"cd ~/town/",
"cd ..",
"cd ../",
"cd town",
"cd town/",
"cd ../..",
"cd ../../",
"cd"
]
num_turns_in_home_dir = 0
def block_command(self, line):
return unblock_commands_with_cd_hint(line, self.commands)
def check_command(self, line):
if self.get_fake_path() == self.end_dir:
return True
hint = ""
# decide command needed to get to next part of town
if self.get_fake_path() == '~/my-house/kitchen' or self.get_fake_path() == '~/my-house':
# If the last command the user used was to get here
# then congratulate them
if line == "cd .." or line == 'cd ../':
hint = _("{{gb:Good work! Now replay the last command using " +\
"the}} {{ob:UP}} {{gb:arrow on your keyboard.}}")
# Otherwise, give them a hint
else:
hint = _("{{rb:Use}} {{yb:cd ..}} {{rb:to make your way to town.}}")
elif self.get_fake_path() == '~':
# If they have only just got to the home directory,
# then they used an appropriate command
if self.num_turns_in_home_dir == 0:
hint = _("{{gb:Cool! Now use}} {{yb:cd town}} {{gb:to head to town.}}")
# Otherwise give them a hint
else:
hint = _("{{rb:Use}} {{yb:cd town}} {{rb:to go into town.}}")
# So we can keep track of the number of turns they've been in the
# home directory
self.num_turns_in_home_dir += 1
# print the hint
self.send_hint(hint)
def next(self):
return 10, 3
class Step3(StepTemplateCd):
story = [
_("Use {{yb:ls}} to {{lb:look around}}.\n"),
]
start_dir = "~/town"
end_dir = "~/town"
commands = "ls"
hints = [_("{{rb:Use}} {{yb:ls}} {{rb:to have a look around the town.}}")]
def next(self):
return 10, 4
class Step4(StepTemplateCd):
story = [
_("The place appears to be deserted."),
_("However, you think you hear whispers."),
# TODO make this writing small
_("\n{{wb:?:}} {{Bn:\".....if they use}} {{yb:ls -a}}{{Bn:, they'll see us...\"}}"),
_("{{wb:?:}} {{Bn:\"..Shhh! ...might hear....\"}}\n")
]
start_dir = "~/town"
end_dir = "~/town"
commands = "ls -a"
hints = [
_("{{rb:You heard whispers referring to}} {{yb:ls -a}}" +\
"{{rb:, try using it!}}"),
]
def next(self):
return 10, 5
class Step5(StepTemplateCd):
story = [
_("You see a {{bb:.hidden-shelter}} that you didn't notice before.\n"),
_("{{gb:Something that starts with . is normally hidden from view.\n}}"),
_("It sounds like the whispers are coming from there. Try going in.\n")
]
start_dir = "~/town"
end_dir = "~/town/.hidden-shelter"
commands = [
"cd .hidden-shelter",
"cd .hidden-shelter/"
]
hints = [
_("{{rb:Try going inside the}} {{lb:.hidden-shelter}} {{rb:using }}" +\
"{{yb:cd}}{{rb:.}}"),
_("{{rb:Use the command}} {{yb:cd .hidden-shelter }}" +\
"{{rb:to go inside.}}")
]
def block_command(self, line):
return unblock_commands_with_cd_hint(line, self.commands)
def next(self):
return 10, 6
class Step6(StepTemplateCd):
story = [
_("Is anyone there? Have a {{lb:look around}}.\n")
]
start_dir = "~/town/.hidden-shelter"
end_dir = "~/town/.hidden-shelter"
commands = [
"ls",
"ls -a"
]
hints = [
_("{{rb:Use}} {{yb:ls}} {{rb:to have a look around you.}}")
]
def next(self):
return 11, 1
| gpl-2.0 | 7,936,211,760,984,238,000 | 28.657407 | 96 | 0.503278 | false |
saikrishnar/AudioRenderingofSTEM | systems/technique3/code/3.py | 1 | 13759 | # -*- coding: utf-8 -*-
from lxml import etree as et
import sys
import os
# for TTS(default system TTS)
import pyttsx
#coding: utf-8
#function that reads the xhtml file and returns the root of the document
def getData(fname):
with open(fname) as f:
parser = et.XMLParser(load_dtd=True, no_network=False,resolve_entities=False)
data = f.read()
data = data.replace('\t','')
data = data.replace('\n','')
doc = et.fromstring(data, parser=parser)
return doc
#function to generate a basic sable root and append the version and other info at the end to a file "equation.sable"
def generateSable(node=None,flag=None):
##print 'in function generateSable'
##print flag
documentInfo = '<?xml version="1.0"?><!DOCTYPE SABLE PUBLIC "-//SABLE//DTD SABLE speech mark up//EN" "Sable.v0_2.dtd" []>'
if flag:
sable = ''
sable = documentInfo+et.tostring(node)
##print 'writing sable to file'
f = open('equation.sable','w')
f.write(sable)
f.close()
return
return et.Element('SABLE')
#function to parse the operators in the <mo> tags
def operatorParse(op):
if op == '+':
return 'plus'
if op == '∫':
return 'integral'
if op == '∑':
return ' summation'
if op == '∏':
return ' product '
if op == '-':
return 'minus'
if op == '−':
return 'minus'
if op == '±':
return 'plus or minus '
if op == '...':
return 'so on till,'
if op == '=':
return 'is equal to'
if op == '≠':
return 'is not equal to'
if op == '≈':
return ' is almost equal to '
if op == '∝':
return 'is proportional to '
if op == '≤':
return 'is less than or equal to'
if op == '≥':
return 'is greater than or equal to'
if op == '<':
return ' is less than'
if op == '>':
return 'is greater than '
if op == '(':
return 'the quantity ('
if op == ')':
return ')'
if op == 'sin':
return 'sine'
if op == 'cos':
return 'cos'
if op == 'tan':
return 'tan'
if op == 'log':
return 'log'
if op == '*':
return 'times'
if op == '×':
return ' multiplied by'
if op == '/':
return 'divided by'
if op == '÷':
return 'divided by'
if op == '%':
return 'modulo divided by'
if op == '′':
return 'first order derivative '
if op == '″':
return ' second derivative '
if op == '‴':
return 'third derivative '
if op == '⁗':
return 'forth derivative '
if op == '∂':
return ' parcial differential'
if op == '∮':
return ' contour integral of'
if op == '∯':
return ' surface integral of'
if op == '∰':
return ' volume integral of'
if op == '∱':
return ' clockwise integral of'
if op == '∂':
return 'partial derivative of'
if op == '∠':
return ' angle of'
# alternative way for an integral, using the direct symbol should also work
if op == 'ⅆ':
return 'D'
if op == '∫':
return 'integral'
if op == '.':
return '.'
if op == '∞':
return 'infinity'
if op == 'lim':
return 'limit'
if op == '→':
return 'tends to'
if op == ',':
return ','
return op
#fill in operators
# if op == the operator:
#return a text form of the operator
def getEntityValue(node):
if node.tag.split('}')[1] == 'mi':
if node[0].text == 'α':
node.text = 'alpha'
if node[0].text == 'β':
node.text = 'beta'
if node[0].text == '&gama;':
            node.text = 'gamma'
if node[0].text == 'θ':
node.text = 'theta'
if node[0].text == 'π':
node.text='pi'
else:
node.text = node[0].text
deleteElement = node[0]
node.remove(deleteElement)
return node
"""def speek(text):
engine = pyttsx.init()
engine.say(text)
engine.runAndWait()
return
"""
#function to parse the mathML
def mathparse(element,snode,exp = []):
###print 'testing element'
###print 'text:'
###print element.text
###print 'tag:',element.tag
#mtag = ''
#try:
mtag = element.tag.split('}')[1]
#except:
#return []
#mtag = element.tag
##print 'modified tag:',mtag
###print 'expression string:', exp
# numbers and variables
if mtag == 'mi' or mtag == 'mn':
if len(element) > 0:
element = getEntityValue(element)
exp.append(element.text)
# operators
if mtag == 'mo':
if len(element) > 0:
element = getEntityValue(element)
#print element.text
##print 'this is'
##print operatorParse(element.text)
exp.append(operatorParse(element.text))
# fractions
if mtag == 'mfrac':
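        # Fraction: flush any pending text, then speak the numerator at a raised pitch and the denominator at a lowered pitch, joined by 'over'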
if len(snode) > 0:
snode[-1].tail = ' '.join(exp)
else:
snode.text = ' '.join(exp)
exp = []
#et.SubElement(snode,'AUDIO',SRC='http://localhost/test/testsound.wav')
et.SubElement(snode,'EMPH').text = 'fraction'
tnode = et.SubElement(snode,'RATE',SPEED = '+25%')
t2node = et.SubElement(tnode,'PITCH',BASE = '+25%')
tnode.tail = 'over'
exp=mathparse(element[0],t2node,exp)
if len(t2node) > 0:
t2node[-1].tail = ' '.join(exp)
else:
t2node.text = ' '.join(exp)
exp = []
dnode = et.SubElement(snode,'RATE',SPEED = '+25%')
d2node = et.SubElement(dnode,'PITCH',BASE = '-25%')
exp=mathparse(element[1],d2node,exp)
if len(d2node) > 0:
d2node[-1].tail = ' '.join(exp)
else:
d2node.text = ' '.join(exp)
exp = []
return []
# superscript
if mtag == 'msup':
###print 'expression before superscript manipulation:\n',exp
###print 'mathML before superscript manipulation:\n',et.tostring(snode)
if len(snode) > 0:
snode[-1].tail = ' '.join(exp)
###print '##printing tail of node\n',snode.tail
else:
snode.text = ' '.join(exp)
###print '##printing text of node\n',snode.text
exp = []
#et.SubElement(snode,'AUDIO',SRC='http://localhost/test/superscript.wav')
et.SubElement(snode,'BREAK',LEVEL = 'Large')
exp=mathparse(element[0],snode,exp)
###print '##printing exp after parsing base of superscript\n',exp
###print '##printing node after parsing base of superscript\n',et.tostring(snode)
if len(snode) > 0:
snode[-1].tail = ' '.join(exp)
###print '##printing tail after parsing base of superscript\n',snode.tail
else:
snode.text = ' '.join(exp)
###print '##printing text of node after parsing the base of superscript\n',snode.text
exp = []
et.SubElement(snode,'EMPH').text = 'superscript'
enode =et.SubElement(snode,'PITCH',BASE = '+50%')
###print 'exp list before calling function on the exponent node:\n',exp
exp=mathparse(element[1],enode,exp)
###print 'exp after passing it to the function for the exponent node:\n',exp
if len(enode) > 0:
enode[-1].tail = ' '.join(exp)
###print '##printing tail after parsing the superscript text\n',enode.tail
else:
enode.text = ' '.join(exp)
###print '##printing text after parsing superscript\n',enode.text
exp = []
return []
#subscript
if mtag == 'msub':
if len(snode) > 0:
snode[-1].tail = ' '.join(exp)
else:
snode.text = ' '.join(exp)
exp = []
#et.SubElement(snode,'AUDIO',SRC='http://localhost/test/subscript.wav')
et.SubElement(snode,'BREAK',LEVEL = 'Medium')
exp=mathparse(element[0],snode,exp)
if len(snode) > 0:
snode[-1].tail = ' '.join(exp)
else:
snode.text = ' '.join(exp)
exp = []
et.SubElement(snode,'EMPH').text = 'subscript'
subNode =et.SubElement(snode,'PITCH',BASE = '-50%')
exp=mathparse(element[1],subNode,exp)
if len(subNode) > 0:
subNode[-1].tail = ' '.join(exp)
else:
subNode.text = ' '.join(exp)
exp = []
return []
#subscript-superscript pairs
if mtag == 'msubsup':
exp=mathparse(element[0],snode,exp)
if len(snode) > 0:
snode[-1].tail = ' '.join(exp)
else:
snode.text = ' '.join(exp)
exp = []
#et.SubElement(snode,'AUDIO',SRC='http://localhost/test/subsuper.wav')
et.SubElement(snode,'BREAK',LEVEL='Large')
et.SubElement(snode,'EMPH').text = 'subscript'
ssSub = et.SubElement(snode,'PITCH',BASE='-50%')
exp=mathparse(element[1],ssSub,exp)
if len(ssSub) > 0:
ssSub[-1].tail = ' '.join(exp)
else:
ssSub.text = ' '.join(exp)
exp = []
et.SubElement(snode,'EMPH').text = 'superscript'
ssSup = et.SubElement(snode,'PITCH',BASE = '+50%')
exp=mathparse(element[2],ssSup,exp)
if len(ssSup) > 0:
ssSup[-1].tail = ' '.join(exp)
else:
ssSup.text = ' '.join(exp)
exp = []
return []
#fence
if mtag == 'mfence':
exp.append('the quantity')
if snode.text:
snode[-1].tail = '' .join(exp)
else:
snode.text = ' '.join(exp)
exp = []
et.SubElement(snode,'BREAK',LEVEL = 'Large')
for c in element:
exp=mathparse(c,snode,exp)
et.SubElement(snode,'BREAK',LEVEL='Medium')
exp = []
return exp
# over script
if mtag == 'mover':
if len(snode) > 0:
snode[-1].tail = ' '.join(exp)
else:
snode.text = ' '.join(exp)
exp = []
#et.SubElement(snode,'AUDIO',SRC='http://localhost/test/superscript.wav')
et.SubElement(snode,'BREAK',LEVEL = 'Medium')
exp=mathparse(element[0],snode,exp)
if len(snode) > 0:
snode[-1].tail = ' '.join(exp)
else:
snode.text = ' '.join(exp)
exp = []
et.SubElement(snode,'EMPH').text = 'overscript'
overNode =et.SubElement(snode,'PITCH',BASE = '+60%')
exp=mathparse(element[1],overNode,exp)
if len(overNode) > 0:
overNode[-1].tail = ' '.join(exp)
else:
overNode.text = ' '.join(exp)
exp = []
return []
#underscript
if mtag == 'munder':
if len(snode) > 0:
snode[-1].tail = ' '.join(exp)
else:
snode.text = ' '.join(exp)
exp = []
#et.SubElement(snode,'AUDIO',SRC='http://localhost/test/subscript.wav')
et.SubElement(snode,'BREAK',LEVEL = 'Medium')
exp=mathparse(element[0],snode,exp)
if len(snode) > 0:
snode[-1].tail = ' '.join(exp)
else:
snode.text = ' '.join(exp)
exp = []
et.SubElement(snode,'EMPH').text = 'underscript'
underNode =et.SubElement(snode,'PITCH',BASE = '-60%')
exp=mathparse(element[1],underNode,exp)
if len(underNode) > 0:
underNode[-1].tail = ' '.join(exp)
else:
underNode.text = ' '.join(exp)
exp = []
return []
# underscript-overscript pair
if mtag == 'munderover':
if len(snode) > 0:
snode[-1].tail = ' '.join(exp)
else:
snode.text = ' '.join(exp)
exp = []
#et.SubElement(snode,'AUDIO',SRC='http://localhost/test/subsuper.wav')
et.SubElement(snode,'BREAK',LEVEL = 'Medium')
#et.SubElement(snode,'BREAK',LEVEL='medium')
underOverBase=et.SubElement(snode,'EMPH')
underOverBase.text = ' '.join(mathparse(element[0],snode,exp))
underOverBase.tail = 'from'
exp = []
underOverSub =et.SubElement(snode,'PITCH',BASE = '-60%')
underOverSub.tail = 'to'
exp=mathparse(element[1],underOverSub,exp)
if len(underOverSub) > 0:
underOverSub[-1].tail = ' '.join(exp)
else:
underOverSub.text = ' '.join(exp)
exp = []
underOverSup =et.SubElement(snode,'PITCH',BASE = '+60%')
exp=mathparse(element[2],underOverSup,exp)
if len(underOverSup) > 0:
underOverSup[-1].tail = ' '.join(exp)
else:
underOverSup.text = ' '.join(exp)
exp = []
return []
# square root
if mtag == 'msqrt':
if len(snode) > 0:
snode[-1].tail = ' '.join(exp)
else:
snode.text = ' '.join(exp)
exp = []
#et.SubElement(snode,'AUDIO',SRC='http://localhost/test/squareroot.wav')
et.SubElement(snode,'BREAK',LEVEL='Medium')
et.SubElement(snode,'EMPH').text = 'square root of'
sqrtNode = et.SubElement(snode,'RATE',SPEED='+30%')
for c in element:
exp=mathparse(c,sqrtNode,exp)
if len(sqrtNode)>0:
sqrtNode[-1].tail = ' '.join(exp)
else:
sqrtNode.text = ' '.join(exp)
exp = []
return []
# general root
if mtag == 'mroot':
exp=mathparse(element[-1],snode,exp)
mathparse(element[-1],snode,exp)
exp.append('root of')
for c in element[:-1]:
mathparse(c,snode,exp)
if len(exp) > 0:
exp[-1] = exp[-1]+','
return exp
###print 'list:',len(exp)
###print 'items in the list:\n',exp
##print 'sable markup:\n',et.tostring(snode)
for e in element:
exp=mathparse(e,snode,exp)
#print exp
if len(snode) > 0:
if snode[-1].tail != None:
snode[-1].tail = snode[-1].tail+' '.join(exp)
else:
snode[-1].tail = ' '.join(exp)
#exp = []
else:
if snode.text:
snode.text = snode.text + ' '.join(exp)
else:
snode.text = ' '.join(exp)
##print 'sable just before exiting:\n',et.tostring(snode)
return exp
def main():
args = sys.argv
if len(args) < 2:
##print 'usage:\nbasicSable.py inputFile.xhtml'
exit(1)
fileName = str(sys.argv[1])
xmlroot = getData(fileName) #'example1.xhtml' contains the xhtml code given above
sableroot=generateSable()
expList = mathparse(xmlroot,sableroot)
if len(sableroot) > 0:
sableroot[-1].tail = ' '.join(expList)
else:
sableroot.text = ' '.join(expList)
generateSable(sableroot,1)
###print 'list in the main function:\n',expList
###print len(expList)
expression = ' '.join(expList)
###print the resulting string
##print 'result:',expression
#speak the expression
#speek(expression)
#speak the expression using festival
cmd = 'echo "'+expression+'" | festival --tts'
festCmd = 'festival --tts equation.sable'
os.system(festCmd)
if __name__ == '__main__':
main()
| apache-2.0 | -669,407,135,783,003,500 | 27.819706 | 124 | 0.590456 | false |
GoteoFoundation/goteo-api | goteoapi/ratelimit.py | 1 | 1974 | # -*- coding: utf-8 -*-
import time
from functools import update_wrapper
from flask import request, g
from flask_redis import FlaskRedis
from .helpers import bad_request
from . import app
#
# REDIS RATE LIMITER
# ==================
redis = False
if app.config['REDIS_URL']:
redis = FlaskRedis(app)
class RateLimit(object):
expiration_window = 10
def __init__(self, key_prefix, limit, per):
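        # Fixed-window rate limiting: bucket requests into windows of `per` seconds; the window end time is part of the Redis key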
self.reset = (int(time.time()) // per) * per + per
self.key = key_prefix + str(self.reset)
self.limit = limit
self.per = per
p = redis.pipeline()
p.incr(self.key)
p.expireat(self.key, self.reset + self.expiration_window)
self.current = min(p.execute()[0], limit)
remaining = property(lambda x: x.limit - x.current)
over_limit = property(lambda x: x.current >= x.limit)
def get_view_rate_limit():
return getattr(g, '_view_rate_limit', None)
def on_over_limit(limit):
resp = bad_request('Too many requests', 429)
return resp
def ratelimit(limit=app.config['REQUESTS_LIMIT'],
per=app.config['REQUESTS_TIME'],
over_limit=on_over_limit):
def decorator(f):
def rate_limited(*args, **kwargs):
if not app.config['REQUESTS_LIMIT'] or not redis:
return f(*args, **kwargs)
if app.config['AUTH_ENABLED'] and request.authorization:
key = 'rate-limit/%s/' % request.authorization.username
else:
remote_ip = request.environ.get('HTTP_X_REAL_IP',
request.remote_addr)
key = 'rate-limit/%s/' % remote_ip
rlimit = RateLimit(key, limit, per)
g._view_rate_limit = rlimit
if over_limit is not None and rlimit.over_limit:
return over_limit(rlimit)
return f(*args, **kwargs)
return update_wrapper(rate_limited, f)
return decorator
| agpl-3.0 | 721,773,584,731,300,600 | 28.462687 | 71 | 0.581054 | false |
pombredanne/openaire | bibsched/lib/bibsched_tasklets/bst_openaire_check_rights.py | 1 | 2572 | #!/usr/bin/env python
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Tasklets to update the list of OpenAIRE keywords to match any edits
made in the records.
"""
from invenio.bibdocfile import BibRecDocs
from invenio.bibtask import write_message, task_update_progress, \
task_sleep_now_if_required
from invenio.openaire_deposit_config import CFG_ACCESS_RIGHTS_KEYS
from invenio.search_engine import search_pattern, get_fieldvalues
def bst_openaire_check_rights():
"""
Tasklet to verify access rights consistency.
"""
restrictions = {
'cc0' : '',
'openAccess' : '',
'closedAccess' : 'status: closedAccess',
'restrictedAccess' : 'status: restrictedAccess',
'embargoedAccess' : 'firerole: deny until "%(date)s"\nallow any',
}
errors = []
for access_rights in CFG_ACCESS_RIGHTS_KEYS:
write_message("Checking records with access rights '%s'" % access_rights)
recids = search_pattern(p=access_rights, f="542__l")
for r in recids:
date = ''
if access_rights == 'embargoedAccess':
try:
date = get_fieldvalues(r, "942__a")[0]
except IndexError:
raise Exception("Embargoed record %s is missing embargo date in 942__a" % r)
expected_status = restrictions[access_rights] % { 'date' : date }
brd = BibRecDocs(r)
for d in brd.list_bibdocs():
real_status = d.get_status()
if real_status != expected_status:
d.set_status(expected_status)
write_message("Fixed record %s with wrong status. From: %s To: %s" % (r, real_status, expected_status))
for e in errors:
write_message(e)
if __name__ == '__main__':
bst_openaire_check_rights()
| gpl-2.0 | 5,037,854,944,358,963,000 | 36.823529 | 123 | 0.641135 | false |
chetan/cherokee | admin/consts.py | 1 | 7437 | # -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <[email protected]>
#
# Copyright (C) 2001-2010 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
AVAILABLE_LANGUAGES = [
('en', N_('English')),
('es', N_('Spanish')),
('de', N_('German')),
('fr', N_('French')),
('it', N_('Italian')),
('nl', N_('Dutch')),
('pl', N_('Polish')),
('sv_SE', N_('Swedish')),
('po_BR', N_('Brazilian Portuguese')),
('zh_CN', N_('Chinese Simplified')),
('ca', N_('Catalan')),
('gl', N_('Galician'))
]
PRODUCT_TOKENS = [
('', N_('Default')),
('product', N_('Product only')),
('minor', N_('Product + Minor version')),
('minimal', N_('Product + Minimal version')),
('os', N_('Product + Platform')),
('full', N_('Full Server string'))
]
HANDLERS = [
('', N_('None')),
('common', N_('List & Send')),
('file', N_('Static Content')),
('dirlist', N_('Only Listing')),
('redir', N_('Redirection')),
('fcgi', N_('FastCGI')),
('scgi', N_('SCGI')),
('uwsgi', N_('uWSGI')),
('proxy', N_('HTTP Reverse Proxy')),
('post_report', N_('Upload Reporting')),
('streaming', N_('Audio/Video Streaming')),
('cgi', N_('CGI')),
('ssi', N_('Server Side Includes')),
('secdownload', N_('Hidden Downloads')),
('server_info', N_('Server Info')),
('dbslayer', N_('MySQL Bridge')),
('custom_error', N_('HTTP Error')),
('admin', N_('Remote Administration')),
('empty_gif', N_('1x1 Transparent GIF'))
]
ERROR_HANDLERS = [
('', N_('Default errors')),
('error_redir', N_('Custom redirections')),
('error_nn', N_('Closest match'))
]
VALIDATORS = [
('', N_('None')),
('plain', N_('Plain text file')),
('htpasswd', N_('Htpasswd file')),
('htdigest', N_('Htdigest file')),
('ldap', N_('LDAP server')),
('mysql', N_('MySQL server')),
('pam', N_('PAM')),
('authlist', N_('Fixed list'))
]
VALIDATOR_METHODS = [
('basic', N_('Basic')),
('digest', N_('Digest')),
('basic,digest', N_('Basic or Digest'))
]
LOGGERS = [
('', N_('None')),
('combined', N_('Apache compatible')),
('ncsa', N_('NCSA')),
('custom', N_('Custom'))
]
LOGGER_WRITERS = [
('file', N_('File')),
('syslog', N_('System logger')),
('stderr', N_('Standard Error')),
('exec', N_('Execute program'))
]
BALANCERS = [
('', N_('None')),
('round_robin', N_("Round Robin")),
('ip_hash', N_("IP Hash")),
('failover', N_("Failover"))
]
SOURCE_TYPES = [
('interpreter', N_('Local interpreter')),
('host', N_('Remote host'))
]
ENCODERS = [
('gzip', N_('GZip')),
('deflate', N_('Deflate'))
]
THREAD_POLICY = [
('', N_('Default')),
('fifo', N_('FIFO')),
('rr', N_('Round-robin')),
('other', N_('Dynamic'))
]
POLL_METHODS = [
('', N_('Automatic')),
('epoll', 'epoll() - Linux >= 2.6'),
('kqueue', 'kqueue() - BSD, OS X'),
('ports', 'Solaris ports - >= 10'),
('poll', 'poll()'),
('select', 'select()'),
('win32', 'Win32')
]
REDIR_SHOW = [
('1', N_('External')),
('0', N_('Internal'))
]
ERROR_CODES = [
('400', '400 Bad Request'),
('401', '401 Unauthorized'),
('402', '402 Payment Required'),
('403', '403 Forbidden'),
('404', '404 Not Found'),
('405', '405 Method Not Allowed'),
('406', '406 Not Acceptable'),
('407', '407 Proxy Auth Required'),
('408', '408 Request Timeout'),
('409', '409 Conflict'),
('410', '410 Gone'),
('411', '411 Length Required'),
('412', '412 Precondition Failed'),
('413', '413 Request Entity too large'),
('414', '414 Request-URI too long'),
('415', '415 Unsupported Media Type'),
('416', '416 Requested range not satisfiable'),
('417', '417 Expectation Failed'),
('422', '422 Unprocessable Entity'),
('423', '423 Locked'),
('424', '424 Failed Dependency'),
('425', '425 Unordered Collection'),
('426', '426 Upgrade Required'),
('449', '449 Retry With'),
('500', '500 Internal Server Error'),
('501', '501 Not Implemented'),
('502', '502 Bad gateway'),
('503', '503 Service Unavailable'),
('504', '504 Gateway Timeout'),
('505', '505 HTTP Version Not Supported'),
('506', '506 Variant Also Negotiates'),
('507', '507 Insufficient Storage'),
('509', '509 Bandwidth Limit Exceeded'),
('510', '510 Not Extended')
]
RULES = [
('directory', N_('Directory')),
('extensions', N_('Extensions')),
('request', N_('Regular Expression')),
('header', N_('Header')),
('exists', N_('File Exists')),
('method', N_('HTTP Method')),
('bind', N_('Incoming IP/Port')),
('tls', N_('SSL / TLS')),
('fullpath', N_('Full Path')),
('from', N_('Connected from')),
('url_arg', N_('URL Argument')),
('geoip', N_('GeoIP'))
]
VRULES = [
('', N_('Match Nickname')),
('wildcard', N_('Wildcards')),
('rehost', N_('Regular Expressions')),
('target_ip', N_('Server IP'))
]
EXPIRATION_TYPE = [
('', N_('Not set')),
('epoch', N_('Already expired on 1970')),
('max', N_('Do not expire until 2038')),
('time', N_('Custom value'))
]
CRYPTORS = [
('', N_('No TLS/SSL')),
('libssl', N_('OpenSSL / libssl'))
]
EVHOSTS = [
('', N_('Off')),
('evhost', N_('Enhanced Virtual Hosting'))
]
CLIENT_CERTS = [
('', N_('Skip')),
('accept', N_('Accept')),
('required', N_('Require'))
]
COLLECTORS = [
('', N_('Disabled')),
('rrd', N_('RRDtool graphs'))
]
UTC_TIME = [
('', N_('Local time')),
('1', N_('UTC: Coordinated Universal Time'))
]
DWRITER_LANGS = [
('json', N_('JSON')),
('python', N_('Python')),
('php', N_('PHP')),
('ruby', N_('Ruby'))
]
POST_TRACKERS = [
('', N_('Disabled')),
('post_track', N_('POST tracker'))
]
CACHING_OPTIONS = [
('', N_('Not set')),
('public', N_('Public')),
('private', N_('Private')),
('no-cache', N_('No Cache'))
]
COMPRESSION_LEVELS = [
('', N_('Default')),
('0', N_('0 - No compression')),
('1', N_('1')),
('2', N_('2')),
('3', N_('3')),
('4', N_('4')),
('5', N_('5')),
('6', N_('6')),
('7', N_('7')),
('8', N_('8')),
('9', N_('9 - Max compression'))
]
| gpl-2.0 | -8,224,613,567,037,564,000 | 26.544444 | 67 | 0.477746 | false |
cbertinato/pandas | pandas/tests/sparse/test_combine_concat.py | 1 | 19708 | import itertools
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
import pandas.util.testing as tm
class TestSparseArrayConcat:
@pytest.mark.parametrize('kind', ['integer', 'block'])
def test_basic(self, kind):
a = pd.SparseArray([1, 0, 0, 2], kind=kind)
b = pd.SparseArray([1, 0, 2, 2], kind=kind)
result = pd.SparseArray._concat_same_type([a, b])
# Can't make any assertions about the sparse index itself
        # since we don't merge sparse blocks across arrays
# in to_concat
expected = np.array([1, 2, 1, 2, 2], dtype='int64')
tm.assert_numpy_array_equal(result.sp_values, expected)
assert result.kind == kind
@pytest.mark.parametrize('kind', ['integer', 'block'])
def test_uses_first_kind(self, kind):
other = 'integer' if kind == 'block' else 'block'
a = pd.SparseArray([1, 0, 0, 2], kind=kind)
b = pd.SparseArray([1, 0, 2, 2], kind=other)
result = pd.SparseArray._concat_same_type([a, b])
expected = np.array([1, 2, 1, 2, 2], dtype='int64')
tm.assert_numpy_array_equal(result.sp_values, expected)
assert result.kind == kind
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseSeriesConcat:
@pytest.mark.parametrize('kind', [
'integer',
'block',
])
def test_concat(self, kind):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, name='y', kind=kind)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
sparse1 = pd.SparseSeries(val1, fill_value=0, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, fill_value=0, name='y', kind=kind)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, fill_value=0, kind=kind)
tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
def test_concat_axis1(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x')
sparse2 = pd.SparseSeries(val2, name='y')
res = pd.concat([sparse1, sparse2], axis=1)
exp = pd.concat([pd.Series(val1, name='x'),
pd.Series(val2, name='y')], axis=1)
exp = pd.SparseDataFrame(exp)
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
def test_concat_different_fill(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ['integer', 'block']:
sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, name='y', kind=kind, fill_value=0)
with tm.assert_produces_warning(PerformanceWarning,
raise_on_extra_warnings=False):
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
with tm.assert_produces_warning(PerformanceWarning,
raise_on_extra_warnings=False):
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_concat_axis1_different_fill(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x')
sparse2 = pd.SparseSeries(val2, name='y', fill_value=0)
res = pd.concat([sparse1, sparse2], axis=1)
exp = pd.concat([pd.Series(val1, name='x'),
pd.Series(val2, name='y')], axis=1)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
def test_concat_different_kind(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x', kind='integer')
sparse2 = pd.SparseSeries(val2, name='y', kind='block')
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=sparse1.kind)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind=sparse2.kind)
tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
@pytest.mark.parametrize('kind', [
'integer',
'block',
])
def test_concat_sparse_dense(self, kind):
# use first input's fill_value
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse = pd.SparseSeries(val1, name='x', kind=kind)
dense = pd.Series(val2, name='y')
res = pd.concat([sparse, dense])
exp = pd.SparseSeries(pd.concat([pd.Series(val1), dense]), kind=kind)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([dense, sparse, dense])
exp = pd.concat([dense, pd.Series(val1), dense])
# XXX: changed from SparseSeries to Series[sparse]
exp = pd.Series(
pd.SparseArray(exp, kind=kind),
index=exp.index,
name=exp.name,
)
tm.assert_series_equal(res, exp)
sparse = pd.SparseSeries(val1, name='x', kind=kind, fill_value=0)
dense = pd.Series(val2, name='y')
res = pd.concat([sparse, dense])
# XXX: changed from SparseSeries to Series[sparse]
exp = pd.concat([pd.Series(val1), dense])
exp = pd.Series(
pd.SparseArray(exp, kind=kind, fill_value=0),
index=exp.index,
name=exp.name,
)
tm.assert_series_equal(res, exp)
res = pd.concat([dense, sparse, dense])
exp = pd.concat([dense, pd.Series(val1), dense])
# XXX: changed from SparseSeries to Series[sparse]
exp = pd.Series(
pd.SparseArray(exp, kind=kind, fill_value=0),
index=exp.index,
name=exp.name,
)
tm.assert_series_equal(res, exp)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
class TestSparseDataFrameConcat:
def setup_method(self, method):
self.dense1 = pd.DataFrame({'A': [0., 1., 2., np.nan],
'B': [0., 0., 0., 0.],
'C': [np.nan, np.nan, np.nan, np.nan],
'D': [1., 2., 3., 4.]})
self.dense2 = pd.DataFrame({'A': [5., 6., 7., 8.],
'B': [np.nan, 0., 7., 8.],
'C': [5., 6., np.nan, np.nan],
'D': [np.nan, np.nan, np.nan, np.nan]})
self.dense3 = pd.DataFrame({'E': [5., 6., 7., 8.],
'F': [np.nan, 0., 7., 8.],
'G': [5., 6., np.nan, np.nan],
'H': [np.nan, np.nan, np.nan, np.nan]})
def test_concat(self):
# fill_value = np.nan
sparse = self.dense1.to_sparse()
sparse2 = self.dense2.to_sparse()
res = pd.concat([sparse, sparse])
exp = pd.concat([self.dense1, self.dense1]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse2, sparse2])
exp = pd.concat([self.dense2, self.dense2]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse, sparse2])
exp = pd.concat([self.dense1, self.dense2]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse2, sparse])
exp = pd.concat([self.dense2, self.dense1]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
# fill_value = 0
sparse = self.dense1.to_sparse(fill_value=0)
sparse2 = self.dense2.to_sparse(fill_value=0)
res = pd.concat([sparse, sparse])
exp = pd.concat([self.dense1, self.dense1]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse2, sparse2])
        exp = pd.concat([self.dense2, self.dense2]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse, sparse2])
        exp = pd.concat([self.dense1, self.dense2]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse2, sparse])
        exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
    def test_concat_different_fill_value(self):
        # 1st fill_value will be used
        sparse = self.dense1.to_sparse()
        sparse2 = self.dense2.to_sparse(fill_value=0)
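        # concatenating sparse blocks with different fill values picks the
        # first fill value and converts the rest, which is what triggers the
        # PerformanceWarning asserted below.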
        with tm.assert_produces_warning(PerformanceWarning,
                                        raise_on_extra_warnings=False):
            res = pd.concat([sparse, sparse2])
        exp = pd.concat([self.dense1, self.dense2]).to_sparse()
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        with tm.assert_produces_warning(PerformanceWarning,
                                        raise_on_extra_warnings=False):
            res = pd.concat([sparse2, sparse])
        exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
    def test_concat_different_columns_sort_warns(self):
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse()
        # stacklevel is wrong since we have two FutureWarnings,
        # one for depr, one for sorting.
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False,
                                        raise_on_extra_warnings=False):
            res = pd.concat([sparse, sparse3])
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False,
                                        raise_on_extra_warnings=False,):
            exp = pd.concat([self.dense1, self.dense3])
            exp = exp.to_sparse()
        tm.assert_sp_frame_equal(res, exp, check_kind=False)
    def test_concat_different_columns(self):
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse()
        res = pd.concat([sparse, sparse3], sort=True)
        exp = pd.concat([self.dense1, self.dense3], sort=True).to_sparse()
        tm.assert_sp_frame_equal(res, exp, check_kind=False)
        res = pd.concat([sparse3, sparse], sort=True)
        exp = pd.concat([self.dense3, self.dense1], sort=True).to_sparse()
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, check_kind=False)
    def test_concat_bug(self):
        from pandas.core.sparse.api import SparseDtype
        x = pd.SparseDataFrame({"A": pd.SparseArray([np.nan, np.nan],
                                                    fill_value=0)})
        y = pd.SparseDataFrame({"B": []})
        res = pd.concat([x, y], sort=False)[['A']]
        exp = pd.DataFrame({"A": pd.SparseArray([np.nan, np.nan],
                                                dtype=SparseDtype(float, 0))})
        tm.assert_frame_equal(res, exp)
    def test_concat_different_columns_buggy(self):
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse3 = self.dense3.to_sparse(fill_value=0)
        res = pd.concat([sparse, sparse3], sort=True)
        exp = (pd.concat([self.dense1, self.dense3], sort=True)
               .to_sparse(fill_value=0))
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, check_kind=False,
                                 consolidate_block_indices=True)
        res = pd.concat([sparse3, sparse], sort=True)
        exp = (pd.concat([self.dense3, self.dense1], sort=True)
               .to_sparse(fill_value=0))
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, check_kind=False,
                                 consolidate_block_indices=True)
        # different fill values
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse(fill_value=0)
        # each column keeps its fill_value, so compare in dense
        res = pd.concat([sparse, sparse3], sort=True)
        exp = pd.concat([self.dense1, self.dense3], sort=True)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
        res = pd.concat([sparse3, sparse], sort=True)
        exp = pd.concat([self.dense3, self.dense1], sort=True)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
    def test_concat_series(self):
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse2 = self.dense2.to_sparse()
        for col in ['A', 'D']:
            res = pd.concat([sparse, sparse2[col]])
            exp = pd.concat([self.dense1, self.dense2[col]]).to_sparse()
            tm.assert_sp_frame_equal(res, exp, check_kind=False)
            res = pd.concat([sparse2[col], sparse])
            exp = pd.concat([self.dense2[col], self.dense1]).to_sparse()
            tm.assert_sp_frame_equal(res, exp, check_kind=False)
        # fill_value = 0
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse2 = self.dense2.to_sparse(fill_value=0)
        for col in ['C', 'D']:
            res = pd.concat([sparse, sparse2[col]])
            exp = pd.concat([self.dense1,
                             self.dense2[col]]).to_sparse(fill_value=0)
            exp._default_fill_value = np.nan
            tm.assert_sp_frame_equal(res, exp, check_kind=False,
                                     consolidate_block_indices=True)
            res = pd.concat([sparse2[col], sparse])
            exp = pd.concat([self.dense2[col],
                             self.dense1]).to_sparse(fill_value=0)
            exp['C'] = res['C']
            exp._default_fill_value = np.nan
            tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True,
                                     check_kind=False)
    def test_concat_axis1(self):
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse()
        res = pd.concat([sparse, sparse3], axis=1)
        exp = pd.concat([self.dense1, self.dense3], axis=1).to_sparse()
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse3, sparse], axis=1)
        exp = pd.concat([self.dense3, self.dense1], axis=1).to_sparse()
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        # fill_value = 0
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse3 = self.dense3.to_sparse(fill_value=0)
        res = pd.concat([sparse, sparse3], axis=1)
        exp = pd.concat([self.dense1, self.dense3],
                        axis=1).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse3, sparse], axis=1)
        exp = pd.concat([self.dense3, self.dense1],
                        axis=1).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        # different fill values
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse(fill_value=0)
        # each column keeps its fill_value, so compare in dense
        res = pd.concat([sparse, sparse3], axis=1)
        exp = pd.concat([self.dense1, self.dense3], axis=1)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
        res = pd.concat([sparse3, sparse], axis=1)
        exp = pd.concat([self.dense3, self.dense1], axis=1)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
    @pytest.mark.parametrize('fill_value,sparse_idx,dense_idx',
                             itertools.product([None, 0, 1, np.nan],
                                               [0, 1],
                                               [1, 0]))
    def test_concat_sparse_dense_rows(self, fill_value, sparse_idx, dense_idx):
        frames = [self.dense1, self.dense2]
        sparse_frame = [frames[dense_idx],
                        frames[sparse_idx].to_sparse(fill_value=fill_value)]
        dense_frame = [frames[dense_idx], frames[sparse_idx]]
        # This will try both directions sparse + dense and dense + sparse
        for _ in range(2):
            res = pd.concat(sparse_frame)
            exp = pd.concat(dense_frame)
            assert isinstance(res, pd.SparseDataFrame)
            tm.assert_frame_equal(res.to_dense(), exp)
            sparse_frame = sparse_frame[::-1]
            dense_frame = dense_frame[::-1]
    @pytest.mark.parametrize('fill_value,sparse_idx,dense_idx',
                             itertools.product([None, 0, 1, np.nan],
                                               [0, 1],
                                               [1, 0]))
    @pytest.mark.xfail(reason="The iloc fails and I can't make expected",
                       strict=False)
    def test_concat_sparse_dense_cols(self, fill_value, sparse_idx, dense_idx):
        # See GH16874, GH18914 and #18686 for why this should be a DataFrame
        from pandas.core.dtypes.common import is_sparse
        frames = [self.dense1, self.dense3]
        sparse_frame = [frames[dense_idx],
                        frames[sparse_idx].to_sparse(fill_value=fill_value)]
        dense_frame = [frames[dense_idx], frames[sparse_idx]]
        # This will try both directions sparse + dense and dense + sparse
        for _ in range(2):
            res = pd.concat(sparse_frame, axis=1)
            exp = pd.concat(dense_frame, axis=1)
            cols = [i for (i, x) in enumerate(res.dtypes) if is_sparse(x)]
            for col in cols:
                exp.iloc[:, col] = exp.iloc[:, col].astype("Sparse")
            for column in frames[dense_idx].columns:
                if dense_idx == sparse_idx:
                    tm.assert_frame_equal(res[column], exp[column])
                else:
                    tm.assert_series_equal(res[column], exp[column])
            tm.assert_frame_equal(res, exp)
            sparse_frame = sparse_frame[::-1]
            dense_frame = dense_frame[::-1]
| bsd-3-clause | 3,471,453,257,615,332,000 | 40.578059 | 79 | 0.559823 | false |
hassy/informatics-explorer | utils.py | 1 | 1514 | # author: Hasan Veldstra <[email protected]>
# license: MIT
import os
import fnmatch
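# NOTE: this module is written for Python 2 (iteritems, basestring and the
# tuple-unpacking lambda below are not valid Python 3).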
try:
    import simplejson as json
except:
    import json
def locate(pattern, root=os.getcwd()):
    """
    Generate all files in a directory tree that match the pattern.
    """
    for path, dirs, files in os.walk(root):
        for filename in [os.path.abspath(os.path.join(path, filename)) for filename in files if fnmatch.fnmatch(filename, pattern)]:
            yield filename
def sort_by_value(d):
    """
    Sort dict items by value, descending.
    """
    return sorted(d.iteritems(), key=lambda (k, v): (v, k), reverse=True)
def flatten(x):
    """
    Return flat list of items from all sub-sequences in list x.
    """
    result = []
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, basestring):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result
def slurp(fn):
    """
    Read text file into a string.
    """
    f = open(fn)
    s = f.read()
    f.close()
    return s
def write(fn, data):
    """
    Write string to a file.
    """
    f = open(fn, "w")
    f.write(data)
    f.close()
    return True
def load_json(filename):
    """
    Return datastructure from JSON data in a file.
    """
    return json.loads(slurp(filename))
def dump_as_json(filename, datastructure):
    """
    Write datastructure as JSON into a file.
    """
    write(filename, json.dumps(datastructure, sort_keys=True, indent=4))
| mit | 741,321,748,822,468,600 | 21.954545 | 132 | 0.599736 | false |