#!/usr/bin/env python
import os
from slackclient import SlackClient
BOT_NAME = 'chopbot3000'
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
if __name__ == "__main__":
    api_call = slack_client.api_call("users.list")
    if api_call.get('ok'):
        # retrieve all users so we can find our bot
        users = api_call.get('members')
        for user in users:
            if 'name' in user and user.get('name') == BOT_NAME:
                print("Bot ID for '" + user['name'] + "' is " + user.get('id'))
    else:
        print("could not find bot user with the name " + BOT_NAME)
# End of baylesj/chopBot3000, scripts/print_bot_id.py (Python, MIT license, 604 bytes)
import re
import os
import itertools
import time
from string import upper
import ete3
import copy
import subprocess
from collections import defaultdict
from sys import platform
from scipy import stats
from ete3 import Tree
from natsort import natsorted
from Bio import AlignIO
"""
Functions for computing the generalized D (DGEN) statistic.
Authors:
Chabrielle Allen
Travis Benedict
Peter Dulworth
"""
def run_saved_dgen(stat_file,sequence_files,window_size=999999999999999999999999999999,
window_offset=999999999999999999999999999999, verbose=False, alpha=0.01,
plot=False, meta=False):
"""
Creates a network tree based on the species tree
and the two leaves to be connected.
Inputs:
inheritance --- inputted tuple containing inheritance probability ex. (0.7, 0.3)
species_tree --- generated or inputted file or newick string
network_map --- inputted mapping of leaves where nodes will be added
Output:
network --- a newick string network with the added nodes.
"""
#decided to do a rename here
alignments = sequence_files
# read in dgen stat from file
# (have to wait for file to exist sometimes)
while not os.path.exists(stat_file):
time.sleep(1)
with(open(stat_file, "r")) as s:
lines = s.readlines()
taxa = eval(lines[0].split(None, 1)[1])
stat_species_tree = lines[1].split(None, 2)[2].replace("\n", "")
stat_species_network = lines[2].split(None, 2)[2].replace("\n", "")
outgroup = lines[3].split(None, 1)[1].replace("\n", "")
invariants = []
for oneInvLine in range(4,len(lines)):
this_line_invariant_group = eval(lines[oneInvLine].split(None, 6)[6])
invariants.append(this_line_invariant_group)
#increase = eval(lines[1].split(None, 2)[2])
#decrease = eval(lines[2].split(None, 2)[2])
#increase_resized = increase
#decrease_resized = decrease
#overall_coefficient = 1
#patterns_to_coeff = {}
# DONE READING IN STATISTIC FROM FILE, RUN THE STAT
#window_size = 5000
#window_offset = 5000
#verbose = True
#alpha = 0.01
#alignments.append(sequence_file)
alignments_to_windows_to_DGEN = calculate_windows_to_DGEN(alignments, taxa, outgroup,
invariants, window_size,
window_offset, verbose, alpha)
    # reuse the same windowed function for the whole-alignment value by treating the entire alignment as one window
alignments_to_DGEN = calculate_windows_to_DGEN(alignments, taxa, outgroup,
invariants, 999999999999999999999999999999,
999999999999999999999999999999, verbose, alpha)
for alignment in alignments:
alignments_to_DGEN[alignment] = alignments_to_DGEN[alignment][0]
# print stuff
s = ""
for alignment in alignments:
if verbose:
dgen2_dof, significant_dgen, dgen2_num_ignored, dgen2_chisq, l_pval_dgen = alignments_to_DGEN[alignment]
s += "\n"
s += alignment + ": " + "\n"
s += "\n"
s += "(format = degrees of freedom, is significant?, num. of sites ignored, chi squared value, DGEN p value)"
s += "\n"
s += "Windows to D value: " + str(alignments_to_windows_to_DGEN[alignment]) + "\n"
s += "\n"
s += "Final Overall DGEN p value: {0}".format(l_pval_dgen) + "\n"
s += "Significant p value: {0}".format(significant_dgen) + "\n"
s += "\n"
s += "(Verbose) Number Of Sites Ignored: {0}".format(dgen2_num_ignored) + "\n"
s += "(Verbose) Degrees Of Freedom: {0}".format(dgen2_dof) + "\n"
s += "(Verbose) ChiSquared Value: {0}".format(dgen2_chisq) + "\n"
s += "\n"
s += "For easy plotting of DGEN values:"
s += "\n"
windowIndex = 0
for windowEntryIndex in alignments_to_windows_to_DGEN[alignment]:
s += str(windowIndex) + "," + str(alignments_to_windows_to_DGEN[alignment][windowEntryIndex][4]) + "\n"
windowIndex += 1
else:
l_pval_dgen, significant_dgen = alignments_to_DGEN[alignment]
s += "\n"
s += alignment + ": " + "\n"
s += "\n"
s += "(format = DGEN p value, is significant?)"
s += "\n"
s += "Windows to D value: " + str(alignments_to_windows_to_DGEN[alignment]) + "\n"
s += "\n"
s += "Final Overall DGEN p value: {0}".format(l_pval_dgen) + "\n"
s += "Significant p value: {0}".format(significant_dgen) + "\n"
# finally do one more output of just window#,dgen val for easy plotting
s += "\n"
s += "For easy plotting of DGEN values:"
s += "\n"
windowIndex = 0
for windowEntryIndex in alignments_to_windows_to_DGEN[alignment]:
s += str(windowIndex) + "," + str(alignments_to_windows_to_DGEN[alignment][windowEntryIndex][0]) + "\n"
windowIndex += 1
print s
if plot:
plot_formatting((alignments_to_DGEN, alignments_to_windows_to_DGEN), plot, meta)
return s
def calculate_windows_to_DGEN(alignments, taxa_order, outgroup, list_of_tree_and_net_invariants, window_size, window_offset,
verbose= False, alpha=0.01):
"""
Calculates the DGEN statistic for the given alignment
Input:
alignment --- a sequence alignment in phylip format
taxa_order --- the desired order of the taxa
patterns_of_interest --- a tuple containing the sets of patterns used for determining a statistic
window_size --- the desired window size
windw_offset --- the desired offset between windows
Output:
l_stat --- the L statistic value
windows_to_l --- a mapping of window indices to L statistic values
"""
# create a map that will map from all the patterns we care about to their counts
pattern_count_map = defaultdict(int)
for aLine in list_of_tree_and_net_invariants:
for aPatternGroup in aLine: # technically could skip the first one or just use the first one
for aPattern in aPatternGroup:
pattern_count_map[aPattern] = 0
# Separate the patterns of interest into their two terms
#terms1 = patterns_of_interest[0]
#terms2 = patterns_of_interest[1]
alignments_to_windows_to_d = {}
for alignment in alignments:
sequence_list = []
taxon_list = []
with open(alignment) as f:
# Create a list of each line in the file
lines = f.readlines()
# First line contains the number and length of the sequences
first_line = lines[0].split()
length_of_sequences = int(first_line[1])
for line in lines[1:]:
# Add each sequence to a list
sequence = line.split()[1]
sequence_list.append(sequence)
# Add each taxon to a list
taxon = line.split()[0]
taxon_list.append(taxon)
i = 0
num_windows = 0
if window_size > length_of_sequences:
num_windows = 1
window_size = length_of_sequences
else:
# Determine the total number of windows needed
while i + window_size - 1 < length_of_sequences:
i += window_offset
num_windows += 1
site_idx = 0
windows_to_l = {}
# Iterate over each window
for window in range(num_windows):
terms1_counts = defaultdict(int)
terms2_counts = defaultdict(int)
num_ignored = 0
# Iterate over the indices in each window
for window_idx in range(window_size):
# Map each taxa to the base at a given site
taxa_to_site = {}
# Create a set of the bases at a given site to determine if the site is biallelic
bases = set([])
# Iterate over each sequence in the alignment
for sequence, taxon in zip(sequence_list, taxon_list):
# Map each taxon to the corresponding base at the site
                    # upper() makes lowercase and uppercase sequences behave identically (previously failed on lowercase input)
base = upper(sequence[site_idx])
taxa_to_site[taxon] = base
bases.add(base)
                # FASTA files allow many characters besides A/C/G/T, so restrict sites to those four bases
possibleBases = set([])
possibleBases.add("A")
possibleBases.add("C")
possibleBases.add("G")
possibleBases.add("T")
# Statistic can only be calculated where the nucleotides are known
# this includes things like -'s, but also N's, and I will now exclude anything but ACTG
# if len(bases) == 2 and "-" not in bases and "N" not in bases:
if len(bases) == 2 and bases.issubset(possibleBases):
# Create the pattern that each site has
site_pattern = []
# The ancestral gene is always the same as the outgroup
ancestral = taxa_to_site[outgroup]
# Iterate over each taxon
for taxon in taxa_order:
nucleotide = taxa_to_site[taxon]
                        # Determine the derived/ancestral status of each nucleotide
if nucleotide == ancestral:
site_pattern.append("A")
else:
site_pattern.append("B")
# Convert the site pattern to a string
sites = pattern_string_generator([site_pattern])
if sites:
site_string = sites[0]
# If the site string is a pattern of interest add to its count for one of the terms
# add to my new DGEN map
if site_string in pattern_count_map:
pattern_count_map[site_string] += 1
#elif "-" in bases or "N" in bases: #(more can happen now, i will just check not in subset of possible bases
elif bases.issubset(possibleBases) == False:
num_ignored += 1
                # catch-all for the remaining ignored sites: non-biallelic sites (previously ignored but
                # not counted in verbose output) and monomorphic sites are now counted here
else:
num_ignored += 1
                    # a separate count for sites with 3 or 4 distinct bases (len(bases) > 2) could be added here
# Increment the site index
site_idx += 1
# create counts based on tree / net invariant groupings
list_of_tree_and_net_invariants_counts = []
for aLine in list_of_tree_and_net_invariants:
lineAdd = []
for aPatternGroup in aLine: # technically could skip the first one or just use the first one
groupCount = 0
for aPattern in aPatternGroup:
groupCount += pattern_count_map[aPattern]
lineAdd.append(groupCount)
list_of_tree_and_net_invariants_counts.append(lineAdd)
#for debugging, how many sites are actually observed that go into the observed calculations?
            # calculate the chi-squared goodness-of-fit test:
            # chi squared = sum over groups of (obs - exp)^2 / exp,
            # where obs is the network-group count Na and exp is Nt * |Na| / |Nt|
            # (a worked toy computation, toy_dgen_chisq_example, follows this function)
chiSquared = 0
dof = 0
cCardinality = 0
for invariantIndex in range(0,len(list_of_tree_and_net_invariants)):
treeGroupSize = len(list_of_tree_and_net_invariants[invariantIndex][0])
treeGroupCount = list_of_tree_and_net_invariants_counts[invariantIndex][0]
cCardinality += 1
for oneNetInvariant in range(1,len(list_of_tree_and_net_invariants[invariantIndex])):
netGroupSize = len(list_of_tree_and_net_invariants[invariantIndex][oneNetInvariant])
netGroupCount = list_of_tree_and_net_invariants_counts[invariantIndex][oneNetInvariant]
chiNumerator = (netGroupCount - (treeGroupCount * (netGroupSize / float(treeGroupSize)))) ** 2 # apparently 'python will return a float if at least one number in an equation is a float'. apparently this is not actually true as making just the first term here a float does not work
chiDenominator = treeGroupCount * (netGroupSize / float(treeGroupSize))
if chiDenominator != 0: # handles the case where zero counts cause 0 in the denominator
chiSquared += chiNumerator / float(chiDenominator)
else:
chiSquared = 0.0
dof += 1
            # Degrees of freedom: earlier formulations used (total - 1); the current formulation uses
            # sum|Y| - |C|. At this point dof holds sum|Y| and cCardinality holds |C|, so the
            # subtraction below gives the final degrees of freedom.
dof = dof - cCardinality
# determine if the chisquared is significant. ALSO NOTE - dispensing with the 'd value' and just looking at chiSq and pval
# Verbose output
if verbose:
signif, chisq, pval = calculate_significance_custom_dof(chiSquared, dof, verbose, alpha)
# The line below can be changed to add more information to the windows to L mapping
#windows_to_l[window] = (l_stat, signif, num_ignored, chisq, pval)
windows_to_l[window] = (dof, signif, num_ignored, chisq, pval)
# Standard output
else:
signif, pval = calculate_significance_custom_dof(chiSquared, dof, verbose, alpha)
windows_to_l[window] = (pval, signif)
# Account for overlapping windows
site_idx += (window_offset - window_size)
alignments_to_windows_to_d[alignment] = windows_to_l
return alignments_to_windows_to_d
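# --- Illustration (not part of the original module): a minimal sketch of the chi-squared
# computation performed above, assuming one invariant line with a tree group of 4 patterns
# (count Nt = 40) and two network groups of sizes 2 and 1 (counts 16 and 12). The function
# name and the toy counts are hypothetical.
def toy_dgen_chisq_example():
    tree_group_size, tree_group_count = 4, 40
    net_groups = [(2, 16), (1, 12)]  # (network group size, observed count)
    chi_squared, dof, c_cardinality = 0.0, 0, 1
    for net_size, net_count in net_groups:
        # expected count: Nt * |Na| / |Nt|
        expected = tree_group_count * (net_size / float(tree_group_size))
        chi_squared += (net_count - expected) ** 2 / expected
        dof += 1
    dof -= c_cardinality  # degrees of freedom = sum|Y| - |C|
    pval = 1 - stats.chi2.cdf(chi_squared, dof)
    return chi_squared, dof, pval  # should give roughly (1.2, 1, 0.27)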
def Create_Network_Helper(species_tree, reticulations, inheritanceProb):
"""
    A slightly more user-friendly wrapper around generate_network_tree: it takes a single
    inheritance probability instead of a tuple.
    Creates a network tree based on the species tree
    and the two leaves to be connected.
    Inputs:
    inheritanceProb --- a single inheritance probability, e.g. 0.7 (the complement is derived as 1 - 0.7)
    species_tree --- generated or inputted file or newick string
    reticulations --- inputted mapping of leaves where nodes will be added
Output:
network --- a newick string network with the added nodes.
"""
inheritance = (inheritanceProb, 1 - float(inheritanceProb))
#inheritance[0] = inheritanceProb
#inheritance[1] = 1 - float(inheritanceProb)
# check for a species tree file
if os.path.isfile(species_tree):
with open(species_tree) as f:
network = f.readline()
# check for a species tree string
else:
network = species_tree
for i in range(len(reticulations)):
# get taxa for the edge in the network
start = reticulations[i][0]
end = reticulations[i][1]
# add nodes into tree in proper format
        #network = network.replace(start, '((' + start + ')#H' + str(i+1) + ':0::' + str(inheritance[0]) + ')') # had one parenthesis too many
        network = network.replace(start, '(' + start + ')#H' + str(i + 1) + ':0::' + str(inheritance[0]) + '') # one parenthesis removed
network = network.replace(end, '(#H' + str(i+1) + ':0::' + str(inheritance[1]) + ',' + end + ')')
return network
def generate_network_tree(inheritance, species_tree, reticulations):
"""
Creates a network tree based on the species tree
and the two leaves to be connected.
Inputs:
inheritance --- inputted tuple containing inheritance probability ex. (0.7, 0.3)
species_tree --- generated or inputted file or newick string
network_map --- inputted mapping of leaves where nodes will be added
Output:
network --- a newick string network with the added nodes.
"""
# check for a species tree file
if os.path.isfile(species_tree):
with open(species_tree) as f:
network = f.readline()
# check for a species tree string
else:
network = species_tree
for i in range(len(reticulations)):
# get taxa for the edge in the network
start = reticulations[i][0]
end = reticulations[i][1]
# add nodes into tree in proper format
network = network.replace(start, '((' + start + ')#H' + str(i+1) + ':0::' + str(inheritance[0]) + ')')
network = network.replace(end, '(#H' + str(i+1) + ':0::' + str(inheritance[1]) + ',' + end + ')')
return network
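# --- Illustration (not part of the original module): a hedged usage sketch of
# generate_network_tree on a made-up four-taxon tree, adding one reticulation from P2 to P3.
def example_generate_network_tree():
    species_tree = "(((P1,P2),P3),O);"
    network = generate_network_tree((0.7, 0.3), species_tree, [("P2", "P3")])
    # expected form: "(((P1,((P2)#H1:0::0.7)),(#H1:0::0.3,P3)),O);"
    return network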
##### Generate all unique trees functions
def genDistinct(n):
"""
Generate all full binary trees with n leaves
Input:
n --- the number of leaves
Output:
dp[-1] --- the set of all full binary trees with n nodes
"""
leafnode = '(.)'
dp = []
newset = set()
newset.add(leafnode)
dp.append(newset)
for i in range(1, n):
newset = set()
for j in range(i):
for leftchild in dp[j]:
for rightchild in dp[i - j - 1]:
newset.add('(' + '.' + leftchild + rightchild + ')')
dp.append(newset)
return dp[-1]
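# --- Illustration (not part of the original module): genDistinct(3) should return the two
# rooted binary tree shapes on three leaves, encoded with '.' placeholders, e.g.
# '(.(.)(.(.)(.)))' and '(.(.(.)(.))(.))'.
def example_genDistinct():
    return genDistinct(3)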
def generate_all_trees(taxa):
"""
Create all trees given a set of taxa
Inputs:
taxa --- a set of the taxa to be used for leaf names
Output:
trees --- the set of all trees over the taxa
"""
# Regex pattern for identifying leaves next to a clade in newick string
pattern = "([\)][a-zA-Z0-9_.-])"
# Generate all distinct binary trees
trees = genDistinct(len(taxa))
# Get all possible permutations of the taxa
taxa_orders = itertools.permutations(taxa)
taxa_orders = list(taxa_orders)
all_trees = []
# Iterate over each tree in the set
for tree in trees:
# Reformat the tree
tree = tree.replace('.', '')
# Iterate over each permutation of taxa
for taxa_perm in taxa_orders:
# Create a copy of the tree
bi_tree = tree
# replace the leaves with taxons and reformat string
for i in range(len(taxa_perm)):
taxon = taxa_perm[i] + ","
bi_tree = bi_tree.replace("()", taxon, 1)
bi_tree = bi_tree.replace(",)", ")")
# Find all instances of a ")" followed by a taxon and add a "," between
clades = re.findall(pattern, bi_tree)
for clade in clades:
taxon = clade[1]
bi_tree = bi_tree.replace(clade, ")," + taxon)
bi_tree = bi_tree.replace(")(", "),(")
bi_tree = bi_tree + ";"
all_trees.append(bi_tree)
return all_trees
def generate_unique_trees(taxa, outgroup):
"""
Generate the set of unique trees over a set of taxa with an outgroup
Inputs:
taxa --- a list of taxa to be used as the leaves of trees
outgroup --- the outgroup to root at
Output:
unique_newicks --- a set of all unique topologies over the given taxa
"""
# Create a set for unique trees
unique_trees = set([])
unique_newicks = set([])
all_trees = generate_all_trees(taxa)
# Iterate over each tree in all_trees
for tree in all_trees:
tree = Tree(tree)
tree.set_outgroup(outgroup)
is_unique = True
# Iterate the unique trees for comparison
for unique_tree in unique_trees:
# Compute robinson-foulds distance
rf_distance = tree.robinson_foulds(unique_tree)[0]
# If rf distance is 0 the tree is not unique
if rf_distance == 0:
is_unique = False
if is_unique:
unique_trees.add(tree)
# Iterate over the trees
for tree in unique_trees:
# Get newick strings from the tree objects
tree = tree.write()
# Get rid of branch lengths in the newick strings
tree = branch_removal(tree)
tree = outgroup_reformat(tree, outgroup)
# Add the newick strings to the set of unique newick strings
unique_newicks.add(tree)
return unique_newicks
###### Statistics Calculations Functions
def calculate_pgtst(species_tree, gene_tree):
"""
Calculate p(gt|st) or p(gt|sn)
Input:
species_tree --- a species tree or network in newick format
gene_tree --- a gene tree in newick format
Output:
pgtst --- p(gt|st) or p(gt|sn)
"""
# Get the global path name to the jar file
dir_path = os.path.dirname(os.path.realpath(__file__))
j = os.path.join(dir_path, "Unstable.jar")
# Run PhyloNet p(g|S) jar file
p = subprocess.Popen("java -jar {0} {1} {2}".format(j, species_tree, gene_tree), stdout=subprocess.PIPE,
shell=True)
# Read output and convert to float
pgtst = float(p.stdout.readline())
return pgtst
def calculate_newicks_to_stats(species_tree, species_network, unique_trees):
"""
Compute p(g|S) and p(g|N) for each g in unique_trees and
map the tree newick string to those values
Inputs:
species_tree --- the species tree newick string for the taxa
species_network --- the network newick string derived from adding a branch to the species tree between the interested taxa
unique_trees --- the set of all unique topologies over n taxa
outgroup --- the outgroup
Output:
trees_to_pgS--- a mapping of tree newick strings to their p(g|S) values
trees_to_pgN--- a mapping of tree newick strings to their p(g|N) values
"""
trees_to_pgS = {}
trees_to_pgN = {}
if platform == 'darwin':
# macs need single quotes for some reason
species_tree = str(species_tree)
species_tree = "'" + species_tree + "'"
species_network = str(species_network)
species_network = "'" + species_network + "'"
# Iterate over the trees
for tree in unique_trees:
if platform == 'darwin':
# macs need single quotes for some reason
tree = "'" + tree + "'"
p_of_g_given_s = calculate_pgtst(species_tree, tree)
p_of_g_given_n = calculate_pgtst(species_network, tree)
if platform == 'darwin':
# remove the quotes from the tree before we add it to the mapping
tree = tree[1:-1]
trees_to_pgS[tree] = p_of_g_given_s
trees_to_pgN[tree] = p_of_g_given_n
return trees_to_pgS, trees_to_pgN
##### Site Pattern Functions
def outgroup_reformat(newick, outgroup):
"""
Move the location of the outgroup in a newick string to be at the end of the string
Inputs:
newick --- a newick string to be reformatted
outgroup --- the outgroup
Output:
newick --- the reformatted string
"""
# Replace the outgroup and comma with an empty string
newick = newick.replace(outgroup + ",", "")
newick = newick[:-2] + "," + outgroup + ");"
return newick
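# --- Illustration (not part of the original module): outgroup_reformat moves the outgroup
# from the front of a newick string to the end.
def example_outgroup_reformat():
    return outgroup_reformat("(O,(A,(B,C)));", "O")  # expected: "((A,(B,C)),O);"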
def pattern_inverter(patterns):
"""
Switches "A"s to "B"s and "B"s to "A" in a site pattern excluding the outgroup
Inputs:
patterns --- a list of site patterns
Output:
inverted --- a list of the inverted patterns
"""
inverted = []
# Iterate over the patterns
for pattern in patterns:
a_count = 0
b_count = 0
inverted_pattern = []
# Iterate over each site in the pattern
for site in pattern:
if site == "A":
inverted_pattern.append("B")
b_count += 1
elif site == "B":
inverted_pattern.append("A")
a_count += 1
if inverted_pattern[-1] != "A":
# Change the last site to an "A"
inverted_pattern[-1] = "A"
b_count -= 1
a_count += 1
if a_count > 1 and b_count > 0:
inverted.append(inverted_pattern)
return inverted
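# --- Illustration (not part of the original module): pattern_inverter swaps A and B at each
# site and then forces the final (outgroup) position back to "A".
def example_pattern_inverter():
    return pattern_inverter([["B", "A", "B", "A"]])  # expected: [["A", "B", "A", "A"]]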
def pattern_string_generator(patterns):
"""
Creates a list of viable pattern strings that are easier to read
Input:
patterns --- a list of lists of individual characters e.g. [["A","B","B","A"],["B","A","B","A"]]
Output:
pattern_strings --- a list of lists of strings e.g. [["ABBA"],["BABA"]]
"""
# Convert the site pattern lists to strings
pattern_strings = []
while patterns:
a_count = 0
b_count = 0
pattern_str = ""
pattern = patterns.pop()
for site in pattern:
if site == "A":
b_count += 1
elif site == "B":
a_count += 1
pattern_str += site
if a_count > 0 and b_count > 0:
pattern_strings.append(pattern_str)
return pattern_strings
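# --- Illustration (not part of the original module): pattern_string_generator joins per-site
# character lists into strings, keeping only patterns that mix A's and B's.
def example_pattern_string_generator():
    # patterns are popped from the end, so the expected result is ["BABA", "ABBA"]
    return pattern_string_generator([["A", "B", "B", "A"], ["B", "A", "B", "A"]])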
def branch_removal(n):
"""
Remove the branch lengths from an inputted newick string
Input:
n --- a newick string
Output:
n --- the reformatted string
"""
float_pattern = "([+-]?\\d*\\.\\d+)(?![-+0-9\\.])"
# Regular expressions for removing branch lengths and confidence values
pattern2 = "([\:][\\d])"
pattern3 = "([\)][\\d])"
# Get rid of branch lengths in the newick strings
n = (re.sub(float_pattern, '', n))
n = (re.sub(pattern2, '', n)).replace(":", "")
n = (re.sub(pattern3, ')', n))
return n
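# --- Illustration (not part of the original module): branch_removal strips branch lengths
# from a newick string.
def example_branch_removal():
    return branch_removal("((A:0.5,B:0.5):1.0,C:2.0);")  # expected: "((A,B),C);"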
def site_pattern_generator(taxa_order, newick, outgroup):
"""
Generate the appropriate AB list patterns
Inputs:
taxa_order --- the desired order of the taxa
newick --- the newick string to generate site patterns for
outgroup --- the outgroup of the tree
Output:
finished_patterns --- the list of site patterns generated for the newick string
"""
# Create a tree object
tree = ete3.Tree(newick, format=1)
tree.ladderize(direction=1)
tree.set_outgroup(outgroup)
# Initialize containers for the final patterns and patterns being altered
final_site_patterns = []
# Keep a count of clades in the tree that contain 2 leaves
clade_count = 0
# Number of possible patterns is number of taxa - 2 + the number of clades
num_patterns = len(taxa_order) - 2
# Initialize pattern to be a list of strings
pattern = ["B" for x in range(len(taxa_order))]
# Create list of nodes in order of appearance
nodes = []
for node in tree.traverse("preorder"):
# Add node name to list of nodes
nodes.append(node.name)
# If there are internal nodes at the second and third position travel over the tree in postorder
if nodes[2] == "" and nodes[3] == "":
nodes = []
for node in tree.traverse("postorder"):
# Add node name to list of nodes
nodes.append(node.name)
# Keep track of visited leaves
seen_leaves = []
# Iterate over the order that the nodes occur beginning at the root
for node_idx in range(len(nodes)):
node = nodes[node_idx]
# If the node is the outgroup add A to the end of the pattern
if node == outgroup:
pattern[-1] = "A"
# Add outgroup to the seen leaves
seen_leaves.append(node)
elif outgroup not in seen_leaves:
pass
# Else if the node is a leaf and is after the outgroup
elif node != "" and seen_leaves[-1] == outgroup and outgroup in seen_leaves:
# If the next node is a leaf a clade has been found
if nodes[node_idx + 1] != "":
node2 = nodes[node_idx + 1]
# Get the indices of the leaves in the pattern
pat_idx1 = taxa_order.index(node)
pat_idx2 = taxa_order.index(node2)
# Set those pattern indices to "A"
pattern[pat_idx1] = "A"
pattern[pat_idx2] = "A"
clade_count += 1
final_site_patterns.append(pattern)
seen_leaves.append(node)
seen_leaves.append(node2)
# Get the index that final clade occurs at
end_idx = node_idx + 1
break
# Otherwise there is no clade
else:
# Get the index of the leaf in the pattern
pat_idx = taxa_order.index(node)
# Set those pattern indices to "A"
pattern[pat_idx] = "A"
seen_leaves.append(node)
# Get the index that final leaf occurs at
end_idx = node_idx
break
num_patterns = num_patterns + clade_count
# All patterns can be derived from the pattern with the most B's
working_patterns = [pattern for x in range(num_patterns)]
# Pop a pattern off of working patterns and add it to the final site patterns
final_site_patterns.append(working_patterns.pop())
# Iterate over each pattern in working patterns and change them
while working_patterns:
# Get a pattern and copy it
pattern = copy.deepcopy(working_patterns.pop())
# Iterate over the order that the nodes occur beginning at the last clade or leaf
for node_idx in range(end_idx + 1, len(nodes)):
# If the last clade is reached break
if node_idx == len(nodes) - 1:
if node != "":
# Get the index of the leaf in the pattern
pat_idx1 = taxa_order.index(node)
# Set those pattern indices to "A"
pattern[pat_idx1] = "A"
# Get the index that final leaf occurs at
end_idx = node_idx
break
else:
break
node = nodes[node_idx]
# If the next node is a leaf a clade has been found
if node != "" and nodes[node_idx + 1] != "":
node2 = nodes[node_idx + 1]
# Get the indices of the leaves in the pattern
pat_idx1 = taxa_order.index(node)
pat_idx2 = taxa_order.index(node2)
# Set those pattern indices to "A"
pattern[pat_idx1] = "A"
pattern[pat_idx2] = "A"
clade_count += 1
final_site_patterns.append(pattern)
# Get the index that final clade occurs at
end_idx = node_idx + 1
break
# Else if the node is a leaf
elif node != "":
# Get the index of the leaf in the pattern
pat_idx1 = taxa_order.index(node)
# Set those pattern indices to "A"
pattern[pat_idx1] = "A"
# Get the index that final leaf occurs at
end_idx = node_idx
break
# Add the altered pattern to the final site patterns
final_site_patterns.append(pattern)
# Update the working patterns to be the same as the most recent pattern
working_patterns = [pattern for x in range(num_patterns - len(final_site_patterns))]
# Create a list of patterns without duplicates
finished_patterns = []
# Iterate over each pattern and determine which ones are duplicates
for pattern in final_site_patterns:
if pattern not in finished_patterns:
finished_patterns.append(pattern)
# If a site pattern only has a single B consider it as the inverse
for pattern in finished_patterns:
if b_count(pattern) == 1:
finished_patterns.remove(pattern)
new_pattern = pattern_inverter([pattern])[0]
finished_patterns.append(new_pattern)
# Always do calculation with the inverse patterns
inverted_patterns = pattern_inverter(finished_patterns)
# Iterate over the inverted patterns and add them to finished patterns
for pattern in inverted_patterns:
if pattern not in finished_patterns:
finished_patterns.append(pattern)
finished_patterns = pattern_string_generator(finished_patterns)
inverted_patterns = pattern_string_generator(inverted_patterns)
return finished_patterns, inverted_patterns
def newicks_to_patterns_generator(taxa_order, newicks, outgroup):
"""
Generate the site patterns for each newick string and map the strings to their patterns
Inputs:
taxa_order --- the desired order of the taxa
newicks --- a list of newick strings
Output:
newicks_to_patterns --- a mapping of newick strings to their site patterns
"""
newicks_to_patterns = {}
inverse_to_counts = defaultdict(int)
# Iterate over the newick strings
for newick in newicks:
# Get the total set of site patterns and the inverses
all_patterns, inverses = site_pattern_generator(taxa_order, newick, outgroup)
newicks_to_patterns[newick] = all_patterns
# Count the number of times a site pattern appears as an inverse
for pattern in inverses:
inverse_to_counts[pattern] += 1
return newicks_to_patterns, inverse_to_counts
##### Interesting sites functions
def calculate_pattern_probabilities(newicks_to_patterns, newicks_to_pgS, newicks_to_pgN):
"""
Creates a mapping of site patterns to their total p(g|S) values across all gene trees and
a mapping of site patterns to their total p(g|N) values across all gene trees
Inputs:
newicks_to_patterns --- a mapping of tree newick strings to their site patterns
newicks_to_pgS--- a mapping of tree newick strings to their p(g|S) values
newicks_to_pgN--- a mapping of tree newick strings to their p(g|N) values
Outputs:
patterns_to_pgS --- a mapping of site patterns to their total p(g|S) value
patterns_to_pgN --- a mapping of site patterns to their total p(g|N) value
"""
patterns_to_pgS = defaultdict(float)
patterns_to_pgN = defaultdict(float)
# Iterate over each newick string
for newick in newicks_to_patterns:
# Iterate over each site pattern of a tree
for pattern in newicks_to_patterns[newick]:
patterns_to_pgS[pattern] += newicks_to_pgS[newick]
patterns_to_pgN[pattern] += newicks_to_pgN[newick]
return patterns_to_pgS, patterns_to_pgN
def determine_patterns(pattern_set, patterns_to_equality, patterns_to_pgN, patterns_to_pgS, use_inv):
"""
Determine which patterns are useful in determining introgression
Inputs:
pattern_set -- a set containing all patterns of interest
patterns_to_equality --- a mapping of site patterns to site patterns with equivalent p(gt|st)
patterns_to_pgN --- a mapping of site patterns to their total p(g|N) value for a network
patterns_to_pgS --- a mapping of site patterns to their total p(g|st)
Outputs:
terms1 --- set of patterns to count whose probabilities increase under introgression
terms2 --- set of patterns to count whose probabilities decrease under introgression
"""
terms1 = set([])
terms2 = set([])
# Iterate over each pattern to determine the terms of interest
for pattern1 in pattern_set:
pat1_prob = patterns_to_pgN[pattern1]
if pattern1 in patterns_to_equality.keys():
for pattern2 in patterns_to_equality[pattern1]:
pat2_prob = patterns_to_pgN[pattern2]
# Issues with randomness when very small values are close but not technically equal
if not approximately_equal(pat1_prob, pat2_prob):
if pat1_prob > pat2_prob:
terms1.add(pattern1)
terms2.add(pattern2)
elif pat1_prob < pat2_prob:
terms1.add(pattern2)
terms2.add(pattern1)
terms1_resized, terms2_resized = resize_terms(terms1, terms2, patterns_to_pgS, use_inv)
patterns_to_coefficients = scale_terms(terms1, terms2, patterns_to_pgS)
return terms1, terms2, terms1_resized, terms2_resized, patterns_to_coefficients
def resize_terms(terms1, terms2, patterns_to_pgS, use_inv):
"""
Resize the terms to ensure that the probabilities are the same on both sides.
This is necessary to maintain the null hypothesis that D = 0 under no introgression.
Inputs:
terms1 --- a set of patterns to count and add to each other to determine introgression
terms2 --- a set of other patterns to count and add to each other to determine introgression
use_inv --- boolean for determining if inverse site patterns will be used
patterns_to_pgS --- a mapping of site patterns to their p(gt|st) values
Outputs:
terms1 --- a set of patterns to count and add to each other to determine introgression
terms2 --- a set of other patterns to count and add to each other to determine introgression
"""
terms1 = list(terms1)
terms2 = list(terms2)
# Create a mapping of pgtst to trees for each term
pgtst_to_trees1 = defaultdict(set)
pgtst_to_trees2 = defaultdict(set)
for tree in terms1:
# Round the probability to the 15th digit to prevent the randomness issues with small values
prob = float(format(patterns_to_pgS[tree], '.15f'))
pgtst_to_trees1[prob].add(tree)
for tree in terms2:
# Round the probability to the 15th digit to prevent the randomness issues with small values
prob = float(format(patterns_to_pgS[tree], '.15f'))
pgtst_to_trees2[prob].add(tree)
# Balance terms
terms1_prob_counts = defaultdict(int)
terms2_prob_counts = defaultdict(int)
# Round each probability and count the number of times it occurs
for tree in terms1:
prob = float(format(patterns_to_pgS[tree], '.15f'))
terms1_prob_counts[prob] += 1
for tree in terms2:
prob = float(format(patterns_to_pgS[tree], '.15f'))
terms2_prob_counts[prob] += 1
# Iterate over each probability
for prob in terms1_prob_counts:
# Get the number of times each probability occurs
count1, count2 = terms1_prob_counts[prob], terms2_prob_counts[prob]
removed = set([])
# The number of site patterns to remove is the difference in counts
num_remove = abs(count2 - count1)
if use_inv:
            # When inverses are in use each removal covers a pattern and its inverse, so halve the number to remove
num_remove = num_remove / 2
# If probabilities do not occur an equal number of times remove site patterns until they do
if count1 > count2:
for i in range(num_remove):
# Get a pattern to remove and remove it from the possible removals
r = sorted(list(pgtst_to_trees1[prob])).pop(0)
pgtst_to_trees1[prob].remove(r)
removed.add(r)
terms1_remove = True
if count1 < count2:
for i in range(num_remove):
# Get a pattern to remove and remove it from the possible removals
r = sorted(list(pgtst_to_trees2[prob])).pop(0)
pgtst_to_trees2[prob].remove(r)
removed.add(r)
terms1_remove = False
if use_inv:
# Remove site patterns and their inverses
rm = set([])
inv_rm = pattern_inverter(removed)
for pattern in inv_rm:
rm.add(''.join(pattern))
removed = removed.union(rm)
# Iterate over each pattern to be removed and remove it
for pattern in removed:
if terms1_remove:
terms1.remove(pattern)
else:
terms2.remove(pattern)
terms1, terms2 = tuple(terms1), tuple(terms2)
return terms1, terms2
def scale_terms(terms1, terms2, patterns_to_pgS):
"""
Multiply the terms by a scalar to ensure that the probabilities are the same on both sides.
This is necessary to maintain the null hypothesis that D = 0 under no introgression.
Inputs:
terms1 --- a set of patterns to count and add to each other to determine introgression
terms2 --- a set of other patterns to count and add to each other to determine introgression
patterns_to_pgS --- a mapping of site patterns to their p(gt|st) values
Outputs:
patterns_to_coefficient --- a mapping of site patterns to a coefficent to multiply their counts by
"""
terms1 = list(terms1)
terms2 = list(terms2)
# Create a mapping of pgtst to trees for each term
pgtst_to_trees1 = defaultdict(set)
pgtst_to_trees2 = defaultdict(set)
patterns_to_coefficient = {}
for tree in terms1:
prob = float(format(patterns_to_pgS[tree], '.15f'))
pgtst_to_trees1[prob].add(tree)
for tree in terms2:
prob = float(format(patterns_to_pgS[tree], '.15f'))
pgtst_to_trees2[prob].add(tree)
# Balance terms
terms1_prob_counts = defaultdict(int)
terms2_prob_counts = defaultdict(int)
# Round each probability and count the number of times it occurs
for tree in terms1:
prob = float(format(patterns_to_pgS[tree], '.15f'))
terms1_prob_counts[prob] += 1
for tree in terms2:
prob = float(format(patterns_to_pgS[tree], '.15f'))
terms2_prob_counts[prob] += 1
# Iterate over each probability
for prob in terms1_prob_counts:
# Get the number of times each probability occurs
count1, count2 = terms1_prob_counts[prob], terms2_prob_counts[prob]
# Get the patterns in the left set of terms corresponding the probability
patterns1 = pgtst_to_trees1[prob]
# Multiply each term in terms1 by count2 / count1
for pattern in patterns1:
patterns_to_coefficient[pattern] = float(count2) / count1
return patterns_to_coefficient
def generate_statistic_string(patterns_of_interest):
"""
Create a string representing the statistic for determining introgression like "(ABBA - BABA)/(ABBA + BABA)"
Input:
patterns_of_interest --- a tuple containing the sets of patterns used for determining a statistic
Output:
L_statistic --- a string representation of the statistic
"""
calculation = []
# Iterate over each set of patterns
for pattern_set in patterns_of_interest:
term = "("
# Combine each term with a "+"
for pattern in sorted(pattern_set):
term = term + pattern + " + "
term = term[:-3] + ")"
calculation.append(term)
L_statistic = "({0} - {1}) / ({0} + {1})".format(calculation[0], calculation[1])
return L_statistic
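# --- Illustration (not part of the original module): with the classic ABBA/BABA patterns the
# generated string matches Patterson's D statistic.
def example_generate_statistic_string():
    return generate_statistic_string(({"ABBA"}, {"BABA"}))
    # expected: "((ABBA) - (BABA)) / ((ABBA) + (BABA))"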
##### Function for calculating statistic
def calculate_significance_custom_dof(chiSqValue, dofValue, verbose, alpha):
"""
Determines statistical significance based on a chi-squared goodness of fit test
Input:
    chiSqValue --- the chi-squared test statistic
    dofValue --- the degrees of freedom for the chi-squared distribution
verbose --- a boolean corresponding to a verbose output
alpha --- the significance level
Output:
significant --- a boolean corresponding to whether or not the result is statistically significant
"""
# Calculate the test statistic
# if left + right > 0:
# chisq = abs((left - right)**2 / float(left + right))
# else:
# chisq = 0
# Calculate the p-value based on a chi square distribution with df = 1
# pval = 1 - stats.chi2.cdf(chisq, 1)
chiSquaredDistVal = stats.chi2.cdf(chiSqValue, dofValue)
pval = 1 - chiSquaredDistVal
if pval < alpha:
signif = True
else:
signif = False
if verbose:
return signif, chiSqValue, pval
else:
return signif, pval
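# --- Illustration (not part of the original module): a chi-squared value of 6.0 on 2 degrees
# of freedom gives p = 1 - chi2.cdf(6.0, 2), roughly 0.0498, which is significant at
# alpha = 0.05 but not at the default alpha = 0.01.
def example_significance_custom_dof():
    return calculate_significance_custom_dof(6.0, 2, False, 0.05)  # expected: (True, ~0.0498)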
def calculate_significance(left, right, verbose, alpha):
"""
Determines statistical significance based on a chi-squared goodness of fit test
Input:
left --- the total count for site patterns in the left term of the statistic
right --- the total count for site patterns in the right term of the statistic
verbose --- a boolean corresponding to a verbose output
alpha --- the significance level
Output:
significant --- a boolean corresponding to whether or not the result is statistically significant
"""
# Calculate the test statistic
if left + right > 0:
chisq = abs((left - right)**2 / float(left + right))
else:
chisq = 0
# Calculate the p-value based on a chi square distribution with df = 1
pval = 1 - stats.chi2.cdf(chisq, 1)
if pval < alpha:
signif = True
else:
signif = False
if verbose:
return signif, chisq, pval
else:
return signif
def calculate_L(alignments, taxa_order, outgroup, patterns_of_interest, verbose, alpha, patterns_of_interest_resized,
overall_coefficient=1, patterns_to_coefficients={}):
"""
Calculates the L statistic for the given alignment
Input:
    alignments --- a list of sequence alignments in phylip format
    taxa_order --- the desired order of the taxa
    patterns_of_interest --- a tuple containing the sets of patterns used for determining a statistic
    verbose --- a boolean if more output information is desired
    alpha --- the significance value
    patterns_of_interest_resized --- the patterns of interest after block resizing
    overall_coefficient --- the probability coefficient used to maintain the null hypothesis
    patterns_to_coefficients --- a mapping of site patterns to coefficients needed to maintain the null
Output:
l_stat --- the L statistic value
significant --- a boolean denoting if the l_stat value is statistically significant
"""
# Separate the patterns of interest into their two terms
terms1 = patterns_of_interest[0]
terms2 = patterns_of_interest[1]
# Do the same for the resized terms
terms1_resized = patterns_of_interest_resized[0]
terms2_resized = patterns_of_interest_resized[1]
# Create a mapping for each generalized D type
alignments_to_d_resized = {}
alignments_to_d_pattern_coeff = {}
alignments_to_d_ovr_coeff = {}
for alignment in alignments:
# Initialize these things for all files
terms1_counts = defaultdict(int)
terms2_counts = defaultdict(int)
terms1_counts_resized = defaultdict(int)
terms2_counts_resized = defaultdict(int)
sequence_list = []
taxon_list = []
with open(alignment) as f:
# Create a list of each line in the file
lines = f.readlines()
# First line contains the number and length of the sequences
first_line = lines[0].split()
length_of_sequences = int(first_line[1])
for line in lines[1:]:
# Add each sequence to a list
sequence = line.split()[1]
sequence_list.append(sequence)
# Add each taxon to a list
taxon = line.split()[0]
taxon_list.append(taxon)
length_of_sequences = len(min(sequence_list, key=len))
num_ignored = 0
# Iterate over the site indices
for site_idx in range(length_of_sequences):
# Map each taxa to the base at a given site
taxa_to_site = {}
# Create a set of the bases at a given site to determine if the site is biallelic
bases = set([])
# Iterate over each sequence in the alignment
for sequence, taxon in zip(sequence_list, taxon_list):
# Map each taxon to the corresponding base at the site
base = sequence[site_idx]
taxa_to_site[taxon] = base
bases.add(base)
# Statistic can only be calculated where the nucleotides are known
if "-" not in bases and "N" not in bases and len(bases) == 2:
# Create the pattern that each site has
site_pattern = []
# The ancestral gene is always the same as the outgroup
ancestral = taxa_to_site[outgroup]
# Iterate over each taxon
for taxon in taxa_order:
nucleotide = taxa_to_site[taxon]
                    # Determine the derived/ancestral status of each nucleotide
if nucleotide == ancestral:
site_pattern.append("A")
else:
site_pattern.append("B")
sites = pattern_string_generator([site_pattern])
if sites:
site_string = sites[0]
# If the site string is a pattern of interest add to its count for one of the terms
if site_string in terms1:
terms1_counts[site_string] += 1
if site_string in terms2:
terms2_counts[site_string] += 1
if site_string in terms1_resized:
terms1_counts_resized[site_string] += 1
if site_string in terms2_resized:
terms2_counts_resized[site_string] += 1
elif "-" in bases or "N" in bases:
num_ignored += 1
terms1_total = sum(terms1_counts.values())
terms2_total = sum(terms2_counts.values())
terms1_total_resized = sum(terms1_counts_resized.values())
terms2_total_resized = sum(terms2_counts_resized.values())
# Calculate the generalized d for the block resizing method
numerator_resized = terms1_total_resized - terms2_total_resized
denominator_resized = terms1_total_resized + terms2_total_resized
if denominator_resized != 0:
l_stat_resized = numerator_resized / float(denominator_resized)
else:
l_stat_resized = 0
# Calculate the generalized d for the total coefficient method
numerator_ovr_coeff = (overall_coefficient * terms1_total) - terms2_total
denominator_ovr_coeff = (overall_coefficient * terms1_total) + terms2_total
if denominator_ovr_coeff != 0:
l_stat_ovr_coeff = numerator_ovr_coeff / float(denominator_ovr_coeff)
else:
l_stat_ovr_coeff = 0
# Calculate the generalized d for the pattern coefficient method
weighted_terms1_total, weighted_counts = weight_counts(terms1_counts, patterns_to_coefficients)
numerator_pattern_coeff = weighted_terms1_total - terms2_total
denominator_pattern_coeff = weighted_terms1_total + terms2_total
if denominator_pattern_coeff != 0:
l_stat_pattern_coeff = numerator_pattern_coeff / float(denominator_pattern_coeff)
else:
l_stat_pattern_coeff = 0
# Verbose output
if verbose:
significant, chisq, pval = calculate_significance(terms1_total_resized, terms2_total_resized, verbose, alpha)
alignments_to_d_resized[
alignment] = l_stat_resized, significant, terms1_counts_resized, terms2_counts_resized, num_ignored, chisq, pval
significant, chisq, pval = calculate_significance(weighted_terms1_total, terms2_total, verbose, alpha)
alignments_to_d_pattern_coeff[
alignment] = l_stat_pattern_coeff, significant, weighted_counts, terms2_counts, num_ignored, chisq, pval
significant, chisq, pval = calculate_significance(overall_coefficient * terms1_total, terms2_total, verbose, alpha)
alignments_to_d_ovr_coeff[
alignment] = l_stat_ovr_coeff, significant, terms1_counts, terms2_counts, num_ignored, chisq, pval, overall_coefficient
# Standard output
else:
significant = calculate_significance(terms1_total_resized, terms2_total_resized, verbose,
alpha)
alignments_to_d_resized[
alignment] = l_stat_resized, significant
significant = calculate_significance(weighted_terms1_total, terms2_total, verbose, alpha)
alignments_to_d_pattern_coeff[
alignment] = l_stat_pattern_coeff, significant
significant = calculate_significance(overall_coefficient * terms1_total, terms2_total, verbose,alpha)
alignments_to_d_ovr_coeff[
alignment] = l_stat_ovr_coeff, significant
return alignments_to_d_resized, alignments_to_d_pattern_coeff, alignments_to_d_ovr_coeff
def weight_counts(term_counts, patterns_to_coefficients):
"""
Inputs:
term_counts --- a mapping of terms to their counts
patterns_to_coefficients --- a mapping of site patterns to coefficients needed to maintain the null
Output:
weighted_total --- the total counts for the site patterns weighted
"""
# Create a mapping of patterns to their weighted counts
weighted_counts = {}
# Iterate over each pattern
for pattern in term_counts:
# Weight its count based on the coefficient
if pattern in patterns_to_coefficients:
coefficient = patterns_to_coefficients[pattern]
else:
coefficient = 1
count = term_counts[pattern]
weighted_counts[pattern] = count * coefficient
weighted_total = sum(weighted_counts.values())
return weighted_total, weighted_counts
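# --- Illustration (not part of the original module): weight_counts scales each pattern count
# by its coefficient (patterns without a coefficient keep a weight of 1).
def example_weight_counts():
    total, weighted = weight_counts({"ABBAA": 10, "AABBA": 4}, {"ABBAA": 0.5})
    return total, weighted  # expected: (9.0, {"ABBAA": 5.0, "AABBA": 4})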
def calculate_windows_to_L(alignments, taxa_order, outgroup, patterns_of_interest, window_size, window_offset,
verbose= False, alpha=0.01):
"""
    Calculates the L statistic over sliding windows for each given alignment
    Input:
    alignments --- a list of sequence alignments in phylip format
    taxa_order --- the desired order of the taxa
    patterns_of_interest --- a tuple containing the sets of patterns used for determining a statistic
    window_size --- the desired window size
    window_offset --- the desired offset between windows
    Output:
    alignments_to_windows_to_d --- a mapping of each alignment to a mapping of window indices to L statistic results
"""
# Separate the patterns of interest into their two terms
terms1 = patterns_of_interest[0]
terms2 = patterns_of_interest[1]
alignments_to_windows_to_d = {}
for alignment in alignments:
sequence_list = []
taxon_list = []
with open(alignment) as f:
# Create a list of each line in the file
lines = f.readlines()
# First line contains the number and length of the sequences
first_line = lines[0].split()
length_of_sequences = int(first_line[1])
for line in lines[1:]:
# Add each sequence to a list
sequence = line.split()[1]
sequence_list.append(sequence)
# Add each taxon to a list
taxon = line.split()[0]
taxon_list.append(taxon)
i = 0
num_windows = 0
if window_size > length_of_sequences:
num_windows = 1
window_size = length_of_sequences
else:
# Determine the total number of windows needed
while i + window_size - 1 < length_of_sequences:
i += window_offset
num_windows += 1
site_idx = 0
windows_to_l = {}
# Iterate over each window
for window in range(num_windows):
terms1_counts = defaultdict(int)
terms2_counts = defaultdict(int)
num_ignored = 0
# Iterate over the indices in each window
for window_idx in range(window_size):
# Map each taxa to the base at a given site
taxa_to_site = {}
# Create a set of the bases at a given site to determine if the site is biallelic
bases = set([])
# Iterate over each sequence in the alignment
for sequence, taxon in zip(sequence_list, taxon_list):
# Map each taxon to the corresponding base at the site
base = sequence[site_idx]
taxa_to_site[taxon] = base
bases.add(base)
# Statistic can only be calculated where the nucleotides are known
if "-" not in bases and len(bases) == 2:
# Create the pattern that each site has
site_pattern = []
# The ancestral gene is always the same as the outgroup
ancestral = taxa_to_site[outgroup]
# Iterate over each taxon
for taxon in taxa_order:
nucleotide = taxa_to_site[taxon]
                        # Determine the derived/ancestral status of each nucleotide
if nucleotide == ancestral:
site_pattern.append("A")
else:
site_pattern.append("B")
# Convert the site pattern to a string
sites = pattern_string_generator([site_pattern])
if sites:
site_string = sites[0]
# If the site string is a pattern of interest add to its count for one of the terms
if site_string in terms1:
terms1_counts[site_string] += 1
elif site_string in terms2:
terms2_counts[site_string] += 1
elif "-" in bases or "N" in bases:
num_ignored += 1
# Increment the site index
site_idx += 1
terms1_total = sum(terms1_counts.values())
terms2_total = sum(terms2_counts.values())
numerator = terms1_total - terms2_total
denominator = terms1_total + terms2_total
if denominator != 0:
l_stat = numerator / float(denominator)
else:
l_stat = 0
# Verbose output
if verbose:
signif, chisq, pval = calculate_significance(terms1_total, terms2_total, verbose, alpha)
# The line below can be changed to add more information to the windows to L mapping
windows_to_l[window] = (l_stat, signif, num_ignored, chisq, pval)
# Standard output
else:
signif = calculate_significance(terms1_total, terms2_total, verbose, alpha)
windows_to_l[window] = (l_stat, signif)
# Account for overlapping windows
site_idx += (window_offset - window_size)
alignments_to_windows_to_d[alignment] = windows_to_l
return alignments_to_windows_to_d
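# --- Illustration (not part of the original module): a hedged sketch of the window-counting
# loop used in calculate_windows_to_L and calculate_windows_to_DGEN above. With 1000 sites,
# a window size of 300 and an offset of 200, four windows start at sites 0, 200, 400 and 600.
def example_window_count():
    length_of_sequences, window_size, window_offset = 1000, 300, 200
    i, num_windows = 0, 0
    while i + window_size - 1 < length_of_sequences:
        i += window_offset
        num_windows += 1
    return num_windows  # expected: 4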
##### Functions for total ordering
def branch_adjust(species_tree):
"""
Create all possible combinations of branch lengths for the given species tree
Input:
species_tree --- a newick string containing the overall species tree
Output:
adjusted_trees --- a set of trees with all combinations of branch lengths
"""
branch_lengths = [.5, 1.0, 2.0, 4.0]
adjusted_trees = set([])
taxa = []
pattern = "((?<=\()[\w]+)|((?<=\,)[\w]+)"
leaves = re.findall(pattern, species_tree)
for leaf in leaves:
if leaf[0] == '':
taxa.append(leaf[1])
else:
taxa.append(leaf[0])
for b in branch_lengths:
new_t = species_tree
for taxon in taxa:
new_t = new_t.replace(taxon, "{0}:{1}".format(taxon, b))
new_t = new_t.replace("),", "):{0},".format(b))
adjusted_trees.add(new_t)
return adjusted_trees, taxa
def approximately_equal(x, y, tol=0.00000000000001):
"""
Determines if floats x and y are equal within a degree of uncertainty
Inputs:
x --- a float
y --- a float
tol --- an error tolerance
"""
return abs(x - y) <= tol
def equality_sets(species_trees, network, taxa, outgroup, use_inv):
"""
Create mappings of site patterns to patterns with equivalent probabilities
    Input:
    species_trees --- a set of species tree newick strings with varying branch-length combinations
    network --- the species network newick string
    taxa --- a list of the taxa
    outgroup --- the outgroup taxon
    use_inv --- a boolean for using inverse site patterns or not
    Output:
    trees_to_equality --- a mapping of tree strings to a set of other trees with the same p(gt|st)
    trees_to_equality_N --- a mapping of tree strings to a set of other trees with the same p(gt|N)
    patterns_pgS --- a mapping of site patterns to their total p(g|S) values
    patterns_pgN --- a mapping of site patterns to their total p(g|N) values
"""
st_to_pattern_probs = {}
st_to_pattern_probs_N = {}
trees_to_equality = {}
trees_to_equality_N = {}
gene_trees = generate_unique_trees(taxa, outgroup)
newick_patterns, inverses_to_counts = newicks_to_patterns_generator(taxa, gene_trees, outgroup)
# If inverses are not desired remove them
if not use_inv:
newick_patterns = remove_inverse(newick_patterns, inverses_to_counts)
for st in species_trees:
ts_to_pgS, ts_to_pgN = calculate_newicks_to_stats(st, network, gene_trees)
patterns_pgS, patterns_pgN = calculate_pattern_probabilities(newick_patterns, ts_to_pgS, ts_to_pgN)
st_to_pattern_probs[st] = sorted(patterns_pgS.items(), key=lambda tup: tup[1], reverse=True)
st_to_pattern_probs_N[st] = sorted(patterns_pgN.items(), key=lambda tup: tup[1], reverse=True)
# Generate equality sets based on p(gt|st)
for st in sorted(st_to_pattern_probs.keys()):
gt_probs = st_to_pattern_probs[st]
for i in range(len(gt_probs)):
gt1, prob1 = gt_probs[i]
equal_trees = set([])
for j in range(len(gt_probs)):
gt2, prob2 = gt_probs[j]
if approximately_equal(prob1, prob2):
equal_trees.add(gt2)
            # Add the equality set to the mapping if the pattern is not already in the mapping and the set is non-empty
if len(equal_trees) != 0:
trees_to_equality[gt1] = equal_trees
# Generate equality sets based on p(gt|N)
for st in sorted(st_to_pattern_probs_N.keys()):
gt_probs = st_to_pattern_probs_N[st]
for i in range(len(gt_probs)):
gt1, prob1 = gt_probs[i]
equal_trees = set([])
for j in range(len(gt_probs)):
gt2, prob2 = gt_probs[j]
if approximately_equal(prob1, prob2):
equal_trees.add(gt2)
            # Add the equality set to the mapping if the pattern is not already in the mapping and the set is non-empty
if len(equal_trees) != 0:
trees_to_equality_N[gt1] = equal_trees
return trees_to_equality, trees_to_equality_N, patterns_pgS, patterns_pgN
def set_of_interest(trees_to_equality, trees_to_equality_N):
"""
Inputs:
trees_to_equality --- a mapping of tree strings to a set of other trees with the same p(gt|st)
trees_to_equality_N --- a mapping of tree strings to a set of other trees with the same p(gt|N)
Output:
trees_of_interest --- a set of trees that changed equality under the species network
"""
trees_of_interest = set([])
for tree in trees_to_equality:
if tree not in trees_to_equality_N:
t_set = copy.deepcopy(trees_to_equality[tree])
t_set.add(tree)
trees_of_interest = trees_of_interest.union(t_set)
elif trees_to_equality[tree] != trees_to_equality_N[tree]:
t_set = copy.deepcopy(trees_to_equality[tree])
t_set.add(tree)
trees_of_interest = trees_of_interest.union(t_set)
return trees_of_interest
def concat_directory(directory_path):
"""
Concatenates all the alignments in a given directory and returns a single file.
Input:
    directory_path --- a string path to the directory the user wants to use.
Output:
file_path --- a string path to the file that was created as a result of the concatenation.
"""
# filter out hidden files
filenames = filter(lambda n: not n.startswith(".") , natsorted(os.listdir(directory_path)))
# get the number of lines on each file
with open(os.path.join(directory_path, filenames[0]), "r") as f:
n = len(list(f))
# initialize a list with an empty string for each line
output_file_list = [""] * n
# Iterate over each folder in the given directory in numerical order
for i in range(len(filenames)):
# get full path of file
input_file = os.path.join(directory_path, filenames[i])
# if its a fasta file -> convert to phylip
if filenames[i].endswith(".fa") or filenames[i].endswith(".fasta"):
input_handle = open(input_file, "rU")
output_handle = open(input_file + ".phylip", "w")
alignments = AlignIO.parse(input_handle, "fasta")
AlignIO.write(alignments, output_handle, "phylip-sequential")
output_handle.close()
input_handle.close()
input_file = input_file + ".phylip"
# create a list of the input files lines
with open(input_file, 'r') as f:
input_file_list = [l.rstrip() for l in list(f)]
for j in range(len(input_file_list)):
# if this is the first file
if i == 0:
output_file_list[j] = input_file_list[j]
else:
if j == 0:
num_bp = int(input_file_list[0].split(" ")[2])
total_bp = int(output_file_list[j].split(" ")[2]) + num_bp
output_file_list[j] = " " + str(n - 1) + " " + str(total_bp)
else:
output_file_list[j] += input_file_list[j].split(" ")[-1]
# write the contents of the output file list to a text file
with open(os.path.abspath(directory_path) + "/concatFile.phylip.txt", "w") as o:
for line in output_file_list:
print >> o, line
return os.path.abspath(directory_path) + "/concatFile.phylip.txt"
def remove_inverse(newick_patterns, inverses_to_counts):
"""
Remove inverse site patterns
Input:
term --- a tuple of site patterns and their inverses
Output:
term --- the original tuple with site patterns removed
"""
    # Create a mapping of each site pattern to its inverse
patterns_to_inverse = {}
d = set([])
for newick in newick_patterns:
d = d.union(set(newick_patterns[newick]))
#Map each pattern to its inverse
for newick in newick_patterns:
for pattern in newick_patterns[newick]:
# Represent the pattern as a list
pattern_lst = [x for x in pattern]
# Create the inverse pattern
inv_lst = pattern_inverter([pattern_lst])[0]
inverse = ''.join(inv_lst)
# If the pattern is not already in the mapping map it
if pattern not in patterns_to_inverse.keys() and pattern not in patterns_to_inverse.values():
patterns_to_inverse[pattern] = inverse
# Real inverses are the site patterns that appear as inverses more frequently
real_inverses = []
# Iterate over all possible patterns
for pat in patterns_to_inverse:
possible_inv = patterns_to_inverse[pat]
# If a pattern only has one B define it as the inverse
if b_count(pat) == 1:
real_inverses.append(pat)
elif b_count(possible_inv) == 1:
real_inverses.append(possible_inv)
# If a pattern appears as an inverse more often than its "inverse" then it is an inverse
elif inverses_to_counts[pat] > inverses_to_counts[possible_inv]:
real_inverses.append(pat)
# Otherwise the "inverse" is the true inverse
else:
real_inverses.append(possible_inv)
    # Remove all real inverses
for newick in newick_patterns:
inverses_removed = list(newick_patterns[newick])
for p in newick_patterns[newick]:
if p in real_inverses:
inverses_removed.remove(p)
newick_patterns[newick] = inverses_removed
return newick_patterns
def b_count(pattern):
"""
Count the number of B's that occur in a site pattern
Input:
pattern --- a site pattern
Output:
num_b --- the number of B's in the site pattern
"""
num_b = 0
for char in pattern:
if char == "B":
num_b += 1
return num_b
def calculate_total_term_prob(patterns_pgS, term):
"""
Calculate the total probability for a term
"""
term_prob = 0
for pattern in term:
term_prob += patterns_pgS[pattern]
return term_prob
def calculate_generalized(alignments, species_tree=None, reticulations=None, outgroup=None, window_size=100000000000,
window_offset=100000000000, verbose=False, alpha=0.01, use_inv=False, useDir=False, directory="",
statistic=False, save=False, f="DGenStatistic_", plot=False, meta=False):
"""
Calculates the L statistic for the given alignment
Input:
    alignments --- a list of sequence alignment files in phylip format
species_tree --- the inputted species tree over the given taxa
reticulations --- a tuple containing two dictionaries mapping the start leaves to end leaves
outgroup --- the desired root of the species tree
window_size --- the desired window size
window_offset --- the desired offset between windows
verbose --- a boolean for determining if extra information will be printed
alpha --- the significance level
use_inv --- a boolean for using inverse site patterns or not
useDir --- a boolean for determining if the user wants to input an entire directory of alignments or only a single alignment
    directory --- a string path to the directory the user wants to use. NOTE: only necessary if useDir=True.
statistic --- a text file containing a saved statistic
save --- a boolean corresponding to save a statistic or not, note that saving uses block resizing
f --- the desired save statistic file name
plot --- a boolean corresponding to using plot formatting for the output
meta --- a string of metadata added to the plot formatting output
Output:
    alignments_to_d_resized --- a mapping of alignments to their overall D statistic information
    alignments_to_windows_to_d --- a mapping of alignments to their windowed D values
    n, s --- the summary strings that are printed
"""
# If the user does not have a specific statistic file to use
if not statistic:
st = re.sub("\:\d+\.\d+", "", species_tree)
st = Tree(st)
st.set_outgroup(outgroup)
st.ladderize(direction=1)
st = st.write()
trees, taxa = branch_adjust(st)
network = generate_network_tree((0.1, 0.9), list(trees)[0], reticulations)
trees_to_equality, trees_to_equality_N, patterns_pgS, patterns_pgN = equality_sets(trees, network, taxa, outgroup, use_inv)
trees_of_interest = set_of_interest(trees_to_equality, trees_to_equality_N)
increase, decrease, increase_resized, decrease_resized, patterns_to_coeff = determine_patterns(
trees_of_interest, trees_to_equality, patterns_pgN, patterns_pgS, use_inv)
# Calculate the total probabilities for creating a coefficient
inc_prob = calculate_total_term_prob(patterns_pgS, increase)
dec_prob = calculate_total_term_prob(patterns_pgS, decrease)
if inc_prob != 0:
overall_coefficient = dec_prob / inc_prob
else:
overall_coefficient = 0
# If users want to save the statistic and speed up future runs
if save:
num = 0
file_name = f + ".txt"
while os.path.exists(file_name):
file_name = "DGenStatistic_{0}.txt".format(num)
num += 1
with open(file_name, "w") as text_file:
output_str = "Taxa: {0}\n".format(taxa)
text_file.write(output_str)
output_str = "Left Terms: {0}\n".format(increase_resized)
text_file.write(output_str)
output_str = "Right Terms: {0}\n".format(decrease_resized)
text_file.write(output_str)
output_str = "Statistic: {0}\n".format(generate_statistic_string((increase_resized, decrease_resized)))
text_file.write(output_str)
output_str = "Species Tree: {0}\n".format(species_tree)
text_file.write(output_str)
output_str = "Outgroup: {0}\n".format(outgroup)
text_file.write(output_str)
output_str = "Reticulations: {0}\n".format(reticulations)
text_file.write(output_str)
text_file.close()
# Users can specify a previously generated statistic to use for alignment counting
else:
with(open(statistic, "r")) as s:
lines = s.readlines()
taxa = eval(lines[0].split(None, 1)[1])
increase = eval(lines[1].split(None, 2)[2])
decrease = eval(lines[2].split(None, 2)[2])
outgroup = lines[5].split(None, 1)[1].replace("\n", "")
increase_resized = increase
decrease_resized = decrease
overall_coefficient = 1
patterns_to_coeff = {}
if useDir:
alignments = [concat_directory(directory)]
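    # Count site patterns over each whole alignment to get the overall D
    # statistics, then repeat the count over sliding windows of the
    # requested size and offset.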
alignments_to_d_resized, alignments_to_d_pattern_coeff, alignments_to_d_ovr_coeff = calculate_L(
alignments, taxa, outgroup, (increase, decrease), verbose, alpha, (increase_resized, decrease_resized),
overall_coefficient, patterns_to_coeff)
alignments_to_windows_to_d = calculate_windows_to_L(alignments, taxa, outgroup, (increase_resized, decrease_resized), window_size,
window_offset, verbose, alpha)
s = ""
n = ""
# Create the output string
if verbose and not statistic:
s += "\n"
s += "Probability of gene tree patterns: " + str(patterns_pgS) + "\n"
s += "\n"
s += "Probability of species network patterns:" + str(patterns_pgN) + "\n"
s += "\n"
s += "Patterns that were formerly equal with increasing probability: " + str(increase) + "\n"
s += "Patterns that were formerly equal with decreasing probability: " + str(decrease) + "\n"
s += "Total p(gt|st) for increasing site patterns: " + str(inc_prob) + "\n"
s += "Total p(gt|st) for decreasing site patterns: " + str(dec_prob) + "\n"
s += "\n"
s += "Taxa order used for site patterns: " + str(taxa) + "\n"
s += "Statistic without coefficient weighting: " + str(generate_statistic_string((increase, decrease))) + "\n"
s += "\n"
s += "Increasing patterns after block resizing: " + str(increase_resized) + "\n"
s += "Decreasing patterns after block resizing: " + str(decrease_resized) + "\n"
s += "Total p(gt|st) for resized increasing site patterns: " + str(calculate_total_term_prob(patterns_pgS, increase_resized)) + "\n"
s += "Total p(gt|st) for resized decreasing site patterns: " + str(calculate_total_term_prob(patterns_pgS, decrease_resized)) + "\n"
s += "Statistic using block resizing: " + str(generate_statistic_string((increase_resized, decrease_resized))) + "\n"
s += "\n"
s += "\n"
s += "Information for each file: " + "\n"
s += display_alignment_info(alignments_to_d_resized, alignments_to_d_pattern_coeff, alignments_to_d_ovr_coeff)
print s
elif verbose and statistic:
s += "Taxa order used for site patterns: " + str(taxa) + "\n"
s += "\n"
s += "Patterns that were formerly equal with increasing probability: " + str(increase) + "\n"
s += "Patterns that were formerly equal with decreasing probability: " + str(decrease) + "\n"
s += "\n"
s += "Statistic: " + str(generate_statistic_string((increase, decrease))) + "\n"
s += "\n"
s += "Information for each file: " + "\n"
n += "Information for each file: " + "\n"
for alignment in alignments_to_d_resized:
l_stat, significant, left_counts, right_counts, num_ignored, chisq, pval = alignments_to_d_resized[alignment]
s += alignment + ": "
n += alignment + ": " + "\n"
s += "\n"
s += "Final Overall D value using Block Resizing Method: {0}".format(l_stat) + "\n"
s += "Significant deviation from 0: {0}".format(significant) + "\n"
s += "Overall Chi-Squared statistic: " + str(chisq) + "\n"
s += "Overall p value: " + str(pval) + "\n"
s += "Number of site ignored due to \"N\" or \"-\": {0}".format(num_ignored) + "\n"
s += "\n"
s += "Left term counts: " + "\n"
for pattern in left_counts:
s += pattern + ": {0}".format(left_counts[pattern]) + "\n"
s += "\n"
s += "Right term counts: " + "\n"
for pattern in right_counts:
s += pattern + ": {0}".format(right_counts[pattern]) + "\n"
s += "\n"
s += "Windows to D value: " + str(alignments_to_windows_to_d[alignment]) + "\n"
s += "\n"
s += "Final Overall D value {0}".format(l_stat) + "\n"
s += "Significant deviation from 0: {0}".format(significant) + "\n"
n += "Final Overall D value {0}".format(l_stat) + "\n"
n += "Significant deviation from 0: {0}".format(significant) + "\n"
print s
else:
for alignment in alignments_to_d_resized:
l_stat_r, significant_r = alignments_to_d_resized[alignment]
l_stat_pc, significant_pc = alignments_to_d_pattern_coeff[alignment]
l_stat_oc, significant_oc = alignments_to_d_ovr_coeff[alignment]
s += "\n"
s += alignment + ": " + "\n"
s += "\n"
s += "Windows to D value: " + str(alignments_to_windows_to_d[alignment]) + "\n"
s += "\n"
s += "Final Overall D value using Block Resizing Method: {0}".format(l_stat_r) + "\n"
s += "Significant deviation from 0: {0}".format(significant_r) + "\n"
s += "\n"
s += "Final Overall D value using Pattern Coefficient Method: {0}".format(l_stat_pc) + "\n"
s += "Significant deviation from 0: {0}".format(significant_pc) + "\n"
s += "\n"
s += "Final Overall D value using Overall Coefficient Method: {0}".format(l_stat_oc) + "\n"
s += "Significant deviation from 0: {0}".format(significant_oc) + "\n"
print s
if plot:
plot_formatting((alignments_to_d_resized, alignments_to_windows_to_d), plot, meta)
return alignments_to_d_resized, alignments_to_windows_to_d, n, s
def display_alignment_info(alignments_to_d_resized, alignments_to_d_pattern_coeff, alignments_to_d_ovr_coeff):
"""
Print information for an alignment to D mapping
Inputs:
alignments_to_d_resized --- a mapping of alignment files to their D information using block resizing
alignments_to_d_pattern_coeff --- a mapping of alignment files to their D information using pattern coefficient
    alignments_to_d_ovr_coeff --- a mapping of alignment files to their D information using overall coefficient
Output:
s --- the output string
"""
s = ""
n = ""
for alignment in alignments_to_d_resized:
# Get the information for each alignment file
l_stat, significant, left_counts_res, right_counts_res, num_ignored, chisq, pval = alignments_to_d_resized[alignment]
output_resized = [("Final Overall D value using Block Resizing method: ", l_stat),
("Significant deviation from 0: ", significant),
("Overall p value: ", pval),
("Overall Chi-Squared statistic: ", chisq),
("", ""),
("Number of site ignored due to \"N\" or \"-\": ", num_ignored)]
l_stat, significant, left_counts_pcoeff, right_counts, num_ignored, chisq, pval = alignments_to_d_pattern_coeff[alignment]
output_pattern_coeff = [("Final Overall D value using Pattern Weighting method: ", l_stat),
("Significant deviation from 0: ", significant),
("Overall p value: ", pval),
("Overall Chi-Squared statistic: ", chisq),
("", ""),
("Number of site ignored due to \"N\" or \"-\": ", num_ignored)]
l_stat, significant, left_counts_ocoeff, right_counts, num_ignored, chisq, pval, coeff = alignments_to_d_ovr_coeff[alignment]
output_overall_coeff = [("Final Overall D value using Overall Weighting method: ", l_stat),
("Significant deviation from 0: ", significant),
("Overall p value: ", pval),
("Overall Chi-Squared statistic: ", chisq),
("", ""),
("Number of site ignored due to \"N\" or \"-\": ", num_ignored)]
# Create the output string
s += "\n"
s += "\n"
s += alignment + ": "
s += "\n"
n += "\n" + "\n" + alignment + ": " + "\n"
# Print output for resizing method
for output in output_resized:
s += str(output[0]) + str(output[1]) + "\n"
n += str(output[0]) + str(output[1]) + "\n"
s += "Left term counts: " + "\n"
for pattern in left_counts_res:
s += pattern + ": {0}".format(left_counts_res[pattern]) + "\n"
s += "\n"
s += "Right term counts: " + "\n"
for pattern in right_counts_res:
s += pattern + ": {0}".format(right_counts_res[pattern]) + "\n"
s += "\n"
s += "\n"
# Print output for pattern coefficient method
for output in output_pattern_coeff:
s += str(output[0]) + str(output[1]) + "\n"
s += "Left term counts weighted by pattern probability: " + "\n"
for pattern in left_counts_pcoeff:
s += pattern + ": {0}".format(left_counts_pcoeff[pattern]) + "\n"
s += "\n"
s += "Right term counts: " + "\n"
for pattern in right_counts:
s += pattern + ": {0}".format(right_counts[pattern]) + "\n"
s += "\n"
s += "\n"
# Print output for overall coefficient method
for output in output_overall_coeff:
s += str(output[0]) + str(output[1]) + "\n"
s += "Overall Coefficient for weighting: {0}".format(coeff) + "\n"
s += "Left term counts after weighting: " + "\n"
for pattern in left_counts_ocoeff:
s += pattern + ": {0}".format(left_counts_ocoeff[pattern] * coeff) + "\n"
s += "\n"
s += "Right term counts: " + "\n"
for pattern in right_counts:
s += pattern + ": {0}".format(right_counts[pattern]) + "\n"
return s
def plot_formatting(info_tuple, name, meta):
"""
Reformats and writes the dictionary output to a text file to make plotting it in Excel easy
    Inputs:
    info_tuple --- a tuple from the calculate_generalized output
    name --- the base name used for the output text file
    meta --- a string of metadata written alongside each D value
"""
alignments_to_d, alignments_to_windows_to_d = info_tuple
num = 0
file_name = "{0}_{1}.txt".format(name, num)
while os.path.exists(file_name):
num += 1
file_name = "{0}_{1}.txt".format(name, num)
with open(file_name, "w") as text_file:
for alignment in alignments_to_d:
l_stat, significant = alignments_to_d[alignment][0], alignments_to_d[alignment][1]
significant = str(significant).upper()
windows_to_l = alignments_to_windows_to_d[alignment]
output_str = "{0}, {1}, {2} \n".format(l_stat, meta, significant)
text_file.write(output_str)
text_file.close()
if __name__ == '__main__':
r = [('P3', 'P2')]
species_tree = '(((P1,P2),P3),O);'
# species_tree = '((P1,P2),(P3,O));'
# species_tree = '(((P1,P2),(P3,P4)),O);' # DFOIL tree
# species_tree = '((((P1,P2),P3),P4),O);' # Smallest asymmetrical tree
# species_tree = '(((P1,P2),(P3,(P4,P5))),O);'
# n = '((P2,(P1,P3)),O);'
# n = '(((P1,P3),P2),O);'
# n = '((P1,(P2,(P3,P4))),O);'
# t = ["P1", "P2", "P3", "P4", "O"]
# o = "O"
# print site_pattern_generator(t, n, o, False)
if platform == "darwin":
alignments = ["/Users/Peter/PycharmProjects/ALPHA/exampleFiles/seqfile.txt"]
else:
alignments = ["C:\\Users\\travi\\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim2\\seqfile.txt"]
# alignments = ["C:\\Users\\travi\\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim5\\seqfile",
# "C:\\Users\\travi\\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim7\\seqfile",
# "C:\\Users\\travi\\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim4\\seqfile",
# "C:\\Users\\travi\\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim6\\seqfile",
# "C:\\Users\\travi\\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim8\\seqfile"]
print calculate_generalized(alignments, species_tree, r, "O", 500000, 500000,
alpha=0.01, verbose=False, use_inv=False)
# alignments = ["C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames"]
#
#
# species_tree, r = '((((P1,P4),P3),P2),O);', [('P3', 'P2'),('P1', 'P2')]
#
# # 3 to 2
# calculate_generalized( ['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P3', 'P2')], 50000, 50000, True, save=True, f='stat_6tax_sub_3to2.txt')
# print "done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized( ['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P3', 'P2')], 50000, 50000, True, save=True, f='stat_inv_6tax_sub_3to2.txt', use_inv=True)
# print "done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# # 4 to 3
# calculate_generalized( ['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P4', 'P3')], 50000, 50000, True, save=True, f='stat_6tax_sub_4to3.txt')
# print "done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized( ['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P4', 'P3')], 50000, 50000, True, save=True, f='stat_inv_6tax_sub_4to3.txt', use_inv=True)
# print "done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# # both
# calculate_generalized(['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P3', 'P2'),('P4', 'P3')], 50000, 50000, True, save=True, f='stat_6tax_sub_3to2_4to3.txt')
# print "done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized(['C:\\Users\\travi\\Desktop\\390 Errors\\seqfileNames'], '(((P5,P6),((P1,P2),P3)),P4);', [('P3', 'P2'),('P4', 'P3')], 50000, 50000, True, save=True, f='stat_inv_6tax_sub_3to2_4to3.txt', use_inv=True)
# print "done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
# species_tree, r = "(((P5,P6),((P1,P2),P3)),P4);", [('P3', 'P2')]
# alignments = ["C:\\Users\\travi\\Desktop\\MosquitoConcat.phylip.txt"]
# species_tree, r = '((C,G),(((A,Q),L),R));', [('Q', 'G')]
# print calculate_generalized(alignments, 500000, 500000, statistic="C:\\Users\\travi\\Desktop\\stat_mosqSubset.txt", alpha=0.01, verbose=False, use_inv=False)
# alignments = ["C:\\Users\\travi\\Desktop\\MosquitoConcat.phylip.txt"]
# alignments = ["C:\\Users\\travi\\Desktop\\3L\\3L\\3L.41960870.634.fa.phylip"]
#
# calculate_generalized(alignments , '((C,G),(((A,Q),L),R));', [('Q', 'G')], 50000, 50000, True, save=True, f='stat_QuaToGam.txt')
# print "Q to G done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized(alignments, '((C,G),(((A,Q),L),R));', [('Q', 'G')], 50000, 50000, True, save=True, f='stat_inv_QuaToGam.txt', use_inv=True)
# print "Q to G done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# # next generate Q to R, the bottom right one in dingqiaos fig
# calculate_generalized(alignments , '((C,G),(((A,Q),L),R));', [('Q', 'R')], 50000, 50000, True, save=True, f='stat_QuaToMer.txt')
# print "Q to R done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized(alignments , '((C,G),(((A,Q),L),R));', [('Q', 'R')], 50000, 50000, True, save=True, f='stat_inv_QuaToMer.txt', use_inv=True)
# print "Q to R done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# # last generate L to R, the top right one in dingqiaos fig
# calculate_generalized(alignments, '((C,G),(((A,Q),L),R));', [('L', 'R')], 50000, 50000, True, save=True, f='stat_MelToMer.txt')
# print "L to R done"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
#
# calculate_generalized(alignments , '((C,G),(((A,Q),L),R));', [('L', 'R')], 50000, 50000, True, save=True, f='stat_inv_MelToMer.txt', use_inv=True)
# print "L to R done with inverses"
# playsound.playsound("C:\\Users\\travi\\Downloads\\app-5.mp3")
# print calculate_generalized(alignments, species_tree, r, 50000, 50000, alpha=0.01, statistic=False, save=False,
# verbose=True, use_inv=False)
# s = "C:\\Users\\travi\\Documents\\ALPHA\\CommandLineFiles\\DGenStatistic_85.txt"
# print calculate_generalized(alignments, species_tree, r, 50000, 50000, alpha=0.01, statistic=s,
# verbose=True, use_inv=False)
# print calculate_generalized(alignments, species_tree, r, 50000, 50000, alpha=0.01, statistic=False, save=False,
# verbose=True, use_inv=False)
# print calculate_generalized(alignments, species_tree, r, 50000, 50000, alpha=0.01, statistic=False, save=False,
# verbose=True, use_inv=False)
# calculate_generalized(alignments, species_tree, r, 500000, 500000, True, 0.01, statistic=False, save=True, f="C:\\Users\\travi\\Documents\\ALPHA\\ABBABABATest2")
# print calculate_generalized(alignments, species_tree, r, 50000, 50000, alpha=0.01, statistic="C:\\Users\\travi\\Documents\\ALPHA\\ABBABABATest2.txt", verbose=True)
#
# save_file = "C:\\Users\\travi\\Documents\\ALPHA\\CommandLineFiles\\DGenStatistic_11.txt"
# plot_formatting(calculate_generalized(alignments, statistic=save_file, verbose=True))
# python -c"from CalculateGeneralizedDStatistic import *; plot_formatting(calculate_generalized('C:\\Users\\travi\\Desktop\\seqfileNamed', '(((P1,P2),(P3,P4)),O);', [('P1', 'P3')], 100000, 100000, True, 0.01), True)"
# Uncomment this to speed up 6 taxa debugging
# trees_of_interest = set(['BBABBA', 'ABBBAA', 'BABBBA', 'ABBABA', 'ABBBBA', 'AAABAA', 'ABAABA', 'BBBABA', 'BABABA', 'ABBAAA',
# 'BAAABA', 'ABABAA', 'BABBAA', 'BAAAAA', 'BBBBAA', 'ABABBA', 'BAABBA', 'AABAAA', 'BAABAA', 'BABAAA',
# 'ABAAAA', 'AAAABA'])
# trees_to_equality = {'BBABBA': set(['BBABBA', 'AAABAA', 'BBBBAA', 'BBBABA', 'AABAAA', 'AAAABA']),
# 'ABBBAA': set(['BABAAA', 'ABBBAA', 'ABBABA', 'ABABBA', 'BAABAA', 'BAAABA']),
# 'BABBBA': set(['BABBBA', 'ABAAAA', 'ABBBBA', 'BAAAAA']),
# 'AABBAA': set(['AABABA', 'BBABAA', 'AABBAA', 'BBAABA']),
# 'AAABAA': set(['BBABBA', 'AAABAA', 'BBBBAA', 'BBBABA', 'AABAAA', 'AAAABA']),
# 'BBBABA': set(['BBABBA', 'AAABAA', 'BBBBAA', 'BBBABA', 'AABAAA', 'AAAABA']),
# 'ABBAAA': set(['ABABAA', 'ABAABA', 'BABABA', 'BAABBA', 'ABBAAA', 'BABBAA']),
# 'BBAABA': set(['AABABA', 'BBABAA', 'AABBAA', 'BBAABA']),
# 'BABAAA': set(['BABAAA', 'ABBBAA', 'ABBABA', 'ABABBA', 'BAABAA', 'BAAABA']),
# 'BAAAAA': set(['BABBBA', 'ABAAAA', 'ABBBBA', 'BAAAAA']),
# 'AABABA': set(['AABABA', 'BBABAA', 'AABBAA', 'BBAABA']),
# 'BBBBAA': set(['BBABBA', 'AAABAA', 'BBBBAA', 'BBBABA', 'AABAAA', 'AAAABA']),
# 'ABABBA': set(['BABAAA', 'ABBBAA', 'ABBABA', 'ABABBA', 'BAABAA', 'BAAABA']),
# 'BAABAA': set(['BABAAA', 'ABBBAA', 'ABBABA', 'ABABBA', 'BAABAA', 'BAAABA']),
# 'BABBAA': set(['ABABAA', 'ABAABA', 'BABABA', 'BAABBA', 'ABBAAA', 'BABBAA']),
# 'AAAABA': set(['BBABBA', 'AAABAA', 'BBBBAA', 'BBBABA', 'AABAAA', 'AAAABA']),
# 'AABBBA': set(['BBAAAA', 'AABBBA']),
# 'ABAABA': set(['ABABAA', 'ABAABA', 'BABABA', 'BAABBA', 'ABBAAA', 'BABBAA']),
# 'ABBBBA': set(['BABBBA', 'ABAAAA', 'ABBBBA', 'BAAAAA']), 'BBBAAA': set(['AAABBA', 'BBBAAA']),
# 'ABBABA': set(['BABAAA', 'ABBBAA', 'ABBABA', 'ABABBA', 'BAABAA', 'BAAABA']),
# 'BBABAA': set(['AABABA', 'BBABAA', 'AABBAA', 'BBAABA']), 'AAABBA': set(['AAABBA', 'BBBAAA']),
# 'BAAABA': set(['BABAAA', 'ABBBAA', 'ABBABA', 'ABABBA', 'BAABAA', 'BAAABA']),
# 'BBAAAA': set(['BBAAAA', 'AABBBA']),
# 'ABABAA': set(['ABABAA', 'ABAABA', 'BABABA', 'BAABBA', 'ABBAAA', 'BABBAA']),
# 'BABABA': set(['ABABAA', 'ABAABA', 'BABABA', 'BAABBA', 'ABBAAA', 'BABBAA']),
# 'BAABBA': set(['ABABAA', 'ABAABA', 'BABABA', 'BAABBA', 'ABBAAA', 'BABBAA']),
# 'AABAAA': set(['BBABBA', 'AAABAA', 'BBBBAA', 'BBBABA', 'AABAAA', 'AAAABA']),
# 'ABAAAA': set(['BABBBA', 'ABAAAA', 'ABBBBA', 'BAAAAA'])}
# patterns_pgN = {'BBABBA': 0.032771235848126294, 'ABBBAA': 0.02098066450471356, 'BABBBA': 0.161652195191427,
# 'AABBAA': 0.03153707911255491, 'AAABAA': 0.1777623151093396, 'BBBABA': 0.1777623151093396,
# 'ABBAAA': 0.014809880826856624, 'BBAABA': 0.03153707911255491, 'BABAAA': 0.63719275136487,
# 'BAAAAA': 0.016661115930213705, 'AABABA': 0.03153707911255492, 'BBBBAA': 0.1777623151093396,
# 'ABABBA': 0.63719275136487, 'BAABAA': 0.02098066450471356, 'BABBAA': 0.15980096008806993,
# 'AAAABA': 0.1777623151093396, 'AABBBA': 0.08944867415584207, 'ABAABA': 0.15980096008806993,
# 'ABBBBA': 0.016661115930213705, 'BBBAAA': 0.2180376149041211, 'ABBABA': 0.02098066450471356,
# 'BBABAA': 0.03153707911255492, 'AAABBA': 0.2180376149041211, 'BAAABA': 0.02098066450471356,
# 'BBAAAA': 0.08944867415584207, 'ABABAA': 0.15980096008806996, 'BABABA': 0.15980096008806996,
# 'BAABBA': 0.014809880826856624, 'AABAAA': 0.032771235848126294, 'ABAAAA': 0.161652195191427}
# patterns_pgS = {'BBABBA': 0.11019080921752063, 'ABBBAA': 0.037037004438901525, 'BABBBA': 0.029411738819127668,
# 'AABBAA': 0.10801216189758524, 'AAABAA': 0.11019080921752065, 'BBBABA': 0.11019080921752065,
# 'ABBAAA': 0.026143767839224594, 'BBAABA': 0.10801216189758524, 'BABAAA': 0.03703700443890152,
# 'BAAAAA': 0.029411738819127668, 'AABABA': 0.10801216189758527, 'BBBBAA': 0.11019080921752064,
# 'ABABBA': 0.03703700443890152, 'BAABAA': 0.03703700443890151, 'BABBAA': 0.026143767839224594,
# 'AAAABA': 0.11019080921752064, 'AABBBA': 0.38034805363207147, 'ABAABA': 0.026143767839224594,
# 'ABBBBA': 0.029411738819127668, 'BBBAAA': 0.1303855768171189, 'ABBABA': 0.03703700443890151,
# 'BBABAA': 0.10801216189758527, 'AAABBA': 0.1303855768171189, 'BAAABA': 0.037037004438901525,
# 'BBAAAA': 0.38034805363207147, 'ABABAA': 0.026143767839224594, 'BABABA': 0.026143767839224594,
# 'BAABBA': 0.026143767839224594, 'AABAAA': 0.11019080921752063, 'ABAAAA': 0.029411738819127668}
# Debug for CL introgression file
# trees_of_interest = set(['ABBBAA', 'ABAABA', 'AABBAA', 'BBBAAA', 'ABBABA', 'BBABAA', 'BABABA', 'AAABBA', 'BBAABA', 'BAAABA', 'ABABAA',
# 'AABABA', 'ABABBA', 'BAABBA', 'ABBAAA', 'BAABAA', 'BABAAA', 'BABBAA'])
# trees_to_equality = {'BBABBA': set(['BBABBA']), 'ABBBAA': set(['ABBBAA', 'ABBABA', 'ABABBA', 'BAABBA', 'BABABA', 'BABBAA']),
# 'BABBBA': set(['BABBBA']), 'AABBAA': set(['AABABA', 'AAABBA', 'AABBAA']), 'BBBABA': set(['BBBABA']),
# 'ABBAAA': set(['ABABAA', 'ABBAAA', 'ABAABA']), 'BBAABA': set(['BBBAAA', 'BBABAA', 'BBAABA']),
# 'BABAAA': set(['BABAAA', 'BAAABA', 'BAABAA']), 'AABABA': set(['AABABA', 'AAABBA', 'AABBAA']),
# 'BBBBAA': set(['BBBBAA']), 'ABABBA': set(['ABBBAA', 'ABBABA', 'ABABBA', 'BAABBA', 'BABABA', 'BABBAA']),
# 'BAABAA': set(['BABAAA', 'BAAABA', 'BAABAA']),
# 'BABBAA': set(['ABBBAA', 'ABBABA', 'ABABBA', 'BAABBA', 'BABABA', 'BABBAA']), 'AABBBA': set(['AABBBA']),
# 'ABAABA': set(['ABABAA', 'ABBAAA', 'ABAABA']), 'ABBBBA': set(['ABBBBA']),
# 'BBBAAA': set(['BBBAAA', 'BBABAA', 'BBAABA']),
# 'ABBABA': set(['ABBBAA', 'ABBABA', 'ABABBA', 'BAABBA', 'BABABA', 'BABBAA']),
# 'BBABAA': set(['BBBAAA', 'BBABAA', 'BBAABA']), 'AAABBA': set(['AABABA', 'AAABBA', 'AABBAA']),
# 'BAAABA': set(['BABAAA', 'BAAABA', 'BAABAA']), 'BBAAAA': set(['BBAAAA']),
# 'ABABAA': set(['ABABAA', 'ABBAAA', 'ABAABA']),
# 'BABABA': set(['ABBBAA', 'ABBABA', 'ABABBA', 'BAABBA', 'BABABA', 'BABBAA']),
# 'BAABBA': set(['ABBBAA', 'ABBABA', 'ABABBA', 'BAABBA', 'BABABA', 'BABBAA'])}
# patterns_pgN = {'BBABBA': 0.25178403007053934, 'ABBBAA': 0.00925617551678539, 'BABBBA': 0.14956960525299257,
# 'AABBAA': 0.011432470392906461, 'BBBABA': 0.1888697257294821, 'ABBAAA': 0.006170783677856928,
# 'BBAABA': 0.025366295434697986, 'BABAAA': 0.22118908909853413, 'AABABA': 0.011432470392906461,
# 'BBBBAA': 0.2346987308343348, 'ABABBA': 0.11799948496269537, 'BAABAA': 0.0037024702067141564,
# 'BABBAA': 0.1542472547779987, 'AABBBA': 0.02133876545521984, 'ABAABA': 0.042418553493160246,
# 'ABBBBA': 0.011107410620142468, 'BBBAAA': 0.1703573746959113, 'ABBABA': 0.00925617551678539,
# 'BBABAA': 0.025366295434697986, 'AAABBA': 0.04768024020820978, 'BAAABA': 0.0037024702067141564,
# 'BBAAAA': 0.027867650083583044, 'ABABAA': 0.04241855349316025, 'BABABA': 0.15424725477799872,
# 'BAABBA': 0.00925617551678539}
# patterns_pgS = {'BBABBA': 0.09979995455763162, 'ABBBAA': 0.016339854899515373, 'BABBBA': 0.15058034441671714,
# 'AABBAA': 0.03326665151921054, 'BBBABA': 0.12979863509693912, 'ABBAAA': 0.010893236599676915,
# 'BBAABA': 0.09711892529790832, 'BABAAA': 0.006535941959806149, 'AABABA': 0.03326665151921054,
# 'BBBBAA': 0.15979731563624658, 'ABABBA': 0.016339854899515373, 'BAABAA': 0.006535941959806149,
# 'BABBAA': 0.016339854899515373, 'AABBBA': 0.0769241576983101, 'ABAABA': 0.010893236599676915,
# 'ABBBBA': 0.019607825879418447, 'BBBAAA': 0.09711892529790833, 'ABBABA': 0.016339854899515373,
# 'BBABAA': 0.09711892529790832, 'AAABBA': 0.03326665151921054, 'BAAABA': 0.006535941959806149,
# 'BBAAAA': 0.12770454755739558, 'ABABAA': 0.010893236599676915, 'BABABA': 0.016339854899515373,
# 'BAABBA': 0.016339854899515373}
| chilleo/ALPHA | CommandLineFiles/RunDGEN.py | Python | mit | 100,444 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Configuration for Invenio-Formatter."""
from __future__ import absolute_import, print_function
FORMATTER_BADGES_ALLOWED_TITLES = ['DOI']
"""List of allowed titles in badges."""
FORMATTER_BADGES_TITLE_MAPPING = {}
"""Mapping of titles."""
| tiborsimko/invenio-formatter | invenio_formatter/config.py | Python | mit | 479 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
test_data = [
{
"Transaction" : {
"transaction_date" : "2015-01-08",
"amount" : -1286.75,
"security_amount" : 4.0726,
"security_rate" : 413.68
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #1"
},
"TransactionType" : {
"name" : "Sälj"
},
"TransactionData" : {
"ISIN" : "SE0000000001",
"courtage" : 10.50
}
},
{
"Transaction" : {
"transaction_date" : "2015-01-07",
"amount" : -1329.5,
"security_amount" : 15.1663,
"security_rate" : 222.17
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #2"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000002",
"courtage" : 20
}
},
{
"Transaction" : {
"transaction_date" : "2015-01-07",
"amount" : -682.61,
"security_amount" : 0.8534,
"security_rate" : 1974
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #3"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000003",
"courtage" : 30.50
}
},
{
"Transaction" : {
"transaction_date" : "2015-01-05",
"amount" : 2728.8,
"security_amount" : None,
"security_rate" : None
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Insättning Januari"
},
"TransactionType" : {
"name" : "Insättning"
},
"TransactionData" : {
"ISIN" : "SE0000000004",
"courtage" : 40
}
},
{
"Transaction" : {
"transaction_date" : "2014-12-08",
"amount" : -1144.98,
"security_amount" : 5.1423,
"security_rate" : 222.66
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #2"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000005",
"courtage" : 50.50
}
},
{
"Transaction" : {
"transaction_date" : "2014-11-26",
"amount" : 2145.42,
"security_amount" : None,
"security_rate" : None
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Insättning November"
},
"TransactionType" : {
"name" : "Insättning"
},
"TransactionData" : {
"ISIN" : "SE0000000006",
"courtage" : 60
}
},
{
"Transaction" : {
"transaction_date" : "2014-10-29",
"amount" : -863.81,
"security_amount" : 16.2254,
"security_rate" : 114.87
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #3"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000007",
"courtage" : 70.50
}
},
{
"Transaction" : {
"transaction_date" : "2014-10-28",
"amount" : -862.99,
"security_amount" : 8.7321,
"security_rate" : 213.35
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #2"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE0000000008",
"courtage" : 80
}
},
{
"Transaction" : {
"transaction_date" : "2014-10-27",
"amount" : 2826.80,
"security_amount" : None,
"security_rate" : None
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Insättning Oktober"
},
"TransactionType" : {
"name" : "Insättning"
},
"TransactionData" : {
"ISIN" : "SE0000000009",
"courtage" : 90.50
}
},
{
"Transaction" : {
"transaction_date" : "2014-10-02",
"amount" : -10218.04,
"security_amount" : 149.8263,
"security_rate" : 114.92
},
"Account" : {
"name" : "Spar Aktie"
},
"Currency" : {
"code" : "SEK"
},
"SecurityProvider" : {
"name" : "Aktiefond #1"
},
"TransactionType" : {
"name" : "Köp"
},
"TransactionData" : {
"ISIN" : "SE00000000010",
"courtage" : 100
}
},
] | nilsFK/py-privatekonomi | py_privatekonomi/tests/dataset/avanza/sample1.py | Python | mit | 6,015 |
import logging
from marshmallow import ValidationError, post_load
from marshmallow_jsonapi import Schema, fields
from timeswitch.auth.dao import User
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
LOGGER = logging.getLogger(__name__)
class AppError(Exception):
pass
def dasherize(text):
return text.replace('_', '-')
class UserSchema(Schema):
id = fields.String(dump_only=True, required=True)
name = fields.String(required=True)
password = fields.String(load_only=True, required=False, attribute="password_clear")
new_password = fields.String(load_only=True, required=False)
email = fields.Email(required=False)
last_loggin = fields.String(required=False)
privilege = fields.String(required=False)
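    # marshmallow runs this hook after deserialization, so loading a payload
    # produces a User instance rather than a plain dict of fields.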
@post_load
def make_user(self, data):
return User(**data)
def handle_error(self, exc, data):
raise ValidationError('An error occurred with input: {0} \n {1}'.format(data, str(exc)))
class Meta:
type_ = 'users'
# inflect = dasherize
| weichweich/Pi-Timeswitch | Flask-Server/timeswitch/auth/schema.py | Python | mit | 1,110 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from random import shuffle
class Carta():
def __init__(self, numero, naipe):
self.numero = numero
self.naipe = naipe
def __repr__(self):
return '%s de %s' % (self.numero, self.naipe)
class Baralho():
def __init__(self):
self._cartas = [Carta(numero, naipe) for numero in 'As 1 2 3 4 5 6 7 8 9 10 Q J K'.split()
for naipe in 'Ouros Espadas Copas Paus'.split()]
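    # Implementing __getitem__, __setitem__ and __len__ makes Baralho behave
    # like a mutable sequence, so indexing, iteration and random.shuffle work.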
def __getitem__(self, index):
return self._cartas[index]
def __setitem__(self, key, value):
self._cartas[key] = value
def __len__(self):
return len(self._cartas)
print Carta('As', 'Paus')
baralho = Baralho()
baralho[55] = Carta('As', 'Paus')
shuffle(baralho)
for carta in baralho:
print carta
print baralho[0]
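# Vetor overloads + via __add__ and == via __eq__ so vectors can be added and
# compared with the usual operators.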
class Vetor():
def __init__(self, x, y):
self.y = y
self.x = x
def __repr__(self):
return '(%s, %s)' % (self.x, self.y)
def __add__(self, other):
return Vetor(self.x + other.x, self.y + other.y)
def __eq__(self, other):
return self.x==other.x and self.y==other.y
vetor1 = Vetor(1, 1)
vetor2 = Vetor(1, 1)
print vetor1 + vetor2
print vetor1 == vetor2 | renzon/fatec-script | backend/appengine/pythonicos.py | Python | mit | 1,283 |
__all__ = ("settings", "urls", "wsgi")
__version__ = "0.159.0"
| lopopolo/hyperbola | hyperbola/__init__.py | Python | mit | 63 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from os import path
current_dir = path.dirname(__file__)
sys.path.insert(0, path.join(path.dirname(current_dir), 'wdom'))
| miyakogi/livemark | livemark/__init__.py | Python | mit | 182 |
import sys
import time
from pprint import pprint
import telepot
from telepot.namedtuple import StickerSet
TOKEN = sys.argv[1]
USER_ID = long(sys.argv[2])
STICKER_SET = sys.argv[3]
bot = telepot.Bot(TOKEN)
f = bot.uploadStickerFile(USER_ID, open('gandhi.png', 'rb'))
print 'Uploaded Gandhi'
bot.addStickerToSet(USER_ID, STICKER_SET, f['file_id'], u'\U0001f60a')
bot.addStickerToSet(USER_ID, STICKER_SET, open('lincoln.png', 'rb'), u'\U0001f60a')
print 'Added Gandhi and Lincoln to set'
s = bot.getStickerSet(STICKER_SET)
pprint(s)
ss = StickerSet(**s)
for s in ss.stickers:
bot.deleteStickerFromSet(s.file_id)
print 'Deleted', s.file_id
time.sleep(3) # throttle
s = bot.getStickerSet(STICKER_SET)
pprint(s)
| nickoala/telepot | test/test27_sticker.py | Python | mit | 727 |
from __future__ import unicode_literals
import unittest
from ship.datastructures import rowdatacollection as rdc
from ship.datastructures import dataobject as do
from ship.fmp.datunits import ROW_DATA_TYPES as rdt
class RowDataCollectionTests(unittest.TestCase):
def setUp(self):
# Create some object to use and add a couple of rows
# create chainage in position 1
self.obj1 = do.FloatData(rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3)
self.obj1.data_collection.append(0.00)
self.obj1.data_collection.append(3.65)
# Create elevation in position 2
self.obj2 = do.FloatData(rdt.ELEVATION, format_str='{:>10}', default=None, no_of_dps=3)
self.obj2.data_collection.append(32.345)
self.obj2.data_collection.append(33.45)
# Create roughness in position 3
self.obj3 = do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=None, no_of_dps=3)
self.obj3.data_collection.append(0.035)
self.obj3.data_collection.append(0.035)
self.testcol = rdc.RowDataCollection()
self.testcol._collection.append(self.obj1)
self.testcol._collection.append(self.obj2)
self.testcol._collection.append(self.obj3)
def test_initCollection(self):
'''
'''
# Create a dummy collection
obj1 = do.FloatData(rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3)
obj2 = do.FloatData(rdt.ELEVATION, format_str='{:>10}', default=None, no_of_dps=3)
obj3 = do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3)
localcol = rdc.RowDataCollection()
localcol._collection.append(obj1)
localcol._collection.append(obj2)
localcol._collection.append(obj3)
        # Initialise a real collection
col = rdc.RowDataCollection()
col.addToCollection(do.FloatData(rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ELEVATION, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3))
# Check that they're the same
col_eq, msg = self.checkCollectionEqual(localcol, col)
self.assertTrue(col_eq, 'rdc.RowDataCollection initialisation fail - ' + msg)
def test_bulkInitCollection(self):
objs = [
do.FloatData(rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3),
do.FloatData(rdt.ELEVATION, format_str='{:>10}', default=None, no_of_dps=3),
do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3),
]
col = rdc.RowDataCollection.bulkInitCollection(objs)
localcol = rdc.RowDataCollection()
localcol._collection.append(objs[0])
localcol._collection.append(objs[1])
localcol._collection.append(objs[2])
# Check they're the same
col_eq, msg = self.checkCollectionEqual(localcol, col)
self.assertTrue(col_eq, 'rdc.RowDataCollection initialisation fail - ' + msg)
def checkCollectionEqual(self, c1, c2):
'''Check the two given collections to make sure that they contain the same data.
@param c1: First rdc.RowDataCollection object
@param c2: Second rdc.RowDataCollection object
@return: True if they're equal False and reason if not.
'''
if not len(c1._collection) == len(c2._collection):
return False, 'Collections are different lengths'
for i in range(0, len(c1._collection)):
if not c1._collection[i].data_type == c2._collection[i].data_type:
return False, 'Collections have different data_types'
if not c1._collection[i].format_str == c2._collection[i].format_str:
return False, 'Collections have different format_str'
if not c1._collection[i].default == c2._collection[i].default:
return False, 'Collections have different default'
for j in range(0, len(c1._collection[i].data_collection)):
                if not c1._collection[i].data_collection[j] == c2._collection[i].data_collection[j]:
return False, 'Collections have different data'
return True, ''
def test_indexOfDataObject(self):
"""Should return the corrent index of a particular ADataObject in colleciton."""
index1 = self.testcol.indexOfDataObject(rdt.CHAINAGE)
index2 = self.testcol.indexOfDataObject(rdt.ELEVATION)
index3 = self.testcol.indexOfDataObject(rdt.ROUGHNESS)
self.assertEquals(index1, 0)
self.assertEquals(index2, 1)
self.assertEquals(index3, 2)
def test_iterateRows(self):
"""Test generator for complete row as a list"""
testrows = [
[0.00, 32.345, 0.035],
[3.65, 33.45, 0.035],
]
i = 0
for row in self.testcol.iterateRows():
self.assertListEqual(row, testrows[i])
i += 1
def test_iterateRowsWithKey(self):
"""Test generator for a single DataObject"""
testrows = [
32.345,
33.45,
]
i = 0
for row in self.testcol.iterateRows(rdt.ELEVATION):
self.assertEqual(row, testrows[i])
i += 1
def test_rowAsDict(self):
"""Shoud return a row as a dict of single values."""
test_dict = {rdt.CHAINAGE: 0.00, rdt.ELEVATION: 32.345, rdt.ROUGHNESS: 0.035}
row = self.testcol.rowAsDict(0)
self.assertDictEqual(row, test_dict)
def test_rowAsList(self):
test_list = [0.00, 32.345, 0.035]
row = self.testcol.rowAsList(0)
self.assertListEqual(row, test_list)
def test_dataObject(self):
"""Should return the correct ADataObject."""
test_vals = [0.00, 3.65]
obj = self.testcol.dataObject(rdt.CHAINAGE)
self.assertEqual(obj.data_type, rdt.CHAINAGE)
for i, o in enumerate(obj):
self.assertEqual(o, test_vals[i])
def test_dataObjectAsList(self):
"""Should return the contents of a DataObject as a list."""
test_list = [0.00, 3.65]
obj_list = self.testcol.dataObjectAsList(rdt.CHAINAGE)
self.assertListEqual(obj_list, test_list)
def test_toList(self):
test_list = [
[0.00, 3.65],
[32.345, 33.45],
[0.035, 0.035]
]
row_list = self.testcol.toList()
self.assertListEqual(row_list, test_list)
def test_toDict(self):
test_dict = {
rdt.CHAINAGE: [0.00, 3.65],
rdt.ELEVATION: [32.345, 33.45],
rdt.ROUGHNESS: [0.035, 0.035],
}
row_dict = self.testcol.toDict()
self.assertDictEqual(row_dict, test_dict)
def test_addValue(self):
        # Initialise a real collection
col = rdc.RowDataCollection()
col.addToCollection(do.FloatData(rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ELEVATION, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3))
col._addValue(rdt.CHAINAGE, 2.5)
self.assertEqual(col._collection[0][0], 2.5)
def test_setValue(self):
        # Initialise a real collection
col = rdc.RowDataCollection()
col.addToCollection(do.FloatData(rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ELEVATION, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3))
col._collection[0].addValue(2.5)
self.assertEqual(col._collection[0][0], 2.5)
col._setValue(rdt.CHAINAGE, 3.5, 0)
self.assertEqual(col._collection[0][0], 3.5)
def test_getPrintableRow(self):
test_row = ' 0.000 32.345 0.035'
row = self.testcol.getPrintableRow(0)
self.assertEqual(row, test_row)
def test_updateRow(self):
new_row = {rdt.CHAINAGE: 0.1, rdt.ELEVATION: 40, rdt.ROUGHNESS: 0.06}
self.testcol.updateRow(new_row, 0)
row = self.testcol.rowAsDict(0)
self.assertDictEqual(row, new_row)
with self.assertRaises(IndexError):
self.testcol.updateRow(new_row, 3)
fake_row = {'fakekey': 4.3, 'andagain': 3454}
with self.assertRaises(KeyError):
self.testcol.updateRow(fake_row, 0)
def test_addRow(self):
        # Initialise a real collection
col = rdc.RowDataCollection()
col.addToCollection(do.FloatData(rdt.CHAINAGE, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ELEVATION, format_str='{:>10}', default=None, no_of_dps=3))
col.addToCollection(do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.0, no_of_dps=3))
new_row = {rdt.CHAINAGE: 3.0, rdt.ELEVATION: 41, rdt.ROUGHNESS: 0.06}
new_row2 = {rdt.CHAINAGE: 6.0, rdt.ELEVATION: 42, rdt.ROUGHNESS: 0.07}
new_row3 = {rdt.CHAINAGE: 10.0, rdt.ELEVATION: 43, rdt.ROUGHNESS: 0.08}
new_row4 = {rdt.CHAINAGE: 20.0, rdt.ELEVATION: 44, rdt.ROUGHNESS: 0.09}
# append and insert rows
col.addRow(new_row2)
col.addRow(new_row, 0)
# append and insert again
col.addRow(new_row4)
col.addRow(new_row3, 2)
row = col.rowAsDict(0)
row2 = col.rowAsDict(1)
row3 = col.rowAsDict(2)
row4 = col.rowAsDict(3)
self.assertDictEqual(row, new_row)
self.assertDictEqual(row2, new_row2)
fake_row = {59: 4.3}
with self.assertRaises(KeyError):
col.addRow(fake_row)
def test_numberOfRows(self):
self.assertEqual(self.testcol.numberOfRows(), 2)
def test_deleteRow(self):
test_list = [3.65, 33.45, 0.035]
self.testcol.deleteRow(0)
self.assertEqual(self.testcol.numberOfRows(), 1)
row = self.testcol.rowAsList(0)
self.assertListEqual(row, test_list)
| duncan-r/SHIP | tests/test_rowdatacollection.py | Python | mit | 10,219 |
"""
Django settings for busquecursos project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hb&=!izzysndvyjd_i@2pdx^d&px8ty%1g3#&%l$k))lpo(dvf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'website',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'busquecursos.urls'
WSGI_APPLICATION = 'busquecursos.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| ProfessionalIT/products | busquecursos/busquecursos/busquecursos/settings.py | Python | mit | 2,076 |
# -*- coding: utf-8 -*-
class PortalMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
return response
| WST/django-project-template | portal/middleware.py | Python | mit | 388 |
from snovault import upgrade_step
@upgrade_step('gene', '1', '2')
def gene_1_2(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-5005
# go_annotations are replaced by a link on UI to GO
value.pop('go_annotations', None)
@upgrade_step('gene', '2', '3')
def gene_2_3(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-6228
if value.get('locations') == []:
value.pop('locations', None)
| ENCODE-DCC/encoded | src/encoded/upgrade/gene.py | Python | mit | 437 |
#!/usr/bin/env python
import numpy as np
import os,sys
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import argparse
ap=argparse.ArgumentParser()
ap.add_argument('-vis') # 1 plot cropped point cloud
ap.add_argument('-refine') # 1 refine mesh
ap.add_argument('-clean') # 1 remove tmp files
if ap.parse_args().vis==None:
vis=0
else:
vis=int(ap.parse_args().vis)
if ap.parse_args().refine==None:
refine=0
else:
refine=int(ap.parse_args().refine)
if ap.parse_args().clean==None:
clean=0
else:
clean=int(ap.parse_args().clean)
# Synthetic fault pixels
z=np.linspace(.2, -.8, num=100)
y=np.linspace(-.625,.625, num=120)
grid=np.meshgrid(y,z)
x=np.zeros((len(z)*len(y),1),dtype=np.float)
dat_vert=np.hstack((x,grid[0].reshape(x.shape),grid[1].reshape(x.shape)))
# weak
wl=np.linspace(.12,.18,num=8); amp=.03125*np.sqrt(wl)
e=1.025; r=-.2
dip=70.; zcnt=-.35
omg=[ 0.82976173, 0.89624834, 0.03829284, -0.50016345, -1.06606012, 1.40505898, -1.24256034, 1.28623393]
#omg=(np.random.rand(wl.shape[0])-.5)*np.pi
L=dat_vert[1,:].max()-dat_vert[1,:].min()
zmax=z.max(); zmin=z.min()
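# Build the rough fault surface by superposing cosine corrugations: each
# wavelength adds amp*cos(phs), with the amplitude growing towards the bottom
# of the fault and decaying laterally through exp(r*|phs|/pi); the line after
# the loop then tilts the whole plane to the target dip about depth zcnt.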
for i in range(len(wl)):
phs=dat_vert[:,1]/wl[i]*np.pi+omg[i]
dat_vert[:,0]=dat_vert[:,0]+amp[i]*np.cos(phs)*(e*zmax-dat_vert[:,2])/(e*zmax-zmin)*np.exp(r*abs(phs)/np.pi)
dat_vert[:,0]=dat_vert[:,0]+(zcnt-dat_vert[:,2])*np.tan((90.-dip)/180.*np.pi)
# ridge patch
def flt_patch(dat_vert,slope1,slope2,trunc1,trunc2,hlw,hup):
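    """Add a cosine-tapered ridge bump to the fault surface.

    Vertices lying below both bounding lines (set by slope1/trunc1 and
    slope2/trunc2) are shifted in x by cos(r/R*pi/2)*h, where r is the
    distance from the patch centreline, R is the patch half-width at that
    depth, and h interpolates linearly from hlw at the bottom of the
    surface to hup at the top.
    """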
b1=-slope1*trunc1-.7
b2=-slope2*trunc2-.7
in_id=np.where(np.logical_and(dat_vert[:,2]-slope1*dat_vert[:,1]<b1, dat_vert[:,2]-slope2*dat_vert[:,1]<b2))[0]
out_id=np.setdiff1d(np.array(range(len(dat_vert)),dtype=np.int32),in_id)
x_shift=dat_vert[in_id,0]
# ridge patch
k=0
zup=dat_vert[:,2].max()
zlw=dat_vert[:,2].min()
for i in in_id:
r=abs(dat_vert[i,1]-.5*(trunc1+trunc2))
R=.5*((dat_vert[i,2]-b2)/slope2-(dat_vert[i,2]-b1)/slope1)
h=hlw+(dat_vert[i,2]-zlw)/(zup-zlw)*(hup-hlw)
x_shift[k]=x_shift[k]+np.cos(r/R*np.pi/2.)*h
k+=1
dat_vert=np.vstack((dat_vert[out_id,:],
np.hstack((x_shift.reshape(len(in_id),1),
dat_vert[in_id,1].reshape(len(in_id),1),
dat_vert[in_id,2].reshape(len(in_id),1)))))
return dat_vert
slope1=10.;slope2=-10.
trunc1=.1;trunc2=.6
hup=0.;hlw=.08
#dat_vert=flt_patch(dat_vert,slope1,slope2,trunc1,trunc2,hlw,hup)
print omg
fout='F3D_syn.xyz'
f=open(fout,'w+')
np.savetxt(f,dat_vert,delimiter=' ', fmt='%.6f '*3)
f.close()
from subprocess import call
fin=fout
fout=fout.rsplit('.')[0]+'.stl'
mxl='xyz2stl.mlx'
call(['meshlabserver', '-i',fin,'-o',fout,'-s',mxl])
if clean==1: os.remove(fin)
# Mesh
fin=fout
if refine==1:
fout=fout.rsplit('.')[0]+'_dns.exo'
else:
fout=fout.rsplit('.')[0]+'.exo'
jou='F3D_tet.jou'
txt_jou=open(jou,'r')
txt_jou_tmp=open('tmp.jou','w+')
hf=0.0025 # fault grid length (0.0025 for ~100 m tet model, 0.003 for ~40 m)
hm=0.0075 # matrix grid length (0.0075 for ~100 m tet model, 0.010 for ~40 m)
for line in txt_jou:
line=line.strip('\r\n')
if 'import' in line.lower():
line='import stl "'+fin+'"'
if 'export' in line.lower():
line='export mesh "'+fout+'" dimension 3 overwrite'
if 'surface 46 94 95 97 size' in line.lower():
line='surface 46 94 95 97 size %0.6f' %(2*hf)
if 'volume all size' in line.lower():
line='volume all size %0.6f' %(2*hm)
txt_jou_tmp.write(line+'\n')
if 'mesh volume all' in line.lower() and refine==1:
txt_jou_tmp.write('refine volume all\n')
txt_jou.close();txt_jou_tmp.close()
call(['trelis','-nojournal','-nographics','tmp.jou'])
if clean==1: os.remove('tmp.jou')
# Preprocessing msh=>inp
dt_dyn=2E-5 #1E-5 for dns 100 m tet model, 8E-5 for 40 m tet, 8E-4 for ~1 m tet
import F3D_msh2inp
_=F3D_msh2inp.msh2inp(fout,dt_dyn)
# Fault plot
if vis==1:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(dat_vert[:,0], dat_vert[:,1], dat_vert[:,2], c='b', marker='.')
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([np.max(dat_vert[:,0])-np.min(dat_vert[:,0]),np.max(dat_vert[:,1])\
-np.min(dat_vert[:,1]), np.max(dat_vert[:,2])-np.min(dat_vert[:,2])]).max()
Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten()
Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten()
Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten()
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w',)
plt.title('fault [km]')
plt.grid()
plt.show()
| Chunfang/defmod-swpc | example/F3Dp/F3D_syn.py | Python | mit | 4,626 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
"""oclubs filters."""
from __future__ import absolute_import
from oclubs.filters.resfilter import ResFilter, ResFilterConverter
from oclubs.filters.clubfilter import ClubFilter, ClubFilterConverter
from oclubs.filters.roomfilter import RoomFilter, RoomFilterConverter
__all__ = ['ResFilter', 'ResFilterConverter',
'ClubFilter', 'ClubFilterConverter',
'RoomFilter', 'RoomFilterConverter']
| SHSIDers/oclubs | oclubs/filters/__init__.py | Python | mit | 462 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-28 18:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('benchmark', '0008_benchmarkdefinition_commit_keyword_updated'),
]
operations = [
migrations.AlterField(
model_name='benchmarkexecutionentry',
name='status',
field=models.IntegerField(choices=[(0, b'Ready'), (1, b'In_Progress'), (2, b'Finished'), (3, b'Finished_With_Errors')], default=0),
),
]
| imvu/bluesteel | app/logic/benchmark/migrations/0009_auto_20160828_1131.py | Python | mit | 586 |
"""Creates beautiful visualizations of the publication database."""
import datetime
import sqlite3 as sql
import numpy as np
from astropy import log
from matplotlib import pyplot as plt
import matplotlib.patheffects as path_effects
import matplotlib as mpl
from matplotlib import style
import seaborn as sns
from kpub import PublicationDB
MISSIONS = ['k2']
SCIENCES = ['exoplanets', 'astrophysics']
output_fn = 'publications-per-year-k2.png'
db = PublicationDB()
first_year = 2014
barwidth = 0.75
extrapolate = True
current_year = datetime.datetime.now().year
palette = sns.color_palette(['#f1c40f', '#2980b9'])
style.use('../styles/black.mplstyle')
plt.rc('xtick.major', size=0)
plt.rc('ytick.major', size=0)
# Initialize a dictionary to contain the data to plot
counts = {}
for mission in MISSIONS:
counts[mission] = {}
for year in range(first_year, current_year + 1):
counts[mission][year] = 0
cur = db.con.execute("SELECT year, COUNT(*) FROM pubs "
"WHERE mission = ? "
"AND year >= '2014' "
"GROUP BY year;",
[mission])
rows = list(cur.fetchall())
for row in rows:
counts[mission][int(row[0])] = row[1]
# Now make the actual plot
fig = plt.figure(figsize=(8, 4.5))
ax = fig.add_subplot(111)
plt.bar(np.array(list(counts['k2'].keys())) - 0.5*barwidth,
counts['k2'].values(),
label='K2',
facecolor=palette[0],
edgecolor='black',
width=barwidth)
# Also plot the extrapolated prediction for the current year
if extrapolate:
now = datetime.datetime.now()
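    # "%-j" is the day of the year, so dividing by 365.2425 gives the fraction
    # of the year already elapsed; scaling the current count by
    # (1/fraction - 1) estimates the publications still to come this year.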
fraction_of_year_passed = float(now.strftime("%-j")) / 365.2425
current_total = (counts['k2'][current_year])
expected = (1/fraction_of_year_passed - 1) * current_total
plt.bar(current_year - 0.5*barwidth,
expected,
bottom=current_total,
label='Extrapolation',
facecolor='#34495e',
edgecolor='black',
width=barwidth)
# Aesthetics
plt.ylabel("Publications per year", fontsize=18)
ax.get_xaxis().get_major_formatter().set_useOffset(False)
plt.xticks(range(first_year - 1, current_year + 1), fontsize=18)
plt.yticks(range(0, 151, 50), fontsize=18)
plt.xlim([first_year - 0.75*barwidth, current_year + 0.75*barwidth])
"""
plt.legend(bbox_to_anchor=(0.1, 1),
loc='upper left',
ncol=3,
borderaxespad=0.,
handlelength=0.8,
frameon=False,
fontsize=18)
"""
# Disable spines
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
# Only show bottom and left ticks
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Only show horizontal grid lines
ax.grid(axis='y')
n_pub = sum(counts['k2'].values())
plt.suptitle("K2 Contributed to "
"{} Publications So Far".format(n_pub),
fontsize=22)
plt.tight_layout(rect=(0, 0, 1, 0.92), h_pad=1.5)
log.info("Writing {}".format(output_fn))
plt.savefig(output_fn)
plt.close()
| barentsen/exoplanet-charts | publication-stats/publications-per-year-k2.py | Python | mit | 3,121 |
# -*- coding: utf-8 -*-
#
# BayesPy documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 27 12:22:11 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
ON_RTD = os.environ.get('READTHEDOCS') == 'True'
# Use some dummy modules on Read the Docs because they are not available
# (requires some C libraries)
# http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
if ON_RTD:
from unittest.mock import MagicMock
MOCK_MODULES = ['h5py']
sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES)
# -- General configuration -----------------------------------------------------
import bayespy as bp
# Use the 'Read the Docs' theme
html_theme = 'sphinx_rtd_theme'
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.imgmath',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.doctest',
'numpydoc',
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.autosummary',
'sphinxcontrib.tikz',
'sphinxcontrib.bayesnet',
'sphinxcontrib.bibtex',
'nbsphinx',
]
# Image format for math
imgmath_image_format = 'svg'
# Choose the image processing ‹suite›, either 'Netpbm', 'pdf2svg', 'GhostScript', 'ImageMagick' ('Netpbm' by default):
# If you want your documentation to be built on http://readthedocs.org, you have to choose GhostScript.
# All suites produce png images, excepted 'pdf2svg' which produces svg.
if ON_RTD:
tikz_proc_suite = 'GhostScript'
else:
tikz_proc_suite = 'pdf2svg'
if ON_RTD:
# For some reason, RTD needs these to be set explicitly although they
# should have default values
math_number_all = False
numpydoc_show_class_members = False
# Include TODOs in the documentation?
todo_include_todos = True
# Generate autosummary stub pages automatically
# Or manually: sphinx-autogen -o source/generated source/*.rst
#autosummary_generate = False
import glob
autosummary_generate = glob.glob("*.rst") + glob.glob("*/*.rst") + glob.glob("*/*/*.rst") + glob.glob("*/*/*/*.rst")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = "BayesPy"
copyright = bp.__copyright__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = bp.__version__
# The full version, including alpha/beta/rc tags.
release = bp.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
'**.ipynb_checkpoints'
]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# Sphinx-TikZ extension
tikz_latex_preamble = r"""
\usepackage{amsmath}
"""
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinxdoc'
#html_theme = 'nature'
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# "sidebarwidth": 300
# }
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "BayesPy v%s Documentation" % (version)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'BayesPydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': r'''
\usepackage{tikz}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{svg}
\usetikzlibrary{shapes}
\usetikzlibrary{fit}
\usetikzlibrary{chains}
\usetikzlibrary{arrows}
''',
# Do not use [T1]{fontenc} because it does not work on libre systems
'fontenc': ''
}
#latex_additional_files = ['images/bayesnet.sty',]
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'BayesPy.tex', u'BayesPy Documentation',
u'Jaakko Luttinen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bayespy', u'BayesPy Documentation',
[u'Jaakko Luttinen'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'BayesPy', u'BayesPy Documentation',
u'Jaakko Luttinen', 'BayesPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'BayesPy'
epub_author = bp.__author__
epub_publisher = bp.__author__
epub_copyright = bp.__copyright__
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Read the docs fails to import _tkinter so use Agg backend
import matplotlib
matplotlib.use('agg')
| jluttine/bayespy | doc/source/conf.py | Python | mit | 11,116 |
import logging
from office365.sharepoint.helpers.utils import to_camel
logger = logging.getLogger(__name__)
class QueryStringBuilder:
"""class to map web-querystring dictionary to sharepoint-querystring"""
date_operators = ['ge', 'gt', 'le', 'lt']
mapping_operator = {
'gte': 'ge',
'gt': 'gt',
'lte': 'le',
'lt': 'lt',
'not': 'ne',
'contains': 'substringof'
}
search = []
filters = {}
def __init__(self, filters):
super().__init__()
if filters:
self.filters = filters
def get_filter_querystring(self):
filter_queries = []
for filter_name, filter_value in self.filters.items():
# operator
querystring_operator = filter_name.split('__')[-1]
operator = self.mapping_operator.get(querystring_operator, 'eq')
# filter
filter_name = to_camel(filter_name.split('__')[0])
if operator in self.date_operators:
values = ["{}T00:00:00Z".format(filter_value)] # 2016-03-26
query = ' or '.join([f"{filter_name} {operator} datetime'{value}'" for value in values])
elif operator == 'substringof':
values = filter_value.split(',')
query = ' or '.join([f"{operator}('{value}', {filter_name})" for value in values])
else:
values = filter_value.split(',')
query = ' or '.join([f"{filter_name} {operator} '{value}'" for value in values])
if len(values) > 1:
query = f'({query})'
filter_queries.append(query)
logger.info(query)
return str(" and ".join(filter_queries))
def get_querystring(self):
return self.get_filter_querystring() or ''
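
# --- Illustrative usage sketch (not part of the original module) ---
# Assuming to_camel('title') yields 'Title' and to_camel('created') yields
# 'Created', a Django-style filter dict maps roughly like this:
#
#   builder = QueryStringBuilder({
#       'title__contains': 'report',
#       'created__gte': '2020-01-01',
#   })
#   builder.get_querystring()
#   # -> "substringof('report', Title) and Created ge datetime'2020-01-01T00:00:00Z'"
#
# The exact field casing depends on to_camel(); the output above is only a sketch.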
| vgrem/SharePointOnline-REST-Python-Client | office365/sharepoint/helpers/querystring_builder.py | Python | mit | 1,822 |
import unittest
import json
import time
from celery import current_app
from django.conf import settings
from django.utils import timezone
from ep.models import DPMeasurements, DeviceParameter
from ep.tasks import send_msg
from django.test import TestCase, modify_settings, override_settings
from ep.tests.static_factories import SiteFactory
from ep_secure_importer.controllers.secure_client import secure_site_name
__author__ = 'schien'
@override_settings(IODICUS_MESSAGING_HOST='messaging.iodicus.net')
class TaskTest(TestCase):
def test_messaging(self):
print(settings.IODICUS_MESSAGING_HOST)
# print(settings.BROKER_URL)
self.assertTrue(send_msg.delay(json.dumps({'test': 1})))
class LocalTaskTest(TestCase):
def test_messaging(self):
print(settings.IODICUS_MESSAGING_HOST)
print(settings.BROKER_URL)
self.assertTrue(send_msg.delay(json.dumps({'test': 1})))
# @override_settings(INFLUXDB_HOST='52.49.171.8')
class InfluxDBTest(TestCase):
@classmethod
def setUpTestData(cls):
SiteFactory.create(name=secure_site_name)
# @unittest.skip
def test_simple_add(self):
print(settings.INFLUXDB_HOST)
m = DPMeasurements(device_parameter=DeviceParameter.objects.first())
before = len(list(m.all()))
print(before)
m.add(time=timezone.now(), value=255)
m.add(time=timezone.now(), value=0)
m.add(time=timezone.now(), value=20.5)
time.sleep(5)
after = len(list(m.all()))
print(after)
self.assertTrue(before + 3 == after)
if __name__ == '__main__':
unittest.main()
| dschien/energy-aggregator | ep/tests/test_docker.py | Python | mit | 1,639 |
from django.http import Http404, HttpResponse
from django.template.context_processors import csrf
from rest_framework.authentication import TokenAuthentication
from rest_framework.parsers import JSONParser
from rest_framework.permissions import DjangoModelPermissions
from rest_framework.views import APIView
from .models import FrontendDeployment
dev = """
<!doctype html>
<html lang="en">
<head>
<title>Loading | Falmer</title>
</head>
<body class="FalmerSite">
<script type="text/javascript">window.CSRF = "{csrf_token}";</script>
<div class="FalmerAppRoot"></div>
<script type="text/javascript" src="http://localhost:8080/vendor.js"></script>
<script type="text/javascript" src="http://localhost:8080/devFonts.js"></script>
<script type="text/javascript" src="http://localhost:8080/main.js"></script>
<script type="text/javascript" src="http://localhost:8080/productionFonts.js"></script>
</body>
</html>
"""
def application_serve(request):
if request.is_ajax() is False:
try:
deployment = FrontendDeployment.objects.filter(enabled=True).latest('created_at')
except FrontendDeployment.DoesNotExist:
return HttpResponse(dev.format(csrf_token=csrf(request)['csrf_token']))
return HttpResponse(deployment.content.format(csrf_token=csrf(request)['csrf_token']))
raise Http404()
class FrontendAPI(APIView):
authentication_classes = [TokenAuthentication, ]
permission_classes = [DjangoModelPermissions, ]
queryset = FrontendDeployment.objects.none()
def post(self, request):
FrontendDeployment.objects.create(
content=request.data['contents'],
)
return HttpResponse(status=200)
| sussexstudent/falmer | falmer/frontend/views.py | Python | mit | 1,727 |
#!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import optparse
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
import utils
from os.path import join, dirname, abspath, basename, isdir, exists
from datetime import datetime
from Queue import Queue, Empty
VERBOSE = False
# ---------------------------------------------
# --- P r o g r e s s I n d i c a t o r s ---
# ---------------------------------------------
class ProgressIndicator(object):
def __init__(self, cases):
self.cases = cases
self.queue = Queue(len(cases))
for case in cases:
self.queue.put_nowait(case)
self.succeeded = 0
self.remaining = len(cases)
self.total = len(cases)
self.failed = [ ]
self.crashed = 0
self.terminate = False
self.lock = threading.Lock()
def PrintFailureHeader(self, test):
if test.IsNegative():
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
print "Path: %s" % "/".join(test.path)
def Run(self, tasks):
self.Starting()
threads = []
# Spawn N-1 threads and then use this thread as the last one.
# That way -j1 avoids threading altogether which is a nice fallback
# in case of threading problems.
for i in xrange(tasks - 1):
thread = threading.Thread(target=self.RunSingle, args=[])
threads.append(thread)
thread.start()
try:
self.RunSingle()
# Wait for the remaining threads
for thread in threads:
# Use a timeout so that signals (ctrl-c) will be processed.
thread.join(timeout=10000000)
except Exception, e:
# If there's an exception we schedule an interruption for any
# remaining threads.
self.terminate = True
# ...and then reraise the exception to bail out
raise
self.Done()
return not self.failed
def RunSingle(self):
while not self.terminate:
try:
test = self.queue.get_nowait()
except Empty:
return
case = test.case
self.lock.acquire()
self.AboutToRun(case)
self.lock.release()
try:
start = datetime.now()
output = case.Run()
case.duration = (datetime.now() - start)
except IOError, e:
assert self.terminate
return
if self.terminate:
return
self.lock.acquire()
if output.UnexpectedOutput():
self.failed.append(output)
if output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
self.HasRun(output)
self.lock.release()
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class SimpleProgressIndicator(ProgressIndicator):
def Starting(self):
print 'Running %i tests' % len(self.cases)
def Done(self):
print
for failed in self.failed:
self.PrintFailureHeader(failed.test)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(failed.command)
if failed.HasCrashed():
print "--- CRASHED ---"
if failed.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.failed)
if self.crashed > 0:
print "=== %i tests CRASHED" % self.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
print 'Starting %s...' % case.GetLabel()
sys.stdout.flush()
def HasRun(self, output):
if output.UnexpectedOutput():
if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
pass
def HasRun(self, output):
total = self.succeeded + len(self.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if output.UnexpectedOutput():
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class TapProgressIndicator(SimpleProgressIndicator):
def Starting(self):
print '1..%i' % len(self.cases)
self._done = 0
def AboutToRun(self, case):
pass
def HasRun(self, output):
self._done += 1
command = basename(output.command[-1])
if output.UnexpectedOutput():
print 'not ok %i - %s' % (self._done, command)
for l in output.output.stderr.splitlines():
print '#' + l
for l in output.output.stdout.splitlines():
print '#' + l
else:
print 'ok %i - %s' % (self._done, command)
duration = output.test.duration
# total_seconds() was added in 2.7
total_seconds = (duration.microseconds +
(duration.seconds + duration.days * 24 * 3600) * 10**6) / 10**6
print ' ---'
print ' duration_ms: %d.%d' % (total_seconds, duration.microseconds / 1000)
print ' ...'
def Done(self):
pass
class CompactProgressIndicator(ProgressIndicator):
def __init__(self, cases, templates):
super(CompactProgressIndicator, self).__init__(cases)
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Starting(self):
pass
def Done(self):
self.PrintProgress('Done')
def AboutToRun(self, case):
self.PrintProgress(case.GetLabel())
def HasRun(self, output):
if output.UnexpectedOutput():
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(output.test)
stdout = output.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = output.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(output.command)
if output.HasCrashed():
print "--- CRASHED ---"
if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
return str[:(length-3)] + "..."
else:
return str
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.succeeded,
'remaining': (((self.total - self.remaining) * 100) // self.total),
'failed': len(self.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
'stdout': '%s',
'stderr': '%s',
'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
'max_length': 78
}
super(MonochromeProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'tap': TapProgressIndicator,
'mono': MonochromeProgressIndicator
}
# -------------------------
# --- F r a m e w o r k ---
# -------------------------
class CommandOutput(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.failed = None
class TestCase(object):
def __init__(self, context, path, mode):
self.path = path
self.context = context
self.duration = None
self.mode = mode
def IsNegative(self):
return False
def CompareTime(self, other):
return cmp(other.duration, self.duration)
def DidFail(self, output):
if output.failed is None:
output.failed = self.IsFailureOutput(output)
return output.failed
def IsFailureOutput(self, output):
return output.exit_code != 0
def GetSource(self):
return "(no source available)"
def RunCommand(self, command):
full_command = self.context.processor(command)
output = Execute(full_command,
self.context,
self.context.GetTimeout(self.mode))
self.Cleanup()
return TestOutput(self,
full_command,
output,
self.context.store_unexpected_output)
def BeforeRun(self):
pass
def AfterRun(self, result):
pass
def Run(self):
self.BeforeRun()
try:
result = self.RunCommand(self.GetCommand())
finally:
# Tests can leave the tty in non-blocking mode. If the test runner
# tries to print to stdout/stderr after that and the tty buffer is
      # full, it'll die with an EAGAIN OSError. Ergo, put the tty back in
# blocking mode before proceeding.
if sys.platform != 'win32':
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK
for fd in 0,1,2: fcntl(fd, F_SETFL, ~O_NONBLOCK & fcntl(fd, F_GETFL))
self.AfterRun(result)
return result
def Cleanup(self):
return
class TestOutput(object):
def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
self.store_unexpected_output = store_unexpected_output
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
elif self.HasTimedOut():
outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
outcome = PASS
return not outcome in self.test.outcomes
def HasPreciousOutput(self):
return self.UnexpectedOutput() and self.store_unexpected_output
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.output.timed_out:
return False
return self.output.exit_code < 0 and \
self.output.exit_code != -signal.SIGABRT
def HasTimedOut(self):
return self.output.timed_out;
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
return not execution_failed
else:
return execution_failed
def KillProcessWithID(pid):
if utils.IsWindows():
os.popen('taskkill /T /F /PID %d' % pid)
else:
os.kill(pid, signal.SIGTERM)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode);
except ImportError:
pass
return prev_error_mode
def RunProcess(context, timeout, args, **rest):
if context.verbose: print "#", " ".join(args)
popen_args = args
prev_error_mode = SEM_INVALID_VALUE;
if utils.IsWindows():
if context.suppress_dialogs:
      # Try to change the error mode to avoid dialogs on fatal errors. Avoid
      # clobbering any existing error mode flags by merging in the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX;
prev_error_mode = Win32SetErrorMode(error_mode);
Win32SetErrorMode(error_mode | prev_error_mode);
process = subprocess.Popen(
shell = utils.IsWindows(),
args = popen_args,
**rest
)
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out)
def PrintError(str):
sys.stderr.write(str)
sys.stderr.write('\n')
def CheckedUnlink(name):
try:
os.unlink(name)
except OSError, e:
PrintError("os.unlink() " + str(e))
def Execute(args, context, timeout=None):
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
stdout = fd_out,
stderr = fd_err,
)
os.close(fd_out)
os.close(fd_err)
output = file(outname).read()
errors = file(errname).read()
CheckedUnlink(outname)
CheckedUnlink(errname)
return CommandOutput(exit_code, timed_out, output, errors)
def ExecuteNoCapture(args, context, timeout=None):
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
)
return CommandOutput(exit_code, False, "", "")
def CarCdr(path):
if len(path) == 0:
return (None, [ ])
else:
return (path[0], path[1:])
class TestConfiguration(object):
def __init__(self, context, root):
self.context = context
self.root = root
def Contains(self, path, file):
if len(path) > len(file):
return False
for i in xrange(len(path)):
if not path[i].match(file[i]):
return False
return True
def GetTestStatus(self, sections, defs):
pass
class TestSuite(object):
def __init__(self, name):
self.name = name
def GetName(self):
return self.name
# Use this to run several variants of the tests, e.g.:
# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
VARIANT_FLAGS = [[]]
class TestRepository(TestSuite):
def __init__(self, path):
normalized_path = abspath(path)
super(TestRepository, self).__init__(basename(normalized_path))
self.path = normalized_path
self.is_loaded = False
self.config = None
def GetConfiguration(self, context):
if self.is_loaded:
return self.config
self.is_loaded = True
file = None
try:
(file, pathname, description) = imp.find_module('testcfg', [ self.path ])
module = imp.load_module('testcfg', file, pathname, description)
self.config = module.GetConfiguration(context, self.path)
finally:
if file:
file.close()
return self.config
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
def AddTestsToList(self, result, current_path, path, context, mode):
for v in VARIANT_FLAGS:
tests = self.GetConfiguration(context).ListTests(current_path, path, mode)
for t in tests: t.variant_flags = v
result += tests
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
def __init__(self, tests):
super(LiteralTestSuite, self).__init__('root')
self.tests = tests
def GetBuildRequirements(self, path, context):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
if not name or name.match(test.GetName()):
result += test.GetBuildRequirements(rest, context)
return result
def ListTests(self, current_path, path, context, mode):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
test.AddTestsToList(result, full_path, path, context, mode)
result.sort(cmp=lambda a, b: cmp(a.GetName(), b.GetName()))
return result
def GetTestStatus(self, context, sections, defs):
for test in self.tests:
test.GetTestStatus(context, sections, defs)
SUFFIX = {
'debug' : '_g',
'release' : '' }
FLAGS = {
'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
'release' : []}
TIMEOUT_SCALEFACTOR = {
'debug' : 4,
'release' : 1 }
class Context(object):
def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
self.workspace = workspace
self.buildspace = buildspace
self.verbose = verbose
self.vm_root = vm
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
self.store_unexpected_output = store_unexpected_output
def GetVm(self, mode):
if mode == 'debug':
name = 'out/Debug/node'
else:
name = 'out/Release/node'
# Currently GYP does not support output_dir for MSVS.
# http://code.google.com/p/gyp/issues/detail?id=40
# It will put the builds into Release/node.exe or Debug/node.exe
if utils.IsWindows():
out_dir = os.path.join(dirname(__file__), "..", "out")
if not exists(out_dir):
if mode == 'debug':
name = os.path.abspath('Debug/node.exe')
else:
name = os.path.abspath('Release/node.exe')
else:
name = os.path.abspath(name + '.exe')
return name
def GetVmCommand(self, testcase, mode):
return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)
def GetVmFlags(self, testcase, mode):
return testcase.variant_flags + FLAGS[mode]
def GetTimeout(self, mode):
return self.timeout * TIMEOUT_SCALEFACTOR[mode]
def RunTestCases(cases_to_run, progress, tasks):
progress = PROGRESS_INDICATORS[progress](cases_to_run)
return progress.Run(tasks)
def BuildRequirements(context, requirements, mode, scons_flags):
command_line = (['scons', '-Y', context.workspace, 'mode=' + ",".join(mode)]
+ requirements
+ scons_flags)
output = ExecuteNoCapture(command_line, context)
return output.exit_code == 0
# -------------------------------------------
# --- T e s t C o n f i g u r a t i o n ---
# -------------------------------------------
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
class Expression(object):
pass
class Constant(Expression):
def __init__(self, value):
self.value = value
def Evaluate(self, env, defs):
return self.value
class Variable(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in env: return ListSet([env[self.name]])
else: return Nothing()
class Outcome(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in defs:
return defs[self.name].GetOutcomes(env, defs)
else:
return ListSet([self.name])
class Set(object):
pass
class ListSet(Set):
def __init__(self, elms):
self.elms = elms
def __str__(self):
return "ListSet%s" % str(self.elms)
def Intersect(self, that):
if not isinstance(that, ListSet):
return that.Intersect(self)
return ListSet([ x for x in self.elms if x in that.elms ])
def Union(self, that):
if not isinstance(that, ListSet):
return that.Union(self)
return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])
def IsEmpty(self):
return len(self.elms) == 0
class Everything(Set):
def Intersect(self, that):
return that
def Union(self, that):
return self
def IsEmpty(self):
return False
class Nothing(Set):
def Intersect(self, that):
return self
def Union(self, that):
return that
def IsEmpty(self):
return True
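# A small illustration of the outcome-set algebra above (not original code):
#   ListSet(['pass', 'fail']).Intersect(ListSet(['fail', 'crash'])).elms  -> ['fail']
#   ListSet(['pass']).Union(ListSet(['pass', 'timeout'])).elms            -> ['pass', 'timeout']
# Everything() is the identity for Intersect (it returns the other set) and
# absorbing for Union; Nothing() is the identity for Union and absorbing for Intersect.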
class Operation(Expression):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def Evaluate(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
elif self.op == 'if':
return False
elif self.op == '==':
inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
return not inter.IsEmpty()
else:
assert self.op == '&&'
return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
def GetOutcomes(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
elif self.op == 'if':
if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
else: return Nothing()
else:
assert self.op == '&&'
return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
def IsAlpha(str):
for char in str:
if not (char.isalpha() or char.isdigit() or char == '_'):
return False
return True
class Tokenizer(object):
"""A simple string tokenizer that chops expressions into variables,
parens and operators"""
def __init__(self, expr):
self.index = 0
self.expr = expr
self.length = len(expr)
self.tokens = None
def Current(self, length = 1):
if not self.HasMore(length): return ""
return self.expr[self.index:self.index+length]
def HasMore(self, length = 1):
return self.index < self.length + (length - 1)
def Advance(self, count = 1):
self.index = self.index + count
def AddToken(self, token):
self.tokens.append(token)
def SkipSpaces(self):
while self.HasMore() and self.Current().isspace():
self.Advance()
def Tokenize(self):
self.tokens = [ ]
while self.HasMore():
self.SkipSpaces()
if not self.HasMore():
return None
if self.Current() == '(':
self.AddToken('(')
self.Advance()
elif self.Current() == ')':
self.AddToken(')')
self.Advance()
elif self.Current() == '$':
self.AddToken('$')
self.Advance()
elif self.Current() == ',':
self.AddToken(',')
self.Advance()
elif IsAlpha(self.Current()):
buf = ""
while self.HasMore() and IsAlpha(self.Current()):
buf += self.Current()
self.Advance()
self.AddToken(buf)
elif self.Current(2) == '&&':
self.AddToken('&&')
self.Advance(2)
elif self.Current(2) == '||':
self.AddToken('||')
self.Advance(2)
elif self.Current(2) == '==':
self.AddToken('==')
self.Advance(2)
else:
return None
return self.tokens
class Scanner(object):
"""A simple scanner that can serve out tokens from a given list"""
def __init__(self, tokens):
self.tokens = tokens
self.length = len(tokens)
self.index = 0
def HasMore(self):
return self.index < self.length
def Current(self):
return self.tokens[self.index]
def Advance(self):
self.index = self.index + 1
def ParseAtomicExpression(scan):
if scan.Current() == "true":
scan.Advance()
return Constant(True)
elif scan.Current() == "false":
scan.Advance()
return Constant(False)
elif IsAlpha(scan.Current()):
name = scan.Current()
scan.Advance()
return Outcome(name.lower())
elif scan.Current() == '$':
scan.Advance()
if not IsAlpha(scan.Current()):
return None
name = scan.Current()
scan.Advance()
return Variable(name.lower())
elif scan.Current() == '(':
scan.Advance()
result = ParseLogicalExpression(scan)
if (not result) or (scan.Current() != ')'):
return None
scan.Advance()
return result
else:
return None
BINARIES = ['==']
def ParseOperatorExpression(scan):
left = ParseAtomicExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in BINARIES):
op = scan.Current()
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseConditionalExpression(scan):
left = ParseOperatorExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() == 'if'):
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left= Operation(left, 'if', right)
return left
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
left = ParseConditionalExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in LOGICALS):
op = scan.Current()
scan.Advance()
right = ParseConditionalExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseCondition(expr):
"""Parses a logical expression into an Expression object"""
tokens = Tokenizer(expr).Tokenize()
if not tokens:
print "Malformed expression: '%s'" % expr
return None
scan = Scanner(tokens)
ast = ParseLogicalExpression(scan)
if not ast:
print "Malformed expression: '%s'" % expr
return None
if scan.HasMore():
print "Malformed expression: '%s'" % expr
return None
return ast
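
# Example (illustrative, not part of the original file): a status-file
# condition such as "$mode == debug && $arch == x64" parses into an Operation
# tree whose Evaluate() checks the environment dictionary:
#
#   ast = ParseCondition('$mode == debug && $arch == x64')
#   ast.Evaluate({'mode': 'debug', 'arch': 'x64'}, {})    # -> True
#   ast.Evaluate({'mode': 'release', 'arch': 'x64'}, {})  # -> False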
class ClassifiedTest(object):
def __init__(self, case, outcomes):
self.case = case
self.outcomes = outcomes
class Configuration(object):
"""The parsed contents of a configuration file"""
def __init__(self, sections, defs):
self.sections = sections
self.defs = defs
def ClassifyTests(self, cases, env):
sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
all_rules = reduce(list.__add__, [s.rules for s in sections], [])
unused_rules = set(all_rules)
result = [ ]
all_outcomes = set([])
for case in cases:
matches = [ r for r in all_rules if r.Contains(case.path) ]
outcomes = set([])
for rule in matches:
outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
unused_rules.discard(rule)
if not outcomes:
outcomes = [PASS]
case.outcomes = outcomes
all_outcomes = all_outcomes.union(outcomes)
result.append(ClassifiedTest(case, outcomes))
return (result, list(unused_rules), all_outcomes)
class Section(object):
"""A section of the configuration file. Sections are enabled or
disabled prior to running the tests, based on their conditions"""
def __init__(self, condition):
self.condition = condition
self.rules = [ ]
def AddRule(self, rule):
self.rules.append(rule)
class Rule(object):
"""A single rule that specifies the expected outcome for a single
test."""
def __init__(self, raw_path, path, value):
self.raw_path = raw_path
self.path = path
self.value = value
def GetOutcomes(self, env, defs):
set = self.value.GetOutcomes(env, defs)
assert isinstance(set, ListSet)
return set.elms
def Contains(self, path):
if len(self.path) > len(path):
return False
for i in xrange(len(self.path)):
if not self.path[i].match(path[i]):
return False
return True
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
def ReadConfigurationInto(path, sections, defs):
current_section = Section(Constant(True))
sections.append(current_section)
prefix = []
for line in utils.ReadLinesFrom(path):
header_match = HEADER_PATTERN.match(line)
if header_match:
condition_str = header_match.group(1).strip()
condition = ParseCondition(condition_str)
new_section = Section(condition)
sections.append(new_section)
current_section = new_section
continue
rule_match = RULE_PATTERN.match(line)
if rule_match:
path = prefix + SplitPath(rule_match.group(1).strip())
value_str = rule_match.group(2).strip()
value = ParseCondition(value_str)
if not value:
return False
current_section.AddRule(Rule(rule_match.group(1), path, value))
continue
def_match = DEF_PATTERN.match(line)
if def_match:
name = def_match.group(1).lower()
value = ParseCondition(def_match.group(2).strip())
if not value:
return False
defs[name] = value
continue
prefix_match = PREFIX_PATTERN.match(line)
if prefix_match:
prefix = SplitPath(prefix_match.group(1).strip())
continue
print "Malformed line: '%s'." % line
return False
return True
# ---------------
# --- M a i n ---
# ---------------
ARCH_GUESS = utils.GuessArchitecture()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
default='release')
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option("-S", dest="scons_flags", help="Flag to pass through to scons",
default=[], action="append")
result.add_option("-p", "--progress",
help="The style of progress indicator (verbose, dots, color, mono, tap)",
choices=PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--no-build", help="Don't build requirements",
default=True, action="store_true")
result.add_option("--build-only", help="Only build requirements, don't run the tests",
default=False, action="store_true")
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
default=[], action="append")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default=60, type="int")
result.add_option("--arch", help='The architecture to run tests for',
default='none')
result.add_option("--snapshot", help="Run the tests with snapshot turned on",
default=False, action="store_true")
result.add_option("--simulator", help="Run tests with architecture simulator",
default='none')
result.add_option("--special-command", default=None)
result.add_option("--use-http1", help="Pass --use-http1 switch to node",
default=False, action="store_true")
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=1, type="int")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--shell", help="Path to V8 shell", default="shell")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
help="Deletes the temporary JS files from tests that fails",
dest="store_unexpected_output", action="store_false")
return result
def ProcessOptions(options):
global VERBOSE
VERBOSE = options.verbose
options.mode = options.mode.split(',')
for mode in options.mode:
if not mode in ['debug', 'release']:
print "Unknown mode %s" % mode
return False
if options.simulator != 'none':
# Simulator argument was set. Make sure arch and simulator agree.
if options.simulator != options.arch:
if options.arch == 'none':
options.arch = options.simulator
else:
print "Architecture %s does not match sim %s" %(options.arch, options.simulator)
return False
# Ensure that the simulator argument is handed down to scons.
options.scons_flags.append("simulator=" + options.simulator)
else:
# If options.arch is not set by the command line and no simulator setting
# was found, set the arch to the guess.
if options.arch == 'none':
options.arch = ARCH_GUESS
options.scons_flags.append("arch=" + options.arch)
if options.snapshot:
options.scons_flags.append("snapshot=on")
return True
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
def PrintReport(cases):
def IsFlaky(o):
return (PASS in o) and (FAIL in o) and (not CRASH in o) and (not OKAY in o)
def IsFailOk(o):
return (len(o) == 2) and (FAIL in o) and (OKAY in o)
unskipped = [c for c in cases if not SKIP in c.outcomes]
print REPORT_TEMPLATE % {
'total': len(cases),
'skipped': len(cases) - len(unskipped),
'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]),
'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
}
class Pattern(object):
def __init__(self, pattern):
self.pattern = pattern
self.compiled = None
def match(self, str):
if not self.compiled:
pattern = "^" + self.pattern.replace('*', '.*') + "$"
self.compiled = re.compile(pattern)
return self.compiled.match(str)
def __str__(self):
return self.pattern
def SplitPath(s):
stripped = [ c.strip() for c in s.split('/') ]
return [ Pattern(s) for s in stripped if len(s) > 0 ]
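# Example (illustrative): SplitPath('simple/test-http-*') yields
# [Pattern('simple'), Pattern('test-http-*')], and
# Pattern('test-http-*').match('test-http-proxy') matches because '*' is
# rewritten to the regex '.*' and the pattern is anchored with '^' and '$'.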
def GetSpecialCommandProcessor(value):
if (not value) or (value.find('@') == -1):
def ExpandCommand(args):
return args
return ExpandCommand
else:
pos = value.find('@')
import urllib
prefix = urllib.unquote(value[:pos]).split()
suffix = urllib.unquote(value[pos+1:]).split()
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
BUILT_IN_TESTS = ['simple', 'pummel', 'message', 'internet', 'gc']
def GetSuites(test_root):
def IsSuite(path):
return isdir(path) and exists(join(path, 'testcfg.py'))
return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
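# Worked example (illustrative): FormatTime(75.5) -> '01:15.500'
# (75.5 s is 1 min 15 s, plus round(75.5 * 1000) % 1000 = 500 milliseconds).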
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
workspace = abspath(join(dirname(sys.argv[0]), '..'))
suites = GetSuites(join(workspace, 'test'))
repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
repositories += [TestRepository(a) for a in options.suite]
root = LiteralTestSuite(repositories)
if len(args) == 0:
paths = [SplitPath(t) for t in BUILT_IN_TESTS]
else:
paths = [ ]
for arg in args:
path = SplitPath(arg)
paths.append(path)
# Check for --valgrind option. If enabled, we overwrite the special
# command flag with a command that uses the run-valgrind.py script.
if options.valgrind:
run_valgrind = join(workspace, "tools", "run-valgrind.py")
options.special_command = "python -u " + run_valgrind + " @"
shell = abspath(options.shell)
buildspace = dirname(shell)
processor = GetSpecialCommandProcessor(options.special_command)
if options.use_http1:
def wrap(processor):
return lambda args: processor(args[:1] + ['--use-http1'] + args[1:])
processor = wrap(processor)
context = Context(workspace,
buildspace,
VERBOSE,
shell,
options.timeout,
processor,
options.suppress_dialogs,
options.store_unexpected_output)
# First build the required targets
if not options.no_build:
reqs = [ ]
for path in paths:
reqs += root.GetBuildRequirements(path, context)
reqs = list(set(reqs))
if len(reqs) > 0:
if options.j != 1:
options.scons_flags += ['-j', str(options.j)]
if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
return 1
# Just return if we are only building the targets for running the tests.
if options.build_only:
return 0
# Get status for tests
sections = [ ]
defs = { }
root.GetTestStatus(context, sections, defs)
config = Configuration(sections, defs)
# List the tests
all_cases = [ ]
all_unused = [ ]
unclassified_tests = [ ]
globally_unused_rules = None
for path in paths:
for mode in options.mode:
if not exists(context.GetVm(mode)):
print "Can't find shell executable: '%s'" % context.GetVm(mode)
continue
env = {
'mode': mode,
'system': utils.GuessOS(),
'arch': options.arch,
'simulator': options.simulator
}
test_list = root.ListTests([], path, context, mode)
unclassified_tests += test_list
(cases, unused_rules, all_outcomes) = config.ClassifyTests(test_list, env)
if globally_unused_rules is None:
globally_unused_rules = set(unused_rules)
else:
globally_unused_rules = globally_unused_rules.intersection(unused_rules)
all_cases += cases
all_unused.append(unused_rules)
if options.cat:
visited = set()
for test in unclassified_tests:
key = tuple(test.path)
if key in visited:
continue
visited.add(key)
print "--- begin source: %s ---" % test.GetLabel()
source = test.GetSource().strip()
print source
print "--- end source: %s ---" % test.GetLabel()
return 0
if options.warn_unused:
for rule in globally_unused_rules:
print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
if options.report:
PrintReport(all_cases)
result = None
def DoSkip(case):
return SKIP in case.outcomes or SLOW in case.outcomes
cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
if len(cases_to_run) == 0:
print "No tests to run."
return 0
else:
try:
start = time.time()
if RunTestCases(cases_to_run, options.progress, options.j):
result = 0
else:
result = 1
duration = time.time() - start
except KeyboardInterrupt:
print "Interrupted"
return 1
if options.time:
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
timed_tests.sort(lambda a, b: a.CompareTime(b))
index = 1
for entry in timed_tests[:20]:
t = FormatTime(entry.duration)
sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
index += 1
return result
if __name__ == '__main__':
sys.exit(Main())
| x684867/nemesis | src/node/tools/test.py | Python | mit | 42,571 |
#!/usr/bin/env python
import getopt
import json
import locale
import os
import re
import sys
from urllib import request, parse
import platform
import threading
from .version import __version__
from .util import log, sogou_proxy_server, get_filename, unescape_html
dry_run = False
force = False
player = None
sogou_proxy = None
sogou_env = None
cookies_txt = None
fake_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) Gecko/20100101 Firefox/13.0'
}
if sys.stdout.isatty():
default_encoding = sys.stdout.encoding.lower()
else:
default_encoding = locale.getpreferredencoding().lower()
def tr(s):
try:
s.encode(default_encoding)
return s
except:
return str(s.encode('utf-8'))[2:-1]
# DEPRECATED in favor of match1()
def r1(pattern, text):
m = re.search(pattern, text)
if m:
return m.group(1)
# DEPRECATED in favor of match1()
def r1_of(patterns, text):
for p in patterns:
x = r1(p, text)
if x:
return x
def match1(text, *patterns):
"""Scans through a string for substrings matched some patterns (first-subgroups only).
Args:
text: A string to be scanned.
patterns: Arbitrary number of regex patterns.
Returns:
When only one pattern is given, returns a string (None if no match found).
When more than one pattern are given, returns a list of strings ([] if no match found).
"""
if len(patterns) == 1:
pattern = patterns[0]
match = re.search(pattern, text)
if match:
return match.group(1)
else:
return None
else:
ret = []
for pattern in patterns:
match = re.search(pattern, text)
if match:
ret.append(match.group(1))
return ret
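# Doctest-style illustration (not part of the original module):
#   match1('hello=world', r'hello=(\w+)')    -> 'world'
#   match1('a=1&b=2', r'a=(\d)', r'b=(\d)')  -> ['1', '2']
#   match1('a=1', r'x=(\d)')                 -> None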
def launch_player(player, urls):
import subprocess
import shlex
subprocess.call(shlex.split(player) + list(urls))
def parse_query_param(url, param):
"""Parses the query string of a URL and returns the value of a parameter.
Args:
url: A URL.
param: A string representing the name of the parameter.
Returns:
The value of the parameter.
"""
try:
return parse.parse_qs(parse.urlparse(url).query)[param][0]
except:
return None
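# Illustration (the URL is hypothetical):
#   parse_query_param('http://example.com/watch?v=abc123&t=42', 'v') -> 'abc123'
#   parse_query_param('http://example.com/watch?v=abc123&t=42', 'x') -> None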
def unicodize(text):
return re.sub(r'\\u([0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f])', lambda x: chr(int(x.group(0)[2:], 16)), text)
# DEPRECATED in favor of util.legitimize()
def escape_file_path(path):
path = path.replace('/', '-')
path = path.replace('\\', '-')
path = path.replace('*', '-')
path = path.replace('?', '-')
return path
# DEPRECATED in favor of util.legitimize()
def filenameable(text):
"""Converts a string to a legal filename through various OSes.
"""
# All POSIX systems
text = text.translate({
0: None,
ord('/'): '-',
})
if platform.system() == 'Windows': # For Windows
text = text.translate({
ord(':'): '-',
ord('*'): '-',
ord('?'): '-',
ord('\\'): '-',
ord('\"'): '\'',
ord('<'): '-',
ord('>'): '-',
ord('|'): '-',
ord('+'): '-',
ord('['): '(',
ord(']'): ')',
})
else:
if text.startswith("."):
text = text[1:]
if platform.system() == 'Darwin': # For Mac OS
text = text.translate({
ord(':'): '-',
})
return text
def ungzip(data):
"""Decompresses data for Content-Encoding: gzip.
"""
from io import BytesIO
import gzip
buffer = BytesIO(data)
f = gzip.GzipFile(fileobj=buffer)
return f.read()
def undeflate(data):
"""Decompresses data for Content-Encoding: deflate.
(the zlib compression is used.)
"""
import zlib
decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
return decompressobj.decompress(data)+decompressobj.flush()
# DEPRECATED in favor of get_content()
def get_response(url, faker = False):
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
else:
response = request.urlopen(url)
data = response.read()
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip(data)
elif response.info().get('Content-Encoding') == 'deflate':
data = undeflate(data)
response.data = data
return response
# DEPRECATED in favor of get_content()
def get_html(url, encoding = None, faker = False):
content = get_response(url, faker).data
return str(content, 'utf-8', 'ignore')
# DEPRECATED in favor of get_content()
def get_decoded_html(url, faker = False):
response = get_response(url, faker)
data = response.data
charset = r1(r'charset=([\w-]+)', response.headers['content-type'])
if charset:
return data.decode(charset, 'ignore')
else:
return data
def get_content(url, headers={}, decoded=True):
"""Gets the content of a URL via sending a HTTP GET request.
Args:
url: A URL.
headers: Request headers used by the client.
        decoded: Whether to decode the response body using UTF-8 or the charset specified in Content-Type.
Returns:
The content as a string.
"""
req = request.Request(url, headers=headers)
if cookies_txt:
cookies_txt.add_cookie_header(req)
req.headers.update(req.unredirected_hdrs)
response = request.urlopen(req)
data = response.read()
# Handle HTTP compression for gzip and deflate (zlib)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = ungzip(data)
elif content_encoding == 'deflate':
data = undeflate(data)
# Decode the response body
if decoded:
charset = match1(response.getheader('Content-Type'), r'charset=([\w-]+)')
if charset is not None:
data = data.decode(charset)
else:
data = data.decode('utf-8')
return data
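
# Minimal usage sketch (the URL is hypothetical, not part of the original module):
#   html = get_content('http://example.com/video/123', headers=fake_headers)
# Passing fake_headers makes the request look like a desktop browser; the body
# is decoded using the charset from Content-Type, falling back to UTF-8.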
def url_size(url, faker = False):
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
else:
response = request.urlopen(url)
size = int(response.headers['content-length'])
return size
def urls_size(urls):
return sum(map(url_size, urls))
def url_info(url, faker = False):
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
else:
response = request.urlopen(request.Request(url))
headers = response.headers
type = headers['content-type']
mapping = {
'video/3gpp': '3gp',
'video/f4v': 'flv',
'video/mp4': 'mp4',
'video/MP2T': 'ts',
'video/quicktime': 'mov',
'video/webm': 'webm',
'video/x-flv': 'flv',
'video/x-ms-asf': 'asf',
'audio/mpeg': 'mp3'
}
if type in mapping:
ext = mapping[type]
else:
type = None
if headers['content-disposition']:
try:
filename = parse.unquote(r1(r'filename="?([^"]+)"?', headers['content-disposition']))
if len(filename.split('.')) > 1:
ext = filename.split('.')[-1]
else:
ext = None
except:
ext = None
else:
ext = None
if headers['transfer-encoding'] != 'chunked':
size = int(headers['content-length'])
else:
size = None
return type, ext, size
def url_locations(urls, faker = False):
locations = []
for url in urls:
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
else:
response = request.urlopen(request.Request(url))
locations.append(response.url)
return locations
def url_save(url, filepath, bar, refer = None, is_part = False, faker = False):
file_size = url_size(url, faker = faker)
if os.path.exists(filepath):
if not force and file_size == os.path.getsize(filepath):
if not is_part:
if bar:
bar.done()
print('Skipping %s: file already exists' % tr(os.path.basename(filepath)))
else:
if bar:
bar.update_received(file_size)
return
else:
if not is_part:
if bar:
bar.done()
print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
elif not os.path.exists(os.path.dirname(filepath)):
os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download'
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if received < file_size:
if faker:
headers = fake_headers
else:
headers = {}
if received:
headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
headers['Referer'] = refer
response = request.urlopen(request.Request(url, headers = headers), None)
try:
range_start = int(response.headers['content-range'][6:].split('/')[0].split('-')[0])
end_length = end = int(response.headers['content-range'][6:].split('/')[1])
range_length = end_length - range_start
except:
range_length = int(response.headers['content-length'])
if file_size != received + range_length:
received = 0
if bar:
bar.received = 0
open_mode = 'wb'
with open(temp_filepath, open_mode) as output:
while True:
buffer = response.read(1024 * 256)
if not buffer:
if received == file_size: # Download finished
break
else: # Unexpected termination. Retry request
headers['Range'] = 'bytes=' + str(received) + '-'
response = request.urlopen(request.Request(url, headers = headers), None)
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
if os.access(filepath, os.W_OK):
os.remove(filepath) # on Windows rename could fail if destination filepath exists
os.rename(temp_filepath, filepath)
def url_save_chunked(url, filepath, bar, refer = None, is_part = False, faker = False):
if os.path.exists(filepath):
if not force:
if not is_part:
if bar:
bar.done()
print('Skipping %s: file already exists' % tr(os.path.basename(filepath)))
else:
if bar:
bar.update_received(os.path.getsize(filepath))
return
else:
if not is_part:
if bar:
bar.done()
print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
elif not os.path.exists(os.path.dirname(filepath)):
os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download'
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if faker:
headers = fake_headers
else:
headers = {}
if received:
headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
headers['Referer'] = refer
response = request.urlopen(request.Request(url, headers = headers), None)
with open(temp_filepath, open_mode) as output:
while True:
buffer = response.read(1024 * 256)
if not buffer:
break
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
    assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
if os.access(filepath, os.W_OK):
os.remove(filepath) # on Windows rename could fail if destination filepath exists
os.rename(temp_filepath, filepath)
class SimpleProgressBar:
def __init__(self, total_size, total_pieces = 1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar_size = 40
percent = round(self.received * 100 / self.total_size, 1)
if percent > 100:
percent = 100
dots = bar_size * int(percent) // 100
plus = int(percent) - dots // bar_size * 100
if plus > 0.8:
plus = '='
elif plus > 0.4:
plus = '>'
else:
plus = ''
bar = '=' * dots + plus
bar = '{0:>5}% ({1:>5}/{2:<5}MB) [{3:<40}] {4}/{5}'.format(percent, round(self.received / 1048576, 1), round(self.total_size / 1048576, 1), bar, self.current_piece, self.total_pieces)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
class PiecesProgressBar:
def __init__(self, total_size, total_pieces = 1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar = '{0:>5}%[{1:<40}] {2}/{3}'.format('?', '?' * 40, self.current_piece, self.total_pieces)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
class DummyProgressBar:
def __init__(self, *args):
pass
def update_received(self, n):
pass
def update_piece(self, n):
pass
def done(self):
pass
def download_urls(urls, title, ext, total_size, output_dir='.', refer=None, merge=True, faker=False):
assert urls
if dry_run:
print('Real URLs:\n%s\n' % urls)
return
if player:
launch_player(player, urls)
return
if not total_size:
try:
total_size = urls_size(urls)
except:
import traceback
import sys
traceback.print_exc(file = sys.stdout)
pass
title = get_filename(title)
filename = '%s.%s' % (title, ext)
filepath = os.path.join(output_dir, filename)
if total_size:
if not force and os.path.exists(filepath) and os.path.getsize(filepath) >= total_size * 0.9:
print('Skipping %s: file already exists' % tr(filepath))
print()
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
url = urls[0]
print('Downloading %s ...' % tr(filename))
url_save(url, filepath, bar, refer = refer, faker = faker)
bar.done()
else:
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
#print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
bar.update_piece(i + 1)
url_save(url, filepath, bar, refer = refer, is_part = True, faker = faker)
bar.done()
if not merge:
print()
return
if ext == 'flv':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_flv_to_mp4
ffmpeg_concat_flv_to_mp4(parts, os.path.join(output_dir, title + '.mp4'))
else:
from .processor.join_flv import concat_flv
concat_flv(parts, os.path.join(output_dir, title + '.flv'))
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == 'mp4':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_mp4_to_mp4
ffmpeg_concat_mp4_to_mp4(parts, os.path.join(output_dir, title + '.mp4'))
else:
from .processor.join_mp4 import concat_mp4
concat_mp4(parts, os.path.join(output_dir, title + '.mp4'))
except:
raise
else:
for part in parts:
os.remove(part)
else:
print("Can't merge %s files" % ext)
print()
def download_urls_chunked(urls, title, ext, total_size, output_dir='.', refer=None, merge=True, faker=False):
assert urls
if dry_run:
print('Real URLs:\n%s\n' % urls)
return
if player:
launch_player(player, urls)
return
    assert ext in ('ts', )
title = get_filename(title)
filename = '%s.%s' % (title, 'ts')
filepath = os.path.join(output_dir, filename)
if total_size:
if not force and os.path.exists(filepath[:-3] + '.mkv'):
print('Skipping %s: file already exists' % tr(filepath[:-3] + '.mkv'))
print()
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
parts = []
url = urls[0]
print('Downloading %s ...' % tr(filename))
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
url_save_chunked(url, filepath, bar, refer = refer, faker = faker)
bar.done()
if not merge:
print()
return
if ext == 'ts':
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_convert_ts_to_mkv
if ffmpeg_convert_ts_to_mkv(parts, os.path.join(output_dir, title + '.mkv')):
for part in parts:
os.remove(part)
else:
os.remove(os.path.join(output_dir, title + '.mkv'))
else:
print('No ffmpeg is found. Conversion aborted.')
else:
print("Can't convert %s files" % ext)
else:
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
#print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
bar.update_piece(i + 1)
url_save_chunked(url, filepath, bar, refer = refer, is_part = True, faker = faker)
bar.done()
if not merge:
print()
return
if ext == 'ts':
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv
if ffmpeg_concat_ts_to_mkv(parts, os.path.join(output_dir, title + '.mkv')):
for part in parts:
os.remove(part)
else:
os.remove(os.path.join(output_dir, title + '.mkv'))
else:
print('No ffmpeg is found. Merging aborted.')
else:
print("Can't merge %s files" % ext)
print()
def download_rtmp_url(url, playpath, title, ext, total_size=0, output_dir='.', refer=None, merge=True, faker=False):
assert url
if dry_run:
print('Real URL:\n%s\n' % [url])
print('Real Playpath:\n%s\n' % [playpath])
return
if player:
from .processor.rtmpdump import play_rtmpdump_stream
play_rtmpdump_stream(player, url, playpath)
return
from .processor.rtmpdump import has_rtmpdump_installed, download_rtmpdump_stream
assert has_rtmpdump_installed(), "RTMPDump not installed."
download_rtmpdump_stream(url, playpath, title, ext, output_dir)
def playlist_not_supported(name):
def f(*args, **kwargs):
raise NotImplementedError('Playlist is not supported for ' + name)
return f
def print_info(site_info, title, type, size):
if type:
type = type.lower()
if type in ['3gp']:
type = 'video/3gpp'
elif type in ['asf', 'wmv']:
type = 'video/x-ms-asf'
elif type in ['flv', 'f4v']:
type = 'video/x-flv'
elif type in ['mkv']:
type = 'video/x-matroska'
elif type in ['mp3']:
type = 'audio/mpeg'
elif type in ['mp4']:
type = 'video/mp4'
elif type in ['mov']:
type = 'video/quicktime'
elif type in ['ts']:
type = 'video/MP2T'
elif type in ['webm']:
type = 'video/webm'
if type in ['video/3gpp']:
type_info = "3GPP multimedia file (%s)" % type
elif type in ['video/x-flv', 'video/f4v']:
type_info = "Flash video (%s)" % type
elif type in ['video/mp4', 'video/x-m4v']:
type_info = "MPEG-4 video (%s)" % type
elif type in ['video/MP2T']:
type_info = "MPEG-2 transport stream (%s)" % type
elif type in ['video/webm']:
type_info = "WebM video (%s)" % type
#elif type in ['video/ogg']:
# type_info = "Ogg video (%s)" % type
elif type in ['video/quicktime']:
type_info = "QuickTime video (%s)" % type
elif type in ['video/x-matroska']:
type_info = "Matroska video (%s)" % type
#elif type in ['video/x-ms-wmv']:
# type_info = "Windows Media video (%s)" % type
elif type in ['video/x-ms-asf']:
type_info = "Advanced Systems Format (%s)" % type
#elif type in ['video/mpeg']:
# type_info = "MPEG video (%s)" % type
elif type in ['audio/mpeg']:
type_info = "MP3 (%s)" % type
else:
type_info = "Unknown type (%s)" % type
print("Video Site:", site_info)
print("Title: ", unescape_html(tr(title)))
print("Type: ", type_info)
print("Size: ", round(size / 1048576, 2), "MiB (" + str(size) + " Bytes)")
print()
def parse_host(host):
"""Parses host name and port number from a string.
"""
if re.match(r'^(\d+)$', host) is not None:
return ("0.0.0.0", int(host))
if re.match(r'^(\w+)://', host) is None:
host = "//" + host
o = parse.urlparse(host)
hostname = o.hostname or "0.0.0.0"
port = o.port or 0
return (hostname, port)
def get_sogou_proxy():
return sogou_proxy
def set_proxy(proxy):
proxy_handler = request.ProxyHandler({
'http': '%s:%s' % proxy,
'https': '%s:%s' % proxy,
})
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
def unset_proxy():
proxy_handler = request.ProxyHandler({})
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
# DEPRECATED in favor of set_proxy() and unset_proxy()
def set_http_proxy(proxy):
if proxy == None: # Use system default setting
proxy_support = request.ProxyHandler()
elif proxy == '': # Don't use any proxy
proxy_support = request.ProxyHandler({})
else: # Use proxy
proxy_support = request.ProxyHandler({'http': '%s' % proxy, 'https': '%s' % proxy})
opener = request.build_opener(proxy_support)
request.install_opener(opener)
def download_main(download, download_playlist, urls, playlist, output_dir, merge, info_only):
for url in urls:
if url.startswith('https://'):
url = url[8:]
if not url.startswith('http://'):
url = 'http://' + url
if playlist:
download_playlist(url, output_dir = output_dir, merge = merge, info_only = info_only)
else:
download(url, output_dir = output_dir, merge = merge, info_only = info_only)
def get_version():
try:
import subprocess
real_dir = os.path.dirname(os.path.realpath(__file__))
git_hash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], cwd=real_dir, stderr=subprocess.DEVNULL).decode('utf-8').strip()
assert git_hash
return '%s-%s' % (__version__, git_hash)
except:
return __version__
def script_main(script_name, download, download_playlist = None):
version = 'You-Get %s, a video downloader.' % get_version()
help = 'Usage: %s [OPTION]... [URL]...\n' % script_name
help += '''\nStartup options:
-V | --version Display the version and exit.
-h | --help Print this help and exit.
'''
help += '''\nDownload options (use with URLs):
    -f | --force                       Force overwriting existing files.
-i | --info Display the information of videos without downloading.
-u | --url Display the real URLs of videos without downloading.
-c | --cookies Load NetScape's cookies.txt file.
-n | --no-merge Don't merge video parts.
-o | --output-dir <PATH> Set the output directory for downloaded videos.
-p | --player <PLAYER [options]> Directly play the video with PLAYER like vlc/smplayer.
-x | --http-proxy <HOST:PORT> Use specific HTTP proxy for downloading.
--no-proxy Don't use any proxy. (ignore $http_proxy)
-S | --sogou Use a Sogou proxy server for downloading.
--sogou-proxy <HOST:PORT> Run a standalone Sogou proxy server.
--debug Show traceback on KeyboardInterrupt.
'''
short_opts = 'Vhfiuc:nSo:p:x:'
opts = ['version', 'help', 'force', 'info', 'url', 'cookies', 'no-merge', 'no-proxy', 'debug', 'sogou', 'output-dir=', 'player=', 'http-proxy=', 'sogou-proxy=', 'sogou-env=']
if download_playlist:
short_opts = 'l' + short_opts
opts = ['playlist'] + opts
try:
opts, args = getopt.getopt(sys.argv[1:], short_opts, opts)
except getopt.GetoptError as err:
log.e(err)
log.e("try 'you-get --help' for more options")
sys.exit(2)
global force
global dry_run
global player
global sogou_proxy
global sogou_env
global cookies_txt
cookies_txt = None
info_only = False
playlist = False
merge = True
output_dir = '.'
proxy = None
traceback = False
for o, a in opts:
if o in ('-V', '--version'):
print(version)
sys.exit()
elif o in ('-h', '--help'):
print(version)
print(help)
sys.exit()
elif o in ('-f', '--force'):
force = True
elif o in ('-i', '--info'):
info_only = True
elif o in ('-u', '--url'):
dry_run = True
elif o in ('-c', '--cookies'):
from http import cookiejar
cookies_txt = cookiejar.MozillaCookieJar(a)
cookies_txt.load()
elif o in ('-l', '--playlist'):
playlist = True
elif o in ('-n', '--no-merge'):
merge = False
elif o in ('--no-proxy',):
proxy = ''
elif o in ('--debug',):
traceback = True
elif o in ('-o', '--output-dir'):
output_dir = a
elif o in ('-p', '--player'):
player = a
elif o in ('-x', '--http-proxy'):
proxy = a
elif o in ('-S', '--sogou'):
sogou_proxy = ("0.0.0.0", 0)
elif o in ('--sogou-proxy',):
sogou_proxy = parse_host(a)
elif o in ('--sogou-env',):
sogou_env = a
else:
log.e("try 'you-get --help' for more options")
sys.exit(2)
if not args:
if sogou_proxy is not None:
try:
if sogou_env is not None:
server = sogou_proxy_server(sogou_proxy, network_env=sogou_env)
else:
server = sogou_proxy_server(sogou_proxy)
server.serve_forever()
except KeyboardInterrupt:
if traceback:
raise
else:
sys.exit()
else:
print(help)
sys.exit()
set_http_proxy(proxy)
try:
download_main(download, download_playlist, args, playlist, output_dir, merge, info_only)
except KeyboardInterrupt:
if traceback:
raise
else:
sys.exit(1)
| kzganesan/you-get | src/you_get/common.py | Python | mit | 30,127 |
import math
import scipy.optimize as opt
log = math.log
exp = math.exp
small = 1e-20 # unitless
T0 = 1 # K
Tcrit = 650 # K
zero_C = 273.15 # K
p0 = 1 # Pa
atm = 101325 # Pa
bar = 100000 # Pa
# Tcrit is slightly above the critical point of water. This is used as an upperbound
# on values of T that would be even vaguely physically reasonable for our thermodynamic
# equations. Exact value here is unimportant.
#####
# The following values are quoted exactly* from
# CRC Handbook of Chemistry and Physics, 84th edition, 2003-2004, ISBN 0-8493-0484-9
# Comments specify units and section number in the source.
# *changes in units like g -> kg are done silently
#####
R = 8.314510 # J / mol / K 1-54
Mw_ = 0.01801528 # kg / mol 6-4 molecular weight of water
# enthalpy of vaporization of water at specified temperature
vap_T = zero_C # K 6-3
vap_enthalpy = 45054 # J / mol 6-3
# heat capacity and density of air (dry air?) at specified temperature and 1 bar pressure
air_T = 300 # K 6-1
air_cp = 1007 # J / kg / K 6-1
air_rho = 1.161 # kg / m^3 6-1
# heat capacity of liquid water at specified temperature
lw_T = 10 + zero_C # K 6-3
lw_cp = 4192.1 # J / kg / K 6-3
# saturation vapor pressure at specified temperature
sat_T = 10 + zero_C # K 6-10
sat_p_star = 1228.1 # Pa 6-10
####
# End of CRC reference values
###
# Old value of cv_ I was using is 37.47 J / mol / K.
# New value is from the following source:
# 1870 J / kg / K (or 33.68857 J / mol / K)
# page 77
# Iribarne & Godson (Eds.) (2012). Atmospheric thermodynamics (Vol. 6). Springer Science & Business Media.
# Derived values
Md_ = air_rho * R * air_T / bar # kg / mol molecular weight of air
cd_ = air_cp * Md_ # J / mol / K heat capacity of air, constant pressure
cv_ = 1870 * Mw_ # J / mol / K heat capacity of water vapor, constant p
cl_ = lw_cp * Mw_ # J / mol / K heat capacity of liquid water, constant p
cd = cd_ / R # unitless
cv = cv_ / R # unitless
cl = cl_ / R # unitless
Md = Md_ / R # kg K / J
Mw = Mw_ / R # kg K / J
epsilon = Mw_ / Md_ # unitless
Lc = vap_enthalpy / R + (cl - cv) * vap_T # K
Tc = sat_T # K
pc = sat_p_star * exp(Lc / Tc) # Pa
# Clausius-Clapeyron relation
def compute_p_star(T):
return pc * exp((cv - cl) * log(T / Tc) - Lc / T)
def compute_y_s(p, p_star):
return p_star / (p - p_star)
def compute_y_s_from_T(p, T):
return compute_y_s(p, compute_p_star(T))
def compute_ell(T):
return cv - cl + Lc / T
def compute_issat_ypT(y, p, T):
y_s = compute_y_s_from_T(p, T)
return (y_s > 0) and (y > y_s)
# Correctness of this is non-trivial.
def compute_issat_yps(y, p, s):
return compute_issat_ypT(y, p, compute_T_unsat(y, p, s))
def compute_M(y):
return Md * (1 + epsilon * y)
def compute_Ms_unsat(y, p, T):
if y < small:
return cd * log(T / T0) - log(p / p0)
else:
return ((cd + y * cv) * log(T / T0)
- (1 + y) * log(p / p0)
+ (1 + y) * log(1 + y)
- y * log(y))
def compute_Ms_sat(y, p, T):
p_star = compute_p_star(T)
y_s = compute_y_s(p, p_star)
ell = compute_ell(T)
if y < small:
# Unlikely to represent a physical situation,
# since y > y_s for saturated parcels.
return cd * log(T / T0) - log(p_star / p0) + log(y_s) + y_s * ell
else:
return ((cd + y * cv) * log(T / T0)
- (1 + y) * log(p_star / p0)
+ log (y_s)
+ (y_s - y) * ell)
def compute_T_unsat(y, p, s):
Ms = compute_M(y) * s
if y < small:
return T0 * exp((Md * s + log(p / p0)) / cd)
else:
return T0 * exp(
(Ms + (1 + y) * log(p / p0) - (1 + y) * log(1 + y) + y * log(y))
/ (cd + y * cv)
)
#
# For ease of writing this function and computation speed, we assume that the parcel
# specified is saturated, that y > 1e-10, that p < 1e10 Pa, and that the parcel's temperature
# is less than Tcrit. If any of these assumptions are violated this function may diverge,
# throw an exception, or return a nonsense value.
#
# This function is the main bottleneck in speeding up the code.
#
def compute_T_sat(y, p, s):
if y < 1e-10 or p > 1e10:
raise ValueError()
#
# Equation we wish to solve:
# M * s = (cd + y*cv) * log(T / T0) - (1 + y)*log(p_star / p0) + log(y_s) + (y_s - y) * ell
# where
# p_star is a function of T
# y_s = p_star / (p - p_star)
# ell = cv - cl + Lc / T
#
# Note that for T < Tcrit, ell > 0 and d p_star/dT > 0.
#
# Let
# f(T) = c0 * log(T) - (1 + y) * log(p_star) + log(y_s) + (y_s - y) * ell + c1
# = c0 * log(T) - y * log(p_star) - log(p - p_star) + (y_s - y) * ell + c1
# = c0 * log(T) - y * ((cv - cl) log(T / Tc) - Lc / T) - log(p - p_star)
# + y_s * ell - y * (cv - cl) - y * Lc / T + c1 - y * log(pc)
# = c0 * log(T) - y * (cv - cl) * log(T) - log(p - p_star)
# + y_s * ell + c2
# = c3 * log(T) - log(p - p_star) + y_s * ell + c2
# where
# c0 = cd + y * cv
# c1 = - (cd + y * cv) * log(T0) + (1 + y) * log(p0) - compute_M(y) * s
# c2 = c1 - y * log(pc) - y * (cv - cl) + y * (cv - cl) * log(Tc)
# c3 = cd + y * cl
#
# Note that f(T) is increasing in T for reasonable values of p and T. We want to find
# where f(T) = 0.
#
c1 = - (cd + y * cv) * log(T0) + (1 + y) * log(p0) - compute_M(y) * s
c2 = c1 - y * log(pc) - y * (cv - cl) + y * (cv - cl) * log(Tc)
c3 = cd + y * cl
#
# Since the parcel is saturated we know that y_s < y, so
# p_star = p (y_s / (1 + y_s)) = p (1 - 1 / (1 + y_s)) < p (1 - 1 / (1 + y))
# so we have an upperbound on the value of p_star. Furthermore, since cv - cl < 0,
# p_star = pc exp((cv - cl) log(T / Tc) - Lc / T)
# > pc exp((cv - cl) log(Tcrit / Tc) - Lc / T)
# so
# -Lc / T < log(p_star / pc) + (cl - cv) log(Tcrit / Tc)
# Lc / T > -log(p_star / pc) + (cv - cl) log(Tcrit / Tc) [1]
# T < Lc / (-log(p_star / pc) + (cv - cl) log(Tcrit / Tc))
# T < Lc / (-log(p / pc) - log(y / (1 + y)) + (cv - cl) log(Tcrit / Tc))
# where we have used that the right side of [1] is positive for p_star smaller than 1e11 Pa
# or so.
#
c4 = (cv - cl) * log(Tcrit / Tc)
p_star_max = p * y / (1 + y)
Tmax = Lc / (c4 - log(p_star_max / pc))
Tmax = min(Tmax, Tcrit)
# Couldn't figure out a good way to lower bound it. 100 K is pretty safe.
Tmin = 100
def f(T):
p_star = compute_p_star(T)
if p_star >= p_star_max:
return T * 1.0e200
y_s = p_star / (p - p_star)
ell = cv - cl + Lc / T
return c3 * log(T) - log(p - p_star) + y_s * ell + c2
if f(Tmin) >= 0:
return Tmin
if f(Tmax) <= 0:
return Tmax
return opt.brentq(f, Tmin, Tmax)
def compute_Tv_sat(y, p, s):
T = compute_T_sat(y, p, s)
y_s = compute_y_s_from_T(p, T)
return T * (1 + y_s) / (1 + y * epsilon)
def compute_Tv_unsat(y, p, s):
return compute_T_unsat(y, p, s) * (1 + y) / (1 + y * epsilon)
def compute_Mh_unsat(y, p, s):
return (cd + y * cv) * compute_T_unsat(y, p, s)
def compute_Mh_sat(y, p, s):
T = compute_T_sat(y, p, s)
y_s = compute_y_s_from_T(p, T)
ell = compute_ell(T)
return (cd + y * cv + (y_s - y) * ell) * T
def compute_Mh_dp_unsat(y, p, s):
return (1 + y) * compute_T_unsat(y, p, s) / p
def compute_Mh_dp_sat(y, p, s):
T = compute_T_sat(y, p, s)
y_s = compute_y_s_from_T(p, T)
return (1 + y_s) * T / p
##############################
#
# User-friendly thermodynamic functions with user-friendly names
#
##############################
# w is kg / kg
def compute_w(y):
return y * epsilon
# y is mol / mol
def compute_y(w):
return w / epsilon
# kg / mol
def molecular_weight_water():
return Mw_
# kg / mol
def molecular_weight_dry_air():
return Md_
# kg / mol
def molecular_weight_moist_air(y):
return (Md_ + y * Mw_) / (1 + y)
# partial pressure of water vapor at the saturation point
# Pa
def saturation_vapor_pressure(T):
    return compute_p_star(T)
# unitless
def relative_humidity(y, p, T):
y_s = compute_y_s_from_T(p, T)
if y > y_s:
return 1
else:
return y / y_s
# J / mol
def latent_heat_condensation(T):
return compute_ell(T) * R * T
# True or False
def is_saturated(y, p, T):
return compute_issat_ypT(y, p, T)
# J / kg / K
def entropy(y, p, T):
if compute_issat_ypT(y, p, T):
return compute_Ms_sat(y, p, T) / compute_M(y)
else:
return compute_Ms_unsat(y, p, T) / compute_M(y)
# K
def temperature(y, p, s):
if compute_issat_yps(y, p, s):
return compute_T_sat(y, p, s)
else:
return compute_T_unsat(y, p, s)
# K
def virtual_temperature(y, p, s):
if compute_issat_yps(y, p, s):
return compute_Tv_sat(y, p, s)
else:
return compute_Tv_unsat(y, p, s)
# J / kg
def enthalpy(y, p, s):
if compute_issat_yps(y, p, s):
return compute_Mh_sat(y, p, s) / compute_M(y)
else:
return compute_Mh_unsat(y, p, s) / compute_M(y)
# J / kg / Pa = m^3 / kg, units of specific volume
def enthalpy_dp(y, p, s):
if compute_issat_yps(y, p, s):
return compute_Mh_dp_sat(y, p, s) / compute_M(y)
else:
return compute_Mh_dp_unsat(y, p, s) / compute_M(y)
# For a parcel moving from pold to pnew, given the old temperature,
# compute the new temperature
# K
def new_temperature(y, Told, pold, pnew):
return temperature(y, pnew, entropy(y, pold, Told))
# For a parcel moving from pold to pnew, given the old temperature,
# compute the change in enthalpy
# J / kg
def change_in_enthalpy(y, Told, pold, pnew):
    s = entropy(y, pold, Told)
return enthalpy(y, pnew, s) - enthalpy(y, pold, s)
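# Minimal usage sketch (not part of the original module); the inputs below are
# illustrative values, not reference data.
if __name__ == '__main__':
    y = compute_y(0.010)  # a mixing ratio of 10 g/kg expressed as a molar ratio
    print('saturation vapor pressure at 20 C: {} Pa'.format(compute_p_star(zero_C + 20.0)))
    print('relative humidity at 20 C, 1 atm: {}'.format(relative_humidity(y, atm, zero_C + 20.0)))
    print('parcel lifted from 1000 hPa to 850 hPa ends at {} K'.format(
        new_temperature(y, zero_C + 20.0, 100000.0, 85000.0)))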
| estansifer/mape | src/thermo.py | Python | mit | 10,645 |
# coding:ascii
""" Example usage of DbC (design by contract)
In this example we show you how to use pre- and post-condition checkers
decorating the same function.
"""
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import DbC
DbC.ASSERTION_LEVEL = DbC.ASSERTION_ALL
# in this example we bring `pre` and `post` into our namespace
from DbC import pre, post
def check_pre(*args):
'Pre-condition checker.'
# must have an even number of args
assert ( len(args) & 1 ) == 0, 'Expected an even number of arguments'
# all numbers must be non-negative ints
assert all(i>=0 and isinstance(i,int) for i in args), \
'Numbers must be positive integers'
# all second numbers must be < 10
assert all(i<10 for i in args[1::2]), 'Numbers must be < 10'
def check_post(*args):
'Post-condition checker.'
# return value from decorated function is always the last positional
# parameter
rval = args[-1]
# simple check of the number of items in the return
assert 2 * len(rval) == len(args) - 1
# check units
units_out = [i%10 for i in rval]
units_in = [i for i in args[1:-1:2]]
assert units_out == units_in
# check tens
tens_out = [i//10 for i in rval]
tens_in = [i for i in args[0:-1:2]]
assert tens_out == tens_in
# It doesn't matter which order you include the decorators
@pre(check_pre)
@post(check_post)
def pairoff(*args):
'Make tens+units from pairs of numbers.'
it = iter(args)
return [10*a+b for a,b in zip(it,it)]
# Test data
print( pairoff(*range(8)) )
print( pairoff(4,2, 10,1) )
try: # odd number of args
pairoff(1,2,3,4,5)
except AssertionError as e:
print(e)
try: # unit >= 10
pairoff(4,2, 9,10)
except AssertionError as e:
print(e)
try: # negative
pairoff(4,2, -1,2)
except AssertionError as e:
print(e)
try: # non-integer
pairoff(1.25,0.6)
except AssertionError as e:
print(e)
| JazzyServices/jazzy | examples/DbC_methods_with_both_pre_and_post.py | Python | mit | 1,944 |
# Generated by Django 2.0.13 on 2019-06-22 18:48
import django.db.models.deletion
import django.utils.timezone
import django_fsm
from django.conf import settings
from django.db import migrations, models
import apps.core.models
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [
migrations.CreateModel(
name="Meeting",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"date_created",
apps.core.models.DateTimeCreatedField(
blank=True, default=django.utils.timezone.now, editable=False
),
),
(
"date_modified",
apps.core.models.DateTimeModifiedField(
blank=True, default=django.utils.timezone.now, editable=False
),
),
("format", models.CharField(blank=True, max_length=50)),
("message", models.TextField(blank=True)),
("datetime", models.DateTimeField()),
("state", django_fsm.FSMField(default="available", max_length=50)),
(
"cancelled_by",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
(
"mentor",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="mentors",
to=settings.AUTH_USER_MODEL,
),
),
(
"protege",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="proteges",
to=settings.AUTH_USER_MODEL,
),
),
],
options={"ordering": ("-datetime",)},
)
]
| SoPR/horas | apps/meetings/migrations/0001_initial.py | Python | mit | 2,640 |
from datetime import date
from openpyxl import load_workbook
if __name__ == '__main__':
wb = load_workbook('FixedCouponBond.xlsx')
ws = wb.active
# Take the input parameters
today = ws['C2'].value.date()
# OIS Data
ois_startdate = today
ois_maturities = []
ois_mktquotes = []
for cell in list(ws.iter_rows('B15:C44')):
ois_maturities.append(cell[0].value)
ois_mktquotes.append(cell[1].value)
# Credit Curve Data
ndps = []
ndpdates = []
for cell in list(ws.iter_rows('B6:C11')):
ndpdates.append(cell[0].value.date())
ndps.append(cell[1].value)
# Bond data
nominals = []
start_dates = []
end_dates = []
cpn_frequency = []
coupons = []
recovery_rates = []
for cell in list(ws.iter_rows('E5:J19')):
nominals.append(cell[0].value)
start_dates.append(cell[1].value.date())
end_dates.append(cell[2].value.date())
cpn_frequency.append(cell[3].value)
coupons.append(cell[4].value)
recovery_rates.append(cell[5].value)
# YOUR CODE HERE ....
# In the coupon calculation use 30e360 convention to compute the accrual period (i.e. tau)
    # The result of your code must be a variable of type list named
    # output_npv. The length of this list has to be equal to the number of bonds,
    # i.e. len(nominals)
# END OF YOUR CODE
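    # Hypothetical helper (not part of the original template): a sketch of the
    # 30E/360 accrual fraction mentioned in the instructions above. Day-of-month
    # values of 31 are clamped to 30 before differencing.
    def year_fraction_30e360(d1, d2):
        day1 = min(d1.day, 30)
        day2 = min(d2.day, 30)
        return ((d2.year - d1.year) * 360 + (d2.month - d1.month) * 30 + (day2 - day1)) / 360.0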
# Write results
    # A variable named output_npv of type list, with one entry per bond, is expected.
# In case this is not present, a message is written
if 'output_npv' not in locals():
output_npv = ["Not Successful" for x in range(len(nominals))]
out_list = list(ws.iter_rows('K5:K19'))
for i in range(len(output_npv)):
out_list[i][0].value = output_npv[i]
# A new file with the results is created
wb.save("FixedCouponBond_output.xlsx") | gabberthomson/fm_finpy | fixedcouponbond_project.py | Python | mit | 1,921 |
'''
The following example shows a small practical utility that converts a GIF file into a Python script, for convenient use with the Tkinter library.
'''
import base64, sys
if not sys.argv[1:]:
print "Usage: gif2tk.py giffile >pyfile"
sys.exit(1)
data = open(sys.argv[1], "rb").read()
if data[:4] != "GIF8":
print sys.argv[1], "is not a GIF file"
sys.exit(1)
print '# generated from', sys.argv[1], 'by gif2tk.py'
print
print 'from Tkinter import PhotoImage'
print
print 'image = PhotoImage(data="""'
print base64.encodestring(data),
print '""")' | iamweilee/pylearn | base64-example-4.py | Python | mit | 513 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
########## boafi Pentest script
########## - Perform various pentests automatically and save reports for further study
########## - Features/TODOs: Ipv6,DHCP,DNS,NTP,exploits,mitm..
########## - Router bruteforce for easy guessable passwords
########## - Scan networks hosts and identify vulnerabilities
########## ...
### Author: Yessou Sami
### Project Boafi
## Dependencies: dsniff(arpspoof),paramiko(ssh bruteforce),iptables,scapy
import os,time,argparse,random,paramiko,socket,logging,string
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
from datetime import datetime
## Functions
def brute_pass(usr,passwd,ip,port):
print "Trying for "+usr+" - "+passwd
ssh=paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(ip,port,usr,passwd)
print "Password is: ",passwd
open("foundpass","a").write("IP: "+ip+" PORT: "+port+" USER: "+usr+" PASS: "+passwd)
except paramiko.AuthenticationException:
print("Bad Password - "+passwd)
ssh.close()
except socket.error:
print("Failed connection")
ssh.close()
def EnaLogging():
os.popen("iptables -I FORWARD -p all -j LOG --log-prefix 'GENERAL-LOG-'")
        #Start Logging every connection to /var/log/messages
#Log also images on /tmp?
os.popen("iptables -I FORWARD -p all -m string --string 'jpg' --algo kmp -j LOG --log-prefix 'JPG-LOG-'")
os.popen("iptables -I FORWARD -p all -m string --string 'gif' --algo kmp -j LOG --log-prefix 'GIF-LOG-'")
os.popen("iptables -I FORWARD -p all -m string --string 'png' --algo kmp -j LOG --log-prefix 'PNG-LOG-'")
os.popen("iptables -I FORWARD -p all -m string --string 'mp4' --algo kmp -j LOG --log-prefix 'mp4-LOG-'")
#Log urls/web request
os.popen("iptables -I FORWARD -p tcp -m multiport --dports 80,443 -j LOG --log-prefix 'WWW-LOG-' ")
#Log DNS
os.popen("iptables -I FORWARD -p udp --dport 53 -j LOG --log-prefix 'DNS-LOG-'")
#Log credentials HTTP
os.popen("iptables -I FORWARD -p all -m string --string 'pass' --algo kmp -j LOG --log-prefix 'PASSWORD-LOG-'")
os.popen("iptables -I FORWARD -p all -m string --string 'user' --algo kmp -j LOG --log-prefix 'USERNAME-LOG-'")
###
parser = argparse.ArgumentParser()
parser.add_argument('-timeout', action='store', dest='timeout', default="none",
help='Define given seconds before the attack timeouts (mitm,scan,stress) if not specified will run until is killed')
parser.add_argument('-RA', action='store', dest='ipv6ra', default=False,
help='Flood ipv6 router advertisements for given minutes')
parser.add_argument('-file', action='store', dest='output', default=False,
help='File output for scans')
parser.add_argument('-scan', action='store', dest='scan', default=False,
help='Scan the given network address or host')
##ArpScan still in betatest.. need to fix scapy responses
parser.add_argument('--arpScan', action='store_true', dest='arpscan', default=False,
help='Arpscan to scan fast on LAN')
parser.add_argument('--syn', action='store_true', dest='syn', default=False,
help='SYN Scan enabled')
parser.add_argument('--service', action='store_true', dest='service', default=False,
help='Service Version detection enabled')
parser.add_argument('-brute', action='store', dest='brute', default="none",
help='Bruteforce SSH of given ip... example : -brute file-192.168.1.254:22')
parser.add_argument('-mitm', action='store', dest='mitm', default="none",
help='Perform MITM Attack on target')
parser.add_argument('-mitmAll', action='store', dest='mitmall', default="none",
help='Perform MITM Attack on all hosts')
parser.add_argument('-stop-mitm', action='store_true', dest='stopmitm', default=False,
help='Stop any Running MITM Attack')
parser.add_argument('-denyTcp', action='store', dest='denytcp', default="none",
help='Deny tcp connections of given host')
parser.add_argument('--dg', action='store', dest='dg', default="none",
help='Perform MITM Attack with given Default Gateway')
parser.add_argument('-craft', action='store', dest='packetcraft', default=False,
help='Enable Packet Crafting.. Example: -craft IP-TCP-DST192.168.1.1-SRC192.168.1.10-DPORT80')
parser.add_argument('-stress', action='store', dest='stress', default="none",
help='Perform Stress Testing on LAN.. Modes: DHCPv4-50,DHCPv6')
results = parser.parse_args()
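# Example invocations (illustrative only; option syntax as defined by the argparse setup above):
#   ./boafiPenTest.py -scan 192.168.1.0/24 --syn --service -file lanscan.xml
#   ./boafiPenTest.py -mitm 192.168.1.23 --dg 192.168.1.1 -timeout 300
#   ./boafiPenTest.py -brute userpass.txt-192.168.1.254:22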
### Functions
def httpflood(target):
ip=target
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, 80))
s.send("""GET /?="""+str(random.randrange(9999999))+""" HTTP/1.1\r\n
Connection: Keep-Alive """)
print """GET /"""+str(random.randrange(9999999))+""" HTTP/1.1\r\n
Connection: Keep-Alive """
        except socket.error:
print "Host seems down or some connection error trying again..."
##################
if not(results.output):
output=str(time.time())
else:
output=results.output
syn=""
scantype="-sn" #basic ping scan
if not(results.timeout=="none"):
timeout="timeout "+results.timeout+"s "
print "\n\nTimeout set for seconds:"+results.timeout
else:
timeout=""
if(results.scan):
ipaddr=str(results.scan)
if(results.arpscan): ##BETA TEST
res,unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=ipaddr))
                output="\n".join(r.sprintf("%Ether.src% %ARP.psrc%") for s,r in res)
file=open("arpscan.txt","a")
print output
file.write(output)
file.close()
else:
print ipaddr
if(results.syn):
scantype="-sS -O" #syn and
if(results.service):
scantype=scantype+" -sV"
scancmd=timeout+"sudo nmap "+scantype+" -oX "+output+" "+ipaddr #writes xml output so we can convert it into html
print scancmd
print os.popen(scancmd).read() #ping scan to know online hosts
if(results.ipv6ra):
minutes=results.ipv6ra
print "running for minutes: "+minutes
#run ipv6 RA flooding for N minutes
i=0
while (i <= minutes):
print "Firing RAs everywhere"
a = IPv6()
a.dst = "ff02::1" #IPv6 Destination "Everyone" Multicast (broadcast)
a.display()
b = ICMPv6ND_RA()
b.display()
c = ICMPv6NDOptSrcLLAddr()
c.lladdr = "00:50:56:24:3b:c0" #MAC
c.display()
d = ICMPv6NDOptMTU()
d.display()
e = ICMPv6NDOptPrefixInfo()
e.prefixlen = 64
randomhex=hex(random.randint(0, 16777215))[2:].upper()
prefix=randomhex[:4]
e.prefix = prefix+"::" #Global Prefix
e.display()
send(a/b/c/d/e) # Send the packet
print "Sending IPv6 RA Packet :)"
time.sleep(1)
i=i+1
print i
if not(results.denytcp=="none"): #Works if you are the gateway or during MITM
target=results.denytcp
os.popen("nohup "+timeout+"tcpkill host "+target+" >/dev/null 2>&1 &")
#deny tcp traffic
if not(results.mitmall=="none"): #Most efficent way to arpspoof subnet
ipnet=results.mitmall
iplist=os.popen("nmap -sP "+ipnet+" | grep 'Nmap scan' | awk '{ print $5; }'").read()
iplist=iplist.split()
dgip=os.popen("ip route show | grep 'default' | awk '{print $3}' ").read()
dgip=dgip.split()[0]
print "Spoofing "+dgip+"\n\n"
print "Targets: \n"
for ip in iplist:
print ip
os.popen("nohup "+timeout+"arpspoof -t "+ip+" "+dgip+" >/dev/null 2>&1 &")
os.popen("nohup "+timeout+"urlsnarf >> visitedsites >/dev/null 2>&1 &")
EnaLogging() # Enable iptables-logging
if not(results.mitm=="none"):
print "im in"
target=results.mitm
if(results.dg=="none"): #Searches for gateway
dg=os.popen("ip route show | grep 'default' | awk '{print $3}' ").read()
dg=dg.split()[0]
print dg
else:
dg=results.dg
#Automatically searches for gateway and arpspoof all hosts
os.popen("nohup "+timeout+"arpspoof -t "+target+" "+dg+" >/dev/null 2>&1 &")
os.popen("nohup "+timeout+"urlsnarf >> visitedsites &")
print "Started ARP Spoof and URL Logging"
#Start ARP Spoofing with given arguments or calculated ones
EnaLogging() # Enable iptables-logging
print "Added temp firewall rules to log MITM traffic"
if(results.packetcraft): #Packet Crafting with scapy
########### PACKET CRAFTING EXAMPLE TCP-DST192.168.1.1-SRC192.168.1.10
########### ./boafiPenTest.py -craft TCP-DST192.168.1.1-SRC192.168.1.10-DPORT80-5
craft=(results.packetcraft).split("-")
if("TCP" in craft[0]):
a=IP()/TCP()
elif("UDP" in craft[0]):
a=IP()/UDP()
if("DST" in craft[1]):
ipdst=craft[1].replace("DST","")
a.dst=ipdst
if("SRC" in craft[2]):
ipsrc=craft[2].replace("SRC","")
a.src=ipsrc
if("DPORT" in craft[3]):
                dport=int(craft[3].replace("DPORT",""))
                a.dport=dport
        n=int(craft[4]) ##N° of packets
i=0
while(i<=n):
i=i+1
a.display()
send(a)
print "Sent packet"
if not(results.stress=="none"):
try: #if it can
rawstring=results.stress.split("-")
mode=rawstring[0]
except:
print "Can't parse your command"
print "\nusing default DHCPv4 stress attack"
mode="DHCPv4"
count=20
if("DHCPv4" in mode): # DHCPv4-50
count=int(rawstring[1])
iface = "eth0"
unique_hexdigits = str.encode("".join(set(string.hexdigits.lower())))
print unique_hexdigits
packet = (Ether(dst="ff:ff:ff:ff:ff:ff")/
IP(src="0.0.0.0", dst="255.255.255.255")/
UDP(sport=68, dport=67)/
BOOTP(chaddr=RandString(12, unique_hexdigits))/
DHCP(options=[("message-type", "discover"), "end"]))
print "Sending dhcp requests"
sendp(packet,iface=iface,count=count)
if("HTTP" in mode): #HTTP-192.168.1.1-500
ip=rawstring[1]
count=int(rawstring[2])
i=0
while(i<=count):
i=i+1
httpflood(ip)
print "Finished flooding!"
if not(results.brute=="none"): # file-192.168.1.254:22 # file example : usr:pass format!!
cmd=results.brute ### Parsing strings to avoid errors
file=cmd.split("-")[0]
ip=cmd.split("-")[1]
ipparsed=ip.split(":")
ip=ipparsed[0].split()[0]
port=int(ipparsed[1].split()[0]) #remove spaces and then int
f=open(file,"r")
print "Start bruteforcing "+ip+" with list: "+file
for line in f:
usr=line.split(":")[0].split()[0] # remove spaces if any
passwd=line.split(":")[1].split()[0] #like above
brute_pass(usr,passwd,ip,port)
if(results.stopmitm): #Stop MITM...hosts should re-generate ARP automatically
os.popen("killall arpspoof")
os.popen("killall tcpkill")
# TODO
## mitm --> interact with SDS to get realtime data for visualization
## metasploit attacks?
## if connected to internet send info on "cloud"(site db)
## save data on xml,csv for webGUI visualization
| fnzv/Boafi | BoafiPenTest.py | Python | mit | 12,450 |
from setuptools import setup
setup(name='PyRankinity',
version='0.1',
description='Rankinity API Wrapper - See http://my.rankinity.com/api.en',
author='UpCounsel',
author_email='[email protected]',
url='https://www.github.com/upcounsel/pyrankinity',
packages=['pyrankinity'],
install_requires=[
'requests',
],
license='MIT'
) | upcounsel/PyRankinity | setup.py | Python | mit | 396 |
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^external/', include('external.urls')),
url(r'^dev/', include('dev.urls')),
]
| SequencingDOTcom/App-Market-API-integration | python/bootstrap/urls.py | Python | mit | 231 |
from collections import defaultdict
import re
import sys
from stop_words import STOP_WORD_SET
from collections import Counter
PUNCTUATION_RE = re.compile("[%s]" % re.escape(
"""!"&()*+,-\.\/:;<=>?\[\\\]^`\{|\}~]+"""))
DISCARD_RE = re.compile("^('{|`|git@|@|https?:)")
def remove_stop_words(word_seq, stop_words):
"""Sanitize using intersection and list.remove()"""
return [w for w in word_seq if w and w not in stop_words]
def remove_punctuation(word_seq):
def remove_punc_inner(word):
return PUNCTUATION_RE.sub("", word)
removed = map(remove_punc_inner, word_seq)
    # Remove empty strings
return [w for w in removed if w]
def filter_discards(word_seq):
def discard(word):
return not DISCARD_RE.match(word)
return filter(discard, word_seq)
def count_words_from_seq(word_seq):
word_count = defaultdict(int)
for word in word_seq:
word_count[word] += 1
return word_count
def keep_top_n_words(word_counts, n):
return dict(Counter(word_counts).most_common(n))
def count_words(text_blob):
word_seq = re.split('[=|\s]+', text_blob.lower())
print ' Splitting blob'
word_seq = filter_discards(word_seq)
print ' Filtering discards'
word_seq = remove_punctuation(word_seq)
print ' Removing punctuation'
word_seq = remove_stop_words(word_seq, STOP_WORD_SET)
print ' Removing stop words'
word_counts = count_words_from_seq(word_seq)
print ' Counting words'
top_n = keep_top_n_words(word_counts, 100)
print ' Filtering to top 100 words'
return top_n
if __name__ == '__main__':
print count_words(sys.stdin.read())
| sprin/heroku-tut | worker/word_count.py | Python | mit | 1,663 |
# Generated by Django 2.0.4 on 2018-04-24 21:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contacts', '0006_auto_20180423_1629'),
]
operations = [
migrations.AddField(
model_name='contact',
name='recruiter',
field=models.BooleanField(default=False),
),
]
| RobSpectre/garfield | garfield/contacts/migrations/0007_contact_recruiter.py | Python | mit | 392 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import socket
import traceback
from lineup.datastructures import Queue
class Node(object):
def __init__(self, *args, **kw):
self.initialize(*args, **kw)
def initialize(self, *args, **kw):
pass
@property
def id(self):
return '|'.join([self.get_hostname(), str(os.getpid())])
@property
def taxonomy(self):
class_name = self.__class__.__name__
module_name = self.__class__.__module__
return '.'.join([module_name, class_name])
def get_name(self):
return getattr(self, 'name', None) or self.taxonomy
def get_hostname(self):
return socket.gethostname()
def make_worker(self, Worker, index):
return Worker(self, self.input, self.output)
def start(self):
for worker in self.workers:
worker.start()
def feed(self, item):
self.input.put(item)
def enqueue_error(self, source_class, instructions, exception):
print exception, source_class, instructions
def wait_and_get_work(self):
return self.output.get()
@property
def running(self):
return all([w.alive for w in self.workers])
def are_running(self):
if self.running:
return True
self.start()
return self.running
class Pipeline(Node):
    def initialize(self, *args, **kw):
        self.queues = self.get_queues()
        steps = getattr(self, 'steps', None) or []
        self.workers = [self.make_worker(Worker, index) for index, Worker in enumerate(steps)]
@property
def input(self):
return self.queues[0]
@property
def output(self):
return self.queues[-1]
def get_queues(self):
steps = getattr(self, 'steps', None) or []
return [Queue() for _ in steps] + [Queue()]
def make_worker(self, Worker, index):
return Worker(self, self.queues[index], self.queues[index + 1])
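# Illustrative sketch (not part of the original module; worker names are hypothetical):
# a concrete pipeline declares its steps as a class attribute, then items are fed in
# and results are collected from the output queue.
#
#   class MyPipeline(Pipeline):
#       steps = [FetchWorker, ParseWorker]
#
#   pipeline = MyPipeline()
#   pipeline.start()
#   pipeline.feed({'url': 'http://example.com'})
#   result = pipeline.wait_and_get_work()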
| pombredanne/lineup | lineup/framework.py | Python | mit | 1,964 |
# coding: utf-8
from flask import Blueprint
user = Blueprint('user', __name__)
from . import views
| spark8103/ops17 | app/user/__init__.py | Python | mit | 101 |
import datetime
import os
import shutil
import time
from files_by_date.utils.logging_wrapper import get_logger, log_message
from files_by_date.validators.argument_validator import ArgumentValidator
logger = get_logger(name='files_service')
class FilesService:
def __init__(self):
raise NotImplementedError
@classmethod
def gather_files(cls, parent_directory, files):
for dir_name, subdir_list, file_list in os.walk(parent_directory):
if file_list:
files.extend(
['{dir_name}{os_sep}{file_name}'.format(dir_name=dir_name, os_sep=os.sep, file_name=file) for file
in file_list])
# [f'{dir_name}{os.sep}{file}' for file in file_list] # 3.6
for subdir in subdir_list:
files = cls.gather_files(subdir, files)
return files
@classmethod
def group_files_by_modified_date(cls, files):
grouped_files = {}
for file in files:
directory_tag = cls._get_directory_tag_for_file(file)
file_group = grouped_files.get(directory_tag, list())
file_group.append(file)
grouped_files[directory_tag] = file_group
return grouped_files
@classmethod
def copy_files(cls, file_groups, target_dir, force_overwrite):
if not os.path.exists(target_dir):
os.makedirs(target_dir) # TODO: not covered
total_count = Count()
for group in file_groups:
group_count = Count()
# group_dir = f'{target_dir}{os.sep}{group}' # 3.6
group_dir = '{target_dir}{os_sep}{group}'.format(target_dir=target_dir, os_sep=os.sep, group=group)
ArgumentValidator.validate_target_dir(group_dir)
if not os.path.exists(group_dir):
os.makedirs(group_dir)
# log_message(f'Created directory: {group_dir}') # 3.6
log_message('Created directory: {group_dir}'.format(group_dir=group_dir))
# log_message(f'Copying {len(file_groups[group])} files to {group_dir}') # 3.6
            log_message('Copying {group_size} files to {group_dir}'.format(group_size=len(file_groups[group]),
group_dir=group_dir))
for file in file_groups[group]:
# file_path = f'{group_dir}{os.sep}{os.path.basename(file)}' # 3.6
file_path = '{group_dir}{os_sep}{file_name}'.format(group_dir=group_dir, os_sep=os.sep,
file_name=os.path.basename(file))
if force_overwrite and os.path.exists(file_path):
os.remove(file_path)
if not os.path.exists(file_path):
shutil.copy2(file, group_dir)
group_count.add_copied(count=1)
else:
group_count.add_skipped(count=1) # TODO: not covered
total_count.add_files(count=len(file_groups[group]))
total_count.add_copied(count=group_count.copied)
total_count.add_skipped(count=group_count.skipped)
# log_message(f'Copied {group_count.copied}, skipped {group_count.skipped}') # 3.6
log_message('Copied {local_copied_count}, skipped {local_skipped_count}'.format(
local_copied_count=group_count.copied, local_skipped_count=group_count.skipped))
log_message(
# f'Total files count {total_count.files}, total copied {total_count.copied}, total skipped {total_count.skipped}') # 3.6
'Total files count {total_files_count}, total copied {total_copied_count}, total skipped {total_skipped_count}'.format(
total_files_count=total_count.files,
total_copied_count=total_count.copied,
total_skipped_count=total_count.skipped))
return total_count
@staticmethod
def _get_directory_tag_for_file(file):
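        # e.g. a file last modified on 2017-03-05 is grouped under the tag '201703'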
return datetime.datetime.strptime(time.ctime(os.path.getmtime(file)), "%a %b %d %H:%M:%S %Y").strftime('%Y%m')
class Count:
def __init__(self, *, files=0, copied=0, skipped=0):
self.files = files
self.copied = copied
self.skipped = skipped
def __str__(self):
# return f'files={self.files}, copied={self.copied}, skipped={self.skipped}' # 3.6
return 'files={files}, copied={copied}, skipped={skipped}'.format(files=self.files, copied=self.copied,
skipped=self.skipped)
def add_files(self, *, count=1):
self.files += count
def add_copied(self, *, count=0):
self.copied += count
def add_skipped(self, *, count=0):
self.skipped += count
| DEV3L/python-files-by-date | files_by_date/service/files_service.py | Python | mit | 4,825 |
"""
Example taken from http://matplotlib.org/1.5.0/examples/showcase/xkcd.html
"""
import matplotlib.pyplot as plt
import numpy as np
with plt.xkcd():
# Based on "The Data So Far" from XKCD by Randall Monroe
# http://xkcd.com/373/
index = [0, 1]
data = [0, 100]
labels = ['CONFIRMED BY EXPERIMENT', 'REFUTED BY EXPERIMENT']
fig = plt.figure()
ax = fig.add_axes((0.1, 0.2, 0.8, 0.7))
ax.bar(index, data, 0.25)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks([0, 1])
ax.set_xlim([-0.5, 1.5])
ax.set_ylim([0, 110])
ax.set_xticklabels(labels)
plt.yticks([])
plt.title("CLAIMS OF SUPERNATURAL POWERS")
fig.text(
0.5, 0.05,
'"The Data So Far" from xkcd by Randall Monroe',
ha='center')
plt.show()
| apdavison/space-station-transylvania | xkcd.py | Python | mit | 870 |
#!/usr/bin/env python3
# encoding: utf8
# license: ISC (MIT/BSD compatible) https://choosealicense.com/licenses/isc/
# This library is principally created for python 3. However python 2 support may be doable and is welcomed.
"""Use python in a more object oriented, saner and shorter way.
# WARNING
First: A word of warning. This library is an experiment. It is based on a wrapper that aggressively
wraps anything it comes in contact with and tries to stay invisible from then on (apart from adding methods).
However this means that this library is probably quite unsuitable for use in bigger projects. Why?
Because the wrapper will spread in your runtime image like a virus, 'infecting' more and more objects
causing strange side effects. That being said, this library is perfect for short scripts and especially
'one off' shell commands. Use its power wisely!
# Introduction
This library is heavily inspired by jQuery and underscore / lodash in the javascript world. Or you
could say that it is inspired by SmallTalk and in extension Ruby and how they deal with collections
and how to work with them.
In JS the problem is that the standard library sucks very badly and is missing many of the
most important convenience methods. Python is better in this regard, in that it has (almost) all
those methods available somewhere. BUT: quite a lot of them are available on the wrong object or
are free functions where they really should be methods. Examples: `str.join` really should be on iterable.
`map`, `zip`, `filter` should really be on iterable. Part of this problem comes from the design
choice of the python language, to provide a strange kind of minimal duck typing interface with the __*__
methods that the free methods like `map`, `zip`, `filter` then use. This however has the unfortunate
side effect in that writing python code using these methods often requires the reader to mentally skip
back and forth in a line to parse what it does. While this is not too bad for simple usage of these
functions, it becomes a nightmare if longer statements are built up from them.
Don't believe me? Try to parse this simple example as fast as you can:
>>> map(print, map(str.upper, sys.stdin.read().split('\n')))
How many backtrackings did you have to do? To me this code means, finding out that it starts in the
middle at `sys.stdin.read().split('\n')`, then I have to backtrack to `map(str.upper, …)`, then to
`map(print, …)`. Then while writing, I have to make sure that the number of parens at the end are
correct, which is something I usually have to use editor support for as it's quite hard to accurately
identify where the matching paren is.
The problem with this? This is hard! Hard to write, as it doesn't follow the way I think about this
statement. Literally, this means I usually write these statements from the inside out and wrap them
using my editor as I write them. As demonstrated above, it's also hard to read - requiring quite a
bit of backtracking.
So, what's the problem you say? Just don't do it, it's not pythonic you say! Well, Python has two
main workarounds available for this mess. One is to use list comprehension / generator
statements like this:
>>> [print(line.upper()) for line in sys.stdin.read().split('\n')]
This is clearly better. Now you only have to skip back and forth once instead of twice. Yay! Win!
To me that is not a good workaround. Sure it's nice to easily be able to create generators this
way, but it still requires of me to find where the statement starts and to backtrack to the beginning
to see what is happening. Oh, but they support filtering too!
>>> [print(line.upper()) for line in sys.stdin.read().split('\n') if line.upper().startswith('FNORD')]
Well, this is little better. For one thing, this doesn't solve the backtracking problem, but more
importantly, if the filtering has to be done on the processed version (here artificially on
`line.upper().startswith()`) then the operation has to be applied twice - which sucks because you have to write it twice, but also because it is computed twice.
The solution? Nest them!
>>> [print(line) for line in (line.upper() for line in sys.stdin.read().split('\n')) if line.startswith('FNORD')]
Do you start seeing the problem?
Compare it to this:
>>> for line in sys.stdin.read().split('\n'):
>>> uppercased = line.upper()
>>> if uppercased.startswith('FNORD'):
>>> print(uppercased)
Almost all my complaints are gone. It reads and writes almost completely in order it is computed.
Easy to read, easy to write - but one drawback. It's not an expression - it's a bunch of statements.
Which means that it's not easily combinable and abstractable with higher order methods or generators.
Also (to complain on a high level), you had to invent two variable names `line` and `uppercased`.
While that is not bad, especially if they explain what is going on - in this case it's not really
helping _and_ (drummroll) it requires some backtracking and buildup of mental state to read. Oh well.
Of course you can use explaining variables to untangle the mess of using higher order functions too:
Consider this code:
>>> cross_product_of_dependency_labels = \
>>> set(map(frozenset, itertools.product(*map(attrgetter('_labels'), dependencies))))
That certainly is hard to read (and write). Pulling out explaining variables, makes it better. Like so:
>>> labels = map(attrgetter('_labels'), dependencies)
>>> cross_product_of_dependency_labels = set(map(frozenset, itertools.product(*labels)))
Better, but still hard to read. Sure, those explaining variables are nice and sometimes
essential to understand the code. - but it does take up space in lines, and space in my head
while parsing this code. The question would be - is this really easier to read than something
like this?
>>> cross_product_of_dependency_labels = _(dependencies) \
>>> .map(_.each._labels) \
>>> .star_call(itertools.product) \
>>> .map(frozenset) \
>>> .call(set)
Sure you are not used to this at first, but consider the advantages. The intermediate variable
names are abstracted away - the data flows through the methods completely naturally. No jumping
back and forth to parse this at all. It just reads and writes exactly in the order it is computed.
What I want to accomplish, I can write down directly in order. Oh, and I don't have
to keep track of extra closing parentheses at the end of the expression.
So what is the essence of all of this?
Python is an object oriented language - but it doesn't really use what object orientation has taught
us about how we can work with collections and higher order methods in the languages that came before it
(especially SmallTalk, but more recently also Ruby). Why can't I make those beautiful fluent call chains
that SmallTalk could do 20 years ago in Python today?
Well, now you can.
# Features
To enable this style of coding this library has some features that might not be so obvious at first.
## Aggressive (specialized) wrapping
The most important entry point for this library is the function `wrap` or the perhaps preferable and
shorter alias `_`:
>>> _(something)
>>> # or
>>> wrap(something)
`wrap` is a factory function that returns a subclass of Wrapper, the basic and main object of this library.
This does two things: First it ensures that every attribute access, item access or method call off of
the wrapped object will also return a wrapped object. This means that once you wrap something, unless
you unwrap it explicitly via `.unwrap` or `._` it stays wrapped - pretty much no matter what you do
with it. The second thing this does is that it returns a subclass of Wrapper that has a specialized set
of methods depending on the type of what is wrapped. I envision this to expand in the future, but right
now the most useful wrappers are: Iterable, where we add all the python collection functions (map,
filter, zip, reduce, …) as well as a good batch of methods from itertools and a few extras for good
measure; Callable, where we add `.curry()` and `.compose()`; and Text, where most of the regex methods
are added.
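For a quick taste (an illustrative sketch - the wrappers and methods used here are defined below and
exercised in the tests at the end of this file):
>>> _('foo bar baz').findall(r'\w+').map(str.upper) == ('FOO', 'BAR', 'BAZ')
>>> _([1, 2, 3]).map(lambda x: x * x).sum() == 14
The string is wrapped as Text (gaining the regex methods), the list returned by `findall` comes back
wrapped as Iterable (gaining the collection methods), and so on down the chain.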
## Imports as expressions
Import statements are (ahem) statements in python. This is fine, but can be really annoying at times.
Consider this shell text filter written in python:
$ curl -sL 'https://www.iblocklist.com/lists.php' | egrep -A1 'star_[345]' | python3 -c "import sys, re; from xml.sax.saxutils import unescape; print('\n'.join(map(unescape, re.findall(r'value=\'(.*)\'', sys.stdin.read()))))"
Sure it has all the backtracking problems I talked about already. Using fluent this would already be much better.
$ curl -sL 'https://www.iblocklist.com/lists.php' \
| egrep -A1 'star_[345]' \
| python3 -c "from fluent import *; import sys, re; from xml.sax.saxutils import unescape; _(sys.stdin.read()).findall(r'value=\'(.*)\'').map(unescape).map(print)"
But this still leaves the problem that it has to start with this fluff
`from fluent import *; import sys, re; from xml.sax.saxutils import unescape;`
This prelude doesn't make the filter any easier to read or write, and it makes up almost half the characters
it took to achieve the wanted effect. Wouldn't it be nice if you could have
some kind of object (lets call it `lib` for lack of a better word), where you could just access the whole
python library via attribute access and let its machinery handle importing behind the scenes?
Like this:
$ curl -sL 'https://www.iblocklist.com/lists.php' | egrep -A1 'star_[345]' | python3 -m fluent "lib.sys.stdin.read().findall(r'value=\'(.*)\'').map(lib.xml.sax.saxutils.unescape).map(print)"
How's that for reading and writing if all the imports are inlined? Oh, and of course everything imported
via `lib` comes already pre-wrapped, so your code becomes even shorter.
More formally: the `lib` object, which is a wrapper around the python import machinery, allows
anything that is accessible by import to be imported as an expression for inline use.
So instead of
>>> import sys
>>> input = sys.stdin.read()
You can do
>>> input = lib.sys.stdin.read()
As a bonus, everything imported via lib is already pre-wrapped, so you can chain off of it immediately.
`lib` is also available on `_` which is itself just an alias for `wrap`. This is useful if you want
to import fewer symbols from fluent or want to import the library under a custom name:
>>> from fluent import _ # alias for wrap
>>> _.lib.sys.stdin.split('\n').map(str.upper).map(print)
>>> from fluent import _ as fluent # alias for wrap
>>> fluent.lib.sys.stdin.split('\n').map(str.upper).map(print)
Not sure if that is so super useful though, as you could also just do:
>>> import fluent
>>> fluent.lib.sys.stdin.split('\n').map(str.upper).map(print)
## Generating lambda's from expressions
`lambda` is great - it's often exactly what the doctor ordered. But it can also be a bit annoying
if you have to write it down every time you just want to get an attribute or call a method on every
object in a collection.
>>> _([dict(fnord='foo'), dict(fnord='bar')]).map(lambda each: each['fnord']) == ['foo', 'bar']
>>> class Foo(object):
>>> attr = 'attrvalue'
>>> def method(self, arg): return 'method+'+arg
>>> _([Foo(), Foo()]).map(lambda each: each.attr) == ['attrvalue', 'attrvalue']
>>> _([Foo(), Foo()]).map(lambda each: each.method('arg')) == ['method+arg', 'method+arg']
Sure it works, but wouldn't it be nice if we could save a variable and do this a bit shorter?
I mean, python does have attrgetter, itemgetter and methodcaller - they are just a bit
inconvenient to use:
>>> from operator import itemgetter, attrgetter, methodcaller
>>> _([dict(fnord='foo'), dict(fnord='bar')]).map(itemgetter('fnord')) == ['foo', 'bar']
>>> class Foo(object):
>>> attr = 'attrvalue'
>>> def method(self, arg): return 'method+'+arg
>>> _([Foo(), Foo()]).map(attrgetter('attr')) == ['attrvalue', 'attrvalue']
>>> _([Foo(), Foo()]).map(methodcaller('method', 'arg')) == ['method+arg', 'method+arg']
So there is an object `_.each` that just exposes a bit of syntactic sugar for these
(and a few operators). Basically, everything you do to `_.each` it will do to each object
in the collection:
>>> _([1,2,3]).map(_.each + 3) == [4,5,6]
>>> _([1,2,3]).filter(_.each < 3) == [1,2]
>>> _([1,2,3]).map(- _.each) == [-1,-2,-3]
>>> _([dict(fnord='foo'), dict(fnord='bar')]).map(_.each['fnord']) == ['foo', 'bar']
>>> class Foo(object):
>>> attr = 'attrvalue'
>>> def method(self, arg): return 'method+'+arg
>>> _([Foo(), Foo()]).map(_.each.attr) == ['attrvalue', 'attrvalue']
>>> _([Foo(), Foo()]).map(_.each.call.method('arg')) == ['method+arg', 'method+arg']
Yeah I know `_.each.call.*()` is crude - but I haven't found a good syntax to get rid of
the .call yet. Feedback welcome.
## Chaining off of methods that return None
A major nuisance for using fluent interfaces is methods that return None. Now this is mostly
a feature of python, where methods that don't have a return statement return None.
While this is way better than e.g. Ruby where that will just return the value of the last
expression - which means objects constantly leak internals, it is very annoying if you want to
chain off of one of these method calls. Fear not though, fluent has you covered. :)
Fluent wrapped objects will behave more like SmallTalk objects, in that they pretend
that every method that returns None actually returned self - thus allowing chaining. So this just works:
>>> _([3,2,1]).sort().reverse().call(print)
Even though both sort() and reverse() return None.
Of course, if you unwrap at any point with `.unwrap` or `._` you will get the true return value of `None`.
# Famous Last Words
This library tries to do a little of what underscore does for javascript. Just provide the missing glue to make the standard library nicer and easier to use - especially for short oneliners or short scripts. Have fun!
While I know that this is not something you want to use in big projects (see warning at the beginning)
I envision this to be very useful in quick python scripts and shell one-liner filters, where python was previously just that little bit too hard to use - the bit that 'overflowed the barrel' and prevented you from doing so.
"""
"""Future Ideas:
# TODO consider numeric type to do stuff like wrap(3).times(...)
or wrap([1,2,3]).call(len).times(yank_me)
Rework _.each.call.foo(bar) so 'call' is no longer a used-up symbol on each.
Also _.each.call.method(...) has a somewhat different meaning as the .call method on callable
could _.each.method(_, ...) work when auto currying is enabled?
Rework fluent so explicit unwrapping is required to do anything with wrapped objects.
(Basically calling ._ at the end)
The idea here is that this would likely enable the library to be used in big / bigger
projects as it loses its virus-like qualities.
* Maybe this is best done as a separate import?
* This would also be a chance to consider always using the iterator versions of
all the collection methods under their original name and automatically unpacking
/ triggering the iteration on ._? Not sure that's a great idea, as getting the
iterator to abstract over it is a) great and b) triggering the iteration is also
hard, see e.g. groupby.
* This would require careful analysis of where wrapped objects are handed out as arguments
to called methods e.g. .tee(). Also requires __repr__ and __str__ implementations that
make sense.
Roundable (for all numeric needs?)
round, times, repeat, if_true, if_false, else_
if_true, etc. are pretty much like conditional versions of .tee() I guess.
.if_true(function_to_call).else_(other_function_to_call)
"""
# REFACT rename wrap -> fluent? perhaps as an alias?
__all__ = [
'wrap', # generic wrapper factory that returns the appropriate subclass in this package according to what is wrapped
'_', # _ is an alias for wrap
'lib', # wrapper for python import machinery, access every importable package / function directly on this via attribute access
]
import typing
import re
import math
import types
import functools
import itertools
import operator
import collections.abc
def wrap(wrapped, *, previous=None, chain=None):
"""Factory method, wraps anything and returns the appropriate Wrapper subclass.
This is the main entry point into the fluent wonderland. Wrap something and
everything you call off of that will stay wrapped in the appropriate wrappers.
"""
if isinstance(wrapped, Wrapper):
return wrapped
by_type = (
(types.ModuleType, Module),
(typing.Text, Text),
(typing.Mapping, Mapping),
(typing.AbstractSet, Set),
(typing.Iterable, Iterable),
(typing.Callable, Callable),
)
if wrapped is None and chain is None and previous is not None:
chain = previous.chain
decider = wrapped
if wrapped is None and chain is not None:
decider = chain
for clazz, wrapper in by_type:
if isinstance(decider, clazz):
return wrapper(wrapped, previous=previous, chain=chain)
return Wrapper(wrapped, previous=previous, chain=chain)
# sadly _ is pretty much the only valid python identifier that is symbolic and easy to type. Unicode would also be a candidate, but it is hard to type, and $ or § (like in js) cannot be used
_ = wrap
def wrapped(wrapped_function, additional_result_wrapper=None, self_index=0):
"""
Using these decorators will take care of unwrapping and rewrapping the target object.
Thus all following code is written as if the methods live on the wrapped object.
Also perfect to adapt free functions as instance methods.
"""
@functools.wraps(wrapped_function)
def wrapper(self, *args, **kwargs):
result = wrapped_function(*args[0:self_index], self.chain, *args[self_index:], **kwargs)
if callable(additional_result_wrapper):
result = additional_result_wrapper(result)
return wrap(result, previous=self)
return wrapper
def unwrapped(wrapped_function):
"""Like wrapped(), but doesn't wrap the result.
Use this to adapt free functions that should not return a wrapped value"""
@functools.wraps(wrapped_function)
def forwarder(self, *args, **kwargs):
return wrapped_function(self.chain, *args, **kwargs)
return forwarder
def wrapped_forward(wrapped_function, additional_result_wrapper=None, self_index=1):
"""Forwards a call to a different object
This makes its method available on the wrapper.
This specifically models the case where the method forwarded to,
takes the current object as its first argument.
This also deals nicely with methods that just live on the wrong object.
"""
return wrapped(wrapped_function, additional_result_wrapper=additional_result_wrapper, self_index=self_index)
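# Illustrative note (added for clarity): these adapters are what turn free functions into chainable
# methods further down. E.g. `Wrapper.getattr = wrapped(getattr)` makes _('abc').getattr('upper')()
# return a wrapped 'ABC', and `Iterable.imap = wrapped_forward(map)` exposes map(function, iterable)
# as iterable.imap(function).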
def tupleize(wrapped_function):
""""Wrap the returned obect in a tuple to force execution of iterators.
Especially usefull to de-iterate methods / function
"""
@functools.wraps(wrapped_function)
def wrapper(self, *args, **kwargs):
return wrap(tuple(wrapped_function(self, *args, **kwargs)), previous=self)
return wrapper
class Wrapper(object):
"""Universal wrapper.
This class ensures that all function calls and attribute accesses
that can be caught in python will be wrapped with the wrapper again.
This ensures that the fluent interface will persist and everything
that is returned is itself able to be chained from again.
Using this wrapper changes the behaviour of python source code in quite a big way.
a) If you wrap something, if you want to get at the real object from any
function call or attribute access off of that object, you will have to
explicitly unwrap it.
b) All returned objects will be enhanced by behaviour that matches the
wrapped type. I.e. iterables will gain the collection interface,
mappings will gain the mapping interface, strings will gain the
string interface, etc.
"""
def __init__(self, wrapped, *, previous, chain):
assert wrapped is not None or chain is not None, 'Cannot chain off of None'
self.__wrapped = wrapped
self.__previous = previous
self.__chain = chain
# Proxied methods
__getattr__ = wrapped(getattr)
__getitem__ = wrapped(operator.getitem)
def __str__(self):
return "fluent.wrap(%s)" % self.chain
def __repr__(self):
return "fluent.wrap(%r)" % self.chain
# REFACT consider whether I want to support all other operators too or whether explicit
# unwrapping is actually a better thing
__eq__ = unwrapped(operator.eq)
# Breakouts
@property
def unwrap(self):
return self.__wrapped
_ = unwrap # alias
@property
def previous(self):
return self.__previous
@property
def chain(self):
"Like .unwrap but handles chaining off of methods / functions that return None like SmallTalk does"
if self.unwrap is not None:
return self.unwrap
return self.__chain
# Utilities
@wrapped
def call(self, function, *args, **kwargs):
"Call function with self as first argument"
# Different from __call__! Calls function(self, …) instead of self(…)
return function(self, *args, **kwargs)
setattr = wrapped(setattr)
getattr = wrapped(getattr)
hasattr = wrapped(hasattr)
delattr = wrapped(delattr)
isinstance = wrapped(isinstance)
issubclass = wrapped(issubclass)
def tee(self, function):
"""Like tee on the shell
Calls the argument function with self, but then discards the result and allows
further chaining from self."""
function(self)
return self
dir = wrapped(dir)
vars = wrapped(vars)
# REFACT consider to use wrap as the placeholder to have fewer symbols? Probably not worth it...
virtual_root_module = object()
class Module(Wrapper):
"""Importer shortcut.
All attribute accesses to instances of this class are converted to
an import statement, but as an expression that returns the wrapped imported object.
Example:
>>> lib.sys.stdin.read().map(print)
Is equivalent to
>>> import importlib
>>> wrap(importlib.import_module('sys').stdin).read().map(print)
But of course without creating the intermediate symbol 'stdin' in the current namespace.
All objects returned from lib are pre-wrapped, so you can chain off of them immediately.
"""
def __getattr__(self, name):
if hasattr(self.chain, name):
return wrap(getattr(self.chain, name))
import importlib
module = None
if self.chain is virtual_root_module:
module = importlib.import_module(name)
else:
module = importlib.import_module('.'.join((self.chain.__name__, name)))
return wrap(module)
wrap.lib = lib = Module(virtual_root_module, previous=None, chain=None)
class Callable(Wrapper):
def __call__(self, *args, **kwargs):
""""Call through with a twist.
If one of the args is `wrap` / `_`, then this acts as a shortcut to curry instead"""
# REFACT consider to drop the auto curry - doesn't look like it is so super useful
# REFACT Consider how to expand this so every method in the library supports auto currying
if wrap in args:
return self.curry(*args, **kwargs)
result = self.chain(*args, **kwargs)
chain = None if self.previous is None else self.previous.chain
return wrap(result, previous=self, chain=chain)
# REFACT rename to partial for consistency with stdlib?
# REFACT consider if there could be more utility in supporting placeholders for more usecases.
# examples:
# Switching argument order?
@wrapped
def curry(self, *curry_args, **curry_kwargs):
""""Like functools.partial, but with a twist.
If you use `wrap` or `_` as a positional argument, upon the actual call,
arguments will be left-filled for those placeholders.
For example:
>>> _(operator.add).curry(_, 'foo')('bar') == 'barfoo'
"""
placeholder = wrap
def merge_args(curried_args, args):
assert curried_args.count(placeholder) == len(args), \
'Need the right amount of arguments for the placeholders'
new_args = list(curried_args)
if placeholder in curried_args:
index = 0
for arg in args:
index = new_args.index(placeholder, index)
new_args[index] = arg
return new_args
@functools.wraps(self)
def wrapper(*actual_args, **actual_kwargs):
return self(
*merge_args(curry_args, actual_args),
**dict(curry_kwargs, **actual_kwargs)
)
return wrapper
@wrapped
def compose(self, outer):
return lambda *args, **kwargs: outer(self(*args, **kwargs))
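# e.g. _(lambda x: x * 2).compose(lambda x: x + 3)(5) == 13 - the wrapped callable runs first, then outer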
# REFACT consider aliasses wrap = chain = cast = compose
class Iterable(Wrapper):
"""Add iterator methods to any iterable.
Most functions that operate on iterables return an iterator by default in python3, which is very interesting
if you want to build efficient processing pipelines, but not so hot for quick and
dirty scripts where you have to wrap the result in a list() or tuple() all the time
to actually get at the results (e.g. to print them) or to actually trigger the
computation pipeline.
Thus all iterator methods on this class are immediate by default, i.e. they don't return the
iterator but instead consume it immediately and return a tuple. Of course if needed,
there is also an i{map,zip,enumerate,...} version for your enjoyment that returns the
iterator.
"""
__iter__ = unwrapped(iter)
@wrapped
def star_call(self, function, *args, **kwargs):
"Calls function(*self), but allows to prepend args and add kwargs."
return function(*args, *self, **kwargs)
# This looks like it should be the same as
# starcall = wrapped(lambda function, wrapped, *args, **kwargs: function(*wrapped, *args, **kwargs))
# but it's not. Why?
@wrapped
def join(self, with_what):
""""Like str.join, but the other way around. Bohoo!
Also calls str on all elements of the collection before handing
it off to str.join as a convenience.
"""
return with_what.join(map(str, self))
## Reductors .........................................
len = wrapped(len)
max = wrapped(max)
min = wrapped(min)
sum = wrapped(sum)
any = wrapped(any)
all = wrapped(all)
reduce = wrapped_forward(functools.reduce)
## Iterators .........................................
imap = wrapped_forward(map)
map = tupleize(imap)
istar_map = istarmap = wrapped_forward(itertools.starmap)
star_map = starmap = tupleize(istarmap)
ifilter = wrapped_forward(filter)
filter = tupleize(ifilter)
ienumerate = wrapped(enumerate)
enumerate = tupleize(ienumerate)
ireversed = wrapped(reversed)
reversed = tupleize(ireversed)
isorted = wrapped(sorted)
sorted = tupleize(isorted)
@wrapped
def igrouped(self, group_length):
"s -> (s0,s1,s2,...sn-1), (sn,sn+1,sn+2,...s2n-1), (s2n,s2n+1,s2n+2,...s3n-1), ..."
return zip(*[iter(self)]*group_length)
grouped = tupleize(igrouped)
izip = wrapped(zip)
zip = tupleize(izip)
@wrapped
def iflatten(self, level=math.inf):
"Modeled after rubys array.flatten @see http://ruby-doc.org/core-1.9.3/Array.html#method-i-flatten"
for element in self:
if level > 0 and isinstance(element, typing.Iterable):
for subelement in _(element).iflatten(level=level-1):
yield subelement
else:
yield element
return
flatten = tupleize(iflatten)
igroupby = wrapped(itertools.groupby)
def groupby(self, *args, **kwargs):
# Need an extra wrapping function to consume the deep iterators in time
result = []
for key, values in self.igroupby(*args, **kwargs):
result.append((key, tuple(values)))
return wrap(tuple(result))
def tee(self, function):
"This override tries to retain iterators, as a speedup"
if hasattr(self.chain, '__next__'): # iterator
first, second = itertools.tee(self.chain, 2)
function(wrap(first, previous=self))
return wrap(second, previous=self)
else:
return super().tee(function)
class Mapping(Iterable):
def __getattr__(self, name):
"Support JavaScript like dict item access via attribute access"
if name in self.chain:
return self[name]
return super().__getattr__(name)
@wrapped
def star_call(self, function, *args, **kwargs):
"Calls function(**self), but allows to add args and set defaults for kwargs."
return function(*args, **dict(kwargs, **self))
class Set(Iterable): pass
# REFACT consider to inherit from Iterable? It's how Python works...
class Text(Wrapper):
"Supports most of the regex methods as if they where native str methods"
# Regex Methods ......................................
search = wrapped_forward(re.search)
match = wrapped_forward(re.match)
fullmatch = wrapped_forward(re.fullmatch)
split = wrapped_forward(re.split)
findall = wrapped_forward(re.findall)
# REFACT consider ifind and find in the spirit of the collection methods?
finditer = wrapped_forward(re.finditer)
sub = wrapped_forward(re.sub, self_index=2)
subn = wrapped_forward(re.subn, self_index=2)
def make_operator(name):
__op__ = getattr(operator, name)
@functools.wraps(__op__)
def wrapper(self, *others):
return wrap(__op__).curry(wrap, *others)
return wrapper
class Each(Wrapper):
for name in dir(operator):
if not name.startswith('__'):
continue
locals()[name] = make_operator(name)
@wrapped
def __getattr__(self, name):
return operator.attrgetter(name)
@wrapped
def __getitem__(self, index):
return operator.itemgetter(index)
@property
def call(self):
class MethodCallerConstructor(object):
_method_name = None
def __getattr__(self, method_name):
self._method_name = method_name
return self
def __call__(self, *args, **kwargs):
assert self._method_name is not None, \
'Need to access the method to call first! E.g. _.each.call.method_name(arg1, kwarg="arg2")'
return wrap(operator.methodcaller(self._method_name, *args, **kwargs))
return MethodCallerConstructor()
each_marker = object()
wrap.each = Each(each_marker, previous=None, chain=None)
import unittest
from pyexpect import expect
import pytest
class FluentTest(unittest.TestCase): pass
class WrapperTest(FluentTest):
def test_should_not_wrap_a_wrapper_again(self):
wrapped = _(4)
expect(type(_(wrapped).unwrap)) == int
def test_should_provide_usefull_str_and_repr_output(self):
expect(repr(_('foo'))) == "fluent.wrap('foo')"
expect(str(_('foo'))) == "fluent.wrap(foo)"
def test_should_wrap_callables(self):
counter = [0]
def foo(): counter[0] += 1
expect(_(foo)).is_instance(Wrapper)
_(foo)()
expect(counter[0]) == 1
def test_should_wrap_attribute_accesses(self):
class Foo(): bar = 'baz'
expect(_(Foo()).bar).is_instance(Wrapper)
def test_should_wrap_item_accesses(self):
expect(_(dict(foo='bar'))['foo']).is_instance(Wrapper)
def test_should_error_when_accessing_missing_attribute(self):
class Foo(): pass
expect(lambda: _(Foo()).missing).to_raise(AttributeError)
def test_should_explictly_unwrap(self):
foo = 1
expect(_(foo).unwrap).is_(foo)
def test_should_wrap_according_to_returned_type(self):
expect(_('foo')).is_instance(Text)
expect(_([])).is_instance(Iterable)
expect(_(iter([]))).is_instance(Iterable)
expect(_({})).is_instance(Mapping)
expect(_({1})).is_instance(Set)
expect(_(lambda: None)).is_instance(Callable)
class CallMe(object):
def __call__(self): pass
expect(_(CallMe())).is_instance(Callable)
expect(_(object())).is_instance(Wrapper)
def test_should_remember_call_chain(self):
def foo(): return 'bar'
expect(_(foo)().unwrap) == 'bar'
expect(_(foo)().previous.unwrap) == foo
def test_should_delegate_equality_test_to_wrapped_instance(self):
# REFACT makes these tests much nicer - but probably has to go to make this library less virus like
expect(_(1)) == 1
expect(_('42')) == '42'
callme = lambda: None
expect(_(callme)) == callme
def test_hasattr_getattr_setattr_delattr(self):
expect(_((1,2)).hasattr('len'))
expect(_('foo').getattr('__len__')()) == 3
class Attr(object):
def __init__(self): self.foo = 'bar'
expect(_(Attr()).setattr('foo', 'baz').foo) == 'baz'
expect(_(Attr()).delattr('foo').unwrap) == None
expect(_(Attr()).delattr('foo').chain).isinstance(Attr)
expect(_(Attr()).delattr('foo').vars()) == {}
def test_isinstance_issubclass(self):
expect(_('foo').isinstance(str)) == True
expect(_('foo').isinstance(int)) == False
expect(_(str).issubclass(object)) == True
expect(_(str).issubclass(str)) == True
expect(_(str).issubclass(int)) == False
def test_dir_vars(self):
expect(_(object()).dir()).contains('__class__', '__init__', '__eq__')
class Foo(object): pass
foo = Foo()
foo.bar = 'baz'
expect(_(foo).vars()) == {'bar': 'baz'}
class CallableTest(FluentTest):
def test_call(self):
expect(_(lambda: 3)()) == 3
expect(_(lambda *x: x)(1,2,3)) == (1,2,3)
expect(_(lambda x=3: x)()) == 3
expect(_(lambda x=3: x)(x=4)) == 4
expect(_(lambda x=3: x)(4)) == 4
def test_star_call(self):
expect(wrap([1,2,3]).star_call(str.format, '{} - {} : {}')) == '1 - 2 : 3'
def test_should_call_callable_with_wrapped_as_first_argument(self):
expect(_([1,2,3]).call(min)) == 1
expect(_([1,2,3]).call(min)) == 1
expect(_('foo').call(str.upper)) == 'FOO'
expect(_('foo').call(str.upper)) == 'FOO'
def test_tee_breakout_a_function_with_side_effects_and_disregard_return_value(self):
side_effect = {}
def observer(a_list): side_effect['tee'] = a_list.join('-')
expect(_([1,2,3]).tee(observer)) == [1,2,3]
expect(side_effect['tee']) == '1-2-3'
def fnording(ignored): return 'fnord'
expect(_([1,2,3]).tee(fnording)) == [1,2,3]
def test_curry(self):
expect(_(lambda x, y: x*y).curry(2, 3)()) == 6
expect(_(lambda x=1, y=2: x*y).curry(x=3)()) == 6
def test_auto_currying(self):
expect(_(lambda x: x + 3)(_)(3)) == 6
expect(_(lambda x, y: x + y)(_, 'foo')('bar')) == 'barfoo'
expect(_(lambda x, y: x + y)('foo', _)('bar')) == 'foobar'
def test_curry_should_support_placeholders_to_curry_later_positional_arguments(self):
expect(_(operator.add).curry(_, 'foo')('bar')) == 'barfoo'
expect(_(lambda x, y, z: x + y + z).curry(_, 'baz', _)('foo', 'bar')) == 'foobazbar'
# expect(_(operator.add).curry(_2, _1)('foo', 'bar')) == 'barfoo'
def test_compose_cast_wraps_chain(self):
expect(_(lambda x: x*2).compose(lambda x: x+3)(5)) == 13
expect(_(str.strip).compose(str.capitalize)(' fnord ')) == 'Fnord'
class SmallTalkLikeBehaviour(FluentTest):
def test_should_pretend_methods_that_return_None_returned_self(self):
expect(_([3,2,1]).sort().unwrap) == None
expect(_([3,2,1]).sort().previous.previous) == [1,2,3]
expect(_([3,2,1]).sort().chain) == [1,2,3]
expect(_([2,3,1]).sort().sort(reverse=True).unwrap) == None
expect(_([2,3,1]).sort().sort(reverse=True).previous.previous.previous.previous) == [3,2,1]
expect(_([2,3,1]).sort().sort(reverse=True).chain) == [3,2,1]
def test_should_chain_off_of_previous_if_our_functions_return_none(self):
class Attr(object):
foo = 'bar'
expect(_(Attr()).setattr('foo', 'baz').foo) == 'baz'
# TODO check individually that the different forms of wrapping behave according to the SmallTalk contract
# wrapped
# unwrapped
# wrapped_forward
class IterableTest(FluentTest):
def test_should_call_callable_with_star_splat_of_self(self):
expect(_([1,2,3]).star_call(lambda x, y, z: z-x-y)) == 0
def test_join(self):
expect(_(['1','2','3']).join(' ')) == '1 2 3'
expect(_([1,2,3]).join(' ')) == '1 2 3'
def test_any(self):
expect(_((True, False)).any()) == True
expect(_((False, False)).any()) == False
def test_all(self):
expect(_((True, False)).all()) == False
expect(_((True, True)).all()) == True
def test_len(self):
expect(_((1,2,3)).len()) == 3
def test_min_max_sum(self):
expect(_([1,2]).min()) == 1
expect(_([1,2]).max()) == 2
expect(_((1,2,3)).sum()) == 6
def test_map(self):
expect(_([1,2,3]).imap(lambda x: x * x).call(list)) == [1, 4, 9]
expect(_([1,2,3]).map(lambda x: x * x)) == (1, 4, 9)
def test_starmap(self):
expect(_([(1,2), (3,4)]).istarmap(lambda x, y: x+y).call(list)) == [3, 7]
expect(_([(1,2), (3,4)]).starmap(lambda x, y: x+y)) == (3, 7)
def test_filter(self):
expect(_([1,2,3]).ifilter(lambda x: x > 1).call(list)) == [2,3]
expect(_([1,2,3]).filter(lambda x: x > 1)) == (2,3)
def test_zip(self):
expect(_((1,2)).izip((3,4)).call(tuple)) == ((1, 3), (2, 4))
expect(_((1,2)).izip((3,4), (5,6)).call(tuple)) == ((1, 3, 5), (2, 4, 6))
expect(_((1,2)).zip((3,4))) == ((1, 3), (2, 4))
expect(_((1,2)).zip((3,4), (5,6))) == ((1, 3, 5), (2, 4, 6))
def test_reduce(self):
# no iterator version of reduce as it's not a mapping
expect(_((1,2)).reduce(operator.add)) == 3
def test_grouped(self):
expect(_((1,2,3,4,5,6)).igrouped(2).call(list)) == [(1,2), (3,4), (5,6)]
expect(_((1,2,3,4,5,6)).grouped(2)) == ((1,2), (3,4), (5,6))
expect(_((1,2,3,4,5)).grouped(2)) == ((1,2), (3,4))
def test_group_by(self):
actual = {}
for key, values in _((1,1,2,2,3,3)).igroupby():
actual[key] = tuple(values)
expect(actual) == {
1: (1,1),
2: (2,2),
3: (3,3)
}
actual = {}
for key, values in _((1,1,2,2,3,3)).groupby():
actual[key] = tuple(values)
expect(actual) == {
1: (1,1),
2: (2,2),
3: (3,3)
}
def test_tee_should_not_break_iterators(self):
# This should work because the extend as well as the .call(list)
# should not exhaust the iterator created by .imap()
recorder = []
def record(generator): recorder.extend(generator)
expect(_([1,2,3]).imap(lambda x: x*x).tee(record).call(list)) == [1,4,9]
expect(recorder) == [1,4,9]
def test_enumerate(self):
expect(_(('foo', 'bar')).ienumerate().call(list)) == [(0, 'foo'), (1, 'bar')]
expect(_(('foo', 'bar')).enumerate()) == ((0, 'foo'), (1, 'bar'))
def test_reversed_sorted(self):
expect(_([2,1,3]).ireversed().call(list)) == [3,1,2]
expect(_([2,1,3]).reversed()) == (3,1,2)
expect(_([2,1,3]).isorted().call(list)) == [1,2,3]
expect(_([2,1,3]).sorted()) == (1,2,3)
expect(_([2,1,3]).isorted(reverse=True).call(list)) == [3,2,1]
expect(_([2,1,3]).sorted(reverse=True)) == (3,2,1)
def test_flatten(self):
expect(_([(1,2),[3,4],(5, [6,7])]).iflatten().call(list)) == \
[1,2,3,4,5,6,7]
expect(_([(1,2),[3,4],(5, [6,7])]).flatten()) == \
(1,2,3,4,5,6,7)
expect(_([(1,2),[3,4],(5, [6,7])]).flatten(level=1)) == \
(1,2,3,4,5,[6,7])
class MappingTest(FluentTest):
def test_should_call_callable_with_double_star_splat_as_keyword_arguments(self):
def foo(*, foo): return foo
expect(_(dict(foo='bar')).star_call(foo)) == 'bar'
expect(_(dict(foo='baz')).star_call(foo, foo='bar')) == 'baz'
expect(_(dict()).star_call(foo, foo='bar')) == 'bar'
def test_should_support_attribute_access_to_mapping_items(self):
expect(_(dict(foo='bar')).foo) == 'bar'
class StrTest(FluentTest):
def test_search(self):
expect(_('foo bar baz').search(r'b.r').span()) == (4,7)
def test_match_fullmatch(self):
expect(_('foo bar').match(r'foo\s').span()) == (0, 4)
expect(_('foo bar').fullmatch(r'foo\sbar').span()) == (0, 7)
def test_split(self):
expect(_('foo\nbar\nbaz').split(r'\n')) == ['foo', 'bar', 'baz']
expect(_('foo\nbar/baz').split(r'[\n/]')) == ['foo', 'bar', 'baz']
def test_findall_finditer(self):
expect(_("bazfoobar").findall('ba[rz]')) == ['baz', 'bar']
expect(_("bazfoobar").finditer('ba[rz]').map(_.each.call.span())) == ((0,3), (6,9))
def test_sub_subn(self):
expect(_('bazfoobar').sub(r'ba.', 'foo')) == 'foofoofoo'
expect(_('bazfoobar').sub(r'ba.', 'foo', 1)) == 'foofoobar'
expect(_('bazfoobar').sub(r'ba.', 'foo', count=1)) == 'foofoobar'
class ImporterTest(FluentTest):
def test_import_top_level_module(self):
import sys
expect(lib.sys) == sys
def test_import_symbol_from_top_level_module(self):
import sys
expect(lib.sys.stdin) == sys.stdin
def test_import_submodule_that_is_also_a_symbol_in_the_parent_module(self):
import os
expect(lib.os.name) == os.name
expect(lib.os.path.join) == os.path.join
def test_import_submodule_that_is_not_a_symbol_in_the_parent_module(self):
import dbm
expect(lambda: dbm.dumb).to_raise(AttributeError)
def delayed_import():
import dbm.dumb
return dbm.dumb
expect(lib.dbm.dumb) == delayed_import()
def test_imported_objects_are_pre_wrapped(self):
expect(lib.os.path.join('/foo', 'bar', 'baz').findall(r'/(\w*)')) == ['foo', 'bar', 'baz']
class EachTest(FluentTest):
def test_should_produce_attrgetter_on_attribute_access(self):
class Foo(object):
bar = 'baz'
expect(_([Foo(), Foo()]).map(_.each.bar)) == ('baz', 'baz')
def test_should_produce_itemgetter_on_item_access(self):
expect(_([['foo'], ['bar']]).map(_.each[0])) == ('foo', 'bar')
def test_should_produce_callable_on_binary_operator(self):
expect(_(['foo', 'bar']).map(_.each == 'foo')) == (True, False)
expect(_([3, 5]).map(_.each + 3)) == (6, 8)
expect(_([3, 5]).map(_.each < 4)) == (True, False)
def test_should_produce_callable_on_unary_operator(self):
expect(_([3, 5]).map(- _.each)) == (-3, -5)
expect(_([3, 5]).map(~ _.each)) == (-4, -6)
def test_should_produce_methodcaller_on_call_attribute(self):
# problem: _.each.call is now not an attrgetter
# _.each.method.call('foo') # like a method chaining
# _.each_call.method('foo')
# _.eachcall.method('foo')
class Tested(object):
def method(self, arg): return 'method+'+arg
expect(_(Tested()).call(_.each.call.method('argument'))) == 'method+argument'
expect(lambda: _.each.call('argument')).to_raise(AssertionError, '_.each.call.method_name')
class IntegrationTest(FluentTest):
def test_extract_and_decode_URIs(self):
from xml.sax.saxutils import unescape
line = '''<td><img src='/sitefiles/star_5.png' height='15' width='75' alt=''></td>
<td><input style='width:200px; outline:none; border-style:solid; border-width:1px; border-color:#ccc;' type='text' id='ydxerpxkpcfqjaybcssw' readonly='readonly' onClick="select_text('ydxerpxkpcfqjaybcssw');" value='http://list.iblocklist.com/?list=ydxerpxkpcfqjaybcssw&fileformat=p2p&archiveformat=gz'></td>'''
actual = _(line).findall(r'value=\'(.*)\'').imap(unescape).call(list)
expect(actual) == ['http://list.iblocklist.com/?list=ydxerpxkpcfqjaybcssw&fileformat=p2p&archiveformat=gz']
def test_call_module_from_shell(self):
from subprocess import check_output
output = check_output(
['python', '-m', 'fluent', "lib.sys.stdin.read().split('\\n').imap(str.upper).imap(print).call(list)"],
input=b'foo\nbar\nbaz')
expect(output) == b'FOO\nBAR\nBAZ\n'
if __name__ == '__main__':
import sys
assert len(sys.argv) == 2, \
"Usage: python -m fluent 'some code that can access fluent functions without having to import them'"
exec(sys.argv[1], dict(wrap=wrap, _=_, lib=lib))
| dwt/BayesianNetworks | fluent.py | Python | mit | 46,253 |
from collections import OrderedDict as odict
from values import decode_kv_pairs, encode_kv_pairs
from loops import decode_loops, encode_loops
def split_frames(lines):
'''
splits a list of lines into lines that are not part of a frame,
and a mapping from frame names to the lines that belong to each frame.
Frames start with a `save_` and end with a `stop_`. They also
end with the next data block, but this function is only called
with lines from a single data block.
'''
def parse_frame_name(line):
return line.split()[0][len('save_'):]
def frame_starts(line):
return line.startswith('save_')
def frame_stops(line):
return line.startswith('stop_')
outer = []
frame = None
frames = odict()
for line in lines:
if frame_stops(line):
frame = None
elif frame_starts(line):
name = parse_frame_name(line)
if name not in frames:
frames[name] = []
frame = frames[name]
elif frame is None:
outer.append(line)
else:
frame.append(line)
return outer, frames
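# A minimal usage sketch (hypothetical input, following the frame format described in the docstring):
# >>> outer, frames = split_frames(['_outer 1', 'save_frame1', '_key value', 'stop_'])
# >>> outer
# ['_outer 1']
# >>> dict(frames)
# {'frame1': ['_key value']}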
def decode_frames(lines):
outer, frames = split_frames(lines)
for key in frames:
frames[key] = decode_frame(frames[key])
return outer, frames
def decode_frame(lines):
outer, loops = decode_loops(lines)
frame = decode_kv_pairs(outer)
for key in loops:
frame[key] = loops[key]
return frame
def encode_frames(block):
def block_frames(block):
return [(k, block[k]) for k in block if isinstance(block[k], dict)]
lines = []
for name, frame in block_frames(block):
lines.append('save_%s' % name)
lines.append(encode_frame(frame))
lines.append('stop_')
return '\n'.join(lines)
def encode_frame(frame):
lines = []
lines.append(encode_kv_pairs(frame))
lines.append(encode_loops(frame))
return '\n'.join(lines)
| craigyk/pystar | frames.py | Python | mit | 1,984 |
#!/usr/bin/env python
from lighthouse import app
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| Nizebulous/lighthouse | run.py | Python | mit | 119 |
""" Python test discovery, setup and run of test functions. """
import re
import fnmatch
import functools
import py
import inspect
import sys
import pytest
from _pytest.mark import MarkDecorator, MarkerError
from py._code.code import TerminalRepr
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
import _pytest
import pluggy
cutdir2 = py.path.local(_pytest.__file__).dirpath()
cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
NoneType = type(None)
NOTSET = object()
isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(''))
def filter_traceback(entry):
return entry.path != cutdir1 and not entry.path.relto(cutdir2)
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
while hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, 'place_as'):
obj = obj.place_as
fslineno = py.code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
try:
return func.im_func
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception.
Attribute access can potentially fail for 'evil' Python objects.
See issue214
"""
try:
return getattr(object, name, default)
except Exception:
return default
class FixtureFunctionMarker:
def __init__(self, scope, params,
autouse=False, yieldctx=False, ids=None):
self.scope = scope
self.params = params
self.autouse = autouse
self.yieldctx = yieldctx
self.ids = ids
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a fixture factory function.
This decorator can be used (with or without parameters) to define
a fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module", "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
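A minimal usage sketch of the parametrized form described above::

    @pytest.fixture(params=[0, 1])
    def number(request):
        return request.param

    def test_number_is_non_negative(number):
        assert number >= 0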
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids)
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a yield-fixture factory function
(EXPERIMENTAL).
This takes the same arguments as :py:func:`pytest.fixture` but
expects a fixture function to use a ``yield`` instead of a ``return``
statement to provide a fixture. See
http://pytest.org/en/latest/yieldfixture.html for more info.
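A minimal usage sketch (``acquire``/``release`` are hypothetical setup and teardown helpers)::

    @pytest.yield_fixture
    def resource():
        handle = acquire()
        yield handle
        release(handle)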
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, yieldctx=True)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse,
yieldctx=True, ids=ids)
defaultfuncargprefixmarker = fixture()
def pyobj_property(name):
def get(self):
node = self.getparent(getattr(pytest, name))
if node is not None:
return node.obj
doc = "python %s object this node was collected from (can be None)." % (
name.lower(),)
return property(get, None, None, doc)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--fixtures', '--funcargs',
action="store_true", dest="showfixtures", default=False,
help="show available fixtures, sorted by plugin appearance")
parser.addini("usefixtures", type="args", default=[],
help="list of default fixtures to be used with this project")
parser.addini("python_files", type="args",
default=['test_*.py', '*_test.py'],
help="glob-style file patterns for Python test module discovery")
parser.addini("python_classes", type="args", default=["Test",],
help="prefixes or glob names for Python test class discovery")
parser.addini("python_functions", type="args", default=["test",],
help="prefixes or glob names for Python test function and "
"method discovery")
def pytest_cmdline_main(config):
if config.option.showfixtures:
showfixtures(config)
return 0
def pytest_generate_tests(metafunc):
# those alternative spellings are common - raise a specific error to alert
# the user
alt_spellings = ['parameterize', 'parametrise', 'parameterise']
for attr in alt_spellings:
if hasattr(metafunc.function, attr):
msg = "{0} has '{1}', spelling should be 'parametrize'"
raise MarkerError(msg.format(metafunc.function.__name__, attr))
try:
markers = metafunc.function.parametrize
except AttributeError:
return
for marker in markers:
metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
config.addinivalue_line("markers",
"parametrize(argnames, argvalues): call a test function multiple "
"times passing in different arguments in turn. argvalues generally "
"needs to be a list of values if argnames specifies only one name "
"or a list of tuples of values if argnames specifies multiple names. "
"Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
"decorated test function, one with arg1=1 and another with arg1=2."
"see http://pytest.org/latest/parametrize.html for more info and "
"examples."
)
config.addinivalue_line("markers",
"usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
"all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
)
def pytest_sessionstart(session):
session._fixturemanager = FixtureManager(session)
@pytest.hookimpl(trylast=True)
def pytest_namespace():
raises.Exception = pytest.fail.Exception
return {
'fixture': fixture,
'yield_fixture': yield_fixture,
'raises' : raises,
'collect': {
'Module': Module, 'Class': Class, 'Instance': Instance,
'Function': Function, 'Generator': Generator,
'_fillfuncargs': fillfixtures}
}
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
@pytest.hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
testfunction = pyfuncitem.obj
if pyfuncitem._isyieldedfunction():
testfunction(*pyfuncitem._args)
else:
funcargs = pyfuncitem.funcargs
testargs = {}
for arg in pyfuncitem._fixtureinfo.argnames:
testargs[arg] = funcargs[arg]
testfunction(**testargs)
return True
def pytest_collect_file(path, parent):
ext = path.ext
if ext == ".py":
if not parent.session.isinitpath(path):
for pat in parent.config.getini('python_files'):
if path.fnmatch(pat):
break
else:
return
ihook = parent.session.gethookproxy(path)
return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
return Module(path, parent)
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
outcome = yield
res = outcome.get_result()
if res is not None:
raise StopIteration
# nothing was collected elsewhere, let's do it here
if isclass(obj):
if collector.istestclass(obj, name):
Class = collector._getcustomclass("Class")
outcome.force_result(Class(name, parent=collector))
elif collector.istestfunction(obj, name):
# mock seems to store unbound methods (issue473), normalize it
obj = getattr(obj, "__func__", obj)
if not isfunction(obj):
collector.warn(code="C2", message=
"cannot collect %r because it is not a function."
% name, )
if getattr(obj, "__test__", True):
if is_generator(obj):
res = Generator(name, parent=collector)
else:
res = list(collector._genfunctions(name, obj))
outcome.force_result(res)
def is_generator(func):
try:
return py.code.getrawcode(func).co_flags & 32 # generator function
except AttributeError: # builtin functions have no bytecode
# assume them to not be generators
return False
class PyobjContext(object):
module = pyobj_property("Module")
cls = pyobj_property("Class")
instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
def obj():
def fget(self):
try:
return self._obj
except AttributeError:
self._obj = obj = self._getobj()
return obj
def fset(self, value):
self._obj = value
return property(fget, fset, None, "underlying python object")
obj = obj()
def _getobj(self):
return getattr(self.parent.obj, self.name)
def getmodpath(self, stopatmodule=True, includemodule=False):
""" return python path relative to the containing module. """
chain = self.listchain()
chain.reverse()
parts = []
for node in chain:
if isinstance(node, Instance):
continue
name = node.name
if isinstance(node, Module):
assert name.endswith(".py")
name = name[:-3]
if stopatmodule:
if includemodule:
parts.append(name)
break
parts.append(name)
parts.reverse()
s = ".".join(parts)
return s.replace(".[", "[")
def _getfslineno(self):
return getfslineno(self.obj)
def reportinfo(self):
# XXX caching?
obj = self.obj
if hasattr(obj, 'compat_co_firstlineno'):
# nose compatibility
fspath = sys.modules[obj.__module__].__file__
if fspath.endswith(".pyc"):
fspath = fspath[:-1]
lineno = obj.compat_co_firstlineno
else:
fspath, lineno = getfslineno(obj)
modpath = self.getmodpath()
assert isinstance(lineno, int)
return fspath, lineno, modpath
class PyCollector(PyobjMixin, pytest.Collector):
def funcnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_functions', name)
def isnosetest(self, obj):
""" Look for the __test__ attribute, which is applied by the
@nose.tools.istest decorator
"""
return safe_getattr(obj, '__test__', False)
def classnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_classes', name)
def istestfunction(self, obj, name):
return (
(self.funcnamefilter(name) or self.isnosetest(obj))
and safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
)
def istestclass(self, obj, name):
return self.classnamefilter(name) or self.isnosetest(obj)
def _matches_prefix_or_glob_option(self, option_name, name):
"""
checks if the given name matches the prefix or glob-pattern defined
in ini configuration.
"""
for option in self.config.getini(option_name):
if name.startswith(option):
return True
# check that name looks like a glob-string before calling fnmatch
# because this is called for every name in each collected module,
# and fnmatch is somewhat expensive to call
elif ('*' in option or '?' in option or '[' in option) and \
fnmatch.fnmatch(name, option):
return True
return False
def collect(self):
if not getattr(self.obj, "__test__", True):
return []
# NB. we avoid random getattrs and peek in the __dict__ instead
# (XXX originally introduced from a PyPy need, still true?)
dicts = [getattr(self.obj, '__dict__', {})]
for basecls in inspect.getmro(self.obj.__class__):
dicts.append(basecls.__dict__)
seen = {}
l = []
for dic in dicts:
for name, obj in dic.items():
if name in seen:
continue
seen[name] = True
res = self.makeitem(name, obj)
if res is None:
continue
if not isinstance(res, list):
res = [res]
l.extend(res)
l.sort(key=lambda item: item.reportinfo()[:2])
return l
def makeitem(self, name, obj):
#assert self.ihook.fspath == self.fspath, self
return self.ihook.pytest_pycollect_makeitem(
collector=self, name=name, obj=obj)
def _genfunctions(self, name, funcobj):
module = self.getparent(Module).obj
clscol = self.getparent(Class)
cls = clscol and clscol.obj or None
transfer_markers(funcobj, cls, module)
fm = self.session._fixturemanager
fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
metafunc = Metafunc(funcobj, fixtureinfo, self.config,
cls=cls, module=module)
methods = []
if hasattr(module, "pytest_generate_tests"):
methods.append(module.pytest_generate_tests)
if hasattr(cls, "pytest_generate_tests"):
methods.append(cls().pytest_generate_tests)
if methods:
self.ihook.pytest_generate_tests.call_extra(methods,
dict(metafunc=metafunc))
else:
self.ihook.pytest_generate_tests(metafunc=metafunc)
Function = self._getcustomclass("Function")
if not metafunc._calls:
yield Function(name, parent=self, fixtureinfo=fixtureinfo)
else:
# add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
add_funcarg_pseudo_fixture_def(self, metafunc, fm)
for callspec in metafunc._calls:
subname = "%s[%s]" %(name, callspec.id)
yield Function(name=subname, parent=self,
callspec=callspec, callobj=funcobj,
fixtureinfo=fixtureinfo,
keywords={callspec.id:True})
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
# this function will transform all collected calls to a functions
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
def _marked(func, mark):
""" Returns True if :func: is already marked with :mark:, False otherwise.
This can happen if marker is applied to class and the test file is
invoked more than once.
"""
try:
func_mark = getattr(func, mark.name)
except AttributeError:
return False
return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
def transfer_markers(funcobj, cls, mod):
# XXX this should rather be code in the mark plugin or the mark
# plugin should merge with the python plugin.
for holder in (cls, mod):
try:
pytestmark = holder.pytestmark
except AttributeError:
continue
if isinstance(pytestmark, list):
for mark in pytestmark:
if not _marked(funcobj, mark):
mark(funcobj)
else:
if not _marked(funcobj, pytestmark):
pytestmark(funcobj)
class Module(pytest.File, PyCollector):
""" Collector for test classes and functions. """
def _getobj(self):
return self._memoizedcall('_obj', self._importtestmodule)
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Module, self).collect()
def _importtestmodule(self):
# we assume we are only called once per module
try:
mod = self.fspath.pyimport(ensuresyspath="append")
except SyntaxError:
raise self.CollectError(
py.code.ExceptionInfo().getrepr(style="short"))
except self.fspath.ImportMismatchError:
e = sys.exc_info()[1]
raise self.CollectError(
"import file mismatch:\n"
"imported module %r has this __file__ attribute:\n"
" %s\n"
"which is not the same as the test file we want to collect:\n"
" %s\n"
"HINT: remove __pycache__ / .pyc files and/or use a "
"unique basename for your test file modules"
% e.args
)
#print "imported test module", mod
self.config.pluginmanager.consider_module(mod)
return mod
def setup(self):
setup_module = xunitsetup(self.obj, "setUpModule")
if setup_module is None:
setup_module = xunitsetup(self.obj, "setup_module")
if setup_module is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if inspect.getargspec(setup_module)[0]:
setup_module(self.obj)
else:
setup_module()
fin = getattr(self.obj, 'tearDownModule', None)
if fin is None:
fin = getattr(self.obj, 'teardown_module', None)
if fin is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if inspect.getargspec(fin)[0]:
finalizer = lambda: fin(self.obj)
else:
finalizer = fin
self.addfinalizer(finalizer)
class Class(PyCollector):
""" Collector for test methods. """
def collect(self):
if hasinit(self.obj):
self.warn("C1", "cannot collect test class %r because it has a "
"__init__ constructor" % self.obj.__name__)
return []
return [self._getcustomclass("Instance")(name="()", parent=self)]
def setup(self):
setup_class = xunitsetup(self.obj, 'setup_class')
if setup_class is not None:
setup_class = getattr(setup_class, 'im_func', setup_class)
setup_class = getattr(setup_class, '__func__', setup_class)
setup_class(self.obj)
fin_class = getattr(self.obj, 'teardown_class', None)
if fin_class is not None:
fin_class = getattr(fin_class, 'im_func', fin_class)
fin_class = getattr(fin_class, '__func__', fin_class)
self.addfinalizer(lambda: fin_class(self.obj))
class Instance(PyCollector):
def _getobj(self):
obj = self.parent.obj()
return obj
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Instance, self).collect()
def newinstance(self):
self.obj = self._getobj()
return self.obj
class FunctionMixin(PyobjMixin):
""" mixin for the code common to Function and Generator.
"""
def setup(self):
""" perform setup for this test function. """
if hasattr(self, '_preservedparent'):
obj = self._preservedparent
elif isinstance(self.parent, Instance):
obj = self.parent.newinstance()
self.obj = self._getobj()
else:
obj = self.parent.obj
if inspect.ismethod(self.obj):
setup_name = 'setup_method'
teardown_name = 'teardown_method'
else:
setup_name = 'setup_function'
teardown_name = 'teardown_function'
setup_func_or_method = xunitsetup(obj, setup_name)
if setup_func_or_method is not None:
setup_func_or_method(self.obj)
fin = getattr(obj, teardown_name, None)
if fin is not None:
self.addfinalizer(lambda: fin(self.obj))
def _prunetraceback(self, excinfo):
if hasattr(self, '_obj') and not self.config.option.fulltrace:
code = py.code.Code(get_real_func(self.obj))
path, firstlineno = code.path, code.firstlineno
traceback = excinfo.traceback
ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
if ntraceback == traceback:
ntraceback = ntraceback.cut(path=path)
if ntraceback == traceback:
#ntraceback = ntraceback.cut(excludepath=cutdir2)
ntraceback = ntraceback.filter(filter_traceback)
if not ntraceback:
ntraceback = traceback
excinfo.traceback = ntraceback.filter()
# issue364: mark all but first and last frames to
# only show a single-line message for each frame
if self.config.option.tbstyle == "auto":
if len(excinfo.traceback) > 2:
for entry in excinfo.traceback[1:-1]:
entry.set_repr_style('short')
def _repr_failure_py(self, excinfo, style="long"):
if excinfo.errisinstance(pytest.fail.Exception):
if not excinfo.value.pytrace:
return str(excinfo.value)
return super(FunctionMixin, self)._repr_failure_py(excinfo,
style=style)
def repr_failure(self, excinfo, outerr=None):
assert outerr is None, "XXX outerr usage is deprecated"
style = self.config.option.tbstyle
if style == "auto":
style = "long"
return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
def collect(self):
# test generators are seen as collectors but they also
# invoke setup/teardown on popular request
# (induced by the common "test_*" naming shared with normal tests)
self.session._setupstate.prepare(self)
# see FunctionMixin.setup and test_setupstate_is_preserved_134
self._preservedparent = self.parent.obj
l = []
seen = {}
for i, x in enumerate(self.obj()):
name, call, args = self.getcallargs(x)
if not callable(call):
raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
if name is None:
name = "[%d]" % i
else:
name = "['%s']" % name
if name in seen:
raise ValueError("%r generated tests with non-unique name %r" %(self, name))
seen[name] = True
l.append(self.Function(name, self, args=args, callobj=call))
return l
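    # Illustrative example (added comment; check_even is a hypothetical helper):
    # a deprecated yield-style test generator such as
    #     def test_gen():
    #         yield check_even, 2
    #         yield "odd-case", check_even, 3
    # is collected here into Function items named "[0]" and "['odd-case']".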
def getcallargs(self, obj):
if not isinstance(obj, (tuple, list)):
obj = (obj,)
        # explicit naming
if isinstance(obj[0], py.builtin._basestring):
name = obj[0]
obj = obj[1:]
else:
name = None
call, args = obj[0], obj[1:]
return name, call, args
def hasinit(obj):
init = getattr(obj, '__init__', None)
if init:
if init != object.__init__:
return True
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
_notexists = object()
class CallSpec2(object):
def __init__(self, metafunc):
self.metafunc = metafunc
self.funcargs = {}
self._idlist = []
self.params = {}
self._globalid = _notexists
self._globalid_args = set()
self._globalparam = _notexists
self._arg2scopenum = {} # used for sorting parametrized resources
self.keywords = {}
self.indices = {}
def copy(self, metafunc):
cs = CallSpec2(self.metafunc)
cs.funcargs.update(self.funcargs)
cs.params.update(self.params)
cs.keywords.update(self.keywords)
cs.indices.update(self.indices)
cs._arg2scopenum.update(self._arg2scopenum)
cs._idlist = list(self._idlist)
cs._globalid = self._globalid
cs._globalid_args = self._globalid_args
cs._globalparam = self._globalparam
return cs
def _checkargnotcontained(self, arg):
if arg in self.params or arg in self.funcargs:
raise ValueError("duplicate %r" %(arg,))
def getparam(self, name):
try:
return self.params[name]
except KeyError:
if self._globalparam is _notexists:
raise ValueError(name)
return self._globalparam
@property
def id(self):
return "-".join(map(str, filter(None, self._idlist)))
def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
param_index):
for arg,val in zip(argnames, valset):
self._checkargnotcontained(arg)
valtype_for_arg = valtypes[arg]
getattr(self, valtype_for_arg)[arg] = val
self.indices[arg] = param_index
self._arg2scopenum[arg] = scopenum
if val is _notexists:
self._emptyparamspecified = True
self._idlist.append(id)
self.keywords.update(keywords)
def setall(self, funcargs, id, param):
for x in funcargs:
self._checkargnotcontained(x)
self.funcargs.update(funcargs)
if id is not _notexists:
self._idlist.append(id)
if param is not _notexists:
assert self._globalparam is _notexists
self._globalparam = param
for arg in funcargs:
self._arg2scopenum[arg] = scopenum_function
class FuncargnamesCompatAttr:
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
return self.fixturenames
class Metafunc(FuncargnamesCompatAttr):
"""
Metafunc objects are passed to the ``pytest_generate_tests`` hook.
They help to inspect a test function and to generate tests according to
test configuration or values specified in the class or module where a
test function is defined.
:ivar fixturenames: set of fixture names required by the test function
:ivar function: underlying python test function
:ivar cls: class object where the test function is defined in or ``None``.
:ivar module: the module object where the test function is defined in.
:ivar config: access to the :class:`_pytest.config.Config` object for the
test session.
:ivar funcargnames:
.. deprecated:: 2.3
Use ``fixturenames`` instead.
"""
def __init__(self, function, fixtureinfo, config, cls=None, module=None):
self.config = config
self.module = module
self.function = function
self.fixturenames = fixtureinfo.names_closure
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
self.cls = cls
self._calls = []
self._ids = py.builtin.set()
def parametrize(self, argnames, argvalues, indirect=False, ids=None,
scope=None):
""" Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
during the collection phase. If you need to setup expensive resources
see about setting indirect to do it rather at test setup time.
:arg argnames: a comma-separated string denoting one or more argument
names, or a list/tuple of argument strings.
:arg argvalues: The list of argvalues determines how often a
test is invoked with different argument values. If only one
argname was specified argvalues is a list of simple values. If N
argnames were specified, argvalues must be a list of N-tuples,
where each tuple-element specifies a value for its respective
argname.
:arg indirect: The list of argnames or boolean. A list of arguments'
names (subset of argnames). If True the list contains all names from
the argnames. Each argvalue corresponding to an argname in this list will
be passed as request.param to its respective argname fixture
function so that it can perform more expensive setups during the
setup phase of a test rather than at collection time.
:arg ids: list of string ids, or a callable.
            If strings, each one corresponds to the respective entry in
            argvalues so that it becomes part of the test id.
If callable, it should take one argument (a single argvalue) and return
a string or return None. If None, the automatically generated id for that
argument will be used.
If no ids are provided they will be generated automatically from
the argvalues.
:arg scope: if specified it denotes the scope of the parameters.
The scope is used for grouping tests by parameter instances.
It will also override any fixture-function defined scope, allowing
to set a dynamic scope using test context or configuration.
"""
# individual parametrized argument sets can be wrapped in a series
# of markers in which case we unwrap the values and apply the mark
# at Function init
newkeywords = {}
unwrapped_argvalues = []
for i, argval in enumerate(argvalues):
while isinstance(argval, MarkDecorator):
newmark = MarkDecorator(argval.markname,
argval.args[:-1], argval.kwargs)
newmarks = newkeywords.setdefault(i, {})
newmarks[newmark.markname] = newmark
argval = argval.args[-1]
unwrapped_argvalues.append(argval)
argvalues = unwrapped_argvalues
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if len(argnames) == 1:
argvalues = [(val,) for val in argvalues]
if not argvalues:
argvalues = [(_notexists,) * len(argnames)]
if scope is None:
scope = "function"
scopenum = scopes.index(scope)
valtypes = {}
for arg in argnames:
if arg not in self.fixturenames:
raise ValueError("%r uses no fixture %r" %(self.function, arg))
if indirect is True:
valtypes = dict.fromkeys(argnames, "params")
elif indirect is False:
valtypes = dict.fromkeys(argnames, "funcargs")
elif isinstance(indirect, (tuple, list)):
valtypes = dict.fromkeys(argnames, "funcargs")
for arg in indirect:
if arg not in argnames:
raise ValueError("indirect given to %r: fixture %r doesn't exist" %(
self.function, arg))
valtypes[arg] = "params"
idfn = None
if callable(ids):
idfn = ids
ids = None
if ids and len(ids) != len(argvalues):
raise ValueError('%d tests specified with %d ids' %(
len(argvalues), len(ids)))
if not ids:
ids = idmaker(argnames, argvalues, idfn)
newcalls = []
for callspec in self._calls or [CallSpec2(self)]:
for param_index, valset in enumerate(argvalues):
assert len(valset) == len(argnames)
newcallspec = callspec.copy(self)
newcallspec.setmulti(valtypes, argnames, valset, ids[param_index],
newkeywords.get(param_index, {}), scopenum,
param_index)
newcalls.append(newcallspec)
self._calls = newcalls
def addcall(self, funcargs=None, id=_notexists, param=_notexists):
""" (deprecated, use parametrize) Add a new call to the underlying
test function during the collection phase of a test run. Note that
request.addcall() is called during the test collection phase prior and
independently to actual test execution. You should only use addcall()
if you need to specify multiple arguments of a test function.
:arg funcargs: argument keyword dictionary used when invoking
the test function.
:arg id: used for reporting and identification purposes. If you
don't supply an `id` an automatic unique id will be generated.
:arg param: a parameter which will be exposed to a later fixture function
invocation through the ``request.param`` attribute.
"""
assert funcargs is None or isinstance(funcargs, dict)
if funcargs is not None:
for name in funcargs:
if name not in self.fixturenames:
pytest.fail("funcarg %r not used in this function." % name)
else:
funcargs = {}
if id is None:
raise ValueError("id=None not allowed")
if id is _notexists:
id = len(self._calls)
id = str(id)
if id in self._ids:
raise ValueError("duplicate id %r" % id)
self._ids.add(id)
cs = CallSpec2(self)
cs.setall(funcargs, id, param)
self._calls.append(cs)
def _idval(val, argname, idx, idfn):
if idfn:
try:
s = idfn(val)
if s:
return s
except Exception:
pass
if isinstance(val, (float, int, str, bool, NoneType)):
return str(val)
elif isinstance(val, REGEX_TYPE):
return val.pattern
elif enum is not None and isinstance(val, enum.Enum):
return str(val)
elif isclass(val) and hasattr(val, '__name__'):
return val.__name__
return str(argname)+str(idx)
def _idvalset(idx, valset, argnames, idfn):
this_id = [_idval(val, argname, idx, idfn)
for val, argname in zip(valset, argnames)]
return "-".join(this_id)
def idmaker(argnames, argvalues, idfn=None):
ids = [_idvalset(valindex, valset, argnames, idfn)
for valindex, valset in enumerate(argvalues)]
if len(set(ids)) < len(ids):
# user may have provided a bad idfn which means the ids are not unique
ids = [str(i) + testid for i, testid in enumerate(ids)]
return ids
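# Illustrative note (added comment): for plain values the generated ids are
# the joined string representations of each value set, e.g.
#     idmaker(("a", "b"), [(1, 2), (3, 4)])      -> ["1-2", "3-4"]
#     idmaker(("x",), [("hello",), ("world",)])  -> ["hello", "world"]
# non-representable values fall back to "<argname><index>" style ids.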
def showfixtures(config):
from _pytest.main import wrap_session
return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
import _pytest.config
session.perform_collect()
curdir = py.path.local()
tw = _pytest.config.create_terminal_writer(config)
verbose = config.getvalue("verbose")
fm = session._fixturemanager
available = []
for argname, fixturedefs in fm._arg2fixturedefs.items():
assert fixturedefs is not None
if not fixturedefs:
continue
fixturedef = fixturedefs[-1]
loc = getlocation(fixturedef.func, curdir)
available.append((len(fixturedef.baseid),
fixturedef.func.__module__,
curdir.bestrelpath(loc),
fixturedef.argname, fixturedef))
available.sort()
currentmodule = None
for baseid, module, bestrel, argname, fixturedef in available:
if currentmodule != module:
if not module.startswith("_pytest."):
tw.line()
tw.sep("-", "fixtures defined from %s" %(module,))
currentmodule = module
if verbose <= 0 and argname[0] == "_":
continue
if verbose > 0:
funcargspec = "%s -- %s" %(argname, bestrel,)
else:
funcargspec = argname
tw.line(funcargspec, green=True)
loc = getlocation(fixturedef.func, curdir)
doc = fixturedef.func.__doc__ or ""
if doc:
for line in doc.strip().split("\n"):
tw.line(" " + line.strip())
else:
tw.line(" %s: no docstring available" %(loc,),
red=True)
def getlocation(function, curdir):
import inspect
fn = py.path.local(inspect.getfile(function))
lineno = py.builtin._getcode(function).co_firstlineno
if fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" %(fn, lineno+1)
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
""" assert that a code block/function call raises @expected_exception
and raise a failure exception otherwise.
This helper produces a ``py.code.ExceptionInfo()`` object.
If using Python 2.5 or above, you may use this function as a
context manager::
>>> with raises(ZeroDivisionError):
... 1/0
Or you can specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
A third possibility is to use a string to be executed::
>>> raises(ZeroDivisionError, "f(0)")
<ExceptionInfo ...>
Performance note:
-----------------
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``py.code.ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
__tracebackhide__ = True
if expected_exception is AssertionError:
        # we want to catch an AssertionError
# replace our subclass with the builtin one
# see https://github.com/pytest-dev/pytest/issues/176
from _pytest.assertion.util import BuiltinAssertionError \
as expected_exception
msg = ("exceptions must be old-style classes or"
" derived from BaseException, not %s")
if isinstance(expected_exception, tuple):
for exc in expected_exception:
if not isclass(exc):
raise TypeError(msg % type(exc))
elif not isclass(expected_exception):
raise TypeError(msg % type(expected_exception))
if not args:
return RaisesContext(expected_exception)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
#print "raises frame scope: %r" % frame.f_locals
try:
code = py.code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
            # XXX didn't f_globals == f_locals mean something special?
# this is destroyed here ...
except expected_exception:
return py.code.ExceptionInfo()
else:
func = args[0]
try:
func(*args[1:], **kwargs)
except expected_exception:
return py.code.ExceptionInfo()
pytest.fail("DID NOT RAISE")
class RaisesContext(object):
def __init__(self, expected_exception):
self.expected_exception = expected_exception
self.excinfo = None
def __enter__(self):
self.excinfo = object.__new__(py.code.ExceptionInfo)
return self.excinfo
def __exit__(self, *tp):
__tracebackhide__ = True
if tp[0] is None:
pytest.fail("DID NOT RAISE")
if sys.version_info < (2, 7):
# py26: on __exit__() exc_value often does not contain the
# exception value.
# http://bugs.python.org/issue7853
if not isinstance(tp[1], BaseException):
exc_type, value, traceback = tp
tp = exc_type, exc_type(value), traceback
self.excinfo.__init__(tp)
return issubclass(self.excinfo.type, self.expected_exception)
#
# the basic pytest Function item
#
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
""" a Function Item is responsible for setting up and executing a
Python test function.
"""
_genid = None
def __init__(self, name, parent, args=None, config=None,
callspec=None, callobj=NOTSET, keywords=None, session=None,
fixtureinfo=None):
super(Function, self).__init__(name, parent, config=config,
session=session)
self._args = args
if callobj is not NOTSET:
self.obj = callobj
self.keywords.update(self.obj.__dict__)
if callspec:
self.callspec = callspec
self.keywords.update(callspec.keywords)
if keywords:
self.keywords.update(keywords)
if fixtureinfo is None:
fixtureinfo = self.session._fixturemanager.getfixtureinfo(
self.parent, self.obj, self.cls,
funcargs=not self._isyieldedfunction())
self._fixtureinfo = fixtureinfo
self.fixturenames = fixtureinfo.names_closure
self._initrequest()
def _initrequest(self):
self.funcargs = {}
if self._isyieldedfunction():
assert not hasattr(self, "callspec"), (
"yielded functions (deprecated) cannot have funcargs")
else:
if hasattr(self, "callspec"):
callspec = self.callspec
assert not callspec.funcargs
self._genid = callspec.id
if hasattr(callspec, "param"):
self.param = callspec.param
self._request = FixtureRequest(self)
@property
def function(self):
"underlying python 'function' object"
return getattr(self.obj, 'im_func', self.obj)
def _getobj(self):
name = self.name
i = name.find("[") # parametrization
if i != -1:
name = name[:i]
return getattr(self.parent.obj, name)
@property
def _pyfuncitem(self):
"(compatonly) for code expecting pytest-2.2 style request objects"
return self
def _isyieldedfunction(self):
return getattr(self, "_args", None) is not None
def runtest(self):
""" execute the underlying test function. """
self.ihook.pytest_pyfunc_call(pyfuncitem=self)
def setup(self):
        # check if parametrization happened with an empty list
try:
self.callspec._emptyparamspecified
except AttributeError:
pass
else:
fs, lineno = self._getfslineno()
pytest.skip("got empty parameter set, function %s at %s:%d" %(
self.function.__name__, fs, lineno))
super(Function, self).setup()
fillfixtures(self)
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
        #: Scope string, one of "function", "class", "module", "session"
self.scope = "function"
self._funcargs = {}
self._fixturedefs = {}
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self.fixturenames = fixtureinfo.names_closure
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
            # we arrive here because of a dynamic call to
            # getfuncargvalue(argname) which was naturally
            # not known at parsing/collection time
fixturedefs = self._fixturemanager.getfixturedefs(
argname, self._pyfuncitem.parent.nodeid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(pytest.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(pytest.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfuncargvalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
def getfuncargvalue(self, argname):
""" Dynamically retrieve a named fixture function argument.
As of pytest-2.3, it is easier and usually better to access other
fixture values by stating it as an input argument in the fixture
function. If you only can decide about using another fixture at test
setup time, you may use this function to retrieve it inside a fixture
function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
def _get_active_fixturedef(self, argname):
try:
return self._fixturedefs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
# remove indent to prevent the python3 exception
# from leaking into the call
result = self._getfuncargvalue(fixturedef)
self._funcargs[argname] = result
self._fixturedefs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
l = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
l.reverse()
return l
l.append(fixturedef)
current = current._parent_request
def _getfuncargvalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
        # clear sys.exc_info before invoking the fixture (python bug?)
        # if it's not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(fixturedef.finish,
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
pytest.fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" %(
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = inspect.formatargspec(*inspect.getargspec(factory))
lines.append("%s:%d: def %s%s" %(
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self.addfinalizer = fixturedef.addfinalizer
self._pyfuncitem = request._pyfuncitem
self._funcargs = request._funcargs
self._fixturedefs = request._fixturedefs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self.fixturenames = request.fixturenames
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
            stack = stack[:-1]  # the last fixture raised an error, let's present
# it at the requesting side
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except IOError:
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno+1))
else:
addline("file %s, line %s" % (fspath, lineno+1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
for name, fixturedef in fm._arg2fixturedefs.items():
parentid = self.request._pyfuncitem.parent.nodeid
faclist = list(fm._matchfactories(fixturedef, parentid))
if faclist:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" %(", ".join(available),)
msg += "\n use 'py.test --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
#tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
for line in self.errorstring.split("\n"):
tw.line(" " + line.strip(), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno+1))
class FixtureManager:
"""
pytest fixtures definitions and information is stored and managed
from this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
as the closure of the fixtures needed to setup the initial fixtures,
    i.e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
Upon the test-setup phases all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
if cls is not None:
startindex = 1
else:
startindex = None
argnames = getfuncargnames(func, startindex)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != "/":
nodeid = nodeid.replace(p.sep, "/")
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i+1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures, starting with the given
        # fixturenames as the initial set.  As we have to visit all
        # factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
# skip directly parametrized arguments
if argname not in func_params:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
obj = getattr(holderobj, name, None)
if not callable(obj):
continue
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
marker = defaultfuncargprefixmarker
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
assert not name.startswith(self._argprefix)
fixturedef = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
yieldctx=marker.yieldctx,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixturedef.has_location:
faclist.append(fixturedef)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixturedef)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodeid.startswith(fixturedef.baseid):
yield fixturedef
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno+1)
source = py.code.Source(fixturefunc)
pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
if yieldctx:
if not is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="yield_fixture requires yield statement in function")
iter = fixturefunc(**kwargs)
next = getattr(iter, "__next__", None)
if next is None:
next = getattr(iter, "next")
res = next()
def teardown():
try:
next()
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
if is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="pytest.fixture functions cannot use ``yield``. "
"Instead write and return an inner function/generator "
"and let the consumer call and iterate over it.")
res = fixturefunc(**kwargs)
return res
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
yieldctx, unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scopes.index(scope or "function")
self.params = params
startindex = unittest and 1 or None
self.argnames = getfuncargnames(func, startindex=startindex)
self.yieldctx = yieldctx
self.unittest = unittest
self.ids = ids
self._finalizer = []
def addfinalizer(self, finalizer):
self._finalizer.append(finalizer)
def finish(self):
try:
while self._finalizer:
func = self._finalizer.pop()
func()
finally:
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
kwargs = {}
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixturedef.cached_result
request._check_scope(argname, request.scope, fixturedef.scope)
kwargs[argname] = result
if argname != "request":
fixturedef.addfinalizer(self.finish)
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
assert not hasattr(self, "cached_result")
fixturefunc = self.func
if self.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = self.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "self" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(self.func)
if fixturefunc != self.func:
fixturefunc = fixturefunc.__get__(request.instance)
try:
result = call_fixture_func(fixturefunc, request, kwargs,
self.yieldctx)
except Exception:
self.cached_result = (None, my_cache_key, sys.exc_info())
raise
self.cached_result = (result, my_cache_key, None)
return result
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
if mock is not None:
return len([p for p in patchings
if not p.attribute_name and p.new is mock.DEFAULT])
return len(patchings)
def getfuncargnames(function, startindex=None):
# XXX merge with main.py's varnames
#assert not inspect.isclass(function)
realfunction = function
while hasattr(realfunction, "__wrapped__"):
realfunction = realfunction.__wrapped__
if startindex is None:
startindex = inspect.ismethod(function) and 1 or 0
if realfunction != function:
startindex += num_mock_patch_args(function)
function = realfunction
if isinstance(function, functools.partial):
argnames = inspect.getargs(py.code.getrawcode(function.func))[0]
partial = function
argnames = argnames[len(partial.args):]
if partial.keywords:
for kw in partial.keywords:
argnames.remove(kw)
else:
argnames = inspect.getargs(py.code.getrawcode(function))[0]
defaults = getattr(function, 'func_defaults',
getattr(function, '__defaults__', None)) or ()
numdefaults = len(defaults)
if numdefaults:
return tuple(argnames[startindex:-numdefaults])
return tuple(argnames[startindex:])
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = set(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
items_before, ignore, argkeys_cache,scopenum+1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
# we pick the first item which uses a fixture instance in the
# requested scope and which we haven't seen yet. We slice the input
    # items list into a list of items_before, items_same and
# items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
argkeys = argkeys.difference(ignore)
if argkeys: # found a slicing key
slicing_argkey = argkeys.pop()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
        # cs.indices.items() is random order of argnames but
# then again different functions (items) can change order of
# arguments so it doesn't matter much probably
for argname, param_index in cs.indices.items():
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
def xunitsetup(obj, name):
meth = getattr(obj, name, None)
if getfixturemarker(meth) is None:
return meth
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except KeyboardInterrupt:
raise
except Exception:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
scopename2class = {
'class': Class,
'module': Module,
'function': pytest.Item,
}
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
if scope == "session":
return node.session
raise ValueError("unknown scope")
return node.getparent(cls)
| codewarrior0/pytest | _pytest/python.py | Python | mit | 84,482 |
"""
This example uses OpenGL via Pyglet and draws
a bunch of rectangles on the screen.
"""
import random
import time
import pyglet.gl as GL
import pyglet
import ctypes
# Set up the constants
SCREEN_WIDTH = 700
SCREEN_HEIGHT = 500
RECT_WIDTH = 50
RECT_HEIGHT = 50
class Shape():
def __init__(self):
self.x = 0
self.y = 0
class VertexBuffer():
""" Class to hold vertex buffer info. """
def __init__(self, vbo_id, size):
self.vbo_id = vbo_id
self.size = size
def add_rect(rect_list, x, y, width, height, color):
""" Create a vertex buffer for a rectangle. """
rect_list.extend([-width / 2, -height / 2,
width / 2, -height / 2,
width / 2, height / 2,
-width / 2, height / 2])
def create_vbo_for_rects(v2f):
vbo_id = GL.GLuint()
GL.glGenBuffers(1, ctypes.pointer(vbo_id))
data2 = (GL.GLfloat*len(v2f))(*v2f)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, vbo_id)
GL.glBufferData(GL.GL_ARRAY_BUFFER, ctypes.sizeof(data2), data2,
GL.GL_STATIC_DRAW)
shape = VertexBuffer(vbo_id, len(v2f)//2)
return shape
def render_rect_filled(shape, x, y):
""" Render the shape at the right spot. """
# Set color
GL.glDisable(GL.GL_BLEND)
GL.glColor4ub(shape.color[0], shape.color[1], shape.color[2], 255)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, shape.vbo_id)
GL.glVertexPointer(2, GL.GL_FLOAT, 0, 0)
GL.glLoadIdentity()
GL.glTranslatef(x + shape.width / 2, y + shape.height / 2, 0)
GL.glDrawArrays(GL.GL_QUADS, 0, shape.size)
class MyApplication():
""" Main application class. """
def setup(self):
""" Set up the game and initialize the variables. """
# Set background to white
GL.glClearColor(1, 1, 1, 1)
self.rect_list = []
self.shape_list = []
for i in range(2000):
x = random.randrange(0, SCREEN_WIDTH)
y = random.randrange(0, SCREEN_HEIGHT)
width = random.randrange(20, 71)
height = random.randrange(20, 71)
d_x = random.randrange(-3, 4)
d_y = random.randrange(-3, 4)
red = random.randrange(256)
blue = random.randrange(256)
green = random.randrange(256)
alpha = random.randrange(256)
color = (red, blue, green, alpha)
shape = Shape()
shape.x = x
shape.y = y
self.shape_list.append(shape)
add_rect(self.rect_list, 0, 0, width, height, color)
print("Creating vbo for {} vertices.".format(len(self.rect_list) // 2))
self.rect_vbo = create_vbo_for_rects(self.rect_list)
print("VBO {}".format(self.rect_vbo.vbo_id))
def animate(self, dt):
""" Move everything """
pass
def on_draw(self):
"""
Render the screen.
"""
start = time.time()
float_size = ctypes.sizeof(ctypes.c_float)
record_len = 10 * float_size
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
GL.glMatrixMode(GL.GL_MODELVIEW)
GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
GL.glColor4ub(255, 0, 0, 255)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.rect_vbo.vbo_id)
GL.glVertexPointer(2, GL.GL_FLOAT, record_len, 0)
for i in range(len(self.shape_list)):
shape = self.shape_list[i]
GL.glLoadIdentity()
GL.glTranslatef(shape.x, shape.y, 0)
GL.glDrawArrays(GL.GL_QUADS, i * 8, 8)
# GL.glDrawArrays(GL.GL_QUADS,
# 0,
# self.rect_vbo.size)
elapsed = time.time() - start
print(elapsed)
def main():
window = pyglet.window.Window(SCREEN_WIDTH, SCREEN_HEIGHT)
app = MyApplication()
app.setup()
pyglet.clock.schedule_interval(app.animate, 1/60)
@window.event
def on_draw():
window.clear()
app.on_draw()
pyglet.app.run()
main()
| mwreuter/arcade | experimental/a_quick_test5.py | Python | mit | 4,042 |
import mock
from django.test import TestCase
from mediaviewer.views.signout import signout
class TestSignout(TestCase):
def setUp(self):
self.logout_patcher = mock.patch('mediaviewer.views.signout.logout')
self.mock_logout = self.logout_patcher.start()
self.addCleanup(self.logout_patcher.stop)
self.setSiteWideContext_patcher = mock.patch(
'mediaviewer.views.signout.setSiteWideContext')
self.mock_setSiteWideContext = self.setSiteWideContext_patcher.start()
self.addCleanup(self.setSiteWideContext_patcher.stop)
self.render_patcher = mock.patch('mediaviewer.views.signout.render')
self.mock_render = self.render_patcher.start()
self.addCleanup(self.render_patcher.stop)
self.request = mock.MagicMock()
def test_signout(self):
expected_context = {'active_page': 'logout',
'loggedin': False,
'title': 'Signed out'}
expected = self.mock_render.return_value
actual = signout(self.request)
self.assertEqual(expected, actual)
self.mock_logout.assert_called_once_with(self.request)
self.mock_setSiteWideContext.assert_called_once_with(
expected_context, self.request)
self.mock_render.assert_called_once_with(
self.request,
'mediaviewer/logout.html',
expected_context)
| kyokley/MediaViewer | mediaviewer/tests/views/test_signout.py | Python | mit | 1,454 |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 1 16:10:56 2013
@author: vterzopoulos, abrys
"""
# To ignore numpy errors:
# pylint: disable=E1101
import nibabel
import numpy
from dicom2nifti.image_volume import load, SliceType, ImageVolume
def reorient_image(input_image, output_image):
"""
Change the orientation of the Image data in order to be in LAS space
x will represent the coronal plane, y the sagittal and z the axial plane.
x increases from Right (R) to Left (L), y from Posterior (P) to Anterior (A) and z from Inferior (I) to Superior (S)
:returns: The output image in nibabel form
:param output_image: filepath to the nibabel image
:param input_image: filepath to the nibabel image
"""
# Use the imageVolume module to find which coordinate corresponds to each plane
# and get the image data in RAS orientation
# print 'Reading nifti'
if isinstance(input_image, nibabel.Nifti1Image):
image = ImageVolume(input_image)
else:
image = load(input_image)
# 4d have a different conversion to 3d
# print 'Reorganizing data'
if image.nifti_data.squeeze().ndim == 4:
new_image = _reorient_4d(image)
elif image.nifti_data.squeeze().ndim == 3 or image.nifti_data.ndim == 3 or image.nifti_data.squeeze().ndim == 2:
new_image = _reorient_3d(image)
else:
raise Exception('Only 3d and 4d images are supported')
# print 'Recreating affine'
affine = image.nifti.affine
# Based on VolumeImage.py where slice orientation 1 represents the axial plane
# Flipping on the data may be needed based on x_inverted, y_inverted, ZInverted
# Create new affine header by changing the order of the columns of the input image header
# the last column with the origin depends on the origin of the original image, the size and the direction of x,y,z
new_affine = numpy.eye(4)
new_affine[:, 0] = affine[:, image.sagittal_orientation.normal_component]
new_affine[:, 1] = affine[:, image.coronal_orientation.normal_component]
new_affine[:, 2] = affine[:, image.axial_orientation.normal_component]
point = [0, 0, 0, 1]
# If the orientation of coordinates is inverted, then the origin of the "new" image
# would correspond to the last voxel of the original image
# First we need to find which point is the origin point in image coordinates
# and then transform it in world coordinates
if not image.axial_orientation.x_inverted:
new_affine[:, 0] = - new_affine[:, 0]
point[image.sagittal_orientation.normal_component] = image.dimensions[
image.sagittal_orientation.normal_component] - 1
# new_affine[0, 3] = - new_affine[0, 3]
if image.axial_orientation.y_inverted:
new_affine[:, 1] = - new_affine[:, 1]
point[image.coronal_orientation.normal_component] = image.dimensions[
image.coronal_orientation.normal_component] - 1
# new_affine[1, 3] = - new_affine[1, 3]
if image.coronal_orientation.y_inverted:
new_affine[:, 2] = - new_affine[:, 2]
point[image.axial_orientation.normal_component] = image.dimensions[image.axial_orientation.normal_component] - 1
# new_affine[2, 3] = - new_affine[2, 3]
new_affine[:, 3] = numpy.dot(affine, point)
# DONE: Needs to update new_affine, so that there is no translation difference between the original
# and created image (now there is 1-2 voxels translation)
# print 'Creating new nifti image'
if new_image.ndim > 3: # do not squeeze single slice data
new_image = new_image.squeeze()
output = nibabel.nifti1.Nifti1Image(new_image, new_affine)
output.header.set_slope_inter(1, 0)
output.header.set_xyzt_units(2) # set units for xyz (leave t as unknown)
output.to_filename(output_image)
return output
def _reorient_4d(image):
"""
Reorganize the data for a 4d nifti
"""
# print 'converting 4d image'
# Create empty array where x,y,z correspond to LR (sagittal), PA (coronal), IS (axial) directions and the size
# of the array in each direction is the same with the corresponding direction of the input image.
new_image = numpy.zeros([image.dimensions[image.sagittal_orientation.normal_component],
image.dimensions[image.coronal_orientation.normal_component],
image.dimensions[image.axial_orientation.normal_component],
image.dimensions[3]],
dtype=image.nifti_data.dtype)
# loop over all timepoints
for timepoint in range(0, image.dimensions[3]):
        # Fill the new image with the values of the input image but matching the orientation with x,y,z
if image.coronal_orientation.y_inverted:
for i in range(new_image.shape[2]):
new_image[:, :, i, timepoint] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
new_image.shape[2] - 1 - i,
timepoint).original_data))
else:
for i in range(new_image.shape[2]):
new_image[:, :, i, timepoint] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
i, timepoint).original_data))
return new_image
def _reorient_3d(image):
"""
Reorganize the data for a 3d nifti
"""
# Create empty array where x,y,z correspond to LR (sagittal), PA (coronal), IS (axial) directions and the size
# of the array in each direction is the same with the corresponding direction of the input image.
new_image = numpy.zeros([image.dimensions[image.sagittal_orientation.normal_component],
image.dimensions[image.coronal_orientation.normal_component],
image.dimensions[image.axial_orientation.normal_component]],
dtype=image.nifti_data.dtype)
    # Fill the new image with the values of the input image but matching the orientation with x,y,z
if image.coronal_orientation.y_inverted:
for i in range(new_image.shape[2]):
new_image[:, :, i] = numpy.fliplr(image.get_slice(SliceType.AXIAL,
new_image.shape[2] - 1 - i).original_data)
else:
for i in range(new_image.shape[2]):
new_image[:, :, i] = numpy.fliplr(image.get_slice(SliceType.AXIAL,
i).original_data)
return new_image
| icometrix/dicom2nifti | dicom2nifti/image_reorientation.py | Python | mit | 6,865 |
# This is a modified version of original twilio_sms Gluu's script to work with Casa
from java.util import Arrays
from javax.faces.application import FacesMessage
from org.gluu.jsf2.message import FacesMessages
from org.gluu.oxauth.security import Identity
from org.gluu.oxauth.service import UserService, AuthenticationService
from org.gluu.oxauth.util import ServerUtil
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.util import StringHelper, ArrayHelper
from com.google.common.base import Joiner
from com.twilio import Twilio
import com.twilio.rest.api.v2010.account.Message as TwMessage
from com.twilio.type import PhoneNumber
import random
import sys
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, customScript, configurationAttributes):
print "Twilio SMS. Initialized"
return True
def destroy(self, configurationAttributes):
print "Twilio SMS. Destroyed successfully"
return True
def getApiVersion(self):
return 11
def getAuthenticationMethodClaims(self, configurationAttributes):
return None
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
print "TwilioSMS. Authenticate for Step %s" % str(step)
identity = CdiUtil.bean(Identity)
authenticationService = CdiUtil.bean(AuthenticationService)
user = authenticationService.getAuthenticatedUser()
if step == 1:
if user == None:
credentials = identity.getCredentials()
user_name = credentials.getUsername()
user_password = credentials.getPassword()
if StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password):
authenticationService.authenticate(user_name, user_password)
user = authenticationService.getAuthenticatedUser()
if user == None:
return False
#Attempt to send message now if user has only one mobile number
mobiles = user.getAttributeValues("mobile")
if mobiles == None:
return False
else:
code = random.randint(100000, 999999)
identity.setWorkingParameter("randCode", code)
sid = configurationAttributes.get("twilio_sid").getValue2()
token = configurationAttributes.get("twilio_token").getValue2()
self.from_no = configurationAttributes.get("from_number").getValue2()
Twilio.init(sid, token)
if mobiles.size() == 1:
self.sendMessage(code, mobiles.get(0))
else:
chopped = ""
for numb in mobiles:
l = len(numb)
chopped += "," + numb[max(0, l-4) : l]
#converting to comma-separated list (identity does not remember lists in 3.1.3)
identity.setWorkingParameter("numbers", Joiner.on(",").join(mobiles.toArray()))
identity.setWorkingParameter("choppedNos", chopped[1:])
return True
else:
if user == None:
return False
session_attributes = identity.getSessionId().getSessionAttributes()
code = session_attributes.get("randCode")
numbers = session_attributes.get("numbers")
if step == 2 and numbers != None:
#Means the selection number page was used
idx = ServerUtil.getFirstValue(requestParameters, "OtpSmsloginForm:indexOfNumber")
if idx != None and code != None:
sendToNumber = numbers.split(",")[int(idx)]
self.sendMessage(code, sendToNumber)
return True
else:
return False
success = False
form_passcode = ServerUtil.getFirstValue(requestParameters, "OtpSmsloginForm:passcode")
if form_passcode != None and code == form_passcode:
print "TwilioSMS. authenticate. 6-digit code matches with code sent via SMS"
success = True
else:
facesMessages = CdiUtil.bean(FacesMessages)
facesMessages.setKeepMessages()
facesMessages.clear()
facesMessages.add(FacesMessage.SEVERITY_ERROR, "Wrong code entered")
return success
def prepareForStep(self, configurationAttributes, requestParameters, step):
print "TwilioSMS. Prepare for Step %s" % str(step)
return True
def getExtraParametersForStep(self, configurationAttributes, step):
if step > 1:
return Arrays.asList("randCode", "numbers", "choppedNos")
return None
def getCountAuthenticationSteps(self, configurationAttributes):
print "TwilioSMS. getCountAuthenticationSteps called"
if CdiUtil.bean(Identity).getWorkingParameter("numbers") == None:
return 2
else:
return 3
def getPageForStep(self, configurationAttributes, step):
print "TwilioSMS. getPageForStep called %s" % step
print "numbers are %s" % CdiUtil.bean(Identity).getWorkingParameter("numbers")
defPage = "/casa/otp_sms.xhtml"
if step == 2:
if CdiUtil.bean(Identity).getWorkingParameter("numbers") == None:
return defPage
else:
return "/casa/otp_sms_prompt.xhtml"
elif step == 3:
return defPage
return ""
def logout(self, configurationAttributes, requestParameters):
return True
def hasEnrollments(self, configurationAttributes, user):
return user.getAttribute("mobile") != None
def sendMessage(self, code, numb):
try:
if numb[:1] != "+":
numb = "+" + numb
print "TwilioSMS. Sending SMS message (%s) to %s" % (code, numb)
msg = "%s is your passcode to access your account" % code
message = TwMessage.creator(PhoneNumber(numb), PhoneNumber(self.from_no), msg).create()
print "TwilioSMS. Message Sid: %s" % message.getSid()
except:
print "TwilioSMS. Error sending message", sys.exc_info()[1]
| GluuFederation/community-edition-setup | static/casa/scripts/casa-external_twilio_sms.py | Python | mit | 6,740 |
from db_utils import deleteLinksByHost
from db_utils import deleteHost
from db_utils import addNewHost
from db_utils import getAllHosts
from error_message import showErrorPage
from error_message import ErrorMessages
import utils
import webapp2
from google.appengine.api import users
from google.appengine.ext import ndb
JINJA_ENVIRONMENT = utils.getJinjaEnvironment()
class AddHost(webapp2.RequestHandler):
def get(self):
"""
        description:
            adds a new host to the database, and redirects to '/admin'
params:
name - host name
interval - pinging interval for all the links belonging to the host.
response:
redirect to '/admin'
"""
name = self.request.get('name')
if name is None or len(name) == 0:
showErrorPage(self, ErrorMessages.invalidHostName())
return
if ndb.Key('Host', name).get() is not None:
showErrorPage(self, ErrorMessages.duplicatingHostName())
return
try:
interval = int(self.request.get('interval'))
except ValueError:
showErrorPage(self, ErrorMessages.invalidHostInterval())
return
if interval == 0:
showErrorPage(self, ErrorMessages.invalidHostInterval())
return
addNewHost(name, interval)
self.redirect('/admin')
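# Illustrative request (not part of the original file), based on the route table
# at the bottom of this module:
#   GET /admin/host/add?name=example.com&interval=5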
class DeleteHost(webapp2.RequestHandler):
def get(self):
"""
description:
deletes an existing host, and redirects to '/'. All the links belonging
to the host will also be deleted.
params:
name - host name
response:
redirect to '/'
"""
name = self.request.get('name')
if name is None or len(name) == 0:
showErrorPage(self, ErrorMessages.invalidHostName())
return
hostKey = ndb.Key('Host', name)
if hostKey.get() is None:
showErrorPage(self, ErrorMessages.hostDoesNotExist())
return
deleteLinksByHost(name)
deleteHost(name)
self.redirect('/')
class AdminPage(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
hosts = getAllHosts()
template_values = {
'hosts': hosts,
'user': user,
}
template = JINJA_ENVIRONMENT.get_template('admin.html')
self.response.write(template.render(template_values))
app = webapp2.WSGIApplication([
('/admin/host/add', AddHost),
('/admin/host/delete', DeleteHost),
], debug=True)
| cloud-io/CloudUp | src/admin_views.py | Python | mit | 2,370 |
# Joey Velez-Ginorio
# Gridworld Implementation
# ---------------------------------
from mdp import MDP
from grid import Grid
from scipy.stats import uniform
from scipy.stats import beta
from scipy.stats import expon
import numpy as np
import random
import pyprind
import matplotlib.pyplot as plt
class GridWorld(MDP):
"""
Defines a gridworld environment to be solved by an MDP!
"""
def __init__(self, grid, goalVals, discount=.99, tau=.01, epsilon=.001):
MDP.__init__(self, discount=discount, tau=tau, epsilon=epsilon)
self.goalVals = goalVals
self.grid = grid
self.setGridWorld()
self.valueIteration()
self.extractPolicy()
def isTerminal(self, state):
"""
Specifies terminal conditions for gridworld.
"""
return True if tuple(self.scalarToCoord(state)) in self.grid.objects.values() else False
def isObstacle(self, sCoord):
"""
Checks if a state is a wall or obstacle.
"""
if tuple(sCoord) in self.grid.walls:
return True
if sCoord[0] > (self.grid.row - 1) or sCoord[0] < 0:
return True
if sCoord[1] > (self.grid.col - 1) or sCoord[1] < 0:
return True
return False
def takeAction(self, sCoord, action):
"""
Receives an action value, performs associated movement.
"""
        if action == 0:
            return self.up(sCoord)
        if action == 1:
            return self.down(sCoord)
        if action == 2:
            return self.left(sCoord)
        if action == 3:
            return self.right(sCoord)
        if action == 4:
            return sCoord
        if action == 5:
            return self.upleft(sCoord)
        if action == 6:
            return self.upright(sCoord)
        if action == 7:
            return self.downleft(sCoord)
        if action == 8:
            return self.downright(sCoord)
def up(self, sCoord):
"""
Move agent up, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[0] -= 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def upright(self, sCoord):
"""
Move agent up and right, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[0] -= 1
newCoord[1] += 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def upleft(self, sCoord):
"""
Move agent up and left, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[0] -= 1
newCoord[1] -= 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def down(self, sCoord):
"""
Move agent down, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[0] += 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def downleft(self, sCoord):
"""
Move agent down, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[0] += 1
newCoord[1] -= 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def downright(self, sCoord):
"""
Move agent down, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[0] += 1
newCoord[1] += 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def left(self, sCoord):
"""
Move agent left, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[1] -= 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def right(self, sCoord):
"""
Move agent right, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[1] += 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def coordToScalar(self, sCoord):
"""
Convert state coordinates to corresponding scalar state value.
"""
return sCoord[0]*(self.grid.col) + sCoord[1]
def scalarToCoord(self, scalar):
"""
Convert scalar state value into coordinates.
"""
return np.array([scalar / self.grid.col, scalar % self.grid.col])
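    # Illustrative example (not in the original file): with self.grid.col == 5,
    # coordToScalar([2, 3]) == 13 and scalarToCoord(13) == array([2, 3]);
    # the two conversions are inverses of each other.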
def getPossibleActions(self, sCoord):
"""
Will return a list of all possible actions from a current state.
"""
possibleActions = list()
if self.up(sCoord) is not sCoord:
possibleActions.append(0)
if self.down(sCoord) is not sCoord:
possibleActions.append(1)
if self.left(sCoord) is not sCoord:
possibleActions.append(2)
if self.right(sCoord) is not sCoord:
possibleActions.append(3)
if self.upleft(sCoord) is not sCoord:
possibleActions.append(5)
if self.upright(sCoord) is not sCoord:
possibleActions.append(6)
if self.downleft(sCoord) is not sCoord:
possibleActions.append(7)
if self.downright(sCoord) is not sCoord:
possibleActions.append(8)
return possibleActions
def setGridWorld(self):
"""
Initializes states, actions, rewards, transition matrix.
"""
# Possible coordinate positions + Death State
self.s = np.arange(self.grid.row*self.grid.col + 1)
# 4 Actions {Up, Down, Left, Right}
self.a = np.arange(9)
# Reward Zones
self.r = np.zeros(len(self.s))
for i in range(len(self.grid.objects)):
self.r[self.coordToScalar(self.grid.objects.values()[i])] = self.goalVals[i]
self.r_sa = np.zeros([len(self.s),len(self.a)])
for i in range(len(self.s)):
for j in range(len(self.a)):
if j <= 4:
self.r_sa[i][j] = self.r[self.coordToScalar(self.takeAction(self.scalarToCoord(i),j))]-1.0
else:
self.r_sa[i][j] = self.r[self.coordToScalar(self.takeAction(self.scalarToCoord(i),j))]-np.sqrt(2)
self.r = self.r_sa
# Transition Matrix
self.t = np.zeros([len(self.s),len(self.a),len(self.s)])
for state in range(len(self.s)):
possibleActions = self.getPossibleActions(self.scalarToCoord(state))
if self.isTerminal(state):
for i in range(len(self.a)):
if i == 4:
self.t[state][4][state]=1.0
else:
self.t[state][i][len(self.s)-1] = 1.0
continue
for action in self.a:
# Up
if action == 0:
currentState = self.scalarToCoord(state)
nextState = self.takeAction(currentState, 0)
self.t[state][action][self.coordToScalar(nextState)] = 1.0
if action == 1:
currentState = self.scalarToCoord(state)
nextState = self.takeAction(currentState, 1)
self.t[state][action][self.coordToScalar(nextState)] = 1.0
if action == 2:
currentState = self.scalarToCoord(state)
nextState = self.takeAction(currentState, 2)
self.t[state][action][self.coordToScalar(nextState)] = 1.0
if action == 3:
currentState = self.scalarToCoord(state)
nextState = self.takeAction(currentState, 3)
self.t[state][action][self.coordToScalar(nextState)] = 1.0
if action == 4:
self.t[state][action][state] = 1.0
if action == 5:
currentState = self.scalarToCoord(state)
nextState = self.takeAction(currentState, 5)
self.t[state][action][self.coordToScalar(nextState)] = 1.0
if action == 6:
currentState = self.scalarToCoord(state)
nextState = self.takeAction(currentState, 6)
self.t[state][action][self.coordToScalar(nextState)] = 1.0
if action == 7:
currentState = self.scalarToCoord(state)
nextState = self.takeAction(currentState, 7)
self.t[state][action][self.coordToScalar(nextState)] = 1.0
if action == 8:
currentState = self.scalarToCoord(state)
nextState = self.takeAction(currentState, 8)
self.t[state][action][self.coordToScalar(nextState)] = 1.0
def simulate(self, state):
"""
Runs the solver for the MDP, conducts value iteration, extracts policy,
then runs simulation of problem.
NOTE: Be sure to run value iteration (solve values for states) and to
extract some policy (fill in policy vector) before running simulation
"""
# Run simulation using policy until terminal condition met
actions = ['up', 'down', 'left', 'right']
count = 0
while not self.isTerminal(state):
# Determine which policy to use (non-deterministic)
policy = self.policy[np.where(self.s == state)[0][0]]
p_policy = self.policy[np.where(self.s == state)[0][0]] / \
self.policy[np.where(self.s == state)[0][0]].sum()
# Get the parameters to perform one move
stateIndex = np.where(self.s == state)[0][0]
policyChoice = np.random.choice(policy, p=p_policy)
actionIndex = np.random.choice(np.array(np.where(self.policy[state][:] == policyChoice)).ravel())
# print actionIndex
if actionIndex <= 3:
count += 1
else:
count += np.sqrt(2)
# Take an action, move to next state
nextState = self.takeAction(self.scalarToCoord(int(stateIndex)), int(actionIndex))
nextState = self.coordToScalar(nextState)
# print "In state: {}, taking action: {}, moving to state: {}".format(
# self.scalarToCoord(state), actions[actionIndex], self.scalarToCoord(nextState))
# End game if terminal state reached
state = int(nextState)
# if self.isTerminal(state):
# print "Terminal state: {} has been reached. Simulation over.".format(self.scalarToCoord(state))
return count
| joeyginorio/Action-Understanding-with-Rational-Rules | model_src/grid_world.py | Python | mit | 9,591 |
from fam.buffer import buffered_db
cache = buffered_db
| paulharter/fam | src/fam/database/caching.py | Python | mit | 56 |
"""
Created on Thu May 05 20:02:00 2011
@author: Tillsten
"""
import numpy as np
from scipy.linalg import qr
eps = np.finfo(float).eps
def mls(B, v, umin, umax, Wv=None, Wu=None, ud=None, u=None, W=None, imax=100):
"""
mls - Control allocation using minimal least squares.
[u,W,iter] = mls_alloc(B,v,umin,umax,[Wv,Wu,ud,u0,W0,imax])
Solves the bounded sequential least-squares problem
min ||Wu(u-ud)|| subj. to u in M
where M is the set of control signals solving
min ||Wv(Bu-v)|| subj. to umin <= u <= umax
using a two stage active set method. Wu must be diagonal since the
problem is reformulated as a minimal least squares problem. The
implementation does not handle the case of coplanar controls.
Inputs:
-------
B control effectiveness matrix (k x m)
v commanded virtual control (k x 1)
umin lower position limits (m x 1)
umax upper position limits (m x 1)
Wv virtual control weighting matrix (k x k) [I]
Wu control weighting matrix (m x m), diagonal [I]
ud desired control (m x 1) [0]
u0 initial point (m x 1)
W0 initial working set (m x 1) [empty]
imax max no. of iterations [100]
Outputs:
-------
u optimal control
W optimal active set
iter no. of iterations (= no. of changes in the working set + 1)
    Active set syntax: W_i =  0 if u_i not saturated
                             -1 if u_i = umin_i
                             +1 if u_i = umax_i
Directly Based on the code from:
Ola Harkegard, www.control.isy.liu.se/~ola
    see licence.
"""
#k = number of virtual controls
#m = number of variables (actuators)
k, m = B.shape
    if u is None:
        u = np.mean(umin + umax, 0)[:, None]
    if W is None:
        W = np.zeros((m, 1))
    if ud is None:
        ud = np.zeros((m, 1))
    if Wu is None:
        Wu = np.eye(m)
    if Wv is None:
        Wv = np.eye(k)
phase = 1
#Reformulate as a minimal least squares problem. See 2002-03-08 (1).
A = Wv.dot(B).dot(np.linalg.pinv(Wu))
b = Wv.dot(v - B.dot(ud))
xmin = (umin - ud).flatten()
xmax = (umax - ud).flatten()
# Compute initial point and residual.
x = Wu.dot(u - ud)
r = np.atleast_2d(A.dot(x) - b)
    #Determine indices of free variables
i_free = (W == 0).flatten()
m_free = np.sum(i_free)
for i in range(imax):
#print 'Iter: ', i
if phase == 1:
A_free = A[:, i_free]
if m_free <= k:
if m_free > 0:
p_free = np.linalg.lstsq(-A_free, r)[0]
else:
q1, r1 = qr(A_free.T)
                p_free = -q1.dot(np.linalg.solve(r1.T, r))
p = np.zeros((m, 1))
if A.shape[1] > 1:
p[i_free] = p_free
else:
p[i_free] = p_free.flatten()
else:
i_fixed = np.logical_not(i_free)
m_fixed = m - m_free
if m_fixed > 0:
HT = U[i_fixed.squeeze(), :].T
V, Rtot = qr(np.atleast_2d(HT))
V1 = V[:, :m_fixed]
V2 = V[:, m_fixed + 1:]
R = Rtot[:, m_fixed]
else:
V, Rtot = np.array([[]]), np.array([[]])
V1 = V2 = R = V.T
s = -V2.T.dot(z)
pz = V2.dot(s)
p = U.dot(pz)
x_opt = x + p
infeasible = np.logical_or(x_opt < xmin, x_opt > xmax)
if not np.any(infeasible[i_free]):
x = x_opt
if phase == 1:
r = r + A.dot(p)
else:
z = z + pz
if phase == 1 and m_free >= k:
phase = 2
Utot, Stot = qr(A.T)
U = Utot[:, k:]
z = U.T.dot(x)
else:
lam = np.zeros((m, 1))
if m_free < m:
if phase == 1:
g = A.T.dot(r)
lam = -W * g
else:
lam[i_fixed] = -W[i_fixed] * np.linalg.solve(R, V1.T.dot(z))
if np.all(lam >= -eps):
u = np.linalg.solve(Wu, x) + ud
return u
lambda_neg, i_neg = np.min(lam), np.argmin(lam)
W[i_neg] = 0
i_free[i_neg] = True
m_free += 1
else:
dist = np.ones(m)
i_min = np.logical_and(i_free, p.flat < 0).flatten()
i_max = np.logical_and(i_free, p.flat > 0).flatten()
dist[i_min] = (xmin[i_min] - x[i_min]) / p[i_min]
dist[i_max] = (xmax[i_max] - x[i_max]) / p[i_max]
alpha, i_alpha = np.min(dist), np.argmin(dist)
x = x + alpha * p
if phase == 1:
r = r + A.dot(alpha * p) #!!
else:
z = z + alpha * pz
W[i_alpha] = np.sign(p[i_alpha])
if i_free[i_alpha]:
i_free[i_alpha] = False
m_free -= 1
u = np.linalg.solve(Wu, x) + ud
return u
def bounded_lsq(A, b, lower_lim, upper_lim):
"""
Minimizes:
|Ax-b|_2
for lower_lim<x<upper_lim.
"""
return mls(A, b, lower_lim, upper_lim)
def test_bounded_lsq():
from numpy.core.umath_tests import matrix_multiply
s = np.linspace(0, 10, 100)
A = np.exp(-((s - 5) ** 2) / 20)
A = A[:, None]
b = 16 * A
x = bounded_lsq(A, b, np.atleast_2d(0), np.atleast_2d(15))
np.testing.assert_almost_equal(x, 15)
A = np.array([[1, -3], [5, 7]])
b = np.array([[-50], [50]])
ll = np.array(([[-10], [-10]]))
ul = np.array(([[10], [10]]))
x0 = bounded_lsq(A, b, ll, ul)
np.testing.assert_array_almost_equal(x0, np.array([[-4.61538462], [10.]]))
if __name__ == '__main__':
from numpy.core.umath_tests import matrix_multiply
import matplotlib.pyplot as plt
test_bounded_lsq()
s = np.linspace(0, 10, 100)
A = np.exp(-((s - 5) ** 2) / 20)
A = A[:, None]
b = 16 * A
x = bounded_lsq(A, b, np.atleast_2d(0), np.atleast_2d(4))
plt.plot(A.dot(x))
plt.plot(b)
plt.figure()
plt.rcParams['font.family'] = 'serif'
A = np.array([[1, -3], [5, 7]])
b = np.array([[-50], [50]])
ll = np.array(([[-10], [-10]]))
ul = np.array(([[10], [10]]))
Ud = np.array(([0, 0]))
gamma = 1000
x0 = bounded_lsq(A, b, ll, ul)
x = np.linspace(-30, 30, 500)
y = np.linspace(-30, 30, 500)
X, Y = np.meshgrid(x, y)
S = np.dstack((X, Y))
SN = matrix_multiply(S, A.T)
plt.clf()
plt.contourf(x, y, np.sqrt(((SN - b.T) ** 2).sum(-1)), 30,
cmap=plt.cm.PuBu_r)
plt.colorbar()
#plt.axhline(ll[0])
#plt.axhline(ul[0])
#plt.axvline(ll[1])
#plt.axvline(ul[1])
rect = np.vstack((ll, ul - ll))
patch = plt.Rectangle(ll, *(ul - ll), facecolor=(0.0, 0., 0., 0))
plt.gca().add_patch(patch)
plt.annotate("Bounded Min",
xy=x0, xycoords='data',
xytext=(-5, 5), textcoords='data',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
)
plt.annotate("Lsq Min",
xy=np.linalg.lstsq(A, b)[0], xycoords='data',
xytext=(20, 10), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
)
plt.scatter(*x0)
plt.scatter(*np.linalg.lstsq(A, b)[0])
plt.show()
| treverhines/ModEst | modest/pymls/init.py | Python | mit | 7,780 |
from __future__ import annotations
import contextlib
import os.path
import shutil
import sys
import pytest
from pre_commit import parse_shebang
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import Var
from pre_commit.util import make_executable
def _echo_exe() -> str:
exe = shutil.which('echo')
assert exe is not None
return exe
def test_file_doesnt_exist():
assert parse_shebang.parse_filename('herp derp derp') == ()
def test_simple_case(tmpdir):
x = tmpdir.join('f')
x.write('#!/usr/bin/env echo')
make_executable(x.strpath)
assert parse_shebang.parse_filename(x.strpath) == ('echo',)
def test_find_executable_full_path():
assert parse_shebang.find_executable(sys.executable) == sys.executable
def test_find_executable_on_path():
assert parse_shebang.find_executable('echo') == _echo_exe()
def test_find_executable_not_found_none():
assert parse_shebang.find_executable('not-a-real-executable') is None
def write_executable(shebang, filename='run'):
os.mkdir('bin')
path = os.path.join('bin', filename)
with open(path, 'w') as f:
f.write(f'#!{shebang}')
make_executable(path)
return path
@contextlib.contextmanager
def bin_on_path():
bindir = os.path.join(os.getcwd(), 'bin')
with envcontext((('PATH', (bindir, os.pathsep, Var('PATH'))),)):
yield
def test_find_executable_path_added(in_tmpdir):
path = os.path.abspath(write_executable('/usr/bin/env sh'))
assert parse_shebang.find_executable('run') is None
with bin_on_path():
assert parse_shebang.find_executable('run') == path
def test_find_executable_path_ext(in_tmpdir):
"""Windows exports PATHEXT as a list of extensions to automatically add
to executables when doing PATH searching.
"""
exe_path = os.path.abspath(
write_executable('/usr/bin/env sh', filename='run.myext'),
)
env_path = {'PATH': os.path.dirname(exe_path)}
env_path_ext = dict(env_path, PATHEXT=os.pathsep.join(('.exe', '.myext')))
assert parse_shebang.find_executable('run') is None
assert parse_shebang.find_executable('run', _environ=env_path) is None
ret = parse_shebang.find_executable('run.myext', _environ=env_path)
assert ret == exe_path
ret = parse_shebang.find_executable('run', _environ=env_path_ext)
assert ret == exe_path
def test_normexe_does_not_exist():
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe('i-dont-exist-lol')
assert excinfo.value.args == ('Executable `i-dont-exist-lol` not found',)
def test_normexe_does_not_exist_sep():
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe('./i-dont-exist-lol')
assert excinfo.value.args == ('Executable `./i-dont-exist-lol` not found',)
@pytest.mark.xfail(os.name == 'nt', reason='posix only')
def test_normexe_not_executable(tmpdir): # pragma: win32 no cover
tmpdir.join('exe').ensure()
with tmpdir.as_cwd(), pytest.raises(OSError) as excinfo:
parse_shebang.normexe('./exe')
assert excinfo.value.args == ('Executable `./exe` is not executable',)
def test_normexe_is_a_directory(tmpdir):
with tmpdir.as_cwd():
tmpdir.join('exe').ensure_dir()
exe = os.path.join('.', 'exe')
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe(exe)
msg, = excinfo.value.args
assert msg == f'Executable `{exe}` is a directory'
def test_normexe_already_full_path():
assert parse_shebang.normexe(sys.executable) == sys.executable
def test_normexe_gives_full_path():
assert parse_shebang.normexe('echo') == _echo_exe()
assert os.sep in _echo_exe()
def test_normalize_cmd_trivial():
cmd = (_echo_exe(), 'hi')
assert parse_shebang.normalize_cmd(cmd) == cmd
def test_normalize_cmd_PATH():
cmd = ('echo', '--version')
expected = (_echo_exe(), '--version')
assert parse_shebang.normalize_cmd(cmd) == expected
def test_normalize_cmd_shebang(in_tmpdir):
echo = _echo_exe().replace(os.sep, '/')
path = write_executable(echo)
assert parse_shebang.normalize_cmd((path,)) == (echo, path)
def test_normalize_cmd_PATH_shebang_full_path(in_tmpdir):
echo = _echo_exe().replace(os.sep, '/')
path = write_executable(echo)
with bin_on_path():
ret = parse_shebang.normalize_cmd(('run',))
assert ret == (echo, os.path.abspath(path))
def test_normalize_cmd_PATH_shebang_PATH(in_tmpdir):
echo = _echo_exe()
path = write_executable('/usr/bin/env echo')
with bin_on_path():
ret = parse_shebang.normalize_cmd(('run',))
assert ret == (echo, os.path.abspath(path))
| pre-commit/pre-commit | tests/parse_shebang_test.py | Python | mit | 4,687 |
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - [email protected]
#
# twitter : @edonyzpc
#
# Last modified: 2015-05-10 15:02
#
# Filename: filebuf.py
#
# Description: All Rights Are Reserved
#
"""
class PyColor(object):
""" This class is for colored print in the python interpreter!
"F3" call Addpy() function to add this class which is defined
in the .vimrc for vim Editor."""
def __init__(self):
self.self_doc = r"""
STYLE: \033['display model';'foreground';'background'm
DETAILS:
FOREGROUND BACKGOUND COLOR
---------------------------------------
30 40 black
31 41 red
32 42 green
33 43 yellow
34 44 blue
35 45 purple
36 46 cyan
37 47 white
DISPLAY MODEL DETAILS
-------------------------
0 default
1 highlight
4 underline
5 flicker
7 reverse
8 non-visiable
e.g:
\033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
\033[0m <!--set all into default-->
"""
self.warningcolor = '\033[0;37;41m'
self.tipcolor = '\033[0;31;42m'
self.endcolor = '\033[0m'
self._newcolor = ''
@property
def new(self):
"""
Customized Python Print Color.
"""
return self._newcolor
@new.setter
def new(self,color_str):
"""
New Color.
"""
self._newcolor = color_str
def disable(self):
"""
Disable Color Print.
"""
self.warningcolor = ''
self.endcolor = ''
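# Illustrative usage sketch (not part of the original file): wrap a message in
# the predefined warning colour and reset the terminal afterwards. The helper
# name is hypothetical.
def _demo_colored_print(message):
    color = PyColor()
    print(color.warningcolor + message + color.endcolor)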
class FileBuf(object):
"""
    FILEBUF: class to write each differing line into a buffer file named `tmp`.
"""
def __init__(self, file1, file2):
"""
Initialize the instance attributes: [file1, file2, file1_line_num, file2_line_num]
"""
self.file1 = file1
self.file2 = file2
self.file1_line_num = len(open(self.file1).readlines())
self.file2_line_num = len(open(self.file2).readlines())
self.buffer = []
def mark_diff(self):
"""
Mark up the different lines into buffer
"""
f1 = open(self.file1)
f2 = open(self.file2)
if self.file1_line_num > self.file2_line_num:
line1_num_counter = 0
line2_num_counter = 0
for line1 in f1.readlines():
line2 = f2.readline()
line1_num_counter += 1
line2_num_counter += 1
if line1 == line2:
continue
else:
if line1 == '':
line1 = line1 + '\n'
if line2 == '':
line2 = line2 + '\n'
line1 = str(line1_num_counter) + '-' + line1
line2 = str(line2_num_counter) + '-' + line2
self.buffer.append(line1)
self.buffer.append(line2)
else:
line1_num_counter = 0
line2_num_counter = 0
for line2 in f2.readlines():
line1 = f1.readline()
line1_num_counter += 1
line2_num_counter += 1
if line1 == line2:
continue
else:
if line1 == '':
line1 = line1 + '\n'
if line2 == '':
line2 = line2 + '\n'
line1 = str(line1_num_counter) + '+' + line1
line2 = str(line2_num_counter) + '+' + line2
self.buffer.append(line1)
self.buffer.append(line2)
def write_file(self):
"""
Write the buffer into buffer file `tmp` in current direction
"""
file_write = open('tmp','w')
for line in self.buffer:
file_write.write(line)
if __name__ == '__main__':
test_file_buf = FileBuf('f2.txt', 'f1.txt')
test_file_buf.mark_diff()
test_file_buf.write_file()
| edonyM/toolkitem | fileprocess/mergefile/filebuf.py | Python | mit | 5,258 |
from django.db.models import fields, ForeignKey, ManyToOneRel, OneToOneRel
from .obj_types import clss
from .search_schema import schema as search_schema
def build_search_filters(cls):
"""Return list of dicts of options for a QueryBuilder filter.
See https://querybuilder.js.org/#filters for details.
"""
filters = [
_build_search_filter(cls, field_name)
for field_name in search_schema[cls.obj_type]["fields"]
]
return filters
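# Illustrative sketch (not part of the original module): for a CharField named
# "nm" with help_text "Name" (a hypothetical example), _build_search_filter
# below would produce roughly:
#   {"id": "nm", "type": "string", "label": "Name",
#    "operators": ["contains"], "validation": {"min": 3}}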
def _build_search_filter(cls, field_name):
if field_name == "bnf_code":
return _build_search_filter_bnf_code_prefox()
field = cls._meta.get_field(field_name)
builder = {
ForeignKey: _build_search_filter_fk,
ManyToOneRel: _build_search_filter_rev_fk,
OneToOneRel: _build_search_filter_rev_fk,
fields.CharField: _build_search_filter_char,
fields.DateField: _build_search_filter_date,
fields.BooleanField: _build_search_filter_boolean,
fields.DecimalField: _build_search_filter_decimal,
}[type(field)]
search_filter = builder(field)
search_filter["id"] = field_name
return search_filter
def _build_search_filter_bnf_code_prefox():
return {
"id": "bnf_code",
"type": "string",
"label": "BNF code",
"operators": ["begins_with", "not_begins_with"],
"validation": {"min": 4},
}
def _build_search_filter_fk(field):
values = field.related_model.objects.values_list("cd", "descr").order_by("descr")
values = [{r[0]: r[1]} for r in values]
# The type is "string", even though the values are actually integers. This is
# because the QueryBuilder library calls parseInt on any values produced by a filter
# of type "integer" (see call to Utils.changeType in getRuleInputValue). It turns
# out that parseInt cannot actually parse integers larger than
# Number.MAX_SAFE_INTEGER, which is (2 ** 53) - 1, or 9007199254740991, and loses
# precision when it tries. This is a problem, because certain dm+d models have
# identifiers larger than Number.MAX_SAFE_INTEGER. Fortunately, Django is able to
# deal with query parameters for integer fields that are submitted as strings.
return {
"type": "string",
"label": field.help_text,
"input": "select",
"values": values,
"operators": ["equal"],
"plugin": "selectpicker",
"plugin_config": {"liveSearch": True, "liveSearchStyle": "contains"},
}
def _build_search_filter_rev_fk(field):
intermediate_model = field.related_model
fk_fields = [
f
for f in intermediate_model._meta.get_fields()
if (
isinstance(f, ForeignKey)
and f.related_model not in clss
and "prev" not in f.name
)
]
assert len(fk_fields) == 1
field = fk_fields[0]
return _build_search_filter_fk(field)
def _build_search_filter_char(field):
return {
"type": "string",
"label": field.help_text,
"operators": ["contains"],
"validation": {"min": 3},
}
def _build_search_filter_date(field):
return {
"type": "date",
"label": field.help_text,
"operators": ["equal", "before", "after"],
"plugin": "datepicker",
"plugin_config": {"format": "yyyy-mm-dd"},
}
def _build_search_filter_boolean(field):
return {
"type": "boolean",
"label": field.help_text,
"input": "radio",
"values": [{1: "Yes"}, {0: "No"}],
"operators": ["equal"],
}
def _build_search_filter_decimal(field):
return {
"type": "double",
"label": field.help_text,
"operators": ["equal", "less than", "greater than"],
}
| ebmdatalab/openprescribing | openprescribing/dmd/build_search_filters.py | Python | mit | 3,765 |
# -*- coding: utf-8 -*-
"""VGG16 model for Keras.
# Reference
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
"""
from __future__ import print_function
from __future__ import absolute_import
import warnings
from keras.models import Model
from keras.layers import Flatten, Dense, Input,Lambda
from keras.layers import Convolution2D, MaxPooling2D
from keras.engine.topology import get_source_inputs
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from keras import backend as K
import numpy as np
from keras.applications.imagenet_utils import decode_predictions, preprocess_input
#sandeep to fix later
#, _obtain_input_shape
TH_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels.h5'
TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
TH_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels_notop.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
vgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape((3,1,1))
def img_preprocess(x):
x = x - vgg_mean
return x[:,::-1]
def VGG16(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
classes=1000):
"""Instantiate the VGG16 architecture,
optionally loading weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_dim_ordering="tf"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The dimension ordering
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `tf` dim ordering)
            or `(3, 224, 224)` (with `th` dim ordering).
            It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
#input_shape = _obtain_input_shape(input_shape,
# default_size=224,
# min_size=48,
# dim_ordering=K.image_dim_ordering(),
# include_top=include_top)
# sandeep to fix later for now do this as topmodel is retained n theano
input_shape = (3,224,224)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
print("sandeep adding lambda layer buddy good luck ")
x = Lambda(img_preprocess,input_shape=(3,224,224),output_shape=(3,224,224))(img_input)
x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv1')(x)
x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv1')(x)
x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv1')(x)
x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv2')(x)
x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv1')(x)
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv2')(x)
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv1')(x)
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv2')(x)
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='vgg16')
# load weights
if weights == 'imagenet':
if K.image_dim_ordering() == 'th':
if include_top:
weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels.h5',
TH_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5',
TH_WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image dimension ordering convention '
'(`image_dim_ordering="th"`). '
'For best performance, set '
'`image_dim_ordering="tf"` in '
'your Keras config '
'at ~/.keras/keras.json.')
convert_all_kernels_in_model(model)
else:
if include_top:
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'theano':
convert_all_kernels_in_model(model)
return model
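if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original file): build the full
    # network with the bundled ImageNet weights and print a layer summary.
    # Assumes the Theano dim ordering hard-coded above and that the weight
    # file can be downloaded.
    vgg = VGG16(include_top=True, weights='imagenet')
    vgg.summary()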
| cosmos342/VisionClassifier | vgg16.py | Python | mit | 8,410 |
#open a gsd file and write out a subsampled version, keeping only every N timesteps
#useful if you want to be analyzing a shorter trajectory
import gsd.hoomd
import argparse
import time
start = time.time()
parser = argparse.ArgumentParser(description='Subsample GSD trajectory')
parser.add_argument('fname',metavar='input',type=str,help='trajectory file to be subsampled')
parser.add_argument('ofname',metavar='output',type=str,help='where to write subsampled trajectory file')
parser.add_argument('N',metavar='N',type=int,help='keep frame each N timesteps')
args = parser.parse_args()
traj = gsd.hoomd.open(args.fname)
frame0 = traj[0]
newtraj = gsd.hoomd.open(args.ofname,'wb')
newtraj.append(frame0)
for i in range(args.N,len(traj),args.N):
s = gsd.hoomd.Snapshot()
pos = traj[i].particles.position
s.particles.position = pos
s.particles.N = len(pos)
newtraj.append(s)
end = time.time()
print('Subsampling took {0} s.'.format(end-start))
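# Example invocation (file names are hypothetical):
#   python gsdSubsample.py full_traj.gsd subsampled_traj.gsd 10
# keeps frame 0 and then every 10th frame of the input trajectory.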
| ramansbach/cluster_analysis | clustering/scripts/gsdSubsample.py | Python | mit | 959 |
#!/usr/bin/env python
"""
othello.py Humberto Henrique Campos Pinheiro
Game initialization and main loop
"""
import pygame
import ui
import player
import board
from config import BLACK, WHITE, HUMAN
import log
logger = log.setup_custom_logger('root')
# py2exe workaround
# import sys
# import os
# sys.stdout = open(os.devnull, 'w')
# sys.stderr = open(os.devnull, 'w')
class Othello:
"""
Game main class.
"""
def __init__(self):
""" Show options screen and start game modules"""
# start
self.gui = ui.Gui()
self.board = board.Board()
self.gui.show_menu(self.start)
def start(self, *args):
player1, player2, level = args
logger.info('Settings: player 1: %s, player 2: %s, level: %s ', player1, player2, level)
if player1 == HUMAN:
self.now_playing = player.Human(self.gui, BLACK)
else:
self.now_playing = player.Computer(BLACK, level + 3)
if player2 == HUMAN:
self.other_player = player.Human(self.gui, WHITE)
else:
self.other_player = player.Computer(WHITE, level + 3)
self.gui.show_game()
self.gui.update(self.board.board, 2, 2, self.now_playing.color)
def run(self):
clock = pygame.time.Clock()
while True:
clock.tick(60)
if self.board.game_ended():
whites, blacks, empty = self.board.count_stones()
if whites > blacks:
winner = WHITE
elif blacks > whites:
winner = BLACK
else:
winner = None
break
self.now_playing.get_current_board(self.board)
valid_moves = self.board.get_valid_moves(self.now_playing.color)
if valid_moves != []:
score, self.board = self.now_playing.get_move()
whites, blacks, empty = self.board.count_stones()
self.gui.update(self.board.board, blacks, whites,
self.now_playing.color)
self.now_playing, self.other_player = self.other_player, self.now_playing
self.gui.show_winner(winner)
pygame.time.wait(1000)
self.restart()
def restart(self):
self.board = board.Board()
self.gui.show_menu(self.start)
self.run()
def main():
game = Othello()
game.run()
if __name__ == '__main__':
main()
| humbhenri/pyOthello | othello.py | Python | mit | 2,473 |
"""
Usage:
run.py mlp --train=<train> --test=<test> --config=<config>
run.py som --train=<train> --test=<test> --config=<config>
Options:
--train Path to training data, txt file.
--test Path to test data, txt file.
--config Json configuration for the network.
"""
from redes_neurais.resources.manager import run_mlp, run_som
import docopt
def run():
try:
args = docopt.docopt(__doc__)
if args["mlp"]:
run_mlp(args['--config'], args['--train'], args['--test'])
if args["som"]:
run_som(args['--config'], args['--train'], args['--test'])
except docopt.DocoptExit as e:
print e.message
if __name__ == "__main__":
run()
| senechal/ssc0570-Redes-Neurais | run.py | Python | mit | 708 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import string
import random
# Simple recursive descent parser for dice rolls, e.g. '3d6+1d8+4'.
#
# roll := die {('+' | '-') die} ('+' | '-') modifier
# die := number 'd' number
# modifier := number
class StringBuf(object):
def __init__(self, s):
self.s = s
self.pos = 0
def peek(self):
return self.s[self.pos]
def getc(self):
c = self.peek()
self.pos += 1
return c
def ungetc(self):
self.pos -= 1
def tell(self):
return self.pos
class Symbol(object):
NUMBER = 0
D = 1
PLUS = 2
MINUS = 3
    def __init__(self, type_, pos, value):
        self.type = type_
        self.pos = pos
        self.value = value
def next_symbol(s):
    c = s.getc()
    while c in string.whitespace:
        c = s.getc()
    pos = s.tell() - 1
    if c in string.digits:
        # start of a number; keep consuming digits until the run ends
        literal = c
        while s.tell() < len(s.s) and s.peek() in string.digits:
            literal += s.getc()
        sym = Symbol(Symbol.NUMBER, pos, int(literal))
    elif c == 'd':
        # die indicator
        sym = Symbol(Symbol.D, pos, c)
    elif c == '+':
        # plus sign
        sym = Symbol(Symbol.PLUS, pos, c)
    elif c == '-':
        # minus sign
        sym = Symbol(Symbol.MINUS, pos, c)
    else:
        # unrecognized input
        raise ValueError('Syntax error at position ' + str(s.tell()))
    return sym
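# Illustrative sketch (not part of the original module): one way the `roll`
# production from the grammar comment above could be evaluated on top of
# StringBuf, Symbol and next_symbol. The function name and the handling of the
# trailing modifier are assumptions, not the original author's implementation.
def roll(expr):
    s = StringBuf(expr)
    total = 0
    sign = 1
    while s.tell() < len(expr):
        sym = next_symbol(s)
        if sym.type == Symbol.PLUS:
            sign = 1
        elif sym.type == Symbol.MINUS:
            sign = -1
        elif sym.type == Symbol.NUMBER:
            if s.tell() < len(expr) and s.peek() == 'd':
                s.getc()  # consume the 'd'
                sides = next_symbol(s).value
                total += sign * sum(random.randint(1, sides)
                                    for _ in range(sym.value))
            else:
                total += sign * sym.value  # plain modifier
    return total
# e.g. roll('3d6+4') returns a value between 7 and 22.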
| jcarreiro/jmc-python | imp/dice.py | Python | mit | 1,405 |
from __future__ import absolute_import, unicode_literals
from copy import copy
import json
from peewee import Model, CharField, ForeignKeyField, IntegerField
from utils.modules import BaseModule, modules
from utils.modules.api import api as pmb_api
from utils import db
class Action(Model):
class Meta:
database = db
# Name of the module or "__pmb" for the global one
module = CharField()
# Name of the method to call
method = CharField()
# JSON encoded parameters
parameters = CharField()
def get_info(self):
return get_action_info(self)
def get_parameters(self):
return json.loads(self.parameters)
class Command(Model):
class Meta:
database = db
command = CharField(unique=True)
def get_actions(self):
return (
Action
.select()
.join(CommandAction)
.join(Command)
.where(Command.id == self.id)
.order_by(CommandAction.order)
)
def clear_actions(self):
for action in self.get_actions():
action.delete_instance()
for commandaction in (
CommandAction.select()
.join(Command)
.where(Command.id == self.id)
):
commandaction.delete_instance()
class CommandAction(Model):
class Meta:
database = db
command = ForeignKeyField(Command)
action = ForeignKeyField(Action)
order = IntegerField(default=0)
def get_action_info(action):
return get_info_from_module(action.module, action.method)
def get_info_from_module(module, method):
if module == '__pmb':
api = pmb_api
else:
api = modules[module].api
return api[method]
| Gagaro/PimpMyBot | pimpmybot/utils/commands.py | Python | mit | 1,789 |
import _plotly_utils.basevalidators
class SourceattributionValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="sourceattribution",
parent_name="layout.mapbox.layer",
**kwargs
):
super(SourceattributionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/layout/mapbox/layer/_sourceattribution.py | Python | mit | 521 |
#! /usr/bin/python
f = open("birds.txt", "r")
data = f.read()
f.close()
lines = data.split("\n")
print("Wrong: The number of lines is", len(lines))
for l in lines:
if not l:
# Can also do this: if len(l) == 0
lines.remove(l)
print("Right: The number of lines is", len(lines))
| shantnu/PyEng | WordCount/count_lines_fixed.py | Python | mit | 299 |
from boto.ses import SESConnection
import os
def sendmail(name, comment):
source = "[email protected]"
subject = "Kommentar eingegangen"
body = 'Es wurde ein neues Wetter bewertet. Von: ' + name + ': ' + comment
to_addresses = ["[email protected]"]
connection = SESConnection(aws_access_key_id=os.environ['AWS_ACCESS_KEY'],
aws_secret_access_key=os.environ['AWS_SECRET_KEY'])
connection.send_email(source, subject, body, to_addresses) | PatteWi/pythonwetter | executables/mailsend.py | Python | mit | 513 |
import os
import os.path
import sys
import pygame
from buffalo import utils
from buffalo.scene import Scene
from buffalo.label import Label
from buffalo.button import Button
from buffalo.input import Input
from buffalo.tray import Tray
from camera import Camera
from mapManager import MapManager
from pluginManager import PluginManager
from toolManager import ToolManager
class CameraController:
def __init__(self):
self.fPos = (0.0, 0.0)
self.pos = (int(self.fPos[0]), int(self.fPos[1]))
self.xv, self.yv = 0.0, 0.0
self.speed = 1.2
self.shift_speed = self.speed * 5.0
def update(self, keys):
w, a, s, d, shift = (
keys[pygame.K_w],
keys[pygame.K_a],
keys[pygame.K_s],
keys[pygame.K_d],
keys[pygame.K_LSHIFT],
)
if shift:
speed = self.shift_speed
else:
speed = self.speed
speed *= utils.delta / 16.0
self.xv = 0.0
self.yv = 0.0
if w:
self.yv -= speed
if a:
self.xv -= speed
if s:
self.yv += speed
if d:
self.xv += speed
x, y = self.fPos
x += self.xv
y += self.yv
self.fPos = x, y
self.pos = (int(self.fPos[0]), int(self.fPos[1]))
class EditMapTestScene(Scene):
def on_escape(self):
sys.exit()
def blit(self):
Camera.blitView()
def update(self):
super(EditMapTestScene, self).update()
keys = pygame.key.get_pressed()
self.camera_controller.update(keys)
Camera.update()
MapManager.soft_load_writer()
def __init__(self):
Scene.__init__(self)
self.BACKGROUND_COLOR = (0, 0, 0, 255)
PluginManager.loadPlugins()
self.camera_controller = CameraController()
Camera.lock(self.camera_controller, initial_update=True)
Button.DEFAULT_SEL_COLOR = (50, 50, 100, 255)
self.tool_tray = Tray(
(utils.SCREEN_W - 270, 20),
(250, 800),
min_width=250, max_width=250,
min_height=250, max_height=800,
color=(100, 50, 50, 100),
)
self.tool_tray.labels.add(
Label(
(int(self.tool_tray.width / 2), 10),
"Tool Tray",
color=(255,255,255,255),
x_centered=True,
font="default24",
)
)
self.tool_tray.labels.add(
Label(
(int(self.tool_tray.width / 2), 25),
"________________",
color=(255,255,255,255),
x_centered=True,
font="default18",
)
)
self.tool_tray.labels.add(
Label(
(int(self.tool_tray.width / 2), 50),
"Function",
color=(255,255,255,255),
x_centered=True,
font="default18",
)
)
def set_func_state_to_select():
ToolManager.set_func_state(ToolManager.FUNC_SELECT)
self.tool_tray.render()
self.button_select_mode = Button(
(15, 80),
" Select Mode ",
color=(255,255,255,255),
bg_color=(100,100,200,255),
font="default12",
func=set_func_state_to_select,
)
self.tool_tray.buttons.add(self.button_select_mode)
def set_func_state_to_fill():
ToolManager.set_func_state(ToolManager.FUNC_FILL)
self.tool_tray.render()
self.button_fill_mode = Button(
(self.tool_tray.width - 15, 80),
" Fill Mode ",
color=(255,255,255,255),
bg_color=(100,100,200,255),
invert_x_pos=True,
font="default12",
func=set_func_state_to_fill,
)
self.tool_tray.buttons.add(self.button_fill_mode)
self.tool_tray.labels.add(
Label(
(int(self.tool_tray.width / 2), 120),
"________________",
color=(255,255,255,255),
x_centered=True,
font="default18",
)
)
self.tool_tray.labels.add(
Label(
(int(self.tool_tray.width / 2), 150),
"Area of Effect",
color=(255,255,255,255),
x_centered=True,
font="default18",
)
)
def set_effect_state_to_draw():
ToolManager.set_effect_state(ToolManager.EFFECT_DRAW)
self.tool_tray.render()
self.button_draw_mode = Button(
(15, 180),
" Draw Mode ",
color=(255,255,255,255),
bg_color=(100,100,200,255),
font="default12",
func=set_effect_state_to_draw,
)
self.tool_tray.buttons.add(self.button_draw_mode)
def set_effect_state_to_area():
ToolManager.set_effect_state(ToolManager.EFFECT_AREA)
self.tool_tray.render()
self.button_area_mode = Button(
(self.tool_tray.width - 15, 180),
" Area Mode ",
color=(255,255,255,255),
bg_color=(100,100,200,255),
invert_x_pos=True,
font="default12",
func=set_effect_state_to_area,
)
self.tool_tray.buttons.add(self.button_area_mode)
ToolManager.initialize_states(
ToolManager.FUNC_SELECT, ToolManager.EFFECT_DRAW,
(
self.button_fill_mode,
self.button_select_mode,
self.button_draw_mode,
self.button_area_mode,
),
)
self.tool_tray.render()
self.trays.add(self.tool_tray)
| benjamincongdon/adept | editMapTestScene.py | Python | mit | 5,875 |
"""Contains the drivers and interface code for pinball machines which use the Multimorphic R-ROC hardware controllers.
This code can be used with P-ROC driver boards, or with Stern SAM, Stern
Whitestar, Williams WPC, or Williams WPC95 driver boards.
Much of this code is from the P-ROC drivers section of the pyprocgame project,
written by Adam Preble and Gerry Stellenberg. It was originally released under
the MIT license and is released here under the MIT License.
More info on the P-ROC hardware platform: http://pinballcontrollers.com/
Original code source on which this module was based:
https://github.com/preble/pyprocgame
"""
from typing import Dict, List
from mpf.core.platform import DmdPlatform, DriverConfig, SwitchConfig, SegmentDisplaySoftwareFlashPlatform
from mpf.devices.segment_display.segment_display_text import ColoredSegmentDisplayText
from mpf.platforms.interfaces.dmd_platform import DmdPlatformInterface
from mpf.platforms.interfaces.segment_display_platform_interface import SegmentDisplaySoftwareFlashPlatformInterface
from mpf.platforms.p_roc_common import PDBConfig, PROCBasePlatform
from mpf.core.utility_functions import Util
from mpf.platforms.p_roc_devices import PROCDriver
MYPY = False
if MYPY: # pragma: no cover
from mpf.core.machine import MachineController # pylint: disable-msg=cyclic-import,unused-import
class PRocHardwarePlatform(PROCBasePlatform, DmdPlatform, SegmentDisplaySoftwareFlashPlatform):
"""Platform class for the P-ROC hardware controller.
Args:
----
machine: The MachineController instance.
"""
__slots__ = ["dmd", "alpha_display", "aux_port", "_use_extended_matrix",
"_use_first_eight_direct_inputs"]
def __init__(self, machine):
"""Initialise P-ROC."""
super().__init__(machine)
# validate config for p_roc
self.config = self.machine.config_validator.validate_config("p_roc", self.machine.config.get('p_roc', {}))
self._configure_device_logging_and_debug('P-Roc', self.config)
if self.config['driverboards']:
self.machine_type = self.pinproc.normalize_machine_type(self.config['driverboards'])
else:
self.machine_type = self.pinproc.normalize_machine_type(self.machine.config['hardware']['driverboards'])
self.dmd = None
self.alpha_display = None
self.aux_port = None
self._use_extended_matrix = False
self._use_first_eight_direct_inputs = False
async def connect(self):
"""Connect to the P-Roc."""
await super().connect()
self.aux_port = AuxPort(self)
self.aux_port.reset()
# Because PDBs can be configured in many different ways, we need to
# traverse the YAML settings to see how many PDBs are being used.
# Then we can configure the P-ROC appropriately to use those PDBs.
# Only then can we relate the YAML coil/light #'s to P-ROC numbers for
# the collections.
if self.machine_type == self.pinproc.MachineTypePDB:
self.debug_log("Configuring P-ROC for PDBs (P-ROC driver boards)")
self.pdbconfig = PDBConfig(self, self.machine.config, self.pinproc.DriverCount)
else:
self.debug_log("Configuring P-ROC for OEM driver boards")
def _get_default_subtype(self):
"""Return default subtype for P-Roc."""
return "matrix"
def __repr__(self):
"""Return string representation."""
return '<Platform.P-ROC>'
def get_info_string(self):
"""Dump infos about boards."""
infos = "Firmware Version: {} Firmware Revision: {} Hardware Board ID: {}\n".format(
self.version, self.revision, self.hardware_version)
return infos
@classmethod
def get_coil_config_section(cls):
"""Return coil config section."""
return "p_roc_coils"
def configure_driver(self, config: DriverConfig, number: str, platform_settings: dict):
"""Create a P-ROC driver.
Typically drivers are coils or flashers, but for the P-ROC this is
also used for matrix-based lights.
Args:
----
config: Dictionary of settings for the driver.
number: Number of this driver
platform_settings: Platform specific setting for this driver.
Returns a reference to the PROCDriver object which is the actual object
you can use to pulse(), patter(), enable(), etc.
"""
# todo need to add Aux Bus support
# todo need to add virtual driver support for driver counts > 256
# Find the P-ROC number for each driver. For P-ROC driver boards, the
# P-ROC number is specified via the Ax-By-C format. For OEM driver
# boards configured via driver numbers, libpinproc's decode() method
# can provide the number.
if self.machine_type == self.pinproc.MachineTypePDB:
proc_num = self.pdbconfig.get_proc_coil_number(str(number))
if proc_num == -1:
raise AssertionError("Driver {} cannot be controlled by the P-ROC. ".format(str(number)))
else:
proc_num = self.pinproc.decode(self.machine_type, str(number))
polarity = platform_settings.get("polarity", None)
driver = PROCDriver(proc_num, config, self, number, polarity)
self._late_init_futures.append(driver.initialise())
return driver
def configure_switch(self, number: str, config: SwitchConfig, platform_config: dict):
"""Configure a P-ROC switch.
Args:
----
number: String number of the switch to configure.
config: SwitchConfig settings.
platform_config: Platform specific settings.
Returns: A configured switch object.
"""
del platform_config
try:
if number.startswith("SD") and 0 <= int(number[2:]) <= 7:
self._use_first_eight_direct_inputs = True
_, y = number.split('/', 2)
if int(y) > 7:
self._use_extended_matrix = True
except ValueError:
pass
if self._use_extended_matrix and self._use_first_eight_direct_inputs:
raise AssertionError(
"P-Roc vannot use extended matrix and the first eight direct inputs at the same "
"time. Either only use SD8 to SD31 or only use matrix X/Y with Y <= 7. Offending "
"switch: {}".format(number))
if self.machine_type == self.pinproc.MachineTypePDB:
proc_num = self.pdbconfig.get_proc_switch_number(str(number))
if proc_num == -1:
raise AssertionError("Switch {} cannot be controlled by the P-ROC. ".format(str(number)))
else:
proc_num = self.pinproc.decode(self.machine_type, str(number))
return self._configure_switch(config, proc_num)
async def get_hw_switch_states(self) -> Dict[str, bool]:
"""Read in and set the initial switch state.
The P-ROC uses the following values for hw switch states:
1 - closed (debounced)
2 - open (debounced)
3 - closed (not debounced)
4 - open (not debounced)
"""
switch_states = await self.run_proc_cmd("switch_get_states")
states = {}
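        # values 1 and 3 mean closed (debounced / not debounced), 2 and 4 mean open,
        # so anything in (1, 3) maps to True (= switch closed)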
for switch, state in enumerate(switch_states):
states[switch] = bool(state in (1, 3))
return states
def configure_dmd(self):
"""Configure a hardware DMD connected to a classic P-ROC."""
self.dmd = PROCDMD(self, self.machine)
return self.dmd
async def configure_segment_display(self, number: str, display_size: int, platform_settings) \
-> "SegmentDisplaySoftwareFlashPlatformInterface":
"""Configure display."""
del platform_settings
del display_size
number_int = int(number)
        if not 0 <= number_int <= 3:
raise AssertionError("Number must be between 0 and 3 for p_roc segment display.")
if not self.alpha_display:
self.alpha_display = AuxAlphanumericDisplay(self, self.aux_port)
display = PRocAlphanumericDisplay(self.alpha_display, number_int)
self._handle_software_flash(display)
return display
def process_events(self, events):
"""Process events from the P-Roc."""
for event in events:
event_type = event['type']
event_value = event['value']
if event_type == self.pinproc.EventTypeDMDFrameDisplayed:
# ignore this for now
pass
elif event_type in (self.pinproc.EventTypeSwitchClosedDebounced,
self.pinproc.EventTypeSwitchClosedNondebounced):
self.machine.switch_controller.process_switch_by_num(
state=1, num=event_value, platform=self)
elif event_type in (self.pinproc.EventTypeSwitchOpenDebounced,
self.pinproc.EventTypeSwitchOpenNondebounced):
self.machine.switch_controller.process_switch_by_num(
state=0, num=event_value, platform=self)
else:
self.log.warning("Received unrecognized event from the P-ROC. "
"Type: %s, Value: %s", event_type, event_value)
class PROCDMD(DmdPlatformInterface):
"""Parent class for a physical DMD attached to a P-ROC.
Args:
----
platform: Reference to the MachineController's proc attribute.
machine: Reference to the MachineController
"""
__slots__ = ["machine", "platform"]
def __init__(self, platform, machine):
"""Set up DMD."""
self.platform = platform # type: PROCBasePlatform
self.machine = machine # type: MachineController
# dmd_timing defaults should be 250, 400, 180, 800
if self.machine.config['p_roc']['dmd_timing_cycles']:
dmd_timing = Util.string_to_event_list(
self.machine.config['p_roc']['dmd_timing_cycles'])
self.platform.run_proc_cmd_no_wait("dmd_update_config", dmd_timing)
def set_brightness(self, brightness: float):
"""Set brightness."""
# currently not supported. can be implemented using dmd_timing_cycles
assert brightness == 1.0
def update(self, data):
"""Update the DMD with a new frame.
Args:
----
data: A 4096-byte raw string.
"""
if len(data) == 4096:
self.platform.run_proc_cmd_no_wait("_dmd_send", data)
else:
self.machine.log.warning("Received DMD frame of length %s instead"
"of 4096. Discarding...", len(data))
class AuxPort:
"""Aux port on the P-Roc."""
__slots__ = ["platform", "_commands"]
def __init__(self, platform):
"""Initialise aux port."""
self.platform = platform
self._commands = []
def reset(self):
"""Reset aux port."""
commands = [self.platform.pinproc.aux_command_disable()]
for _ in range(1, 255):
commands += [self.platform.pinproc.aux_command_jump(0)]
self.platform.run_proc_cmd_no_wait("aux_send_commands", 0, commands)
def reserve_index(self):
"""Return index of next free command slot and reserve it."""
self._commands += [[]]
return len(self._commands) - 1
def update(self, index, commands):
"""Update command slot with command."""
self._commands[index] = commands
self._write_commands()
def _write_commands(self):
"""Write commands to hardware."""
# disable program
commands = [self.platform.pinproc.aux_command_disable()]
# build command list
for command_set in self._commands:
commands += command_set
self.platform.run_proc_cmd_no_wait("aux_send_commands", 0, commands)
# jump from slot 0 to slot 1. overwrites the disable
self.platform.run_proc_cmd_no_wait("aux_send_commands", 0, [self.platform.pinproc.aux_command_jump(1)])
class PRocAlphanumericDisplay(SegmentDisplaySoftwareFlashPlatformInterface):
"""Since AuxAlphanumericDisplay updates all four displays wrap it and set the correct offset."""
__slots__ = ["display"]
def __init__(self, display, index):
"""Initialise alpha numeric display."""
super().__init__(index)
self.display = display
def _set_text(self, text: ColoredSegmentDisplayText):
"""Set digits to display."""
# TODO: use DisplayCharacter and intern dots and commas
self.display.set_text(text.convert_to_str(), self.number)
class AuxAlphanumericDisplay:
"""An alpha numeric display connected to the aux port on the P-Roc."""
# Start at ASCII table offset 32: ' '
ascii_segments = [0x0000, # ' '
0x016a, # '!' Random Debris Character 1
0x3014, # '"' Random Debris Character 2
0x5d80, # '#' Random Debris Character 3
0x00a4, # '$' Random Debris Character 4
0x3270, # '%' Random Debris Character 5
0x4640, # '&' Random Debris Character 6
0x0200, # '''
0x1400, # '('
0x4100, # ')'
0x7f40, # '*'
0x2a40, # '+'
0x8080, # ','
0x0840, # '-'
0x8000, # '.'
0x4400, # '/'
0x003f, # '0'
0x0006, # '1'
0x085b, # '2'
0x084f, # '3'
0x0866, # '4'
0x086d, # '5'
0x087d, # '6'
0x0007, # '7'
0x087f, # '8'
0x086f, # '9'
0x0821, # ':' Random Debris Character 7
0x1004, # ';' Random Debris Character 8
0x1c00, # '<' Left Arrow
0x1386, # '=' Random Debris Character 9
0x4140, # '>' Right Arrow
0x0045, # '?' Random Debris Character 10
0x4820, # '@' Random Debris Character 11
0x0877, # 'A'
0x2a4f, # 'B'
0x0039, # 'C'
0x220f, # 'D'
0x0879, # 'E'
0x0871, # 'F'
0x083d, # 'G'
0x0876, # 'H'
0x2209, # 'I'
0x001e, # 'J'
0x1470, # 'K'
0x0038, # 'L'
0x0536, # 'M'
0x1136, # 'N'
0x003f, # 'O'
0x0873, # 'P'
0x103f, # 'Q'
0x1873, # 'R'
0x086d, # 'S'
0x2201, # 'T'
0x003e, # 'U'
0x4430, # 'V'
0x5036, # 'W'
0x5500, # 'X'
0x2500, # 'Y'
0x4409, # 'Z'
0x6004, # '[' Random Debris Character 12
0x6411, # '\' Random Debris Character 13
0x780a, # ']' Random Debris Character 14
0x093a, # '^' Random Debris Character 15
0x0008, # '_'
0x2220, # '`' Random Debris Character 16
0x0c56, # 'a' Broken Letter a
0x684e, # 'b' Broken Letter b
0x081c, # 'c' Broken Letter c
0x380e, # 'd' Broken Letter d
0x1178, # 'e' Broken Letter e
0x4831, # 'f' Broken Letter f
0x083d, # 'g' Broken Letter g NOT CREATED YET
0x0854, # 'h' Broken Letter h
0x2209, # 'i' Broken Letter i NOT CREATED YET
0x001e, # 'j' Broken Letter j NOT CREATED YET
0x1070, # 'k' Broken Letter k
0x0038, # 'l' Broken Letter l NOT CREATED YET
0x0536, # 'm' Broken Letter m NOT CREATED YET
0x1136, # 'n' Broken Letter n NOT CREATED YET
0x085c, # 'o' Broken Letter o
0x0873, # 'p' Broken Letter p NOT CREATED YET
0x103f, # 'q' Broken Letter q NOT CREATED YET
0x1c72, # 'r' Broken Letter r
0x116c, # 's' Broken Letter s
0x2120, # 't' Broken Letter t
0x003e, # 'u' Broken Letter u NOT CREATED YET
0x4430, # 'v' Broken Letter v NOT CREATED YET
0x5036, # 'w' Broken Letter w NOT CREATED YET
0x5500, # 'x' Broken Letter x NOT CREATED YET
0x2500, # 'y' Broken Letter y NOT CREATED YET
0x4409 # 'z' Broken Letter z NOT CREATED YET
]
strobes = [8, 9, 10, 11, 12]
full_intensity_delay = 350 # microseconds
inter_char_delay = 40 # microseconds
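    # The display is multiplexed: each of the 16 character positions is strobed in turn.
    # full_intensity_delay is how long a character stays lit at intensity 1.0; lower
    # intensities shorten that on-time proportionally (see display() below).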
__slots__ = ["platform", "aux_controller", "aux_index", "texts"]
def __init__(self, platform, aux_controller):
"""Initialise the alphanumeric display."""
self.platform = platform
self.aux_controller = aux_controller
self.aux_index = aux_controller.reserve_index()
self.texts = [" "] * 4
def set_text(self, text, index):
"""Set text for display."""
if len(text) != 8:
text = text[0:8].rjust(8, ' ')
self.texts[index] = text
# build expected format
input_strings = [self.texts[0] + self.texts[1], self.texts[2] + self.texts[3]]
self.display(input_strings)
def display(self, input_strings, intensities=None):
"""Set display text."""
strings = []
if intensities is None:
intensities = [[1] * 16] * 2
# Make sure strings are at least 16 chars.
# Then convert each string to a list of chars.
for j in range(0, 2):
if len(input_strings[j]) < 16:
input_strings[j] += ' ' * (16 - len(input_strings[j]))
strings += [list(input_strings[j])]
        # Make sure intensities are 1 or less
for i in range(0, 16):
for j in range(0, 2):
if intensities[j][i] > 1:
intensities[j][i] = 1
commands = []
char_on_time = []
char_off_time = []
# Initialize a 2x16 array for segments value
segs = [[0] * 16 for _ in range(2)]
# Loop through each character
for i in range(0, 16):
            # Activate the character position (this goes to both displays)
commands += [self.platform.pinproc.aux_command_output_custom(i, 0, self.strobes[0], False, 0)]
for j in range(0, 2):
segs[j][i] = self.ascii_segments[ord(strings[j][i]) - 32]
# Check for commas or periods.
# If found, squeeze comma into previous character.
                # No point checking the last character (plus, this avoids an
                # indexing error by not checking i+1 on the 16th char).
if i < 15:
comma_dot = strings[j][i + 1]
if comma_dot in (".", ","):
segs[j][i] |= self.ascii_segments[ord(comma_dot) - 32]
strings[j].remove(comma_dot)
# Append a space to ensure there are enough chars.
strings[j].append(' ')
# character is 16 bits long, characters are loaded in 2 lots of 8 bits,
# for each display (4 enable lines total)
commands += [self.platform.pinproc.aux_command_output_custom(
segs[j][i] & 0xff, 0,
                    self.strobes[j * 2 + 1], False, 0)]  # first 8 bits of character data
commands += [self.platform.pinproc.aux_command_output_custom(
(segs[j][i] >> 8) & 0xff, 0,
                    self.strobes[j * 2 + 2], False, 0)]  # second 8 bits of character data
char_on_time += [intensities[j][i] * self.full_intensity_delay]
char_off_time += [self.inter_char_delay + (self.full_intensity_delay - char_on_time[j])]
if char_on_time[0] < char_on_time[1]:
first = 0
second = 1
else:
first = 1
second = 0
# Determine amount of time to leave the other char on after the
# first is off.
between_delay = char_on_time[second] - char_on_time[first]
# Not sure if the hardware will like a delay of 0
# Use 2 to be extra safe. 2 microseconds won't affect display.
if between_delay == 0:
between_delay = 2
# Delay until it's time to turn off the character with the lowest intensity
commands += [self.platform.pinproc.aux_command_delay(char_on_time[first])]
commands += [self.platform.pinproc.aux_command_output_custom(0, 0, self.strobes[first * 2 + 1], False, 0)]
commands += [self.platform.pinproc.aux_command_output_custom(0, 0, self.strobes[first * 2 + 2], False, 0)]
# Delay until it's time to turn off the other character.
commands += [self.platform.pinproc.aux_command_delay(between_delay)]
commands += [self.platform.pinproc.aux_command_output_custom(0, 0, self.strobes[second * 2 + 1], False, 0)]
commands += [self.platform.pinproc.aux_command_output_custom(0, 0, self.strobes[second * 2 + 2], False, 0)]
# Delay for the inter-digit delay.
commands += [self.platform.pinproc.aux_command_delay(char_off_time[second])]
# Send the new list of commands to the Aux port controller.
self.aux_controller.update(self.aux_index, commands)
| missionpinball/mpf | mpf/platforms/p_roc.py | Python | mit | 22,477 |
# Copyright (C) Ivan Kravets <[email protected]>
# See LICENSE for details.
"""
Builder for Atmel AVR series of microcontrollers
"""
from os.path import join
from time import sleep
from SCons.Script import (COMMAND_LINE_TARGETS, AlwaysBuild, Default,
DefaultEnvironment, SConscript)
from platformio.util import get_serialports
def BeforeUpload(target, source, env): # pylint: disable=W0613,W0621
def _rpi_sysgpio(path, value):
with open(path, "w") as f:
f.write(str(value))
if "micronucleus" in env['UPLOADER']:
print "Please unplug/plug device ..."
upload_options = env.get("BOARD_OPTIONS", {}).get("upload", {})
if "usb" in env.subst("$UPLOAD_PROTOCOL"):
upload_options['require_upload_port'] = False
env.Replace(UPLOAD_SPEED=None)
if env.subst("$UPLOAD_SPEED"):
env.Append(UPLOADERFLAGS=[
"-b", "$UPLOAD_SPEED",
"-D"
])
if not upload_options.get("require_upload_port", False):
return
env.AutodetectUploadPort()
env.Append(UPLOADERFLAGS=["-P", "$UPLOAD_PORT"])
if env.subst("$BOARD") == "raspduino":
_rpi_sysgpio("/sys/class/gpio/export", 18)
_rpi_sysgpio("/sys/class/gpio/gpio18/direction", "out")
_rpi_sysgpio("/sys/class/gpio/gpio18/value", 1)
sleep(0.1)
_rpi_sysgpio("/sys/class/gpio/gpio18/value", 0)
_rpi_sysgpio("/sys/class/gpio/unexport", 18)
else:
if not upload_options.get("disable_flushing", False):
env.FlushSerialBuffer("$UPLOAD_PORT")
before_ports = [i['port'] for i in get_serialports()]
if upload_options.get("use_1200bps_touch", False):
env.TouchSerialPort("$UPLOAD_PORT", 1200)
if upload_options.get("wait_for_upload_port", False):
env.Replace(UPLOAD_PORT=env.WaitForNewSerialPort(before_ports))
env = DefaultEnvironment()
SConscript(env.subst(join("$PIOBUILDER_DIR", "scripts", "baseavr.py")))
if "digispark" in env.get(
"BOARD_OPTIONS", {}).get("build", {}).get("core", ""):
env.Replace(
UPLOADER=join("$PIOPACKAGES_DIR", "tool-micronucleus", "micronucleus"),
UPLOADERFLAGS=[
"-c", "$UPLOAD_PROTOCOL",
"--timeout", "60"
],
UPLOADHEXCMD='"$UPLOADER" $UPLOADERFLAGS $SOURCES'
)
else:
env.Replace(
UPLOADER=join("$PIOPACKAGES_DIR", "tool-avrdude", "avrdude"),
UPLOADERFLAGS=[
"-v",
"-p", "$BOARD_MCU",
"-C",
'"%s"' % join("$PIOPACKAGES_DIR", "tool-avrdude", "avrdude.conf"),
"-c", "$UPLOAD_PROTOCOL"
],
UPLOADHEXCMD='"$UPLOADER" $UPLOADERFLAGS -U flash:w:$SOURCES:i',
UPLOADEEPCMD='"$UPLOADER" $UPLOADERFLAGS -U eeprom:w:$SOURCES:i'
)
#
# Target: Build executable and linkable firmware
#
target_elf = env.BuildFirmware()
#
# Target: Extract EEPROM data (from EEMEM directive) to .eep file
#
target_eep = env.Alias("eep", env.ElfToEep(join("$BUILD_DIR", "firmware"),
target_elf))
#
# Target: Build the .hex file
#
if "uploadlazy" in COMMAND_LINE_TARGETS:
target_firm = join("$BUILD_DIR", "firmware.hex")
else:
target_firm = env.ElfToHex(join("$BUILD_DIR", "firmware"), target_elf)
#
# Target: Print binary size
#
target_size = env.Alias("size", target_elf, "$SIZEPRINTCMD")
AlwaysBuild(target_size)
#
# Target: Upload by default .hex file
#
upload = env.Alias(["upload", "uploadlazy"], target_firm,
[BeforeUpload, "$UPLOADHEXCMD"])
AlwaysBuild(upload)
#
# Target: Upload .eep file
#
uploadeep = env.Alias("uploadeep", target_eep, [
BeforeUpload, "$UPLOADEEPCMD"])
AlwaysBuild(uploadeep)
#
# Setup default targets
#
Default([target_firm, target_size])
| bkudria/platformio | platformio/builder/scripts/atmelavr.py | Python | mit | 3,837 |
"""
## NumPy UltraQuick Tutorial
[source](https://colab.research.google.com/github/google/eng-edu/blob/main/ml/cc/exercises/numpy_ultraquick_tutorial.ipynb?utm_source=mlcc)
> create/manipulate vectors and matrices
"""
## import module as
import numpy as np
## populate array with specific numbers
### 'np.array' to create NumPy matrix with hand-picked values
one_dim_array = np.array([1.3, 3.7, 4.3, 5.6, 7.9])
print(one_dim_array)
two_dim_array = np.array([[1.3, 3.7], [4.3, 5.6], [6.4, 7.9]])
print(two_dim_array)
### can populate a matrix with all zeros or all ones using 'np.zeros' or 'np.ones'
## populate arrays with number sequences using 'np.arange'
seq_int = np.arange(3, 9)
print(seq_int)
## populate arrays with random numbers
### 'randint' for integers
rand_ints_between_10_and_50 = np.random.randint(low=10, high=51, size=(5))
print(rand_ints_between_10_and_50)
### 'random' for floats between 0.0 & 1.0
rand_floats_between_0_and_1 = np.random.random([5])
print(rand_floats_between_0_and_1)
## math operations on NumPy operands
### 'broadcasting' means expanding the shape of an operand in a matrix math operation
### to dimensions compatible with that operation
rand_floats_between_1_and_2 = rand_floats_between_0_and_1 + 1.0
rand_floats_between_100_and_200 = rand_floats_between_1_and_2 * 100.0
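### e.g. the "+ 1.0" above broadcasts the scalar across all 5 elements, as if it were
### the array [1.0, 1.0, 1.0, 1.0, 1.0]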
"""
Task.1 Create a Linear Dataset
to create a simple dataset consisting of a single feature and a label
* assign an int sequence from 6 to 20 to a NumPy array named 'feature'
* assign 15 values to a NumPy array named 'label' as: 'label = (3)(feature) + 4'; e.g. the first value is '(3)(6) + 4 = 22'
"""
feature = np.arange(6, 21)
print(feature)
label = (feature * 3) + 4.0
print(label)
"""
Task.2 Add some noise to the dataset
to make the dataset realistic, insert random noise into each element of the 'label' array
* modify each value assigned to 'label' by adding a different random float between -2 and +2; don't use 'broadcasting',
  instead create a noise array having the same dimension
"""
noise = (np.random.random([15]) * 4) - 2
print(noise)
label = label + noise
print(label)
| abhishekkr/tutorials_as_code | talks-articles/machine-learning/google-courses/prework--numpy-ultraquick-tutorial.py | Python | mit | 2,051 |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
PROJECT_MAIL_SUBJECT_PREFIX = '[Project]'
PROJECT_MAIL_SENDER = 'Project Admin <[email protected]>'
PROJECT_ADMIN = os.environ.get('PROJECT_ADMIN')
CELERY_BROKER_URL = 'amqp://localhost//'
CELERY_RESULT_BACKEND = 'amqp://'
CELERY_INCLUDE = ['celery_worker']
SQL_USERNAME = os.environ.get('MYSQL_USERNAME')
SQL_PASSWORD = os.environ.get('MYSQL_PASSWORD')
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'postgresql://' + str(Config.SQL_USERNAME) + ':' + str(
Config.SQL_PASSWORD) + '@localhost/testproject'
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
| ivngithub/testproject | config.py | Python | mit | 1,559 |
"""TailorDev Biblio
Bibliography management with Django.
"""
__version__ = "2.0.0"
default_app_config = "td_biblio.apps.TDBiblioConfig"
| TailorDev/django-tailordev-biblio | td_biblio/__init__.py | Python | mit | 139 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='SkipRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(max_length=64, verbose_name='Sender Key')),
],
options={
'verbose_name': 'Skip request',
'verbose_name_plural': 'Skip requests',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.TextField(help_text='Description text for the video', verbose_name='Description', blank=True)),
('youtube_url', models.URLField(help_text='URL to a youtube video', verbose_name='Youtube URL')),
('key', models.CharField(max_length=64, null=True, verbose_name='Sender Key', blank=True)),
('deleted', models.IntegerField(default=False, verbose_name='Deleted')),
('playing', models.BooleanField(default=False, verbose_name='Playing')),
('duration', models.IntegerField(default=0, verbose_name='Duration')),
],
options={
'verbose_name': 'Video',
'verbose_name_plural': 'Videos',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='skiprequest',
name='event',
field=models.ForeignKey(verbose_name='Video', to='manager.Video'),
preserve_default=True,
),
]
| katajakasa/utuputki | Utuputki/manager/migrations/0001_initial.py | Python | mit | 1,910 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-04 18:58
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('snippets', '0006_snippet_last_used'),
]
operations = [
migrations.AlterModelOptions(
name='snippet',
options={'ordering': ('-updated_at',), 'verbose_name': 'snippet', 'verbose_name_plural': 'snippets'},
),
migrations.AlterField(
model_name='snippet',
name='description',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='description'),
),
migrations.AlterField(
model_name='snippet',
name='slug',
field=models.SlugField(max_length=255, verbose_name='name'),
),
migrations.AlterField(
model_name='snippet',
name='updated_at',
field=models.DateTimeField(blank=True, null=True, verbose_name='updated at'),
),
migrations.AlterField(
model_name='snippet',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='snippet', to=settings.AUTH_USER_MODEL),
),
]
| vitorfs/cmdbox | cmdbox/snippets/migrations/0007_auto_20160404_1858.py | Python | mit | 1,372 |
import itertools, logging
from gibbs.models import CommonName, Compound, Enzyme
from haystack.query import SearchQuerySet
class Error(Exception):
pass
class IllegalQueryError(Error):
pass
class Match(object):
"""An object containing a string match and it's score."""
def __init__(self, key, value, score):
"""Initialize a Match.
Args:
key: the object that matched.
value: the value of the match (the object pointed to by the key).
score: between 0.0 and 1.0, higher is better.
"""
self.key = key
self.value = value
self.score = score
def __eq__(self, other):
"""Equality checking between matches, used for testing."""
return (self.key == other.key and
self.value == other.value and
self.score == other.score)
def __str__(self):
"""Get as a string for debugging/printability."""
return '<matcher.Match> value=%s, score=%f' % (self.value,
self.score)
def TypeStr(self):
if self.IsCompound():
return 'Compound'
elif self.IsEnzyme():
return 'Enzyme'
return ''
def IsCompound(self):
return isinstance(self.value, Compound)
def IsEnzyme(self):
return isinstance(self.value, Enzyme)
def Key(self):
if self.IsCompound():
return self.value.kegg_id
elif self.IsEnzyme():
return self.value.ec
return None
class Matcher(object):
"""A class that matches a string against the database.
The base implementation does exact matching.
"""
def __init__(self, max_results=10, min_score=0.0, match_enzymes=True):
"""Initializes the Matcher.
Args:
scorer: a MatchScorer object for scoring.
max_results: the maximum number of matches to return.
min_score: the minimum match score to return.
"""
self._max_results = max_results
self._min_score = min_score
self._match_enzymes = match_enzymes
self._prefetch_objects = ['compound_set']
if self._match_enzymes:
self._prefetch_objects.extend(['enzyme_set', 'enzyme_set__reactions'])
def _AcceptQuery(self, query):
"""Accept or rejec expression = self._PrepareExpression(query)
results = models.CommonName.objects.filter(name__iregex=expression)t the query.
Returns:
True if the query is accepted.
"""
if query.strip():
return True
return False
def _PreprocessQuery(self, query):
"""Perform pre-search query manipulation.
Default implementation simply strips leading/trailing whitespace
and makes the query lowercase.
Args:
query: the string query.
Returns:
The pre-processed query as a string.
"""
query = query.strip().lower()
return query
def _PrepocessCandidate(self, candidate):
"""Perform pre-match candidate manipulation.
Default implementation converts to a lower-case string.
Args:
candidate: the candidate object (convertible to a string).
Returns:
The pre-processed candidate as a string.
"""
return str(candidate).strip().lower()
def _FindNameMatches(self, query):
"""Find all the matches for this query.
Args:
query: the query to match.
Returns:
A list of CommonName objects matching the query.
"""
try:
res = SearchQuerySet().filter(text__exact=query).best_match()
return [res.object]
except Exception as e:
logging.warning('Query failed: ' + str(e))
return []
def _MakeMatchObjects(self, common_names):
"""Given the list of CommonNames, make the Matches.
Args:
common_names: a list of CommonNames.
Returns:
A list of Match objects.
"""
matches = []
for name in common_names:
for compound in name.compound_set.all():
matches.append(Match(name, compound, 0.0))
if self._match_enzymes:
for enzyme in name.enzyme_set.all():
matches.append(Match(name, enzyme, 0.0))
return matches
def _GetScore(self, query, match):
"""Get the score for a query-match pair.
Args:
query: the query string.
match: the Match object.
Returns:
A score between 0.0 and 1.0.
"""
query_len = float(len(query))
candidate_len = float(len(str(match.key)))
return (query_len / candidate_len)
def _ScoreMatches(self, query, matches):
"""Set the match scores for all matches.
Args:
query: the query string.
matches: a list of match objects with uninitialized scores.
"""
for m in matches:
m.score = self._GetScore(query, m)
def _FilterMatches(self, matches):
"""Filter the match list for min score.
Args:
matches: an unfiltered list of match objects.
"""
# Filter matches without data or beneath the score limit.
f = lambda match: (match.score >= self._min_score and
match.value)
filtered = filter(f, matches)
# Take only unique matches.
group_key = lambda match: match.Key()
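        # Note: itertools.groupby only groups adjacent entries, so matches that share a
        # key are assumed to appear consecutively in `filtered`.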
filtered_matches = []
for _, g in itertools.groupby(filtered, key=group_key):
# Keep the unique match with the top score.
max_match = None
for match in g:
if not max_match or max_match.score < match.score:
max_match = match
filtered_matches.append(max_match)
return filtered_matches
def _SortAndClip(self, matches):
matches.sort(key=lambda m: m.score, reverse=True)
return matches[:self._max_results]
def Match(self, query):
"""Find matches for the query in the library.
Args:
query: the string query.
Returns:
A sorted list of Match objects or None if
the query could not be parsed.
"""
if not self._AcceptQuery(query):
raise IllegalQueryError('%s is not a valid query' % query)
processed_query = self._PreprocessQuery(query)
logging.debug('Query = %s' % processed_query)
name_matches = self._FindNameMatches(processed_query)
logging.debug('Found %d name matches' % len(name_matches))
matches = self._MakeMatchObjects(name_matches)
self._ScoreMatches(processed_query, matches)
matches = self._FilterMatches(matches)
logging.debug('Found %d matches' % len(matches))
return self._SortAndClip(matches)
| flamholz/equilibrator | matching/matcher.py | Python | mit | 7,321 |
# -*- coding: utf-8 -*-
from sys import version_info
import copy
import types
try:
from collections import OrderedDict
except ImportError: # pragma: nocover
# Django < 1.5 fallback
from django.utils.datastructures import SortedDict as OrderedDict # noqa
# There is a bug with deepcopy in 2.6, patch if we are running python < 2.7
# http://bugs.python.org/issue1515
if version_info < (2, 7, 0):
def _deepcopy_method(x, memo):
return type(x)(x.im_func, copy.deepcopy(x.im_self, memo), x.im_class)
copy._deepcopy_dispatch[types.MethodType] = _deepcopy_method
| rjusher/djsqla-query-operations | djsqla_query_operations/compat.py | Python | mit | 590 |
import os, sys, re
import ConfigParser
import optparse
import shutil
import subprocess
import difflib
import collections
#import numpy as np
# Alberto Meseguer file; 18/11/2016
# Modified by Quim Aguirre; 13/03/2017
# This file is the master coordinator of the DIANA project. It is used to run multiple DIANA commands in parallel in the cluster
#-------------#
# Functions #
#-------------#
#-------------#
# Options #
#-------------#
def parse_options():
'''
This function parses the command line arguments and returns an optparse object.
'''
parser = optparse.OptionParser("pddi.py [--dummy=DUMMY_DIR] -i INPUT_FILE [-o OUTPUT_DIR] [-v]")
# Directory arguments
parser.add_option("-i", action="store", type="string", dest="input_file", help="Input crossings file", metavar="INPUT_FILE")
parser.add_option("-s", action="store", type="string", dest="sif_file", help="Input SIF file")
parser.add_option("-t", action="store", type="string", dest="type_of_analysis", help="Type of analysis: 'profile_creation' or 'comparison'")
parser.add_option("--dummy_dir", default="dummy/", action="store", type="string", dest="dummy_dir", help="Dummy directory (default = ./)", metavar="DUMMY_DIR")
    parser.add_option('-w', '--workspace', dest='workspace', action='store', default=os.path.join(os.path.dirname(__file__), 'workspace'),
help = """Define the workspace directory where the data directory and the results directory will be created""")
(options, args) = parser.parse_args()
if options.input_file is None or options.sif_file is None or options.type_of_analysis is None:
parser.error("missing arguments: type option \"-h\" for help")
return options
#-------------#
# Main #
#-------------#
# Add "." to sys.path #
src_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(src_path)
# Read configuration file #
config = ConfigParser.ConfigParser()
config_file = os.path.join(src_path, "config_marvin.ini")
config.read(config_file)
import hashlib
# Imports my functions #
import functions
# Define which python to be used #
python = os.path.join(config.get("Paths", "python_path"), "python")
# Arguments & Options #
options = parse_options()
# Directory arguments
input_file = os.path.abspath(options.input_file)
dummy_dir = os.path.abspath(options.dummy_dir)
# Create directories if necessary
logs_dir = src_path + "/logs"
if not os.path.exists(logs_dir):
os.mkdir(logs_dir)
f = open(input_file, "r")
# Depending on the type of analysis, we will submit different commands
if options.type_of_analysis == 'profile_creation':
analysis = '-prof'
all_drugs = set()
for line in f:
(drug1, drug2) = line.strip().split('---')
all_drugs.add(drug1)
all_drugs.add(drug2)
f.close()
for drug in all_drugs:
# Check if the p-value file is already created. If so, skip
pvalue_file = data_dir + "/" + drug + "/guild_results_using_sif/output_scores.sif.netcombo.pval"
if os.path.exists(pvalue_file):
continue
guild_path = '/gpfs42/robbyfs/homes/users/qaguirre/guild/scoreN'
command = 'python {}/diana_cluster/scripts/generate_profiles.py -d {} -pt geneid -sif {} -gu {}'.format( src_path, drug, options.sif_file, guild_path )
print(command)
# python /home/quim/project/diana_cluster/scripts/generate_profiles.py -d 'DCC0303' -pt 'geneid' -sif /home/quim/project/diana_cluster/workspace/sif/human_eAFF_geneid_2017.sif -gu /home/quim/project/diana_cluster/diana/toolbox/scoreN
# To run the command at the local machine
#os.system(command)
#To run in the cluster submitting files to queues
functions.submit_command_to_queue(command, max_jobs_in_queue=int(config.get("Cluster", "max_jobs_in_queue")), queue_file="command_queues_marvin.txt", dummy_dir=dummy_dir)
elif options.type_of_analysis == 'comparison':
analysis = '-comp'
for line in f:
(drug1, drug2) = line.strip().split('---')
# Check if the results are already done
comp_results_dir = res_dir + "/results_" + drug1 + "_" + drug2
table_file = comp_results_dir + '/table_results_' + drug1 + '_' + drug2 + '.txt'
if os.path.exists(table_file):
continue
command = 'python {}/diana_cluster/scripts/compare_profiles.py -d1 {} -d2 {} -pt geneid'.format( src_path, drug1, drug2 )
print(command)
# python /home/quim/project/diana_cluster/scripts/compare_profiles.py -d1 'DCC0303' -d2 'DCC1743' -pt 'geneid'
# To run the command at the local machine
#os.system(command)
#To run in the cluster submitting files to queues
functions.submit_command_to_queue(command, max_jobs_in_queue=int(config.get("Cluster", "max_jobs_in_queue")), queue_file="command_queues_marvin.txt", dummy_dir=dummy_dir)
f.close()
else:
print('The type of analysis has been wrongly defined. Introduce \'profile_creation\' or \'comparison\'')
sys.exit(10)
| quimaguirre/diana | scripts/old_scripts/run_experiment_cluster.py | Python | mit | 5,102 |
from __future__ import print_function
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
#from torchtext import vocab
class Vocabulary(object):
def __init__(self, tokens, unk_idx):
self.tokens = tokens
self.unk_idx = unk_idx
self.vocab_size = len(tokens)
self.forward_dict = dict((token, i) for i, token in enumerate(tokens))
self.backward_dict = dict(enumerate(tokens))
def encode(self, tokens):
return [self.forward_dict.get(token, self.unk_idx) for token in tokens]
def decode(self, ids):
return [self.backward_dict.get(idx, "<UNK>") for idx in ids]
def batch_encode(self, inputs):
batch = [self.encode(token) for token in inputs]
max_len = max(map(len, batch))
batch = [ids + (max_len - len(ids))*[0] for ids in batch]
return batch
def __len__(self):
return len(self.tokens)
class Bilinear(nn.Module):
"""
Documentation for Bilinear
"""
def __init__(self, first_dim, second_dim, out_dim):
super(Bilinear, self).__init__()
self.first_dim = first_dim
self.second_dim = second_dim
self.out_dim = out_dim
self.weights = nn.Parameter(data=th.randn(first_dim, second_dim, out_dim).double(),
requires_grad=True)
def forward(self, input1, input2):
# preconditions
assert input1.ndimension() == 2, "Inputs must be matrices (2-dimensional). Input 1 has {} dimensions.".format(input1.ndimension())
assert input2.ndimension() == 2, "Inputs must be matrices (2-dimensional). Input 2 has {} dimensions.".format(input2.ndimension())
assert input1.size(1) == self.first_dim, "Input 1's shape is inconsistent with the bilinear weight matrix."
assert input2.size(1) == self.second_dim, "Input 2's shape is inconsistent with the bilinear weight matrix."
assert input1.size(0) == input2.size(0), """Input batch sizes must be equal.
Input 1 has batch size {}, while input 2 has batch size {}.""".format(input1.size(0), input2.size(0))
# computation
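        # Bilinear form: output[b, o] = sum_{i, j} input1[b, i] * W[i, j, o] * input2[b, j],
        # computed by expanding all three tensors to (batch, first_dim, second_dim, out_dim)
        # and summing out the first_dim and second_dim axes.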
batch_size = input1.size(0)
input1_expanded = input1.unsqueeze(2).unsqueeze(3).expand(batch_size, self.first_dim,
self.second_dim, self.out_dim)
input2_expanded = input2.unsqueeze(1).unsqueeze(3).expand(batch_size, self.first_dim,
self.second_dim, self.out_dim)
weights_expanded = self.weights.unsqueeze(0).expand(batch_size, self.first_dim,
self.second_dim, self.out_dim)
output = (weights_expanded*input1_expanded*input2_expanded).sum(1).sum(2)
return output.squeeze(1).squeeze(1)
class EncoderRNN(nn.Module):
"""
Documentation for EncoderRNN
"""
def __init__(self, vocab, embed_dim, hidden_dim):
super(EncoderRNN, self).__init__()
self.vocab = vocab
self.vocab_size = len(vocab)
self.embedding = nn.Embedding(self.vocab_size, embed_dim)
self.embedding.double()
self.rnn = nn.LSTM(embed_dim, hidden_dim,
batch_first=True, bias=False)
self.rnn.double()
def forward(self, input, h0, c0, lens=None):
embedded = self.embedding(input)
if lens:
embedded = pack_padded_sequence(embedded, lens, batch_first=True)
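            # Packing lets the LSTM skip padded time steps; pack_padded_sequence expects
            # the batch to be sorted by decreasing sequence length.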
output, (hn, cn) = self.rnn(embedded, (h0, c0))
return output, hn, cn
def load_embeddings(self, weights, fix_weights=True):
self.embedding.weight.data = weights
if fix_weights:
self.embedding.weight.requires_grad = False
class DecoderRNN(nn.Module):
"""
Documentation for DecoderRNN
"""
def __init__(self, vocab, start_idx, end_idx, embed_dim, hidden_dim):
super(DecoderRNN, self).__init__()
self.vocab = vocab
self.start_idx = start_idx
self.end_idx = end_idx
self.encoder = EncoderRNN(vocab, embed_dim, hidden_dim)
self.scorer = nn.Sequential(nn.Linear(hidden_dim, len(vocab)),
nn.LogSoftmax())
self.scorer.double()
def load_embeddings(self, weights, fix_weights=True):
self.encoder.load_embeddings(weights, fix_weights)
def forward(self, input, h0, c0, lens=None):
output, hn, cn = self.encoder(input, h0, c0, lens)
if lens:
output, _ = pad_packed_sequence(output)
logprobs = self.scorer(output.contiguous().view(output.size(0)*output.size(1), output.size(2)))
logprobs = logprobs.view(output.size(0), output.size(1), logprobs.size(1))
return logprobs, hn, cn
def generate(self, h0, c0, method="beam", **kwargs):
generator = {"greedy": self.greedy_decode,
"beam": self.beam_decode,
"sample": self.temperature_sample}.get(method)
ids = generator(h0, c0, **kwargs)
tokens = self.vocab.decode(ids)
return tokens
def temperature_sample(self, h0, c0, temp=1, max_length=20, **kwargs):
pass
def greedy_decode(self, h0, c0, max_length=20, **kwargs):
pass
def beam_decode(self, h0, c0, beam_size=5, max_length=10, cuda=False, **kwargs):
def get_ij(idx, n):
j = idx % n
i = (idx - j)/n
return i, j
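        # get_ij maps a flat index over the (beam x vocab) score matrix back to
        # (beam entry i, vocab token j).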
beam = []
completed = []
prune_factor = float("-inf")
start_symbol = Variable(th.LongTensor([self.start_idx]))
beam_symbols = start_symbol.unsqueeze(1)
if cuda:
start_symbol = start_symbol.cuda()
beam_symbols = beam_symbols.cuda()
scores, out_h, out_c = self.forward(beam_symbols, h0, c0)
top_scores, top_ids = scores.view(scores.numel()).sort(0, True)
_, dim_beam, dim_vocab = scores.size()
for idx in range(min(beam_size, dim_vocab)):
i, j = get_ij(top_ids[idx], dim_vocab)
if cuda:
j = j.cuda()
seq = th.cat([start_symbol, j])
score = top_scores[idx]
if j.data[0] == self.end_idx:
completed.append({"seq": seq.data.tolist(), "score": score})
prune_factor = top_scores[idx].data[0]
else:
beam.append({"seq": seq, "h": out_h[:, 0, :],
"c": out_c[:, 0, :], "score": score})
count = 0
while len(beam) > 0 and count < max_length:
beam_symbols = th.cat([item["seq"][-1].unsqueeze(1) for item in beam], 0)
beam_h = th.cat([item["h"].unsqueeze(1) for item in beam], 1)
beam_c = th.cat([item["c"].unsqueeze(1) for item in beam], 1)
log_probs, out_h, out_c = self.forward(beam_symbols, beam_h, beam_c)
dim_beam, _, dim_vocab = log_probs.size()
beam_scores = th.cat([item["score"] for item in beam]).unsqueeze(1).unsqueeze(1)
beam_scores = beam_scores.expand(dim_beam, 1, dim_vocab)
scores = beam_scores + log_probs
top_scores, top_ids = scores.view(scores.numel()).sort(0, True)
new_beam = []
for idx in range(min(beam_size, len(beam))):
score = top_scores[idx]
i, j = get_ij(top_ids[idx], dim_vocab)
if (score.data[0] >= prune_factor):
seq = th.cat([beam[i.data[0]]["seq"], j])
if j.data[0] == self.end_idx:
completed.append({"seq": seq.data.tolist(), "score": score})
prune_factor = score.data[0]
else:
new_beam.append({"seq": seq, "h": out_h[:, i.data[0], :],
"c": out_c[:, i.data[0], :], "score": score})
else:
break
beam = new_beam
count += 1
return completed[-1]["seq"]
class Seq2Seq(nn.Module):
"""
Documentation for Seq2Seq
"""
def __init__(self, in_vocab, out_vocab, in_embed_dim,
out_embed_dim, hidden_dim, transfer):
super(Seq2Seq, self).__init__()
self.in_vocab = in_vocab
self.out_vocab = out_vocab
self.hidden_dim = hidden_dim
self.h0 = nn.Parameter(th.randn(1, 1, hidden_dim).double())
self.c0 = nn.Parameter(th.randn(1, 1, hidden_dim).double())
self.encoder = EncoderRNN(in_vocab, in_embed_dim, hidden_dim)
self.decoder = DecoderRNN(out_vocab, 1, 2,
out_embed_dim, hidden_dim)
self.transfer = transfer
def forward(self, input, output, input_lens=None, output_lens=None, lookup=None, **kwargs):
h0 = self.h0.expand(1, input.size(0), self.hidden_dim).contiguous()
c0 = self.c0.expand(1, input.size(0), self.hidden_dim).contiguous()
input_encoded, input_h, input_c = self.encoder(input, h0, c0, lens=input_lens)
if lookup:
input_h = th.index_select(input_h, 1, lookup)
input_c = th.index_select(input_c, 1, lookup)
transfer_h, transfer_c = self.transfer(input_h, input_c, **kwargs)
log_probs, _, _ = self.decoder(output, transfer_h, transfer_c, lens=output_lens)
return log_probs
def generate(self, input_seq, method="beam", cuda=False, **kwargs):
input_ids = self.in_vocab.encode(input_seq.split(" "))
input = Variable(th.LongTensor(input_ids)).unsqueeze(0)
h0 = Variable(th.zeros(1, 1, self.hidden_dim).contiguous())
c0 = Variable(th.zeros(1, 1, self.hidden_dim).contiguous())
if cuda:
input = input.cuda()
h0 = h0.cuda()
c0 = c0.cuda()
input_encoded, input_h, input_c = self.encoder(input, h0, c0)
transfer_h, transfer_c = self.transfer(input_h, input_c, **kwargs)
output = self.decoder.generate(transfer_h, transfer_c, method=method, cuda=cuda, **kwargs)
return " ".join(output)
class IdentityTransfer(nn.Module):
def __init__(self):
super(IdentityTransfer, self).__init__()
def forward(self, h, c, **kwargs):
return h, c
class GatedBilinearTransfer(nn.Module):
def __init__(self, in_dim, gate_dim, hidden_dim,
out_dim, target="h"):
super(GatedBilinearTransfer, self).__init__()
self.target = target
self.in_bilinear = Bilinear(in_dim, gate_dim, hidden_dim)
self.tanh = nn.Tanh()
self.out_bilinear = Bilinear(hidden_dim, gate_dim, out_dim)
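        # The encoder's final h and/or c (chosen via `target`) is passed through two
        # bilinear interactions with the gate vector g before being handed to the decoder.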
def forward(self, h, c, g, **kwargs):
if self.target in ["h", "both"]:
h = self.in_bilinear(h.squeeze(0), g)
h = self.tanh(h)
h = self.out_bilinear(h, g).unsqueeze(0)
if self.target in ["c", "both"]:
c = self.in_bilinear(c.squeeze(0), g)
c = self.tanh(c)
c = self.out_bilinear(c, g).unsqueeze(0)
return h, c
class PairClassifier(nn.Module):
"""
A classifier for pairs of sequences.
"""
    def __init__(self, vocab_1, vocab_2, embed_dim_1, embed_dim_2,
hidden_dim, class_dim, pair_encoder, n_layers,
n_classes, class_hidden_dim):
super(PairClassifier, self).__init__()
self.first_encoder = EncoderRNN(vocab_1, embed_dim_1, hidden_dim)
self.second_encoder = EncoderRNN(vocab_2, embed_dim_2, hidden_dim)
self.pair_encoder = pair_encoder
        self.classifier = nn.Sequential(nn.Linear(class_dim, class_hidden_dim), nn.Tanh())
        # nn.Sequential has no .add(); register the extra layers with add_module() instead
        for i in range(n_layers):
            self.classifier.add_module("hidden_{}".format(i), nn.Linear(class_hidden_dim, class_hidden_dim))
            self.classifier.add_module("tanh_{}".format(i), nn.Tanh())
        self.classifier.add_module("output", nn.Linear(class_hidden_dim, n_classes))
        self.classifier.add_module("log_softmax", nn.LogSoftmax())
def forward(self, input_1, input_2):
        h_1, hn_1, cn_1 = self.first_encoder(input_1)
        h_2, hn_2, cn_2 = self.second_encoder(input_2)
encoded = self.pair_encoder(h_1, hn_1, cn_1, h_2, hn_2, cn_2)
probs = self.classifier(encoded)
return probs
class ConcatPairClassifier(PairClassifier):
"""
A classifier for pairs of sequences that embeds and then concatenates them.
"""
    def __init__(self, vocab_1, vocab_2, embed_dim_1, embed_dim_2,
                 hidden_dim, n_layers, n_classes, class_hidden_dim):
        #TODO add code for concatenation-based `pair_encoder` (and the `class_dim` it implies)
        super(ConcatPairClassifier, self).__init__(vocab_1, vocab_2, embed_dim_1, embed_dim_2,
                                                   hidden_dim, class_dim, pair_encoder, n_layers,
                                                   n_classes, class_hidden_dim)
| douwekiela/nncg-negation | acl17/models.py | Python | mit | 13,191 |
import mongoengine as db
class User(db.Document):
user_id = db.StringField(required=True, unique=True)
created = db.DateTimeField(required=True)
last_login = db.DateTimeField()
nino = db.StringField()
linked_ids = db.ListField(db.ReferenceField('User'), default=[])
def link(self, other):
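        # Linking is symmetric: atomically push each user onto the other's linked_ids.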
self.update(push__linked_ids=other)
other.update(push__linked_ids=self)
| crossgovernmentservices/userstore-prototype | application/models.py | Python | mit | 404 |
#!/usr/bin/env python
'''
Module that update the software and its databases
'''
import os
import shutil
from sys import exit
import os.path
import tarfile
import requests
from bs4 import BeautifulSoup
from ...base import *
from ...sentry import sentry
from ...clint import progress
class Updater(object):
def __init__(self, path, ver, url):
self.inst_path = path
self.repo_url = url
self.version = ver
def update_all(self):
'''
Upgrade BigBrother completely
'''
print(color.info.info("Fetching version from Github..."))
# Retrieving github releases
try:
response = requests.get(self.repo_url)
except requests.exceptions.RequestException as e:
print(color.info.error(e))
return
# Getting latest release
soup = BeautifulSoup(response.content, 'html.parser')
try: # Parsing info from page
version = soup.select("ul.tag-references > li > a > span")[0].text
download_url = "https://github.com" + \
soup.select(".release-downloads > li > a")[1]['href']
except Exception as e:
sentry.client.captureException()
print(color.info.error(e))
return
# check version
if version == self.version:
print(color.info.info("You have already the latest version"))
else:
print(color.info.info("New version " + color.bold(
"{ver}".format(ver=version)) + " found"))
# install
if self.install(self.inst_path, download_url):
print(color.info.info("Need to be restarted for changes to be effective"))
exit()
def install(self, path, url):
try:
# downloaded file name
dl_file = self.download(url, path)
# change directory
os.chdir(path)
# extract in path directory
inst_module = self.extract(dl_file)
# normalize name
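            # e.g. a tarball folder like "SomeRepo-1.2.3" (hypothetical name) becomes
            # "SomeRepo": keep only the part before the first dash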
inst_module_norm = inst_module[:inst_module.find('-')]
if inst_module_norm in os.listdir():
shutil.rmtree(inst_module_norm)
shutil.move(inst_module, inst_module_norm)
print(color.info.info(color.info.success("Installation completed")))
return 1
except Exception as e:
print(color.info.info(color.info.fail("Installation failed")))
print(color.info.error(e))
return 0
def download(self, url, path):
'''
Download module from [url] to [path]
'''
        # get name of file to download
local_filename = url.split('/')[-1]
try:
stream = requests.get(url, stream=True)
total_length = int(stream.headers['Content-Length'])
except requests.exceptions.RequestException as e:
print(color.info.error(e))
return
        # change to download dir
try:
os.chdir(path)
except Exception as e:
print(color.info.error(e))
return
# write on file
with open(local_filename, 'wb') as f:
for chunk in progress.bar(stream.iter_content(chunk_size=1024),
label=local_filename, expected_size=(total_length/1024)):
if chunk:
f.write(chunk)
f.flush()
return local_filename
def extract(self, filename):
try:
tar = tarfile.open(filename)
repo = tar.getnames()[0]
# remove old repo
if repo in os.listdir():
shutil.rmtree(repo)
# extract in current directory
tar.extractall()
return repo
except Exception as e:
print(color.info.error(e))
return
finally:
tar.close()
os.remove(filename)
| PhantomGhosts/BigBrother | lib/bbro/core/update.py | Python | mit | 3,968 |
from venv import _venv
from fabric.api import task
@task
def migrate():
"""
Run Django's migrate command
"""
_venv("python manage.py migrate")
@task
def syncdb():
"""
Run Django's syncdb command
"""
_venv("python manage.py syncdb")
| pastpages/wordpress-memento-plugin | fabfile/migrate.py | Python | mit | 268 |
import sys
import os.path
from xml.etree import ElementTree as et
if len(sys.argv) != 3:
raise Exception("Expected at least 2 args, {} given!".format(len(sys.argv) - 1))
version = sys.argv[1]
csprojPath = sys.argv[2]
if not os.path.isfile(csprojPath):
raise Exception("File {} does not exist!".format(csprojPath))
tree = et.parse(csprojPath)
root = tree.getroot()
versionLeaf = root.find('PropertyGroup[1]/Version')
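# ElementTree's limited XPath support: select the <Version> element under the first <PropertyGroup>.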
if versionLeaf is not None:
versionLeaf.text = version
tree.write(csprojPath) | dukeofharen/wolk | scripts/build/patch-csproj.py | Python | mit | 506 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import sys
extra_install = []
if sys.version_info <= (3,1):
extra_install.append('futures')
if sys.version_info <= (3,6):
extra_install.append('pysha3')
setup(
name="moneywagon",
version='{{ version }}',
description='Next Generation Cryptocurrency Platform',
long_description=open('README.md').read(),
author='Chris Priest',
author_email='[email protected]',
url='https://github.com/priestc/moneywagon',
packages=find_packages(),
scripts=['bin/moneywagon'],
include_package_data=True,
license='LICENSE',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
install_requires=[
'requests',
'tabulate',
'base58',
'pytz',
'arrow',
'bitcoin',
'beautifulsoup4'
] + extra_install
)
| cst13/canstel | setup_template.py | Python | mit | 1,030 |
#!/usr/bin/python
from heapq import heapify, heapreplace, heappop
class Solution(object):
def kthSmallest(self, matrix, k):
"""
:type matrix: List[List[int]]
:type k: int
:rtype: int
"""
        if len(matrix) == 1:
return matrix[0][0]
z = zip(*matrix[1:])
h = [(matrix[0][i], z[i]) for i in xrange(len(matrix))]
heapify(h)
i = 0
while i < k - 1:
val, nextval = h[0]
if nextval:
heapreplace(h, (nextval[0], nextval[1:]))
else:
heappop(h)
i += 1
return h[0][0]
a = [[1,5,10], [4,5,11], [7,8,12]]
s = Solution()
print s.kthSmallest(a, 3)
| pisskidney/leetcode | medium/378.py | Python | mit | 745 |
import json, sys, glob, datetime, math, random, pickle, gzip
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import chainer
from chainer import computational_graph as c
from chainer import cuda
import chainer.functions as F
from chainer import optimizers
class AutoEncoder:
def __init__(self, n_units=64):
self.n_units = n_units
def load(self, train_x):
self.N = len(train_x[0])
self.x_train = train_x
#
self.model = chainer.FunctionSet(encode=F.Linear(self.N, self.n_units),
decode=F.Linear(self.n_units, self.N))
print("Network: encode({}-{}), decode({}-{})".format(self.N, self.n_units, self.n_units, self.N))
#
self.optimizer = optimizers.Adam()
self.optimizer.setup(self.model.collect_parameters())
def forward(self, x_data, train=True):
x = chainer.Variable(x_data)
t = chainer.Variable(x_data)
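        # Autoencoder: the target t is the input itself, so the loss measures how well
        # decode(encode(x)) reconstructs x.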
h = F.relu(self.model.encode(x))
y = F.relu(self.model.decode(h))
return F.mean_squared_error(y, t), y
def calc(self, n_epoch):
for epoch in range(n_epoch):
self.optimizer.zero_grads()
loss, y = self.forward(self.x_train)
loss.backward()
self.optimizer.update()
#
print('epoch = {}, train mean loss={}'.format(epoch, loss.data))
def getY(self, test_x):
self.test_x = test_x
        loss, y = self.forward(self.test_x, train=False)
return y.data
def getEncodeW(self):
return self.model.encode.W
def load_mnist():
with open('mnist.pkl', 'rb') as mnist_pickle:
mnist = pickle.load(mnist_pickle)
return mnist
def save_mnist(s,l=28,prefix=""):
n = len(s)
print("exporting {} images.".format(n))
plt.clf()
plt.figure(1)
for i,bi in enumerate(s):
plt.subplot(math.floor(n/6),6,i+1)
bi = bi.reshape((l,l))
plt.imshow(bi, cmap=cm.Greys_r) #Needs to be in row,col order
plt.axis('off')
plt.savefig("output/{}.png".format(prefix))
if __name__=="__main__":
rf = AutoEncoder(n_units=64)
mnist = load_mnist()
mnist['data'] = mnist['data'].astype(np.float32)
mnist['data'] /= 255
x_train = mnist['data'][0:2000]
x_test = mnist['data'][2000:2036]
rf.load(x_train)
save_mnist(x_test,prefix="test")
for k in [1,9,90,400,1000,4000]:
rf.calc(k) # epoch
yy = rf.getY(x_test)
ww = rf.getEncodeW()
save_mnist(yy,prefix="ae-{}".format(k))
print("\ndone.")
| miyamotok0105/deeplearning-sample | src/chainer1.7/ae/train.py | Python | mit | 2,598 |
""" This file contains code for working on lists and dictionaries. """
def moreThanOne(dict, key):
""" Checks if a key in a dictionary has a value more than one.
Arguments:
dict -- the dictionary
key -- the key
Returns:
True if the key exists in the dictionary and the value is at least one, otherwise false
"""
return key in dict and dict[key] > 0
def anyMoreThanOne(dict, keys):
""" Checks if any of a list of keys in a dictionary has a value more than one.
Arguments:
dict -- the dictionary
keys -- the keys
Returns:
True if any key exists in the dictionary and the value is at least one, otherwise false
"""
for key in keys:
if key in dict and dict[key] > 0:
return True
return False
def makeUnique(list):
""" Removes duplicates from a list. """
u = []
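    # unlike converting to a set, this preserves the order of first appearance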
for l in list:
if not l in u:
u.append(l)
return u
def alphabetical(lst):
""" Sorts a list of tuples in reverse alphabetical order by the first key
in the tuple.
Arguments:
lst -- the list to sort
Returns:
the sorted list
"""
return list(reversed(sorted(lst, key=lambda x: x[0]))) | ephracis/hermes | utilities/lists.py | Python | mit | 1,117 |
# -*- coding: utf-8 -*-
#
desc = 'Color bars'
phash = ''
def plot():
import matplotlib as mpl
from matplotlib import pyplot as pp
from matplotlib import style
import numpy as np
# Make a figure and axes with dimensions as desired.
fig, ax = pp.subplots(3)
# Set the colormap and norm to correspond to the data for which
# the colorbar will be used.
cmap = mpl.cm.cool
norm = mpl.colors.Normalize(vmin=-5, vmax=10)
# ColorbarBase derives from ScalarMappable and puts a colorbar in a
# specified axes, so it has everything needed for a standalone colorbar.
# There are many more kwargs, but the following gives a basic continuous
# colorbar with ticks and labels.
cb1 = mpl.colorbar.ColorbarBase(
ax[0],
cmap=cmap,
norm=norm,
orientation='horizontal'
)
cb1.set_label('Some Units')
# The second example illustrates the use of a ListedColormap, a
# BoundaryNorm, and extended ends to show the "over" and "under" value
# colors.
cmap = mpl.colors.ListedColormap(['r', 'g', 'b', 'c'])
cmap.set_over('0.25')
cmap.set_under('0.75')
# If a ListedColormap is used, the length of the bounds array must be one
# greater than the length of the color list. The bounds must be
# monotonically increasing.
bounds = [1, 2, 4, 7, 8]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
cb2 = mpl.colorbar.ColorbarBase(
ax[1],
cmap=cmap,
norm=norm,
# to use 'extend', you must
# specify two extra boundaries:
boundaries=[0] + bounds + [13],
extend='both',
ticks=bounds, # optional
spacing='proportional',
orientation='horizontal'
)
cb2.set_label('Discrete intervals, some other units')
# The third example illustrates the use of custom length colorbar
# extensions, used on a colorbar with discrete intervals.
cmap = mpl.colors.ListedColormap(
[[0., .4, 1.],
[0., .8, 1.],
[1., .8, 0.],
[1., .4, 0.]
])
cmap.set_over((1., 0., 0.))
cmap.set_under((0., 0., 1.))
bounds = [-1., -.5, 0., .5, 1.]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
cb3 = mpl.colorbar.ColorbarBase(
ax[2],
cmap=cmap,
norm=norm,
boundaries=[-10]+bounds+[10],
extend='both',
# Make the length of each extension
# the same as the length of the
# interior colors:
extendfrac='auto',
ticks=bounds,
spacing='uniform',
orientation='horizontal'
)
cb3.set_label('Custom extension lengths, some other units')
return fig
| dougnd/matplotlib2tikz | test/testfunctions/colorbars.py | Python | mit | 2,797 |
import lexer
s = "program id; var beto: int; { id = 1234; }"
lexer.lexer.input(s)
for token in lexer.lexer:
print token
| betoesquivel/PLYpractice | testingLexer.py | Python | mit | 126 |
'''
Created on 2013-01-22
@author: levi
'''
import unittest
import time
from path_condition_generator import PathConditionGenerator
from t_core.matcher import Matcher
from t_core.rewriter import Rewriter
from t_core.iterator import Iterator
from t_core.messages import Packet
from t_core.tc_python.frule import FRule
from t_core.tc_python.arule import ARule
from merge_preprocess import MergePreprocessFactory
# all runs are the same transformation, but with different metamodel elements
# the purpose is to do scalability testing with multiple configurations and multiple sets of rules
# run 1
from police_station_transformation.run1.transformation.HS2S_run1 import HS2S_run1
from police_station_transformation.run1.transformation.HM2M_run1 import HM2M_run1
from police_station_transformation.run1.transformation.HF2F_run1 import HF2F_run1
from police_station_transformation.run1.transformation.HSM2SM_run1 import HSM2SM_run1
from police_station_transformation.run1.transformation.HSF2SF_run1 import HSF2SF_run1
from police_station_transformation.run1.transformation.HMM2MM_run1 import HMM2MM_run1
from police_station_transformation.run1.transformation.HFF2FF_run1 import HFF2FF_run1
from police_station_transformation.run1.backward_matchers.HSM2SMBackS2S_run1LHS import HSM2SMBackS2S_run1LHS
from police_station_transformation.run1.backward_matchers.HSM2SMBackM2M_run1LHS import HSM2SMBackM2M_run1LHS
from police_station_transformation.run1.backward_matchers.HSF2SFBackS2S_run1LHS import HSF2SFBackS2S_run1LHS
from police_station_transformation.run1.backward_matchers.HSF2SFBackF2F_run1LHS import HSF2SFBackF2F_run1LHS
from police_station_transformation.run1.backward_matchers.HMM2MMBackM2M1_run1LHS import HMM2MMBackM2M1_run1LHS
from police_station_transformation.run1.backward_matchers.HMM2MMBackM2M2_run1LHS import HMM2MMBackM2M2_run1LHS
from police_station_transformation.run1.backward_matchers.HFF2FFBackF2F1_run1LHS import HFF2FFBackF2F1_run1LHS
from police_station_transformation.run1.backward_matchers.HFF2FFBackF2F2_run1LHS import HFF2FFBackF2F2_run1LHS
from police_station_transformation.run1.backward_matchers.HSM2SMBackComplete_run1LHS import HSM2SMBackComplete_run1LHS
from police_station_transformation.run1.backward_matchers.HSF2SFBackComplete_run1LHS import HSF2SFBackComplete_run1LHS
from police_station_transformation.run1.backward_matchers.HMM2MMBackComplete_run1LHS import HMM2MMBackComplete_run1LHS
from police_station_transformation.run1.backward_matchers.HFF2FFBackComplete_run1LHS import HFF2FFBackComplete_run1LHS
# run 2
from police_station_transformation.run2.transformation.HS2S_run2 import HS2S_run2
from police_station_transformation.run2.transformation.HM2M_run2 import HM2M_run2
from police_station_transformation.run2.transformation.HF2F_run2 import HF2F_run2
from police_station_transformation.run2.transformation.HSM2SM_run2 import HSM2SM_run2
from police_station_transformation.run2.transformation.HSF2SF_run2 import HSF2SF_run2
from police_station_transformation.run2.transformation.HMM2MM_run2 import HMM2MM_run2
from police_station_transformation.run2.transformation.HFF2FF_run2 import HFF2FF_run2
from police_station_transformation.run2.backward_matchers.HSM2SMBackS2S_run2LHS import HSM2SMBackS2S_run2LHS
from police_station_transformation.run2.backward_matchers.HSM2SMBackM2M_run2LHS import HSM2SMBackM2M_run2LHS
from police_station_transformation.run2.backward_matchers.HSF2SFBackS2S_run2LHS import HSF2SFBackS2S_run2LHS
from police_station_transformation.run2.backward_matchers.HSF2SFBackF2F_run2LHS import HSF2SFBackF2F_run2LHS
from police_station_transformation.run2.backward_matchers.HMM2MMBackM2M1_run2LHS import HMM2MMBackM2M1_run2LHS
from police_station_transformation.run2.backward_matchers.HMM2MMBackM2M2_run2LHS import HMM2MMBackM2M2_run2LHS
from police_station_transformation.run2.backward_matchers.HFF2FFBackF2F1_run2LHS import HFF2FFBackF2F1_run2LHS
from police_station_transformation.run2.backward_matchers.HFF2FFBackF2F2_run2LHS import HFF2FFBackF2F2_run2LHS
from police_station_transformation.run2.backward_matchers.HSM2SMBackComplete_run2LHS import HSM2SMBackComplete_run2LHS
from police_station_transformation.run2.backward_matchers.HSF2SFBackComplete_run2LHS import HSF2SFBackComplete_run2LHS
from police_station_transformation.run2.backward_matchers.HMM2MMBackComplete_run2LHS import HMM2MMBackComplete_run2LHS
from police_station_transformation.run2.backward_matchers.HFF2FFBackComplete_run2LHS import HFF2FFBackComplete_run2LHS
# run 3
from police_station_transformation.run3.transformation.HS2S_run3 import HS2S_run3
from police_station_transformation.run3.transformation.HM2M_run3 import HM2M_run3
from police_station_transformation.run3.transformation.HF2F_run3 import HF2F_run3
from police_station_transformation.run3.transformation.HSM2SM_run3 import HSM2SM_run3
from police_station_transformation.run3.transformation.HSF2SF_run3 import HSF2SF_run3
from police_station_transformation.run3.transformation.HMM2MM_run3 import HMM2MM_run3
from police_station_transformation.run3.transformation.HFF2FF_run3 import HFF2FF_run3
from police_station_transformation.run3.backward_matchers.HSM2SMBackS2S_run3LHS import HSM2SMBackS2S_run3LHS
from police_station_transformation.run3.backward_matchers.HSM2SMBackM2M_run3LHS import HSM2SMBackM2M_run3LHS
from police_station_transformation.run3.backward_matchers.HSF2SFBackS2S_run3LHS import HSF2SFBackS2S_run3LHS
from police_station_transformation.run3.backward_matchers.HSF2SFBackF2F_run3LHS import HSF2SFBackF2F_run3LHS
from police_station_transformation.run3.backward_matchers.HMM2MMBackM2M1_run3LHS import HMM2MMBackM2M1_run3LHS
from police_station_transformation.run3.backward_matchers.HMM2MMBackM2M2_run3LHS import HMM2MMBackM2M2_run3LHS
from police_station_transformation.run3.backward_matchers.HFF2FFBackF2F1_run3LHS import HFF2FFBackF2F1_run3LHS
from police_station_transformation.run3.backward_matchers.HFF2FFBackF2F2_run3LHS import HFF2FFBackF2F2_run3LHS
from police_station_transformation.run3.backward_matchers.HSM2SMBackComplete_run3LHS import HSM2SMBackComplete_run3LHS
from police_station_transformation.run3.backward_matchers.HSF2SFBackComplete_run3LHS import HSF2SFBackComplete_run3LHS
from police_station_transformation.run3.backward_matchers.HMM2MMBackComplete_run3LHS import HMM2MMBackComplete_run3LHS
from police_station_transformation.run3.backward_matchers.HFF2FFBackComplete_run3LHS import HFF2FFBackComplete_run3LHS
# run 4
from police_station_transformation.run4.transformation.HS2S_run4 import HS2S_run4
from police_station_transformation.run4.transformation.HM2M_run4 import HM2M_run4
from police_station_transformation.run4.transformation.HF2F_run4 import HF2F_run4
from police_station_transformation.run4.transformation.HSM2SM_run4 import HSM2SM_run4
from police_station_transformation.run4.transformation.HSF2SF_run4 import HSF2SF_run4
from police_station_transformation.run4.transformation.HMM2MM_run4 import HMM2MM_run4
from police_station_transformation.run4.transformation.HFF2FF_run4 import HFF2FF_run4
from police_station_transformation.run4.backward_matchers.HSM2SMBackS2S_run4LHS import HSM2SMBackS2S_run4LHS
from police_station_transformation.run4.backward_matchers.HSM2SMBackM2M_run4LHS import HSM2SMBackM2M_run4LHS
from police_station_transformation.run4.backward_matchers.HSF2SFBackS2S_run4LHS import HSF2SFBackS2S_run4LHS
from police_station_transformation.run4.backward_matchers.HSF2SFBackF2F_run4LHS import HSF2SFBackF2F_run4LHS
from police_station_transformation.run4.backward_matchers.HMM2MMBackM2M1_run4LHS import HMM2MMBackM2M1_run4LHS
from police_station_transformation.run4.backward_matchers.HMM2MMBackM2M2_run4LHS import HMM2MMBackM2M2_run4LHS
from police_station_transformation.run4.backward_matchers.HFF2FFBackF2F1_run4LHS import HFF2FFBackF2F1_run4LHS
from police_station_transformation.run4.backward_matchers.HFF2FFBackF2F2_run4LHS import HFF2FFBackF2F2_run4LHS
from police_station_transformation.run4.backward_matchers.HSM2SMBackComplete_run4LHS import HSM2SMBackComplete_run4LHS
from police_station_transformation.run4.backward_matchers.HSF2SFBackComplete_run4LHS import HSF2SFBackComplete_run4LHS
from police_station_transformation.run4.backward_matchers.HMM2MMBackComplete_run4LHS import HMM2MMBackComplete_run4LHS
from police_station_transformation.run4.backward_matchers.HFF2FFBackComplete_run4LHS import HFF2FFBackComplete_run4LHS
from property_prover_rules.traceability_construction.Himesis.HBuildTraceabilityForRuleLHS import HBuildTraceabilityForRuleLHS
from property_prover_rules.traceability_construction.Himesis.HBuildTraceabilityForRuleRHS import HBuildTraceabilityForRuleRHS
from police_station_transformation.traceability.HTraceabilityConstructionLHS import HTraceabilityConstructionLHS
from police_station_transformation.traceability.HTraceabilityConstructionRHS import HTraceabilityConstructionRHS
from police_station_transformation.traceability.HBuildTraceabilityGMLHS import HBuildTraceabilityGMLHS
from police_station_transformation.traceability.HBuildTraceabilityGMRHS import HBuildTraceabilityGMRHS
from copy import deepcopy
from himesis_utils import disjoint_model_union, graph_to_dot
from merge_inter_layer import MergeInterLayerFactory
class Test(unittest.TestCase):
def setUp(self):
self.rules = { 'HS2S_run1': HS2S_run1(),
'HM2M_run1': HM2M_run1(),
'HF2F_run1': HF2F_run1(),
'HSM2SM_run1': HSM2SM_run1(),
'HSF2SF_run1': HSF2SF_run1(),
'HMM2MM_run1': HMM2MM_run1(),
'HFF2FF_run1': HFF2FF_run1(),
'HS2S_run2': HS2S_run2(),
'HM2M_run2': HM2M_run2(),
'HF2F_run2': HF2F_run2(),
'HSM2SM_run2': HSM2SM_run2(),
'HSF2SF_run2': HSF2SF_run2(),
'HMM2MM_run2': HMM2MM_run2(),
'HFF2FF_run2': HFF2FF_run2(),
'HS2S_run3': HS2S_run3(),
'HM2M_run3': HM2M_run3(),
'HF2F_run3': HF2F_run3(),
'HSM2SM_run3': HSM2SM_run3(),
'HSF2SF_run3': HSF2SF_run3(),
'HMM2MM_run3': HMM2MM_run3(),
'HFF2FF_run3': HFF2FF_run3(),
'HS2S_run4': HS2S_run4(),
'HM2M_run4': HM2M_run4(),
'HF2F_run4': HF2F_run4(),
'HSM2SM_run4': HSM2SM_run4(),
'HSF2SF_run4': HSF2SF_run4(),
'HMM2MM_run4': HMM2MM_run4(),
'HFF2FF_run4': HFF2FF_run4()}
self.backwardPatterns = { 'HS2S_run1': None,
'HM2M_run1': None,
'HF2F_run1': None,
'HSM2SM_run1': [Matcher(HSM2SMBackS2S_run1LHS()),Matcher(HSM2SMBackM2M_run1LHS())],
'HSF2SF_run1': [Matcher(HSF2SFBackS2S_run1LHS()),Matcher(HSF2SFBackF2F_run1LHS())],
'HMM2MM_run1': [Matcher(HMM2MMBackM2M1_run1LHS()),Matcher(HMM2MMBackM2M2_run1LHS())],
'HFF2FF_run1': [Matcher(HFF2FFBackF2F1_run1LHS()),Matcher(HFF2FFBackF2F2_run1LHS())],
'HS2S_run2': None,
'HM2M_run2': None,
'HF2F_run2': None,
'HSM2SM_run2': [Matcher(HSM2SMBackS2S_run2LHS()),Matcher(HSM2SMBackM2M_run2LHS())],
'HSF2SF_run2': [Matcher(HSF2SFBackS2S_run2LHS()),Matcher(HSF2SFBackF2F_run2LHS())],
'HMM2MM_run2': [Matcher(HMM2MMBackM2M1_run2LHS()),Matcher(HMM2MMBackM2M2_run2LHS())],
'HFF2FF_run2': [Matcher(HFF2FFBackF2F1_run2LHS()),Matcher(HFF2FFBackF2F2_run2LHS())],
'HS2S_run3': None,
'HM2M_run3': None,
'HF2F_run3': None,
'HSM2SM_run3': [Matcher(HSM2SMBackS2S_run3LHS()),Matcher(HSM2SMBackM2M_run3LHS())],
'HSF2SF_run3': [Matcher(HSF2SFBackS2S_run3LHS()),Matcher(HSF2SFBackF2F_run3LHS())],
'HMM2MM_run3': [Matcher(HMM2MMBackM2M1_run3LHS()),Matcher(HMM2MMBackM2M2_run3LHS())],
'HFF2FF_run3': [Matcher(HFF2FFBackF2F1_run3LHS()),Matcher(HFF2FFBackF2F2_run3LHS())],
'HS2S_run4': None,
'HM2M_run4': None,
'HF2F_run4': None,
'HSM2SM_run4': [Matcher(HSM2SMBackS2S_run4LHS()),Matcher(HSM2SMBackM2M_run4LHS())],
'HSF2SF_run4': [Matcher(HSF2SFBackS2S_run4LHS()),Matcher(HSF2SFBackF2F_run4LHS())],
'HMM2MM_run4': [Matcher(HMM2MMBackM2M1_run4LHS()),Matcher(HMM2MMBackM2M2_run4LHS())],
'HFF2FF_run4': [Matcher(HFF2FFBackF2F1_run4LHS()),Matcher(HFF2FFBackF2F2_run4LHS())]}
self.backwardPatterns2Rules = { 'HSM2SMBackS2S_run1LHS': 'HSM2SM_run1',
'HSM2SMBackM2M_run1LHS': 'HSM2SM_run1',
'HSF2SFBackS2S_run1LHS': 'HSF2SF_run1',
'HSF2SFBackF2F_run1LHS': 'HSF2SF_run1',
'HMM2MMBackM2M1_run1LHS': 'HMM2MM_run1',
'HMM2MMBackM2M2_run1LHS': 'HMM2MM_run1',
'HFF2FFBackF2F1_run1LHS': 'HFF2FF_run1',
'HFF2FFBackF2F2_run1LHS': 'HFF2FF_run1',
'HSM2SMBackS2S_run2LHS': 'HSM2SM_run2',
'HSM2SMBackM2M_run2LHS': 'HSM2SM_run2',
'HSF2SFBackS2S_run2LHS': 'HSF2SF_run2',
'HSF2SFBackF2F_run2LHS': 'HSF2SF_run2',
'HMM2MMBackM2M1_run2LHS': 'HMM2MM_run2',
'HMM2MMBackM2M2_run2LHS': 'HMM2MM_run2',
'HFF2FFBackF2F1_run2LHS': 'HFF2FF_run2',
'HFF2FFBackF2F2_run2LHS': 'HFF2FF_run2',
'HSM2SMBackS2S_run3LHS': 'HSM2SM_run3',
'HSM2SMBackM2M_run3LHS': 'HSM2SM_run3',
'HSF2SFBackS2S_run3LHS': 'HSF2SF_run3',
'HSF2SFBackF2F_run3LHS': 'HSF2SF_run3',
'HMM2MMBackM2M1_run3LHS': 'HMM2MM_run3',
'HMM2MMBackM2M2_run3LHS': 'HMM2MM_run3',
'HFF2FFBackF2F1_run3LHS': 'HFF2FF_run3',
'HFF2FFBackF2F2_run3LHS': 'HFF2FF_run3',
'HSM2SMBackS2S_run4LHS': 'HSM2SM_run4',
'HSM2SMBackM2M_run4LHS': 'HSM2SM_run4',
'HSF2SFBackS2S_run4LHS': 'HSF2SF_run4',
'HSF2SFBackF2F_run4LHS': 'HSF2SF_run4',
'HMM2MMBackM2M1_run4LHS': 'HMM2MM_run4',
'HMM2MMBackM2M2_run4LHS': 'HMM2MM_run4',
'HFF2FFBackF2F1_run4LHS': 'HFF2FF_run4',
'HFF2FFBackF2F2_run4LHS': 'HFF2FF_run4'}
self.backwardPatternsComplete = {
'HS2S_run1': None,
'HM2M_run1': None,
'HF2F_run1': None,
'HSM2SM_run1': [Matcher(HSM2SMBackComplete_run1LHS())],
'HSF2SF_run1': [Matcher(HSF2SFBackComplete_run1LHS())],
'HMM2MM_run1': [Matcher(HMM2MMBackComplete_run1LHS())],
'HFF2FF_run1': [Matcher(HFF2FFBackComplete_run1LHS())],
'HS2S_run2': None,
'HM2M_run2': None,
'HF2F_run2': None,
'HSM2SM_run2': [Matcher(HSM2SMBackComplete_run2LHS())],
'HSF2SF_run2': [Matcher(HSF2SFBackComplete_run2LHS())],
'HMM2MM_run2': [Matcher(HMM2MMBackComplete_run2LHS())],
'HFF2FF_run2': [Matcher(HFF2FFBackComplete_run2LHS())],
'HS2S_run3': None,
'HM2M_run3': None,
'HF2F_run3': None,
'HSM2SM_run3': [Matcher(HSM2SMBackComplete_run3LHS())],
'HSF2SF_run3': [Matcher(HSF2SFBackComplete_run3LHS())],
'HMM2MM_run3': [Matcher(HMM2MMBackComplete_run3LHS())],
'HFF2FF_run3': [Matcher(HFF2FFBackComplete_run3LHS())],
'HS2S_run4': None,
'HM2M_run4': None,
'HF2F_run4': None,
'HSM2SM_run4': [Matcher(HSM2SMBackComplete_run4LHS())],
'HSF2SF_run4': [Matcher(HSF2SFBackComplete_run4LHS())],
'HMM2MM_run4': [Matcher(HMM2MMBackComplete_run4LHS())],
'HFF2FF_run4': [Matcher(HFF2FFBackComplete_run4LHS())]}
def test_combine(self):
# build_traceability_for_rule = ARule(HBuildTraceabilityNoBackwardLHS(),HBuildTraceabilityNoBackwardRHS())
# build_traceability_for_rule = ARule(HTraceabilityConstructionLHS(),HTraceabilityConstructionRHS())
build_traceability_for_rule = ARule(HBuildTraceabilityForRuleLHS(),HBuildTraceabilityForRuleRHS())
# build_traceability_for_rule = ARule(HBuildTraceabilityGMLHS(),HBuildTraceabilityGMRHS())
build_traceability_for_rule_match = Matcher(HBuildTraceabilityForRuleLHS())
build_traceability_for_rule_rewrite = Rewriter(HBuildTraceabilityForRuleRHS())
s2s = HS2S_run1()
m2m = HM2M_run1()
f2f = HF2F_run1()
sm2sm = HSM2SM_run1()
sf2sf = HSF2SF_run1()
mm2mm = HMM2MM_run1()
ff2ff = HFF2FF_run1()
mergeInterLayerFactory = MergeInterLayerFactory(1)
combineResult = mergeInterLayerFactory.merge_two_rules_inter_layer(mm2mm,m2m)
graph_to_dot("combinelargerrule", combineResult, 1)
l = [HSM2SM_run1(),HFF2FF_run1()]
l.extend([])
print l
# graph_to_dot("bla",p.graph)
| levilucio/SyVOLT | tests/combine_test.py | Python | mit | 20,391 |
'''
plans.py
'''
from forex_python.converter import CurrencyCodes
from .base import Base
class Plan(Base):
'''
Plan class for making payment plans
'''
interval = None
name = None
amount = None
plan_code = None
currency = None
id = None
send_sms = True
send_invoices = True
description = None
__interval_values = ('hourly', 'daily', 'weekly', 'monthly', 'annually')
def __init__(self, name, interval, amount, currency='NGN', plan_code=None,
id=None, send_sms=None, send_invoices=None, description=None):
super().__init__()
#Check if currency supplied is valid
if not CurrencyCodes().get_symbol(currency.upper()):
raise ValueError("Invalid currency supplied")
if interval.lower() not in self.__interval_values:
raise ValueError("Interval should be one of 'hourly',"
"'daily', 'weekly', 'monthly','annually'"
)
try:
amount = int(amount)
except ValueError:
raise ValueError("Invalid amount")
else:
self.interval = interval.lower()
self.name = name
self.interval = interval
self.amount = amount
self.currency = currency
self.plan_code = plan_code
self.id = id
self.send_sms = send_sms
self.send_invoices = send_invoices
self.description = description
def __str__(self):
return "%s plan" % self.name
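# Illustrative usage sketch: the constructor validates the currency code and the
# billing interval before assigning attributes, so a typical plan looks like
#
#     plan = Plan('Premium', 'monthly', 5000)   # amount in NGN by default
#     str(plan)                                  # -> 'Premium plan'
#
# while Plan('Premium', 'fortnightly', 5000) raises ValueError for the interval.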
| Chibuzor-IN/python-paystack | python_paystack/objects/plans.py | Python | mit | 1,566 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from jcconv import kata2hira, hira2kata
from itertools import chain
from printable import PrintableDict, PrintableList
__by_vowels = PrintableDict(**{
u'ア': u'ワラヤャマハナタサカアァ',
u'イ': u'リミヒニちシキイィ',
u'ウ': u'ルユュムフヌツスクウゥ',
u'エ': u'レメヘネテセケエェ',
u'オ': u'ヲロヨョモホノトソコオォ',
})
__to_dakuten = PrintableDict(**{
u'か': u'が',
u'き': u'ぎ',
u'く': u'ぐ',
u'け': u'げ',
u'こ': u'ご',
u'さ': u'ざ',
u'し': u'じ',
u'す': u'ず',
u'せ': u'ぜ',
u'そ': u'ぞ',
u'た': u'だ',
u'ち': u'ぢ',
u'つ': u'づ',
u'て': u'で',
u'と': u'ど',
u'は': u'ばぱ',
u'ひ': u'びぴ',
u'ふ': u'ぶぷ',
u'へ': u'べぺ',
u'ほ': u'ぼぽ',
})
__to_mini = PrintableDict(**{
u'く': u'っ',
u'つ': u'っ',
u'や': u'ゃ',
u'よ': u'ょ',
u'ゆ': u'ゅ',
u'わ': u'ゎ',
u'か': u'ゕ',
u'け': u'ゖ',
u'あ': u'ぁ',
u'い': u'ぃ',
u'う': u'ぅ',
u'え': u'ぇ',
u'お': u'ぉ',
})
EXTENDABLE_MINIS = (
u'つ',
u'く',
)
__by_dakuten = PrintableDict()
for vowel, letters in __to_dakuten.iteritems():
for letter in letters:
__by_dakuten[letter] = vowel
__to_vowels = PrintableDict()
for vowel, letters in __by_vowels.iteritems():
for letter in letters:
__to_vowels[letter] = vowel
def codepoint_range(start, end):
for val in range(start, end):
try:
yield unichr(val)
except ValueError:
# Sometimes certain codepoints can't be used on a machine
pass
def char_set(value):
if isinstance(value, list) or isinstance(value, tuple):
return codepoint_range(*value)
else:
return [value]
def unipairs(lst):
return PrintableList(reduce(lambda a, b: chain(a, b), map(char_set, lst)))
__KATAKANA = (
# Katakana: http://en.wikipedia.org/wiki/Katakana
(0x30A0, 0x30FF + 1),
(0x31F0, 0x31FF + 1),
(0x3200, 0x32FF + 1),
(0xFF00, 0xFFEF + 1),
)
__HIRAGANA = (
# Hiragana: http://en.wikipedia.org/wiki/Hiragana
(0x3040, 0x309F + 1),
(0x1B000, 0x1B0FF + 1),
)
__KANJI = (
(0x4e00, 0x9faf + 1),
)
__BONUS_KANA = (
u'〜',
)
KATAKANA = unipairs(__KATAKANA)
HIRAGANA = unipairs(__HIRAGANA)
KANA = PrintableList(KATAKANA + HIRAGANA + unipairs(__BONUS_KANA))
KANJI = unipairs(__KANJI)
def __is_katakana(char):
return char in KATAKANA
def is_katakana(string):
for char in string:
if not __is_katakana(char):
return False
return True
def __is_hiragana(char):
return char in HIRAGANA
def is_hiragana(string):
for char in string:
if not __is_hiragana(char):
return False
return True
def __is_kana(char):
return char in KANA
def is_kana(string):
for char in string:
if not __is_kana(char):
return False
return True
def __is_kanji(char):
return char in KANJI
def is_kanji(string):
for char in string:
if not __is_kanji(char):
return False
return True
def kana_minus_dakuten(char):
if is_katakana(char):
hira = kata2hira(char)
hira = __by_dakuten.get(hira, hira)
return hira2kata(hira)
else:
return __by_dakuten.get(char, char)
def kana_plus_dakuten(char):
yield char
is_kata = is_katakana(char)
if is_kata:
char = kata2hira(char)
for char in __to_dakuten.get(char, ''):
yield hira2kata(char) if is_kata else char
def kana_plus_mini(char):
yield char
is_kata = is_katakana(char)
if is_kata:
char = kata2hira(char)
for char in __to_mini.get(char, ''):
yield hira2kata(char) if is_kata else char
def extend_dakuten_reading(string):
if len(string) == 0:
yield ''
return
char = string[0]
for mult in kana_plus_dakuten(char):
yield mult + string[1:]
def extend_mini_reading(string):
if len(string) == 0:
yield ''
return
char = string[-1]
if char not in EXTENDABLE_MINIS:
yield string
return
for substr in kana_plus_mini(char):
yield string[:-1] + substr
def char_to_base_vowel(char):
char = kana_minus_dakuten(char)
translated = __to_vowels.get(char, False) or __to_vowels.get(hira2kata(char), False)
if translated is False:
raise Exception(u"Can't convert")
return translated
def all_to_hiragana(string):
out = u''
for index, char in enumerate(string):
if char == u'ー' or char == u'|':
char = char_to_base_vowel(out[-1])
char = kata2hira(char)
out += char
return out
if __name__ == u'__main__':
from tester import *
test_equal(kana_minus_dakuten(u'は'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ば'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ぱ'), u'は', u"No Dakuten failure")
test_equal(kana_minus_dakuten(u'ジ'), u'シ', u"Katakana failure")
test_equal(kana_minus_dakuten(u'本'), u'本', u"Kanji changed")
test_true(is_katakana(u'ハ'), u"Katakana check wrong")
test_true(is_katakana(u'ー'), u"Katakana check wrong")
test_true(is_katakana(u'ジ'), u"Katakana check wrong")
test_true(is_katakana(u'ッ'), u"Katakana check wrong")
test_true(not is_katakana(u'本'), u"Katakana Kanji check wrong")
test_true(not is_katakana(u'っ'), u"Katakana small hiragana check wrong")
test_true(not is_katakana(u'は'), u"Katakana hiragana wrong")
test_true(is_hiragana(u'っ'), u"Hiragana check wrong")
test_true(is_hiragana(u'つ'), u"Hiragana check wrong")
test_true(is_hiragana(u'を'), u"Hiragana check wrong")
test_true(not is_hiragana(u'本'), u"Hiragana Kanji check wrong")
test_true(not is_hiragana(u'ッ'), u"Hiragana small katakana check wrong")
test_true(not is_hiragana(u'ハ'), u"Hiragana katakana check wrong")
test_true(is_kana(u'っ'), u"Kana check wrong")
test_true(is_kana(u'つ'), u"Kana check wrong")
test_true(is_kana(u'を'), u"Kana check wrong")
test_true(is_kana(u'ッ'), u"Kana check wrong")
test_true(is_kana(u'ハ'), u"Kana check wrong")
test_true(is_kana(u'〜・'), u"Kana special check wrong")
test_true(not is_kana(u'本'), u"Kana check wrong")
test_equal(kana_minus_dakuten(u'は'), u'は')
test_equal(kana_minus_dakuten(u'ば'), u'は')
test_equal(kana_minus_dakuten(u'バ'), u'ハ')
test_equal(kana_minus_dakuten(u'本'), u'本')
test_equal(''.join(kana_plus_dakuten(u'は')), u'はばぱ')
test_equal(''.join(kana_plus_dakuten(u'本')), u'本')
test_equal(''.join(kana_plus_dakuten(u'シ')), u'シジ')
test_list_equal(extend_dakuten_reading(u'しゃし'), [u'しゃし', u'じゃし'])
test_list_equal(extend_mini_reading(u'し'), [u'し'])
test_list_equal(extend_mini_reading(u'いつ'), [u'いつ', u'いっ'])
test_equal(all_to_hiragana(u'ジータ'), u'じいた')
| Saevon/AnkiHelpers | kana.py | Python | mit | 7,233 |
import requests, os
from bs4 import BeautifulSoup
url = 'http://www.nytimes.com'
def extractArticles (url):
data = requests.get(url)
soup = BeautifulSoup(data.text, 'html.parser')
articles = []
for article in soup.find_all('article'):
if article.find('h2') != None and article.find('h2').find('a') != None:
heading = article.find('h2').find('a').get_text().strip()
if heading != "":
articles.append(heading)
articles = sorted(list(set(articles)))
f = open('./articles/headlines2.txt', 'w')
for heading in articles:
f.write(heading)
f.write('\n')
f.close()
extractArticles(url) | timgrossmann/dailyProgrammer | Challenges/Python/decodeWebPage.py | Python | mit | 698 |
from base import IfbyphoneApiBase
class Addons(IfbyphoneApiBase):
def list(self):
"""List all purchased Addons for an account
"""
self.options['action'] = 'addons.list'
return self.call(self.options)
def purchase(self, **kwargs):
"""Purchase an addon for an account
keyword arguments:
item_id -- ID number of desired addon
qty -- the quantity of the addon
send_receipt -- set to 1 to send a receipt to account email
"""
self.options.update(kwargs)
self.options['action'] = 'addons.purchase'
return self.call(self.options) | Opus1no2/Ifbyphone-API-Module | src/Ifbyphone/api/addons.py | Python | mit | 685 |
# -*- coding: utf-8 -*-
"""Utility functions.
"""
from collections import OrderedDict
from .bsd_checksum import bsd_checksum # make name available from this module
def n_(s, replacement='_'):
"""Make binary fields more readable.
"""
if isinstance(s, (str, unicode, bytearray)):
return s.replace('\0', replacement)
return s
def split_string(s, *ndxs):
    """Split a string at the given indexes.
Usage:
>>> print split_string('D2008022002', 1, 5, 7, 9)
['D', '2008', '02', '20', '02']
"""
if len(ndxs) == 0:
return [s]
if len(ndxs) == 1:
i = ndxs[0]
return [s[:i], s[i:]]
res = []
b = 0
while ndxs:
a, b, ndxs = b, ndxs[0], ndxs[1:]
res.append(s[a:b])
res.append(s[b:])
return res
def split_fields(s, sizes):
"""Split a string into fields based on field `sizes`.
"""
slen = len(s)
if None in sizes:
nonesize = slen - sum(v for v in sizes if v is not None)
sizes = [v or nonesize for v in sizes]
ndxs = [sizes[0]]
cur = 1
while cur < len(sizes) - 1:
ndxs.append(ndxs[-1] + sizes[cur])
cur += 1
return split_string(s, *ndxs)
class pset(OrderedDict):
"""A property set is an OrderedDict with prettier string display
(useful when working with record lengths that are wider than your
terminal).
"""
def __repr__(self):
return '{%s}' % ', '.join('%s: %r' % (str(k), str(v))
for k,v in self.items())
def __str__(self):
return "{\n%s\n}" % ',\n'.join(' %s: %r' % (str(k), str(v))
for k,v in self.items())
def pad(data, size, padchar=' '):
"""Pad the `data` to exactly length = `size`.
"""
if len(data) > size:
raise ValueError("Data is longer than size, cannot pad.")
if len(data) == size:
return data
return data + padchar * (size - len(data))
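# Illustrative usage sketch: split_fields() cuts a fixed-width record into pieces,
# with a single None entry in `sizes` absorbing whatever width is left over, and
# pad() restores a value to its full field width:
#
#     >>> split_fields('D2008022002', [1, 4, 2, 2, None])
#     ['D', '2008', '02', '20', '02']
#     >>> pad('D2008', 8)
#     'D2008   '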
| thebjorn/fixedrec | fixedrec/utils.py | Python | mit | 2,032 |
# encoding: utf-8
# pylint: disable=redefined-outer-name,missing-docstring
import pytest
from tests import utils
from app import create_app
@pytest.yield_fixture(scope='session')
def flask_app():
app = create_app(flask_config_name='testing')
from app.extensions import db
with app.app_context():
db.create_all()
yield app
db.drop_all()
@pytest.yield_fixture()
def db(flask_app):
# pylint: disable=unused-argument,invalid-name
from app.extensions import db as db_instance
yield db_instance
db_instance.session.rollback()
@pytest.fixture(scope='session')
def flask_app_client(flask_app):
flask_app.test_client_class = utils.AutoAuthFlaskClient
flask_app.response_class = utils.JSONResponse
return flask_app.test_client()
@pytest.yield_fixture(scope='session')
def regular_user(flask_app):
# pylint: disable=invalid-name,unused-argument
from app.extensions import db
regular_user_instance = utils.generate_user_instance(
username='regular_user'
)
db.session.add(regular_user_instance)
db.session.commit()
yield regular_user_instance
db.session.delete(regular_user_instance)
db.session.commit()
@pytest.yield_fixture(scope='session')
def readonly_user(flask_app):
# pylint: disable=invalid-name,unused-argument
from app.extensions import db
readonly_user_instance = utils.generate_user_instance(
username='readonly_user',
is_regular_user=False
)
db.session.add(readonly_user_instance)
db.session.commit()
yield readonly_user_instance
db.session.delete(readonly_user_instance)
db.session.commit()
@pytest.yield_fixture(scope='session')
def admin_user(flask_app):
# pylint: disable=invalid-name,unused-argument
from app.extensions import db
admin_user_instance = utils.generate_user_instance(
username='admin_user',
is_admin=True
)
db.session.add(admin_user_instance)
db.session.commit()
yield admin_user_instance
db.session.delete(admin_user_instance)
db.session.commit()
@pytest.yield_fixture(scope='session')
def internal_user(flask_app):
# pylint: disable=invalid-name,unused-argument
from app.extensions import db
internal_user_instance = utils.generate_user_instance(
username='internal_user',
is_regular_user=False,
is_admin=False,
is_active=True,
is_internal=True
)
db.session.add(internal_user_instance)
db.session.commit()
yield internal_user_instance
db.session.delete(internal_user_instance)
db.session.commit()
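# Illustrative usage sketch: test modules receive these fixtures by naming them as
# arguments; the endpoint below is purely hypothetical and not defined in this file.
#
#     def test_some_endpoint(flask_app_client, regular_user):
#         response = flask_app_client.get('/some/endpoint/')
#         assert response.status_code in (200, 401, 404)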
| millen1m/flask-restplus-server-example | tests/conftest.py | Python | mit | 2,625 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str,
subscription_id: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-05-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
subscription_id: str,
resource_name: str,
configuration_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-05-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs/{ConfigurationId}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"ConfigurationId": _SERIALIZER.url("configuration_id", configuration_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_request(
resource_group_name: str,
subscription_id: str,
resource_name: str,
configuration_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2018-05-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs/{ConfigurationId}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"ConfigurationId": _SERIALIZER.url("configuration_id", configuration_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
class ProactiveDetectionConfigurationsOperations(object):
"""ProactiveDetectionConfigurationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.applicationinsights.v2018_05_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> List["_models.ApplicationInsightsComponentProactiveDetectionConfiguration"]:
"""Gets a list of ProactiveDetection configurations of an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ApplicationInsightsComponentProactiveDetectionConfiguration, or the result of
cls(response)
:rtype:
list[~azure.mgmt.applicationinsights.v2018_05_01_preview.models.ApplicationInsightsComponentProactiveDetectionConfiguration]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ApplicationInsightsComponentProactiveDetectionConfiguration"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
resource_name=resource_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[ApplicationInsightsComponentProactiveDetectionConfiguration]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
resource_name: str,
configuration_id: str,
**kwargs: Any
) -> "_models.ApplicationInsightsComponentProactiveDetectionConfiguration":
"""Get the ProactiveDetection configuration for this configuration id.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
        :param configuration_id: The ProactiveDetection configuration ID. This is unique within an
Application Insights component.
:type configuration_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationInsightsComponentProactiveDetectionConfiguration, or the result of
cls(response)
:rtype:
~azure.mgmt.applicationinsights.v2018_05_01_preview.models.ApplicationInsightsComponentProactiveDetectionConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationInsightsComponentProactiveDetectionConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
resource_name=resource_name,
configuration_id=configuration_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationInsightsComponentProactiveDetectionConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs/{ConfigurationId}'} # type: ignore
@distributed_trace
def update(
self,
resource_group_name: str,
resource_name: str,
configuration_id: str,
proactive_detection_properties: "_models.ApplicationInsightsComponentProactiveDetectionConfiguration",
**kwargs: Any
) -> "_models.ApplicationInsightsComponentProactiveDetectionConfiguration":
"""Update the ProactiveDetection configuration for this configuration id.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
        :param configuration_id: The ProactiveDetection configuration ID. This is unique within an
Application Insights component.
:type configuration_id: str
:param proactive_detection_properties: Properties that need to be specified to update the
ProactiveDetection configuration.
:type proactive_detection_properties:
~azure.mgmt.applicationinsights.v2018_05_01_preview.models.ApplicationInsightsComponentProactiveDetectionConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationInsightsComponentProactiveDetectionConfiguration, or the result of
cls(response)
:rtype:
~azure.mgmt.applicationinsights.v2018_05_01_preview.models.ApplicationInsightsComponentProactiveDetectionConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationInsightsComponentProactiveDetectionConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(proactive_detection_properties, 'ApplicationInsightsComponentProactiveDetectionConfiguration')
request = build_update_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
resource_name=resource_name,
configuration_id=configuration_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationInsightsComponentProactiveDetectionConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs/{ConfigurationId}'} # type: ignore
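# Illustrative usage sketch: this operation group is normally reached through the
# generated management client rather than instantiated directly; assuming the usual
# attribute name for the group, a call would look roughly like
#
#     configs = client.proactive_detection_configurations.list(
#         resource_group_name='my-rg', resource_name='my-component')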
| Azure/azure-sdk-for-python | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2018_05_01_preview/operations/_proactive_detection_configurations_operations.py | Python | mit | 15,441 |
class Heap(object):
def __init__(self, data=[]):
if len(data) == 0:
self.data = [None] * 100
else:
self.data = data
self.__size = sum([1 if item is not None else 0 for item in self.data])
self.__heapify()
def size(self):
return self.__size
def empty(self):
return self.__size == 0
def get_max(self):
return self.data[0]
def delete_max(self):
max_data = self.data[0]
self.__swap(0, self.__size - 1)
self.data[self.__size - 1] = None
self.__size -= 1
self.__percolate_down(0)
return max_data
def insert(self, number):
if self.__size == len(self.data):
self.__expand()
self.__size += 1
self.data[self.__size - 1] = number
return self.__percolate_up(self.__size - 1)
@staticmethod
def heap_sort(data):
heap = Heap(data)
index = heap.size() - 1
while not heap.empty():
heap.data[index] = heap.delete_max()
index -= 1
return heap.data
def __percolate_down(self, i):
initial_value = self.data[i]
current_index = i
potential_parent = self.__proper_parent(current_index)
while self.data[potential_parent] > self.data[current_index]:
self.data[current_index] = self.data[potential_parent]
current_index = potential_parent
potential_parent = self.__proper_parent(current_index)
self.data[current_index] = initial_value
return current_index
def __percolate_up(self, i):
if not self.__has_parent(i):
return 0
initial_value = self.data[i]
parent_indexes = []
h = 1
current_index = i
while self.__has_parent(current_index):
current_index = ((i + 1) >> h) - 1
parent_indexes.append(current_index)
h += 1
lo = 0
hi = len(parent_indexes) - 1
while lo + 1 < hi:
mi = (lo + hi) / 2
if self.data[parent_indexes[mi]] <= self.data[i]:
lo = mi
else:
hi = mi
parent_indexes.insert(0, i)
lo = lo + 1
index = 0
while index < lo:
self.data[parent_indexes[index]] = self.data[parent_indexes[index + 1]]
index += 1
self.data[parent_indexes[lo]] = initial_value
return parent_indexes[lo]
def __expand(self):
new_data = [None] * (self.__size * 2)
for i in range(self.__size):
new_data[i] = self.data[i]
self.data = new_data
def __heapify(self):
i = self.__last_internal()
while self.__in_heap(i):
self.__percolate_down(i)
i -= 1
def __swap(self, i , j):
temp = self.data[i]
self.data[i] = self.data[j]
self.data[j] = temp
def __in_heap(self, i):
return 0 <= i < self.size()
def __parent(self, i):
return (i - 1) >> 1
def __last_internal(self):
return self.__parent(self.size() - 1)
def __left_child(self, i):
return (i << 1) + 1
def __right_child(self, i):
return (i + 1) << 1
def __has_parent(self, i):
return 0 < i
def __has_left_child(self, i):
return self.__in_heap(self.__left_child(i))
def __has_right_child(self, i):
return self.__in_heap(self.__right_child(i))
def __bigger(self, i, j):
return i if self.data[i] > self.data[j] else j
def __proper_parent(self, i):
return self.__bigger(self.__bigger(self.__left_child(i), self.__right_child(i)), i) if self.__has_right_child(i) else \
self.__bigger(self.__left_child(i), i) if self.__has_left_child(i) else \
i
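# Illustrative usage sketch: the class implements a max-heap over a plain list, so
# construction heapifies the given data and heap_sort() returns ascending order.
if __name__ == '__main__':
    heap = Heap([5, 1, 4, 2, 3])             # heapified in place
    print(heap.get_max())                    # 5
    print(Heap.heap_sort([5, 1, 4, 2, 3]))   # [1, 2, 3, 4, 5]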
| haoliangyu/basic-data-structure | Heap.py | Python | mit | 3,852 |
#!/usr/bin/python
# File: mapentity.py
# import pygtk
# pygtk.require('2.0')
from gi.repository import Gtk, Gdk
class MapEntity:
# self.x = None
# self.y = None
# self.name = None
# self.texture = None
def getCoords(self):
return self.x,self.y
def getx(self):
return self.x
def gety(self):
return self.y
def setCoords(self,xcoord,ycoord):
self.x = xcoord
self.y = ycoord
def getName(self):
return self.name
def setName(self, strname):
self.name = strname
def __init__(self, xarg, yarg, namearg):
self.x = xarg
self.y = yarg
self.name = namearg
return | lotusronin/KronosEngine | editor/mapentity.py | Python | mit | 592 |
"""
.. module:: test
test
*************
:Description: test
:Authors: bejar
:Version:
:Created on: 10/02/2015 9:50
"""
__author__ = 'bejar'
from MeanPartition import MeanPartitionClustering
from kemlglearn.datasets import cluster_generator
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs, load_iris, make_circles
nc = 15
X, y = cluster_generator(n_clusters=nc, sepval=0.2, numNonNoisy=100, numNoisy=10, rangeN=[150, 200])
# print X[:,:-2]
#
#print X,y
# X, y = make_blobs(n_samples=1000, n_features=20, centers=nc, random_state=2)
gkm = MeanPartitionClustering(n_clusters=nc, n_components=40, n_neighbors=3, trans='spectral', cdistance='ANMI')
res, l = gkm.fit(X, y)
fig = plt.figure()
# ax = fig.gca(projection='3d')
# pl.scatter(X[:, 1], X[:, 2], zs=X[:, 0], c=gkm.labels_, s=25)
ax = fig.add_subplot(111)
plt.scatter(res[:, 0], res[:, 1], c=l)
plt.show()
| bejar/kemlglearn | kemlglearn/cluster/consensus/test.py | Python | mit | 905 |
# This doesn't work- not updated with eventmaster.py updates
# TODO: Fix This :)
# Import Libraries
import eventmaster
import time
import random
import sys
import unittest
import sys
class InputsTestCase(unittest.TestCase):
def setUp(self):
self.s3 = E2S3.E2S3Switcher()
self.s3.set_verbose(0)
self.s3.set_CommsXML_IP("127.0.0.1")
self.s3.set_CommsXML_Port(9876)
if not self.s3.connect(): return -1
while self.s3.is_ready() != 1: time.sleep(1)
def test_set_valid_name_on_invalid_input(self):
test_str = "PYTEST-{0!s}".format(random.randint(1,10))
self.assertRaises(ValueError, lambda: self.s3.get_input(99).set_Name(test_str))
def test_set_valid_name_on_valid_input(self):
test_str = "PYTEST-{0!s}".format(random.randint(1,10))
while(self.s3.has_been_processed(self.s3.get_input(0).set_Name(test_str))==0): time.sleep(1)
time.sleep(1)
self.assertEqual(test_str, self.s3.get_input(0).get_Name())
def test_set_invalid_name_on_valid_input(self):
MyObject = type('MyObject', (object,), {})
self.assertEqual(self.s3.get_input(0).set_Name(MyObject), None)
print unittest.main()
sys.exit()
| kyelewisstgc/EventMaster-Python | tests/test_unit.py | Python | mit | 1,220 |
""" synrcat
gaussian mixture model
"""
import sys
import os
import numpy as np
import logging
from collections import OrderedDict
from astropy.table import Table
from pypeline import pype, add_param, depends_on
from syn import Syn
from syncat.errors import NoPoints
import syncat.misc as misc
import syncat.fileio as fileio
import time
@add_param('cat_model', metavar='filename', default='out/syn.pickle', type=str,
help='file with catalogue model to load')
@add_param('hints_file', metavar='filename', default='in/syn_hints.txt', type=str,
help='give hints about parameter distributions')
@depends_on(Syn)
class GaussianMixtureModel(pype):
""" SynCat mode to generate random catalogue by sampling from a gaussian mixture model.
Parameters
----------
mask : minimask.Mask instance
mask describing survey geometry to sample from. If None, sample from full-sky.
cat_model : str
path to file with catalogue model to load
hints_file : str
path to file with hints about parameter distributions
"""
def __init__(self, config={}, mask=None, **kwargs):
""" """
self._parse_config(config, **kwargs)
self._setup_logging()
self.load_hints()
self.mask = mask
self.syn = None
def sample_sky(self, zone=None, nside=None, order=None):
""" Sample sky coordinates.
Parameters
----------
zone : int, list
optional healpix zone index or list of indices from which to sample. Otherwise sample from all zones.
nside : int
healpix nside for zone pixelization
order : str
healpix ordering for zone pixelization
"""
return np.transpose(self.mask.draw_random_position(density=self.config['density'], n=self.config['count'],
cell=zone, nside=nside))
def load_hints(self):
""" Load the hints file.
The hints file contains information about the parameter distributions.
"""
self.hints = {}
if os.path.exists(self.config['hints_file']):
for line in file(self.config['hints_file']):
line = line.strip()
if line == "":
continue
if line.startswith("#"):
continue
words = line.split()
instruction = None
low = None
high = None
name = words.pop(0)
if len(words) > 0:
instruction = words.pop(0)
if len(words) > 0:
low = float(words.pop(0))
if len(words) > 0:
high = float(words.pop(0))
if instruction not in self.hints:
self.hints[instruction] = []
self.hints[instruction].append((name, low, high))
self.logger.info("got hint for '%s': instruction is %s with range: %s, %s", name, instruction, low, high)
return self.hints
def fit(self, filename=None, add_columns=True):
""" Fit a Gaussian mixture model to the input catalogue.
Parameters
----------
filename : str
path to input catalogue.
"""
if filename is None:
filename = self.config['in_cat']
if os.path.exists(self.config['cat_model']) and not self.config['overwrite']:
self.logger.info("reading %s", self.config['cat_model'])
self.syn = Syn(self.config['cat_model'])
self.labels = self.syn.labels
return
hints = self.load_hints()
self.logger.info("loading %s", filename)
table = fileio.read_catalogue(filename, format=self.config['input_format'], columns=self.config['input_columns'], quick=self.config['quick'])
table_dtype = table.dtype
table = misc.remove_columns(table, self.config['skip'])
properties = list(table.dtype.names)
if self.logger.isEnabledFor(logging.INFO):
mesg = ""
for i, p in enumerate(properties):
mesg += "\n{:>3} {}".format(1 + i, p)
self.logger.info("got these %i columns:%s", len(properties), mesg)
self.syn = Syn(labels=properties, hints=hints, config=self.config)
dtype = table.dtype
if add_columns:
dtype = misc.append_dtypes(dtype, self.config['add_columns'], table_dtype)
if self.config['sample_sky'] and self.config['skycoord_name'] not in dtype.names:
skycoord_name = self.config['skycoord_name']
alpha, delta = skycoord_name
skycoord_dtype = np.dtype([(alpha, np.float64), (delta, np.float64)])
dtype = misc.concatenate_dtypes([dtype, skycoord_dtype])
self.syn.fit(table, dtype=dtype)
# store column names
self.labels = properties
# save catalogue model
self.syn.save(self.config['cat_model'])
def sample(self):
""" Sample from the Gaussian mixture model.
Returns
-------
        numpy structured array : random catalogue
"""
if self.syn is None:
if not os.path.exists(self.config['cat_model']):
raise Exception("Cannot load catalogue model. Files does not exist: %s"%self.config['cat_model'])
self.syn = Syn(self.config['cat_model'])
if self.config['sample_sky']:
skycoord = self.sample_sky()
count = len(skycoord)
else:
count = self.config['count']
if count == 0:
raise NoPoints
randoms = self.syn.sample(n=count)
if self.config['sample_sky']:
skycoord_name = self.config['skycoord_name']
for i in range(len(skycoord_name)):
randoms[skycoord_name[i]] = skycoord[:,i]
return randoms
| bengranett/syncat | syncat/methods/gmm.py | Python | mit | 6,003 |
# -*- coding: utf-8 -*-
import re
from jinja2 import Environment
from .observable import Observable
def get_attribute(render_data, variable):
levels = variable.split('.')
r = render_data
for level in levels:
if hasattr(r, level):
r = getattr(r, level)
else:
r = r.get(level) or {}
if not r:
return ''
return r
variable_re = re.compile(r'({\s*.*\s*})')
def is_function(text, node):
fnname = render_template(text, node)
return hasattr(fnname, node)
env = Environment(variable_start_string='{', variable_end_string='}')
def render_template(text, render_data):
if not (text.startswith('{') and text.endswith('}')):
return env.from_string(text).render(**vars(render_data))
expr = text[1:-1]
return env.compile_expression(expr)(**vars(render_data))
variables = variable_re.findall(text)
variables = {var[1:-1].strip(): var for var in variables}
for variable in variables:
rendered = get_attribute(render_data, variable)
if callable(rendered):
return rendered
text = text.replace(variables[variable], rendered)
return text
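# Illustrative usage sketch: plain text is rendered against the object's instance
# attributes, while a string that is exactly "{ expr }" is evaluated as a single
# Jinja2 expression and may return a non-string value.
#
#     class Ctx(object):
#         def __init__(self):
#             self.name = 'world'
#
#     render_template('hello { name }', Ctx())     # -> u'hello world'
#     render_template('{ name.upper() }', Ctx())   # -> u'WORLD'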
| soasme/riotpy | riot/template.py | Python | mit | 1,172 |
#!/usr/bin/env python
#------------------------------------------------------------
#
# Ciro D. Santilli
#
# Prints a list of paths which are files followed by their inodes and sha1 sums.
#
# Useful to make a backup of paths names before mass renaming them,
# supposing your files are distinct by SHA1 and that SHA1 has not changed,
# or that the inodes have not changed.
#
#------------------------------------------------------------
import os
import os.path
import stat
import hashlib
import sys
SHA1_MAX_BYTES_READ_DEFAULT = float("inf") # defaults to read entire file
def sha1_hex_file(filepath, max_bytes=None):
"""
Returns the SHA1 of a given filepath in hexadecimal.
Opt-args:
* max_bytes. If given, reads at most max_bytes bytes from the file.
"""
sha1 = hashlib.sha1()
f = open(filepath, 'rb')
try:
if max_bytes:
data = f.read(max_bytes)
else:
data = f.read()
sha1.update(data)
finally:
f.close()
return sha1.hexdigest()
if __name__ == '__main__':
import argparse
    parser = argparse.ArgumentParser(description="""Finds files and creates a list of their paths, inodes and sha1 checksums.
Useful to make a backup of filepaths before renaming them, for example before a large number of renames by a script.
SAMPLE CALLS
find_path_sha1.py
#finds, calculates sha1 based on the entire files, and prints path\nsha1 to stdout.
find_path_sha1.py -n 100000
#finds, calculates sha1 based on 100000 bytes
""",
epilog="Report any bugs to [email protected]",
prog='Program')
parser.add_argument('-m', '--max-sha1-bytes',
action="store",
dest="sha1_max_bytes_read",
type=int,
default=SHA1_MAX_BYTES_READ_DEFAULT,
help='Maximum number of bytes to read to calculate SHA1 checksum.'+
'Reading the whole file might be too slow, and unnecessary for some applications.')
args = parser.parse_args(sys.argv[1:])
sha1_max_bytes_read = args.sha1_max_bytes_read
file_output = ""
print "sha1_max_bytes_read"
print sha1_max_bytes_read
print
paths = []
for root, dirs, files in os.walk('.'):
for bname in files:
paths.append(os.path.join(root,bname))
paths.sort()
for path in paths:
print path
print str(sha1_hex_file(path,sha1_max_bytes_read))
print
| cirosantilli/python-utils | bin/find_path_sha1.py | Python | mit | 2,436 |
# -*- coding:utf-8 -*-
'''
Test
'''
import sys
sys.path.append('.')
from tornado.testing import AsyncHTTPSTestCase
from application import APP
class TestSomeHandler(AsyncHTTPSTestCase):
'''
Test
'''
def get_app(self):
'''
Test
'''
return APP
def test_index(self):
'''
Test index.
'''
response = self.fetch('/')
self.assertEqual(response.code, 200)
| bukun/TorCMS | tester/test_handlers/test_index_handler.py | Python | mit | 448 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from ._version import VERSION
class SynapseClientConfiguration(Configuration):
"""Configuration for SynapseClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: azure.core.credentials.TokenCredential
:param synapse_dns_suffix: Gets the DNS suffix used as the base for all Synapse service requests.
:type synapse_dns_suffix: str
:param livy_api_version: Valid api-version for the request.
:type livy_api_version: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
synapse_dns_suffix="dev.azuresynapse.net", # type: str
livy_api_version="2019-11-01-preview", # type: str
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if synapse_dns_suffix is None:
raise ValueError("Parameter 'synapse_dns_suffix' must not be None.")
if livy_api_version is None:
raise ValueError("Parameter 'livy_api_version' must not be None.")
super(SynapseClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.synapse_dns_suffix = synapse_dns_suffix
self.livy_api_version = livy_api_version
self.api_version = "2019-11-01-preview"
self.credential_scopes = ['https://dev.azuresynapse.net/.default']
self._configure(**kwargs)
self.user_agent_policy.add_user_agent('azsdk-python-synapseclient/{}'.format(VERSION))
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| Azure/azure-sdk-for-python | sdk/synapse/azure-synapse/azure/synapse/_configuration.py | Python | mit | 3,305 |