| column | type | range / classes |
|---|---|---|
| repo_name | string | lengths 6-61 |
| path | string | lengths 4-230 |
| copies | string | lengths 1-3 |
| size | string | lengths 4-6 |
| text | string | lengths 1.01k-850k |
| license | string | 15 classes |
| hash | int64 | -9,220,477,234,079,998,000 to 9,219,060,020B |
| line_mean | float64 | 11.6-96.6 |
| line_max | int64 | 32-939 |
| alpha_frac | float64 | 0.26-0.9 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.62-6.1 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |
kashyapchhatbar/CLASHChimeras | clashchimeras/parsers.py | 2 | 34332 | import csv
import gzip
import logging
import mmap
import os
import sys
import textwrap
from collections import Counter, defaultdict
from itertools import groupby
from operator import itemgetter
import pandas as pd
import pyfaidx
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import clashchimeras
logger = logging.getLogger('root')
class GFF:
"""GFF file parser for mirbase gff3 file
This class uses memory-mapped file object to read a mirbase gff3 file. It
contains methods to read, process a gff3 file and return genomic coordinates
Attributes:
fileName: A mirbase gff3 file path
"""
def __init__(self, fileName=None):
self.features = {}
self.fileName = fileName
def read(self, featureType='miRNA_primary_transcript'):
"""Reads gff3 file provided during class initialization
Stores the byte positions of every feature in a dict object named
self.features
Keyword Args:
featureType: Feature type of a gff3 record, the third element of every
record in the file. Please change this if you want to store mature
form of microRNA, by default it uses primary transcript
(default 'miRNA_primary_transcript')
"""
logger.info('Reading %s' % self.fileName)
self.fileHandle = open(self.fileName, 'r+b')
bytePosition = self.fileHandle.tell()
for line in self.fileHandle:
row = line.decode('utf-8').rstrip().split("\t")
if not row[0].startswith("#") and row[2] == featureType:
attributes = row[-1].split(";")
for attribute in attributes:
if attribute.startswith('Name'):
mirbase_name = attribute.split("=")[-1]
self.features[mirbase_name] = bytePosition
bytePosition = self.fileHandle.tell()
self.fileHandle.close()
logger.debug('Reading %s finished' % self.fileName)
def process(self, name):
"""A method to return a Record object providing genomic information
Args:
name: A valid miRNA_primary_transcript name
Returns:
An object Record containing scaffold, start, end, strand, mirbase_id and
mirbase_name as its variables for access
"""
self.fileHandle = open(self.fileName, 'r+b')
self.mm = mmap.mmap(self.fileHandle.fileno(), 0)
self.mm.seek(self.features[name])
row = self.mm.readline().decode('utf-8').rstrip().split("\t")
attributes = row[-1].split(";")
for attribute in attributes:
if attribute.startswith("ID"):
_id = attribute.split("=")[-1]
elif attribute.startswith("Name"):
_name = attribute.split("=")[-1]
record = Record(scaffold=row[0], start=int(row[3]), end=int(row[4]),
strand=row[6], mirbase_id=_id, mirbase_name=_name)
self.fileHandle.close()
return record
def coordinates(self, name, start=None, end=None):
"""A method to return a bed record containing genomic coordinates for the
aligned segment
Keyword Args:
start: The alignment start position of the cDNA molecule or the relative
start of the particular molecule
end: The alignment end position in the cDNA molecule or the relative end
of the particular molecule
Args:
name: A valid miRNA_primary_transcript name
Returns:
A tuple of strings containing elements for a bed record
"""
record = self.process(name)
if not start and not end:
start = 1
end = record.end - record.start + 1
positions = {}
match_positions = []
if record.strand == '+':
_start = 1
for relative, actual in enumerate(range(record.start - 1, record.end),
start=_start):
positions[relative] = actual
for pos in range(start, end + 1):
match_positions.append(positions[pos])
return [(record.scaffold, min(match_positions), max(match_positions) + 1,
record.mirbase_name, 0, record.strand)]
elif record.strand == '-':
_start = 1
for relative, actual in enumerate(reversed(range(record.start - 1,
record.end)), start=_start):
positions[relative] = actual
for pos in range(start, end + 1):
match_positions.append(positions[pos])
return [(record.scaffold, min(match_positions), max(match_positions) + 1,
record.mirbase_name, 0, record.strand)]
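# Hypothetical usage sketch for the GFF parser above. The gff3 path and the
# miRNA name are placeholders, not files or records shipped with this package.
def _example_gff_usage(gff_path='hsa.gff3'):
    gff = GFF(fileName=gff_path)
    gff.read(featureType='miRNA_primary_transcript')  # index byte offsets
    record = gff.process('hsa-mir-100')  # random access to one record
    # BED-style genomic coordinates for relative positions 1-22 of the hairpin
    bed = gff.coordinates('hsa-mir-100', start=1, end=22)
    return record, bed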
class GTF:
"""GTF file parser for gencode gtf file
This class uses memory-mapped file object to read a gencode gtf file. It
contains methods to read, process a gtf file and return genomic coordinates
Attributes:
fileName: A gencode gtf file path
"""
def __init__(self, fileName=None):
self.features = defaultdict(list)
self.biotypeFeatures = defaultdict(list)
self.geneFeatures = defaultdict(list)
self.fileName = fileName
self.geneIds = {}
def readBiotype(self, featureType='exon', biotype=None):
logger.info('Reading %s' % self.fileName)
self.fileHandle = open(self.fileName, 'r+b')
for line in self.fileHandle:
row = line.decode('utf-8').rstrip().split("\t")
if not row[0].startswith("#") and row[2] == featureType:
attributes = row[-1].split("; ")
havana_transcript = '-'
havana_gene = '-'
exon_number = '0'
for attribute in attributes:
if attribute.startswith("transcript_id"):
transcript_id = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("transcript_type"):
transcript_type = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("exon_number"):
exon_number = int(attribute.split(" ")[-1])
elif attribute.startswith("havana_gene"):
havana_gene = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("havana_transcript"):
havana_transcript = attribute.split(" ")[-1][1:-2]
elif attribute.startswith("gene_id"):
gene_id = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("gene_name"):
gene_name = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("transcript_name"):
transcript_name = attribute.split(" ")[-1][1:-1]
if biotype == 'tRNA':
if transcript_type == "tRNAscan":
self.biotypeFeatures[transcript_id].append((exon_number, row[0],
int(row[3]), int(row[4]),
row[6], gene_id,
havana_gene,
havana_transcript,
transcript_name,
gene_name))
else:
if transcript_type == biotype:
self.biotypeFeatures[transcript_id].append((exon_number, row[0],
int(row[3]), int(row[4]),
row[6], gene_id,
havana_gene,
havana_transcript,
transcript_name,
gene_name))
self.fileHandle.close()
def read(self, featureType='exon'):
"""Reads gtf file provided during class initialization
Stores the byte positions of every feature in a defaultdict(list) object
named self.features
Keyword Args:
featureType: Feature type of a gtf record, the third element of every
record in the file. Please change this if you want to get specific
records (e.g. 'UTR') (default 'exon')
"""
logger.info('Reading %s' % self.fileName)
self.fileHandle = open(self.fileName, 'r+b')
bytePosition = self.fileHandle.tell()
for line in self.fileHandle:
row = line.decode('utf-8').rstrip().split("\t")
if not row[0].startswith("#") and row[2] == featureType:
attributes = row[-1].split("; ")
for attribute in attributes:
if attribute.startswith("transcript_id"):
transcript_id = attribute.split(" ")[-1][1:-1]
self.features[transcript_id].append(bytePosition)
self.geneIds[transcript_id] = gene_id
if attribute.startswith("gene_id"):
gene_id = attribute.split(" ")[-1][1:-1]
bytePosition = self.fileHandle.tell()
self.fileHandle.close()
self.fileHandle = open(self.fileName, 'r+b')
bytePosition = self.fileHandle.tell()
for line in self.fileHandle:
row = line.decode('utf-8').rstrip().split("\t")
if not row[0].startswith("#") and row[2] == featureType:
attributes = row[-1].split("; ")
for attribute in attributes:
if attribute.startswith("gene_id"):
gene_id = attribute.split(" ")[-1][1:-1]
self.geneFeatures[gene_id].append(bytePosition)
bytePosition = self.fileHandle.tell()
self.fileHandle.close()
logger.debug('Reading %s finished' % self.fileName)
def process(self, name):
"""A method to return a Record object providing genomic information
Args:
name: A valid gencode transcript_id
Yields:
An object Record containing scaffold, start, end, strand, transcript_type,
transcript_id, exon_id, exon_number and gene_name as its variables for access
"""
self.fileHandle = open(self.fileName, 'r+b')
self.mm = mmap.mmap(self.fileHandle.fileno(), 0)
positions = self.features[name]
for position in positions:
self.mm.seek(position)
row = self.mm.readline().decode('utf-8').rstrip().split("\t")
attributes = row[-1].split("; ")
_eid = '-'
_enb = '0'
for attribute in attributes:
if attribute.startswith("transcript_type"):
_tt = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("transcript_id"):
_tid = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("exon_id"):
_eid = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("exon_number"):
_enb = int(attribute.split(" ")[-1])
elif attribute.startswith("gene_name"):
_gn = attribute.split(" ")[-1][1:-1]
record = Record(scaffold=row[0], start=int(row[3]), end=int(row[4]),
strand=row[6], transcript_type=_tt, transcript_id=_tid, exon_id=_eid,
exon_number=_enb, gene_name=_gn)
yield record
self.fileHandle.close()
def geneExonicRegions(self, df):
"""Given a DataFrame with the exon coordinates from Gencode for a single
gene, return the total number of coding regions in that gene.
"""
scaffold = df.iloc[0].scaffold
strand = df.iloc[0].strand
gene_type = df.iloc[0].gene_type
gene_id = df.iloc[0].gene_id
gene_name = df.iloc[0].gene_name
start = df.start.min()
end = df.end.max()
bp = [False] * (end - start + 1)
for i in range(df.shape[0]):
s = df.iloc[i]['start'] - start
e = df.iloc[i]['end'] - start + 1
bp[s:e] = [True] * (e - s)
regions = list(range(start, end + 1))
groups = []
for i, j in groupby(bp):
groups.append((i, len(list(j))))
e_start = 0
for i in groups:
e_end = e_start + i[1]
if i[0]:
record = Record(scaffold=scaffold, start=regions[e_start],
end=regions[e_end - 1], gene_type=gene_type, gene_id=gene_id,
gene_name=gene_name, strand=strand)
yield record
e_start += i[1]
def geneProcess(self, name):
"""A method to return a Record object providing genomic information
Args:
name: A valid gencode gene_id
Yields:
An object Record containing scaffold, start, end, strand, gene_type, gene_id
and gene_name as its variables for access, one per merged exonic region
"""
self.fileHandle = open(self.fileName, 'r+b')
self.mm = mmap.mmap(self.fileHandle.fileno(), 0)
positions = self.geneFeatures[name]
exons = []
for position in positions:
self.mm.seek(position)
row = self.mm.readline().decode('utf-8').rstrip().split("\t")
attributes = row[-1].split("; ")
for attribute in attributes:
if attribute.startswith("gene_type"):
_gt = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("gene_id"):
_gid = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("gene_name"):
_gn = attribute.split(" ")[-1][1:-1]
exons.append((row[0], int(row[3]), int(row[4]), row[6], _gt, _gid, _gn))
self.fileHandle.close()
exons_df = pd.DataFrame(exons, columns=['scaffold', 'start', 'end',
'strand', 'gene_type', 'gene_id', 'gene_name'])
for record in self.geneExonicRegions(exons_df):
yield record
def coordinates(self, name, start=None, end=None):
"""A generator to return a bed record containing genomic coordinates for the
aligned segment
Keyword Args:
start: The alignment start position of the cDNA molecule or the relative
start of the particular molecule
end: The alignment end position in the cDNA molecule or the relative end
of the particular molecule
Args:
name: A valid gencode transcript_id (a '|'-delimited fasta id is also accepted)
Returns:
A list of tuple(s) of strings containing elements for a bed record. There
may be more than one because of alternate splicing.
"""
if "|" in name:
self.name = name.split("|")[0]
else:
self.name = name
positions = {}
match_positions = []
records = []
segments = []
result_segments = []
for record in self.process(self.name):
records.append(record)
records.sort(key=lambda x: int(x.exon_number))
if records[0].strand == '+':
_start = 1
for record in records:
for relative, actual in enumerate(range(record.start, record.end + 1),
start=_start):
positions[relative] = actual
_start = relative + 1
for pos in range(start, end):
match_positions.append(positions[pos])
for key, group in groupby(enumerate(match_positions),
lambda x: x[0] - x[-1]):
segment = list(map(itemgetter(1), group))
segments.append([segment[0], segment[-1]])
for segment in segments:
for record in records:
if segment[0] >= record.start and segment[1] <= record.end:
result_segments.append((record.scaffold, segment[0], segment[1],
record.transcript_id + '|' + record.gene_name, 0, record.strand))
elif records[0].strand == '-':
_start = 1
for record in records:
for relative, actual in enumerate(reversed(range(record.start,
record.end + 1)), start=_start):
positions[relative] = actual
_start = relative + 1
for pos in range(start, end):
match_positions.append(positions[pos])
for key, group in groupby(enumerate(reversed(match_positions)),
lambda x: x[0] - x[-1]):
segment = list(map(itemgetter(1), group))
segments.append([segment[0], segment[-1]])
for segment in segments:
for record in records:
if segment[0] >= record.start and segment[1] <= record.end:
result_segments.append((record.scaffold, segment[0], segment[1],
record.transcript_id + '|' + record.gene_name, 0, record.strand))
if len(result_segments) == 0:
logger.debug('%s, %s, %s' % (name, start, end))
logger.debug('%s' % str(segments))
for r in records:
logger.debug('%s %s %s %s' % (r.scaffold, r.strand,
r.start, r.end))
return result_segments
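# Hypothetical usage sketch for the GTF parser above. The gtf path and the
# transcript id are placeholders; coordinates() maps a spliced cDNA interval
# back onto the genome and returns several segments when it spans introns.
def _example_gtf_usage(gtf_path='gencode.annotation.gtf'):
    gtf = GTF(fileName=gtf_path)
    gtf.read(featureType='exon')
    segments = gtf.coordinates('ENST00000375759.6', start=10, end=60)
    return segments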
class SAM:
"""SAM file parser for parsing bowtie2 generated files
This class uses memory-mapped file object to read a sam file
Attributes:
fileName: A sam file path
"""
def __init__(self, fileName=None):
self.fileName = fileName
self.records = {}
def read(self, flag=0):
"""Reads sam file provided during class initialization
Stores the byte position of every record based on the keyword arg flag
provided, to a dict object named self.records
Keyword Args:
flag: The SAM alignment flag for a record. For default, it uses the
primary alignment for every record and ignores secondary alignments
(default 0)
"""
logger.info('Reading %s' % self.fileName)
self.fileHandle = open(self.fileName, 'r+b')
bytePosition = self.fileHandle.tell()
for line in self.fileHandle:
read = line.decode('utf-8').split("\t")
if not read[0].startswith("@") and read[1] == str(flag):
self.records[read[0]] = bytePosition
bytePosition = self.fileHandle.tell()
self.fileHandle.close()
logger.debug('Reading %s finished' % self.fileName)
def access(self, queryName):
"""Provides random access of a record from the sam file
Args:
queryName: The query name of the read from the sam file
Returns:
A list generated after splitting the record line from sam file
"""
self.fileHandle = open(self.fileName, 'r+b')
self.mm = mmap.mmap(self.fileHandle.fileno(), 0)
self.mm.seek(self.records[queryName])
row = self.mm.readline().decode('utf-8').rstrip().split("\t")
self.fileHandle.close()
return self.pretty(row)
def filterPotentialChimeras(self, min_length=30, flag=0, target=None):
"""Generated a filtered fasta file from a sam file
This filtered fasta file contains reads that can be potentially chimeras.
The criteria for filtering is based on the minimum length
Keyword Args:
min_length: To be selected as a potential chimera, this is the minimum
read length (default 30)
flag: The SAM alignment flag describing the type of alignment (default 0)
target: The prefix for output file
"""
logger.debug('Filtering {} for potential chimeras'.format(target))
target = '{}.filter.fasta'.format(target.rpartition(".")[0])
if os.path.exists(target):
logger.info('Skipping filtering for {}'.format(target))
else:
with open(target, 'w') as oH:
with open(self.fileName) as iH:
for row in csv.reader(iH, delimiter="\t"):
if not row[0].startswith('@') and row[1] == str(flag):
if len(row[9]) >= min_length:
print(textwrap.fill('>%s' % row[0], width=80), file=oH)
print(textwrap.fill('%s' % row[9], width=80), file=oH)
logger.debug('Filtering finished')
return target
def pretty(self, row):
refId = row[2]
start = int(row[3])
for i in row[10:]:
if i.startswith('MD'):
mismatchInfo = i
sequence = row[9]
cigar = row[5]
cigarString = clashchimeras.methods.convertCigar(row[5])
matchLength = cigarString.count("M") + cigarString.count("D")
end = start + matchLength - 1
record = Record(refId=refId, start=start, mismatchInfo=mismatchInfo,
sequence=sequence, cigarString=cigarString, matchLength=matchLength,
cigar=cigar, end=end)
return record
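# Hypothetical usage sketch for the SAM parser above. The file name and the
# query name are placeholders for a bowtie2 alignment file and one of its reads.
def _example_sam_usage(sam_path='smallRNA.sam'):
    sam = SAM(fileName=sam_path)
    sam.read(flag=0)  # index primary alignments only
    aligned = sam.access('201980-1-48')  # Record with refId, start, end, cigar
    return aligned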
class Output:
"""Contains methods for writing output files
This class is used to generate every kind of output generated by this
package which includes plain text, ansi colored text and bed file
Attributes:
target: A prefix for output file which will be automatically followed by
extension (default 'wip')
overlap: Minimum overlap to be set between two molecules when determining
chimera (default 4)
gap: Maximum gap (number of unknown nucleotides) to be allowed between
two molecules within a chimera (default 9)
"""
def __init__(self,
target=None,
smallRNABed=False,
targetRNABed=False,
overlap=4,
gap=9):
self.target = target
self.overlap = overlap
self.gap = gap
if smallRNABed:
self.smallRNABedHandle = open('{}.smallRNA.bed'.format(self.target), 'w')
print('# BED locations of smallRNA part of the identified chimera',
file=self.smallRNABedHandle)
self.smallRNABedCSV = csv.writer(self.smallRNABedHandle, delimiter="\t")
self.smallRNABedCSV.writerow(
['# The name field represents the following:'])
self.smallRNABedCSV.writerow(
['# E.g. 201980-1-48|hsa-mir-100==PAPSS1'])
self.smallRNABedCSV.writerow(
['# 201980-1-48 is the fasta identifier'])
self.smallRNABedCSV.writerow(
["# 201980 is the unique identifier"])
self.smallRNABedCSV.writerow(
["# 1 is the number of times that sequence was observed in raw "
"fastq "])
self.smallRNABedCSV.writerow(
["# 48 is the length of the sequence"])
self.smallRNABedCSV.writerow(
['# hsa-mir-100 represents the smallRNA transcript'])
self.smallRNABedCSV.writerow(
['# PAPSS1 represents the gene symbol for targetRNA transcript '
'transcript '])
if targetRNABed:
self.targetRNABedHandle = open('{}.targetRNA.bed'.format(self.target),
'w')
self.targetRNABedCSV = csv.writer(self.targetRNABedHandle, delimiter="\t")
self.targetRNABedCSV.writerow(
['# The name field represents the following:'])
self.targetRNABedCSV.writerow(
['# E.g. 136019-1-48|ENST00000375759.6|SPEN==hsa-mir-103a-2'])
self.targetRNABedCSV.writerow(
['# 136019-1-48 is the fasta identifier'])
self.targetRNABedCSV.writerow(
["# 136019 is the unique identifier"])
self.targetRNABedCSV.writerow(
["# 1 is the number of times that sequence was observed in raw "
"fastq "])
self.targetRNABedCSV.writerow(
["# 48 is the length of the sequence"])
self.targetRNABedCSV.writerow(
["# ENST00000375759.6 is the targetRNA transcript identifier"])
self.targetRNABedCSV.writerow(
['# SPEN is the gene symbol for for targetRNA transcript '
'ENST00000375759.6'])
self.targetRNABedCSV.writerow(
['# hsa-mir-103a-2 represents the smallRNA transcript '])
self.hybWriter = open('%s.chimeras.tsv' % self.target, 'w')
self.hybComments()
def hybComments(self):
print("# fasta Identifier: The identifier in <sample>.unique.fasta. ",
"#\tE.g. 123456-3-68 ",
"#\t123456 is the unique identifier",
"#\t3 is the number of times that sequence was observed in raw "
"fastq ",
"#\t68 is the length of the sequence", sep="\n", file=self.hybWriter)
print("# smallRNA: The cDNA ID of the type of RNA labelled as smallRNA in "
"the analysis",
"#\tE.g. hsa-let-7b (miRBase identifier)",
"#\tE.g. ENST00000619178.1|SNORD3D| (Gencode snoRNA identifier)",
sep="\n", file=self.hybWriter)
print("# smallRNA_start: cDNA alignment start position of the smallRNA "
"part of the chimera", file=self.hybWriter)
print("# smallRNA_MDtag: Showing the MD tag from the smallRNA SAM "
"alignment for the chimera",
"#\tSAM file format specification",
"#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
"#\tMD Z String for mismatching positions.Regex:[0-9]+((["
"A-Z]|\^[A-Z]+)[0-9]+)*9", sep="\n", file=self.hybWriter)
print('# smallRNA_cigar: Cigar string from the smallRNA SAM alignment for '
'the chimera',
"#\tSAM file format specification",
"#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
'#\tSee CIGAR in the file', sep="\n", file=self.hybWriter)
print('# arbitrary_chimera: The chimera representation indicating what '
'part of the sequence represents smallRNA and targetRNA',
'#\t{ is representing a match with smallRNA',
'#\t} is representing a match with targetRNA',
'#\t# is representing unaligned sequences (identified as --gap -ga)',
'#\t- is representing a deletion (D in cigar string)',
'#\t+ is representing a deletion (I in cigar string)',
'#\tE.g {{{{{{{{-{{{{{{{{{{{{{##}}}}}}}}}}+}}}}}}}}}}}}}}}}}}}}}}'
'#\tE.g The first 22 nucleotides are aligning to smallRNA cDNA',
'#\tE.g The last 33 nucleotides are aligning to targetRNA cDNA',
sep="\n", file=self.hybWriter)
print('# read_sequence: The actual sequence that is appeared in raw '
'reads', file=self.hybWriter)
print("# targetRNA: The cDNA ID of the type of RNA labelled as targetRNA "
"in "
"the analysis",
"#\tE.g. hsa-let-7b (miRBase identifier)",
"#\tE.g. ENST00000619178.1|SNORD3D| (Gencode snoRNA identifier)",
sep="\n", file=self.hybWriter)
print("# targetRNA_start: cDNA alignment start position of the targetRNA "
"part of the chimera", file=self.hybWriter)
print("# targetRNA_MDtag: Showing the MD tag from the targetRNA SAM "
"alignment for the chimera",
"#\tSAM file format specification",
"#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
"#\tMD Z String for mismatching positions.Regex:[0-9]+((["
"A-Z]|\^[A-Z]+)[0-9]+)*9", sep="\n", file=self.hybWriter)
print('# targetRNA_cigar: Cigar string from the targetRNA SAM alignment '
'for '
'the chimera',
"#\tSAM file format specification",
"#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
'#\tSee CIGAR in the file', sep="\n", file=self.hybWriter)
print("# fasta_Identifier", "smallRNA", "smallRNA_start", "smallRNA_MDtag",
"smallRNA_cigar", "arbitrary_chimera", "read_sequence", "targetRNA",
"targetRNA_start", "targetRNA_MDtag", "targetRNA_cigar", sep="\t",
file=self.hybWriter)
def writeTargetRNABed(self, query, targetRNASegments, smallRNA):
if "ENS" in smallRNA and "|" in smallRNA:
_smallRNA = smallRNA.split("|")[5]
else:
_smallRNA = smallRNA
for segment in targetRNASegments:
_segment = list(segment)
_segment[3] = query + "|" + _segment[3] + "==" + _smallRNA
self.targetRNABedCSV.writerow(_segment)
def writeSmallRNABed(self, query, smallRNASegments, targetRNA):
if "ENS" in targetRNA and "|" in targetRNA:
_targetRNA = targetRNA.split("|")[5]
else:
_targetRNA = targetRNA
for segment in smallRNASegments:
_segment = list(segment)
_segment[3] = query + "|" + _segment[3] + "==" + _targetRNA
self.smallRNABedCSV.writerow(_segment)
def write(self, queryName, smallRNA, targetRNA):
chimeraString = clashchimeras.methods.chimeraOrNot(smallRNA.cigarString,
targetRNA.cigarString, overlap=self.overlap, gap=self.gap)
smallRNARegion = clashchimeras.methods.findRegion(smallRNA)
targetRNARegion = clashchimeras.methods.findRegion(targetRNA)
print(queryName, smallRNARegion, smallRNA.start, smallRNA.mismatchInfo,
smallRNA.cigar, chimeraString, smallRNA.sequence,
targetRNARegion, targetRNA.start,
targetRNA.mismatchInfo, targetRNA.cigar, sep="\t", file=self.hybWriter)
def __del__(self):
self.hybWriter.close()
class Fasta:
def __init__(self, genome=None, gtf=None):
self.genome = genome
self.gtf = gtf
self.faidx = pyfaidx.Fasta(self.genome)
def getBiotype(self, output=None, biotype=None):
self.sequences = []
g = GTF(fileName=self.gtf)
if biotype == 'tRNA':
g.readBiotype(biotype=biotype, featureType='tRNAscan')
else:
g.readBiotype(biotype=biotype)
for transcript_id, exons in g.biotypeFeatures.items():
temp_seq = ''
exons.sort(key=itemgetter(0))
for exon in exons:
if exon[4] == '-':
temp_seq += (-self.faidx[exon[1]][exon[2] - 1:exon[3]]).seq
elif exon[4] == '+':
temp_seq += self.faidx[exon[1]][exon[2] - 1:exon[3]].seq
_id = '{}|{}|{}|{}|{}|{}|{}'.format(transcript_id,
exons[0][5],
exons[0][6],
exons[0][7],
exons[0][8],
exons[0][9],
len(temp_seq))
temp_rec = SeqRecord(seq=Seq(temp_seq), id=_id,
description='')
self.sequences.append(temp_rec)
if not output:
logger.error('Please provide output file..')
sys.exit()
else:
logger.info('Writing {}'.format(output))
SeqIO.write(self.sequences, output, 'fasta')
class Fastq:
def __init__(self, fileName=None, compressed=False):
self.fileName = fileName
self.compressed = compressed
self.n = 4
self.sequences = Counter()
self.uniqueOutput = fileName.rpartition(".")[0] + '.unique.fasta'
def recordIterator(self):
record = []
record_length = 0
for line in self.fileHandle:
if record_length == self.n:
yield record
record_length = 0
record = []
record.append(line.decode().rstrip())
record_length += 1
yield record
def createUnique(self):
if self.compressed:
self.fileHandle = gzip.open(self.fileName, 'rb')
else:
self.fileHandle = open(self.fileName, 'rb')
logger.info('Reading {}'.format(self.fileName))
for record in self.recordIterator():
self.sequences[record[1]] += 1
logger.info('Writing {}'.format(self.uniqueOutput))
with open(self.uniqueOutput, 'w') as wH:
for index, (sequence, counts) in enumerate(sorted(self.sequences.items(),
key=itemgetter(1), reverse=True), start=1):
print('>{}-{}-{}'.format(index, counts, len(sequence)), file=wH)
print(textwrap.fill(sequence, width=80), file=wH)
logger.debug('Finished writing {}'.format(self.uniqueOutput))
self.fileHandle.close()
class Record:
"""A custom object (preferred over dict) for easy access using variables
It's a dependency for GTF and GFF classes
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
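# Hypothetical usage sketch: Record simply exposes its keyword arguments as
# attributes, so callers can carry whatever fields a parser needs.
def _example_record_usage():
    rec = Record(scaffold='chr11', start=100, end=180, strand='-')
    return rec.scaffold, rec.start, rec.end, rec.strand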
| mit | -935,002,050,063,870,100 | 41.75467 | 117 | 0.533933 | false | 4.227038 | false | false | false |
bmwiedemann/linuxcnc-mirror | src/hal/utils/halgui/main.py | 38 | 1324 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from optparse import OptionParser
import gtk
from app import Application
from load import file_load
import data
import load
import save
def main():
usage = "usage: %prog [-c CRAPFILE] | [CRAPFILE]"
parser = OptionParser(usage)
(options, args) = parser.parse_args()
if len(args) > 1:
parser.error("incorrect number of arguments")
app = Application()
if len(args):
file_load(app.design, args[0])
app.show_app()
gtk.main()
else:
app.design.update()
app.show_app()
gtk.main()
if __name__ == '__main__':
try:
import psyco
psyco.full()
except ImportError:
pass
main()
| lgpl-2.1 | 748,531,910,334,872,700 | 24.960784 | 78 | 0.712991 | false | 3.475066 | false | false | false |
ReactiveX/RxPY | rx/scheduler/periodicscheduler.py | 1 | 3861 | from abc import abstractmethod
from datetime import datetime
from typing import Optional
from rx.core import typing
from rx.disposable import Disposable, MultipleAssignmentDisposable
from .scheduler import Scheduler
class PeriodicScheduler(Scheduler, typing.PeriodicScheduler):
"""Base class for the various periodic scheduler implementations in this
package as well as the mainloop sub-package.
"""
def schedule_periodic(self,
period: typing.RelativeTime,
action: typing.ScheduledPeriodicAction,
state: Optional[typing.TState] = None
) -> typing.Disposable:
"""Schedules a periodic piece of work.
Args:
period: Period in seconds or timedelta for running the
work periodically.
action: Action to be executed.
state: [Optional] Initial state passed to the action upon
the first iteration.
Returns:
The disposable object used to cancel the scheduled
recurring action (best effort).
"""
disp: MultipleAssignmentDisposable = MultipleAssignmentDisposable()
seconds: float = self.to_seconds(period)
def periodic(scheduler: typing.Scheduler,
state: Optional[typing.TState] = None
) -> Optional[Disposable]:
if disp.is_disposed:
return None
now: datetime = scheduler.now
try:
state = action(state)
except Exception:
disp.dispose()
raise
time = seconds - (scheduler.now - now).total_seconds()
disp.disposable = scheduler.schedule_relative(time, periodic, state=state)
return None
disp.disposable = self.schedule_relative(period, periodic, state=state)
return disp
@abstractmethod
def schedule(self,
action: typing.ScheduledAction,
state: Optional[typing.TState] = None
) -> typing.Disposable:
"""Schedules an action to be executed.
Args:
action: Action to be executed.
state: [Optional] state to be given to the action function.
Returns:
The disposable object used to cancel the scheduled action
(best effort).
"""
return NotImplemented
@abstractmethod
def schedule_relative(self,
duetime: typing.RelativeTime,
action: typing.ScheduledAction,
state: Optional[typing.TState] = None
) -> typing.Disposable:
"""Schedules an action to be executed after duetime.
Args:
duetime: Relative time after which to execute the action.
action: Action to be executed.
state: [Optional] state to be given to the action function.
Returns:
The disposable object used to cancel the scheduled action
(best effort).
"""
return NotImplemented
@abstractmethod
def schedule_absolute(self,
duetime: typing.AbsoluteTime,
action: typing.ScheduledAction,
state: Optional[typing.TState] = None
) -> typing.Disposable:
"""Schedules an action to be executed at duetime.
Args:
duetime: Absolute time at which to execute the action.
action: Action to be executed.
state: [Optional] state to be given to the action function.
Returns:
The disposable object used to cancel the scheduled action
(best effort).
"""
return NotImplemented
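# Hypothetical usage sketch (commented out): `ConcreteScheduler` stands in for
# any subclass that implements the abstract schedule* methods; it is not a
# class defined in this module.
#
#     scheduler = ConcreteScheduler()
#
#     def tick(count):
#         print('tick', count)
#         return count + 1              # return value becomes the next state
#
#     subscription = scheduler.schedule_periodic(1.0, tick, state=0)
#     ...
#     subscription.dispose()            # best-effort cancellation
#
# Each iteration re-schedules itself via schedule_relative, subtracting the
# time the action itself consumed so the period stays close to `period`.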
| mit | 8,123,050,020,604,884,000 | 32.284483 | 86 | 0.574204 | false | 5.260218 | false | false | false |
AnotherVisitor/StuffAroundRrdtool | weather.py | 1 | 1521 | #!/usr/bin/python
from Adafruit_BMP085 import BMP085
import re, os, rrdtool, time
# function: read and parse sensor data file
def read_sensor(path):
value = "U"
try:
f = open(path, "r")
line = f.readline()
if re.match(r"([0-9a-f]{2} ){9}: crc=[0-9a-f]{2} YES", line):
line = f.readline()
m = re.match(r"([0-9a-f]{2} ){9}t=([+-]?[0-9]+)", line)
if m:
value = str(float(m.group(2)) / 1000.0)
f.close()
except (IOError), e:
print time.strftime("%x %X"), "Error reading", path, ": ", e
return value
# define pathes to 1-wire sensor data
path = ("/sys/bus/w1/devices/10-0008002ff245/w1_slave")
# read sensor data
tempout = float(read_sensor(path))
time.sleep(1)
#########################################################################
# Initialise the BMP085 and use STANDARD mode
bmp = BMP085(0x77)
tempin = bmp.readTemperature()
# Read the current barometric pressure level
pressure = bmp.readPressure()
# Set the altitude of your current location in meter
altitude = 301
psea = pressure / pow(1.0 - altitude/44330.0, 5.255)
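# Worked example with hypothetical readings: at pressure = 97772 Pa and
# altitude = 301 m the correction factor is (1 - 301/44330.0)**5.255 ~ 0.965,
# so psea ~ 97772 / 0.965 ~ 101,300 Pa, i.e. about 1013 hPa at sea level.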
print "Outside Temp: %.2f C" % tempout
print "Inside Temp: %.2f C" % tempin
print "Pressure: %.2f hPa" % (pressure / 100.0)
print "Pressure at sea level: %8.2f hPa" % (psea / 100.0)
time.sleep(1)
# insert data into round-robin-database
data = "N:%.2f:%.2f:%8.2f" % (tempin, tempout, psea/100.0)
rrdtool.update(
"%s/weather.rrd" % (os.path.dirname(os.path.abspath(__file__))),
data)
| gpl-2.0 | -7,056,019,276,738,214,000 | 25.684211 | 73 | 0.590401 | false | 2.765455 | false | false | false |
danielquinn/spirithunter | src/economy/migrations/0001_initial.py | 1 | 3412 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-04 17:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_email', models.EmailField(max_length=254)),
('delta', models.IntegerField()),
('note', models.TextField(blank=True, null=True)),
('balance', models.PositiveIntegerField(help_text='The balance total at this point in the log. This value exists strictly for performance reasons when calculating current balance.')),
('created', models.DateTimeField(auto_now_add=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='AdministrativeTransaction',
fields=[
('transaction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='economy.Transaction')),
],
options={
'abstract': False,
},
bases=('economy.transaction',),
),
migrations.CreateModel(
name='ForexTransaction',
fields=[
('transaction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='economy.Transaction')),
('currency_value', models.PositiveIntegerField()),
('credits_purchased', models.PositiveIntegerField()),
],
options={
'abstract': False,
},
bases=('economy.transaction',),
),
migrations.CreateModel(
name='ItemPurchaseTransaction',
fields=[
('transaction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='economy.Transaction')),
('quantity', models.PositiveIntegerField(help_text='Number of items purchased')),
],
options={
'abstract': False,
},
bases=('economy.transaction',),
),
migrations.CreateModel(
name='SpoilsTransaction',
fields=[
('transaction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='economy.Transaction')),
],
options={
'abstract': False,
},
bases=('economy.transaction',),
),
migrations.AddField(
model_name='transaction',
name='polymorphic_ctype',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_economy.transaction_set+', to='contenttypes.ContentType'),
),
]
| agpl-3.0 | -838,863,452,036,683,800 | 41.65 | 201 | 0.578839 | false | 4.686813 | false | false | false |
bourbakilee/PyMPL | debug.py | 1 | 1270 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 11:55:10 2016
@author: liys
"""
import pickle, sqlite3
from queue import PriorityQueue
import matplotlib.pyplot as plt
from OffRoadPlanning import load_forward_path_primitives, load_reverse_path_primitives, test_load_path_primitives, \
test_load_motion_primitives, State
# test_load_path_primitives()
# test_load_motion_primitives()
# primitives1 = load_forward_path_primitives()
# primitives2 = load_reverse_path_primitives()
motion_primitives = {}
with open('motion_primitives2.pickle', 'rb') as f:
motion_primitives.update(pickle.load(f))
conn = sqlite3.connect('InitialGuessTable.db')
cursor = conn.cursor()
start = State(index=(50,50,-5,-1), time=10., length=50., cost=0.)
pq = PriorityQueue()
pq.put(start)
node_dict = {start.index:start}
edge_dict = {}
times = 0
while times < 100 and not pq.empty():
times += 1
state = pq.get()
print(state.index)
State.ControlSet(state, motion_primitives, pq, node_dict, edge_dict)
print(len(edge_dict))
for traj in edge_dict.values():
plt.plot(traj[:,2], traj[:,3])
for state in node_dict.values():
plt.plot(state.state[0], state.state[1], 'ro')
# print(state.priority)
plt.axis('equal')
plt.show()
cursor.close()
conn.close() | gpl-3.0 | 6,384,187,114,103,065,000 | 25.479167 | 116 | 0.696063 | false | 2.974239 | false | false | false |
kdschlosser/SonyAPI | SonyAPI/channel.py | 1 | 4685 | # -*- coding: utf-8 -*-
#
# SonyAPI
# External control of Sony Bravia Generation 3 TV's
# Copyright (C) 2017 Kevin G. Schlosser
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import absolute_import
from . import media
from .api_const import PY2
class Channels(object):
def __init__(self, sony_api):
self._sony_api = sony_api
@property
def _channel(self):
return self._sony_api.now_playing.display_num
@property
def lineup(self):
content_items = []
for source in self._sony_api.source_list:
if source.uri.startswith('tv'):
content_list = self._sony_api.send(
'avContent',
'getContentList',
source=source.uri
)
for content in content_list:
content['source'] = source
content_items += [
media.ContentItem(self._sony_api, **content)
]
return content_items
def _set_channel(self, direction, channel):
for chan in self.lineup:
if chan.display_num == str(channel):
chan.set()
return channel
selected_channel = None
new_channel = 999999 if direction == 'up' else 0
for chan in self.lineup:
if direction == 'up':
if (
new_channel >
int(chan.display_num) >
channel
):
selected_channel = chan
new_channel = int(chan.display_num)
else:
if (
new_channel <
int(chan.display_num) <
channel
):
selected_channel = chan
new_channel = int(chan.display_num)
if new_channel == 999999:
for chan in self.lineup:
if new_channel > int(chan.display_num):
selected_channel = chan
new_channel = int(chan.display_num)
if new_channel == 0:
for chan in self.lineup:
if new_channel < int(chan.display_num):
selected_channel = chan
new_channel = int(chan.display_num)
if selected_channel is not None:
selected_channel.set()
return selected_channel
def up(self):
return self._set_channel('up', int(self._channel) + 1)
def down(self):
return self._set_channel('down', int(self._channel) - 1)
def __lt__(self, other):
return int(self._channel) < int(other)
def __le__(self, other):
return int(self._channel) <= int(other)
def __eq__(self, other):
return int(self._channel) == int(other)
def __ne__(self, other):
return int(self._channel) != int(other)
def __gt__(self, other):
return int(self._channel) > int(other)
def __ge__(self, other):
return int(self._channel) >= int(other)
def __add__(self, other):
return int(self._channel) + int(other)
def __sub__(self, other):
return int(self._channel) - int(other)
def __mul__(self, other):
return int(self._channel) * int(other)
def __div__(self, other):
return int(self._channel) / int(other)
def __iadd__(self, other):
return self._set_channel('up', int(self._channel) + int(other))
def __isub__(self, other):
return self._set_channel('down', int(self._channel) - int(other))
def __imul__(self, other):
return self._set_channel('up', int(self._channel) * int(other))
def __idiv__(self, other):
return self._set_channel('down', int(self._channel) / int(other))
def __int__(self):
return int(self._channel)
def __str__(self):
return str(self._channel)
if PY2:
def __unicode__(self):
return unicode(str(self))
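# Hypothetical usage sketch (commented out); `sony_api` stands for an already
# connected SonyAPI instance and is not defined in this module.
#
#     channels = Channels(sony_api)
#     channels.up()                     # tune to the next higher channel
#     if channels == 42:                # comparisons use the display number
#         channels.down()
#     channels += 2                     # arithmetic operators re-tune via _set_channel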
| gpl-2.0 | 7,907,677,837,741,881,000 | 29.422078 | 73 | 0.548346 | false | 4.113257 | false | false | false |
cmancone/ezgal | ezgal/sfhs.py | 1 | 2071 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
import numpy as np
from . import dusts
__ver__ = '1.0'
class sfh_wrapper(object):
""" sfh_wrapper class. EzGal wraps this class around the sfh function. It takes care of the
details of passing or not passing parameters """
func = '' # sfh function
args = () # extra arguments to pass on call
has_args = False # whether or not there are actually any extra arguments
def __init__(self, function, args):
""" wrapper_obj = ezgal.sfhs.wrapper( function, args )
wrapper class. EzGal wraps this class around the sfh function. It takes care of the
details of passing or not passing parameters """
self.func = function
if type(args) == type(()) and len(args) > 0:
self.has_args = True
self.args = args
def __call__(self, val):
if self.has_args:
return self.func(val, *self.args)
else:
return self.func(val)
class numeric(object):
ages = np.array([])
sfr = np.array([])
def __init__(self, ages, sfr):
""" numeric_obj = ezgal.sfhs.numeric( ages, sfrs )
wrapper class for making a numeric star formation history callable.
Pass a list of ages and relative star formation rates. Ages should be in gyrs. """
self.ages = np.asarray(ages)
self.sfr = np.asarray(sfr)
def __call__(self, val):
return np.interp(val, self.ages, self.sfr)
def exponential(t, tau):
""" ezgal.sfhs.exponential( ages, tau )
exponentially decaying star formation history with
e-folding time scale of tau gyrs """
return np.exp(-1.0 * t / tau)
def constant(t, length):
""" ezgal.sfhs.constant( ages, length )
Burst of constant starformation from t=0 to t=length """
if type(t) == type(np.array([])):
sfr = np.zeros(t.size)
m = t <= length
if m.sum(): sfr[m] = 1.0
return sfr
else:
return 0.0 if t > length else 1.0
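# Hypothetical usage sketch: the star formation histories above are normally
# wrapped so they can be called with a single age argument (ages in Gyr).
# The tau, burst length and grid values below are arbitrary examples.
def _example_sfh_usage():
    tau_sfh = sfh_wrapper(exponential, (1.0,))   # tau = 1 Gyr e-folding
    burst = sfh_wrapper(constant, (0.1,))        # 100 Myr constant burst
    tabulated = numeric([0.0, 1.0, 2.0], [1.0, 0.5, 0.1])
    ages = np.array([0.05, 0.5, 5.0])
    return tau_sfh(ages), burst(ages), tabulated(ages)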
| mit | -3,856,164,806,952,304,000 | 26.613333 | 98 | 0.609367 | false | 3.480672 | false | false | false |
qedi-r/home-assistant | homeassistant/components/alexa/const.py | 2 | 10344 | """Constants for the Alexa integration."""
from collections import OrderedDict
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.components.climate import const as climate
from homeassistant.components import fan
DOMAIN = "alexa"
# Flash briefing constants
CONF_UID = "uid"
CONF_TITLE = "title"
CONF_AUDIO = "audio"
CONF_TEXT = "text"
CONF_DISPLAY_URL = "display_url"
CONF_FILTER = "filter"
CONF_ENTITY_CONFIG = "entity_config"
CONF_ENDPOINT = "endpoint"
CONF_CLIENT_ID = "client_id"
CONF_CLIENT_SECRET = "client_secret"
ATTR_UID = "uid"
ATTR_UPDATE_DATE = "updateDate"
ATTR_TITLE_TEXT = "titleText"
ATTR_STREAM_URL = "streamUrl"
ATTR_MAIN_TEXT = "mainText"
ATTR_REDIRECTION_URL = "redirectionURL"
SYN_RESOLUTION_MATCH = "ER_SUCCESS_MATCH"
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.0Z"
API_DIRECTIVE = "directive"
API_ENDPOINT = "endpoint"
API_EVENT = "event"
API_CONTEXT = "context"
API_HEADER = "header"
API_PAYLOAD = "payload"
API_SCOPE = "scope"
API_CHANGE = "change"
CONF_DESCRIPTION = "description"
CONF_DISPLAY_CATEGORIES = "display_categories"
API_TEMP_UNITS = {TEMP_FAHRENHEIT: "FAHRENHEIT", TEMP_CELSIUS: "CELSIUS"}
# Needs to be ordered dict for `async_api_set_thermostat_mode` which does a
# reverse mapping of this dict and we want to map the first occurrence of OFF
# back to HA state.
API_THERMOSTAT_MODES = OrderedDict(
[
(climate.HVAC_MODE_HEAT, "HEAT"),
(climate.HVAC_MODE_COOL, "COOL"),
(climate.HVAC_MODE_HEAT_COOL, "AUTO"),
(climate.HVAC_MODE_AUTO, "AUTO"),
(climate.HVAC_MODE_OFF, "OFF"),
(climate.HVAC_MODE_FAN_ONLY, "OFF"),
(climate.HVAC_MODE_DRY, "CUSTOM"),
]
)
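# Illustration of why insertion order matters (not the exact implementation in
# async_api_set_thermostat_mode): building the reverse lookup from the items
# in reverse order makes the first occurrence win, so "OFF" maps back to
# HVAC_MODE_OFF rather than HVAC_MODE_FAN_ONLY.
#
#     _reverse = {v: k for k, v in reversed(list(API_THERMOSTAT_MODES.items()))}
#     assert _reverse["OFF"] == climate.HVAC_MODE_OFF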
API_THERMOSTAT_MODES_CUSTOM = {climate.HVAC_MODE_DRY: "DEHUMIDIFY"}
API_THERMOSTAT_PRESETS = {climate.PRESET_ECO: "ECO"}
PERCENTAGE_FAN_MAP = {
fan.SPEED_OFF: 0,
fan.SPEED_LOW: 33,
fan.SPEED_MEDIUM: 66,
fan.SPEED_HIGH: 100,
}
RANGE_FAN_MAP = {
fan.SPEED_OFF: 0,
fan.SPEED_LOW: 1,
fan.SPEED_MEDIUM: 2,
fan.SPEED_HIGH: 3,
}
SPEED_FAN_MAP = {
0: fan.SPEED_OFF,
1: fan.SPEED_LOW,
2: fan.SPEED_MEDIUM,
3: fan.SPEED_HIGH,
}
class Cause:
"""Possible causes for property changes.
https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#cause-object
"""
# Indicates that the event was caused by a customer interaction with an
# application. For example, a customer switches on a light, or locks a door
# using the Alexa app or an app provided by a device vendor.
APP_INTERACTION = "APP_INTERACTION"
# Indicates that the event was caused by a physical interaction with an
# endpoint. For example manually switching on a light or manually locking a
# door lock
PHYSICAL_INTERACTION = "PHYSICAL_INTERACTION"
# Indicates that the event was caused by the periodic poll of an appliance,
# which found a change in value. For example, you might poll a temperature
# sensor every hour, and send the updated temperature to Alexa.
PERIODIC_POLL = "PERIODIC_POLL"
# Indicates that the event was caused by the application of a device rule.
# For example, a customer configures a rule to switch on a light if a
# motion sensor detects motion. In this case, Alexa receives an event from
# the motion sensor, and another event from the light to indicate that its
# state change was caused by the rule.
RULE_TRIGGER = "RULE_TRIGGER"
# Indicates that the event was caused by a voice interaction with Alexa.
# For example a user speaking to their Echo device.
VOICE_INTERACTION = "VOICE_INTERACTION"
class Catalog:
"""The Global Alexa catalog.
https://developer.amazon.com/docs/device-apis/resources-and-assets.html#global-alexa-catalog
You can use the global Alexa catalog for pre-defined names of devices, settings, values, and units.
This catalog is localized into all the languages that Alexa supports.
You can reference the following catalog of pre-defined friendly names.
Each item in the following list is an asset identifier followed by its supported friendly names.
The first friendly name for each identifier is the one displayed in the Alexa mobile app.
"""
LABEL_ASSET = "asset"
LABEL_TEXT = "text"
# Shower
DEVICENAME_SHOWER = "Alexa.DeviceName.Shower"
# Washer, Washing Machine
DEVICENAME_WASHER = "Alexa.DeviceName.Washer"
# Router, Internet Router, Network Router, Wifi Router, Net Router
DEVICENAME_ROUTER = "Alexa.DeviceName.Router"
# Fan, Blower
DEVICENAME_FAN = "Alexa.DeviceName.Fan"
# Air Purifier, Air Cleaner,Clean Air Machine
DEVICENAME_AIRPURIFIER = "Alexa.DeviceName.AirPurifier"
# Space Heater, Portable Heater
DEVICENAME_SPACEHEATER = "Alexa.DeviceName.SpaceHeater"
# Rain Head, Overhead shower, Rain Shower, Rain Spout, Rain Faucet
SHOWER_RAINHEAD = "Alexa.Shower.RainHead"
# Handheld Shower, Shower Wand, Hand Shower
SHOWER_HANDHELD = "Alexa.Shower.HandHeld"
# Water Temperature, Water Temp, Water Heat
SETTING_WATERTEMPERATURE = "Alexa.Setting.WaterTemperature"
# Temperature, Temp
SETTING_TEMPERATURE = "Alexa.Setting.Temperature"
# Wash Cycle, Wash Preset, Wash setting
SETTING_WASHCYCLE = "Alexa.Setting.WashCycle"
# 2.4G Guest Wi-Fi, 2.4G Guest Network, Guest Network 2.4G, 2G Guest Wifi
SETTING_2GGUESTWIFI = "Alexa.Setting.2GGuestWiFi"
# 5G Guest Wi-Fi, 5G Guest Network, Guest Network 5G, 5G Guest Wifi
SETTING_5GGUESTWIFI = "Alexa.Setting.5GGuestWiFi"
# Guest Wi-fi, Guest Network, Guest Net
SETTING_GUESTWIFI = "Alexa.Setting.GuestWiFi"
# Auto, Automatic, Automatic Mode, Auto Mode
SETTING_AUTO = "Alexa.Setting.Auto"
# #Night, Night Mode
SETTING_NIGHT = "Alexa.Setting.Night"
# Quiet, Quiet Mode, Noiseless, Silent
SETTING_QUIET = "Alexa.Setting.Quiet"
# Oscillate, Swivel, Oscillation, Spin, Back and forth
SETTING_OSCILLATE = "Alexa.Setting.Oscillate"
# Fan Speed, Airflow speed, Wind Speed, Air speed, Air velocity
SETTING_FANSPEED = "Alexa.Setting.FanSpeed"
# Preset, Setting
SETTING_PRESET = "Alexa.Setting.Preset"
# Mode
SETTING_MODE = "Alexa.Setting.Mode"
# Direction
SETTING_DIRECTION = "Alexa.Setting.Direction"
# Delicates, Delicate
VALUE_DELICATE = "Alexa.Value.Delicate"
# Quick Wash, Fast Wash, Wash Quickly, Speed Wash
VALUE_QUICKWASH = "Alexa.Value.QuickWash"
# Maximum, Max
VALUE_MAXIMUM = "Alexa.Value.Maximum"
# Minimum, Min
VALUE_MINIMUM = "Alexa.Value.Minimum"
# High
VALUE_HIGH = "Alexa.Value.High"
# Low
VALUE_LOW = "Alexa.Value.Low"
# Medium, Mid
VALUE_MEDIUM = "Alexa.Value.Medium"
class Unit:
"""Alexa Units of Measure.
https://developer.amazon.com/docs/device-apis/alexa-property-schemas.html#units-of-measure
"""
ANGLE_DEGREES = "Alexa.Unit.Angle.Degrees"
ANGLE_RADIANS = "Alexa.Unit.Angle.Radians"
DISTANCE_FEET = "Alexa.Unit.Distance.Feet"
DISTANCE_INCHES = "Alexa.Unit.Distance.Inches"
DISTANCE_KILOMETERS = "Alexa.Unit.Distance.Kilometers"
DISTANCE_METERS = "Alexa.Unit.Distance.Meters"
DISTANCE_MILES = "Alexa.Unit.Distance.Miles"
DISTANCE_YARDS = "Alexa.Unit.Distance.Yards"
MASS_GRAMS = "Alexa.Unit.Mass.Grams"
MASS_KILOGRAMS = "Alexa.Unit.Mass.Kilograms"
PERCENT = "Alexa.Unit.Percent"
TEMPERATURE_CELSIUS = "Alexa.Unit.Temperature.Celsius"
TEMPERATURE_DEGREES = "Alexa.Unit.Temperature.Degrees"
TEMPERATURE_FAHRENHEIT = "Alexa.Unit.Temperature.Fahrenheit"
TEMPERATURE_KELVIN = "Alexa.Unit.Temperature.Kelvin"
VOLUME_CUBICFEET = "Alexa.Unit.Volume.CubicFeet"
VOLUME_CUBICMETERS = "Alexa.Unit.Volume.CubicMeters"
VOLUME_GALLONS = "Alexa.Unit.Volume.Gallons"
VOLUME_LITERS = "Alexa.Unit.Volume.Liters"
VOLUME_PINTS = "Alexa.Unit.Volume.Pints"
VOLUME_QUARTS = "Alexa.Unit.Volume.Quarts"
WEIGHT_OUNCES = "Alexa.Unit.Weight.Ounces"
WEIGHT_POUNDS = "Alexa.Unit.Weight.Pounds"
class Inputs:
"""Valid names for the InputController.
https://developer.amazon.com/docs/device-apis/alexa-property-schemas.html#input
"""
VALID_SOURCE_NAME_MAP = {
"aux": "AUX 1",
"aux1": "AUX 1",
"aux2": "AUX 2",
"aux3": "AUX 3",
"aux4": "AUX 4",
"aux5": "AUX 5",
"aux6": "AUX 6",
"aux7": "AUX 7",
"bluray": "BLURAY",
"cable": "CABLE",
"cd": "CD",
"coax": "COAX 1",
"coax1": "COAX 1",
"coax2": "COAX 2",
"composite": "COMPOSITE 1",
"composite1": "COMPOSITE 1",
"dvd": "DVD",
"game": "GAME",
"gameconsole": "GAME",
"hdradio": "HD RADIO",
"hdmi": "HDMI 1",
"hdmi1": "HDMI 1",
"hdmi2": "HDMI 2",
"hdmi3": "HDMI 3",
"hdmi4": "HDMI 4",
"hdmi5": "HDMI 5",
"hdmi6": "HDMI 6",
"hdmi7": "HDMI 7",
"hdmi8": "HDMI 8",
"hdmi9": "HDMI 9",
"hdmi10": "HDMI 10",
"hdmiarc": "HDMI ARC",
"input": "INPUT 1",
"input1": "INPUT 1",
"input2": "INPUT 2",
"input3": "INPUT 3",
"input4": "INPUT 4",
"input5": "INPUT 5",
"input6": "INPUT 6",
"input7": "INPUT 7",
"input8": "INPUT 8",
"input9": "INPUT 9",
"input10": "INPUT 10",
"ipod": "IPOD",
"line": "LINE 1",
"line1": "LINE 1",
"line2": "LINE 2",
"line3": "LINE 3",
"line4": "LINE 4",
"line5": "LINE 5",
"line6": "LINE 6",
"line7": "LINE 7",
"mediaplayer": "MEDIA PLAYER",
"optical": "OPTICAL 1",
"optical1": "OPTICAL 1",
"optical2": "OPTICAL 2",
"phono": "PHONO",
"playstation": "PLAYSTATION",
"playstation3": "PLAYSTATION 3",
"playstation4": "PLAYSTATION 4",
"satellite": "SATELLITE",
"satellitetv": "SATELLITE",
"smartcast": "SMARTCAST",
"tuner": "TUNER",
"tv": "TV",
"usbdac": "USB DAC",
"video": "VIDEO 1",
"video1": "VIDEO 1",
"video2": "VIDEO 2",
"video3": "VIDEO 3",
"xbox": "XBOX",
}
| apache-2.0 | -6,711,995,617,098,553,000 | 28.138028 | 104 | 0.643755 | false | 2.972414 | false | false | false |
vchudinov/MultiNEAT | MultiNEAT/viz.py | 1 | 13459 | from _MultiNEAT import *
def Scale(a, a_min, a_max, a_tr_min, a_tr_max):
t_a_r = a_max - a_min
if t_a_r == 0:
return a_max
t_r = a_tr_max - a_tr_min
rel_a = (a - a_min) / t_a_r
return a_tr_min + t_r * rel_a
def Clamp(a, min, max):
if a < min:
return min
elif a > max:
return max
else:
return a
def AlmostEqual(a, b, margin):
if abs(a-b) > margin:
return False
else:
return True
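# Hypothetical usage sketch for the helpers above: Scale linearly remaps a
# value between ranges, Clamp bounds it, AlmostEqual compares with a margin.
def _example_helpers():
    x = Scale(0.5, 0.0, 1.0, 0.0, 100.0)   # -> 50.0
    y = Clamp(120, 0, 100)                 # -> 100
    z = AlmostEqual(0.501, 0.5, 0.01)      # -> True
    return x, y, z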
try:
import cv2
import numpy as np
from numpy import array, clip
cvnumpy_installed = True
except:
print ('Tip: install the OpenCV computer vision library (2.0+) with '
'Python bindings')
print (' to get convenient neural network visualization to NumPy '
'arrays')
cvnumpy_installed = False
try:
import matplotlib.pyplot as plt
matplotlib_installed = True
except:
matplotlib_installed = False
if matplotlib_installed:
def render_nn(nn, ax=None,
is_substrate=False,
details=False,
invert_yaxis=True,
connection_alpha=1.0):
if ax is None:
ax = plt.gca()
if is_substrate:
ax.set_xlim(-1.1, 1.1)
ax.set_ylim(-1.1, 1.1)
node_radius = 0.05
else:
ax.set_xlim(-0.05, 1.05)
ax.set_ylim(-0.05, 1.05)
node_radius = 0.03
if invert_yaxis: ax.invert_yaxis()
# get the max weight
max_weight = max([c.weight for c in nn.connections])
# connections
for connection in nn.connections:
n1 = nn.neurons[connection.source_neuron_idx]
n2 = nn.neurons[connection.target_neuron_idx]
if is_substrate:
n1_x, n1_y = n1.substrate_coords[0], n1.substrate_coords[1]
n2_x, n2_y = n2.substrate_coords[0], n2.substrate_coords[1]
else:
n1_x, n1_y = n1.x, n1.y
n2_x, n2_y = n2.x, n2.y
offsetx = n2_x - n1_x
offsety = n2_y - n1_y
if offsetx == 0 or offsety == 0:
continue
# if going left->right, offset is a bit to the left and vice versa
# same for y
if n1_x - offsetx < 0:
ox = -node_radius * 0.9
elif n1_x - offsetx > 0:
ox = node_radius * 0.9
else:
ox = 0
if n1_y - offsety < 0:
oy = -node_radius * 0.9
elif n1_y - offsety > 0:
oy = node_radius * 0.9
else:
oy = 0
wg = clip(connection.weight, -2, 2)
if connection.weight > 0.0:
ax.arrow(n1_x, n1_y, offsetx+ox, offsety+oy, head_width = node_radius*0.8,
head_length = node_radius*1.2, fc='red', ec='red', length_includes_head=True,
linewidth = abs(wg),
alpha = connection_alpha*np.clip(0.1+abs(connection.weight)/max_weight, 0, 1))
else:
ax.arrow(n1_x, n1_y, offsetx+ox, offsety+oy, head_width = node_radius*0.8,
head_length = node_radius*1.2, fc='blue', ec='blue', length_includes_head=True,
linewidth = abs(wg),
alpha = connection_alpha*np.clip(0.1+abs(connection.weight)/max_weight, 0, 1))
# neurons
for index in range(len(nn.neurons)):
n = nn.neurons[index]
if is_substrate:
nx, ny = n.substrate_coords[0], n.substrate_coords[1]
else:
nx, ny = n.x, n.y
a = n.activation
if a < 0:
clr = array([0.3,0.3,0.3]) + array([0,0,0.5]) * (-a)
else:
clr = array([0.3,0.3,0.3]) + array([0.5,0,0]) * (a)
clr = clip(clr, 0, 1)
if n.type == NeuronType.INPUT:
ax.add_patch(plt.Circle((nx, ny), node_radius, ec='green', fc=clr, linewidth=3, zorder=2))
elif n.type == NeuronType.BIAS:
ax.add_patch(plt.Circle((nx, ny), node_radius, ec='black', fc=(1,1,1), linewidth=3, zorder=2))
elif n.type == NeuronType.HIDDEN:
ax.add_patch(plt.Circle((nx, ny), node_radius, ec='grey', fc=clr, linewidth=3, zorder=2))
elif n.type == NeuronType.OUTPUT:
ax.add_patch(plt.Circle((nx, ny), node_radius, ec='brown', fc=clr, linewidth=3, zorder=2))
def plot_nn(nn, ax=None,
is_substrate=False,
details=False,
invert_yaxis=True,
connection_alpha=1.0):
# if this is a genome, make a NN from it
if type(nn) == Genome:
kk = NeuralNetwork()
nn.BuildPhenotype(kk)
nn = kk
if is_substrate:
return render_nn(nn, ax,
is_substrate=True,
details=details,
invert_yaxis=invert_yaxis)
# not a substrate, compute the node coordinates
for i, n in enumerate(nn.neurons):
nn.neurons[i].x = 0
nn.neurons[i].y = 0
rect_x = 0
rect_y = 0
rect_x_size = 1
rect_y_size = 1
neuron_radius = 0.03
MAX_DEPTH = 64
# for every depth, count how many nodes are on this depth
all_depths = np.linspace(0.0, 1.0, MAX_DEPTH)
for depth in all_depths:
neuron_count = 0
for neuron in nn.neurons:
if AlmostEqual(neuron.split_y, depth, 1.0 / (MAX_DEPTH+1)):
neuron_count += 1
if neuron_count == 0:
continue
# calculate x positions of neurons
xxpos = rect_x_size / (1 + neuron_count)
j = 0
for neuron in nn.neurons:
if AlmostEqual(neuron.split_y, depth, 1.0 / (MAX_DEPTH+1)):
neuron.x = rect_x + xxpos + j * (rect_x_size / (2 + neuron_count))
j = j + 1
# calculate y positions of nodes
for neuron in nn.neurons:
base_y = rect_y + neuron.split_y
size_y = rect_y_size - neuron_radius
if neuron.split_y == 0.0:
neuron.y = base_y * size_y + neuron_radius
else:
neuron.y = base_y * size_y
# done, render the nn
return render_nn(nn, ax,
is_substrate=False,
details=details,
invert_yaxis=invert_yaxis)
# Faster Neural Network display code
# image is a NumPy array
# rect is a tuple in the form (x, y, size_x, size_y)
if not cvnumpy_installed:
def DrawPhenotype(image, rect, nn, neuron_radius=15,
max_line_thickness=3, substrate=False):
print("OpenCV/NumPy don't appear to be installed")
raise NotImplementedError
else:
MAX_DEPTH = 64
def DrawPhenotype(image, rect, nn, neuron_radius=15,
max_line_thickness=3, substrate=False):
for i, n in enumerate(nn.neurons):
nn.neurons[i].x = 0
nn.neurons[i].y = 0
rect_x = rect[0]
rect_y = rect[1]
rect_x_size = rect[2]
rect_y_size = rect[3]
if not substrate:
depth = 0
# for every depth, count how many nodes are on this depth
all_depths = np.linspace(0.0, 1.0, MAX_DEPTH)
for depth in all_depths:
neuron_count = 0
for neuron in nn.neurons:
if AlmostEqual(neuron.split_y, depth, 1.0 / (MAX_DEPTH+1)):
neuron_count += 1
if neuron_count == 0:
continue
# calculate x positions of neurons
xxpos = rect_x_size / (1 + neuron_count)
j = 0
for neuron in nn.neurons:
if AlmostEqual(neuron.split_y, depth, 1.0 / (MAX_DEPTH+1)):
neuron.x = rect_x + xxpos + j * (rect_x_size / (2 + neuron_count))
j = j + 1
# calculate y positions of nodes
for neuron in nn.neurons:
base_y = rect_y + neuron.split_y
size_y = rect_y_size - neuron_radius
if neuron.split_y == 0.0:
neuron.y = base_y * size_y + neuron_radius
else:
neuron.y = base_y * size_y
else:
# HyperNEAT substrate
# only the first 2 dimensions are used for drawing
# if a layer is 1D, y values will be supplied to make 3 rows
# determine min/max coords in NN
xs = [(neuron.substrate_coords[0]) for neuron in nn.neurons]
ys = [(neuron.substrate_coords[1]) for neuron in nn.neurons]
min_x, min_y, max_x, max_y = min(xs), min(ys), max(xs), max(ys)
#dims = [len(neuron.substrate_coords) for neuron in nn.neurons]
for neuron in nn.neurons:
# TODO(jkoelker) Make the rect_x_size / 15 a variable
neuron.x = Scale(neuron.substrate_coords[0], min_x, max_x,
rect_x_size / 15,
rect_x_size - rect_x_size / 15)
neuron.y = Scale(neuron.substrate_coords[1], min_y, max_y,
rect_x_size / 15,
rect_y_size - rect_x_size / 15)
# the positions of neurons is computed, now we draw
# connections first
if len(nn.connections) > 0:
max_weight = max([abs(x.weight) for x in nn.connections])
else:
max_weight = 1.0
        if image.dtype in [np.uint8, np.uint16, np.uint32, np.uint,
                           np.int_, np.int8, np.int16, np.int32]:
magn = 255.0
else:
magn = 1.0
for conn in nn.connections:
thickness = conn.weight
thickness = Scale(thickness, 0, max_weight, 1, max_line_thickness)
thickness = Clamp(thickness, 1, max_line_thickness)
w = Scale(abs(conn.weight), 0.0, max_weight, 0.0, 1.0)
w = Clamp(w, 0.75, 1.0)
if conn.recur_flag:
if conn.weight < 0:
# green weight
color = (0, magn * w, 0)
else:
# white weight
color = (magn * w, magn * w, magn * w)
else:
if conn.weight < 0:
# blue weight
color = (0, 0, magn * w)
else:
# red weight
color = (magn * w, 0, 0)
if magn == 255:
color = tuple(int(x) for x in color)
# if the link is looping back on the same neuron, draw it with
# ellipse
if conn.source_neuron_idx == conn.target_neuron_idx:
pass # todo: later
else:
# Draw a line
pt1 = (int(nn.neurons[conn.source_neuron_idx].x),
int(nn.neurons[conn.source_neuron_idx].y))
pt2 = (int(nn.neurons[conn.target_neuron_idx].x),
int(nn.neurons[conn.target_neuron_idx].y))
cv2.line(image, pt1, pt2, color, int(thickness))
# draw all neurons
for neuron in nn.neurons:
pt = (int(neuron.x), int(neuron.y))
a = neuron.activation
if a < 0:
clr = array([0.3,0.3,0.3]) + array([0, 0, .7]) * (-a)
else:
clr = array([0.3,0.3,0.3]) + array([.7, .7, .7]) * (a)
clr = clip(clr, 0, 1)
            if image.dtype in [np.uint8, np.uint16, np.uint32, np.uint,
                               np.int_, np.int8, np.int16, np.int32]:
clr = (clr*255).astype(np.uint8)
clr = tuple(int(x) for x in clr)
a = Clamp(a, 0.3, 2.0)
if neuron.type == NeuronType.INPUT:
cv2.circle(image, pt, int(neuron_radius*a), clr, thickness=-1) # filled
cv2.circle(image, pt, neuron_radius, (0,255,0), thickness=2) # outline
elif neuron.type == NeuronType.BIAS:
cv2.circle(image, pt, int(neuron_radius*a), clr, thickness=-1) # filled
cv2.circle(image, pt, neuron_radius, (0,0,0), thickness=2) # outline
elif neuron.type == NeuronType.HIDDEN:
cv2.circle(image, pt, int(neuron_radius*a), clr, thickness=-1) # filled
cv2.circle(image, pt, neuron_radius, (127,127,127), thickness=2) # outline
elif neuron.type == NeuronType.OUTPUT:
cv2.circle(image, pt, int(neuron_radius*a), clr, thickness=-1) # filled first
cv2.circle(image, pt, neuron_radius, (255,255,0), thickness=2) # outline
| lgpl-3.0 | -9,214,734,946,703,446,000 | 35.57337 | 110 | 0.472695 | false | 3.607344 | false | false | false |
bibsian/database-development | test/manual_test_dialogobs.py | 1 | 6854 | #!/usr/bin/env python
import pytest
import pytestqt
from PyQt4 import QtGui
from collections import OrderedDict
import sys,os
from Views import ui_dialog_obs as obs
from poplerGUI import ui_logic_preview as prev
from poplerGUI import class_modelviewpandas as view
from poplerGUI import class_inputhandler as ini
from poplerGUI.logiclayer import class_userfacade as face
from poplerGUI.logiclayer import class_helpers as hlp
rootpath = os.path.dirname(os.path.dirname( __file__ ))
end = os.path.sep
sys.path.append(os.path.realpath(os.path.dirname(
rootpath)))
os.chdir(rootpath)
@pytest.fixture
def ObsDialog(site_handle_free, file_handle_free, meta_handle_free):
class ObsDialog(QtGui.QDialog, obs.Ui_Dialog):
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
            # Facade set up for the observation dialog box.
            # These inputs will already have been logged
            # on this machine by the time this phase is reached.
self.facade = face.Facade()
self.facade.input_register(meta_handle_free)
self.facade.meta_verify()
self.facade.input_register(file_handle_free)
self.facade.load_data()
self.facade.input_register(site_handle_free)
sitelevels = self.facade._data[
site_handle_free.lnedentry['study_site_key']].drop_duplicates().values.tolist()
self.facade.register_site_levels(sitelevels)
# Place holders for user inputs
self.obslned = {}
self.obsckbox = {}
self.obsraw = {}
self.available = None
self.null = None
# Place holder: Data Model/ Data model view
self.obsmodel = None
self.viewEdit = view.PandasTableModelEdit
# Placeholders: Data tables
self.obstable = None
# Placeholder: Director (table builder), log
self.obsdirector = None
self._log = None
# Placeholder for maindata Orms
self.obsorms = {}
# Actions
self.btnPreview.clicked.connect(self.submit_change)
self.btnSaveClose.clicked.connect(self.submit_change)
self.btnCancel.clicked.connect(self.close)
self.tablename = None
self.table = None
# Update boxes/preview box
self.message = QtGui.QMessageBox
self.error = QtGui.QErrorMessage()
self.preview = prev.TablePreview()
def submit_change(self):
sender = self.sender()
self.obslned = OrderedDict((
('spatial_replication_level_2', self.lnedRep2.text()),
('spatial_replication_level_3', self.lnedRep3.text()),
('spatial_replication_level_4', self.lnedRep4.text()),
('spatial_replication_level_5', self.lnedRep5.text()),
('structure_type_1', self.lnedStructure1.text()),
('structure_type_2', self.lnedStructure2.text()),
('structure_type_3', self.lnedStructure3.text()),
('structure_type_4', self.lnedStructure4.text()),
('treatment_type_1', self.lnedTreatment1.text()),
('treatment_type_2', self.lnedTreatment2.text()),
('treatment_type_3', self.lnedTreatment3.text()),
('unitobs', self.lnedRaw.text())
))
self.obsckbox = OrderedDict((
('spatial_replication_level_2', self.ckRep2.isChecked()),
('spatial_replication_level_3', self.ckRep3.isChecked()),
('spatial_replication_level_4', self.ckRep4.isChecked()),
('spatial_replication_level_5', self.ckRep5.isChecked()),
('structure_type_1', self.ckStructure1.isChecked()),
('structure_type_2', self.ckStructure2.isChecked()),
('structure_type_3', self.ckStructure3.isChecked()),
('structure_type_4', self.ckStructure4.isChecked()),
('treatment_type_1', self.ckTreatment1.isChecked()),
('treatment_type_2', self.ckTreatment2.isChecked()),
('treatment_type_3', self.ckTreatment3.isChecked()),
('unitobs', True)
))
self.table = {
'count_table': self.rbtnCount.isChecked(),
'biomass_table': self.rbtnBiomass.isChecked(),
'density_table': self.rbtnDensity.isChecked(),
'percent_cover_table': self.rbtnPercentcover.isChecked(),
'individual_table': self.rbtnIndividual.isChecked()
}
available = [
x for x,y in zip(
list(self.obslned.keys()), list(
self.obsckbox.values()))
if y is True
]
try:
self.tablename = [
x for x, y in
zip(list(self.table.keys()), list(self.table.values()))
if y is True
][0]
except Exception as e:
print(str(e))
self.error.showMessage('Select data type')
rawini = ini.InputHandler(
name='rawinfo',
tablename= self.tablename,
lnedentry= hlp.extract(self.obslned, available),
checks=self.obsckbox)
self.facade.input_register(rawini)
self.facade.create_log_record(self.tablename)
self._log = self.facade._tablelog[self.tablename]
try:
self.rawdirector = self.facade.make_table('rawinfo')
print('obs table build: ', self.rawdirector)
assert self.rawdirector._availdf is not None
except Exception as e:
print(str(e))
self._log.debug(str(e))
self.error.showMessage(
'Column(s) not identified')
raise AttributeError(
'Column(s) not identified: ' + str(e))
self.obstable = self.rawdirector._availdf.copy()
self.obsmodel = self.viewEdit(
self.obstable)
if sender is self.btnPreview:
self.preview.tabviewPreview.setModel(self.obsmodel)
self.preview.show()
elif sender is self.btnSaveClose:
self.facade.push_tables[self.tablename] = self.obstable
hlp.write_column_to_log(
self.obslned, self._log, self.tablename)
self.close()
return ObsDialog()
def test_dialog_site(qtbot, ObsDialog):
ObsDialog.show()
qtbot.addWidget(ObsDialog)
qtbot.stopForInteraction()
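# --- Illustrative sketch (not part of the test): submit_change() keeps only the
# column names whose checkbox is ticked by zipping the line-edit keys with the
# checkbox values.  The same selection rule in isolation, with made-up data:
def _example_checked_columns(lnedentry, checks):
    return [name for name, ticked in zip(lnedentry.keys(), checks.values())
            if ticked is True]
# _example_checked_columns(
#     OrderedDict([('unitobs', 'count'), ('treatment_type_1', '')]),
#     OrderedDict([('unitobs', True), ('treatment_type_1', False)]))
# -> ['unitobs']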
| mit | 4,524,446,183,923,021,300 | 41.04908 | 95 | 0.557339 | false | 4.104192 | false | false | false |
mileswwatkins/moxie | moxie/cores/ssh.py | 4 | 10112 | # Copyright (c) Paul R. Tagliamonte <[email protected]>, 2015
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import hashlib
import asyncio
import asyncssh
from moxie.facts import get_printable_fact
from aiocore import Service
MOTD = """
.,-:;//;:=,\r
. :H@@@MM@M#H/.,+%;,\r
,/X+ +M@@M@MM%=,-%HMMM@X/,\r
-+@MM; $M@@MH+-,;XMMMM@MMMM@+-\r
;@M@@M- XM@X;. -+XXXXXHHH@M@M#@/.\r
,%MM@@MH ,@%= .---=-=:=,.\r
=@#@@@MX.,\r
=-./@M@M$ ▗ ▌ ▗ ▐ ▗▀▖\r
X@/ -$MM/ ▛▚▀▖▞▀▖▚▗▘▄ ▞▀▖ ▞▀▘▞▀▘▛▀▖ ▄ ▛▀▖▜▀ ▞▀▖▙▀▖▐ ▝▀▖▞▀▖▞▀▖\r
,@M@H: :@: ▌▐ ▌▌ ▌▗▚ ▐ ▛▀ ▝▀▖▝▀▖▌ ▌ ▐ ▌ ▌▐ ▖▛▀ ▌ ▜▀ ▞▀▌▌ ▖▛▀\r
,@@@MMX, . ▘▝ ▘▝▀ ▘ ▘▀▘▝▀▘ ▀▀ ▀▀ ▘ ▘ ▀▘▘ ▘ ▀ ▝▀▘▘ ▐ ▝▀▘▝▀ ▝▀▘\r
.H@@@@M@+,\r
/MMMM@MMH/. XM@MH; =;\r
/%+%$XHH@$= , .H@@@@MX,\r
.=--------. -%H.,@@@@@MX,\r
.%MM@@@HHHXX$$$%+- .:$MMX =M@@MM%.\r
=XMMM@MM@MM#H;,-+HMM@M+ /MMMX=\r
=%@M@M#@$-.=$@MM@@@M; %M%=\r
,:+$+-,/H#MMMMMMM@= =,\r
=++%%%%+/:-.\r
\r
\r
\r
"""
COMMANDS = {}
def command(name):
def _(fn):
coro = asyncio.coroutine(fn)
COMMANDS[name] = coro
return coro
return _
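# --- Illustrative sketch (not part of the service): @command registers a
# coroutine under a name so the shell loop can later dispatch it via
# COMMANDS[cmd](stdin, stdout, stderr, args=args).  A hypothetical extra
# command would follow the same shape as list/run/kill below; it is left
# commented out so that nothing new is actually registered:
#
# @command("echo")
# def echo(stdin, stdout, stderr, *, args=None):
#     stdout.write(" ".join(args or []) + "\r\n")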
class StopItError(Exception):
pass
@command("exit")
def exit(stdin, stdout, stderr, args=None):
raise StopItError("Exit called")
@asyncio.coroutine
def readl(stdin, stdout, echo=True):
buf = ""
while not stdin.at_eof():
bytes_ = (yield from stdin.read())
for byte in bytes_:
obyte = ord(byte)
if obyte == 0x08 or obyte == 127:
if buf != "":
stdout.write('\x08 \x08')
buf = buf[:-1]
continue
if obyte < 0x20:
if obyte == 0x03:
raise StopItError("C-c")
if obyte == 0x04:
raise EOFError("EOF hit")
if obyte == 13:
stdout.write("\r\n")
return buf.strip()
continue
if echo:
stdout.write(byte)
buf += byte
return buf
@asyncio.coroutine
def error(name, stdin, stdout, stderr):
stderr.write("""\
Error! Command {} not found!
""".format(name))
@command("list")
def list(stdin, stdout, stderr, *, args=None):
database = Service.resolve("moxie.cores.database.DatabaseService")
jobs = yield from database.job.list()
for job in jobs:
stdout.write("[%s] - %s - %s\n\r" % (job.name, job.image, job.command))
@command("run")
def run(stdin, stdout, stderr, *, args=None):
run = Service.resolve("moxie.cores.run.RunService")
if len(args) != 1:
stderr.write("Just give me a single job name")
return
name, = args
stdout.write("Starting job %s...\r\n" % (name))
try:
yield from run.run(name, 'ssh')
except ValueError as e:
stderr.write(str(e))
return
stdout.write(" Wheatley: Surprise! We're doing it now!\r\n")
stdout.write("\n\r" * 3)
yield from attach(stdin, stdout, stderr, args=args)
@command("running")
def running(stdin, stdout, stderr, *, args=None):
container = Service.resolve("moxie.cores.container.ContainerService")
database = Service.resolve("moxie.cores.database.DatabaseService")
jobs = (yield from database.job.list())
running = (yield from container.list(all=True))
nmap = {z: x for x in [x._container for x in running] for z in x['Names']}
for job in jobs:
cname = "/{}".format(job.name)
container = nmap.get(cname, {})
if container is None:
pass
stdout.write("{name} - {status}\n\r".format(
name=job.name,
status=container.get('Status', "offline")
))
return
@command("kill")
def kill(stdin, stdout, stderr, *, args=None):
container = Service.resolve("moxie.cores.container.ContainerService")
if len(args) != 1:
stderr.write("Just give me a single job name\r")
return
name, = args
stdout.write("Killing job %s...\r\n\r\n" % (name))
stdout.write(
" GLaDOS: Ah! Well, this is the part where he kills us.\r\n"
)
try:
yield from container.kill(name)
except ValueError as e:
stderr.write(str(e))
return
stdout.write(
" Wheatley: Hello! This is the part where I kill you!\r\n\r\n"
)
stdout.write("Job terminated")
def aborter(stdin, *peers):
while True:
stream = yield from stdin.read()
if ord(stream) == 0x03:
for peer in peers:
peer.throw(StopItError("We got a C-c, abort"))
return
@command("attach")
def attach(stdin, stdout, stderr, *, args=None):
container = Service.resolve("moxie.cores.container.ContainerService")
if len(args) != 1:
stderr.write("Just give me a single job name")
return
name, = args
try:
container = yield from container.get(name)
except ValueError as e:
stderr.write(str(e))
return
@asyncio.coroutine
def writer():
logs = container.logs
logs.saferun()
queue = logs.listen()
while logs.running:
out = yield from queue.get()
stdout.write(out.decode('utf-8'))
# raise StopItError("Attach EOF")
stdout.write("[ process complete ]\r\n")
w = writer()
try:
yield from asyncio.gather(w, aborter(stdin, w))
except StopItError:
return
def handler(key, user, container):
@asyncio.coroutine
def handle_connection(stdin, stdout, stderr):
if user is None:
stderr.write("""\
\n\r
SSH works, but you did not provide a known key.\n\r
This may happen if your key is authorized but no User model is created\r
for you yet. Ping the cluster operator.\r
Your motives for doing whatever good deed you may have in mind will be\r
misinterpreted by somebody.\r
\r
Fingerprint: {}
\n\r
""".format(hashlib.sha224(key.export_public_key('pkcs1-der')).hexdigest()))
stdout.close()
stderr.close()
return
stdout.write("Hey! I know you! You're {}\n\r".format(user.name))
stdout.write(MOTD)
stdout.write("\r\n{}\r\n\r\n".format(get_printable_fact()))
while not stdin.at_eof():
stdout.write("* ")
try:
line = yield from readl(stdin, stdout)
except asyncssh.misc.TerminalSizeChanged:
stdout.write("\r")
continue
except (StopItError, EOFError):
stdout.close()
stderr.close()
break
if line == "":
continue
cmd, *args = line.split()
if cmd in COMMANDS:
yield from COMMANDS[cmd](stdin, stdout, stderr, args=args)
else:
yield from error(line, stdin, stdout, stderr)
stdout.write("\r\n")
stdout.close()
stderr.close()
return handle_connection
class MoxieSSHServer(asyncssh.SSHServer):
_keys = None
container = None
user = None
def begin_auth(self, username):
self.container = username
return True
def session_requested(self):
return handler(self.key, self.user, self.container)
def public_key_auth_supported(self):
return True
def validate_public_key(self, username, key):
self.key = key
if self._keys is None:
return False
valid = key in self._keys
if valid is False:
return False
self.user = self._keys[key]
return True
def fingerprint(key):
return hashlib.sha224(key.export_public_key('pkcs1-der')).hexdigest()
class SSHService(Service):
identifier = "moxie.cores.ssh.SSHService"
@asyncio.coroutine
def __call__(self):
database = Service.resolve("moxie.cores.database.DatabaseService")
# self.alert = CronService.resolve("moxie.cores.alert.AlertService")
# register an ssh callback for each thinger
ssh_host_keys = asyncssh.read_private_key_list('ssh_host_keys')
if MoxieSSHServer._keys is None:
authorized_keys = {}
for key in asyncssh.read_public_key_list('authorized_keys'):
authorized_keys[key] = (yield from
database.user.get_by_fingerprint(
fingerprint(key)))
MoxieSSHServer._keys = authorized_keys
obj = yield from asyncssh.create_server(
MoxieSSHServer, '0.0.0.0', 2222,
server_host_keys=ssh_host_keys
)
return obj
| mit | -2,430,743,110,413,162,500 | 27.514451 | 79 | 0.556457 | false | 3.363791 | false | false | false |
ichi23de5/ichi_Repo | sim/sim/models/sim.py | 1 | 3894 | # -*- coding: utf-8 -*-
from openerp import models, fields, api
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta
class Sim(models.Model):
_name = 'sim'
_order = 'date_sim desc'
_inherit = ['mail.thread', 'ir.needaction_mixin']
user_number = fields.Char(string='SIM User Number', required=True, copy=False,)
phone = fields.Char(string='SIM Tel Number', required=False, copy=False,)
sim_id = fields.Char(string='SIM ID', copy=False,)
date_sim = fields.Datetime(string='Record Date', required=True, index=True, copy=False, default=fields.Datetime.now,)
iccid_number = fields.Char(string='Iccid Number', copy=False,)
reception_date = fields.Date(string='Reception Date', required=True, copy=False, store=True, index=True,)
#### auto input ### with reception_date ###
arrival_date = fields.Date(string='Arrival Date', store=True)
charge_date = fields.Date(string='Freebit Charge Date', store=True)
min_month = fields.Date(string='Minimum Usage Date', store=True)
expiration_date = fields.Date(string='Expiration Date', store=True)
#### Don't need it now ####
# cloud_name = field.Selection([
# ('tkcloud', 'TKCLOUD'),
# ('eagleeye', 'Eagle Eye'),
# ], string='service', default='tkcloud',)
# emp_number = fields.Integer(string="Emp Number",)
@api.onchange('reception_date')
def _date_calc(self):
main = fields.Datetime.from_string(self.reception_date)
if main:
arr = main + relativedelta(days=2)
self.update({'arrival_date': arr})
self.update({'charge_date': arr})
min = main + relativedelta(days=2, months=12)
self.update({'min_month': min})
exp = main + relativedelta(days=2, months=24)
self.update({'expiration_date': exp})
return
@api.multi
def applicate_sim(self):
pass
@api.multi
def arrival_sim(self):
pass
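# --- Illustrative sketch (not part of the model): the date arithmetic used in
# _date_calc above, standalone.  The reception date in the comment is made up.
def _example_sim_dates(reception):
    arrival = reception + relativedelta(days=2)
    minimum_use = reception + relativedelta(days=2, months=12)
    expiration = reception + relativedelta(days=2, months=24)
    return arrival, minimum_use, expiration
# _example_sim_dates(date(2016, 4, 1))
# -> (date(2016, 4, 3), date(2017, 4, 3), date(2018, 4, 3))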
class SimType(models.Model):
_name = 'sim_type'
_order = 'date_sim desc'
sim_type_id = fields.Char(string='SIM Type ID', required=True, copy=False,)
max_storage = fields.Char(string='Max Strage')
business_number = fields.Char(string='Business Number')
size_code = fields.Char(string='Size Code')
    deta_code = fields.Char(string='Billing Start Date')  # 'Kakinkaishibi'
    datermination = fields.Char(string='Calculation Method')  # 'Sansyutuhouhou'
    pay_per_up = fields.Char(string='Metered Charge Up')  # 'Juuryoukakin Up'
    pay_per_down = fields.Char(string='Metered Charge Down')  # 'Juuryoukakin Down'
min_use_time = fields.Char(string='Minimum Perion of Use')
### charge ###
basic_charge = fields.Integer(string='Basic Charge')
cancel_charge = fields.Integer(string='Cancel Charge')
admin_charge = fields.Integer(string='Admin Charge')
### commission ###
opening_sim = fields.Integer(string='Opening Sim Commission')
opening_sim_up = fields.Integer(string='Opening Sim Commission up')
unreturned_sim = fields.Integer(string='Unreturned Sim Commission')
reissure_sim = fields.Integer(string='Reissure Sim Commission')
change_plan = fields.Integer(string='Change Plan Commission')
change_size = fields.Integer(string='Change Size Commission')
redelivery_sim = fields.Integer(string='Redelivery Sim Commission')
stop_sim = fields.Integer(string='Stop Sim Commission')
delivery_sim = fields.Integer(string='Delivery Sim Commission')
universal_service = fields.Integer(string='Universal Service Commission')
cancel_charge_first = fields.Integer(string='Cancel Charge 1month')
cancel_charge_year = fields.Integer(string='Cancel Charge Year')
charge100 = fields.Integer(string='100MB')
charge500 = fields.Integer(string='500MB')
charge1000 = fields.Integer(string='1000MB')
ip_month = fields.Integer(string='IP Charge')
date_model = fields.Char(string='Date Model')
| gpl-3.0 | -7,608,304,808,753,028,000 | 40.870968 | 121 | 0.67283 | false | 3.562672 | false | false | false |
based-at/odoo-addons | stock_warehouse_access/stock_access.py | 1 | 2673 | # -*- coding: utf-8 -*-
##############################################################################
#
# Limit User Access per Warehouse
# Copyright (C) 2016 Vadim (<http://based.at>).
# OpenERP, Open Source Management Solution
# Copyright (c) 2004-2016 Odoo S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class ResUsers(models.Model):
_inherit = 'res.users'
@api.one
def _get_location_ids(self):
""" Get the list of locations that either not attached to a warehouse (virtual location, asset Location)
OR user has access to the warehouse this location belongs to
"""
locations = self.env['stock.location'].search([])
if self.warehouse_ids:
# Allow locations that are not attached to a warehouse
w_ids = [False] + [w.id for w in self.warehouse_ids]
locations = locations.filtered(lambda r: locations.get_warehouse(r) in w_ids)
self.location_ids = locations
warehouse_ids = fields.Many2many('stock.warehouse', string='Allowed Warehouses',
help='List of allowed warehouses. If empty, the user can access all warehouses.')
location_ids = fields.One2many('stock.location', string='Allowed Locations', compute='_get_location_ids')
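# --- Illustrative sketch (not part of the addon): the allowed-location rule in
# _get_location_ids, reduced to plain Python.  `location_to_wh` maps a location
# name to its warehouse id (False when it belongs to none); the sample values
# in the comment are made up.
def _example_allowed_locations(location_to_wh, allowed_wh_ids):
    if not allowed_wh_ids:
        return list(location_to_wh)  # no restriction configured for the user
    allowed = [False] + list(allowed_wh_ids)
    return [loc for loc, wh in location_to_wh.items() if wh in allowed]
# _example_allowed_locations({'Stock': 1, 'Inter-wh': False, 'WH2/Stock': 2}, [1])
# -> ['Stock', 'Inter-wh']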
class StockWarehouse(models.Model):
_inherit = 'stock.warehouse'
@api.one
def _get_user_ids(self):
""" Get the list of "Warehouse / Users" who can access this warehouse
"""
user_ids = [user.id for user in self.env['ir.model.data'].xmlid_to_object('stock.group_stock_user').users
if not user.warehouse_ids or self in user.warehouse_ids]
self.user_ids = user_ids
user_ids = fields.One2many('res.users', string='Authorized Users', compute='_get_user_ids',
help='List of users authorized to access the warehouse.')
| agpl-3.0 | -8,148,570,591,550,810,000 | 44.305085 | 118 | 0.621399 | false | 4.222749 | false | false | false |
mihaip/NewsBlur | vendor/munin/memcached.py | 5 | 1089 | #!/usr/bin/env python
import os
import socket
from vendor.munin import MuninPlugin
class MuninMemcachedPlugin(MuninPlugin):
category = "Memcached"
def autoconf(self):
try:
self.get_stats()
except socket.error:
return False
return True
def get_stats(self):
host = os.environ.get('MEMCACHED_HOST') or '127.0.0.1'
port = int(os.environ.get('MEMCACHED_PORT') or '11211')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send("stats\n")
buf = ""
while 'END\r\n' not in buf:
buf += s.recv(1024)
stats = (x.split(' ', 2) for x in buf.split('\r\n'))
stats = dict((x[1], x[2]) for x in stats if x[0] == 'STAT')
s.close()
return stats
def execute(self):
stats = self.get_stats()
values = {}
for k, v in self.fields:
try:
value = stats[k]
except KeyError:
value = "U"
values[k] = value
return values
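# --- Illustrative sketch (not part of the plugin): the STAT-line parsing done
# in get_stats(), applied to a canned response instead of a live socket.
def _example_parse_stats(buf):
    stats = (x.split(' ', 2) for x in buf.split('\r\n'))
    return dict((x[1], x[2]) for x in stats if x[0] == 'STAT')
# _example_parse_stats("STAT curr_items 42\r\nSTAT bytes 1024\r\nEND\r\n")
# -> {'curr_items': '42', 'bytes': '1024'}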
| mit | -4,773,181,607,346,177,000 | 26.225 | 67 | 0.522498 | false | 3.512903 | false | false | false |
bruderstein/PythonScript | PythonLib/full/xml/etree/ElementTree.py | 1 | 74022 | """Lightweight XML support for Python.
XML is an inherently hierarchical data format, and the most natural way to
represent it is with a tree. This module has two classes for this purpose:
1. ElementTree represents the whole XML document as a tree and
2. Element represents a single node in this tree.
Interactions with the whole document (reading and writing to/from files) are
usually done on the ElementTree level. Interactions with a single XML element
and its sub-elements are done on the Element level.
Element is a flexible container object designed to store hierarchical data
structures in memory. It can be described as a cross between a list and a
dictionary. Each Element has a number of properties associated with it:
'tag' - a string containing the element's name.
'attributes' - a Python dictionary storing the element's attributes.
'text' - a string containing the element's text content.
'tail' - an optional string containing text after the element's end tag.
And a number of child elements stored in a Python sequence.
To create an element instance, use the Element constructor,
or the SubElement factory function.
You can also use the ElementTree class to wrap an element structure
and convert it to and from XML.
"""
#---------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
#
# ElementTree
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring", "fromstringlist",
"indent", "iselement", "iterparse",
"parse", "ParseError",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring", "tostringlist",
"TreeBuilder",
"VERSION",
"XML", "XMLID",
"XMLParser", "XMLPullParser",
"register_namespace",
"canonicalize", "C14NWriterTarget",
]
VERSION = "1.3.0"
import sys
import re
import warnings
import io
import collections
import collections.abc
import contextlib
from . import ElementPath
class ParseError(SyntaxError):
"""An error when parsing an XML document.
In addition to its exception value, a ParseError contains
two extra attributes:
'code' - the specific exception code
'position' - the line and column of the error
"""
pass
# --------------------------------------------------------------------
def iselement(element):
"""Return True if *element* appears to be an Element."""
return hasattr(element, 'tag')
class Element:
"""An XML element.
This class is the reference implementation of the Element interface.
An element's length is its number of subelements. That means if you
want to check if an element is truly empty, you should check BOTH
its length AND its text attribute.
The element tag, attribute names, and attribute values can be either
bytes or strings.
*tag* is the element name. *attrib* is an optional dictionary containing
element attributes. *extra* are additional element attributes given as
keyword arguments.
Example form:
<tag attrib>text<child/>...</tag>tail
"""
tag = None
"""The element's name."""
attrib = None
"""Dictionary of the element's attributes."""
text = None
"""
Text before first subelement. This is either a string or the value None.
Note that if there is no text, this attribute may be either
None or the empty string, depending on the parser.
"""
tail = None
"""
Text after this element's end tag, but before the next sibling element's
start tag. This is either a string or the value None. Note that if there
was no text, this attribute may be either None or an empty string,
depending on the parser.
"""
def __init__(self, tag, attrib={}, **extra):
if not isinstance(attrib, dict):
raise TypeError("attrib must be dict, not %s" % (
attrib.__class__.__name__,))
self.tag = tag
self.attrib = {**attrib, **extra}
self._children = []
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__, self.tag, id(self))
def makeelement(self, tag, attrib):
"""Create a new element with the same type.
*tag* is a string containing the element name.
*attrib* is a dictionary containing the element attributes.
Do not call this method, use the SubElement factory function instead.
"""
return self.__class__(tag, attrib)
def copy(self):
"""Return copy of current element.
This creates a shallow copy. Subelements will be shared with the
original tree.
"""
warnings.warn(
"elem.copy() is deprecated. Use copy.copy(elem) instead.",
DeprecationWarning
)
return self.__copy__()
def __copy__(self):
elem = self.makeelement(self.tag, self.attrib)
elem.text = self.text
elem.tail = self.tail
elem[:] = self
return elem
def __len__(self):
return len(self._children)
def __bool__(self):
warnings.warn(
"The behavior of this method will change in future versions. "
"Use specific 'len(elem)' or 'elem is not None' test instead.",
FutureWarning, stacklevel=2
)
return len(self._children) != 0 # emulate old behaviour, for now
def __getitem__(self, index):
return self._children[index]
def __setitem__(self, index, element):
if isinstance(index, slice):
for elt in element:
self._assert_is_element(elt)
else:
self._assert_is_element(element)
self._children[index] = element
def __delitem__(self, index):
del self._children[index]
def append(self, subelement):
"""Add *subelement* to the end of this element.
The new element will appear in document order after the last existing
subelement (or directly after the text, if it's the first subelement),
but before the end tag for this element.
"""
self._assert_is_element(subelement)
self._children.append(subelement)
def extend(self, elements):
"""Append subelements from a sequence.
*elements* is a sequence with zero or more elements.
"""
for element in elements:
self._assert_is_element(element)
self._children.append(element)
def insert(self, index, subelement):
"""Insert *subelement* at position *index*."""
self._assert_is_element(subelement)
self._children.insert(index, subelement)
def _assert_is_element(self, e):
# Need to refer to the actual Python implementation, not the
# shadowing C implementation.
if not isinstance(e, _Element_Py):
raise TypeError('expected an Element, not %s' % type(e).__name__)
def remove(self, subelement):
"""Remove matching subelement.
Unlike the find methods, this method compares elements based on
identity, NOT ON tag value or contents. To remove subelements by
other means, the easiest way is to use a list comprehension to
select what elements to keep, and then use slice assignment to update
the parent element.
ValueError is raised if a matching element could not be found.
"""
# assert iselement(element)
self._children.remove(subelement)
def find(self, path, namespaces=None):
"""Find first matching element by tag name or path.
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return the first matching element, or None if no element was found.
"""
return ElementPath.find(self, path, namespaces)
def findtext(self, path, default=None, namespaces=None):
"""Find text for first matching element by tag name or path.
*path* is a string having either an element tag or an XPath,
*default* is the value to return if the element was not found,
*namespaces* is an optional mapping from namespace prefix to full name.
Return text content of first matching element, or default value if
none was found. Note that if an element is found having no text
content, the empty string is returned.
"""
return ElementPath.findtext(self, path, default, namespaces)
def findall(self, path, namespaces=None):
"""Find all matching subelements by tag name or path.
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Returns list containing all matching elements in document order.
"""
return ElementPath.findall(self, path, namespaces)
def iterfind(self, path, namespaces=None):
"""Find all matching subelements by tag name or path.
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return an iterable yielding all matching elements in document order.
"""
return ElementPath.iterfind(self, path, namespaces)
def clear(self):
"""Reset element.
This function removes all subelements, clears all attributes, and sets
the text and tail attributes to None.
"""
self.attrib.clear()
self._children = []
self.text = self.tail = None
def get(self, key, default=None):
"""Get element attribute.
Equivalent to attrib.get, but some implementations may handle this a
bit more efficiently. *key* is what attribute to look for, and
*default* is what to return if the attribute was not found.
Returns a string containing the attribute value, or the default if
attribute was not found.
"""
return self.attrib.get(key, default)
def set(self, key, value):
"""Set element attribute.
Equivalent to attrib[key] = value, but some implementations may handle
this a bit more efficiently. *key* is what attribute to set, and
*value* is the attribute value to set it to.
"""
self.attrib[key] = value
def keys(self):
"""Get list of attribute names.
Names are returned in an arbitrary order, just like an ordinary
Python dict. Equivalent to attrib.keys()
"""
return self.attrib.keys()
def items(self):
"""Get element attributes as a sequence.
The attributes are returned in arbitrary order. Equivalent to
attrib.items().
Return a list of (name, value) tuples.
"""
return self.attrib.items()
def iter(self, tag=None):
"""Create tree iterator.
The iterator loops over the element and all subelements in document
order, returning all elements with a matching tag.
If the tree structure is modified during iteration, new or removed
elements may or may not be included. To get a stable set, use the
list() function on the iterator, and loop over the resulting list.
*tag* is what tags to look for (default is to return all elements)
Return an iterator containing all the matching elements.
"""
if tag == "*":
tag = None
if tag is None or self.tag == tag:
yield self
for e in self._children:
yield from e.iter(tag)
def itertext(self):
"""Create text iterator.
The iterator loops over the element and all subelements in document
order, returning all inner text.
"""
tag = self.tag
if not isinstance(tag, str) and tag is not None:
return
t = self.text
if t:
yield t
for e in self:
yield from e.itertext()
t = e.tail
if t:
yield t
def SubElement(parent, tag, attrib={}, **extra):
"""Subelement factory which creates an element instance, and appends it
to an existing parent.
The element tag, attribute names, and attribute values can be either
bytes or Unicode strings.
    *parent* is the parent element, *tag* is the subelement's name, *attrib* is
    an optional dictionary containing element attributes, *extra* are
additional attributes given as keyword arguments.
"""
attrib = {**attrib, **extra}
element = parent.makeelement(tag, attrib)
parent.append(element)
return element
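# --- Illustrative sketch (not part of the module): Element and SubElement
# compose a tree in memory; tostring() further below serializes it.
def _example_build_tree():
    root = Element("root", lang="en")
    child = SubElement(root, "child")
    child.text = "hello"
    return root
# tostring(_example_build_tree())
# -> b'<root lang="en"><child>hello</child></root>'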
def Comment(text=None):
"""Comment element factory.
This function creates a special element which the standard serializer
serializes as an XML comment.
*text* is a string containing the comment string.
"""
element = Element(Comment)
element.text = text
return element
def ProcessingInstruction(target, text=None):
"""Processing Instruction element factory.
This function creates a special element which the standard serializer
    serializes as an XML processing instruction.
*target* is a string containing the processing instruction, *text* is a
string containing the processing instruction contents, if any.
"""
element = Element(ProcessingInstruction)
element.text = target
if text:
element.text = element.text + " " + text
return element
PI = ProcessingInstruction
class QName:
"""Qualified name wrapper.
This class can be used to wrap a QName attribute value in order to get
proper namespace handing on output.
*text_or_uri* is a string containing the QName value either in the form
{uri}local, or if the tag argument is given, the URI part of a QName.
*tag* is an optional argument which if given, will make the first
argument (text_or_uri) be interpreted as a URI, and this argument (tag)
be interpreted as a local name.
"""
def __init__(self, text_or_uri, tag=None):
if tag:
text_or_uri = "{%s}%s" % (text_or_uri, tag)
self.text = text_or_uri
def __str__(self):
return self.text
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.text)
def __hash__(self):
return hash(self.text)
def __le__(self, other):
if isinstance(other, QName):
return self.text <= other.text
return self.text <= other
def __lt__(self, other):
if isinstance(other, QName):
return self.text < other.text
return self.text < other
def __ge__(self, other):
if isinstance(other, QName):
return self.text >= other.text
return self.text >= other
def __gt__(self, other):
if isinstance(other, QName):
return self.text > other.text
return self.text > other
def __eq__(self, other):
if isinstance(other, QName):
return self.text == other.text
return self.text == other
# --------------------------------------------------------------------
class ElementTree:
"""An XML element hierarchy.
This class also provides support for serialization to and from
standard XML.
*element* is an optional root element node,
*file* is an optional file handle or file name of an XML file whose
contents will be used to initialize the tree with.
"""
def __init__(self, element=None, file=None):
# assert element is None or iselement(element)
self._root = element # first node
if file:
self.parse(file)
def getroot(self):
"""Return root element of this tree."""
return self._root
def _setroot(self, element):
"""Replace root element of this tree.
This will discard the current contents of the tree and replace it
with the given element. Use with care!
"""
# assert iselement(element)
self._root = element
def parse(self, source, parser=None):
"""Load external XML document into element tree.
*source* is a file name or file object, *parser* is an optional parser
instance that defaults to XMLParser.
ParseError is raised if the parser fails to parse the document.
Returns the root element of the given source document.
"""
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
try:
if parser is None:
# If no parser was specified, create a default XMLParser
parser = XMLParser()
if hasattr(parser, '_parse_whole'):
# The default XMLParser, when it comes from an accelerator,
# can define an internal _parse_whole API for efficiency.
# It can be used to parse the whole source without feeding
# it with chunks.
self._root = parser._parse_whole(source)
return self._root
while True:
data = source.read(65536)
if not data:
break
parser.feed(data)
self._root = parser.close()
return self._root
finally:
if close_source:
source.close()
def iter(self, tag=None):
"""Create and return tree iterator for the root element.
The iterator loops over all elements in this tree, in document order.
*tag* is a string with the tag name to iterate over
(default is to return all elements).
"""
# assert self._root is not None
return self._root.iter(tag)
def find(self, path, namespaces=None):
"""Find first matching element by tag name or path.
Same as getroot().find(path), which is Element.find()
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return the first matching element, or None if no element was found.
"""
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.find(path, namespaces)
def findtext(self, path, default=None, namespaces=None):
"""Find first matching element by tag name or path.
Same as getroot().findtext(path), which is Element.findtext()
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
        Return text content of the first matching element, or the default
        value if no element was found.
"""
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findtext(path, default, namespaces)
def findall(self, path, namespaces=None):
"""Find all matching subelements by tag name or path.
Same as getroot().findall(path), which is Element.findall().
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return list containing all matching elements in document order.
"""
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findall(path, namespaces)
def iterfind(self, path, namespaces=None):
"""Find all matching subelements by tag name or path.
Same as getroot().iterfind(path), which is element.iterfind()
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return an iterable yielding all matching elements in document order.
"""
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.iterfind(path, namespaces)
def write(self, file_or_filename,
encoding=None,
xml_declaration=None,
default_namespace=None,
method=None, *,
short_empty_elements=True):
"""Write element tree to a file as XML.
Arguments:
*file_or_filename* -- file name or a file object opened for writing
*encoding* -- the output encoding (default: US-ASCII)
*xml_declaration* -- bool indicating if an XML declaration should be
added to the output. If None, an XML declaration
is added if encoding IS NOT either of:
US-ASCII, UTF-8, or Unicode
*default_namespace* -- sets the default XML namespace (for "xmlns")
          *method* -- either "xml" (default), "html", "text", or "c14n"
*short_empty_elements* -- controls the formatting of elements
that contain no content. If True (default)
they are emitted as a single self-closed
tag, otherwise they are emitted as a pair
of start/end tags
"""
if not method:
method = "xml"
elif method not in _serialize:
raise ValueError("unknown method %r" % method)
if not encoding:
if method == "c14n":
encoding = "utf-8"
else:
encoding = "us-ascii"
enc_lower = encoding.lower()
with _get_writer(file_or_filename, enc_lower) as write:
if method == "xml" and (xml_declaration or
(xml_declaration is None and
enc_lower not in ("utf-8", "us-ascii", "unicode"))):
declared_encoding = encoding
if enc_lower == "unicode":
# Retrieve the default encoding for the xml declaration
import locale
declared_encoding = locale.getpreferredencoding()
write("<?xml version='1.0' encoding='%s'?>\n" % (
declared_encoding,))
if method == "text":
_serialize_text(write, self._root)
else:
qnames, namespaces = _namespaces(self._root, default_namespace)
serialize = _serialize[method]
serialize(write, self._root, qnames, namespaces,
short_empty_elements=short_empty_elements)
def write_c14n(self, file):
# lxml.etree compatibility. use output method instead
return self.write(file, method="c14n")
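# --- Illustrative sketch (not part of the module): writing a tree to an
# in-memory text stream with encoding="unicode".
def _example_write_unicode(elem):
    buf = io.StringIO()
    ElementTree(elem).write(buf, encoding="unicode")
    return buf.getvalue()
# _example_write_unicode(Element("empty")) -> '<empty />'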
# --------------------------------------------------------------------
# serialization support
@contextlib.contextmanager
def _get_writer(file_or_filename, encoding):
# returns text write method and release all resources after using
try:
write = file_or_filename.write
except AttributeError:
# file_or_filename is a file name
if encoding == "unicode":
file = open(file_or_filename, "w")
else:
file = open(file_or_filename, "w", encoding=encoding,
errors="xmlcharrefreplace")
with file:
yield file.write
else:
# file_or_filename is a file-like object
# encoding determines if it is a text or binary writer
if encoding == "unicode":
# use a text writer as is
yield write
else:
# wrap a binary writer with TextIOWrapper
with contextlib.ExitStack() as stack:
if isinstance(file_or_filename, io.BufferedIOBase):
file = file_or_filename
elif isinstance(file_or_filename, io.RawIOBase):
file = io.BufferedWriter(file_or_filename)
# Keep the original file open when the BufferedWriter is
# destroyed
stack.callback(file.detach)
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
file = io.BufferedIOBase()
file.writable = lambda: True
file.write = write
try:
# TextIOWrapper uses this methods to determine
# if BOM (for UTF-16, etc) should be added
file.seekable = file_or_filename.seekable
file.tell = file_or_filename.tell
except AttributeError:
pass
file = io.TextIOWrapper(file,
encoding=encoding,
errors="xmlcharrefreplace",
newline="\n")
# Keep the original file open when the TextIOWrapper is
# destroyed
stack.callback(file.detach)
yield file.write
def _namespaces(elem, default_namespace=None):
# identify namespaces used in this tree
# maps qnames to *encoded* prefix:local names
qnames = {None: None}
# maps uri:s to prefixes
namespaces = {}
if default_namespace:
namespaces[default_namespace] = ""
def add_qname(qname):
# calculate serialized qname representation
try:
if qname[:1] == "{":
uri, tag = qname[1:].rsplit("}", 1)
prefix = namespaces.get(uri)
if prefix is None:
prefix = _namespace_map.get(uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
if prefix != "xml":
namespaces[uri] = prefix
if prefix:
qnames[qname] = "%s:%s" % (prefix, tag)
else:
qnames[qname] = tag # default element
else:
if default_namespace:
# FIXME: can this be handled in XML 1.0?
raise ValueError(
"cannot use non-qualified names with "
"default_namespace option"
)
qnames[qname] = qname
except TypeError:
_raise_serialization_error(qname)
# populate qname and namespaces table
for elem in elem.iter():
tag = elem.tag
if isinstance(tag, QName):
if tag.text not in qnames:
add_qname(tag.text)
elif isinstance(tag, str):
if tag not in qnames:
add_qname(tag)
elif tag is not None and tag is not Comment and tag is not PI:
_raise_serialization_error(tag)
for key, value in elem.items():
if isinstance(key, QName):
key = key.text
if key not in qnames:
add_qname(key)
if isinstance(value, QName) and value.text not in qnames:
add_qname(value.text)
text = elem.text
if isinstance(text, QName) and text.text not in qnames:
add_qname(text.text)
return qnames, namespaces
def _serialize_xml(write, elem, qnames, namespaces,
short_empty_elements, **kwargs):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % text)
elif tag is ProcessingInstruction:
write("<?%s?>" % text)
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None,
short_empty_elements=short_empty_elements)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k,
_escape_attrib(v)
))
for k, v in items:
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib(v)
write(" %s=\"%s\"" % (qnames[k], v))
if text or len(elem) or not short_empty_elements:
write(">")
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None,
short_empty_elements=short_empty_elements)
write("</" + tag + ">")
else:
write(" />")
if elem.tail:
write(_escape_cdata(elem.tail))
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta", "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
def _serialize_html(write, elem, qnames, namespaces, **kwargs):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k,
_escape_attrib(v)
))
for k, v in items:
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib_html(v)
# FIXME: handle boolean attributes
write(" %s=\"%s\"" % (qnames[k], v))
write(">")
ltag = tag.lower()
if text:
if ltag == "script" or ltag == "style":
write(text)
else:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None)
if ltag not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail))
def _serialize_text(write, elem):
for part in elem.itertext():
write(part)
if elem.tail:
write(elem.tail)
_serialize = {
"xml": _serialize_xml,
"html": _serialize_html,
"text": _serialize_text,
# this optional method is imported at the end of the module
# "c14n": _serialize_c14n,
}
def register_namespace(prefix, uri):
"""Register a namespace prefix.
The registry is global, and any existing mapping for either the
given prefix or the namespace URI will be removed.
*prefix* is the namespace prefix, *uri* is a namespace uri. Tags and
attributes in this namespace will be serialized with prefix if possible.
ValueError is raised if prefix is reserved or is invalid.
"""
if re.match(r"ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in list(_namespace_map.items()):
if k == uri or v == prefix:
del _namespace_map[k]
_namespace_map[uri] = prefix
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
# dublin core
"http://purl.org/dc/elements/1.1/": "dc",
}
# For tests and troubleshooting
register_namespace._namespace_map = _namespace_map
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _escape_cdata(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 characters, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
# Although section 2.11 of the XML specification states that CR or
# CR LN should be replaced with just LN, it applies only to EOLNs
# which take part of organizing file into lines. Within attributes,
# we are replacing these with entity numbers, so they do not count.
# http://www.w3.org/TR/REC-xml/#sec-line-ends
# The current solution, contained in following six lines, was
# discussed in issue 17582 and 39011.
if "\r" in text:
text = text.replace("\r", " ")
if "\n" in text:
text = text.replace("\n", " ")
if "\t" in text:
text = text.replace("\t", "	")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
def tostring(element, encoding=None, method=None, *,
xml_declaration=None, default_namespace=None,
short_empty_elements=True):
"""Generate string representation of XML element.
All subelements are included. If encoding is "unicode", a string
is returned. Otherwise a bytestring is returned.
*element* is an Element instance, *encoding* is an optional output
encoding defaulting to US-ASCII, *method* is an optional output which can
be one of "xml" (default), "html", "text" or "c14n", *default_namespace*
sets the default XML namespace (for "xmlns").
Returns an (optionally) encoded string containing the XML data.
"""
stream = io.StringIO() if encoding == 'unicode' else io.BytesIO()
ElementTree(element).write(stream, encoding,
xml_declaration=xml_declaration,
default_namespace=default_namespace,
method=method,
short_empty_elements=short_empty_elements)
return stream.getvalue()
class _ListDataStream(io.BufferedIOBase):
"""An auxiliary stream accumulating into a list reference."""
def __init__(self, lst):
self.lst = lst
def writable(self):
return True
def seekable(self):
return True
def write(self, b):
self.lst.append(b)
def tell(self):
return len(self.lst)
def tostringlist(element, encoding=None, method=None, *,
xml_declaration=None, default_namespace=None,
short_empty_elements=True):
lst = []
stream = _ListDataStream(lst)
ElementTree(element).write(stream, encoding,
xml_declaration=xml_declaration,
default_namespace=default_namespace,
method=method,
short_empty_elements=short_empty_elements)
return lst
def dump(elem):
"""Write element tree or element structure to sys.stdout.
This function should be used for debugging only.
*elem* is either an ElementTree, or a single Element. The exact output
format is implementation dependent. In this version, it's written as an
ordinary XML file.
"""
# debugging
if not isinstance(elem, ElementTree):
elem = ElementTree(elem)
elem.write(sys.stdout, encoding="unicode")
tail = elem.getroot().tail
if not tail or tail[-1] != "\n":
sys.stdout.write("\n")
def indent(tree, space=" ", level=0):
"""Indent an XML document by inserting newlines and indentation space
after elements.
*tree* is the ElementTree or Element to modify. The (root) element
itself will not be changed, but the tail text of all elements in its
subtree will be adapted.
*space* is the whitespace to insert for each indentation level, two
space characters by default.
*level* is the initial indentation level. Setting this to a higher
value than 0 can be used for indenting subtrees that are more deeply
nested inside of a document.
"""
if isinstance(tree, ElementTree):
tree = tree.getroot()
if level < 0:
raise ValueError(f"Initial indentation level must be >= 0, got {level}")
if not len(tree):
return
# Reduce the memory consumption by reusing indentation strings.
indentations = ["\n" + level * space]
def _indent_children(elem, level):
# Start a new indentation level for the first child.
child_level = level + 1
try:
child_indentation = indentations[child_level]
except IndexError:
child_indentation = indentations[level] + space
indentations.append(child_indentation)
if not elem.text or not elem.text.strip():
elem.text = child_indentation
for child in elem:
if len(child):
_indent_children(child, child_level)
if not child.tail or not child.tail.strip():
child.tail = child_indentation
# Dedent after the last child by overwriting the previous indentation.
if not child.tail.strip():
child.tail = indentations[level]
_indent_children(tree, 0)
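# --- Editor's usage sketch (not part of the original module) ---
# indent() edits text/tail in place so the tree pretty-prints, e.g.:
#   root = fromstring("<a><b/><c/></a>")
#   indent(root, space="  ")
#   tostring(root, encoding="unicode")   # roughly '<a>\n  <b />\n  <c />\n</a>'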
# --------------------------------------------------------------------
# parsing
def parse(source, parser=None):
"""Parse XML document into element tree.
*source* is a filename or file object containing XML data,
*parser* is an optional parser instance defaulting to XMLParser.
Return an ElementTree instance.
"""
tree = ElementTree()
tree.parse(source, parser)
return tree
def iterparse(source, events=None, parser=None):
"""Incrementally parse XML document into ElementTree.
This class also reports what's going on to the user based on the
*events* it is initialized with. The supported events are the strings
"start", "end", "start-ns" and "end-ns" (the "ns" events are used to get
detailed namespace information). If *events* is omitted, only
"end" events are reported.
*source* is a filename or file object containing XML data, *events* is
a list of events to report back, *parser* is an optional parser instance.
Returns an iterator providing (event, elem) pairs.
"""
# Use the internal, undocumented _parser argument for now; When the
# parser argument of iterparse is removed, this can be killed.
pullparser = XMLPullParser(events=events, _parser=parser)
def iterator():
try:
while True:
yield from pullparser.read_events()
# load event buffer
data = source.read(16 * 1024)
if not data:
break
pullparser.feed(data)
root = pullparser._close_and_return_root()
yield from pullparser.read_events()
it.root = root
finally:
if close_source:
source.close()
class IterParseIterator(collections.abc.Iterator):
__next__ = iterator().__next__
it = IterParseIterator()
it.root = None
del iterator, IterParseIterator
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
return it
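# --- Editor's usage sketch (not part of the original module) ---
# Typical streaming use of iterparse(); "records.xml" and the "item" tag are
# placeholder names:
#   for event, elem in iterparse("records.xml", events=("start", "end")):
#       if event == "end" and elem.tag == "item":
#           ...                # handle the completed element here
#           elem.clear()       # then drop its children to keep memory flat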
class XMLPullParser:
def __init__(self, events=None, *, _parser=None):
# The _parser argument is for internal use only and must not be relied
# upon in user code. It will be removed in a future release.
# See http://bugs.python.org/issue17741 for more details.
self._events_queue = collections.deque()
self._parser = _parser or XMLParser(target=TreeBuilder())
# wire up the parser for event reporting
if events is None:
events = ("end",)
self._parser._setevents(self._events_queue, events)
def feed(self, data):
"""Feed encoded data to parser."""
if self._parser is None:
raise ValueError("feed() called after end of stream")
if data:
try:
self._parser.feed(data)
except SyntaxError as exc:
self._events_queue.append(exc)
def _close_and_return_root(self):
# iterparse needs this to set its root attribute properly :(
root = self._parser.close()
self._parser = None
return root
def close(self):
"""Finish feeding data to parser.
Unlike XMLParser, does not return the root element. Use
read_events() to consume elements from XMLPullParser.
"""
self._close_and_return_root()
def read_events(self):
"""Return an iterator over currently available (event, elem) pairs.
Events are consumed from the internal event queue as they are
retrieved from the iterator.
"""
events = self._events_queue
while events:
event = events.popleft()
if isinstance(event, Exception):
raise event
else:
yield event
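# --- Editor's usage sketch (not part of the original module) ---
# XMLPullParser is the non-blocking counterpart of iterparse(): feed chunks as
# they arrive and drain read_events() in between.
#   parser = XMLPullParser(events=("start", "end"))
#   parser.feed("<root><child>te")
#   parser.feed("xt</child></root>")
#   for event, elem in parser.read_events():
#       print(event, elem.tag)
#   parser.close()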
def XML(text, parser=None):
"""Parse XML document from string constant.
This function can be used to embed "XML Literals" in Python code.
*text* is a string containing XML data, *parser* is an
optional parser instance, defaulting to the standard XMLParser.
Returns an Element instance.
"""
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
return parser.close()
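# --- Editor's usage sketch (not part of the original module) ---
#   elem = XML("<root><child>data</child></root>")
#   elem.find("child").text     # -> 'data'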
def XMLID(text, parser=None):
"""Parse XML document from string constant for its IDs.
*text* is a string containing XML data, *parser* is an
optional parser instance, defaulting to the standard XMLParser.
Returns an (Element, dict) tuple, in which the
dict maps element id:s to elements.
"""
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
tree = parser.close()
ids = {}
for elem in tree.iter():
id = elem.get("id")
if id:
ids[id] = elem
return tree, ids
# Parse XML document from string constant. Alias for XML().
fromstring = XML
def fromstringlist(sequence, parser=None):
"""Parse XML document from sequence of string fragments.
*sequence* is a list of other sequence, *parser* is an optional parser
instance, defaulting to the standard XMLParser.
Returns an Element instance.
"""
if not parser:
parser = XMLParser(target=TreeBuilder())
for text in sequence:
parser.feed(text)
return parser.close()
# --------------------------------------------------------------------
class TreeBuilder:
"""Generic element structure builder.
This builder converts a sequence of start, data, and end method
calls to a well-formed element structure.
You can use this class to build an element structure using a custom XML
parser, or a parser for some other XML-like format.
*element_factory* is an optional element factory which is called
to create new Element instances, as necessary.
*comment_factory* is a factory to create comments to be used instead of
the standard factory. If *insert_comments* is false (the default),
comments will not be inserted into the tree.
*pi_factory* is a factory to create processing instructions to be used
instead of the standard factory. If *insert_pis* is false (the default),
processing instructions will not be inserted into the tree.
"""
def __init__(self, element_factory=None, *,
comment_factory=None, pi_factory=None,
insert_comments=False, insert_pis=False):
self._data = [] # data collector
self._elem = [] # element stack
self._last = None # last element
self._root = None # root element
self._tail = None # true if we're after an end tag
if comment_factory is None:
comment_factory = Comment
self._comment_factory = comment_factory
self.insert_comments = insert_comments
if pi_factory is None:
pi_factory = ProcessingInstruction
self._pi_factory = pi_factory
self.insert_pis = insert_pis
if element_factory is None:
element_factory = Element
self._factory = element_factory
def close(self):
"""Flush builder buffers and return toplevel document Element."""
assert len(self._elem) == 0, "missing end tags"
assert self._root is not None, "missing toplevel element"
return self._root
def _flush(self):
if self._data:
if self._last is not None:
text = "".join(self._data)
if self._tail:
assert self._last.tail is None, "internal error (tail)"
self._last.tail = text
else:
assert self._last.text is None, "internal error (text)"
self._last.text = text
self._data = []
def data(self, data):
"""Add text to current element."""
self._data.append(data)
def start(self, tag, attrs):
"""Open new element and return it.
*tag* is the element name, *attrs* is a dict containing element
attributes.
"""
self._flush()
self._last = elem = self._factory(tag, attrs)
if self._elem:
self._elem[-1].append(elem)
elif self._root is None:
self._root = elem
self._elem.append(elem)
self._tail = 0
return elem
def end(self, tag):
"""Close and return current Element.
*tag* is the element name.
"""
self._flush()
self._last = self._elem.pop()
assert self._last.tag == tag,\
"end tag mismatch (expected %s, got %s)" % (
self._last.tag, tag)
self._tail = 1
return self._last
def comment(self, text):
"""Create a comment using the comment_factory.
*text* is the text of the comment.
"""
return self._handle_single(
self._comment_factory, self.insert_comments, text)
def pi(self, target, text=None):
"""Create a processing instruction using the pi_factory.
*target* is the target name of the processing instruction.
*text* is the data of the processing instruction, or ''.
"""
return self._handle_single(
self._pi_factory, self.insert_pis, target, text)
def _handle_single(self, factory, insert, *args):
elem = factory(*args)
if insert:
self._flush()
self._last = elem
if self._elem:
self._elem[-1].append(elem)
self._tail = 1
return elem
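# --- Editor's usage sketch (not part of the original module) ---
# Driving a TreeBuilder by hand with start/data/end, then close() to get the root:
#   builder = TreeBuilder()
#   builder.start("root", {})
#   builder.data("hello")
#   builder.end("root")
#   elem = builder.close()
#   elem.text                   # -> 'hello'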
# also see ElementTree and TreeBuilder
class XMLParser:
"""Element structure builder for XML source data based on the expat parser.
*target* is an optional target object which defaults to an instance of the
standard TreeBuilder class, *encoding* is an optional encoding string
which if given, overrides the encoding specified in the XML file:
http://www.iana.org/assignments/character-sets
"""
def __init__(self, *, target=None, encoding=None):
try:
from xml.parsers import expat
except ImportError:
try:
import pyexpat as expat
except ImportError:
raise ImportError(
"No module named expat; use SimpleXMLTreeBuilder instead"
)
parser = expat.ParserCreate(encoding, "}")
if target is None:
target = TreeBuilder()
# underscored names are provided for compatibility only
self.parser = self._parser = parser
self.target = self._target = target
self._error = expat.error
self._names = {} # name memo cache
# main callbacks
parser.DefaultHandlerExpand = self._default
if hasattr(target, 'start'):
parser.StartElementHandler = self._start
if hasattr(target, 'end'):
parser.EndElementHandler = self._end
if hasattr(target, 'start_ns'):
parser.StartNamespaceDeclHandler = self._start_ns
if hasattr(target, 'end_ns'):
parser.EndNamespaceDeclHandler = self._end_ns
if hasattr(target, 'data'):
parser.CharacterDataHandler = target.data
# miscellaneous callbacks
if hasattr(target, 'comment'):
parser.CommentHandler = target.comment
if hasattr(target, 'pi'):
parser.ProcessingInstructionHandler = target.pi
# Configure pyexpat: buffering, new-style attribute handling.
parser.buffer_text = 1
parser.ordered_attributes = 1
parser.specified_attributes = 1
self._doctype = None
self.entity = {}
try:
self.version = "Expat %d.%d.%d" % expat.version_info
except AttributeError:
pass # unknown
def _setevents(self, events_queue, events_to_report):
# Internal API for XMLPullParser
# events_to_report: a list of events to report during parsing (same as
# the *events* of XMLPullParser's constructor.
# events_queue: a list of actual parsing events that will be populated
# by the underlying parser.
#
parser = self._parser
append = events_queue.append
for event_name in events_to_report:
if event_name == "start":
parser.ordered_attributes = 1
parser.specified_attributes = 1
def handler(tag, attrib_in, event=event_name, append=append,
start=self._start):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
elif event_name == "end":
def handler(tag, event=event_name, append=append,
end=self._end):
append((event, end(tag)))
parser.EndElementHandler = handler
elif event_name == "start-ns":
# TreeBuilder does not implement .start_ns()
if hasattr(self.target, "start_ns"):
def handler(prefix, uri, event=event_name, append=append,
start_ns=self._start_ns):
append((event, start_ns(prefix, uri)))
else:
def handler(prefix, uri, event=event_name, append=append):
append((event, (prefix or '', uri or '')))
parser.StartNamespaceDeclHandler = handler
elif event_name == "end-ns":
# TreeBuilder does not implement .end_ns()
if hasattr(self.target, "end_ns"):
def handler(prefix, event=event_name, append=append,
end_ns=self._end_ns):
append((event, end_ns(prefix)))
else:
def handler(prefix, event=event_name, append=append):
append((event, None))
parser.EndNamespaceDeclHandler = handler
elif event_name == 'comment':
def handler(text, event=event_name, append=append, self=self):
append((event, self.target.comment(text)))
parser.CommentHandler = handler
elif event_name == 'pi':
def handler(pi_target, data, event=event_name, append=append,
self=self):
append((event, self.target.pi(pi_target, data)))
parser.ProcessingInstructionHandler = handler
else:
raise ValueError("unknown event %r" % event_name)
def _raiseerror(self, value):
err = ParseError(value)
err.code = value.code
err.position = value.lineno, value.offset
raise err
def _fixname(self, key):
# expand qname, and convert name string to ascii, if possible
try:
name = self._names[key]
except KeyError:
name = key
if "}" in name:
name = "{" + name
self._names[key] = name
return name
def _start_ns(self, prefix, uri):
return self.target.start_ns(prefix or '', uri or '')
def _end_ns(self, prefix):
return self.target.end_ns(prefix or '')
def _start(self, tag, attr_list):
# Handler for expat's StartElementHandler. Since ordered_attributes
# is set, the attributes are reported as a list of alternating
# attribute name,value.
fixname = self._fixname
tag = fixname(tag)
attrib = {}
if attr_list:
for i in range(0, len(attr_list), 2):
attrib[fixname(attr_list[i])] = attr_list[i+1]
return self.target.start(tag, attrib)
def _end(self, tag):
return self.target.end(self._fixname(tag))
def _default(self, text):
prefix = text[:1]
if prefix == "&":
# deal with undefined entities
try:
data_handler = self.target.data
except AttributeError:
return
try:
data_handler(self.entity[text[1:-1]])
except KeyError:
from xml.parsers import expat
err = expat.error(
"undefined entity %s: line %d, column %d" %
(text, self.parser.ErrorLineNumber,
self.parser.ErrorColumnNumber)
)
err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
err.lineno = self.parser.ErrorLineNumber
err.offset = self.parser.ErrorColumnNumber
raise err
elif prefix == "<" and text[:9] == "<!DOCTYPE":
self._doctype = [] # inside a doctype declaration
elif self._doctype is not None:
# parse doctype contents
if prefix == ">":
self._doctype = None
return
text = text.strip()
if not text:
return
self._doctype.append(text)
n = len(self._doctype)
if n > 2:
type = self._doctype[1]
if type == "PUBLIC" and n == 4:
name, type, pubid, system = self._doctype
if pubid:
pubid = pubid[1:-1]
elif type == "SYSTEM" and n == 3:
name, type, system = self._doctype
pubid = None
else:
return
if hasattr(self.target, "doctype"):
self.target.doctype(name, pubid, system[1:-1])
elif hasattr(self, "doctype"):
warnings.warn(
"The doctype() method of XMLParser is ignored. "
"Define doctype() method on the TreeBuilder target.",
RuntimeWarning)
self._doctype = None
def feed(self, data):
"""Feed encoded data to parser."""
try:
self.parser.Parse(data, False)
except self._error as v:
self._raiseerror(v)
def close(self):
"""Finish feeding data to parser and return element structure."""
try:
self.parser.Parse(b"", True) # end of data
except self._error as v:
self._raiseerror(v)
try:
close_handler = self.target.close
except AttributeError:
pass
else:
return close_handler()
finally:
# get rid of circular references
del self.parser, self._parser
del self.target, self._target
# --------------------------------------------------------------------
# C14N 2.0
def canonicalize(xml_data=None, *, out=None, from_file=None, **options):
"""Convert XML to its C14N 2.0 serialised form.
If *out* is provided, it must be a file or file-like object that receives
the serialised canonical XML output (text, not bytes) through its ``.write()``
method. To write to a file, open it in text mode with encoding "utf-8".
If *out* is not provided, this function returns the output as text string.
Either *xml_data* (an XML string) or *from_file* (a file path or
file-like object) must be provided as input.
The configuration options are the same as for the ``C14NWriterTarget``.
"""
if xml_data is None and from_file is None:
raise ValueError("Either 'xml_data' or 'from_file' must be provided as input")
sio = None
if out is None:
sio = out = io.StringIO()
parser = XMLParser(target=C14NWriterTarget(out.write, **options))
if xml_data is not None:
parser.feed(xml_data)
parser.close()
elif from_file is not None:
parse(from_file, parser=parser)
return sio.getvalue() if sio is not None else None
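# --- Editor's usage sketch (not part of the original module) ---
#   canonicalize('<root>  <child a="1" >text</child>  </root>', strip_text=True)
#   # roughly -> '<root><child a="1">text</child></root>'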
_looks_like_prefix_name = re.compile(r'^\w+:\w+$', re.UNICODE).match
class C14NWriterTarget:
"""
Canonicalization writer target for the XMLParser.
Serialises parse events to XML C14N 2.0.
The *write* function is used for writing out the resulting data stream
as text (not bytes). To write to a file, open it in text mode with encoding
"utf-8" and pass its ``.write`` method.
Configuration options:
- *with_comments*: set to true to include comments
- *strip_text*: set to true to strip whitespace before and after text content
- *rewrite_prefixes*: set to true to replace namespace prefixes by "n{number}"
- *qname_aware_tags*: a set of qname aware tag names in which prefixes
should be replaced in text content
- *qname_aware_attrs*: a set of qname aware attribute names in which prefixes
should be replaced in text content
- *exclude_attrs*: a set of attribute names that should not be serialised
- *exclude_tags*: a set of tag names that should not be serialised
"""
def __init__(self, write, *,
with_comments=False, strip_text=False, rewrite_prefixes=False,
qname_aware_tags=None, qname_aware_attrs=None,
exclude_attrs=None, exclude_tags=None):
self._write = write
self._data = []
self._with_comments = with_comments
self._strip_text = strip_text
self._exclude_attrs = set(exclude_attrs) if exclude_attrs else None
self._exclude_tags = set(exclude_tags) if exclude_tags else None
self._rewrite_prefixes = rewrite_prefixes
if qname_aware_tags:
self._qname_aware_tags = set(qname_aware_tags)
else:
self._qname_aware_tags = None
if qname_aware_attrs:
self._find_qname_aware_attrs = set(qname_aware_attrs).intersection
else:
self._find_qname_aware_attrs = None
# Stack with globally and newly declared namespaces as (uri, prefix) pairs.
self._declared_ns_stack = [[
("http://www.w3.org/XML/1998/namespace", "xml"),
]]
# Stack with user declared namespace prefixes as (uri, prefix) pairs.
self._ns_stack = []
if not rewrite_prefixes:
self._ns_stack.append(list(_namespace_map.items()))
self._ns_stack.append([])
self._prefix_map = {}
self._preserve_space = [False]
self._pending_start = None
self._root_seen = False
self._root_done = False
self._ignored_depth = 0
def _iter_namespaces(self, ns_stack, _reversed=reversed):
for namespaces in _reversed(ns_stack):
if namespaces: # almost no element declares new namespaces
yield from namespaces
def _resolve_prefix_name(self, prefixed_name):
prefix, name = prefixed_name.split(':', 1)
for uri, p in self._iter_namespaces(self._ns_stack):
if p == prefix:
return f'{{{uri}}}{name}'
raise ValueError(f'Prefix {prefix} of QName "{prefixed_name}" is not declared in scope')
def _qname(self, qname, uri=None):
if uri is None:
uri, tag = qname[1:].rsplit('}', 1) if qname[:1] == '{' else ('', qname)
else:
tag = qname
prefixes_seen = set()
for u, prefix in self._iter_namespaces(self._declared_ns_stack):
if u == uri and prefix not in prefixes_seen:
return f'{prefix}:{tag}' if prefix else tag, tag, uri
prefixes_seen.add(prefix)
# Not declared yet => add new declaration.
if self._rewrite_prefixes:
if uri in self._prefix_map:
prefix = self._prefix_map[uri]
else:
prefix = self._prefix_map[uri] = f'n{len(self._prefix_map)}'
self._declared_ns_stack[-1].append((uri, prefix))
return f'{prefix}:{tag}', tag, uri
if not uri and '' not in prefixes_seen:
# No default namespace declared => no prefix needed.
return tag, tag, uri
for u, prefix in self._iter_namespaces(self._ns_stack):
if u == uri:
self._declared_ns_stack[-1].append((uri, prefix))
return f'{prefix}:{tag}' if prefix else tag, tag, uri
if not uri:
# As soon as a default namespace is defined,
# anything that has no namespace (and thus, no prefix) goes there.
return tag, tag, uri
raise ValueError(f'Namespace "{uri}" is not declared in scope')
def data(self, data):
if not self._ignored_depth:
self._data.append(data)
def _flush(self, _join_text=''.join):
data = _join_text(self._data)
del self._data[:]
if self._strip_text and not self._preserve_space[-1]:
data = data.strip()
if self._pending_start is not None:
args, self._pending_start = self._pending_start, None
qname_text = data if data and _looks_like_prefix_name(data) else None
self._start(*args, qname_text)
if qname_text is not None:
return
if data and self._root_seen:
self._write(_escape_cdata_c14n(data))
def start_ns(self, prefix, uri):
if self._ignored_depth:
return
# we may have to resolve qnames in text content
if self._data:
self._flush()
self._ns_stack[-1].append((uri, prefix))
def start(self, tag, attrs):
if self._exclude_tags is not None and (
self._ignored_depth or tag in self._exclude_tags):
self._ignored_depth += 1
return
if self._data:
self._flush()
new_namespaces = []
self._declared_ns_stack.append(new_namespaces)
if self._qname_aware_tags is not None and tag in self._qname_aware_tags:
# Need to parse text first to see if it requires a prefix declaration.
self._pending_start = (tag, attrs, new_namespaces)
return
self._start(tag, attrs, new_namespaces)
def _start(self, tag, attrs, new_namespaces, qname_text=None):
if self._exclude_attrs is not None and attrs:
attrs = {k: v for k, v in attrs.items() if k not in self._exclude_attrs}
qnames = {tag, *attrs}
resolved_names = {}
# Resolve prefixes in attribute and tag text.
if qname_text is not None:
qname = resolved_names[qname_text] = self._resolve_prefix_name(qname_text)
qnames.add(qname)
if self._find_qname_aware_attrs is not None and attrs:
qattrs = self._find_qname_aware_attrs(attrs)
if qattrs:
for attr_name in qattrs:
value = attrs[attr_name]
if _looks_like_prefix_name(value):
qname = resolved_names[value] = self._resolve_prefix_name(value)
qnames.add(qname)
else:
qattrs = None
else:
qattrs = None
# Assign prefixes in lexicographical order of used URIs.
parse_qname = self._qname
parsed_qnames = {n: parse_qname(n) for n in sorted(
qnames, key=lambda n: n.split('}', 1))}
# Write namespace declarations in prefix order ...
if new_namespaces:
attr_list = [
('xmlns:' + prefix if prefix else 'xmlns', uri)
for uri, prefix in new_namespaces
]
attr_list.sort()
else:
# almost always empty
attr_list = []
# ... followed by attributes in URI+name order
if attrs:
for k, v in sorted(attrs.items()):
if qattrs is not None and k in qattrs and v in resolved_names:
v = parsed_qnames[resolved_names[v]][0]
attr_qname, attr_name, uri = parsed_qnames[k]
# No prefix for attributes in default ('') namespace.
attr_list.append((attr_qname if uri else attr_name, v))
# Honour xml:space attributes.
space_behaviour = attrs.get('{http://www.w3.org/XML/1998/namespace}space')
self._preserve_space.append(
space_behaviour == 'preserve' if space_behaviour
else self._preserve_space[-1])
# Write the tag.
write = self._write
write('<' + parsed_qnames[tag][0])
if attr_list:
write(''.join([f' {k}="{_escape_attrib_c14n(v)}"' for k, v in attr_list]))
write('>')
# Write the resolved qname text content.
if qname_text is not None:
write(_escape_cdata_c14n(parsed_qnames[resolved_names[qname_text]][0]))
self._root_seen = True
self._ns_stack.append([])
def end(self, tag):
if self._ignored_depth:
self._ignored_depth -= 1
return
if self._data:
self._flush()
self._write(f'</{self._qname(tag)[0]}>')
self._preserve_space.pop()
self._root_done = len(self._preserve_space) == 1
self._declared_ns_stack.pop()
self._ns_stack.pop()
def comment(self, text):
if not self._with_comments:
return
if self._ignored_depth:
return
if self._root_done:
self._write('\n')
elif self._root_seen and self._data:
self._flush()
self._write(f'<!--{_escape_cdata_c14n(text)}-->')
if not self._root_seen:
self._write('\n')
def pi(self, target, data):
if self._ignored_depth:
return
if self._root_done:
self._write('\n')
elif self._root_seen and self._data:
self._flush()
self._write(
f'<?{target} {_escape_cdata_c14n(data)}?>' if data else f'<?{target}?>')
if not self._root_seen:
self._write('\n')
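# --- Editor's usage sketch (not part of the original module) ---
# canonicalize() above wires this target up for you; doing it by hand looks like
# the following ("doc.xml" is a placeholder path):
#   out = io.StringIO()
#   parser = XMLParser(target=C14NWriterTarget(out.write, with_comments=True))
#   with open("doc.xml", "rb") as f:
#       parser.feed(f.read())
#   parser.close()
#   canonical = out.getvalue()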
def _escape_cdata_c14n(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 character, or so. assume that's, by far,
# the most common case in most applications.
if '&' in text:
    text = text.replace('&', '&amp;')
if '<' in text:
    text = text.replace('<', '&lt;')
if '>' in text:
    text = text.replace('>', '&gt;')
if '\r' in text:
    text = text.replace('\r', '&#xD;')
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_c14n(text):
# escape attribute value
try:
if '&' in text:
    text = text.replace('&', '&amp;')
if '<' in text:
    text = text.replace('<', '&lt;')
if '"' in text:
    text = text.replace('"', '&quot;')
if '\t' in text:
    text = text.replace('\t', '&#x9;')
if '\n' in text:
    text = text.replace('\n', '&#xA;')
if '\r' in text:
    text = text.replace('\r', '&#xD;')
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
# Import the C accelerators
try:
# Element is going to be shadowed by the C implementation. We need to keep
# the Python version of it accessible for some "creative" by external code
# (see tests)
_Element_Py = Element
# Element, SubElement, ParseError, TreeBuilder, XMLParser, _set_factories
from _elementtree import *
from _elementtree import _set_factories
except ImportError:
pass
else:
_set_factories(Comment, ProcessingInstruction)
| gpl-2.0 | -2,000,098,224,576,624,600 | 34.417225 | 96 | 0.568979 | false | 4.445232 | false | false | false |
mayapurmedia/tovp | tovp/gifts/migrations/0001_initial.py | 2 | 3850 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import audit_log.models.fields
import ananta.models
import model_utils.fields
from django.conf import settings
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('contacts', '0012_person_note'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Gift',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('created_with_session_key', audit_log.models.fields.CreatingSessionKeyField(null=True, editable=False, max_length=40)),
('modified_with_session_key', audit_log.models.fields.LastSessionKeyField(null=True, editable=False, max_length=40)),
('name', models.CharField(help_text='Enter gift name.', max_length=100)),
('description', models.TextField(blank=True)),
('created_by', audit_log.models.fields.CreatingUserField(null=True, to=settings.AUTH_USER_MODEL, related_name='created_gifts_gift_set', editable=False, verbose_name='created by')),
('modified_by', audit_log.models.fields.LastUserField(null=True, to=settings.AUTH_USER_MODEL, related_name='modified_gifts_gift_set', editable=False, verbose_name='modified by')),
],
options={
'abstract': False,
},
bases=(ananta.models.NextPrevMixin, models.Model),
),
migrations.CreateModel(
name='GiftGiven',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('created_with_session_key', audit_log.models.fields.CreatingSessionKeyField(null=True, editable=False, max_length=40)),
('modified_with_session_key', audit_log.models.fields.LastSessionKeyField(null=True, editable=False, max_length=40)),
('status', model_utils.fields.StatusField(choices=[('sent', 'Sent'), ('returned', 'Returned'), ('delivered', 'Delivered')], default='sent', no_check_for_status=True, max_length=100)),
('status_changed', model_utils.fields.MonitorField(default=django.utils.timezone.now, monitor='status')),
('note', models.TextField(verbose_name='Note', blank=True)),
('created_by', audit_log.models.fields.CreatingUserField(null=True, to=settings.AUTH_USER_MODEL, related_name='created_gifts_giftgiven_set', editable=False, verbose_name='created by')),
('gift', models.ForeignKey(to='gifts.Gift', related_name='gifts', verbose_name='Gift')),
('modified_by', audit_log.models.fields.LastUserField(null=True, to=settings.AUTH_USER_MODEL, related_name='modified_gifts_giftgiven_set', editable=False, verbose_name='modified by')),
('person', models.ForeignKey(to='contacts.Person', related_name='gifts', verbose_name='Person')),
],
options={
'abstract': False,
},
bases=(ananta.models.NextPrevMixin, models.Model),
),
]
| mit | -7,659,025,736,623,560,000 | 64.254237 | 201 | 0.647013 | false | 3.952772 | false | false | false |
kryptn/Pantry | setup.py | 1 | 3672 | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='Pantry',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.3',
description='A simple file data store',
long_description=long_description,
# The project's main homepage.
url='https://github.com/kryptn/Pantry',
# Author details
author='David Bibb',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='simple database datastore data store',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
#install_requires=['peppercorn'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data={
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
#entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
) | mit | -741,208,000,180,265,900 | 35.366337 | 94 | 0.64842 | false | 4.05298 | false | false | false |
elaird/supy-tflat | displayer.py | 1 | 19587 | import ROOT as r
import supy
class displayer(supy.steps.displayer):
def __init__(self,
scale=200.0,
jets=[{"fixes":("J", "Gen"), "nMax":4, "color":r.kBlack, "width":2, "style":2},
{"fixes":("J", ""), "nMax":4, "color":r.kBlue, "width":1, "style":1},
],
nMaxParticles=4,
particles=[("genB", r.kRed, 0.15),
("genTau", r.kCyan, 0.15),
("genMu", r.kMagenta+1, 0.05),
("genEle", r.kOrange, 0.05),
],
nMaxDiTaus=4,
taus={"color":r.kGreen+2, "width":1, "style":1},
):
self.moreName = "(see below)"
for item in ["scale", "jets", "nMaxParticles", "particles", "nMaxDiTaus", "taus"]:
setattr(self, item, eval(item))
self.titleSizeFactor = 1.0
self.legendDict = {}
self.legendList = []
self.ellipse = r.TEllipse()
self.ellipse.SetFillStyle(0)
self.line = r.TLine()
self.arrow = r.TArrow()
self.text = r.TText()
self.latex = r.TLatex()
def prepareText(self, params, coords) :
self.text.SetTextSize(params["size"])
self.text.SetTextFont(params["font"])
self.text.SetTextColor(params["color"])
self.textSlope = params["slope"]
self.textX = coords["x"]
self.textY = coords["y"]
self.textCounter = 0
def printText(self, message, color=r.kBlack):
self.text.SetTextColor(color)
self.text.DrawText(self.textX, self.textY - self.textCounter * self.textSlope, message)
self.textCounter += 1
self.text.SetTextColor(r.kBlack)
def printEvent(self, eventVars, params, coords):
self.prepareText(params, coords)
self.printText("Event %10d" % eventVars["EVENT"])
self.printText("Weight %9.2f" % eventVars["weight"])
self.printText("")
#self.printText("rho %10.1f" % eventVars["rho"])
self.printText("")
#met = eventVars["MissingET"][0]
#self.printText("MET %10.1f (phi %4.1f)" % (met.MET, met.Phi))
self.printText("")
def printJets(self, eventVars=None, params=None, coords=None, fixes=None, nMax=None, highlight=False):
def j(s="", iJet=None):
return eventVars["%s%d%s%s" % (fixes[0], 1+iJet, fixes[1], s)]
self.prepareText(params, coords)
self.printText("".join(fixes))
headers = " csv pT eta phi mass"
self.printText(headers)
self.printText("-" * len(headers))
for iJet in range(nMax):
if nMax <= iJet:
self.printText("[%d more not listed]" % (nJets - nMax))
break
out = ""
out += "%6s %5.0f %5.1f %5.1f %5.0f" % (" " if fixes[1] else "%6.2f" % j("CSVbtag", iJet),
j("Pt", iJet),
j("Eta", iJet),
j("Phi", iJet),
j("Mass", iJet),
)
self.printText(out, r.kBlack)
def printGenParticles(self, eventVars=None, params=None, coords=None,
nMax=None, particles=None, color=r.kBlack):
def g(s="", iJet=None):
return eventVars["%s%s" % (particles, s)].at(iJet)
self.prepareText(params, coords)
self.printText(particles)
headers = " pT eta phi mass"
self.printText(headers)
self.printText("-" * len(headers))
nParticles = eventVars["%sPt" % particles].size()
for iParticle in range(nParticles):
if nMax <= iParticle:
self.printText("[%d more not listed]" % (nParticles - nMax))
break
self.printText("%5.0f %5.1f %5.1f %5.1f" % (g("Pt", iParticle),
g("Eta", iParticle),
g("Phi", iParticle),
g("Mass", iParticle),
),
color=color)
return
def printDiTaus(self, eventVars=None, params=None, coords=None,
nMax=None, color=r.kBlack, ptMin=None):
self.prepareText(params, coords)
self.printText("di-taus")
headers = "cmee pT eta phi mass iso*"
self.printText(headers)
self.printText("-" * len(headers))
nDiTaus = eventVars["pt1"].size()
for iDiTau in range(nDiTaus):
if nMax <= iDiTau:
self.printText("[%d more not listed]" % (nDiTaus - nMax))
break
for iTau in [1, 2]:
c = eventVars["charge%d" % iTau].at(iDiTau)
if c > 0:
s = "+"
elif c < 0:
s = "-"
else:
s = "@"
values = (s,
eventVars["againstMuonLoose%d" % iTau].at(iDiTau),
eventVars["againstElectronLoose%d" % iTau].at(iDiTau),
eventVars["againstElectronLooseMVA3_%d" % iTau].at(iDiTau),
eventVars["pt%d" % iTau].at(iDiTau),
eventVars["eta%d" % iTau].at(iDiTau),
eventVars["phi%d" % iTau].at(iDiTau),
eventVars["m%d" % iTau].at(iDiTau),
eventVars["iso%d" % iTau].at(iDiTau),
)
self.printText("%1s%1d%1d%1d %5.0f %5.1f %5.1f %5.1f %5.1f" % values, color=color)
self.printText(" ")
return
def drawSkeleton(self, coords, color) :
r.gPad.AbsCoordinates(False)
self.ellipse.SetLineColor(color)
self.ellipse.SetLineWidth(1)
self.ellipse.SetLineStyle(1)
self.ellipse.DrawEllipse(coords["x0"], coords["y0"], coords["radius"], coords["radius"], 0.0, 360.0, 0.0, "")
self.line.SetLineColor(color)
self.line.DrawLine(coords["x0"]-coords["radius"], coords["y0"] , coords["x0"]+coords["radius"], coords["y0"] )
self.line.DrawLine(coords["x0"] , coords["y0"]-coords["radius"], coords["x0"] , coords["y0"]+coords["radius"])
def drawScale(self, color, size, scale, point) :
self.latex.SetTextSize(size)
self.latex.SetTextColor(color)
self.latex.DrawLatex(point["x"], point["y"],"radius = "+str(scale)+" GeV p_{T}")
def drawP4(self,
rhoPhiPad=None,
etaPhiPad=None,
coords=None,
p4=None,
lineColor=None,
lineWidth=1,
lineStyle=1,
arrowSize=1.0,
circleRadius=1.0,
b=None,
tau=None):
c = coords
x0 = c["x0"]
y0 = c["y0"]
x1 = x0 + p4.px()*c["radius"]/c["scale"]
y1 = y0 + p4.py()*c["radius"]/c["scale"]
rhoPhiPad.cd()
self.arrow.SetLineColor(lineColor)
self.arrow.SetLineWidth(lineWidth)
self.arrow.SetLineStyle(lineStyle)
self.arrow.SetArrowSize(arrowSize)
self.arrow.SetFillColor(lineColor)
self.arrow.DrawArrow(x0, y0, x1, y1)
etaPhiPad.cd()
self.ellipse.SetLineColor(lineColor)
self.ellipse.SetLineWidth(lineWidth)
self.ellipse.SetLineStyle(lineStyle)
self.ellipse.DrawEllipse(p4.eta(), p4.phi(), circleRadius, circleRadius, 0.0, 360.0, 0.0, "")
if b:
self.ellipse.SetLineColor(r.kRed)
self.ellipse.SetLineStyle(3)
self.ellipse.DrawEllipse(p4.eta(), p4.phi(), circleRadius, circleRadius, 0.0, 360.0, 0.0, "")
if tau:
self.ellipse.SetLineColor(r.kCyan)
self.ellipse.SetLineStyle(2)
self.ellipse.DrawEllipse(p4.eta(), p4.phi(), circleRadius, circleRadius, 0.0, 360.0, 0.0, "")
def legendFunc(self, lineColor=None, lineStyle=1, name="", desc=""):
if name not in self.legendDict:
self.legendDict[name] = True
self.legendList.append((lineColor, lineStyle, desc, "l"))
def drawGenParticles(self, eventVars=None, indices="",
coords=None, lineColor=None,
lineWidth=1, lineStyle=1,
arrowSize=-1.0, circleRadius=None):
self.legendFunc(lineColor=lineColor,
lineStyle=lineStyle,
name=indices,
desc=indices)
for iParticle in eventVars[indices]:
particle = eventVars["genP4"].at(iParticle)
if circleRadius is None:
self.drawP4(coords=coords,
p4=particle,
lineColor=lineColor,
lineWidth=lineWidth,
arrowSize=arrowSize)
else :
self.drawCircle(p4=particle,
lineColor=lineColor,
lineWidth=lineWidth,
circleRadius=circleRadius)
def drawJets(self, eventVars=None, fixes=None, nMax=None, vec=False, bVar="",
coords=None, lineColor=None, lineWidth=1, lineStyle=1,
arrowSize=-1.0, circleRadius=None, rhoPhiPad=None, etaPhiPad=None):
def j(s="", iJet=None):
if vec:
return eventVars["%s%s" % (fixes[0], s)].at(iJet)
else:
return eventVars["%s%d%s%s" % (fixes[0], 1+iJet, fixes[1], s)]
self.legendFunc(lineColor=lineColor,
lineStyle=lineStyle,
name="".join(fixes), desc="".join(fixes))
for iJet in range(nMax):
if not j("Pt", iJet):
continue
self.drawP4(rhoPhiPad=rhoPhiPad,
etaPhiPad=etaPhiPad,
coords=coords,
p4=supy.utils.LorentzV(j("Pt", iJet), j("Eta", iJet), j("Phi", iJet), j("Mass", iJet)),
b=False if (fixes[1] or not bVar) else (j(bVar, iJet) > 0.679),
tau=False,
lineColor=lineColor,
lineWidth=lineWidth,
lineStyle=lineStyle,
arrowSize=arrowSize,
circleRadius=circleRadius)
def drawTaus(self, eventVars=None,
coords=None, lineColor=None, lineWidth=1, lineStyle=1,
arrowSize=-1.0, circleRadius=None, rhoPhiPad=None, etaPhiPad=None):
self.legendFunc(lineColor=lineColor, lineStyle=lineStyle, name="reco. taus", desc="reco. taus")
nDiTaus = eventVars["pt1"].size()
for iDiTau in range(nDiTaus):
for iTau in [1, 2]:
self.drawP4(rhoPhiPad=rhoPhiPad,
etaPhiPad=etaPhiPad,
coords=coords,
p4=supy.utils.LorentzV(eventVars["pt%d" % iTau].at(iDiTau),
eventVars["eta%d" % iTau].at(iDiTau),
eventVars["phi%d" % iTau].at(iDiTau),
eventVars["m%d" % iTau].at(iDiTau),
),
lineColor=lineColor,
lineWidth=lineWidth,
lineStyle=lineStyle,
arrowSize=arrowSize,
circleRadius=circleRadius)
def etaPhiPad(self, eventVars, corners):
pad = r.TPad("etaPhiPad", "etaPhiPad",
corners["x1"], corners["y1"],
corners["x2"], corners["y2"])
pad.cd()
pad.SetTickx()
pad.SetTicky()
etaPhiPlot = r.TH2D("etaPhi", ";#eta;#phi;",
1, -r.TMath.Pi(), r.TMath.Pi(),
1, -r.TMath.Pi(), r.TMath.Pi())
etaPhiPlot.SetStats(False)
etaPhiPlot.Draw()
return pad, etaPhiPlot
def rhoPhiPad(self, eventVars, coords, corners):
pad = r.TPad("rhoPhiPad", "rhoPhiPad", corners["x1"], corners["y1"], corners["x2"], corners["y2"])
pad.cd()
skeletonColor = r.kYellow+1
self.drawSkeleton(coords, skeletonColor)
self.drawScale(color=skeletonColor, size=0.03, scale=coords["scale"],
point={"x":0.0, "y":coords["radius"]+coords["y0"]+0.03})
return pad
def drawObjects(self, eventVars=None, etaPhiPad=None, rhoPhiPad=None, rhoPhiCoords=None):
defArrowSize=0.5*self.arrow.GetDefaultArrowSize()
defWidth=1
arrowSize = defArrowSize
for particles, color, size in self.particles:
self.drawJets(eventVars=eventVars,
fixes=(particles, ""),
vec=True,
nMax=eventVars["%sPt" % particles].size(),
coords=rhoPhiCoords,
lineColor=color,
arrowSize=arrowSize,
circleRadius=size,
rhoPhiPad=rhoPhiPad,
etaPhiPad=etaPhiPad,
)
arrowSize *= 0.8
for d in self.jets:
self.drawJets(eventVars=eventVars,
fixes=d["fixes"],
nMax=d["nMax"],
bVar="CSVbtag",
coords=rhoPhiCoords,
lineColor=d["color"],
lineWidth=d["width"],
lineStyle=d["style"],
arrowSize=arrowSize,
circleRadius=0.5,
rhoPhiPad=rhoPhiPad,
etaPhiPad=etaPhiPad,
)
arrowSize *= 0.8
self.drawTaus(eventVars=eventVars,
coords=rhoPhiCoords,
lineColor=self.taus["color"],
lineWidth=self.taus["width"],
lineStyle=self.taus["style"],
arrowSize=arrowSize,
circleRadius=0.25,
rhoPhiPad=rhoPhiPad,
etaPhiPad=etaPhiPad,
)
arrowSize *= 0.8
def drawLegend(self, corners) :
pad = r.TPad("legendPad", "legendPad", corners["x1"], corners["y1"], corners["x2"], corners["y2"])
pad.cd()
legend = r.TLegend(0.0, 0.0, 1.0, 1.0)
for color, style, desc, gopts in self.legendList:
self.line.SetLineColor(color)
self.line.SetLineStyle(style)
someLine = self.line.DrawLine(0.0, 0.0, 0.0, 0.0)
legend.AddEntry(someLine, desc, gopts)
legend.Draw("same")
self.canvas.cd()
pad.Draw()
return [pad,legend]
def printText1(self, eventVars, corners):
pad = r.TPad("textPad", "textPad",
corners["x1"], corners["y1"],
corners["x2"], corners["y2"])
pad.cd()
defaults = {}
defaults["size"] = 0.035
defaults["font"] = 80
defaults["color"] = r.kBlack
defaults["slope"] = 0.017
s = defaults["slope"]
smaller = {}
smaller.update(defaults)
smaller["size"] = 0.034
yy = 0.98
x0 = 0.01
self.printEvent(eventVars, params=defaults, coords={"x": x0, "y": yy})
y = yy - 5*s
for d in self.jets:
self.printJets(eventVars,
params=smaller,
coords={"x": x0, "y": y},
fixes=d["fixes"],
nMax=d["nMax"],
highlight=False)
y -= s*(5 + d["nMax"])
for i, (particles, color, size) in enumerate(self.particles):
self.printGenParticles(eventVars,
params=smaller,
particles=particles,
color=color,
coords={"x": x0+(0.5 if i%2 else 0.0), "y": y},
nMax=self.nMaxParticles)
if i % 2:
y -= s*(5 + self.nMaxParticles)
if not (i % 2):
y -= s*(5 + self.nMaxParticles)
self.printDiTaus(eventVars,
params=smaller,
coords={"x": x0, "y": y},
nMax=self.nMaxDiTaus)
y -= s*(5 + self.nMaxDiTaus)
self.canvas.cd()
pad.Draw()
return [pad]
def printText2(self, eventVars, corners):
pad = r.TPad("textPad2", "textPad2",
corners["x1"], corners["y1"],
corners["x2"], corners["y2"])
pad.cd()
defaults = {}
defaults["size"] = 0.08
defaults["font"] = 80
defaults["color"] = r.kBlack
defaults["slope"] = 0.03
s = defaults["slope"]
y = 0.98 - 2*s
x0 = 0.01
self.canvas.cd()
pad.Draw()
return [pad]
def display(self, eventVars):
rhoPhiPadYSize = 0.50*self.canvas.GetAspectRatio()
rhoPhiPadXSize = 0.50
radius = 0.4
rhoPhiCoords = {"scale":self.scale, "radius":radius,
"x0":radius, "y0":radius+0.05}
rhoPhiCorners = {"x1":0.0,
"y1":0.0,
"x2":rhoPhiPadXSize,
"y2":rhoPhiPadYSize}
etaPhiCorners = {"x1":rhoPhiPadXSize - 0.18,
"y1":rhoPhiPadYSize - 0.08*self.canvas.GetAspectRatio(),
"x2":rhoPhiPadXSize + 0.12,
"y2":rhoPhiPadYSize + 0.22*self.canvas.GetAspectRatio()}
legendCorners = {"x1":0.0,
"y1":rhoPhiPadYSize,
"x2":1.0-rhoPhiPadYSize,
"y2":1.0}
textCorners1 = {"x1":rhoPhiPadXSize + 0.11,
"y1":0.0,
"x2":1.0,
"y2":1.0}
textCorners2 = {"x1":rhoPhiPadXSize - 0.08,
"y1":0.0,
"x2":rhoPhiPadXSize + 0.11,
"y2":0.55}
rhoPhiPad = self.rhoPhiPad(eventVars, rhoPhiCoords, rhoPhiCorners)
etaPhiPad, etaPhiPlot = self.etaPhiPad(eventVars, etaPhiCorners)
keep = [rhoPhiPad, etaPhiPad, etaPhiPlot]
self.drawObjects(eventVars, etaPhiPad, rhoPhiPad, rhoPhiCoords)
self.canvas.cd()
rhoPhiPad.Draw()
etaPhiPad.Draw()
keep.append(self.drawLegend(corners=legendCorners))
keep.append(self.printText1(eventVars, corners=textCorners1))
#keep.append(self.printText2(eventVars, corners=textCorners2))
return keep
| gpl-3.0 | 8,626,758,202,597,365,000 | 37.481336 | 150 | 0.46633 | false | 3.685924 | false | false | false |
hunterfu/it-manager | stock_tech/rps_sma.py | 1 | 22581 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = 0.1
from lib import stock
from lib.htmltmpl import TemplateManager, TemplateProcessor
import sys
import os
import shutil
import pickle
import re
import time
#import pysqlite2.dbapi2 as sqlite
import sqlite3 as sqlite
import getopt
from operator import itemgetter
from decimal import *
import urllib
import json
import fpformat
import subprocess
import datetime
import string
import pprint
import commands
import smtplib
from pprint import pprint
"""
Compare the gain of each stock's moving average against the market index's
moving average, i.e. its performance relative to the index's rise or fall.
"""
import threading,Queue
class ind_runs(threading.Thread):
def __init__(self,base,long_result,short_result,day_ago,sma_day,timeframe='day'):
threading.Thread.__init__(self)
self.long_result=long_result
self.short_result=short_result
self.base=base
self.day_ago= day_ago
self.sma_day=sma_day
self.timeframe = timeframe # week,day,month
def run(self):
"""
强弱度=(该股涨跌幅-指数涨跌幅)*100
"""
while clientpool.empty() != True:
try:
symbol = clientpool.get(block=0)
change = get_indicator_output(symbol,self.day_ago,self.sma_day,self.timeframe)
if change >= self.base:
tmp_str = "%s,%s" % (symbol,change)
data = tuple(tmp_str.split(","))
self.long_result.append(data)
if change <= self.base:
tmp_str = "%s,%s" % (symbol,change)
data = tuple(tmp_str.split(","))
self.short_result.append(data)
except Queue.Empty:
pass
class graph_runs(threading.Thread):
def __init__(self,result,endday,conf_dir):
threading.Thread.__init__(self)
self.result=result
self.endday = endday
self.conf_dir = conf_dir
def run(self):
while clientpool.empty() != True:
try:
symbol = clientpool.get(block=0)
out_dir = img_out_dir
conf_dir = self.conf_dir
graph_conf = "%s/graph_day.conf" % (conf_dir)
graph_week_conf = "%s/graph_week.conf" % (conf_dir)
graph_month_conf = "%s/graph_month.conf" % (conf_dir)
img_file = "%s/%s.png" % (out_dir,symbol)
img_week_file = "%s/%s_WEEK.png" % (out_dir,symbol)
img_month_file = "%s/%s_MONTH.png" % (out_dir,symbol)
os.chdir('%s' % script_dir)
cmd = "perl graphic.pl --end '%s' --file %s --out '%s' %s" % (self.endday,graph_conf,img_file,symbol)
(status,output) = commands.getstatusoutput(cmd)
if status != 0 :
print "Error = %s" % output
continue
cmd = "perl graphic.pl --file %s --out '%s' %s" % (graph_week_conf,img_week_file,symbol)
(status,output) = commands.getstatusoutput(cmd)
if status != 0:
print "Error = %s" % output
continue
cmd = "perl graphic.pl --file %s --out '%s' %s" % (graph_month_conf,img_month_file,symbol)
(status,output) = commands.getstatusoutput(cmd)
if status != 0:
print "Error = %s" % output
continue
stock_dict= {}
stock_dict['symbol'] = symbol
stock_dict['img'] = img_file
stock_dict['img_week'] = img_week_file
stock_dict['img_month'] = img_month_file
self.result.append(stock_dict)
except Queue.Empty:
pass
def get_home_path():
"""
    Return the user's home directory.
"""
homedir = os.environ.get('HOME')
if homedir:
return homedir
else:
homedir = "%s%s" % (os.environ.get('HOMEDRIVE'),os.environ.get('HOMEPATH'))
return homedir
def connect_db(db_file):
"""
    Open (or create) the stock-list database.
"""
if os.path.isfile(db_file):
cx = sqlite.connect(db_file)
cu = cx.cursor()
return (cu,cx)
else:
cx = sqlite.connect(db_file)
cu = cx.cursor()
cu.execute('''
create table stock(
id integer primary key,
exchange_name varchar(20),
stock_title varchar(50),
stock_symbol varchar(20) UNIQUE,
stock_country varchar(100),
stock_tradedb_lastupdate_time NUMERIC DEFAULT 0
)''')
return (cu,cx)
def connect_pool_db(db_file):
"""
    Open (or create) the database holding the pool of screened stocks.
"""
if os.path.isfile(db_file):
cx = sqlite.connect(db_file)
cu = cx.cursor()
return (cu,cx)
else:
cx = sqlite.connect(db_file)
cu = cx.cursor()
cu.execute('''
create table stock(
id integer primary key,
symbol varchar(20) UNIQUE,
country varchar(100),
firstsee_time NUMERIC DEFAULT 0,
lastupdate_time NUMERIC DEFAULT 0
)''')
return (cu,cx)
def calc_stock(stock_list,country,timeframe='day'):
"""
    Compute RPS (relative price strength).
"""
graph_dir = script_dir
long_data = []
short_data = []
    if len(stock_list) == 0: return (long_data, short_data)
    # use the region's benchmark index (e.g. the S&P 500 for US) as the baseline
index_name = base_index[country][0]
index_symbol = base_index[country][1]
#day_ago = 30
#sma_day = 120
base_line_percent = get_indicator_output(index_symbol,day_ago,sma_day,timeframe)
if DEBUG: print "day_ago = %s , sma_day = %s ,timeframe=%s , base_line =%s " % (day_ago,sma_day,timeframe,base_line_percent)
#stock_list.append(index_symbol)
quene_list = []
ts = []
    # run the screen with multiple worker threads
global clientpool
clientpool = Queue.Queue(0)
for a in stock_list:
a=a.strip()
clientpool.put(a)
for b in xrange(20):
t = ind_runs(base_line_percent,long_data,short_data,day_ago,sma_day,timeframe)
t.start()
ts.append(t)
for t in ts:
if t:t.join()
return (long_data,short_data)
def create_graph(stock_list,template_file,conf_dir,stock_region='US',signal_file="signal_file",endday='today'):
"""
    Generate chart images for the given stock symbols.
"""
out_dir = img_out_dir
graph_conf = "%s/graph_day.conf" % (conf_dir)
template_graph_conf = "/tmp/graph_%s.conf" % (signal_file)
graph_week_conf = "%s/graph_week.conf" % (conf_dir)
graph_month_conf = "%s/graph_month.conf" % (conf_dir)
stock_count = len(stock_list)
template = TemplateManager().prepare(template_file)
tproc = TemplateProcessor(html_escape=0)
stock = []
for symbol in stock_list:
img_file = "%s/%s.png" % (out_dir,symbol)
img_week_file = "%s/%s_WEEK.png" % (out_dir,symbol)
img_month_file = "%s/%s_MONTH.png" % (out_dir,symbol)
stock_dict= {}
stock_dict['symbol'] = symbol
stock_dict['img'] = img_file
stock_dict['img_week'] = img_week_file
stock_dict['img_month'] = img_month_file
stock.append(stock_dict)
#pprint.pprint(stock)
tproc.set("market_name","%s Market" % stock_region)
tproc.set("stock_count",stock_count)
tproc.set("Stock",stock)
# save to file
filename = "%s/%s_%s_STOCK.html" % (out_dir,stock_region,signal_file)
FILE = open(filename,"w")
FILE.writelines(tproc.process(template))
FILE.close()
    # render the charts with multiple worker threads
global clientpool
#globals()['clentpool'] = Queue.Queue(0)
clientpool = Queue.Queue(0)
ts = []
for a in stock_list:
a=a.strip()
clientpool.put(a)
for b in xrange(20):
t = graph_runs(stock,endday,conf_dir)
t.start()
ts.append(t)
for t in ts:
if t:t.join()
def export_stock_symbol(db_cursor,cx):
"""
    Export the mapping between stock symbols and their names.
"""
sql = "select * from stock order by stock_symbol"
#print "DEBUG sql = %s" % sql
db_cursor.execute(sql)
rs = db_cursor.fetchall()
# print title
if len(rs) == 0 : return
sharenames = "/home/hua.fu/.gt/sharenames"
os.system("rm -fr %s" % sharenames)
share_FILE = open(sharenames,"w")
for item in rs:
title = item[2]
symbol = item[3]
country = item[4]
if title:
stock_map = symbol + "\t" + title
share_FILE.writelines(stock_map + "\n")
else:
stock_map = symbol + "\t" + "No title"
share_FILE.writelines(stock_map + "\n")
share_FILE.close()
def get_stock_list(db_cursor,cx,stock_region='US'):
"""
    Export the list of stocks that meet the criteria.
"""
sql =""
time_now = int(time.time())
sql = "select * from stock where stock_tradedb_lastupdate_time <= %s and stock_country = '%s' order by stock_symbol" % (time_now,stock_region)
#sql = "select * from stock where stock_tradedb_lastupdate_time <= %s and stock_country = '%s' ORDER BY RANDOM() limit 10" % (time_now,stock_region)
db_cursor.execute(sql)
rs = db_cursor.fetchall()
if len(rs) == 0 : return
stock_list = []
for item in rs:
symbol = item[3]
stock_list.append(symbol)
return stock_list
def connect_trade_db(symbol):
"""
    Connect to the historical trade database.
"""
cache_dir = "%s/trade_db" % (base_dir)
symbol = symbol.upper()
db_file = "%s/%s" % (cache_dir,symbol)
if os.path.isfile(db_file) and os.path.getsize(db_file) != 0:
cx = sqlite.connect(db_file)
cu = cx.cursor()
cx.text_factory=str
return (cu,cx)
else:
print "Symbol = %s ,Not find trade data,please check" % symbol
return (False,False)
def sort_stock(stock_data):
""" 排序 """
top_data = {}
s_data = {}
stock_list = []
pool_data = []
    # sort all stocks
if action == "long":
sorted_list = sorted(stock_data, key=lambda result: Decimal(result[1]),reverse=True)
if action == "short":
sorted_list = sorted(stock_data, key=lambda result: Decimal(result[1]))
for item in sorted_list:
symbol = item[0]
stock_percent = item[1]
stock_list.append(symbol)
tmp_str = "%s,%s" % (symbol,stock_percent)
tmp_data = tuple(tmp_str.split(","))
pool_data.append(tmp_data)
return (stock_list,pool_data)
def get_indicator_output(symbol,dayago=65,sma=50,timeframe='day'):
"""
"""
symbol = symbol.upper()
##if DEBUG: print "DEBUG : CURRENT pROCESS SYMBOL=%s" % symbol
#print "DEBUG : CURRENT pROCESS SYMBOL=%s" % symbol
os.chdir('%s' % script_dir)
if timeframe == 'day':
cmd = "perl display_indicator.pl --timeframe=%s --nb=%s \
--tight I:SMA %s %s|grep -P '\[\d+-\d+\-\d+]*.*'" % (timeframe,dayago,symbol,sma)
if timeframe == 'week':
cmd = "perl display_indicator.pl --timeframe=%s --nb=%s \
--tight I:SMA %s %s|grep -P '\[\d+-\d+]*.*'" % (timeframe,dayago,symbol,sma)
if timeframe == 'month':
cmd = "perl display_indicator.pl --timeframe=%s --nb=%s \
--tight I:SMA %s %s| grep -P '\[\d+\/\d+]*.*'" % (timeframe,dayago,symbol,sma)
#print "DEBUG indicator_cmd = %s" % cmd
(status,output) = commands.getstatusoutput(cmd)
if status != 0:
return False
ind_list = output.split("\n")
base_point = ind_list[0].split("=")[1].strip()
if base_point !="":
last_point = ind_list[len(ind_list)-1].split("=")[1].strip()
change = (Decimal(last_point) - Decimal(base_point))/Decimal(base_point) * 100
change = Decimal(str(round(change, 3)))
else:
change = 0
return change
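# --- Editor's note: illustrative call, not part of the original script ---
# ("MSFT" is a placeholder symbol)
#   get_indicator_output("MSFT", dayago=65, sma=50, timeframe="day")
# returns a Decimal such as Decimal('3.215') holding the SMA's percentage change,
# False if the GeniusTrader command fails, or 0 if no base point was parsed.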
def scan_stock(conf_dir,stock_list,signal_file):
"""
"""
graph_dir = script_dir
ret_list = []
if len(stock_list) == 0 : return ret_list
timeframe="day"
if signal_file.find("week") != -1:
timeframe = "week"
elif signal_file.find("month") != -1:
timeframe = "month"
sig_file = "%s/%s" % (conf_dir,signal_file)
filename = "/dev/shm/%s" % (signal_file)
stock_FILE = open(filename,"w")
for symbol in stock_list:
stock_FILE.writelines(symbol+ "\n")
stock_FILE.close()
cmd = "cd %s;./scan.pl --nbprocess=4 --timeframe %s %s 'today' %s |sed -e '/^$/d' | sed -e '/Signal/d'" % (graph_dir,timeframe,filename,sig_file)
res = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)
error_log = res.stderr.readlines()
if len(error_log) !=0:
print "CMD = %s" % cmd
print "DEBUG : stderr = %s " % res.stderr.readlines()
for line in res.stdout.readlines():
symbol = line.split('\t')[0]
ret_list.append(symbol.strip())
return ret_list
def create_stock_list(stock_list,stock_region='US'):
"""
    Write the list of qualifying stocks out to a file.
"""
filename = "/home/hua.fu/geniustrader/Scripts/stock_%s_list" % (stock_region)
stock_FILE = open(filename,"w")
for symbol in stock_list:
stock_FILE.writelines(symbol+ "\n")
stock_FILE.close()
def filter_by_indicator(stock_list):
"""
    Use technical indicators to screen for stocks that are stronger than the market in the short term.
"""
if action == "long":
filter_signal_file = ['buy_filter_new_high']
if action == "short":
filter_signal_file = ['sell_filter_new_low']
for sig_file in filter_signal_file:
stock_list = scan_stock(conf_dir,stock_list,sig_file)
if DEBUG: print "DEBUG: After Scan = %s" % len(stock_list)
return stock_list
def update_filter_stockdb(stock_db,data,country):
"""
    Save the qualifying stocks into the watch pool for follow-up.
"""
new_list = []
(pool_db_cursor,pool_cx) = connect_pool_db(stock_db)
pool_cx.text_factory=str
lastupdate_time = int(time.time())
for symbol in data:
try:
sql_cmd = 'insert into stock values(NULL,"%s","%s",%s,%s)' % (symbol,country,lastupdate_time,lastupdate_time)
pool_db_cursor.execute(sql_cmd)
new_list.append(symbol)
except sqlite.IntegrityError,e:
sql_cmd = "update stock set lastupdate_time = '%s' where symbol='%s'" % (lastupdate_time,symbol)
pool_db_cursor.execute(sql_cmd)
except Exception as inst:
print "exception type = %s,Error = %s" % (type(inst),inst)
pool_cx.commit()
def get_buy_point(stock_db,buy_signal,country):
"""
    Scan the stock-pool database and find daily-chart buy points.
"""
(db_cursor,pool_cx) = connect_pool_db(stock_db)
pool_cx.text_factory=str
    # fetch the symbol list
sql = "select symbol from stock where country='%s'" % (country)
db_cursor.execute(sql)
rs = db_cursor.fetchall()
if len(rs) == 0 : return
stock_list = []
for item in rs:
symbol = item[0]
stock_list.append(symbol)
pool_cx.close()
    # scan the list
scan_list = scan_stock(conf_dir,stock_list,buy_signal)
if DEBUG: print "DEBUG: Buy Point Signal = %s ,After Scan = %s" % (buy_signal,len(scan_list))
return scan_list
def compare_to_spy(wating_stock_list,peroid_offet_list,country):
"""
    Compare each stock against the broad-market index and keep the ones that are stronger.
"""
cache_file = "%s/tmp/filter_%s" % (base_dir,country)
if not os.path.isfile(cache_file) or (int(time.time()) - int(os.stat(cache_file).st_mtime) >= 86000):
for peroid in peroid_offet_list:
(globals()['day_ago'],globals()['sma_day'],timeframe) = peroid
if DEBUG: print "DEBUG = Before filter count = %s" % len(wating_stock_list)
(long_stock_data,short_data) = calc_stock(wating_stock_list,country,timeframe)
(stock_list,pool_data) = sort_stock(long_stock_data)
if DEBUG: print "DEBUG = After filter count = %s" % len(stock_list)
wating_stock_list = stock_list
fout = open(cache_file, "w")
pickle.dump(stock_list, fout, protocol=0)
fout.close()
return stock_list
elif os.path.isfile(cache_file):
fin = open(cache_file, "r")
data = pickle.load(fin)
fin.close()
return data
def sendmail(msg):
""" send mail function """
SERVER = 'localhost'
FROM = '[email protected]'
TO = ['[email protected]']
SUBJECT = 'Daily Stock Notify Report'
# Prepare actual message
message = """From: %s \nTo: %s\nSubject: %s \n
%s """ % (FROM, ", ".join(TO), SUBJECT, msg)
# Send the mail
try:
#server = smtplib.SMTP(host=SERVER,timeout=5)
server = smtplib.SMTP(host=SERVER)
server.sendmail(FROM, TO, message)
server.quit()
except Exception,e:
print 'Unable to send email ErrorMsg=%s' % e
def usage():
print '''
Usage: create_graph.py [options...]
Options:
-s/--action : long or short
-r/--region : the special region of stock [CHINA|US|HK|TRACK]
-e/--endday : scan stock endday [2011-10-1],default is today
-h/--help : this help info page
-d/--debug : run in debug mode
Example:
# default is checking all stock which in monitor db
monitor_stock.py
# debug special stock
monitor_stock.py -s ras
# setting stock support line and resistance_line
monitor_stock.py -s ras -l 2.44,2.48
# setting stock channel,maybe uptrend or downtrend
monitor_stock.py -s ras -c 2010-07-01,2010-07-02,2010-07-03
'''
def main():
""" main function """
    # Default values for this run
scan_signal_file = "signal_file"
stock_region =""
timeframe ="day"
endday = "today"
#global clientpool,action
global DEBUG
DEBUG = False
global base_index
base_index = {}
base_index['CHINA'] = ["上证指数","000001.SS"]
base_index['US'] = ["标普500","^GSPC"]
global base_dir,action,img_out_dir,script_dir
base_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
img_out_dir = "%s/img_out" % (base_dir)
script_dir = "%s/GeniusTrader/Scripts" % (base_dir)
stock_db = "%s/db/stock_db" % (base_dir)
    # Stock pool database
stock_pool = "%s/db/stock_pool" % (base_dir)
    # Screening periods: (look-back days, SMA length, timeframe)
global day_ago,sma_day,conf_dir
(day_ago,sma_day) = (30,200)
peroid_offet_list = [(30,200,'day'),(30,50,'day')]
conf_dir = "%s/conf" % (base_dir)
template_file = "%s/template/stock_template.html" % (base_dir)
db_cursor = None
cx = None
(db_cursor,cx) = connect_db(stock_db)
cx.text_factory=str
try:
opts, args = getopt.getopt(sys.argv[1:],'dhs:r:e:')
except getopt.GetoptError:
usage()
sys.exit()
for opt, arg in opts:
if opt in ('-h', '--help'):
usage()
sys.exit()
elif opt == '-s':
scan_signal_file = arg
elif opt == '-e':
endday = arg
elif opt == '-d':
DEBUG = True
elif opt == '-r':
stock_region = arg
stock_region = stock_region.upper()
export_stock_symbol(db_cursor,cx)
if not scan_signal_file:
print "please setting long or short using -s"
sys.exit()
if scan_signal_file not in ['long','short']:
print "Input Action Not Correct,Please Check"
sys.exit()
action = scan_signal_file
region = []
if stock_region:
if stock_region not in ['CHINA','US','HK']:
print "Input Region Not Correct,Please Check"
sys.exit()
region.append(stock_region)
else:
region = ['CHINA','US','HK']
os.system("rm -fr /home/hua.fu/geniustrader/output/*")
for country in region:
#stock_list=['A','FSLR']
#create_graph(stock_list,template_file,conf_dir,country,"TESsig_file",endday)
#sys.exit(0)
        # Filter for stocks that are stronger than the broad market
#wating_stock_list = get_stock_list(db_cursor,cx,country)
#stock_list = compare_to_spy(wating_stock_list,peroid_offet_list,country)
#stock_list = stock_list[:50]
#create_graph(stock_list,template_file,conf_dir,country,"all",endday)
#sys.exit(0)
        # Keep stocks whose monthly KDJ crosses at the bottom
wating_stock_list = get_stock_list(db_cursor,cx,country)
stock_list = wating_stock_list
data = filter_by_indicator(stock_list)
create_graph(data,template_file,conf_dir,country,"all",endday)
sys.exit(0)
        # Save the results into the stock pool database
update_filter_stockdb(stock_pool,data,country)
        # Track the daily charts to find buy points
filter_signal_file = ['buy_point_signal_one','buy_point_signal_two','buy_point_signal_three']
for sig_file in filter_signal_file:
stock_list = get_buy_point(stock_pool,sig_file,country)
            # Draw the charts
if len(stock_list) > 0:
create_graph(stock_list,template_file,conf_dir,country,sig_file,endday)
#create_stock_list(stock_list,country)
#sys.exit(0)
if __name__ == "__main__":
main()
#sys.exit(0)
#stocklist = ['A','BAC','FSLR']
#filter_list = scan_stock("/home/hua.fu/it-manager/stock_tech/conf",stocklist,"buy_signal_kdj_cross_month","US")
#print filter_list
#print get_indicator_output('000001.SS',30,30,'day')
#print get_indicator_output('^GSPC',30,30,'week')
#print get_indicator_output('^GSPC',30,30,'month')
#result_data = []
#stocklist = ['A','BAC','FSLR']
#t = ind_runs(-10,result_data,stocklist)
#t.start()
#threading.Thread.join(t)
#print result_data
| lgpl-3.0 | 1,135,593,402,020,305,300 | 31.75 | 152 | 0.548466 | false | 3.197936 | false | false | false |
saulshanabrook/django-dumper | test/models.py | 1 | 2381 | from django.db import models
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
try: # new import added in Django 1.7
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.fields import GenericRelation
except ImportError:
from django.contrib.contenttypes import generic
GenericForeignKey = generic.GenericForeignKey
GenericRelation = generic.GenericRelation
import dumper
class LoggingModel(models.Model):
text = models.CharField(max_length=200)
def __unicode__(self):
return self.text
class SimpleModel(models.Model):
slug = models.CharField(max_length=200, default='slug')
def get_absolute_url(self):
return reverse('simple-detail', kwargs={'slug': self.slug})
def dependent_paths(self):
yield self.get_absolute_url()
for model in self.related_set.all():
yield model.get_absolute_url()
class RelatedModel(models.Model):
slug = models.CharField(max_length=200, default='slug')
related = models.ManyToManyField(SimpleModel, related_name='related_set')
def dependent_paths(self):
yield self.get_absolute_url()
def get_absolute_url(self):
return reverse('related-detail', kwargs={'slug': self.slug})
class GenericRelationModel(models.Model):
slug = models.CharField(max_length=200, default='slug')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def dependent_paths(self):
yield self.content_object.get_absolute_url()
class RelatedToGenericModel(models.Model):
slug = models.CharField(max_length=200, default='slug')
generic_related = GenericRelation(GenericRelationModel)
def get_absolute_url(self):
return reverse('related-to-generic-detail', kwargs={'slug': self.slug})
class GenericRelationNotRegisteredModel(models.Model):
slug = models.CharField(max_length=200, default='slug')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def dependent_paths(self):
pass
dumper.register(SimpleModel)
dumper.register(RelatedModel)
dumper.register(GenericRelationModel)
| mit | 4,475,345,521,856,068,000 | 30.328947 | 79 | 0.730365 | false | 4.112263 | false | false | false |
odoousers2014/odoo-addons-supplier_price | stock_working_days/__openerp__.py | 1 | 2769 | # -*- coding: utf8 -*-
#
# Copyright (C) 2014 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
{
'name': 'Stock Move Scheduling on Working Days',
'version': '0.1',
'author': 'NDP Systèmes',
'maintainer': 'NDP Systèmes',
'category': 'Warehouse',
'depends': ['resource','procurement','stock'],
'description': """
Stock Move Scheduling on Working Days
=====================================
This module enables scheduling of procurements on working days defined by resources and their associated calendars.
Each warehouse can have its own resource and associated calendar representing its opening days. If a warehouse is not
given a resource, then the system falls back to a company default calendar.
When a procurement needs to be scheduled by a stock move, only the open days defined by the
applicable calendar are counted.
This module also allows procurement rules of type "move" to be triggered only on fixed days of the week. This is
particularly useful for scheduling chained moves where some activities are only performed on certain days (e.g.
transport between 2 warehouses done only twice a week, on Mondays and Thursdays).
Notes:
------
- When no applicable calendar is found, the module's default calendar is used which sets working days from Monday to
Friday. This default calendar can be changed by authorized users.
- For a given procurement, the applicable warehouse is the warehouse of the location of the procurement. It falls back
on the warehouse of the procurement itself only if no location is defined. This is to handle correctly
inter-warehouse procurement with chained moves where the warehouse of the procurement is the warehouse of the end of
the chain.
""",
'website': 'http://www.ndp-systemes.fr',
'data': [
'security/ir.model.access.csv',
'stock_working_days_data.xml',
'stock_working_days_view.xml',
],
'demo': [
'stock_working_days_demo.xml',
],
'test': [],
'installable': True,
'auto_install': False,
'license': 'AGPL-3',
'application': False,
}
| agpl-3.0 | 5,371,611,125,826,309,000 | 40.909091 | 118 | 0.710412 | false | 4.073638 | false | false | false |
cmap/cmapPy | cmapPy/pandasGEXpress/GCToo.py | 1 | 12504 | """
DATA:
-----------------------------
| | cid |
-----------------------------
| | |
|r | |
|i | data |
|d | |
| | |
-----------------------------
ROW METADATA:
--------------------------
|id| rhd |
--------------------------
| | |
|r | |
|i | row_metadata |
|d | |
| | |
--------------------------
COLUMN METADATA:
N.B. The df is transposed from how it looks in a gct file.
---------------------
|id| chd |
---------------------
| | |
| | |
| | |
|c | |
|i | col_metadata |
|d | |
| | |
| | |
| | |
---------------------
N.B. rids, cids, rhds, and chds must be:
- unique
- matching in both content & order everywhere they're found
"""
import numpy as np
import pandas as pd
import logging
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
__authors__ = 'Oana Enache, Lev Litichevskiy, Dave Lahr'
__email__ = '[email protected]'
class GCToo(object):
"""Class representing parsed gct(x) objects as pandas dataframes.
Contains 3 component dataframes (row_metadata_df, column_metadata_df,
and data_df) as well as an assembly of these 3 into a multi index df
that provides an alternate way of selecting data.
"""
def __init__(self, data_df, row_metadata_df=None, col_metadata_df=None,
src=None, version=None, make_multiindex=False, logger_name=setup_logger.LOGGER_NAME):
self.logger = logging.getLogger(logger_name)
self.src = src
self.version = version
# Check data_df before setting
self.check_df(data_df)
self.data_df = data_df
if row_metadata_df is None:
self.row_metadata_df = pd.DataFrame(index=data_df.index)
else:
# Lots of checks will occur when this attribute is set (see __setattr__ below)
self.row_metadata_df = row_metadata_df
if col_metadata_df is None:
self.col_metadata_df = pd.DataFrame(index=data_df.columns)
else:
# Lots of checks will occur when this attribute is set (see __setattr__ below)
self.col_metadata_df = col_metadata_df
# Create multi_index_df if explicitly requested
if make_multiindex:
self.assemble_multi_index_df()
else:
self.multi_index_df = None
# This GCToo object is now initialized
self._initialized = True
def __setattr__(self, name, value):
# Make sure row/col metadata agree with data_df before setting
if name in ["row_metadata_df", "col_metadata_df"]:
self.check_df(value)
if name == "row_metadata_df":
self.id_match_check(self.data_df, value, "row")
value = value.reindex(self.data_df.index)
super(GCToo, self).__setattr__(name, value)
else:
self.id_match_check(self.data_df, value, "col")
value = value.reindex(self.data_df.columns)
super(GCToo, self).__setattr__(name, value)
# When reassigning data_df after initialization, reindex row/col metadata if necessary
# N.B. Need to check if _initialized is present before checking if it's true, or code will break
elif name == "data_df" and "_initialized" in self.__dict__ and self._initialized:
self.id_match_check(value, self.row_metadata_df, "row")
self.id_match_check(value, self.col_metadata_df, "col")
super(GCToo, self).__setattr__("row_metadata_df", self.row_metadata_df.reindex(value.index))
super(GCToo, self).__setattr__("col_metadata_df", self.col_metadata_df.reindex(value.columns))
super(GCToo, self).__setattr__(name, value)
# Can't reassign multi_index_df after initialization
elif name == "multi_index_df" and "_initialized" in self.__dict__ and self._initialized:
msg = ("Cannot reassign value of multi_index_df attribute; " +
"if you'd like a new multiindex df, please create a new GCToo instance" +
"with appropriate data_df, row_metadata_df, and col_metadata_df fields.")
self.logger.error(msg)
raise Exception("GCToo.__setattr__: " + msg)
# Otherwise, use the normal __setattr__ method
else:
super(GCToo, self).__setattr__(name, value)
def check_df(self, df):
"""
Verifies that df is a pandas DataFrame instance and
that its index and column values are unique.
"""
if isinstance(df, pd.DataFrame):
if not df.index.is_unique:
repeats = df.index[df.index.duplicated()].values
msg = "Index values must be unique but aren't. The following entries appear more than once: {}".format(repeats)
self.logger.error(msg)
raise Exception("GCToo GCToo.check_df " + msg)
if not df.columns.is_unique:
repeats = df.columns[df.columns.duplicated()].values
msg = "Columns values must be unique but aren't. The following entries appear more than once: {}".format(repeats)
raise Exception("GCToo GCToo.check_df " + msg)
else:
return True
else:
msg = "expected Pandas DataFrame, got something else: {} of type: {}".format(df, type(df))
self.logger.error(msg)
raise Exception("GCToo GCToo.check_df " + msg)
def id_match_check(self, data_df, meta_df, dim):
"""
Verifies that id values match between:
- row case: index of data_df & index of row metadata
- col case: columns of data_df & index of column metadata
"""
if dim == "row":
if len(data_df.index) == len(meta_df.index) and set(data_df.index) == set(meta_df.index):
return True
else:
msg = ("The rids are inconsistent between data_df and row_metadata_df.\n" +
"data_df.index.values:\n{}\nrow_metadata_df.index.values:\n{}").format(data_df.index.values, meta_df.index.values)
self.logger.error(msg)
raise Exception("GCToo GCToo.id_match_check " + msg)
elif dim == "col":
if len(data_df.columns) == len(meta_df.index) and set(data_df.columns) == set(meta_df.index):
return True
else:
msg = ("The cids are inconsistent between data_df and col_metadata_df.\n" +
"data_df.columns.values:\n{}\ncol_metadata_df.index.values:\n{}").format(data_df.columns.values, meta_df.index.values)
self.logger.error(msg)
raise Exception("GCToo GCToo.id_match_check " + msg)
def __str__(self):
"""Prints a string representation of a GCToo object."""
version = "{}\n".format(self.version)
source = "src: {}\n".format(self.src)
data = "data_df: [{} rows x {} columns]\n".format(
self.data_df.shape[0], self.data_df.shape[1])
row_meta = "row_metadata_df: [{} rows x {} columns]\n".format(
self.row_metadata_df.shape[0], self.row_metadata_df.shape[1])
col_meta = "col_metadata_df: [{} rows x {} columns]".format(
self.col_metadata_df.shape[0], self.col_metadata_df.shape[1])
full_string = (version + source + data + row_meta + col_meta)
return full_string
def assemble_multi_index_df(self):
"""Assembles three component dataframes into a multiindex dataframe.
Sets the result to self.multi_index_df.
IMPORTANT: Cross-section ("xs") is the best command for selecting
data. Be sure to use the flag "drop_level=False" with this command,
or else the dataframe that is returned will not have the same
metadata as the input.
N.B. "level" means metadata header.
N.B. "axis=1" indicates column annotations.
Examples:
1) Select the probe with pr_lua_id="LUA-3404":
lua3404_df = multi_index_df.xs("LUA-3404", level="pr_lua_id", drop_level=False)
2) Select all DMSO samples:
DMSO_df = multi_index_df.xs("DMSO", level="pert_iname", axis=1, drop_level=False)
"""
#prepare row index
self.logger.debug("Row metadata shape: {}".format(self.row_metadata_df.shape))
self.logger.debug("Is empty? {}".format(self.row_metadata_df.empty))
row_copy = pd.DataFrame(self.row_metadata_df.index) if self.row_metadata_df.empty else self.row_metadata_df.copy()
row_copy["rid"] = row_copy.index
row_index = pd.MultiIndex.from_arrays(row_copy.T.values, names=row_copy.columns)
#prepare column index
self.logger.debug("Col metadata shape: {}".format(self.col_metadata_df.shape))
col_copy = pd.DataFrame(self.col_metadata_df.index) if self.col_metadata_df.empty else self.col_metadata_df.copy()
col_copy["cid"] = col_copy.index
transposed_col_metadata = col_copy.T
col_index = pd.MultiIndex.from_arrays(transposed_col_metadata.values, names=transposed_col_metadata.index)
# Create multi index dataframe using the values of data_df and the indexes created above
self.logger.debug("Data df shape: {}".format(self.data_df.shape))
self.multi_index_df = pd.DataFrame(data=self.data_df.values, index=row_index, columns=col_index)
def multi_index_df_to_component_dfs(multi_index_df, rid="rid", cid="cid"):
""" Convert a multi-index df into 3 component dfs. """
# Id level of the multiindex will become the index
rids = list(multi_index_df.index.get_level_values(rid))
cids = list(multi_index_df.columns.get_level_values(cid))
    # It's possible that the index and/or columns of multi_index_df are not
    # actually multi-index; need to check for this and whether there is more than one level in the index (Python 3)
if isinstance(multi_index_df.index, pd.MultiIndex):
# check if there are more than one levels in index (python3)
if len(multi_index_df.index.names) > 1:
# If so, drop rid because it won't go into the body of the metadata
mi_df_index = multi_index_df.index.droplevel(rid)
# Names of the multiindex levels become the headers
rhds = list(mi_df_index.names)
# Assemble metadata values
row_metadata = np.array([mi_df_index.get_level_values(level).values for level in list(rhds)]).T
# if there is one level in index (python3), then rhds and row metadata should be empty
else:
rhds = []
row_metadata = []
# If the index is not multi-index, then rhds and row metadata should be empty
else:
rhds = []
row_metadata = []
# Check if columns of multi_index_df are in fact multi-index
if isinstance(multi_index_df.columns, pd.MultiIndex):
# Check if there are more than one levels in columns(python3)
if len(multi_index_df.columns.names) > 1:
# If so, drop cid because it won't go into the body of the metadata
mi_df_columns = multi_index_df.columns.droplevel(cid)
# Names of the multiindex levels become the headers
chds = list(mi_df_columns.names)
# Assemble metadata values
col_metadata = np.array([mi_df_columns.get_level_values(level).values for level in list(chds)]).T
# If there is one level in columns (python3), then rhds and row metadata should be empty
else:
chds = []
col_metadata = []
# If the columns are not multi-index, then rhds and row metadata should be empty
else:
chds = []
col_metadata = []
# Create component dfs
row_metadata_df = pd.DataFrame.from_records(row_metadata, index=pd.Index(rids, name="rid"), columns=pd.Index(rhds, name="rhd"))
col_metadata_df = pd.DataFrame.from_records(col_metadata, index=pd.Index(cids, name="cid"), columns=pd.Index(chds, name="chd"))
data_df = pd.DataFrame(multi_index_df.values, index=pd.Index(rids, name="rid"), columns=pd.Index(cids, name="cid"))
return data_df, row_metadata_df, col_metadata_df
| bsd-3-clause | -3,186,919,485,496,119,300 | 43.028169 | 135 | 0.583333 | false | 3.710386 | false | false | false |
arenadata/ambari | ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka_upgrade.py | 2 | 1531 |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management import *
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions import Direction
from resource_management.libraries.functions.format import format
from resource_management.libraries.script.script import Script
class KafkaUpgrade(Script):
def copy_kerberos_param(self,env):
import params
kafka_run_path = "/usr/iop/4.1.0.0/kafka/bin/kafka-run-class.sh"
if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:
Execute(("sed", "-i", "s/\$CLASSPATH \$KAFKA_OPTS/\$CLASSPATH \$KAFKA_OPTS \$KAFKA_KERBEROS_PARAMS/", kafka_run_path), logoutput=True)
if __name__ == "__main__":
KafkaUpgrade().execute()
| apache-2.0 | -4,120,357,280,770,025,500 | 39.289474 | 140 | 0.772044 | false | 3.895674 | false | false | false |
akretion/odoo | addons/website_sale_stock/models/sale_order.py | 17 | 2582 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, api, fields
from odoo.tools.translate import _
class SaleOrder(models.Model):
_inherit = 'sale.order'
warning_stock = fields.Char('Warning')
def _cart_update(self, product_id=None, line_id=None, add_qty=0, set_qty=0, **kwargs):
values = super(SaleOrder, self)._cart_update(product_id, line_id, add_qty, set_qty, **kwargs)
line_id = values.get('line_id')
for line in self.order_line:
if line.product_id.type == 'product' and line.product_id.inventory_availability in ['always', 'threshold']:
cart_qty = sum(self.order_line.filtered(lambda p: p.product_id.id == line.product_id.id).mapped('product_uom_qty'))
if cart_qty > line.product_id.virtual_available and (line_id == line.id):
qty = line.product_id.virtual_available - cart_qty
new_val = super(SaleOrder, self)._cart_update(line.product_id.id, line.id, qty, 0, **kwargs)
values.update(new_val)
# Make sure line still exists, it may have been deleted in super()_cartupdate because qty can be <= 0
if line.exists() and new_val['quantity']:
line.warning_stock = _('You ask for %s products but only %s is available') % (cart_qty, new_val['quantity'])
values['warning'] = line.warning_stock
else:
self.warning_stock = _("Some products became unavailable and your cart has been updated. We're sorry for the inconvenience.")
values['warning'] = self.warning_stock
return values
@api.multi
def _website_product_id_change(self, order_id, product_id, qty=0):
res = super(SaleOrder, self)._website_product_id_change(order_id, product_id, qty=qty)
product = self.env['product.product'].browse(product_id)
res['customer_lead'] = product.sale_delay
return res
@api.multi
def _get_stock_warning(self, clear=True):
self.ensure_one()
warn = self.warning_stock
if clear:
self.warning_stock = ''
return warn
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
warning_stock = fields.Char('Warning')
@api.multi
def _get_stock_warning(self, clear=True):
self.ensure_one()
warn = self.warning_stock
if clear:
self.warning_stock = ''
return warn
| agpl-3.0 | 7,518,949,106,520,849,000 | 41.327869 | 149 | 0.599923 | false | 3.797059 | false | false | false |
qedsoftware/commcare-hq | corehq/apps/products/models.py | 1 | 10523 | from datetime import datetime
from decimal import Decimal
import itertools
import jsonfield
from django.db import models
from django.utils.translation import ugettext as _
from couchdbkit.exceptions import ResourceNotFound
from dimagi.ext.couchdbkit import (
Document,
StringProperty,
DecimalProperty,
DictProperty,
BooleanProperty,
DateTimeProperty,
)
from dimagi.utils.couch.database import iter_docs
# move these too
from corehq.apps.commtrack.exceptions import InvalidProductException, DuplicateProductCodeException
class Product(Document):
"""
A product, e.g. "coartem" or "tylenol"
"""
domain = StringProperty()
name = StringProperty()
unit = StringProperty()
code_ = StringProperty() # todo: why the hell is this code_ and not code
description = StringProperty()
category = StringProperty()
program_id = StringProperty()
cost = DecimalProperty()
product_data = DictProperty()
is_archived = BooleanProperty(default=False)
last_modified = DateTimeProperty()
@classmethod
def wrap(cls, data):
from corehq.apps.groups.models import dt_no_Z_re
# If "Z" is missing because of the Aug 2014 migration, then add it.
# cf. Group class
last_modified = data.get('last_modified')
if last_modified and dt_no_Z_re.match(last_modified):
data['last_modified'] += 'Z'
return super(Product, cls).wrap(data)
@classmethod
def save_docs(cls, docs, use_uuids=True, all_or_nothing=False, codes_by_domain=None):
from corehq.apps.commtrack.util import generate_code
codes_by_domain = codes_by_domain or {}
def get_codes(domain):
if domain not in codes_by_domain:
codes_by_domain[domain] = SQLProduct.objects.filter(domain=domain)\
.values_list('code', flat=True).distinct()
return codes_by_domain[domain]
for doc in docs:
doc.last_modified = datetime.utcnow()
if not doc['code_']:
doc['code_'] = generate_code(
doc['name'],
get_codes(doc['domain'])
)
super(Product, cls).save_docs(docs, use_uuids, all_or_nothing)
bulk_save = save_docs
def sync_to_sql(self):
properties_to_sync = [
('product_id', '_id'),
'domain',
'name',
'is_archived',
('code', 'code_'),
'description',
'category',
'program_id',
'cost',
('units', 'unit'),
'product_data',
]
# sync properties to SQL version
sql_product, _ = SQLProduct.objects.get_or_create(
product_id=self._id
)
for prop in properties_to_sync:
if isinstance(prop, tuple):
sql_prop, couch_prop = prop
else:
sql_prop = couch_prop = prop
if hasattr(self, couch_prop):
setattr(sql_product, sql_prop, getattr(self, couch_prop))
sql_product.save()
def save(self, *args, **kwargs):
"""
Saving a couch version of Product will trigger
one way syncing to the SQLProduct version of this
product.
"""
# mark modified time stamp for selective syncing
self.last_modified = datetime.utcnow()
# generate code if user didn't specify one
if not self.code:
from corehq.apps.commtrack.util import generate_code
self.code = generate_code(
self.name,
SQLProduct.objects
.filter(domain=self.domain)
.values_list('code', flat=True)
.distinct()
)
result = super(Product, self).save(*args, **kwargs)
self.sync_to_sql()
return result
@property
def code(self):
return self.code_
@code.setter
def code(self, val):
self.code_ = val.lower() if val else None
@classmethod
def get_by_code(cls, domain, code):
if not code:
return None
try:
sql_product = SQLProduct.objects.get(domain=domain, code__iexact=code)
except SQLProduct.DoesNotExist:
return None
else:
return cls.get(sql_product.product_id)
@classmethod
def by_domain(cls, domain, wrap=True, include_archived=False):
queryset = SQLProduct.objects.filter(domain=domain)
if not include_archived:
queryset = queryset.filter(is_archived=False)
return list(queryset.couch_products(wrapped=wrap))
@classmethod
def ids_by_domain(cls, domain):
return list(SQLProduct.objects.filter(domain=domain).product_ids())
@classmethod
def count_by_domain(cls, domain):
"""
Gets count of products in a domain
"""
# todo: we should add a reduce so we can get this out of couch
return len(cls.ids_by_domain(domain))
@classmethod
def _export_attrs(cls):
return [
('name', unicode),
('unit', unicode),
'description',
'category',
('program_id', str),
('cost', lambda a: Decimal(a) if a else None),
]
def to_dict(self):
from corehq.apps.commtrack.util import encode_if_needed
product_dict = {}
product_dict['id'] = self._id
product_dict['product_id'] = self.code_
for attr in self._export_attrs():
real_attr = attr[0] if isinstance(attr, tuple) else attr
product_dict[real_attr] = encode_if_needed(
getattr(self, real_attr)
)
return product_dict
def custom_property_dict(self):
from corehq.apps.commtrack.util import encode_if_needed
property_dict = {}
for prop, val in self.product_data.iteritems():
property_dict['data: ' + prop] = encode_if_needed(val)
return property_dict
def archive(self):
"""
Mark a product as archived. This will cause it (and its data)
to not show up in default Couch and SQL views.
"""
self.is_archived = True
self.save()
def unarchive(self):
"""
Unarchive a product, causing it (and its data) to show
up in Couch and SQL views again.
"""
if self.code:
if SQLProduct.objects.filter(domain=self.domain, code=self.code, is_archived=False).exists():
raise DuplicateProductCodeException()
self.is_archived = False
self.save()
@classmethod
def from_excel(cls, row, custom_data_validator):
if not row:
return None
id = row.get('id')
if id:
try:
p = cls.get(id)
except ResourceNotFound:
raise InvalidProductException(
_("Product with ID '{product_id}' could not be found!").format(product_id=id)
)
else:
p = cls()
p.code = str(row.get('product_id') or '')
for attr in cls._export_attrs():
key = attr[0] if isinstance(attr, tuple) else attr
if key in row:
val = row[key]
if val is None:
val = ''
if isinstance(attr, tuple):
val = attr[1](val)
setattr(p, key, val)
else:
break
if not p.code:
raise InvalidProductException(_('Product ID is a required field and cannot be blank!'))
if not p.name:
raise InvalidProductException(_('Product name is a required field and cannot be blank!'))
custom_data = row.get('data', {})
error = custom_data_validator(custom_data)
if error:
raise InvalidProductException(error)
p.product_data = custom_data
p.product_data.update(row.get('uncategorized_data', {}))
return p
class ProductQueriesMixin(object):
def product_ids(self):
return self.values_list('product_id', flat=True)
def couch_products(self, wrapped=True):
"""
Returns the couch products corresponding to this queryset.
"""
ids = self.product_ids()
products = iter_docs(Product.get_db(), ids)
if wrapped:
return itertools.imap(Product.wrap, products)
return products
class ProductQuerySet(ProductQueriesMixin, models.query.QuerySet):
pass
class ProductManager(ProductQueriesMixin, models.Manager):
def get_queryset(self):
return ProductQuerySet(self.model, using=self._db)
class OnlyActiveProductManager(ProductManager):
def get_queryset(self):
return super(OnlyActiveProductManager, self).get_queryset().filter(is_archived=False)
class SQLProduct(models.Model):
"""
A SQL based clone of couch Products.
This is used to efficiently filter StockState and other
SQL based queries to exclude data for archived products.
"""
domain = models.CharField(max_length=255, db_index=True)
product_id = models.CharField(max_length=100, db_index=True, unique=True)
name = models.CharField(max_length=100, null=True)
is_archived = models.BooleanField(default=False)
code = models.CharField(max_length=100, default='', null=True)
description = models.TextField(null=True, default='')
category = models.CharField(max_length=100, null=True, default='')
program_id = models.CharField(max_length=100, null=True, default='')
cost = models.DecimalField(max_digits=20, decimal_places=5, null=True)
units = models.CharField(max_length=100, null=True, default='')
product_data = jsonfield.JSONField(
default=dict,
)
created_at = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
objects = ProductManager()
active_objects = OnlyActiveProductManager()
def __unicode__(self):
return u"{} ({})".format(self.name, self.domain)
def __repr__(self):
return "<SQLProduct(domain=%s, name=%s)>" % (
self.domain,
self.name
)
@classmethod
def by_domain(cls, domain):
return cls.objects.filter(domain=domain).all()
@property
def get_id(self):
return self.product_id
class Meta:
app_label = 'products'
| bsd-3-clause | -8,021,989,213,573,564,000 | 29.501449 | 105 | 0.588235 | false | 4.123433 | false | false | false |
antoinecarme/pyaf | tests/codegen/test_air_passengers_code_gen.py | 1 | 1297 | import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
import pyaf.CodeGen.TS_CodeGenerator as tscodegen
b1 = tsds.load_airline_passengers()
df = b1.mPastData
df.head()
H = b1.mHorizon;
N = df.shape[0];
for n in range(2*H, N , 10):
df1 = df.head(n).copy();
lEngine = autof.cForecastEngine()
lEngine
lEngine.mOptions.mEnableARModels = False;
# lEngine.mOptions.mDebugCycles = False;
lEngine.train(df1 , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
dfapp_in = df1.copy();
dfapp_in.tail()
# H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
lForecastColumnName = b1.mSignalVar + '_Forecast'
Forecast_DF = dfapp_out;
# [[b1.mTimeVar , b1.mSignalVar, lForecastColumnName , lForecastColumnName + '_Lower_Bound', lForecastColumnName + '_Upper_Bound' ]]
print(Forecast_DF.info())
# print("Forecasts_HEAD\n" , Forecast_DF.head(2*H).values);
# print("Forecasts_TAIL\n" , Forecast_DF.tail(2*H).values);
lCodeGenerator = tscodegen.cTimeSeriesCodeGenerator();
lSQL = lCodeGenerator.testGeneration(lEngine);
| bsd-3-clause | 8,247,578,394,566,682,000 | 28.477273 | 137 | 0.692367 | false | 2.783262 | false | false | false |
mateoqac/unqTip | gui/views/qDesigner/boardOptionWindow.py | 4 | 3806 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'boardOptions.ui'
#
# Created: Fri Oct 4 12:49:10 2013
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.setWindowModality(QtCore.Qt.WindowModal)
Dialog.resize(600, 400)
#Dialog.setMaximumSize(QtCore.QSize(400, 400))
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
#self.buttonBox.setGeometry(QtCore.QRect(60, 260, 251, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.widget = QtGui.QWidget(Dialog)
#self.widget.setGeometry(QtCore.QRect(50, 30, 300, 300))
self.widget.setObjectName(_fromUtf8("widget"))
self.verticalLayout = QtGui.QVBoxLayout(self.widget)
#self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label = QtGui.QLabel(self.widget)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
self.comboBox = QtGui.QComboBox(self.widget)
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.horizontalLayout.addWidget(self.comboBox)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label_2 = QtGui.QLabel(self.widget)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout_2.addWidget(self.label_2)
self.comboBox_2 = QtGui.QComboBox(self.widget)
self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
self.horizontalLayout_2.addWidget(self.comboBox_2)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.label_3 = QtGui.QLabel(self.widget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_3.addWidget(self.label_3)
self.comboBox_3 = QtGui.QComboBox(self.widget)
self.comboBox_3.setObjectName(_fromUtf8("comboBox_3"))
self.horizontalLayout_3.addWidget(self.comboBox_3)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.comboBox.setMinimumWidth(250)
self.comboBox_2.setMinimumWidth(250)
self.comboBox_3.setMinimumWidth(250)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Dialog", "Balls", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Dialog", "Size", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("Dialog", "Head", None, QtGui.QApplication.UnicodeUTF8)) | gpl-3.0 | -383,792,218,594,159,550 | 49.76 | 117 | 0.707042 | false | 3.939959 | false | false | false |
ASMlover/study | python/game-idea/main.py | 1 | 2866 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2015 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import trigger as tm
class Space(object):
def __init__(self):
pass
def monster_count(self):
return 0
class TriggerManager(object):
def __init__(self, space=None):
self.space = space
self.triggers = {}
self.activate_triggers = set()
def register(self, trigger_no, infos):
trigger_name = 'Trigger%d' % trigger_no
trigger_type = getattr(tm, trigger_name)
if trigger_type:
trigger = trigger_type(self.space, infos)
self.triggers[trigger_no] = trigger
if trigger.activatiable():
self.activate_triggers.add(trigger_no)
def unregister(self, trigger_no):
self.triggers.pop(trigger_no, None)
def on_event_notify(self, notify, *args):
completed_triggers = []
for trigger_no in self.activate_triggers:
trigger = self.triggers.get(trigger_no, None)
if not trigger:
continue
on_event = getattr(trigger, notify, None)
if on_event:
on_event(*args)
if trigger.is_completed():
completed_triggers.append(trigger_no)
[self.activate_triggers.discard(no) for no in completed_triggers]
if __name__ == '__main__':
space = Space()
trigger_mgr = TriggerManager(space)
trigger_mgr.register(1101, {'cond': 0, 'action': 'all monsters dead !!!'})
trigger_mgr.on_event_notify('on_monster_die')
| bsd-2-clause | -7,980,078,939,381,923,000 | 35.74359 | 78 | 0.675506 | false | 4.183942 | false | false | false |
Sispheor/piclodio3 | back/tests/test_views/test_alarm_clock_view/base.py | 1 | 1038 | from rest_framework.test import APITestCase
from restapi.models import WebRadio, AlarmClock
class Base(APITestCase):
def setUp(self):
super(Base, self).setUp()
self.test_webradio = WebRadio.objects.create(name="test", url="http://test.com")
self.test_webradio2 = WebRadio.objects.create(name="test2", url="http://test2.com")
self.test_alarm = AlarmClock.objects.create(name="alarm1",
monday=True,
hour=8,
minute=20,
webradio=self.test_webradio)
self.test_alarm2 = AlarmClock.objects.create(name="alarm2",
wednesday=True,
hour=8,
minute=20,
webradio=self.test_webradio2)
| mit | -5,475,881,533,277,622,000 | 48.428571 | 91 | 0.424855 | false | 5.1133 | true | false | false |
icoxfog417/sayuri-server | sayuri/rekognition/client.py | 1 | 1263 | import urllib.request
import urllib.parse
import json
class Client(object):
API_HOME = "https://rekognition.com/func/api/"
def __init__(self, api_key, api_secret, name_space="demo_project", user_id="demo_user"):
self.api_key = api_key
self.api_secret = api_secret
self.name_space = name_space
self.user_id = user_id
def face_recognize(self, image_url, **kwargs):
parameters = self.__make_initial_parameters()
jobs = "face_recognize"
for op in kwargs:
if kwargs[op]:
jobs += ("_" + op)
parameters.update({"jobs": jobs, "base64": image_url})
return self.__request(parameters)
def __make_initial_parameters(self):
return {
"api_key": self.api_key,
"api_secret": self.api_secret,
"name_space": self.name_space,
"user_id": self.user_id
}
@classmethod
def __request(cls, parameters):
p = urllib.parse.urlencode(parameters)
p = p.encode("utf-8")
request = urllib.request.Request(cls.API_HOME, p)
response = urllib.request.urlopen(request)
content = response.read()
obj = json.loads(content.decode("utf-8"))
return obj
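# Minimal usage sketch (credentials and the extra job flag are hypothetical;
# only the call shape comes from the class above):
#   client = Client("my_api_key", "my_api_secret")
#   result = client.face_recognize("http://example.com/face.jpg", aggressive=True)
#   # -> sends jobs="face_recognize_aggressive" and returns the decoded JSON response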
| mit | -6,091,735,256,105,236,000 | 29.071429 | 92 | 0.577197 | false | 3.703812 | false | false | false |
springmeyer/gyp | test/mac/gyptest-archs.py | 4 | 3579 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests things related to ARCHS.
"""
import TestGyp
import TestMac
import re
import subprocess
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test-no-archs.gyp', chdir='archs')
test.build('test-no-archs.gyp', test.ALL, chdir='archs')
result_file = test.built_file_path('Test', chdir='archs')
test.must_exist(result_file)
if TestMac.Xcode.Version() >= '0500':
expected_type = ['x86_64']
else:
expected_type = ['i386']
TestMac.CheckFileType(test, result_file, expected_type)
test.run_gyp('test-valid-archs.gyp', chdir='archs')
test.build('test-valid-archs.gyp', test.ALL, chdir='archs')
result_file = test.built_file_path('Test', chdir='archs')
test.must_exist(result_file)
TestMac.CheckFileType(test, result_file, ['x86_64'])
test.run_gyp('test-archs-x86_64.gyp', chdir='archs')
test.build('test-archs-x86_64.gyp', test.ALL, chdir='archs')
result_file = test.built_file_path('Test64', chdir='archs')
test.must_exist(result_file)
TestMac.CheckFileType(test, result_file, ['x86_64'])
test.run_gyp('test-dependencies.gyp', chdir='archs')
test.build('test-dependencies.gyp', target=test.ALL, chdir='archs')
products = ['c_standalone', 'd_standalone']
for product in products:
result_file = test.built_file_path(
product, chdir='archs', type=test.STATIC_LIB)
test.must_exist(result_file)
if test.format != 'make':
# Build all targets except 'exe_32_64_no_sources' that does build
# but should not cause error when generating ninja files
targets = [
'static_32_64', 'shared_32_64', 'shared_32_64_bundle',
'module_32_64', 'module_32_64_bundle',
'exe_32_64', 'exe_32_64_bundle', 'precompiled_prefix_header_mm_32_64',
]
test.run_gyp('test-archs-multiarch.gyp', chdir='archs')
for target in targets:
test.build('test-archs-multiarch.gyp', target=target, chdir='archs')
result_file = test.built_file_path(
'static_32_64', chdir='archs', type=test.STATIC_LIB)
test.must_exist(result_file)
TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])
result_file = test.built_file_path(
'shared_32_64', chdir='archs', type=test.SHARED_LIB)
test.must_exist(result_file)
TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])
result_file = test.built_file_path('My Framework.framework/My Framework',
chdir='archs')
test.must_exist(result_file)
TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])
# Check that symbol "_x" made it into both versions of the binary:
if not all(['D _x' in subprocess.check_output(
['nm', '-arch', arch, result_file]) for arch in ['i386', 'x86_64']]):
# This can only flakily fail, due to process ordering issues. If this
# does fail flakily, then something's broken, it's not the test at fault.
test.fail_test()
result_file = test.built_file_path(
'exe_32_64', chdir='archs', type=test.EXECUTABLE)
test.must_exist(result_file)
TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])
result_file = test.built_file_path('Test App.app/Contents/MacOS/Test App',
chdir='archs')
test.must_exist(result_file)
TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])
| bsd-3-clause | -2,560,930,344,174,830,000 | 37.074468 | 79 | 0.656329 | false | 3.224324 | true | false | false |
Kkevsterrr/backdoorme | backdoors/shell/ruby.py | 1 | 1142 | from backdoors.backdoor import *
import time
class Ruby(Backdoor):
prompt = Fore.RED + "(rb) " + Fore.BLUE + ">> " + Fore.RESET
def __init__(self, core):
cmd.Cmd.__init__(self)
self.intro = GOOD + "Using Ruby module..."
self.core = core
self.options = {
"port" : Option("port", 53937, "port to connect to", True),
}
self.modules = {}
self.allow_modules = True
self.help_text = INFO + "Uses ruby to open a socket and redirect I/O to /bin/sh."
def get_command(self):
command = "echo " + self.core.curtarget.pword + " | sudo -S ruby -rsocket -e 'exit if fork;c=TCPSocket.new(\"" + self.core.localIP + "\",\"" + str(self.get_value("port")) + "\");while(cmd=c.gets);IO.popen(cmd,\"r\"){ |io| c.print io.read } end'"
print(command)
return command
def do_exploit(self, args):
print(GOOD + "Initializing backdoor...")
self.listen(prompt="none")
self.core.curtarget.ssh.exec_command(self.get_command())
print(GOOD + "Ruby backdoor on " + str(self.get_value("port")) + " attempted.")
for mod in self.modules.keys():
print(INFO + "Attempting to execute " + mod.name + " module...")
mod.exploit() | mit | -8,470,266,462,933,295,000 | 35.870968 | 247 | 0.631349 | false | 2.812808 | false | false | false |
DonJayamanne/pythonVSCode | pythonFiles/refactor.py | 1 | 11855 | # Arguments are:
# 1. Working directory.
# 2. Rope folder
import difflib
import io
import json
import os
import sys
import traceback
try:
import rope
from rope.base import libutils
from rope.refactor.rename import Rename
from rope.refactor.extract import ExtractMethod, ExtractVariable
import rope.base.project
import rope.base.taskhandle
except:
jsonMessage = {
"error": True,
"message": "Rope not installed",
"traceback": "",
"type": "ModuleNotFoundError",
}
sys.stderr.write(json.dumps(jsonMessage))
sys.stderr.flush()
WORKSPACE_ROOT = sys.argv[1]
ROPE_PROJECT_FOLDER = ".vscode/.ropeproject"
class RefactorProgress:
"""
Refactor progress information
"""
def __init__(self, name="Task Name", message=None, percent=0):
self.name = name
self.message = message
self.percent = percent
class ChangeType:
"""
Change Type Enum
"""
EDIT = 0
NEW = 1
DELETE = 2
class Change:
""""""
EDIT = 0
NEW = 1
DELETE = 2
def __init__(self, filePath, fileMode=ChangeType.EDIT, diff=""):
self.filePath = filePath
self.diff = diff
self.fileMode = fileMode
def get_diff(changeset):
"""This is a copy of the code form the ChangeSet.get_description method found in Rope."""
new = changeset.new_contents
old = changeset.old_contents
if old is None:
if changeset.resource.exists():
old = changeset.resource.read()
else:
old = ""
# Ensure code has a trailing empty lines, before generating a diff.
# https://github.com/Microsoft/vscode-python/issues/695.
old_lines = old.splitlines(True)
if not old_lines[-1].endswith("\n"):
old_lines[-1] = old_lines[-1] + os.linesep
new = new + os.linesep
result = difflib.unified_diff(
old_lines,
new.splitlines(True),
"a/" + changeset.resource.path,
"b/" + changeset.resource.path,
)
return "".join(list(result))
class BaseRefactoring(object):
"""
Base class for refactorings
"""
def __init__(self, project, resource, name="Refactor", progressCallback=None):
self._progressCallback = progressCallback
self._handle = rope.base.taskhandle.TaskHandle(name)
self._handle.add_observer(self._update_progress)
self.project = project
self.resource = resource
self.changes = []
def _update_progress(self):
jobset = self._handle.current_jobset()
if jobset and not self._progressCallback is None:
progress = RefactorProgress()
# getting current job set name
if jobset.get_name() is not None:
progress.name = jobset.get_name()
# getting active job name
if jobset.get_active_job_name() is not None:
progress.message = jobset.get_active_job_name()
# adding done percent
percent = jobset.get_percent_done()
if percent is not None:
progress.percent = percent
if not self._progressCallback is None:
self._progressCallback(progress)
def stop(self):
self._handle.stop()
def refactor(self):
try:
self.onRefactor()
except rope.base.exceptions.InterruptedTaskError:
# we can ignore this exception, as user has cancelled refactoring
pass
def onRefactor(self):
"""
To be implemented by each base class
"""
pass
class RenameRefactor(BaseRefactoring):
def __init__(
self,
project,
resource,
name="Rename",
progressCallback=None,
startOffset=None,
newName="new_Name",
):
BaseRefactoring.__init__(self, project, resource, name, progressCallback)
self._newName = newName
self.startOffset = startOffset
def onRefactor(self):
renamed = Rename(self.project, self.resource, self.startOffset)
changes = renamed.get_changes(self._newName, task_handle=self._handle)
for item in changes.changes:
if isinstance(item, rope.base.change.ChangeContents):
self.changes.append(
Change(item.resource.real_path, ChangeType.EDIT, get_diff(item))
)
else:
raise Exception("Unknown Change")
class ExtractVariableRefactor(BaseRefactoring):
def __init__(
self,
project,
resource,
name="Extract Variable",
progressCallback=None,
startOffset=None,
endOffset=None,
newName="new_Name",
similar=False,
global_=False,
):
BaseRefactoring.__init__(self, project, resource, name, progressCallback)
self._newName = newName
self._startOffset = startOffset
self._endOffset = endOffset
self._similar = similar
self._global = global_
def onRefactor(self):
renamed = ExtractVariable(
self.project, self.resource, self._startOffset, self._endOffset
)
changes = renamed.get_changes(self._newName, self._similar, self._global)
for item in changes.changes:
if isinstance(item, rope.base.change.ChangeContents):
self.changes.append(
Change(item.resource.real_path, ChangeType.EDIT, get_diff(item))
)
else:
raise Exception("Unknown Change")
class ExtractMethodRefactor(ExtractVariableRefactor):
def __init__(
self,
project,
resource,
name="Extract Method",
progressCallback=None,
startOffset=None,
endOffset=None,
newName="new_Name",
similar=False,
global_=False,
):
ExtractVariableRefactor.__init__(
self,
project,
resource,
name,
progressCallback,
startOffset=startOffset,
endOffset=endOffset,
newName=newName,
similar=similar,
global_=global_,
)
def onRefactor(self):
renamed = ExtractMethod(
self.project, self.resource, self._startOffset, self._endOffset
)
changes = renamed.get_changes(self._newName, self._similar, self._global)
for item in changes.changes:
if isinstance(item, rope.base.change.ChangeContents):
self.changes.append(
Change(item.resource.real_path, ChangeType.EDIT, get_diff(item))
)
else:
raise Exception("Unknown Change")
class RopeRefactoring(object):
def __init__(self):
self.default_sys_path = sys.path
self._input = io.open(sys.stdin.fileno(), encoding="utf-8")
def _rename(self, filePath, start, newName, indent_size):
"""
Renames a variable
"""
project = rope.base.project.Project(
WORKSPACE_ROOT,
ropefolder=ROPE_PROJECT_FOLDER,
save_history=False,
indent_size=indent_size,
)
resourceToRefactor = libutils.path_to_resource(project, filePath)
refactor = RenameRefactor(
project, resourceToRefactor, startOffset=start, newName=newName
)
refactor.refactor()
changes = refactor.changes
project.close()
valueToReturn = []
for change in changes:
valueToReturn.append({"diff": change.diff})
return valueToReturn
def _extractVariable(self, filePath, start, end, newName, indent_size):
"""
Extracts a variable
"""
project = rope.base.project.Project(
WORKSPACE_ROOT,
ropefolder=ROPE_PROJECT_FOLDER,
save_history=False,
indent_size=indent_size,
)
resourceToRefactor = libutils.path_to_resource(project, filePath)
refactor = ExtractVariableRefactor(
project,
resourceToRefactor,
startOffset=start,
endOffset=end,
newName=newName,
similar=True,
)
refactor.refactor()
changes = refactor.changes
project.close()
valueToReturn = []
for change in changes:
valueToReturn.append({"diff": change.diff})
return valueToReturn
def _extractMethod(self, filePath, start, end, newName, indent_size):
"""
Extracts a method
"""
project = rope.base.project.Project(
WORKSPACE_ROOT,
ropefolder=ROPE_PROJECT_FOLDER,
save_history=False,
indent_size=indent_size,
)
resourceToRefactor = libutils.path_to_resource(project, filePath)
refactor = ExtractMethodRefactor(
project,
resourceToRefactor,
startOffset=start,
endOffset=end,
newName=newName,
similar=True,
)
refactor.refactor()
changes = refactor.changes
project.close()
valueToReturn = []
for change in changes:
valueToReturn.append({"diff": change.diff})
return valueToReturn
def _serialize(self, identifier, results):
"""
Serializes the refactor results
"""
return json.dumps({"id": identifier, "results": results})
def _deserialize(self, request):
"""Deserialize request from VSCode.
Args:
request: String with raw request from VSCode.
Returns:
Python dictionary with request data.
"""
return json.loads(request)
def _process_request(self, request):
"""Accept serialized request from VSCode and write response."""
request = self._deserialize(request)
lookup = request.get("lookup", "")
if lookup == "":
pass
elif lookup == "rename":
changes = self._rename(
request["file"],
int(request["start"]),
request["name"],
int(request["indent_size"]),
)
return self._write_response(self._serialize(request["id"], changes))
elif lookup == "extract_variable":
changes = self._extractVariable(
request["file"],
int(request["start"]),
int(request["end"]),
request["name"],
int(request["indent_size"]),
)
return self._write_response(self._serialize(request["id"], changes))
elif lookup == "extract_method":
changes = self._extractMethod(
request["file"],
int(request["start"]),
int(request["end"]),
request["name"],
int(request["indent_size"]),
)
return self._write_response(self._serialize(request["id"], changes))
def _write_response(self, response):
sys.stdout.write(response + "\n")
sys.stdout.flush()
def watch(self):
self._write_response("STARTED")
while True:
try:
self._process_request(self._input.readline())
except:
exc_type, exc_value, exc_tb = sys.exc_info()
tb_info = traceback.extract_tb(exc_tb)
jsonMessage = {
"error": True,
"message": str(exc_value),
"traceback": str(tb_info),
"type": str(exc_type),
}
sys.stderr.write(json.dumps(jsonMessage))
sys.stderr.flush()
if __name__ == "__main__":
RopeRefactoring().watch()
| mit | 3,470,056,954,809,841,700 | 29.012658 | 93 | 0.562801 | false | 4.371313 | false | false | false |
lfepp/pd_account_info | get_info.py | 1 | 19158 | #!/usr/bin/env python
#
# Copyright (c) 2016, PagerDuty, Inc. <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of PagerDuty Inc nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL PAGERDUTY INC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import requests
from datetime import datetime, timedelta
import csv
ACCESS_TOKEN = 'ACCESS_TOKEN' # Should be a v2 token, can be read only
def pd_get(endpoint, payload=None):
"""Handle all PagerDuty GET requests"""
url = 'https://api.pagerduty.com{endpoint}'.format(endpoint=endpoint)
headers = {
'Accept': 'application/vnd.pagerduty+json;version=2',
'Content-type': 'application/json',
'Authorization': 'Token token={token}'.format(token=ACCESS_TOKEN)
}
r = requests.get(url, params=payload, headers=headers)
if r.status_code == 200:
return r.json()
else:
raise Exception('GET request failed with status {code}'.format(
code=r.status_code
))
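# Illustrative call (not part of the original script): fetch one page of users
# using the same offset-based pagination pattern as the helpers below.
#
#   page = pd_get('/users', {'limit': 100, 'offset': 0})
#   # page['more'] indicates whether another page should be requested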
def list_users(team_id=None):
"""List all users in the account"""
output = pd_get('/users', {
'limit': 100,
'include[]': ['contact_methods'],
'team_ids[]': [team_id]
})
if output['more']:
offset = 100
r = {'more': True}
while r['more']:
r = pd_get('/users', {
'limit': 100,
'offset': offset,
'include[]': ['contact_methods'],
'team_ids[]': [team_id]
})
output['users'] = output['users'] + r['users']
offset += 100
return output
def parse_user_info(users):
"""Parse relevant user info for reporting"""
output = []
for user in users:
contact_methods = []
if len(user['contact_methods']) == 0:
contact_methods = [{
'id': None,
'type': None,
'label': None,
'address': None
}]
for i, method in enumerate(user['contact_methods']):
contact_methods.append({
'label': method['label'],
'type': method['type'],
'id': method['id']
})
if method['type'] == 'push_notification_contact_method':
contact_methods[i]['address'] = 'N/A'
elif method['type'] == 'email_contact_method':
contact_methods[i]['address'] = method['address']
else:
contact_methods[i]['address'] = '{country}+{address}'.format(
country=method['country_code'],
address=method['address']
)
output.append({
'name': user['name'],
'id': user['id'],
'email': user['email'],
'role': user['role'],
'contact_methods': contact_methods
})
return output
def write_user_csv(user_data):
"""Create CSV from user data"""
with open('user_data_{timestamp}.csv'.format(
timestamp=datetime.now().isoformat()
), 'w') as csvfile:
fieldnames = [
'id',
'name',
'email',
'role',
'contact_method_id',
'contact_method_type',
'contact_method_label',
'contact_method_address'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for user in user_data:
for method in user['contact_methods']:
writer.writerow({
'id': user['id'],
'name': user['name'].encode('utf-8'),
'email': user['email'].encode('utf-8'),
'role': user['role'],
'contact_method_id': method['id'],
'contact_method_type': method['type'],
                    'contact_method_label': method['label'].encode('utf-8') if method['label'] else None,
                    'contact_method_address': method['address'].encode('utf-8') if method['address'] else None
})
return "CSV created"
def list_escalation_policies(team_id=None):
"""List all escalation policies in account"""
output = pd_get('/escalation_policies', {
'limit': 100,
'team_ids[]': [team_id]
})
if output['more']:
offset = 100
r = {'more': True}
while r['more']:
r = pd_get(
'/escalation_policies',
{'limit': 100, 'offset': offset, 'team_ids[]': [team_id]}
)
output['escalation_policies'] = (
output['escalation_policies'] + r['escalation_policies']
)
offset += 100
return output
def parse_ep_info(escalation_policies):
"""Parse relevant escalation policy info for reporting"""
output = []
for ep in escalation_policies:
rules = []
if len(ep['escalation_rules']) == 0:
rules = [{
'id': None,
'escalation_delay': None,
'targets': [{
'id': None,
'type': None,
'name': None
}]
}]
for rule in ep['escalation_rules']:
targets = []
for target in rule['targets']:
if target['type'] in ['user', 'user_reference']:
target_type = 'user'
else:
target_type = 'schedule'
targets.append({
'id': target['id'],
'type': target_type,
'name': target['summary']
})
rules.append({
'escalation_delay': rule['escalation_delay_in_minutes'],
'id': rule['id'],
'targets': targets
})
output.append({
'name': ep['name'],
'id': ep['id'],
'rules': rules
})
return output
def write_escalation_policy_csv(ep_data):
"""Create CSV from escalation policy data"""
with open('escalation_policy_data_{timestamp}.csv'.format(
timestamp=datetime.now().isoformat()
), 'w') as csvfile:
fieldnames = [
'id',
'name',
'escalation_rule_id',
'escalation_rule_delay',
'escalation_rule_target_id',
'escalation_rule_target_type',
'escalation_rule_target_name'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for ep in ep_data:
for rule in ep['rules']:
for target in rule['targets']:
writer.writerow({
'id': ep['id'],
'name': ep['name'],
'escalation_rule_id': rule['id'],
'escalation_rule_delay': rule['escalation_delay'],
'escalation_rule_target_id': target['id'],
'escalation_rule_target_type': target['type'],
'escalation_rule_target_name': target['name']
})
return "CSV created"
def list_schedules(team_id=None):
"""List all schedules in account"""
output = pd_get('/schedules', {
'limit': 100,
'team_ids[]': [team_id]
})
if output['more']:
offset = 100
r = {'more': True}
while r['more']:
r = pd_get('/schedules', {
'limit': 100,
'offset': offset,
'team_ids[]': [team_id]
})
output['schedules'] = output['schedules'] + r['schedules']
offset += 100
return output
def list_schedule_oncalls(schedule_id):
"""List the current on-calls for a schedule"""
output = pd_get('/oncalls', {
'since': datetime.now().isoformat(),
'until': (datetime.now() + timedelta(seconds=1)).isoformat(),
'schedule_ids[]': [schedule_id],
'limit': 100
})
if output['more']:
offset = 100
r = {'more': True}
while r['more']:
r = pd_get('/oncalls', {
'since': datetime.now().isoformat(),
'until': (datetime.now() + timedelta(seconds=1)).isoformat(),
'schedule_ids[]': [schedule_id],
'limit': 100,
'offset': offset
})
output['oncalls'] = output['oncalls'] + r['oncalls']
offset += 100
return output
def parse_schedule_info(schedules):
"""Parse relevant schedule info for reporting"""
output = []
for schedule in schedules:
output.append({
'name': schedule['name'],
'id': schedule['id'],
'description': schedule['description'],
'time_zone': schedule['time_zone'],
'oncalls': parse_oncall_info(
list_schedule_oncalls(schedule['id'])['oncalls']
)
})
return output
def parse_oncall_info(oncalls):
"""Parse relevant on-call info for reporting"""
output = []
if len(oncalls) == 0:
output = [{
'user_name': None,
'user_id': None,
'escalation_level': None,
'start': None,
'end': None
}]
for oncall in oncalls:
output.append({
'user_name': oncall['user']['summary'],
'user_id': oncall['user']['id'],
'escalation_level': oncall['escalation_level'],
'start': oncall['start'],
'end': oncall['end']
})
return output
def write_schedule_csv(schedule_data):
"""Create CSV from schedule data"""
with open('schedule_data_{timestamp}.csv'.format(
timestamp=datetime.now().isoformat()
), 'w') as csvfile:
fieldnames = [
'id',
'name',
'description',
'time_zone',
'oncall_id',
'oncall_name',
'oncall_escalation_level',
'oncall_shift_start',
'oncall_shift_end'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for schedule in schedule_data:
for oncall in schedule['oncalls']:
writer.writerow({
'id': schedule['id'],
'name': schedule['name'],
'description': schedule['description'],
'time_zone': schedule['time_zone'],
'oncall_id': oncall['user_id'],
'oncall_name': oncall['user_name'],
'oncall_escalation_level': oncall['escalation_level'],
'oncall_shift_start': oncall['start'],
'oncall_shift_end': oncall['end']
})
return "CSV created"
def list_teams():
"""List all teams in account"""
output = pd_get('/teams', {'limit': 100})
if output['more']:
offset = 100
r = {'more': True}
while r['more']:
r = pd_get('/teams', {'limit': 100, 'offset': offset})
output['teams'] = output['teams'] + r['teams']
offset += 100
return output
def parse_team_info(teams):
"""Parse relevant team info for reporting"""
output = []
for i, team in enumerate(teams):
output.append({
'name': team['name'],
'id': team['id'],
'users': [],
'schedules': [],
'escalation_policies': [],
'services': []
})
users = list_users(team['id'])['users']
for user in users:
output[i]['users'].append({
'name': user['name'],
'id': user['id']
})
schedules = list_schedules(team['id'])['schedules']
for schedule in schedules:
output[i]['schedules'].append({
'name': schedule['name'],
'id': schedule['id']
})
escalation_policies = list_escalation_policies(
team['id']
)['escalation_policies']
for ep in escalation_policies:
output[i]['escalation_policies'].append({
'name': ep['name'],
'id': ep['id']
})
services = list_services(team['id'])['services']
for service in services:
output[i]['services'].append({
'name': service['name'],
'id': service['id']
})
return output
def write_team_csv(team_data):
"""Create CSV from team data"""
with open('team_data_{timestamp}.csv'.format(
timestamp=datetime.now().isoformat()
), 'w') as csvfile:
fieldnames = [
'id',
'name',
'user_id',
'user_name',
'schedule_id',
'schedule_name',
'escalation_policy_id',
'escalation_policy_name',
'service_id',
'service_name'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for team in team_data:
for user in team['users']:
writer.writerow({
'id': team['id'],
'name': team['name'].encode('utf-8'),
'user_id': user['id'],
'user_name': user['name'].encode('utf-8'),
'schedule_id': None,
'schedule_name': None,
'escalation_policy_id': None,
'escalation_policy_name': None,
'service_id': None,
'service_name': None
})
for schedule in team['schedules']:
writer.writerow({
'id': team['id'],
'name': team['name'].encode('utf-8'),
'user_id': None,
'user_name': None,
'schedule_id': schedule['id'],
'schedule_name': schedule['name'].encode('utf-8'),
'escalation_policy_id': None,
'escalation_policy_name': None,
'service_id': None,
'service_name': None
})
for ep in team['escalation_policies']:
writer.writerow({
'id': team['id'],
'name': team['name'].encode('utf-8'),
'user_id': None,
'user_name': None,
'schedule_id': None,
'schedule_name': None,
'escalation_policy_id': ep['id'],
'escalation_policy_name': ep['name'].encode('utf-8'),
'service_id': None,
'service_name': None
})
for service in team['services']:
writer.writerow({
'id': team['id'],
'name': team['name'].encode('utf-8'),
'user_id': None,
'user_name': None,
'schedule_id': None,
'schedule_name': None,
'escalation_policy_id': None,
'escalation_policy_name': None,
'service_id': service['id'],
'service_name': service['name'].encode('utf-8')
})
return "CSV created"
def list_services(team_id=None):
"""List all services"""
output = pd_get('/services', {'limit': 100, 'team_ids[]': [team_id]})
if output['more']:
offset = 100
r = {'more': True}
while r['more']:
r = pd_get('/services', {
'limit': 100,
'offset': offset,
'team_ids[]': [team_id]
})
output['services'] = output['services'] + r['services']
offset += 100
return output
def parse_service_info(services):
"""Parse relevant services info for reporting"""
output = []
for service in services:
output.append({
'id': service['id'],
'name': service['name'].encode('utf-8'),
'escalation_policy_id': service['escalation_policy']['id'],
'escalation_policy_name': service['escalation_policy']['summary'],
'alert_creation': service['alert_creation']
})
return output
def write_service_csv(service_data):
"""Create CSV from service data"""
with open('service_data_{timestamp}.csv'.format(
timestamp=datetime.now().isoformat()
), 'w') as csvfile:
fieldnames = [
'id',
'name',
'escalation_policy_id',
'escalation_policy_name',
'alert_creation'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for service in service_data:
writer.writerow({
'id': service['id'],
'name': service['name'],
'escalation_policy_id': service['escalation_policy_id'],
'escalation_policy_name': (
service['escalation_policy_name'].encode('utf-8')
),
'alert_creation': service['alert_creation']
})
return "CSV created"
if __name__ == '__main__':
write_user_csv(parse_user_info(list_users()['users']))
write_escalation_policy_csv(parse_ep_info(
list_escalation_policies()['escalation_policies']
))
write_schedule_csv(parse_schedule_info(list_schedules()['schedules']))
write_team_csv(parse_team_info(list_teams()['teams']))
write_service_csv(parse_service_info(list_services()['services']))
print "Data has finished exporting"
| bsd-3-clause | 7,826,909,042,825,284,000 | 33.028419 | 79 | 0.494206 | false | 4.280161 | false | false | false |
croxis/SpaceDrive | spacedrive/renderpipeline/rpcore/gui/buffer_viewer.py | 1 | 10435 | """
RenderPipeline
Copyright (c) 2014-2016 tobspr <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import division
from functools import partial
from panda3d.core import Texture, Vec3
from direct.gui.DirectFrame import DirectFrame
from direct.gui.DirectScrolledFrame import DirectScrolledFrame
from direct.gui.DirectGui import DGG
from rplibs.six import itervalues
from rpcore.image import Image
from rpcore.util.display_shader_builder import DisplayShaderBuilder
from rpcore.globals import Globals
from rpcore.render_target import RenderTarget
from rpcore.gui.texture_preview import TexturePreview
from rpcore.gui.sprite import Sprite
from rpcore.gui.labeled_checkbox import LabeledCheckbox
from rpcore.gui.text import Text
from rpcore.gui.draggable_window import DraggableWindow
class BufferViewer(DraggableWindow):
""" This class provides a view into the buffers to inspect them """
def __init__(self, pipeline, parent):
""" Constructs the buffer viewer """
DraggableWindow.__init__(self, width=1400, height=800, parent=parent,
title="Buffer- and Image-Browser")
self._pipeline = pipeline
self._scroll_height = 3000
self._display_images = False
self._stages = []
self._create_components()
self._tex_preview = TexturePreview(self._pipeline, parent)
self._tex_preview.hide()
self.hide()
def toggle(self):
""" Updates all the buffers and then toggles the buffer viewer """
if self._visible:
self._remove_components()
self.hide()
else:
self._perform_update()
self.show()
@property
def entries(self):
""" Returns a list of all registered entries """
return RenderTarget.REGISTERED_TARGETS + Image.REGISTERED_IMAGES
@property
def stage_information(self):
""" Returns the amount of attached stages, and also the memory consumed
in MiB in a tuple. """
count, memory = 0, 0
for entry in self.entries:
if isinstance(entry, Texture):
memory += entry.estimate_texture_memory()
count += 1
elif entry.__class__.__name__ == "RenderTarget":
for target in itervalues(entry.targets):
memory += target.estimate_texture_memory()
count += 1
else:
self.warn("Unkown type:", entry.__class__.__name__)
return memory, count
def _create_components(self):
""" Creates the window components """
DraggableWindow._create_components(self)
self._content_frame = DirectScrolledFrame(
frameSize=(0, self._width - 15, 0, self._height - 70),
canvasSize=(0, self._width - 80, 0, self._scroll_height),
autoHideScrollBars=False,
scrollBarWidth=12.0,
frameColor=(0, 0, 0, 0),
verticalScroll_relief=DGG.FLAT,
verticalScroll_incButton_relief=DGG.FLAT,
verticalScroll_decButton_relief=DGG.FLAT,
verticalScroll_thumb_relief=DGG.FLAT,
verticalScroll_frameColor=(0.05, 0.05, 0.05, 1),
verticalScroll_thumb_frameColor=(0.8, 0.8, 0.8, 1),
verticalScroll_incButton_frameColor=(0.6, 0.6, 0.6, 1),
verticalScroll_decButton_frameColor=(0.6, 0.6, 0.6, 1),
horizontalScroll_frameColor=(0, 0, 0, 0),
horizontalScroll_relief=False,
horizontalScroll_thumb_relief=False,
horizontalScroll_incButton_relief=False,
horizontalScroll_decButton_relief=False,
parent=self._node,
pos=(0, 1, -self._height))
self._content_node = self._content_frame.getCanvas().attach_new_node(
"BufferComponents")
self._content_node.set_scale(1, 1, -1)
self._content_node.set_z(self._scroll_height)
self._chb_show_images = LabeledCheckbox(
parent=self._node, x=10, y=43, chb_callback=self._set_show_images,
chb_checked=False, text="Display image resources",
text_color=Vec3(0.4), expand_width=330)
def _set_show_images(self, arg):
""" Sets whether images and textures will be shown """
self._display_images = arg
self._perform_update()
def _set_scroll_height(self, height):
""" Sets the maximum scroll height in the content frame """
self._scroll_height = height
self._content_frame["canvasSize"] = (0, self._width - 80, 0, self._scroll_height)
self._content_node.set_z(self._scroll_height)
def _remove_components(self):
""" Removes all components of the buffer viewer """
self._content_node.node().remove_all_children()
self._tex_preview.hide()
def _perform_update(self):
""" Collects all entries, extracts their images and re-renders the
window """
# Collect texture stages
self._stages = []
for entry in self.entries:
if isinstance(entry, Texture):
if self._display_images:
self._stages.append(entry)
# Can not use isinstance or we get circular import references
elif entry.__class__.__name__ == "RenderTarget":
for target in itervalues(entry.targets):
self._stages.append(target)
else:
self.warn("Unrecognized instance!", entry.__class__)
self._render_stages()
def _on_texture_hovered(self, hover_frame, evt=None): # pylint: disable=W0613
""" Internal method when a texture is hovered """
hover_frame["frameColor"] = (0, 0, 0, 0.1)
def _on_texture_blurred(self, hover_frame, evt=None): # pylint: disable=W0613
""" Internal method when a texture is blurred """
hover_frame["frameColor"] = (0, 0, 0, 0)
def _on_texture_clicked(self, tex_handle, evt=None): # pylint: disable=W0613
""" Internal method when a texture is blurred """
self._tex_preview.present(tex_handle)
def _render_stages(self):
""" Renders the stages to the window """
self._remove_components()
entries_per_row = 6
aspect = Globals.base.win.get_y_size() / Globals.base.win.get_x_size()
entry_width = 235
entry_height = (entry_width - 20) * aspect + 55
# Store already processed images
processed = set()
index = -1
# Iterate over all stages
for stage_tex in self._stages:
if stage_tex in processed:
continue
processed.add(stage_tex)
index += 1
stage_name = stage_tex.get_name()
xoffs = index % entries_per_row
yoffs = index // entries_per_row
node = self._content_node.attach_new_node("Preview")
node.set_sz(-1)
node.set_pos(10 + xoffs * (entry_width - 14), 1, yoffs * (entry_height - 14 + 10))
r, g, b = 0.2, 0.2, 0.2
stage_name = stage_name.replace("render_pipeline_internal:", "")
parts = stage_name.split(":")
stage_name = parts[-1]
DirectFrame(
parent=node, frameSize=(7, entry_width - 17, -7, -entry_height + 17),
frameColor=(r, g, b, 1.0), pos=(0, 0, 0))
frame_hover = DirectFrame(
parent=node, frameSize=(0, entry_width - 10, 0, -entry_height + 10),
frameColor=(0, 0, 0, 0), pos=(0, 0, 0), state=DGG.NORMAL)
frame_hover.bind(
DGG.ENTER, partial(self._on_texture_hovered, frame_hover))
frame_hover.bind(
DGG.EXIT, partial(self._on_texture_blurred, frame_hover))
frame_hover.bind(
DGG.B1PRESS, partial(self._on_texture_clicked, stage_tex))
Text(text=stage_name, x=15, y=29, parent=node, size=12, color=Vec3(0.8))
# Scale image so it always fits
w, h = stage_tex.get_x_size(), stage_tex.get_y_size()
padd_x, padd_y = 24, 57
scale_x = (entry_width - padd_x) / max(1, w)
scale_y = (entry_height - padd_y) / max(1, h)
scale_factor = min(scale_x, scale_y)
if stage_tex.get_texture_type() == Image.TT_buffer_texture:
scale_factor = 1
w = entry_width - padd_x
h = entry_height - padd_y
preview = Sprite(
image=stage_tex, w=scale_factor * w, h=scale_factor * h,
any_filter=False, parent=node, x=7, y=40, transparent=False)
preview.set_shader_input("mipmap", 0)
preview.set_shader_input("slice", 0)
preview.set_shader_input("brightness", 1)
preview.set_shader_input("tonemap", False)
preview_shader = DisplayShaderBuilder.build(stage_tex, scale_factor*w, scale_factor*h)
preview.set_shader(preview_shader)
num_rows = (index + entries_per_row) // entries_per_row
self._set_scroll_height(50 + (entry_height - 14 + 10) * num_rows)
| mit | 1,130,596,399,849,093,500 | 39.74 | 98 | 0.594921 | false | 3.94518 | false | false | false |
asphalt-framework/asphalt-wamp | asphalt/wamp/events.py | 1 | 1043 | from autobahn.wamp.types import SessionDetails, CloseDetails
from asphalt.core import Event
__all__ = ('SessionJoinEvent', 'SessionLeaveEvent')
class SessionJoinEvent(Event):
"""
Signals that the client has joined the WAMP realm on the router.
:ivar details: the autobahn-provided session details
:vartype details: ~autobahn.wamp.types.SessionDetails
"""
__slots__ = 'details'
def __init__(self, source, topic: str, session_details: SessionDetails) -> None:
super().__init__(source, topic)
self.details = session_details
class SessionLeaveEvent(Event):
"""
Signals that the client has left the WAMP realm on the router.
:ivar str reason: the reason why the client left the realm
:ivar str message: the closing message
"""
__slots__ = 'reason', 'message'
def __init__(self, source, topic: str, close_details: CloseDetails) -> None:
super().__init__(source, topic)
self.reason = close_details.reason
self.message = close_details.message
| apache-2.0 | -6,847,102,164,541,371,000 | 27.972222 | 84 | 0.670182 | false | 3.950758 | false | false | false |
beaker-project/beaker | IntegrationTests/src/bkr/inttest/client/test_job_comment.py | 1 | 4191 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from turbogears.database import session
from bkr.inttest import data_setup, with_transaction
from bkr.inttest.client import run_client, create_client_config, \
ClientError, ClientTestCase
class JobCommentTest(ClientTestCase):
@with_transaction
def setUp(self):
self.job = data_setup.create_completed_job()
def test_invalid_taskpec(self):
try:
run_client(['bkr', 'job-comment', '12345'])
self.fail('should raise')
except ClientError as e:
self.assertIn('Invalid taskspec', e.stderr_output)
def test_post_comment_to_recipeset(self):
with session.begin():
recipe = self.job.recipesets[0]
comment_text = u'Never gonna give you up'
out = run_client(['bkr', 'job-comment', recipe.t_id,
'--message', comment_text])
with session.begin():
session.expire_all()
self.assertEqual(recipe.comments[0].comment, comment_text)
def test_post_comment_to_recipetask(self):
with session.begin():
recipe = self.job.recipesets[0].recipes[0]
task = recipe.tasks[0]
comment_text = u'Never gonna let you down'
out = run_client(['bkr', 'job-comment', task.t_id,
'--message', comment_text])
with session.begin():
session.expire_all()
self.assertEqual(task.comments[0].comment, comment_text)
def test_post_comment_to_task_result(self):
with session.begin():
recipe = data_setup.create_recipe()
job = data_setup.create_job_for_recipes([recipe])
data_setup.mark_job_complete(job)
result = recipe.tasks[0].results[0]
comment_text = u'Never gonna run around and desert you'
out = run_client(['bkr', 'job-comment', result.t_id,
'--message', comment_text])
with session.begin():
session.expire_all()
self.assertEqual(result.comments[0].comment, comment_text)
def test_anonymous_user_cannot_comment(self):
with session.begin():
client_config = create_client_config(username=None, password=None)
comment_text = u'Never gonna make you cry'
try:
run_client(['bkr', 'job-comment', self.job.recipesets[0].t_id,
'--message', comment_text], config=client_config)
self.fail('should raise')
except ClientError as e:
self.assertEquals(e.status, 1)
self.assertIn('Invalid username or password', e.stderr_output)
def test_empty_comment_is_rejected(self):
try:
run_client(['bkr', 'job-comment', self.job.recipesets[0].t_id,
'--message', ''])
self.fail('should raise')
except ClientError as e:
self.assertIn('Comment text cannot be empty', e.stderr_output)
def test_post_comment_on_multiple_taskspec(self):
with session.begin():
job = data_setup.create_completed_job()
recipe1 = self.job.recipesets[0]
recipe2 = job.recipesets[0]
comment_text = u'Never gonna say goodbye'
out = run_client(['bkr', 'job-comment', recipe1.t_id, recipe2.t_id,
'--message', comment_text])
with session.begin():
session.expire_all()
self.assertEqual(recipe1.comments[0].comment, comment_text)
self.assertEqual(recipe2.comments[0].comment, comment_text)
def test_post_comment_to_tr_taskspec_string_fails(self):
comment_text = u'Never gonna tell a lie...'
try:
run_client(['bkr', 'job-comment', 'TR:thisisnotanint', '--message',
comment_text])
self.fail('should raise')
except ClientError as e:
self.assertIn('Recipe task result not found', e.stderr_output)
| gpl-2.0 | -8,781,481,340,832,835,000 | 38.537736 | 79 | 0.596516 | false | 3.905871 | true | false | false |
delph-in/pydelphin | delphin/sembase.py | 1 | 6504 |
"""
Basic classes and functions for semantic representations.
"""
from typing import (Optional, Mapping, Tuple, List, Union, Sequence)
from delphin.lnk import Lnk, LnkMixin
# Default modules need to import the PyDelphin version
from delphin.__about__ import __version__ # noqa: F401
# Basic Types
# Identifiers are node ids in DMRS and EDS, or variables in MRS
# including handles and underspecified variables
Identifier = Union[str, int]
Role = str
RoleArgument = Tuple[Role, Identifier]
ArgumentStructure = Mapping[Identifier, List[RoleArgument]]
PropertyMap = Mapping[str, str]
# Functions for the default ordering of feature lists
def role_priority(role: str) -> Tuple[bool, bool, str]:
"""Return a representation of role priority for ordering."""
# canonical order: LBL ARG* RSTR BODY *-INDEX *-HNDL CARG ...
role = role.upper()
return (
role != 'LBL',
role in ('BODY', 'CARG'),
role
)
_COMMON_PROPERTIES = (
'PERS', # [x] person (ERG, Jacy)
'NUM', # [x] number (ERG, Jacy)
'GEND', # [x] gender (ERG, Jacy)
'IND', # [x] individuated (ERG)
'PT', # [x] pronoun-type (ERG)
'PRONTYPE', # [x] pronoun-type (Jacy)
'SF', # [e] sentential-force (ERG)
'TENSE', # [e] tense (ERG, Jacy)
'MOOD', # [e] mood (ERG, Jacy)
'PROG', # [e] progressive (ERG, Jacy)
'PERF', # [e] perfective (ERG, Jacy)
'ASPECT', # [e] other aspect (Jacy)
'PASS', # [e] passive (Jacy)
)
_COMMON_PROPERTY_INDEX = dict((p, i) for i, p in enumerate(_COMMON_PROPERTIES))
def property_priority(prop: str) -> Tuple[int, str]:
"""
Return a representation of property priority for ordering.
Note:
The ordering provided by this function was modeled on the ERG
and Jacy grammars and may be inaccurate for others. Properties
not known to this function will be sorted alphabetically.
"""
index = _COMMON_PROPERTY_INDEX.get(prop.upper(), len(_COMMON_PROPERTIES))
return (index, prop)
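# Example of how these keys order feature lists (illustrative, derived from the
# definitions above):
#
#   >>> sorted(['BODY', 'RSTR', 'ARG1', 'LBL', 'CARG'], key=role_priority)
#   ['LBL', 'ARG1', 'RSTR', 'BODY', 'CARG']
#   >>> sorted(['TENSE', 'NUM', 'PERS', 'XYZ'], key=property_priority)
#   ['PERS', 'NUM', 'TENSE', 'XYZ']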
# Classes for Semantic Structures
class Predication(LnkMixin):
"""
An instance of a predicate in a semantic structure.
While a predicate (see :mod:`delphin.predicate`) is a description
of a possible semantic entity, a predication is the instantiation
of a predicate in a semantic structure. Thus, multiple predicates
with the same form are considered the same thing, but multiple
predications with the same predicate will have different
identifiers and, if specified, different surface alignments.
"""
__slots__ = ('id', 'predicate', 'type', 'base')
def __init__(self,
id: Identifier,
predicate: str,
type: Union[str, None],
lnk: Optional[Lnk],
surface,
base):
super().__init__(lnk, surface)
self.id = id
self.predicate = predicate
self.type = type
self.base = base
def __repr__(self):
return '<{} object ({}:{}{}{}) at {}>'.format(
self.__class__.__name__,
self.id,
self.predicate,
str(self.lnk),
'[{}]'.format(self.type or '?'),
id(self))
# Structure types
Predications = Sequence[Predication]
MaybePredication = Union[Predication, None]
PredicationPair = Tuple[MaybePredication, MaybePredication]
class SemanticStructure(LnkMixin):
"""
A basic semantic structure.
DELPH-IN-style semantic structures are rooted DAGs with flat lists
of predications.
Args:
top: identifier for the top of the structure
predications: list of predications in the structure
identifier: a discourse-utterance identifier
Attributes:
top: identifier for the top of the structure
predications: list of predications in the structure
identifier: a discourse-utterance identifier
"""
__slots__ = ('top', 'predications', 'identifier', '_pidx')
def __init__(self,
top: Optional[Identifier],
predications: Predications,
lnk: Optional[Lnk],
surface,
identifier):
super().__init__(lnk, surface)
self.top = top
self.predications = predications
self._pidx = {p.id: p for p in predications}
self.identifier = identifier
def __repr__(self):
return '<{} object ({}) at {}>'.format(
self.__class__.__name__,
' '.join(p.predicate for p in self.predications),
id(self))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (self.top == other.top
and self.predications == other.predications)
def __contains__(self, id):
return id in self._pidx
def __getitem__(self, id):
return self._pidx[id]
def arguments(self, types=None, expressed=None) -> ArgumentStructure:
"""
Return a mapping of the argument structure.
Args:
types: an iterable of predication types to include
expressed: if `True`, only include arguments to expressed
predications; if `False`, only include those
unexpressed; if `None`, include both
Returns:
A mapping of predication ids to lists of (role, target)
pairs for outgoing arguments for the predication.
"""
raise NotImplementedError()
def properties(self, id: Identifier) -> PropertyMap:
"""Return the morphosemantic properties for *id*."""
raise NotImplementedError()
def is_quantifier(self, id: Identifier) -> bool:
"""Return `True` if *id* represents a quantifier."""
raise NotImplementedError()
def quantification_pairs(self) -> List[PredicationPair]:
"""
Return a list of (Quantifiee, Quantifier) pairs.
Both the Quantifier and Quantifiee are :class:`Predication`
objects, unless they do not quantify or are not quantified by
anything, in which case they are `None`. In well-formed and
complete structures, the quantifiee will never be `None`.
Example:
>>> [(p.predicate, q.predicate)
... for p, q in m.quantification_pairs()]
[('_dog_n_1', '_the_q'), ('_bark_v_1', None)]
"""
raise NotImplementedError()
| mit | -4,539,682,961,294,775,300 | 31.039409 | 79 | 0.60163 | false | 3.946602 | false | false | false |
wolph/python-statsd | statsd/__init__.py | 1 | 1052 | from statsd.connection import Connection
from statsd.client import Client
from statsd.timer import Timer
from statsd.gauge import Gauge
from statsd.average import Average
from statsd.raw import Raw
from statsd.counter import Counter, increment, decrement
__all__ = [
'Client',
'Connection',
'Timer',
'Counter',
'Gauge',
'Average',
'Raw',
'increment',
'decrement',
]
# The doctests in this package, when run, will try to send data on the wire.
# To keep this from happening, we hook into nose's machinery to mock out
# `Connection.send` at the beginning of testing this package, and reset it at
# the end.
_connection_patch = None
def setup_package():
# Since we don't want mock to be a global requirement, we need the import
# the setup method.
import mock
global _connection_patch
_connection_patch = mock.patch('statsd.Connection.send')
send = _connection_patch.start()
send.return_value = True
def teardown_package():
assert _connection_patch
_connection_patch.stop()
| bsd-3-clause | -6,754,197,769,659,256,000 | 23.465116 | 77 | 0.702471 | false | 3.811594 | false | false | false |
hammerlab/immuno | immuno/mhc_iedb.py | 1 | 8207 | # Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib2
import urllib
from StringIO import StringIO
import logging
import re
import pandas as pd
from mhc_common import normalize_hla_allele_name, seq_to_str, convert_str
from peptide_binding_measure import (
IC50_FIELD_NAME, PERCENTILE_RANK_FIELD_NAME
)
"""
A note about prediction methods, copied from the IEDB website:
The prediction method list box allows choosing from a number of MHC class I
binding prediction methods:
- Artificial neural network (ANN),
- Stabilized matrix method (SMM),
- SMM with a Peptide:MHC Binding Energy Covariance matrix (SMMPMBEC),
- Scoring Matrices from Combinatorial Peptide Libraries (Comblib_Sidney2008),
- Consensus,
- NetMHCpan.
IEDB recommended is the default prediction method selection.
Based on availability of predictors and previously observed predictive
performance, this selection tries to use the best possible method for a given
MHC molecule. Currently for peptide:MHC-I binding prediction, for a given MHC
molecule, IEDB Recommended uses the Consensus method consisting of ANN, SMM,
and CombLib if any corresponding predictor is available for the molecule.
Otherwise, NetMHCpan is used. This choice was motivated by the expected
predictive performance of the methods in decreasing order:
Consensus > ANN > SMM > NetMHCpan > CombLib.
"""
VALID_IEDB_METHODS = [
'recommended',
'consensus',
'netmhcpan',
'ann',
'smmpmbec',
'smm',
'comblib_sidney2008'
]
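# Illustrative payload sent to the IEDB web API by _query_iedb() below; the
# field names come from _get_iedb_request_params(), while the peptide and
# allele values here are placeholders:
#
#   {
#       "method": "recommended",
#       "length": "9",
#       "sequence_text": "SLYNTVATLYCVHQR",
#       "allele": "HLA-A*02:01",
#   }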
def _parse_iedb_response(response):
"""
Take the binding predictions returned by IEDB's web API
and parse them into a DataFrame
"""
lines = response.split("\n")
# manually parsing since Pandas is insane
header_names = lines[0].split("\t")
d = {}
for col_name in header_names:
d[col_name] = []
for line in lines[1:]:
line = line.strip()
if len(line) > 0:
fields = line.split('\t')
for i, header_name in enumerate(header_names):
value = convert_str(fields[i] if len(fields) > i else None)
d[header_name].append(value)
return pd.DataFrame(d)
def _query_iedb(request_values, url):
"""
Call into IEDB's web API for MHC binding prediction using request dictionary
with fields:
- "method"
- "length"
- "sequence_text"
- "allele"
Parse the response into a DataFrame.
"""
data = urllib.urlencode(request_values)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req).read()
return _parse_iedb_response(response)
class IEDB_MHC_Binding_Predictor(object):
def __init__(
self,
alleles,
lengths,
method,
url):
assert isinstance(alleles, (list,tuple)), \
"Alleles must be a sequence, not: %s" % alleles
self._alleles = alleles
assert isinstance(lengths, (list,tuple)), \
"Peptide lengths must be a sequence, not: %s" % (lengths,)
assert all(isinstance(l, (int,long)) for l in lengths), \
"Not all integers: %s" % (lengths,)
self._lengths = lengths
assert method in VALID_IEDB_METHODS, \
"Invalid IEDB MHC binding prediction method: %s" % (method,)
self._method = method
self._url = url
def _get_iedb_request_params(self, sequence, allele=None):
# sometimes we can get joint predictions for all alleles
if allele is None:
allele = seq_to_str(self._alleles)
params = {
"method" : seq_to_str(self._method),
"length" : seq_to_str(self._lengths),
"sequence_text" : sequence,
"allele" : allele,
}
return params
def predict(self, data):
"""
Given a dataframe with long amino acid sequences in the
'SourceSequence' field, return an augmented dataframe
with shorter k-mers in the 'Epitope' column and several
columns of MHC binding predictions with names such as 'percentile_rank'
"""
# take each mutated sequence in the dataframe
# and general MHC binding scores for all k-mer substrings
responses = {}
for i, peptide in enumerate(data.SourceSequence):
for allele in self._alleles:
key = (peptide, allele)
if key not in responses:
request = self._get_iedb_request_params(peptide, allele)
logging.info(
"Calling IEDB (%s) with request %s",
self._url,
request)
response_df = _query_iedb(request, self._url)
response_df.rename(
columns={
'peptide': 'Epitope',
'length' : 'EpitopeLength',
'start' : 'EpitopeStart',
'end' : 'EpitopeEnd',
'allele' : 'Allele',
},
inplace=True)
response_df['EpitopeStart'] -= 1
response_df['EpitopeEnd'] -= 1
responses[key] = response_df
else:
logging.info(
"Already made predictions for peptide %s with allele %s",
peptide,
allele)
# concatenating the responses makes a MultiIndex with two columns
# - SourceSequence
# - index of epitope from that sequence's IEDB call
#
# ...when we reset the index, we turn these into two columns
# named 'level_0', and 'level_1'. We want to rename the former
# and delete the latter.
responses = pd.concat(responses).reset_index()
responses['SourceSequence'] = responses['level_0']
del responses['level_0']
del responses['level_1']
# IEDB has inclusive end positions, change to exclusive
responses['EpitopeEnd'] += 1
assert 'ann_rank' in responses, responses.head()
responses[PERCENTILE_RANK_FIELD_NAME] = responses['ann_rank']
assert 'ann_ic50' in responses, responses.head()
responses[IC50_FIELD_NAME] = responses['ann_ic50']
# instead of just building up a new dataframe I'm expliciting
# dropping fields here to document what other information is available
drop_fields = (
'seq_num',
'method',
'ann_ic50',
'ann_rank',
'consensus_percentile_rank',
'smm_ic50',
'smm_rank',
'comblib_sidney2008_score',
'comblib_sidney2008_rank'
)
for field in drop_fields:
if field in responses:
responses = responses.drop(field, axis = 1)
result = data.merge(responses, on='SourceSequence')
# some of the MHC scores come back as all NaN so drop them
result = result.dropna(axis=1, how='all')
return result
class IEDB_MHC1(IEDB_MHC_Binding_Predictor):
def __init__(self,
alleles,
lengths=[9],
method='recommended',
url='http://tools.iedb.org/tools_api/mhci/'):
IEDB_MHC_Binding_Predictor.__init__(
self,
alleles=alleles,
lengths=lengths,
method=method,
url=url)
class IEDB_MHC2(IEDB_MHC_Binding_Predictor):
def __init__(self,
alleles,
method='recommended',
url='http://tools.iedb.org/tools_api/mhcii/'):
IEDB_MHC_Binding_Predictor.__init__(
self,
alleles=alleles,
lengths=[15],
method=method,
url=url)
def _get_iedb_request_params(self, sequence):
params = {
"method" : seq_to_str(self._method),
"sequence_text" : sequence,
"allele" : seq_to_str(self._alleles),
}
return params
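# Minimal usage sketch (not part of the original module; allele and peptide are
# placeholders). predict() only requires a 'SourceSequence' column:
#
#   import pandas as pd
#   df = pd.DataFrame({'SourceSequence': ['SLYNTVATLYCVHQRIDV']})
#   predictor = IEDB_MHC1(alleles=['HLA-A*02:01'], lengths=[9])
#   scored = predictor.predict(df)  # adds Epitope columns plus IC50/rank scores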
| apache-2.0 | 3,265,039,371,302,371,000 | 30.687259 | 80 | 0.620568 | false | 3.780286 | false | false | false |
ragupta-git/ImcSdk | imcsdk/apis/v2/admin/ipmi.py | 1 | 3307 | # Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module implements all the communication services
"""
from imcsdk.mometa.comm.CommIpmiLan import CommIpmiLan
from imcsdk.imccoreutils import get_server_dn, IMC_PLATFORM
def _get_comm_mo_dn(handle, server_id=1):
"""
Internal method to get the IPMI mo's parent_dn based \
on the type of platform
"""
from imcsdk.imcexception import ImcValidationException
if handle.platform == IMC_PLATFORM.TYPE_CLASSIC:
return("sys/svc-ext")
elif handle.platform == IMC_PLATFORM.TYPE_MODULAR:
return(get_server_dn(handle, server_id) + "/svc-ext")
else:
raise ImcValidationException("Invalid platform detected:%s" %
handle.platform)
def ipmi_enable(handle, priv=None, key=None, server_id=1):
"""
Enable IPMI over LAN.
Args:
handle (ImcHandle)
priv (string): Optional privilege level: 'admin', 'user', 'read-only'
key (string): Optional encryption key as hexadecimal string
server_id (int): Server Id to be specified for C3260 platforms
Returns:
CommIpmiLan object
Raises:
ValueError if privilege or key are invalid
Example:
if ipmi_enable(handle):
print "IPMI Enabled"
"""
# Verify key is a hex number
try:
if key:
hex(int(key, 16))[2:]
except ValueError:
        raise ValueError('{0}: ERROR: Encryption key is not hex number: '
                         '"{1}"'.format(handle.ip, key))
# Create enabled IPMI object
mo = CommIpmiLan(parent_mo_or_dn=_get_comm_mo_dn(handle, server_id))
mo.admin_state = "enabled"
mo.priv = priv
mo.key = key
# Configure IPMI object on CIMC
handle.set_mo(mo)
return mo
def ipmi_disable(handle, server_id=1):
"""
Disable IPMI over LAN.
Args:
handle (ImcHandle)
server_id (int): Server Id to be specified for C3260 platforms
Returns:
CommIpmiLan object
"""
# Create disabled IPMI object
mo = CommIpmiLan(parent_mo_or_dn=_get_comm_mo_dn(handle, server_id))
mo.admin_state = "disabled"
# Configure IPMI object on CIMC
handle.set_mo(mo)
return mo
def ipmi_exists(handle, server_id=1, **kwargs):
"""
Check if IPMI over LAN is enabled
Args:
handle (ImcHandle)
server_id (int): Server Id to be specified for C3260 platforms
Returns:
True/False, MO/None
"""
mo = CommIpmiLan(parent_mo_or_dn=_get_comm_mo_dn(handle, server_id))
mo = handle.query_dn(mo.dn)
if mo is None:
return False, None
kwargs['admin_state'] = "enabled"
mo_exists = mo.check_prop_match(**kwargs)
return (mo_exists, mo)
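# Illustrative usage (assumes an already-connected ImcHandle named `handle`):
#
#   enabled, mo = ipmi_exists(handle, priv="admin")
#   if not enabled:
#       ipmi_enable(handle, priv="admin")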
| apache-2.0 | 1,437,764,092,708,313,000 | 27.508621 | 77 | 0.644995 | false | 3.622125 | false | false | false |
cdubz/babybuddy | core/templatetags/duration.py | 1 | 1802 | # -*- coding: utf-8 -*-
from django import template
from core.utils import duration_parts, duration_string as d_string
register = template.Library()
@register.filter
def duration_string(duration, precision='s'):
"""
Format a duration (e.g. "2 hours, 3 minutes, 35 seconds").
    :param duration: a timedelta instance.
:param precision: the level of precision to return (h for hours, m for
minutes, s for seconds)
:returns: a string representation of the duration.
"""
if not duration:
return ''
try:
return d_string(duration, precision)
except (ValueError, TypeError):
return ''
@register.filter
def hours(duration):
"""
Return the "hours" portion of a duration.
    :param duration: a timedelta instance.
:returns: an integer representing the number of hours in duration.
"""
if not duration:
return 0
try:
h, m, s = duration_parts(duration)
return h
except (ValueError, TypeError):
return 0
@register.filter
def minutes(duration):
"""
Return the "minutes" portion of a duration.
    :param duration: a timedelta instance.
:returns: an integer representing the number of minutes in duration.
"""
if not duration:
return 0
try:
h, m, s = duration_parts(duration)
return m
except (ValueError, TypeError):
return 0
@register.filter
def seconds(duration):
"""
Return the "seconds" portion of a duration.
:param duration: a timedetla instance.
:returns: an integer representing the number of seconds in duration.
"""
if not duration:
return 0
try:
h, m, s = duration_parts(duration)
return s
except (ValueError, TypeError):
return 0
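# Illustrative template usage (standard Django filter syntax; the variable name
# is a placeholder):
#
#   {% load duration %}
#   {{ entry.duration|duration_string }}      e.g. "2 hours, 3 minutes, 35 seconds"
#   {{ entry.duration|duration_string:"m" }}  precision limited to minutes
#   {{ entry.duration|hours }}h {{ entry.duration|minutes }}m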
| bsd-2-clause | -8,208,156,730,294,913,000 | 24.027778 | 74 | 0.632075 | false | 4.230047 | false | false | false |
riverma/climate | obs4MIPs/obs4MIPs_process.py | 10 | 19744 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import cdms2
import cdtime
import cmor
import sys
import getopt
import factory
import numpy
from factory.formats import import_equation
from Toolbox.ESGFresources import *
from Toolbox.ESGFexcel import *
from Toolbox.CMORresources import CMORTable
# ************************************************************************
# process()
#
# Convert to obs4MIPS file format.
# ************************************************************************
def process( rc ):
'''
Convert netcdf/matlab/grads files into CMIP5 format.
'''
    # pdb.set_trace()  # debug breakpoint disabled so the conversion can run unattended
# ----------------------------
# Loop yearly on file list.
# ----------------------------
file_template = rc['file_template'].split(",");
if( len(file_template) == 2 ):
template_parameter = file_template[1]
rc['file_template'] = file_template[0]
else:
template_parameter = 'years'
for year in rc[template_parameter].split(","):
if(year == ''):
files= os.popen( "ls " + rc['file_template'] ).readlines()
else:
# ------------------------------------------------
            # Use string formatting for path with same argument
# ------------------------------------------------
try:
tmplFile = rc['file_template'] % (year)
except:
tmplFile = rc['file_template'].format(year)
if( not os.path.isfile( tmplFile) ) :
print "**** Warning %s not found\n" % ( tmplFile )
continue
files= os.popen( "ls " + tmplFile).readlines()
if( files == [] ):
print "No file found: Check your resource file"
return -1
# ------------------------------------------------
# Get the right handler to manage this file format
# ------------------------------------------------
Handler = factory.HandlerFormats(files[0].strip())
# -----------------------------------
# Take care of cmor initialization.
# -----------------------------------
cmor.setup(inpath=rc['inpath'],
netcdf_file_action = cmor.CMOR_REPLACE)
cmor.dataset(experiment_id = rc['experiment_id'],
institution = rc['institution' ],
calendar = rc['calendar' ],
institute_id = rc['institute_id' ],
model_id = rc['model_id' ],
source = rc['source' ],
contact = rc['contact' ],
references = rc['references' ])
# -----------------------------------------
# add extra Global Attributes for obs4MIPs.
# -----------------------------------------
cmor.set_cur_dataset_attribute( 'instrument', rc['instrument' ])
cmor.set_cur_dataset_attribute( 'mip_specs', rc['mip_specs' ])
cmor.set_cur_dataset_attribute( 'data_structure', rc['data_structure'])
cmor.set_cur_dataset_attribute( 'source_type', rc['source_type' ])
cmor.set_cur_dataset_attribute( 'source_id', rc['source_id' ])
cmor.set_cur_dataset_attribute( 'realm', rc['realm' ])
cmor.set_cur_dataset_attribute( 'obs_project', rc['obs_project' ])
cmor.set_cur_dataset_attribute( 'processing_version',
rc['processing_version'] )
cmor.set_cur_dataset_attribute( 'processing_level',
rc['processing_level'] )
cmor.load_table(rc['table'])
# ---------------------------------------------------------------------
# We loop on each file found, a new cmor file will be create on each
# iteration. If you want to aggregate, you need to use Grads ctl file
# or NeCDF list of file.
# ---------------------------------------------------------------------
for file in files:
fnm=file.strip() # Get rid of \n
aVariable = eval(rc['original_var'])
nbVariable = len(aVariable)
# -----------------------------------------------------
            # ECMWF needs synoptic time 00z and 12z in the filename.
# We extract it from the first file in the list.
# -----------------------------------------------------
if( rc['source_fn'] == 'SYNOPTIC' ):
index = fnm.find('z.')
rc['SYNOPTIC'] = fnm[index-2:index]
# -----------------------
# Work on all variables
# -------------------------
for j in arange(nbVariable):
# ----------------------------------------------------
# Fetch the variable directly or excute equation.
# ----------------------------------------------------
try:
variable=aVariable[j]
Handler.open(fnm, variable=variable)
rc['cvrt_original_var'] = aVariable[j]
print "Working on variable %s " % variable
except:
if( aVariable[j] != 'equation' ) :
print "Variable %s can't open" % variable
continue
else:
print "Executing %s " % eval(rc['equation'])[j]
# pdb.set_trace()
rc['cvrt_original_units'] = eval(rc['original_units'])[j]
rc['cvrt_cmor_var'] = eval(rc['cmor_var'])[j]
rc['cvrt_equation'] = eval(rc['equation'])[j]
rc['cvrt_level'] = eval(rc['level'])[j]
data=Handler.getData()
# ----------------------------------------------------------
# Evaluate equation if needed. Usually used to change units
# ----------------------------------------------------------
if( rc['cvrt_equation'][0] == '@' ):
fncName = rc['cvrt_equation'][1:]
fnc = import_equation( "equations.%s" % fncName )
data[:]= fnc(Handler)
else:
data[:]=eval(rc['cvrt_equation'])
# -------------------------------------------------------------
# Save filled value in case data type is changed in createAxes
# -------------------------------------------------------------
fill_value = data.fill_value
# ---------------------------------------------
# Extract latitude/longitude
# ---------------------------------------------
lonvals=Handler.getLongitude()
latvals=Handler.getLatitude()
# ---------------------
# Create cmor time axis
# ----------------------
(rel_time, rel_time_bnds) = createTime(Handler, rc)
# ---------------------------------------------------
# Create cmor axes and add an axis to data if needed
# ---------------------------------------------------
(axes, data) = createAxes( rc, latvals, lonvals, data )
axis_ids = list()
for axis in axes:
axis_id = cmor.axis(**axis)
axis_ids.append(axis_id)
# ----------------------------------------------------------
# Create cmor variable
# Note: since this is in the loop, a new cmor file will be
# create for each cmor write command.
# ----------------------------------------------------------
varid = cmor.variable(table_entry = rc['cvrt_cmor_var'],
axis_ids = axis_ids,
history = '',
missing_value = fill_value,
original_name = rc['cvrt_original_var'],
units = rc['cvrt_original_units']
)
# -------------------------------
# Write data for this time frame.
# -------------------------------
cmor.write(varid,data,\
time_vals=rel_time,time_bnds=rel_time_bnds)
cmor.close(varid,file_name=True)
# ---------------------------------------
# Rename cmor files according to project.
# ---------------------------------------
if( movefiles(rc) ):
return -2
cmor.close()
return 0
# ********************************************************************
#
# createTime()
#
# Define Time and Time bound axes for cmor
#
# ********************************************************************
def createTime(Handler, rc):
'''
InputtimeUnits: specified from resource file or from first file
in a list of file.
return relative time and time bounds using OutputTimeUnits from
resource file.
'''
# ----------------------------------------------------
# Retrieve time units from file if not provided in the
# resource file.
# ----------------------------------------------------
InputTimeUnits = Handler.getTimeUnits(rc['InputTimeUnits'])
# --------------------------------------------------------
# Create time relative to January 1st 1900 to facilitate
# Threds software file handling.
# -------------------------------------------------------
cur_time = Handler.getTime(InputTimeUnits)
rel_time =[cur_time[i].torel(rc['OutputTimeUnits']).value
for i in range(len(cur_time))]
if( len(rel_time) == 1 ) :
deltarel = 1
else:
deltarel = rel_time[2] - rel_time[1]
rel_time_bnds = rel_time[:]
rel_time_bnds.append(rel_time[-1]+deltarel)
return rel_time, rel_time_bnds
# ********************************************************************
#
# getCMIP5lev()
#
# Extract CMIP5 mandatory level and recreate a new data array.
# They are 16 mandatory levels.
#
# ********************************************************************
def getCMIP5lev(data,rc):
'''
'''
oTable = CMORTable(rc['inpath'], rc['table'], "plevs")
# ----------------------
# Extract spefied levels
# ----------------------
if( 'levels' in oTable.dico.keys() ):
#pdb.set_trace()
dataLevels = data.getLevel()[:]
if( data.getLevel().units == "millibars" or
data.getLevel().units == "hPa" or
data.getLevel().units == "mbar" ):
# --------------------------
# Change units for to Pascal
# ---------------------------
LevelScaleFactor = 100
dataLevels = data.getLevel()[:] * LevelScaleFactor
# ----------------------------------------
# No level selected, return all data array
# ----------------------------------------
if( len(rc['cvrt_level'].split(":")) == 1 ):
levels = [ float(item) for item in dataLevels ]
lev=cdms2.createAxis( levels )
lev.designateLevel()
lev.units="pa"
lev.long_name=data.getLevel().long_name
#lev.id="lev"
#lev=data.getAxis(1)
#lev.__setattr__('_data_',dataLevels.astype(float))
#lev.__setattr__('units',"Pa")
#lev.units="hPa"
data2=data.pressureRegrid(lev)
return data2
if( rc['cvrt_level'].split(':')[1] == "CMIP5" ):
lev=cdms2.createAxis( [ float(item/LevelScaleFactor)
for item in dataLevels
if item in oTable.dico['levels' ] ] )
lev.designateLevel()
lev.units="pa"
lev.long_name = data.getLevel().long_name
data2=data.pressureRegrid(lev)
lev[:]=lev[:]*LevelScaleFactor
return data2
else:
# -----------------------
# Assume a list of values
# -----------------------
levels = rc['cvrt_level'].split(':')[1].split(",")
# --------------------------
# Change units to Pascal
# ---------------------------
dataLevels = [ float(rc['cvrt_level'].split(":")[1].split(",")[i]) * \
LevelScaleFactor for i in range(len(levels)) ]
# -----------------------------------
# Match dataLevels with CMIP5 levels
# Use file units
# -----------------------------------
lev=cdms2.createAxis( [ float(item/LevelScaleFactor)
for item in dataLevels
if item in oTable.dico['levels' ] ] )
# -----------------------------------
# Set axis metadata
# -----------------------------------
lev.units="pa"
lev.long_name = data.getLevel().long_name
lev.designateLevel()
# -----------------------------------
# Extract specified levels
# -----------------------------------
data2=data.pressureRegrid(lev)
# -----------------------------------
# Scale data back
# -----------------------------------
lev[:]=lev[:]*LevelScaleFactor
return data2
return data
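# Accepted 'level' specifications, as handled by getCMIP5lev() above and
# createAxes() below (illustration only; names before the ':' are free-form):
#   ''                    -> no vertical level handling
#   'height2m'            -> adds a scalar 2 m height axis
#   '<name>:CMIP5'        -> keep only the mandatory CMIP5 pressure levels
#   '<name>:1000,850,500' -> keep an explicit list of levels (file pressure units)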
# ********************************************************************
#
# createAxes()
#
# Define axes required by cmor and add z axis to data if needed
#
# ********************************************************************
def createAxes(rc, latvals, lonvals, data):
# ---------------------------------------------
# Create time/lat/lon axes using a dictionary
# ---------------------------------------------
axes = [
{'table_entry' : 'time',
'units' : rc['OutputTimeUnits']},
{'table_entry' : 'latitude',
'units' : 'degrees_north',
'coord_vals' : latvals,
'cell_bounds' : latvals.getBounds()},
{'table_entry' : 'longitude',
'units' : 'degrees_east',
'coord_vals' : lonvals,
'cell_bounds' : lonvals.getBounds()},
]
fill_value = data.fill_value
if( rc['cvrt_level'] == 'height2m' ):
axes.append({'table_entry' : 'height2m',
'units' : 'm',
'coord_vals' : [2.0] })
data = numpy.array(data[:])
data = data[:,:,:,numpy.newaxis]
elif( rc['cvrt_level'] != '' ):
data = getCMIP5lev( data, rc )
levels=data.getLevel()[:]
axes = numpy.insert(axes, 1,
{'table_entry' : 'plevs',
'units' : 'Pa',
'coord_vals' : levels })
return axes, data
# ********************************************************************
#
# usage()
#
# ********************************************************************
def usage(message):
'''
Describe program synopsis.
'''
print
print "*************************"
print message
print "*************************"
print
print
print "obs4MIPS_process.py [-h] -r resource"
print " resource: File containing Global attributes"
print ""
print "obs4MIPS will convert an input data file into CMIP5 format using "
print "CMOR. A directory path will be creating using CMOR by default or "
print "using a template provided in the resource file."
print
# ********************************************************************
#
# main()
#
# ********************************************************************
def main():
    '''
    Parse command-line options, read the resource/excel file and run the
    conversion via process().
    '''
    #pdb.set_trace()
try:
opts, args = getopt.getopt(sys.argv[1:], "hy:r:x:",
["help" ,"year=","resource=","excel="])
except getopt.GetoptError, err:
usage(str(err))# will print something like "option -a not recognized"
return(2)
# --------------------------
# Verify passed arguments
# --------------------------
year = -1
resource = None
excel = None
for o, a in opts:
if o in ("-r", "--resource"):
resource = a
elif o in ("-x", "--excel"):
excel = a
elif o in ("-h", "--help"):
            usage("Help")
return(0)
elif o in ("-y", "--year"):
            year = a
else:
assert False, "unhandled option"
# ------------------------------
# Does the resource file exist?
# ------------------------------
if( ((resource == None ) or ( not os.path.isfile( resource ) )) and (( excel == None ) or ( not os.path.isfile( excel ) )) ):
usage("bad Input Resource/Excel File")
return 1
# -----------------------
# Read in "rc" file
# -----------------------
if( resource ):
rc = ESGFresources( resource )
if( excel ):
rc = ESGFexcel( excel )
# --------------------------------
# Extract CMIP5 Table information
# --------------------------------
oTable = CMORTable(rc['inpath'], rc['table'])
if( not 'original_var' in rc.resources.keys() ):
sys.exit(-1)
rc['project_id'] = oTable[ 'project_id' ]
rc['product'] = oTable[ 'product' ]
rc['modeling_realm'] = oTable[ 'modeling_realm' ]
rc['frequency'] = oTable[ 'frequency' ]
if( process(rc) ):
return -1
return 0
# ********************************************************************
#
# Call main program and return exit code
#
# ********************************************************************
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | 159,500,257,221,938,300 | 38.886869 | 129 | 0.383762 | false | 4.878676 | false | false | false |
bruceyou/NewsBlur | apps/categories/management/commands/activate_free.py | 2 | 1058 | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from optparse import make_option
import datetime
class Command(BaseCommand):
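    """
    Activate the free account tier for a single user, looked up by username
    or e-mail address.
    """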
option_list = BaseCommand.option_list + (
make_option("-u", "--username", dest="username", nargs=1, help="Specify user id or username"),
)
def handle(self, *args, **options):
username = options.get('username')
user = None
if username:
            try:
                user = User.objects.get(username__icontains=username)
            except User.MultipleObjectsReturned:
                user = User.objects.get(username__iexact=username)
            except User.DoesNotExist:
                # Fall back to an e-mail lookup when no username matches.
                try:
                    user = User.objects.get(email__iexact=username)
                except User.DoesNotExist:
                    print " ---> No user found at: %s" % username
if user:
user.profile.activate_free()
user.profile.save()
else:
print " ---> No user found at: %s" % (username)
| mit | -1,529,755,085,474,918,100 | 34.266667 | 102 | 0.57845 | false | 4.560345 | false | false | false |
gitporst/spotpy | spotpy/algorithms/sceua.py | 1 | 16831 | # -*- coding: utf-8 -*-
'''
Copyright (c) 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska and Stijn Van Hoey
This class holds the Shuffled Complex Evolution Algortithm (SCE-UA) algorithm, based on Duan (1994):
Duan, Q., Sorooshian, S. and Gupta, V. K.: Optimal use of the SCE-UA global optimization method for calibrating watershed models, J. Hydrol., 158(3), 265–284, 1994.
Based on Optimization_SCE
Copyright (c) 2011 Stijn Van Hoey.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import _algorithm
import spotpy
import numpy as np
import time
class sceua(_algorithm):
'''
Implements the SCE-UA algorithm from Duan (1994).
Input
----------
spot_setup: class
model: function
Should be callable with a parameter combination of the parameter-function
and return an list of simulation results (as long as evaluation list)
parameter: function
When called, it should return a random parameter combination. Which can
be e.g. uniform or Gaussian
objectivefunction: function
Should return the objectivefunction for a given list of a model simulation and
observation.
evaluation: function
Should return the true values as return by the model.
dbname: str
* Name of the database where parameter, objectivefunction value and simulation results will be saved.
dbformat: str
* ram: fast suited for short sampling time. no file will be created and results are saved in an array.
* csv: A csv file will be created, which you can import afterwards.
parallel: str
        * seq: Sequential sampling (default): Normal iterations on one core of your cpu.
* mpc: Multi processing: Iterations on all available cores on your cpu (recommended for windows os).
* mpi: Message Passing Interface: Parallel computing on cluster pcs (recommended for unix os).
save_sim: boolean
*True: Simulation results will be saved
        *False: Simulation results will not be saved
alt_objfun: str or None, default: 'rmse'
alternative objectivefunction to be used for algorithm
* None: the objfun defined in spot_setup.objectivefunction is used
* any str: if str is found in spotpy.objectivefunctions,
this objectivefunction is used, else falls back to None
e.g.: 'log_p', 'rmse', 'bias', 'kge' etc.
'''
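    # Typical use (sketch; assumes a spot_setup object as described above):
    #   sampler = sceua(spot_setup, dbname='sceua_run', dbformat='csv')
    #   sampler.sample(repetitions=5000, ngs=20)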
def __init__(self, *args, **kwargs):
if 'alt_objfun' not in kwargs:
kwargs['alt_objfun'] = 'rmse'
super(sceua, self).__init__(*args, **kwargs)
def find_min_max(self):
randompar=self.parameter()['random']
for i in range(1000):
randompar=np.column_stack((randompar,self.parameter()['random']))
return np.amin(randompar,axis=1),np.amax(randompar,axis=1)
"""
def simulate(self,params):
if self.repeat.phase=='burnin':
id,params = params
simulations =
"""
def simulate(self,id_params_tuple):
"""This overwrites the simple wrapper function of _algorithms.py
        and makes a two phase mpi parallelization possible:
1) burn-in
2) complex evolution
"""
if not self.repeat.phase: #burn-in
id,params = id_params_tuple
return id,params,self.model(params)
else:#complex-evolution
igs,x,xf,icall,cx,cf,sce_vars= id_params_tuple
self.npg,self.nopt,self.ngs,self.nspl,self.nps,self.bl,self.bu, self.status = sce_vars
# Partition the population into complexes (sub-populations);
# cx=np.zeros((self.npg,self.nopt))
# cf=np.zeros((self.npg))
#print(igs)
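            # k2 picks every ngs-th member starting at offset igs, so complex
            # igs receives population members igs, igs+ngs, igs+2*ngs, ...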
k1=np.arange(self.npg,dtype=int)
k2=k1*self.ngs+igs
cx[k1,:] = x[k2,:]
cf[k1] = xf[k2]
# Evolve sub-population igs for self.self.nspl steps:
likes=[]
sims=[]
pars=[]
for loop in xrange(self.nspl):
# Select simplex by sampling the complex according to a linear
# probability distribution
lcs=np.array([0]*self.nps)
lcs[0] = 1
for k3 in range(1,self.nps):
for i in range(1000):
#lpos = 1 + int(np.floor(self.npg+0.5-np.sqrt((self.npg+0.5)**2 - self.npg*(self.npg+1)*np.random.random())))
lpos = int(np.floor(self.npg+0.5-np.sqrt((self.npg+0.5)**2 - self.npg*(self.npg+1)*np.random.random())))
#idx=find(lcs(1:k3-1)==lpos)
                    idx=(lcs[0:k3]==lpos).nonzero() # check whether this element was already chosen
if idx[0].size == 0:
break
lcs[k3] = lpos
lcs.sort()
# Construct the simplex:
s = np.zeros((self.nps,self.nopt))
s=cx[lcs,:]
sf = cf[lcs]
snew,fnew,icall,simulation = self._cceua(s,sf,icall)
likes.append(fnew)
pars.append(list(snew))
self.status(igs,-fnew,snew)
sims.append(list(simulation))
#self.datawriter.save(-fnew,list(snew), simulations = list(simulation),chains = igs)
# Replace the worst point in Simplex with the new point:
s[-1,:] = snew
sf[-1] = fnew
# Replace the simplex into the complex;
cx[lcs,:] = s
cf[lcs] = sf
# Sort the complex;
idx = np.argsort(cf)
cf = np.sort(cf)
cx=cx[idx,:]
# Replace the complex back into the population;
return igs,likes,pars,sims,cx,cf,k1,k2
def sample(self,repetitions,ngs=20,kstop=100,pcento=0.0000001,peps=0.0000001):
"""
        Samples from parameter distributions using SCE-UA (Duan, 1994),
        converted to python by Van Hoey (2011).
Parameters
----------
repetitions: int
maximum number of function evaluations allowed during optimization
ngs: int
number of complexes (sub-populations), take more then the number of
analysed parameters
        kstop: int
            maximum number of evolution loops before convergence
        pcento: float
            the percentage change allowed in kstop loops before convergence
        peps: float
            convergence criterion
"""
# Initialize the Progress bar
starttime = time.time()
intervaltime = starttime
# Initialize SCE parameters:
self.ngs=ngs
randompar=self.parameter()['random']
self.nopt=randompar.size
self.npg=2*self.nopt+1
self.nps=self.nopt+1
self.nspl=self.npg
npt=self.npg*self.ngs
self.iseed=1
self.bl,self.bu = self.parameter()['minbound'],self.parameter()['maxbound']
bound = self.bu-self.bl #np.array
# Create an initial population to fill array x(npt,self.self.nopt):
x=self._sampleinputmatrix(npt,self.nopt)
        #Set initial parameter position
#iniflg=1
nloop=0
icall=0
xf=np.zeros(npt)
#Burn in
param_generator = ((rep,list(x[rep])) for rep in xrange(int(npt)))
for rep,randompar,simulations in self.repeat(param_generator):
#Calculate the objective function
like = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
#Save everything in the database
self.status(rep,-like,randompar)
xf[rep] = like
self.datawriter.save(-like,randompar,simulations=simulations)
icall += 1
#Progress bar
acttime=time.time()
if acttime-intervaltime>=2:
text='%i of %i (best like=%g)' % (rep,repetitions,self.status.objectivefunction)
print(text)
intervaltime=time.time()
# Sort the population in order of increasing function values;
idx = np.argsort(xf)
xf = np.sort(xf)
x=x[idx,:]
# Record the best and worst points;
bestx=x[0,:]
bestf=xf[0]
#worstx=x[-1,:]
#worstf=xf[-1]
BESTF=bestf
BESTX=bestx
ICALL=icall
# Compute the standard deviation for each parameter
#xnstd=np.std(x,axis=0)
# Computes the normalized geometric range of the parameters
gnrng=np.exp(np.mean(np.log((np.max(x,axis=0)-np.min(x,axis=0))/bound)))
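        # A gnrng close to zero means the population has collapsed into a
        # small region of the parameter space.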
        # Check for convergence;
if icall >= repetitions:
print('*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT')
print('ON THE MAXIMUM NUMBER OF TRIALS ')
print(repetitions)
print('HAS BEEN EXCEEDED. SEARCH WAS STOPPED AT TRIAL NUMBER:')
print(icall)
print('OF THE INITIAL LOOP!')
if gnrng < peps:
print('THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE')
# Begin evolution loops:
nloop = 0
criter=[]
criter_change=1e+5
starttime=time.time()
intervaltime=starttime
acttime=time.time()
self.repeat.setphase('ComplexEvo')
while icall<repetitions and gnrng>peps and criter_change>pcento:
nloop+=1
#print nloop
#print 'Start MPI'
# Loop on complexes (sub-populations);
cx=np.zeros((self.npg,self.nopt))
cf=np.zeros((self.npg))
sce_vars=[self.npg,self.nopt,self.ngs,self.nspl,self.nps,self.bl,self.bu, self.status]
param_generator = ((rep,x,xf,icall,cx,cf,sce_vars) for rep in xrange(int(self.ngs)))
for igs,likes,pars,sims,cx,cf,k1,k2 in self.repeat(param_generator):
icall+=len(likes)
x[k2,:] = cx[k1,:]
xf[k2] = cf[k1]
for i in range(len(likes)):
self.status(icall,-likes[i],pars[i])
self.datawriter.save(-likes[i],list(pars[i]), simulations = list(sims[i]),chains = igs)
#Progress bar
acttime=time.time()
if acttime-intervaltime>=2:
text='%i of %i (best like=%g)' % (icall,repetitions,self.status.objectivefunction)
print(text)
intervaltime=time.time()
# End of Loop on Complex Evolution;
# Shuffled the complexes;
idx = np.argsort(xf)
xf = np.sort(xf)
x=x[idx,:]
# Record the best and worst points;
bestx=x[0,:]
bestf=xf[0]
#worstx=x[-1,:]
#worstf=xf[-1]
            BESTX = np.append(BESTX,bestx, axis=0) # append here and reshape at the end
BESTF = np.append(BESTF,bestf)
ICALL = np.append(ICALL,icall)
# Computes the normalized geometric range of the parameters
gnrng=np.exp(np.mean(np.log((np.max(x,axis=0)-np.min(x,axis=0))/bound)))
            # Check for convergence;
if icall >= repetitions:
print('*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT')
print('ON THE MAXIMUM NUMBER OF TRIALS ')
print(repetitions)
print('HAS BEEN EXCEEDED.')
if gnrng < peps:
print('THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE')
criter=np.append(criter,bestf)
            if nloop >= kstop: # needed so that at least kstop loops are run before checking
criter_change= np.abs(criter[nloop-1]-criter[nloop-kstop])*100
criter_change= criter_change/np.mean(np.abs(criter[nloop-kstop:nloop]))
if criter_change < pcento:
text='THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY LESS THAN THE THRESHOLD %f' %(kstop,pcento)
print(text)
print('CONVERGENCY HAS ACHIEVED BASED ON OBJECTIVE FUNCTION CRITERIA!!!')
# End of the Outer Loops
text='SEARCH WAS STOPPED AT TRIAL NUMBER: %d' %icall
print(text)
text='NORMALIZED GEOMETRIC RANGE = %f' %gnrng
print(text)
text='THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY %f' %(kstop,criter_change)
print(text)
#reshape BESTX
BESTX=BESTX.reshape(BESTX.size/self.nopt,self.nopt)
self.repeat.terminate()
try:
self.datawriter.finalize()
except AttributeError: #Happens if no database was assigned
pass
print('Best parameter set')
print(self.status.params)
text='Duration:'+str(round((acttime-starttime),2))+' s'
print(text)
def _cceua(self,s,sf,icall):
# This is the subroutine for generating a new point in a simplex
#
# s(.,.) = the sorted simplex in order of increasing function values
# s(.) = function values in increasing order
#
# LIST OF LOCAL VARIABLES
# sb(.) = the best point of the simplex
# sw(.) = the worst point of the simplex
# w2(.) = the second worst point of the simplex
# fw = function value of the worst point
# ce(.) = the centroid of the simplex excluding wo
# snew(.) = new point generated from the simplex
# iviol = flag indicating if constraints are violated
# = 1 , yes
# = 0 , no
self.nps,self.nopt=s.shape
alpha = 1.0
beta = 0.5
# Assign the best and worst points:
sw=s[-1,:]
fw=sf[-1]
# Compute the centroid of the simplex excluding the worst point:
ce= np.mean(s[:-1,:],axis=0)
# Attempt a reflection point
snew = ce + alpha*(ce-sw)
# Check if is outside the bounds:
ibound=0
s1=snew-self.bl
idx=(s1<0).nonzero()
        if idx[0].size != 0:
ibound=1
s1=self.bu-snew
idx=(s1<0).nonzero()
        if idx[0].size != 0:
ibound=2
if ibound >= 1:
            snew = self._sampleinputmatrix(1,self.nopt)[0] # TODO: check
## fnew = functn(self.nopt,snew);
simulations=self.model(snew)
like = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
fnew = like#bcf.algorithms._makeSCEUAformat(self.model,self.observations,snew)
#fnew = self.model(snew)
icall += 1
# Reflection failed; now attempt a contraction point:
if fnew > fw:
snew = sw + beta*(ce-sw)
simulations=self.model(snew)
like = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
fnew = like
icall += 1
# Both reflection and contraction have failed, attempt a random point;
if fnew > fw:
            snew = self._sampleinputmatrix(1,self.nopt)[0] # TODO: check
simulations=self.model(snew)
like = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
fnew = like#bcf.algorithms._makeSCEUAformat(self.model,self.observations,snew)
#print 'NSE = '+str((fnew-1)*-1)
#fnew = self.model(snew)
icall += 1
# END OF CCE
return snew,fnew,icall,simulations
def _sampleinputmatrix(self,nrows,npars):
'''
        Create an input parameter matrix for nrows simulations,
        for npars parameters with bounds ub and lb (np.arrays of the same size).
        distname gives the initial sampling distribution (currently one for all parameters)
returns np.array
'''
x=np.zeros((nrows,npars))
for i in range(nrows):
x[i,:]= self.parameter()['random']
return x
# Matrix=np.empty((nrows,npars))
# for i in range(nrows):
# Matrix[i]= self.parameter()['random']
# return Matrix
| mit | 8,285,090,159,764,058,000 | 37.600917 | 164 | 0.554757 | false | 3.788609 | false | false | false |
saks/hb | records/models.py | 1 | 2100 | import logging
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from djmoney.models.fields import MoneyField
TRANSACTION_TYPE = (
    ('EXP', _('Expenses')),
('INC', _('Income')),
)
log = logging.getLogger(__name__)
class Record(models.Model):
'''
    Record model defines the storage of income/expense records.
    Amount field is a MoneyField, holding the amount of money and its currency (CAD by default).
    Transaction type determines whether the record is an EXP (Expenses) or INC (Income) entry.
'''
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
tags = ArrayField(
models.TextField(max_length=20), null=False, blank=True, default=list
)
amount = MoneyField(max_digits=15, decimal_places=2, default_currency='CAD')
comment = models.TextField(null=True, blank=True)
transaction_type = models.CharField(choices=TRANSACTION_TYPE, max_length=3)
created_at = models.DateTimeField(default=timezone.now, blank=True)
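    # Note: the Redis tag counters are maintained explicitly through
    # add_tags_weights()/remove_tags_weights() below; nothing in this model
    # updates them automatically on save() or delete().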
@property
def redis_tags_key(self):
'''
Return Redis key where stored sorted set of tags frequency usage.
'''
return settings.REDIS_KEY_USER_TAGS % (self.user_id,)
def __str__(self):
return '%s %s' % (self.amount, ', '.join(self.tags))
def remove_tags_weights(self):
'''
Remove tags from frequency tags set.
'''
log.debug('Remove tags weights')
pipe = settings.REDIS_CONN.pipeline()
for tag in self.tags:
pipe.zincrby(self.redis_tags_key, -1, tag)
# remove 0 score tags
pipe.zremrangebyscore(self.redis_tags_key, 0, 0)
pipe.execute()
def add_tags_weights(self):
'''
Add tags to usage frequency set.
'''
log.debug('Add tags weights')
pipe = settings.REDIS_CONN.pipeline()
for tag in self.tags:
pipe.zincrby(self.redis_tags_key, 1, tag)
pipe.execute()
| mit | 7,670,605,267,441,292,000 | 30.818182 | 88 | 0.650952 | false | 3.743316 | false | false | false |
sam81/pysoundanalyser | pysoundanalyser/dialog_save_sound.py | 1 | 3550 | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2017 Samuele Carcagno <[email protected]>
# This file is part of pysoundanalyser
# pysoundanalyser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pysoundanalyser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pysoundanalyser. If not, see <http://www.gnu.org/licenses/>.
from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals
from .pyqtver import*
if pyqtversion == 4:
from PyQt4 import QtGui, QtCore
from PyQt4.QtGui import QComboBox, QDialog, QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QMessageBox
elif pyqtversion == -4:
from PySide import QtGui, QtCore
from PySide.QtGui import QComboBox, QDialog, QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QMessageBox
elif pyqtversion == 5:
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QComboBox, QDialog, QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QMessageBox
class saveSoundDialog(QDialog):
def __init__(self, parent):
QDialog.__init__(self, parent)
grid = QGridLayout()
n = 0
#self.fileToWrite = None
self.guessFileExtension = True
formatLabel = QLabel(self.tr('File Format: '))
grid.addWidget(formatLabel, n, 0)
self.formatChooser = QComboBox()
self.formatChooser.addItems(["wav"])#available_file_formats())
self.formatChooser.setCurrentIndex(0)
grid.addWidget(self.formatChooser, n, 1)
self.formatChooser.currentIndexChanged[int].connect(self.onFileFormatChange)
self.suggestedExtension = str(self.formatChooser.currentText())
encodingLabel = QLabel(self.tr('Bits: '))
grid.addWidget(encodingLabel, n, 2)
self.encodingChooser = QComboBox()
self.encodingChooser.addItems(["16", "24", "32"])#available_encodings(str(self.formatChooser.currentText())))
self.encodingChooser.setCurrentIndex(0)
grid.addWidget(self.encodingChooser, n, 3)
n = n+1
channelLabel = QLabel(self.tr('Channel: '))
grid.addWidget(channelLabel, n, 0)
self.channelChooser = QComboBox()
self.channelChooser.addItems([self.tr('Stereo'), self.tr('Mono')])
self.channelChooser.setCurrentIndex(0)
grid.addWidget(self.channelChooser, n, 1)
n = n+1
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok|
QDialogButtonBox.Cancel)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
grid.addWidget(buttonBox, n, 2)
self.setLayout(grid)
self.setWindowTitle(self.tr("Save Sound Options"))
def onFileFormatChange(self):
pass
## for i in range(self.encodingChooser.count()):
## self.encodingChooser.removeItem(0)
## self.encodingChooser.addItems(available_encodings(str(self.formatChooser.currentText())))
## self.suggestedExtension = str(self.formatChooser.currentText())
| gpl-3.0 | 5,370,182,566,540,507,000 | 45.103896 | 125 | 0.684789 | false | 3.944444 | false | false | false |
AusTac/parma | b3/plugins/censor.py | 1 | 12779 | #
# BigBrotherBot(B3) (www.bigbrotherbot.net)
# Copyright (C) 2005 Michael "ThorN" Thornton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# CHANGELOG
# 2012/07/03 - 3.1 - Courgette
#   Fixes a bug which prevented regular expression "\sd[i!1*]ck\s" from matching the word "d!ck"
# 2012/07/03 - 3.0.1 - Courgette
# Gives meaningful log messages when loading the config file
# 2011/12/26 - 3.0 - Courgette
# Refactor and make the checks on raw text before checks on cleaned text. Add tests
# 2/12/2011 - 2.2.2 - Bravo17
# Fix for reason keyword not working
# 1/16/2010 - 2.2.1 - xlr8or
# Plugin can now be disabled with !disable censor
# 1/16/2010 - 2.2.0 - xlr8or
# Added ignore_length as an optional configurable option
# Started debugging the badname checker
# 8/13/2005 - 2.0.0 - ThorN
# Converted to use XML config
# Allow custom penalties for words and names
# 7/23/2005 - 1.1.0 - ThorN
# Added data column to penalties table
# Put censored message/name in the warning data
__author__ = 'ThorN, xlr8or, Bravo17, Courgette'
__version__ = '3.1'
import b3, re, traceback, sys, threading
import b3.events
import b3.plugin
from b3.config import XmlConfigParser
from b3 import functions
class PenaltyData:
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
type = None
reason = None
keyword = None
duration = 0
def __repr__(self):
return """Penalty(type=%r, reason=%r, keyword=%r, duration=%r)""" % (self.type, self.reason, self.keyword, self.duration)
def __str__(self):
data = {"type": self.type, "reason": self.reason, "reasonkeyword": self.keyword, "duration": self.duration}
return "<penalty " + ' '.join(['%s="%s"' % (k, v) for k, v in data.items() if v]) + " />"
class CensorData:
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
name = None
penalty = None
regexp = None
def __repr__(self):
return """CensorData(name=%r, penalty=%r, regexp=%r)""" % (self.name, self.penalty, self.regexp)
#--------------------------------------------------------------------------------------------------
class CensorPlugin(b3.plugin.Plugin):
_adminPlugin = None
_reClean = re.compile(r'[^0-9a-z ]+', re.I)
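    # used by clean() to replace anything that is not a letter, digit or space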
_defaultBadWordPenalty = PenaltyData(type="warning", keyword="cuss")
_defaultBadNamePenalty = PenaltyData(type="warning", keyword="badname")
_maxLevel = 0
_ignoreLength = 3
def onStartup(self):
self._adminPlugin = self.console.getPlugin('admin')
if not self._adminPlugin:
return False
self.registerEvent(b3.events.EVT_CLIENT_SAY)
self.registerEvent(b3.events.EVT_CLIENT_TEAM_SAY)
self.registerEvent(b3.events.EVT_CLIENT_NAME_CHANGE)
self.registerEvent(b3.events.EVT_CLIENT_AUTH)
def onLoadConfig(self):
assert isinstance(self.config, XmlConfigParser)
try:
self._maxLevel = self.config.getint('settings', 'max_level')
except Exception, err:
self._maxLevel = 0
self.warning(err)
self.warning("using default value %s for settings:max_level" % self._maxLevel)
try:
self._ignoreLength = self.config.getint('settings', 'ignore_length')
except Exception, err:
self._ignoreLength = 3
self.warning(err)
self.warning("using default value %s for settings:ignore_length" % self._ignoreLength)
default_badwords_penalty_nodes = self.config.get('badwords/penalty')
if len(default_badwords_penalty_nodes):
penalty = default_badwords_penalty_nodes[0]
self._defaultBadWordPenalty = PenaltyData(type = penalty.get('type'),
reason = penalty.get('reason'),
keyword = penalty.get('reasonkeyword'),
duration = functions.time2minutes(penalty.get('duration')))
else:
self.warning("""no default badwords penalty found in config. Using default : %s""" % self._defaultBadNamePenalty)
default_badnames_penalty_nodes = self.config.get('badnames/penalty')
if len(default_badnames_penalty_nodes):
penalty = default_badnames_penalty_nodes[0]
self._defaultBadNamePenalty = PenaltyData(type = penalty.get('type'),
reason = penalty.get('reason'),
keyword = penalty.get('reasonkeyword'),
duration = functions.time2minutes(penalty.get('duration')))
else:
self.warning("""no default badnames penalty found in config. Using default : %s""" % self._defaultBadNamePenalty)
# load bad words into memory
self._badWords = []
for e in self.config.get('badwords/badword'):
penalty_node = e.find('penalty')
word_node = e.find('word')
regexp_node = e.find('regexp')
self._add_bad_word(rulename=e.get('name'),
penalty=penalty_node,
word=word_node.text if word_node is not None else None,
regexp=regexp_node.text if regexp_node is not None else None)
# load bad names into memory
self._badNames = []
for e in self.config.get('badnames/badname'):
penalty_node = e.find('penalty')
word_node = e.find('word')
regexp_node = e.find('regexp')
self._add_bad_name(rulename=e.get('name'),
penalty=penalty_node,
word=word_node.text if word_node is not None else None,
regexp=regexp_node.text if regexp_node is not None else None)
def _add_bad_word(self, rulename, penalty=None, word=None, regexp=None):
if word is regexp is None:
self.warning("badword rule [%s] has no word and no regular expression to search for" % rulename)
elif word is not None and regexp is not None:
self.warning("badword rule [%s] cannot have both a word and regular expression to search for" % rulename)
elif regexp is not None:
# has a regular expression
self._badWords.append(self._getCensorData(rulename, regexp.strip(), penalty, self._defaultBadWordPenalty))
self.debug("badword rule '%s' loaded" % rulename)
elif word is not None:
# has a plain word
self._badWords.append(self._getCensorData(rulename, '\\s' + word.strip() + '\\s', penalty, self._defaultBadWordPenalty))
self.debug("badword rule '%s' loaded" % rulename)
def _add_bad_name(self, rulename, penalty=None, word=None, regexp=None):
if word is regexp is None:
self.warning("badname rule [%s] has no word and no regular expression to search for" % rulename)
elif word is not None and regexp is not None:
self.warning("badname rule [%s] cannot have both a word and regular expression to search for" % rulename)
elif regexp is not None:
# has a regular expression
self._badNames.append(self._getCensorData(rulename, regexp.strip(), penalty, self._defaultBadNamePenalty))
self.debug("badname rule '%s' loaded" % rulename)
elif word is not None:
# has a plain word
self._badNames.append(self._getCensorData(rulename, '\\s' + word.strip() + '\\s', penalty, self._defaultBadNamePenalty))
self.debug("badname rule '%s' loaded" % rulename)
def _getCensorData(self, name, regexp, penalty, defaultPenalty):
try:
regexp = re.compile(regexp, re.I)
except re.error, e:
self.error('Invalid regular expression: %s - %s' % (name, regexp))
raise
if penalty is not None:
pd = PenaltyData(type = penalty.get('type'),
reason = penalty.get('reason'),
keyword = penalty.get('reasonkeyword'),
duration = functions.time2minutes(penalty.get('duration')))
else:
pd = defaultPenalty
return CensorData(name=name, penalty=pd, regexp=regexp)
def onEvent(self, event):
try:
if not self.isEnabled():
return
elif not event.client:
return
elif event.client.cid is None:
return
elif event.client.maxLevel > self._maxLevel:
return
elif not event.client.connected:
return
if event.type == b3.events.EVT_CLIENT_AUTH or event.type == b3.events.EVT_CLIENT_NAME_CHANGE:
self.checkBadName(event.client)
elif len(event.data) > self._ignoreLength:
if event.type == b3.events.EVT_CLIENT_SAY or \
event.type == b3.events.EVT_CLIENT_TEAM_SAY:
self.checkBadWord(event.data, event.client)
except b3.events.VetoEvent:
raise
except Exception, msg:
self.error('Censor plugin error: %s - %s', msg, traceback.extract_tb(sys.exc_info()[2]))
def penalizeClient(self, penalty, client, data=''):
"""\
This is the default penalisation for using bad language in say and teamsay
"""
#self.debug("%s"%((penalty.type, penalty.reason, penalty.keyword, penalty.duration),))
# fix for reason keyword not working
if penalty.keyword is None:
penalty.keyword = penalty.reason
self._adminPlugin.penalizeClient(penalty.type, client, penalty.reason, penalty.keyword, penalty.duration, None, data)
def penalizeClientBadname(self, penalty, client, data=''):
"""\
This is the penalisation for bad names
"""
#self.debug("%s"%((penalty.type, penalty.reason, penalty.keyword, penalty.duration),))
self._adminPlugin.penalizeClient(penalty.type, client, penalty.reason, penalty.keyword, penalty.duration, None, data)
def checkBadName(self, client):
if not client.connected:
self.debug('Client not connected?')
return
cleaned_name = ' ' + self.clean(client.exactName) + ' '
self.info("Checking '%s'=>'%s' for badname" % (client.exactName, cleaned_name))
was_penalized = False
for w in self._badNames:
if w.regexp.search(client.exactName):
self.debug("badname rule [%s] matches '%s'" % (w.name, client.exactName))
self.penalizeClientBadname(w.penalty, client, '%s (rule %s)' % (client.exactName, w.name))
was_penalized = True
break
if w.regexp.search(cleaned_name):
self.debug("badname rule [%s] matches cleaned name '%s' for player '%s'" % (w.name, cleaned_name, client.exactName))
self.penalizeClientBadname(w.penalty, client, '%s (rule %s)' % (client.exactName, w.name))
was_penalized = True
break
if was_penalized:
# check again in 1 minute
t = threading.Timer(60, self.checkBadName, (client,))
t.start()
return
def checkBadWord(self, text, client):
cleaned = ' ' + self.clean(text) + ' '
text = ' ' + text + ' '
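        # Both strings are padded with spaces so that the '\sword\s' patterns
        # built in _add_bad_word() can also match at the start and end of the line.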
self.debug("cleaned text: [%s]" % cleaned)
for w in self._badWords:
if w.regexp.search(text):
self.debug("badword rule [%s] matches '%s'" % (w.name, text))
self.penalizeClient(w.penalty, client, text)
raise b3.events.VetoEvent
if w.regexp.search(cleaned):
self.debug("badword rule [%s] matches cleaned text '%s'" % (w.name, cleaned))
self.penalizeClient(w.penalty, client, '%s => %s' % (text, cleaned))
raise b3.events.VetoEvent
def clean(self, data):
return re.sub(self._reClean, ' ', self.console.stripColors(data.lower()))
| gpl-2.0 | 2,528,547,837,317,563,000 | 42.617747 | 132 | 0.596604 | false | 3.846779 | true | false | false |
trenton42/txbalanced-v1.0 | setup.py | 1 | 1409 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='txbalanced',
version='0.1.3',
description='Balanced Payments library Twisted style',
long_description=readme + '\n\n' + history,
author='Trenton Broughton',
author_email='[email protected]',
url='https://github.com/trenton42/txbalanced',
packages=[
'txbalanced',
],
package_dir={'txbalanced': 'txbalanced'},
include_package_data=True,
install_requires=[
'treq',
'python-dateutil',
'pyopenssl',
'wrapt'
],
license="BSD",
zip_safe=False,
keywords='txbalanced',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
test_suite='tests',
)
| bsd-3-clause | -3,378,068,112,512,977,000 | 25.092593 | 66 | 0.597587 | false | 3.631443 | false | true | false |
alfa-addon/addon | plugin.video.alfa/channels/pelisvips.py | 1 | 12431 | # -*- coding: utf-8 -*-
# -*- Channel PelisVips -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
    import urllib.parse as urlparse  # Very slow in PY2; native in PY3
else:
    import urlparse  # Use the native PY2 module, which is faster
import re
from bs4 import BeautifulSoup
from channels import autoplay, filtertools
from core import httptools, scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
#Yoast SEO v11.6
host = "https://www.pelisvips.com/"
unify = config.get_setting('unify')
lquality = {'hd1080p': 'FHD', 'hd720p': 'HD', 'hdreal720': 'HD',
'br screener': 'BR-S', 'ts screener': 'TS'}
list_quality = list(lquality.values())
list_servers = ['directo', 'fembed', 'rapidvideo', 'mega', 'vidlox', 'streamango', 'openload']
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST',
'Subtitulado': 'VOSE', 'Subtitulada': 'VOSE'}
list_language = list(IDIOMAS.values())
def mainlist(item):
logger.info()
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, title="Estrenos", action="list_all",
url= host+'genero/estrenos/', viewmode="movie_with_plot",
thumbnail=get_thumb("premieres", auto=True)))
itemlist.append(Item(channel=item.channel, title="Novedades", action="list_all",
url= host, viewmode="movie_with_plot",
thumbnail=get_thumb("newest", auto=True)))
itemlist.append(Item(channel=item.channel, title="Géneros", action="genres",
url=host, thumbnail=get_thumb("genres", auto=True)))
itemlist.append(Item(channel=item.channel, title="Castellano", action="list_all",
url=host+'ver-idioma/castellano/',
thumbnail=get_thumb("cast", auto=True)))
itemlist.append(Item(channel=item.channel, title="Latino", action="list_all",
url=host+'ver-idioma/latino/',
thumbnail=get_thumb("lat", auto=True)))
itemlist.append(Item(channel=item.channel, title="VOSE", action="list_all",
url=host+'ver-idioma/subtitulada/',
thumbnail=get_thumb("vose", auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search",
url= host + "?s=", thumbnail=get_thumb("search", auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def create_soup(url, soup=True, referer=None, post=None):
logger.info()
data = httptools.downloadpage(url, headers=referer, post=post).data
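    # With soup=True the page is parsed into a BeautifulSoup object; otherwise
    # the raw HTML string is returned.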
if soup:
soup = BeautifulSoup(data, "html5lib", from_encoding="utf-8")
return soup
return data
def genres(item):
logger.info()
itemlist = []
soup = create_soup(item.url)
bloque = soup.find_all('ul', class_="sbi-list")[1]
matches = bloque.find_all('a')
for elem in matches:
url = elem['href']
title = elem.text.strip()
itemlist.append(Item(channel=item.channel, action="list_all",
title=title, url=url))
return itemlist
def search(item, texto):
logger.info()
texto_post = texto.replace(" ", "+")
item.url = item.url + texto_post
try:
return list_search(item)
    # Catch the exception, so the global search is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def list_search(item):
logger.info()
itemlist = []
soup = create_soup(item.url)
matches = soup.find_all('li', class_='itemlist')
for elem in matches:
url = elem.a['href']
url = urlparse.urljoin(host, url)
stitle = elem.a['title']
thumbnail = elem.img['src']
info = elem.find('p', class_='main-info-list').text.partition('Calidad:')
plot = elem.find('p', class_='text-list').text.partition('cula Completa ')[2]
title = clear_title(stitle)
year = scrapertools.find_single_match(stitle, r'\((\d{4})\)$')
quality = info[2].strip()
quality = lquality.get(quality.lower(), quality)
info_langs = info[0].split('Idioma:')[1]
list_langs = scrapertools.find_multiple_matches(info_langs, '([a-zA-Z]+)')
langs, list_langs = extrae_idiomas(list_langs)
plot = ''
if not unify:
stitle = "[B]%s[/B] [COLOR darkgrey](%s)[/COLOR]" % (
title, year)
plot = '[COLOR yellowgreen][I]Idiomas[/COLOR]: %s\n[COLOR yellowgreen]Calidad[/COLOR]: %s[/I]\n\n' % (
langs, quality)
itemlist.append(Item(channel = item.channel,
action = 'findvideos',
contentTitle = title,
infoLabels = {'year':year},
quality = quality,
thumbnail = thumbnail,
title = stitle,
language=list_langs,
url = url,
plot=plot,
plot2=plot
))
tmdb.set_infoLabels(itemlist, True)
if not unify:
for item in itemlist:
if item.infoLabels['tmdb_id'] and not 'Idiomas' in item.contentPlot:
item.plot1 = item.contentPlot
item.contentPlot = item.plot2+item.contentPlot
return itemlist
def list_all(item):
logger.info()
itemlist = []
soup = create_soup(item.url)
matches = soup.find_all('a', class_='movie-item clearfix tooltipS')
for elem in matches:
url = elem['href']
url = urlparse.urljoin(host, url)
quality = elem.find('div', class_='_format').text.strip()
thumbnail = elem.img['src']
stitle = elem.img['alt']
syear = elem.find('div', class_='label_year').text
audio = elem.find('div', class_='_audio')
title, year = clear_title(stitle, syear)
stitle = title
quality = lquality.get(quality.lower(), quality)
list_langs = audio.find_all('img')
langs, list_langs = extrae_idiomas(list_langs)
plot = ''
if not unify:
stitle = "[B]%s[/B] [COLOR darkgrey](%s)[/COLOR]" % (
title, year)
plot = '[COLOR yellowgreen][I]Idiomas[/COLOR]: %s\n[COLOR yellowgreen]Calidad[/COLOR]: %s[/I]\n\n' % (
langs, quality)
itemlist.append(Item(channel = item.channel,
action = 'findvideos',
contentTitle = title,
infoLabels = {'year':year},
quality = quality,
thumbnail = thumbnail,
title = stitle,
language=list_langs,
url = url,
plot=plot,
plot2=plot
))
tmdb.set_infoLabels(itemlist, True)
if not unify:
for item in itemlist:
if item.infoLabels['tmdb_id'] and not 'Idiomas' in item.contentPlot:
item.plot1 = item.contentPlot
item.contentPlot = item.plot2+item.contentPlot
try:
next_page = soup.find('a', class_='nextpostslink')['href']
next_page = urlparse.urljoin(host, next_page)
except:
next_page = None
if next_page:
itemlist.append(Item(channel=item.channel, action="list_all",
title='Página Siguiente >>',
text_color='aquamarine',
url=next_page.strip()))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
from lib import players_parse
    # Download the page
soup = create_soup(item.url).find('div', id='movie-player')
matches = soup.find_all('li')
for elem in matches:
title = "%s"
url = elem.a['rel'][0]
url = players_parse.player_parse(url, elem.a['title'], host)
info = elem.find('span', class_='optxt').text.partition('\n')
slang = info[0].strip().replace('Español ', '')
squality = info[2].strip().replace(' ', '')
language = IDIOMAS.get(slang, slang)
quality = lquality.get(squality.lower(), squality)
if "pelisvips.com" in url:
data = create_soup(url, soup=False).partition('sources:')[2]
url = scrapertools.find_single_match(data, "file': '([^']+)")
elif "pelisup" in url:
url = url.replace('pelisup', 'fembed')
if not unify:
title += ' [COLOR palegreen][%s] [/COLOR][COLOR grey][%s][/COLOR]' % (quality, language)
if url:
itemlist.append(
item.clone(action="play", title=title, url=url,
quality= quality, language=language,
plot=item.plot1
))
itemlist=servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if itemlist and item.contentChannel != "videolibrary":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="gold",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
contentTitle=item.contentTitle
))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
elif categoria == 'documentales':
item.url = host + "genero/documental/"
elif categoria == 'infantiles':
item.url = host + "genero/animacion/"
elif categoria == 'terror':
item.url = host + "genero/terror/"
elif categoria == 'castellano':
item.url = host + "ver-idioma/castellano/"
elif categoria == 'latino':
item.url = host + "ver-idioma/latino/"
itemlist = list_all(item)
if itemlist[-1].action == "list_all":
itemlist.pop()
    # Catch the exception, so the "newest" listing is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def clear_title(stitle, syear=None):
title = re.sub(r' / (.*)| \(.*', '', stitle)
if syear:
year = scrapertools.find_single_match(syear, r'(\d{4})') or '-'
return title, year
return title
def extrae_idiomas(list_language):
logger.info()
textoidiomas = ''
for i, elem in enumerate(list_language):
try:
idioma = elem['title']
except:
idioma = elem.strip()
c_lang = IDIOMAS.get(idioma, idioma)
textoidiomas += "%s, " % c_lang
list_language[i] = c_lang
textoidiomas = textoidiomas[:-2]
return textoidiomas, list_language | gpl-3.0 | 2,597,182,748,620,308,000 | 32.043836 | 123 | 0.518835 | false | 3.848823 | false | false | false |
taspinar/siml | siml/naive_bayes.py | 1 | 5454 | from collections import Counter, defaultdict
from evaluators import *
import load_data as ld
import numpy as np
class NaiveBaseClass:
def calculate_relative_occurences(self, list1):
no_examples = len(list1)
ro_dict = dict(Counter(list1))
for key in ro_dict.keys():
ro_dict[key] = ro_dict[key] / float(no_examples)
return ro_dict
def get_max_value_key(self, d1):
values = list(d1.values())
keys = list(d1.keys())
max_value_index = values.index(max(values))
max_key = keys[max_value_index]
return max_key
def initialize_nb_dict(self):
self.nb_dict = {}
for label in self.labels:
self.nb_dict[label] = defaultdict(list)
class NaiveBayes(NaiveBaseClass):
"""
Naive Bayes Classifier:
It is trained with a 2D-array X (dimensions m,n) and a 1D array Y (dimension 1,n).
X should have one column per feature (total m) and one row per training example (total n).
After training a dictionary is filled with the class probabilities per feature.
"""
def train(self, X, Y):
self.labels = np.unique(Y)
no_rows, no_cols = np.shape(X)
self.initialize_nb_dict()
self.class_probabilities = self.calculate_relative_occurences(Y)
#fill self.nb_dict with the feature values per class
for label in self.labels:
row_indices = np.where(Y == label)[0]
X_ = X[row_indices, :]
no_rows_, no_cols_ = np.shape(X_)
for jj in range(0,no_cols_):
self.nb_dict[label][jj] += list(X_[:,jj])
#transform the dict which contains lists with all feature values
#to a dict with relative feature value occurences per class
for label in self.labels:
for jj in range(0,no_cols):
self.nb_dict[label][jj] = self.calculate_relative_occurences(self.nb_dict[label][jj])
def classify_single_elem(self, X_elem):
Y_dict = {}
for label in self.labels:
class_probability = self.class_probabilities[label]
for ii in range(0,len(X_elem)):
relative_feature_values = self.nb_dict[label][ii]
if X_elem[ii] in relative_feature_values.keys():
class_probability *= relative_feature_values[X_elem[ii]]
else:
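                    # An unseen feature value zeroes out the whole class
                    # probability (no Laplace smoothing is applied here).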
class_probability *= 0
Y_dict[label] = class_probability
return self.get_max_value_key(Y_dict)
def classify(self, X):
self.predicted_Y_values = []
no_rows, no_cols = np.shape(X)
for ii in range(0,no_rows):
X_elem = X[ii,:]
prediction = self.classify_single_elem(X_elem)
self.predicted_Y_values.append(prediction)
return self.predicted_Y_values
class NaiveBayesText(NaiveBaseClass):
""""
When the goal is classifying text, it is better to give the input X in the form of a list of lists containing words.
X = [
['this', 'is', 'a',...],
(...)
]
Y still is a 1D array / list containing the labels of each entry
"""
def initialize_nb_dict(self):
self.nb_dict = {}
for label in self.labels:
self.nb_dict[label] = []
def train(self, X, Y):
self.class_probabilities = self.calculate_relative_occurences(Y)
self.labels = np.unique(Y)
self.no_examples = len(Y)
self.initialize_nb_dict()
for ii in range(0,len(Y)):
label = Y[ii]
self.nb_dict[label] += X[ii]
#transform the list with all occurences to a dict with relative occurences
for label in self.labels:
self.nb_dict[label] = self.calculate_relative_occurences(self.nb_dict[label])
def classify_single_elem(self, X_elem):
Y_dict = {}
for label in self.labels:
class_probability = self.class_probabilities[label]
nb_dict_features = self.nb_dict[label]
for word in X_elem:
if word in nb_dict_features.keys():
relative_word_occurence = nb_dict_features[word]
class_probability *= relative_word_occurence
else:
class_probability *= 0
Y_dict[label] = class_probability
return self.get_max_value_key(Y_dict)
def classify(self, X):
self.predicted_Y_values = []
n = len(X)
for ii in range(0,n):
X_elem = X[ii]
prediction = self.classify_single_elem(X_elem)
self.predicted_Y_values.append(prediction)
return self.predicted_Y_values
####
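# Demo: train and evaluate the classifiers on the Amazon reviews and Adult
# datasets loaded via load_data.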
X_train, Y_train, X_test, Y_test = ld.amazon_reviews()
print("training naive bayes")
nbc = NaiveBayesText()
nbc.train(X_train, Y_train)
print("trained")
predicted_Y = nbc.classify(X_test[:100])
y_labels = np.unique(Y_test)
for y_label in y_labels:
f1 = f1_score(predicted_Y, Y_test, y_label)
print("F1-score on the test-set for class %s is: %s" % (y_label, f1))
X_train, Y_train, X_test, Y_test = ld.adult()
print("training naive bayes")
nbc = NaiveBayes()
nbc.train(X_train, Y_train)
print("trained")
predicted_Y = nbc.classify(X_test)
y_labels = np.unique(Y_test)
for y_label in y_labels:
f1 = f1_score(predicted_Y, Y_test, y_label)
print("F1-score on the test-set for class %s is: %s" % (y_label, f1))
| mit | 4,719,717,715,912,433,000 | 36.102041 | 120 | 0.590392 | false | 3.436673 | true | false | false |
jameshilliard/playground21 | apibb/apibb-server.py | 1 | 4963 | import os
import re
import json
import random
import apsw
import time
# import flask web microframework
from flask import Flask
from flask import request
# import from the 21 Developer Library
from two1.lib.wallet import Wallet
from two1.lib.bitserv.flask import Payment
connection = apsw.Connection("apibb.db")
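# The SQLite database is expected to already contain two tables whose layout
# is implied by the queries below:
#   names(name, created, expires)
#   ads(name, uri, pubkey, created, expires)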
name_re = re.compile(r"^[a-zA-Z0-9][a-zA-Z0-9-\.]*$")
app = Flask(__name__)
wallet = Wallet()
payment = Payment(app, wallet)
def expire_ads():
cursor = connection.cursor()
cursor.execute("DELETE FROM ads WHERE expires < datetime('now')")
def expire_names():
cursor = connection.cursor()
cursor.execute("DELETE FROM names WHERE expires < datetime('now')")
@app.route('/names')
@payment.required(1)
def get_names():
cursor = connection.cursor()
rv = []
for name,created,expires in cursor.execute("SELECT name,created,expires FROM names ORDER BY name"):
obj = {
"name": name,
"created": created,
"expires": expires
}
rv.append(obj)
return json.dumps(rv)
def valid_renewal(request):
name = request.args.get('name')
hours = request.args.get('hours')
if (name_re.match(name) is None or
int(hours) < 1 or
int(hours) > (24 * 30)):
return False
return True
def get_renew_price_from_req(request):
if not valid_renewal(request):
return "invalid advertisement"
hours = int(request.args.get('hours'))
price = hours * 10 # 10 satoshis per hour
if price < 10:
price = 10
return price
@app.route('/namerenew')
@payment.required(get_renew_price_from_req)
def name_renew():
if not valid_renewal(request):
return "invalid renewal"
expire_names()
name = request.args.get('name')
hours = int(request.args.get('hours'))
cursor = connection.cursor()
expires = 0
for v in cursor.execute("SELECT expires FROM names WHERE name = ?", (name,)):
expires = v[0]
print("EXPIRES " + str(expires))
if expires == 0:
cursor.execute("INSERT INTO names VALUES(?, datetime('now'), datetime('now', '+" + str(hours) + " hours'))", (name,))
else:
cursor.execute("UPDATE names SET expires = datetime(?, '+" + str(hours) + " hours') WHERE name = ?", (expires, name))
return "OK"
def valid_advertisement(cursor, request):
name = request.args.get('name')
uri = request.args.get('uri')
pubkey = request.args.get('pubkey')
hours = request.args.get('hours')
if (name_re.match(name) is None or
len(uri) < 1 or
len(uri) > 512 or
len(pubkey) < 32 or
len(pubkey) > 512 or
int(hours) < 1 or
int(hours) > (24 * 30)):
return False
expires = None
for v in cursor.execute("SELECT strftime('%s', expires) FROM names WHERE name = ? AND expires > datetime('now')", (name,)):
expires = v
if expires is None:
return False
# curtime = int(time.time())
# curtime_deltap = curtime + (int(hours) * 60 * 60)
# if curtime_deltap > expires:
# return False
return True
def get_advertise_price_from_req(request):
cursor = connection.cursor()
if not valid_advertisement(cursor, request):
return "invalid advertisement"
hours = int(request.args.get('hours'))
price = hours * 2 # 2 satoshis per hour
if price < 2:
price = 2
return price
@app.route('/advertise')
@payment.required(get_advertise_price_from_req)
def advertise():
cursor = connection.cursor()
if not valid_advertisement(cursor, request):
return "invalid advertisement"
name = request.args.get('name')
uri = request.args.get('uri')
pubkey = request.args.get('pubkey')
hours = request.args.get('hours')
cursor.execute("INSERT INTO ads VALUES(?, ?, ?, datetime('now'), datetime('now','+" + str(hours) + " hours'))", (name, uri, pubkey))
return "OK"
@app.route('/ads')
@payment.required(1)
def get_advertisements():
name = request.args.get('name')
rv = []
cursor = connection.cursor()
for uri,pk,created,expires in cursor.execute("SELECT uri,pubkey,created,expires FROM ads WHERE name = ? AND expires > datetime('now')", (name,)):
obj = {
"uri": uri,
"pubkey": pk,
"created": created,
"expires": expires
}
rv.append(obj)
return json.dumps(rv)
@app.route('/info')
def get_info():
info_obj = {
"name": "apibb",
"version": 100,
"pricing": {
"/names" : {
"minimum" : 1
},
"/namerenew" : {
"minimum" : 10
},
"/advertise" : {
"minimum" : 2
},
"/ads" : {
"minimum" : 1
},
}
}
return json.dumps(info_obj)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=12002, debug=True)
| mit | -4,786,536,286,983,426,000 | 24.321429 | 149 | 0.583921 | false | 3.552613 | false | false | false |
jessicalucci/NovaOrc | nova/console/websocketproxy.py | 9 | 3155 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Websocket proxy that is compatible with OpenStack Nova.
Leverages websockify.py by Joel Martin
'''
import Cookie
import socket
import websockify
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class NovaWebSocketProxy(websockify.WebSocketProxy):
def __init__(self, *args, **kwargs):
websockify.WebSocketProxy.__init__(self, unix_target=None,
target_cfg=None,
ssl_target=None, *args, **kwargs)
def new_client(self):
"""
Called after a new WebSocket connection has been established.
"""
cookie = Cookie.SimpleCookie()
cookie.load(self.headers.getheader('cookie'))
token = cookie['token'].value
ctxt = context.get_admin_context()
rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
connect_info = rpcapi.check_token(ctxt, token=token)
if not connect_info:
LOG.audit("Invalid Token: %s", token)
raise Exception(_("Invalid Token"))
host = connect_info['host']
port = int(connect_info['port'])
# Connect to the target
self.msg("connecting to: %s:%s" % (host, port))
LOG.audit("connecting to: %s:%s" % (host, port))
tsock = self.socket(host, port, connect=True)
# Handshake as necessary
if connect_info.get('internal_access_path'):
tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
connect_info['internal_access_path'])
while True:
data = tsock.recv(4096, socket.MSG_PEEK)
if data.find("\r\n\r\n") != -1:
if not data.split("\r\n")[0].find("200"):
LOG.audit("Invalid Connection Info %s", token)
raise Exception(_("Invalid Connection Info"))
tsock.recv(len(data))
break
if self.verbose and not self.daemon:
print(self.traffic_legend)
# Start proxying
try:
self.do_proxy(tsock)
except Exception:
if tsock:
tsock.shutdown(socket.SHUT_RDWR)
tsock.close()
self.vmsg("%s:%s: Target closed" % (host, port))
LOG.audit("%s:%s: Target closed" % (host, port))
raise
| apache-2.0 | -2,780,871,884,747,952,000 | 34.449438 | 78 | 0.590808 | false | 4.086788 | false | false | false |
super13/tensorflow-speech-recognition-pai | src/smodels/RNN/rnn.py | 1 | 10374 | # Note: All calls to tf.name_scope or tf.summary.* support TensorBoard visualization.
import os
import tensorflow as tf
from models.RNN.utils import variable_on_cpu
def SimpleLSTM(input_tensor, seq_length):
'''
This function was initially based on open source code from Mozilla DeepSpeech:
https://github.com/mozilla/DeepSpeech/blob/master/DeepSpeech.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
'''
# SimpleLSTM
n_character = 29
b1_stddev = 0.046875
h1_stddev = 0.046875
n_layers = 2
n_hidden_units = 512
# Input shape: [batch_size, n_steps, n_input + 2*n_input*n_context]
# batch_x_shape = tf.shape(batch_x)
input_tensor_shape = tf.shape(input_tensor)
n_items = input_tensor_shape[0]
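    # n_items is the (dynamic) batch size, so the graph works for any batch.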
with tf.name_scope("lstm"):
# Initialize weights
# with tf.device('/cpu:0'):
W = tf.get_variable('W', shape=[n_hidden_units, n_character],
# initializer=tf.truncated_normal_initializer(stddev=h1_stddev),
initializer=tf.random_normal_initializer(stddev=h1_stddev),
)
# Initialize bias
# with tf.device('/cpu:0'):
# b = tf.get_variable('b', initializer=tf.zeros_initializer([n_character]))
b = tf.get_variable('b', shape=[n_character],
# initializer=tf.constant_initializer(value=0),
initializer=tf.random_normal_initializer(stddev=b1_stddev),
)
# Define the cell
# Can be:
# tf.contrib.rnn.BasicRNNCell
# tf.contrib.rnn.GRUCell
cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units, state_is_tuple=True)
# Stacking rnn cells
stack = tf.contrib.rnn.MultiRNNCell([cell] * n_layers, state_is_tuple=True)
# Get layer activations (second output is the final state of the layer, do not need)
outputs, _ = tf.nn.dynamic_rnn(stack, input_tensor, seq_length,
time_major=False, dtype=tf.float32)
# Reshape to apply the same weights over the timesteps
outputs = tf.reshape(outputs, [-1, n_hidden_units])
# Perform affine transformation to layer output:
# multiply by weights (linear transformation), add bias (translation)
logits = tf.add(tf.matmul(outputs, W), b)
tf.summary.histogram("weights", W)
tf.summary.histogram("biases", b)
tf.summary.histogram("activations", logits)
# Reshaping back to the original shape
logits = tf.reshape(logits, [n_items, -1, n_character])
# Put time as the major axis
logits = tf.transpose(logits, (1, 0, 2))
summary_op = tf.summary.merge_all()
return logits, summary_op
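# Minimal usage sketch; the shapes and names below are assumptions chosen for
# illustration only.
def _example_simple_lstm_inputs(batch_size=16, max_steps=200, n_features=26):
    """Build placeholders shaped the way SimpleLSTM expects its arguments:
    input_tensor as [batch_size, n_steps, n_features] plus one sequence length
    per example. Returns (logits, summary_op); logits come back time-major,
    i.e. [n_steps, batch_size, 29]."""
    input_tensor = tf.placeholder(tf.float32,
                                  [batch_size, max_steps, n_features],
                                  name='example_inputs')
    seq_length = tf.placeholder(tf.int32, [batch_size], name='example_seq_length')
    return SimpleLSTM(input_tensor, seq_length)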
def BiRNN(batch_x, seq_length, n_input, n_context):
"""
This function was initially based on open source code from Mozilla DeepSpeech:
https://github.com/mozilla/DeepSpeech/blob/master/DeepSpeech.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
dropout = [0.05,0.05,0.05,0.0,0.0,0.05]
relu_clip = 20
b1_stddev = 0.046875
h1_stddev = 0.046875
b2_stddev = 0.046875
h2_stddev = 0.046875
b3_stddev = 0.046875
h3_stddev = 0.046875
b5_stddev = 0.046875
h5_stddev = 0.046875
b6_stddev = 0.046875
h6_stddev = 0.046875
n_hidden_1 = 1024
n_hidden_2 = 1024
n_hidden_5 = 1024
n_cell_dim = 1024
n_hidden_3 = 1024
n_hidden_6 = 1024
# Input shape: [batch_size, n_steps, n_input + 2*n_input*n_context]
batch_x_shape = tf.shape(batch_x)
# Reshaping `batch_x` to a tensor with shape `[n_steps*batch_size, n_input + 2*n_input*n_context]`.
# This is done to prepare the batch for input into the first layer which expects a tensor of rank `2`.
# Permute n_steps and batch_size
batch_x = tf.transpose(batch_x, [1, 0, 2])
# Reshape to prepare input for first layer
batch_x = tf.reshape(batch_x,
[-1, n_input + 2 * n_input * n_context]) # (n_steps*batch_size, n_input + 2*n_input*n_context)
# The next three blocks will pass `batch_x` through three hidden layers with
# clipped RELU activation and dropout.
# 1st layer
with tf.name_scope('fc1'):
b1 = variable_on_cpu('b1', [n_hidden_1], tf.random_normal_initializer(stddev=b1_stddev))
h1 = variable_on_cpu('h1', [n_input + 2 * n_input * n_context, n_hidden_1],
tf.random_normal_initializer(stddev=h1_stddev))
layer_1 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(batch_x, h1), b1)), relu_clip)
layer_1 = tf.nn.dropout(layer_1, (1.0 - dropout[0]))
tf.summary.histogram("weights", h1)
tf.summary.histogram("biases", b1)
tf.summary.histogram("activations", layer_1)
# 2nd layer
with tf.name_scope('fc2'):
b2 = variable_on_cpu('b2', [n_hidden_2], tf.random_normal_initializer(stddev=b2_stddev))
h2 = variable_on_cpu('h2', [n_hidden_1, n_hidden_2], tf.random_normal_initializer(stddev=h2_stddev))
layer_2 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(layer_1, h2), b2)), relu_clip)
layer_2 = tf.nn.dropout(layer_2, (1.0 - dropout[1]))
tf.summary.histogram("weights", h2)
tf.summary.histogram("biases", b2)
tf.summary.histogram("activations", layer_2)
# 3rd layer
with tf.name_scope('fc3'):
b3 = variable_on_cpu('b3', [n_hidden_3], tf.random_normal_initializer(stddev=b3_stddev))
h3 = variable_on_cpu('h3', [n_hidden_2, n_hidden_3], tf.random_normal_initializer(stddev=h3_stddev))
layer_3 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(layer_2, h3), b3)), relu_clip)
layer_3 = tf.nn.dropout(layer_3, (1.0 - dropout[2]))
tf.summary.histogram("weights", h3)
tf.summary.histogram("biases", b3)
tf.summary.histogram("activations", layer_3)
# Create the forward and backward LSTM units. Inputs have length `n_cell_dim`.
# LSTM forget gate bias initialized at `1.0` (default), meaning less forgetting
# at the beginning of training (remembers more previous info)
with tf.name_scope('lstm'):
# Forward direction cell:
lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(n_cell_dim, forget_bias=1.0, state_is_tuple=True)
lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(lstm_fw_cell,
input_keep_prob=1.0 - dropout[3],
output_keep_prob=1.0 - dropout[3],
# seed=random_seed,
)
# Backward direction cell:
lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(n_cell_dim, forget_bias=1.0, state_is_tuple=True)
lstm_bw_cell = tf.contrib.rnn.DropoutWrapper(lstm_bw_cell,
input_keep_prob=1.0 - dropout[4],
output_keep_prob=1.0 - dropout[4],
# seed=random_seed,
)
# `layer_3` is now reshaped into `[n_steps, batch_size, 2*n_cell_dim]`,
# as the LSTM BRNN expects its input to be of shape `[max_time, batch_size, input_size]`.
layer_3 = tf.reshape(layer_3, [-1, batch_x_shape[0], n_hidden_3])
# Now we feed `layer_3` into the LSTM BRNN cell and obtain the LSTM BRNN output.
outputs, output_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fw_cell,
cell_bw=lstm_bw_cell,
inputs=layer_3,
dtype=tf.float32,
time_major=True,
sequence_length=seq_length)
tf.summary.histogram("activations", outputs)
# Reshape outputs from two tensors each of shape [n_steps, batch_size, n_cell_dim]
# to a single tensor of shape [n_steps*batch_size, 2*n_cell_dim]
outputs = tf.concat(outputs, 2)
outputs = tf.reshape(outputs, [-1, 2 * n_cell_dim])
with tf.name_scope('fc5'):
# Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
b5 = variable_on_cpu('b5', [n_hidden_5], tf.random_normal_initializer(stddev=b5_stddev))
h5 = variable_on_cpu('h5', [(2 * n_cell_dim), n_hidden_5], tf.random_normal_initializer(stddev=h5_stddev))
layer_5 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(outputs, h5), b5)), relu_clip)
layer_5 = tf.nn.dropout(layer_5, (1.0 - dropout[5]))
tf.summary.histogram("weights", h5)
tf.summary.histogram("biases", b5)
tf.summary.histogram("activations", layer_5)
with tf.name_scope('fc6'):
# Now we apply the weight matrix `h6` and bias `b6` to the output of `layer_5`
# creating `n_classes` dimensional vectors, the logits.
b6 = variable_on_cpu('b6', [n_hidden_6], tf.random_normal_initializer(stddev=b6_stddev))
h6 = variable_on_cpu('h6', [n_hidden_5, n_hidden_6], tf.random_normal_initializer(stddev=h6_stddev))
layer_6 = tf.add(tf.matmul(layer_5, h6), b6)
tf.summary.histogram("weights", h6)
tf.summary.histogram("biases", b6)
tf.summary.histogram("activations", layer_6)
# Finally we reshape layer_6 from a tensor of shape [n_steps*batch_size, n_hidden_6]
# to the slightly more useful shape [n_steps, batch_size, n_hidden_6].
# Note, that this differs from the input in that it is time-major.
layer_6 = tf.reshape(layer_6, [-1, batch_x_shape[0], n_hidden_6])
summary_op = tf.summary.merge_all()
# Output shape: [n_steps, batch_size, n_hidden_6]
return layer_6, summary_op
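# Input-width note (assumed example values): BiRNN expects each timestep to carry
# the current frame plus n_context frames on either side, i.e. a width of
# n_input + 2*n_input*n_context. With n_input=26 and n_context=9 that is
# 26 + 2*26*9 = 494 features per step, so batch_x would be shaped
# [batch_size, n_steps, 494] and seq_length [batch_size].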
| mit | -6,191,367,770,140,815,000 | 44.104348 | 120 | 0.584249 | false | 3.454545 | false | false | false |
saurabh-hirani/bin | gen-nagios-cfg/get_chef_attrs.py | 1 | 3313 | #!/usr/bin/env python
"""
Usage:
get_chef_attrs.py [-h|--help] --hosts <hosts-file> [--chef-cfg <chef-cfg-file>] --cache <cache-file> [--ext <extension>] --attrs <attr>... [--verbose]
Options:
-h,--help show this help text
-H <hosts-file>, --hosts <hosts-file> target hosts file - one host per line
--chef-cfg <chef-cfg-file> yaml based chef config file [default: chef.yml]
--cache <cache-file> cache for storing looked up hosts
--attrs <attr> chef attributes to search
--ext <extension> add this host extension to re-search if the search fails
-v, --verbose verbose mode
"""
import os
import sys
import yaml
import json
from docopt import docopt
import chef
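# Illustrative invocation (hypothetical paths and values):
#
#   ./get_chef_attrs.py --hosts hosts.txt --cache chef_cache.json \
#       --attrs ipaddress --ext example.com
#
# The --chef-cfg YAML (chef.yml by default) is expected to look roughly like:
#
#   chef:
#     host: https://chef.example.com
#     pem: /path/to/user.pem
#     user: someuser
#
# since lookup_chef() below reads chef_cfg['chef']['host'], ['pem'] and ['user'].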
def lookup_chef(opts, hosts):
chef_cfg = yaml.load(open(opts['--chef-cfg']).read())
cache = {}
with chef.ChefAPI(chef_cfg['chef']['host'], chef_cfg['chef']['pem'],
chef_cfg['chef']['user']):
for host in hosts:
attrs_map = {}
orig_host = host
if '@' in host:
_, host = host.split('@')
n = chef.Node(host)
ipaddr = n.attributes.get('ipaddress')
if ipaddr is None or ipaddr == 'None':
if opts['--ext'] is not None:
host = host + '.' + opts['--ext']
n = chef.Node(host)
ipaddr = n.attributes.get('ipaddress')
for attr in opts['--attrs']:
attrs_map[str(attr)] = str(n.attributes.get(attr))
if ipaddr:
cache[host] = attrs_map
else:
cache[host] = {}
if '--verbose' in opts and opts['--verbose']:
print "------------"
print host
print json.dumps(attrs_map, indent=4)
return cache
def get_chef_attrs(opts):
hosts = []
with open(opts['--hosts']) as f:
hosts = [x.strip() for x in f.readlines()]
unresolved_hosts = []
cache = json.loads(open(opts['--cache']).read())
for host in hosts:
_, host = host.split('@')
host_variants = []
host_variants.append(host)
host_variants.append(host.split('.')[0])
found = False
for host_variant in host_variants:
if host_variant in cache:
found = True
break
if not found:
unresolved_hosts.append(host)
if unresolved_hosts:
hosts_info = lookup_chef(opts, unresolved_hosts)
for host in hosts_info:
cache[host] = hosts_info[host]
with open(opts['--cache'], 'w') as f:
f.write(json.dumps(cache, indent=4))
return cache
def validate_input(opts):
if not os.path.exists(opts['--hosts']):
print 'ERROR: hosts file %s does not exist' % opts['--hosts']
sys.exit(1)
if not os.path.exists(opts['--chef-cfg']):
print 'ERROR: chef cfg file %s does not exist' % opts['--chef-cfg']
sys.exit(1)
if not opts['--attrs']:
    print 'ERROR: Empty attrs: %s' % opts['--attrs']
sys.exit(1)
if not os.path.exists(opts['--cache']):
with open(opts['--cache'], 'w') as f:
f.write('{}')
def load_args(args):
parsed_docopt = docopt(__doc__)
return parsed_docopt
def main(opts):
validate_input(opts)
return get_chef_attrs(opts)
if __name__ == '__main__':
opts = load_args(sys.argv[1:])
attrs = main(opts)
print json.dumps(attrs, indent=4)
| gpl-2.0 | -2,016,134,175,059,922,400 | 25.293651 | 155 | 0.567763 | false | 3.384065 | false | false | false |
erikdejonge/newsrivr | mongo/mongotest.py | 1 | 1030 | import csv
import json
import pymongo
from pymongo.objectid import ObjectId
from pymongo import Connection
def import_feeds():
print "reading"
s = set()
r = csv.reader(open("feeds.txt", "rU"))
for i in r:
if len(i)>1:
if len(i[1])>0:
s.add(i[1])
connection = Connection()
connection = Connection("localhost", 27017)
db = connection.river
collection = db.spider
print "inserting"
for i in s:
feed = { "url" : i}
collection.feeds.insert(feed)
def find_feeds():
connection = Connection("192.168.0.18", 10000)
db = connection.river
collection = db.spider
d = {}
l = ["hello", "world"]
d["data"] = l
print json.dumps(d)
db.testtable.insert(d)
def main():
#find_feeds()
connection = Connection("kain.active8.nl", 10000, slave_okay=True)
db = connection.river
for o in db.testtable.find():
print o
#
if __name__=="__main__":
main()
    print "ok"
| gpl-2.0 | 1,952,921,670,317,960,400 | 20.93617 | 71 | 0.564078 | false | 3.503401 | false | false | false
ptrsxu/snippetpy | builtinim/peekable.py | 1 | 1117 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
from python cookbook 2nd edition.
"""
import collections
class Peekable(object):
"""A iteration that can look forward with normal iter operations.
Example:
>>> p = Peekable(iter(range(4)))
>>> p.peek()
0
>>> p.next(1)
[0]
>>> p.peek(3)
[1, 2, 3]
>>> p.next(2)
[1, 2]
"""
def __init__(self, iterable):
self._iterable = iterable
self._cache = collections.deque()
def __iter__(self):
return self
def _fill_cache(self, n):
if n is None:
n = 1
while len(self._cache) < n:
self._cache.append(self._iterable.next())
def next(self, n=None):
self._fill_cache(n)
if n is None:
result = self._cache.popleft()
else:
result = [self._cache.popleft() for i in range(n)]
return result
def peek(self, n=None):
self._fill_cache(n)
if n is None:
result = self._cache[0]
else:
result = [self._cache[i] for i in range(n)]
return result
| mit | -786,149,576,617,089,500 | 20.901961 | 69 | 0.5094 | false | 3.501567 | false | false | false |
puttarajubr/commcare-hq | corehq/apps/sms/tasks.py | 2 | 9211 | import math
from datetime import datetime, timedelta
from celery.task import task
from time import sleep
from redis_cache.cache import RedisCache
from corehq.apps.sms.mixin import SMSLoadBalancingMixin
from corehq.apps.sms.models import (SMSLog, OUTGOING, INCOMING,
ERROR_TOO_MANY_UNSUCCESSFUL_ATTEMPTS, ERROR_MESSAGE_IS_STALE,
ERROR_INVALID_DIRECTION)
from corehq.apps.sms.api import (send_message_via_backend, process_incoming,
log_sms_exception)
from django.conf import settings
from corehq.apps.domain.models import Domain
from corehq.apps.smsbillables.models import SmsBillable
from corehq.util.timezones.conversions import ServerTime
from dimagi.utils.couch.cache import cache_core
from threading import Thread
def handle_unsuccessful_processing_attempt(msg):
msg.num_processing_attempts += 1
if msg.num_processing_attempts < settings.SMS_QUEUE_MAX_PROCESSING_ATTEMPTS:
delay_processing(msg, settings.SMS_QUEUE_REPROCESS_INTERVAL)
else:
msg.set_system_error(ERROR_TOO_MANY_UNSUCCESSFUL_ATTEMPTS)
def handle_successful_processing_attempt(msg):
utcnow = datetime.utcnow()
msg.num_processing_attempts += 1
msg.processed = True
msg.processed_timestamp = utcnow
if msg.direction == OUTGOING:
msg.date = utcnow
msg.save()
def delay_processing(msg, minutes):
msg.datetime_to_process += timedelta(minutes=minutes)
msg.save()
def get_lock(client, key):
return client.lock(key, timeout=settings.SMS_QUEUE_PROCESSING_LOCK_TIMEOUT*60)
def time_within_windows(domain_now, windows):
weekday = domain_now.weekday()
time = domain_now.time()
for window in windows:
if (window.day in [weekday, -1] and
(window.start_time is None or time >= window.start_time) and
(window.end_time is None or time <= window.end_time)):
return True
return False
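# Worked example (hypothetical window objects): a window with day=-1 matches every
# weekday, and a start_time/end_time of None leaves that side unbounded, so a
# single window (day=-1, 09:00-17:00) makes time_within_windows() return True for
# 13:30 on any weekday and False for 08:00.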
def handle_domain_specific_delays(msg, domain_object, utcnow):
"""
Checks whether or not we need to hold off on sending an outbound message
due to any restrictions set on the domain, and delays processing of the
message if necessary.
Returns True if a delay was made, False if not.
"""
domain_now = ServerTime(utcnow).user_time(domain_object.get_default_timezone()).done()
if len(domain_object.restricted_sms_times) > 0:
if not time_within_windows(domain_now, domain_object.restricted_sms_times):
delay_processing(msg, settings.SMS_QUEUE_DOMAIN_RESTRICTED_RETRY_INTERVAL)
return True
if msg.chat_user_id is None and len(domain_object.sms_conversation_times) > 0:
if time_within_windows(domain_now, domain_object.sms_conversation_times):
sms_conversation_length = domain_object.sms_conversation_length
conversation_start_timestamp = utcnow - timedelta(minutes=sms_conversation_length)
if SMSLog.inbound_entry_exists(msg.couch_recipient_doc_type,
msg.couch_recipient,
conversation_start_timestamp,
utcnow):
delay_processing(msg, 1)
return True
return False
def message_is_stale(msg, utcnow):
oldest_allowable_datetime = \
utcnow - timedelta(hours=settings.SMS_QUEUE_STALE_MESSAGE_DURATION)
if isinstance(msg.date, datetime):
return msg.date < oldest_allowable_datetime
else:
return True
def _wait_and_release_lock(lock, timeout, start_timestamp):
while (datetime.utcnow() - start_timestamp) < timedelta(seconds=timeout):
sleep(0.1)
try:
lock.release()
except:
# The lock could have timed out in the meantime
pass
def wait_and_release_lock(lock, timeout):
timestamp = datetime.utcnow()
t = Thread(target=_wait_and_release_lock, args=(lock, timeout, timestamp))
t.start()
def handle_outgoing(msg):
"""
Should return a requeue flag, so if it returns True, the message will be
requeued and processed again immediately, and if it returns False, it will
not be queued again.
"""
backend = msg.outbound_backend
sms_interval = backend.get_sms_interval()
use_rate_limit = sms_interval is not None
use_load_balancing = (isinstance(backend, SMSLoadBalancingMixin) and
len(backend.phone_numbers) > 1)
if use_rate_limit or use_load_balancing:
client = cache_core.get_redis_client()
lbi = None
orig_phone_number = None
if use_load_balancing:
lbi = backend.get_next_phone_number(client)
orig_phone_number = lbi.phone_number
elif (isinstance(backend, SMSLoadBalancingMixin) and
len(backend.phone_numbers) == 1):
# If there's only one phone number, we don't need to go through the
# load balancing algorithm. But we should always pass an
# orig_phone_number if it's an instance of SMSLoadBalancingMixin.
orig_phone_number = backend.phone_numbers[0]
if use_rate_limit:
if use_load_balancing:
lock_key = "sms-backend-%s-rate-limit-phone-%s" % (backend._id,
lbi.phone_number)
else:
lock_key = "sms-backend-%s-rate-limit" % backend._id
lock = client.lock(lock_key, timeout=30)
if not use_rate_limit or (use_rate_limit and lock.acquire(blocking=False)):
if use_load_balancing:
lbi.finish(save_stats=True)
result = send_message_via_backend(msg, backend=backend,
orig_phone_number=orig_phone_number)
if use_rate_limit:
wait_and_release_lock(lock, sms_interval)
# Only do the following if an unrecoverable error did not happen
if not msg.error:
if result:
handle_successful_processing_attempt(msg)
else:
handle_unsuccessful_processing_attempt(msg)
return False
else:
# We're using rate limiting, but couldn't acquire the lock, so
# another thread is sending sms with this backend. Rather than wait,
# we'll just put this message at the back of the queue.
if use_load_balancing:
lbi.finish(save_stats=False)
return True
def handle_incoming(msg):
try:
process_incoming(msg)
handle_successful_processing_attempt(msg)
except:
log_sms_exception(msg)
handle_unsuccessful_processing_attempt(msg)
@task(queue="sms_queue", ignore_result=True)
def process_sms(message_id):
"""
message_id - _id of an SMSLog entry
"""
# Note that Redis error/exception notifications go out from the
# run_sms_queue command, so no need to send them out here
# otherwise we'd get too many emails.
rcache = cache_core.get_redis_default_cache()
if not isinstance(rcache, RedisCache):
return
try:
client = rcache.raw_client
except NotImplementedError:
return
utcnow = datetime.utcnow()
# Prevent more than one task from processing this SMS, just in case
# the message got enqueued twice.
message_lock = get_lock(client, "sms-queue-processing-%s" % message_id)
if message_lock.acquire(blocking=False):
msg = SMSLog.get(message_id)
if message_is_stale(msg, utcnow):
msg.set_system_error(ERROR_MESSAGE_IS_STALE)
message_lock.release()
return
if msg.direction == OUTGOING:
if msg.domain:
domain_object = Domain.get_by_name(msg.domain, strict=True)
else:
domain_object = None
if domain_object and handle_domain_specific_delays(msg, domain_object, utcnow):
message_lock.release()
return
requeue = False
# Process inbound SMS from a single contact one at a time
recipient_block = msg.direction == INCOMING
if (isinstance(msg.processed, bool)
and not msg.processed
and not msg.error
and msg.datetime_to_process < utcnow):
if recipient_block:
recipient_lock = get_lock(client,
"sms-queue-recipient-phone-%s" % msg.phone_number)
recipient_lock.acquire(blocking=True)
if msg.direction == OUTGOING:
requeue = handle_outgoing(msg)
elif msg.direction == INCOMING:
handle_incoming(msg)
else:
msg.set_system_error(ERROR_INVALID_DIRECTION)
if recipient_block:
recipient_lock.release()
message_lock.release()
if requeue:
process_sms.delay(message_id)
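# Sketch of how a message enters this queue (the enqueueing code lives elsewhere):
# after an SMSLog is saved with processed=False and a datetime_to_process in the
# past, a call such as
#   process_sms.delay(msg._id)
# hands its couch id to the task above, which then serialises the work through the
# "sms-queue-processing-<id>" and per-phone-number recipient locks.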
@task(ignore_result=True)
def store_billable(msg):
if msg._id and not SmsBillable.objects.filter(log_id=msg._id).exists():
try:
msg.text.encode('iso-8859-1')
msg_length = 160
except UnicodeEncodeError:
# This string contains unicode characters, so the allowed
# per-sms message length is shortened
msg_length = 70
for _ in range(int(math.ceil(float(len(msg.text)) / msg_length))):
SmsBillable.create(msg)
| bsd-3-clause | 2,552,213,391,816,183,000 | 36.291498 | 94 | 0.644121 | false | 3.871795 | false | false | false |
data-henrik/watson-conversation-tool | watoolV2.py | 1 | 6863 | # Copyright 2017-2018 IBM Corp. All Rights Reserved.
# See LICENSE for details.
#
# Author: Henrik Loeser
#
# Converse with your assistant based on IBM Watson Assistant service on IBM Cloud.
# See the README for documentation.
#
import json, argparse, importlib
from os.path import join, dirname
from ibm_watson import AssistantV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
privcontext=None
assistantService=None
def loadAndInit(confFile=None):
# Credentials are read from a file
with open(confFile) as confFile:
config = json.load(confFile)
configWA = config['credentials']
global assistantService
# Initialize the Watson Assistant client, use API V2
if 'apikey' in configWA:
# Authentication via IAM
authenticator = IAMAuthenticator(configWA['apikey'])
assistantService = AssistantV2(
authenticator=authenticator,
version=configWA['versionV2'])
assistantService.set_service_url(configWA['url'])
else:
print('Expected apikey in credentials.')
    exit(1)
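# Illustrative config.json shape (all values are placeholders, not real
# credentials or endpoints):
#
#   {
#     "credentials": {
#       "apikey": "YOUR_API_KEY",
#       "url": "https://api.us-south.assistant.watson.cloud.ibm.com",
#       "versionV2": "2020-04-01"
#     }
#   }
#
# Only the keys read above (apikey, url, versionV2) are required by loadAndInit().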
# Define parameters that we want to catch and some basic command help
def initParser(args=None):
parser = argparse.ArgumentParser(description='Watson Assistant tool',
prog='watoolV2.py',
usage='%(prog)s [-h | -dialog ] [options]')
parser.add_argument("-dialog",dest='dialog', action='store_true', help='have dialog')
parser.add_argument("-outputonly",dest='outputOnly', action='store_true', help='print dialog output only')
parser.add_argument("-id",dest='assistantID', help='Assistant ID')
parser.add_argument("-actionmodule",dest='actionModule', help='Module for client action handling')
parser.add_argument("-context",dest='context', help='context file')
parser.add_argument("-config",dest='confFile', default='config.json', help='configuration file')
return parser
# Start a dialog and converse with Watson
def converse(assistantID, outputOnly=None, contextFile=None):
contextFile="session_contextV2.json"
print ("Starting a conversation, stop by Ctrl+C or saying 'bye'")
print ("======================================================")
# Start with an empty context object
context={}
first=True
## Load conversation context on start or not?
contextStart = input("Start with empty context? (Y/n)\n")
if (contextStart == "n" or contextStart == "N"):
print ("loading old session context...")
with open(contextFile) as jsonFile:
context=json.load(jsonFile)
jsonFile.close()
# create a new session
response = assistantService.create_session(assistant_id=assistantID).get_result()
sessionID = response['session_id']
print('Session created!\n')
# Now loop to chat
while True:
# get some input
minput = input("\nPlease enter your input message:\n")
# if we catch a "bye" then exit after deleting the session
if (minput == "bye"):
response = assistantService.delete_session(
assistant_id=assistantID,
session_id=sessionID).get_result()
print('Session deleted. Bye...')
break
# Read the session context from file if we are not entering the loop
# for the first time
if not first:
try:
with open(contextFile) as jsonFile:
context=json.load(jsonFile)
except IOError:
# do nothing
print ("ignoring")
else:
jsonFile.close()
else:
first=False
# Process IBM Cloud Function credentials if present
if privcontext is not None:
context.update(privcontext)
# send the input to Watson Assistant
# Set alternate_intents to False for less output
resp=assistantService.message(assistant_id=assistantID,
session_id=sessionID,
input={'text': minput,
'options': {'alternate_intents': True, 'return_context': True, 'debug': True}}
).get_result()
#print(json.dumps(resp, indent=2))
# Save returned context for next round of conversation
if ('context' in resp):
context=resp['context']
respOutput=resp['output']
if ('actions' in respOutput and len(respOutput['actions']) and respOutput['actions'][0]['type']=='client'):
# Dump the returned answer
if not outputOnly:
print ("")
print ("Full response object of intermediate step:")
print ("------------------------------------------")
print(json.dumps(resp, indent=2))
if (hca is not None):
contextNew=hca.handleClientActions(context,respOutput['actions'], resp)
# call Watson Assistant with result from client action(s)
resp=assistantService.message(assistant_id=assistantID,
session_id=sessionID,
input={'text': minput,
'options': {'alternate_intents': True, 'return_context': True, 'debug': True}},
intents=respOutput['intents'],
context=contextNew).get_result()
context=resp['context']
respOutput=resp['output']
else:
print("\n\nplease use -actionmodule to define module to handle client actions")
break
# Dump the returned answer
if (outputOnly):
print ("Response:")
print(json.dumps(respOutput['generic'], indent=2))
else:
print ("")
print ("Full response object:")
print ("---------------------")
print(json.dumps(resp, indent=2))
# Persist the current context object to file.
with open(contextFile,'w') as jsonFile:
json.dump(context, jsonFile, indent=2)
jsonFile.close()
#
# Main program, for now just detect what function to call and invoke it
#
if __name__ == '__main__':
# Assume no module for client actions
hca=None
# initialize parser
parser = initParser()
parms = parser.parse_args()
# enable next line to print parameters
# print parms
# load configuration and initialize Watson
loadAndInit(confFile=parms.confFile)
if (parms.dialog and parms.assistantID):
if parms.actionModule:
hca=importlib.import_module(parms.actionModule)
converse(parms.assistantID, parms.outputOnly)
else:
parser.print_usage()
| apache-2.0 | 5,171,017,202,342,663,000 | 35.505319 | 115 | 0.588081 | false | 4.476843 | true | false | false |
zettsu-t/cPlusPlusFriend | scripts/tests/test_secretary_problem.py | 1 | 4823 | #!/usr/bin/python3
# coding: utf-8
'''
This script tests secretary_problem.py
Copyright (C) 2017 Zettsu Tatsuya
usage : python3 -m unittest discover tests
'''
import warnings
from unittest import TestCase
import numpy as np
import secretary_problem.secretary_problem as tested
class TestSecretaryProblem(TestCase):
'''Testing find_candidate'''
def test_find_first(self):
'''Find a candidate just after passed candidates'''
nums = np.array([10, 20, 30, 40, 50])
n_items = len(nums)
for pass_count in range(1, n_items):
passes = [pass_count]
actual = tested.SecretaryProblem(n_items, passes).find_candidate(n_items, passes, nums)
self.assertEqual(actual, nums[pass_count])
def test_find_last(self):
'''Find a last candidate'''
nums = np.array([50, 40, 30, 20, 10])
n_items = len(nums)
for pass_count in range(1, n_items):
passes = [pass_count]
actual = tested.SecretaryProblem(n_items, passes).find_candidate(n_items, passes, nums)
self.assertEqual(actual, nums[-1])
def test_find_middle(self):
'''Find a candidate between passed and last candidates'''
nums = np.array([30, 20, 10, 50, 40])
n_items = len(nums)
for pass_count in range(1, 3):
passes = [pass_count]
actual = tested.SecretaryProblem(n_items, passes).find_candidate(n_items, passes, nums)
self.assertEqual(actual, 50)
def test_find_two_candidates1(self):
'''Hold two candidates and the best candidates is placed last'''
nums = np.array([10, 20, 40, 30, 50, 60, 70])
n_items = len(nums)
passes_set = [[[1, 2], 40], [[1, 3], 30], [[1, 4], 50], [[1, 5], 60], [[1, 6], 70],
[[2, 3], 50], [[2, 4], 50], [[2, 5], 60], [[2, 6], 70],
[[3, 4], 60], [[3, 5], 60], [[3, 6], 70],
[[4, 5], 60], [[4, 6], 70],
[[5, 6], 70],
[[1, 1], 40], [[2, 2], 50], [[3, 3], 60], [[4, 4], 60], [[5, 5], 70],
[[6, 6], 70], [[7, 7], 70],
[[1, 7], 20], [[2, 7], 40], [[3, 7], 50], [[4, 7], 50], [[5, 7], 60],
[[6, 7], 70]]
for passes, expected in passes_set:
actual = tested.SecretaryProblem(n_items, passes).find_candidate(n_items, passes, nums)
self.assertEqual(actual, expected)
def test_find_two_candidates2(self):
'''Hold two candidates and the best candidates is placed middle of candidates'''
nums = np.array([30, 40, 60, 50, 70, 20, 10])
n_items = len(nums)
passes_set = [[[1, 2], 60], [[1, 3], 50], [[1, 4], 70], [[1, 5], 40], [[1, 6], 40],
[[2, 3], 70], [[2, 4], 70], [[2, 5], 60], [[2, 6], 60],
[[3, 4], 70], [[3, 5], 70], [[3, 6], 70],
[[4, 5], 70], [[4, 6], 70],
[[5, 6], 10],
[[1, 1], 60], [[2, 2], 70], [[3, 3], 70], [[4, 4], 70], [[5, 5], 10],
[[6, 6], 10], [[7, 7], 10],
[[1, 7], 40], [[2, 7], 60], [[3, 7], 70], [[4, 7], 70], [[5, 7], 10],
[[6, 7], 10]]
for passes, expected in passes_set:
actual = tested.SecretaryProblem(n_items, passes).find_candidate(n_items, passes, nums)
self.assertEqual(actual, expected)
def test_find_many_candidates(self):
'''Hold many candidates'''
nums = np.array([10, 20, 30, 40, 70, 60, 50, 49, 48, 47])
n_items = len(nums)
passes_set = [[[1, 2, 3], 40], [[1, 2, 3, 4], 70], [[1, 2, 3, 4, 5], 70],
[[4, 5, 6, 7], 70], [[5, 6, 7, 8], 47]]
for passes, expected in passes_set:
actual = tested.SecretaryProblem(n_items, passes).find_candidate(n_items, passes, nums)
self.assertEqual(actual, expected)
class TestExploreSecretaryProblem(TestCase):
'''Testing optimization with ExploreSecretaryProblem'''
def test_explore(self):
'''Explore a solution of 1-secretary problem'''
# Surpress warnings for scikit-optimize
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
cutoffs, value = tested.ExploreSecretaryProblem(
n_items=100, n_cutoffs=1, n_trial=10000, n_calls=30,
n_random_starts=20).explore(verbose=False)
self.assertGreater(cutoffs[0], 35.0)
self.assertLess(cutoffs[0], 39.0)
self.assertLess(value, -0.35)
self.assertGreater(value, -0.39)
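        # Sanity note rather than an extra assertion: for the classical
        # 1-secretary problem the optimal cutoff is about n/e and the success
        # probability tends to 1/e, so with n_items=100 the expected optimum is
        # roughly 100/e ~ 36.8 skipped candidates with value ~ -0.368, which is
        # what the bounds above bracket.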
| mit | -1,155,666,774,424,360,000 | 40.681416 | 99 | 0.502384 | false | 3.427861 | true | false | false |
honza/stickies-app | stickies/notes/views.py | 1 | 2756 | from django.http import HttpResponse, Http404
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import Group
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from models import Note, Project
from django.shortcuts import render_to_response, get_object_or_404
import simplejson as json
STATES = ['todo', 'inprogress', 'document', 'test', 'verify', 'done']
def index(request):
if request.user.is_authenticated():
projects = Project.objects.all()
else:
projects = None
return render_to_response('index.html', {'projects': projects},
context_instance=RequestContext(request))
@login_required
def project(request, id):
project = Project.objects.get(id=id)
group = get_object_or_404(Group, name=project.name)
if group not in request.user.groups.all():
raise Http404
return render_to_response('project.html', {'project': project})
@login_required
def ajax(request):
r = _ajax(request)
return HttpResponse(json.dumps(r))
def _ajax(request):
"""Wrapper"""
if not request.is_ajax():
return {'status': 403}
a = request.POST.get('a')
if a not in ['move', 'edit', 'delete', 'new']:
return {'status': 403}
if a in ['move', 'edit', 'delete']:
n = request.POST.get('note')
id = int(n[5:])
note = get_object_or_404(Note, pk=id)
try:
note = Note.objects.get(pk=id)
except ObjectDoesNotExist:
return {'status': 403}
if a in ['edit', 'new']:
content = request.POST.get('content')
if a == 'move':
st = request.POST.get('section')
if st not in STATES:
return {'status': 403}
note.state = st
elif a == 'delete':
note.delete()
return {'status': 200}
elif a == 'new':
p = request.POST.get('project')
p = get_object_or_404(Project, id=p)
note = Note(
content=content,
author=request.user,
project=p)
note.save()
return {
'status': 200,
'content': _note(note)
}
else:
note.content = content
note.save()
return {'status': 200}
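# Example payloads handled by _ajax() above (values are illustrative only):
#   new:    {'a': 'new', 'project': '1', 'content': 'Write tests'}
#   move:   {'a': 'move', 'note': 'note-42', 'section': 'inprogress'}
#   edit:   {'a': 'edit', 'note': 'note-42', 'content': 'Update the copy'}
#   delete: {'a': 'delete', 'note': 'note-42'}
# The 'note' value is the DOM id of the sticky ("note-<pk>"), which is why the
# first five characters are stripped before the primary key lookup.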
def _note(note):
t = """
<div id="note-%d" class="portlet ui-widget ui-widget-content ui-helper-clearfix ui-corner-all"><div class="portlet-header ui-widget-header ui-corner-all"><span class="ui-icon ui-icon-close"></span><span class="ui-icon ui-icon-pencil"></span></div>
<div class="portlet-content">%s</div>
<div class="portlet-meta">
<p>Author: %s</p>
</div>
</div>
"""
n = t % (note.id, note.content, note.author.username,)
return n
| gpl-3.0 | 2,390,391,735,569,483,000 | 29.285714 | 247 | 0.601597 | false | 3.640687 | false | false | false |
ibest/GRC_Scripts | read_info.py | 1 | 3835 | #!/usr/bin/env python
"""
# Copyright 2013, Matt Settles
# Modified Aug 10, 2013
"""
from Bio import SeqIO
from optparse import OptionParser
import sys, os, os.path, time, gzip
from collections import Counter
## Parse options and setup ##
usage = "usage %prog -d [path to directory of raw reads]"
parser = OptionParser(usage=usage)
parser.add_option('-d', '--directory', help="Directory containing read files to de-duplicate",
action="store", type="str", dest="sample_dir")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
sys.exit()
sample_dir = options.sample_dir
output_dir = options.sample_dir
#kindly provided by http://stackoverflow.com/questions/7099290/how-to-ignore-hidden-files-using-os-listdir-python
#glob.glob will list hidden files
#this replaces that functionality when hidden files exist, like in my reads from Berkeley
def listdir_nohidden(path):
for f in os.listdir(path):
if not f.startswith('.'):
yield f
def main_sff(infile1, outfile1):
# Set up the global variables
global count
global bases
lcount = 0
lbases = 0
lqual = 0
# Open inputs:
    iterator1 = SeqIO.parse(open(infile1, 'r'), "sff")
try:
while 1:
seq1 = iterator1.next()
count += 1
lcount += 1
            clip_len = seq1.annotations["clip_qual_right"] - seq1.annotations["clip_qual_left"]
            bases += clip_len
            lbases += clip_len
            lqual += sum(seq1.letter_annotations['phred_quality'][seq1.annotations["clip_qual_left"]:seq1.annotations["clip_qual_right"]])/clip_len
except StopIteration:
pass
finally:
        print "Finished processing file " + infile1
outfile1.write("file\t" + infile1 + "\n")
outfile1.write("nreads\t" + str(lcount) + "\n")
outfile1.write("nbases\t" + str(lbases) + "\n")
outfile1.write("avgBases\t" + str(round(lbases/lcount,0)) + "\n")
outfile1.write("avgQual\t" + str(round(lqual/lcount,1)) + "\n")
def main(infile1, outfile1):
#Set up the global variables
global count
global bases
lcount = 0
lbases = 0
lqual = 0
#Open inputs:
if infile1.split(".")[-1] == "gz":
import gzip
iterator1 = SeqIO.parse(gzip.open(infile1, 'rb'), 'fastq')
elif infile1.split(".")[-1] == "fastq":
iterator1 = SeqIO.parse(open(infile1, 'r'), 'fastq')
else:
iterator1 = SeqIO.parse(open(infile1, 'r'), 'fastq')
try:
while 1:
seq1 = iterator1.next()
count += 1
lcount += 1
bases += len(seq1)
lbases += len(seq1)
lqual += sum(seq1.letter_annotations['phred_quality'])/len(seq1)
except StopIteration:
pass
finally:
        print "Finished processing file " + infile1
outfile1.write("file\t" + infile1 + "\n")
outfile1.write("nreads\t" + str(lcount) + "\n")
outfile1.write("nbases\t" + str(lbases) + "\n")
outfile1.write("avgBases\t" + str(round(lbases/lcount,0)) + "\n")
outfile1.write("avgQual\t" + str(round(lqual/lcount,1)) + "\n")
#main part of the program
count = 0
bases = 0
outfile1 = open(os.path.realpath(os.path.join(os.getcwd(), sample_dir, "read_data.txt")),"w+")
files = listdir_nohidden('./' + sample_dir)
for f in files:
if ("fastq" in f) or ("fq" in f):
print f
infile1 = os.path.realpath(os.path.join(os.getcwd(), sample_dir, f))
main(infile1, outfile1)
if ("sff" in f):
print f
infile1 = os.path.realpath(os.path.join(os.getcwd(), sample_dir, f))
main_sff(infile1, outfile1)
outfile1.write("directory\t" + sample_dir + "\n")
outfile1.write("treads\t" + str(count) + "\n")
outfile1.write("tbases\t" + str(bases) + "\n")
outfile1.close()
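# Example of the resulting read_data.txt (numbers are made up for illustration):
#   file        /path/to/sample/lane1_R1.fastq.gz
#   nreads      250000
#   nbases      25000000
#   avgBases    100.0
#   avgQual     34.2
#   directory   sample
#   treads      250000
#   tbases      25000000
# One file/nreads/nbases/avgBases/avgQual block is written per input file, and the
# directory/treads/tbases totals are appended once at the end.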
| artistic-2.0 | -5,776,645,509,899,073,000 | 29.19685 | 142 | 0.605215 | false | 3.26661 | false | false | false |
ihabunek/toot | tests/test_config.py | 1 | 4923 | import os
import pytest
from toot import User, App, config
@pytest.fixture
def sample_config():
return {
'apps': {
'foo.social': {
'base_url': 'https://foo.social',
'client_id': 'abc',
'client_secret': 'def',
'instance': 'foo.social'
},
'bar.social': {
'base_url': 'https://bar.social',
'client_id': 'ghi',
'client_secret': 'jkl',
'instance': 'bar.social'
},
},
'users': {
'[email protected]': {
'access_token': 'mno',
'instance': 'bar.social',
'username': 'ihabunek'
}
},
'active_user': '[email protected]',
}
def test_extract_active_user_app(sample_config):
user, app = config.extract_user_app(sample_config, sample_config['active_user'])
assert isinstance(user, User)
assert user.instance == 'bar.social'
assert user.username == 'ihabunek'
assert user.access_token == 'mno'
assert isinstance(app, App)
assert app.instance == 'bar.social'
assert app.base_url == 'https://bar.social'
assert app.client_id == 'ghi'
assert app.client_secret == 'jkl'
def test_extract_active_when_no_active_user(sample_config):
# When there is no active user
assert config.extract_user_app(sample_config, None) == (None, None)
# When active user does not exist for whatever reason
assert config.extract_user_app(sample_config, 'does-not-exist') == (None, None)
# When active app does not exist for whatever reason
sample_config['users']['[email protected]']['instance'] = 'does-not-exist'
assert config.extract_user_app(sample_config, '[email protected]') == (None, None)
def test_save_app(sample_config):
app = App('xxx.yyy', 2, 3, 4)
app2 = App('moo.foo', 5, 6, 7)
app_count = len(sample_config['apps'])
assert 'xxx.yyy' not in sample_config['apps']
assert 'moo.foo' not in sample_config['apps']
# Sets
config.save_app.__wrapped__(sample_config, app)
assert len(sample_config['apps']) == app_count + 1
assert 'xxx.yyy' in sample_config['apps']
assert sample_config['apps']['xxx.yyy']['instance'] == 'xxx.yyy'
assert sample_config['apps']['xxx.yyy']['base_url'] == 2
assert sample_config['apps']['xxx.yyy']['client_id'] == 3
assert sample_config['apps']['xxx.yyy']['client_secret'] == 4
# Overwrites
config.save_app.__wrapped__(sample_config, app2)
assert len(sample_config['apps']) == app_count + 2
assert 'xxx.yyy' in sample_config['apps']
assert 'moo.foo' in sample_config['apps']
assert sample_config['apps']['xxx.yyy']['instance'] == 'xxx.yyy'
assert sample_config['apps']['xxx.yyy']['base_url'] == 2
assert sample_config['apps']['xxx.yyy']['client_id'] == 3
assert sample_config['apps']['xxx.yyy']['client_secret'] == 4
assert sample_config['apps']['moo.foo']['instance'] == 'moo.foo'
assert sample_config['apps']['moo.foo']['base_url'] == 5
assert sample_config['apps']['moo.foo']['client_id'] == 6
assert sample_config['apps']['moo.foo']['client_secret'] == 7
# Idempotent
config.save_app.__wrapped__(sample_config, app2)
assert len(sample_config['apps']) == app_count + 2
assert 'xxx.yyy' in sample_config['apps']
assert 'moo.foo' in sample_config['apps']
assert sample_config['apps']['xxx.yyy']['instance'] == 'xxx.yyy'
assert sample_config['apps']['xxx.yyy']['base_url'] == 2
assert sample_config['apps']['xxx.yyy']['client_id'] == 3
assert sample_config['apps']['xxx.yyy']['client_secret'] == 4
assert sample_config['apps']['moo.foo']['instance'] == 'moo.foo'
assert sample_config['apps']['moo.foo']['base_url'] == 5
assert sample_config['apps']['moo.foo']['client_id'] == 6
assert sample_config['apps']['moo.foo']['client_secret'] == 7
def test_delete_app(sample_config):
app = App('foo.social', 2, 3, 4)
app_count = len(sample_config['apps'])
assert 'foo.social' in sample_config['apps']
config.delete_app.__wrapped__(sample_config, app)
assert 'foo.social' not in sample_config['apps']
assert len(sample_config['apps']) == app_count - 1
# Idempotent
config.delete_app.__wrapped__(sample_config, app)
assert 'foo.social' not in sample_config['apps']
assert len(sample_config['apps']) == app_count - 1
def test_get_config_file_path():
fn = config.get_config_file_path
os.unsetenv('XDG_CONFIG_HOME')
os.environ.pop('XDG_CONFIG_HOME', None)
assert fn() == os.path.expanduser('~/.config/toot/config.json')
os.environ['XDG_CONFIG_HOME'] = '/foo/bar/config'
assert fn() == '/foo/bar/config/toot/config.json'
os.environ['XDG_CONFIG_HOME'] = '~/foo/config'
assert fn() == os.path.expanduser('~/foo/config/toot/config.json')
| gpl-3.0 | 5,020,795,922,674,533,000 | 34.417266 | 84 | 0.598212 | false | 3.425887 | true | false | false |
fls-bioinformatics-core/genomics | bcftbx/test/test_ngsutils.py | 1 | 7920 | #######################################################################
# Tests for ngsutils.py module
#######################################################################
import unittest
import os
import io
import tempfile
import shutil
import gzip
from bcftbx.ngsutils import *
from builtins import range
class TestGetreadsFunction(unittest.TestCase):
"""Tests for the 'getreads' function
"""
def setUp(self):
self.wd = tempfile.mkdtemp()
self.example_fastq_data = u"""@K00311:43:HL3LWBBXX:8:1101:21440:1121 1:N:0:CNATGT
GCCNGACAGCAGAAAT
+
AAF#FJJJJJJJJJJJ
@K00311:43:HL3LWBBXX:8:1101:21460:1121 1:N:0:CNATGT
GGGNGTCATTGATCAT
+
AAF#FJJJJJJJJJJJ
@K00311:43:HL3LWBBXX:8:1101:21805:1121 1:N:0:CNATGT
CCCNACCCTTGCCTAC
+
AAF#FJJJJJJJJJJJ
"""
self.example_csfasta_data = u"""# Cwd: /home/pipeline
# Title: solid0127_20121204_FRAG_BC_Run_56_pool_LC_CK
>1_51_38_F3
T3..3.213.12211.01..000..111.0210202221221121011..0
>1_51_301_F3
T0..3.222.21233.00..022..110.0210022323223202211..2
>1_52_339_F3
T1.311202211102.331233332113.23332233002223222312.2
"""
self.example_qual_data = u"""# Cwd: /home/pipeline
# Title: solid0127_20121204_FRAG_BC_Run_56_pool_LC_CK
>1_51_38_F3
16 -1 -1 5 -1 24 15 12 -1 21 12 16 22 19 -1 26 13 -1 -1 4 21 4 -1 -1 4 7 9 -1 4 5 4 4 4 4 4 13 4 4 4 5 4 4 10 4 4 4 4 -1 -1 4
>1_51_301_F3
22 -1 -1 4 -1 24 30 7 -1 4 9 26 6 16 -1 25 25 -1 -1 17 18 13 -1 -1 4 14 24 -1 4 14 17 32 4 7 13 13 22 4 12 19 4 24 6 9 8 4 4 -1 -1 9
>1_52_339_F3
27 -1 33 24 28 32 29 17 25 27 26 30 30 31 -1 28 33 19 19 13 4 20 21 13 5 4 12 -1 4 23 13 8 4 10 4 6 5 7 4 8 4 8 12 5 12 10 8 7 -1 4
"""
def tearDown(self):
shutil.rmtree(self.wd)
def test_getreads_fastq(self):
"""getreads: read records from Fastq file
"""
# Make an example file
example_fastq = os.path.join(self.wd,"example.fastq")
with io.open(example_fastq,'wt') as fp:
fp.write(self.example_fastq_data)
# Read lines
fastq_reads = getreads(example_fastq)
reference_reads = [self.example_fastq_data.split('\n')[i:i+4]
for i
in range(0,
len(self.example_fastq_data.split('\n')),
4)]
for r1,r2 in zip(reference_reads,fastq_reads):
self.assertEqual(r1,r2)
def test_getreads_gzipped_fastq(self):
"""getreads: read records from gzipped Fastq file
"""
# Make an example file
example_fastq = os.path.join(self.wd,"example.fastq.gz")
with gzip.open(example_fastq,'wt') as fp:
fp.write(self.example_fastq_data)
# Read lines
fastq_reads = getreads(example_fastq)
reference_reads = [self.example_fastq_data.split('\n')[i:i+4]
for i
in range(0,
len(self.example_fastq_data.split('\n')),
4)]
for r1,r2 in zip(reference_reads,fastq_reads):
self.assertEqual(r1,r2)
def test_getreads_csfasta(self):
"""getreads: read records from csfasta file
"""
# Make an example file
example_csfasta = os.path.join(self.wd,"example.csfasta")
with io.open(example_csfasta,'wt') as fp:
fp.write(self.example_csfasta_data)
# Read lines
csfasta_reads = getreads(example_csfasta)
reference_reads = [self.example_csfasta_data.split('\n')[i:i+2]
for i
in range(2,
                                    len(self.example_csfasta_data.split('\n')),
2)]
for r1,r2 in zip(reference_reads,csfasta_reads):
self.assertEqual(r1,r2)
def test_getreads_qual(self):
"""getreads: read records from qual file
"""
# Make an example file
example_qual = os.path.join(self.wd,"example.qual")
with io.open(example_qual,'wt') as fp:
fp.write(self.example_qual_data)
# Read lines
qual_reads = getreads(example_qual)
reference_reads = [self.example_qual_data.split('\n')[i:i+2]
for i
in range(2,
len(self.example_qual_data.split('\n')),
2)]
for r1,r2 in zip(reference_reads,qual_reads):
self.assertEqual(r1,r2)
class TestGetreadsSubsetFunction(unittest.TestCase):
"""Tests for the 'getreads_subset' function
"""
def setUp(self):
self.wd = tempfile.mkdtemp()
self.example_fastq_data = u"""@K00311:43:HL3LWBBXX:8:1101:21440:1121 1:N:0:CNATGT
GCCNGACAGCAGAAAT
+
AAF#FJJJJJJJJJJJ
@K00311:43:HL3LWBBXX:8:1101:21460:1121 1:N:0:CNATGT
GGGNGTCATTGATCAT
+
AAF#FJJJJJJJJJJJ
@K00311:43:HL3LWBBXX:8:1101:21805:1121 1:N:0:CNATGT
CCCNACCCTTGCCTAC
+
AAF#FJJJJJJJJJJJ
"""
def tearDown(self):
shutil.rmtree(self.wd)
def test_getreads_subset_fastq(self):
"""getreads: get subset of reads from Fastq file
"""
# Make an example file
example_fastq = os.path.join(self.wd,"example.fastq")
with io.open(example_fastq,'wt') as fp:
fp.write(self.example_fastq_data)
# Get subset
fastq_reads = getreads_subset(example_fastq,
indices=(0,2))
reference_reads = [self.example_fastq_data.split('\n')[i:i+4]
for i in (0,8)]
for r1,r2 in zip(reference_reads,fastq_reads):
self.assertEqual(r1,r2)
def test_getreads_subset_fastq_index_out_of_range(self):
"""getreads: requesting non-existent read raises exception
"""
# Make an example file
example_fastq = os.path.join(self.wd,"example.fastq")
with io.open(example_fastq,'wt') as fp:
fp.write(self.example_fastq_data)
# Attempt to get subset with indices outside the range
# of reads
# NB would prefer to use assertRaises, however we need to
# actually yeild the reads in order to raise the exceptions
try:
[r for r in getreads_subset(example_fastq,indices=(-1,0))]
failed = True
except Exception:
# This is expected, test passes
failed = False
self.assertFalse(failed,"Exception not raised")
try:
[r for r in getreads_subset(example_fastq,indices=(0,99))]
failed = True
except Exception:
# This is expected, test passes
failed = False
self.assertFalse(failed,"Exception not raised")
class TestGetreadsRegexpFunction(unittest.TestCase):
"""Tests for the 'getreads_regex' function
"""
def setUp(self):
self.wd = tempfile.mkdtemp()
self.example_fastq_data = u"""@K00311:43:HL3LWBBXX:8:1101:21440:1121 1:N:0:CNATGT
GCCNGACAGCAGAAAT
+
AAF#FJJJJJJJJJJJ
@K00311:43:HL3LWBBXX:8:1101:21460:1121 1:N:0:CNATGT
GGGNGTCATTGATCAT
+
AAF#FJJJJJJJJJJJ
@K00311:43:HL3LWBBXX:8:1101:21805:1121 1:N:0:CNATGT
CCCNACCCTTGCCTAC
+
AAF#FJJJJJJJJJJJ
"""
def tearDown(self):
shutil.rmtree(self.wd)
def test_getreads_regexp_fastq(self):
"""getreads: get reads from Fastq file matching pattern
"""
# Make an example file
example_fastq = os.path.join(self.wd,"example.fastq")
with io.open(example_fastq,'wt') as fp:
fp.write(self.example_fastq_data)
# Get subset
fastq_reads = getreads_regex(example_fastq,
":1101:21440:1121")
reference_reads = [self.example_fastq_data.split('\n')[i:i+4]
for i in (0,)]
for r1,r2 in zip(reference_reads,fastq_reads):
self.assertEqual(r1,r2)
| artistic-2.0 | 4,980,588,436,694,378,000 | 36.894737 | 133 | 0.573611 | false | 3.259259 | true | false | false |
jshufelt/volatility | volatility/plugins/malware/callbacks.py | 8 | 25250 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2010, 2011, 2012 Michael Ligh <[email protected]>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.utils as utils
import volatility.obj as obj
import volatility.poolscan as poolscan
import volatility.debug as debug
import volatility.plugins.common as common
import volatility.win32.modules as modules
import volatility.win32.tasks as tasks
import volatility.plugins.malware.devicetree as devicetree
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
try:
import distorm3
has_distorm3 = True
except ImportError:
has_distorm3 = False
#--------------------------------------------------------------------------------
# vtypes
#--------------------------------------------------------------------------------
callback_types = {
'_NOTIFICATION_PACKET' : [ 0x10, {
'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
'DriverObject' : [ 0x8, ['pointer', ['_DRIVER_OBJECT']]],
'NotificationRoutine' : [ 0xC, ['unsigned int']],
} ],
'_KBUGCHECK_CALLBACK_RECORD' : [ 0x20, {
'Entry' : [ 0x0, ['_LIST_ENTRY']],
'CallbackRoutine' : [ 0x8, ['unsigned int']],
'Buffer' : [ 0xC, ['pointer', ['void']]],
'Length' : [ 0x10, ['unsigned int']],
'Component' : [ 0x14, ['pointer', ['String', dict(length = 64)]]],
'Checksum' : [ 0x18, ['pointer', ['unsigned int']]],
'State' : [ 0x1C, ['unsigned char']],
} ],
'_KBUGCHECK_REASON_CALLBACK_RECORD' : [ 0x1C, {
'Entry' : [ 0x0, ['_LIST_ENTRY']],
'CallbackRoutine' : [ 0x8, ['unsigned int']],
'Component' : [ 0xC, ['pointer', ['String', dict(length = 8)]]],
'Checksum' : [ 0x10, ['pointer', ['unsigned int']]],
'Reason' : [ 0x14, ['unsigned int']],
'State' : [ 0x18, ['unsigned char']],
} ],
'_SHUTDOWN_PACKET' : [ 0xC, {
'Entry' : [ 0x0, ['_LIST_ENTRY']],
'DeviceObject' : [ 0x8, ['pointer', ['_DEVICE_OBJECT']]],
} ],
'_EX_CALLBACK_ROUTINE_BLOCK' : [ 0x8, {
'RundownProtect' : [ 0x0, ['unsigned int']],
'Function' : [ 0x4, ['unsigned int']],
'Context' : [ 0x8, ['unsigned int']],
} ],
'_GENERIC_CALLBACK' : [ 0xC, {
'Callback' : [ 0x4, ['pointer', ['void']]],
'Associated' : [ 0x8, ['pointer', ['void']]],
} ],
'_REGISTRY_CALLBACK_LEGACY' : [ 0x38, {
'CreateTime' : [ 0x0, ['WinTimeStamp', dict(is_utc = True)]],
} ],
'_REGISTRY_CALLBACK' : [ None, {
'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
'Function' : [ 0x1C, ['pointer', ['void']]],
} ],
'_DBGPRINT_CALLBACK' : [ 0x14, {
'Function' : [ 0x8, ['pointer', ['void']]],
} ],
'_NOTIFY_ENTRY_HEADER' : [ None, {
'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
'EventCategory' : [ 0x8, ['Enumeration', dict(target = 'long', choices = {
0: 'EventCategoryReserved',
1: 'EventCategoryHardwareProfileChange',
2: 'EventCategoryDeviceInterfaceChange',
3: 'EventCategoryTargetDeviceChange'})]],
'CallbackRoutine' : [ 0x14, ['unsigned int']],
'DriverObject' : [ 0x1C, ['pointer', ['_DRIVER_OBJECT']]],
} ],
}
callback_types_x64 = {
'_GENERIC_CALLBACK' : [ 0x18, {
'Callback' : [ 0x8, ['pointer', ['void']]],
'Associated' : [ 0x10, ['pointer', ['void']]],
} ],
'_NOTIFICATION_PACKET' : [ 0x30, {
'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
'DriverObject' : [ 0x10, ['pointer', ['_DRIVER_OBJECT']]],
'NotificationRoutine' : [ 0x18, ['address']],
} ],
'_SHUTDOWN_PACKET' : [ 0xC, {
'Entry' : [ 0x0, ['_LIST_ENTRY']],
'DeviceObject' : [ 0x10, ['pointer', ['_DEVICE_OBJECT']]],
} ],
'_DBGPRINT_CALLBACK' : [ 0x14, {
'Function' : [ 0x10, ['pointer', ['void']]],
} ],
'_NOTIFY_ENTRY_HEADER' : [ None, {
'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
'EventCategory' : [ 0x10, ['Enumeration', dict(target = 'long', choices = {
0: 'EventCategoryReserved',
1: 'EventCategoryHardwareProfileChange',
2: 'EventCategoryDeviceInterfaceChange',
3: 'EventCategoryTargetDeviceChange'})]],
'CallbackRoutine' : [ 0x20, ['address']],
'DriverObject' : [ 0x30, ['pointer', ['_DRIVER_OBJECT']]],
} ],
'_REGISTRY_CALLBACK' : [ 0x50, {
'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
'Function' : [ 0x20, ['pointer', ['void']]], # other could be 28
} ],
'_KBUGCHECK_CALLBACK_RECORD' : [ None, {
'Entry' : [ 0x0, ['_LIST_ENTRY']],
'CallbackRoutine' : [ 0x10, ['address']],
'Component' : [ 0x28, ['pointer', ['String', dict(length = 8)]]],
} ],
'_KBUGCHECK_REASON_CALLBACK_RECORD' : [ None, {
'Entry' : [ 0x0, ['_LIST_ENTRY']],
'CallbackRoutine' : [ 0x10, ['unsigned int']],
'Component' : [ 0x28, ['pointer', ['String', dict(length = 8)]]],
} ],
}
#--------------------------------------------------------------------------------
# object classes
#--------------------------------------------------------------------------------
class _SHUTDOWN_PACKET(obj.CType):
"""Class for shutdown notification callbacks"""
def is_valid(self):
"""
Perform some checks.
Note: obj_native_vm is kernel space.
"""
if not obj.CType.is_valid(self):
return False
if (not self.obj_native_vm.is_valid_address(self.Entry.Flink) or
not self.obj_native_vm.is_valid_address(self.Entry.Blink) or
not self.obj_native_vm.is_valid_address(self.DeviceObject)):
return False
# Dereference the device object
device = self.DeviceObject.dereference()
# Carve out the device's object header and check its type
object_header = obj.Object("_OBJECT_HEADER",
offset = device.obj_offset -
self.obj_native_vm.profile.get_obj_offset("_OBJECT_HEADER", "Body"),
vm = device.obj_vm,
native_vm = device.obj_native_vm)
return object_header.get_object_type() == "Device"
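    # Usage sketch (assumed surrounding variables, not code from this plugin):
    # given the offset of a scanned _SHUTDOWN_PACKET, it can be instantiated the
    # same way other structures are in this file, e.g.
    #   pkt = obj.Object("_SHUTDOWN_PACKET", offset = offset,
    #                    vm = address_space, native_vm = kernel_space)
    #   if pkt.is_valid():
    #       device = pkt.DeviceObject.dereference()
    # where address_space and kernel_space are assumed to be the layers prepared
    # by the scan command that drives these checks.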
#--------------------------------------------------------------------------------
# profile modifications
#--------------------------------------------------------------------------------
class CallbackMods(obj.ProfileModification):
before = ['WindowsOverlay']
conditions = {'os': lambda x: x == 'windows'}
def modification(self, profile):
if profile.metadata.get("memory_model", "32bit") == "32bit":
profile.vtypes.update(callback_types)
profile.object_classes.update({
'_SHUTDOWN_PACKET': _SHUTDOWN_PACKET,
})
else:
profile.vtypes.update(callback_types_x64)
#--------------------------------------------------------------------------------
# pool scanners
#--------------------------------------------------------------------------------
class AbstractCallbackScanner(poolscan.PoolScanner):
"""Return the offset of the callback, no object headers"""
class PoolScanFSCallback(AbstractCallbackScanner):
"""PoolScanner for File System Callbacks"""
def __init__(self, address_space):
AbstractCallbackScanner.__init__(self, address_space)
self.pooltag = "IoFs"
self.struct_name = "_NOTIFICATION_PACKET"
if address_space.profile.metadata.get("memory_model", "32bit") == "32bit":
size = 0x18
else:
size = 0x30
self.checks = [ ('CheckPoolSize', dict(condition = lambda x: x == size)),
('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
#('CheckPoolIndex', dict(value = 4)),
]
class PoolScanShutdownCallback(AbstractCallbackScanner):
"""PoolScanner for Shutdown Callbacks"""
def __init__(self, address_space):
AbstractCallbackScanner.__init__(self, address_space)
self.pooltag = "IoSh"
self.struct_name = "_SHUTDOWN_PACKET"
if address_space.profile.metadata.get("memory_model", "32bit") == "32bit":
size = 0x18
else:
size = 0x30
self.checks = [ ('CheckPoolSize', dict(condition = lambda x: x == size)),
('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
('CheckPoolIndex', dict(value = 0)),
]
class PoolScanGenericCallback(AbstractCallbackScanner):
"""PoolScanner for Generic Callbacks"""
def __init__(self, address_space):
AbstractCallbackScanner.__init__(self, address_space)
self.pooltag = "Cbrb"
self.struct_name = "_GENERIC_CALLBACK"
if address_space.profile.metadata.get("memory_model", "32bit") == "32bit":
size = 0x18
else:
size = 0x30
self.checks = [ ('CheckPoolSize', dict(condition = lambda x: x == size)),
('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
# This is a good constraint for all images except Frank's rustock-c.vmem
#('CheckPoolIndex', dict(value = 1)),
]
class PoolScanDbgPrintCallback(AbstractCallbackScanner):
"""PoolScanner for DebugPrint Callbacks on Vista and 7"""
def __init__(self, address_space):
AbstractCallbackScanner.__init__(self, address_space)
self.pooltag = "DbCb"
self.struct_name = "_DBGPRINT_CALLBACK"
self.checks = [ ('CheckPoolSize', dict(condition = lambda x: x >= 0x20 and x <= 0x40)),
('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
#('CheckPoolIndex', dict(value = 0)),
]
class PoolScanRegistryCallback(AbstractCallbackScanner):
"""PoolScanner for DebugPrint Callbacks on Vista and 7"""
def __init__(self, address_space):
AbstractCallbackScanner.__init__(self, address_space)
self.pooltag = "CMcb"
self.struct_name = "_REGISTRY_CALLBACK"
self.checks = [('CheckPoolSize', dict(condition = lambda x: x >= 0x38)),
('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
('CheckPoolIndex', dict(value = 4)),
]
class PoolScanPnp9(AbstractCallbackScanner):
"""PoolScanner for Pnp9 (EventCategoryHardwareProfileChange)"""
def __init__(self, address_space):
AbstractCallbackScanner.__init__(self, address_space)
self.pooltag = "Pnp9"
self.struct_name = "_NOTIFY_ENTRY_HEADER"
self.checks = [ # seen as 0x2C on W7, 0x28 on vistasp0 (4 less but needs 8 less)
('CheckPoolSize', dict(condition = lambda x: x >= 0x30)),
('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
('CheckPoolIndex', dict(value = 1)),
]
class PoolScanPnpD(AbstractCallbackScanner):
"""PoolScanner for PnpD (EventCategoryDeviceInterfaceChange)"""
def __init__(self, address_space):
AbstractCallbackScanner.__init__(self, address_space)
self.pooltag = "PnpD"
self.struct_name = "_NOTIFY_ENTRY_HEADER"
self.checks = [('CheckPoolSize', dict(condition = lambda x: x >= 0x40)),
('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
('CheckPoolIndex', dict(value = 1)),
]
class PoolScanPnpC(AbstractCallbackScanner):
"""PoolScanner for PnpC (EventCategoryTargetDeviceChange)"""
def __init__(self, address_space):
AbstractCallbackScanner.__init__(self, address_space)
self.pooltag = "PnpC"
self.struct_name = "_NOTIFY_ENTRY_HEADER"
self.checks = [('CheckPoolSize', dict(condition = lambda x: x >= 0x38)),
('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
('CheckPoolIndex', dict(value = 1)),
]
#--------------------------------------------------------------------------------
# callbacks plugin
#--------------------------------------------------------------------------------
class Callbacks(common.AbstractScanCommand):
"""Print system-wide notification routines"""
scanners = [PoolScanFSCallback, PoolScanShutdownCallback, PoolScanGenericCallback]
@staticmethod
def get_kernel_callbacks(nt_mod):
"""
Enumerate the Create Process, Create Thread, and Image Load callbacks.
On some systems, the byte sequences will be inaccurate or the exported
function will not be found. In these cases, the PoolScanGenericCallback
scanner will pick up the pool associated with the callbacks.
"""
bits32 = nt_mod.obj_vm.profile.metadata.get("memory_model", "32bit") == "32bit"
if bits32:
routines = [
# push esi; mov esi, offset _PspLoadImageNotifyRoutine
('PsSetLoadImageNotifyRoutine', "\x56\xbe"),
# push esi; mov esi, offset _PspCreateThreadNotifyRoutine
('PsSetCreateThreadNotifyRoutine', "\x56\xbe"),
# mov edi, offset _PspCreateProcessNotifyRoutine
('PsSetCreateProcessNotifyRoutine', "\xbf"),
]
else:
routines = [
                # lea rcx, offset _PspLoadImageNotifyRoutine
('PsRemoveLoadImageNotifyRoutine', "\x48\x8d\x0d"),
# lea rcx, offset _PspCreateThreadNotifyRoutine
('PsRemoveCreateThreadNotifyRoutine', "\x48\x8d\x0d"),
# mov edi, offset _PspCreateProcessNotifyRoutine
#('PsSetCreateProcessNotifyRoutine', "\xbf"),
]
for symbol, hexbytes in routines:
# Locate the exported symbol in the NT module
symbol_rva = nt_mod.getprocaddress(symbol)
if symbol_rva == None:
continue
symbol_address = symbol_rva + nt_mod.DllBase
# Find the global variable referenced by the exported symbol
data = nt_mod.obj_vm.zread(symbol_address, 100)
offset = data.find(hexbytes)
if offset == -1:
continue
if bits32:
# Read the pointer to the list
p = obj.Object('Pointer',
offset = symbol_address + offset + len(hexbytes),
vm = nt_mod.obj_vm)
else:
# Read the pointer to the list
v = obj.Object('int',
offset = symbol_address + offset + len(hexbytes),
vm = nt_mod.obj_vm)
p = symbol_address + offset + 7 + v
# The list is an array of 8 _EX_FAST_REF objects
addrs = obj.Object('Array', count = 8, targetType = '_EX_FAST_REF',
offset = p, vm = nt_mod.obj_vm)
for addr in addrs:
callback = addr.dereference_as("_GENERIC_CALLBACK")
if callback:
yield symbol, callback.Callback, None
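    # Hedged usage sketch: calculate() below consumes this generator with the NT
    # kernel module (modlist[0]), e.g.
    #   for sym, callback_ptr, detail in self.get_kernel_callbacks(modlist[0]):
    #       ...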
@staticmethod
def get_bugcheck_callbacks(addr_space):
"""
Enumerate generic Bugcheck callbacks.
Note: These structures don't exist in tagged pools, but you can find
them via KDDEBUGGER_DATA64 on all versions of Windows.
"""
kdbg = tasks.get_kdbg(addr_space)
list_head = kdbg.KeBugCheckCallbackListHead.dereference_as('_KBUGCHECK_CALLBACK_RECORD')
for l in list_head.Entry.list_of_type("_KBUGCHECK_CALLBACK_RECORD", "Entry"):
yield "KeBugCheckCallbackListHead", l.CallbackRoutine, l.Component.dereference()
@staticmethod
def get_registry_callbacks_legacy(nt_mod):
"""
Enumerate registry change callbacks.
This method of finding a global variable via disassembly of the
CmRegisterCallback function is only for XP systems. If it fails on
XP you can still find the callbacks using PoolScanGenericCallback.
On Vista and Windows 7, these callbacks are registered using the
CmRegisterCallbackEx function.
"""
if not has_distorm3:
return
symbol = "CmRegisterCallback"
# Get the RVA of the symbol from NT's EAT
symbol_rva = nt_mod.getprocaddress(symbol)
if symbol_rva == None:
return
# Absolute VA to the symbol code
symbol_address = symbol_rva + nt_mod.DllBase
# Read the function prologue
data = nt_mod.obj_vm.zread(symbol_address, 200)
c = 0
vector = None
# Looking for MOV EBX, CmpCallBackVector
# This may be the first or second MOV EBX instruction
for op in distorm3.Decompose(symbol_address, data, distorm3.Decode32Bits):
if (op.valid and op.mnemonic == "MOV"
and len(op.operands) == 2
and op.operands[0].name == 'EBX'):
vector = op.operands[1].value
if c == 1:
break
else:
c += 1
# Can't find the global variable
if vector == None:
return
# The vector is an array of 100 _EX_FAST_REF objects
addrs = obj.Object("Array", count = 100, offset = vector,
vm = nt_mod.obj_vm, targetType = "_EX_FAST_REF")
for addr in addrs:
callback = addr.dereference_as("_EX_CALLBACK_ROUTINE_BLOCK")
if callback:
yield symbol, callback.Function, None
@staticmethod
def get_bugcheck_reason_callbacks(nt_mod):
"""
Enumerate Bugcheck Reason callbacks.
Note: These structures don't exist in tagged pools, so we
find them by locating the list head which is a non-exported
NT symbol. The method works on all x86 versions of Windows.
mov [eax+KBUGCHECK_REASON_CALLBACK_RECORD.Entry.Blink], \
offset _KeBugCheckReasonCallbackListHead
"""
symbol = "KeRegisterBugCheckReasonCallback"
bits32 = nt_mod.obj_vm.profile.metadata.get("memory_model", "32bit") == "32bit"
if bits32:
hexbytes = "\xC7\x40\x04"
else:
hexbytes = "\x48\x8d\x0d"
# Locate the symbol RVA
symbol_rva = nt_mod.getprocaddress(symbol)
if symbol_rva == None:
return
# Compute the absolute virtual address
symbol_address = symbol_rva + nt_mod.DllBase
data = nt_mod.obj_vm.zread(symbol_address, 200)
# Search for the pattern
offset = data.find(hexbytes)
if offset == -1:
return
if bits32:
p = obj.Object('Pointer',
offset = symbol_address + offset + len(hexbytes),
vm = nt_mod.obj_vm)
bugs = p.dereference_as('_KBUGCHECK_REASON_CALLBACK_RECORD')
else:
v = obj.Object("int", offset = symbol_address + offset + len(hexbytes), vm = nt_mod.obj_vm)
p = symbol_address + offset + 7 + v
bugs = obj.Object("_KBUGCHECK_REASON_CALLBACK_RECORD", offset = p, vm = nt_mod.obj_vm)
for l in bugs.Entry.list_of_type("_KBUGCHECK_REASON_CALLBACK_RECORD", "Entry"):
if nt_mod.obj_vm.is_valid_address(l.CallbackRoutine):
yield symbol, l.CallbackRoutine, l.Component.dereference()
def calculate(self):
addr_space = utils.load_as(self._config)
bits32 = addr_space.profile.metadata.get("memory_model", "32bit") == "32bit"
# Get the OS version we're analyzing
version = (addr_space.profile.metadata.get('major', 0),
addr_space.profile.metadata.get('minor', 0))
modlist = list(modules.lsmod(addr_space))
mods = dict((addr_space.address_mask(mod.DllBase), mod) for mod in modlist)
mod_addrs = sorted(mods.keys())
# Valid for Vista and later
if version >= (6, 0):
self.scanners.append(PoolScanDbgPrintCallback)
self.scanners.append(PoolScanRegistryCallback)
self.scanners.append(PoolScanPnp9)
self.scanners.append(PoolScanPnpD)
self.scanners.append(PoolScanPnpC)
for objct in self.scan_results(addr_space):
name = objct.obj_name
if name == "_REGISTRY_CALLBACK":
info = "CmRegisterCallback", objct.Function, None
yield info, mods, mod_addrs
elif name == "_DBGPRINT_CALLBACK":
info = "DbgSetDebugPrintCallback", objct.Function, None
yield info, mods, mod_addrs
elif name == "_SHUTDOWN_PACKET":
driver = objct.DeviceObject.dereference().DriverObject
if not driver:
continue
index = devicetree.MAJOR_FUNCTIONS.index('IRP_MJ_SHUTDOWN')
address = driver.MajorFunction[index]
details = str(driver.DriverName or "-")
info = "IoRegisterShutdownNotification", address, details
yield info, mods, mod_addrs
elif name == "_GENERIC_CALLBACK":
info = "GenericKernelCallback", objct.Callback, None
yield info, mods, mod_addrs
elif name == "_NOTIFY_ENTRY_HEADER":
# Dereference the driver object pointer
driver = objct.DriverObject.dereference()
driver_name = ""
if driver:
# Instantiate an object header for the driver name
header = driver.get_object_header()
if header.get_object_type() == "Driver":
# Grab the object name
driver_name = header.NameInfo.Name.v()
info = objct.EventCategory, objct.CallbackRoutine, driver_name
yield info, mods, mod_addrs
elif name == "_NOTIFICATION_PACKET":
info = "IoRegisterFsRegistrationChange", objct.NotificationRoutine, None
yield info, mods, mod_addrs
for info in self.get_kernel_callbacks(modlist[0]):
yield info, mods, mod_addrs
for info in self.get_bugcheck_callbacks(addr_space):
yield info, mods, mod_addrs
for info in self.get_bugcheck_reason_callbacks(modlist[0]):
yield info, mods, mod_addrs
# Valid for XP
if bits32 and version == (5, 1):
for info in self.get_registry_callbacks_legacy(modlist[0]):
yield info, mods, mod_addrs
def unified_output(self, data):
return TreeGrid([("Type", str),
("Callback", Address),
("Module", str),
("Details", str)],
self.generator(data))
def generator(self, data):
for (sym, cb, detail), mods, mod_addrs in data:
module = tasks.find_module(mods, mod_addrs, mods.values()[0].obj_vm.address_mask(cb))
## The original callbacks plugin searched driver objects
## if the owning module isn't found (Rustock.B). We leave that
            ## task up to the user this time, and will be incorporating
## some different module association methods later.
if module:
module_name = module.BaseDllName or module.FullDllName
else:
module_name = "UNKNOWN"
yield (0, [str(sym), Address(cb), str(module_name), str(detail or "-")])
def render_text(self, outfd, data):
self.table_header(outfd,
[("Type", "36"),
("Callback", "[addrpad]"),
("Module", "20"),
("Details", ""),
])
for (sym, cb, detail), mods, mod_addrs in data:
module = tasks.find_module(mods, mod_addrs, mods.values()[0].obj_vm.address_mask(cb))
## The original callbacks plugin searched driver objects
## if the owning module isn't found (Rustock.B). We leave that
            ## task up to the user this time, and will be incorporating
## some different module association methods later.
if module:
module_name = module.BaseDllName or module.FullDllName
else:
module_name = "UNKNOWN"
self.table_row(outfd, sym, cb, module_name, detail or "-")
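# Hedged invocation sketch (image name and profile are placeholders):
#   $ python vol.py -f memory.dmp --profile=Win7SP1x86 callbacks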
| gpl-2.0 | -1,730,338,783,978,873,900 | 38.147287 | 103 | 0.553822 | false | 4.060791 | false | false | false |
rohitwaghchaure/alec_frappe5_erpnext | erpnext/selling/custom_methods.py | 1 | 25465 | from __future__ import unicode_literals
import frappe
from frappe.utils import cint, cstr, flt, formatdate
from frappe.model.mapper import get_mapped_doc
from frappe import msgprint, _, throw
from erpnext.setup.utils import get_company_currency
import frappe.defaults
from frappe.desk.form.utils import get_linked_docs
import json
#check batch is of respective item code
def validate_batch(doc,method):
for d in doc.get('items'):
if d.batch_no and d.item_code != frappe.db.get_value('Batch',d.batch_no,'item'):
frappe.throw(_("Select batch respective to item code {0}").format(d.item_code))
#maintain supplier name, rate and batch in the 'EC - Rate of Purchase' price list
def create_batchwise_price_list(doc, method):
for d in doc.get('items'):
item_price=frappe.db.get_value('Item Price',{'item_code':d.item_code,'price_list':'EC - Rate of Purchase'},'name')
if not item_price:
create_item_price(d,doc)
else:
create_batchwise_item_price(item_price,d,doc)
#create item price list
def create_item_price(d,doc):
pl=frappe.new_doc('Item Price')
pl.price_list='EC - Rate of Purchase'
pl.buying = 1
pl.selling = 1
pl.item_code= d.item_code
pl.price_list_rate=d.rate
pl.item_name=d.item_name
pl.item_description=d.description
pl.currency=doc.currency
pl.save(ignore_permissions=True)
create_batchwise_item_price(pl.name,d,doc)
#create batch wise price list rate
def create_batchwise_item_price(name, d, doc):
if d.batch_no and not frappe.db.get_value('Batchwise Purchase Rate',{'batch':d.batch_no},'name'):
bpr=frappe.new_doc('Batchwise Purchase Rate')
bpr.supplier=doc.supplier
bpr.batch=d.batch_no
bpr.rate=d.rate
bpr.parentfield='batchwise_purchase_rate'
bpr.parenttype='Item Price'
bpr.parent=name
bpr.document = doc.name
bpr.save(ignore_permissions=True)
#on cancel delete created price list
def cancel_batchwise_price_list(doc, method):
for d in doc.get('items'):
if d.batch_no:
frappe.db.sql("delete from `tabBatchwise Purchase Rate` where document='%s'"%(doc.name))
#create supplier quotations from quotations in draft
@frappe.whitelist()
def create_supplier_quotation():
Quotations=get_quotation_in_draft()
if Quotations:
for quotation in Quotations:
if not frappe.db.get_value("Quotation Used",{"quotation":quotation[0]},"quotation"):
items=frappe.db.sql("""select item_code,qty from `tabQuotation Item` where parent='%s'"""%(quotation[0]),as_list=1)
for item in items:
item_price_exists=frappe.db.sql("""select distinct ifnull(price_list_rate,0) from `tabItem Price` where item_code='%s' """%(item[0]))
if not item_price_exists or item_price_exists[0][0]==0:
suppliers=get_suplier_details(item[0])
if suppliers:
for supplier in suppliers:
make_supplier_quotation(item,supplier[0])
update_used_quotation(quotation[0])
#get all quotations in draft state
def get_quotation_in_draft():
return frappe.db.sql("""select name from `tabQuotation` where docstatus=0""",as_list=1)
#get all quotations that were used during last scheduler event for validation
def get_quotation_used(quotation):
return frappe.db.sql("""select quotation from `tabQuotation Used` where quotation='%s'"""%(quotation),as_list=1)
#get details of supplier
def get_suplier_details(item):
item_wrapper = frappe.get_doc("Item", item)
return frappe.db.sql("""select supplier_name from `tabSupplier` where supplier_name in(select parent from `tabSupplier Brands` where brand='%s') and
supplier_name in(select parent from `tabSupplier Item Groups` where item_group='%s')"""%(item_wrapper.brand,item_wrapper.item_group),as_list=1)
#create supplier quotation
def make_supplier_quotation(item,supplier):
quotation_exists=check_quotation_exists(supplier)
if quotation_exists:
if not frappe.db.get_value('Supplier Quotation Item',{'item_code':item[0],'parent':quotation_exists},'name'):
update_supplier_items(item,quotation_exists)
else:
update_qty_quotation(quotation_exists ,item)
else:
new_supplier_quotaion(supplier,item)
#check if a draft quotation already exists for the supplier
def check_quotation_exists(supplier):
return frappe.db.get_value('Supplier Quotation',{'supplier':supplier,'docstatus':0},'name')
#create new supplier quotation
def new_supplier_quotaion(supplier,item):
item_wrapper = frappe.get_doc("Item", item[0])
sq=frappe.new_doc('Supplier Quotation')
sq.supplier=supplier
sq.append("items", {
"doctype": "Supplier Quotation Item",
"item_code": item[0],
"item_name": item_wrapper.item_name,
"description": item_wrapper.description,
"uom": item_wrapper.stock_uom,
"item_group": item_wrapper.item_group,
"brand": item_wrapper.brand,
"qty": item[1],
"base_rate":0,
"base_amount":0,
"manufacturer_pn":item_wrapper.manufacturer_pn,
"oem_part_number":item_wrapper.oem_part_number
})
sq.save(ignore_permissions=True)
#Add item to existing supplier quotation
def update_supplier_items(item,name):
item_wrapper = frappe.get_doc("Item", item[0])
idx=frappe.db.sql("""select ifnull(max(idx),0)+1 as idx from `tabSupplier Quotation Item` where parent='%s'"""%(name),as_list=1)
sqi=frappe.new_doc('Supplier Quotation Item')
sqi.idx=idx[0][0]
sqi.item_code=item[0]
sqi.item_name=item_wrapper.item_name
sqi.description=item_wrapper.description
sqi.manufacturer_pn=item_wrapper.manufacturer_pn
sqi.oem_part_number=item_wrapper.oem_part_number
sqi.uom=item_wrapper.stock_uom
sqi.brand=item_wrapper.brand
sqi.qty=item[1]
sqi.base_rate=0
sqi.base_amount=0
sqi.item_group=item_wrapper.item_group
sqi.parentfield='items'
sqi.parenttype='Supplier Quotation'
sqi.parent=name
sqi.save(ignore_permissions=True)
#if item in supplier quotation exists update qty
def update_qty_quotation(name,item):
frappe.db.sql("""update `tabSupplier Quotation Item` set qty=qty+%s where parent='%s' and item_code='%s'"""%(item[1],name,item[0]))
#record the quotation as used so it is skipped in future scheduler runs
def update_used_quotation(quotation):
if not frappe.db.get_value("Quotation Used",{"quotation":quotation},"quotation"):
uq=frappe.new_doc('Used Quotation')
uq.save(ignore_permissions=True)
qu=frappe.new_doc('Quotation Used')
qu.quotation=quotation
qu.parentfield='quotation_used'
qu.parenttype='Used Quotation'
qu.parent=uq.name
qu.save(ignore_permissions=True)
#returns query data
@frappe.whitelist()
def get_details(doc):
import json
doc = json.loads(doc)
condition=get_query(doc)
result = frappe.db.sql(condition,as_list=1)
data = previous_ordered_status(doc, result)
return data
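#Hedged usage sketch (values are placeholders): the client passes the form state as a
#JSON string, e.g. get_details(json.dumps({"customer": "CUST-0001", "item_groups": "Filters",
#"part_no": "ITEM-0001", "previously_ordered_only": 0})); each returned row is
#['', item_code, brand, item_group/performance, previously-ordered flag, batch, rate, qty].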
#check whether item previously ordered
@frappe.whitelist()
def previous_ordered_status(doc, result):
query_data = []
for data in result:
for q in range(0,len(data)):
item = data[1]
if q == 4:
if not doc.get('previously_ordered_only'):
data[q] = get_status(doc, item)
else:
data[q] = 1
query_data.append(data)
return query_data
#get document status
@frappe.whitelist()
def get_status(doc, item):
data = 0
status = frappe.db.sql(""" select ifnull(`tabSales Order`.docstatus,0) from `tabSales Order`, `tabSales Order Item` where `tabSales Order`.name= `tabSales Order Item`.parent
and `tabSales Order`.customer='%s'
and `tabSales Order Item`.item_code='%s'
and `tabSales Order`.docstatus=1 """%(doc.get('customer'),item))
if status:
data = 1
return data
#build query
@frappe.whitelist()
def get_query(doc):
column = get_columns(doc)
table = get_tables(doc)
condition = get_conditions(doc)
return column + ' ' + table + ' ' + condition
#build columns
@frappe.whitelist()
def get_columns(doc):
column = 'ifnull(`tabItem`.item_group,"")'
if doc.get('item_groups'):
column = 'ifnull(`tabWebsite Item Group`.performance,"")'
return """ select DISTINCT '',ifnull(`tabQuote Item`.item_code,"") ,
ifnull(`tabQuote Item`.brand,"") ,
"""+column+""",
'',
ifnull(`tabBatchwise Purchase Rate`.batch,""),
format(ifnull(`tabBatchwise Purchase Rate`.rate,(select price_list_rate from `tabItem Price` where price_list='EC - Rate of Purchase' and item_code=`tabQuote Item`.item_code)),2) ,
(select format(ifnull(sum(actual_qty),0),2) from `tabStock Ledger Entry` where item_code=`tabQuote Item`.item_code and batch_no=`tabBatchwise Purchase Rate`.batch)"""
#returns tables required
@frappe.whitelist()
def get_tables(doc):
table = """ `tabItem` INNER JOIN `tabQuote Item` ON
`tabQuote Item`.parent = `tabItem`.name """
if doc.get('item_groups') and doc.get('part_no'):
table = """ `tabItem` INNER JOIN `tabQuote Item` ON
`tabQuote Item`.parent = `tabItem`.name INNER JOIN
`tabWebsite Item Group` ON `tabQuote Item`.parent = `tabWebsite Item Group`.parent """
elif doc.get('item_groups'):
table = """ `tabWebsite Item Group` INNER JOIN `tabQuote Item` ON
`tabQuote Item`.parent = `tabWebsite Item Group`.parent"""
return """ FROM """+table+""" LEFT JOIN
`tabItem Price` ON `tabQuote Item`.item_code = `tabItem Price`.item_code
LEFT JOIN
`tabStock Ledger Entry` ON `tabStock Ledger Entry`.item_code = `tabItem Price`.item_code and `tabStock Ledger Entry`.is_cancelled='No'
LEFT JOIN
`tabBatchwise Purchase Rate` ON `tabBatchwise Purchase Rate`.parent = `tabItem Price`.name
LEFT JOIN
`tabSales Order Item` ON `tabSales Order Item`.item_code = `tabQuote Item`.item_code
LEFT JOIN
`tabSales Order` ON `tabSales Order`.name = `tabSales Order Item`.parent """
#returns conditions for query
@frappe.whitelist()
def get_conditions(doc):
previous_ordered = condition = '1=1'
if doc.get('item_groups') and doc.get('part_no'):
condition = """ `tabItem`.name='%s' and `tabWebsite Item Group`.item_group = '%s' """%(doc.get('part_no'),doc.get('item_groups'))
elif doc.get('item_groups'):
condition = """ `tabWebsite Item Group`.item_group = '%s' """%(doc.get('item_groups'))
elif doc.get('part_no'):
condition = """ `tabItem`.name='%s' """%(doc.get('part_no'))
if doc.get('previously_ordered_only') == 1:
previous_ordered = """`tabSales Order`.customer= '%s' and ifnull(`tabSales Order`.docstatus,0) = 1 """%(doc.get('customer'))
return """ where """+condition+""" and `tabItem Price`.price_list='EC - Rate of Purchase'
and """+previous_ordered+""" """
def validate_price_list(doc, method):
for d in doc.get('items'):
if d.batch_no:
rate = frappe.db.sql("select a.rate from `tabBatchwise Purchase Rate` a inner join `tabItem Price` b on a.parent = b.name and b.item_code = '%s' and a.batch = '%s'"%(d.item_code,d.batch_no),as_list=1)
if rate and flt(rate[0][0]) > flt(d.rate):
frappe.throw(_('Item Code {0} rate must be greater than rate of price list EC Purchase of Rate').format(d.item_code))
def set_price_list(doc, method):
doc.competitor = frappe.db.get_value('Price List',doc.price_list,'competitor')
frappe.db.sql("update `tabItem Price` set competitor=%s where name='%s'"%(cint(doc.competitor),doc.name))
#create purchase orders from submitted sales orders
@frappe.whitelist()
def generate_po():
sales_orders=get_submitted_sales_orders()
if sales_orders:
for sales_order in sales_orders:
if not frappe.db.get_value("Sales Order Used",{"sales_order":sales_order[0]},"sales_order"):
doc = frappe.get_doc('Sales Order', sales_order[0])
for item in doc.get('items'):
if cint(frappe.db.get_value('Item', item.item_code, 'is_stock_item')) == 1:
stock_balance=get_stock_balance(item)
qty = (flt(item.qty) - flt(stock_balance[0][0])) or 0.0
if flt(qty) > 0.0:
supplier=get_supplier_details(item.item_code)
if supplier and supplier[0][1]:
make_po(supplier,item,sales_order[0], qty)
update_used(sales_order[0])
#returns submitted sales orders
def get_submitted_sales_orders():
return frappe.db.sql("""select name from `tabSales Order` where docstatus=1""",as_list=1)
#returns stock balance for item
def get_stock_balance(args):
return frappe.db.sql("""select actual_qty from `tabBin` where item_code='{0}'
and warehouse = '{1}'""".format(args.item_code, args.warehouse),as_list=1)
#returns least item price list rate and supplier name
def get_supplier_details(item):
return frappe.db.sql("""select min(price_list_rate),price_list from `tabItem Price` where item_code='%s' and buying=1 and price_list in (select name from tabSupplier) group by price_list order by price_list_rate limit 1"""%(item),as_list=1)
def get_price_list_rate(item,supplier):
rate = frappe.db.sql("""select ifnull(price_list_rate,0) from `tabItem Price` where item_code='%s' and buying=1 and price_list='%s'"""%(item,supplier),as_list=1)
if rate:
return rate[0][0]
else:
return 0
#returns sales orders from which purchase orders were created
def get_sales_order_used(sales_order):
return frappe.db.sql("""select sales_order from `tabSales Order Used` where sales_order='%s'"""%(sales_order[0]),as_list=1)
#makes new po or updates existing
def make_po(supplier,item,sales_order, qty):
po_exists=check_po_exists(supplier[0][1])
#price_rate=get_price_list_rate(item[0],supplier[0][1])
if po_exists:
item_exists=frappe.db.get_value('Purchase Order Item',{'item_code':item.item_code,'parent':po_exists},'name')
if not item_exists:
add_po_items(po_exists,item,sales_order,supplier[0][0], qty)
else:
update_qty(po_exists,item,sales_order,supplier[0][0], qty)
else:
new_po(supplier,item,supplier[0][0],sales_order, qty)
#check if po exists
def check_po_exists(supplier):
return frappe.db.get_value('Purchase Order',{'supplier':supplier,'docstatus':0},'name')
#creates new purchase order
def new_po(supplier,item,price_rate,sales_order, qty):
item_wrapper=frappe.get_doc("Item", item.item_code)
po=frappe.new_doc('Purchase Order')
po.supplier=supplier[0][1]
po.currency = frappe.db.get_value('Supplier', supplier[0][1], 'default_currency') or frappe.db.get_value('Global Defaults', None, 'default_currency')
po.plc_conversion_rate = frappe.db.get_value('Currency Exchange', {'from_currency': po.currency}, 'exchange_rate')
po.buying_price_list=supplier[0][1]
po.append("items", {
"doctype": "Purchase Order Item",
"item_code": item.item_code,
"item_name": item_wrapper.item_name,
"description": item_wrapper.description,
"uom": item_wrapper.stock_uom,
"item_group": item_wrapper.item_group,
"brand": item_wrapper.brand,
"qty":qty ,
"base_rate":0,
"base_amount":0,
"manufacturer_pn":item_wrapper.manufacturer_pn,
"oem_part_number":item_wrapper.oem_part_number,
"price_list_rate":price_rate,
"schedule_date":'08-12-2014'
})
po.save(ignore_permissions=True)
#maintains sales orders which are used in process
def update_used(sales_order):
if not frappe.db.get_value("Sales Order Used",{"sales_order":sales_order},"sales_order"):
uso=frappe.new_doc('Used Sales Order')
uso.save(ignore_permissions=True)
sopo=frappe.new_doc('Sales Order Used')
sopo.sales_order=sales_order
sopo.parentfield='sales_order_used'
sopo.parenttype='Used Sales Order'
sopo.parent=uso.name
sopo.save(ignore_permissions=True)
#update qty if item in purchase order exists
def update_qty(name,item,sales_order,price_rate, qty):
frappe.db.sql("""update `tabPurchase Order Item` set qty=qty+%s where parent='%s' and item_code='%s'"""%(qty,name,item.item_code))
#update purchase order with item
def add_po_items(name,item,sales_order,price_rate, qty):
idx=frappe.db.sql("""select ifnull(max(idx),0)+1 as idx from `tabPurchase Order Item` where parent='%s'"""%(name),as_list=1)
item_wrapper = frappe.get_doc("Item", item.item_code)
poi=frappe.new_doc('Purchase Order Item')
poi.idx=idx[0][0]
poi.item_code=item.item_code
poi.item_name=item_wrapper.item_name
poi.description=item_wrapper.description
poi.manufacturer_pn=item_wrapper.manufacturer_pn
poi.oem_part_number=item_wrapper.oem_part_number
poi.uom=item_wrapper.stock_uom
poi.brand=item_wrapper.brand
poi.qty= qty
poi.price_list_rate=price_rate
poi.base_rate=0
poi.base_amount=0
poi.schedule_date='08-12-2014'
poi.conversion_factor=1
poi.item_group=item_wrapper.item_group
poi.parentfield='items'
poi.parenttype='Purchase Order'
poi.parent=name
poi.save(ignore_permissions=True)
#to make an opportunity from a submitted sales order
@frappe.whitelist()
def make_oppurtunity(source_name, target_doc=None):
def set_missing_values(source, target):
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target_doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Opportunity",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Opportunity Item",
"field_map": {
"rate": "rate",
"name": "prevdoc_detail_docname",
"parent": "against_sales_order",
},
}
}, target_doc, set_missing_values)
return target_doc
def update_item_price_rate_pi(doc,method):
# update the rate if new rate is less than existing item rate
for item in doc.get('items'):
if item.item_code:
rate=get_ec_rate(item.item_code)
if rate and (item.rate < rate):
frappe.db.sql("""update `tabItem Price`
set price_list_rate=%s where item_code='%s'
and price_list='EC - Rate of Purchase'"""%(item.rate,item.item_code))
def update_item_price_sq(doc,method):
for d in doc.get('items'):
rate=get_ec_rate(d.item_code)
if rate:
if d.rate < rate:
frappe.db.sql("""update `tabItem Price`
set price_list_rate='%s'
where price_list='EC - Rate of Purchase'
and item_code='%s' """%(d.rate,d.item_code))
frappe.db.sql("commit")
def update_item_price_ip(doc,method):
rate= get_ec_rate(doc.item_code)
if rate:
if doc.price_list_rate < rate:
frappe.db.sql("""update `tabItem Price`
set price_list_rate='%s' where price_list='EC - Rate of Purchase'
and item_code='%s' """%(doc.price_list_rate,doc.item_code))
frappe.db.sql("commit")
else:
pass
def get_ec_rate(item_code):
return frappe.db.get_value("Item Price",{"item_code":item_code,"price_list":"EC - Rate of Purchase"},"price_list_rate")
def update_item_price_on_pi_cl(doc,method):
for item in doc.get('items'):
if item.item_code:
rate=get_rate(item.item_code)
if rate:
frappe.db.sql("""update `tabItem Price`
set price_list_rate=%s
where item_code='%s'
and price_list='EC - Rate of Purchase'"""%(rate[0][0],item.item_code))
def update_item_price_on_sq_cl(doc,method):
for item in doc.get('item_list'):
if item.item_code:
rate=get_rate(item.item_code)
if rate:
frappe.db.sql("""update `tabItem Price`
set price_list_rate=%s
where item_code='%s' and price_list='%s'"""%(rate[0][0],item.item_code,doc.buying_price_list))
def get_rate(item_code):
return frappe.db.sql("""select least(
CASE WHEN item_rate = 0 THEN GREATEST(item_rate,quotation_rate,purchase_rate)+1 ELSE item_rate END,
CASE WHEN quotation_rate= 0 THEN GREATEST(item_rate,quotation_rate,purchase_rate)+1 ELSE quotation_rate END,
CASE WHEN purchase_rate = 0 THEN GREATEST(item_rate,quotation_rate,purchase_rate)+1 ELSE purchase_rate END) as rate from (select
ifnull(min(nullif(ip.price_list_rate,0)),0) as item_rate,
ifnull(min(nullif(sq.price_list_rate,0)),0) as quotation_rate,
ifnull(min(nullif(pi.price_list_rate,0)),0) as purchase_rate from `tabItem` im
left join `tabItem Price` ip on ip.item_code=im.item_code
left join `tabSupplier Quotation Item` sq on sq.item_code=im.item_code and sq.docstatus=1
left join `tabPurchase Invoice Item` pi on pi.item_code=im.item_code and pi.docstatus=1
where im.item_code='%s' group by im.item_code)x"""%(item_code),as_list=1)
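#Worked example for the query above: with item_rate=0, quotation_rate=12 and
#purchase_rate=15, each zero is swapped for GREATEST(...)+1 = 16, so
#least(16, 12, 15) = 12, i.e. the lowest non-zero rate found across Item Price,
#submitted Supplier Quotation Items and submitted Purchase Invoice Items.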
def check_eq_item_selected_twice(doc,method):
item_list = []
for row in doc.get('engine_compatibility_'):
if row.item_code in item_list:
frappe.throw(_("Duplicate entry for Item {0} in Part Equivalency table ").format(row.item_code))
item_list.append(row.item_code)
def auto_create_self_item_entry(doc,method):
result = frappe.db.sql(""" select name from `tabQuote Item` where parent='{0}' and item_code='{1}' """.format(doc.item_code,doc.item_code),as_list=1)
if not result:
doc.append('engine_compatibility_',{
"item_code":doc.item_code,
"item_name":doc.item_name,
"brand":doc.brand,
"item_group":doc.item_group
})
doc.save()
frappe.db.commit()
def create_eq_item_entry(doc,method):
for row in doc.get('engine_compatibility_'):
result = frappe.db.sql(""" select name from `tabQuote Item` where parent='{0}' and item_code='{1}' """.format(row.item_code,doc.item_code),as_list=1)
if not result:
item_doc = frappe.get_doc('Item',row.item_code)
item_doc.append('engine_compatibility_',{
"item_code":doc.item_code,
"item_name":doc.item_name,
"brand":doc.brand,
"item_group":doc.item_group
})
item_doc.save()
frappe.db.commit()
@frappe.whitelist()
def get_item_code(row_name):
if row_name:
return frappe.db.get_value('Quote Item',row_name,'item_code')
def delete_eq_item_entry(doc,method):
if doc.deleted_eq_item:
deleted_eq_item = cstr(doc.deleted_eq_item).split(',')
for d in deleted_eq_item:
my_doc = frappe.get_doc('Item',d)
for row in my_doc.get('engine_compatibility_'):
if row.item_code == doc.item_code:
my_doc.get('engine_compatibility_').remove(row)
my_doc.save()
doc.deleted_eq_item = ''
@frappe.whitelist()
def get_alternative_item_details(doc):
doc=json.loads(doc)
item_dict = {}
alteritem_dic={}
if doc:
for d in doc.get('items'):
result = {}
if d.get("sales_item_name"):
result = frappe.db.sql(""" SELECT
distinct(qi.item_code),
qi.parent,
coalesce(bi.actual_qty,0) as actual_qty,
ifnull(ite.item_name,'') as item_name,
ifnull(ite.manufacturer_pn,'') as manufacturer_pn,
ifnull(ite.oem_part_number,'') as oem_part_number,
ifnull(ite.description,'') as description,
coalesce( bi.warehouse,'') as warehouse,
ifnull(ite.stock_uom,'') as stock_uom
FROM
`tabQuote Item` qi join
`tabBin` bi
on
qi.item_code = bi.item_code join `tabItem` ite
on
ite.item_code = bi.item_code
where
qi.parent='{0}'
AND bi.warehouse='{1}' AND bi.actual_qty!=0 AND qi.item_code!='{2}' """.format(d["sales_item_name"],d["warehouse"],d["sales_item_name"]),as_dict=1)
alteritem_dic[d["sales_item_name"]]=result
item_dict[d["sales_item_name"]] = d["qty"]
return alteritem_dic,item_dict
def update_sales_item_name(doc,method):
for row in doc.get('items'):
row.sales_item_name = row.item_code
row.old_oem = row.current_oem
@frappe.whitelist()
def get_roles_for_so_cancellation():
role_list = frappe.db.sql("select roles from `tabAssign Roles Permissions`",as_list=1)
return role_list
@frappe.whitelist()
def custom_get_linked_docs(doctype, name, metadata_loaded=None):
results = get_linked_docs(doctype,name,metadata_loaded)
my_dict = make_unique(results)
cancel_linked_docs(my_dict,doctype,name)
return 0
def make_unique(results):
if results:
for key,value in results.items():
my_list = []
for my_key in value:
if my_key['docstatus'] == 1:
my_list.append(my_key['name'])
my_list = list(set(my_list))
results[key] = my_list
return results
def cancel_linked_docs(my_dict,doctype,name):
if my_dict:
for doc in ['Journal Voucher','Sales Invoice','Packing Slip','Delivery Note']:
if my_dict.get(doc):
if doc == 'Sales Invoice':
check_link_of_sales_invoice(doc,my_dict.get(doc))
for curr_name in my_dict.get(doc):
cancel_doc(doc,curr_name)
cancel_sales_order_self(doctype,name)
def cancel_doc(doc,name):
my_doc = frappe.get_doc(doc,name)
my_doc.cancel()
def check_link_of_sales_invoice(doc,si_list):
for sales_invoice in si_list:
jv_list = frappe.db.sql(""" select distinct(jvt.parent) from `tabJournal Voucher Detail` jvt join `tabJournal Voucher` jv on jv.name=jvt.parent where jvt.against_invoice='{0}' and jv.docstatus= 1 """.format(sales_invoice),as_list=1)
if jv_list:
cancel_jv('Journal Voucher',jv_list)
def cancel_jv(doc_name,jv_list):
for jv in jv_list:
my_doc = frappe.get_doc(doc_name,jv[0])
my_doc.cancel()
def cancel_sales_order_self(doctype,name):
my_doc = frappe.get_doc(doctype,name)
my_doc.cancel()
@frappe.whitelist()
def set_alternative_item_details(alter_dic,doc):
if alter_dic:
alter_dic=json.loads(alter_dic)
#doc=json.loads(doc)
c_doc=frappe.get_doc("Delivery Note",doc)
for d in c_doc.get('items'):
if alter_dic.has_key(d.item_code):
original_item=d.item_code
alter_item=alter_dic.get(d.item_code)["item_code"]
aitem_doc=frappe.get_doc("Item",alter_item)
d.item_code = aitem_doc.item_code
d.item_name = aitem_doc.item_name
d.manufacturer_pn = aitem_doc.manufacturer_pn
d.description = aitem_doc.description
d.old_oem = d.current_oem
d.current_oem = aitem_doc.oem_part_number
d.stock_uom = aitem_doc.stock_uom
d.sales_item_name = d.item_code
if alter_dic[original_item]["qty"] < d.qty:
d.actual_qty =alter_dic.get(original_item)["qty"]
if not (aitem_doc.oem_part_number == d.old_oem):
d.oem_part_number = aitem_doc.oem_part_number
else:
d.oem_part_number = cstr(aitem_doc.oem_part_number)+"(Same as %s)"%d.oem_part_number
c_doc.save(ignore_permissions=True)
return c_doc | agpl-3.0 | -5,378,458,708,806,662,000 | 36.340176 | 241 | 0.693619 | false | 2.875776 | false | false | false |
solidfire/Python-CLI | element/cli/utils.py | 2 | 16761 | import jsonpickle
import json as serializer
from pkg_resources import Requirement, resource_filename
import os
import csv
from Crypto.Cipher import ARC4
import base64
import socket
import getpass
from solidfire.factory import ElementFactory
from filelock import FileLock
import sys
def kv_string_to_dict(kv_string):
new_dict = {}
items = kv_string.split(',')
for item in items:
kvs = item.split('=')
        new_dict[kvs[0]] = kvs[1]
    return new_dict
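# Example (keys and values are illustrative):
#   kv_string_to_dict("mvip=10.0.0.1,port=443") -> {'mvip': '10.0.0.1', 'port': '443'}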
def print_result(objs, log, as_json=False, as_pickle=False, depth=None, filter_tree=None):
# There are 3 acceptable parameter sets to provide:
# 1. json=True, depth=None, filter_tree=None
# 2. json=False, depth=#, filter_tree=None
# 3. json=False, depth=#, filter_tree=acceptable string
# Error case
if as_json and (depth is not None or filter_tree is not None):
log.error("If you choose to print it as json, do not provide a depth or filter. Those are for printing it as a tree.")
exit()
"""
SDK1.6 Note:
Since print_tree is not supported in 1.6, when both the available output formats
json and pickle formats are set to False, change the default output format (pickle) to True.
"""
if as_json == False and as_pickle == False:
as_pickle = True
# If json is true, we print it as json and return:
if as_json == True or as_pickle == True:
print_result_as_json(objs, as_pickle)
return
"""
SDK1.6 Note:
Commenting out these lines as print_tree is not supported in 1.6.
"""
"""
# If we have a filter, apply it.
if filter_tree is not None:
try:
objs_to_print = filter_objects_from_simple_keypaths(objs, filter_tree.split(','))
except Exception as e:
log.error(e.args[0])
exit(1)
else:
objs_to_print = objs
# Set up a default depth
if depth is None:
depth = 10
# Next, print the tree to the appropriate depth
print_result_as_tree(objs_to_print, depth)
"""
def print_result_as_json(objs, pickle=False):
#print(jsonpickle.encode(objs))
nestedDict = serializer.loads(jsonpickle.encode(objs))
filteredDict = type(nestedDict)()
if(pickle==False):
remove_pickling(nestedDict, filteredDict)
else:
filteredDict = nestedDict
print(serializer.dumps(filteredDict,indent=4))
def remove_pickling(nestedDict, filteredDict):
if type(nestedDict) is dict:
#foreach key, if list, recurse, if dict, recurse, if string recurse unless py/obj is key.
for key in nestedDict:
if key == "py/object":
continue
else:
filteredDict[key] = type(nestedDict[key])()
filteredDict[key] = remove_pickling(nestedDict[key], filteredDict[key])
return filteredDict
if type(nestedDict) is list:
# foreach item
for i in range(len(nestedDict)):
filteredDict.append(type(nestedDict[i])())
filteredDict[i] = remove_pickling(nestedDict[i], filteredDict[i])
return filteredDict
return nestedDict
"""
SDK1.6 Note:
Commenting this as print_tree is not supported in SDK 1.6.
"""
def get_result_as_tree(objs, depth=1, currentDepth=0, lastKey = ""):
print("print_tree is not supported in SDK1.6")
"""stringToReturn = ""
if(currentDepth > depth):
return "<to see more details, increase depth>\n"
if(type(objs) is str or type(objs) is bool or type(objs) is int or type(objs) is type(u'') or objs is None or type(objs) is float):# or (sys.version_info[0]<3 and type(objs) is long)):
return str(objs) + "\n"
if(type(objs) is list):
stringToReturn += "\n"
for i in range(len(objs)):
obj = objs[i]
stringToReturn += currentDepth*" "+get_result_as_tree(obj, depth, currentDepth+1, lastKey)
return stringToReturn
if(isinstance(objs, dict)):
stringToReturn += "\n"
for key in objs:
stringToReturn += currentDepth*" "+key+": "+get_result_as_tree(objs[key], depth, currentDepth+1, key)
return stringToReturn
if (isinstance(objs, tuple)):
return str(objs[0]) + "\n"
if(objs is None):
return stringToReturn
mydict = objs.__dict__
stringToReturn += "\n"
for key in mydict:
stringToReturn += currentDepth*" "
stringToReturn += key+": "+get_result_as_tree(mydict[key], depth, currentDepth+1, key)
return stringToReturn
"""
def filter_objects_from_simple_keypaths(objs, simpleKeyPaths):
# First, we assemble the key paths.
# They start out like this:
# [accouts.username, accounts.initiator_secret.secret, accounts.status]
# and become like this:
# {"accounts":{"username":True, "initiator_secret":{"secret":True}, "status":True}
keyPaths = dict()
for simpleKeyPath in simpleKeyPaths:
currentLevel = keyPaths
keyPathArray = simpleKeyPath.split('.')
for i in range(len(keyPathArray)):
if(i<(len(keyPathArray) - 1)):
if currentLevel.get(keyPathArray[i]) is None:
currentLevel[keyPathArray[i]] = dict()
else:
currentLevel[keyPathArray[i]] = True
currentLevel = currentLevel[keyPathArray[i]]
# Then we pass it in to filter objects.
return filter_objects(objs, keyPaths)
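# Illustrative transformation performed above (field names are placeholders):
#   ['accounts.username', 'accounts.initiator_secret.secret', 'accounts.status']
# becomes
#   {'accounts': {'username': True,
#                 'initiator_secret': {'secret': True},
#                 'status': True}}
# before being handed to filter_objects().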
# Keypaths is arranged as follows:
# it is a nested dict with the order of the keys.
def filter_objects(objs, keyPaths):
# Otherwise, we keep recursing deeper.
# Because there are deeper keys, we know that we can go deeper.
# This means we are dealing with either an array or a dict.
# If keyPaths looks like this:
# {"username": True, "volumes": {"Id": True}}
# The keys in this sequence will be username and volumes.
# When we recurse into volumes, the keys will be Id.
finalFilteredObjects = dict()
if keyPaths == True and type(objs) is not list:
return objs
# If we've found a list, we recurse deeper to pull out the objs.
# We do not advance our keyPath recursion because this is just a list.
if type(objs) is list:
# If we have a list of objects, we will need to assemble and return a list of stuff.
filteredObjsDict = [None]*len(objs)
for i in range(len(objs)):
# Each element could be a string, dict, or list.
filteredObjsDict[i] = filter_objects(objs[i], keyPaths)
return filteredObjsDict
dictionaryOfInterest = None
if type(objs) is dict:
dictionaryOfInterest = objs
else:
dictionaryOfInterest = objs.__dict__
for key in keyPaths:
# If we've found a dict, we recurse deeper to pull out the objs.
# Because this is a dict, we must advance our keyPaths recursion.
# Consider the following example:
if key not in dictionaryOfInterest:
raise ValueError("'"+key+"' is not a valid key for this level. Valid keys are: "+','.join(dictionaryOfInterest.keys()))
finalFilteredObjects[key] = filter_objects(dictionaryOfInterest[key], keyPaths[key])
return finalFilteredObjects
def print_result_as_table(objs, keyPaths):
filteredDictionary = filter_objects(objs, keyPaths)
def print_result_as_tree(objs, depth=1):
print(get_result_as_tree(objs, depth))
def establish_connection(ctx):
# Verify that the mvip does not contain the port number:
if ctx.mvip and ":" in ctx.mvip:
ctx.logger.error('Please provide the port using the port parameter.')
exit(1)
cfg = None
# Arguments take precedence regardless of env settings
if ctx.mvip:
if ctx.username is None:
ctx.username = getpass.getpass("Username:")
if ctx.password is None:
ctx.password = getpass.getpass("Password:")
cfg = {'mvip': ctx.mvip,
'username': "b'"+encrypt(ctx.username).decode('utf-8')+"'",
'password': "b'"+encrypt(ctx.password).decode('utf-8')+"'",
'port': ctx.port,
'url': 'https://%s:%s' % (ctx.mvip, ctx.port),
'version': ctx.version,
'verifyssl': ctx.verifyssl,
'timeout': ctx.timeout}
try:
ctx.element = ElementFactory.create(cfg["mvip"],decrypt(cfg["username"]),decrypt(cfg["password"]),port=cfg["port"],version=cfg["version"],verify_ssl=cfg["verifyssl"],timeout=cfg["timeout"])
ctx.version = ctx.element._api_version
cfg["version"] = ctx.element._api_version
except Exception as e:
ctx.logger.error(e.__str__())
exit(1)
# If someone accidentally passed in an argument, but didn't specify everything, throw an error.
elif ctx.username or ctx.password:
ctx.logger.error("In order to manually connect, please provide an mvip, a username, AND a password")
# If someone asked for a given connection or we need to default to using the connection at index 0 if it exists:
else:
if ctx.connectionindex is None and ctx.name is None:
cfg = get_default_connection(ctx)
elif ctx.connectionindex is not None:
connections = get_connections(ctx)
if int(ctx.connectionindex) > (len(connections)-1) or int(ctx.connectionindex) < (-len(connections)):
ctx.logger.error("Connection "+str(ctx.connectionindex)+" Please provide an index between "+str(-len(connections))+" and "+str(len(connections)-1))
exit(1)
cfg = connections[ctx.connectionindex]
elif ctx.name is not None:
connections = get_connections(ctx)
filteredCfg = [connection for connection in connections if connection["name"] == ctx.name]
if(len(filteredCfg) > 1):
ctx.logger.error("Your connections.csv file has become corrupted. There are two connections of the same name.")
exit()
if(len(filteredCfg) < 1):
ctx.logger.error("Could not find a connection named "+ctx.name)
exit()
cfg = filteredCfg[0]
# If we managed to find the connection we were looking for, we must try to establish the connection.
if cfg is not None:
# Finally, we need to establish our connection via elementfactory:
try:
if int(cfg["port"]) != 443:
address = cfg["mvip"] + ":" + cfg["port"]
else:
address = cfg["mvip"]
ctx.element = ElementFactory.create(address, decrypt(cfg["username"]), decrypt(cfg["password"]), cfg["version"], verify_ssl=cfg["verifyssl"])
if int(cfg["timeout"]) != 30:
ctx.element.timeout(cfg["timeout"])
except Exception as e:
ctx.logger.error(e.__str__())
ctx.logger.error("The connection is corrupt. Run 'sfcli connection prune' to try and remove all broken connections or use 'sfcli connection remove -n name'")
ctx.logger.error(cfg)
exit(1)
# If we want the json output directly from the source, we'll have to override the send request method in the sdk:
# This is so that we can circumvent the python objects and get exactly what the json-rpc returns.
if ctx.json and ctx.element:
def new_send_request(*args, **kwargs):
return ctx.element.__class__.__bases__[0].send_request(ctx.element, return_response_raw=True, *args, **kwargs)
ctx.element.send_request = new_send_request
# The only time it is none is when we're asking for help or we're trying to store a connection.
# If that's not what we're doing, we catch it later.
if cfg is not None:
cfg["port"] = int(cfg["port"])
ctx.cfg = cfg
cfg["name"] = cfg.get("name", "default")
if not ctx.nocache:
write_default_connection(ctx, cfg)
if ctx.element is None:
ctx.logger.error("You must establish at least one connection and specify which you intend to use.")
exit()
# this needs to be atomic.
def get_connections(ctx):
connectionsCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "connections.csv")
connectionsLock = resource_filename(Requirement.parse("solidfire-cli"), "connectionsLock")
if os.path.exists(connectionsCsvLocation):
try:
with FileLock(connectionsLock):
with open(connectionsCsvLocation, 'r') as connectionFile:
connections = list(csv.DictReader(connectionFile, delimiter=','))
except Exception as e:
ctx.logger.error("Problem reading "+connectionsCsvLocation+" because: "+str(e.args)+" Try changing the permissions of that file.")
exit(1)
else:
connections = []
for connection in connections:
connection["version"] = float(connection["version"])
if connection.get("verifyssl") == "True":
connection["verifyssl"] = True
else:
connection["verifyssl"] = False
return connections
def write_connections(ctx, connections):
try:
connectionsCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "connections.csv")
connectionsLock = resource_filename(Requirement.parse("solidfire-cli"), "connectionsLock")
with open(connectionsCsvLocation, 'w') as f:
with FileLock(connectionsLock):
w = csv.DictWriter(f, ["name","mvip","port","username","password","version","url","verifyssl","timeout"], lineterminator='\n')
w.writeheader()
for connection in connections:
if connection is not None:
w.writerow(connection)
except Exception as e:
ctx.logger.error("Problem writing "+ connectionsCsvLocation + " " + str(e.args)+" Try changing the permissions of that file.")
exit(1)
def get_default_connection(ctx):
connectionCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "default_connection.csv")
if os.path.exists(connectionCsvLocation):
defaultLockLocation = resource_filename(Requirement.parse("solidfire-cli"), "defaultLock")
try:
with FileLock(defaultLockLocation):
with open(connectionCsvLocation) as connectionFile:
connection = list(csv.DictReader(connectionFile, delimiter=','))
except Exception as e:
ctx.logger.error("Problem reading "+connectionCsvLocation+" because: "+str(e.args)+" Try changing the permissions of that file or specifying credentials.")
exit(1)
if len(connection)>0:
connection[0]["version"] = float(connection[0]["version"])
if(connection[0]["verifyssl"] == "True"):
connection[0]["verifyssl"] = True
else:
connection[0]["verifyssl"] = False
return connection[0]
else:
os.remove(defaultLockLocation)
ctx.logger.error("Please provide connection information. There is no connection info in cache at this time.")
exit(1)
else:
ctx.logger.error("Please provide connection information. There is no connection info in cache at this time.")
exit(1)
def write_default_connection(ctx, connection):
connectionCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "default_connection.csv")
try:
defaultLockLocation = resource_filename(Requirement.parse("solidfire-cli"), "defaultLock")
with FileLock(defaultLockLocation):
with open(connectionCsvLocation, 'w') as f:
w = csv.DictWriter(f, ["name", "mvip", "port", "username", "password", "version", "url", "verifyssl", "timeout"],
lineterminator='\n')
w.writeheader()
w.writerow(connection)
except Exception as e:
ctx.logger.warning("Problem writing "+ connectionCsvLocation + " " + str(e.args)+" Try using changing the permissions of that file or using the --nocache flag.")
# WARNING! This doesn't actually give us total security. It only gives us obscurity.
def encrypt(sensitive_data):
cipher = ARC4.new(socket.gethostname().encode('utf-8') + "SOLIDFIRE".encode('utf-8'))
encoded = base64.b64encode(cipher.encrypt(sensitive_data.encode('utf-8')))
return encoded
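# Hedged round-trip sketch mirroring how establish_connection() stores credentials:
#   stored = "b'" + encrypt('secret').decode('utf-8') + "'"
#   decrypt(stored) == 'secret'   # decrypt() is defined below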
def decrypt(encoded_sensitive_data):
cipher = ARC4.new(socket.gethostname().encode('utf-8') + "SOLIDFIRE".encode('utf-8'))
decoded = cipher.decrypt(base64.b64decode(encoded_sensitive_data[2:-1]))
return decoded.decode('utf-8') | apache-2.0 | -404,484,761,291,329,300 | 43.818182 | 201 | 0.629557 | false | 4.020389 | false | false | false |
sussexstudent/falmer | falmer/studentgroups/migrations/0005_auto_20170905_1151.py | 1 | 1091 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-05 10:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('matte', '0002_remoteimage'),
('studentgroups', '0004_auto_20170703_1633'),
]
operations = [
migrations.AddField(
model_name='studentgroup',
name='description',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='studentgroup',
name='is_prospective',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='studentgroup',
name='link',
field=models.CharField(default='', max_length=255),
),
migrations.AddField(
model_name='studentgroup',
name='logo',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='matte.MatteImage'),
),
]
| mit | -7,897,777,460,002,123,000 | 28.486486 | 115 | 0.581118 | false | 4.228682 | false | false | false |
modimore/BreezeBlocks | examples/quickstart.py | 1 | 1696 | import sqlite3
from breezeblocks import Database
from breezeblocks.sql import Table
# Setup
db = Database(dsn="Library.sqlite", dbapi_module=sqlite3)
authors = Table("Author", ["id", "name"])
genres = Table("Genre", ["id", "name"])
books = Table("Book", ["id", "author_id", "genre_id", "title"])
# Query
get_all_authors = db.query(authors).get()
get_all_genre_names = db.query(genres.columns["name"]).get()
get_all_book_titles_and_ids = db.query(
books.columns["id"], books.columns["title"]).get()
for author in get_all_authors.execute():
print(author.id, author.name)
for genre in get_all_genre_names.execute():
print(genre.name)
for book in get_all_book_titles_and_ids.execute():
print(book.id, book.title)
# Insert
insert_books = db.insert(books).add_columns(
"author_id", "genre_id", "title").get()
jkr_query = db.query(authors.columns["id"])\
.where(authors.columns["name"] == "J.K. Rowling").get()
jkr_id = jkr_query.execute()[0].id
fantasy_query = db.query(genres.columns["id"])\
.where(genres.columns["name"] == "Fantasy").get()
fantasy_id = fantasy_query.execute()[0].id
insert_books.execute([
(jkr_id, fantasy_id, "Harry Potter and the Deadly Hallows"),
(jkr_id, fantasy_id, "Harry Potter and the Sorceror's Stone")
])
# Update
update_deadly_hallows = db.update(books)\
.set_(books.columns["title"], "Harry Potter and the Deathly Hallows")\
.where(books.columns["title"] == "Harry Potter and the Deadly Hallows")\
.get()
update_deadly_hallows.execute()
# Delete
delete_sorcerors_stone = db.delete(books)\
.where(
books.columns["title"] == "Harry Potter and the Sorceror's Stone"
).get()
delete_sorcerors_stone.execute()
| mit | 5,297,581,521,766,187,000 | 31.615385 | 76 | 0.679245 | false | 2.831386 | false | false | false |
spikeekips/serf-python | test/test_command_auth.py | 1 | 4108 | import serf
import pytest
from _base import FakeClient, FakeConnection
def test_request_auth () :
_body = dict(
AuthKey='auth-key',
)
_request = serf.get_request_class('auth')(**_body)
_request.check(FakeClient(), )
assert _request.is_checked
_body = dict(
AuthKey=1, # `AuthKey` must be str
)
_request = serf.get_request_class('auth')(**_body)
with pytest.raises(serf.InvalidRequest, ) :
_request.check(FakeClient(), )
assert not _request.is_checked
_body = dict( # empty values
)
_request = serf.get_request_class('auth')(**_body)
with pytest.raises(serf.InvalidRequest, ) :
_request.check(FakeClient(), )
assert not _request.is_checked
class AuthFakeConnectionFailed (FakeConnection, ) :
socket_data = (
'\x82\xa5Error\xa0\xa3Seq\x00',
'\x82\xa5Error\xbcInvalid authentication token\xa3Seq\x01',
)
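    # The socket_data bytes above are msgpack-encoded RPC headers; decoded they read
    # {'Error': '', 'Seq': 0} and {'Error': 'Invalid authentication token', 'Seq': 1}.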
def test_response_auth_failed () :
_client = serf.Client(connection_class=AuthFakeConnectionFailed, )
def _callback (response, ) :
assert response.request.command == 'auth'
assert response.error
assert not response.is_success
assert response.body is None
assert response.seq == 1
_body = dict(
AuthKey='this-is-bad-authkey',
)
assert not _client.is_authed
with pytest.raises(serf.AuthenticationError, ) :
_client.auth(**_body).add_callback(_callback, ).request()
assert not _client.is_authed
class AuthFakeConnectionSuccess (FakeConnection, ) :
socket_data = (
'\x82\xa5Error\xa0\xa3Seq\x00',
'\x82\xa5Error\xa0\xa3Seq\x01',
)
def test_response_auth_success () :
_client = serf.Client(connection_class=AuthFakeConnectionSuccess, )
def _callback (response, ) :
assert response.request.command == 'auth'
assert not response.error
assert response.is_success
assert response.body is None
assert response.seq == 1
_body = dict(
AuthKey='this-is-valid-authkey',
)
assert not _client.is_authed
_client.auth(**_body).add_callback(_callback, ).request()
assert _client.is_authed
class AuthFakeConnectionForceLeaveSuccess (FakeConnection, ) :
socket_data = (
'\x82\xa5Error\xa0\xa3Seq\x00',
'\x82\xa5Error\xa0\xa3Seq\x01\x82\xa5Error\xa0\xa3Seq\x02',
)
def test_implicit_authentication_with_host_url_success () :
def _callback (response, ) :
assert response.request.command == 'force_leave'
assert not response.error
assert response.is_success
assert response.body is None
assert response.seq == 2
_body = dict(
Node='node0',
)
_auth_key = 'this-is-valid-authkey'
_client = serf.Client(
'serf://127.0.0.1:7373?AuthKey=%s' % _auth_key,
connection_class=AuthFakeConnectionForceLeaveSuccess,
)
assert not _client.is_authed
_client.force_leave(**_body).add_callback(_callback, ).request()
assert _client.is_authed
class AuthFakeConnectionForceLeaveFailed (FakeConnection, ) :
socket_data = (
'\x82\xa5Error\xa0\xa3Seq\x00',
'\x82\xa5Error\xbcInvalid authentication token\xa3Seq\x01',
)
def test_implicit_authentication_with_host_url () :
def _callback (response, ) :
assert response.request.command == 'force_leave'
assert not response.error
assert response.is_success
assert response.body is None
assert response.seq == 2
_body = dict(
Node='node0',
)
_auth_key = 'this-is-valid-authkey'
_client = serf.Client(
'serf://127.0.0.1:7373?AuthKey=%s' % _auth_key,
connection_class=AuthFakeConnectionForceLeaveFailed,
)
assert not _client.is_authed
with pytest.raises(serf.AuthenticationError, ) :
_client.force_leave(**_body).add_callback(_callback, ).request()
assert not _client.is_authed
| mpl-2.0 | 1,739,868,179,361,525,200 | 24.202454 | 72 | 0.615871 | false | 3.575283 | true | false | false |
fmin2958/manwe | manwe/commands.py | 2 | 30978 | # -*- coding: utf-8 -*-
"""
Manwë command line interface.
Todo: Move some of the docstring from the _old_population_study.py file here.
.. moduleauthor:: Martijn Vermaat <[email protected]>
.. Licensed under the MIT license, see the LICENSE file.
"""
import argparse
import getpass
import itertools
import os
import re
import sys
from clint import textui
from .config import Config
from .errors import (ApiError, BadRequestError, UnauthorizedError,
ForbiddenError, NotFoundError)
from .resources import USER_ROLES
from .session import Session
SYSTEM_CONFIGURATION = '/etc/manwe/config'
USER_CONFIGURATION = os.path.join(
os.environ.get('XDG_CONFIG_HOME', None) or
os.path.join(os.path.expanduser('~'), '.config'),
'manwe', 'config')
class UserError(Exception):
pass
def log(message):
sys.stderr.write('%s\n' % message)
def abort(message=None):
if message:
log('error: %s' % message)
sys.exit(1)
def wait_for_tasks(*tasks):
with textui.progress.Bar(expected_size=100) as bar:
for percentages in itertools.izip_longest(
*[task.wait_and_monitor() for task in tasks], fillvalue=100):
# We treat the `None` percentage (waiting) as `0` (running).
bar.show(sum(percentage for percentage in percentages
if percentage is not None) // len(tasks))
def list_samples(session, public=False, user=None, groups=None):
"""
List samples.
"""
groups = groups or []
filters = {}
if public:
filters.update(public=True)
if user:
filters.update(user=user)
if groups:
filters.update(groups=groups)
samples = session.samples(**filters)
for i, sample in enumerate(samples):
if i:
print
print 'Sample: %s' % sample.uri
print 'Name: %s' % sample.name
print 'Pool size: %i' % sample.pool_size
print 'Visibility: %s' % ('public' if sample.public else 'private')
print 'State: %s' % ('active' if sample.active else 'inactive')
def show_sample(session, uri):
"""
Show sample details.
"""
try:
sample = session.sample(uri)
except NotFoundError:
raise UserError('Sample does not exist: "%s"' % uri)
print 'Sample: %s' % sample.uri
print 'Name: %s' % sample.name
print 'Pool size: %i' % sample.pool_size
print 'Visibility: %s' % ('public' if sample.public else 'private')
print 'State: %s' % ('active' if sample.active else 'inactive')
print
print 'User: %s' % sample.user.uri
print 'Name: %s' % sample.user.name
for group in sample.groups:
print
print 'Group: %s' % group.uri
print 'Name: %s' % group.name
for variation in session.variations(sample=sample):
print
print 'Variation: %s' % variation.uri
task = variation.task
if task.running:
print 'Task state: %s (%d%%)' % (task.state, task.progress)
else:
print 'Task state: %s' % task.state
if task.failure:
print 'Task error: %s' % task.error.message
for coverage in session.coverages(sample=sample):
print
print 'Coverage: %s' % coverage.uri
task = coverage.task
if task.running:
print 'Task state: %s (%d%%)' % (task.state, task.progress)
else:
print 'Task state: %s' % task.state
if task.failure:
print 'Task error: %s' % task.error.message
def activate_sample(session, uri):
"""
Activate sample.
"""
try:
sample = session.sample(uri)
except NotFoundError:
raise UserError('Sample does not exist: "%s"' % uri)
sample.active = True
sample.save()
log('Activated sample: %s' % sample.uri)
def annotate_sample_variations(session, uri, queries=None, wait=False):
"""
Annotate sample variations with variant frequencies.
"""
queries = queries or {}
try:
sample = session.sample(uri)
except NotFoundError:
raise UserError('Sample does not exist: "%s"' % uri)
tasks = []
for variation in session.variations(sample=sample):
annotation = session.create_annotation(
variation.data_source, queries=queries)
log('Started annotation: %s' % annotation.uri)
tasks.append(annotation.task)
if not wait:
return
wait_for_tasks(*tasks)
log('Annotated variations for sample: %s' % sample.uri)
def add_sample(session, name, groups=None, pool_size=1, public=False,
no_coverage_profile=False):
"""
Add sample.
"""
groups = groups or []
if pool_size < 1:
raise UserError('Pool size should be at least 1')
groups = [session.group(uri) for uri in groups]
sample = session.create_sample(name, groups=groups, pool_size=pool_size,
coverage_profile=not no_coverage_profile,
public=public)
log('Added sample: %s' % sample.uri)
return sample
def import_sample(session, name, groups=None, pool_size=1, public=False,
no_coverage_profile=False, vcf_files=None, bed_files=None,
data_uploaded=False, prefer_genotype_likelihoods=False,
wait=False):
"""
Add sample and import variation and coverage files.
"""
vcf_files = vcf_files or []
bed_files = bed_files or []
if not no_coverage_profile and not bed_files:
raise UserError('Expected at least one BED file')
# Todo: Nice error if file cannot be read.
vcf_sources = [({'local_file': vcf_file}, vcf_file) if data_uploaded else
({'data': open(vcf_file)}, vcf_file)
for vcf_file in vcf_files]
bed_sources = [({'local_file': bed_file}, bed_file) if data_uploaded else
({'data': open(bed_file)}, bed_file)
for bed_file in bed_files]
sample = add_sample(session, name, groups=groups, pool_size=pool_size,
public=public, no_coverage_profile=no_coverage_profile)
tasks = []
for source, filename in vcf_sources:
data_source = session.create_data_source(
'Variants from file "%s"' % filename,
filetype='vcf',
gzipped=filename.endswith('.gz'),
**source)
log('Added data source: %s' % data_source.uri)
variation = session.create_variation(
sample, data_source,
prefer_genotype_likelihoods=prefer_genotype_likelihoods)
log('Started variation import: %s' % variation.uri)
tasks.append(variation.task)
for source, filename in bed_sources:
data_source = session.create_data_source(
'Regions from file "%s"' % filename,
filetype='bed',
gzipped=filename.endswith('.gz'),
**source)
log('Added data source: %s' % data_source.uri)
coverage = session.create_coverage(sample, data_source)
log('Started coverage import: %s' % coverage.uri)
tasks.append(coverage.task)
if not wait:
return
wait_for_tasks(*tasks)
log('Imported variations and coverages for sample: %s' % sample.uri)
def import_variation(session, uri, vcf_file, data_uploaded=False,
prefer_genotype_likelihoods=False, wait=False):
"""
Import variation file for existing sample.
"""
# Todo: Nice error if file cannot be read.
if data_uploaded:
source = {'local_file': vcf_file}
else:
source = {'data': open(vcf_file)}
try:
sample = session.sample(uri)
except NotFoundError:
raise UserError('Sample does not exist: "%s"' % uri)
data_source = session.create_data_source(
'Variants from file "%s"' % vcf_file,
filetype='vcf',
gzipped=vcf_file.endswith('.gz'),
**source)
log('Added data source: %s' % data_source.uri)
variation = session.create_variation(
sample, data_source,
prefer_genotype_likelihoods=prefer_genotype_likelihoods)
log('Started variation import: %s' % variation.uri)
if not wait:
return
wait_for_tasks(variation.task)
log('Imported variation: %s' % variation.uri)
def import_coverage(session, uri, bed_file, data_uploaded=False, wait=False):
"""
Import coverage file for existing sample.
"""
# Todo: Nice error if file cannot be read.
if data_uploaded:
source = {'local_file': bed_file}
else:
source = {'data': open(bed_file)}
try:
sample = session.sample(uri)
except NotFoundError:
raise UserError('Sample does not exist: "%s"' % uri)
data_source = session.create_data_source(
'Regions from file "%s"' % bed_file,
filetype='bed',
gzipped=bed_file.endswith('.gz'),
**source)
log('Added data source: %s' % data_source.uri)
coverage = session.create_coverage(sample, data_source)
log('Started coverage import: %s' % coverage.uri)
if not wait:
return
wait_for_tasks(coverage.task)
log('Imported coverage: %s' % coverage.uri)
def list_groups(session):
"""
List groups.
"""
groups = session.groups()
for i, group in enumerate(groups):
if i:
print
print 'Group: %s' % group.uri
print 'Name: %s' % group.name
def show_group(session, uri):
"""
Show group details.
"""
try:
group = session.group(uri)
except NotFoundError:
raise UserError('Group does not exist: "%s"' % uri)
print 'Group: %s' % group.uri
print 'Name: %s' % group.name
def add_group(session, name):
"""
Add a sample group.
"""
group = session.create_group(name)
log('Added group: %s' % group.uri)
def list_users(session):
"""
List users.
"""
users = session.users()
for i, user in enumerate(users):
if i:
print
print 'User: %s' % user.uri
print 'Name: %s' % user.name
print 'Login: %s' % user.login
print 'Roles: %s' % ', '.join(sorted(user.roles))
def show_user(session, uri):
"""
Show user details.
"""
try:
user = session.user(uri)
except NotFoundError:
raise UserError('User does not exist: "%s"' % uri)
print 'User: %s' % user.uri
print 'Name: %s' % user.name
print 'Login: %s' % user.login
print 'Roles: %s' % ', '.join(sorted(user.roles))
def add_user(session, login, name=None, roles=None):
"""
Add an API user (queries for password).
"""
roles = roles or []
name = name or login
if not re.match('[a-zA-Z][a-zA-Z0-9._-]*$', login):
raise UserError('User login must match "[a-zA-Z][a-zA-Z0-9._-]*"')
password = getpass.getpass('Please provide a password for the new user: ')
password_control = getpass.getpass('Repeat: ')
if password != password_control:
raise UserError('Passwords did not match')
user = session.create_user(login, password, name=name, roles=roles)
log('Added user: %s' % user.uri)
def list_data_sources(session, user=None):
"""
List data sources.
"""
filters = {}
if user:
filters.update(user=user)
data_sources = session.data_sources(**filters)
for i, data_source in enumerate(data_sources):
if i:
print
print 'Data source: %s' % data_source.uri
print 'Name: %s' % data_source.name
print 'Filetype: %s' % data_source.filetype
def show_data_source(session, uri):
"""
Show data source details.
"""
try:
data_source = session.data_source(uri)
except NotFoundError:
raise UserError('Data source does not exist: "%s"' % uri)
print 'Data source: %s' % data_source.uri
print 'Name: %s' % data_source.name
print 'Filetype: %s' % data_source.filetype
print
print 'User: %s' % data_source.user.uri
print 'Name: %s' % data_source.user.name
def data_source_data(session, uri):
"""
Download data source and write data to standard output.
"""
try:
data_source = session.data_source(uri)
except NotFoundError:
raise UserError('Data source does not exist: "%s"' % uri)
for chunk in data_source.data:
sys.stdout.write(chunk)
def annotate_data_source(session, uri, queries=None, wait=False):
"""
Annotate data source with variant frequencies.
"""
queries = queries or {}
try:
data_source = session.data_source(uri)
except NotFoundError:
raise UserError('Data source does not exist: "%s"' % uri)
annotation = session.create_annotation(
data_source, queries=queries)
log('Started annotation: %s' % annotation.uri)
if not wait:
return
wait_for_tasks(annotation.task)
log('Annotated data source: %s' % annotation.annotated_data_source.uri)
def annotate_vcf(session, vcf_file, data_uploaded=False, queries=None,
wait=False):
"""
Annotate VCF file with variant frequencies.
"""
queries = queries or {}
# Todo: Nice error if file cannot be read.
if data_uploaded:
source = {'local_file': vcf_file}
else:
source = {'data': open(vcf_file)}
data_source = session.create_data_source(
'Variants from file "%s"' % vcf_file,
filetype='vcf',
gzipped=vcf_file.endswith('.gz'),
**source)
log('Added data source: %s' % data_source.uri)
annotation = session.create_annotation(
data_source, queries=queries)
log('Started annotation: %s' % annotation.uri)
if not wait:
return
wait_for_tasks(annotation.task)
log('Annotated VCF file: %s' % annotation.annotated_data_source.uri)
def annotate_bed(session, bed_file, data_uploaded=False, queries=None,
wait=False):
"""
Annotate BED file with variant frequencies.
"""
queries = queries or {}
# Todo: Nice error if file cannot be read.
if data_uploaded:
source = {'local_file': bed_file}
else:
source = {'data': open(bed_file)}
data_source = session.create_data_source(
'Regions from file "%s"' % bed_file,
filetype='bed',
gzipped=bed_file.endswith('.gz'),
**source)
log('Added data source: %s' % data_source.uri)
annotation = session.create_annotation(
data_source, queries=queries)
log('Started annotation: %s' % annotation.uri)
if not wait:
return
wait_for_tasks(annotation.task)
log('Annotated BED file: %s' % annotation.annotated_data_source.uri)
def create_config(filename=None):
"""
Create a Manwë configuration object.
Configuration values are initialized from the :mod:`manwe.default_config`
module.
By default, configuration values are then read from two locations, in this
order:
1. `SYSTEM_CONFIGURATION`
2. `USER_CONFIGURATION`
If both files exist, values defined in the second overwrite values defined
in the first.
An exception to this is when the optional `filename` argument is set. In
that case, the locations listed above are ignored and the configuration is
read from `filename`.
:arg filename: Optional filename to read configuration from. If present,
this overrides automatic detection of configuration file location.
:type filename: str
:return: Manwë configuration object.
:rtype: config.Config
"""
config = Config()
if filename:
config.from_pyfile(filename)
else:
if os.path.isfile(SYSTEM_CONFIGURATION):
config.from_pyfile(SYSTEM_CONFIGURATION)
if os.path.isfile(USER_CONFIGURATION):
config.from_pyfile(USER_CONFIGURATION)
return config
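# Illustrative sketch (added for clarity, not part of the original module):
# shows how the configuration resolved by create_config is typically turned
# into an API session. main() below does the same with the parsed arguments;
# the helper name here is hypothetical.
def _example_session(config_filename=None):
    """Build a Session from the resolved configuration (illustrative only)."""
    return Session(config=create_config(config_filename))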
def main():
"""
Manwë command line interface.
"""
class UpdateAction(argparse.Action):
"""
Custom argparse action to store a pair of values as key and value in a
dictionary.
Example usage::
>>> p.add_argument(
... '-c', dest='flower_colors', nargs=2,
... metavar=('FLOWER', 'COLOR'), action=UpdateAction,
... help='set flower color (multiple allowed)')
"""
def __init__(self, *args, **kwargs):
if kwargs.get('nargs') != 2:
raise ValueError('nargs for update actions must be 2')
super(UpdateAction, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
key, value = values
d = getattr(namespace, self.dest) or {}
d[key] = value
setattr(namespace, self.dest, d)
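    # Illustrative example (added for clarity, not part of the original
    # module): with the add_argument call shown in the docstring above,
    # parsing ['-c', 'rose', 'red', '-c', 'tulip', 'yellow'] leaves
    # namespace.flower_colors == {'rose': 'red', 'tulip': 'yellow'}.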
config_parser = argparse.ArgumentParser(add_help=False)
config_parser.add_argument(
'-c', '--config', metavar='FILE', type=str, dest='config',
help='path to configuration file to use instead of looking in '
'default locations')
parser = argparse.ArgumentParser(
description=__doc__.split('\n\n')[0], parents=[config_parser])
subparsers = parser.add_subparsers(
title='subcommands', dest='subcommand', help='subcommand help')
# Subparsers for 'samples'.
s = subparsers.add_parser(
'samples', help='manage samples', description='Manage sample resources.'
).add_subparsers()
# Subparser 'samples list'.
p = s.add_parser(
'list', help='list samples',
description=list_samples.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=list_samples)
p.add_argument(
'-p', '--public', dest='public', action='store_true',
help='only public samples')
p.add_argument(
'-u', '--user', dest='user', metavar='URI',
help='filter samples by user')
p.add_argument(
'-g', '--group', dest='groups', metavar='URI', action='append',
help='filter samples by group (more than one allowed)')
# Subparser 'samples show'.
p = s.add_parser(
'show', help='show sample details',
description=show_sample.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=show_sample)
p.add_argument(
'uri', metavar='URI', type=str, help='sample')
# Subparser 'samples activate'.
p = s.add_parser(
'activate', help='activate sample',
description=activate_sample.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=activate_sample)
p.add_argument(
'uri', metavar='URI', type=str, help='sample')
# Subparser 'samples annotate-variations'.
p = s.add_parser(
'annotate-variations', help='annotate sample variations',
description=annotate_sample_variations.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=annotate_sample_variations)
p.add_argument(
'uri', metavar='URI', type=str, help='sample')
p.add_argument(
'-q', '--query', dest='queries', nargs=2, action=UpdateAction,
metavar=('NAME', 'EXPRESSION'), help='annotation query (more than '
'one allowed)')
p.add_argument(
'-w', '--wait', dest='wait', action='store_true',
help='wait for annotations to complete (blocking)')
# Subparser 'samples add'.
p = s.add_parser(
'add', help='add sample',
description=add_sample.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=add_sample)
p.add_argument(
'name', metavar='NAME', type=str, help='sample name')
p.add_argument(
'-g', '--group', dest='groups', metavar='URI', action='append',
help='sample group (more than one allowed)')
p.add_argument(
'-s', '--pool-size', dest='pool_size', default=1, type=int,
help='number of individuals in sample (default: 1)')
p.add_argument(
'-p', '--public', dest='public', action='store_true',
help='sample data is public')
p.add_argument(
'--no-coverage-profile', dest='no_coverage_profile', action='store_true',
help='sample has no coverage profile')
# Subparser 'samples import'.
p = s.add_parser(
'import', help='add sample and import data',
description=import_sample.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=import_sample)
p.add_argument(
'name', metavar='NAME', type=str, help='sample name')
p.add_argument(
'-g', '--group', dest='groups', metavar='URI', action='append',
help='sample group (more than one allowed)')
p.add_argument(
'--vcf', metavar='VCF_FILE', dest='vcf_files', action='append',
required=True, help='file in VCF 4.1 format to import variants from '
'(more than one allowed)')
p.add_argument(
'--bed', metavar='BED_FILE', dest='bed_files', action='append',
help='file in BED format to import covered regions from (more than '
'one allowed)')
p.add_argument(
'-u', '--data-uploaded', dest='data_uploaded', action='store_true',
help='data files are already uploaded to the server')
p.add_argument(
'-s', '--pool-size', dest='pool_size', default=1, type=int,
help='number of individuals in sample (default: 1)')
p.add_argument(
'-p', '--public', dest='public', action='store_true',
help='sample data is public')
    # Note: We prefer to explicitly include the --no-coverage-profile flag
    # instead of inferring it from an empty list of BED files. This prevents
    # accidentally forgetting the coverage profile.
p.add_argument(
'--no-coverage-profile', dest='no_coverage_profile', action='store_true',
help='sample has no coverage profile')
p.add_argument(
'-l', '--prefer_genotype_likelihoods', dest='prefer_genotype_likelihoods',
action='store_true', help='in VCF files, derive genotypes from '
'likelihood scores instead of using reported genotypes (use this if '
'the file was produced by samtools)')
p.add_argument(
'-w', '--wait', dest='wait', action='store_true',
help='wait for imports to complete (blocking)')
# Subparser 'samples import-vcf'.
p = s.add_parser(
'import-vcf', help='import VCF file for existing sample',
description=import_variation.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=import_variation)
p.add_argument(
'uri', metavar='URI', type=str, help='sample')
p.add_argument(
'vcf_file', metavar='FILE',
help='file in VCF 4.1 format to import variants from')
p.add_argument(
'-u', '--data-uploaded', dest='data_uploaded', action='store_true',
help='data files are already uploaded to the server')
p.add_argument(
'-l', '--prefer_genotype_likelihoods', dest='prefer_genotype_likelihoods',
action='store_true', help='in VCF files, derive genotypes from '
'likelihood scores instead of using reported genotypes (use this if '
'the file was produced by samtools)')
p.add_argument(
'-w', '--wait', dest='wait', action='store_true',
help='wait for import to complete (blocking)')
# Subparser 'samples import-bed'.
p = s.add_parser(
'import-bed', help='import BED file for existing sample',
description=import_coverage.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=import_coverage)
p.add_argument(
'uri', metavar='URI', type=str, help='sample')
p.add_argument(
'bed_file', metavar='FILE',
help='file in BED format to import covered regions from')
p.add_argument(
'-u', '--data-uploaded', dest='data_uploaded', action='store_true',
help='data files are already uploaded to the server')
p.add_argument(
'-w', '--wait', dest='wait', action='store_true',
help='wait for import to complete (blocking)')
# Subparsers for 'groups'.
s = subparsers.add_parser(
'groups', help='manage groups', description='Manage group resources.'
).add_subparsers()
# Subparser 'groups list'.
p = s.add_parser(
'list', help='list groups',
description=list_groups.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=list_groups)
# Subparser 'groups show'.
p = s.add_parser(
'show', help='show group details',
description=show_group.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=show_group)
p.add_argument(
'uri', metavar='URI', type=str, help='group')
# Subparser 'groups add'.
p = s.add_parser(
'add', help='add new sample group',
description=add_group.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=add_group)
p.add_argument(
'name', metavar='NAME', type=str, help='group name')
# Subparsers for 'users'.
s = subparsers.add_parser(
'users', help='manage users', description='Manage user resources.'
).add_subparsers()
# Subparser 'users list'.
p = s.add_parser(
'list', help='list users',
description=list_users.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=list_users)
# Subparser 'users show'.
p = s.add_parser(
'show', help='show user details',
description=show_user.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=show_user)
p.add_argument('uri', metavar='URI', type=str, help='user')
# Subparser 'users add'.
p = s.add_parser(
'add', help='add new API user',
description=add_user.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=add_user)
p.add_argument(
'login', metavar='LOGIN', type=str, help='user login')
p.add_argument(
'-n', '--name', metavar='NAME', dest='name', type=str,
help='user name (default: LOGIN)')
for role in USER_ROLES:
p.add_argument(
'--%s' % role, dest='roles', action='append_const', const=role,
help='user has %s role' % role)
# Subparsers for 'data-sources'.
s = subparsers.add_parser(
'data-sources', help='manage data sources',
description='Manage data source resources.'
).add_subparsers()
# Subparser 'data-sources list'.
p = s.add_parser(
'list', help='list data sources',
description=list_data_sources.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=list_data_sources)
p.add_argument(
'-u', '--user', dest='user', metavar='URI',
help='filter data sources by user')
# Subparser 'data-sources show'.
p = s.add_parser(
'show', help='show data source details',
description=show_data_source.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=show_data_source)
p.add_argument(
'uri', metavar='URI', type=str, help='data source')
# Subparser 'data-sources download'.
p = s.add_parser(
'download', help='download data source',
description=data_source_data.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=data_source_data)
p.add_argument(
'uri', metavar='URI', type=str, help='data source')
# Subparser 'data-sources annotate'.
p = s.add_parser(
'annotate', help='annotate data source',
description=annotate_data_source.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=annotate_data_source)
p.add_argument(
'uri', metavar='URI', type=str, help='data source')
p.add_argument(
'-q', '--query', dest='queries', nargs=2, action=UpdateAction,
metavar=('NAME', 'EXPRESSION'), help='annotation query (more than '
'one allowed)')
p.add_argument(
'-w', '--wait', dest='wait', action='store_true',
help='wait for annotation to complete (blocking)')
# Subparser 'annotate-vcf'.
p = subparsers.add_parser(
'annotate-vcf', help='annotate VCF file',
description=annotate_vcf.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=annotate_vcf)
p.add_argument(
'vcf_file', metavar='FILE', help='file in VCF 4.1 format to annotate')
p.add_argument(
'-u', '--data-uploaded', dest='data_uploaded', action='store_true',
help='data files are already uploaded to the server')
p.add_argument(
'-q', '--query', dest='queries', nargs=2, action=UpdateAction,
metavar=('NAME', 'EXPRESSION'), help='annotation query (more than '
'one allowed)')
# TODO:
# - Perhaps --no-wait is better (i.e., wait by default)?
# - If we are waiting we might as well also download the result.
p.add_argument(
'-w', '--wait', dest='wait', action='store_true',
help='wait for annotation to complete (blocking)')
# Subparser 'annotate-bed'.
p = subparsers.add_parser(
'annotate-bed', help='annotate BED file',
description=annotate_bed.__doc__.split('\n\n')[0],
parents=[config_parser])
p.set_defaults(func=annotate_bed)
p.add_argument(
'bed_file', metavar='FILE', help='file in BED format to annotate')
p.add_argument(
'-u', '--data-uploaded', dest='data_uploaded', action='store_true',
help='data files are already uploaded to the server')
p.add_argument(
'-q', '--query', dest='queries', nargs=2, action=UpdateAction,
metavar=('NAME', 'EXPRESSION'), help='annotation query (more than '
'one allowed)')
p.add_argument(
'-w', '--wait', dest='wait', action='store_true',
help='wait for annotation to complete (blocking)')
args = parser.parse_args()
try:
session = Session(config=create_config(args.config))
args.func(session=session,
**{k: v for k, v in vars(args).items()
if k not in ('config', 'func', 'subcommand')})
except UserError as e:
abort(e)
except UnauthorizedError:
abort('Authentication is needed, please make sure you have the '
'correct authentication token defined in "%s"'
% (args.config or USER_CONFIGURATION))
except ForbiddenError:
abort('Sorry, you do not have permission')
except BadRequestError as (code, message):
abort(message)
except ApiError as (code, message):
abort(message)
if __name__ == '__main__':
main()
| mit | 7,411,989,852,363,706,000 | 31.604211 | 82 | 0.598696 | false | 3.758068 | true | false | false |
gfarnadi/FairPSL | debug/compare_fair_map/run_fpsl_pulp.py | 1 | 1338 | #!/usr/bin/env python
import os, sys
SCRIPTDIR = os.path.dirname(__file__)
ENGINDIR = os.path.join(SCRIPTDIR, '..', '..', 'engines')
sys.path.append(os.path.abspath(ENGINDIR))
from fpsl_pulp import fair_map_inference
PROBLEMDIR = os.path.join(SCRIPTDIR, '..', '..', 'problems', 'paper_review')
sys.path.append(os.path.abspath(PROBLEMDIR))
from grounding import ground
from os.path import join as ojoin
if __name__ == '__main__':
data_path = ojoin(PROBLEMDIR, 'data', '1')
rules, hard_rules, counts, atoms = ground(data_path)
results = fair_map_inference(rules, hard_rules, counts, 0.1, 'RC', solver='gurobi')
out_path = ojoin('output', 'fpsl_pulp')
reviews = atoms['review']
with open(ojoin(out_path, 'POSITIVEREVIEW.txt'), 'w') as f:
for (review, paper), (vid, _) in reviews.items():
print("'%s'\t'%s'\t%f"%(review, paper, results[vid]), file=f)
acceptable = atoms['acceptable']
with open(ojoin(out_path, 'ACCEPTABLE.txt'), 'w') as f:
for paper, (vid, _) in acceptable.items():
print("'%s'\t%f"%(paper, results[vid]), file=f)
presents = atoms['presents']
with open(ojoin(out_path, 'PRESENTS.txt'), 'w') as f:
for author, (vid, _) in presents.items():
print("'%s'\t%f"%(author, results[vid]), file=f)
| mit | -2,399,746,609,562,865,700 | 36.166667 | 87 | 0.603886 | false | 3.090069 | false | false | false |
tensorflow/tpu | models/official/mobilenet/supervised_images.py | 1 | 11789 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the input_fn for training or evaluation.
The training data is assumed to be in TFRecord format with keys as specified
in the dataset_parser below
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import tensorflow.compat.v1 as tf
import inception_preprocessing
import vgg_preprocessing
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'num_parallel_calls', 64,
'Number of elements to process in parallel (by mapper)')
flags.DEFINE_integer(
'shuffle_buffer_size', 1000,
'Number of elements from dataset that shuffler will sample from. '
'This shuffling is done after prefetching is done. '
'Set to 0 to disable')
flags.DEFINE_bool(
'use_sloppy_interleave',
default=False,
help='Use sloppy interleave or not. Default set to False.')
flags.DEFINE_integer(
'cycle_length',
default=16,
help='The number of files to read concurrently by interleave function.')
flags.DEFINE_string(
'data_source',
'real',
help='Data source to be real or fake. Fake data uses randomly generated '
'numbers.')
flags.DEFINE_bool(
'preprocessed', False, help='Is the data preprocessed to 224x224 images?')
flags.DEFINE_integer(
'width', 224, 'Width of input image')
flags.DEFINE_integer(
'height', 224, 'Height of input image')
flags.DEFINE_integer(
    'num_channel', 3, 'Number of channels')
flags.DEFINE_bool(
'use_annotated_bbox', False,
'If true, use annotated bounding box as input to cropping function, '
'else use full image size')
flags.DEFINE_string(
'preprocessing', None,
'Preprocessing stage to use: one of inception or vgg')
flags.DEFINE_integer(
'prefetch_size',
default=None,
help='The number of elements buffered by prefetch function. Default is the '
'batch size. Any positive integer sets the buffer size at such a value.'
'Any other value disables prefetch.')
flags.DEFINE_integer(
'dataset_reader_buffer_size',
default=256 * 1024 * 1024,
help='The number of bytes in read buffer. A value of zero means no '
'buffering.')
flags.DEFINE_integer(
'followup_shuffle_buffer_size', 1000,
'Number of elements from dataset that shuffler will sample from. '
'This shuffling is done after prefetching is done. '
'Set to 0 to disable')
flags.DEFINE_integer(
'element_shuffle_buffer_size',
default=1024,
help='The number of training samples in the shuffle buffer. A value of zero'
' disables input-sample shuffling.')
flags.DEFINE_integer(
'prefetch_dataset_buffer_size', 8*1024*1024,
'Number of bytes in read buffer. 0 means no buffering.')
flags.DEFINE_integer(
'num_files_infeed', 8,
'Number of training files to read in parallel.')
flags.DEFINE_float(
'image_minval', -1.0, 'Min value.')
flags.DEFINE_float(
'image_maxval', 1.0, 'Max value.')
# Random cropping constants
_RESIZE_SIDE_MIN = 256
_RESIZE_SIDE_MAX = 512
def preprocess_raw_bytes(image_bytes, is_training=False, bbox=None):
"""Preprocesses a raw JPEG image.
This implementation is shared in common between train/eval pipelines,
and when serving the model.
Args:
image_bytes: A string Tensor, containing the encoded JPEG.
is_training: Whether or not to preprocess for training.
bbox: In inception preprocessing, this bbox can be used for cropping.
Returns:
A 3-Tensor [height, width, RGB channels] of type float32.
"""
image = tf.image.decode_jpeg(image_bytes, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if FLAGS.preprocessing == 'vgg':
image = vgg_preprocessing.preprocess_image(
image=image,
output_height=FLAGS.height,
output_width=FLAGS.width,
is_training=is_training,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX)
elif FLAGS.preprocessing == 'inception':
image = inception_preprocessing.preprocess_image(
image=image,
output_height=FLAGS.height,
output_width=FLAGS.width,
is_training=is_training,
bbox=bbox)
else:
assert False, 'Unknown preprocessing type: %s' % FLAGS.preprocessing
return image
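# Illustrative sketch (added for clarity, not part of the original file): a
# serving pipeline could reuse this preprocessing over a batch of encoded
# JPEG strings. `image_bytes_batch` is a hypothetical [batch] string Tensor.
def _example_serving_preprocess(image_bytes_batch):
  """Maps preprocess_raw_bytes over raw JPEG bytes (illustrative only)."""
  return tf.map_fn(lambda b: preprocess_raw_bytes(b, is_training=False),
                   image_bytes_batch, dtype=tf.float32)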
def tensor_transform_fn(data, perm):
"""Transpose function.
This function is used to transpose an image tensor on the host and then
perform an inverse transpose on the TPU. The transpose on the TPU gets
effectively elided thus voiding any associated computational cost.
NOTE: Eventually the compiler will be able to detect when this kind of
operation may prove beneficial and perform these types of transformations
implicitly, voiding the need for user intervention
Args:
data: Tensor to be transposed
perm: Permutation of the dimensions of a
Returns:
Transposed tensor
"""
if FLAGS.transpose_enabled:
return tf.transpose(data, perm)
return data
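# Illustrative sketch (added for clarity, not part of the original file): when
# FLAGS.transpose_enabled is set, the host moves the batch dimension last and
# the TPU applies the inverse permutation. The perm value below is an example;
# the real one is supplied via params['output_perm'] in input_fn.
def _example_host_transpose(images):
  """[batch, height, width, channels] -> [height, width, channels, batch]."""
  return tensor_transform_fn(images, [1, 2, 3, 0])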
class InputPipeline(object):
"""Provides TFEstimator input function for imagenet, with preprocessing."""
def __init__(self, is_training, data_dir):
self.is_training = is_training
self.data_dir = data_dir
def dataset_parser(self, serialized_proto):
"""Parse an Imagenet record from value."""
if FLAGS.preprocessed:
keys_to_features = {
'image': tf.io.FixedLenFeature([], tf.string),
'label': tf.io.FixedLenFeature([], tf.int64),
}
features = tf.parse_single_example(serialized_proto, keys_to_features)
image = tf.decode_raw(features['image'], tf.float32)
image.set_shape([FLAGS.height * FLAGS.width * FLAGS.num_channel])
label = tf.cast(features['label'], tf.int32)
else:
keys_to_features = {
'image/encoded':
tf.io.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.io.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/class/label':
tf.io.FixedLenFeature([], dtype=tf.int64, default_value=-1),
'image/class/text':
tf.io.FixedLenFeature([], dtype=tf.string, default_value=''),
'image/object/bbox/xmin':
tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin':
tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax':
tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax':
tf.io.VarLenFeature(dtype=tf.float32),
'image/object/class/label':
tf.io.VarLenFeature(dtype=tf.int64),
}
features = tf.parse_single_example(serialized_proto, keys_to_features)
image = tf.image.decode_jpeg(features['image/encoded'],
channels=FLAGS.num_channel)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
label = tf.cast(tf.reshape(
features['image/class/label'], shape=[]), dtype=tf.int32)
bbox = None
if FLAGS.use_annotated_bbox:
xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
# Note that we impose an ordering of (y, x) just to make life difficult.
bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
if FLAGS.preprocessing == 'vgg':
image = vgg_preprocessing.preprocess_image(
image=image,
output_height=FLAGS.height,
output_width=FLAGS.width,
is_training=self.is_training,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX)
elif FLAGS.preprocessing == 'inception':
image = inception_preprocessing.preprocess_image(
image=image,
output_height=FLAGS.height,
output_width=FLAGS.width,
is_training=self.is_training,
bbox=bbox)
else:
image = tf.image.resize_images(image, size=[FLAGS.height, FLAGS.width])
image = (tf.cast(image, tf.float32) * (1. / 255)) - 0.5
return image, label
def input_fn(self, params):
"""Input function which provides a single batch for train or eval.
Args:
params: `dict` of parameters passed from the `TPUEstimator`.
`params['batch_size']` is always provided and should be used as the
effective batch size.
Raises:
RuntimeError: If the data source has the incorrect value.
Returns:
A (images, labels) tuple of `Tensor`s for a batch of samples.
"""
batch_size = params['batch_size']
if FLAGS.data_source == 'real':
# Actual imagenet data
datadir = 'train-*' if self.is_training else 'validation-*'
file_pattern = os.path.join(self.data_dir, datadir)
dataset = tf.data.Dataset.list_files(file_pattern,
shuffle=self.is_training)
if self.is_training:
dataset = dataset.repeat()
def prefetch_dataset(filename):
dataset = tf.data.TFRecordDataset(
filename, buffer_size=FLAGS.prefetch_dataset_buffer_size)
if FLAGS.prefetch_size is None:
dataset = dataset.prefetch(batch_size)
else:
if FLAGS.prefetch_size > 0:
dataset = dataset.prefetch(FLAGS.prefetch_size)
return dataset
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
prefetch_dataset,
cycle_length=FLAGS.num_files_infeed,
sloppy=True))
if FLAGS.followup_shuffle_buffer_size > 0:
dataset = dataset.shuffle(
buffer_size=FLAGS.followup_shuffle_buffer_size)
dataset = dataset.map(
self.dataset_parser,
num_parallel_calls=FLAGS.num_parallel_calls)
dataset = dataset.prefetch(batch_size)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
images, labels = dataset.make_one_shot_iterator().get_next()
images = tf.reshape(images, [batch_size, FLAGS.height, FLAGS.width,
FLAGS.num_channel])
labels = tf.reshape(labels, [batch_size])
elif FLAGS.data_source == 'fake':
images = tf.random_uniform(
shape=[batch_size, FLAGS.height, FLAGS.width, FLAGS.num_channel],
minval=FLAGS.image_minval,
maxval=FLAGS.image_maxval,
dtype=tf.float32)
labels = tf.random_uniform(
[batch_size], minval=0, maxval=999, dtype=tf.int32)
else:
raise RuntimeError('Data source {} not supported. Use real/fake'.format(
FLAGS.data_source))
if FLAGS.transpose_enabled:
images = tensor_transform_fn(images, params['output_perm'])
return images, labels
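# Illustrative sketch (added for clarity, not part of the original file): a
# TPUEstimator would typically consume this pipeline as
#   pipeline = InputPipeline(is_training=True, data_dir='/path/to/tfrecords')
#   estimator.train(input_fn=pipeline.input_fn, max_steps=train_steps)
# where `estimator`, the data directory and `train_steps` are assumed to be
# defined elsewhere.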
| apache-2.0 | -7,167,556,893,884,225,000 | 33.775811 | 80 | 0.656799 | false | 3.828841 | false | false | false |
spennihana/h2o-3 | h2o-py/tests/testdir_misc/pyunit_factoring.py | 5 | 2259 | from __future__ import print_function
from builtins import zip
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o import H2OFrame
from h2o.exceptions import H2OTypeError, H2OValueError
def compare_frames(expected, actual):
assert actual.shape == expected.shape
    assert actual.columns == expected.columns, "Columns differ: %r vs %r" % (actual.columns, expected.columns)
for i in range(len(actual.columns)):
colname = actual.columns[i]
t1 = expected.types[colname]
t2 = actual.types[colname]
assert t1 == t2, ("Bad types %s: expected %s, got %s" %(colname, t1, t2))
col1 = expected[colname]
s1 = str(h2o.as_list(col1))
col2 = actual[colname]
s2 = str(h2o.as_list(col2))
assert s1 == s2, ("bad values: expected[%d] = %r, actual[%d] = %r"
% (i, s1, i, s2))
def test1():
badFrame = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"], "three": [0, 5.2, 14]})
badClone = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"], "three": [0, 5.2, 14]})
compare_frames(badFrame, badClone)
try:
badFrame.asfactor()
assert False, "The frame contaied a real number, an error should be thrown"
except H2OValueError: # as designed
pass
compare_frames(badFrame, badClone)
originalAfterOp = H2OFrame.get_frame(badFrame.frame_id)
compare_frames(badFrame, originalAfterOp)
goodFrame = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"]})
goodClone = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"]})
compare_frames(goodFrame, goodClone)
factoredFrame = goodFrame.asfactor()
originalAfterOp = H2OFrame.get_frame(goodFrame.frame_id)
compare_frames(goodFrame, originalAfterOp)
expectedFactoredFrame = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"]}, column_types={"one":"categorical", "two": "enum"})
compare_frames(expectedFactoredFrame, factoredFrame)
refactoredFrame = expectedFactoredFrame.asfactor()
factoredAfterOp = H2OFrame.get_frame(refactoredFrame.frame_id)
compare_frames(expectedFactoredFrame, factoredAfterOp)
if __name__ == "__main__":
pyunit_utils.standalone_test(test1)
else:
test1()
| apache-2.0 | -4,928,311,135,687,874,000 | 35.435484 | 133 | 0.628597 | false | 3.090287 | false | false | false |
Rhombik/rhombik-object-repository | importer/views.py | 1 | 1798 | from django.http import HttpResponseRedirect#, HttpResponse
from importer.forms import ImportForm
from importer.tasks import ThingiProjectTask,ThingiUserTask
from django.shortcuts import get_object_or_404, render_to_response
from multiuploader.views import draftview
import re
''' parse() performs all the identification that can be done on a URL without making any GET requests. It returns the task class to run for a recognized site, or None if the URL is not recognized. '''
def parse(url):
if re.search('thingiverse\.com',url):
        # gee whiz, it's from thingiverse!
if re.search('thing:\d\d+',url):#it's a thing/project page
return(ThingiProjectTask)
else:
return(ThingiUserTask)#it's probably a user page. or it's another page, but we aren't checking that here.
else:
return(None)
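# Illustrative examples (added for clarity, not part of the original module):
# parse() returns the task class to run for a recognized URL, or None.
#   parse('http://www.thingiverse.com/thing:12345')  -> ThingiProjectTask
#   parse('http://www.thingiverse.com/SomeUser')     -> ThingiUserTask
#   parse('http://example.com/anything')             -> None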
def importer(request):
###Write a scraper dispatcher here.
if request.method == 'POST':
form = ImportForm(request.POST.copy())
if form.is_valid() and request.user.is_authenticated():
userPK=request.user.pk
url=form.cleaned_data['url']
task=parse(url)#get the kind of task to execute for a given url!
print("importer attempting to import from : {}".format(url))
print("for user : {}".format(userPK))
if task:
print("importing from {}".format(task.__name__))
task.delay(url=url,userPK=userPK)#delay(url=url, userPK=userPK)
else:
                # Unknown site: this error should probably be logged.
pass
        ## else: we should surface form validation errors telling the user why the input isn't valid.
return draftview(request, scraperMessage=True)
#return HttpResponseRedirect('/mydrafts/', c)
| agpl-3.0 | -7,661,264,724,412,769,000 | 42.853659 | 203 | 0.656841 | false | 4.077098 | false | false | false |
255BITS/HyperGAN | examples/next-frame-wip.py | 1 | 22324 | import os
import uuid
import random
import tensorflow as tf
import hypergan as hg
import hyperchamber as hc
import numpy as np
import glob
import time
import re
from hypergan.viewer import GlobalViewer
from hypergan.samplers.base_sampler import BaseSampler
from hypergan.gan_component import ValidationException, GANComponent
from hypergan.samplers.random_walk_sampler import RandomWalkSampler
from hypergan.samplers.debug_sampler import DebugSampler
from hypergan.search.alphagan_random_search import AlphaGANRandomSearch
from hypergan.gans.base_gan import BaseGAN
from common import *
import copy
from hypergan.gans.alpha_gan import AlphaGAN
from hypergan.gan_component import ValidationException, GANComponent
from hypergan.gans.base_gan import BaseGAN
from hypergan.discriminators.fully_connected_discriminator import FullyConnectedDiscriminator
from hypergan.encoders.uniform_encoder import UniformEncoder
from hypergan.trainers.multi_step_trainer import MultiStepTrainer
from hypergan.trainers.multi_trainer_trainer import MultiTrainerTrainer
from hypergan.trainers.consensus_trainer import ConsensusTrainer
arg_parser = ArgumentParser("render next frame")
parser = arg_parser.add_image_arguments()
parser.add_argument('--frames', type=int, default=4, help='Number of frames to embed.')
parser.add_argument('--shuffle', type=bool, default=False, help='Randomize inputs.')
args = arg_parser.parse_args()
width, height, channels = parse_size(args.size)
config = lookup_config(args)
if args.action == 'search':
random_config = AlphaGANRandomSearch({}).random_config()
if args.config_list is not None:
config = random_config_from_list(args.config_list)
config["generator"]=random_config["generator"]
config["g_encoder"]=random_config["g_encoder"]
config["discriminator"]=random_config["discriminator"]
config["z_discriminator"]=random_config["z_discriminator"]
# TODO Other search terms?
else:
config = random_config
def tryint(s):
try:
return int(s)
except ValueError:
return s
def alphanum_key(s):
return [tryint(c) for c in re.split('([0-9]+)', s)]
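# Illustrative example (added for clarity, not part of the original script):
# alphanum_key sorts frame filenames in natural order, e.g.
#   sorted(["frame10.jpg", "frame2.jpg"], key=alphanum_key)
#   -> ["frame2.jpg", "frame10.jpg"]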
class VideoFrameLoader:
"""
"""
def __init__(self, batch_size, frame_count, shuffle):
self.batch_size = batch_size
self.frame_count = frame_count
self.shuffle = shuffle
def inputs(self):
return self.frames
def create(self, directory, channels=3, format='jpg', width=64, height=64, crop=False, resize=False):
directories = glob.glob(directory+"/*")
directories = [d for d in directories if os.path.isdir(d)]
if(len(directories) == 0):
directories = [directory]
# Create a queue that produces the filenames to read.
if(len(directories) == 1):
# No subdirectories, use all the images in the passed in path
filenames = glob.glob(directory+"/*."+format)
else:
filenames = glob.glob(directory+"/**/*."+format)
if(len(filenames) < self.frame_count):
print("Error: Not enough frames in data folder ", directory)
self.file_count = len(filenames)
filenames = sorted(filenames, key=alphanum_key)
if self.file_count == 0:
raise ValidationException("No images found in '" + directory + "'")
# creates arrays of filenames[:end], filenames[1:end-1], etc for serialized random batching
if self.shuffle:
frames = [tf.train.slice_input_producer([filenames], shuffle=True)[0] for i in range(self.frame_count)]
else:
input_t = [filenames[i:i-self.frame_count] for i in range(self.frame_count)]
input_queue = tf.train.slice_input_producer(input_t, shuffle=True)
frames = input_queue
# Read examples from files in the filename queue.
frames = [self.read_frame(frame, format, crop, resize) for frame in frames]
frames = self._get_data(frames)
self.frames = frames
x = tf.train.slice_input_producer([filenames], shuffle=True)[0]
y = tf.train.slice_input_producer([filenames], shuffle=True)[0]
self.x = self.read_frame(x, format, crop, resize)
self.y = self.read_frame(y, format, crop, resize)
self.x = self._get_data([self.x])
self.y = self._get_data([self.y])
def read_frame(self, t, format, crop, resize):
value = tf.read_file(t)
if format == 'jpg':
img = tf.image.decode_jpeg(value, channels=channels)
elif format == 'png':
img = tf.image.decode_png(value, channels=channels)
else:
print("[loader] Failed to load format", format)
img = tf.cast(img, tf.float32)
# Image processing for evaluation.
# Crop the central [height, width] of the image.
if crop:
resized_image = hypergan.inputs.resize_image_patch.resize_image_with_crop_or_pad(img, height, width, dynamic_shape=True)
elif resize:
resized_image = tf.image.resize_images(img, [height, width], 1)
else:
resized_image = img
tf.Tensor.set_shape(resized_image, [height,width,channels])
# This moves the image to a range of -1 to 1.
float_image = resized_image / 127.5 - 1.
return float_image
def _get_data(self, imgs):
batch_size = self.batch_size
num_preprocess_threads = 24
return tf.train.shuffle_batch(
imgs,
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity= batch_size*2, min_after_dequeue=batch_size)
inputs = VideoFrameLoader(args.batch_size, args.frames, args.shuffle)
inputs.create(args.directory,
channels=channels,
format=args.format,
crop=args.crop,
width=width,
height=height,
resize=True)
save_file = "save/model.ckpt"
class AliNextFrameGAN(BaseGAN):
"""
"""
def __init__(self, *args, **kwargs):
BaseGAN.__init__(self, *args, **kwargs)
def create(self):
config = self.config
ops = self.ops
self.g_vars = []
d_vars = []
with tf.device(self.device):
def random_t(shape):
shape[-1] //= len(config.z_distribution.projections)
return UniformEncoder(self, config.z_distribution, output_shape=shape).sample
def random_like(x):
shape = self.ops.shape(x)
return random_t(shape)
self.frame_count = len(self.inputs.frames)
self.frames = self.inputs.frames
dist = UniformEncoder(self, config.z_distribution)
dist2 = UniformEncoder(self, config.z_distribution)
dist3 = UniformEncoder(self, config.z_distribution)
dist4 = UniformEncoder(self, config.z_distribution)
dist5 = UniformEncoder(self, config.z_distribution)
uz = self.create_component(config.uz, name='u_to_z', input=dist.sample)
uc = self.create_component(config.uc, name='u_to_c', input=dist2.sample)
uz2 = self.create_component(config.uz, name='u_to_z', input=dist3.sample, reuse=True)
uc2 = self.create_component(config.uc, name='u_to_c', input=dist4.sample, reuse=True)
uc3 = self.create_component(config.uc, name='u_to_c', input=dist5.sample, reuse=True)
self.g_vars += uz.variables()
self.g_vars += uc.variables()
def ec(zt, cp,reuse=True):
if config.noise:
randt = random_like(cp)
if config.proxy:
dist3 = UniformEncoder(self, config.z_distribution)
proxy_c = self.create_component(config.proxy_c, name='rand_ct', input=dist3.sample, reuse=reuse)
randt = proxy_c.sample
print("CC", zt, randt)
c = self.create_component(config.ec, name='ec', input=zt, features={'ct-1':cp, 'n':randt}, reuse=reuse)
else:
c = self.create_component(config.ec, name='ec', input=zt, features=[cp], reuse=reuse)
if not reuse:
if config.proxy:
self.g_vars += proxy_c.variables()
self.g_vars += c.variables()
return c.sample
def ez(ft, zp,reuse=True):
z = self.create_component(config.ez, name='ez', input=ft, features=[zp], reuse=reuse)
if not reuse:
self.g_vars += z.variables()
return z.sample
def build_g(zt, ct, reuse=True):
print("Gb", reuse)
g = self.create_component(config.generator, name='generator', input=ct, features=[zt], reuse=reuse)
if not reuse:
self.g_vars += g.variables()
return g.sample
def encode_frames(fs, c0, z0, reuse=True):
cs = [c0]
zs = [z0]
x_hats = [build_g(zs[-1], cs[-1], reuse=reuse)]
for i in range(len(fs)):
print("encode frames", i)
_reuse = reuse or (i!=0)
z = ez(fs[i], zs[-1], reuse=_reuse)
c = ec(z, cs[-1], reuse=_reuse)
x_hat = build_g(z, c, reuse=True)
zs.append(z)
cs.append(c)
x_hats.append(x_hat)
return cs, zs, x_hats
def build_sim(z0, c0, steps, reuse=True):
zs = [z0]
cs = [c0]
gs = [build_g(zs[-1], cs[-1], reuse=reuse)]
for i in range(steps):
_reuse = reuse or (i!=0)
z = ez(gs[-1], zs[-1], reuse=_reuse)
c = ec(z, cs[-1], reuse=_reuse)
g = build_g(z, c, reuse=True)
zs.append(z)
cs.append(c)
gs.append(g)
return gs, cs, zs
#self.frames = [f+tf.random_uniform(self.ops.shape(f), minval=-0.1, maxval=0.1) for f in self.frames ]
cs, zs, x_hats = encode_frames(self.frames, uc2.sample, uz2.sample, reuse=False)
self.zs = zs
self.cs = cs
ugs, ucs, uzs = build_sim(uz.sample, uc.sample, len(self.frames))
ugs_next, ucs_next, uzs_next = build_sim(uzs[-1], ucs[-1], len(self.frames))
re_ucs_next, re_uzs_next, re_ugs_next = encode_frames(ugs_next[1:], ucs_next[0], uzs_next[0])
gs_next, cs_next, zs_next = build_sim(zs[-1], cs[-1], len(self.frames))
#gs_next_next, cs_next_next, zs_next_next = build_sim(zs[-1], cs[-1], 21)
re_ucs, re_uzs, ugs_hat = encode_frames(ugs[1:], ucs[0], uzs[0])
re_cs_next, re_zs_next, re_gs_next = encode_frames(gs_next[1:], cs_next[0], zs_next[0])
self.x_hats = x_hats
t0 = tf.concat(zs[1:], axis=3)
t1 = tf.concat(re_uzs[:-1], axis=3)
t2 = tf.concat(re_zs_next[:-1], axis=3)
t3 = tf.concat(re_uzs_next[:-1], axis=3)
t4 = tf.concat(re_uzs[:-1], axis=3)
f0 = tf.concat(cs[1:], axis=3)
f1 = tf.concat(re_ucs[:-1], axis=3)
f2 = tf.concat(re_cs_next[:-1], axis=3)
f3 = tf.concat(re_ucs_next[:-1], axis=3)
stack = [t0,t1, t2]#, t4, t5]
stacked = ops.concat(stack, axis=0)
features =ops.concat([f0,f1,f2], axis=0)
d = self.create_component(config.z_discriminator, name='d_img', input=stacked, features=[features])
d_vars += d.variables()
l = self.create_loss(config.loss, d, None, None, len(stack))
d_loss = l.d_loss
g_loss = l.g_loss
self.video_generator_last_z = uzs[0]
self.video_generator_last_c = ucs[0]
self.gs_next = gs_next
ztn = uzs[1]
ctn = ucs[1]
self.video_generator_last_zn = ztn
self.video_generator_last_cn = ctn
gen = hc.Config({"sample":ugs[0]})
if config.use_x:
def rotate(first, second, offset=None):
rotations = [tf.concat(first[:offset], axis=3)]
elem = first
for e in second:
elem = elem[1:]+[e]
rotations.append(tf.concat(elem[:offset], axis=3))
return rotations
t0 = tf.concat(self.frames[1:], axis=3)
f0 = tf.concat(cs[1:-1], axis=3)
stack = [t0]
features = [f0]
if config.encode_forward:
stack += rotate(self.frames[2:]+[gs_next[0]], gs_next[1:])
features += rotate(cs[2:], cs_next[1:])
#stack += [gs_next_next[-frames:]]
if config.encode_ug:
stack += rotate(ugs[:-2], ugs[-2:]+ugs_next[1:])
features += rotate(ucs[:-2], ucs[-2:]+ucs_next[1:])
stacked = ops.concat(stack, axis=0)
features = tf.concat(features, axis=0)
d = self.create_component(config.discriminator, name='d_manifold', input=stacked, features=[features])
d_vars += d.variables()
l = self.create_loss(config.loss, d, None, None, len(stack))
d_loss += l.d_loss
g_loss += l.g_loss
gx_sample = gen.sample
gy_sample = gen.sample
gx = hc.Config({"sample":gx_sample})
gy = hc.Config({"sample":gy_sample})
last_frame = tf.slice(gy_sample, [0,0,0,0], [-1, -1, -1, 3])
self.y = hc.Config({"sample":last_frame})
self.gy = self.y
self.gx = self.y
self.uniform_sample = gen.sample
self.preview = tf.concat(self.inputs.frames[:-1] + [gen.sample], axis=1)#tf.concat(tf.split(gen.sample, (self.ops.shape(gen.sample)[3]//3), 3), axis=1)
metrics = {
'g_loss': g_loss,
'd_loss': d_loss
}
trainers = []
lossa = hc.Config({'sample': [d_loss, g_loss], 'metrics': metrics, 'd_fake': l.d_fake, 'd_real': l.d_real, 'config': l.config})
self.loss = lossa
self._g_vars = self.g_vars
self._d_vars = d_vars
trainer = self.create_component(config.trainer, loss = lossa, g_vars = self.g_vars, d_vars = d_vars)
self.session.run(tf.global_variables_initializer())
self.trainer = trainer
self.generator = gx
self.z_hat = gy.sample
self.x_input = self.inputs.frames[0]
self.uga = self.y.sample
self.uniform_encoder = dist
def g_vars(self):
return self._g_vars
def d_vars(self):
return self._d_vars
def fitness_inputs(self):
return self.inputs.frames
def create_loss(self, loss_config, discriminator, x, generator, split):
loss = self.create_component(loss_config, discriminator = discriminator, x=x, generator=generator, split=split)
return loss
def create_encoder(self, x_input, name='input_encoder', reuse=False):
config = self.config
input_encoder = dict(config.input_encoder or config.g_encoder or config.generator)
encoder = self.create_component(input_encoder, name=name, input=x_input, reuse=reuse)
return encoder
def create_z_discriminator(self, z, z_hat):
config = self.config
z_discriminator = dict(config.z_discriminator or config.discriminator)
z_discriminator['layer_filter']=None
net = tf.concat(axis=0, values=[z, z_hat])
encoder_discriminator = self.create_component(z_discriminator, name='z_discriminator', input=net)
return encoder_discriminator
def create_cycloss(self, x_input, x_hat):
config = self.config
ops = self.ops
distance = config.distance or ops.lookup('l1_distance')
pe_layers = self.gan.skip_connections.get_array("progressive_enhancement")
cycloss_lambda = config.cycloss_lambda
if cycloss_lambda is None:
cycloss_lambda = 10
if(len(pe_layers) > 0):
mask = self.progressive_growing_mask(len(pe_layers)//2+1)
cycloss = tf.reduce_mean(distance(mask*x_input,mask*x_hat))
cycloss *= mask
else:
cycloss = tf.reduce_mean(distance(x_input, x_hat))
cycloss *= cycloss_lambda
return cycloss
def create_z_cycloss(self, z, x_hat, encoder, generator):
config = self.config
ops = self.ops
total = None
distance = config.distance or ops.lookup('l1_distance')
if config.z_hat_lambda:
z_hat_cycloss_lambda = config.z_hat_cycloss_lambda
recode_z_hat = encoder.reuse(x_hat)
z_hat_cycloss = tf.reduce_mean(distance(z_hat,recode_z_hat))
z_hat_cycloss *= z_hat_cycloss_lambda
if config.z_cycloss_lambda:
recode_z = encoder.reuse(generator.reuse(z))
z_cycloss = tf.reduce_mean(distance(z,recode_z))
z_cycloss_lambda = config.z_cycloss_lambda
if z_cycloss_lambda is None:
z_cycloss_lambda = 0
z_cycloss *= z_cycloss_lambda
if config.z_hat_lambda and config.z_cycloss_lambda:
total = z_cycloss + z_hat_cycloss
elif config.z_cycloss_lambda:
total = z_cycloss
elif config.z_hat_lambda:
total = z_hat_cycloss
return total
def input_nodes(self):
"used in hypergan build"
if hasattr(self.generator, 'mask_generator'):
extras = [self.mask_generator.sample]
else:
extras = []
return extras + [
self.x_input
]
def output_nodes(self):
"used in hypergan build"
if hasattr(self.generator, 'mask_generator'):
extras = [
self.mask_generator.sample,
self.generator.g1x,
self.generator.g2x
]
else:
extras = []
return extras + [
self.encoder.sample,
self.generator.sample,
self.uniform_sample,
self.generator_int
]
class VideoFrameSampler(BaseSampler):
def __init__(self, gan, samples_per_row=8):
sess = gan.session
self.x = gan.session.run(gan.preview)
print("__________", np.shape(self.x),'---oo')
frames = np.shape(self.x)[1]//height
self.frames=frames
self.x = np.split(self.x, frames, axis=1)
self.i = 0
BaseSampler.__init__(self, gan, samples_per_row)
def _sample(self):
gan = self.gan
z_t = gan.uniform_encoder.sample
sess = gan.session
feed_dict = {}
for i,f in enumerate(gan.inputs.frames):
if len(self.x) > i+1:
feed_dict[f]=self.x[i+1]
#if(1 + self.frames < len(self.x)):
# feed_dict[f] = self.x[1+self.frames]
self.x = sess.run(gan.preview, feed_dict)
frames = np.shape(self.x)[1]//height
self.x = np.split(self.x, frames, axis=1)
x_ = self.x[-1]
time.sleep(0.15)
return {
'generator': x_
}
class TrainingVideoFrameSampler(BaseSampler):
def __init__(self, gan, samples_per_row=8):
self.z = None
self.i = 0
BaseSampler.__init__(self, gan, samples_per_row)
def _sample(self):
gan = self.gan
z_t = gan.uniform_encoder.sample
sess = gan.session
return {
'generator': gan.session.run(gan.preview)
}
def setup_gan(config, inputs, args):
gan = AliNextFrameGAN(config, inputs=inputs)
if(args.action != 'search' and os.path.isfile(save_file+".meta")):
gan.load(save_file)
tf.train.start_queue_runners(sess=gan.session)
config_name = args.config
GlobalViewer.title = "[hypergan] next-frame " + config_name
GlobalViewer.enabled = args.viewer
GlobalViewer.zoom = args.zoom
return gan
def train(config, inputs, args):
gan = setup_gan(config, inputs, args)
sampler = lookup_sampler(args.sampler or TrainingVideoFrameSampler)(gan)
samples = 0
#metrics = [batch_accuracy(gan.inputs.x, gan.uniform_sample), batch_diversity(gan.uniform_sample)]
#sum_metrics = [0 for metric in metrics]
for i in range(args.steps):
gan.step()
if args.action == 'train' and i % args.save_every == 0 and i > 0:
print("saving " + save_file)
gan.save(save_file)
if i % args.sample_every == 0:
sample_file="samples/%06d.png" % (samples)
samples += 1
sampler.sample(sample_file, args.save_samples)
#if i > args.steps * 9.0/10:
# for k, metric in enumerate(gan.session.run(metrics)):
# print("Metric "+str(k)+" "+str(metric))
# sum_metrics[k] += metric
tf.reset_default_graph()
return []#sum_metrics
def sample(config, inputs, args):
gan = setup_gan(config, inputs, args)
sampler = lookup_sampler(args.sampler or VideoFrameSampler)(gan)
samples = 0
for i in range(args.steps):
sample_file="samples/%06d.png" % (samples)
samples += 1
sampler.sample(sample_file, args.save_samples)
def search(config, inputs, args):
metrics = train(config, inputs, args)
config_filename = "colorizer-"+str(uuid.uuid4())+'.json'
hc.Selector().save(config_filename, config)
with open(args.search_output, "a") as myfile:
myfile.write(config_filename+","+",".join([str(x) for x in metrics])+"\n")
if args.action == 'train':
metrics = train(config, inputs, args)
print("Resulting metrics:", metrics)
elif args.action == 'sample':
sample(config, inputs, args)
elif args.action == 'search':
search(config, inputs, args)
else:
print("Unknown action: "+args.action)
| mit | -7,698,782,246,398,737,000 | 36.021559 | 163 | 0.565848 | false | 3.516144 | true | false | false |
lukehsiao/SecretMessageMaker | SecretMessageMaker.py | 1 | 3306 | #! /usr/bin/env python
import sys, os, io, string
import argparse
def parse_args():
"""
Function: parse_args
--------------------
Parse the commandline arguments for SecretMessageMaker
"""
# Define what commandline arguments can be accepted
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--encrypt", action="store_const", const=True,
help="run program in encryption mode. Default: decrypt")
parser.add_argument('filename', metavar="FILE",
help="path of input text file (required)")
args = parser.parse_args()
# Validate the filename
file_name = args.filename
if not os.path.exists(file_name):
parser.error("The file %s does not exist!" % file_name)
# Check to make sure the user isn't going to accidentally override
# something, or accidentally encrypt something twice.
if "secret_" in args.filename and args.encrypt:
# If you're encrypted an already encrypted message
parser.error("[error] You're ENCRYPTING an encrypted file!")
elif "secret_" not in args.filename and not args.encrypt:
parser.error("[error] You're DECRYPTING a plain file!")
# Let the user know which commands the program sees, and which files will
# be made.
if args.encrypt:
print "[info] ENCRYPTING %s into secret_%s..." % (file_name, file_name)
else:
print ("[info] DECRYPTING %s into %s..." % (file_name,
file_name.replace("secret_", '')))
return args
def encrypt(plain_line):
"""
Function: encrypt
--------------------
Turns the human-readable line of text into a non-human-readable line of
encrypted text.
@param plain_line the line to be encrypted
@return the encrypted version of the 'line'
"""
char_list = list(plain_line)
encrypted_list = []
for character in char_list:
encrypted_list.append(str(ord(character)))
return ' '.join(encrypted_list)
def decrypt(encrypted_line):
"""
Function: decrypt
--------------------
Turns the encrypted line of text into a human-readable line of text.
@param encrypted_line the line to be encrypted
@return the encrypted version of the 'line'
"""
num_list = encrypted_line.split()
decrypted_list = []
for number in num_list:
decrypted_list.append(chr(int(number)))
return ''.join(decrypted_list)
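# A quick round-trip sketch of the two helpers above (illustrative only, not
# part of the original script): every character becomes its decimal ordinal,
# separated by spaces, and decryption simply reverses that mapping.
#
#   encrypt("Hi")      -> "72 105"
#   decrypt("72 105")  -> "Hi"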
# Main Function
if __name__ == "__main__":
args = parse_args() # parse commandline arguments
# Open input file
in_file = open(args.filename, 'r')
# Open output file
if args.encrypt:
# If encrypting, append secret_ to the front
out_file = open("secret_" + args.filename, 'w')
else:
# If decrypting, remove secret_ from the filename
out_file = open(args.filename.replace("secret_", ''), 'w')
# Iterate over every line of the file
for line in in_file:
if args.encrypt:
# Run encryption algorithm
out_file.write(encrypt(line) + ' ') # add space between lines
else:
# Run decryption algorithm
out_file.write(decrypt(line))
# Close the files that we were using
in_file.close()
out_file.close()
| mit | -335,721,031,060,413,950 | 31.411765 | 80 | 0.613128 | false | 4.122195 | false | false | false |
onelab-eu/sfa | sfa/managers/registry_manager_openstack.py | 1 | 4232 | import types
# for get_key_from_incoming_ip
import tempfile
import os
import commands
from sfa.util.faults import RecordNotFound, AccountNotEnabled, PermissionError, MissingAuthority, \
UnknownSfaType, ExistingRecord, NonExistingRecord
from sfa.util.sfatime import utcparse, datetime_to_epoch
from sfa.util.prefixTree import prefixTree
from sfa.util.xrn import Xrn, get_authority, hrn_to_urn, urn_to_hrn
from sfa.util.version import version_core
from sfa.util.sfalogging import logger
from sfa.trust.gid import GID
from sfa.trust.credential import Credential
from sfa.trust.certificate import Certificate, Keypair, convert_public_key
from sfa.trust.gid import create_uuid
from sfa.storage.model import make_record,RegRecord
#from sfa.storage.alchemy import dbsession
from sfa.storage.alchemy import global_dbsession
dbsession = global_dbsession
from sfa.managers.registry_manager import RegistryManager
class RegistryManager(RegistryManager):
def GetCredential(self, api, xrn, type, caller_xrn = None):
# convert xrn to hrn
if type:
hrn = urn_to_hrn(xrn)[0]
else:
hrn, type = urn_to_hrn(xrn)
# Is this a root or sub authority
auth_hrn = api.auth.get_authority(hrn)
if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN:
auth_hrn = hrn
auth_info = api.auth.get_auth_info(auth_hrn)
# get record info
filter = {'hrn': hrn}
if type:
filter['type'] = type
record=dbsession.query(RegRecord).filter_by(**filter).first()
if not record:
raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
# verify_cancreate_credential requires that the member lists
# (researchers, pis, etc) be filled in
logger.debug("get credential before augment dict, keys=%s"%record.__dict__.keys())
api.driver.augment_records_with_testbed_info (record.__dict__)
logger.debug("get credential after augment dict, keys=%s"%record.__dict__.keys())
if not api.driver.is_enabled (record.__dict__):
raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record.email))
# get the callers gid
# if caller_xrn is not specified assume the caller is the record
# object itself.
if not caller_xrn:
caller_hrn = hrn
caller_gid = record.get_gid_object()
else:
caller_hrn, caller_type = urn_to_hrn(caller_xrn)
caller_filter = {'hrn': caller_hrn}
if caller_type:
caller_filter['type'] = caller_type
caller_record = dbsession.query(RegRecord).filter_by(**caller_filter).first()
if not caller_record:
raise RecordNotFound("Unable to associated caller (hrn=%s, type=%s) with credential for (hrn: %s, type: %s)"%(caller_hrn, caller_type, hrn, type))
caller_gid = GID(string=caller_record.gid)
object_hrn = record.get_gid_object().get_hrn()
rights = api.auth.determine_user_rights(caller_hrn, record)
# make sure caller has rights to this object
if rights.is_empty():
raise PermissionError(caller_hrn + " has no rights to " + record.hrn)
object_gid = GID(string=record.gid)
new_cred = Credential(subject = object_gid.get_subject())
new_cred.set_gid_caller(caller_gid)
new_cred.set_gid_object(object_gid)
new_cred.set_issuer_keys(auth_info.get_privkey_filename(), auth_info.get_gid_filename())
#new_cred.set_pubkey(object_gid.get_pubkey())
new_cred.set_privileges(rights)
new_cred.get_privileges().delegate_all_privileges(True)
if hasattr(record,'expires'):
date = utcparse(record.expires)
expires = datetime_to_epoch(date)
new_cred.set_expiration(int(expires))
auth_kind = "authority,ma,sa"
# Parent not necessary, verify with certs
#new_cred.set_parent(api.auth.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))
new_cred.encode()
new_cred.sign()
return new_cred.save_to_string(save_parents=True)
| mit | 7,620,469,847,222,133,000 | 42.628866 | 162 | 0.648157 | false | 3.577346 | false | false | false |
xianjunzhengbackup/code | data science/machine_learning_for_the_web/chapter_7/server_movierecsys/books_recsys_app/management/commands/get_plotsfromtitles.py | 1 | 2962 | from django.core.management.base import BaseCommand
import os
import optparse
import numpy as np
import json
import pandas as pd
import requests
#python manage.py get_plotsfromtitles --input=/Users/andrea/Desktop/book_packt/chapters/5/data/utilitymatrix.csv --outputplots=plots.csv --outputumatrix='umatrix.csv'
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
optparse.make_option('-i', '--input', dest='umatrixfile',
type='string', action='store',
help=('Input utility matrix')),
optparse.make_option('-o', '--outputplots', dest='plotsfile',
type='string', action='store',
help=('output file')),
optparse.make_option('--om', '--outputumatrix', dest='umatrixoutfile',
type='string', action='store',
help=('output file')),
)
def getplotfromomdb(self,col,df_moviesplots,df_movies,df_utilitymatrix):
string = col.split(';')[0]
title=string[:-6].strip()
year = string[-5:-1]
plot = ' '.join(title.split(' ')).encode('ascii','ignore')+'. '
url = "http://www.omdbapi.com/?t="+title+"&y="+year+"&plot=full&r=json"
headers={"User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36"}
r = requests.get(url,headers=headers)
jsondata = json.loads(r.content)
if 'Plot' in jsondata:
#store plot + title
plot += jsondata['Plot'].encode('ascii','ignore')
if plot!=None and plot!='' and plot!=np.nan and len(plot)>3:#at least 3 letters to consider the movie
df_moviesplots.loc[len(df_moviesplots)]=[string,plot]
df_utilitymatrix[col] = df_movies[col]
print len(df_utilitymatrix.columns)
return df_moviesplots,df_utilitymatrix
def handle(self, *args, **options):
pathutilitymatrix = options['umatrixfile']
df_movies = pd.read_csv(pathutilitymatrix)
movieslist = list(df_movies.columns[1:])
df_moviesplots = pd.DataFrame(columns=['title','plot'])
df_utilitymatrix = pd.DataFrame()
df_utilitymatrix['user'] = df_movies['user']
print 'nmovies:',len(movieslist)
for m in movieslist[:]:
df_moviesplots,df_utilitymatrix=self.getplotfromomdb(m,df_moviesplots,df_movies,df_utilitymatrix)
print len(df_movies.columns),'--',len(df_utilitymatrix.columns)
outputfile = options['plotsfile']
df_moviesplots.to_csv(outputfile, index=False)
outumatrixfile = options['umatrixoutfile']
df_utilitymatrix.to_csv(outumatrixfile, index=False)
| mit | -2,961,374,809,798,989,300 | 43.223881 | 167 | 0.574274 | false | 3.970509 | false | false | false |
SpaceVim/SpaceVim | bundle/deoplete.nvim/test/rplugin/python3/deoplete/test_converter_reorder_attr.py | 2 | 1047 | from deoplete.filter.converter_reorder_attr import Filter
candidates = [
{'word': 'Apple', 'kind': 'Fruit'},
{'word': 'Banana', 'kind': 'Fruit'},
{'word': 'Pen', 'kind': 'Object'},
{'word': 'Cherry Pie', 'kind': 'Pie'},
]
def test_reorder():
candidates_copy = candidates[:]
preferred_order = {'kind': ['Pie', 'Fruit']}
expected_candidates = [
{'word': 'Cherry Pie', 'kind': 'Pie'},
{'word': 'Apple', 'kind': 'Fruit'},
{'word': 'Banana', 'kind': 'Fruit'},
{'word': 'Pen', 'kind': 'Object'},
]
assert expected_candidates == Filter.filter_attrs(
candidates_copy, preferred_order
)
def test_filter():
candidates_copy = candidates[:]
preferred_order = {'word': ['!Pen', 'Banana']}
expected_candidates = [
{'word': 'Banana', 'kind': 'Fruit'},
{'word': 'Apple', 'kind': 'Fruit'},
{'word': 'Cherry Pie', 'kind': 'Pie'},
]
assert expected_candidates == Filter.filter_attrs(
candidates_copy, preferred_order
)
| gpl-3.0 | -2,882,397,963,730,283,000 | 24.536585 | 57 | 0.541547 | false | 3.38835 | false | false | false |
AdrianArmaselu/aDi-music | graphmodel/appio/preprocessing.py | 1 | 1424 | import midi
import sys
from graphmodel.utils import MidiUtils
__author__ = 'Adisor'
class Analyzer(object):
"""
This class analyzes, curates, validates, and pre-processes a MIDI file before execution.
With it, we can capture events that are not being processed, as well as MIDI patterns that break our
rules or assumptions. If any pattern breaks those rules or assumptions, the program exits.
"""
DO_EXIT = True
def __init__(self, midi_file_name):
self.pattern = midi.read_midifile(midi_file_name)
def perform_analysis(self):
# check for unprocessed events
for track in self.pattern:
channel = -1
for event in track:
if MidiUtils.is_channel_event(event):
if channel == -1:
channel = event.channel
if channel != event.channel:
print "TRACK HAS MULTIPLE CHANNELS"
if Analyzer.DO_EXIT:
sys.exit(-1)
# global meta events should be in the first track
for i in range(1, len(self.pattern), 1):
for event in self.pattern[i]:
if MidiUtils.is_song_meta_event(event):
print "GLOBAL META EVENTS NEED TO BE IN THE FIRST TRACK", event
if Analyzer.DO_EXIT:
sys.exit(-1)
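# Typical usage sketch (illustrative; 'song.mid' is a hypothetical file path):
#
#   analyzer = Analyzer('song.mid')
#   analyzer.perform_analysis()  # exits with -1 if a rule/assumption is violated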
| mit | -8,046,057,735,247,845,000 | 36.473684 | 109 | 0.572331 | false | 4.315152 | false | false | false |
newera912/WeatherTransportationProject | outputs/mesonetPlots/hourlyPatterns/graphPlot.py | 1 | 1771 | import matplotlib.pyplot as plt
import networkx as nx
import os
G=nx.Graph()
posp=[[0,0.1],[1,0.0],[2,0.1],[4,0.1],[6,0.25],[7,0.1],[8,0.25],[10,0],[12,0.0],[14,0.1]]
for node in range(10):
G.add_node(node,pos=(posp[node][0],posp[node][1]))
edges=[(0,1),(1,2),(2,3),(3,4),(3,5),(4,5),(4,6),(5,6),(5,7),(6,7),(7,8),(7,9),(8,9)]
abnode=[1,3,4]
months=["201603","201604","201605","201606","201607","201608","201609"]
for mon in months[:1]:
root="F:/workspace/git/Graph-MP/outputs/mesonetPlots/hourlyPatterns/"+mon+"/"
outRoot="F:/workspace/git/Graph-MP/outputs/mesonetPlots/hourlyPatternsGraph/"+mon+"_png/"
if os.path.exists(outRoot)==False:
os.makedirs(outRoot)
for name in os.listdir(root):
with open(root+name,"r") as f:
data=[]
for line in f.readlines():
data.append(map(int,line.strip().split()))
for i,d in enumerate(data):
color_map = []
for node in G:
if d[node]==1:
color_map.append('red')
else:
color_map.append('green')
G.add_edges_from(edges)
fig=plt.figure(1)
print name.split('.')[0]+'-hour-'+str(i)
plt.title(name.split('.')[0]+'-hour-'+str(i)+" Red stations have changing patterns")
nx.draw(G,nx.get_node_attributes(G, 'pos'),with_labels=True,node_color = color_map,node_size=400.0) # networkx draw()
plt.draw() # pyplot draw()
plt.tight_layout()
fig.savefig(outRoot+name.split('.')[0]+'-hour-'+str(i)+'.jpg', bbox_inches="tight")
plt.close()
| gpl-2.0 | 2,175,961,076,609,691,000 | 43.3 | 134 | 0.5048 | false | 3.156863 | false | false | false |
MOLSSI-BSE/basis_set_exchange | basis_set_exchange/curate/add_basis.py | 1 | 13528 | '''
Add a basis set to the library
'''
import os
import datetime
from ..fileio import read_json_basis, write_json_basis
from ..misc import expand_elements
from ..validator import validate_data
from ..skel import create_skel
from ..readers import read_formatted_basis_file
from .metadata import create_metadata_file
def add_from_components(component_files, data_dir, subdir, file_base, name, family, role, description, version,
revision_description):
'''
Add a basis set to this library that is a combination of component files
This takes in a list of component basis files and creates a new basis set for the intersection
of all the elements contained in those files. This creates the element and table basis set
files in the given data_dir (and subdir). The metadata file for the basis is created if it
doesn't exist, and the main metadata file is also updated.
Parameters
----------
component_files : str
Path to component json files (in BSE format already)
data_dir : str
Path to the data directory to add the data to
subdir : str
Subdirectory of the data directory to add the basis set to
file_base : str
Base name for new files
name : str
Name of the basis set
family : str
Family to which this basis set belongs
role : str
Role of the basis set (orbital, etc)
description : str
Description of the basis set
version : str
Version of the basis set
revision_description : str
Description of this version of the basis set
'''
if not component_files:
raise RuntimeError("Need at least one component file to create a basis set from")
# Determine what files have which elements
valid_elements = None
# And the relative path of the component files to the data dir
cfile_relpaths = []
for cfile in component_files:
cfile_data = read_json_basis(cfile)
cfile_elements = set(cfile_data['elements'].keys())
relpath = os.path.relpath(cfile, data_dir)
if valid_elements is None:
valid_elements = cfile_elements
else:
valid_elements = valid_elements.intersection(cfile_elements)
cfile_relpaths.append(relpath)
valid_elements = sorted(valid_elements, key=lambda x: int(x))
# Start the data files for the element and table json
element_file_data = create_skel('element')
element_file_data['name'] = name
element_file_data['description'] = description
element_file_name = '{}.{}.element.json'.format(file_base, version)
element_file_relpath = os.path.join(subdir, element_file_name)
element_file_path = os.path.join(data_dir, element_file_relpath)
table_file_data = create_skel('table')
table_file_data['revision_description'] = revision_description
table_file_data['revision_date'] = datetime.date.today().isoformat()
table_file_name = '{}.{}.table.json'.format(file_base, version)
# and the metadata file
meta_file_data = create_skel('metadata')
meta_file_data['names'] = [name]
meta_file_data['family'] = family
meta_file_data['description'] = description
meta_file_data['role'] = role
meta_file_name = '{}.metadata.json'.format(file_base)
# These get created directly in the top-level data directory
table_file_path = os.path.join(data_dir, table_file_name)
meta_file_path = os.path.join(data_dir, meta_file_name)
# Can just make all the entries for the table file pretty easily
# (we add the relative path to the location of the element file,
# which resides in subdir)
table_file_entry = element_file_relpath
table_file_data['elements'] = {k: table_file_entry for k in valid_elements}
# Add to the element file data
for el in valid_elements:
element_file_data['elements'][el] = {'components': cfile_relpaths}
# Verify all data using the schema
validate_data('element', element_file_data)
validate_data('table', table_file_data)
######################################################################################
# Before creating any files, check that all the files don't already exist.
# Yes, there is technically a race condition (files could be created between the
# check and then actually writing out), but if that happens, you are probably using
# this library wrong
#
# Note that the metadata file may exist already. That is ok
######################################################################################
if os.path.exists(element_file_path):
raise RuntimeError("Element json file {} already exists".format(element_file_path))
if os.path.exists(table_file_path):
raise RuntimeError("Table json file {} already exists".format(table_file_path))
#############################################
# Actually create all the files
#############################################
# First, create the subdirectory
subdir_path = os.path.join(data_dir, subdir)
if not os.path.exists(subdir_path):
os.makedirs(subdir_path)
write_json_basis(element_file_path, element_file_data)
write_json_basis(table_file_path, table_file_data)
# Create the metadata file if it doesn't exist already
if not os.path.exists(meta_file_path):
write_json_basis(meta_file_path, meta_file_data)
# Update the metadata file
metadata_file = os.path.join(data_dir, 'METADATA.json')
create_metadata_file(metadata_file, data_dir)
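# Illustrative call of add_from_components() (all paths and names below are
# hypothetical, chosen only to show the expected argument shapes):
#
#   add_from_components(
#       component_files=['data/dunning/cc-pVDZ.1.json'],
#       data_dir='data', subdir='dunning', file_base='cc-pVDZ',
#       name='cc-pVDZ', family='dunning', role='orbital',
#       description='cc-pVDZ basis set', version='1',
#       revision_description='Initial import')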
def add_basis_from_dict(bs_data,
data_dir,
subdir,
file_base,
name,
family,
role,
description,
version,
revision_description,
data_source,
refs=None):
'''Add a basis set to this library
This takes in a basis set dictionary, and creates the component,
element, and table basis set files in the given data_dir (and
subdir). The metadata file for the basis is created if it doesn't
exist, and the main metadata file is also updated.
Parameters
----------
bs_data : dict
Basis set dictionary
data_dir : str
Path to the data directory to add the data to
subdir : str
Subdirectory of the data directory to add the basis set to
file_base : str
Base name for new files
name : str
Name of the basis set
family : str
Family to which this basis set belongs
role : str
Role of the basis set (orbital, etc)
description : str
Description of the basis set
version : str
Version of the basis set
revision_description : str
Description of this version of the basis set
data_source : str
Description of where this data came from
refs : dict or str
Mapping of references to elements. This can be a dictionary with a compressed
string of elements as keys and a list of reference strings as values.
For example, {'H,Li-B,Kr': ['kumar2018a']}
If a list or string is passed, then those reference(s) will be used for
all elements.
Elements that exist in the file but do not have a reference are given the
usual 'noref' extension and the references entry is empty.
'''
# Read the basis set data into a component file, and add the description
bs_data['description'] = description
bs_data['data_source'] = data_source
if refs is None:
refs = []
# We keep track of which elements we've done so that
# we can detect duplicates in the references (which would be an error)
# (and also handle elements with no reference)
orig_elements = bs_data['elements']
done_elements = []
# If a string or list of strings, use that as a reference for all elements
if isinstance(refs, str):
for k, v in bs_data['elements'].items():
v['references'] = [refs]
elif isinstance(refs, list):
for k, v in bs_data['elements'].items():
v['references'] = refs
elif isinstance(refs, dict):
for k, v in refs.items():
# Expand the string a list of integers (as strings)
elements = expand_elements(k, True)
# Make sure we have info for the given elements
# and that there are no duplicates
for el in elements:
if el not in orig_elements:
raise RuntimeError("Element {} not found in file {}".format(el, bs_file))
if el in done_elements:
raise RuntimeError("Duplicate element {} in reference string {}".format(el, k))
if isinstance(v, str):
bs_data['elements'][el]['references'] = [v]
else:
bs_data['elements'][el]['references'] = v
done_elements.extend(elements)
# Handle elements without a reference
noref_elements = set(orig_elements.keys()) - set(done_elements)
if noref_elements:
for el in noref_elements:
bs_data['elements'][el]['references'] = []
else:
raise RuntimeError('refs should be a string, a list, or a dictionary')
# Create the filenames for the components
# Also keep track of where data for each element is (for the element and table files)
component_file_name = file_base + '.' + str(version) + '.json'
component_file_relpath = os.path.join(subdir, component_file_name)
component_file_path = os.path.join(data_dir, component_file_relpath)
# Verify all data using the schema
validate_data('component', bs_data)
######################################################################################
# Before creating any files, check that all the files don't already exist.
# Yes, there is technically a race condition (files could be created between the
# check and then actually writing out), but if that happens, you are probably using
# this library wrong
#
# Note that the metadata file may exist already. That is ok
######################################################################################
if os.path.exists(component_file_path):
raise RuntimeError("Component json file {} already exists".format(component_file_path))
#############################################
# Actually create all the files
#############################################
# First, create the subdirectory
subdir_path = os.path.join(data_dir, subdir)
if not os.path.exists(subdir_path):
os.makedirs(subdir_path)
write_json_basis(component_file_path, bs_data)
# Do all the rest
add_from_components([component_file_path], data_dir, subdir, file_base, name, family, role, description, version,
revision_description)
def add_basis(bs_file,
data_dir,
subdir,
file_base,
name,
family,
role,
description,
version,
revision_description,
data_source,
refs=None,
file_fmt=None):
'''
Add a basis set to this library
This takes in a single file containing the basis set in some format, parses it, and
creates the component, element, and table basis set files in the given data_dir (and subdir).
The metadata file for the basis is created if it doesn't exist, and the main metadata file is
also updated.
Parameters
----------
bs_file : str
Path to the file with formatted basis set information
data_dir : str
Path to the data directory to add the data to
subdir : str
Subdirectory of the data directory to add the basis set to
file_base : str
Base name for new files
name : str
Name of the basis set
family : str
Family to which this basis set belongs
role : str
Role of the basis set (orbital, etc)
description : str
Description of the basis set
version : str
Version of the basis set
revision_description : str
Description of this version of the basis set
data_source : str
Description of where this data came from
refs : dict or str
Mapping of references to elements. This can be a dictionary with a compressed
string of elements as keys and a list of reference strings as values.
For example, {'H,Li-B,Kr': ['kumar2018a']}
If a list or string is passed, then those reference(s) will be used for
all elements.
Elements that exist in the file but do not have a reference are given the
usual 'noref' extension and the references entry is empty.
file_fmt : str
Format of the input basis data (None = autodetect)
'''
# Read the basis set data into a component file, and add the description
bs_data = read_formatted_basis_file(bs_file, file_fmt, validate=True, as_component=True)
# The rest is done by the dict routine
add_basis_from_dict(bs_data, data_dir, subdir, file_base, name, family, role, description, version,
revision_description, data_source, refs)
| bsd-3-clause | -6,095,067,406,082,727,000 | 37.431818 | 117 | 0.612803 | false | 4.423806 | false | false | false |
wolcomm/prngmgr | prngmgr/views/utils.py | 1 | 2529 | # Copyright 2016-2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""View utilities module for prngmgr."""
from prngmgr import models
def render_alerts(calculated):
"""Render alerts dict."""
if calculated['count']['possible'] == 0:
calculated['alert'] = {
'possible': models.ALERT_NONE,
'provisioned': models.ALERT_NONE,
'established': models.ALERT_NONE,
}
else:
if calculated['count']['provisioned'] == 0:
calculated['alert'] = {
'possible': models.ALERT_SUCCESS,
'provisioned': models.ALERT_DANGER,
'established': models.ALERT_DANGER,
}
elif calculated['count']['provisioned'] < calculated['count']['possible']: # noqa
if calculated['count']['established'] < calculated['count']['provisioned']: # noqa
calculated['alert'] = {
'possible': models.ALERT_SUCCESS,
'provisioned': models.ALERT_WARNING,
'established': models.ALERT_DANGER,
}
else:
calculated['alert'] = {
'possible': models.ALERT_SUCCESS,
'provisioned': models.ALERT_WARNING,
'established': models.ALERT_WARNING,
}
else:
if calculated['count']['established'] < calculated['count']['provisioned']: # noqa
calculated['alert'] = {
'possible': models.ALERT_SUCCESS,
'provisioned': models.ALERT_SUCCESS,
'established': models.ALERT_DANGER,
}
else:
calculated['alert'] = {
'possible': models.ALERT_SUCCESS,
'provisioned': models.ALERT_SUCCESS,
'established': models.ALERT_SUCCESS,
}
return calculated
| apache-2.0 | 6,014,259,822,061,085,000 | 41.15 | 95 | 0.568604 | false | 4.468198 | false | false | false |
OlfillasOdikno/mctools | altExploit.py | 1 | 2070 | """
@author: OlfillasOdikno
@info: This tool was written in good faith. Please do not abuse this tool. Use it only with permission of the person/company you want to test.
@license:
/*****************************************************************************
* McTools *
* Copyright (C) 2017 Olfillas Odikno *
* *
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
*****************************************************************************/
"""
import requests
import time
linkBase= "http://www.x.x" #insert here the url of your generator
linkGen = "/lib/generate.php?generator="
genid = int(raw_input("Gen id: "))
i = 0
while(i < 20):
try:
r = requests.post(linkBase+linkGen+str(genid), data={'username': '', 'password': ''}, headers={"Referer": linkBase + "/login.php"})
resp = r.content
if("@" in resp):
print resp
else:
time.sleep(0.1)
except:
pass
i+=1;
| gpl-3.0 | 5,029,948,643,112,208,000 | 47.285714 | 142 | 0.452657 | false | 4.825175 | false | false | false |
Princessgladys/googleresourcefinder | tools/load_kml_hospitals.py | 1 | 1573 | from model import *
from setup import *
import datetime
import kml
import re
import unicodedata
import utils
BEDS_RE = re.compile(r'(\d+) *[bB]eds')
def load_hospitals(version, records):
"""Loads a list of hospital records into the given version."""
arrondissement = DivisionType(
version, key_name='arrondissement',
singular='arrondissement', plural='arrondissements')
db.put(arrondissement)
unknown = Division(
version, key_name='unknown', type='arrondissement', title='Unknown')
db.put(unknown)
subjects = []
reports = []
for record in records:
location = record['location']
subject_name = utils.make_name(record['title'])
subjects.append(Subject(
version,
key_name=subject_name,
type='hospital',
title=record['title'],
location=db.GeoPt(location[1], location[0]),
division_name='unknown',
division_names=['unknown']
))
if record.get('comment', ''):
comment = record['comment']
report = Report(
version,
subject_name=subject_name,
date=datetime.date.today(),
comments=db.Text(comment))
match = BEDS_RE.search(comment)
if match:
report.total_beds = int(match.group(1))
reports.append(report)
db.put(subjects)
db.put(reports)
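# Sketch of the record shape load_hospitals() expects from the KML parser
# (illustrative values; 'location' is [longitude, latitude], KML order):
#
#   {'title': 'Example Hospital',
#    'location': [-72.34, 18.54],
#    'comment': '120 beds, generator on site'}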
def load_kml_file(version, filename):
load_hospitals(version, kml.parse_file(open(filename)))
| apache-2.0 | -7,513,072,806,892,976,000 | 29.843137 | 76 | 0.590591 | false | 3.874384 | false | false | false |
Letractively/timeside | timeside/core.py | 2 | 10798 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2009-2013 Parisson SARL
# Copyright (c) 2009 Olivier Guilyardi <[email protected]>
#
# This file is part of TimeSide.
# TimeSide is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# TimeSide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with TimeSide. If not, see <http://www.gnu.org/licenses/>.
from timeside.component import *
from timeside.api import IProcessor
from timeside.exceptions import Error, ApiError
import re
import time
import numpy
import uuid
__all__ = ['Processor', 'MetaProcessor', 'implements', 'abstract',
'interfacedoc', 'processors', 'get_processor', 'ProcessPipe',
'FixedSizeInputAdapter']
_processors = {}
class MetaProcessor(MetaComponent):
"""Metaclass of the Processor class, used mainly for ensuring that processor
ids are well-formed and unique"""
valid_id = re.compile("^[a-z][_a-z0-9]*$")
def __new__(cls, name, bases, d):
new_class = MetaComponent.__new__(cls, name, bases, d)
if new_class in implementations(IProcessor):
id = str(new_class.id())
if _processors.has_key(id):
# A doctest run can duplicate a processor.
# This can be identified by the condition "module == '__main__'"
if new_class.__module__ == '__main__':
new_class = _processors[id]
elif _processors[id].__module__ == '__main__':
pass
else:
raise ApiError("%s and %s have the same id: '%s'"
% (new_class.__name__, _processors[id].__name__, id))
if not MetaProcessor.valid_id.match(id):
raise ApiError("%s has a malformed id: '%s'"
% (new_class.__name__, id))
_processors[id] = new_class
return new_class
class Processor(Component):
"""Base component class of all processors
Attributes:
parents : List of parent Processors that must be processed
before the current Processor
pipe : The current ProcessPipe in which the Processor will run
"""
__metaclass__ = MetaProcessor
abstract()
implements(IProcessor)
def __init__(self):
super(Processor, self).__init__()
self.parents = []
self.source_mediainfo = None
self.pipe = None
self.UUID = uuid.uuid4()
@interfacedoc
def setup(self, channels=None, samplerate=None, blocksize=None,
totalframes=None):
self.source_channels = channels
self.source_samplerate = samplerate
self.source_blocksize = blocksize
self.source_totalframes = totalframes
# If empty Set default values for input_* attributes
# may be setted by the processor during __init__()
if not hasattr(self, 'input_channels'):
self.input_channels = self.source_channels
if not hasattr(self, 'input_samplerate'):
self.input_samplerate = self.source_samplerate
if not hasattr(self, 'input_blocksize'):
self.input_blocksize = self.source_blocksize
if not hasattr(self, 'input_stepsize'):
self.input_stepsize = self.source_blocksize
# default channels(), samplerate() and blocksize() implementations return
# the source characteristics, but processors may change this behaviour by
# overloading those methods
@interfacedoc
def channels(self):
return self.source_channels
@interfacedoc
def samplerate(self):
return self.source_samplerate
@interfacedoc
def blocksize(self):
return self.source_blocksize
@interfacedoc
def totalframes(self):
return self.source_totalframes
@interfacedoc
def process(self, frames, eod):
return frames, eod
@interfacedoc
def post_process(self):
pass
@interfacedoc
def release(self):
pass
@interfacedoc
def mediainfo(self):
return self.source_mediainfo
@interfacedoc
def uuid(self):
return str(self.UUID)
def __del__(self):
self.release()
def __or__(self, other):
return ProcessPipe(self, other)
class FixedSizeInputAdapter(object):
"""Utility to make it easier to write processors which require fixed-sized
input buffers."""
def __init__(self, buffer_size, channels, pad=False):
"""Construct a new adapter: buffer_size is the desired buffer size in frames,
channels the number of channels, and pad indicates whether the last block should
be padded with zeros."""
self.buffer = numpy.empty((buffer_size, channels))
self.buffer_size = buffer_size
self.len = 0
self.pad = pad
def blocksize(self, input_totalframes):
"""Return the total number of frames that this adapter will output according to the
input_totalframes argument"""
blocksize = input_totalframes
if self.pad:
mod = input_totalframes % self.buffer_size
if mod:
blocksize += self.buffer_size - mod
return blocksize
def process(self, frames, eod):
"""Returns an iterator over tuples of the form (buffer, eod) where buffer is a
fixed-sized block of data, and eod indicates whether this is the last block.
In case padding is deactivated the last block may be smaller than the buffer size.
"""
src_index = 0
remaining = len(frames)
while remaining:
space = self.buffer_size - self.len
copylen = remaining < space and remaining or space
src = frames[src_index:src_index + copylen]
if self.len == 0 and copylen == self.buffer_size:
# avoid unnecessary copy
buffer = src
else:
buffer = self.buffer
buffer[self.len:self.len + copylen] = src
remaining -= copylen
src_index += copylen
self.len += copylen
if self.len == self.buffer_size:
yield buffer, (eod and not remaining)
self.len = 0
if eod and self.len:
block = self.buffer
if self.pad:
self.buffer[self.len:self.buffer_size] = 0
else:
block = self.buffer[0:self.len]
yield block, True
self.len = 0
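# Minimal sketch of the adapter above (illustrative, not part of the module):
# feeding 6 mono frames through a 4-frame adapter with padding yields one full
# block (eod=False) and one final zero-padded block (eod=True).
#
#   adapter = FixedSizeInputAdapter(buffer_size=4, channels=1, pad=True)
#   for block, eod in adapter.process(numpy.ones((6, 1)), True):
#       pass  # block.shape == (4, 1) both times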
def processors(interface=IProcessor, recurse=True):
"""Returns the processors implementing a given interface and, if recurse,
any of the descendants of this interface."""
return implementations(interface, recurse)
def get_processor(processor_id):
"""Return a processor by its id"""
if not _processors.has_key(processor_id):
raise Error("No processor registered with id: '%s'"
% processor_id)
return _processors[processor_id]
class ProcessPipe(object):
"""Handle a pipe of processors
Attributes:
processors: List of all processors in the process pipe
results : Results Container for all the analyzers of the Pipe process
"""
def __init__(self, *others):
self.processors = []
self |= others
from timeside.analyzer.core import AnalyzerResultContainer
self.results = AnalyzerResultContainer()
def __or__(self, other):
return ProcessPipe(self, other)
def __ior__(self, other):
if isinstance(other, Processor):
for parent in other.parents:
self |= parent
self.processors.append(other)
other.process_pipe = self
elif isinstance(other, ProcessPipe):
self.processors.extend(other.processors)
else:
try:
iter(other)
except TypeError:
raise Error("Can not add this type of object to a pipe: %s", str(other))
for item in other:
self |= item
return self
def __repr__(self):
pipe = ''
for item in self.processors:
pipe += item.id()
if item != self.processors[-1]:
pipe += ' | '
return pipe
def run(self, channels=None, samplerate=None, blocksize=None, stack=None):
"""Setup/reset all processors in cascade and stream audio data along
the pipe. Also returns the pipe itself."""
source = self.processors[0]
items = self.processors[1:]
source.setup(channels=channels, samplerate=samplerate,
blocksize=blocksize)
if stack is None:
self.stack = False
else:
self.stack = stack
if self.stack:
self.frames_stack = []
last = source
# setup/reset processors and configure properties throughout the pipe
for item in items:
item.source_mediainfo = source.mediainfo()
item.setup(channels=last.channels(),
samplerate=last.samplerate(),
blocksize=last.blocksize(),
totalframes=last.totalframes())
last = item
# now stream audio data along the pipe
eod = False
while not eod:
frames, eod = source.process()
if self.stack:
self.frames_stack.append(frames)
for item in items:
frames, eod = item.process(frames, eod)
# Post-processing
for item in items:
item.post_process()
# Release processors
if self.stack:
if not isinstance(self.frames_stack, numpy.ndarray):
self.frames_stack = numpy.vstack(self.frames_stack)
from timeside.decoder.core import ArrayDecoder
new_source = ArrayDecoder(samples=self.frames_stack,
samplerate=source.samplerate())
new_source.setup(channels=source.channels(),
samplerate=source.samplerate(),
blocksize=source.blocksize())
self.processors[0] = new_source
for item in items:
item.release()
self.processors.remove(item)
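# Typical pipe construction sketch (illustrative; the processor ids used here
# are hypothetical and must match processors actually registered through the
# MetaProcessor machinery above):
#
#   decoder = get_processor('some_decoder')(uri='audio.wav')
#   analyzer = get_processor('some_analyzer')()
#   pipe = decoder | analyzer
#   pipe.run()
#   print(pipe.results)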
| gpl-2.0 | -2,483,333,003,632,494,600 | 31.329341 | 91 | 0.590572 | false | 4.469371 | false | false | false |
algerbrex/Schemey | src/bytecode.py | 1 | 14432 | """
bytecode.py
----------------------------------------
Implementation of bytecode instructions. Also
includes the implementation of CodeObjects objects,
Instruction objects, and the serializer and deserializer
for the CodeObjects.
"""
from .utils import pack_integer, unpack_integer, pack_string, unpack_string, Stream
from .expressions import Pair, Symbol, Number, Boolean, Nil, String
OP_LOAD_CONST = 0x00
OP_LOAD_VAR = 0x01
OP_SET_VAR = 0x02
OP_DEF_VAR = 0x03
OP_DEF_FUNC = 0x04
OP_PROC_CALL = 0x05
OP_JUMP_IF_FALSE = 0x06
OP_JUMP = 0x07
OP_RETURN = 0x08
OP_POP = 0x09
_opcode_to_str_map = {
0x00: 'OP_LOAD_CONST',
0x01: 'OP_LOAD_VAR',
0x02: 'OP_SET_VAR ',
0x03: 'OP_DEF_VAR ',
0x04: 'OP_DEF_FUNC',
0x05: 'OP_PROC_CALL',
0x06: 'OP_JUMP_IF_FALSE',
0x07: 'OP_JUMP',
0x08: 'OP_RETURN',
0x09: 'OP_POP '
}
def opcode_to_str(opcode):
return _opcode_to_str_map[opcode]
class Instruction:
"""
A structure for holding the operation code and optional
argument for each instruction generated.
"""
def __init__(self, opcode, arg):
self.opcode = opcode
self.arg = arg
def __repr__(self):
if self.arg is None:
return '{:<24}'.format(opcode_to_str(self.opcode))
else:
return '{:<24}{}'.format(opcode_to_str(self.opcode), self.arg)
class CodeObject:
"""
Represents a compiled Scheme procedure. A code object is ready
for serialization and/or direct execution by the virtual machine.
name:
The procedures name. Used for debugging.
code:
A list of Instruction objects containing the
bytecode instructions.
args:
A list of arguments to the procedure.
constants:
A list of constants referenced in the procedure. The constants can either be
a Scheme expression - as implemented in expressions.py - or a CodeObject itself.
varnames:
A list of variable names referenced in the procedure.
"""
def __init__(self, code, args, constants, varnames, name=''):
self.name = name or 'Anonymous procedure'
self.code = code
self.args = args
self.constants = constants
self.varnames = varnames
def __repr__(self, indent=0):
repr_ = ''
prefix = ' ' * indent
repr_ += prefix + '---------------\n'
repr_ += prefix + 'Procedure: ' + self.name + '\n'
repr_ += prefix + 'Arguments: ' + str(self.args) + '\n'
repr_ += prefix + 'Variables referenced: ' + str(self.varnames) + '\n'
constants = []
for constant in self.constants:
if isinstance(constant, CodeObject):
constants.append('\n' + constant.__repr__(indent + 4))
else:
constants.append(('\n ' + prefix) + repr(constant))
repr_ += prefix + 'Constants referenced: ' + ''.join(constants) + '\n'
formatted_code = self._format_code(prefix=prefix)
repr_ += prefix + 'Code: ' + ''.join(formatted_code) + '\n'
repr_ += prefix + '---------------\n'
return repr_
def _format_code(self, prefix):
"""
Iterate over the opcodes of the class, and
"pretty-format" each one.
"""
formatted_code = []
for pos, instruction in enumerate(self.code):
instr_repr = ('\n ' + prefix + '({}) '.format(pos)) + repr(instruction)
if instruction.opcode == OP_LOAD_CONST:
instr_repr += ' [{}]'.format(self.constants[instruction.arg])
elif instruction.opcode == OP_LOAD_VAR:
instr_repr += ' [{}]'.format(self.varnames[instruction.arg])
elif instruction.opcode == OP_SET_VAR:
instr_repr += ' [{}]'.format(self.varnames[instruction.arg])
elif instruction.opcode == OP_DEF_VAR:
instr_repr += ' [{}]'.format(self.varnames[instruction.arg])
elif instruction.opcode == OP_DEF_FUNC:
instr_repr += ' [{}]'.format(self.constants[instruction.arg].name)
elif instruction.opcode == OP_PROC_CALL:
instr_repr += ' [no args]'
elif instruction.opcode == OP_JUMP_IF_FALSE:
instr_repr += ' [{}]'.format(instruction.arg)
elif instruction.opcode == OP_JUMP:
instr_repr += ' [{}]'.format(instruction.arg)
elif instruction.opcode == OP_RETURN:
instr_repr += ' [no args]'
elif instruction.opcode == OP_POP:
instr_repr += ' [no args]'
formatted_code.append(instr_repr)
return formatted_code
"""
What follows is a custom implementation of a simple serialization
API for CodeObjects. The design is very simple and easy to understand, and is
based off of CPython's and Bobscheme's marshaling API.
Each serialized object is prefixed with a "type" byte which tells the object's
type, followed by the byte encoding of the object itself.
I've tried to make my code readable, simple, and easy to understand. So
take a look at the code below!
"""
TYPE_CODEOBJECT = b'C'
TYPE_INSTRUCTION = b'I'
TYPE_PAIR = b'P'
TYPE_BOOLEAN = b'B'
TYPE_NUMBER = b'N'
TYPE_SYMBOL = b'S'
TYPE_SEQUENCE = b'['
TYPE_STRING = b's'
TYPE_PY_STRING = b'p'
TYPE_NIL = b'n'
MAGIC_CONSTANT = 0x01A
class SerializationError(Exception):
"""
Serialization error exception.
"""
pass
class Serializer:
"""
A custom implementation of a serializer for CodeObjects.
This is based off of the CPython implementation.
"""
def __init__(self, codeobject):
self.co = codeobject
def _dispatch(self, value):
"""
Given a value, determine its type,
and call the corresponding serialization
method.
"""
if isinstance(value, CodeObject):
return self._serialize_codeobject(value)
elif isinstance(value, Instruction):
return self._serialize_instruction(value)
elif isinstance(value, Pair):
return self._serialize_pair(value)
elif isinstance(value, Boolean):
return self._serialize_boolean(value)
elif isinstance(value, Number):
return self._serialize_number(value)
elif isinstance(value, Symbol):
return self._serialize_symbol(value)
elif isinstance(value, str):
return self._serialize_py_string(value)
elif isinstance(value, String):
return self._serialize_string(value)
elif isinstance(value, Nil):
return self._serialize_nil(value)
else:
raise SerializationError("Unknown value of type: {}".format(type(value)))
def serialize(self):
"""
The top-level function of this class. Call this
method to serialize the code object given in the
constructor.
"""
serialized_codeobject = self._serialize_codeobject()
return pack_integer(MAGIC_CONSTANT) + serialized_codeobject
def _serialize_codeobject(self, value=None):
"""
Serialize a CodeObject.
"""
co = value or self.co
stream = TYPE_CODEOBJECT
stream += self._serialize_py_string(co.name)
stream += self._serialize_sequence(co.args)
stream += self._serialize_sequence(co.code)
stream += self._serialize_sequence(co.constants)
stream += self._serialize_sequence(co.varnames)
return stream
def _serialize_instruction(self, value):
"""
Serialize an Instruction object.
"""
arg = value.arg or 0
return TYPE_INSTRUCTION + pack_integer(value.opcode) + pack_integer(arg)
def _serialize_pair(self, value):
"""
Serialize a Pair object.
"""
return TYPE_PAIR + self._serialize_object(value.first) + \
self._serialize_object(value.second)
def _serialize_boolean(self, value):
"""
Serialize a Boolean object.
"""
return TYPE_BOOLEAN + pack_integer(value.value)
def _serialize_number(self, value):
"""
Serialize a Number object.
"""
return TYPE_NUMBER + pack_integer(value.value)
def _serialize_symbol(self, value):
"""
Serialize a Symbol object.
"""
return TYPE_SYMBOL + pack_string(value.value)
def _serialize_sequence(self, value):
"""
Serialize a (Python) list of objects. This is similar to
serializing strings or Symbols, with the difference being
that we record the actual Python list's length, and not the
length of its byte encoding.
"""
stream = b''.join(self._serialize_object(el) for el in value)
return TYPE_SEQUENCE + pack_integer(len(value)) + stream
def _serialize_py_string(self, value):
"""
Serialize a Python string object.
"""
return TYPE_PY_STRING + pack_string(value)
def _serialize_string(self, value):
"""
Serialize a Scheme string object.
"""
return TYPE_STRING + pack_string(value.value)
def _serialize_nil(self, value):
"""
Serialize a Nil object.
"""
# Nil represents nothingness. We only need to return the tag.
return TYPE_NIL
def _serialize_object(self, value):
"""
Serialize a generic object.
"""
return self._dispatch(value)
class DeserializationError(Exception):
"""
Deserialization error exception.
"""
pass
class Deserializer:
"""
A class to deserialize a serialized code object.
"""
def __init__(self, bytecode):
self.stream = Stream(bytecode)
def deserialize(self):
"""
Using the bytecode stream given in the constructor,
deserialize it into a CodeObject.
"""
magic_const = unpack_integer(self.stream.read(4))
if magic_const != MAGIC_CONSTANT:
raise DeserializationError("Magic constant does not match")
return self._deserialize_codeobject()
def _match(self, obj_type, msg=''):
"""
Check if the current byte in our Stream is equal to `obj_type`.
"""
if not bytes(self.stream.get_curr_byte()) == obj_type:
raise DeserializationError("Expected object with type: {}".format(obj_type) if not msg else msg)
else:
self.stream.advance()
def _dispatch(self, obj_type):
"""
Given an object's "tag" type,
dispatch the corresponding
deserialization method. If none
match the "tag", raise an error.
"""
if obj_type == TYPE_CODEOBJECT:
return self._deserialize_codeobject()
elif obj_type == TYPE_INSTRUCTION:
return self._deserialize_instruction()
elif obj_type == TYPE_PAIR:
return self._deserialize_pair()
elif obj_type == TYPE_BOOLEAN:
return self._deserialize_boolean()
elif obj_type == TYPE_NUMBER:
return self._deserialize_number()
elif obj_type == TYPE_SYMBOL:
return self._deserialize_symbol()
elif obj_type == TYPE_PY_STRING:
return self._deserialize_py_string()
elif obj_type == TYPE_STRING:
return self._deserialize_string()
elif obj_type == TYPE_NIL:
return self._deserialize_nil()
else:
raise DeserializationError("Unknown object type: {}".format(obj_type))
def _deserialize_codeobject(self):
"""
Deserialize a code object.
"""
self._match(TYPE_CODEOBJECT, "Top-level object is not a code object.")
co = CodeObject([], [], [], [])
co.name = self._deserialize_py_string()
co.args = self._deserialize_sequence()
co.code = self._deserialize_sequence()
co.constants = self._deserialize_sequence()
co.varnames = self._deserialize_sequence()
return co
def _deserialize_instruction(self):
"""
Deserialize an instruction.
"""
self._match(TYPE_INSTRUCTION)
opcode = unpack_integer(self.stream.read(4))
arg = unpack_integer(self.stream.read(4))
return Instruction(opcode, arg)
def _deserialize_pair(self):
self._match(TYPE_PAIR)
first = self._deserialize_object()
second = self._deserialize_object()
return Pair(first, second)
def _deserialize_boolean(self):
"""
Deserialize a Boolean object.
"""
self._match(TYPE_BOOLEAN)
return Boolean(unpack_integer(self.stream.read(4)))
def _deserialize_number(self):
"""
Deserialize a number.
"""
self._match(TYPE_NUMBER)
return Number(unpack_integer(self.stream.read(4)))
def _deserialize_symbol(self):
"""
Deserialize a symbol.
"""
self._match(TYPE_SYMBOL)
str_len = unpack_integer(self.stream.read(4))
return Symbol(unpack_string(self.stream.read(str_len)))
def _deserialize_sequence(self):
"""
Deserialize a sequence.
"""
self._match(TYPE_SEQUENCE)
seq_len = unpack_integer(self.stream.read(4))
return [self._deserialize_object() for _ in range(seq_len)]
def _deserialize_py_string(self):
"""
Deserialize a Python string.
"""
self._match(TYPE_PY_STRING)
str_len = unpack_integer(self.stream.read(4))
return unpack_string(self.stream.read(str_len))
def _deserialize_string(self):
self._match(TYPE_STRING)
str_len = unpack_integer(self.stream.read(4))
return String(unpack_string(self.stream.read(str_len)))
def _deserialize_nil(self):
"""
Deserialize a Nil object.
"""
self._match(TYPE_NIL)
return Nil()
def _deserialize_object(self):
"""
Deserialize a generic object.
"""
return self._dispatch(self.stream.get_curr_byte())
def serialize(codeobject):
"""
A convenience function for serializing code objects.
"""
bytecode = Serializer(codeobject).serialize()
return bytecode
def deserialize(bytecode):
"""
A convenience function for deserializing code objects.
"""
codeobject = Deserializer(bytecode).deserialize()
return codeobject
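# Round-trip sketch (illustrative only): a hand-built CodeObject should survive
# serialization followed by deserialization unchanged in its basic fields.
#
#   co = CodeObject(code=[Instruction(OP_RETURN, None)], args=[],
#                   constants=[Number(1)], varnames=[], name='answer')
#   assert deserialize(serialize(co)).name == 'answer'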
| unlicense | -1,968,049,527,960,456,200 | 29.837607 | 108 | 0.59181 | false | 4.090703 | false | false | false |
SeanHayes/django-tastypie | tastypie/resources.py | 2 | 96092 | from __future__ import unicode_literals
from __future__ import with_statement
from copy import deepcopy
from datetime import datetime
import logging
from time import mktime
import warnings
from wsgiref.handlers import format_date_time
import django
from django.conf import settings
from django.conf.urls import patterns, url
from django.core.exceptions import ObjectDoesNotExist,\
MultipleObjectsReturned, ValidationError
from django.core.urlresolvers import NoReverseMatch, reverse, Resolver404,\
get_script_prefix
from django.core.signals import got_request_exception
from django.core.exceptions import ImproperlyConfigured
try:
from django.contrib.gis.db.models.fields import GeometryField
except (ImproperlyConfigured, ImportError):
GeometryField = None
from django.db.models.constants import LOOKUP_SEP
from django.db.models.sql.constants import QUERY_TERMS
from django.http import HttpResponse, HttpResponseNotFound, Http404
from django.utils.cache import patch_cache_control, patch_vary_headers
from django.utils.html import escape
from django.utils import six
from tastypie.authentication import Authentication
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.cache import NoCache
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import NotFound, BadRequest, InvalidFilterError,\
HydrationError, InvalidSortError, ImmediateHttpResponse, Unauthorized
from tastypie import fields
from tastypie import http
from tastypie.paginator import Paginator
from tastypie.serializers import Serializer
from tastypie.throttle import BaseThrottle
from tastypie.utils import dict_strip_unicode_keys,\
is_valid_jsonp_callback_value, string_to_python, trailing_slash
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.validation import Validation
from tastypie.compat import get_module_name, atomic_decorator
# If ``csrf_exempt`` isn't present, stub it.
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
def csrf_exempt(func):
return func
def sanitize(text):
# We put the single quotes back, due to their frequent usage in exception
# messages.
return escape(text).replace(''', "'").replace('"', '"')
class NOT_AVAILABLE:
def __str__(self):
return 'No such data is available.'
class ResourceOptions(object):
"""
A configuration class for ``Resource``.
Provides sane defaults and the logic needed to augment these settings with
the internal ``class Meta`` used on ``Resource`` subclasses.
"""
serializer = Serializer()
authentication = Authentication()
authorization = ReadOnlyAuthorization()
cache = NoCache()
throttle = BaseThrottle()
validation = Validation()
paginator_class = Paginator
allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
list_allowed_methods = None
detail_allowed_methods = None
limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
max_limit = 1000
api_name = None
resource_name = None
urlconf_namespace = None
default_format = 'application/json'
filtering = {}
ordering = []
object_class = None
queryset = None
fields = []
excludes = []
include_resource_uri = True
include_absolute_url = False
always_return_data = False
collection_name = 'objects'
detail_uri_name = 'pk'
def __new__(cls, meta=None):
overrides = {}
# Handle overrides.
if meta:
for override_name in dir(meta):
# No internals please.
if not override_name.startswith('_'):
overrides[override_name] = getattr(meta, override_name)
allowed_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch'])
if overrides.get('list_allowed_methods', None) is None:
overrides['list_allowed_methods'] = allowed_methods
if overrides.get('detail_allowed_methods', None) is None:
overrides['detail_allowed_methods'] = allowed_methods
if six.PY3:
return object.__new__(type('ResourceOptions', (cls,), overrides))
else:
return object.__new__(type(b'ResourceOptions', (cls,), overrides))
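# Illustrative sketch of the ``class Meta`` that ResourceOptions consumes
# (the model and names below are hypothetical):
#
#   class NoteResource(ModelResource):
#       class Meta:
#           queryset = Note.objects.all()
#           resource_name = 'note'
#           allowed_methods = ['get']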
class DeclarativeMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = {}
declared_fields = {}
# Inherit any fields from parent(s).
try:
parents = [b for b in bases if issubclass(b, Resource)]
# Simulate the MRO.
parents.reverse()
for p in parents:
parent_fields = getattr(p, 'base_fields', {})
for field_name, field_object in parent_fields.items():
attrs['base_fields'][field_name] = deepcopy(field_object)
except NameError:
pass
for field_name, obj in attrs.copy().items():
# Look for ``dehydrated_type`` instead of doing ``isinstance``,
# which can break down if Tastypie is re-namespaced as something
# else.
if hasattr(obj, 'dehydrated_type'):
field = attrs.pop(field_name)
declared_fields[field_name] = field
attrs['base_fields'].update(declared_fields)
attrs['declared_fields'] = declared_fields
new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
opts = getattr(new_class, 'Meta', None)
new_class._meta = ResourceOptions(opts)
if not getattr(new_class._meta, 'resource_name', None):
# No ``resource_name`` provided. Attempt to auto-name the resource.
class_name = new_class.__name__
name_bits = [bit for bit in class_name.split('Resource') if bit]
resource_name = ''.join(name_bits).lower()
new_class._meta.resource_name = resource_name
if getattr(new_class._meta, 'include_resource_uri', True):
if 'resource_uri' not in new_class.base_fields:
new_class.base_fields['resource_uri'] = fields.CharField(readonly=True, verbose_name="resource uri")
elif 'resource_uri' in new_class.base_fields and 'resource_uri' not in attrs:
del(new_class.base_fields['resource_uri'])
for field_name, field_object in new_class.base_fields.items():
if hasattr(field_object, 'contribute_to_class'):
field_object.contribute_to_class(new_class, field_name)
return new_class
class Resource(six.with_metaclass(DeclarativeMetaclass)):
"""
Handles the data, request dispatch and responding to requests.
Serialization/deserialization is handled "at the edges" (i.e. at the
beginning/end of the request/response cycle) so that everything internally
is Python data structures.
This class tries to be non-model specific, so it can be hooked up to other
data sources, such as search results, files, other data, etc.
"""
def __init__(self, api_name=None):
# this can cause:
# TypeError: object.__new__(method-wrapper) is not safe, use method-wrapper.__new__()
# when trying to copy a generator used as a default. Wrap call to
# generator in lambda to get around this error.
self.fields = deepcopy(self.base_fields)
if api_name is not None:
self._meta.api_name = api_name
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError(name)
def wrap_view(self, view):
"""
Wraps methods so they can be called in a more functional way as well
as handling exceptions better.
Note that if ``BadRequest`` or an exception with a ``response`` attr
are seen, there is special handling to either present a message back
to the user or return the response traveling with the exception.
"""
@csrf_exempt
def wrapper(request, *args, **kwargs):
try:
callback = getattr(self, view)
response = callback(request, *args, **kwargs)
# Our response can vary based on a number of factors, use
# the cache class to determine what we should ``Vary`` on so
# caches won't return the wrong (cached) version.
varies = getattr(self._meta.cache, "varies", [])
if varies:
patch_vary_headers(response, varies)
if self._meta.cache.cacheable(request, response):
if self._meta.cache.cache_control():
# If the request is cacheable and we have a
# ``Cache-Control`` available then patch the header.
patch_cache_control(response, **self._meta.cache.cache_control())
if request.is_ajax() and not response.has_header("Cache-Control"):
# IE excessively caches XMLHttpRequests, so we're disabling
# the browser cache here.
# See http://www.enhanceie.com/ie/bugs.asp for details.
patch_cache_control(response, no_cache=True)
return response
except (BadRequest, fields.ApiFieldError) as e:
data = {"error": sanitize(e.args[0]) if getattr(e, 'args') else ''}
return self.error_response(request, data, response_class=http.HttpBadRequest)
except ValidationError as e:
data = {"error": sanitize(e.messages)}
return self.error_response(request, data, response_class=http.HttpBadRequest)
except Exception as e:
if hasattr(e, 'response'):
return e.response
# A real, non-expected exception.
# Handle the case where the full traceback is more helpful
# than the serialized error.
if settings.DEBUG and getattr(settings, 'TASTYPIE_FULL_DEBUG', False):
raise
# Re-raise the error to get a proper traceback when the error
# happened during a test case
if request.META.get('SERVER_NAME') == 'testserver':
raise
# Rather than re-raising, we're going to do things similar to
# what Django does. The difference is returning a serialized
# error message.
return self._handle_500(request, e)
return wrapper
def _handle_500(self, request, exception):
import traceback
import sys
the_trace = '\n'.join(traceback.format_exception(*(sys.exc_info())))
response_class = http.HttpApplicationError
response_code = 500
NOT_FOUND_EXCEPTIONS = (NotFound, ObjectDoesNotExist, Http404)
if isinstance(exception, NOT_FOUND_EXCEPTIONS):
response_class = HttpResponseNotFound
response_code = 404
if settings.DEBUG:
data = {
"error_message": sanitize(six.text_type(exception)),
"traceback": the_trace,
}
return self.error_response(request, data, response_class=response_class)
# When DEBUG is False, send an error message to the admins (unless it's
# a 404, in which case we check the setting).
send_broken_links = getattr(settings, 'SEND_BROKEN_LINK_EMAILS', False)
if not response_code == 404 or send_broken_links:
log = logging.getLogger('django.request.tastypie')
log.error('Internal Server Error: %s' % request.path, exc_info=True,
extra={'status_code': response_code, 'request': request})
# Send the signal so other apps are aware of the exception.
got_request_exception.send(self.__class__, request=request)
# Prep the data going out.
data = {
"error_message": getattr(settings, 'TASTYPIE_CANNED_ERROR', "Sorry, this request could not be processed. Please try again later."),
}
return self.error_response(request, data, response_class=response_class)
def _build_reverse_url(self, name, args=None, kwargs=None):
"""
A convenience hook for overriding how URLs are built.
See ``NamespacedModelResource._build_reverse_url`` for an example.
"""
return reverse(name, args=args, kwargs=kwargs)
def base_urls(self):
"""
The standard URLs this ``Resource`` should respond to.
"""
return [
url(r"^(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash), self.wrap_view('dispatch_list'), name="api_dispatch_list"),
url(r"^(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash), self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^(?P<resource_name>%s)/set/(?P<%s_list>.*?)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash), self.wrap_view('get_multiple'), name="api_get_multiple"),
url(r"^(?P<resource_name>%s)/(?P<%s>.*?)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash), self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
def override_urls(self):
"""
Deprecated. Will be removed by v1.0.0. Please use ``prepend_urls`` instead.
"""
return []
def prepend_urls(self):
"""
A hook for adding your own URLs or matching before the default URLs.
"""
return []
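# A minimal sketch of a ``prepend_urls`` override, assuming a hypothetical
# ``get_search`` view on the subclass (not defined in this module):
#
#     def prepend_urls(self):
#         return [
#             url(r"^(?P<resource_name>%s)/search%s$" %
#                 (self._meta.resource_name, trailing_slash),
#                 self.wrap_view('get_search'), name="api_get_search"),
#         ]
#
# Because these patterns come before ``base_urls()`` in the ``urls`` property
# just below, ``/note/search/`` is matched ahead of the catch-all detail URL.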
@property
def urls(self):
"""
The endpoints this ``Resource`` responds to.
Mostly a standard URLconf, this is suitable for either automatic use
when registered with an ``Api`` class or for including directly in
a URLconf should you choose to.
"""
urls = self.prepend_urls()
overridden_urls = self.override_urls()
if overridden_urls:
warnings.warn("'override_urls' is a deprecated method & will be removed by v1.0.0. Please rename your method to ``prepend_urls``.")
urls += overridden_urls
urls += self.base_urls()
return patterns('', *urls)
def determine_format(self, request):
"""
Used to determine the desired format.
Largely relies on ``tastypie.utils.mime.determine_format`` but here
as a point of extension.
"""
return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)
def serialize(self, request, data, format, options=None):
"""
Given a request, data and a desired format, produces a serialized
version suitable for transfer over the wire.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
options = options or {}
if 'text/javascript' in format:
# get JSONP callback name. default to "callback"
callback = request.GET.get('callback', 'callback')
if not is_valid_jsonp_callback_value(callback):
raise BadRequest('JSONP callback name is invalid.')
options['callback'] = callback
return self._meta.serializer.serialize(data, format, options)
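# A rough sketch of the JSONP branch above, assuming the serializer has the
# ``jsonp`` format enabled (URL and callback name are illustrative):
#
#     GET /api/v1/note/?format=jsonp&callback=handleNotes
#
# ``determine_format`` resolves to ``text/javascript``, the callback name is
# validated, and the serialized payload is wrapped roughly as:
#
#     handleNotes({"meta": {...}, "objects": [...]})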
def deserialize(self, request, data, format='application/json'):
"""
Given a request, data and a format, deserializes the given data.
It relies on the request properly sending a ``CONTENT_TYPE`` header,
falling back to ``application/json`` if not provided.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
deserialized = self._meta.serializer.deserialize(data, format=request.META.get('CONTENT_TYPE', format))
return deserialized
def alter_list_data_to_serialize(self, request, data):
"""
A hook to alter list data just before it gets serialized & sent to the user.
Useful for restructuring/renaming aspects of what's going to be
sent.
Should accommodate a list of objects, generally also including
meta data.
"""
return data
def alter_detail_data_to_serialize(self, request, data):
"""
A hook to alter detail data just before it gets serialized & sent to the user.
Useful for restructuring/renaming aspects of what's going to be
sent.
Should accommodate receiving a single bundle of data.
"""
return data
def alter_deserialized_list_data(self, request, data):
"""
A hook to alter list data just after it has been received from the user
and deserialized.
Useful for altering the user data before any hydration is applied.
"""
return data
def alter_deserialized_detail_data(self, request, data):
"""
A hook to alter detail data just after it has been received from the user
and deserialized.
Useful for altering the user data before any hydration is applied.
"""
return data
def dispatch_list(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) over
the entire list of resources.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('list', request, **kwargs)
def dispatch_detail(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) on
a single resource.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('detail', request, **kwargs)
def dispatch(self, request_type, request, **kwargs):
"""
Handles the common operations (allowed HTTP method, authentication,
throttling, method lookup) surrounding most CRUD interactions.
"""
allowed_methods = getattr(self._meta, "%s_allowed_methods" % request_type, None)
if 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META:
request.method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE']
request_method = self.method_check(request, allowed=allowed_methods)
method = getattr(self, "%s_%s" % (request_method, request_type), None)
if method is None:
raise ImmediateHttpResponse(response=http.HttpNotImplemented())
self.is_authenticated(request)
self.throttle_check(request)
# All clear. Process the request.
request = convert_post_to_put(request)
response = method(request, **kwargs)
# Add the throttled request.
self.log_throttled_access(request)
# If what comes back isn't a ``HttpResponse``, assume that the
# request was accepted and that some action occurred. This also
# prevents Django from freaking out.
if not isinstance(response, HttpResponse):
return http.HttpNoContent()
return response
def remove_api_resource_names(self, url_dict):
"""
Given a dictionary of regex matches from a URLconf, removes
``api_name`` and/or ``resource_name`` if found.
This is useful for converting URLconf matches into something suitable
for data lookup. For example::
Model.objects.filter(**self.remove_api_resource_names(matches))
"""
kwargs_subset = url_dict.copy()
for key in ['api_name', 'resource_name']:
try:
del(kwargs_subset[key])
except KeyError:
pass
return kwargs_subset
def method_check(self, request, allowed=None):
"""
Ensures that the HTTP method used on the request is allowed to be
handled by the resource.
Takes an ``allowed`` parameter, which should be a list of lowercase
HTTP methods to check against. Usually, this looks like::
# The most generic lookup.
self.method_check(request, self._meta.allowed_methods)
# A lookup against what's allowed for list-type methods.
self.method_check(request, self._meta.list_allowed_methods)
# A useful check when creating a new endpoint that only handles
# GET.
self.method_check(request, ['get'])
"""
if allowed is None:
allowed = []
request_method = request.method.lower()
allows = ','.join([meth.upper() for meth in allowed])
if request_method == "options":
response = HttpResponse(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
if request_method not in allowed:
response = http.HttpMethodNotAllowed(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
return request_method
def is_authenticated(self, request):
"""
Handles checking if the user is authenticated and dealing with
unauthenticated users.
Mostly a hook, this uses class assigned to ``authentication`` from
``Resource._meta``.
"""
# Authenticate the request as needed.
auth_result = self._meta.authentication.is_authenticated(request)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if auth_result is not True:
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def throttle_check(self, request):
"""
Handles checking if the user should be throttled.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
identifier = self._meta.authentication.get_identifier(request)
# Check to see if they should be throttled.
throttle = self._meta.throttle.should_be_throttled(identifier)
if throttle:
# Throttle limit exceeded.
response = http.HttpTooManyRequests()
if isinstance(throttle, int) and not isinstance(throttle, bool):
response['Retry-After'] = throttle
elif isinstance(throttle, datetime):
response['Retry-After'] = format_date_time(mktime(throttle.timetuple()))
raise ImmediateHttpResponse(response=response)
def log_throttled_access(self, request):
"""
Handles the recording of the user's access for throttling purposes.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
request_method = request.method.lower()
self._meta.throttle.accessed(self._meta.authentication.get_identifier(request), url=request.get_full_path(), request_method=request_method)
def unauthorized_result(self, exception):
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def authorized_read_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to GET this resource.
"""
try:
auth_result = self._meta.authorization.read_list(object_list, bundle)
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def authorized_read_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to GET this resource.
"""
try:
auth_result = self._meta.authorization.read_detail(object_list, bundle)
if auth_result is not True:
raise Unauthorized()
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def authorized_create_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to POST this resource.
"""
try:
auth_result = self._meta.authorization.create_list(object_list, bundle)
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def authorized_create_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to POST this resource.
"""
try:
auth_result = self._meta.authorization.create_detail(object_list, bundle)
if auth_result is not True:
raise Unauthorized()
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def authorized_update_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to PUT this resource.
"""
try:
auth_result = self._meta.authorization.update_list(object_list, bundle)
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def authorized_update_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to PUT this resource.
"""
try:
auth_result = self._meta.authorization.update_detail(object_list, bundle)
if auth_result is not True:
raise Unauthorized()
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def authorized_delete_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to DELETE this resource.
"""
try:
auth_result = self._meta.authorization.delete_list(object_list, bundle)
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def authorized_delete_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to DELETE this resource.
"""
try:
auth_result = self._meta.authorization.delete_detail(object_list, bundle)
if not auth_result:
raise Unauthorized()
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def build_bundle(self, obj=None, data=None, request=None, objects_saved=None, via_uri=None):
"""
Given either an object, a data dictionary or both, builds a ``Bundle``
for use throughout the ``dehydrate/hydrate`` cycle.
If no object is provided, an empty object from
``Resource._meta.object_class`` is created so that attempts to access
``bundle.obj`` do not fail.
"""
if obj is None and self._meta.object_class:
obj = self._meta.object_class()
return Bundle(
obj=obj,
data=data,
request=request,
objects_saved=objects_saved,
via_uri=via_uri
)
def build_filters(self, filters=None):
"""
Allows for the filtering of applicable objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return filters
def apply_sorting(self, obj_list, options=None):
"""
Allows for the sorting of objects being returned.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return obj_list
def get_bundle_detail_data(self, bundle):
"""
Convenience method to return the ``detail_uri_name`` attribute off
``bundle.obj``.
Usually just accesses ``bundle.obj.pk`` by default.
"""
return getattr(bundle.obj, self._meta.detail_uri_name)
# URL-related methods.
def detail_uri_kwargs(self, bundle_or_obj):
"""
This needs to be implemented at the user level.
Given a ``Bundle`` or an object, it returns the extra kwargs needed to
generate a detail URI.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def resource_uri_kwargs(self, bundle_or_obj=None):
"""
Builds a dictionary of kwargs to help generate URIs.
Automatically provides the ``Resource.Meta.resource_name`` (and
optionally the ``Resource.Meta.api_name`` if populated by an ``Api``
object).
If the ``bundle_or_obj`` argument is provided, it calls
``Resource.detail_uri_kwargs`` for additional bits to create
"""
kwargs = {
'resource_name': self._meta.resource_name,
}
if self._meta.api_name is not None:
kwargs['api_name'] = self._meta.api_name
if bundle_or_obj is not None:
kwargs.update(self.detail_uri_kwargs(bundle_or_obj))
return kwargs
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
"""
Handles generating a resource URI.
If the ``bundle_or_obj`` argument is not provided, it builds the URI
for the list endpoint.
If the ``bundle_or_obj`` argument is provided, it builds the URI for
the detail endpoint.
Return the generated URI. If that URI can not be reversed (not found
in the URLconf), it will return an empty string.
"""
if bundle_or_obj is not None:
url_name = 'api_dispatch_detail'
try:
return self._build_reverse_url(url_name, kwargs=self.resource_uri_kwargs(bundle_or_obj))
except NoReverseMatch:
return ''
def get_via_uri(self, uri, request=None):
"""
This pulls apart the salient bits of the URI and populates the
resource via an ``obj_get``.
Optionally accepts a ``request``.
If you need custom behavior based on other portions of the URI,
simply override this method.
"""
prefix = get_script_prefix()
chomped_uri = uri
if prefix and chomped_uri.startswith(prefix):
chomped_uri = chomped_uri[len(prefix) - 1:]
# We mangle the path a bit further & run URL resolution against *only*
# the current class. This ought to prevent bad URLs from resolving to
# incorrect data.
found_at = chomped_uri.rfind(self._meta.resource_name)
if found_at == -1:
raise NotFound("An incorrect URL was provided '%s' for the '%s' resource." % (uri, self.__class__.__name__))
chomped_uri = chomped_uri[found_at:]
try:
for url_resolver in getattr(self, 'urls', []):
result = url_resolver.resolve(chomped_uri)
if result is not None:
view, args, kwargs = result
break
else:
raise Resolver404("URI not found in 'self.urls'.")
except Resolver404:
raise NotFound("The URL provided '%s' was not a link to a valid resource." % uri)
bundle = self.build_bundle(request=request)
return self.obj_get(bundle=bundle, **self.remove_api_resource_names(kwargs))
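# A minimal sketch of ``get_via_uri``, assuming a hypothetical ``note``
# resource registered under the ``v1`` api (values illustrative):
#
#     note = resource.get_via_uri('/api/v1/note/1/', request=request)
#
# The script prefix is stripped, the URI is resolved against this resource's
# own ``urls`` only, and the matched kwargs (e.g. ``pk=1``) are passed on to
# ``obj_get``.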
# Data preparation.
def full_dehydrate(self, bundle, for_list=False):
"""
Given a bundle with an object instance, extract the information from it
to populate the resource.
"""
data = bundle.data
api_name = self._meta.api_name
resource_name = self._meta.resource_name
# Dehydrate each field.
for field_name, field_object in self.fields.items():
# If it's not for use in this mode, skip
field_use_in = field_object.use_in
if callable(field_use_in):
if not field_use_in(bundle):
continue
else:
if field_use_in not in ['all', 'list' if for_list else 'detail']:
continue
# A touch leaky but it makes URI resolution work.
if field_object.dehydrated_type == 'related':
field_object.api_name = api_name
field_object.resource_name = resource_name
data[field_name] = field_object.dehydrate(bundle, for_list=for_list)
# Check for an optional method to do further dehydration.
method = getattr(self, "dehydrate_%s" % field_name, None)
if method:
data[field_name] = method(bundle)
bundle = self.dehydrate(bundle)
return bundle
def dehydrate(self, bundle):
"""
A hook to allow a final manipulation of data once all fields/methods
have built out the dehydrated data.
Useful if you need to access more than one dehydrated field or want
to annotate on additional data.
Must return the modified bundle.
"""
return bundle
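# A minimal sketch of overriding this hook in a subclass (the ``title`` key is
# an illustrative assumption about the resource's data):
#
#     def dehydrate(self, bundle):
#         # Annotate extra, computed data onto the outgoing payload.
#         bundle.data['title_length'] = len(bundle.data.get('title', ''))
#         return bundle
#
# Per-field hooks work the same way: ``dehydrate_title(self, bundle)`` would
# return the value stored in ``bundle.data['title']``.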
def full_hydrate(self, bundle):
"""
Given a populated bundle, distill it and turn it back into
a full-fledged object instance.
"""
if bundle.obj is None:
bundle.obj = self._meta.object_class()
bundle = self.hydrate(bundle)
for field_name, field_object in self.fields.items():
if field_object.readonly is True:
continue
# Check for an optional method to do further hydration.
method = getattr(self, "hydrate_%s" % field_name, None)
if method:
bundle = method(bundle)
if field_object.attribute:
value = field_object.hydrate(bundle)
# NOTE: We only get back a bundle when it is a related field.
if isinstance(value, Bundle) and value.errors.get(field_name):
bundle.errors[field_name] = value.errors[field_name]
if value is not None or field_object.null:
# We need to avoid populating M2M data here as that will
# cause things to blow up.
if not field_object.is_related:
setattr(bundle.obj, field_object.attribute, value)
elif not field_object.is_m2m:
if value is not None:
# NOTE: A bug fix in Django (ticket #18153) fixes incorrect behavior
# which Tastypie was relying on. To fix this, we store value.obj to
# be saved later in save_related.
try:
setattr(bundle.obj, field_object.attribute, value.obj)
except (ValueError, ObjectDoesNotExist):
bundle.related_objects_to_save[field_object.attribute] = value.obj
elif field_object.blank:
continue
elif field_object.null:
setattr(bundle.obj, field_object.attribute, value)
return bundle
def hydrate(self, bundle):
"""
A hook to allow an initial manipulation of data before all methods/fields
have built out the hydrated data.
Useful if you need to access more than one hydrated field or want
to annotate on additional data.
Must return the modified bundle.
"""
return bundle
def hydrate_m2m(self, bundle):
"""
Populate the ManyToMany data on the instance.
"""
if bundle.obj is None:
raise HydrationError("You must call 'full_hydrate' before attempting to run 'hydrate_m2m' on %r." % self)
for field_name, field_object in self.fields.items():
if not field_object.is_m2m:
continue
if field_object.attribute:
# Note that we only hydrate the data, leaving the instance
# unmodified. It's up to the user's code to handle this.
# The ``ModelResource`` provides a working baseline
# in this regard.
bundle.data[field_name] = field_object.hydrate_m2m(bundle)
for field_name, field_object in self.fields.items():
if not field_object.is_m2m:
continue
method = getattr(self, "hydrate_%s" % field_name, None)
if method:
method(bundle)
return bundle
def build_schema(self):
"""
Returns a dictionary of all the fields on the resource and some
properties about those fields.
Used by the ``schema/`` endpoint to describe what will be available.
"""
data = {
'fields': {},
'default_format': self._meta.default_format,
'allowed_list_http_methods': self._meta.list_allowed_methods,
'allowed_detail_http_methods': self._meta.detail_allowed_methods,
'default_limit': self._meta.limit,
}
if self._meta.ordering:
data['ordering'] = self._meta.ordering
if self._meta.filtering:
data['filtering'] = self._meta.filtering
# Skip assigning pk_field_name for non-model resources
try:
pk_field_name = self._meta.queryset.model._meta.pk.name
except AttributeError:
pk_field_name = None
for field_name, field_object in self.fields.items():
data['fields'][field_name] = {
'default': field_object.default,
'type': field_object.dehydrated_type,
'nullable': field_object.null,
'blank': field_object.blank,
'readonly': field_object.readonly,
'help_text': field_object.help_text,
'unique': field_object.unique,
'primary_key': True if field_name == pk_field_name else False,
'verbose_name': field_object.verbose_name or field_name.replace("_", " "),
}
if field_object.dehydrated_type == 'related':
if field_object.is_m2m:
related_type = 'to_many'
else:
related_type = 'to_one'
data['fields'][field_name]['related_type'] = related_type
return data
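# A rough sketch of the payload served by the ``schema/`` endpoint for a
# hypothetical resource with a single ``title`` field (values illustrative):
#
#     {
#         "default_format": "application/json",
#         "allowed_list_http_methods": ["get"],
#         "allowed_detail_http_methods": ["get"],
#         "default_limit": 20,
#         "fields": {
#             "title": {"type": "string", "nullable": false, "blank": false,
#                       "readonly": false, "unique": false,
#                       "primary_key": false, "default": "",
#                       "help_text": "...", "verbose_name": "title"}
#         }
#     }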
def dehydrate_resource_uri(self, bundle):
"""
For the automatically included ``resource_uri`` field, dehydrate
the URI for the given bundle.
Returns empty string if no URI can be generated.
"""
try:
return self.get_resource_uri(bundle)
except NotImplementedError:
return ''
except NoReverseMatch:
return ''
def generate_cache_key(self, *args, **kwargs):
"""
Creates a unique-enough cache key.
This is based off the current api_name/resource_name/args/kwargs.
"""
smooshed = ["%s=%s" % (key, value) for key, value in kwargs.items()]
# Use a list plus a ``.join()`` because it's faster than concatenation.
return "%s:%s:%s:%s" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), ':'.join(sorted(smooshed)))
# Data access methods.
def get_object_list(self, request):
"""
A hook for returning the list of available objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def apply_authorization_limits(self, request, object_list):
"""
Deprecated.
FIXME: REMOVE BEFORE 1.0
"""
return self._meta.authorization.apply_limits(request, object_list)
def can_create(self):
"""
Checks to ensure ``post`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'post' in allowed
def can_update(self):
"""
Checks to ensure ``put`` is within ``allowed_methods``.
Used when hydrating related data.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'put' in allowed
def can_delete(self):
"""
Checks to ensure ``delete`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'delete' in allowed
def apply_filters(self, request, applicable_filters):
"""
A hook to alter how the filters are applied to the object list.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_get_list(self, bundle, **kwargs):
"""
Fetches the list of objects available on the resource.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get_list(self, bundle, **kwargs):
"""
A version of ``obj_get_list`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('list', **kwargs)
obj_list = self._meta.cache.get(cache_key)
if obj_list is None:
obj_list = self.obj_get_list(bundle=bundle, **kwargs)
self._meta.cache.set(cache_key, obj_list)
return obj_list
def obj_get(self, bundle, **kwargs):
"""
Fetches an individual object on the resource.
This needs to be implemented at the user level. If the object can not
be found, this should raise a ``NotFound`` exception.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get(self, bundle, **kwargs):
"""
A version of ``obj_get`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('detail', **kwargs)
cached_bundle = self._meta.cache.get(cache_key)
if cached_bundle is None:
cached_bundle = self.obj_get(bundle=bundle, **kwargs)
self._meta.cache.set(cache_key, cached_bundle)
return cached_bundle
def obj_create(self, bundle, **kwargs):
"""
Creates a new object based on the provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_update(self, bundle, **kwargs):
"""
Updates an existing object (or creates a new object) based on the
provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete_list(self, bundle, **kwargs):
"""
Deletes an entire list of objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete_list_for_update(self, bundle, **kwargs):
"""
Deletes an entire list of objects, specific to PUT list.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete(self, bundle, **kwargs):
"""
Deletes a single object.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
"""
Extracts the common "which-format/serialize/return-response" cycle.
Mostly a useful shortcut/hook.
"""
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return response_class(content=serialized, content_type=build_content_type(desired_format), **response_kwargs)
def error_response(self, request, errors, response_class=None):
"""
Extracts the common "which-format/serialize/return-error-response"
cycle.
Should be used as much as possible to return errors.
"""
if response_class is None:
response_class = http.HttpBadRequest
desired_format = None
if request:
if request.GET.get('callback', None) is None:
try:
desired_format = self.determine_format(request)
except BadRequest:
pass # Fall through to default handler below
else:
# JSONP can cause extra breakage.
desired_format = 'application/json'
if not desired_format:
desired_format = self._meta.default_format
try:
serialized = self.serialize(request, errors, desired_format)
except BadRequest as e:
error = "Additional errors occurred, but serialization of those errors failed."
if settings.DEBUG:
error += " %s" % e
return response_class(content=error, content_type='text/plain')
return response_class(content=serialized, content_type=build_content_type(desired_format))
def is_valid(self, bundle):
"""
Handles checking if the data provided by the user is valid.
Mostly a hook, this uses class assigned to ``validation`` from
``Resource._meta``.
If validation fails, the error messages are stored on ``bundle.errors``
(keyed by the resource name) and ``False`` is returned.
"""
errors = self._meta.validation.is_valid(bundle, bundle.request)
if errors:
bundle.errors[self._meta.resource_name] = errors
return False
return True
def rollback(self, bundles):
"""
Given the list of bundles, delete all objects pertaining to those
bundles.
This needs to be implemented at the user level. No exceptions should
be raised if possible.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
# Views.
def get_list(self, request, **kwargs):
"""
Returns a serialized list of resources.
Calls ``obj_get_list`` to provide the data, then handles that result
set and serializes it.
Should return a HttpResponse (200 OK).
"""
# TODO: Uncached for now. Invalidation that works for everyone may be
# impossible.
base_bundle = self.build_bundle(request=request)
objects = self.obj_get_list(bundle=base_bundle, **self.remove_api_resource_names(kwargs))
sorted_objects = self.apply_sorting(objects, options=request.GET)
paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_uri(), limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name)
to_be_serialized = paginator.page()
# Dehydrate the bundles in preparation for serialization.
bundles = [
self.full_dehydrate(self.build_bundle(obj=obj, request=request), for_list=True)
for obj in to_be_serialized[self._meta.collection_name]
]
to_be_serialized[self._meta.collection_name] = bundles
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized)
def get_detail(self, request, **kwargs):
"""
Returns a single serialized resource.
Calls ``cached_obj_get/obj_get`` to provide the data, then handles that result
set and serializes it.
Should return a HttpResponse (200 OK).
"""
basic_bundle = self.build_bundle(request=request)
try:
obj = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
except ObjectDoesNotExist:
return http.HttpNotFound()
except MultipleObjectsReturned:
return http.HttpMultipleChoices("More than one resource is found at this URI.")
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
return self.create_response(request, bundle)
def post_list(self, request, **kwargs):
"""
Creates a new resource/object with the provided data.
Calls ``obj_create`` with the provided data and returns a response
with the new resource's location.
If a new resource is created, return ``HttpCreated`` (201 Created).
If ``Meta.always_return_data = True``, there will be a populated body
of serialized data.
"""
deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_detail_data(request, deserialized)
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
updated_bundle = self.obj_create(bundle, **self.remove_api_resource_names(kwargs))
location = self.get_resource_uri(updated_bundle)
if not self._meta.always_return_data:
return http.HttpCreated(location=location)
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def post_detail(self, request, **kwargs):
"""
Creates a new subcollection of the resource under a resource.
This is not implemented by default because most people's data models
aren't self-referential.
If a new resource is created, return ``HttpCreated`` (201 Created).
"""
return http.HttpNotImplemented()
def put_list(self, request, **kwargs):
"""
Replaces a collection of resources with another collection.
Calls ``delete_list`` to clear out the collection then ``obj_create``
with the provided the data to create the new collection.
Return ``HttpNoContent`` (204 No Content) if
``Meta.always_return_data = False`` (default).
Return ``HttpAccepted`` (200 OK) if
``Meta.always_return_data = True``.
"""
deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_list_data(request, deserialized)
if self._meta.collection_name not in deserialized:
raise BadRequest("Invalid data sent: missing '%s'" % self._meta.collection_name)
basic_bundle = self.build_bundle(request=request)
self.obj_delete_list_for_update(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
bundles_seen = []
for object_data in deserialized[self._meta.collection_name]:
bundle = self.build_bundle(data=dict_strip_unicode_keys(object_data), request=request)
# Attempt to be transactional, deleting any previously created
# objects if validation fails.
try:
self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs))
bundles_seen.append(bundle)
except ImmediateHttpResponse:
self.rollback(bundles_seen)
raise
if not self._meta.always_return_data:
return http.HttpNoContent()
else:
to_be_serialized = {
self._meta.collection_name: [
self.full_dehydrate(b, for_list=True)
for b in bundles_seen
]
}
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized)
def put_detail(self, request, **kwargs):
"""
Either updates an existing resource or creates a new one with the
provided data.
Calls ``obj_update`` with the provided data first, but falls back to
``obj_create`` if the object does not already exist.
If a new resource is created, return ``HttpCreated`` (201 Created).
If ``Meta.always_return_data = True``, there will be a populated body
of serialized data.
If an existing resource is modified and
``Meta.always_return_data = False`` (default), return ``HttpNoContent``
(204 No Content).
If an existing resource is modified and
``Meta.always_return_data = True``, return ``HttpAccepted`` (200
OK).
"""
deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_detail_data(request, deserialized)
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
try:
updated_bundle = self.obj_update(bundle=bundle, **self.remove_api_resource_names(kwargs))
if not self._meta.always_return_data:
return http.HttpNoContent()
else:
# Invalidate prefetched_objects_cache for bundled object
# because we might have changed a prefetched field
updated_bundle.obj._prefetched_objects_cache = {}
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle)
except (NotFound, MultipleObjectsReturned):
updated_bundle = self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs))
location = self.get_resource_uri(updated_bundle)
if not self._meta.always_return_data:
return http.HttpCreated(location=location)
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def delete_list(self, request, **kwargs):
"""
Destroys a collection of resources/objects.
Calls ``obj_delete_list``.
If the resources are deleted, return ``HttpNoContent`` (204 No Content).
"""
bundle = self.build_bundle(request=request)
self.obj_delete_list(bundle=bundle, request=request, **self.remove_api_resource_names(kwargs))
return http.HttpNoContent()
def delete_detail(self, request, **kwargs):
"""
Destroys a single resource/object.
Calls ``obj_delete``.
If the resource is deleted, return ``HttpNoContent`` (204 No Content).
If the resource did not exist, return ``Http404`` (404 Not Found).
"""
# Manually construct the bundle here, since we don't want to try to
# delete an empty instance.
bundle = Bundle(request=request)
try:
self.obj_delete(bundle=bundle, **self.remove_api_resource_names(kwargs))
return http.HttpNoContent()
except NotFound:
return http.HttpNotFound()
def patch_list(self, request, **kwargs):
"""
Updates a collection in-place.
The exact behavior of ``PATCH`` to a list resource is still a matter of
some debate in REST circles, and the ``PATCH`` RFC doesn't prescribe a payload format. So the
behavior this method implements (described below) is something of a
stab in the dark. It's mostly cribbed from GData, with a smattering
of ActiveResource-isms and maybe even an original idea or two.
The ``PATCH`` format is one that's similar to the response returned from
a ``GET`` on a list resource::
{
"objects": [{object}, {object}, ...],
"deleted_objects": ["URI", "URI", "URI", ...],
}
For each object in ``objects``:
* If the dict does not have a ``resource_uri`` key then the item is
considered "new" and is handled like a ``POST`` to the resource list.
* If the dict has a ``resource_uri`` key and the ``resource_uri`` refers
to an existing resource then the item is a update; it's treated
like a ``PATCH`` to the corresponding resource detail.
* If the dict has a ``resource_uri`` but the resource *doesn't* exist,
then this is considered to be a create-via-``PUT``.
Each entry in ``deleted_objects`` refers to a resource URI of an existing
resource to be deleted; each is handled like a ``DELETE`` to the relevant
resource.
In any case:
* If there's a resource URI it *must* refer to a resource of this
type. It's an error to include a URI of a different resource.
* ``PATCH`` is all or nothing. If a single sub-operation fails, the
entire request will fail and all resources will be rolled back.
* For ``PATCH`` to work, you **must** have ``put`` in your
:ref:`detail-allowed-methods` setting.
* To delete objects via ``deleted_objects`` in a ``PATCH`` request you
**must** have ``delete`` in your :ref:`detail-allowed-methods`
setting.
Substitute appropriate names for ``objects`` and
``deleted_objects`` if ``Meta.collection_name`` is set to something
other than ``objects`` (default).
"""
request = convert_post_to_patch(request)
deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
collection_name = self._meta.collection_name
deleted_collection_name = 'deleted_%s' % collection_name
if collection_name not in deserialized:
raise BadRequest("Invalid data sent: missing '%s'" % collection_name)
if len(deserialized[collection_name]) and 'put' not in self._meta.detail_allowed_methods:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
bundles_seen = []
for data in deserialized[collection_name]:
# If there's a resource_uri then this is either an
# update-in-place or a create-via-PUT.
if "resource_uri" in data:
uri = data.pop('resource_uri')
try:
obj = self.get_via_uri(uri, request=request)
# The object does exist, so this is an update-in-place.
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle, for_list=True)
bundle = self.alter_detail_data_to_serialize(request, bundle)
self.update_in_place(request, bundle, data)
except (ObjectDoesNotExist, MultipleObjectsReturned):
# The object referenced by resource_uri doesn't exist,
# so this is a create-by-PUT equivalent.
data = self.alter_deserialized_detail_data(request, data)
bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
self.obj_create(bundle=bundle)
else:
# There's no resource URI, so this is a create call just
# like a POST to the list resource.
data = self.alter_deserialized_detail_data(request, data)
bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
self.obj_create(bundle=bundle)
bundles_seen.append(bundle)
deleted_collection = deserialized.get(deleted_collection_name, [])
if deleted_collection:
if 'delete' not in self._meta.detail_allowed_methods:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
for uri in deleted_collection:
obj = self.get_via_uri(uri, request=request)
bundle = self.build_bundle(obj=obj, request=request)
self.obj_delete(bundle=bundle)
if not self._meta.always_return_data:
return http.HttpAccepted()
else:
to_be_serialized = {
'objects': [
self.full_dehydrate(b, for_list=True)
for b in bundles_seen
]
}
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted)
def patch_detail(self, request, **kwargs):
"""
Updates a resource in-place.
Calls ``obj_update``.
If the resource is updated, return ``HttpAccepted`` (202 Accepted).
If the resource did not exist, return ``HttpNotFound`` (404 Not Found).
"""
request = convert_post_to_patch(request)
basic_bundle = self.build_bundle(request=request)
# We want to be able to validate the update, but we can't just pass
# the partial data into the validator since all data needs to be
# present. Instead, we basically simulate a PUT by pulling out the
# original data and updating it in-place.
# So first pull out the original object. This is essentially
# ``get_detail``.
try:
obj = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
except ObjectDoesNotExist:
return http.HttpNotFound()
except MultipleObjectsReturned:
return http.HttpMultipleChoices("More than one resource is found at this URI.")
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
# Now update the bundle in-place.
deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
self.update_in_place(request, bundle, deserialized)
if not self._meta.always_return_data:
return http.HttpAccepted()
else:
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
return self.create_response(request, bundle, response_class=http.HttpAccepted)
def update_in_place(self, request, original_bundle, new_data):
"""
Update the object in original_bundle in-place using new_data.
"""
original_bundle.data.update(**dict_strip_unicode_keys(new_data))
# Now we've got a bundle with the new data sitting in it and we're
# basically in the same spot as a PUT request. So the rest of this
# function is cribbed from put_detail.
self.alter_deserialized_detail_data(request, original_bundle.data)
kwargs = {
self._meta.detail_uri_name: self.get_bundle_detail_data(original_bundle),
'request': request,
}
return self.obj_update(bundle=original_bundle, **kwargs)
def get_schema(self, request, **kwargs):
"""
Returns a serialized form of the schema of the resource.
Calls ``build_schema`` to generate the data. This method only responds
to HTTP GET.
Should return a HttpResponse (200 OK).
"""
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
self.log_throttled_access(request)
bundle = self.build_bundle(request=request)
self.authorized_read_detail(self.get_object_list(bundle.request), bundle)
return self.create_response(request, self.build_schema())
def get_multiple(self, request, **kwargs):
"""
Returns a serialized list of resources based on the identifiers
from the URL.
Calls ``obj_get`` to fetch only the objects requested. This method
only responds to HTTP GET.
Should return a HttpResponse (200 OK).
"""
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
# Rip apart the list then iterate.
kwarg_name = '%s_list' % self._meta.detail_uri_name
obj_identifiers = kwargs.get(kwarg_name, '').split(';')
objects = []
not_found = []
base_bundle = self.build_bundle(request=request)
for identifier in obj_identifiers:
try:
obj = self.obj_get(bundle=base_bundle, **{self._meta.detail_uri_name: identifier})
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle, for_list=True)
objects.append(bundle)
except (ObjectDoesNotExist, Unauthorized):
not_found.append(identifier)
object_list = {
self._meta.collection_name: objects,
}
if len(not_found):
object_list['not_found'] = not_found
self.log_throttled_access(request)
return self.create_response(request, object_list)
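# A minimal sketch of the ``set`` endpoint handled above, assuming a
# hypothetical ``note`` resource with ``detail_uri_name = 'pk'``:
#
#     GET /api/v1/note/set/1;3;99/
#
#     {
#         "objects": [{...note 1...}, {...note 3...}],
#         "not_found": ["99"]
#     }
#
# Identifiers are split on ``;`` and looked up one at a time; anything missing
# or unauthorized ends up under ``not_found``.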
class ModelDeclarativeMetaclass(DeclarativeMetaclass):
def __new__(cls, name, bases, attrs):
meta = attrs.get('Meta')
if meta and hasattr(meta, 'queryset'):
setattr(meta, 'object_class', meta.queryset.model)
new_class = super(ModelDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
include_fields = getattr(new_class._meta, 'fields', [])
excludes = getattr(new_class._meta, 'excludes', [])
field_names = list(new_class.base_fields.keys())
for field_name in field_names:
if field_name == 'resource_uri':
continue
if field_name in new_class.declared_fields:
continue
if len(include_fields) and field_name not in include_fields:
del(new_class.base_fields[field_name])
if len(excludes) and field_name in excludes:
del(new_class.base_fields[field_name])
# Add in the new fields.
new_class.base_fields.update(new_class.get_fields(include_fields, excludes))
if getattr(new_class._meta, 'include_absolute_url', True):
if 'absolute_url' not in new_class.base_fields:
new_class.base_fields['absolute_url'] = fields.CharField(attribute='get_absolute_url', readonly=True)
elif 'absolute_url' in new_class.base_fields and 'absolute_url' not in attrs:
del(new_class.base_fields['absolute_url'])
return new_class
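# A minimal sketch of a resource built through this metaclass, assuming the
# usual ``ModelResource`` based on it and a hypothetical ``Note`` model:
#
#     class NoteResource(ModelResource):
#         class Meta:
#             queryset = Note.objects.all()
#             resource_name = 'note'
#             excludes = ['internal_flag']
#
# ``object_class`` is derived from ``queryset.model``, the model's
# non-relational fields are introspected into API fields via ``get_fields``,
# and anything in ``excludes`` (or absent from an explicit ``fields`` list)
# is dropped from ``base_fields``.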
class BaseModelResource(Resource):
"""
A subclass of ``Resource`` designed to work with Django's ``Models``.
This class will introspect a given ``Model`` and build a field list based
on the fields found on the model (excluding relational fields).
Given that it is aware of Django's ORM, it also handles the CRUD data
operations of the resource.
"""
@classmethod
def should_skip_field(cls, field):
"""
Given a Django model field, return if it should be included in the
contributed ApiFields.
"""
# Ignore certain fields (related fields).
if getattr(field, 'rel'):
return True
return False
@classmethod
def api_field_from_django_field(cls, f, default=fields.CharField):
"""
Returns the field type that would likely be associated with each
Django type.
"""
result = default
internal_type = f.get_internal_type()
if internal_type == 'DateField':
result = fields.DateField
elif internal_type == 'DateTimeField':
result = fields.DateTimeField
elif internal_type in ('BooleanField', 'NullBooleanField'):
result = fields.BooleanField
elif internal_type in ('FloatField',):
result = fields.FloatField
elif internal_type in ('DecimalField',):
result = fields.DecimalField
elif internal_type in ('IntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField', 'AutoField'):
result = fields.IntegerField
elif internal_type in ('FileField', 'ImageField'):
result = fields.FileField
elif internal_type == 'TimeField':
result = fields.TimeField
# TODO: Perhaps enable these via introspection. The reason they're not enabled
# by default is the very different ``__init__`` they have over
# the other fields.
# elif internal_type == 'ForeignKey':
# result = ForeignKey
# elif internal_type == 'ManyToManyField':
# result = ManyToManyField
return result
@classmethod
def get_fields(cls, fields=None, excludes=None):
"""
Given any explicit fields to include and fields to exclude, add
additional fields based on the associated model.
"""
final_fields = {}
fields = fields or []
excludes = excludes or []
if not cls._meta.object_class:
return final_fields
for f in cls._meta.object_class._meta.fields:
# If the field name is already present, skip
if f.name in cls.base_fields:
continue
# If field is not present in explicit field listing, skip
if fields and f.name not in fields:
continue
# If field is in exclude list, skip
if excludes and f.name in excludes:
continue
if cls.should_skip_field(f):
continue
api_field_class = cls.api_field_from_django_field(f)
kwargs = {
'attribute': f.name,
'help_text': f.help_text,
'verbose_name': f.verbose_name,
}
if f.null is True:
kwargs['null'] = True
kwargs['unique'] = f.unique
if not f.null and f.blank is True:
kwargs['default'] = ''
kwargs['blank'] = True
if f.get_internal_type() == 'TextField':
kwargs['default'] = ''
if f.has_default():
kwargs['default'] = f.default
if getattr(f, 'auto_now', False):
kwargs['default'] = f.auto_now
if getattr(f, 'auto_now_add', False):
kwargs['default'] = f.auto_now_add
final_fields[f.name] = api_field_class(**kwargs)
final_fields[f.name].instance_name = f.name
return final_fields
def check_filtering(self, field_name, filter_type='exact', filter_bits=None):
"""
Given a field name, an optional filter type and an optional list of
additional relations, determine if a field can be filtered on.
If a filter does not meet the needed conditions, it should raise an
``InvalidFilterError``.
If the filter meets the conditions, a list of attribute names (not
field names) will be returned.
"""
if filter_bits is None:
filter_bits = []
if field_name not in self._meta.filtering:
raise InvalidFilterError("The '%s' field does not allow filtering." % field_name)
# Check to see if it's an allowed lookup type.
if self._meta.filtering[field_name] not in (ALL, ALL_WITH_RELATIONS):
# Must be an explicit whitelist.
if filter_type not in self._meta.filtering[field_name]:
raise InvalidFilterError("'%s' is not an allowed filter on the '%s' field." % (filter_type, field_name))
if self.fields[field_name].attribute is None:
raise InvalidFilterError("The '%s' field has no 'attribute' for searching with." % field_name)
# Check to see if it's a relational lookup and if that's allowed.
if len(filter_bits):
if not getattr(self.fields[field_name], 'is_related', False):
raise InvalidFilterError("The '%s' field does not support relations." % field_name)
if not self._meta.filtering[field_name] == ALL_WITH_RELATIONS:
raise InvalidFilterError("Lookups are not allowed more than one level deep on the '%s' field." % field_name)
# Recursively descend through the remaining lookups in the filter,
# if any. We should ensure that all along the way, we're allowed
# to filter on that field by the related resource.
related_resource = self.fields[field_name].get_related_resource(None)
return [self.fields[field_name].attribute] + related_resource.check_filtering(filter_bits[0], filter_type, filter_bits[1:])
return [self.fields[field_name].attribute]
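# A minimal sketch of the whitelist this method enforces, assuming a
# hypothetical resource with a related ``user`` field (names illustrative):
#
#     class Meta:
#         filtering = {
#             'title': ['exact', 'startswith'],
#             'user': ALL_WITH_RELATIONS,
#         }
#
# ``check_filtering('title', 'startswith')`` returns ``['title']``, while
# ``check_filtering('user', 'exact', ['username'])`` recurses into the related
# resource and returns something like ``['user', 'username']``.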
def filter_value_to_python(self, value, field_name, filters, filter_expr,
filter_type):
"""
Turn the string ``value`` into a Python object.
"""
# Simple values
value = string_to_python(value)
# Split on ',' if not empty string and either an in or range filter.
if filter_type in ('in', 'range') and len(value):
if hasattr(filters, 'getlist'):
value = []
for part in filters.getlist(filter_expr):
value.extend(part.split(','))
else:
value = value.split(',')
return value
def build_filters(self, filters=None):
"""
Given a dictionary of filters, create the necessary ORM-level filters.
Keys should be resource fields, **NOT** model fields.
Valid values are either a list of Django filter types (i.e.
``['startswith', 'exact', 'lte']``), the ``ALL`` constant or the
``ALL_WITH_RELATIONS`` constant.
"""
# At the declarative level:
# filtering = {
# 'resource_field_name': ['exact', 'startswith', 'endswith', 'contains'],
# 'resource_field_name_2': ['exact', 'gt', 'gte', 'lt', 'lte', 'range'],
# 'resource_field_name_3': ALL,
# 'resource_field_name_4': ALL_WITH_RELATIONS,
# ...
# }
# Accepts the filters as a dict. None by default, meaning no filters.
if filters is None:
filters = {}
qs_filters = {}
if getattr(self._meta, 'queryset', None) is not None:
# Get the possible query terms from the current QuerySet.
query_terms = self._meta.queryset.query.query_terms
else:
query_terms = QUERY_TERMS
if django.VERSION >= (1, 8) and GeometryField:
query_terms = query_terms | set(GeometryField.class_lookups.keys())
for filter_expr, value in filters.items():
filter_bits = filter_expr.split(LOOKUP_SEP)
field_name = filter_bits.pop(0)
filter_type = 'exact'
if field_name not in self.fields:
# It's not a field we know about. Move along citizen.
continue
if len(filter_bits) and filter_bits[-1] in query_terms:
filter_type = filter_bits.pop()
lookup_bits = self.check_filtering(field_name, filter_type, filter_bits)
value = self.filter_value_to_python(value, field_name, filters, filter_expr, filter_type)
db_field_name = LOOKUP_SEP.join(lookup_bits)
qs_filter = "%s%s%s" % (db_field_name, LOOKUP_SEP, filter_type)
qs_filters[qs_filter] = value
return dict_strip_unicode_keys(qs_filters)
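# A minimal sketch of the translation performed above, assuming the lookups
# are allowed by ``check_filtering`` (values illustrative):
#
#     build_filters({'title__startswith': 'A', 'user__username': 'daniel'})
#     ->  {'title__startswith': 'A', 'user__username__exact': 'daniel'}
#
# Unknown fields are skipped, a trailing query term (``startswith``, ``in``,
# ``range``, ...) is peeled off as the filter type, and ``in``/``range``
# values are split on commas by ``filter_value_to_python``.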
def apply_sorting(self, obj_list, options=None):
"""
Given a dictionary of options, apply some ORM-level sorting to the
provided ``QuerySet``.
Looks for the ``order_by`` key and handles either ascending (just the
field name) or descending (the field name with a ``-`` in front).
The field name should be the resource field, **NOT** model field.
"""
if options is None:
options = {}
parameter_name = 'order_by'
if 'order_by' not in options:
if 'sort_by' not in options:
# Nothing to alter the order. Return what we've got.
return obj_list
else:
warnings.warn("'sort_by' is a deprecated parameter. Please use 'order_by' instead.")
parameter_name = 'sort_by'
order_by_args = []
if hasattr(options, 'getlist'):
order_bits = options.getlist(parameter_name)
else:
order_bits = options.get(parameter_name)
if not isinstance(order_bits, (list, tuple)):
order_bits = [order_bits]
for order_by in order_bits:
order_by_bits = order_by.split(LOOKUP_SEP)
field_name = order_by_bits[0]
order = ''
if order_by_bits[0].startswith('-'):
field_name = order_by_bits[0][1:]
order = '-'
if field_name not in self.fields:
# It's not a field we know about. Move along citizen.
raise InvalidSortError("No matching '%s' field for ordering on." % field_name)
if field_name not in self._meta.ordering:
raise InvalidSortError("The '%s' field does not allow ordering." % field_name)
if self.fields[field_name].attribute is None:
raise InvalidSortError("The '%s' field has no 'attribute' for ordering with." % field_name)
order_by_args.append("%s%s" % (order, LOOKUP_SEP.join([self.fields[field_name].attribute] + order_by_bits[1:])))
return obj_list.order_by(*order_by_args)
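# A minimal sketch of the sorting behaviour, assuming ``created`` is listed in
# ``Meta.ordering`` (names illustrative):
#
#     GET /api/v1/note/?order_by=-created
#
# maps to ``obj_list.order_by('-created')``; several ``order_by`` parameters
# are combined in the order given, and an unknown or disallowed field raises
# ``InvalidSortError``.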
def apply_filters(self, request, applicable_filters):
"""
An ORM-specific implementation of ``apply_filters``.
The default simply applies the ``applicable_filters`` as ``**kwargs``,
but should make it possible to do more advanced things.
"""
return self.get_object_list(request).filter(**applicable_filters)
def get_object_list(self, request):
"""
An ORM-specific implementation of ``get_object_list``.
Returns a queryset that may have been limited by other overrides.
"""
return self._meta.queryset._clone()
def obj_get_list(self, bundle, **kwargs):
"""
        An ORM-specific implementation of ``obj_get_list``.
Takes an optional ``request`` object, whose ``GET`` dictionary can be
used to narrow the query.
"""
filters = {}
if hasattr(bundle.request, 'GET'):
# Grab a mutable copy.
filters = bundle.request.GET.copy()
# Update with the provided kwargs.
filters.update(kwargs)
applicable_filters = self.build_filters(filters=filters)
try:
objects = self.apply_filters(bundle.request, applicable_filters)
return self.authorized_read_list(objects, bundle)
except ValueError:
raise BadRequest("Invalid resource lookup data provided (mismatched type).")
def obj_get(self, bundle, **kwargs):
"""
        An ORM-specific implementation of ``obj_get``.
Takes optional ``kwargs``, which are used to narrow the query to find
the instance.
"""
# prevents FieldError when looking up nested resources containing extra data
field_names = self._meta.object_class._meta.get_all_field_names()
field_names.append('pk')
kwargs = dict([(k, v,) for k, v in kwargs.items() if k in field_names])
try:
object_list = self.get_object_list(bundle.request).filter(**kwargs)
stringified_kwargs = ', '.join(["%s=%s" % (k, v) for k, v in kwargs.items()])
if len(object_list) <= 0:
raise self._meta.object_class.DoesNotExist("Couldn't find an instance of '%s' which matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
elif len(object_list) > 1:
                raise MultipleObjectsReturned("More than one '%s' matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
bundle.obj = object_list[0]
self.authorized_read_detail(object_list, bundle)
return bundle.obj
except ValueError:
raise NotFound("Invalid resource lookup data provided (mismatched type).")
def obj_create(self, bundle, **kwargs):
"""
        An ORM-specific implementation of ``obj_create``.
"""
bundle.obj = self._meta.object_class()
for key, value in kwargs.items():
setattr(bundle.obj, key, value)
bundle = self.full_hydrate(bundle)
return self.save(bundle)
def lookup_kwargs_with_identifiers(self, bundle, kwargs):
"""
        Kwargs here represent URI identifiers, e.g. /repos/<user_id>/<repo_name>/.
We need to turn those identifiers into Python objects for generating
lookup parameters that can find them in the DB
"""
lookup_kwargs = {}
bundle.obj = self.get_object_list(bundle.request).model()
# Override data values, we rely on uri identifiers
bundle.data.update(kwargs)
# We're going to manually hydrate, as opposed to calling
# ``full_hydrate``, to ensure we don't try to flesh out related
# resources & keep things speedy.
bundle = self.hydrate(bundle)
for identifier in kwargs:
if identifier == self._meta.detail_uri_name:
lookup_kwargs[identifier] = kwargs[identifier]
continue
field_object = self.fields[identifier]
# Skip readonly or related fields.
if field_object.readonly is True or field_object.is_related:
continue
# Check for an optional method to do further hydration.
method = getattr(self, "hydrate_%s" % identifier, None)
if method:
bundle = method(bundle)
if field_object.attribute:
value = field_object.hydrate(bundle)
lookup_kwargs[identifier] = value
return lookup_kwargs
def obj_update(self, bundle, skip_errors=False, **kwargs):
"""
        An ORM-specific implementation of ``obj_update``.
"""
bundle_detail_data = self.get_bundle_detail_data(bundle) if bundle.obj else None
arg_detail_data = kwargs.get(self._meta.detail_uri_name, None)
if not bundle_detail_data or (arg_detail_data and bundle_detail_data != arg_detail_data):
try:
lookup_kwargs = self.lookup_kwargs_with_identifiers(bundle, kwargs)
            except Exception:
                # if there is trouble hydrating the data, fall back to just
                # using kwargs by itself (usually it only contains a "pk" key
                # and this will work fine).
lookup_kwargs = kwargs
try:
bundle.obj = self.obj_get(bundle=bundle, **lookup_kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
bundle = self.full_hydrate(bundle)
return self.save(bundle, skip_errors=skip_errors)
def obj_delete_list(self, bundle, **kwargs):
"""
        An ORM-specific implementation of ``obj_delete_list``.
"""
objects_to_delete = self.obj_get_list(bundle=bundle, **kwargs)
deletable_objects = self.authorized_delete_list(objects_to_delete, bundle)
if hasattr(deletable_objects, 'delete'):
# It's likely a ``QuerySet``. Call ``.delete()`` for efficiency.
deletable_objects.delete()
else:
for authed_obj in deletable_objects:
authed_obj.delete()
def obj_delete_list_for_update(self, bundle, **kwargs):
"""
        An ORM-specific implementation of ``obj_delete_list_for_update``.
"""
objects_to_delete = self.obj_get_list(bundle=bundle, **kwargs)
deletable_objects = self.authorized_update_list(objects_to_delete, bundle)
if hasattr(deletable_objects, 'delete'):
# It's likely a ``QuerySet``. Call ``.delete()`` for efficiency.
deletable_objects.delete()
else:
for authed_obj in deletable_objects:
authed_obj.delete()
def obj_delete(self, bundle, **kwargs):
"""
        An ORM-specific implementation of ``obj_delete``.
Takes optional ``kwargs``, which are used to narrow the query to find
the instance.
"""
if not hasattr(bundle.obj, 'delete'):
try:
bundle.obj = self.obj_get(bundle=bundle, **kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
self.authorized_delete_detail(self.get_object_list(bundle.request), bundle)
bundle.obj.delete()
@atomic_decorator()
def patch_list(self, request, **kwargs):
"""
An ORM-specific implementation of ``patch_list``.
Necessary because PATCH should be atomic (all-success or all-fail)
and the only way to do this neatly is at the database level.
"""
return super(BaseModelResource, self).patch_list(request, **kwargs)
def rollback(self, bundles):
"""
        An ORM-specific implementation of ``rollback``.
Given the list of bundles, delete all models pertaining to those
bundles.
"""
for bundle in bundles:
if bundle.obj and self.get_bundle_detail_data(bundle):
bundle.obj.delete()
def create_identifier(self, obj):
return u"%s.%s.%s" % (obj._meta.app_label, get_module_name(obj._meta), obj.pk)
def save(self, bundle, skip_errors=False):
if bundle.via_uri:
return bundle
self.is_valid(bundle)
if bundle.errors and not skip_errors:
raise ImmediateHttpResponse(response=self.error_response(bundle.request, bundle.errors))
# Check if they're authorized.
if bundle.obj.pk:
self.authorized_update_detail(self.get_object_list(bundle.request), bundle)
else:
self.authorized_create_detail(self.get_object_list(bundle.request), bundle)
# Save FKs just in case.
self.save_related(bundle)
# Save the main object.
obj_id = self.create_identifier(bundle.obj)
if obj_id not in bundle.objects_saved or bundle.obj._state.adding:
bundle.obj.save()
bundle.objects_saved.add(obj_id)
# Now pick up the M2M bits.
m2m_bundle = self.hydrate_m2m(bundle)
self.save_m2m(m2m_bundle)
return bundle
def save_related(self, bundle):
"""
Handles the saving of related non-M2M data.
        Assigning ``child.parent = parent`` & then calling ``Child.save``
        isn't enough to make sure the ``parent`` is saved.
To get around this, we go through all our related fields &
call ``save`` on them if they have related, non-M2M data.
M2M data is handled by the ``ModelResource.save_m2m`` method.
"""
for field_name, field_object in self.fields.items():
if not field_object.is_related:
continue
if field_object.is_m2m:
continue
if not field_object.attribute:
continue
if field_object.readonly:
continue
if field_object.blank and field_name not in bundle.data:
continue
# Get the object.
try:
related_obj = getattr(bundle.obj, field_object.attribute)
except ObjectDoesNotExist:
# Django 1.8: unset related objects default to None, no error
related_obj = None
# We didn't get it, so maybe we created it but haven't saved it
if related_obj is None:
related_obj = bundle.related_objects_to_save.get(field_object.attribute, None)
if field_object.related_name:
# this might be a reverse relation, so we need to save this
# model, attach it to the related object, and save the related
# object.
if not self.get_bundle_detail_data(bundle):
bundle.obj.save()
setattr(related_obj, field_object.related_name, bundle.obj)
related_resource = field_object.get_related_resource(related_obj)
# Before we build the bundle & try saving it, let's make sure we
# haven't already saved it.
if related_obj:
obj_id = self.create_identifier(related_obj)
if obj_id in bundle.objects_saved:
# It's already been saved. We're done here.
continue
if bundle.data.get(field_name):
if hasattr(bundle.data[field_name], 'keys'):
# Only build & save if there's data, not just a URI.
related_bundle = related_resource.build_bundle(
obj=related_obj,
data=bundle.data.get(field_name),
request=bundle.request,
objects_saved=bundle.objects_saved
)
related_resource.full_hydrate(related_bundle)
related_resource.save(related_bundle)
related_obj = related_bundle.obj
elif field_object.related_name:
# This condition probably means a URI for a reverse
# relation was provided.
related_bundle = related_resource.build_bundle(
obj=related_obj,
request=bundle.request,
objects_saved=bundle.objects_saved
)
related_resource.save(related_bundle)
related_obj = related_bundle.obj
if related_obj:
setattr(bundle.obj, field_object.attribute, related_obj)
def save_m2m(self, bundle):
"""
Handles the saving of related M2M data.
Due to the way Django works, the M2M data must be handled after the
main instance, which is why this isn't a part of the main ``save`` bits.
Currently slightly inefficient in that it will clear out the whole
relation and recreate the related data as needed.
"""
for field_name, field_object in self.fields.items():
if not field_object.is_m2m:
continue
if not field_object.attribute:
continue
if field_object.readonly:
continue
# Get the manager.
related_mngr = None
if isinstance(field_object.attribute, six.string_types):
related_mngr = getattr(bundle.obj, field_object.attribute)
elif callable(field_object.attribute):
related_mngr = field_object.attribute(bundle)
if not related_mngr:
continue
if hasattr(related_mngr, 'clear'):
# FIXME: Dupe the original bundle, copy in the new object &
# check the perms on that (using the related resource)?
# Clear it out, just to be safe.
related_mngr.clear()
related_objs = []
for related_bundle in bundle.data[field_name]:
related_resource = field_object.get_related_resource(bundle.obj)
# Only build & save if there's data, not just a URI.
updated_related_bundle = related_resource.build_bundle(
obj=related_bundle.obj,
data=related_bundle.data,
request=bundle.request,
objects_saved=bundle.objects_saved,
via_uri=related_bundle.via_uri,
)
related_resource.save(updated_related_bundle)
related_objs.append(updated_related_bundle.obj)
related_mngr.add(*related_objs)
def detail_uri_kwargs(self, bundle_or_obj):
"""
Given a ``Bundle`` or an object (typically a ``Model`` instance),
it returns the extra kwargs needed to generate a detail URI.
By default, it uses the model's ``pk`` in order to create the URI.
"""
kwargs = {}
if isinstance(bundle_or_obj, Bundle):
kwargs[self._meta.detail_uri_name] = getattr(bundle_or_obj.obj, self._meta.detail_uri_name)
else:
kwargs[self._meta.detail_uri_name] = getattr(bundle_or_obj, self._meta.detail_uri_name)
return kwargs
class ModelResource(six.with_metaclass(ModelDeclarativeMetaclass, BaseModelResource)):
pass
class NamespacedModelResource(ModelResource):
"""
A ModelResource subclass that respects Django namespaces.
"""
def _build_reverse_url(self, name, args=None, kwargs=None):
namespaced = "%s:%s" % (self._meta.urlconf_namespace, name)
return reverse(namespaced, args=args, kwargs=kwargs)
# Based off of ``piston.utils.coerce_put_post``. Similarly BSD-licensed.
# And no, the irony is not lost on me.
def convert_post_to_VERB(request, verb):
"""
Force Django to process the VERB.
"""
if request.method == verb:
if hasattr(request, '_post'):
del request._post
del request._files
try:
request.method = "POST"
request._load_post_and_files()
request.method = verb
except AttributeError:
request.META['REQUEST_METHOD'] = 'POST'
request._load_post_and_files()
request.META['REQUEST_METHOD'] = verb
setattr(request, verb, request.POST)
return request
def convert_post_to_put(request):
return convert_post_to_VERB(request, verb='PUT')
def convert_post_to_patch(request):
return convert_post_to_VERB(request, verb='PATCH')
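# --- Illustrative sketch (not part of the original module) ---
# The resource below shows how the ``filtering`` and ``ordering`` Meta options
# consumed by ``build_filters`` and ``apply_sorting`` above are typically
# declared.  ``Note`` and ``myapp`` are hypothetical names used only for
# illustration; ``ALL`` and ``ALL_WITH_RELATIONS`` are the tastypie constants
# referenced in the ``build_filters`` docstring.
#
#     from tastypie.constants import ALL, ALL_WITH_RELATIONS
#     from tastypie.resources import ModelResource
#     from myapp.models import Note  # hypothetical model
#
#     class NoteResource(ModelResource):
#         class Meta:
#             queryset = Note.objects.all()
#             resource_name = 'note'
#             # e.g. GET /api/v1/note/?title__startswith=foo&order_by=-created
#             filtering = {
#                 'title': ['exact', 'startswith'],
#                 'created': ALL,
#                 'author': ALL_WITH_RELATIONS,
#             }
#             ordering = ['title', 'created']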
| bsd-3-clause | 2,766,582,241,735,299,000 | 37.162033 | 213 | 0.60134 | false | 4.377568 | false | false | false |
Danielhiversen/home-assistant | homeassistant/components/switch/lutron_caseta.py | 4 | 1592 | """
Support for Lutron Caseta switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.lutron_caseta/
"""
import logging
from homeassistant.components.lutron_caseta import (
LUTRON_CASETA_SMARTBRIDGE, LutronCasetaDevice)
from homeassistant.components.switch import SwitchDevice, DOMAIN
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['lutron_caseta']
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up Lutron switch."""
devs = []
bridge = hass.data[LUTRON_CASETA_SMARTBRIDGE]
switch_devices = bridge.get_devices_by_domain(DOMAIN)
for switch_device in switch_devices:
dev = LutronCasetaLight(switch_device, bridge)
devs.append(dev)
async_add_entities(devs, True)
return True
class LutronCasetaLight(LutronCasetaDevice, SwitchDevice):
"""Representation of a Lutron Caseta switch."""
async def async_turn_on(self, **kwargs):
"""Turn the switch on."""
self._smartbridge.turn_on(self._device_id)
async def async_turn_off(self, **kwargs):
"""Turn the switch off."""
self._smartbridge.turn_off(self._device_id)
@property
def is_on(self):
"""Return true if device is on."""
return self._state["current_state"] > 0
async def async_update(self):
"""Update when forcing a refresh of the device."""
self._state = self._smartbridge.get_device_by_id(self._device_id)
_LOGGER.debug(self._state)
| mit | 7,844,480,755,282,230,000 | 29.615385 | 74 | 0.672739 | false | 3.593679 | false | false | false |
elin-moco/bedrock | bedrock/mozorg/urls.py | 1 | 3227 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.conf.urls import patterns, url
from util import page
import views
urlpatterns = patterns('',
url('^$', views.HomeTestView.as_view(), name='mozorg.home'),
page('about/manifesto', 'mozorg/about/manifesto.html'),
page('about', 'mozorg/about.html'),
page('book', 'mozorg/book.html'),
url('^about/partnerships/$', views.partnerships, name='mozorg.partnerships'),
page('about/partnerships/distribution', 'mozorg/partnerships-distribution.html'),
page('about/history', 'mozorg/about/history.html'),
page('products', 'mozorg/products.html'),
page('about/mozilla-based', 'mozorg/projects/mozilla-based.html'),
page('button', 'mozorg/button.html'),
page('mission', 'mozorg/mission.html'),
page('mobile', 'mozorg/mobile.html'),
page('ITU', 'mozorg/itu.html'),
page('about/powered-by', 'mozorg/powered-by.html'),
page('about/governance', 'mozorg/about/governance/governance.html'),
page('about/governance/roles', 'mozorg/about/governance/roles.html'),
page('about/governance/policies', 'mozorg/about/governance/policies.html'),
page('about/governance/policies/security-group', 'mozorg/about/governance/policies/security/group.html'),
page('about/governance/policies/security-group/bugs', 'mozorg/about/governance/policies/security/bugs.html'),
page('about/governance/policies/security-group/tld-idn', 'mozorg/about/governance/policies/security/tld-idn.html'),
page('about/governance/policies/security-group/membership', 'mozorg/about/governance/policies/security/membership.html'),
page('about/governance/organizations', 'mozorg/about/governance/organizations.html'),
page('about/governance/policies/participation', 'mozorg/about/governance/policies/participation.html'),
page('about/governance/policies', 'mozorg/about/governance/policies/policies.html'),
url('^contribute/$', views.contribute, name='mozorg.contribute',
kwargs={'template': 'mozorg/contribute.html',
'return_to_form': False}),
url('^contribute/event/$', views.contribute,
kwargs={'template': 'mozorg/contribute.html',
'return_to_form': True},
name='mozorg.contribute_event'),
url('^contribute/page/$', views.contribute,
kwargs={'template': 'mozorg/contribute-page.html',
'return_to_form': False},
name='mozorg.contribute_page'),
url('^contribute/embed/$', views.contribute_embed,
name='mozorg.contribute_embed',
kwargs={'template': 'mozorg/contribute-embed.html',
'return_to_form': False}),
url('^contribute/universityambassadors/$',
views.contribute_university_ambassadors,
name='mozorg.contribute_university_ambassadors'),
page('contribute/universityambassadors/thanks',
'mozorg/contribute_university_ambassadors_thanks.html'),
url(r'^plugincheck/$',
views.plugincheck,
name='mozorg.plugincheck'),
url(r'^robots.txt$', views.Robots.as_view(), name='robots.txt'),
)
| mpl-2.0 | -4,273,601,811,822,576,000 | 52.783333 | 125 | 0.691044 | false | 3.302968 | false | true | false |
Gustavo6046/GusBot-2 | plugins/markov.py | 1 | 22353 | import json
import BeautifulSoup
import requests
import re
import time
import threading
import networkx as nx
import multiprocessing
import matplotlib.pyplot as plt
import glob
import os
import difflib
from plugincon import bot_command, easy_bot_command, get_message_target, get_bot_nickname
from random import choice, sample
crawling = 0
crawled = 0
markov_dict = {}
markov_filter = []
can_crawl = True
def hastebin(data):
try:
h = requests.post("http://hastebin.com/documents", data=data, timeout=10)
except requests.exceptions.ConnectTimeout:
return "\x01"
if h.status_code != 200:
return "\x02" + str(h.status_code)
return "http://hastebin.com/" + h.json()['key']
def botbin(data, description="Result"):
r = hastebin(data)
if r == "\x01":
return "Error: Connection to hastebin.com timed out!"
elif r.startswith("\x02"):
return "Error: Unsuccesful status code reached! ({})".format(r[1:])
else:
return "{} URL: {}".format(description, r)
@easy_bot_command("hastemarkovjson")
def hastemarkov(message, raw):
if raw:
return
r = hastebin(json.dumps({x: list(y) for x, y in markov_dict.items()}, indent=4))
if r == "\x01":
return "Error: Connection to hastebin.com timed out!"
elif r.startswith("\x02"):
return "Error: Unsuccesful status code reached! ({})".format(r[1:])
else:
return "URL: {}".format(r)
@easy_bot_command("listmarkovfiles")
def list_markov_files(message, raw):
if raw:
return
return botbin("\n".join([os.path.splitext(os.path.split(x)[-1])[0] for x in glob.glob("markov2/*.mkov2")]))
@easy_bot_command("qlistmarkovfiles")
def quick_list(message, raw):
if raw:
return
return "Markov files that can be loaded using loadmarkov: {}".format(", ".join([os.path.splitext(os.path.split(x)[-1])[0] for x in glob.glob("markov2/*.mkov2")]))
@easy_bot_command("searchmarkovfiles")
def search_files(message, raw):
if raw:
return
if len(message["arguments"]) < 2:
return "Syntax: searchmarkofiles <keyword>"
return "Similiar Markov files: {} | Markov files with {} in filename: {}".format(", ".join([x for x in [os.path.splitext(os.path.split(x)[-1])[0] for x in glob.glob("markov2/*.mkov2")] if difflib.SequenceMatcher(None, x, " ".join(message["arguments"][1:])).ratio() > 0.8]), message["arguments"][1], ", ".join(x for x in [os.path.splitext(os.path.split(x)[-1])[0] for x in glob.glob("markov2/*.mkov2")] if message["arguments"][1] in x))
@easy_bot_command("markovderivates")
def derivates(message, raw):
if raw:
return
if len(message["arguments"]) < 2:
return "Syntax: markovderivates <Markov keyword>"
if message["arguments"][1] not in markov_dict:
return "Error: No such word in Markov data!"
return "Derivates for {}: {}".format(message["arguments"][1], ", ".join(markov_dict[message["arguments"][1]]))
def regex(value, reg):
if reg == "":
return True
return bool(re.search(reg, value))
def ends_with_any(string, list_of_endings):
for ending in list_of_endings:
if string.endswith(ending):
return True
return False
def mkplot(markov_dict):
G = nx.DiGraph()
labels = {}
for i, (k, v) in enumerate(markov_dict.iteritems()):
G.add_node(k)
for w in v:
G.add_node(w)
for i, (k, v) in enumerate(markov_dict.iteritems()):
for w in v:
G.add_edge(k, w)
pos = nx.spring_layout(G)
nx.draw_networkx_nodes(G, pos)
nx.draw_networkx_edges(G, pos, arrows=True)
nx.draw_networkx_labels(G, pos, {w: w for k, v in markov_dict.items() for w in [x for x in [k] + list(v)]})
plt.show()
def visible(element):
if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
return False
elif re.match('<!--.*-->', str(element)):
return False
return True
def isalnumspace(string):
for char in string:
if not (char.isalnum() or " " == char):
return False
return True
def simple_string_filter(old_string, bad_chars=None, extra_filter=None):
result = ""
if bad_chars:
for char in old_string:
if not char in bad_chars:
result += char
if extra_filter and hasattr(extra_filter, "__call__"):
old_result = result
result = ""
for char in old_result:
if extra_filter(char):
result += char
return result
def parse_markov_string(string):
global markov_dict
words = simple_string_filter(string, "\'\"-/\\,.!?", isalnumspace).split(" ")
for x in xrange(len(words)):
try:
if words[x - 1] == words[x] or words[x] == words[x + 1]:
continue
except IndexError:
pass
try:
markov_dict[words[x - 1].lower()].add(words[x].lower())
except KeyError:
try:
markov_dict[words[x - 1].lower()] = {words[x].lower()}
except IndexError:
pass
except IndexError:
pass
try:
markov_dict[words[x].lower()].add(words[x + 1].lower())
except KeyError:
try:
markov_dict[words[x].lower()] = {words[x + 1].lower()}
except IndexError:
pass
except IndexError:
continue
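# Illustrative note (not part of the original plugin): for the toy input
# "the cat sat on the mat", parse_markov_string() above ends up with roughly
#
#     markov_dict = {
#         'the': {'cat', 'mat'},
#         'cat': {'sat'},
#         'sat': {'on'},
#         'on':  {'the'},
#         'mat': {'the'},   # last -> first link caused by words[-1] at x == 0
#     }
#
# get_markov() later random-walks this dict with sample(markov_dict[x], 1),
# appending words until it reaches one with no successors or the result
# exceeds 750 characters.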
def string_filter(old_string, filter_, separator=None):
result_string = []
if hasattr(filter_, "__call__"):
for x in old_string:
if filter_(x):
result_string.append(x)
else:
if separator is None:
for x in old_string:
if x in str(filter_):
result_string.append(x)
else:
for x in old_string:
if x in str(filter_).split(separator):
result_string.append(x)
return "".join(result_string)
def crawl_markov(website, url_mask, max_level=3, level=0, crawled_urls=[]):
global markov_dict
global crawling, crawled
crawling += 1
if level > max_level:
return
if not can_crawl:
return
warnings = []
time.sleep(0.4)
try:
request = requests.get(website.encode("utf-8"), timeout=10)
except requests.ConnectionError:
return
except requests.exceptions.Timeout:
return
except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL):
try:
request = requests.get("http://" + website.encode("utf-8"), timeout=10)
except requests.ConnectionError:
return
except requests.exceptions.Timeout:
return
except requests.exceptions.InvalidURL:
return
html = BeautifulSoup.BeautifulSoup(request.text.encode("utf-8"))
for link in html.findAll("a", {"href": True}):
url = link["href"].encode("utf-8'")
if re.match("\.[a-zA-Z1-9]+$", url) and (not any(url.endswith(x) for x in [".html", ".php", ".htm"]) or "." in url.split("/")[-1]):
continue
if not url.startswith("http"):
continue
if url in crawled_urls:
continue
crawled_urls.append(url)
if regex(url, url_mask):
threading.Thread(target=crawl_markov, args=(url, url_mask, max_level, level+1, crawled_urls)).start()
for visible_text in [text.encode("utf-8") for text in filter(visible, html.findAll(text=True))]:
for line in visible_text.splitlines():
parse_markov_string(line)
time.sleep(0.5)
crawled += 1
print "Done crawling {}!".format(website)
@easy_bot_command("plotmarkov", True)
def plot_markov(message, raw):
global markov_dict
if raw:
return
p = multiprocessing.Process(target=mkplot, args=(markov_dict,))
p.start()
return "Plotting..."
@easy_bot_command("togglemarkovcrawling")
def toggle_crawling(message, raw):
global can_crawl
if raw:
return
can_crawl = not can_crawl
return "Success: now crawling is{} stopped!".format(("n't" if can_crawl else ""))
@bot_command("parsemarkov", True)
def parse_markov_from_text(message, connector, index, raw):
global markov_dict
for key, item in markov_dict.items():
markov_dict[key] = set(item)
if not raw:
if len(message["arguments"]) < 2:
            connector.send_message(index, get_message_target(connector, message, index), "{}: Error: No argument provided!".format(message["nickname"]))
            return
data = open(" ".join(message["arguments"][1:])).read()
data = " ".join([n.strip() for n in data.split("\n")])
words = [x for x in simple_string_filter(data, "\'\"-/\\,.!?", isalnumspace).split(" ") if x != " "]
for x in xrange(len(words)):
try:
if words[x - 1] == words[x] or words[x] == words[x + 1]:
continue
except IndexError:
pass
try:
markov_dict[words[x - 1].lower()].add(words[x].lower())
except KeyError:
try:
markov_dict[words[x - 1].lower()] = {words[x].lower()}
except IndexError:
pass
except IndexError:
pass
try:
markov_dict[words[x].lower()].add(words[x + 1].lower())
except KeyError:
try:
markov_dict[words[x].lower()] = {words[x + 1].lower()}
except IndexError:
pass
except IndexError:
continue
    connector.send_message(index, get_message_target(connector, message, index), "{}: Text file successfully parsed on Markov!".format(message["nickname"]))
@easy_bot_command("flushmarkov", True)
def flush_markov_data(message, raw):
global markov_dict
if raw:
return
markov_dict = {}
return ["Markov flushed succesfully!"]
@easy_bot_command("mk_feeder", all_messages=True)
def feed_markov_data(message, raw):
global markov_dict
if raw:
return
for key, item in markov_dict.items():
markov_dict[key] = set(item)
words = simple_string_filter(" ".join(message["arguments"]), "\'\"-/\\,.!?", isalnumspace).split(" ")
for x in xrange(len(words)):
if x - 1 > -1:
try:
if words[x - 1] == words[x] or words[x] == words[x + 1]:
continue
except IndexError:
pass
try:
markov_dict[words[x - 1].lower()].add(words[x].lower())
except KeyError:
try:
markov_dict[words[x - 1].lower()] = {words[x].lower()}
except IndexError:
pass
except IndexError:
pass
try:
markov_dict[words[x].lower()].add(words[x + 1].lower())
except KeyError:
try:
markov_dict[words[x].lower()] = {words[x + 1].lower()}
except IndexError:
pass
except IndexError:
continue
else:
try:
markov_dict[words[x].lower()].add(words[x + 1].lower())
except KeyError:
try:
markov_dict[words[x].lower()] = {words[x + 1].lower()}
except IndexError:
pass
except IndexError:
continue
@easy_bot_command("markov")
def get_markov(message, raw):
global markov_dict
for key, item in markov_dict.items():
markov_dict[key] = set(item)
if raw:
return
# Checks.
    markov_dict.pop("", None)
    markov_dict.pop(" ", None)
for i, mkv in markov_dict.items():
        markov_dict[i].discard(" ")
        markov_dict[i].discard("")
if len(markov_dict) < 1:
return "Error: no Markov data!"
# Get the string!
if len(message["arguments"]) < 2:
x = choice(markov_dict.keys())
words = [x]
else:
words = [x.lower() for x in message["arguments"][1:]]
x = words[0]
level = 0
result = x
print x
while level < len(words) - 1:
if not words[level + 1] in markov_dict[x]:
return ["{}: {}".format(message["nickname"], result)]
x = words[level + 1]
level += 1
result += " " + x
while x in markov_dict.keys():
try:
x = sample(markov_dict[x], 1)[0]
except ValueError:
break
print x.encode("utf-8")
result += " " + x
if len(result) > 750:
break
for cuss in markov_filter:
result = result.replace(cuss, "*" * len(cuss))
result = "{0}: {1}".format(message["nickname"], result)
return [result]
@easy_bot_command("savemarkov", True)
def save_markov_json(message, raw):
global markov_dict
if not raw:
if len(message["arguments"]) < 2:
return ["Error: not enough arguments!", "(Insert Markov file name as an argument)"]
save_dict = markov_dict
for key, item in save_dict.items():
save_dict[key] = tuple(item)
open("markov2/{}.mkov2".format(message["arguments"][1]), "w").write(json.dumps(save_dict))
for key, item in markov_dict.items():
markov_dict[key] = set(item)
return ["{}: Saved succesfully to {}.mkov2!".format(message["nickname"], message["arguments"][1])]
else:
return []
@easy_bot_command("loadmarkovfilter", True)
def load_markov_filter(message, raw):
global markov_filter
if raw:
return
if len(message["arguments"]) < 2:
return ["Error: Not enough arguments!"]
    markov_filter += [line.rstrip("\n") for line in open("filters/{}.mkov2f".format(" ".join(message["arguments"][1:])))]
    return ["Blacklist updated successfully!"]
@easy_bot_command("savemarkovfilter", True)
def save_markov_filter(message, raw):
global markov_filter
if raw:
return
if len(message["arguments"]) < 2:
return ["Error: Not enough arguments!"]
open("filters/{}.mkov2f".format(" ".join(message["arguments"][1:])), "w").write("\n".join(markov_filter))
return ["Blacklist updated succesfully!"]
@easy_bot_command("loadmarkov", True)
def load_markov_json(message, raw):
global markov_dict
if not raw:
if len(message["arguments"]) < 2:
return ["Error: not enough arguments!", "(Insert Markov file name as an argument)"]
new_dict = json.load(open("markov2/{}.mkov2".format(message["arguments"][1])))
for key, item in new_dict.items():
new_dict[key] = {word for word in item}
markov_dict.update(new_dict)
return ["Loaded succesfully from {}.mkov2!".format(message["arguments"][1])]
else:
return []
@easy_bot_command("listfiltermarkov")
def list_cusses(message, raw):
if raw:
return
return "Cusses blacklisted: " + ", ".join(markov_filter)
@easy_bot_command("addfiltermarkov", True)
def filter_cusses(message, raw):
if raw:
return
global markov_filter
try:
markov_filter += message["arguments"][1:]
return ["Updated word blacklist succesfully!"]
except IndexError:
return ["Syntax: addfiltermarkov <list of cusses or blacklisted words>"]
@easy_bot_command("removefiltermarkov", True)
def unfilter_cusses(message, raw):
if raw:
return
global markov_filter
try:
for cuss in message["arguments"][1:]:
markov_filter.remove(cuss)
return ["Updated word blacklist succesfully!"]
except IndexError:
return ["Syntax: removefiltermarkov <list of words to un-blacklist>"]
@easy_bot_command("parsewebmarkov")
def parse_web_markov(message, raw):
global markov_dict
for key, item in markov_dict.items():
markov_dict[key] = set(item)
if raw:
return
messages = []
warnings = []
debug = "--debug" in message["arguments"][1:]
if len(message["arguments"]) < 2:
return ["{}: Error: No argument provided! (Syntax: parsewebmarkov <list of URLs>)".format(message["nickname"])]
for website in filter(lambda x: not x.startswith("--"), message["arguments"][1:]):
print "Parsing Markov from {}!".format(website)
messages.append("Parsing Markov from {}!".format(website))
try:
request = requests.get(website, timeout=10)
except requests.ConnectionError:
warnings.append("Error with connection!")
if debug:
raise
except requests.exceptions.Timeout:
warnings.append("Connection timed out!")
if debug:
raise
except requests.exceptions.MissingSchema:
try:
request = requests.get("http://" + website, timeout=10)
except requests.ConnectionError:
warnings.append("Error with connection!")
if debug:
raise
except requests.exceptions.Timeout:
warnings.append("Connection timed out!")
if debug:
raise
if not "request" in locals().keys():
continue
if request.status_code != 200:
warnings.append("{}: Error: Status {} reached!".format(message["nickname"], request.status_code))
continue
visible_texts = [text.encode("utf-8") for text in filter(visible, BeautifulSoup.BeautifulSoup(request.text).findAll(text=True))]
lines = []
for text in visible_texts:
lines += text.split("\n")
for line in lines:
words = simple_string_filter(line, "\'\"-/\\,.!?", isalnumspace).split(" ")
for x in xrange(len(words)):
try:
if words[x - 1] == words[x] or words[x] == words[x + 1]:
continue
except IndexError:
pass
try:
markov_dict[words[x - 1].lower()].add(words[x].lower())
except KeyError:
try:
markov_dict[words[x - 1].lower()] = {words[x].lower()}
except IndexError:
pass
except IndexError:
pass
try:
markov_dict[words[x].lower()].add(words[x + 1].lower())
except KeyError:
try:
markov_dict[words[x].lower()] = {words[x + 1].lower()}
except IndexError:
pass
except IndexError:
continue
if len(warnings) < len(message["arguments"][1:]):
messages.append("{}: Success reading Markov from (some) website(s)!".format(message["nickname"]))
return messages + warnings
@easy_bot_command("clearmarkovfilter", True)
def clear_filter(message, raw):
global markov_filter
if raw:
return
markov_filter = []
return "Success clearing Markov filter!"
@easy_bot_command("purgemarkov", True)
def purge_word_from_markov(message, raw):
global markov_dict
if raw:
return
if len(message["arguments"]) < 2:
return "Syntax: purgemarkov <list of words to purge from Markov>"
for word in message["arguments"][1:]:
for kw in markov_dict.keys():
if kw == word:
markov_dict.__delitem__(kw)
try:
if word in markov_dict[kw]:
markov_dict[kw] = [mk for mk in markov_dict[kw] if mk != word]
if markov_dict[kw] == []:
markov_dict.__delitem__(kw)
except KeyError:
pass
return "Words purged from Markov succesfully!"
def check_crawled(connector, index, message):
global crawling, crawled
while crawling > crawled:
time.sleep(0.2)
connector.send_message(
index,
get_message_target(connector, message, index),
"Finished crawling {all} websites!".format(all=crawled)
)
@bot_command("parsewebmarkovcrawl", True)
def get_web_markov_crawling(message, connector, index, raw):
global crawling, crawled
def smsg(msg):
if type(msg) is str:
connector.send_message(
index,
get_message_target(connector, message, index),
msg
)
return True
elif hasattr(msg, "__iter__"):
for m in msg:
connector.send_message(
index,
get_message_target(connector, message, index),
m
)
return True
else:
return False
crawling = 0
crawled = 0
if raw:
return
time.sleep(0.3)
if len(message["arguments"]) < 4:
smsg("Syntax: <URL mask> <max level> <list of URLs to crawl for Markov>")
return
try:
if int(message["arguments"][2]) > 4:
smsg("Way too large value for max_level! Use only up to 4. Do you want to wait for an eternity?!?")
return
if int(message["arguments"][2]) < 0:
smsg("Lol negative level XD")
return
except ValueError:
smsg("Insert some int for max level (second argument)! Insert something between 0 and 4.")
return
for website in message["arguments"][3:]:
crawl_markov(website, message["arguments"][1], int(message["arguments"][2]))
smsg("Website crawling threads started! Check for new additions using ||markovsize .")
threading.Thread(target=check_crawled, args=(connector, index, message)).start()
@easy_bot_command("markovsize")
def get_markov_size(message, raw):
global markov_dict
if not raw:
return ["Size of Markov chain: {}".format(len(markov_dict))]
| mit | 8,187,108,142,967,812,000 | 26.160389 | 439 | 0.555675 | false | 3.830848 | false | false | false |
huangshiyu13/funnyLittleProgram | huatian-funny/huatian/train.py | 1 | 6822 | # -*- coding=utf8 -*-
"""
Build the decision tree.
"""
from __future__ import division
from math import log
import operator
import matplotlib.pyplot as plt
from extension import mongo_collection, SALARY, EDUCATION, SATISFY
decision_node = dict(boxstyle="sawtooth", fc="0.8")
leaf_node = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")
def load_data():
"""从mongo导入数据"""
data = []
for user in mongo_collection.find({"appearance": {"$exists": True},
"satisfy": {"$exists": True}}):
data.append([user.get('appearance', 0),
user.get('age', u'0'),
user.get('height', u'0'),
SALARY.get(user.get('salary', u'0'), u'--'),
EDUCATION.get(user.get('education', u'0'), u'--'),
SATISFY[user['satisfy']]])
labels = [u'颜值', u'年龄', u'身高', u'工资', u'学历']
return data, labels
def majority_count(class_list):
class_count = {}
for vote in class_list:
class_count[vote] = class_count.get(vote, 0) + 1
sorted_class_count = sorted(class_count.iteritems(),
key=operator.itemgetter(1), reverse=True)
return sorted_class_count[0][0]
def calc_shannon_ent(data_set):
num_entries = len(data_set)
label_counts = {}
for feat_vec in data_set:
current_label = feat_vec[-1]
label_counts[current_label] = label_counts.get(current_label, 0) + 1
shannon_ent = 0.0
for key in label_counts:
prob = float(label_counts[key]) / num_entries
shannon_ent -= prob * log(prob, 2)
return shannon_ent
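# Worked example (added for clarity, not in the original script): for a data
# set whose class column is ['yes', 'yes', 'no'], calc_shannon_ent() returns
#
#     -(2/3) * log(2/3, 2) - (1/3) * log(1/3, 2)  ~= 0.918
#
# i.e. the Shannon entropy H(S) = -sum(p_i * log2(p_i)) over the class
# proportions p_i.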
def split_data_set(data_set, axis, value):
ret_data_set = []
for feat_vec in data_set:
if feat_vec[axis] == value:
reduced_feat_vec = feat_vec[:axis]
reduced_feat_vec.extend(feat_vec[axis+1:])
ret_data_set.append(reduced_feat_vec)
return ret_data_set
def choose_best_feature_to_split(data_set):
num_features = len(data_set[0]) - 1
base_entropy = calc_shannon_ent(data_set)
best_info_gain, best_feature = 0.0, -1
for i in range(num_features):
feat_fist = [example[i] for example in data_set]
unique_vals = set(feat_fist)
new_entropy = 0.0
for value in unique_vals:
sub_data_set = split_data_set(data_set, i, value)
prob = len(sub_data_set) / len(data_set)
new_entropy += prob * calc_shannon_ent(sub_data_set)
info_gain = base_entropy - new_entropy
if info_gain > best_info_gain:
best_info_gain = info_gain
best_feature = i
return best_feature
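# Note (added for clarity, not in the original script): the loop above is the
# ID3 criterion -- for each candidate feature A it computes the information
# gain
#
#     IG(S, A) = H(S) - sum_v (|S_v| / |S|) * H(S_v)
#
# where S_v is the subset of rows with A == v, and it keeps the feature with
# the largest gain.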
def create_tree(data_set, labels):
"""生成决策树"""
class_list = [example[-1] for example in data_set]
if class_list.count(class_list[0]) == len(class_list):
return class_list[0]
if len(data_set[0]) == 1:
return majority_count(class_list)
best_feat = choose_best_feature_to_split(data_set)
best_feat_label = labels[best_feat]
my_tree = {best_feat_label:{}}
del(labels[best_feat])
feat_values = [example[best_feat] for example in data_set]
unique_vals = set(feat_values)
for value in unique_vals:
sub_labels = labels[:]
my_tree[best_feat_label][value] = \
create_tree(split_data_set(data_set, best_feat, value), sub_labels)
return my_tree
def get_num_leafs(my_tree):
num_leafs = 0
first_str = my_tree.keys()[0]
second_dict = my_tree[first_str]
for _, val in second_dict.iteritems():
if isinstance(val, dict):
num_leafs += get_num_leafs(val)
else:
num_leafs += 1
return num_leafs
def get_tree_depth(my_tree):
max_depth = 0
first_str = my_tree.keys()[0]
second_dict = my_tree[first_str]
for _, val in second_dict.iteritems():
if isinstance(val, dict):
this_depth = 1 + get_tree_depth(val)
else:
this_depth = 1
if this_depth > max_depth:
max_depth = this_depth
return max_depth
def plot_node(node_txt, center_pt, parent_pt, node_type):
create_plot.ax1.annotate(
node_txt, xy=parent_pt, xycoords='axes fraction',
xytext=center_pt, textcoords='axes fraction',
va="center", ha="center", bbox=node_type, arrowprops=arrow_args)
def plot_mid_text(cntr_pt, parent_pt, txt_string):
x_mid = (parent_pt[0]-cntr_pt[0])/2.0 + cntr_pt[0]
y_mid = (parent_pt[1]-cntr_pt[1])/2.0 + cntr_pt[1]
create_plot.ax1.text(x_mid, y_mid, txt_string, va="center",
ha="center", rotation=30)
def plot_tree(my_tree, parent_pt, node_txt):
num_leafs = get_num_leafs(my_tree)
first_str = my_tree.keys()[0]
cntr_pt = (plot_tree.xOff + (2.0 + num_leafs) / 2.0 / plot_tree.totalW, plot_tree.yOff)
plot_mid_text(cntr_pt, parent_pt, node_txt)
plot_node(first_str, cntr_pt, parent_pt, decision_node)
second_dict = my_tree[first_str]
plot_tree.yOff = plot_tree.yOff - 1.0 / plot_tree.totalD
for key, val in second_dict.iteritems():
if isinstance(val, dict):
plot_tree(val, cntr_pt, unicode(key))
else:
plot_tree.xOff = plot_tree.xOff + 1.0 / plot_tree.totalW
plot_node(unicode(val), (plot_tree.xOff, plot_tree.yOff), cntr_pt, leaf_node)
plot_mid_text((plot_tree.xOff, plot_tree.yOff), cntr_pt, unicode(key))
plot_tree.yOff = plot_tree.yOff + 1.0 / plot_tree.totalD
def create_plot(in_tree):
""""生成图像"""
fig = plt.figure(1, figsize=(25, 10), facecolor='white')
fig.clf()
axprops = dict(xticks=[], yticks=[])
create_plot.ax1 = plt.subplot(111, frameon=False, **axprops)
plot_tree.totalW = float(get_num_leafs(in_tree))
plot_tree.totalD = float(get_tree_depth(in_tree))
plot_tree.xOff = -0.5 / plot_tree.totalW
plot_tree.yOff = 1.0
plot_tree(in_tree, (0.5, 1.0), '')
plt.show()
def compress_tree(my_tree):
"""压缩决策树"""
first_str = my_tree.keys()[0]
inner_dict = my_tree[first_str]
copy_dict = {}
for key, val in inner_dict.items():
if not isinstance(val, dict):
if val not in copy_dict:
copy_dict[val] = [unicode(key)]
else:
copy_dict[val].append(unicode(key))
copy_dict = {u','.join(val): unicode(key) for key, val in copy_dict.items()}
for key, val in inner_dict.items():
if isinstance(val, dict):
compress_tree(val)
else:
inner_dict.pop(key)
inner_dict.update(copy_dict)
if __name__ == '__main__':
data_set, labels = load_data()
result = create_tree(data_set, labels)
compress_tree(result)
create_plot(result)
| apache-2.0 | 624,625,102,838,304,900 | 32.270936 | 91 | 0.585135 | false | 2.999112 | false | false | false |
eepp/sortmuz | sortmuz/app.py | 1 | 9337 | # The MIT License (MIT)
#
# Copyright (c) 2014 Philippe Proulx <eepp.ca>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import mutagenx
import argparse
import readline
import sys
import os
import shutil
import sortmuz
from termcolor import colored
def _perror(msg, exit=True):
print(colored('Error: {}'.format(msg), 'red', attrs=['bold']),
file=sys.stderr)
if exit:
sys.exit(1)
def _pwarning(msg):
print(colored('Warning: {}'.format(msg), 'yellow', attrs=['bold']),
file=sys.stderr)
def _pinfo(msg):
print(colored('{}'.format(msg), 'blue'), file=sys.stderr)
def _parse_args():
ap = argparse.ArgumentParser()
ap.add_argument('-V', '--version', action='version',
version='%(prog)s v{}'.format(sortmuz.__version__))
ap.add_argument('-o', '--output', action='store', type=str,
default=os.getcwd(), metavar='DIR',
help='Output music collection directory (default: CWD)')
ap.add_argument('src', metavar='SRC', action='store', type=str,
help='Path to source directory')
# parse args
args = ap.parse_args()
# validate source directory
if not os.path.isdir(args.src):
_perror('source "{}" is not an existing directory'.format(args.src))
# validate output directory
if not os.path.isdir(args.output):
_perror('output "{}" is not an existing directory'.format(args.output))
sys.exit(1)
return args
def _print_summary(src, output, muz_files, meta_files):
print('{} {}'.format(colored('source:', 'blue'),
colored(os.path.abspath(src), 'blue', attrs=['bold'])))
print('{} {}'.format(colored('output:', 'blue'),
colored(os.path.abspath(output),
'blue', attrs=['bold'])))
if not muz_files:
_pwarning('no music files found')
else:
print()
_pinfo('music files:')
for file in muz_files:
print(' {}'.format(os.path.basename(file)))
print()
if not meta_files:
_pinfo('no meta files')
else:
_pinfo('meta files:')
for file in meta_files:
print(' {}'.format(os.path.basename(file)))
def _collect_files(src):
exts = ['.mp3', '.m4a', '.flac']
exclude_meta = ['.ds_store', 'desktop.ini', 'thumbs.db']
muz_files = []
meta_files = []
for file in os.listdir(src):
name, ext = os.path.splitext(file)
ext = ext.lower()
if ext in exts:
muz_files.append(os.path.abspath(os.path.join(src, file)))
else:
if file.lower() in exclude_meta:
continue
meta_files.append(os.path.abspath(os.path.join(src, file)))
return sorted(muz_files), sorted(meta_files)
def _get_file_infos(file):
try:
m_file = mutagenx.File(file)
    except Exception:
return '', '', ''
artist = ''
album = ''
year = ''
if type(m_file) is mutagenx.mp3.MP3:
if 'TPE1' in m_file:
artist = m_file['TPE1'].text[0]
elif 'TPE2' in m_file:
artist = m_file['TPE2'].text[0]
if 'TALB' in m_file:
album = m_file['TALB'].text[0]
year_tags = [
'TDRC',
'TYER',
'TDAT',
'TIME',
'TRDA',
]
for tag in year_tags:
if tag in m_file:
year = str(m_file[tag].text[0])
break
elif type(m_file) is mutagenx.mp4.MP4:
if b'\xa9ART' in m_file:
artist = m_file[b'\xa9ART'][0]
elif b'aART' in m_file:
artist = m_file[b'aART'][0]
if b'\xa9alb' in m_file:
album = m_file[b'\xa9alb'][0]
if b'\xa9day' in m_file:
year = str(m_file[b'\xa9day'][0])
return artist, album, year
def _guess_infos(muz_files):
if not muz_files:
return '', '', ''
artist, album, year = _get_file_infos(muz_files[0])
if len(muz_files) > 1:
artist2, album2, year2 = _get_file_infos(muz_files[1])
if artist != artist2:
artist = 'Various Artists'
return artist, album, year
def _pcp(src, dst):
msg = '[{}] "{}" {} "{}"'.format(colored('cp', attrs=['bold']), src,
colored('->', attrs=['bold']), dst)
print(msg)
def _pmkdir(dst):
print('[{}] "{}"'.format(colored('mkdir', attrs=['bold']), dst))
def do_sortmuz(src, output):
muz_files, meta_files = _collect_files(src)
_print_summary(src, output, muz_files, meta_files)
print(colored('\n---\n', 'blue'))
artist, album, year = _guess_infos(muz_files)
while True:
uartist = input('{} [{}] '.format(colored('artist?', 'green',
attrs=['bold']),
colored(artist, attrs=['bold'])))
ualbum = input('{} [{}] '.format(colored('album?', 'green',
attrs=['bold']),
colored(album, attrs=['bold'])))
uyear = input('{} [{}] '.format(colored('year?', 'green',
attrs=['bold']),
colored(year, attrs=['bold'])))
uconfirm = input('{} [{}] '.format(colored('confirm?', 'cyan',
attrs=['bold']),
colored('y', attrs=['bold'])))
if len(uconfirm) == 0 or uconfirm.lower() == 'y':
break
print()
uartist = uartist.strip()
ualbum = ualbum.strip()
uyear = uyear.strip()
if len(uartist.strip()) == 0:
uartist = artist
if len(ualbum.strip()) == 0:
ualbum = album
if len(uyear.strip()) == 0:
uyear = year
if len(uartist) == 0:
_perror('empty artist name')
if len(ualbum) == 0:
_perror('empty album name')
if len(uyear) == 0:
_perror('empty year')
year_album = '{} {}'.format(uyear, ualbum)
album_dir = os.path.join(output, uartist, year_album)
abs_album_dir = os.path.abspath(album_dir)
if os.path.isdir(album_dir):
res = input('{} {} [{}] '.format(colored('overwrite', 'cyan',
attrs=['bold']),
colored(abs_album_dir, 'blue',
attrs=['bold']),
colored('n', attrs=['bold'])))
if res.lower() != 'y':
sys.exit(0)
print()
print('[{}] "{}"'.format(colored('rm', attrs=['bold']),
abs_album_dir))
try:
shutil.rmtree(album_dir)
except Exception as e:
_perror('cannot remove directory "{}": {}'.format(album_dir, e))
else:
print()
_pmkdir(abs_album_dir)
try:
os.makedirs(album_dir)
except Exception as e:
_perror('cannot create directory "{}": {}'.format(album_dir, e))
for file in muz_files:
dst = os.path.join(abs_album_dir, os.path.basename(file))
_pcp(file, dst)
try:
shutil.copyfile(file, dst)
except Exception as e:
            _perror('cannot copy file "{}": {}'.format(file, e))
if meta_files:
meta_dir = os.path.join(abs_album_dir, '_')
_pmkdir(meta_dir)
try:
os.makedirs(meta_dir)
except Exception as e:
_perror('cannot create directory "{}": {}'.format(meta_dir, e))
for file in meta_files:
dst = os.path.join(meta_dir, os.path.basename(file))
_pcp(file, dst)
try:
if os.path.isdir(file):
shutil.copytree(file, dst)
else:
shutil.copyfile(file, dst)
except Exception as e:
                fmt = 'cannot copy file/directory "{}": {}'
_perror(fmt.format(file, e))
def run():
args = _parse_args()
try:
do_sortmuz(args.src, args.output)
except KeyboardInterrupt:
sys.exit(1)
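# Illustrative usage sketch (not part of the original module); the paths are
# hypothetical:
#
#     from sortmuz.app import do_sortmuz
#     do_sortmuz('/tmp/some_rip', '/home/me/music')
#
# do_sortmuz() prompts for artist/album/year on the console, then copies the
# audio files into "<output>/<artist>/<year> <album>/" and any meta files into
# a "_" sub-directory of that album folder.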
| mit | 7,318,007,081,211,529,000 | 28.64127 | 80 | 0.520938 | false | 3.809466 | false | false | false |
yuanlisky/linlp | linlp/recognition/OrganizationRecognition.py | 1 | 2353 | # -*- coding: utf-8 -*-
from linlp.algorithm.Viterbi import viterbiRecognitionSimply
from linlp.algorithm.viterbiMat.prob_trans_organization import prob_trans as trans_p
from linlp.algorithm.viterbiMat.prob_emit_organization import prob_emit as emit_p
def organizationviterbiSimply(obs, DT, obsDT, debug):
if debug:
x = obs
obs = [('始##始', 'begin')] + obs + [('末##末', 'end')]
switch = {'nz': 1, 'ni': 2, 'nic': 2, 'nis': 2, 'nit': 2, 'm': 3}
length = len(obs)
for no in range(length):
case = switch.get(obs[no][1], 0)
if not DT.tree.get(obs[no][0]):
DT.tree[obs[no][0]] = dict()
if case == 1:
if obsDT.tree[obs[no][0]].get('total', 1001) <= 1000:
DT.tree[obs[no][0]].setdefault('F', 1000)
else:
DT.tree[obs[no][0]].setdefault('Z', 21149365)
elif case == 2:
DT.tree[obs[no][0]].setdefault('K', 1000)
DT.tree[obs[no][0]].setdefault('D', 1000)
elif case == 3 and len(obsDT.tree.get(obs[no][0], 'm')) != 2:
DT.tree[obs[no][0]] = {'M': 1000}
elif obs[no][1].startswith('ns'):
obs[no] = ('未##地', obs[no][1])
elif obs[no][1].startswith('x'):
obs[no] = ('未##串', 'x')
elif obs[no][1].startswith('nr'):
obs[no] = ('未##人', obs[no][1])
elif obs[no][1].startswith('nt'):
obs[no] = ('未##团', obs[no][1])
elif obs[no][1].startswith('m'):
obs[no] = ('未##数', obs[no][1])
elif obs[no][1].startswith('t'):
obs[no] = ('未##时', obs[no][1])
        elif not DT.tree.get(obs[no][0]): # when the word is not in the organization dictionary
DT.tree[obs[no][0]] = {'Z': 21149365}
path = viterbiRecognitionSimply(obs, trans_p, emit_p, DT)
if debug:
s = ''
t = '['
l = len(x)
for i in range(l):
word = x[i]
s += '[' + word[0] + ' '
t += word[0]
for k, v in DT.tree[obs[i+1][0]].items():
if k == 'total':
continue
s += k + ':' + str(v) + ' '
s += ']'
t += '/' + path[i+1] + ', '
t += ']'
        print('Organization-name role observations: %s' % s)
        print('Organization-name role tagging: %s' % t)
return path[1:-1]
| apache-2.0 | -8,138,178,052,148,569,000 | 37.627119 | 84 | 0.457218 | false | 2.852315 | false | false | false |
ianclegg/winrmlib | winrmlib/api/session.py | 1 | 8473 | # (c) 2015, Ian Clegg <[email protected]>
#
# winrmlib is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '[email protected]'
import uuid
from winrmlib.api.service import Service
from winrmlib.api.resourcelocator import ResourceLocator
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
class Session(object):
"""
Factory object for building sessions and connection options
"""
def __init__(self, endpoint, username, password, **kwargs):
# transport = Session._build_transport(endpoint, auth, username, password)
# Store the endpoint and the service we will use to invoke it
self.endpoint = endpoint
        # The fourth argument toggles CredSSP support (False == no CredSSP); it is enabled here.
self.service = Service(endpoint, username, password, True)
# The user can set override some defaults for the Session, they can also be overridden on each request
self.max_envelope = self._build_max_envelope(kwargs.get('max_envelope_size', Session.MaxEnvelopeSize))
self.locale = self._build_locale(kwargs.get('locale', Session.Locale))
# The operation timeout header overrides the timeout set on the server. Some users may prefer to
# use the servers default timeout, so this header will only be included if the user explicitly sets
# an operation timeout.
if 'operation_timeout' in kwargs:
self.default_operation_timeout = self._build_operation_timeout(kwargs.get('operation_timeout'))
else:
self.default_operation_timeout = None
def get(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.GetAction, operation_timeout, max_envelope_size, locale)
self.service.invoke.set_options(tsoapheaders=headers)
return self.service.invoke
def put(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
        if isinstance(resource, str):
            resource = ResourceLocator(resource)
        headers = self._build_headers(resource, Session.PutAction,
                                      operation_timeout, max_envelope_size, locale)
        return self.service.invoke(headers, obj)
def delete(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.DeleteAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, None)
def create(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.CreateAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
def command(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.CommandAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
def recieve(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
if isinstance(resource, str):
resource = ResourceLocator(resource)
headers = self._build_headers(resource, Session.ReceiveAction,
operation_timeout, max_envelope_size, locale)
return self.service.invoke(headers, obj)
@staticmethod
def _build_selectors(selectors):
# Build the WSMan SelectorSet Element from the selector dictionary
selector_set = []
for selector_name in selectors.iterkeys():
selector_value = selectors[selector_name]
selector_set.append({'#text': str(selector_value), '@Name': selector_name})
return {'w:SelectorSet': {'w:Selector': selector_set}}
@staticmethod
# TODO add mustcomply attribute to element
def _build_options(options):
option_set = []
for name, (value, must_comply) in options.iteritems():
must_comply = bool(must_comply)
option_set.append({'#text': str(value), '@Name': name})
return {'w:OptionSet': {'w:Option': option_set}}
def _build_operation_timeout(self, operation_timeout):
if operation_timeout is None:
return self.default_operation_timeout
else:
return {'w:OperationTimeout': 'PT{0}S'.format(operation_timeout)}
def _build_max_envelope(self, max_envelope_size):
if max_envelope_size is None:
return self.max_envelope
else:
return {'w:MaxEnvelopeSize': '{0}'.format(max_envelope_size)}
def _build_locale(self, locale):
if locale is None:
return self.locale
else:
return {'Locale': {"@xml:lang": "en-US"}}
def _build_headers(self, resource, action, operation_timeout, max_envelope_size, locale):
headers = OrderedDict([
('a:To', self.endpoint),
('a:ReplyTo', Session.Address),
('w:ResourceURI', resource.url),
('a:MessageID', format(uuid.uuid4())),
('a:Action', action)]
)
# TODO: Implement support for Microsoft XPRESS compression
# https://social.msdn.microsoft.com/Forums/en-US/501e4f29-edfc-4240-af3b-344264060b99/
# wsman-xpress-remote-shell-compression?forum=os_windowsprotocols
# headers.update({'rsp:CompressionType': {'@soap:mustUnderstand': 'true', '#text': 'xpress'}})
# only include the operation timeout if the user specified one when the class was instantiated
# or if the user explicitly set one when invoking a method.
if operation_timeout is not None:
headers.update(self._build_operation_timeout(operation_timeout))
elif self.default_operation_timeout is not None:
headers.update(self.default_operation_timeout)
headers.update(self._build_selectors(resource.selectors))
headers.update(self._build_options(resource.options))
headers.update(self._build_max_envelope(max_envelope_size))
headers.update(self._build_locale(locale))
return headers
Session.MaxEnvelopeSize = 153600
Session.Locale = 'en-US'
Session.Address = {'a:Address': {
'@mustUnderstand': 'true',
'#text': 'http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous'
}}
# Static members that can be safely shared with all instances
Session.WSManNamespace = '{http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd}'
Session.AddressingNamespace = '{http://schemas.xmlsoap.org/ws/2004/08/addressing}'
Session.SoapContentType = {'Content-Type': 'application/soap+xml; charset=utf-8'}
# WSMan SOAP Actions
Session.GetAction = 'http://schemas.xmlsoap.org/ws/2004/09/transfer/Get'
Session.PutAction = 'http://schemas.xmlsoap.org/ws/2004/09/transfer/Put'
Session.DeleteAction = 'http://schemas.xmlsoap.org/ws/2004/09/transfer/Delete'
Session.CreateAction = 'http://schemas.xmlsoap.org/ws/2004/09/transfer/Create'
Session.CommandAction = 'http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Command'
Session.ReceiveAction = 'http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive'
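# Usage sketch: the selector and option values below are illustrative placeholders,
# not real shell identifiers; this only shows the WSMan SelectorSet/OptionSet
# fragments produced by the helpers defined above.
if __name__ == '__main__':
    print(Session._build_selectors({'ShellId': '00000000-0000-0000-0000-000000000000'}))
    # {'w:SelectorSet': {'w:Selector': [{'#text': '00000000-...', '@Name': 'ShellId'}]}}
    print(Session._build_options({'WINRS_NOPROFILE': ('FALSE', True)}))
    # {'w:OptionSet': {'w:Option': [{'#text': 'FALSE', '@Name': 'WINRS_NOPROFILE'}]}}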
| apache-2.0 | 2,206,419,717,923,506,000 | 42.010152 | 112 | 0.659271 | false | 4.073558 | false | false | false |
genenetwork/pylmm_gn2 | pylmm_gn2/plink.py | 1 | 4036 | # Plink module
#
# Copyright (C) 2015 Pjotr Prins ([email protected])
# Some of the BED file parsing came from pylmm:
# Copyright (C) 2013 Nicholas A. Furlotte ([email protected])
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Parse a textual BIM file and return the contents as a list of tuples.
# According to the PLINK documentation:
#
# Extended variant information file accompanying a .bed binary genotype table.
#
# A text file with no header line, and one line per variant with the following six fields:
#
# Chromosome code (either an integer, or 'X'/'Y'/'XY'/'MT'; '0' indicates unknown) or name
# Variant identifier
# Position in morgans or centimorgans (safe to use dummy value of '0')
# Base-pair coordinate (normally 1-based, but 0 ok; limited to 2^31-2)
# Allele 1 (corresponding to clear bits in .bed; usually minor)
# Allele 2 (corresponding to set bits in .bed; usually major)
#
# Allele codes can contain more than one character. Variants with negative bp coordinates are ignored by PLINK. Example
#
# 1 mm37-1-3125499 0 3125499 1 2
# 1 mm37-1-3125701 0 3125701 1 2
# 1 mm37-1-3187481 0 3187481 1 2
import struct
# import numpy as np
def readbim(fn):
  # Return one tuple per variant: (chrom, id, genetic distance, bp position, allele1, allele2)
  res = []
  for line in open(fn):
    fields = line.split()
    if 'nan' not in fields:
      res.append( (fields[0],fields[1],int(fields[2]),int(fields[3]),int(fields[4]),int(fields[5])) )
    else:
      res.append( (fields[0],fields[1],fields[2],float('nan'),float('nan'),float('nan')) )
  return res
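# Usage sketch (the path is a placeholder); with numeric allele codes as in the
# example rows above:
#   readbim('mouse.bim')[0]  ->  ('1', 'mm37-1-3125499', 0, 3125499, 1, 2)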
# .bed (PLINK binary biallelic genotype table)
#
# Primary representation of genotype calls at biallelic variants. Must
# be accompanied by .bim and .fam files. Basically contains one block per
# SNP, with the genotypes of all individuals packed four per byte.
#
# Since it is a biallelic format it records, for every individual,
# whether the first allele is homozygous (b00), the second allele is
# homozygous (b11), the genotype is heterozygous (b10) or the call is
# missing (b01).
# http://pngu.mgh.harvard.edu/~purcell/plink2/formats.html#bed
# http://pngu.mgh.harvard.edu/~purcell/plink2/formats.html#fam
# http://pngu.mgh.harvard.edu/~purcell/plink2/formats.html#bim
def readbed(fn,inds,encoding,func=None):
# For every SNP block fetch the individual genotypes using values
# 0.0 and 1.0 for homozygous and 0.5 for heterozygous alleles
def fetchGenotypes(X):
# D = { \
# '00': 0.0, \
# '10': 0.5, \
# '11': 1.0, \
# '01': float('nan') \
# }
Didx = { '00': 0, '10': 1, '11': 2, '01': 3 }
G = []
for x in X:
if not len(x) == 10:
xx = x[2:]
x = '0b' + '0'*(8 - len(xx)) + xx
a,b,c,d = (x[8:],x[6:8],x[4:6],x[2:4])
L = [encoding[Didx[y]] for y in [a,b,c,d]]
G += L
G = G[:inds]
# G = np.array(G)
return G
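  # 4 genotypes are packed per byte, so each SNP block spans ceil(inds/4) bytes
  # (Python 2 integer division below).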
bytes = inds / 4 + (inds % 4 and 1 or 0)
format = 'c'*bytes
count = 0
with open(fn,'rb') as f:
magic = f.read(3)
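    # PLINK magic bytes 0x6c 0x1b, followed by 0x01 for SNP-major (variant-major) order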
assert( ":".join("{:02x}".format(ord(c)) for c in magic) == "6c:1b:01")
while True:
count += 1
X = f.read(bytes)
if not X:
return(count-1)
XX = [bin(ord(x)) for x in struct.unpack(format,X)]
xs = fetchGenotypes(XX)
func(count,xs)
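# Usage sketch: the .bed path and the number of individuals are placeholders;
# the encoding list matches the genotype values documented above.
if __name__ == '__main__':
  enc = [0.0, 0.5, 1.0, float('nan')]   # hom. allele 1, het, hom. allele 2, missing
  def show_first(count, genotypes):
    if count == 1:
      print(genotypes)                  # genotype row of the first SNP
  num_snps = readbed('mouse.bed', 100, enc, func=show_first)
  print(num_snps)                       # number of SNP blocks read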
| agpl-3.0 | 1,433,418,620,599,991,000 | 36.719626 | 119 | 0.62116 | false | 3.116602 | false | false | false |
mmdg-oxford/papers | Schlipf-PRL-2018/model/epw_step.py | 1 | 1434 | from __future__ import print_function
from bose_einstein import bose_einstein
from constant import htr_to_K, htr_to_meV
import argparser
import norm_k
import numpy as np
import scf
import system
args = argparser.read_argument('Renormalize EPW calculation')
thres = args.thres / htr_to_meV
beta = htr_to_K / args.temp
window = args.energy / htr_to_meV
if args.vb: offset = -8.75333295715961e-03
else: offset = 8.53193322468371e-03
Sigma = system.make_data(args.dft, args.vb)
Sigma.bose_einstein = bose_einstein(Sigma.freq, beta)
if args.vb: band_str = '36'
else: band_str = '37'
temp_str = '%03dK' % args.temp
if args.acoustic:
temp_str = '%dK' % args.temp
qpt_str = '10000'
elif args.temp == 1:
qpt_str = '050000'
elif args.temp == 150:
qpt_str = '100000'
elif args.temp == 300:
qpt_str = '100000'
else:
print("temperature " + str(args.temp) + " not available")
exit()
dir_str = args.direction
if args.acoustic:
filename = 'data/epw_all_28424_'+temp_str+'_5meV_acoustic_only/data_'+dir_str+'_'+band_str+'_10000.dat'
else:
filename = 'data/res_'+temp_str+'_1meV/data_'+dir_str+'_'+band_str+'_'+qpt_str+'.dat'
file_epw = open(filename, 'r')
for line in file_epw:
data = line.split()
eps = np.float(data[1]) - offset
ImS = np.float(data[2])
if (abs(eps) < window) and args.method == 2:
zz = 1.0 / (1.0 + np.float(data[4]))
else:
zz = 1.0
print(eps * htr_to_meV, ImS * zz * htr_to_meV, zz)
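# Output columns: energy relative to the offset (meV), ImS scaled by zz (meV), zz.
# Note (assumption): data[4] is taken to hold the energy derivative of the real
# self-energy, so zz = 1/(1 + dReSigma/deps) acts as the quasiparticle weight; it
# is only applied within +/- window of the offset when method 2 is selected.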
| gpl-3.0 | 6,837,800,266,492,713,000 | 24.607143 | 105 | 0.663877 | false | 2.463918 | false | false | false |
sSwiergosz/FinancialOrganiser | organizer_project/organizer/migrations/0015_auto_20170512_1208.py | 1 | 1520 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-05-12 10:08
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('organizer', '0014_transaction_category'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('balance', models.DecimalField(decimal_places=11, max_digits=11)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterField(
model_name='transaction',
name='category',
field=models.CharField(choices=[('Apparel/Accesory', 'Apparel/Accesory'), ('Entertainment', 'Entertainment'), ('Food/Beverage', 'Food/Beverage'), ('Skin care/Cosmetics', 'Skin care/Cosmetics'), ('Computer/Mobile', 'Computer/Mobile'), ('Books/Newspapers', 'Books/Newspapers'), ('Other', 'Other')], max_length=20),
),
migrations.AlterField(
model_name='transaction',
name='purchase_date',
field=models.DateField(default=datetime.date.today),
),
]
| apache-2.0 | 1,351,537,851,008,599,800 | 40.081081 | 324 | 0.628947 | false | 3.979058 | false | false | false |