metadata (dict) | text (string, lengths 60 to 3.49M)
---|---|
{
"source": "jgrundstad/genotyping",
"score": 2
} |
#### File: jgrundstad/genotyping/report.py
```python
import argparse
import re
import sys
class Reporter:
def __init__(self, infile, filt=False):
self.infile = infile
self.filter_missense_nonsense_only = filt
self.__build_regex()
self.__identify_columns()
self.parse_infile()
def __build_regex(self):
self.regex = re.compile(r'(?P<eff>[\w\_]+)\(%s\)' % '\|'.join([
'(?P<effect_impact>[^\|]*)',
'(?P<functional_class>[^\|]*)',
'(?P<codon_change>[^\|]*)',
'(?P<amino_acid_change>[^\|]*)',
'(?P<amino_acid_length>[^\|]*)',
'(?P<gene_name>[^\|]*)',
'(?P<transcript_biotype>[^\|]*)',
'(?P<gene_coding>[^\|]*)',
'(?P<transcript_id>[^\|]*)',
            '(?P<exon_intron_rank>[^\|]*)',
'(?P<genotype_number>[^\|]*)',
#'?(?P<warnings_errors>[^\|]*)?'
]))
def __identify_columns(self):
self.columns = ['chr', 'pos', 'context', 'ref', 'alt', 'normal_ref_count',
'normal_alt_count', 'tumor_ref_count', 'tumor_alt_count',
'gene', 'effect', 'coding', 'codon_change',
'amino_acid_change', 'amino_acid_length', 'mutect_call']
def parse_infile(self):
self.outstring = '\t'.join(self.columns) + '\n'
column_refs = '' # lookup for irregularly placed columns
try:
for line in open(self.infile):
# Skip headers.
if line[0] == '#':
continue
line = line.strip().split('\t')
if line[0] == 'contig':
column_refs = line
continue
report = list()
report.append(line[column_refs.index('contig')]) # chromosome
report.append(line[column_refs.index('0')]) # position
report.append(line[column_refs.index('context')]) # context
report.append(line[column_refs.index('REF_ALLELE')]) # reference
report.append(line[column_refs.index('ALT_ALLELE')]) # alternative
report.append(line[column_refs.index('n_ref_count')]) # normal reference count
report.append(line[column_refs.index('n_alt_count')]) # normal alternative count
report.append(line[column_refs.index('t_ref_count')]) # tumor reference count
report.append(line[column_refs.index('t_alt_count')]) # tumor alternative count
# Interpret the effects of the variant.
for match in self.regex.finditer(line[7]):
# NOTE this is a hack for Kevin's request
if self.filter_missense_nonsense_only:
if match.group('functional_class') not in ['MISSENSE', 'NONSENSE']:
continue
self.outstring += '\t'.join(report)
self.outstring += '\t%s' % match.group('gene_name')
self.outstring += '\t%s' % match.group('eff')
self.outstring += '\t%s' % match.group('gene_coding')
self.outstring += '\t%s' % match.group('codon_change')
self.outstring += '\t%s' % match.group('amino_acid_change')
self.outstring += '\t%s' % match.group('amino_acid_length')
self.outstring += '\t%s' % line[-1] # keep?
self.outstring += '\n'
break
except IOError:
print >>sys.stderr, "ERROR: Unable to open -i/--infile: " + self.infile
sys.exit()
def print_output(self):
print >>sys.stdout, self.outstring
def main():
parser = argparse.ArgumentParser(
        description='parse snpEff annotated output into a digestible report.')
parser.add_argument('-i', '--infile', action='store', dest='infile',
help='snpEff annotated variant file')
parser.add_argument('-f', '--filter_missense_nonsense_only',
action='store_true', dest='f', default=False,
help='Apply a filter that only reports NONSENSE and ' \
+ 'MISSENSE vars')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
r = Reporter(args.infile, args.f)
r.print_output()
if __name__ == '__main__':
main()
``` |
{
"source": "jgrundstad/hgac-glacier",
"score": 2
} |
#### File: jgrundstad/hgac-glacier/check_jobs_status.py
```python
import argparse
import boto
import boto.glacier.layer2
import datetime
import json
import os
import sqlite3
import sys
SQLITE_DB = 'glacier_archive.sqlite3'
ACCESS_KEY = ''
SECRET_KEY = ''
ACCOUNT_ID = ''
#SNS_TOPIC = 'arn:aws:glacier:us-east-1:203237044369:vaults/'
def check_jobs_status(vault=None, **kwargs):
l = list()
try:
l2 = boto.glacier.layer2.Layer2(aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY, account_id=ACCOUNT_ID)
v = l2.get_vault(vault)
# attend to unicode bug in returned name
v.name = str(v.name)
for job in v.list_jobs():
j = {'JobID': job.id,
'Action': job.action,
'ArchiveID': job.archive_id,
'StatusCode': job.status_code,
}
l.append(j)
except Exception as e:
print >>sys.stderr, "ERROR: Unable to query glacier job"
print >>sys.stderr, "Info: " + str(e)
raise
return l
def main():
parser = argparse.ArgumentParser(description='Check glacier download ' \
+ 'request status')
parser.add_argument('-V', '--vault', dest='vault', action='store',
required=True, help='Glacier vault.')
#parser.add_argument('-s', '--sns-topic', dest='sns', action='store',
# required=True, help='SNS topic for notifications (append to default '
# + 'header: arn:aws:glacier:us-east-1:203237044369:vaults/)')
parser.add_argument('-a', '--aws-access-key-id', dest='access_key',
action='store', required=True)
parser.add_argument('-s', '--aws-secret-access-key', dest='secret_key',
action='store', required=True)
parser.add_argument('-c', '--aws-account-id', dest='account_id',
action='store', required=True)
args = parser.parse_args()
global ACCESS_KEY
ACCESS_KEY = args.access_key
global SECRET_KEY
SECRET_KEY = args.secret_key
global ACCOUNT_ID
ACCOUNT_ID = args.account_id
status = check_jobs_status(vault=args.vault)
print json.dumps(status, indent=2)
if __name__ == '__main__':
main()
``` |
{
"source": "jgrundstad/VCF_viewer",
"score": 3
} |
#### File: viewer/util/link_out_scraper.py
```python
__author__ = '<NAME>'
from django.conf import settings
from bs4 import BeautifulSoup
import json
import requests
import re
MD_ANDERSON_URL = 'https://pct.mdanderson.org'
MD_ANDERSON_OUTFILE = settings.LINKS_OUT + 'mdanderson.json'
def merge_dicts(x, y):
"""
:rtype : dict
"""
z = x.copy()
z.update(y)
return z
def find_mdanderson_genes(soup):
"""
:rtype : dict
"""
gene_list = dict()
# grab all available genes on this page and store links
gene_urls = soup.find_all('a')
for gene_url in gene_urls:
try:
gene_match = re.match(r'/genes/(.+)/show$', gene_url.get('href'))
if gene_match:
gene = gene_match.group(1)
gene_list[gene] = "{}/genes/{}/show".format(MD_ANDERSON_URL,
gene)
except TypeError:
print "Ran into a None value when parsing gene urls"
return gene_list
def find_mdanderson_next_page(soup):
page = None
page_urls = soup.find_all('a', {'rel': 'next'})
if len(page_urls) > 0:
for page_url in page_urls:
page_match = re.match(r'/genes\?page=(\w+)$', page_url.get('href'))
if page_match:
page = page_match.group(1)
else:
page = None
else:
page = None
return page
def grab_mdanderson():
"""
:rtype : dict
"""
page = 1
gene_list = dict() # name: url
while page:
print "Looking for page: {}".format(page)
md_url = "https://pct.mdanderson.org/genes?page={}".format(page)
soup = BeautifulSoup(requests.get(md_url).text)
# find genes on this page
page_gene_list = find_mdanderson_genes(soup)
gene_list = merge_dicts(gene_list, page_gene_list)
# is there another page to pull from
page = find_mdanderson_next_page(soup)
# dump gene_list dict to json file for report_parser.py to grab
gene_list_json = json.dumps(gene_list)
with open(MD_ANDERSON_OUTFILE, 'w') as f:
json.dump(gene_list_json, f)
def main():
grab_mdanderson()
if __name__ == '__main__':
main()
```
#### File: viewer/util/report_parser.py
```python
__author__ = '<NAME>'
from django.conf import settings
import json, simplejson
import os
import tablib
def add_goodies(atoms, headers, md_anderson_genes):
for i in range(0, len(headers)):
if not atoms[i]:
atoms[i] = ''
# highlight NON_SYNONYMOUS_CODING
if(headers[i] == 'effect' and
atoms[i] == 'NON_SYNONYMOUS_CODING'):
atoms[i] = "<font color=green>%s</font>" % atoms[i]
# add MDAnderson link to appropriate gene names
if (headers[i] == 'gene') and (atoms[i] is not None) and (
atoms[i].lower() in md_anderson_genes):
new_link = '{gene}<br><font size=-2><a href={link}>MDAnderson' + \
'</a></font>'
atoms[i] = new_link.format(
link=md_anderson_genes[atoms[i].lower()],
gene=atoms[i])
return atoms
def json_from_report(filename):
print "%s - creating json from: %s" % (os.getcwd(), filename)
report_file = open(filename, 'r')
header_line = report_file.readline().strip()
splitby = ','
if '\t' in header_line:
splitby = '\t'
cols = header_line.split(splitby)
with open(settings.LINKS_OUT + 'mdanderson.json', 'r') as md_f:
md_anderson_genes = json.loads(json.load(md_f))
d = []
for line in report_file:
# remove '%' character to allow numerical sorting on pct columns
line = line.replace('%', '')
tokens = line.rstrip('\n').split(splitby)
formatted_line = add_goodies(tokens, cols, md_anderson_genes)
d.append(formatted_line)
data = tablib.Dataset(*d, headers=cols)
return data
``` |
{
"source": "jgruselius/misc",
"score": 3
} |
#### File: misc/Python/ba_parser.py
```python
import csv
import re
class BioanalyzerParser:
inputFile = None
def __init__(self):
pass
def loadFile(self, inputFileName):
try:
self.inputFile = open(inputFileName, "rb")
except IOError:
raise
def parseFile(self):
if not self.inputFile:
raise ParserException("No inputfile loaded")
csv_reader = csv.reader(self.inputFile, delimiter=",",
quoting=csv.QUOTE_MINIMAL)
rowlist = []
for row in csv_reader:
rowString = "".join(row)
# Don't add empty or all-whitespace rows:
if not re.search("^\s*$", rowString):
print row
rowlist.append(row)
# (We could combine above and below...)
# Now create a dictionary for with sample names as keys:
sampleData = {}
for row in rowlist:
if row[0] == "Sample Name":
sampleData[row[1]] = {}
def __del__(self):
if self.inputFile: self.inputFile.close()
class ParserException(Exception):
message = None
def __init__(self, message=""):
self.message = message
def __str__(self):
return repr(self.message)
```
#### File: misc/Python/ba_parser_tester.py
```python
import sys
import inspect
import ba_parser
def main(args):
reader = ba_parser.BioanalyzerParser()
if len(args) == 1:
reader.loadFile(args[0])
reader.parseFile()
else:
print "Usage:\n\tpython %s <input_file> [output_file]\n" % __file__
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: misc/Python/index_color_balance.py
```python
import argparse
import re
# MAYBE: Ensure equal length
# MAYBE: Make sure no index starts with GG
# TODO: Print aligned sequences indicating (w color?) unbalanced positions
#
# NovaSeq (2-channel) color chemistry:
# Base Red Green Result
# A 1 1 Clusters that show intensity in both the red and green channels.
# C 1 0 Clusters that show intensity in the red channel only.
# G 0 0 Clusters that show no intensity at a known cluster location.
# T 0 1 Clusters that show intensity in the green channel only.
test_seqs = ['ATCTGTAG', 'GTCCAAGT', 'TTGGAACC', 'AGTTCAGT']
many_seqs = [
"GGTTTACT",
"GTAATCTT",
"CCACTTAT",
"CACTCGGA",
"TGGTAAAC",
"GTTGCAGC",
"ATGAATCT",
"GTATGTCA",
"TTTCATGA",
"TACTCTTC",
"CCTAGACC",
"TAACAAGG",
"GTGGTACC",
"TTTACATG",
"TGATTCTA",
"TAATGACC",
"CAGTACTG",
"GTGTATTA",
"TCAGCCGT",
"ACATTACT",
"AGGTATTG",
"TTCAGGTG",
"CCTCATTC",
"CCAAGATG",
"TATGATTC",
"ACTTCATA",
"ACAATTCA",
"CCCTAACA",
"TTCGCCCT",
"CCCAATAG",
"GCGATGTG",
"GCCATTCC",
"CTAGGTGA",
"AATAATGG",
"CGACTTGA",
"CTCGTCAC",
"CATTAGCG",
"GACTACGT",
"GAGCAAGA",
"CCACTACA",
"CGCTATGT",
"CGTTAATC",
"ATTACTTC",
"CATGCGAT",
"CTGCGGCT",
"CGGAGCAC",
"CTGACGCG",
"TAGGATAA",
"ACAGAGGT",
"AAACCTCA",
"GTCTCTCG",
"ATTTGCTA",
"CACGCCTT",
"CGTGCAGA",
"GGTATGCA",
"AGCTATCA",
"GCATCTCC",
"AAAGTGCT",
"GTTGAGAA",
"GCAACAAA",
"ATAGTTAC",
"CATGAACA",
"TATGAGCT",
"TTGTTGAT",
"TCTTAAAG",
"CTGTAACT",
"GCGCAGAA",
"AGGAGATG",
"TTGTTTCC",
"CAAGCTCC",
"TAGGACGT",
"ACACTGTT",
"GAAACCCT",
"ACCGTATG",
"TCTCAGTG",
"CAATACCC",
"AAATGTGC",
"GCTTGGCT",
"TCGCCAGC",
"GTAATTGC",
"GTCCGGTC",
"GTTCCTCA",
"GAGGATCT",
"CTTTGCGG",
"AAGCGCTG",
"GCGAGAGT",
"TTATCGTT",
"GGCGAGTA",
"AGTGGAAC",
"TACCACCA",
"TCTCGTTT",
"GCACAATG",
"ACCGGCTC",
"TGATGCAT",
"ATTCTAAG",
"GACAGCAT"
]
# Verify that all strings only consist of AGTC
# and strip leading/trailing whitespace:
def validate(seqs):
p = re.compile(r"[^AGTC]")
for seq in seqs:
s = seq.strip().upper()
if p.search(s):
raise ValueError("{} contains unexpected symbols".format(seq))
yield s
# One approach is to convert each base into color channel representation
# Generates a tuple indicating color channel (red,green) for every nt in seq
def base_to_col_chan(seq):
conv = {
"A": (1, 1),
"T": (0, 1),
"G": (0, 0),
"C": (1, 0)
}
for nt in seq:
yield conv[nt]
# Use the above for multiple sequences
def seqs_to_col_chan(seqs):
for seq in seqs:
yield list(base_to_col_chan(seq))
# Use list comprehension to translate a list of sequences (strings) to
# list of list of color channel tuples:
def conv_to_col_chan(seqs):
# Map to translate base to color channel tuple (red, green):
conv = {
"A": (1, 1),
"T": (0, 1),
"G": (0, 0),
"C": (1, 0)
}
return [[conv[nt] for nt in seq] for seq in seqs]
# Sum that short version
# (takes list of color channel tuples)
# (cannot deal w seqs of different length)
# Make a list of channel counts by index:
def sum_chan_short(chan_seqs):
return [list(map(sum, zip(*e))) for e in zip(*chan_seqs)]
# Sum that longer version
# (takes list of color channel tuples)
# (can deal with seqs of different length)
def sum_chan(chan_seqs):
counter = [] # Where length is equal to the longest seq in seqs
for seq in chan_seqs:
for i, chan in enumerate(seq):
if i == len(counter):
counter.append([0, 0])
counter[i][0] += chan[0]
counter[i][1] += chan[1]
return counter
# Check for any zeroes in the summed-color-channel-by-position list
def check_balance(chan_sums):
return [all(counts) for counts in chan_sums]
# An alternative approach is to count how many times each base occurs in each
# position
# Returns a list of base counters, where each position in the seq has a dict
# with counts of each of the four nucleotides occurring in this position.
def count_base_by_pos(seqs):
c = []
for seq in seqs:
for i, nt in enumerate(seq):
if i >= len(c):
c.append({"A": 0, "C": 0, "T": 0, "G": 0})
c[i][nt] += 1
return c
# Takes a list of base counters and checks if bases in both colors channels
# occur for every position.
def is_balanced(base_counter):
return [(p["A"] + p["C"]) * (p["A"] + p["T"]) > 0 for p in base_counter]
# Put it in words:
def summarize(balance_list):
for i, pos in enumerate(balance_list):
if not pos:
print("There is not balance in position {:d}".format(i+1))
if all(balance_list):
print("All positions are balanced!")
def pretty_print(seqs):
color = {
"red": " \033[01;31m{0}\033[00m",
"green": " \033[1;32m{0}\033[00m",
"cyan": " \033[1;36m{0}\033[00m",
"none": " {0}"
}
col_chan = {
"A": "cyan",
"T": "green",
"G": "none",
"C": "red"
}
for seq in seqs:
for x in seq:
            print(color[col_chan[x]].format(x), end="")
print("")
# OO approach:
class ColorBalanceChecker:
# Map to translate base to color channel tuple (red, green):
conv = {
"A": (1, 1),
"T": (0, 1),
"G": (0, 0),
"C": (1, 0)
}
# Alt. map to translate base to color channel tuple (red, green):
cinv = {
'A': {'green': 1, 'red': 1},
'T': {'green': 0, 'red': 1},
'G': {'green': 0, 'red': 0},
'C': {'green': 1, 'red': 0}
}
def __init__(self, seqs):
self.validate(seqs).count_base_by_pos().check_balance_general()
# self.validate(seqs).conv_to_col_chan().sum_chan()
    # Verify that all strings only consist of AGTC, store unique lengths and
# strip leading/trailing whitespace:
def validate(self, seqs):
valid = []
lengths = set()
p = re.compile(r"[^AGTC]")
for seq in seqs:
s = seq.strip().upper()
if p.search(s):
raise ValueError("{} contains unexpected symbols".format(seq))
valid.append(s)
lengths.add(len(s))
self.seqs = valid
self.lengths = lengths
return self
# Returns a list of base counters, where each position in the seq has a dict
# with counts of each of the four nucleotides occurring in this position.
def count_base_by_pos(self):
c = []
for seq in self.seqs:
for i, nt in enumerate(seq):
if i >= len(c):
c.append({"A": 0, "C": 0, "T": 0, "G": 0})
c[i][nt] += 1
self.base_counts = c
return self
    # NOTE: this definition is shadowed by the second check_balance() defined below.
    def check_balance(self):
        self.balance = [(p["A"] + p["C"]) * (p["A"] + p["T"]) > 0 for p in self.base_counts]
return self
# This summariser takes a dict to make it more general
def check_balance_general(self):
b = []
for pos in self.base_counts:
green = 0
red = 0
for nt, chans in self.cinv.items():
green += pos[nt] * chans['green']
red += pos[nt] * chans['red']
b.append(green > 0 and red > 0)
self.balance = b
return self
def conv_to_col_chan(self):
self.chans = [[self.conv[nt] for nt in seq] for seq in self.seqs]
return self
# Sum those tuples (can deal with seqs of dif length)
def sum_chan(self):
c = []
if len(self.lengths) == 1:
c = [list(map(sum, zip(*e))) for e in zip(*self.chans)]
else:
for seq in self.chans:
for i, chan in enumerate(seq):
if i == len(c):
c.append([0, 0])
c[i][0] += chan[0]
c[i][1] += chan[1]
self.counter = c
return self
# Check for any zeroes in the summed-color-channel-by-position list
def check_balance(self):
self.balance = [all(counts) for counts in self.counter]
return self
# Put it in words:
def summarize(self):
for i, pos in enumerate(self.balance):
if not pos:
print("There is not balance in position {:d}".format(i+1))
if all(self.balance):
print("All positions are balanced!")
# Print all sequences indicating channel by letter color and mark
# unbalanced positions:
def pretty_print(self):
colorize = {
"red": " \033[0;31m{0}\033[00m",
"green": " \033[0;32m{0}\033[00m",
"cyan": " \033[0;36m{0}\033[00m",
"none": " {0}"
}
col_chan = {
"A": "cyan",
"T": "green",
"G": "none",
"C": "red"
}
for i in range(max(self.lengths)):
print(" {:d}".format(i+1), end="")
print("")
for seq in self.seqs:
for x in seq:
print(colorize[col_chan[x]].format(x), end="")
print("")
for pos in self.balance:
if not pos:
print(" ↑", end="")
else:
print(" ", end="")
print("")
return self
def main(args):
# Version 1:
# summarize(check_balance(sum_chan(seqs_to_col_chan(validate(args.seqs)))))
# and so on...
# I like this version better:
cbc = ColorBalanceChecker(args.seqs)
cbc.pretty_print()
cbc.summarize()
if __name__ == "__main__":
p = argparse.ArgumentParser(description="Check if a collection of index "
"sequences are color balanced")
p.add_argument("seqs", nargs="+", help="Nucleotide sequences (can be "
"different length)")
main(p.parse_args())
```
#### File: misc/Python/index_finder.py
```python
import sys
import argparse
import re
import hashlib
import json
import os
import errno
COMPL_MAP = {"A": "T", "T": "A", "C": "G", "G": "C"}
def file_hash(path):
BUF_SIZE = 65536
md5_hash = hashlib.md5()
with open(path, "rb") as f:
data = f.read(BUF_SIZE)
while data:
md5_hash.update(data)
data = f.read(BUF_SIZE)
return md5_hash.hexdigest()
def rev(seq):
return seq[::-1]
def compl(seq):
c = [COMPL_MAP[nt] for nt in seq]
return "".join(c)
def rev_compl(seq):
rc = [COMPL_MAP[nt] for nt in seq[::-1]]
return "".join(rc)
# Build a dict of known index sequences from a text file:
def build_index_dict(path, length):
ref_dict = {}
if length is None:
seq_pattern = re.compile(r"(?<![ATCG])[ATCGN]{4,}")
else:
seq_pattern = re.compile(r"(?<![ATCG])[ATCGN]{{{}}}".format(length))
with open(path, "r") as ref:
for line in ref:
match = set(seq_pattern.findall(line))
if match:
for m in match:
ref_dict.setdefault(m, []).append(line.strip())
return ref_dict
def load_index_dict(path):
with open(path, "r") as f:
d = json.load(f)
return d
def save_index_dict(obj, path):
with open(path, "w") as f:
json.dump(obj, f)
def print_index_dict(ref_dict):
for seq, matches in ref_dict.items():
if len(matches) > 1:
print(seq)
for match in matches:
print("\t{}".format(match))
def main(args):
if not os.path.isfile(args.ref):
# File not found
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), args.ref)
md5 = file_hash(args.ref)
cache = "{}{}.json".format(md5, args.length or "")
if not args.rebuild and os.path.isfile(cache):
print("Loading cached index dict ({})".format(cache), file=sys.stderr)
ref_dict = load_index_dict(cache)
else:
ref_dict = build_index_dict(args.ref, args.length)
print("Caching index dict ({})".format(cache), file=sys.stderr)
save_index_dict(ref_dict, cache)
if args.list:
print_index_dict(ref_dict)
n = 0
for x in ref_dict.values():
n += len(x)
print("\nTotal barcodes parsed in reference dict: {}".format(n))
print("Unique barcodes in reference dict: {}".format(len(ref_dict)))
else:
for arg in args.seqs:
if args.length:
seq = arg[:args.length]
else:
seq = arg
if seq in ref_dict:
matches = ref_dict[seq]
print("{} found in:".format(seq))
for m in matches:
print("\t{}".format(m))
else:
print("{}: No matches found".format(seq))
if __name__ == "__main__":
p = argparse.ArgumentParser(description="Find index clashes")
g = p.add_mutually_exclusive_group(required=True)
g.add_argument("--seqs", nargs="+", help="All sequences to search for")
g.add_argument("--list", action="store_true", default=False,
help="Print non-unique indexes in the reference list")
p.add_argument("--ref", required=True, help="Reference text file containing"
" known index sequences")
p.add_argument("--rebuild", action="store_true", help="Don't use any cached"
" reference object")
p.add_argument("--length", type=int, choices=range(4,8), help="Set the "
"number of letters to consider, both in the query strings and "
"when building the reference")
main(p.parse_args())
```
#### File: misc/Python/statusdb-get.py
```python
import sys
import json
import csv
import re
import requests
import getpass
import argparse
import collections
def get_data(url, js_path):
user = input("User name for {0}: ".format(url))
passw = getpass.getpass("Password: ")
with open(js_path) as js_file:
js = js_file.read()
### Format JavaScript ###
# Replace newlines and remove tabs:
js = js.replace("\n", " ")
js = js.replace("\t", "")
# Remove JS comments:
js = re.sub("(\/\*)[^\*]+\*\/", "", js).strip()
### Request data with POST ###
post_data = json.dumps({"map": js})
header = {"content-type": "application/json"}
response = requests.post(url, headers=header, auth=(user, passw),
data=post_data, stream=True)
return response
def write_json(resp, out_file):
for line in resp.iter_lines(decode_unicode=True):
out_file.write(line)
def write_simple_json(resp, out_file):
def _simplify():
obj = resp.json()
for row in obj["rows"]:
key = row["key"]
value = row["value"]
yield json.dumps({key: value})
for line in _simplify():
out_file.write("{}\n".format(line))
def flatten(l):
for val in l.values() if isinstance(l, dict) else l:
if isinstance(val, (tuple, list)):
for sub in flatten(val):
yield sub
elif isinstance(val, dict):
for sub in flatten(val.values()):
yield sub
else:
yield val
def write_csv_gen(resp, out_file):
writer = csv.writer(out_file, delimiter=",")
# writer.writerow(header)
# Parse using for loop:
# header = None
# for line in resp.iter_lines(decode_unicode=True):
# if re.match("\{\"id", line):
# obj = json.loads(line.rstrip("\r\n,"))
# if not header:
# header = ["key"]
# header.extend([key for key in obj["value"].keys()])
# writer.writerow(header)
# row = [obj["key"]]
# row.extend([val for val in obj["value"].values()])
# writer.writerow(row)
# Parse using generator function:
def _csv_gen():
p = re.compile(r"\{\"id")
for line in resp.iter_lines(decode_unicode=True):
if p.match(line):
obj = json.loads(line.rstrip("\r\n,"))
row = [obj["key"]]
vals = obj["value"]
row.extend(flatten(vals))
yield row
writer.writerows(_csv_gen())
# CSV formatter for reads per barcode data:
def write_custom1(resp, out_file):
# Data object is of format:
# Total flowcells: data["total_rows"]
# Offset: data["offset"]
# Flowcell data: data["rows"]
# Where data["rows"] is a list and if fc = data["rows"][n]:
# Database hash key: fc["id"]
# Flowcell/run ID: fc["key"]
# Lane data: fc["value"]
# Where fc["value"] is a list and if n is the lane index then
# fc["value"][n-1] is of format {barcode_sequence : million_reads}
# Write comma-separated output with format RUN_ID,LANE,BARCODE,READS:
writer = csv.writer(out_file, delimiter=",")
writer.writerow(["RUN_ID", "LANE", "BARCODE", "READS"])
p = re.compile(r"\{\"id")
for line in resp.iter_lines(decode_unicode=True):
if p.match(line):
fc = json.loads(line.rstrip("\r\n,"))
writer.writerows([[fc["key"], i, bc, reads]
for i, lane in fc["value"].items()
for bc, reads in lane.items()])
# CSV formatter for date data:
def write_custom2(resp, out_file):
header = ["project","app","facility","sample_type","prep",
"n_samples","rc_fail","prep_fail","lanes","sequencer","open_date",
"close_date","queue_date","samples_date","sequenced_date","deliver_date"
,"prep_date","qc_date","rc_date","order_date"]
obj = resp.json()
s = "\t".join(header)
out_file.write("{}\n".format(s))
for e in obj["rows"]:
row = []
vals = e["value"]
for x in header:
if x in vals and vals[x] != None and vals[x] != "undefined":
row.append(vals[x])
else:
row.append("")
s = "\t".join(row)
out_file.write("{}\n".format(s))
# CSV formatter for fragment size data:
def write_custom3(resp, out_file):
writer = csv.writer(out_file, delimiter=",")
header = ["proj", "app" ,"open", "lib", "prep", "sample", "size", "nm"]
obj = resp.json()
writer.writerow(header)
for e in obj["rows"]:
vals = e["value"]
for k,v in vals["samples"].items():
row = [vals[x] for x in header[:-3]] + [k, v["size"], v["nm"]]
writer.writerow(row)
# CSV formatter for flowcell data:
def write_custom4(resp, out_file):
writer = csv.writer(out_file, delimiter=",")
header = ["date", "flowcell", "sequencer", "lane", "barcode", "project", "sample" ,"reads"]
writer.writerow(header)
p = re.compile(r"\{\"id")
for line in resp.iter_lines(decode_unicode=True):
if p.match(line):
obj = json.loads(line.rstrip("\r\n,"))
vals = obj["value"]
pre = [vals[x] for x in header[:3]]
for lane, barcodes in vals["lanes"].items():
for bc, data in barcodes.items():
writer.writerow(pre + [lane, bc] + [data[x] for x in header[-3:]])
if __name__ == "__main__":
# Parse command-line arguments:
parser = argparse.ArgumentParser(description="Query a CouchDB database")
parser.add_argument("jsfile", help="File containing JavaScript map function")
parser.add_argument("--out", help="File to write response to (default: stdout")
parser.add_argument("--url", help="Database URL", default="http://tools-dev.scilifelab.se:5984/projects/_temp_view")
parser.add_argument("--csv", help="Convert response to CSV", action="store_true")
parser.add_argument("-s", "--simplify", help="Omit database id's", action="store_true")
args = parser.parse_args()
if args.csv:
write_func = write_csv_gen
else:
if args.simplify:
write_func = write_simple_json
else:
write_func = write_json
    # NOTE: this hard-coded assignment overrides the selection above and always
    # uses the custom date formatter (write_custom2).
    write_func = write_custom2
resp = get_data(args.url, args.jsfile)
if args.out:
with open(args.out, "w") as out_file:
write_func(resp, out_file)
else:
write_func(resp, sys.stdout)
``` |
{
"source": "jgrxnde97/DS-Unit-3-Sprint-2-SQL-and-Databases",
"score": 4
} |
#### File: DS-Unit-3-Sprint-2-SQL-and-Databases/unit3-sprint2-sprint-challenge/demo_data_redo_redo.py
```python
import sqlite3
# making sqlite connection and cursor
conn = sqlite3.connect('demo_data.sqlite3')
curs = conn.cursor()
# creating table
# variable t = table
def demo_create():
t = '''CREATE TABLE demo(
s text NOT NULL,
x integer NOT NULL,
y integer NOT NULL
)'''
curs.execute(t)
demo_create()
# inserting data into table
def demo_insert():
t1 = """INSERT INTO demo VALUES
('g', 3, 9)
('v', 5, 7)
('f', 8, 7)"""
curs.execute(t1)
demo_insert()
# save
def demo_save():
conn.commit()
demo_save()
# count num rows
def row_count():
a = """
SELECT COUNT (s)
FROM demo
"""
curs.execute(a)
return curs.fetchall()
row_count()
# how many rows where 'x' and 'y' are at least 5?
def min_count():
a2 = """
SELECT COUNT (*)
FROM demo
WHERE x >= 5 and y >=5
"""
curs.execute(a2)
return curs.fetchall()
min_count()
# how many unique values of 'y' are there?
def uni_count():
a3 = """
SELECT COUNT(DISTINCT y)
FROM demo
"""
curs.execute(a3)
return curs.fetchall()
uni_count()
```
#### File: DS-Unit-3-Sprint-2-SQL-and-Databases/unit3-sprint2-sprint-challenge/northwind_small.py
```python
import sqlite3
# making connection
conn = sqlite3.connect('northwind_small.sqlite3')
curs = conn.cursor()
# most expensive items
def most_ex():
A = """
SELECT ProductName, UnitPrice, SupplierId
FROM Product
ORDER BY UnitPrice desc
LIMIT 10
"""
curs.execute(A)
return curs.fetchall()
most_ex()
# part three: sailing northwind
# avg hire age
def avg_age():
A1 = """
SELECT AVG(HireDate - BirthDate)
FROM Employee
"""
curs.execute(A1)
return curs.fetchall()
avg_age()
# add supplier to top prods
def suppl_name():
B = """
SELECT
Product.ProductName AS "ProductName",
Product.UnitPrice AS Price,
Supplier.CompanyName AS "SupplierName"
FROM Product, Supplier
WHERE Product.SupplierId = Supplier.Id
ORDER BY UnitPrice DESC
LIMIT 10
"""
curs.execute(B)
return curs.fetchall()
suppl_name()
def lg_categ():
B2= """
SELECT CategoryName
FROM Category
WHERE Id = (
SELECT Product.CategoryId
FROM Product
GROUP BY Product.CategoryId
ORDER BY COUNT (Product.ProductName) DESC
LIMIT 1)
"""
curs.execute(B2)
return curs.fetchall()
lg_categ()
# part four: questions
# 01. the relationship between 'employee' and 'territory'
# is a one-to-many relation. There is one employee to many territories.
# 02. MongoDB is useful when your data evolves over time.
# It's useful for storing data that is not relational and tabular.
# It is good as a transactional store when performance is
# starting to become a concern.
# 03. NewSQL is a class of relational database management systems
# that seek to provide the scalability of NoSQL systems for online
# transaction processing workloads while maintaining the ACID guarantees
# of a traditional database system.
# The 'best of both worlds', per se - it wants to be able to have ACID
# guarantees and the ability to still use SQL and have horizontal
# scaling, which adds relational necessities to non-relational systems.
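# Illustrative sketch for question 01: listing how many territories each
# employee covers. This example is not part of the original sprint answer;
# it assumes the database also contains an EmployeeTerritory table with
# EmployeeId and TerritoryId columns, and that Employee has Id and LastName
# columns, following the singular table naming used above. Adjust the names
# if the actual schema differs.
def territories_per_employee():
    q = """
    SELECT Employee.LastName, COUNT(EmployeeTerritory.TerritoryId) AS n_territories
    FROM Employee
    JOIN EmployeeTerritory ON EmployeeTerritory.EmployeeId = Employee.Id
    GROUP BY Employee.Id
    ORDER BY n_territories DESC
    """
    curs.execute(q)
    return curs.fetchall()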
``` |
{
"source": "jGsch/kaggle-dfdc",
"score": 2
} |
#### File: jGsch/kaggle-dfdc/train-xception.py
```python
import os
import csv
import shutil
import random
from PIL import Image
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
import xception_conf as config
from model_def import xception
from augmentation_utils import train_transform, val_transform
def save_checkpoint(path, state_dict, epoch=0, arch="", acc1=0):
new_state_dict = {}
for k, v in state_dict.items():
if k.startswith("module."):
k = k[7:]
if torch.is_tensor(v):
v = v.cpu()
new_state_dict[k] = v
torch.save({
"epoch": epoch,
"arch": arch,
"acc1": acc1,
"state_dict": new_state_dict,
}, path)
class DFDCDataset(Dataset):
def __init__(self, data_csv, required_set, data_root="",
ratio=(0.25, 0.05), stable=False, transform=None):
video_info = []
data_list = []
with open(data_csv) as fin:
reader = csv.DictReader(fin)
for row in reader:
if row["set_name"] == required_set:
label = int(row["is_fake"])
n_frame = int(row["n_frame"])
select_frame = round(n_frame * ratio[label])
for sample_idx in range(select_frame):
data_list.append((len(video_info), sample_idx))
video_info.append({
"name": row["name"],
"label": label,
"n_frame": n_frame,
"select_frame": select_frame,
})
self.stable = stable
self.data_root = data_root
self.video_info = video_info
self.data_list = data_list
self.transform = transform
def __getitem__(self, index):
video_idx, sample_idx = self.data_list[index]
info = self.video_info[video_idx]
if self.stable:
frame_idx = info["n_frame"] * sample_idx // info["select_frame"]
else:
frame_idx = random.randint(0, info["n_frame"] - 1)
image_path = os.path.join(self.data_root, info["name"],
"%03d.png" % frame_idx)
try:
img = Image.open(image_path).convert("RGB")
except OSError:
img = np.random.randint(0, 255, (320, 320, 3), dtype=np.uint8)
if self.transform is not None:
# img = self.transform(img)
result = self.transform(image=np.array(img))
img = result["image"]
return img, info["label"]
def __len__(self):
return len(self.data_list)
def main():
torch.backends.cudnn.benchmark = True
train_dataset = DFDCDataset(config.data_list, "train", config.data_root,
transform=train_transform)
val_dataset = DFDCDataset(config.data_list, "val", config.data_root,
transform=val_transform, stable=True)
kwargs = dict(batch_size=config.batch_size, num_workers=config.num_workers,
shuffle=True, pin_memory=True)
train_loader = DataLoader(train_dataset, **kwargs)
val_loader = DataLoader(val_dataset, **kwargs)
# Model initialization
model = xception(num_classes=2, pretrained=None)
if hasattr(config, "resume") and os.path.isfile(config.resume):
ckpt = torch.load(config.resume, map_location="cpu")
start_epoch = ckpt.get("epoch", 0)
best_acc = ckpt.get("acc1", 0.0)
model.load_state_dict(ckpt["state_dict"])
else:
start_epoch = 0
best_acc = 0.0
model = model.cuda()
model = nn.DataParallel(model)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(),
0.01, momentum=0.9, weight_decay=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.2)
os.makedirs(config.save_dir, exist_ok=True)
for epoch in range(config.n_epoches):
if epoch < start_epoch:
scheduler.step()
continue
print("Epoch {}".format(epoch + 1))
model.train()
loss_record = []
acc_record = []
for count, (inputs, labels) in enumerate(train_loader):
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
outputs = model(inputs)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iter_loss = loss.item()
loss_record.append(iter_loss)
preds = torch.argmax(outputs.data, 1)
iter_acc = torch.sum(preds == labels).item() / len(preds)
acc_record.append(iter_acc)
if count and count % 100 == 0:
print("T-Iter %d: loss=%.4f, acc=%.4f"
% (count, iter_loss, iter_acc))
epoch_loss = np.mean(loss_record)
epoch_acc = np.mean(acc_record)
print("Training: loss=%.4f, acc=%.4f" % (epoch_loss, epoch_acc))
model.eval()
loss_record = []
acc_record = []
with torch.no_grad():
for count, (inputs, labels) in enumerate(val_loader):
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
outputs = model(inputs)
preds = torch.argmax(outputs, 1)
loss = criterion(outputs, labels)
iter_loss = loss.item()
loss_record.append(iter_loss)
preds = torch.argmax(outputs.data, 1)
iter_acc = torch.sum(preds == labels).item() / len(preds)
acc_record.append(iter_acc)
if count and count % 100 == 0:
print("V-Iter %d: loss=%.4f, acc=%.4f"
% (count, iter_loss, iter_acc))
epoch_loss = np.mean(loss_record)
epoch_acc = np.mean(acc_record)
print("Validation: loss=%.4f, acc=%.4f" % (epoch_loss, epoch_acc))
scheduler.step()
ckpt_path = os.path.join(config.save_dir, "ckpt-%d.pth" % epoch)
save_checkpoint(
ckpt_path,
model.state_dict(),
epoch=epoch + 1,
acc1=epoch_acc)
if epoch_acc > best_acc:
print("Best accuracy!")
shutil.copy(ckpt_path,
os.path.join(config.save_dir, "best.pth"))
best_acc = epoch_acc
print()
if __name__ == "__main__":
main()
``` |
{
"source": "JG-searching/bidskit",
"score": 2
} |
#### File: bidskit/bidskit/bidstree.py
```python
import os
import sys
import json
import subprocess
import bidskit.io as bio
class BIDSTree:
def __init__(self, dataset_dir, overwrite=False):
print('Initializing BIDS dataset directory tree in %s' % dataset_dir)
self.bids_dir = dataset_dir
self.derivatives_dir = os.path.join(dataset_dir, 'derivatives')
self.sourcedata_dir = os.path.join(dataset_dir, 'sourcedata')
self.code_dir = os.path.join(dataset_dir, 'code')
self.work_dir = os.path.join(dataset_dir, 'work')
bio.safe_mkdir(self.derivatives_dir)
bio.safe_mkdir(self.sourcedata_dir)
bio.safe_mkdir(self.code_dir)
bio.safe_mkdir(self.work_dir)
self.translator_file = os.path.join(self.code_dir, 'Protocol_Translator.json')
print('Creating required file templates')
# README file
self.readme_file = os.path.join(dataset_dir, 'README')
with open(self.readme_file, 'w') as fd:
fd.writelines('Useful information about this dataset\n')
# CHANGES changelog file
self.changes_file = os.path.join(dataset_dir, 'CHANGES')
with open(self.changes_file, 'w') as fd:
fd.writelines(['1.0.0 YYYY-MM-DD\n', ' - Initial release\n'])
# Create template JSON dataset description (must comply with BIDS 1.2 spec)
self.datadesc_json = os.path.join(self.bids_dir, 'dataset_description.json')
meta_dict = dict({
'Name': 'Descriptive name for this dataset',
'BIDSVersion': '1.2',
'License': 'This data is made available under the Creative Commons BY-SA 4.0 International License.',
'Authors': ['First Author', 'Second Author'],
'Acknowledgments': 'Thanks to everyone for all your help',
'HowToAcknowledge': 'Please cite: Author AB, Seminal Paper Title, High Impact Journal, 2019',
'Funding': ['First Grant', 'Second Grant'],
'ReferencesAndLinks': ['A Reference', 'Another Reference', 'A Link'],
'DatasetDOI': '10.0.1.2/abcd.10'
})
bio.write_json(self.datadesc_json, meta_dict, overwrite)
# Create participants JSON file defining columns in participants.tsv
# See
self.participants_json = os.path.join(self.bids_dir, 'participants.json')
meta_dict = dict({
'age': {
'Description': 'Age of participant',
'Units': 'years'
},
'sex': {
'Description': 'Sex of participant',
'Levels': {
'M': 'male',
'F': 'female',
'T': 'transgender'
}
},
'group': {
'Description': 'participant group assignment'
},
})
bio.write_json(self.participants_json, meta_dict, overwrite)
# Create .bidsignore file to skip work/ during validation
self.ignore_file = os.path.join(dataset_dir, '.bidsignore')
with open(self.ignore_file, 'w') as fd:
fd.writelines('work/\n')
def write_translator(self, translator):
"""
Write protocol translation dictionary template to JSON file
:param translator: dictionary, translation dictionary to write
:return: None
"""
if os.path.isfile(self.translator_file):
print('* Protocol dictionary already exists : ' + self.translator_file)
print('* Skipping creation of new dictionary')
else:
json_fd = open(self.translator_file, 'w')
json.dump(translator, json_fd, indent=4, separators=(',', ':'))
json_fd.close()
print('')
print('---')
print('New protocol dictionary created : %s' % self.translator_file)
print('Remember to replace "EXCLUDE" values in dictionary with an appropriate image description')
            print('For example "MP-RAGE T1w 3D structural" or "MB-EPI BOLD resting-state"')
print('---')
print('')
return
def read_translator(self):
"""
Read protocol translations from JSON file in DICOM directory
:return: translator: dictionary
"""
if os.path.isfile(self.translator_file):
# Read JSON protocol translator
json_fd = open(self.translator_file, 'r')
translator = json.load(json_fd)
json_fd.close()
else:
translator = dict()
return translator
def validate(self):
"""
Run BIDS tree through the command line BIDS validator
:return:
"""
# Check for bids-validator installation
try:
output = subprocess.check_output(['bids-validator', '-v'])
except FileNotFoundError:
print('')
print('* Optional external bids-validator not found')
print('* Please see https://github.com/jmtyszka/bidskit/blob/master/docs/Installation.md for more information')
sys.exit(0)
print('\n----------------------')
print('Running BIDS validator')
print('----------------------\n')
# Run bids-validator on BIDS dataset
subprocess.run(['bids-validator', self.bids_dir])
``` |
{
"source": "Jgshep/Python-Text-Adventure-Engine",
"score": 3
} |
#### File: Jgshep/Python-Text-Adventure-Engine/level.py
```python
import Player
print 'This is a level test. Time to combine this stuff.'
###ROOMS####
Rooms={}
Rooms['Kitchen']={}
Rooms['Kitchen']['Description']='a place for cooking'
Rooms['Kitchen']['Items']={}
Rooms['Kitchen']['Items'][ 'cheese']={
'Description':'a block of semihard, white cheese'
}
Rooms['Kitchen']['Items']['knife']={
'damage':1,
'Description':'a sharp stabby thing'
}
##Kitchen exits
Rooms['Kitchen']['Exits']={
'north':'Bedroom',
'west':'Hallway'
}
##end Kitchen
##Begin Bedroom
Rooms['Bedroom']={}
Rooms['Bedroom']['Description']='a place for sleeping'
Rooms['Bedroom']['Items']={}
Rooms['Bedroom']['Items']['suit']={
'Description':'a fancy set of clothes'
}
Rooms['Bedroom']['Items']['comb'] = {
'Description':'to fix your unruly hair'
}
Rooms['Bedroom']['Items']['key']={
'Description':'to unlock things'
}
##Bedroom Exits
Rooms['Bedroom']['Exits']={
'south':'Kitchen'
}
##End Bedroom
##Begin Hallway
Rooms['Hallway']={}
Rooms['Hallway']['Description']='the entrance to this home'
Rooms['Hallway']['Items']={}
Rooms['Hallway']['Items']['umbrella']={
'Description':'for a rainy day'
}
Rooms['Hallway']['Items']['shoes'] = {
'Description':'for walking'
}
Rooms['Hallway']['Exits']={
'east':'Kitchen',
'south':'Courtyard'
}
##End Hallway
##Begin Courtyard
Rooms['Courtyard']={}
Rooms['Courtyard']['Description']='a lovely front yard, with a flower garden and benches.'
Rooms['Courtyard']['Enemies']={}
Rooms['Courtyard']['Enemies']['cockroach']={
'hitpoints':2,
'power':1,
'Description':'a gigantic roach, hellbent on destroying you.'
}
Rooms['Courtyard']['Exits']={
'north':'Hallway',
'west':'Drawbridge'
}
##End Courtyard
##Begin Drawbridge
Rooms['Drawbridge']={}
Rooms['Drawbridge']['Description']='a rickety-looking rope bridge over a gaping chasm.'
Rooms['Drawbridge']['Enemies']={}
Rooms['Drawbridge']['Enemies']['goblin']={
'hitpoints':5,
'power':1,
'Description':'a feeble goblin guarding the drawbridge!'
}
Rooms['Drawbridge']['Exits']={
'east':'Courtyard'
}
##ToDo: Add enemies. First I have to make a combat system.
##End Drawbridge
###END ROOMS
###BEGIN PLAYER###
def Describe(Person):
print '\n\n'
print '+'*50
print '\n'
print 'You are in the %s, %s' % (Person.curPosition, Rooms[Person.curPosition]['Description'])
try:
print 'The items contained within are: '
for item in Rooms[Person.curPosition]['Items'].items():
item_dict=dict(item[1])
print '\t* %s, %s' %(item[0], item_dict['Description'])
try:
print '\t Damage: %d' % item_dict['damage']
except KeyError:
pass
# print '\t* %s, %s' %(item, description)
except KeyError:
print '\t<no items>'
try:
for enemy in Rooms[Person.curPosition]['Enemies'].iteritems():
enemy_dict=dict(enemy[1])
print 'A foe is here! The %s, %s' %(enemy[0], enemy_dict['Description'])
print 'It has %d hitpoints and %d power.' %(enemy_dict['hitpoints'], enemy_dict['power'])
except KeyError:
pass
print 'The exits are to the: '
for exit, location in Rooms[Person.curPosition]['Exits'].items():
print '\t - %s, the %s' %(exit, location)
print '+'*50
def Move(Person, place):
Person.curPosition = Rooms[Person.curPosition]['Exits'][place]
Describe(Person)
```
#### File: Jgshep/Python-Text-Adventure-Engine/Player.py
```python
import level
import random
class Player:
def __init__(self, position):
self.curPosition=position
inventoryItems={}
hitpoints=10 #base - will eventually be able to change with experience system
attackPwr=1 #again, base.
equipped = ''
dead = False
def Move(direction):
Player.curPosition=direction
def Look(person):
level.Describe(person)
def Inventory(Person):
print '-'*50
print 'Your inventory contains the following: '
for item in Player.inventoryItems.items():
item_dict=dict(item[1])
print '\t* %s, %s' % (item[0],item_dict['Description'])
print 'You currently have %d hitpoints' %Person.hitpoints
print 'Your current attack power is %d.' %Person.attackPwr
if not Person.equipped:
print 'You are currently weilding your two bare hands. Or more, if you\'re a multiply-appendeged individual.'
else:
print 'You are currently weilding %s.' %Person.equipped
print '-'*50
####COMBAT############################################################
#I need:
# HP (can set in Player.__init__) done
# base attack (1 - in init?) done
# attack subroutine
# block subroutine?
#
#I'm going to need a random number generator.
#I'll use the 1d2, 2d2 etc style
#starting with 1d1, random values can be between 0 and 2
#Enemies will start with 1d1, with 5 HP.
#Functions:
# Roll(damage) - random number generator, accepts damage variable
# Attack() will probably get the damage variable from Player.attackPwr
# Upon pickup, knife grants +1 to attackPwr? Sure, that's easy enough
#Could I use a class like Player() to create enemies? humm.
# I think using an 'Enemies' entry in the room's dictionary would be easier.
######################################################################
def Attack(Person, enemy):
damage=Roll(Person.attackPwr)
level.Rooms[Person.curPosition]['Enemies'][enemy]['hitpoints']-=damage
if damage == Person.attackPwr:
print 'Critical hit! You hit the %s for %d points of damage!' %(enemy, damage)
elif damage == 0:
print 'You hit the shadow of the %s. Unfortunately, the actual %s takes no damage.' %(enemy, enemy)
else:
print 'You strike at the %s for %d points of damage.' %(enemy, damage)
if level.Rooms[Person.curPosition]['Enemies'][enemy]['hitpoints'] <= 0:
print 'You have vanquished the %s!' %enemy
del level.Rooms[Person.curPosition]['Enemies'][enemy]
else:
print 'The %s swings at you!' %enemy
damage=Roll(level.Rooms[Person.curPosition]['Enemies'][enemy]['power'])
if damage == 0:
print 'You deftly sidestep the blow, taking 0 damage.'
else:
print 'The %s hits you for %d damage.' %(enemy, damage)
Person.hitpoints -= damage
if Person.hitpoints <= 0:
print 'Oh no! You have died. Better luck next time!'
dead = True
else:
print 'You have %d HP remaining. The %s has %d HP. ' %(Person.hitpoints, enemy, level.Rooms[Person.curPosition]['Enemies'][enemy]['hitpoints'])
def Roll(damage):
#Will need to return a value.
return random.randint(0,damage)
###############################
#Equip: Takes a Player instance variable, and an item (from levelTest.Rooms[room]['Items'][item])
#Checks the item's ['power'] (item will need to be expanded), and adds +(power) to Person.attackPwr
#Will also need an Unequip subroutine
#Checking to make sure it's a valid item will be done in gameTest, where this function is called.
###############################
def Equip(Person, item):
try:
print 'You attempt to equip the %s, %s.' %(item, Person.inventoryItems[item]['Description'])
Person.attackPwr += (Person.inventoryItems[item]['damage'])
Person.equipped=item
print 'You feel stronger! Your attack has gone up by %d' %(Person.inventoryItems[item]['damage'])
except KeyError:
print '''That's not a weapon! Try again.'''
``` |
{
"source": "jgshier/pigskin",
"score": 2
} |
#### File: pigskin/tests/conftest.py
```python
import os
import pytest
import json
try:
from urllib import quote # Python 2.X
except ImportError:
from urllib.parse import quote # Python 3+
from pigskin.pigskin import pigskin
pytest.gp_username = os.getenv('PIGSKIN_USER', '')
pytest.gp_password = os.getenv('PIGSKIN_PASS', '')
scrub_list = []
for i in [ pytest.gp_username, pytest.gp_password ]:
if i:
scrub_list.append(i)
scrub_list.append(quote(i))
def scrub_request(request):
try:
body = request.body.decode()
except (AttributeError, UnicodeDecodeError) as e:
return request
for i in scrub_list:
body = body.replace(i, 'REDACTED')
request.body = body.encode()
return request
def scrub_response(response):
try:
body = response['body']['string'].decode()
except (AttributeError, UnicodeDecodeError) as e:
return response
for i in scrub_list:
body = body.replace(i, 'REDACTED')
response['body']['string'] = body.encode()
try: # load JSON as a python dict so it can be pretty printed
parsed = json.loads(body)
response['body']['pretty'] = parsed
except ValueError as e:
pass
return response
@pytest.fixture
def vcr_config():
return {
'decode_compressed_response': True,
'before_record_request': scrub_request,
'before_record_response': scrub_response,
}
```
#### File: pigskin/tests/test_log_request.py
```python
import pytest
import requests
from pigskin.pigskin import pigskin
gp = pigskin()
# These tests are just to make sure that the logging doesn't blow up. There's no
# need to test the output, just that it doesn't fail.
@pytest.mark.vcr()
def test_response_json():
r = requests.get('https://httpbin.org/json')
result = gp._log_request(r)
assert result == True
@pytest.mark.vcr()
def test_response_html():
r = requests.get('https://httpbin.org/html')
result = gp._log_request(r)
assert result == True
@pytest.mark.vcr()
def test_response_bytes():
r = requests.get('https://httpbin.org/bytes/20')
result = gp._log_request(r)
assert result == True
def test_empty_handle():
result = gp._log_request(None)
assert result == True
``` |
{
"source": "jgsmagalhaes/PredictTreeGrowth",
"score": 2
} |
#### File: PredictTreeGrowth/algorithm/main.py
```python
import argparse
import csv
import gc
import json
import matplotlib
import numpy
import os
import sys
matplotlib.use("TkAgg")
from keras.backend import tensorflow_backend as K
from keras.layers import Activation, Dense, LSTM, ReLU
from keras.models import Sequential
from keras.optimizers import Adam
from keras.utils import plot_model
from keras.wrappers.scikit_learn import KerasRegressor
from math import sqrt
from matplotlib import pyplot
from numpy.random import seed
from pandas import DataFrame
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelEncoder
from tensorflow import set_random_seed
""" Parse Args """
parser = argparse.ArgumentParser()
parser.add_argument("--hyperparameters", help="Parameters", default='hyperparameters.json')
args = parser.parse_args()
import builtins
builtins.hyperparameters = args.hyperparameters
from helpers import DataSaver, LossHistory, LSTMModel, preprocess_data
import predict
sys.path.insert(0, '../utils')
import utils
""" Fix seed """
seed(1)
set_random_seed(1)
""" Constants """
# Get parameters from JSON file
PARAM_GRID = None
with open(args.hyperparameters) as f:
PARAM_GRID = json.load(f, encoding='utf-8')
STARTING_TREE_INDEX = PARAM_GRID['starting_index'] # 1-indexed
TRAIN_TEST_YEAR_SPLIT = PARAM_GRID['train_test_split']
DATASET_PATH = PARAM_GRID['dataset_path']
RESULTS_PATH = PARAM_GRID['results_path']
MODEL_PATH = os.path.join(RESULTS_PATH, 'model.h5')
MODEL_BAK_PATH = os.path.join(RESULTS_PATH, 'model_bak.h5')
FULL_DATASET = utils.read_data(DATASET_PATH, date_column=0, index_column=0)
""" Helper functions """
def get_tree_results_path(tree_index):
return os.path.join(RESULTS_PATH, 'tree_%d' % (int(tree_index)+1))
""" Main functions """
"""
Main algorithm
"""
def algorithm(lstm_model, tree_index, train_scaled, test_scaled, list_variables, tree_names):
results_path = get_tree_results_path(tree_index)
""" Start experiment """
print('Starting experiment')
epochs_list = list(range(1, numpy.array(PARAM_GRID['epochs']).max()+1))
df = DataSaver(epochs_list, columns=['Epoch'])
df.append_col('Tree name', tree_names[tree_index], index=0)
""" Format training data """
X, Y = train_scaled[:, 0:-len(list_variables)], train_scaled[:, -len(list_variables):]
X = X.reshape(X.shape[0], 1, X.shape[1])
""" Format validation data """
val_x, val_y = test_scaled[:, 0:-len(list_variables)], test_scaled[:, -len(list_variables):]
val_x = val_x.reshape(val_x.shape[0], 1, val_x.shape[1])
""" Create and fit an LSTM model """
    results = lstm_model.fit(
X,
Y,
validation_data=(val_x, val_y),
epochs=PARAM_GRID['epochs'],
batch_size=PARAM_GRID['batch_size']
)
""" Save weights and configs """
print('Saving weights information')
config = lstm_model.model.get_config().copy()
config['input_shape'] = lstm_model.model.input_shape
config['output_shape'] = lstm_model.model.output_shape
config['weights'] = list()
for layer in lstm_model.model.get_weights():
config['weights'].append(layer.tolist())
with open(os.path.join(results_path, 'model_configuration.txt'), 'w') as outfile:
json.dump(config, outfile, indent=4)
""" Generate and save training and validation loss plots for each cross_validation """
print('Saving results to file')
losses = results.history
train_loss = losses['loss']
test_loss = losses['val_loss']
metric_rmse = losses['metric_rmse']
metric_r2_score = losses['metric_r2_score']
""" Add loss to raw data """
df.append_col('Train loss estimation', numpy.array(train_loss).flatten())
df.append_col('Test loss estimation', numpy.array(test_loss).flatten())
df.append_col('Metric RMSE estimation', numpy.array(metric_rmse).flatten())
df.append_col('Metric R2 score estimation', numpy.array(metric_r2_score).flatten())
del losses
del train_loss
del test_loss
del metric_rmse
gc.collect()
""" Save raw data """
result_file_path = os.path.join(RESULTS_PATH, 'training_results.csv')
df = df.get_dataframe()
df.to_csv(os.path.join(results_path, 'train_test_loss_epoch_raw.csv'), index=False)
if not os.path.isfile(result_file_path):
df.to_csv(result_file_path, header=True, index=False)
else:
df.to_csv(result_file_path, mode='a', header=False, index=False)
""" Main function starts here """
if __name__ == "__main__":
""" Start of script """
""" Get all the different trees in the dataset """
tree_names = numpy.unique(FULL_DATASET.values[:, 0])
""" Create result directories """
if not os.path.isdir(RESULTS_PATH):
os.makedirs(RESULTS_PATH)
for i in range(len(tree_names)):
tree_path = get_tree_results_path(i)
if not os.path.isdir(tree_path):
os.makedirs(tree_path)
""" Pick input columns from full dataset """
print('Picking columns from dataset ' + DATASET_PATH)
VARIABLES = PARAM_GRID['variables']
DATASET = FULL_DATASET[VARIABLES]
VARIABLES = VARIABLES[1:]
""" Get train, test and forecast values from dataset """
train_raw_values = DATASET[str(TRAIN_TEST_YEAR_SPLIT[0]):str(TRAIN_TEST_YEAR_SPLIT[1])].values
test_raw_values = DATASET[str(TRAIN_TEST_YEAR_SPLIT[1]+1):str(TRAIN_TEST_YEAR_SPLIT[2])].values
""" Preprocess all the data at once """
print('Preprocessing data')
encoder = LabelEncoder()
train_input_data = preprocess_data(train_raw_values.copy(), encoder)
test_input_data = preprocess_data(test_raw_values.copy(), encoder)
""" Transform data to be supervised learning """
print('Transforming data to supervised multivariate model')
train_supervised = DataFrame()
test_supervised = DataFrame()
for (i, tree) in enumerate(tree_names):
train_tree_data = train_input_data[train_input_data[:, 0] == i, :]
test_tree_data = test_input_data[test_input_data[:, 0] == i, :]
train_tree_supervised = utils.timeseries_to_supervised_multivariate(train_tree_data, 1, 1)
test_tree_supervised = utils.timeseries_to_supervised_multivariate(test_tree_data, 1, 1)
""" Drop second tree name column and first row with 0 values """
train_tree_supervised.drop('var1(t)', axis=1, inplace=True)
test_tree_supervised.drop('var1(t)', axis=1, inplace=True)
train_tree_supervised.drop(0, inplace=True)
test_tree_supervised.drop(0, inplace=True)
train_supervised = train_supervised.append(train_tree_supervised)
test_supervised = test_supervised.append(test_tree_supervised)
train_supervised = train_supervised.values
test_supervised = test_supervised.values
""" Create scaler and scale data """
scaler = utils.create_scaler(train_supervised[:, 1:])
""" Create model """
# If old model exists, rename it
if os.path.isfile(MODEL_PATH):
        if os.path.isfile(MODEL_BAK_PATH):
            os.remove(MODEL_BAK_PATH)
os.rename(MODEL_PATH, MODEL_BAK_PATH)
history = LossHistory()
lstm_create = LSTMModel(history)
model = lstm_create(
kernel_init=PARAM_GRID['kernel_init'],
optimizer=PARAM_GRID['optimizer'],
bias_init=PARAM_GRID['bias_init'],
batch_size=PARAM_GRID['batch_size'],
lr=PARAM_GRID['lr'],
neurons=PARAM_GRID['neurons'],
input_shape=(TRAIN_TEST_YEAR_SPLIT[1] - TRAIN_TEST_YEAR_SPLIT[0], 1, len(VARIABLES))
)
""" Run algorithm on tree data"""
for (i, tree) in enumerate(tree_names):
if (i < STARTING_TREE_INDEX-1):
print('Skipping tree %d' %(i+1))
continue
print('Processing tree %d' % (i+1))
""" Get tree data """
train_tree_data = train_supervised[train_supervised[:, 0] == i, :]
test_tree_data = test_supervised[test_supervised[:, 0] == i, :]
# Remove tree name column
train_tree_data = train_tree_data[:, 1:]
test_tree_data = test_tree_data[:, 1:]
# Scale data
train_scaled = scaler.transform(train_tree_data)
test_scaled = scaler.transform(test_tree_data)
""" Run algorithm """
algorithm(model, i, train_scaled, test_scaled, VARIABLES, tree_names)
""" Save model for backup """
model.save(MODEL_PATH)
""" Clear variables """
del train_tree_data
del test_tree_data
gc.collect()
""" Start prediction """
print('Starting prediction')
predict.main()
sys.exit(0)
```
#### File: PredictTreeGrowth/algorithm/predict.py
```python
import sys
import numpy
import os
import gc
import json
import matplotlib
import csv
import argparse
matplotlib.use("TkAgg")
from keras.layers import Activation, Dense, LSTM, ReLU
from keras.models import Sequential, load_model
from keras.optimizers import Adam
from keras.utils import plot_model
from math import sqrt
from matplotlib import pyplot
from numpy.random import seed
from pandas import DataFrame
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import LabelEncoder
from tensorflow import set_random_seed
from keras.backend import tensorflow_backend as K
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import GridSearchCV
from helpers import metric_rmse, metric_r2_score, preprocess_data
sys.path.insert(0, '../utils')
import utils
import builtins
if hasattr(builtins, "hyperparameters"):
defaultHyperparamSource = builtins.hyperparameters
else:
defaultHyperparamSource = 'hyperparameters.json'
""" Parse Args """
parser = argparse.ArgumentParser()
parser.add_argument("--hyperparameters", help="Parameters", default=defaultHyperparamSource)
args = parser.parse_args()
""" Fix seed """
seed(1)
set_random_seed(1)
""" Constants """
# Get parameters from JSON file
PARAM_GRID = None
with open(args.hyperparameters) as f:
PARAM_GRID = json.load(f, encoding='utf-8')
STARTING_TREE_INDEX = PARAM_GRID['starting_index'] # 1-indexed
DATASET_PATH = PARAM_GRID['dataset_path']
RESULTS_PATH = PARAM_GRID['results_path']
MODEL_PATH = os.path.join(RESULTS_PATH, 'model.h5')
FULL_DATASET = utils.read_data(DATASET_PATH, date_column=0, index_column=0)
""" Helper functions """
def predict(
lstm_model,
tree_index,
forecast_input_data_raw,
forecast_scaled,
prediction_years,
list_variables,
tree_names,
scaler
):
print('Starting prediction')
forecast_scaled_x = forecast_scaled[:, 0:-len(list_variables)]
inputs = utils.slice_data_to_batch_size_factor(forecast_scaled_x, PARAM_GRID['batch_size'])
forecast_input_data = forecast_input_data_raw[:inputs.shape[0]]
tree_sig_indices = list()
for (index, variable) in enumerate(list_variables):
if (
variable.lower() == 'a' or
variable.lower() == 's' or
variable.lower() == 'ele' or
variable.lower() == 'sp'
):
tree_sig_indices.append(index)
predictions = list()
for year in range(prediction_years[0], prediction_years[1]):
inputs_reshaped = inputs.reshape(inputs.shape[0], 1, inputs.shape[1])
yhat = lstm_model.predict(inputs_reshaped, batch_size=PARAM_GRID['batch_size'], verbose=1)
yhat_inverted = utils.invert_scale(scaler, inputs, yhat, len(list_variables))
""" Add forecasted value to predictions """
predictions.append(yhat_inverted[0])
""" Calculate next input """
inputs = numpy.vstack((inputs[1:], yhat[-1:]))
""" Copy over values of 'A', 'S', 'Ele' and 'Sp' """
for index in tree_sig_indices:
inputs[-1][index] = inputs[0][index]
""" Clean up memory """
gc.collect()
""" Set forecasted value """
columns = ['Year', 'Tree name'] + list_variables
predictions = numpy.array(predictions)
df = DataFrame(
index=list(range(1, len(predictions)+1)),
columns=columns
)
df['Tree name'] = tree_names[tree_index]
df['Year'] = list(range(prediction_years[0], prediction_years[1]))
for index in range(len(list_variables)):
variable = list_variables[index]
df[variable] = predictions[:, index]
""" Add true value and RMSE to data """
true_val = forecast_input_data[:, 0]
rmse = numpy.sqrt(
numpy.mean(
numpy.square(
numpy.subtract(
predictions[:true_val.shape[0], 0],
true_val
)
)
)
)
true_val_column = true_val.tolist() + ['-'] * (len(predictions) - true_val.size)
df.insert(3, 'BAI True Value', true_val_column)
df.insert(4, 'RMSE', rmse)
prediction_path = os.path.join(RESULTS_PATH, 'predictions.csv')
if not os.path.isfile(prediction_path):
df.to_csv(prediction_path, header=True, index=False)
else:
df.to_csv(prediction_path, mode='a', header=False, index=False)
rmse_df = DataFrame(index=[1], columns=['RMSE'])
rmse_df['Tree name'] = tree_names[tree_index]
rmse_df['RMSE'] = rmse
rmse_path = os.path.join(RESULTS_PATH, 'rmse.csv')
if not os.path.isfile(rmse_path):
rmse_df.to_csv(rmse_path, header=True, index=False)
else:
rmse_df.to_csv(rmse_path, mode='a', header=False, index=False)
""" Clean up memory """
del df
del predictions
del true_val
del rmse
del true_val_column
gc.collect()
""" Main function starts here """
def main():
""" Start of script """
""" Get the model """
model = load_model(
MODEL_PATH,
custom_objects={'metric_rmse': metric_rmse, 'metric_r2_score': metric_r2_score}
)
# try:
# except:
# print('Something went wrong while loading the model or model does not exist at ' + MODEL_PATH)
# print('Exiting')
# return
""" Get all the different trees in the dataset """
tree_names = numpy.unique(FULL_DATASET.values[:, 0])
""" Create result directories """
if not os.path.isdir(RESULTS_PATH):
os.makedirs(RESULTS_PATH)
""" Pick input columns from full dataset """
print('Picking columns from dataset ' + DATASET_PATH)
VARIABLES = PARAM_GRID['variables']
DATASET = FULL_DATASET[VARIABLES]
VARIABLES = VARIABLES[1:]
""" Get train, test and forecast values from dataset """
forecast_values = DATASET.values
""" Preprocess all the data at once """
print('Preprocessing data')
encoder = LabelEncoder()
forecast_input_data = preprocess_data(forecast_values.copy(), encoder)
""" Transform data to be supervised learning """
print('Transforming data to supervised multivariate model')
forecast_supervised = DataFrame()
for (i, tree) in enumerate(tree_names):
forecast_tree_data = forecast_input_data[forecast_input_data[:, 0] == i, :]
forecast_tree_supervised = utils.timeseries_to_supervised_multivariate(forecast_tree_data, 1, 1)
""" Drop second tree name column and first row with 0 values """
forecast_tree_supervised.drop('var1(t)', axis=1, inplace=True)
forecast_tree_supervised.drop(0, inplace=True)
forecast_supervised = forecast_supervised.append(forecast_tree_supervised)
forecast_supervised = forecast_supervised.values
""" Create scaler and scale data """
scaler = utils.create_scaler(forecast_supervised[:, 1:])
""" Start prediction """
print('Starting prediction')
for (i, tree) in enumerate(tree_names):
if (i < STARTING_TREE_INDEX-1):
print('Skipping tree %d' %(i+1))
continue
print('Processing tree %d' % (i+1))
""" Get tree data """
forecast_tree_data = forecast_supervised[forecast_supervised[:, 0] == i, :]
forecast_tree_input_data = forecast_input_data[forecast_input_data[:, 0] == i, :]
# Remove tree name column
forecast_tree_data = forecast_tree_data[:, 1:]
forecast_tree_input_data = forecast_tree_input_data[:, 1:]
# Scale data
forecast_scaled = scaler.transform(forecast_tree_data)
""" Prediction years """
        prediction_years = (1981, 2051)  # 1981 to 2050 inclusive
""" Reset states to prepare for prediction """
model.model.reset_states()
""" Predict """
predict(
model,
i,
forecast_tree_input_data,
forecast_scaled,
prediction_years,
VARIABLES,
tree_names,
scaler
)
""" Clear variables """
del forecast_tree_data
del forecast_tree_input_data
del forecast_scaled
gc.collect()
if __name__ == "__main__":
main()
sys.exit(0)
``` |
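Both the training script and predict.py read their settings from a JSON file (predict.py accepts it via `--hyperparameters`). Below is a hypothetical, non-exhaustive example built only from keys referenced in the code above; all values are illustrative placeholders, not the project's real configuration:

```python
# Hypothetical minimal hyperparameters.json for the scripts above; values are placeholders.
import json

params = {
    "starting_index": 1,
    "dataset_path": "data/trees.csv",
    "results_path": "results/",
    "variables": ["TreeName", "BAI", "Temperature", "Precipitation"],
    "batch_size": 1,
    "neurons": 50,
    "lr": 0.001,
    "optimizer": "adam",
    "kernel_init": "glorot_uniform",
    "bias_init": "zeros",
}
with open("hyperparameters.json", "w") as f:
    json.dump(params, f, indent=2)
```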
{
"source": "jgsogo/bb-django-articles",
"score": 3
} |
#### File: bb-django-articles/articles/decorators.py
```python
import functools
import logging
import time
log = logging.getLogger('articles.decorators')
def logtime(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if func.__class__.__name__ == 'function':
executing = '%s.%s' % (func.__module__, func.__name__)
elif 'method' in func.__class__.__name__:
executing = '%s.%s.%s' % (func.__module__, func.__class__.__name__, func.__name__)
else:
executing = str(func)
log.debug('Logging execution time for %s with args: %s; kwargs: %s' % (executing, args, kwargs))
start = time.time()
res = func(*args, **kwargs)
duration = time.time() - start
log.debug('Called %s... duration: %s seconds' % (executing, duration))
return res
return wrapped
def once_per_instance(func):
"""Makes it so an instance method is called at most once before saving"""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
if not hasattr(self, '__run_once_methods'):
self.__run_once_methods = []
name = func.__name__
if name in self.__run_once_methods:
log.debug('Method %s has already been called for %s... not calling again.' % (name, self))
return False
res = func(self, *args, **kwargs)
self.__run_once_methods.append(name)
return res
return wrapped
```
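A minimal usage sketch for the two decorators above; the `Article` class and its method are hypothetical stand-ins, not part of the original app:

```python
# Hypothetical usage of logtime / once_per_instance (assumes the module above is importable
# as articles.decorators and that DEBUG logging is enabled to see the timing output).
import logging
logging.basicConfig(level=logging.DEBUG)

from articles.decorators import logtime, once_per_instance

class Article(object):
    @logtime
    @once_per_instance
    def do_render_markup(self):
        # expensive work that should run at most once per instance before saving
        return True

a = Article()
a.do_render_markup()   # executes and logs its duration
a.do_render_markup()   # returns False: already run for this instance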
#### File: bb-django-articles/articles/directives.py
```python
INLINESTYLES = False
try:
from pygments.formatters import HtmlFormatter
# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
from docutils import nodes
from docutils.parsers.rst import directives
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
def pygments_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
try:
lexer = get_lexer_by_name(arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
parsed = highlight(u'\n'.join(content), lexer, formatter)
parsed = '<div class="codeblock">%s</div>' % parsed
return [nodes.raw('', parsed, format='html')]
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
directives.register_directive('sourcecode', pygments_directive)
# create an alias, so we can use it with rst2pdf... leave the other for
# backwards compatibility
directives.register_directive('code-block', pygments_directive)
except:
# the user probably doesn't have pygments installed
pass
``` |
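Importing this module is what registers the `sourcecode`/`code-block` directives; below is a small, hypothetical rendering sketch with docutils (assumes docutils and pygments are installed):

```python
# Hypothetical sketch: the import's side effect registers the directives, after which
# docutils can render reStructuredText containing highlighted code blocks.
from docutils.core import publish_parts
import articles.directives  # noqa: F401  (registers 'sourcecode' and 'code-block')

rst_text = """
Example
=======

.. sourcecode:: python

    print("hello")
"""

html = publish_parts(source=rst_text, writer_name='html')['fragment']
print(html)
```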
{
"source": "jgsogo/bb-django-flickr",
"score": 3
} |
#### File: bb-django-flickr/flickr/utils.py
```python
from datetime import datetime
def ts_to_dt(timestamp, offset=''):
return '%s%s' % (datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d %H:%M:%S'), offset)
def unslash(url):
return url.replace('\\/', '/')
``` |
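A quick, hypothetical illustration of the two helpers (the `ts_to_dt` output depends on the machine's local timezone):

```python
# Tiny usage sketch for flickr.utils; values are illustrative only.
from flickr.utils import ts_to_dt, unslash

print(unslash('https:\\/\\/farm1.static.flickr.com\\/1\\/2_3.jpg'))
# -> https://farm1.static.flickr.com/1/2_3.jpg

print(ts_to_dt('1577836800', offset='+00:00'))
# -> '2020-01-01 00:00:00+00:00' on a machine whose local time is UTC
```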
{
"source": "jgsogo/boost-graph-wrapper",
"score": 2
} |
#### File: jgsogo/boost-graph-wrapper/conanfile.py
```python
from conans import ConanFile, CMake, tools
import os
class BoostGraphWrapperConan(ConanFile):
name = "boost-graph-wrapper"
version = "0.0"
license = "MIT"
url = "<Package recipe repository url here, for issues about the package>"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
default_options = "shared=False"
generators = "cmake"
exports = "conanfile.py", "CMakeLists.txt", "boost-graph-wrapper/*", "tests/*"
def requirements(self):
self.requires.add("Boost/1.60.0@lasote/stable")
self.requires.add("spdlog/0.9.0@memsharded/stable")
def imports(self):
self.copy("*.dll", dst="bin", src="bin") # From bin to bin
self.copy("*.dylib*", dst="bin", src="lib") # From lib to bin
def build(self):
cmake = CMake(self.settings)
shared = "-DBUILD_SHARED_LIBS=ON" if self.options.shared else ""
build_tests = "-DBUILD_TEST:BOOL=ON" if self.scope.BUILD_TEST else ""
        self.run('cmake "%s" %s %s %s' % (self.conanfile_directory, cmake.command_line, shared, build_tests))
self.run("cmake --build . %s" % cmake.build_config)
if build_tests:
self.run("ctest -C {}".format(self.settings.build_type))
def package(self):
self.copy("*.h", dst="include", src="boost-graph-wrapper")
self.copy("*boost-graph-wrapper.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["boost-graph-wrapper"]
``` |
{
"source": "jgsogo/conan-apache-log4cxx",
"score": 2
} |
#### File: jgsogo/conan-apache-log4cxx/conanfile.py
```python
from conans import ConanFile, AutoToolsBuildEnvironment, tools, MSBuild, CMake
from conans.errors import ConanException
from conans.tools import os_info, SystemPackageTool
import os
class Apachelog4cxxConan(ConanFile):
name = "apache-log4cxx"
version = "0.10.0"
license = "Apache-2.0"
url = "https://github.com/jgsogo/conan-apache-log4cxx"
settings = "os", "compiler", "build_type", "arch"
# TODO: Options here https://logging.apache.org/log4cxx/latest_stable/building/autotools.html
options = {
"shared": [True, False],
"enable-wchar_t": ["yes", "no"],
"enable-unichar": ["yes", "no"],
"enable-cfstring": ["yes", "no"],
"with-logchar": ["utf-8", "wchar_t", "unichar"],
"with-charset": ["utf-8", "iso-8859-1", "usascii", "ebcdic", "auto"],
"with-SMTP": ["libesmtp", "no"],
"with-ODBC": ["unixODBC", "iODBC", "Microsoft", "no"]
}
default_options = "enable-wchar_t=yes", "enable-unichar=no", "enable-cfstring=no", "with-logchar=utf-8", "with-charset=auto", "with-SMTP=no", "with-ODBC=no", "shared=True"
lib_name = "logging-log4cxx-" + version.replace('.', '_')
exports_sources = ["CMakeLists.txt", "*.cmake", "*.patch"]
generators = "cmake"
def requirements(self):
self.requires.add("apache-apr/1.6.3@jgsogo/stable")
self.requires.add("apache-apr-util/1.6.1@jgsogo/stable")
def source(self):
tools.get("https://github.com/apache/logging-log4cxx/archive/v{version}.tar.gz".format(version=self.version.replace(".", "_")))
def patch(self):
if self.settings.os == "Windows":
if int(self.settings.compiler.version.value) <=12:
# Remove noexcept, keyword not supported for msvc<=12
tools.replace_in_file("apache-log4cxx-win2012.patch",
"noexcept(true)",
"")
tools.patch(base_path=self.lib_name, patch_file="apache-log4cxx-win2012.patch")
tools.replace_in_file(os.path.join(self.lib_name, 'src', 'main', 'cpp', 'stringhelper.cpp'),
"#include <apr.h>",
"#include <apr.h>\n#include <iterator>")
else:
tools.patch(base_path=self.lib_name, patch_file="log4cxx-1-gcc.4.4.patch")
tools.patch(base_path=self.lib_name, patch_file="log4cxx-5-gcc6-fix-narrowing-conversion.patch")
tools.replace_in_file(os.path.join(self.lib_name, 'src', 'main', 'include', 'log4cxx', 'private', 'Makefile.am'),
"privateinc_HEADERS= $(top_builddir)/src/main/include/log4cxx/private/*.h log4cxx_private.h",
"privateinc_HEADERS= $(top_builddir)/src/main/include/log4cxx/private/*.h")
tools.replace_in_file(os.path.join(self.lib_name, 'src', 'main', 'include', 'log4cxx', 'Makefile.am'),
"log4cxxinc_HEADERS= $(top_srcdir)/src/main/include/log4cxx/*.h log4cxx.h",
"log4cxxinc_HEADERS= $(top_srcdir)/src/main/include/log4cxx/*.h")
# Brew patch: https://github.com/Homebrew/legacy-homebrew/blob/56b57d583e874e6dfe7a417d329a147e4d4b064f/Library/Formula/log4cxx.rb
tools.replace_in_file(os.path.join(self.lib_name, 'src', 'main', 'include', 'log4cxx', 'helpers', 'simpledateformat.h'),
"#include <vector>",
"#include <vector>\n#include <locale>")
tools.replace_in_file(os.path.join(self.lib_name, 'src', 'main', 'include', 'log4cxx', 'helpers', 'simpledateformat.h'),
"namespace std { class locale; }",
"")
tools.replace_in_file(os.path.join(self.lib_name, 'src', 'main', 'cpp', 'stringhelper.cpp'),
"#include <cctype>",
"#include <cctype>\n#include <cstdlib>")
def build(self):
self.patch()
if self.settings.os == "Windows":
cmake = CMake(self)
cmake.definitions["APR_ALTLOCATION"] = self.deps_cpp_info["apache-apr"].rootpath
cmake.definitions["APRUTIL_ALTLOCATION"] = self.deps_cpp_info["apache-apr-util"].rootpath
cmake.definitions["BUILD_SHARED_LIBS"] = self.options.shared
cmake.configure()
cmake.build()
cmake.install()
else:
with tools.chdir(self.lib_name):
self.run("./autogen.sh")
env_build = AutoToolsBuildEnvironment(self)
args = ['--prefix', self.package_folder,
'--with-apr={}'.format(os.path.join(self.deps_cpp_info["apache-apr"].rootpath)),
'--with-apr-util={}'.format(os.path.join(self.deps_cpp_info["apache-apr-util"].rootpath)),
]
for key, value in self.options.items():
if key != 'shared':
args += ["--{}={}".format(key, value), ]
env_build.configure(configure_dir=self.lib_name,
host=self.settings.arch,
args=args)
env_build.make()
env_build.make(args=['install'])
def package(self):
self.copy("*.h", dst="include", src=os.path.join('src', 'main', 'include'), keep_path=True)
self.copy("*.h", dst="include", src=os.path.join(self.lib_name, 'src', 'main', 'include'), keep_path=True)
def package_info(self):
if self.options.shared:
self.cpp_info.libs = tools.collect_libs(self)
else:
self.cpp_info.libs = tools.collect_libs(self) + ["odbc32", "mswsock", ]
self.cpp_info.defines = ["LOG4CXX_STATIC", ]
``` |
{
"source": "jgsogo/conan_ci_jenkins",
"score": 3
} |
#### File: conan_ci_jenkins/python_runner/pr_tags.py
```python
import argparse
import json
import os
from github import Github
def _get_value(body, tag):
pos = body.lower().find(tag.lower())
if pos != -1:
cl = body[pos + len(tag):].splitlines()[0]
return cl.strip()
return None
def get_tag_from_pr(pr_number, tag):
"""Given a PR number and a tag to search, it returns the line written in the body"""
gh_token = os.getenv("GH_TOKEN")
g = Github(gh_token)
repo = g.get_repo("conan-io/conan")
pr = repo.get_pull(pr_number)
body = pr.body
value = _get_value(body, tag)
return value
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Launch tests in a venv')
parser.add_argument('output_file', help='e.g.: file.json')
parser.add_argument('branch_name', help='e.g.: PR-23')
args = parser.parse_args()
TAG_PYVERS = "#PYVERS:"
TAG_TAGS = "#TAGS:"
TAG_REVISIONS = "#REVISIONS:"
out_file = args.output_file
branch = args.branch_name
if not branch.startswith("PR-"):
print("The branch is not a PR")
exit(-1)
pr_number = int(branch.split("PR-")[1])
def clean_list(the_list):
if not the_list:
return []
return [a.strip() for a in the_list.split(",")]
# Read tags to include
tags = clean_list(get_tag_from_pr(pr_number, TAG_TAGS))
# Read pythons to include
tmp = clean_list(get_tag_from_pr(pr_number, TAG_PYVERS))
pyvers = {"Windows": [], "Linux": [], "Macos": []}
for t in tmp:
if "@" in t:
the_os, pyver = t.split("@")
if the_os not in ["Macos", "Linux", "Windows"]:
print("Invalid os: %s" % the_os)
exit(-1)
pyvers[the_os].append(pyver)
else:
pyvers["Macos"].append(t)
pyvers["Linux"].append(t)
pyvers["Windows"].append(t)
# Rest revisions?
tmp = get_tag_from_pr(pr_number, TAG_REVISIONS)
revisions = tmp.strip().lower() in ["1", "true"] if tmp else False
with open(out_file, "w") as f:
the_json = {"tags": tags, "pyvers": pyvers, "revisions": revisions}
f.write(json.dumps(the_json))
``` |
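The script relies on a simple tag convention inside the PR body; a small illustration of that convention using the `_get_value` helper from the file above (the body text is invented for the example):

```python
# Hypothetical PR body showing the #PYVERS / #TAGS / #REVISIONS convention parsed above.
body = """Fix xyz in the client.

#PYVERS: 3.6, 3.8@Windows
#TAGS: svn, slow
#REVISIONS: 1
"""

print(_get_value(body, "#TAGS:"))       # -> 'svn, slow'
print(_get_value(body, "#PYVERS:"))     # -> '3.6, 3.8@Windows'
print(_get_value(body, "#REVISIONS:"))  # -> '1'
```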
{
"source": "jgsogo/conan-expat",
"score": 2
} |
#### File: jgsogo/conan-expat/conanfile.py
```python
from conans import ConanFile, CMake, tools
import os
class ExpatConan(ConanFile):
name = "expat"
version = "2.2.5"
description = "Fast XML parser in C"
url = "https://github.com/bincrafters/conan-expat"
license = "MIT"
exports = ['LICENSE.md', 'FindExpat.cmake']
exports_sources = ['CMakeLists.txt']
generators = "cmake"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
default_options = "shared=False"
def source(self):
base_url = "https://github.com/libexpat/libexpat/archive"
zip_name = "R_%s.zip" % self.version.replace(".", "_")
tools.get("%s/%s" % (base_url, zip_name))
def build(self):
cmake = CMake(self, parallel=True)
cmake.definitions['BUILD_doc'] = False
cmake.definitions['BUILD_examples'] = False
cmake.definitions['BUILD_tests'] = False
cmake.definitions['BUILD_tools'] = False
cmake.definitions['CMAKE_DEBUG_POSTFIX'] = 'd'
cmake.definitions['CMAKE_POSITION_INDEPENDENT_CODE'] = True
cmake.definitions['BUILD_shared'] = self.options.shared
cmake.configure()
cmake.build()
cmake.install()
def package(self):
self.copy("FindExpat.cmake", ".", ".")
def package_info(self):
self.cpp_info.libs = ["expatd" if self.settings.build_type == "Debug" else "expat"]
if not self.options.shared:
self.cpp_info.defines = ["XML_STATIC"]
def configure(self):
del self.settings.compiler.libcxx
``` |
{
"source": "jgsogo/conan-opencv",
"score": 2
} |
#### File: jgsogo/conan-opencv/conanfile.py
```python
from conans import ConanFile, CMake, tools
from conans.tools import os_info, SystemPackageTool
import os
class OpenCVConan(ConanFile):
name = "OpenCV"
version = "2.4.13.5"
license = "LGPL"
homepage = "https://github.com/opencv/opencv"
description = "OpenCV (Open Source Computer Vision Library)"
url = "https://github.com/conan-community/conan-opencv.git"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
default_options = "shared=False"
generators = "cmake"
requires = ("zlib/1.2.11@conan/stable", "libjpeg/9b@bincrafters/stable",
"libpng/1.6.34@bincrafters/stable", "libtiff/4.0.8@bincrafters/stable",
"jasper/2.0.14@conan/stable")
def source(self):
tools.download("https://github.com/opencv/opencv/archive/2.4.13.5.zip", "opencv.zip")
tools.unzip("opencv.zip")
os.unlink("opencv.zip")
tools.replace_in_file("opencv-%s/CMakeLists.txt" % self.version, "project(OpenCV CXX C)",
"""project(OpenCV CXX C)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()""")
def system_requirements(self):
if os_info.linux_distro == "ubuntu":
installer = SystemPackageTool()
installer.update() # Update the package database
for pack_name in ("libgtk2.0-dev", "pkg-config", "libpango1.0-dev", "libcairo2-dev",
"libglib2.0-dev "):
installer.install(pack_name) # Install the package
def build(self):
cmake = CMake(self)
cmake.definitions["BUILD_EXAMPLES"] = "OFF"
cmake.definitions["BUILD_DOCS"] = "OFF"
cmake.definitions["BUILD_TESTS"] = "OFF"
cmake.definitions["BUILD_opencv_apps"] = "OFF"
cmake.definitions["BUILD_ZLIB"] = "OFF"
cmake.definitions["BUILD_JPEG"] = "OFF"
cmake.definitions["BUILD_PNG"] = "OFF"
cmake.definitions["BUILD_TIFF"] = "OFF"
cmake.definitions["BUILD_JASPER"] = "OFF"
if self.settings.compiler == "Visual Studio":
if "MT" in str(self.settings.compiler.runtime):
cmake.definitions["BUILD_WITH_STATIC_CRT"] = "ON"
else:
cmake.definitions["BUILD_WITH_STATIC_CRT"] = "OFF"
cmake.configure(source_folder='opencv-%s' % self.version)
cmake.build()
opencv_libs = ["contrib","stitching", "nonfree", "superres", "ocl", "ts", "videostab", "gpu", "photo", "objdetect",
"legacy", "video", "ml", "calib3d", "features2d", "highgui", "imgproc", "flann", "core"]
def package(self):
self.copy("*.h*", "include", "opencv-%s/include" % self.version)
self.copy("*.h*","include/opencv2","opencv2") #opencv2/opencv_modules.hpp is generated
for lib in self.opencv_libs:
self.copy("*.h*", "include", "opencv-%s/modules/%s/include" % (self.version, lib))
self.copy("*.lib", "lib", "lib", keep_path=False)
self.copy("*.a", "lib", "lib", keep_path=False)
self.copy("*.dll", "bin", "bin", keep_path=False)
self.copy("*.dylib", "lib", "lib", keep_path=False)
self.copy("*.so", "lib", "lib", keep_path=False)
self.copy("*.xml", "data", "opencv-%s/data" % (self.version))
self.copy("*opencv.pc", keep_path=False)
if not self.options.shared:
self.copy("*.lib", "lib", "3rdparty/lib", keep_path=False)
self.copy("*.a", "lib", "3rdparty/lib", keep_path=False)
def package_info(self):
version = self.version.split(".")[:-1] # last version number is not used
version = "".join(version) if self.settings.os == "Windows" else ""
debug = "d" if self.settings.build_type == "Debug" and self.settings.compiler == "Visual Studio" else ""
for lib in self.opencv_libs:
self.cpp_info.libs.append("opencv_%s%s%s" % (lib, version, debug))
if self.settings.os == "Windows" and not self.options.shared:
self.cpp_info.libs.extend(["IlmImf"])
if self.settings.os == "Linux":
if not self.options.shared:
other_libs = self.collect_libs()
for other_lib in ["IlmImf"]:
if other_lib in other_libs:
self.cpp_info.libs.append(other_lib)
else:
self.cpp_info.libs.append(other_lib.replace("lib", ""))
self.cpp_info.libs.extend(["gthread-2.0", "freetype", "fontconfig", "glib-2.0", "gobject-2.0", "pango-1.0", "pangoft2-1.0", "gio-2.0", "gdk_pixbuf-2.0",
"cairo", "atk-1.0", "pangocairo-1.0"," gtk-x11-2.0", "gdk-x11-2.0", "rt", "pthread", "m", "dl"])
``` |
{
"source": "jgsogo/conan-recipes",
"score": 3
} |
#### File: .github/scripts/add_version.py
```python
import yaml
import os
import hashlib
import requests
from copy import deepcopy
def _version_key(v: str):
if v.startswith('cci.'):
return _version_key(f"0.{v[4:]}")
if v.startswith('v'):
return _version_key(v[1:])
tokens = v.split('.')
try:
return [int(it) for it in tokens]
except ValueError:
return [-1]
def add_version_to_config(name, version):
config_filename = os.path.join('recipes', name, 'config.yml')
with open(config_filename, "r") as stream:
data = yaml.safe_load(stream)
data_versions = data['versions']
versions = list(data_versions.keys())
versions = sorted(versions, key=lambda u: _version_key(u))
# Will add the new version using the same folder as the latest one
last_version = versions[-1]
# Operate on config.yml
last_version_folder = data_versions.get(last_version).get('folder')
data_versions[version] = {'folder': data_versions[last_version]['folder']}
with open(config_filename, "w") as f:
yaml.dump(data, f)
print(data)
return last_version_folder
def add_version_to_conandata(name, version, version_folder):
conandata_filename = os.path.join('recipes', name, version_folder, 'conandata.yml')
with open(conandata_filename, "r") as stream:
data = yaml.safe_load(stream)
data_versions = data['sources']
versions = list(data_versions.keys())
versions = sorted(versions, key=lambda u: _version_key(u))
last_version = versions[-1]
# Operate on conandata.yml (sources)
url = f"https://github.com/jgsogo/{name}/archive/refs/tags/{version}.tar.gz"
u = requests.get(url)
    hash = hashlib.sha256(u.content).hexdigest()
data_versions[version] = {
'url': url,
'sha256': hash
}
# Operate on conandata.yml (patches)
data_patches = data.get('patches', {})
patches = data_patches.get(last_version, [])
if patches:
data_patches[version] = deepcopy(data_patches[last_version])
with open(conandata_filename, "w") as f:
yaml.dump(data, f)
print(data)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Add version to library.')
parser.add_argument('--library', dest='library', help='Name of the library')
parser.add_argument('--version', dest='version', help='Version to add')
args = parser.parse_args()
print(f"Add version '{args.version}' to library '{args.library}'")
folder = add_version_to_config(args.library, args.version)
add_version_to_conandata(args.library, args.version, folder)
```
#### File: composite/all/conanfile.py
```python
import os
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
required_conan_version = ">=1.33.0"
class CompositeConan(ConanFile):
python_requires = "base_conanfile/v0.2.0@jgsogo/stable"
python_requires_extend = "base_conanfile.BaseCMakeConanfile"
name = "composite"
homepage = "https://github.com/jgsogo/composite"
description = "Data structures with composite nodes"
topics = ("cpp20", "tree", "graph")
license = "MIT"
def validate(self):
self._validate_cppstd("20")
def package_id(self):
self.info.header_only()
```
#### File: render_context/all/conanfile.py
```python
import os
from conans import ConanFile
required_conan_version = ">=1.33.0"
class RenderContextConan(ConanFile):
python_requires = "base_conanfile/v0.2.0@jgsogo/stable"
python_requires_extend = "base_conanfile.BaseCMakeConanfile"
name = "render_context"
homepage = "https://github.com/jgsogo/render_context"
description = "Utilities to draw elements using Magnum with ImGUI integration"
topics = ("cpp20", "magnum", "imgui")
license = "MIT"
def validate(self):
self._validate_cppstd("20")
def requirements(self):
self.requires("imgui/cci.20211117+docking@jgsogo/stable")
self.requires("magnum/2020.06@jgsogo/stable")
self.requires("magnum-integration/2020.06")
def package_info(self):
self.cpp_info.defines.append("IMGUI_USER_CONFIG=\"{}\"".format(str(os.path.join(self.package_folder, "include", "render", "imgui", "imconfig.h"))))
self.cpp_info.requires = ['magnum::magnum_main', 'imgui::imgui', 'magnum-integration::imgui']
self.cpp_info.libs = ['units', 'render_imgui']
``` |
{
"source": "jgsogo/cpp-inside",
"score": 2
} |
#### File: bindings/python/crnd.py
```python
import ctypes
import sys
import os
me = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(me, 'messages'))
from messages import status_pb2, help_pb2, sample_request_pb2, sample_pb2, model_pb2
class ProtoSerialized(ctypes.Structure):
_fields_ = [('data', ctypes.c_void_p),
('size', ctypes.c_int),]
def parse_as(self, proto_class):
msg = (ctypes.c_char *self.size).from_address(self.data)
proto = proto_class()
proto.ParseFromString(msg)
return proto
@classmethod
def build_from(cls, proto):
instance = cls()
instance._as_str = proto.SerializeToString() # Need to keep this in memory
instance.size = len(instance._as_str)
instance.data = ctypes.cast(instance._as_str, ctypes.c_void_p)
return instance
def as_ref(self):
return ctypes.byref(self)
class CRND:
def __init__(self, path_to_library):
self.dll = ctypes.cdll.LoadLibrary(path_to_library)
@staticmethod
def _call(f, data_proto_class, *input_args):
@ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.POINTER(ProtoSerialized), ctypes.POINTER(ProtoSerialized))
def callback(_, data_in, status_in):
callback.status = status_in.contents.parse_as(status_pb2.Status)
if callback.status.ok:
callback.data = data_in.contents.parse_as(data_proto_class)
args = list(input_args) + [callback, ]
f(None, *args)
if callback.status.ok:
return callback.data, callback.status
else:
return None, callback.status
def _sample(self, seed, samples, model):
sample_request = sample_request_pb2.SampleRequest()
sample_request.seed = seed
sample_request.n_samples = samples
sample_request.model.CopyFrom(model)
sample_request_p = ProtoSerialized.build_from(sample_request)
sample, status = self._call(self.dll.sample, sample_pb2.Sample, sample_request_p.as_ref())
if not status.ok:
raise Exception(status.error_message)
return sample.samples
def help(self, output):
help, status = self._call(self.dll.help, help_pb2.Help)
output.write(str(help))
def uniform(self, seed, samples):
pass
def normal(self, seed, samples, mean, stddev):
pass
def lognormal(self, seed, samples, mean, stddev):
model = model_pb2.Model()
model.id = model_pb2.Model.LOGNORMAL
model.params["mean"] = mean
model.params["stddev"] = stddev
return self._sample(seed, samples, model)
```
#### File: examples/python/main.py
```python
import sys
import os
import argparse
me = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(me, '..', '..', 'bindings', 'python'))
from crnd import CRND
def draw_histogram(rolls, title):
nstars = 300
nclasses = 20
p = [0] * (nclasses + 1)
min_value = min(rolls)
max_value = max(rolls)
step = (max_value-min_value)/float(nclasses)
for elem in rolls:
p[int((elem-min_value)/step)] += 1
sys.stdout.write("{}\n".format(title))
for i in range(nclasses):
stars = p[i]*nstars/len(rolls)
sys.stdout.write("{:2.6f}: {}\n".format(min_value + i*step + step/2.0, "*"*int(stars)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate some random series')
parser.add_argument('--crnd', dest='crnd', help='Path to library')
args = parser.parse_args()
crnd = CRND(args.crnd)
# crnd.help(sys.stdout)
r = crnd.lognormal(12345, 100000, 3, 0.2)
draw_histogram(r, "lognormal(3, 0.2)")
``` |
{
"source": "jgsogo/django-desktop-win",
"score": 2
} |
#### File: jgsogo/django-desktop-win/config.py
```python
from __future__ import unicode_literals
import re
import os
import sys
import argparse
import subprocess
import glob
import shutil
from slugify import slugify
# Python 2 vs 3 conditional imports and auxiliary functions
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
if sys.version_info < (3,):
import codecs
def u(x):
return codecs.unicode_escape_decode(x)[0]
else:
raw_input = input
def u(x):
return x
# Actual code
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DOWNLOAD_PATH = os.path.join(BASE_DIR, 'tmp')
PROJECTS_PATH = os.path.join(BASE_DIR, 'projects')
# Just reimplement waitress.serve function to flush sys.stdout
waitress_server = """
import sys
sys.path.append(r"{manage_path}")
from waitress.server import create_server
import logging
def waitress_serve(app, **kw):
_server = kw.pop('_server', create_server) # test shim
_quiet = kw.pop('_quiet', False) # test shim
_profile = kw.pop('_profile', False) # test shim
if not _quiet: # pragma: no cover
# idempotent if logging has already been set up
logging.basicConfig()
server = _server(app, **kw)
if not _quiet: # pragma: no cover
sys.stdout.write('serving on http://%s:%s\\n' % (server.effective_host,
server.effective_port))
sys.stdout.flush()
if _profile: # pragma: no cover
profile('server.run()', globals(), locals(), (), False)
else:
server.run()
from {wsgi_dir}.wsgi import application
waitress_serve(application, host='127.0.0.1', port=0)
"""
def user_input(msg, default=None, choices=None, quit='Q'):
    data = None
    if default:
        msg = msg + ' [%s]' % default
    while not data:
        r = raw_input(msg + ': ')
input = u(r.replace('\\', '/'))
if not len(input):
input = default
if input == quit:
return None
if choices and (not any([it==input for it in choices])):
sys.stdout.write("Invalid value '%s', valid choices are '%s'. (enter '%s' to quit)\n" % (input, "', '".join(choices), quit))
else:
data = input
return data
def download(url, filename):
u = urlopen(url)
meta = u.info()
try:
file_size = int(meta.get("Content-Length"))
except AttributeError:
file_size = int(meta.getheaders("Content-Length")[0])
sys.stdout.write("Downloading: %s Bytes: %s\n" % (filename, file_size))
if os.path.exists(filename):
# TODO: Check size!
return
# Start download
f = open(filename, 'wb')
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
sys.stdout.write(status)
f.close()
def find_file(lookup, basedir):
ret = []
for root, dirnames, filenames in os.walk(basedir):
for filename in filenames:
if filename == lookup:
ret.append(os.path.abspath(os.path.join(root, filename)))
return ret
class WinPythonConfig():
ARCH_CHOICES = ['x64', 'x86']
MD5_SHA1_FILE = 'http://winpython.github.io/md5_sha1.txt'
DOWNLOAD_URL = 'http://downloads.sourceforge.net/project/winpython/WinPython_{python_version}/{winpython_version}/{basename}.exe'
regex_name = 'WinPython-{architecture}-{python_version}.(\d).(\d)Zero'
python_version = None
architecture = None
def __init__(self, python, arch, interactive=True):
self.interactive = True
self.get_python_version(python)
self.get_architecture(arch)
self.python_exe = None
self.pip_exe = None
self.env_bat = None
def get_python_version(self, python):
if python:
self.python_version = python
elif not self.python_version and self.interactive:
self.python_version = user_input(" >> python version to work with [2.7|3.5|...]")
else:
raise ValueError("Use '--python' argument to provide a python version.")
# Validation
if not re.match(r'\d.\d', self.python_version):
raise ValueError("Python version not well formed: '%s'" % self.python_version)
print(" - python version: %s" % self.python_version)
return self.python_version
def get_architecture(self, arch):
if arch:
self.architecture = arch
elif not self.architecture and self.interactive:
self.architecture = user_input(" >> target architecture (Win32) %s" % self.ARCH_CHOICES, choices=self.ARCH_CHOICES)
else:
raise ValueError("Use '--arch' argument to provide a architecture.")
# Validation
if not self.architecture in self.ARCH_CHOICES:
raise ValueError("Invalid architecture type. Valid types are %s" % self.ARCH_CHOICES)
print(" - architecture: %s" % self.architecture)
return self.architecture
@property
def basename(self):
if not hasattr(self, '_basename'):
if self.architecture == 'x64':
arch = '64bit'
elif self.architecture == 'x86':
arch = '32bit'
else:
raise ValueError("Architecture '%s' not recognized" % architecture)
pattern = self.regex_name.format(architecture=arch, python_version=self.python_version)
m = [re.search(pattern, str(line), re.IGNORECASE) for line in urlopen(self.MD5_SHA1_FILE).readlines()]
m = [x for x in m if x is not None]
if not len(m):
raise ValueError("Cannot find a WinPython version for architecture '%s' and Python '%s'" % (self.architecture, self.python_version))
item = max(m, key=lambda u: (u.group(1), u.group(2)))
# Keep cases
basename = self.regex_name.format(architecture=arch, python_version=self.python_version)
basename = basename.replace('(\d)', item.group(1), 1)
basename = basename.replace('(\d)', item.group(2), 1)
setattr(self, '_basename', basename)
setattr(self, '_winpython_version', "%s.%s.%s" % (self.python_version, item.group(1), item.group(2)))
return getattr(self, '_basename')
@property
def winpython_version(self):
basename = self.basename # Just to get the values
return getattr(self, '_winpython_version')
@property
def download_url(self):
return self.DOWNLOAD_URL.format(python_version=self.python_version,
winpython_version=self.winpython_version,
basename=self.basename)
def download_to(self, path):
if not os.path.exists(path):
os.makedirs(path)
filename = os.path.join(path, self.basename + '.exe')
download(self.download_url, filename)
return filename
_on_cleanup = []
    def install(self, filename, target_dir, run_async=True):
print("Installing WinPython, this can take a little...")
target_filename = os.path.join(target_dir, os.path.basename(filename))
dirname = os.path.dirname(target_filename)
if not self.find_exes(dirname):
shutil.copy2(filename, target_filename)
install_process = subprocess.Popen([target_filename, '/S'])
def do_install():
print("Finishing WinPython installation")
install_process.communicate()
# Look for python.exe and pip.exe
self.find_exes(dirname)
os.remove(target_filename)
        if run_async:
self._on_cleanup.append(do_install)
else:
do_install()
def find_exes(self, dirname):
try:
# Look for python.exe and pip.exe
self.python_exe = find_file('python.exe', dirname)[0]
self.pip_exe = find_file('pip.exe', dirname)[0]
self.env_bat = find_file('env.bat', dirname)[0]
print(" - python.exe: %s" % self.python_exe)
print(" - pip.exe : %s" % self.pip_exe)
print(" - env.bat : %s" % self.env_bat)
return True
except IndexError:
return False
    def run_python(self, command, stdout=sys.stdout, run_async=True):
if isinstance(command, list):
command = ' '.join(command)
full_cmd = self.env_bat + '& ' + command
p = subprocess.Popen(full_cmd, stdout=stdout, shell=True)
        if run_async:
self._on_cleanup.append(p.communicate)
else:
p.communicate()
def __del__(self):
for clean_action in self._on_cleanup:
clean_action()
class DjangoConfig():
django_dir = None
home = None
def __init__(self, django_dir, home, interactive=True):
self.interactive = interactive
self.get_django_dir(django_dir)
self.get_home_url(home)
def get_django_dir(self, django_dir):
if django_dir:
self.django_dir = django_dir
elif not self.django_dir and self.interactive:
self.django_dir = user_input(" >> django directory")
else:
raise ValueError("Use '--django_dir' argument to inform about Django project")
if not os.path.exists(self.django_dir):
raise ValueError("Provided Django directory (%s) does not exist or it is inaccesible" % self.django_dir)
print(" - django_dir: %s" % self.django_dir)
return self.django_dir
def get_home_url(self, home):
if not home:
home = user_input(" >> Home URL for application", default='/')
home = home.replace('\\', '/')
if not home.startswith("/"):
home = '/' + home
self.home = home
return self.home
def install(self):
print("Looking for Django project files: manage.py and requirements...")
# Look for 'manage.py' and 'requirements.py'
manage_script = find_file('manage.py', self.django_dir)
requirements_file = find_file('requirements.txt', self.django_dir)
if len(manage_script) != 1 or len(requirements_file) > 1:
raise ValueError('django_dir must point to just one single project')
self.requirements_file = requirements_file[0] if len(requirements_file) > 0 else None
self.manage_script = manage_script[0]
print(" - manage.py : %s" % self.manage_script)
print(" - requirements.txt: %s" % self.requirements_file)
class ConfigScript():
app_id = None
cfg_filename = None
def _get_cfg_filename(self, args):
filenames = []
if args['config_ini'] is not None:
filename, file_extension = os.path.splitext(args['config_ini'])
if file_extension:
return os.path.abspath(args['config_ini'])
return os.path.join(PROJECTS_PATH, args['config_ini'], 'config.ini')
return None
def parse_args(self):
parser = argparse.ArgumentParser(description='Configure django-desktop-win.')
parser.add_argument('config_ini', nargs='?', help='configuration file')
parser.add_argument('--python', nargs='?', help='python version to work with: 2.7, 3.4,...')
parser.add_argument('--arch', nargs='?', choices=['x64', 'x86'], help='target architecture (Win32)')
parser.add_argument('--django_dir', nargs='?', help='Path to a self contained Django project')
args = vars(parser.parse_args())
# Read data from config file
self.cfg_filename = self._get_cfg_filename(args)
if self.cfg_filename:
args = self.read_ini(args)
self.app_id = slugify(args['appName'])
else:
name_default = None
if args['django_dir'] is not None:
name_default = os.path.basename(args['django_dir'])
args['appName'] = user_input(" >> Application Name", default=name_default)
self.app_id = slugify(args['appName'])
dirpath = os.path.join(PROJECTS_PATH, self.app_id)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
self.cfg_filename = os.path.join(dirpath, 'config.ini')
return args
def save_ini(self, args):
with open(self.cfg_filename, 'w') as f:
for key, value in args.items():
if value is not None:
f.write('%s=%s\n' % (key, value))
def read_ini(self, args):
with open(self.cfg_filename, 'r') as f:
for line in f.readlines():
if len(line.strip()):
item = line.split('=')
if item[0] not in args or args[item[0]] is None:
args[item[0]] = item[1].strip()
return args
def run(self, *args, **kwargs):
args = self.parse_args()
# WinPython
print("Gathering input data for WinPython")
winpython = WinPythonConfig(args['python'], args['arch'])
args['python'] = winpython.python_version
args['arch'] = winpython.architecture
# Django
print("\nGathering input data for Django project")
django = DjangoConfig(args['django_dir'], home=args.get('home', None))
django.install()
args['django_dir'] = django.django_dir
args['home'] = django.home
# Create stuff
# - config.ini: store configuration
self.save_ini(args)
# Perform actions
app_dir = os.path.dirname(self.cfg_filename)
filename = winpython.download_to(DOWNLOAD_PATH)
        winpython.install(filename, app_dir, run_async=False)
if django.requirements_file:
print("Installing requirements")
            winpython.run_python([winpython.pip_exe, 'install', '-r', django.requirements_file, '--upgrade'], run_async=False)
        winpython.run_python([winpython.pip_exe, 'install', 'waitress', '--upgrade'], run_async=False)
# - run.py: file to run django using waitress (local)
run_py = os.path.abspath(os.path.join(app_dir, 'run.py'))
with open(run_py, 'w') as f:
wsgi = find_file('wsgi.py', django.django_dir)[0]
wsgi_paths = os.path.split(os.path.dirname(wsgi))
f.write(waitress_server.format(manage_path=os.path.dirname(django.manage_script), wsgi_dir=wsgi_paths[-1]))
# - requirements.txt:
with open(os.path.join(app_dir, 'requirements.txt'), 'w') as f:
waitress = False
for line in open(django.requirements_file, 'r').readlines():
waitress = waitress or 'waitress' in line
f.write(line)
if not waitress:
f.write('waitress')
# - start.bat: start CEF, for local development (local)
deploy_dir = os.path.abspath(os.path.join(PROJECTS_PATH, 'deploy', 'bin64' if args['arch'] == 'x64' else 'bin32'))
with open(os.path.join(app_dir, 'start.bat'), 'w') as f:
cef_exe = find_file('cefsimple.exe', deploy_dir)
if not len(cef_exe):
print("cefsimple.exe not found at '%s'. You have to compile CEF and call config again." % deploy_dir)
else:
f.write('"%s" --python="%s" --manage="%s" --url=%s' % (cef_exe[0], winpython.python_exe, run_py, django.home))
# - defines.iss
defines_filename = 'defines_py%s_%s.iss' % (winpython.python_version, args['arch'])
with open(os.path.join(app_dir, defines_filename), 'w') as f:
f.write('#define MainDir "%s"\n' % PROJECTS_PATH)
f.write('#define AppDir "%s"\n' % app_dir)
f.write('#define MyAppName "%s"\n' % args['appName'])
f.write('#define Architecture "%s"\n' % args['arch'])
f.write('#define DeployDir "%s"\n' % deploy_dir)
f.write('#define Home "%s"\n' % args['home'])
f.write('#define DjangoDir "%s"\n' % django.django_dir)
f.write('#define PythonVersion "%s"\n' % winpython.python_version)
f.write('#define WinPythonArchitecture "%s"\n' % winpython.architecture)
f.write('#define WinPythonBasename "%s"\n' % winpython.basename)
f.write('#define WinPythonDownload "%s"\n' % winpython.download_url)
f.write('#define WinPythonRelPath "%s"\n' % os.path.relpath(os.path.dirname(winpython.python_exe), app_dir))
f.write('#define WinPythonRelExe "%s"\n' % os.path.relpath(winpython.python_exe, app_dir))
f.write('#define WinPythonPipRelPath "%s"\n' % os.path.relpath(winpython.pip_exe, app_dir))
f.write('#define WinPythonEnvRelPath "%s"\n' % os.path.relpath(winpython.env_bat, app_dir))
f.write('#define ManagePyPath "%s"\n' % os.path.relpath(django.manage_script, django.django_dir))
f.write('#define ManagePyRelPath "%s"\n' % os.path.relpath(os.path.dirname(django.manage_script), django.django_dir))
# - inno_setup.tmp.iss
with open(os.path.join(app_dir, '%s_py%s_%s.iss' % (args['appName'], winpython.python_version, args['arch'])), 'w') as f:
f.write('#include "%s"\n' % defines_filename)
f.write('#include "../../inno_setup.tmp.iss"\n')
# - CMakeLists.txt
with open(os.path.join(app_dir, 'CMakeLists.txt'), 'w') as f:
f.write('cmake_minimum_required(VERSION 2.8)\n')
f.write('set(PROJECT_NAME, "%s")\n' % args['appName'])
f.write('include(${CMAKE_CURRENT_SOURCE_DIR}/../../cef/CMakeLists.txt)\n')
if __name__ == '__main__':
script = ConfigScript()
script.run()
``` |
{
"source": "jgsogo/godot-python",
"score": 2
} |
#### File: jgsogo/godot-python/conanfile.py
```python
from conans import ConanFile, CMake
class GodotPython(ConanFile):
name = "godot-python"
version = "0.1"
generators = "cmake_find_package"
def requirements(self):
self.requires("pybind11/2.2.4@conan/stable")
self.requires("godot-cpp/[~0.0.0-0,include_prerelease=True]@jgsogo/stable")
self.requires("cpython/3.7.3@jgsogo/stable")
def build(self):
cmake = CMake(self)
cmake.configure(source_folder="src")
cmake.build()
``` |
{
"source": "jgsogo/nlproc_spa",
"score": 2
} |
#### File: spa/nltk/conll2002_tagger.py
```python
from nltk.corpus import conll2002
from nlproc.pos_tagger import postagger_register
from .ngram_tagger import NLTKNgramTagger
import logging
log = logging.getLogger(__name__)
CONLL2002 = 'conll2002'
FILEIDS =['esp.testa', 'esp.testb']
def conll2002_tagger(use_mwe, ngrams):
class Conll2002Tagger(NLTKNgramTagger):
tagset = 'es-eagles.map'
def __init__(self):
super(Conll2002Tagger, self).__init__(id=CONLL2002, use_mwe=use_mwe, ngrams=ngrams)
def get_tagged_sentences(self):
return conll2002.tagged_sents(fileids=FILEIDS)
@classmethod
def pos_tag(cls, tokens, use_mwe=True, ngrams=2):
item_class = conll2002_tagger(use_mwe=use_mwe, ngrams=ngrams)
item = item_class()
item.load(train=True)
return item.tag_sents(tokens)
return Conll2002Tagger
Conll2002Tagger = conll2002_tagger(use_mwe=True, ngrams=2)
postagger_register("{}-mwe-1grams".format(CONLL2002))(conll2002_tagger(use_mwe=True, ngrams=1))
postagger_register("{}-mwe-2grams".format(CONLL2002))(conll2002_tagger(use_mwe=True, ngrams=2))
postagger_register("{}-mwe-3grams".format(CONLL2002))(conll2002_tagger(use_mwe=True, ngrams=3))
postagger_register("{}-nomwe-1grams".format(CONLL2002))(conll2002_tagger(use_mwe=False, ngrams=1))
postagger_register("{}-nomwe-2grams".format(CONLL2002))(conll2002_tagger(use_mwe=False, ngrams=2))
postagger_register("{}-nomwe-3grams".format(CONLL2002))(conll2002_tagger(use_mwe=False, ngrams=3))
if __name__ == '__main__':
#formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)7s - %(message)s')
formatter = logging.Formatter('%(asctime)s - %(levelname)7s - %(message)s')
ch = logging.StreamHandler()
ch.setFormatter(formatter)
ch.setLevel(logging.DEBUG)
log.setLevel(logging.DEBUG)
log.addHandler(ch)
from nlproc.pos_tagger import postagger_factory
conll_mwe_2grams = postagger_factory("{}-nomwe-2grams".format(CONLL2002))()
conll_mwe_2grams.load(train=True)
print(conll_mwe_2grams.tag(["La", "casa", "es", "azul"]))
"""
cess_mwe_1grams = postagger_factory("{}-mwe-1grams".format(CESS_ESP))()
cess_mwe_1grams.load(train=True)
print(cess_mwe_1grams.tag(["La", "casa", "es", "azul"]))
cess_mwe_3grams = postagger_factory("{}-mwe-3grams".format(CESS_ESP))()
cess_mwe_3grams.load(train=True)
print(cess_mwe_3grams.tag(["La", "casa", "es", "azul"]))
"""
``` |
{
"source": "jgsonx/gym",
"score": 3
} |
#### File: envs/algorithmic/duplicated_input.py
```python
import random
import numpy as np
from gym.envs.algorithmic import algorithmic_env
from gym.envs.algorithmic.algorithmic_env import ha
class DuplicatedInputEnv(algorithmic_env.AlgorithmicEnv):
def __init__(self, duplication=2, base=5):
self.duplication = duplication
algorithmic_env.AlgorithmicEnv.__init__(self,
inp_dim=1,
base=base,
chars=True)
def set_data(self):
self.content = {}
self.target = {}
copies = int(self.total_len / self.duplication)
for i in range(copies):
val = random.randrange(self.base)
self.target[i] = val
for d in range(self.duplication):
self.content[ha(np.array([i * self.duplication + d]))] = val
self.total_reward = self.total_len / self.duplication
```
#### File: envs/doom/doom_predict_position.py
```python
import logging
import os
import numpy as np
from doom_py import DoomGame, Mode, Button, GameVariable, ScreenFormat, ScreenResolution, Loader
from gym import error, spaces
from gym.envs.doom import doom_env
logger = logging.getLogger(__name__)
class DoomPredictPositionEnv(doom_env.DoomEnv):
"""
------------ Training Mission 7 - Predict Position ------------
This map is designed to train you on using a rocket launcher.
It is a rectangular map with a monster on the opposite side. You need
to use your rocket launcher to kill it. The rocket adds a delay between
the moment it is fired and the moment it reaches the other side of the room.
You need to predict the position of the monster to kill it.
Allowed actions:
[0] - ATTACK - Shoot weapon - Values 0 or 1
[13] - TURN_RIGHT - Turn right - Values 0 or 1
[14] - TURN_LEFT - Turn left - Values 0 or 1
Note: see controls.md for details
Rewards:
+ 1 - Killing the monster
-0.0001 - Several times per second - Kill the monster faster!
Goal: 0.5 point
Kill the monster
Hint: Missile launcher takes longer to load. You must wait a good second after the game starts
before trying to fire it.
Ends when:
- Monster is dead
- Out of missile (you only have one)
- Timeout (20 seconds - 700 frames)
-----------------------------------------------------
"""
def __init__(self):
package_directory = os.path.dirname(os.path.abspath(__file__))
self.loader = Loader()
self.game = DoomGame()
self.game.load_config(os.path.join(package_directory, 'assets/predict_position.cfg'))
self.game.set_vizdoom_path(self.loader.get_vizdoom_path())
self.game.set_doom_game_path(self.loader.get_freedoom_path())
self.game.set_doom_scenario_path(self.loader.get_scenario_path('predict_position.wad'))
self.game.set_doom_map('map01')
self.screen_height = 480 # Must match .cfg file
self.screen_width = 640 # Must match .cfg file
# 3 allowed actions [0, 13, 14] (must match .cfg file)
self.action_space = spaces.HighLow(np.matrix([[0, 1, 0]] * 3))
self.observation_space = spaces.Box(low=0, high=255, shape=(self.screen_height, self.screen_width, 3))
self.game.set_window_visible(False)
self.viewer = None
self.game.init()
self.game.new_episode()
```
#### File: gym/monitoring/stats_recorder.py
```python
import json
import os
import time
from gym import error
from gym.utils import atomic_write
class StatsRecorder(object):
def __init__(self, directory, file_prefix):
self.initial_reset_timestamp = None
self.directory = directory
self.file_prefix = file_prefix
self.episode_lengths = []
self.episode_rewards = []
self.timestamps = []
self.steps = None
self.rewards = None
self.done = None
self.closed = False
filename = '{}.{}.stats.json'.format(self.file_prefix, os.getpid())
self.path = os.path.join(self.directory, filename)
def before_step(self, action):
assert not self.closed
if self.done:
raise error.ResetNeeded("Trying to step environment which is currently done. While the monitor is active, you cannot step beyond the end of an episode. Call 'env.reset()' to start the next episode.")
elif self.steps is None:
raise error.ResetNeeded("Trying to step an environment before reset. While the monitor is active, you must call 'env.reset()' before taking an initial step.")
def after_step(self, observation, reward, done, info):
self.steps += 1
self.rewards += reward
if done:
self.done = True
def before_reset(self):
assert not self.closed
self.done = False
if self.initial_reset_timestamp is None:
self.initial_reset_timestamp = time.time()
def after_reset(self, observation):
self.save_complete()
self.steps = 0
self.rewards = 0
def save_complete(self):
if self.steps is not None:
self.episode_lengths.append(self.steps)
self.episode_rewards.append(self.rewards)
self.timestamps.append(time.time())
def close(self):
self.save_complete()
self.flush()
self.closed = True
def flush(self):
if self.closed:
return
with atomic_write.atomic_write(self.path) as f:
json.dump({
'initial_reset_timestamp': self.initial_reset_timestamp,
'timestamps': self.timestamps,
'episode_lengths': self.episode_lengths,
'episode_rewards': self.episode_rewards,
}, f)
``` |
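A short, hypothetical driver showing the recorder's expected call sequence, using only the methods defined above (the directory and prefix are placeholders):

```python
# Hypothetical lifecycle sketch for StatsRecorder: reset, step until done, close.
import tempfile

directory = tempfile.mkdtemp()
recorder = StatsRecorder(directory, file_prefix='openaigym')

for episode in range(2):
    recorder.before_reset()
    recorder.after_reset(observation=None)
    for t in range(10):
        recorder.before_step(action=0)
        done = (t == 9)
        recorder.after_step(observation=None, reward=1.0, done=done, info={})
        if done:
            break

recorder.close()  # flushes episode lengths/rewards to <prefix>.<pid>.stats.json
```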
{
"source": "jgspires/chinese-postman-problem",
"score": 3
} |
#### File: src/entities/Edge.py
```python
class Edge:
def __init__(self, x, y, weight):
"""Instancia uma nova aresta, onde 'x' e 'y' são os índices dos vértices aos quais ela se conecta e 'weight' é o peso."""
self.vertices = [x, y]
self.weight = weight
```
#### File: src/entities/Graph.py
```python
from functools import total_ordering
from queue import PriorityQueue
from entities.Edge import Edge
import copy
class Graph:
def __init__(self, v_num):
"""Instancia um novo grafo, onde 'v_num' é o número de vértices."""
self.v_num = v_num
self.edges = []
self.possible_combinations = []
def add_edge(self, x, y, weight):
"""Adiciona uma aresta ao grafo, onde 'x' e 'y' são os vértices aos quais ela é ligada e 'weight' é o peso."""
if x < self.v_num and y < self.v_num:
self.edges.append(Edge(x, y, weight))
else:
raise IndexError(self.edges)
def remove_edge(self, x, y):
"""Remove uma aresta do grafo, onde 'x' e 'y' são os vértices aos quais ela é ligada."""
for edge in self.edges:
if x in edge.vertices and y in edge.vertices:
self.edges.remove(edge)
return
def dijkstra(self, v):
"""Executa o algoritmo de dijkstra a partir do vértice 'v'. Utiliza uma fila de prioridade para facilitar processamento.
Retorna uma lista com as menores distâncias entre 'v' e os outros vértices do grafo."""
# Instancia uma lista de tamanho referente à quantidade de vértices presentes no grafo
# e inicializa todos os seus membros para infinito
dijkstra_dist = {vertex: float("inf") for vertex in range(self.v_num)}
dijkstra_dist[v] = 0
visited = []
pq = PriorityQueue()
pq.put((0, v))
while not pq.empty():
(dist, current_vertex) = pq.get()
visited.append(current_vertex)
for neighbor in range(self.v_num):
distance = self.get_edge_weight(current_vertex, neighbor)
if distance != 0:
if neighbor not in visited:
old_cost = dijkstra_dist[neighbor]
new_cost = dijkstra_dist[current_vertex] + distance
if new_cost < old_cost:
pq.put((new_cost, neighbor))
dijkstra_dist[neighbor] = new_cost
return dijkstra_dist
def get_edge_weight(self, x, y):
"""Retorna o peso da aresta entre os vértices 'x' e 'y'. Ou 0 se não houver aresta entre eles."""
for i in range(len(self.edges)):
if x in self.edges[i].vertices and y in self.edges[i].vertices:
return self.edges[i].weight
return 0
def get_odd_vertices(self):
"""Retorna uma lista com todos os índices dos vértices de grau ímpar presentes no grafo."""
odd_vertices = []
for i in range(self.v_num):
degree = self.get_vertex_degree(i)
if degree % 2 != 0:
odd_vertices.append(i)
return odd_vertices
def get_vertex_degree(self, v_index):
"""Retorna o grau do vértice de índice 'v_index'."""
degree = 0
for i in range(len(self.edges)):
if v_index in self.edges[i].vertices:
degree += 1
return degree
def get_odd_pairs(self):
"""Retorna uma lista com todos os pares de vértices de grau ímpar presentes no grafo."""
pairs = []
odd_vertices = self.get_odd_vertices()
for v_index in range(len(odd_vertices) - 1):
pairs.append([])
for i in range(v_index + 1, len(odd_vertices)):
pairs[v_index].append([odd_vertices[v_index], odd_vertices[i]])
return pairs
def get_possible_combinations(self, pairs, done, final):
"""Retorna uma lista com todas as combinações de pares possíveis."""
if pairs == self.get_odd_pairs():
self.l = (len(pairs) + 1) // 2
if pairs[0][0][0] not in done:
done.append(pairs[0][0][0])
for i in pairs[0]:
f = final[:]
val = done[:]
if i[1] not in val:
f.append(i)
else:
continue
if len(f) == self.l:
self.possible_combinations.append(f)
return
else:
val.append(i[1])
self.get_possible_combinations(pairs[1:], val, f)
else:
self.get_possible_combinations(pairs[1:], done, final)
def chinese_postman(self, start_index):
"""Retorna a menor distância a ser percorrida para atravessar todas as arestas considerando um circuito."""
odd_vertices = self.get_odd_vertices()
if len(odd_vertices) != 0:
self.eulerify(self.get_shortest_path_distance()["combination"])
path = self.get_eulerian_path(start_index)
distance = self.get_sum_of_edge_weights()
return {"path": path, "distance": distance}
def get_eulerian_path(self, start_index):
"""Retorna os vértices de um circuito percorrido no grafo (que precisa estar eulerizado), partindo do vértice de índice "start_index"."""
graph = copy.deepcopy(self)
stack = []
eulerian_path = []
current_vertex = start_index
while len(graph.get_neighbours(current_vertex)) > 0 or len(stack) > 0:
current_neighbours = graph.get_neighbours(current_vertex)
if len(current_neighbours) > 0:
stack.append(current_vertex)
neighbour = current_neighbours[0]
graph.remove_edge(current_vertex, neighbour)
current_vertex = neighbour
else:
eulerian_path.append(current_vertex)
current_vertex = stack.pop()
eulerian_path.append(start_index)
return eulerian_path
def get_neighbours(self, v):
"""Retorna o índice dos vizinhos do vértice "v"."""
neighbours = []
for edge in self.edges:
if edge.vertices[0] == v or edge.vertices[1] == v:
if edge.vertices[0] != v and edge.vertices[0] not in neighbours:
neighbours.append(edge.vertices[0])
elif edge.vertices[1] not in neighbours:
neighbours.append(edge.vertices[1])
return neighbours
def get_shortest_path_distance(self):
"""Retorna um dictionary contendo a menor distância e a combinação de vértices ímpares que nela resultou."""
self.possible_combinations = []
self.get_possible_combinations(self.get_odd_pairs(), [], [])
combinations = self.possible_combinations
chosen_combo = None
shortest_distance = None
for combo in combinations:
total_distance = 0
for pair in combo:
dijkstra_distances = self.dijkstra(pair[0])
total_distance += dijkstra_distances[pair[1]]
if shortest_distance is None or shortest_distance > total_distance:
shortest_distance = total_distance
chosen_combo = combo
return {"combination": chosen_combo, "distance": shortest_distance}
def eulerify(self, combination):
"""Euleriza o grafo, duplicando as arestas entre os vértices especificados na lista de pares de vértices ímpares 'combination'."""
for pair in combination:
for edge in self.edges:
if edge.vertices[0] == pair[0] and edge.vertices[1] == pair[1]:
weight = edge.weight
self.add_edge(pair[0], pair[1], weight)
break
def get_sum_of_edge_weights(self):
"""Retorna a soma de todos os pesos das arestas presentes no grafo."""
total_weight = 0
for edge in self.edges:
total_weight += edge.weight
return total_weight
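# --- Illustrative usage sketch (not part of the original module) ---
# Assuming the companion Edge class stores its endpoints in `vertices` (in the
# order passed to add_edge) and its weight in `weight`, a small graph with two
# odd-degree vertices could be solved like this:
#
#     g = Graph(4)
#     g.add_edge(0, 1, 1)
#     g.add_edge(1, 2, 2)
#     g.add_edge(2, 3, 1)
#     g.add_edge(3, 0, 2)
#     g.add_edge(0, 2, 3)          # vertices 0 and 2 become odd
#     result = g.chinese_postman(0)
#     # result["distance"] should be 12: the original edge weights (9) plus the
#     # duplicated 0-2 edge (3) added by eulerify().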
``` |
{
"source": "JGSS-GabrielSousa/Heroes-and-Monsters-API",
"score": 4
} |
#### File: JGSS-GabrielSousa/Heroes-and-Monsters-API/jsonChecker.py
```python
import json
errorMsg = ": Don't exist in "
def checkFile(filepath, directory):
print(filepath)
print(directory)
print()
with open(filepath) as json_file:
data = json.load(json_file)
if(directory == "data/monster/"):
if "name" not in data:
print("(name)"+errorMsg+filepath)
``` |
{
"source": "jgstew/file_meta_data",
"score": 2
} |
#### File: jgstew/file_meta_data/file_meta_data.py
```python
import hachoir_core
import hachoir_core.cmd_line
import hachoir_core.error
import hachoir_core.i18n
import hachoir_core.tools
import hachoir_metadata
import hachoir_parser
import sys
# TODO: add TrID
# I changed this
# https://bitbucket.org/haypo/hachoir/wiki/hachoir-metadata/code
def getMetaData(filename):
text = ""
filename, realname = hachoir_core.cmd_line.unicodeFilename(filename), filename
parser = hachoir_parser.createParser(filename, realname)
if not parser:
print >>sys.stderr, "Unable to parse file"
return text
try:
metadata = hachoir_metadata.extractMetadata(parser)
    except hachoir_core.error.HachoirError, err:
print "Metadata extraction error: %s" % unicode(err)
metadata = None
if not metadata:
print >>sys.stderr, "Unable to extract metadata"
return text
text = metadata.exportPlaintext()
return text
if __name__ == "__main__":
filename = "../temp/diskcheckup.exe"
if 1 < len(sys.argv):
filename = sys.argv[1]
meta_data_text = getMetaData(filename)
#print meta_data_text
for line in meta_data_text:
print hachoir_core.tools.makePrintable(line, hachoir_core.i18n.getTerminalCharset() )
``` |
{
"source": "jgstew/generate-bes-from-template",
"score": 2
} |
#### File: src/generate_bes_from_template/action_createfile_from_file.py
```python
from __future__ import absolute_import
import os.path
# import pystache
import chevron # pylint: disable=import-error
PYSTACHE_TEMPLATE_CREATEFILE = """\
delete __createfile
// --- START of contents of {{{file_name}}} ------------
createfile until {{{token_end_of_file}}}
{{{file_contents}}}
{{{token_end_of_file}}}
// --- END of contents of {{{file_name}}} ------------
delete "{{{file_path_destination}}}"
copy __createfile "{{{file_path_destination}}}"
"""
def action_createfile_from_file(file_path, file_path_destination=None):
"""Read text file, turn into BigFix Action CreateFile Commnad"""
template_dict = {}
if not file_path_destination:
template_dict['file_path_destination'] = file_path
else:
template_dict['file_path_destination'] = file_path_destination
# default token for end of file if not included
if 'token_end_of_file' not in template_dict:
template_dict['token_end_of_file'] = '_END_OF_FILE_'
if 'file_name' not in template_dict:
template_dict['file_name'] = get_filename_from_pathname(template_dict['file_path_destination']) # pylint: disable=line-too-long
# https://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
try:
with open(file_path, "rt") as file_read:
# need to escape `{` for BigFix CreateFile command
template_dict['file_contents'] = file_read.read().replace('{', '{{')
except UnicodeDecodeError:
return "ERROR: UnicodeDecodeError - bad file"
# make sure file contents does not contain END_OF_FILE token
while template_dict['token_end_of_file'] in template_dict['file_contents']:
template_dict['token_end_of_file'] = (
"_" + template_dict['token_end_of_file'] + "_"
)
return chevron.render(PYSTACHE_TEMPLATE_CREATEFILE, template_dict)
def get_filename_from_pathname(pathname):
"""splits the filename from end of string after path separators including }"""
# https://github.com/jgstew/tools/blob/master/Python/get_filename_from_pathname.py
return pathname.replace('\\', '/').replace('}', '/').split('/')[-1]
def main():
"""Only called if this script is run directly"""
# use this script itself as the demo createfile
output_string = action_createfile_from_file(os.path.abspath(__file__))
print(output_string)
return output_string
# if called directly, then run this example:
if __name__ == '__main__':
main()
```
#### File: generate-bes-from-template/tests/tests.py
```python
import argparse
import os.path
import sys
# don't create bytecode for tests because it is cluttery in python2
sys.dont_write_bytecode = True
# check for --test_pip arg
parser = argparse.ArgumentParser()
parser.add_argument(
"--test_pip", help="to test package installed with pip", action="store_true"
)
args = parser.parse_args()
if not args.test_pip:
# add module folder to import paths for testing local src
sys.path.append(
os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "src")
)
# reverse the order so we make sure to get the local src module
sys.path.reverse()
from generate_bes_from_template import *
tests_count = 0 # pylint: disable=invalid-name
# print(action_prefetch_from_template.__file__)
# make sure we are testing the right place:
if args.test_pip:
# this will false positive on windows
assert "/src/" not in action_prefetch_from_template.__file__
else:
# check for only 'src' so it will work on windows and non-windows
assert "src" in action_prefetch_from_template.__file__
def test_partials(partials_path="."):
"""test mustache template partials"""
print("test_partials()")
script_folder = os.path.dirname(os.path.abspath(__file__))
template_file_path = os.path.join(script_folder, "TemplateExample.mustache")
result = generate_bes_from_template.generate_content_from_template( # pylint: disable=unexpected-keyword-arg
{}, template_file_path, partials_path=partials_path
)
return result
# pylint: disable=line-too-long
assert str(action_prefetch_from_template.main()) == (
"prefetch LGPO.zip sha1:0c74dac83aed569607aaa6df152206c709eef769 size:815660 https://download.microsoft.com/download/8/5/C/85C25433-A1B0-4FFA-9429-7E023E7DA8D8/LGPO.zip sha256:6ffb6416366652993c992280e29faea3507b5b5aa661c33ba1af31f48acea9c4"
)
tests_count += 1
assert str(generate_bes_from_template.main()).startswith(
"""<?xml version="1.0" encoding="UTF-8"?>
<BES xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="BES.xsd">
<Task>
<Title>Example Generated From Template</Title>
<Description><![CDATA[ This Task was generated automatically! ]]></Description>
<Relevance><![CDATA[ Comment: Always False */ FALSE /* This example doesn't do anything, so it is always false. ]]></Relevance>
<Relevance><![CDATA[ TRUE ]]></Relevance>
<Category></Category>
<DownloadSize>9999</DownloadSize>
<Source>Internal</Source>
<SourceID><![CDATA[JGStew]]></SourceID>
"""
)
tests_count += 1
assert str(action_createfile_from_file.main()).startswith(
"""delete __createfile
// --- START of contents of action_createfile_from_file.py ------------
createfile until __END_OF_FILE__
#!/usr/local/python
"""
)
tests_count += 1
if not args.test_pip:
script_folder_path = os.path.dirname(os.path.abspath(__file__))
# print(test_partials(script_folder_path))
assert str(test_partials(script_folder_path)).startswith("Hello, World!")
tests_count += 1
assert str(test_partials(script_folder_path)).startswith(
"Hello, World! You can load partials from a folder!"
)
tests_count += 1
# tests pass, return 0:
print("-------------------------------------")
print("Success: %d Tests pass" % tests_count)
print("")
sys.exit(0)
``` |
{
"source": "jgstew/generate-prefetch",
"score": 3
} |
#### File: src/bigfix_prefetch/__main__.py
```python
import argparse
import os
try:
from . import prefetch_from_file
except ImportError:
import prefetch_from_file
try:
from . import prefetch_from_url
except ImportError:
import prefetch_from_url
def validate_filepath_or_url(filepath_or_url=""):
"""validate string is filepath or URL"""
if ("://" in filepath_or_url) or (
os.path.isfile(filepath_or_url) and os.access(filepath_or_url, os.R_OK)
):
return filepath_or_url
else:
raise ValueError(filepath_or_url)
def build_argument_parser():
"""Build and return the argument parser."""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"filepath_or_url",
nargs="?",
type=validate_filepath_or_url,
default="bigfix_prefetch/__init__.py",
help="Path to file or URL to create prefetch for.",
)
parser.add_argument(
"--prefetch-block",
default=False,
action="store_true",
help="generate a prefetch block instead of prefetch statement",
)
parser.add_argument(
"--override-url",
default="http://localhost/unknown",
help="URL to use in prefetch statement if providing file path",
)
return parser
def main(argv=None):
"""execution starts here"""
# print("bigfix_prefetch __main__ main()")
# Parse command line arguments.
argparser = build_argument_parser()
args = argparser.parse_args(argv)
try:
prefetch_result = prefetch_from_file.file_to_prefetch(
args.filepath_or_url, args.override_url
)
print(prefetch_result)
return prefetch_result
except FileNotFoundError:
prefetch_result = prefetch_from_url.url_to_prefetch(args.filepath_or_url)
print(prefetch_result)
return prefetch_result
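# Illustrative invocations (the URL, file path, and hash placeholders below are
# examples, not captured output):
#
#   python -m bigfix_prefetch https://example.com/tool.zip
#   # -> prefetch tool.zip sha1:<...> size:<...> https://example.com/tool.zip sha256:<...>
#
#   python -m bigfix_prefetch ./local_file.bin --override-url http://localhost/unknown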
main()
``` |
{
"source": "jgstew/jgstew-recipes",
"score": 3
} |
#### File: jgstew-recipes/SharedProcessors/FileHasher.py
```python
from hashlib import md5, sha1, sha256
from autopkglib import ( # pylint: disable=import-error,unused-import
Processor,
ProcessorError,
)
__all__ = ["FileHasher"]
class FileHasher(Processor): # pylint: disable=invalid-name
"""Hashes file at pathname and returns hash metadata."""
description = __doc__
input_variables = {
"file_path": {
"required": False,
"description": ("Path to hash. Defaults to %pathname%."),
}
}
output_variables = {
"filehasher_sha1": {"description": "The input file SHA1"},
"filehasher_sha256": {"description": "The input file SHA256"},
"filehasher_md5": {"description": "The input file MD5"},
"filehasher_size": {"description": "The input file size"},
}
__doc__ = description
def hash(self, file_path):
"""
run hashes on file
Keyword arguments:
file_path -- the file to hash
"""
# https://github.com/jgstew/bigfix_prefetch/blob/master/url_to_prefetch.py
hashes = sha1(), sha256(), md5()
chunksize = max(409600, max(a_hash.block_size for a_hash in hashes))
size = 0
        with open(file_path, "rb") as file_stream:
            while True:
                chunk = file_stream.read(chunksize)
                if not chunk:
                    break
                # get size of chunk and add to existing size
                size += len(chunk)
                # add chunk to hash computations
                for a_hash in hashes:
                    a_hash.update(chunk)
self.output(
"File MD5 = {filehasher_md5}".format(
filehasher_md5=hashes[2].hexdigest()
),
1,
)
self.output(
"File SHA1 = {filehasher_sha1}".format(
filehasher_sha1=hashes[0].hexdigest()
),
1,
)
self.output(
"File SHA256 = {filehasher_sha256}".format(
filehasher_sha256=hashes[1].hexdigest()
),
1,
)
self.output("File Size = {filehasher_size}".format(filehasher_size=size), 1)
self.env["filehasher_sha1"] = hashes[0].hexdigest()
self.env["filehasher_sha256"] = hashes[1].hexdigest()
self.env["filehasher_md5"] = hashes[2].hexdigest()
self.env["filehasher_size"] = str(size)
def main(self):
"""Execution starts here"""
# I think this gets the pathname value if `file_path` is not specified?
file_path = self.env.get("file_path", self.env.get("pathname"))
self.hash(file_path)
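# Rough sketch of how this processor might be referenced from an AutoPkg recipe;
# the shared-processor identifier and argument value are assumptions, not taken
# from this repository:
#
#   - Processor: com.github.jgstew.SharedProcessors/FileHasher
#     Arguments:
#       file_path: "%pathname%"
#
# Afterwards %filehasher_sha256%, %filehasher_sha1%, %filehasher_md5% and
# %filehasher_size% are available to later recipe steps.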
if __name__ == "__main__":
PROCESSOR = FileHasher()
PROCESSOR.execute_shell()
``` |
{
"source": "jgstew/tools",
"score": 4
} |
#### File: tools/Python/yyyymmddhhmmss.py
```python
import time
# http://stackoverflow.com/questions/2487109/python-date-time-formatting
# https://docs.python.org/2/library/datetime.html
def yyyymmddhhmmss(before="", after="", now=None):
    # resolve the timestamp at call time so repeated calls are not stuck with
    # the time the module was imported
    if now is None:
        now = time.localtime()
    return before + time.strftime("%Y%m%d%H%M%S", now) + after
def yyyymmddhhmm(before="", after="", now=None):
    if now is None:
        now = time.localtime()
    return before + time.strftime("%Y%m%d%H%M", now) + after
if __name__ == "__main__":
    print(yyyymmddhhmmss())
``` |
{
"source": "jgsu-acm/problems",
"score": 2
} |
#### File: src/creator/creator.py
```python
import shutil
from collections import defaultdict
from pathlib import Path
from src.formatter import Formatter
from src.problem import Problem
PATH_GENTP_CPP = Path("template/gen_tp.cpp")
PATH_GENTP_PY = Path("template/gen_tp.py")
PATH_STDTP = Path("template/std_tp.cpp")
class Creator(Problem):
sections = {
"background": "题目背景",
"description": "题目描述",
"input_format": "输入格式",
"output_format": "输出格式",
"samples": "输入输出样例",
"hint": "说明/提示"
}
def __init__(self, pid: str, spid: str, is_sa: bool, nogen: bool, nostd: bool, use_python: bool):
super().__init__(pid)
self._spid, self.__is_sa, self.__nogen, self.__nostd, self.__use_python = \
spid, is_sa, nogen, nostd, use_python
self._content = defaultdict(str)
self._content["samples"] = "```input1\n\n```\n\n```output1\n\n```"
self.__path_gen = self._path_gen_py if use_python else self._path_gen_cpp
self.__path_gentp = PATH_GENTP_PY if use_python else PATH_GENTP_CPP
def _get(self):
pass
def __write(self):
psecs = []
if self._content["background"]:
psecs.append("background")
if self.__is_sa:
psecs.extend(["description", "hint"])
else:
psecs.extend(["description", "input_format", "output_format", "samples", "hint"])
with open(self._path_md, "w", encoding="UTF-8") as fp:
if psecs:
for sec in psecs[:-1]:
fp.write(f"# {self.sections[sec]}\n")
if self._content[sec]:
fp.write(f"{self._content[sec]}\n")
fp.write(f"# {self.sections[psecs[-1]]}\n")
if self._content[psecs[-1]]:
fp.write(f"{self._content[psecs[-1]]}")
def create(self):
if self._path_md.exists():
if input(f"题目 {self._pid} 已经存在了,继续(Y)还是跳过(Not Y)").lower() != 'y':
self._logger.info("跳过")
return self
self._path.mkdir(parents=True, exist_ok=True)
self._logger.info("创建题面")
self._get()
self.__write()
Formatter(self._pid).format()
if self.__is_sa:
self._logger.info("创建配置文件")
with open(self._path / "config.yaml", "w", encoding="UTF-8") as fp:
fp.write('type: objective\noutputs:\n - ["", 100]\n')
return self
if not self.__nogen:
self._logger.info("创建生成器")
shutil.copy(self.__path_gentp, self.__path_gen)
if not self.__nostd:
self._logger.info("创建标程")
shutil.copy(PATH_STDTP, self._path_std)
return self
```
#### File: problems/src/formatter.py
```python
import re
import string
from typing import Callable
from src.problem import Problem
RE_CHINESE_CHAR = r"[\u4e00-\u9fa5]"
RE_CHINESE_PUNC = r"[\u3002\uff1b\uff0c\uff1a\u201c\u201d\uff08\uff09\u3001\uff1f\u300a\u300b]"
RE_CHINESE_CHAR_OR_PUNC = r"[\u3002\uff1b\uff0c\uff1a\u201c\u201d\uff08\uff09\u3001\uff1f\u300a\u300b]|[\u4e00-\u9fa5]"
RE_SEPARATED_FROM_CHN = r"[A-Za-z0-9$`]|!?\[.*?]\(.*?\)"
FORMAT_RULES: list[Callable[[str], str]] = [
    # removal rules
lambda s: s.rstrip(),
lambda s: re.sub(rf"(?<={RE_CHINESE_CHAR_OR_PUNC}) (?={RE_CHINESE_CHAR_OR_PUNC})", "", s), # 删除两汉字之间的空格
lambda s: re.sub(rf" (?={RE_CHINESE_PUNC})", "", s),
lambda s: re.sub(rf"(?<={RE_CHINESE_PUNC}) ", "", s),
    # replacement rules
lambda s: re.sub(r"^- ", "* ", s), # - -> *
lambda s: s.replace('≤', "\\leq"), # ≤ -> \leq
lambda s: s.replace('≥', "\\geq"), # ≥ -> \geq
    lambda s: s.replace('≠', "\\neq"), # ≠ -> \neq
lambda s: re.sub(r"\\le\s", lambda m: "\\leq ", s), # \le -> \leq
lambda s: re.sub(r"\\ge\s", lambda m: "\\geq ", s), # \ge -> \geq
lambda s: re.sub(r"\\ne\s", lambda m: "\\neq ", s), # \ne -> \neq
lambda s: re.sub(r"\d{2,}(?=\^)", lambda m: f"{{{m.group()}}}", s), # 10^5 -> {10}^5
lambda s: re.sub(rf"(?<={RE_CHINESE_CHAR}),(?={RE_CHINESE_CHAR})", ",", s), # 两汉字间半角逗号 -> 全角
lambda s: re.sub(rf"(?<={RE_CHINESE_CHAR}):$", ":", s), # 汉字后半角冒号 -> 全角冒号
# 增加
lambda s: re.sub(rf"({RE_SEPARATED_FROM_CHN})({RE_CHINESE_CHAR})", lambda m: ' '.join(m.groups()), s),
lambda s: re.sub(rf"({RE_CHINESE_CHAR})({RE_SEPARATED_FROM_CHN})", lambda m: ' '.join(m.groups()), s), # 增加空格
lambda s: re.sub(r"([A-Za-z0-9])\\", lambda m: f"{m.group(1)} \\", s), # 增加公式中反斜杠前空格
]
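# A quick illustration of two of the rules above on a hypothetical input line:
#   "$n \le 10^5$"  ->  "$n \leq {10}^5$"
# "\le " is normalized to "\leq ", and a base of two or more digits before "^"
# is wrapped in braces; the CJK spacing/punctuation rules only fire next to
# Chinese characters, so this line is otherwise untouched.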
class Formatter(Problem):
def __init__(self, pid: str):
super().__init__(pid)
@staticmethod
def __format_line(line: str):
for rule in FORMAT_RULES:
line = rule(line)
return line
def format(self):
self._logger.info("格式化")
lines = []
with open(self._path_md, "r+", encoding="UTF-8") as fp:
status = None
for line in fp.readlines():
line = self.__format_line(line)
if status:
if status == "code" and line == "```":
status = None
lines[-1] += "\n```"
elif status == "environment" and not line:
status = None
elif status == "html" and re.match(r"</.*>", line):
status = None
lines[-1] += '\n' + line
elif status == "formula" and (line.count("$$") == 1 or line.count("$") % 2):
status = None
lines[-1] += '\n' + line
else:
lines[-1] += '\n' + line
else:
if re.match(r"```.*", line):
status = "code"
lines.append(line)
elif line and line[0] in ['>', '*', '-', *string.digits, '|']:
status = "environment"
lines.append(line)
elif re.match(r"<.*>", line):
status = "html"
lines.append(line)
elif line.count("$$") == 1 or line.count("$") % 2:
status = "formula"
lines.append(line)
elif line:
lines.append(line)
with open(self._path_md, "w", encoding="UTF-8") as fp:
for line in lines[:-1]:
fp.write(f"{line}\n\n")
fp.write(f"{lines[-1]}\n")
``` |
{
"source": "JGSuharnoko/hls4ml",
"score": 3
} |
#### File: converters/PyTorch/PyTorchPooling.py
```python
import math
from hls4ml.converters.pytorch_to_hls import pytorch_handler
from hls4ml.converters.utils import * #Get parse data format and the two padding functions
#Assuming version 0.5.0 not in master branch yet
#In Progress, do NOT commit to master yet
pooling_layers = ['MaxPool1D', 'MaxPool2D', 'AvgPool1D', 'AvgPool2D']
@pytorch_handler(*pooling_layers) #Iterate through the 4 types of pooling layers
def parse_pooling_layer(pytorch_layer, input_names, input_shapes, data_reader, config):
""""
Suppose model has attribute pool1, model.pool1 = maxpool2D(a,b,c)
pytorch_layer = model_name.pool1, model_name.pool2, etc
"""
layer = {}
layer['name'] = layer_name # i.e. model.pool would give pool
layer['class_name'] = pytorch_layer['class_name'] # i.e. maxpool2D, maxpool1d, etc
layer['data_format'] = 'channels_first' #By Pytorch default, cannot change
### Unsure about what this does ###
layer['inputs'] = input_names
#Check if 1D or 2D. 1, 2 is the second to last character
if int(layer['class_name'][-2]) == 1:
'''Compute number of channels'''
        (layer['n_in'], layer['n_filt']) = parse_data_format(input_shapes[0], 'channels_first')
#prepare padding input
layer['pool_width'] = pytorch_layer.kernel_size
layer['stride_width'] = pytorch_layer.stride
#pytorch_layer.stride[0] from another document
layer['padding'] = pytorch_layer.padding[0]
'''Compute padding 1d'''
#hls4ml layers after padding
        (
            layer['n_out'],
            layer['pad_left'],
            layer['pad_right'],
        ) = compute_padding_1d(
            layer['padding'], layer['n_in'],
            layer['stride_width'], layer['pool_width']
        )
#Assuming only 'channels_first' is available
output_shape=[input_shapes[0][0], layer['n_filt'], layer['n_out']]
elif int(layer['class_name'][-2]) == 2:
'''Compute number of channels'''
        (layer['in_height'], layer['in_width'], layer['n_filt']) = parse_data_format(input_shapes[0], 'channels_first')
layer['stride_height'] = pytorch_layer.stride[0]
layer['stride_width'] = pytorch_layer.stride[1]
layer['pool_height'] = pytorch_layer.kernel_size[0]
layer['pool_width'] = pytorch_layer.kernel_size[1]
#Side note, it seems that pool width and height is the same
layer['padding'] = pytorch_layer.padding[0]
#pytorch_layer.padding is an object with attributes lower()--> Should output 'same' or 'valid' or otherwise unsupported
#hls4ml layers after padding
        (
            layer['out_height'], layer['out_width'],
            layer['pad_top'], layer['pad_bottom'],
            layer['pad_left'], layer['pad_right'],
        ) = compute_padding_2d(
            layer['padding'],
            layer['in_height'], layer['in_width'],
            layer['stride_height'], layer['stride_width'],
            layer['pool_height'], layer['pool_width']
        )
#Good
#Only channels_first is available in pytorch. cannot change
output_shape=[input_shapes[0][0], layer['n_filt'], layer['out_height'], layer['out_width']]
#Return parsed layer and output shape
return layer, output_shape
``` |
{
"source": "JGSuw/Firmware",
"score": 4
} |
#### File: Tools/sdlog2/flightLogParse.py
```python
from pylab import *
import os, sys
def usage():
how_to_use = """
    About: flightLogParse produces plots of vehicle flight data from CSV log files.
    How to use flightLogParse.py:
python flightLogParse.py <csv log file>
Input: <csv log file> path to the comma separated value log file to analyze
"""
print(how_to_use)
if __name__ == "__main__":
# Check for input file
    if len(sys.argv) < 2:
        usage()
        sys.exit(1)
    print(sys.argv[1])
# Open the file for reading
infile = open(sys.argv[1], 'r')
lines = []
for line in infile:
lines.append(line)
infile.close()
# Get a list of headings and print them
headings = lines[0].split(',')
print(headings)
# Split the lines into parts
split_lines = []
for line in lines[1:]:
split_lines.append(line.split(','))
# Get Vehicle attitude data
indices = [headings.index('ATT_Roll'),headings.index('ATT_Pitch'), headings.index('ATT_Yaw'), headings.index('TIME_StartTime')]
attitude = [[],[],[]]
times = []
for i in range(0,len(split_lines)):
parts = split_lines[i]
try:
roll = float(parts[indices[0]])
pitch = float(parts[indices[1]])
yaw = float(parts[indices[2]])
time = float(parts[indices[3]]) / 1E6
except ValueError: continue
attitude[0].append(roll)
attitude[1].append(pitch)
attitude[2].append(yaw)
times.append(time)
# Get attitude rates
indices = [headings.index('ATT_RollRate'),headings.index('ATT_PitchRate'),headings.index('ATT_YawRate')]
attitude_rate = [[],[],[]]
for i in range(0,len(split_lines)):
parts = split_lines[i]
try:
roll = float(parts[indices[0]])
pitch = float(parts[indices[1]])
yaw = float(parts[indices[2]])
except ValueError: continue
attitude_rate[0].append(roll)
attitude_rate[1].append(pitch)
attitude_rate[2].append(yaw)
# Get local position data
indices = [headings.index('LPOS_X'), headings.index('LPOS_Y'), headings.index('LPOS_Z')]
lpos = [[],[],[]]
for i in range(0,len(split_lines)):
parts = split_lines[i]
try:
x = float(parts[indices[0]])
y = float(parts[indices[1]])
z = float(parts[indices[2]])
except ValueError: continue
lpos[0].append(x)
lpos[1].append(y)
lpos[2].append(z)
# Get velocity data
indices = [headings.index('LPOS_VX'), headings.index('LPOS_VY'), headings.index('LPOS_VZ')]
vel = [[],[],[]]
for i in range(0,len(split_lines)):
parts = split_lines[i]
try:
vx = float(parts[indices[0]])
vy = float(parts[indices[1]])
vz = float(parts[indices[2]])
except ValueError: continue
vel[0].append(vx)
vel[1].append(vy)
vel[2].append(vz)
    # Get acceleration data
indices = [headings.index('LPSP_AX'),headings.index('LPSP_AY'),headings.index('LPSP_AZ')]
accel = [[],[],[]]
for i in range(0,len(split_lines)):
parts = split_lines[i]
try:
ax = float(parts[indices[0]])
ay = float(parts[indices[1]])
az = float(parts[indices[2]])
except ValueError: continue
accel[0].append(ax)
accel[1].append(ay)
accel[2].append(az)
    # Get distance sensor data
indices = [headings.index('DIST_Distance'), headings.index('LPOS_Dist')]
distances = [[],[]]
for i in range(0,len(split_lines)):
parts = split_lines[i]
try:
distance = float(parts[indices[0]])
lpos_distance = float(parts[indices[1]])
except ValueError: continue
distances[0].append(distance)
distances[1].append(lpos_distance)
# Get IMU data
# Get optical flow data
indices = [headings.index('FLOW_RX'),headings.index('FLOW_RY'),headings.index('FLOW_RZ'), headings.index('FLOW_Dist'), headings.index('FLOW_Qlty')]
flow_data = [[],[],[],[],[]]
for i in range(0,len(split_lines)):
parts = split_lines[i]
try:
rx = float(parts[indices[0]])
ry = float(parts[indices[1]])
rz = float(parts[indices[2]])
d = float(parts[indices[3]])
q = float(parts[indices[4]])
except ValueError: continue
flow_data[0].append(rx)
flow_data[1].append(ry)
flow_data[2].append(rz)
flow_data[3].append(d)
flow_data[4].append(q)
# Get Wind Estimate Data
indices = [headings.index('WIND_X'), headings.index('WIND_Y')]
wind_data = [[],[]]
for i in range(0,len(split_lines)):
parts = split_lines[i]
try:
wind_x = float(parts[indices[0]])
wind_y = float(parts[indices[1]])
except ValueError: continue
wind_data[0].append(wind_x)
wind_data[1].append(wind_y)
# Figure 1: local position, velocity, and acceleration estimates
figure(1)
subplot(311)
plot(times,accel[0])
plot(times,accel[1])
plot(times,accel[2])
    ylabel('Acceleration (meters per second squared)')
xlabel('Time (seconds)')
legend(['LPSP_AX', 'LPSP_AY', 'LPSP_AZ'])
    # Plot the velocity estimates against time
subplot(312)
plot(times,vel[0])
plot(times,vel[1])
plot(times,vel[2])
ylabel('Velocity (meters per second)')
xlabel('Time (seconds)')
legend(['LPOS_VX','LPOS_VY', 'LPOS_VZ'])
    # Plot the local position estimates against time
subplot(313)
plot(times,lpos[0])
plot(times,lpos[1])
plot(times,lpos[2])
ylabel('Distance (meters)')
xlabel('Time (seconds)')
legend(['LPOS_X','LPOS_Y','LPOS_Z'])
# Differentiate ultrasound sensor to get a feel for the spikes
d = flow_data[3]
diff = []
for i in range(len(d)-1):
diff.append(d[i+1]-d[i])
    # Attitudes and attitude rates
figure(2)
subplot(311)
plot(times,attitude[0])
plot(times,attitude[1])
plot(times,attitude[2])
ylabel('Angle (radians)')
xlabel('Time (seconds)')
legend(['roll','pitch','yaw'])
subplot(312)
plot(times,attitude_rate[0])
plot(times,attitude_rate[1])
plot(times,attitude_rate[2])
ylabel('Angle rate (radians / second)')
xlabel('Time (seconds)')
legend(['roll rate','pitch rate','yaw rate'])
# Optical Flow Data
figure(3)
subplot(211)
plot(times,flow_data[0])
plot(times,flow_data[1])
plot(times,flow_data[2])
ylabel('Flow integral')
xlabel('Time (seconds)')
legend(['RX','RY','RZ'])
subplot(212)
plot(times,flow_data[3])
plot(times,flow_data[4])
xlabel('Time (seconds)')
legend(['D', 'Q'])
# Miscellaneous Figure
figure(4)
# Spike analysis in ultrasound data
subplot(311)
diff = asarray(diff)
dist = asarray(flow_data[3][:-1])
plot(fabs(diff)/dist)
plot(dist)
ylabel('Distance (meters)')
xlabel('Time (seconds)')
legend(['Normalized difference', 'Measured distance'])
    # Plot distance sensor data against the distance estimate
subplot(312)
plot(times,distances[0])
plot(times,distances[1])
ylabel('Distance (meters)')
xlabel('Time (seconds)')
legend(['Distance Sensor','Distance Estimate'])
    # Plot wind estimate data against time
subplot(313)
plot(times, wind_data[0])
plot(times, wind_data[1])
ylabel('?')
xlabel('Time (seconds)')
legend(['X Wind Component', 'Y Wind Component'])
show()
``` |
{
"source": "jgthomas/borg-import",
"score": 3
} |
#### File: src/borg_import/main.py
```python
import argparse
import logging
import shutil
import shlex
import subprocess
import sys
import textwrap
from pathlib import Path
from .rsnapshots import get_snapshots
from .rsynchl import get_rsyncsnapshots
log = logging.getLogger(__name__)
def borg_import(args, archive_name, path, timestamp=None):
borg_cmdline = ['borg', 'create']
if timestamp:
borg_cmdline += '--timestamp', timestamp.isoformat()
if args.create_options:
borg_cmdline += args.create_options.split()
repository = args.repository.resolve()
location = '{}::{}'.format(repository, archive_name)
borg_cmdline.append(location)
borg_cmdline.append('.')
log.debug('Borg command line: %r', borg_cmdline)
log.debug('Borg working directory: %s', path)
try:
subprocess.check_call(borg_cmdline, cwd=str(path))
except subprocess.CalledProcessError as cpe:
if cpe.returncode != 1:
raise
log.debug('Borg exited with a warning (being quiet about it since Borg spoke already)')
def list_borg_archives(args):
borg_cmdline = ['borg', 'list', '--short']
repository = args.repository.resolve()
borg_cmdline.append(str(repository))
return subprocess.check_output(borg_cmdline).decode().splitlines()
class Importer:
name = 'name-of-command'
description = 'descriptive description describing this importer'
epilog = 'epilog-y epilog epiloging about this importer (docstringy for multiple lines)'
def populate_parser(self, parser):
"""
Add arguments to argparse *parser*.
Specify the callback for the importer like so::
parser.set_defaults(function=self.import_something)
Then you can define *import_something* like this in the class::
def import_something(self, args):
... # do something!
"""
class rsnapshotImporter(Importer):
name = 'rsnapshot'
description = 'import rsnapshot backups'
epilog = """
Imports from rsnapshot backup sets by renaming each snapshot
to a common name independent of the snapshot (and the backup set),
which allows the Borg files cache to work with maximum efficiency.
The directory is called "borg-import-dir" inside the rsnapshot root,
and borg-import will note which snapshot is currently located there
in a file called "borg-import-dir.snapshot" besides it, in case
things go wrong.
Otherwise nothing in the rsnapshot root is modified, and neither
are the contents of the snapshots.
"""
def populate_parser(self, parser):
parser.add_argument('--backup-set', help='Only consider given backup set (can be given multiple times).',
action='append', dest='backup_sets')
parser.add_argument('rsnapshot_root', metavar='RSNAPSHOT_ROOT',
help='Path to rsnapshot root directory', type=Path)
# TODO: support the full wealth of borg possibilities
parser.add_argument('repository', metavar='BORG_REPOSITORY', help='Borg repository', type=Path)
parser.set_defaults(function=self.import_rsnapshot)
def import_rsnapshot(self, args):
existing_archives = list_borg_archives(args)
import_path = args.rsnapshot_root / 'borg-import-dir'
import_journal = args.rsnapshot_root / 'borg-import-dir.snapshot'
if import_path.exists():
print('{} exists. Cannot continue.'.format(import_path))
return 1
for rsnapshot in get_snapshots(args.rsnapshot_root):
timestamp = rsnapshot['timestamp'].replace(microsecond=0)
snapshot_original_path = rsnapshot['path']
name = rsnapshot['name']
archive_name = args.prefix + name
if args.backup_sets and rsnapshot['backup_set'] not in args.backup_sets:
print('Skipping (backup set is not selected):', name)
continue
if archive_name in existing_archives:
print('Skipping (already exists in repository):', name)
continue
print('Importing {} (timestamp {}) '.format(name, timestamp), end='')
if archive_name != name:
print('as', archive_name)
else:
print()
log.debug(' Moving {} -> {}'.format(rsnapshot['path'], import_path))
# We move the snapshots to import_path so that the files cache in Borg can work effectively.
with import_journal.open('w') as fd:
fd.write('Current snapshot: %s\n' % rsnapshot['name'])
fd.write('Original path: %s\n' % snapshot_original_path)
snapshot_original_path.rename(import_path)
try:
borg_import(args, archive_name, import_path, timestamp=timestamp)
finally:
log.debug(' Moving {} -> {}'.format(import_path, rsnapshot['path']))
import_path.rename(snapshot_original_path)
import_journal.unlink()
class rsynchlImporter(Importer):
name = 'rsynchl'
description = 'import rsync+hardlink backups'
epilog = """
Imports from rsync backup sets by renaming each snapshot to a common
name independent of the snapshot, which allows the Borg files cache
to work with maximum efficiency.
An archive will be created for each folder in the rsync_root. The
archive name will be the folder name and the archive timestamp will
be the folder mtime. If the borg repository already contains an
archive with the folder name, that folder will be skipped.
The directory is called "borg-import-dir" inside the specified root,
and borg-import will note which snapshot is currently located there
in a file called "borg-import-dir.snapshot" besides it, in case
things go wrong.
Otherwise nothing in the rsync root is modified, and neither
are the contents of the snapshots.
"""
def populate_parser(self, parser):
parser.add_argument('rsync_root', metavar='RSYNC_ROOT',
help='Path to root directory', type=Path)
# TODO: support the full wealth of borg possibilities
parser.add_argument('repository', metavar='BORG_REPOSITORY', help='Borg repository', type=Path)
parser.set_defaults(function=self.import_rsynchl)
def import_rsynchl(self, args):
existing_archives = list_borg_archives(args)
import_path = args.rsync_root / 'borg-import-dir'
import_journal = args.rsync_root / 'borg-import-dir.snapshot'
if import_path.exists():
print('{} exists. Cannot continue.'.format(import_path))
return 1
for rsnapshot in get_rsyncsnapshots(args.rsync_root):
timestamp = rsnapshot['timestamp'].replace(microsecond=0)
snapshot_original_path = rsnapshot['path']
name = rsnapshot['name']
archive_name = args.prefix + name
if archive_name in existing_archives:
print('Skipping (already exists in repository):', name)
continue
print('Importing {} (timestamp {}) '.format(name, timestamp), end='')
if archive_name != name:
print('as', archive_name)
else:
print()
log.debug(' Moving {} -> {}'.format(rsnapshot['path'], import_path))
# We move the snapshots to import_path so that the files cache in Borg can work effectively.
with import_journal.open('w') as fd:
fd.write('Current snapshot: %s\n' % rsnapshot['name'])
fd.write('Original path: %s\n' % snapshot_original_path)
snapshot_original_path.rename(import_path)
try:
borg_import(args, archive_name, import_path, timestamp=timestamp)
finally:
log.debug(' Moving {} -> {}'.format(import_path, rsnapshot['path']))
import_path.rename(snapshot_original_path)
import_journal.unlink()
def build_parser():
common_parser = argparse.ArgumentParser(add_help=False)
common_group = common_parser.add_argument_group('Common options')
common_group.add_argument("--create-options", "-o",
help="Additional borg create options "
"(note: Use -o=\"--foo --bar\" syntax to avoid parser confusion).")
common_group.add_argument("--prefix", help="Add prefix to imported archive names", default='')
common_group.add_argument("--debug", action='store_const', dest='log_level', const=logging.DEBUG,
help='Display debug/trace messages.')
parser = argparse.ArgumentParser(description='Import existing backups from other software to Borg')
parser.set_defaults(log_level=logging.WARNING)
subparsers = parser.add_subparsers()
for importer_class in Importer.__subclasses__():
importer = importer_class()
subparser = subparsers.add_parser(importer.name,
help=importer.description, epilog=textwrap.dedent(importer.epilog),
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[common_parser])
importer.populate_parser(subparser)
return parser
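# Illustrative command lines (all paths are placeholders), assuming the package's
# console entry point is named "borg-import":
#
#   borg-import rsnapshot /backups/rsnapshot /path/to/borg-repo --prefix imported-
#   borg-import rsnapshot --backup-set daily /backups/rsnapshot /path/to/borg-repo
#   borg-import rsynchl /backups/rsync-snapshots /path/to/borg-repo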
def main():
if not shutil.which('borg'):
print('The \'borg\' command can\'t be found in the PATH. Please correctly install borgbackup first.')
print('See instructions at https://borgbackup.readthedocs.io/en/stable/installation.html')
return 1
parser = build_parser()
args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(message)s')
if 'function' not in args:
return parser.print_help()
try:
return args.function(args)
except subprocess.CalledProcessError as cpe:
print('{} invocation failed with status {}'.format(cpe.cmd[0], cpe.returncode))
print('Command line was:', *[shlex.quote(s) for s in cpe.cmd])
return cpe.returncode
if __name__ == "__main__":
sys.exit(main())
```
#### File: src/borg_import/rsynchl.py
```python
import re
from .helpers.discover import discover, parser
from .helpers.names import make_name
from .helpers.timestamps import datetime_from_mtime
def get_rsyncsnapshots(root):
"""Get all snapshot metadata discovered in the rsync root directory."""
regex = re.compile(r'(?P<snapshot_name>.+)')
for path in discover(str(root), 1):
parsed = parser(path, regex)
if parsed is not None:
abs_path = root / path
meta = dict(
name=make_name(parsed['snapshot_name']),
path=abs_path,
timestamp=datetime_from_mtime(abs_path),
)
yield meta
``` |
{
"source": "jgu2/jade",
"score": 2
} |
#### File: jade/cli/pipeline.py
```python
import logging
import os
import shutil
import sys
from pathlib import Path
import click
from jade.common import HPC_CONFIG_FILE, OUTPUT_DIR
from jade.hpc.common import HpcType
from jade.loggers import setup_logging
from jade.jobs.pipeline_manager import PipelineManager
from jade.models import (
HpcConfig,
LocalHpcConfig,
SingularityParams,
SubmitterParams,
get_model_defaults,
)
from jade.utils.utils import get_cli_string, load_data
logger = logging.getLogger(__name__)
SUBMITTER_PARAMS_DEFAULTS = get_model_defaults(SubmitterParams)
@click.group()
def pipeline():
"""Manage JADE execution pipeline."""
setup_logging("pipeline", None)
@click.command()
@click.argument(
"auto-config-cmds",
nargs=-1,
)
@click.option(
"-b",
"--per-node-batch-size",
default=SUBMITTER_PARAMS_DEFAULTS["per_node_batch_size"],
show_default=True,
help="Number of jobs to run on one node in one batch.",
)
@click.option(
"-c",
"--config-file",
type=click.Path(),
default=PipelineManager.CONFIG_FILENAME,
show_default=True,
help="pipeline config file.",
)
@click.option(
"-h",
"--hpc-config",
type=click.Path(),
default=HPC_CONFIG_FILE,
show_default=True,
help="HPC config file.",
)
@click.option(
"-l",
"--local",
is_flag=True,
default=False,
show_default=True,
help="Run locally even if on HPC.",
)
@click.option(
"-n",
"--max-nodes",
default=None,
type=int,
show_default=True,
help="Max number of node submission requests to make in parallel. Default is unbounded.",
)
@click.option(
"-p",
"--poll-interval",
default=SUBMITTER_PARAMS_DEFAULTS["poll_interval"],
type=float,
show_default=True,
help="Interval in seconds on which to poll jobs for status.",
)
@click.option(
"-q",
"--num-processes",
default=None,
show_default=False,
type=int,
help="Number of processes to run in parallel; defaults to num CPUs.",
)
@click.option(
"--reports/--no-reports",
is_flag=True,
default=True,
show_default=True,
help="Generate reports after execution.",
)
@click.option(
"-S",
"--enable-singularity",
is_flag=True,
default=False,
show_default=True,
help="Add Singularity parameters and set the config to run in a container.",
)
@click.option(
"-C",
"--container",
type=click.Path(exists=True),
help="Path to container",
)
@click.option(
"--verbose", is_flag=True, default=False, show_default=True, help="Enable verbose log output."
)
def create(
auto_config_cmds,
per_node_batch_size,
config_file,
hpc_config,
local,
max_nodes,
poll_interval,
num_processes,
reports,
enable_singularity,
container,
verbose,
):
"""Create a pipeline with multiple Jade configurations."""
if local:
hpc_config = HpcConfig(hpc_type=HpcType.LOCAL, hpc=LocalHpcConfig())
else:
if not os.path.exists(hpc_config):
print(
f"{hpc_config} does not exist. Generate it with 'jade config hpc' "
"or run in local mode with '-l'",
file=sys.stderr,
)
sys.exit(1)
hpc_config = HpcConfig(**load_data(hpc_config))
if enable_singularity:
singularity_params = SingularityParams(enabled=True, container=container)
else:
singularity_params = None
submit_params = SubmitterParams(
generate_reports=reports,
hpc_config=hpc_config,
max_nodes=max_nodes,
num_processes=num_processes,
per_node_batch_size=per_node_batch_size,
poll_interval=poll_interval,
singularity_params=singularity_params,
verbose=verbose,
)
PipelineManager.create_config(auto_config_cmds, config_file, submit_params)
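# Illustrative flow (file names are placeholders), assuming this group is mounted
# under the top-level "jade" CLI:
#
#   jade pipeline create "jade config create stage1_commands.txt" \
#       "python create_stage2_config.py" -c pipeline.json
#   jade pipeline submit pipeline.json -o pipeline-output
#   jade pipeline status -o pipeline-output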
@click.command()
@click.option("-o", "--output", default=OUTPUT_DIR, show_default=True, help="Output directory.")
@click.option(
"--verbose", is_flag=True, default=False, show_default=True, help="Enable verbose log output."
)
def status(output, verbose):
"""Check status of the pipeline."""
try:
mgr = PipelineManager.load(output)
except FileNotFoundError:
print(f"{output} is not a valid pipeline output directory", file=stderr)
sys.exit(1)
config = mgr.config
completed_stages = []
current_stage = None
for stage in config.stages:
if stage.stage_num < config.stage_num:
completed_stages.append(stage)
elif stage.stage_num == config.stage_num:
current_stage = stage
print(f"Is complete: {config.is_complete}")
if current_stage is not None:
print(f"Current stage number: {config.stage_num}")
print("\nTo view the status of the current stage:")
print(f" jade show-status -o {current_stage.path}")
if completed_stages:
print(f"\nTo view results of the completed stages:")
for stage in completed_stages:
print(f" jade show-results -o {stage.path}")
@click.command()
@click.argument("config-file")
@click.option(
"-f",
"--force",
default=False,
is_flag=True,
show_default=True,
help="Delete output directory if it exists.",
)
@click.option("-o", "--output", default=OUTPUT_DIR, show_default=True, help="Output directory.")
@click.option(
"--verbose", is_flag=True, default=False, show_default=True, help="Enable verbose log output."
)
def submit(config_file, output, force, verbose=False):
"""Submit the pipeline for execution."""
if os.path.exists(output):
if force:
shutil.rmtree(output)
else:
print(
f"{output} already exists. Delete it or use '--force' to overwrite.",
file=sys.stderr,
)
sys.exit(1)
os.makedirs(output, exist_ok=True)
filename = os.path.join(output, "pipeline_submit.log")
level = logging.DEBUG if verbose else logging.INFO
setup_logging(__name__, filename, file_level=level, console_level=level)
logger.info(get_cli_string())
mgr = PipelineManager.create(config_file, output)
try:
mgr.submit_next_stage(1)
except Exception:
logger.exception("Pipeline execution failed")
raise
logging.shutdown()
sys.exit(0)
@click.command()
@click.argument("output")
@click.option(
"--stage-num",
required=True,
type=int,
help="stage number to submit",
)
@click.option(
"--return-code",
required=True,
type=int,
help="return code of stage index that just completed",
)
@click.option(
"--verbose", is_flag=True, default=False, show_default=True, help="Enable verbose log output."
)
def submit_next_stage(output, stage_num, return_code, verbose=False):
"""Internal command to submit the next stage of the pipeline for execution."""
filename = os.path.join(output, "pipeline_submit.log")
level = logging.DEBUG if verbose else logging.INFO
setup_logging(__name__, filename, file_level=level, console_level=level, mode="a")
logger.info(get_cli_string())
mgr = PipelineManager.load(output)
try:
mgr.submit_next_stage(stage_num, return_code=return_code)
except Exception:
logger.exception("Pipeline execution failed")
raise
logging.shutdown()
sys.exit(0)
pipeline.add_command(create)
pipeline.add_command(status)
pipeline.add_command(submit)
pipeline.add_command(submit_next_stage)
```
#### File: jade/cli/run.py
```python
import logging
import os
import sys
import click
from jade.common import OUTPUT_DIR
from jade.events import StructuredErrorLogEvent, EVENT_CATEGORY_ERROR, EVENT_NAME_UNHANDLED_ERROR
from jade.loggers import log_event, setup_logging, setup_event_logging
from jade.jobs.job_post_process import JobPostProcess
from jade.utils.utils import get_cli_string, load_data
from jade.exceptions import InvalidExtension
from jade.extensions.registry import Registry, ExtensionClassType
@click.argument("extension")
@click.option(
"-n",
"--name",
required=True,
type=str,
help="The name of the job that needs to run.",
)
@click.option("-o", "--output", default=OUTPUT_DIR, show_default=True, help="Output directory.")
@click.option("--config-file", required=True, help="Job configuration file")
@click.option(
"-f",
"--output-format",
default="csv",
show_default=True,
help="Output format for data (csv or json).",
)
@click.option(
"--verbose", is_flag=True, default=False, show_default=True, help="Enable verbose log output."
)
@click.command()
def run(extension, **kwargs):
"""Runs individual job."""
registry = Registry()
if not registry.is_registered(extension):
raise InvalidExtension(f"Extension '{extension}' is not registered.")
# Parse Argument
config_file = kwargs["config_file"]
name = kwargs["name"]
output = kwargs["output"]
output_format = kwargs["output_format"]
verbose = kwargs["verbose"]
level = logging.DEBUG if verbose else logging.INFO
# Create directory for current job
job_dir = os.path.join(output, name)
os.makedirs(job_dir, exist_ok=True)
# Structural logging setup
event_file = os.path.join(job_dir, "events.log")
setup_event_logging(event_file)
# General logging setup
log_file = os.path.join(job_dir, "run.log")
general_logger = setup_logging(
extension,
log_file,
console_level=logging.ERROR,
file_level=level,
)
general_logger.info(get_cli_string())
# Create config for run
try:
cli = registry.get_extension_class(extension, ExtensionClassType.CLI)
ret = cli.run(config_file, name, output, output_format, verbose)
except Exception as err:
msg = f"unexpected exception in run '{extension}' job={name} - {err}"
general_logger.exception(msg)
event = StructuredErrorLogEvent(
source=name,
category=EVENT_CATEGORY_ERROR,
name=EVENT_NAME_UNHANDLED_ERROR,
message=msg,
)
log_event(event)
ret = 1
if ret == 0:
try:
config = load_data(config_file)
if "job_post_process_config" in config.keys():
post_process = JobPostProcess(
module_name=config["job_post_process_config"]["module"],
class_name=config["job_post_process_config"]["class"],
data=config["job_post_process_config"]["data"],
job_name=name,
output=output,
)
post_process.run(config_file=config_file, output=output)
except Exception as err:
msg = f"unexpected exception in post-process '{extension}' job={name} - {err}"
general_logger.exception(msg)
event = StructuredErrorLogEvent(
source=name,
category=EVENT_CATEGORY_ERROR,
name=EVENT_NAME_UNHANDLED_ERROR,
message=msg,
)
log_event(event)
ret = 1
sys.exit(ret)
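# The optional post-process hook above expects the job config to carry an entry
# shaped roughly like this (module/class names and data are hypothetical; only the
# three keys read above are required):
#
#   "job_post_process_config": {
#       "module": "my_package.post_analysis",
#       "class": "MyPostProcess",
#       "data": {"threshold": 0.9}
#   }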
```
#### File: extensions/demo/autoregression_parameters.py
```python
from collections import namedtuple
from jade.common import DEFAULT_SUBMISSION_GROUP
from jade.jobs.job_parameters_interface import JobParametersInterface
class AutoRegressionParameters(JobParametersInterface):
"""
A class used for creating auto-regression job.
"""
parameters_type = namedtuple("AutoRegression", "country")
_EXTENSION = "demo"
def __init__(self, country, data):
"""
Init auto-regression parameter class
Parameters
----------
country: str
The name of a country.
data: str
The path to the csv file containing the GDP data.
"""
self.country = country
self.data = data
self._name = self._create_name()
def __str__(self):
return "<AutoRegressionParameters: {}>".format(self.name)
@property
def extension(self):
return self._EXTENSION
@property
def name(self):
return self._name
def _create_name(self):
return self.country.replace(" ", "_").lower()
def serialize(self):
return {
"country": self._name,
"data": self.data,
"extension": self.extension,
}
@classmethod
def deserialize(cls, param):
return cls(param["country"], param["data"])
@property
def estimated_run_minutes(self):
return None
@property
def cancel_on_blocking_job_failure(self):
return False
def get_blocking_jobs(self):
return set()
def remove_blocking_job(self, name):
pass
def set_blocking_jobs(self, blocking_jobs):
pass
@property
def submission_group(self):
return DEFAULT_SUBMISSION_GROUP
@submission_group.setter
def submission_group(self, group):
return
```
#### File: extensions/demo/create_merge_pred_gdp.py
```python
import os
import sys
from jade.models import PipelineConfig
from jade.utils.subprocess_manager import run_command
from jade.utils.utils import load_data
PRED_GDP_COMMANDS_FILE = "pred_gdp_commands.txt"
def main():
config = PipelineConfig(**load_data(os.environ["JADE_PIPELINE_STATUS_FILE"]))
cur_stage = config.stages[-1]
cur_stage_output = cur_stage.path
previous_stage = config.stages[-2]
previous_stage_output = previous_stage.path
script = "jade/extensions/demo/merge_pred_gdp.py"
with open(PRED_GDP_COMMANDS_FILE, "w") as f_out:
cmd = f"python {script} run {previous_stage_output} {cur_stage_output}"
f_out.write(cmd + "\n")
cmd = "jade config create pred_gdp_commands.txt -c config-stage2.json"
sys.exit(run_command(cmd))
if __name__ == "__main__":
main()
```
#### File: extensions/generic_command/generic_command_parameters.py
```python
import logging
from collections import namedtuple
from typing import Dict, List, Optional, Set
from pydantic import Field, validator
from jade.models import JadeBaseModel
from jade.common import DEFAULT_SUBMISSION_GROUP
from jade.jobs.job_parameters_interface import JobParametersInterface
logger = logging.getLogger(__name__)
_EXTENSION = "generic_command"
class GenericCommandParameters(JobParametersInterface):
"""A class used for creating a job for a generic command."""
parameters_type = namedtuple("GenericCommand", "command")
def __init__(self, **kwargs):
self._model = GenericCommandParametersModel(**kwargs)
def __str__(self):
return "<GenericCommandParameters: {}>".format(self.name)
def __getattr__(self, name):
if name in GenericCommandParametersModel.__fields__:
return getattr(self._model, name)
raise AttributeError(f"'GenericCommandParameters' object has no attribute '{name}'")
def __setattr__(self, name, value):
if name == "extension":
raise AttributeError(f"'GenericCommandParameters' does not allow setting 'extension'")
if name in GenericCommandParametersModel.__fields__:
setattr(self._model, name, value)
self.__dict__[name] = value
@property
def command(self):
if self._model.use_multi_node_manager:
return f"jade-internal run-multi-node-job {self.name} {self._model.command}"
return self._model.command
@property
def estimated_run_minutes(self):
return self._model.estimated_run_minutes
@property
def extension(self):
return _EXTENSION
@property
def name(self):
return self._create_name()
def _create_name(self):
return str(self._model.job_id)
def serialize(self):
assert self._model.job_id is not None
# If job sizes get huge then we should exclude parameters with default values.
return self._model.dict()
@classmethod
def deserialize(cls, data):
return cls(**data)
@property
def cancel_on_blocking_job_failure(self):
return self._model.cancel_on_blocking_job_failure
def get_blocking_jobs(self):
return self._model.blocked_by
def remove_blocking_job(self, name):
self._model.blocked_by.remove(name)
def set_blocking_jobs(self, blocking_jobs):
self._model.blocked_by = blocking_jobs
@property
def submission_group(self):
return self._model.submission_group
class GenericCommandParametersModel(JadeBaseModel):
"""Model definition for generic command parameters"""
use_multi_node_manager: Optional[bool] = Field(
title="use_multi_node_manager",
description="If true JADE will run this job with its multi-node manager.",
default=False,
)
command: str = Field(
title="command",
description="Command that can be invoked in a terminal (shell characters not allowed)",
)
blocked_by: Optional[Set[str]] = Field(
title="blocked_by",
description="Array of job names that must complete before this one can start.",
default=set(),
)
cancel_on_blocking_job_failure: Optional[bool] = Field(
title="cancel_on_blocking_job_failure",
description="If true JADE will cancel this job if any of its blocking jobs fail.",
default=False,
)
estimated_run_minutes: Optional[int] = Field(
title="estimated_run_minutes",
description="JADE will use this value along with num-processes and walltime to "
"build per-node batches of jobs if time-based-batching is enabled.",
)
submission_group: Optional[str] = Field(
title="submission_group",
description="Optional name of a submission group",
default=DEFAULT_SUBMISSION_GROUP,
)
append_output_dir: Optional[bool] = Field(
title="append_output_dir",
description="If true JADE will append --jade-runtime-output=X where X is the output "
"directory specified in jade submit-jobs.",
default=False,
)
ext: Optional[Dict] = Field(
title="ext",
description="User-defined extension data to be used at runtime. Must be serializable in "
"JSON format.",
default={},
)
job_id: Optional[int] = Field(
title="job_id",
description="Unique job identifier, generated by JADE",
)
extension: Optional[str] = Field(
title="extension",
description="job extension type, generated by JADE",
default=_EXTENSION,
)
@validator("append_output_dir")
def handle_append_output_dir(cls, value, values):
if values["use_multi_node_manager"]:
logger.debug("Override 'append_output_dir' because 'use_multi_node_manager' is set")
return True
return value
@validator("blocked_by")
def handle_blocked_by(cls, value):
return {str(x) for x in value}
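# A serialized generic_command job (the output of serialize() above) is roughly
# the model's dict; the command and job_id values here are illustrative:
#
#   {"command": "bash run_case.sh case1", "job_id": 1,
#    "extension": "generic_command", "blocked_by": set(),
#    "cancel_on_blocking_job_failure": False, "use_multi_node_manager": False, ...}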
```
#### File: jade/extensions/registry.py
```python
import copy
import enum
import importlib
import logging
import os
import pathlib
from jade.exceptions import InvalidParameter
from jade.utils.utils import dump_data, load_data
DEFAULT_REGISTRY = {
"extensions": [
{
"name": "generic_command",
"description": "Allows batching of a list of CLI commands.",
"job_execution_module": "jade.extensions.generic_command.generic_command_execution",
"job_execution_class": "GenericCommandExecution",
"job_configuration_module": "jade.extensions.generic_command.generic_command_configuration",
"job_configuration_class": "GenericCommandConfiguration",
"job_parameters_module": "jade.extensions.generic_command.generic_command_parameters",
"job_parameters_class": "GenericCommandParameters",
"cli_module": "jade.extensions.generic_command.cli",
},
],
"logging": [
"jade",
],
}
class ExtensionClassType(enum.Enum):
"""Possible values for computational sequencing mode"""
CLI = "cli_module"
CONFIGURATION = "config_class"
EXECUTION = "exec_class"
PARAMETERS = "param_class"
logger = logging.getLogger(__name__)
class Registry:
"""Manages extensions registered with JADE."""
_REGISTRY_FILENAME = ".jade-registry.json"
FORMAT_VERSION = "v0.2.0"
def __init__(self, registry_filename=None):
if registry_filename is None:
self._registry_filename = os.path.join(
str(pathlib.Path.home()),
self._REGISTRY_FILENAME,
)
else:
self._registry_filename = registry_filename
self._extensions = {}
self._loggers = set()
if not os.path.exists(self._registry_filename):
self.reset_defaults()
else:
data = self._check_registry_config(self._registry_filename)
for extension in data["extensions"]:
self._add_extension(extension)
for package_name in data["logging"]:
self._loggers.add(package_name)
def _add_extension(self, extension):
for field in DEFAULT_REGISTRY["extensions"][0]:
if field not in extension:
raise InvalidParameter(f"required field {field} not present")
try:
cmod = importlib.import_module(extension["job_configuration_module"])
emod = importlib.import_module(extension["job_execution_module"])
pmod = importlib.import_module(extension["job_parameters_module"])
cli_mod = importlib.import_module(extension["cli_module"])
except ImportError as exc:
if "statsmodels" in exc.msg:
# Older versions of Jade installed the demo extension into the registry as
# well as its dependencies. Newer versions do not. This causes import errors
# when a user upgrades to the newer version.
# Remove the demo extension. The user can add it later if they want.
# This can be removed whenever all users have gone through an upgrade.
self._remove_demo_extension()
return
else:
raise
ext = copy.copy(extension)
ext[ExtensionClassType.CONFIGURATION] = getattr(cmod, extension["job_configuration_class"])
ext[ExtensionClassType.EXECUTION] = getattr(emod, extension["job_execution_class"])
ext[ExtensionClassType.PARAMETERS] = getattr(pmod, extension["job_parameters_class"])
ext[ExtensionClassType.CLI] = cli_mod
self._extensions[extension["name"]] = ext
def _check_registry_config(self, filename):
data = load_data(filename)
if isinstance(data, list):
# Workaround to support the old registry format. 03/06/2020
# It can be removed eventually.
new_data = {
"extensions": data,
"logging": DEFAULT_REGISTRY["logging"],
}
dump_data(new_data, self.registry_filename, indent=4)
print(
"\nReformatted registry. Refer to `jade extensions --help` "
"for instructions on adding logging for external packages.\n"
)
data = new_data
format = data.get("format_version", "v0.1.0")
if format == "v0.1.0":
self.reset_defaults()
data = load_data(filename)
print(
"\nWARNING: Reformatted registry. You will need to "
"re-register any external extensions.\n"
)
return data
def _serialize_registry(self):
data = {
"extensions": [],
"logging": list(self._loggers),
"format_version": self.FORMAT_VERSION,
}
for _, extension in sorted(self._extensions.items()):
ext = {k: v for k, v in extension.items() if not isinstance(k, ExtensionClassType)}
data["extensions"].append(ext)
filename = self.registry_filename
dump_data(data, filename, indent=4)
logger.debug("Serialized data to %s", filename)
def add_logger(self, package_name):
"""Add a package name to the logging registry.
Parameters
----------
package_name : str
"""
self._loggers.add(package_name)
self._serialize_registry()
def remove_logger(self, package_name):
"""Remove a package name from the logging registry.
Parameters
----------
package_name : str
"""
self._loggers.remove(package_name)
self._serialize_registry()
def list_loggers(self):
"""List the package names registered to be logged.
Returns
-------
list
"""
return sorted(list(self._loggers))
def show_loggers(self):
"""Print the package names registered to be logged."""
print(", ".join(self.list_loggers()))
def get_extension_class(self, extension_name, class_type):
"""Get the class associated with the extension.
Parameters
----------
extension_name : str
class_type : ExtensionClassType
Raises
------
InvalidParameter
Raised if the extension is not registered.
"""
extension = self._extensions.get(extension_name)
if extension is None:
raise InvalidParameter(f"{extension_name} is not registered")
return extension[class_type]
def is_registered(self, extension_name):
"""Check if the extension is registered"""
return extension_name in self._extensions
def iter_extensions(self):
"""Return an iterator over registered extensions.
Returns
-------
dict_values
"""
return self._extensions.values()
def list_extensions(self):
"""Return a list of registered extensions.
Returns
-------
list of dict
"""
return list(self.iter_extensions())
def register_extension(self, extension):
"""Registers an extension in the registry.
Parameters
----------
extension : dict
Raises
------
InvalidParameter
Raised if the extension is invalid.
"""
self._add_extension(extension)
self._serialize_registry()
logger.debug("Registered extension %s", extension["name"])
@property
def registry_filename(self):
"""Return the filename that stores the registry."""
return self._registry_filename
def reset_defaults(self):
"""Reset the registry to its default values."""
self._extensions.clear()
self._loggers.clear()
for extension in DEFAULT_REGISTRY["extensions"]:
self.register_extension(extension)
for package_name in DEFAULT_REGISTRY["logging"]:
self.add_logger(package_name)
self._serialize_registry()
logger.debug("Initialized registry to its defaults.")
def show_extensions(self):
"""Show the registered extensions."""
print("JADE Extensions:")
for name, extension in sorted(self._extensions.items()):
print(f" {name}: {extension['description']}")
def unregister_extension(self, extension_name):
"""Unregisters an extension.
Parameters
----------
extension_name : str
"""
if extension_name not in self._extensions:
raise InvalidParameter(f"extension {extension_name} isn't registered")
self._extensions.pop(extension_name)
self._serialize_registry()
def register_demo_extension(self):
self.register_extension(
{
"name": "demo",
"description": "Country based GDP auto-regression analysis",
"job_execution_module": "jade.extensions.demo.autoregression_execution",
"job_execution_class": "AutoRegressionExecution",
"job_configuration_module": "jade.extensions.demo.autoregression_configuration",
"job_configuration_class": "AutoRegressionConfiguration",
"job_parameters_module": "jade.extensions.demo.autoregression_parameters",
"job_parameters_class": "AutoRegressionParameters",
"cli_module": "jade.extensions.demo.cli",
},
)
def _remove_demo_extension(self):
registry_file = pathlib.Path.home() / self._REGISTRY_FILENAME
if not registry_file.exists():
return
data = load_data(registry_file)
found = False
for i, ext in enumerate(data["extensions"]):
if ext["name"] == "demo":
data["extensions"].pop(i)
found = True
break
if found:
dump_data(data, registry_file, indent=2)
```
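A minimal usage sketch for the registry above, assuming the `jade` package is installed so the default `generic_command` extension modules can be imported; the registry file path is illustrative.
```python
# Usage sketch (assumes jade is installed; the registry path is an illustrative throwaway).
from pathlib import Path
from jade.extensions.registry import Registry, ExtensionClassType

registry = Registry(registry_filename=str(Path("/tmp/.jade-registry.json")))
registry.add_logger("my_package")   # also capture logging from an external package
registry.show_loggers()
registry.show_extensions()          # generic_command is registered by default

if registry.is_registered("generic_command"):
    config_cls = registry.get_extension_class(
        "generic_command", ExtensionClassType.CONFIGURATION
    )
    print(config_cls.__name__)      # GenericCommandConfiguration
```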
#### File: jade/hpc/hpc_manager.py
```python
import logging
import os
import time
from jade.enums import Status
from jade.exceptions import InvalidParameter
from jade.hpc.common import HpcType, HpcJobStatus
from jade.hpc.fake_manager import FakeManager
from jade.hpc.local_manager import LocalManager
from jade.hpc.pbs_manager import PbsManager
from jade.hpc.slurm_manager import SlurmManager
from jade.models import HpcConfig
logger = logging.getLogger(__name__)
class HpcManager:
"""Manages HPC job submission and monitoring."""
def __init__(self, submission_groups, output):
self._output = output
self._configs = {}
self._intfs = {}
self._hpc_type = None
assert submission_groups
for name, group in submission_groups.items():
self._configs[name] = group.submitter_params.hpc_config
self._intfs[name] = self.create_hpc_interface(group.submitter_params.hpc_config)
if self._hpc_type is None:
self._hpc_type = group.submitter_params.hpc_config.hpc_type
logger.debug("Constructed HpcManager with output=%s", output)
def _get_interface(self, submission_group_name=None):
if submission_group_name is None:
# In many cases we don't care which interface is used.
# We could store job IDs by group if we need to perform actions by group
# in the future.
# As of now we don't track IDs at all in this class.
return next(iter(self._intfs.values()))
return self._intfs[submission_group_name]
def am_i_manager(self):
"""Return True if the current node is the manager node.
Returns
-------
bool
"""
intf = self._get_interface()
return intf.am_i_manager()
def cancel_job(self, job_id):
"""Cancel job.
Parameters
----------
job_id : str
Returns
-------
int
return code
"""
intf = self._get_interface()
ret = intf.cancel_job(job_id)
if ret == 0:
logger.info("Successfully cancelled job ID %s", job_id)
else:
logger.info("Failed to cancel job ID %s", job_id)
return ret
def check_status(self, name=None, job_id=None):
"""Return the status of a job by name or ID.
Parameters
----------
name : str
job name
job_id : str
job ID
Returns
-------
HpcJobStatus
"""
if (name is None and job_id is None) or (name is not None and job_id is not None):
raise InvalidParameter("exactly one of name / job_id must be set")
intf = self._get_interface()
info = intf.check_status(name=name, job_id=job_id)
logger.debug("info=%s", info)
return info.status
def check_statuses(self):
"""Check the statuses of all user jobs.
Returns
-------
dict
key is job_id, value is HpcJobStatus
"""
intf = self._get_interface()
return intf.check_statuses()
def get_hpc_config(self, submission_group_name):
"""Returns the HPC config parameters.
Parameters
----------
submission_group_name : str
Returns
-------
dict
config parameters
"""
return self._configs[submission_group_name]
@property
def hpc_type(self):
"""Return the type of HPC management system.
Returns
-------
HpcType
"""
return self._hpc_type
def list_active_nodes(self, job_id):
"""Return the nodes currently participating in the job.
Parameters
----------
job_id : str
Returns
-------
list
list of node hostnames
"""
intf = self._get_interface()
return intf.list_active_nodes(job_id)
def submit(
self,
directory,
name,
script,
submission_group_name,
wait=False,
keep_submission_script=True,
dry_run=False,
):
"""Submits scripts to the queue for execution.
Parameters
----------
directory : str
directory to contain the submission script
name : str
job name
script : str
Script to execute.
submission_group_name : str
wait : bool
Wait for execution to complete.
keep_submission_script : bool
Do not delete the submission script.
dry_run : bool
Do not actually submit jobs. Just create the files.
Returns
-------
tuple
(job_id, submission status)
"""
intf = self._get_interface(submission_group_name)
intf.check_storage_configuration()
# TODO: enable this logic if batches have unique names.
# info = intf.check_status(name=name)
# if info.status in (HpcJobStatus.QUEUED, HpcJobStatus.RUNNING):
# raise JobAlreadyInProgress(
# "Not submitting job '{}' because it is already active: "
# f"{info}"
# )
filename = os.path.join(directory, name + ".sh")
intf.create_submission_script(name, script, filename, self._output)
logger.info("Created submission script %s", filename)
if dry_run:
logger.info("Dry run mode enabled. Return without submitting.")
return 0, Status.GOOD
result, job_id, err = intf.submit(filename)
if result == Status.GOOD:
logger.info("job '%s' with ID=%s submitted successfully", name, job_id)
if not keep_submission_script:
os.remove(filename)
else:
logger.error("Failed to submit job '%s': result=%s: %s", name, result, err)
if wait:
self._wait_for_completion(job_id)
return job_id, result
@staticmethod
def create_hpc_interface(config):
"""Returns an HPC implementation instance appropriate for the current
environment.
"""
if config.hpc_type == HpcType.SLURM:
intf = SlurmManager(config)
elif config.hpc_type == HpcType.FAKE:
intf = FakeManager(config)
elif config.hpc_type == HpcType.LOCAL:
intf = LocalManager(config)
else:
raise ValueError("Unsupported HPC type: {}".format(config.hpc_type))
logger.debug("HPC manager type=%s", config.hpc_type)
return intf
def _wait_for_completion(self, job_id):
status = HpcJobStatus.UNKNOWN
intf = self._get_interface()
while status not in (HpcJobStatus.COMPLETE, HpcJobStatus.NONE):
time.sleep(5)
job_info = intf.check_status(job_id=job_id)
logger.debug("job_info=%s", job_info)
if job_info.status != status:
logger.info("Status of job ID %s changed to %s", job_id, job_info.status)
status = job_info.status
logger.info("Job ID %s is complete", job_id)
```
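A minimal sketch of building an HPC interface without a full submission group, assuming `HpcType.LOCAL` corresponds to the string `"local"`.
```python
# Sketch: create an interface directly from an HpcConfig (local/no-HPC mode assumed).
from jade.models import HpcConfig
from jade.hpc.hpc_manager import HpcManager

config = HpcConfig(hpc_type="local", hpc={})    # LocalHpcConfig takes no options
intf = HpcManager.create_hpc_interface(config)  # -> LocalManager
print(type(intf).__name__)
```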
#### File: jade/hpc/pbs_manager.py
```python
import logging
from jade.enums import Status
from jade.hpc.common import HpcJobStatus
from jade.hpc.hpc_manager_interface import HpcManagerInterface
from jade.utils.subprocess_manager import run_command
from jade.utils import utils
logger = logging.getLogger(__name__)
PBS_SCRIPT = """#!/bin/bash
#PBS -N {name} # job name
#PBS -A {alloc} # allocation account
#PBS -l {qos}
#PBS -q {queue} # queue (debug, short, batch, or long)
#PBS -o {path}/{name}_$PBS_JOBID.o
#PBS -e {path}/{name}_$PBS_JOBID.e
{feature}
echo Running on: $HOSTNAME, Machine Type: $MACHTYPE
{script}
wait
"""
class PbsManager(HpcManagerInterface):
"""Manages PBS jobs."""
_STATUSES = {
"Q": HpcJobStatus.QUEUED,
"R": HpcJobStatus.RUNNING,
"C": HpcJobStatus.COMPLETE,
}
def __init__(self, config):
self._config = config
def am_i_manager(self):
assert False
def cancel_job(self, job_id):
return 0
def check_status(self, name=None, job_id=None):
qstat_rows = self._qstat()
if qstat_rows is None:
return HpcJobStatus.NONE
# TODO job_id
return self._get_status_from_output(qstat_rows, name)
def check_statuses(self):
assert False
def check_storage_configuration(self):
pass
def create_cluster(self):
pass
def create_local_cluster(self):
pass
def get_config(self):
return self._config
def get_current_job_id(self):
assert False
def get_local_scratch(self):
return "."
@staticmethod
def get_num_cpus():
return 18
@staticmethod
def _get_status_from_output(qstat_rows, name):
# column location of various job identifiers
col_loc = {"id": 0, "name": 3}
# reverse the list so most recent jobs are first
qstat_rows.reverse()
# update job status from qstat list
status = HpcJobStatus.UNKNOWN
for row in qstat_rows:
row = row.split()
# make sure the row is long enough to be a job status listing
# TODO regex?
if len(row) > 10:
if row[col_loc["name"]].strip() == name.strip():
# Job status is located at the -2 index
status = PbsManager._STATUSES.get(row[-2], HpcJobStatus.UNKNOWN)
if status is HpcJobStatus.UNKNOWN:
logger.error("Unknown PBS job status: %s", row[-2])
break
return status
def _qstat(self):
"""Run the PBS qstat command and return the stdout split to rows.
Returns
-------
qstat_rows : list | None
List of strings where each string is a row in the qstat printout.
Returns None if qstat is empty.
"""
cmd = "qstat -u {user}".format(user=self.USER)
output = {}
run_command(cmd, output)
if not output["stdout"]:
# No jobs are currently running.
return None
qstat_rows = output["stdout"].split("\n")
return qstat_rows
def create_submission_script(self, name, script, filename, path="."):
feature = self._config.get("feature")
if feature is None:
feature = ""
else:
feature = "#PBS -l feature={}".format("feature")
script = PBS_SCRIPT.format(
name=name,
alloc=self._config["allocation"],
qos=self._config["qos"],
queue=self._config["queue"],
path=path,
feature=feature,
script=script,
)
utils.create_script(filename, script)
def list_active_nodes(self, job_id):
assert False
def log_environment_variables(self):
pass
def submit(self, filename):
output = {}
ret = run_command("qsub {}".format(filename), output)
if ret == 0:
result = Status.GOOD
job_id = output["stdout"]
else:
result = Status.ERROR
job_id = None
return result, job_id, output["stderr"]
```
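A hedged sketch of rendering a PBS submission script with the manager above; the dict keys follow what `create_submission_script` reads from `self._config`, and the file names are illustrative.
```python
# Sketch: PbsManager is constructed with a plain dict in this module.
from jade.hpc.pbs_manager import PbsManager

mgr = PbsManager({"allocation": "my_project", "qos": "normal", "queue": "batch"})
mgr.create_submission_script(
    name="job1",
    script="python run.py",
    filename="job1.sh",   # illustrative output path
    path="output",
)
```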
#### File: jade/jobs/analysis_execution_base.py
```python
import os
from jade.common import ANALYSIS_DIR
from jade.jobs.job_execution_interface import JobExecutionInterface
class AnalysisExecutionBase(JobExecutionInterface):
"""Base class for analysis jobs. This job type is intended to run on the
output results of another job."""
def __init__(self, output_dir, simulations_dir):
self._analysis_dir = self.get_analysis_dir(output_dir)
self._simulations_dir = simulations_dir
os.makedirs(self._analysis_dir, exist_ok=True)
@staticmethod
def get_analysis_dir(output_dir):
"""Get the analysis directory to use.
Parameters
----------
output_dir : str
Returns
-------
str
"""
return os.path.join(output_dir, ANALYSIS_DIR)
@property
def results_directory(self):
"""Return the results directory created by the simulation."""
return self._analysis_dir
def list_results_files(self):
"""Return a list of result filenames created by the simulation."""
return [os.path.join(self._analysis_dir, x) for x in os.listdir(self._analysis_dir)]
def post_process(self, **kwargs):
pass
```
#### File: jade/jobs/async_cli_command.py
```python
import logging
import os
import shlex
import subprocess
import sys
import time
from pathlib import Path
from jade.common import JOBS_OUTPUT_DIR, RESULTS_DIR
from jade.enums import JobCompletionStatus, Status
from jade.events import StructuredLogEvent, EVENT_NAME_BYTES_CONSUMED, EVENT_CATEGORY_RESOURCE_UTIL
from jade.jobs.async_job_interface import AsyncJobInterface
from jade.jobs.results_aggregator import ResultsAggregator
from jade.loggers import log_event
from jade.result import Result
from jade.utils.utils import get_directory_size_bytes
logger = logging.getLogger(__name__)
class AsyncCliCommand(AsyncJobInterface):
"""Defines a a CLI command that can be submitted asynchronously."""
def __init__(self, job, cmd, output, batch_id):
self._job = job
self._cli_cmd = cmd
self._output = Path(output)
self._pipe = None
self._is_pending = False
self._start_time = None
self._return_code = None
self._is_complete = False
self._batch_id = batch_id
def __del__(self):
if self._is_pending:
logger.warning("job %s destructed while pending", self._cli_cmd)
def _complete(self):
self._return_code = self._pipe.returncode
exec_time_s = time.time() - self._start_time
status = JobCompletionStatus.FINISHED
output_dir = self._output / JOBS_OUTPUT_DIR / self._job.name
bytes_consumed = get_directory_size_bytes(output_dir)
event = StructuredLogEvent(
source=self._job.name,
category=EVENT_CATEGORY_RESOURCE_UTIL,
name=EVENT_NAME_BYTES_CONSUMED,
message="job output directory size",
bytes_consumed=bytes_consumed,
)
log_event(event)
result = Result(self._job.name, self._return_code, status, exec_time_s)
ResultsAggregator.append(self._output, result, batch_id=self._batch_id)
logger.info(
"Job %s completed return_code=%s exec_time_s=%s",
self._job.name,
self._return_code,
exec_time_s,
)
def cancel(self):
self._return_code = 1
self._is_complete = True
result = Result(self._job.name, self._return_code, JobCompletionStatus.CANCELED, 0.0)
ResultsAggregator.append(self._output, result, batch_id=self._batch_id)
logger.info("Canceled job %s", self._job.name)
@property
def cancel_on_blocking_job_failure(self):
return self._job.cancel_on_blocking_job_failure
def set_blocking_jobs(self, jobs):
self._job.set_blocking_jobs(jobs)
def is_complete(self):
if self._is_complete:
return True
if not self._is_pending:
ret = self._pipe.poll()
assert ret is None, f"{ret}"
return True
if self._pipe.poll() is not None:
self._is_pending = False
self._complete()
return not self._is_pending
@property
def job(self):
"""Get the job.
Parameters
----------
job : JobParametersInterface
"""
return self._job
@property
def name(self):
return self._job.name
def get_blocking_jobs(self):
return self._job.get_blocking_jobs()
def remove_blocking_job(self, name):
self._job.remove_blocking_job(name)
@property
def return_code(self):
return self._return_code
def run(self):
"""Run the job. Writes results to file when complete."""
assert self._pipe is None
self._start_time = time.time()
# Disable posix if on Windows.
cmd = shlex.split(self._cli_cmd, posix="win" not in sys.platform)
self._pipe = subprocess.Popen(cmd)
self._is_pending = True
logger.debug("Submitted %s", self._cli_cmd)
return Status.GOOD
```
#### File: jade/jobs/job_container_by_key.py
```python
from collections import OrderedDict
import logging
import random
from jade.exceptions import InvalidParameter
from jade.jobs.job_container_interface import JobContainerInterface
from jade.utils.utils import check_filename
logger = logging.getLogger(__name__)
class JobContainerByKey(JobContainerInterface):
"""Stores jobs by key which is a namedtuple."""
def __init__(self):
# collections.namedtuple: JobParametersInterface
self._jobs = OrderedDict()
def __iter__(self):
for job in self._jobs.values():
yield job
def __len__(self):
return len(self._jobs)
@staticmethod
def _get_key(job=None, key=None):
if key is None and job is None:
raise InvalidParameter("either key or job must be passed")
if key is not None and job is not None:
raise InvalidParameter("only one of key and job can be " "passed")
if key is None:
key = job.name
return key
def add_job(self, job, key=None):
if key is None:
key = job.name
if key in self._jobs:
raise InvalidParameter(f"key={key} is already stored")
check_filename(key)
self._jobs[key] = job
logger.debug("Added job %s", key)
def clear(self):
self._jobs.clear()
logger.debug("Cleared all jobs.")
def remove_job(self, job=None, key=None):
key = self._get_key(job=job, key=key)
self._jobs.pop(key)
logger.info("Removed job with key=%s", key)
def get_job(self, name):
for job in self:
if job.name == name:
return job
raise InvalidParameter(f"job {name} not found")
def get_job_by_key(self, key):
job = self._jobs.get(key)
if job is None:
raise InvalidParameter(f"job key={key} not found")
return job
def get_jobs(self, sort=False):
if sort:
keys = list(self._jobs.keys())
keys.sort()
return [self._jobs[x] for x in keys]
return list(self)
def shuffle(self):
keys = list(self._jobs.keys())
random.shuffle(keys)
new_jobs = {}
for key in keys:
new_jobs[key] = self._jobs.pop(key)
self._jobs = new_jobs
```
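A quick sketch of the container above; any object exposing a `name` attribute can serve as a job here.
```python
# Sketch: store and retrieve jobs keyed by their name.
from collections import namedtuple
from jade.jobs.job_container_by_key import JobContainerByKey

Job = namedtuple("Job", ["name"])
container = JobContainerByKey()
container.add_job(Job(name="job_a"))
container.add_job(Job(name="job_b"))
print(len(container))                    # 2
print(container.get_job("job_b").name)   # job_b
for job in container.get_jobs(sort=True):
    print(job.name)
```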
#### File: jade/jobs/job_container_interface.py
```python
import abc
class JobContainerInterface(abc.ABC):
"""Defines interface for job containers."""
@abc.abstractmethod
def __iter__(self):
pass
@abc.abstractmethod
def __len__(self):
pass
@abc.abstractmethod
def add_job(self, job):
"""Add a job to the configuration.
Parameters
----------
job : JobParametersInterface
"""
@abc.abstractmethod
def clear(self):
"""Clear all configured jobs."""
@abc.abstractmethod
def get_job(self, name):
"""Return the job matching name.
Returns
-------
namedtuple
"""
@abc.abstractmethod
def get_jobs(self, sort=False):
"""Return all jobs.
Parameters
----------
sort : bool
Returns
-------
list
"""
def list_jobs(self):
"""Return a list of all jobs.
Returns
------
list
list of JobParametersInterface
"""
return list(iter(self))
@abc.abstractmethod
def remove_job(self, job):
"""Remove a job from the configuration.
Parameters
----------
job : JobParametersInterface
"""
@abc.abstractmethod
def shuffle(self):
"""Shuffle the order of the jobs."""
```
#### File: jade/jobs/job_inputs_interface.py
```python
import abc
class JobInputsInterface(abc.ABC):
"""Interface definition for configuration inputs."""
@property
def base_directory(self):
"""Return the base directory."""
@abc.abstractmethod
def get_available_parameters(self):
"""Return a dictionary containing all available parameters."""
```
#### File: jade/jobs/results_aggregator.py
```python
import csv
import glob
import logging
import os
import time
from pathlib import Path
from filelock import SoftFileLock, Timeout
from jade.common import RESULTS_DIR
from jade.result import Result, deserialize_result, serialize_result
LOCK_TIMEOUT = 300
PROCESSED_RESULTS_FILENAME = "processed_results.csv"
logger = logging.getLogger(__name__)
class ResultsAggregator:
"""Synchronizes updates to the results file.
One instance is used to aggregate results from all compute nodes.
One instance is used for each compute node.
"""
def __init__(self, filename, timeout=LOCK_TIMEOUT, delimiter=","):
"""
Constructs ResultsAggregator.
Parameters
----------
filename : Path
Results file.
timeout : int
Lock acquisition timeout in seconds.
delimiter : str
Delimiter to use for CSV formatting.
"""
self._filename = filename
self._lock_file = self._filename.parent / (self._filename.name + ".lock")
self._timeout = timeout
self._delimiter = delimiter
self._is_node = "batch" in filename.name
@classmethod
def create(cls, output_dir, **kwargs):
"""Create a new instance.
Parameters
----------
output_dir : str
Returns
-------
ResultsAggregator
"""
agg = cls(Path(output_dir) / PROCESSED_RESULTS_FILENAME, **kwargs)
agg.create_files()
return agg
@classmethod
def load(cls, output_dir, **kwargs):
"""Load an instance from an output directory.
Parameters
----------
output_dir : str
Returns
-------
ResultsAggregator
"""
return cls(Path(output_dir) / PROCESSED_RESULTS_FILENAME, **kwargs)
@classmethod
def load_node_results(cls, output_dir, batch_id, **kwargs):
"""Load a per-node instance from an output directory.
Parameters
----------
output_dir : str
batch_id : int
Returns
-------
ResultsAggregator
"""
return cls(Path(output_dir) / RESULTS_DIR / f"results_batch_{batch_id}.csv", **kwargs)
@classmethod
def load_node_results_file(cls, path, **kwargs):
"""Load a per-node instance from an output directory.
Parameters
----------
path : Path
Returns
-------
ResultsAggregator
"""
return cls(path, **kwargs)
@staticmethod
def _get_fields():
return Result._fields
def _do_action_under_lock(self, func, *args, **kwargs):
# Using this instead of FileLock because it will be used across nodes
# on the Lustre filesystem.
lock = SoftFileLock(self._lock_file, timeout=self._timeout)
start = time.time()
try:
lock.acquire(timeout=self._timeout)
except Timeout:
# Picked a default value such that this should not trip. If it does
# trip under normal circumstances then we need to reconsider this.
logger.error(
"Failed to acquire file lock %s within %s seconds", self._lock_file, self._timeout
)
raise
duration = time.time() - start
if duration > 10:
logger.warning("Acquiring ResultsAggregator lock took too long: %s", duration)
try:
return func(*args, **kwargs)
finally:
lock.release()
def create_files(self):
"""Initialize the results file. Should only be called by the parent
process.
"""
self._do_action_under_lock(self._create_files)
def _create_files(self):
with open(self._filename, "w") as f_out:
f_out.write(self._delimiter.join(self._get_fields()))
f_out.write("\n")
@classmethod
def append(cls, output_dir, result, batch_id=None):
"""Append a result to the file.
output_dir : str
result : Result
batch_id : int
"""
if batch_id is None:
aggregator = cls.load(output_dir)
else:
aggregator = cls.load_node_results(output_dir, batch_id)
aggregator.append_result(result)
def append_result(self, result):
"""Append a result to the file.
result : Result
"""
start = time.time()
text = self._delimiter.join([str(getattr(result, x)) for x in self._get_fields()])
self._do_action_under_lock(self._append_result, text)
duration = time.time() - start
if duration > 10:
logger.warning("Appending a result took too long: %s", duration)
def _append_result(self, text):
with open(self._filename, "a") as f_out:
if f_out.tell() == 0:
f_out.write(self._delimiter.join(self._get_fields()))
f_out.write("\n")
f_out.write(text)
f_out.write("\n")
def _append_processed_results(self, results):
assert not self._is_node
with open(self._filename, "a") as f_out:
for result in results:
text = self._delimiter.join([str(getattr(result, x)) for x in self._get_fields()])
f_out.write(text)
f_out.write("\n")
def clear_results_for_resubmission(self, jobs_to_resubmit):
"""Remove jobs that will be resubmitted from the results file.
Parameters
----------
jobs_to_resubmit : set
Job names that will be resubmitted.
"""
results = [x for x in self.get_results() if x.name not in jobs_to_resubmit]
self._write_results(results)
logger.info("Cleared %s results from %s", len(results), self._filename)
def clear_unsuccessful_results(self):
"""Remove failed and canceled results from the results file."""
results = [x for x in self.get_results() if x.return_code == 0]
self._write_results(results)
logger.info("Cleared failed results from %s", self._filename)
def _write_results(self, results):
_results = [serialize_result(x) for x in results]
with open(self._filename, "w") as f_out:
writer = csv.DictWriter(f_out, fieldnames=_results[0].keys())
writer.writeheader()
if results:
writer.writerows(_results)
def get_results(self):
"""Return the current results.
Returns
-------
list
list of Result objects
"""
return self._do_action_under_lock(self._get_all_results)
def get_results_unsafe(self):
"""Return the results. It is up to the caller to ensure that
a lock is not needed.
Returns
-------
list
list of Result objects
"""
return self._get_results()
def _get_all_results(self):
unprocessed_results = list((self._filename.parent / RESULTS_DIR).glob("results*.csv"))
if unprocessed_results:
logger.error("Found unprocessed results: %s", unprocessed_results)
# TODO: Older code included unprocessed results here. Not sure why.
return self._get_results()
def _get_results(self):
with open(self._filename) as f_in:
results = []
reader = csv.DictReader(f_in, delimiter=self._delimiter)
for row in reader:
row["return_code"] = int(row["return_code"])
row["exec_time_s"] = float(row["exec_time_s"])
row["completion_time"] = float(row["completion_time"])
result = deserialize_result(row)
results.append(result)
return results
def move_results(self, func):
"""Move the results to a new location and delete the file.
Parameters
----------
func : function
Returns
-------
list
list of Result
"""
return self._do_action_under_lock(self._move_results, func)
def _move_results(self, func):
results = self._get_results()
func(results)
os.remove(self._filename)
return results
@classmethod
def list_results(cls, output_dir, **kwargs):
"""Return the current results.
Parameters
----------
output_dir : str
Returns
-------
list
list of Result objects
"""
results = cls.load(output_dir, **kwargs)
return results.get_results()
def process_results(self):
"""Move all temp results into the consolidated file, then clear the file.
Returns
-------
list
list of Result objects that are newly completed
"""
assert not self._is_node
return self._do_action_under_lock(self._process_results)
def _get_node_results_files(self):
assert not self._is_node
return list((self._filename.parent / RESULTS_DIR).glob("results_batch_*.csv"))
def _process_results(self):
results = []
for path in self._get_node_results_files():
agg = ResultsAggregator.load_node_results_file(path)
results += agg.move_results(self._append_processed_results)
return results
```
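A sketch of the intended flow (one aggregator for the run plus one per batch). It assumes `Result` fills `completion_time` automatically when constructed with four arguments, as the code above does, and that a plain string is acceptable as the status value; directory and job names are illustrative.
```python
# Sketch of the aggregation flow; directory names come from jade.common.
from pathlib import Path
from jade.common import RESULTS_DIR
from jade.jobs.results_aggregator import ResultsAggregator
from jade.result import Result

output_dir = "output"
Path(output_dir, RESULTS_DIR).mkdir(parents=True, exist_ok=True)

master = ResultsAggregator.create(output_dir)    # writes the processed_results.csv header
ResultsAggregator.append(output_dir, Result("job1", 0, "finished", 1.5), batch_id=0)

completed = master.process_results()             # merge per-batch files into the master file
print([r.name for r in completed])
```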
#### File: jade/models/hpc.py
```python
import enum
from typing import Optional, Union, List
from pydantic import Field, validator, root_validator
from jade.hpc.common import HpcType
from jade.models.base import JadeBaseModel
class SlurmConfig(JadeBaseModel):
"""Defines config options for the SLURM queueing system."""
account: str = Field(
title="account",
description="Project account to use",
)
partition: Optional[str] = Field(
title="partition",
description="HPC partition on which to submit",
default=None,
)
qos: Optional[str] = Field(
title="qos",
description="Set to high to get faster node allocations at twice the cost",
default=None,
)
walltime: Optional[str] = Field(
title="walltime",
description="Maximum time allocated to each node",
default="4:00:00",
)
mem: Optional[str] = Field(
title="mem",
description="Request nodes that have at least this amount of memory",
default=None,
)
tmp: Optional[str] = Field(
title="tmp",
description="Request nodes that have at least this amount of storage scratch space",
default=None,
)
nodes: Optional[int] = Field(
title="nodes",
description="Number of nodes to use for each job",
default=None,
)
ntasks: Optional[int] = Field(
title="ntasks",
description="Number of tasks per job (nodes is not required if this is provided)",
default=None,
)
ntasks_per_node: Optional[int] = Field(
title="ntasks_per_node",
description="Number of tasks per job (max in number of CPUs)",
default=None,
)
@root_validator(pre=True)
def handle_allocation(cls, values: dict) -> dict:
if "allocation" in values:
values["account"] = values.pop("allocation")
return values
@root_validator
def handle_nodes_and_tasks(cls, values: dict) -> dict:
if (
values["nodes"] is None
and values["ntasks"] is None
and values["ntasks_per_node"] is None
):
values["nodes"] = 1
return values
class FakeHpcConfig(JadeBaseModel):
"""Defines config options for the fake queueing system."""
# Keep this required so that Pydantic can differentiate the models.
walltime: str = Field(
title="walltime",
description="Maximum time allocated to each node",
)
class LocalHpcConfig(JadeBaseModel):
"""Defines config options when there is no HPC."""
class HpcConfig(JadeBaseModel):
"""Defines config options for the HPC."""
hpc_type: HpcType = Field(
title="hpc_type",
description="Type of HPC queueing system (such as 'slurm')",
)
job_prefix: Optional[str] = Field(
title="job_prefix",
description="Prefix added to each HPC job name",
default="job",
)
hpc: Union[SlurmConfig, FakeHpcConfig, LocalHpcConfig] = Field(
title="hpc",
description="Interface-specific config options",
)
@validator("hpc", pre=True)
def assign_hpc(cls, value, values):
if isinstance(value, JadeBaseModel):
return value
if values["hpc_type"] == HpcType.SLURM:
return SlurmConfig(**value)
elif values["hpc_type"] == HpcType.FAKE:
return FakeHpcConfig(**value)
elif values["hpc_type"] == HpcType.LOCAL:
return LocalHpcConfig()
raise ValueError(f"Unsupported: {values['hpc_type']}")
```
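A small sketch of constructing the config above, assuming `HpcType.SLURM` corresponds to the string `"slurm"`; the legacy `allocation` key is remapped to `account` by the pre-validator, and `nodes` defaults to 1 when no task counts are given.
```python
# Sketch: build an HpcConfig for SLURM.
from jade.models.hpc import HpcConfig

config = HpcConfig(
    hpc_type="slurm",
    job_prefix="demo",
    hpc={"allocation": "my_project", "walltime": "1:00:00"},
)
print(config.hpc.account)   # my_project
print(config.hpc.nodes)     # 1
```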
#### File: jade/models/__init__.py
```python
from pydantic import BaseModel
from jade.models.base import JadeBaseModel
from jade.models.hpc import HpcConfig, SlurmConfig, FakeHpcConfig, LocalHpcConfig
from jade.models.singularity import SingularityParams
from jade.models.submitter_params import SubmitterParams
from jade.models.submission_group import SubmissionGroup
from jade.models.jobs import Job, JobState, JobStatus
from jade.models.cluster_config import ClusterConfig
from jade.models.pipeline import PipelineConfig, PipelineStage
def get_model_defaults(model_class: BaseModel):
"""Return the default values for fields in a Pydantic BaseModel.
If a field doesn't have a default then return None.
Default values may also be None.
Returns
-------
dict
"""
return {x: y.get("default") for x, y in model_class.schema()["properties"].items()}
```
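For example, the helper can be used to inspect a model's defaults via its JSON schema.
```python
# Sketch: read default field values off a Pydantic model.
from jade.models import SlurmConfig, get_model_defaults

defaults = get_model_defaults(SlurmConfig)
print(defaults["walltime"])   # 4:00:00
print(defaults["account"])    # None (required field, so no default)
```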
#### File: jade/utils/custom_click_options.py
```python
import click
class CustomOptions(click.Option):
"""Custom option class extending base click option"""
def __init__(self, *args, **kwargs):
if "allowed_values" in kwargs:
self.allowed_values = kwargs.pop("allowed_values")
if "not_required_if" in kwargs:
self.not_required_if = kwargs.pop("not_required_if")
if "required_if" in kwargs:
self.required_if = kwargs.pop("required_if")
super(CustomOptions, self).__init__(*args, **kwargs)
def handle_parse_result(self, ctx, opts, args):
self.handle_custom_options(opts)
return super(CustomOptions, self).handle_parse_result(ctx, opts, args)
def handle_custom_options(self, opts):
"""Handles custom options that have been created"""
if hasattr(self, "allowed_values") and isinstance(self.allowed_values, list):
if self.name in opts and opts[self.name] not in self.allowed_values:
raise ValueError(
"Invalid value given, only allowed values are " + f"{self.allowed_values}"
)
if hasattr(self, "not_required_if"):
not_required_if_present = self.not_required_if in opts.keys()
if not_required_if_present:
self.required = False
self.prompt = None
if hasattr(self, "required_if"):
required_if_present = self.required_if in opts.keys()
if required_if_present and opts[self.required_if] is not None:
self.required = True
```
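A hedged sketch of wiring the option class into a click command; the option names and values are illustrative.
```python
# Sketch: restrict --mode to two values and drop the --config-file requirement
# whenever --mode is supplied explicitly on the command line.
import click
from jade.utils.custom_click_options import CustomOptions

@click.command()
@click.option("--mode", cls=CustomOptions, allowed_values=["fast", "safe"], default="safe")
@click.option("--config-file", cls=CustomOptions, not_required_if="mode", required=True)
def run(mode, config_file):
    click.echo(f"mode={mode} config={config_file}")

if __name__ == "__main__":
    run()
```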
#### File: jgu2/jade/setup.py
```python
import os
import logging
from codecs import open
from pathlib import Path
from setuptools import setup, find_packages
logger = logging.getLogger(__name__)
def read_lines(filename):
return Path(filename).read_text().splitlines()
here = os.path.abspath(os.path.dirname(__file__))
with open("README.md", encoding="utf-8") as f:
readme = f.read()
with open(os.path.join(here, "jade", "version.py"), encoding="utf-8") as f:
version = f.read()
version = version.split()[2].strip('"').strip("'")
# TODO: Current code breaks with statsmodels 0.13.
demo_requires = ["matplotlib", "statsmodels==0.12"]
dataframe_utils_requires = ["tables", "pyarrow"]
dev_requires = read_lines("dev-requirements.txt") + demo_requires + dataframe_utils_requires
setup(
name="NREL-jade",
version=version,
description="Provides HPC workflow automation services",
long_description=readme,
long_description_content_type="text/markdown",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
url="https://github.com./NREL/jade",
packages=find_packages(),
package_dir={"jade": "jade"},
entry_points={
"console_scripts": [
"jade=jade.cli.jade:cli",
"jade-internal=jade.cli.jade_internal:cli",
],
},
include_package_data=True,
license="BSD license",
zip_safe=False,
keywords=["jade", "hpc", "workflow"],
python_requires=">=3.7",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Programming Language :: Python :: 3.7",
],
test_suite="tests",
extras_require={
"dev": dev_requires,
"demo": demo_requires,
"dataframe_utils": dataframe_utils_requires,
},
install_requires=read_lines("requirements.txt"),
)
```
#### File: integration/cli/test_stats.py
```python
import os
import shutil
from jade.utils.subprocess_manager import run_command
def test_stats__plot(example_output):
path = os.path.join(example_output, "stats")
try:
ret = run_command(f"jade stats plot -o {example_output}")
assert ret == 0
for stat in ("Cpu", "Disk", "Memory", "Network"):
filename = os.path.join(path, stat + "StatsViewer__resource_monitor_batch_0.html")
assert os.path.exists(filename)
finally:
if os.path.exists(path):
shutil.rmtree(path)
def test_stats__show(example_output):
output = {}
ret = run_command(f"jade stats show -o {example_output}", output)
assert ret == 0
assert len(output["stdout"]) > 0
for text in ("Network stat", "Memory stat", "Disk stat", "CPU stat"):
assert text in output["stdout"]
def test_stats__bytes_consumed(example_output):
output = {}
ret = run_command(f"jade stats bytes-consumed -o {example_output}", output)
assert ret == 0
assert len(output["stdout"]) > 0
ret = run_command(f"jade stats bytes-consumed --no-human-readable -o {example_output}", output)
assert ret == 0
bytes_consumed = int(output["stdout"].strip())
assert bytes_consumed > 0
def test_stats__exec_time(example_output):
output = {}
ret = run_command(f"jade stats exec-time -o {example_output}", output)
assert ret == 0
assert len(output["stdout"]) > 0
ret = run_command(f"jade stats exec-time --no-human-readable -o {example_output}", output)
assert ret == 0
exec_time = float(output["stdout"].strip())
assert exec_time > 0
```
#### File: tests/unit/test_events.py
```python
import os
from jade.events import (
StructuredLogEvent,
StructuredErrorLogEvent,
EventsSummary,
EVENT_NAME_UNHANDLED_ERROR,
)
def test_structured_event__create():
"""Test class initialization and methods"""
event = StructuredLogEvent(
source="job_1",
category="ParameterError",
name="test-error",
message="Something happens",
country="Canada",
foo="foo info",
bar="bar info",
)
assert "timestamp" in str(event)
assert "source" in str(event)
assert "foo" in event.data
assert "bar" in event.data
def test_structured_error_event__create():
"""Test class initialization and methods"""
try:
raise Exception("test")
except Exception:
event = StructuredErrorLogEvent(
source="job_1",
category="ParameterError",
name="test-error",
message="Something happens",
)
assert "timestamp" in str(event)
assert "error" in event.data
assert "filename" in event.data
assert "lineno" in event.data
def test_event_summary__show_events(test_data_dir, capsys):
"""Should print tabular events in terminal"""
event_dir = os.path.join(test_data_dir, "events", "job-outputs", "australia")
event_summary = EventsSummary(event_dir)
event_summary.show_events(EVENT_NAME_UNHANDLED_ERROR)
captured = capsys.readouterr()
assert "Exception" in captured.out
assert "australia" in captured.out
assert "united_states" not in captured.out
``` |
{
"source": "jguedez/charmcraft",
"score": 2
} |
#### File: charmcraft/charmcraft/linters.py
```python
import ast
import os
import pathlib
import shlex
from collections import namedtuple
from typing import List, Generator, Union
import yaml
from charmcraft import config
from charmcraft.metadata import parse_metadata_yaml
CheckType = namedtuple("CheckType", "attribute lint")(attribute="attribute", lint="lint")
# result information from each checker/linter
CheckResult = namedtuple("CheckResult", "name result url check_type text")
# generic constant for common results
UNKNOWN = "unknown"
IGNORED = "ignored"
WARNINGS = "warnings"
ERRORS = "errors"
FATAL = "fatal"
OK = "ok"
def check_dispatch_with_python_entrypoint(
basedir: pathlib.Path,
) -> Union[pathlib.Path, None]:
"""Verify if the charm has a dispatch file pointing to a Python entrypoint.
:returns: the entrypoint path if all succeeds, None otherwise.
"""
# get the entrypoint from the last useful dispatch line
dispatch = basedir / "dispatch"
entrypoint_str = ""
try:
with dispatch.open("rt", encoding="utf8") as fh:
last_line = None
for line in fh:
if line.strip():
last_line = line
if last_line:
entrypoint_str = shlex.split(last_line)[-1]
except (IOError, UnicodeDecodeError):
return
entrypoint = basedir / entrypoint_str
if entrypoint.suffix == ".py" and os.access(entrypoint, os.X_OK):
return entrypoint
class Language:
"""Check the language used to write the charm.
Currently only Python is detected, if the following checks are true:
- the charm has a text dispatch with a python call
- the charm has a `.py` entry point
- the entry point file is executable
"""
check_type = CheckType.attribute
name = "language"
url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--language"
text = "The charm is written with Python."
# different result constants
Result = namedtuple("Result", "python unknown")(python="python", unknown=UNKNOWN)
def run(self, basedir: pathlib.Path) -> str:
"""Run the proper verifications."""
python_entrypoint = check_dispatch_with_python_entrypoint(basedir)
return self.Result.unknown if python_entrypoint is None else self.Result.python
class Framework:
"""Check the framework the charm is based on.
Currently it detects if the Operator Framework is used, if...
- the language attribute is set to python
- the charm contains venv/ops
- the charm imports ops in the entry point.
...or the Reactive Framework is used, if the charm...
- has a metadata.yaml with "name" in it
- has a reactive/<name>.py file that imports "charms.reactive"
- has a file name that starts with "charms.reactive-" inside the "wheelhouse" directory
"""
check_type = CheckType.attribute
name = "framework"
url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--framework"
# different result constants
Result = namedtuple("Result", "operator reactive unknown")(
operator="operator", reactive="reactive", unknown=UNKNOWN
)
# different texts to be exposed as `text` (see the property below)
result_texts = {
Result.operator: "The charm is based on the Operator Framework.",
Result.reactive: "The charm is based on the Reactive Framework.",
Result.unknown: "The charm is not based on any known Framework.",
}
def __init__(self):
self.result = None
@property
def text(self):
"""Return a text in function of the result state."""
if self.result is None:
return None
return self.result_texts[self.result]
def _get_imports(self, filepath: pathlib.Path) -> Generator[List[str], None, None]:
"""Parse a Python filepath and yield its imports.
If the file does not exist or cannot be parsed, return empty. Otherwise
return the name for each imported module, split by possible dots.
"""
if not os.access(filepath, os.R_OK):
return
try:
parsed = ast.parse(filepath.read_bytes())
except SyntaxError:
return
for node in ast.walk(parsed):
if isinstance(node, ast.Import):
for name in node.names:
yield name.name.split(".")
elif isinstance(node, ast.ImportFrom):
yield node.module.split(".")
def _check_operator(self, basedir: pathlib.Path) -> bool:
"""Detect if the Operator Framework is used."""
python_entrypoint = check_dispatch_with_python_entrypoint(basedir)
if python_entrypoint is None:
return False
opsdir = basedir / "venv" / "ops"
if not opsdir.exists() or not opsdir.is_dir():
return False
for import_parts in self._get_imports(python_entrypoint):
if import_parts[0] == "ops":
return True
return False
def _check_reactive(self, basedir: pathlib.Path) -> bool:
"""Detect if the Reactive Framework is used."""
try:
metadata = parse_metadata_yaml(basedir)
except Exception:
# file not found, corrupted, or mandatory "name" not present
return False
wheelhouse_dir = basedir / "wheelhouse"
if not wheelhouse_dir.exists():
return False
if not any(f.name.startswith("charms.reactive-") for f in wheelhouse_dir.iterdir()):
return False
module_basename = metadata.name.replace("-", "_")
entrypoint = basedir / "reactive" / f"{module_basename}.py"
for import_parts in self._get_imports(entrypoint):
if import_parts[0] == "charms" and import_parts[1] == "reactive":
return True
return False
def run(self, basedir: pathlib.Path) -> str:
"""Run the proper verifications."""
if self._check_operator(basedir):
result = self.Result.operator
elif self._check_reactive(basedir):
result = self.Result.reactive
else:
result = self.Result.unknown
self.result = result
return result
class JujuMetadata:
"""Check that the metadata.yaml file exists and is sane.
The charm is considered to have a valid metadata if the following checks are true:
- the metadata.yaml is present
- it is a valid YAML file
- it has at least the following fields: name, summary, and description
"""
check_type = CheckType.lint
name = "metadata"
url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--metadata"
text = "Problems found with metadata.yaml file."
# different result constants
Result = namedtuple("Result", "ok errors")(ok=OK, errors=ERRORS)
def run(self, basedir: pathlib.Path) -> str:
"""Run the proper verifications."""
try:
metadata = parse_metadata_yaml(basedir)
except Exception:
# file not found, corrupted, or mandatory "name" not present
return self.Result.errors
# no need to verify "name" as it's mandatory in the metadata parsing
if metadata.summary and metadata.description:
result = self.Result.ok
else:
result = self.Result.errors
return result
class JujuActions:
"""Check that the actions.yaml file is valid YAML if it exists."""
check_type = CheckType.lint
name = "juju-actions"
url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--juju-actions"
text = "The actions.yaml file is not a valid YAML file."
# different result constants
Result = namedtuple("Result", "ok errors")(ok=OK, errors=ERRORS)
def run(self, basedir: pathlib.Path) -> str:
"""Run the proper verifications."""
filepath = basedir / "actions.yaml"
if not filepath.exists():
# it's optional
return self.Result.ok
try:
with filepath.open("rt", encoding="utf8") as fh:
yaml.safe_load(fh)
except Exception:
return self.Result.errors
return self.Result.ok
class JujuConfig:
"""Check that the config.yaml file (if it exists) is valid.
The file is considered valid if the following checks are true:
- has an 'options' key
- it is a dictionary
- each item inside has the mandatory 'type' key
"""
check_type = CheckType.lint
name = "juju-config"
url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--juju-config"
# different result constants
Result = namedtuple("Result", "ok errors")(ok=OK, errors=ERRORS)
def __init__(self):
self.text = None
def run(self, basedir: pathlib.Path) -> str:
"""Run the proper verifications."""
filepath = basedir / "config.yaml"
if not filepath.exists():
# it's optional
return self.Result.ok
try:
with filepath.open("rt", encoding="utf8") as fh:
content = yaml.safe_load(fh)
except Exception:
self.text = "The config.yaml file is not a valid YAML file."
return self.Result.errors
options = content.get("options")
if not isinstance(options, dict):
self.text = "Error in config.yaml: must have an 'options' dictionary."
return self.Result.errors
for value in options.values():
if "type" not in value:
self.text = "Error in config.yaml: items under 'options' must have a 'type' key."
return self.Result.errors
return self.Result.ok
# all checkers to run; the order here is important, as some checkers depend on the
# results from others
CHECKERS = [
Language,
JujuActions,
JujuConfig,
JujuMetadata,
Framework,
]
def analyze(
config: config.Config,
basedir: pathlib.Path,
*,
override_ignore_config: bool = False,
) -> List[CheckResult]:
"""Run all checkers and linters."""
all_results = []
for cls in CHECKERS:
# do not run the ignored ones
if cls.check_type == CheckType.attribute:
ignore_list = config.analysis.ignore.attributes
else:
ignore_list = config.analysis.ignore.linters
if cls.name in ignore_list and not override_ignore_config:
all_results.append(
CheckResult(
check_type=cls.check_type,
name=cls.name,
result=IGNORED,
url=cls.url,
text="",
)
)
continue
checker = cls()
try:
result = checker.run(basedir)
except Exception:
result = UNKNOWN if checker.check_type == CheckType.attribute else FATAL
all_results.append(
CheckResult(
check_type=checker.check_type,
name=checker.name,
url=checker.url,
text=checker.text,
result=result,
)
)
return all_results
``` |
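The checkers can also be run individually; a small sketch against a hypothetical unpacked charm directory.
```python
# Sketch: run two checkers directly (the directory path is illustrative).
import pathlib
from charmcraft.linters import Language, JujuMetadata

charm_dir = pathlib.Path("build/my-charm")
print(Language().run(charm_dir))       # "python" or "unknown"
print(JujuMetadata().run(charm_dir))   # "ok" or "errors"
```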
{
"source": "J-Guenther/Climate_Chart",
"score": 3
} |
#### File: J-Guenther/Climate_Chart/climate_c.py
```python
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.rcParams['hatch.linewidth'] = 4 # previous svg hatch linewidth
from os.path import join
def MakeDataframe(temp, prec):
df = pd.read_csv(temp, sep=";", decimal=",")
dfprec = pd.read_csv(prec, sep=";", decimal=",")
df['precipitation'] = dfprec.mm
df.drop(df.columns[[0,1,6]], axis=1, inplace=True)
df = df.rename(columns={'Datum': 'date', 'tas':'tempMean','tasmin':'tempMin','tasmax': 'tempMax'})
return df
class ClimateChart:
def __init__(self, dataframe=None, climateChart=None):
if dataframe is not None and climateChart is None:
self.data = dataframe
self.data['date'] = pd.to_datetime(self.data['date'], format='%Y-%m-%d', errors='coerce')
self.data = self.data.set_index('date')
self.data['tempMean'] = pd.to_numeric(self.data['tempMean'])
self.data['tempMin'] = pd.to_numeric(self.data['tempMin'])
self.data['tempMax'] = pd.to_numeric(self.data['tempMax'])
self.temperature = self.data.groupby([(self.data.index.month)]).mean()
self.precipitation = self.data.precipitation.groupby([(self.data.index.year),(self.data.index.month)]).sum()
self.precipitation = self.precipitation.groupby(level=[1]).mean()
elif dataframe is None and climateChart is not None:
self.temperature = climateChart[0]
self.precipitation = climateChart[1]
else:
print("Error!")
def GetTemperature(self):
return self.temperature.copy()
def GetPrecipitation(self):
return self.precipitation.copy()
def Plot(self, title, subtitle, fontsize, filepath = None):
# Create Canvas
# fig = plt.figure(frameon=True)
# fig.set_size_inches(10,6)
# ax = plt.Axes(fig, [0,0,1,1])
# fig.add_axes(ax)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10,6), subplot_kw={'adjustable': 'box-forced'})
fig.dpi = 75
fig.suptitle(title , size=fontsize+4, weight='bold', horizontalalignment="center")
# Define x-axsis
labels = list(["J","F","M","A","M","J","J","A","S","O","N","D"])
linewidth = 4
# Plot Temperature
self.tempMean_scaled = [x if x < 50 else self.scale(x, (50, 100), (50,75)) for x in self.temperature.tempMean]
self.tempMin_scaled = [x if x < 50 else self.scale(x, (50, 100), (50,75)) for x in self.temperature.tempMin]
self.tempMax_scaled = [x if x < 50 else self.scale(x, (50, 100), (50,75)) for x in self.temperature.tempMax]
self.tempMean_scaled = np.array(self.tempMean_scaled)
ax.plot(self.tempMean_scaled, linewidth=linewidth, color="#fdae61", label="Mean Temp.")
ax.plot(self.tempMin_scaled, linewidth=linewidth, color='#1a9850', label="Min. Temp.")
ax.plot(self.tempMax_scaled, linewidth=linewidth, color='#b2182b', label="Max. Temp.")
# Set Temperature X-Axis
ax.set_xticks(np.arange(0, 12, 1))
ax.set_xticklabels(labels)
self.precipitation_scaled_for_fill = [self.scale(x, (0, 99), (0,49)) if x < 100 else self.scale(x, (100, 600), (50,75)) for x in self.precipitation]
#Cut off Data
self.precipitation_scaled_for_fill = [x if x < 75 else 75 for x in self.precipitation_scaled_for_fill]
self.precipitation_scaled_for_fill = np.array(self.precipitation_scaled_for_fill)
ax.fill_between(np.arange(0,12,1), self.tempMean_scaled, \
self.precipitation_scaled_for_fill, \
hatch = "|", edgecolor= "#FFFFFF", facecolor="#2166ac", linewidth=0, \
where= self.tempMean_scaled <= self.precipitation_scaled_for_fill, interpolate=True, label="Humid")
ax.fill_between(np.arange(0,12,1), self.tempMean_scaled, \
self.precipitation_scaled_for_fill, \
hatch = ".", edgecolor= "#e34a33", facecolor="#FFFFFF", linewidth=0, \
where= self.tempMean_scaled >= self.precipitation_scaled_for_fill, interpolate=True, label="Arid")
# Format Temperature Axis
ax.set_ylabel("Temperatur in °c", fontsize = fontsize)
ax.set_xlabel('Monate', fontsize = fontsize)
ax.set_yticks([-30, -20, -10, 0, 10, 20, 30, 40, 50, 55, 60, 65, 70 ,75])
ax.set_yticklabels(["-30","-20","-10","0","10","20","30","40","50","","","","",""])
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(fontsize)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(fontsize)
ax.set_title(subtitle, y = 1, fontsize=fontsize)
# Make a new axis for Precipitation
ax2 = ax.twinx()
self.precipitation_scaled = [x if x < 100 else self.scale(x, (100, 600), (100,150)) for x in self.precipitation]
self.precipitation_scaled = [x if x < 150 else 150 for x in self.precipitation_scaled]
print(self.precipitation_scaled)
#ax2.bar(range(0,12,1), self.precipitation_scaled, color='#2166ac')
ax2.plot(self.precipitation_scaled, linewidth=linewidth, color="#2166ac", label="Precipitation")
# Set X-Axis
ax2.set_xticks(np.arange(0,12,1))
ax2.set_yticks([-60, -40, -20, 0,20,40,60,80,100,110,120,130,140,150])
ax2.set_yticklabels(["","","","0","20","40","60","80","100","200","300","400","500","600"])
# Set drawing order
ax.set_zorder(ax2.get_zorder()+1) # put ax in front of ax2
ax.patch.set_visible(False) # hide the 'canvas'
# Format second axis
ax2.set_ylabel('Niederschlag in mm', fontsize = fontsize)
ax2.tick_params(labelsize=fontsize)
self.align_yaxis(ax, 10, ax2, 20)
ax.set_xlim(0,11)
fig.legend(loc="center", mode="expand", ncol=6, bbox_to_anchor=(0.06, -0.13, .8,.3), shadow=False, frameon=False)
plt.show()
if filepath is not None:
fig.savefig(join(filepath, title + '.png'), bbox_inches='tight')
def scale(self, val, src, dst):
"""
Scale the given value from the scale of src to the scale of dst.
"""
return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]
def align_yaxis(self, ax1, v1, ax2, v2):
"""adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
inv = ax2.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
miny, maxy = ax2.get_ylim()
ax2.set_ylim(miny+dy, maxy+dy)
def __sub__(self, other):
newTemperature = self.temperature - other.GetTemperature()
newPrecipitation = self.precipitation - other.GetPrecipitation()
return ClimateChart(dataframe=None, climateChart=(newTemperature, newPrecipitation))
``` |
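A usage sketch for the module above; the CSV layout (semicolon-separated, decimal comma, with `Datum`/`tas`/`tasmin`/`tasmax` temperature columns and an `mm` precipitation column) is inferred from `MakeDataframe`, and the file names are illustrative.
```python
# Sketch: build a climate chart from two CSV exports and plot it.
from climate_c import MakeDataframe, ClimateChart

df = MakeDataframe("temperature.csv", "precipitation.csv")
chart = ClimateChart(dataframe=df)
chart.Plot(title="Station X", subtitle="1981-2010", fontsize=14)

# Two charts can be subtracted to compare periods or scenarios:
# diff = chart - ClimateChart(dataframe=MakeDataframe("temp_b.csv", "prec_b.csv"))
```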
{
"source": "jguev/instant-insanity",
"score": 3
} |
#### File: commonmark/render/rst.py
```python
from __future__ import unicode_literals
from commonmark.render.renderer import Renderer
class ReStructuredTextRenderer(Renderer):
"""
Render reStructuredText from Markdown
Example:
.. code:: python
import commonmark
parser = commonmark.Parser()
ast = parser.parse('Hello `inline code` example')
renderer = commonmark.ReStructuredTextRenderer()
rst = renderer.render(ast)
print(rst) # Hello ``inline code`` example
"""
def __init__(self, indent_char=' '):
self.indent_char = indent_char
self.indent_length = 0
def lit(self, s):
if s == '\n':
indent = '' # Avoid whitespace if we're just adding a newline
elif self.last_out != '\n':
indent = '' # Don't indent if we're in the middle of a line
else:
indent = self.indent_char * self.indent_length
return super(ReStructuredTextRenderer, self).lit(indent + s)
def cr(self):
self.lit('\n')
def indent_lines(self, literal, indent_length=4):
indent = self.indent_char * indent_length
new_lines = []
for line in literal.splitlines():
new_lines.append(indent + line)
return '\n'.join(new_lines)
# Nodes
def document(self, node, entering):
pass
def softbreak(self, node, entering):
self.cr()
def linebreak(self, node, entering):
self.cr()
self.cr()
def text(self, node, entering):
self.out(node.literal)
def emph(self, node, entering):
self.out('*')
def strong(self, node, entering):
self.out('**')
def paragraph(self, node, entering):
if node.parent.t == 'item':
pass
else:
self.cr()
def link(self, node, entering):
if entering:
self.out('`')
else:
self.out(' <%s>`_' % node.destination)
def image(self, node, entering):
directive = '.. image:: ' + node.destination
if entering:
self.out(directive)
self.cr()
self.indent_length += 4
self.out(':alt: ')
else:
self.indent_length -= 4
def code(self, node, entering):
self.out('``')
self.out(node.literal)
self.out('``')
def code_block(self, node, entering):
directive = '.. code::'
language_name = None
info_words = node.info.split() if node.info else []
if len(info_words) > 0 and len(info_words[0]) > 0:
language_name = info_words[0]
if language_name:
directive += ' ' + language_name
self.cr()
self.out(directive)
self.cr()
self.cr()
self.out(self.indent_lines(node.literal))
self.cr()
def list(self, node, entering):
if entering:
self.cr()
def item(self, node, entering):
tagname = '*' if node.list_data['type'] == 'bullet' else '#.'
if entering:
self.out(tagname + ' ')
else:
self.cr()
def block_quote(self, node, entering):
if entering:
self.indent_length += 4
else:
self.indent_length -= 4
def heading(self, node, entering):
heading_chars = [
'#',
'*',
'=',
'-',
'^',
'"'
]
try:
heading_char = heading_chars[node.level-1]
except IndexError:
# Default to the last level if we're in too deep
heading_char = heading_chars[-1]
heading_length = len(node.first_child.literal)
banner = heading_char * heading_length
if entering:
self.cr()
else:
self.cr()
self.out(banner)
self.cr()
``` |
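A complementary sketch using the same public API shown in the class docstring, this time with a heading and a fenced code block.
```python
# Sketch: render a heading plus a fenced code block to reStructuredText.
import commonmark

md = "# Title\n\nSome *emphasis*.\n\n```python\nprint('hi')\n```\n"
ast = commonmark.Parser().parse(md)
print(commonmark.ReStructuredTextRenderer().render(ast))
```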
{
"source": "jguhh/Prostheoinf",
"score": 3
} |
#### File: Prostheoinf/localsearch/maximum_cut.py
```python
import networkx as nx
from localsearch.local_search import NeighborhoodRelation
from typing import FrozenSet, List, Tuple, Optional
from abc import ABC
from itertools import combinations
class MaximumCutRelation(NeighborhoodRelation, ABC):
def __init__(self, p1_symbol: str, p2_symbol: str) -> None:
super().__init__(nx.Graph)
self.p1_symbol = p1_symbol
self.p2_symbol = p2_symbol
def partition_symbols(self):
return self.p1_symbol, self.p2_symbol
class SingleFlipRelation(MaximumCutRelation):
"""
The Single-Flip-Relation from section 4.1. Generates new instances by moving
one vertex A->B or B->A with A != {} and B != {}.
NOTE: Implemented through KFlipRelation with k=1. It is redundant,
    but left here for clarity and conformance with the paper of the project.
"""
def __init__(self, p1_symbol: str, p2_symbol: str) -> None:
super().__init__(p1_symbol, p2_symbol)
self.kflip = KFlipRelation(1, p1_symbol, p2_symbol)
def neighbors(self, instance: nx.Graph, exclude_marked: Optional[bool] = False) -> FrozenSet[nx.Graph]:
return self.kflip.neighbors(instance, exclude_marked)
class KFlipRelation(MaximumCutRelation):
"""
The K-Flip-Relation from section 4.2. Generates new instances by moving
at most K vertices A->B or B->A with A != {} and B != {}.
"""
def __init__(self, k: int, p1_symbol: str, p2_symbol: str) -> None:
super().__init__(p1_symbol, p2_symbol)
self.k = k
self.excluded = []
def neighbors(self, instance: nx.Graph, exclude_marked: bool = False) -> FrozenSet[nx.Graph]:
p1 = MaximumCut.split_partition(instance, self.p1_symbol, exclude_marked)
p2 = MaximumCut.split_partition(instance, self.p2_symbol, exclude_marked)
neighborhood = []
for k in range(self.k):
neighborhood.extend(self.flip(p1, p2, k + 1))
graph_neighborhood = []
for solution in neighborhood:
graph_neighborhood.append(self._partition_graph(instance, solution))
return graph_neighborhood
def flip(self, partition1: List[str], partition2: List[str], k: int) -> List[Tuple[List[str], List[str]]]:
flipped = []
flipped.extend(self.half_flip(partition1, partition2, k))
flipped.extend(self.half_flip(partition2, partition1, k, reverse=True))
return flipped
def half_flip(self,
delete: List[str],
append: List[str], k: int,
reverse: bool = False,) -> List[Optional[Tuple[List[str], List[str]]]]:
half_flipped = []
if len(delete) > k:
p1_powerset = combinations(delete, k)
for subset in p1_powerset:
new_part1 = [node for node in delete if node not in set(subset)]
new_part2 = append.copy()
new_part2.extend(list(subset))
if not reverse:
half_flipped.append((
new_part1,
new_part2
))
else:
half_flipped.append((
new_part2,
new_part1
))
return half_flipped
def _partition_graph(self, graph: nx.Graph, mapping) -> List[nx.Graph]:
partitioned_graph = graph.copy()
for node in partitioned_graph.nodes:
partitioned_graph.nodes[node]['partition'] = self.p1_symbol if node in mapping[0] else self.p2_symbol
return partitioned_graph
# %%
class KernighanLinHeuristic(MaximumCutRelation):
def __init__(self, p1_symbol: str, p2_symbol: str, cost_fn) -> None:
super().__init__(p1_symbol, p2_symbol)
self.single_flip = SingleFlipRelation(self.p1_symbol, self.p2_symbol)
self.cost_fn = cost_fn
def neighbors(self, instance: nx.Graph) -> FrozenSet[nx.Graph]:
for node in instance.nodes:
instance.nodes[node]['marked'] = False
neighborhood = []
current_instance = instance
while True:
sf_neighborhood = self.single_flip.neighbors(current_instance, exclude_marked=True)
max_val, max_sol = -1, None
for solution in sf_neighborhood:
val = self.cost_fn(solution, self.p1_symbol, self.p2_symbol)
if val > max_val:
max_val, max_sol = val, solution
neighborhood.append(max_sol)
current_instance = self._mark_differing_nodes(current_instance, max_sol)
marked_values = [current_instance.nodes[node]['marked'] for node in current_instance.nodes]
if sum(marked_values) == len(current_instance.nodes) - 2:
break
return neighborhood
def _mark_differing_nodes(self, original: nx.Graph, differing: nx.Graph):
for orig, diff in zip(
sorted(original.nodes),
sorted(differing.nodes)
):
if original.nodes[orig]['partition'] != differing.nodes[diff]['partition']:
differing.nodes[diff]['marked'] = True
return differing
#%%
class MaximumCut:
@staticmethod
def split_partition(graph: nx.Graph, pname: str, exclude_marked: bool) -> List[str]:
return [node for (node, data) in graph.nodes(data=True)\
if data['partition'] == pname and not (exclude_marked and data['marked'])]
@staticmethod
def cost(graph: nx.Graph, p1symbol: str, p2symbol: str) -> float:
p1 = MaximumCut.split_partition(graph, p1symbol, False)
p2 = MaximumCut.split_partition(graph, p2symbol, False)
weight_total = 0
for v1, v2, data in graph.edges(data=True):
if (v1 in p1 and v2 in p2) or \
(v1 in p2 and v2 in p1):
weight_total += data["weight"]
return float(weight_total)
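# Illustrative usage sketch (an assumption, not part of the original module):
# builds a tiny weighted graph with an initial partition, enumerates the
# 1-flip neighborhood and scores each neighbor with MaximumCut.cost.
if __name__ == '__main__':
    g = nx.Graph()
    g.add_edge('a', 'b', weight=2)
    g.add_edge('b', 'c', weight=1)
    g.add_edge('a', 'c', weight=3)
    for node, part in zip(g.nodes, ['A', 'A', 'B']):
        g.nodes[node]['partition'] = part
        g.nodes[node]['marked'] = False
    relation = KFlipRelation(1, 'A', 'B')
    for neighbor in relation.neighbors(g):
        print(MaximumCut.cost(neighbor, 'A', 'B'))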
``` |
{
"source": "jguhlin/funannotate",
"score": 2
} |
#### File: funannotate/funannotate/train.py
```python
import sys
import os
import subprocess
import shutil
import argparse
from Bio import SeqIO
import funannotate.library as lib
from natsort import natsorted
from funannotate.interlap import InterLap
from collections import defaultdict
from Bio.SeqIO.FastaIO import SimpleFastaParser
from Bio.SeqIO.QualityIO import FastqGeneralIterator
def runTrimmomaticPE(left, right, cpus=1):
'''
function is wrapper for Trinity trimmomatic
'''
    # create trimmomatic output folder inside tmpdir
folder = os.path.join(tmpdir, 'trimmomatic')
if not os.path.isdir(folder):
os.makedirs(folder)
lib.log.info("Adapter and Quality trimming PE reads with Trimmomatic")
left_paired = os.path.join(folder, 'trimmed_left.fastq')
left_single = os.path.join(folder, 'trimmed_left.unpaired.fastq')
right_paired = os.path.join(folder, 'trimmed_right.fastq')
right_single = os.path.join(folder, 'trimmed_right.unpaired.fastq')
cmd = ['trimmomatic', 'PE', '-threads', str(cpus), '-phred33',
left, right, left_paired, left_single, right_paired, right_single,
'ILLUMINACLIP:' +
os.path.join(parentdir, 'config', 'TruSeq3-PE.fa')+':2:30:10',
'SLIDINGWINDOW:4:5', 'LEADING:5', 'TRAILING:5', 'MINLEN:25']
lib.runSubprocess(cmd, '.', lib.log)
for x in [left_paired, left_single, right_paired, right_single]:
lib.Fzip_inplace(x, cpus)
trim_left = os.path.join(folder, 'trimmed_left.fastq.gz')
trim_right = os.path.join(folder, 'trimmed_right.fastq.gz')
return trim_left, trim_right
def runTrimmomaticSE(reads, cpus=1):
'''
function is wrapper for Trinity trimmomatic
'''
    # create trimmomatic output folder inside tmpdir
folder = os.path.join(tmpdir, 'trimmomatic')
if not os.path.isdir(folder):
os.makedirs(folder)
lib.log.info("Adapter and Quality trimming SE reads with Trimmomatic")
output = os.path.join(folder, 'trimmed_single.fastq')
cmd = ['trimmomatic', 'SE', '-threads', str(cpus), '-phred33',
reads, output, 'ILLUMINACLIP:' +
os.path.join(parentdir, 'config', 'TruSeq3-SE.fa')+':2:30:10',
'SLIDINGWINDOW:4:5', 'LEADING:5', 'TRAILING:5', 'MINLEN:25']
lib.runSubprocess(cmd, '.', lib.log)
lib.Fzip_inplace(output, cpus)
trim_single = os.path.join(folder, 'trimmed_single.fastq.gz')
return trim_single
def runNormalization(readTuple, memory, min_coverage=5, coverage=50, cpus=1, stranded='no'):
'''
function is wrapper for Trinity read normalization
have to run normalization separately for PE versus single
'''
left_norm, right_norm, single_norm = (None,)*3
SENormalLog = os.path.join(tmpdir, 'trinity_normalization.SE.log')
PENormalLog = os.path.join(tmpdir, 'trinity_normalization.PE.log')
lib.log.info("Running read normalization with Trinity")
if stranded != 'no':
cmd = [os.path.join(TRINITY, 'util', 'insilico_read_normalization.pl'), '--PARALLEL_STATS',
'--JM', memory, '--min_cov', str(
min_coverage), '--max_cov', str(coverage),
'--seqType', 'fq', '--output', os.path.join(
tmpdir, 'normalize'), '--CPU', str(cpus),
'--SS_lib_type', stranded]
else:
cmd = [os.path.join(TRINITY, 'util', 'insilico_read_normalization.pl'), '--PARALLEL_STATS',
'--JM', memory, '--min_cov', str(
min_coverage), '--max_cov', str(coverage),
'--seqType', 'fq', '--output', os.path.join(tmpdir, 'normalize'), '--CPU', str(cpus)]
if readTuple[2]: # single reads present, so run normalization just on those reads
cmd = cmd + ['--single', readTuple[2]]
lib.runSubprocess2(cmd, '.', lib.log, SENormalLog)
single_norm = os.path.join(tmpdir, 'normalize', 'single.norm.fq')
if readTuple[0] and readTuple[1]:
cmd = cmd + ['--pairs_together', '--left',
readTuple[0], '--right', readTuple[1]]
left_norm = os.path.join(tmpdir, 'normalize', 'left.norm.fq')
right_norm = os.path.join(tmpdir, 'normalize', 'right.norm.fq')
lib.runSubprocess2(cmd, '.', lib.log, PENormalLog)
return left_norm, right_norm, single_norm
def long2fasta(readTuple, cpus, tmpdir, combined, combinedClean):
'''
Run SeqClean on long reads, return cleaned tuple and combined output
tuple is (pb_iso, nano_cdna, nano_mrna)
'''
def _convert2fasta(file, output):
messy = []
with open(output, 'w') as outfile:
if file.endswith('.gz'):
newfile = file.replace('.gz', '')
messy.append(newfile)
lib.Funzip(file, newfile, cpus)
file = newfile
if file.endswith('.fa') or file.endswith('.fasta'):
with open(file, 'r') as infile:
for title, seq in SimpleFastaParser(infile):
if '/' in title:
title = title.replace('/', '_')
outfile.write('>{:}\n{:}\n'.format(title, lib.softwrap(seq)))
elif file.endswith('.fq') or file.endswith('.fastq'):
with open(file, 'r') as infile:
for title, seq, qual in FastqGeneralIterator(infile):
if '/' in title:
title = title.replace('/', '_')
outfile.write('>{:}\n{:}\n'.format(title, lib.softwrap(seq)))
# clean up
for x in messy:
lib.SafeRemove(x)
if os.path.islink(combined) or os.path.isfile(combined):
results = []
originals = []
for i in [PBiso+'.clean', nanocdna+'.clean', nanomrna+'.clean']:
if lib.checkannotations(i):
results.append(i)
originals.append(i.replace('.clean', ''))
else:
results.append(None)
originals.append(None)
return tuple(originals), tuple(results)
else:
lib.log.info(
'Processing long reads: converting to fasta and running SeqClean')
results = []
if readTuple[0] and not lib.checkannotations(PBiso+'.clean'):
_convert2fasta(readTuple[0], PBiso)
runSeqClean(PBiso, tmpdir, cpus=cpus)
if readTuple[1] and not lib.checkannotations(nanocdna+'.clean'):
_convert2fasta(readTuple[1], nanocdna)
runSeqClean(nanocdna, tmpdir, cpus=cpus)
if readTuple[2] and not lib.checkannotations(nanomrna+'.clean'):
_convert2fasta(readTuple[2], nanomrna)
runSeqClean(nanomrna, tmpdir, cpus=cpus)
for i in [PBiso+'.clean', nanocdna+'.clean', nanomrna+'.clean']:
if lib.checkannotations(i):
results.append(i)
else:
results.append(None)
validResults = [x for x in results if x is not None]
validOriginal = [x.replace('.clean', '') for x in validResults]
validCln = [x.replace('.clean', '.cln') for x in validResults]
ClnOut = combined+'.cln'
if len(validResults) > 1:
lib.catFiles(*validResults, output=combinedClean)
lib.catFiles(*validOriginal, output=combined)
lib.catFiles(*validCln, output=ClnOut)
else:
if not lib.checkannotations(combinedClean):
os.symlink(os.path.abspath(validResults[0]), combinedClean)
if not lib.checkannotations(combined):
os.symlink(os.path.abspath(validOriginal[0]), combined)
if not lib.checkannotations(ClnOut):
os.symlink(os.path.abspath(validCln[0]), ClnOut)
return tuple(validOriginal), tuple(results)
def runSeqClean(input, folder, cpus=1):
'''
wrapper to run PASA seqclean on Trinity transcripts
'''
if cpus > 16:
cpus = 16
if os.path.isfile(input + ".clean"):
lib.log.info('Existing SeqClean output found: {:}'.format(
os.path.join(folder, input + ".clean")))
else:
cmd = [os.path.join(PASA, 'bin', 'seqclean'),
os.path.basename(input), '-c', str(cpus)]
lib.runSubprocess(cmd, folder, lib.log)
for f in os.listdir(folder):
if os.path.isdir(os.path.join(folder, f)):
if f.startswith('cleaning'):
lib.SafeRemove(os.path.join(folder, f))
def bam2fasta(input, output, cpus=1):
cmd = ['samtools', 'fasta', '-@', str(cpus), '-F', '0x4', input]
lib.runSubprocess2(cmd, '.', lib.log, output)
def bam2fasta_unmapped(input, output, cpus=1):
cmd = ['samtools', 'fasta', '-@', str(cpus), '-f', '0x4', input]
lib.runSubprocess2(cmd, '.', lib.log, output)
def mapTranscripts(genome, longTuple, assembled, tmpdir, trinityBAM, allBAM, cpus=1, max_intronlen=3000):
'''
function will map long reads and trinity to genome, return sorted BAM
'''
isoBAM = os.path.join(tmpdir, 'isoseq.coordSorted.bam')
isoSeqs = os.path.join(tmpdir, 'isoseq.coordSorted.fasta')
nano_cdnaBAM = os.path.join(tmpdir, 'nano_cDNA.coordSorted.bam')
nano_cdnaSeqs = os.path.join(tmpdir, 'nano_cDNA.coordSorted.fasta')
nano_mrnaBAM = os.path.join(tmpdir, 'nano_mRNA.coordSorted.bam')
nano_mrnaSeqs = os.path.join(tmpdir, 'nano_mRNA.coordSorted.fasta')
mappedSeqs = []
mappedLong = os.path.join(tmpdir, 'long-reads.mapped.fasta')
# tuple is (iso-seq, nanopore_cDNA, nanopore_mRNA)
if not all(v is None for v in longTuple):
# run minimap2 alignment
lib.log.info('Aligning long reads to genome with minimap2')
if longTuple[0]: # run iso-seq method
lib.iso_seq_minimap2(
longTuple[0], genome, cpus, max_intronlen, isoBAM)
bam2fasta(isoBAM, isoSeqs, cpus=cpus)
if longTuple[1]: # run nano cDNA
lib.nanopore_cDNA_minimap2(
longTuple[1], genome, cpus, max_intronlen, nano_cdnaBAM)
bam2fasta(nano_cdnaBAM, nano_cdnaSeqs, cpus=cpus)
if longTuple[2]: # run nano mRNA
lib.nanopore_mRNA_minimap2(
longTuple[2], genome, cpus, max_intronlen, nano_mrnaBAM)
bam2fasta(nano_mrnaBAM, nano_mrnaSeqs, cpus=cpus)
for x in [isoSeqs, nano_cdnaSeqs, nano_mrnaSeqs]:
if lib.checkannotations(x):
mappedSeqs.append(x)
if len(mappedSeqs) > 0:
lib.catFiles(*mappedSeqs, output=mappedLong)
lib.SafeRemove(isoSeqs)
lib.SafeRemove(nano_cdnaSeqs)
lib.SafeRemove(nano_mrnaSeqs)
if lib.checkannotations(assembled): # Trinity transcripts
# want to recover any long-reads that don't map to Trinity transcripts but do map to genome
crosscheckBAM = os.path.join(tmpdir, 'trinity.vs.long-reads.bam')
unmappedLong = os.path.join(tmpdir, 'long-reads.trinity.unique.fasta')
if lib.checkannotations(mappedLong):
lib.log.info(
'Finding long-reads not represented in Trinity assemblies')
minimap_cmd = ['minimap2', '-ax', 'map-ont', '-t',
str(cpus), '--secondary=no', assembled, mappedLong]
cmd = [os.path.join(parentdir, 'aux_scripts', 'sam2bam.sh'), " ".join(
minimap_cmd), str(cpus // 2), crosscheckBAM]
if not lib.checkannotations(crosscheckBAM):
lib.runSubprocess(cmd, '.', lib.log)
bam2fasta_unmapped(crosscheckBAM, unmappedLong, cpus=cpus)
lib.log.info(
'Adding {:,} unique long-reads to Trinity assemblies'.format(lib.countfasta(unmappedLong)))
lib.SafeRemove(crosscheckBAM)
if lib.checkannotations(unmappedLong):
trinityCombined = os.path.join(tmpdir, 'trinity.long-reads.fasta')
trinityCombinedClean = trinityCombined+'.clean'
lib.catFiles(*[assembled, unmappedLong], output=trinityCombined)
runSeqClean(trinityCombined, tmpdir, cpus=cpus)
else:
trinityCombinedClean = assembled
trinityCombined = assembled.replace('.clean', '')
# finally run trinity mapping
lib.minimap2Align(trinityCombinedClean, genome,
cpus, max_intronlen, trinityBAM)
else:
trinityCombined = mappedLong
trinityCombinedClean = trinityCombined+'.clean'
runSeqClean(trinityCombined, tmpdir, cpus=cpus)
bamResults = [isoBAM, nano_cdnaBAM, nano_mrnaBAM, trinityBAM]
foundResults = []
for r in bamResults:
if lib.checkannotations(r):
foundResults.append(r)
if len(foundResults) > 1:
lib.log.info('Merging BAM files: {:}'.format(', '.join(foundResults)))
lib.mergeBAMs(*foundResults, cpus=cpus, output=allBAM)
elif len(foundResults) == 0:
lib.log.error(
'Alignment failed, BAM files empty. Please check logfile')
sys.exit(1)
else:
os.symlink(os.path.abspath(foundResults[0]), os.path.abspath(allBAM))
return trinityCombined, trinityCombinedClean
def runPASAtrain(genome, transcripts, cleaned_transcripts, gff3_alignments, stringtie_gtf, stranded, intronlen, cpus, dbname, output, pasa_db='sqlite', pasa_alignment_overlap=30, aligners=['blat', 'gmap']):
'''
function will run PASA align assembly and then choose best gene models for training
'''
pasa_cpus = int(cpus)
# create tmpdir
folder = os.path.join(tmpdir, 'pasa')
if not os.path.isdir(folder):
os.makedirs(folder)
pasaLOG = os.path.join(folder, 'pasa-assembly.log')
# get config files and edit
alignConfig = os.path.join(folder, 'alignAssembly.txt')
pasaDBname = dbname.replace('-', '_')
if pasa_db == 'sqlite':
pasaDBname_path = os.path.abspath(os.path.join(folder, pasaDBname))
else:
pasaDBname_path = pasaDBname
with open(alignConfig, 'w') as config1:
with open(os.path.join(PASA, 'pasa_conf', 'pasa.alignAssembly.Template.txt'), 'r') as template1:
for line in template1:
line = line.replace('<__DATABASE__>', pasaDBname_path)
line = line.replace('<__MYSQLDB__>', pasaDBname_path)
config1.write(line)
if not os.path.isfile(os.path.join(folder, pasaDBname+'.assemblies.fasta')):
# now run first PASA step, note this will dump any database with same name
lib.log.info("Running PASA alignment step using {:,} transcripts".format(
lib.countfasta(cleaned_transcripts)))
cmd = [LAUNCHPASA, '-c', os.path.abspath(alignConfig), '-r', '-C', '-R', '-g', os.path.abspath(genome),
'--IMPORT_CUSTOM_ALIGNMENTS', gff3_alignments, '-T',
'-t', os.path.abspath(
cleaned_transcripts), '-u', os.path.abspath(transcripts),
'--stringent_alignment_overlap', pasa_alignment_overlap, '--TRANSDECODER', '--ALT_SPLICE',
'--MAX_INTRON_LENGTH', str(intronlen), '--CPU', str(pasa_cpus)]
cmd += ['--ALIGNERS']
filtaligners = []
for x in aligners:
if x != 'minimap2':
filtaligners.append(x)
cmd.append(','.join(filtaligners))
if stranded != 'no':
cmd = cmd + ['--transcribed_is_aligned_orient']
if lib.checkannotations(stringtie_gtf):
cmd = cmd + ['--trans_gtf', os.path.abspath(stringtie_gtf)]
lib.runSubprocess6(cmd, folder, lib.log, pasaLOG)
else:
lib.log.info('Existing PASA assemblies found: {:}'.format(
os.path.join(folder, pasaDBname+'.assemblies.fasta')))
# generate TSV gene-transcripts
Loci = []
numTranscripts = 0
with open(os.path.join(folder, 'pasa.gene2transcripts.tsv'), 'w') as gene2transcripts:
with open(os.path.join(folder, pasaDBname+'.pasa_assemblies_described.txt'), 'r') as description:
for line in description:
if not line.startswith('#'):
cols = line.split('\t')
gene2transcripts.write('g_%s\t%s\n' % (cols[1], cols[2]))
numTranscripts += 1
if not cols[1] in Loci:
Loci.append(cols[1])
lib.log.info("PASA assigned {:,} transcripts to {:,} loci (genes)".format(
numTranscripts, len(Loci)))
lib.log.info("Getting PASA models for training with TransDecoder")
pasa_training_gff = os.path.join(
folder, pasaDBname+'.assemblies.fasta.transdecoder.genome.gff3')
transdecoder_log = os.path.join(folder, 'pasa-transdecoder.log')
cmd = [os.path.join(PASA, 'scripts', 'pasa_asmbls_to_training_set.dbi'), '--pasa_transcripts_fasta', pasaDBname+'.assemblies.fasta',
'--pasa_transcripts_gff3', pasaDBname+'.pasa_assemblies.gff3']
lib.runSubprocess6(cmd, folder, lib.log, transdecoder_log)
# grab final result
shutil.copyfile(pasa_training_gff, output)
lib.log.info(
'PASA finished. PASAweb accessible via: localhost:port/cgi-bin/index.cgi?db=%s' % pasaDBname_path)
def pasa_transcript2gene(input):
    # modify kallisto output to map gene names to each mRNA ID so you know what locus they have come from
mRNADict = {}
# since mRNA is unique, parse the transcript file which has mRNAID geneID in header
with open(input, 'r') as transin:
for line in transin:
if line.startswith('>'):
line = line.rstrip()
line = line.replace('>', '')
cols = line.split(' ')
mRNAID = cols[0]
geneID = cols[1]
location = cols[-1]
if not mRNAID in mRNADict:
mRNADict[mRNAID] = (geneID, location)
return mRNADict
def runKallisto(input, fasta, readTuple, stranded, cpus, folder, output):
'''
function takes GFF3 output from PASA compare, extracts transcripts, and then calculates TPM
    using Kallisto to identify the best scoring gene model for each locus; the left and right
    reads passed in should be the adapter-cleaned, non-normalized Illumina reads
'''
lib.log.info(
"Using Kallisto TPM data to determine which PASA gene models to select at each locus")
# convert GFF to transcripts
if not os.path.exists(folder):
# handle already existing folder okay? could also delete it
os.makedirs(folder)
PASAtranscripts = os.path.join(folder, 'transcripts.fa')
cmd = [os.path.join(PASA, 'misc_utilities',
'gff3_file_to_proteins.pl'), input, fasta, 'cDNA']
lib.log.info("Building Kallisto index")
lib.runSubprocess2(cmd, '.', lib.log, PASAtranscripts)
# generate kallisto index
cmd = ['kallisto', 'index', '-i',
os.path.join(folder, 'bestModel'), PASAtranscripts]
lib.runSubprocess(cmd, '.', lib.log)
# use kallisto to map reads to index
# base command
cmd = ['kallisto', 'quant', '-i', os.path.join(folder, 'bestModel'), '-o', os.path.join(
folder, 'kallisto'), '--plaintext', '-t', str(cpus)]
# parse the strand information
if stranded == 'RF':
strandcmd = ['--rf-stranded']
elif stranded == 'FR':
strandcmd = ['--fr-stranded']
else:
strandcmd = []
# adapt command for input, i.e. single or PE ends -> what do you do if you have both?
# single, not just using estimated lengths and SD, I think this is okay? can make this an option otherwise
if readTuple[2] and not readTuple[0] and not readTuple[1]:
cmd = cmd + ['--single', '-l', '200', '-s', '20', readTuple[2]]
elif readTuple[0] and readTuple[1]:
cmd = cmd + strandcmd + [readTuple[0], readTuple[1]]
lib.log.info("Mapping reads using pseudoalignment in Kallisto")
lib.runSubprocess(cmd, '.', lib.log)
    # modify kallisto output to map gene names to each mRNA ID so you know what locus they have come from
mRNADict = pasa_transcript2gene(PASAtranscripts)
    # some PASA models can have incomplete CDS and are wrong; collect incomplete models into an ignore list
ignore = []
with open(input, 'r') as infile:
for line in infile:
if line.startswith('#PROT'):
if line.endswith('\t\n'):
ID = line.split(' ')[1]
ignore.append(ID)
if len(ignore) > 0:
lib.log.debug("Ignoring %i incomplete PASA models: %s" %
(len(ignore), ','.join(ignore)))
# now make new tsv file with #mRNAID geneID location TPM
with open(output, 'w') as outfile:
outfile.write("#mRNA-ID\tgene-ID\tLocation\tTPM\n")
with open(os.path.join(folder, 'kallisto', 'abundance.tsv'), 'r') as infile:
for line in infile:
                if line.startswith('target_id'):
continue
line = line.rstrip()
cols = line.split('\t')
if cols[0] in ignore:
continue
if cols[0] in mRNADict:
geneHit = mRNADict.get(cols[0])
geneID = geneHit[0]
location = geneHit[1]
outfile.write('%s\t%s\t%s\t%s\n' %
(cols[0], geneID, location, cols[4]))
def getPASAtranscripts2genes(input, output, pasa_alignment_overlap=30):
'''
function to parse PASA assemblies GFF3 to generate TSV file for transdecoder
GFF format is non-standard and looks like this, the transcript IDs are in the Target field
CM002236 assembler-Neurospora_crassa_train2 cDNA_match 933 2973 . - . ID=align_64070;Target=asmbl_1 1 2041 +
CM002236 assembler-Neurospora_crassa_train2 cDNA_match 933 1449 . - . ID=align_64071;Target=asmbl_2 1447 1963 +
CM002236 assembler-Neurospora_crassa_train2 cDNA_match 1528 2973 . - . ID=align_64071;Target=asmbl_2 1 1446 +
'''
Genes = {}
with open(input, 'r') as infile:
for line in infile:
line = line.rstrip()
contig, source, feature, start, end, score, strand, phase, attributes = line.split(
'\t')
ID, Target = (None,)*2
info = attributes.split(';')
for x in info:
if x.startswith('ID='):
ID = x.replace('ID=', '')
elif x.startswith('Target='):
tmp = x.replace('Target=', '')
Target = tmp.split(' ')[0]
if ID and Target:
if not ID in Genes:
Genes[ID] = {'contig': contig, 'ids': Target,
'mRNA': [(int(start), int(end))]}
else:
Genes[ID]['mRNA'].append((int(start), int(end)))
# after all positions added, now create interlap on start stop positions
inter = defaultdict(InterLap)
for k, v in natsorted(Genes.items()):
sortedExons = sorted(v['mRNA'], key=lambda tup: tup[0])
inter[v['contig']].add(
(sortedExons[0][0], sortedExons[-1][1], k, v['ids']))
# now loop through interlap object and create a gene2transcript dictionary
Transcript2Gene = {}
counter = 1
for scaffold in inter:
for x in inter[scaffold]:
loc = [x[0], x[1]]
hits = list(inter[scaffold].find(loc))
Overlap = []
for y in hits:
percentOverlap = pOverlap(loc, [y[0], y[1]])
if percentOverlap >= (float(pasa_alignment_overlap) / 100) and not y[3] in Transcript2Gene:
Overlap.append(y[3])
if len(Overlap) > 0:
for transcript in Overlap:
if not transcript in Transcript2Gene:
Transcript2Gene[transcript] = 'g_'+str(counter)
counter += 1
# finally print out TSV file
unique = []
with open(output, 'w') as outfile:
for k, v in natsorted(Transcript2Gene.items()):
if not v in unique:
unique.append(v)
outfile.write('{:}\t{:}\n'.format(v, k))
return len(unique)
def pOverlap(one, two):
rone = set(range(one[0], one[1]))
rtwo = set(range(two[0], two[1]))
overlap = rone & rtwo
return len(overlap) / float(len(rone))
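# Illustrative example (not in the original source): pOverlap([0, 10], [5, 15])
# returns 0.5, since half of the first interval is covered by the second.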
def getBestModel(input, fasta, abundances, outfile, pasa_alignment_overlap=30):
# function to parse PASA results and generate GFF3; supports multiple transcripts
lib.log.info(
"Parsing expression value results. Keeping best transcript at each locus.")
Expression = {}
with open(abundances, 'r') as tpms:
for line in tpms:
line = line.rstrip()
if line.startswith('#') or line.startswith('target_id'):
continue
transcriptID, geneID, Loc, TPM = line.split('\t')
if not transcriptID in Expression:
Expression[geneID] = float(TPM)
# load GFF3 output into annotation and interlap dictionaries.
inter_gene, Genes = lib.gff2interlap(input, fasta)
bestHits = []
overlap = []
for scaffold in inter_gene:
for x in inter_gene[scaffold]:
loc = [x[0], x[1]]
hits = list(inter_gene[scaffold].find(loc))
ExpHits = []
for y in hits:
percentOverlap = pOverlap(loc, [y[0], y[1]])
# overlap more than args.pasa_alignment_overlap
if percentOverlap >= (float(pasa_alignment_overlap) / 100):
if y[2] in Expression:
ExpHits.append(
(y[2], Expression[y[2]], percentOverlap))
else:
ExpHits.append((y[2], 0.00, percentOverlap))
sortedExpHits = sorted(ExpHits, key=lambda x: x[1], reverse=True)
for i in range(0, len(sortedExpHits)):
if i == 0:
bestHits.append(sortedExpHits[i][0])
else:
overlap.append(sortedExpHits[i][0])
bestModels = {}
for k, v in natsorted(Genes.items()):
if k in bestHits:
bestModels[k] = v
lib.dict2gff3(bestModels, outfile)
lib.log.info('Wrote {:,} PASA gene models'.format(len(bestModels)))
def main(args):
# setup menu with argparse
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
def __init__(self, prog):
super(MyFormatter, self).__init__(prog, max_help_position=48)
parser = argparse.ArgumentParser(prog='funannotate-train.py', usage="%(prog)s [options] -i genome.fasta",
description='''Script is a wrapper for automated Trinity/PASA generation of training data.''',
epilog="""Written by <NAME> (2017-2018) <EMAIL>""",
formatter_class=MyFormatter)
parser.add_argument('-i', '--input', required=True,
help='Genome in FASTA format')
parser.add_argument('-l', '--left', nargs='+',
help='Left (R1) FASTQ Reads')
parser.add_argument('--left_norm', help='Left (R1) FASTQ Reads')
parser.add_argument('--right_norm',
help='Right (R2) normalized FASTQ Reads')
parser.add_argument('--single_norm', help='single normalized FASTQ Reads')
parser.add_argument('-r', '--right', nargs='+',
help='Right (R2) FASTQ Reads')
parser.add_argument('-s', '--single', nargs='+',
help='Single ended FASTQ Reads')
parser.add_argument('--pacbio_isoseq', help='PacBio Iso-seq data')
parser.add_argument('--nanopore_cdna', help='Nanopore 2d cDNA data')
parser.add_argument('--nanopore_mrna', help='Nanopore direct mRNA data')
parser.add_argument('-o', '--out', required=True,
help='Basename folder of output files')
parser.add_argument('-c', '--coverage', default=50,
type=int, help='Depth to normalize reads to')
parser.add_argument('-m', '--min_coverage', default=5, type=int,
help='Minimum depth to pass to Trinity during normalization')
parser.add_argument('--trinity',
help='Trinity genome guided FASTA results')
parser.add_argument('--memory', default='50G',
help='RAM to use for Jellyfish/Trinity')
parser.add_argument('--no_normalize_reads',
action='store_true', help='skip normalization')
parser.add_argument('--no_trimmomatic', '--no-trimmomatic', dest='no_trimmomatic',
action='store_true', help='skip quality trimming via trimmomatic')
parser.add_argument('--jaccard_clip', action='store_true',
help='Turn on jaccard_clip for dense genomes')
parser.add_argument('--pasa_alignment_overlap', default='30.0',
                        help='PASA --stringent_alignment_overlap')
parser.add_argument('--pasa_db', default='sqlite',
choices=['mysql', 'sqlite'], help='PASA SQL database to use')
parser.add_argument('--max_intronlen', default=3000,
help='Maximum intron length for gene models')
parser.add_argument('--stranded', default='no',
choices=['RF', 'FR', 'F', 'R', 'no'], help='RNA seq strandedness')
parser.add_argument('--cpus', default=2, type=int,
help='Number of CPUs to use')
parser.add_argument('--header_length', default=16,
type=int, help='Max length for fasta headers')
parser.add_argument('--species',
help='Species name (e.g. "Aspergillus fumigatus") use quotes if there is a space')
parser.add_argument('--isolate', help='Isolate name (e.g. Af293)')
parser.add_argument('--strain', help='Strain name (e.g. CEA10)')
parser.add_argument('--aligners', default=['minimap2', 'blat'], nargs='+', choices=[
'minimap2', 'gmap', 'blat'], help='transcript alignment programs')
parser.add_argument('--PASAHOME',
help='Path to PASA home directory, $PASAHOME')
parser.add_argument('--TRINITYHOME',
help='Path to Trinity config directory, $TRINITYHOME')
args = parser.parse_args(args)
# create folder structure
if not os.path.isdir(args.out):
os.makedirs(args.out)
os.makedirs(os.path.join(args.out, 'training'))
os.makedirs(os.path.join(args.out, 'logfiles'))
else:
# make sure subdirectories exist
dirs = [os.path.join(args.out, 'training'),
os.path.join(args.out, 'logfiles')]
for d in dirs:
if not os.path.isdir(d):
os.makedirs(d)
global tmpdir, PASA, LAUNCHPASA, PASAVERSION, TRINITY, PBiso, nanocdna, nanomrna, parentdir
parentdir = os.path.join(os.path.dirname(__file__))
tmpdir = os.path.join(args.out, 'training')
# create log file
log_name = os.path.join(args.out, 'logfiles', 'funannotate-train.log')
if os.path.isfile(log_name):
os.remove(log_name)
# initialize script, log system info and cmd issue at runtime
lib.setupLogging(log_name)
cmd_args = " ".join(sys.argv)+'\n'
lib.log.debug(cmd_args)
    print("-------------------------------------------------------")
lib.SystemInfo()
# get version of funannotate
version = lib.get_version()
lib.log.info("Running %s" % version)
# do some checks and balances
if not args.PASAHOME:
try:
PASA = os.environ["PASAHOME"].strip()
except KeyError:
lib.log.error(
"$PASAHOME environmental variable not found, PASA is not properly configured. You can use the --PASAHOME argument to specifiy a path at runtime")
sys.exit(1)
else:
PASA = args.PASAHOME.strip()
# try to autodetect different PASA distributions
if os.path.isfile(os.path.join(PASA, 'Launch_PASA_pipeline.pl')): # then v2.3.0 or newer
LAUNCHPASA = os.path.join(PASA, 'Launch_PASA_pipeline.pl')
PASAVERSION = '2.3.0'
elif os.path.isfile(os.path.join(PASA, 'scripts', 'Launch_PASA_pipeline.pl')): # older version
LAUNCHPASA = os.path.join(PASA, 'scripts', 'Launch_PASA_pipeline.pl')
args.pasa_db = 'mysql' # sqlite not available
PASAVERSION = '2.2.0'
if not args.TRINITYHOME:
try:
TRINITY = os.environ["TRINITYHOME"].strip()
except KeyError:
try:
TRINITY = os.environ["TRINITY_HOME"].strip()
except KeyError:
lib.log.error(
"$TRINITYHOME nor $TRINITY_HOME environmental variable not found, TRINITY is not properly configured. You can use the --TRINITYHOME argument to specify a path at runtime.")
sys.exit(1)
else:
TRINITY = args.TRINITYHOME.strip()
programs = ['fasta', 'minimap2', 'hisat2', 'hisat2-build', 'Trinity', 'java',
'kallisto', LAUNCHPASA, os.path.join(PASA, 'bin', 'seqclean')]
if not args.no_trimmomatic:
programs.append('trimmomatic')
programs += args.aligners
lib.CheckDependencies(programs)
# see if organism/species/isolate was passed at command line, build PASA naming scheme
organism, strain, isolate = (None,)*3
if args.species:
organism = args.species
else:
organism = os.path.basename(args.input).split('.fa')[0]
if args.strain:
strain = args.strain
if args.isolate:
isolate = args.isolate
if strain:
organism_name = organism+'_'+strain
elif isolate:
organism_name = organism+'_'+isolate
else:
organism_name = organism
organism_name = organism_name.replace(' ', '_')
# check input, make sure fasta headers are compatible
header_test = lib.checkFastaHeaders(args.input, args.header_length)
if not header_test[0]:
lib.log.error(
"Fasta headers on your input have more characters than the max (%i), reformat headers to continue." % args.header_length)
lib.log.error("First 5 header names:\n%s" %
'\n'.join(header_test[1][:5]))
sys.exit(1)
# move into tmpfolder
genome = os.path.join(tmpdir, 'genome.fasta')
shutil.copyfile(args.input, genome)
if args.left and args.right and args.single:
lib.log.info(
"Combining PE and SE reads supported, but you will lose stranded information, setting --stranded no")
args.stranded = 'no'
# check input reads
# get absolute paths for reads and concate if there are multiple
s_reads, l_reads, r_reads = (None,)*3
if not lib.checkannotations(os.path.join(tmpdir, 'single.fq.gz')):
if args.single:
single_reads = []
for y in args.single:
single_reads.append(os.path.abspath(y))
if single_reads[0].endswith('.gz'):
ending = '.fq.gz'
else:
ending = '.fq'
s_reads = os.path.join(tmpdir, 'single'+ending)
if len(single_reads) > 1:
lib.log.info(
"Multiple inputs for --single detected, concatenating SE reads")
lib.concatenateReads(single_reads, s_reads)
else:
s_reads = single_reads[0]
if s_reads.endswith('.fq'):
lib.Fzip_inplace(s_reads, args.cpus)
s_reads = s_reads+'.gz'
if not lib.checkannotations(os.path.join(tmpdir, 'single.fq.gz')):
if os.path.dirname(os.path.abspath(tmpdir)) != os.path.dirname(os.path.abspath(s_reads)):
try:
os.symlink(os.path.realpath(s_reads),
os.path.join(tmpdir, 'single.fq.gz'))
except OSError:
pass
else:
s_reads = os.path.join(tmpdir, 'single.fq.gz')
if not lib.checkannotations(os.path.join(tmpdir, 'left.fq.gz')) or not lib.checkannotations(os.path.join(tmpdir, 'right.fq.gz')):
if args.left and args.right:
left_reads = []
for i in args.left:
left_reads.append(os.path.abspath(i))
right_reads = []
for x in args.right:
right_reads.append(os.path.abspath(x))
            # since I can't get the comma separated input to work through subprocess, let's concatenate reads
if left_reads[0].endswith('.gz'):
ending = '.fq.gz'
else:
ending = '.fq'
l_reads = os.path.join(tmpdir, 'left'+ending)
r_reads = os.path.join(tmpdir, 'right'+ending)
if len(left_reads) > 1:
lib.log.info(
"Multiple inputs for --left and --right detected, concatenating PE reads")
lib.concatenateReads(left_reads, l_reads)
lib.concatenateReads(right_reads, r_reads)
else:
l_reads = left_reads[0]
r_reads = right_reads[0]
if l_reads.endswith('.fq'):
lib.Fzip_inplace(l_reads, args.cpus)
l_reads = l_reads+'.gz'
if r_reads.endswith('.fq'):
lib.Fzip_inplace(r_reads, args.cpus)
r_reads = r_reads+'.gz'
if not lib.checkannotations(os.path.join(tmpdir, 'left.fq.gz')):
if os.path.dirname(os.path.abspath(tmpdir)) != os.path.dirname(os.path.abspath(l_reads)):
try:
os.symlink(os.path.realpath(l_reads),
os.path.join(tmpdir, 'left.fq.gz'))
except OSError:
pass
if not lib.checkannotations(os.path.join(tmpdir, 'right.fq.gz')):
if os.path.dirname(os.path.abspath(tmpdir)) != os.path.dirname(os.path.abspath(r_reads)):
try:
os.symlink(os.path.realpath(r_reads),
os.path.join(tmpdir, 'right.fq.gz'))
except OSError:
pass
else:
l_reads = os.path.join(tmpdir, 'left.fq.gz')
r_reads = os.path.join(tmpdir, 'right.fq.gz')
# get tuple of input reads so you can parse them in downstream tools
all_reads = (l_reads, r_reads, s_reads)
lib.log.debug('Input reads: {:}'.format(all_reads))
# trimmomatic on reads, first run PE
if args.no_trimmomatic or args.trinity or args.left_norm or args.single_norm:
lib.log.info("Trimmomatic will be skipped")
trim_left = l_reads
trim_right = r_reads
trim_single = s_reads
else:
# check if they exist already in folder
if not os.path.isfile(os.path.join(tmpdir, 'trimmomatic', 'trimmed_left.fastq.gz')) or not os.path.isfile(os.path.join(tmpdir, 'trimmomatic', 'trimmed_right.fastq.gz')):
if all_reads[0] and all_reads[1]:
trim_left, trim_right = runTrimmomaticPE(
l_reads, r_reads, cpus=args.cpus)
else:
trim_left, trim_right = (None,)*2
else:
trim_left, trim_right = os.path.join(tmpdir, 'trimmomatic', 'trimmed_left.fastq.gz'), os.path.join(
tmpdir, 'trimmomatic', 'trimmed_right.fastq.gz')
if not os.path.isfile(os.path.join(tmpdir, 'trimmomatic', 'trimmed_single.fastq.gz')) and s_reads:
if all_reads[2]:
trim_single = runTrimmomaticSE(s_reads, cpus=args.cpus)
else:
trim_single = None
else:
if s_reads:
trim_single = os.path.join(
tmpdir, 'trimmomatic', 'trimmed_single.fastq.gz')
else:
trim_single = None
# get tuple of trimmed reads
trim_reads = (trim_left, trim_right, trim_single)
lib.log.debug('Quality trimmed reads: {:}'.format(trim_reads))
# check that reads are present and make sure they follow trinity naming conventions, i.e. either illumina default or /1 and /2 to PE reads
for read in trim_reads:
if read:
if not os.path.isfile(read):
lib.log.error("Trimmomatic failed, %s does not exist." % read)
sys.exit(1)
# PE reads are passed, lets make sure they have proper naming
if trim_reads[0] and trim_reads[1]:
# if needed to fix they will be fixed in place
lib.CheckFASTQandFix(trim_reads[0], trim_reads[1])
# normalize reads
left_norm, right_norm, single_norm = (None,)*3
if not os.path.isdir(os.path.join(tmpdir, 'normalize')):
os.makedirs(os.path.join(tmpdir, 'normalize'))
if args.no_normalize_reads or args.trinity or args.left_norm or args.single_norm:
lib.log.info("Read normalization will be skipped")
if args.left_norm:
left_norm = args.left_norm
right_norm = args.right_norm
lib.SafeRemove(os.path.join(tmpdir, 'normalize', 'left.norm.fq'))
lib.SafeRemove(os.path.join(tmpdir, 'normalize', 'right.norm.fq'))
if os.path.dirname(os.path.abspath(tmpdir)) != os.path.dirname(os.path.abspath(args.left_norm)):
os.symlink(os.path.realpath(args.left_norm),
os.path.join(tmpdir, 'normalize', 'left.norm.fq'))
if os.path.dirname(os.path.abspath(tmpdir)) != os.path.dirname(os.path.abspath(args.right_norm)):
os.symlink(os.path.realpath(args.right_norm),
os.path.join(tmpdir, 'normalize', 'right.norm.fq'))
else:
left_norm = trim_left
right_norm = trim_right
if args.single_norm:
single_norm = args.single_norm
lib.SafeRemove(os.path.join(tmpdir, 'normalize', 'single.norm.fq'))
if os.path.dirname(os.path.abspath(tmpdir)) != os.path.dirname(os.path.abspath(args.single_norm)):
os.symlink(os.path.realpath(args.single_norm),
os.path.join(tmpdir, 'normalize', 'single.norm.fq'))
else:
single_norm = trim_single
else:
# check if exists
if trim_left and trim_right:
if not os.path.islink(os.path.join(tmpdir, 'normalize', 'left.norm.fq')) or not os.path.islink(os.path.join(tmpdir, 'normalize', 'right.norm.fq')):
if not all(v is None for v in trim_reads):
left_norm, right_norm, single_norm = runNormalization(trim_reads, args.memory, cpus=args.cpus,
stranded=args.stranded, min_coverage=args.min_coverage, coverage=args.coverage)
else:
left_norm, right_norm = os.path.join(tmpdir, 'normalize', 'left.norm.fq'), os.path.join(
tmpdir, 'normalize', 'right.norm.fq')
if os.path.islink(os.path.join(tmpdir, 'normalize', 'single.norm.fq')):
single_norm = os.path.join(
tmpdir, 'normalize', 'single.norm.fq')
if trim_single:
if not os.path.islink(os.path.join(tmpdir, 'normalize', 'single.norm.fq')) and not trim_left and not trim_right and trim_single:
if not all(v is None for v in trim_reads):
left_norm, right_norm, single_norm = runNormalization(trim_reads, args.memory, cpus=args.cpus,
stranded=args.stranded, min_coverage=args.min_coverage, coverage=args.coverage)
else:
if os.path.islink(os.path.join(tmpdir, 'normalize', 'single.norm.fq')):
single_norm = os.path.join(
tmpdir, 'normalize', 'single.norm.fq')
else:
single_norm = None
# setup reads and check if normalization worked
norm_reads = (left_norm, right_norm, single_norm)
lib.log.debug('Normalized reads: {:}'.format(norm_reads))
if all(v is None for v in norm_reads):
lib.log.error('No short reads detected, Trinity will be skipped.')
for read in norm_reads:
if read:
if not os.path.isfile(read):
lib.log.error(
"Read normalization failed, %s does not exist." % read)
sys.exit(1)
# check if long reads are passed, get full path
pb_iso, nano_cdna, nano_mrna = (None,)*3
if args.pacbio_isoseq:
pb_iso = os.path.abspath(args.pacbio_isoseq)
if args.nanopore_cdna:
nano_cdna = os.path.abspath(args.nanopore_cdna)
if args.nanopore_mrna:
nano_mrna = os.path.abspath(args.nanopore_mrna)
long_reads = (pb_iso, nano_cdna, nano_mrna)
lib.log.debug('Long reads: {:}'.format(long_reads))
# get long read FASTA file
longReadFA = os.path.join(tmpdir, 'long-reads.fasta')
longReadClean = os.path.join(tmpdir, 'long-reads.fasta.clean')
PBiso = os.path.join(tmpdir, 'iso-seq.fasta')
nanocdna = os.path.join(tmpdir, 'nano-cdna.fasta')
nanomrna = os.path.join(tmpdir, 'nano-mrna.fasta')
long_clean = (None, None, None)
if not all(v is None for v in long_reads):
if not lib.checkannotations(longReadFA):
long_readsFA, long_clean = long2fasta(long_reads, args.cpus, tmpdir, os.path.abspath(
longReadFA), os.path.abspath(longReadClean))
else:
found_clean = []
for x in [PBiso, nanocdna, nanomrna]:
if lib.checkannotations(x):
found_clean.append(x)
else:
found_clean.append(None)
long_clean = tuple(found_clean)
if not lib.checkannotations(longReadFA):
longReadFA = None
lib.log.debug('Long reads FASTA format: {:}'.format(long_reads))
lib.log.debug('Long SeqCleaned reads: {:}'.format(long_clean))
# now run Trinity with trimmomatic and read normalization
shortBAM = os.path.join(tmpdir, 'hisat2.coordSorted.bam')
trinity_transcripts = os.path.join(tmpdir, 'trinity.fasta')
if not lib.checkannotations(trinity_transcripts):
if args.trinity:
lib.log.info(
"Parsing assembled trinity data : {:}".format(args.trinity))
shutil.copyfile(os.path.abspath(args.trinity), trinity_transcripts)
else:
if not all(v is None for v in norm_reads):
# run trinity genome guided
#runTrinityGG(genome, norm_reads, longReadClean, shortBAM, trinity_transcripts)
cmd = [sys.executable, os.path.join(parentdir, 'aux_scripts', 'trinity.py'),
'-f', genome, '-o', trinity_transcripts, '-b', shortBAM, '-t', tmpdir,
'--stranded', args.stranded, '--max_intronlen', str(
args.max_intronlen),
'--cpus', str(args.cpus), '--TRINITYHOME', TRINITY, '--memory', args.memory,
'--logfile', os.path.join(args.out, 'logfiles', 'funannotate-trinity.log')]
if args.jaccard_clip:
cmd.append('--jaccard_clip')
if norm_reads[2]: # single
cmd += ['-s', norm_reads[2]]
else:
cmd += ['-l', norm_reads[0], '-r', norm_reads[1]]
if lib.checkannotations(longReadClean):
cmd += ['--long', longReadClean]
# run trinity
subprocess.call(cmd)
if not lib.checkannotations(trinity_transcripts):
lib.log.info('ERROR: Trinity de novo assembly failed')
sys.exit(1)
else:
lib.log.info("{:,} existing Trinity results found: {:}".format(
lib.countfasta(trinity_transcripts), trinity_transcripts))
# if stringtie installed, run on shortBAM incorporate into PASA later on
stringtieGTF = os.path.join(tmpdir, 'funannotate_train.stringtie.gtf')
if not lib.checkannotations(stringtieGTF):
if lib.which('stringtie') and lib.checkannotations(shortBAM):
lib.log.info('Running StringTie on Hisat2 coordsorted BAM')
cmd = ['stringtie', '-p', str(args.cpus)]
if args.stranded != 'no':
if args.stranded.startswith('R'):
cmd = cmd + ['--rf']
else:
cmd = cmd + ['--fr']
cmd = cmd + [shortBAM]
lib.runSubprocess8(cmd, '.', lib.log, stringtieGTF)
# run SeqClean to clip polyA tails and remove low quality seqs.
cleanTranscripts = os.path.join(tmpdir, 'trinity.fasta.clean')
if lib.checkannotations(trinity_transcripts):
lib.log.info(
'Removing poly-A sequences from trinity transcripts using seqclean')
runSeqClean(trinity_transcripts, tmpdir, cpus=args.cpus)
if not lib.checkannotations(cleanTranscripts):
lib.log.info('SeqClean on transcripts failed, check logfiles')
sys.exit(1)
# map long reads and Trinity transcripts to genome for PASA
allBAM = os.path.join(tmpdir, 'transcript.alignments.bam')
trinityBAM = os.path.join(tmpdir, 'trinity.alignments.bam')
if not lib.checkannotations(allBAM):
trinity_transcripts, cleanTranscripts = mapTranscripts(
genome, long_clean, cleanTranscripts, tmpdir, trinityBAM, allBAM, cpus=args.cpus, max_intronlen=args.max_intronlen)
else:
if lib.checkannotations(trinityBAM):
lib.log.info("Existing BAM alignments found: {:}, {:}".format(
trinityBAM, allBAM))
else:
lib.log.info("Existing BAM alignments found: {:}".format(allBAM))
# convert BAM to GFF3
allGFF3 = os.path.join(tmpdir, 'transcript.alignments.gff3')
trinityGFF3 = os.path.join(tmpdir, 'trinity.alignments.gff3')
if not lib.checkannotations(allGFF3) and lib.checkannotations(allBAM):
lib.log.info('Converting transcript alignments to GFF3 format')
lib.bam2gff3(allBAM, allGFF3)
if not lib.checkannotations(trinityGFF3) and lib.checkannotations(trinityBAM):
lib.log.info('Converting Trinity transcript alignments to GFF3 format')
lib.bam2gff3(trinityBAM, trinityGFF3)
# now run PASA steps
PASA_gff = os.path.join(tmpdir, 'funannotate_train.pasa.gff3')
PASA_tmp = os.path.join(tmpdir, 'pasa.step1.gff3')
if not lib.checkannotations(PASA_tmp):
if lib.checkannotations(trinityBAM):
runPASAtrain(genome, trinity_transcripts, cleanTranscripts, os.path.abspath(trinityGFF3),
stringtieGTF, args.stranded, args.max_intronlen, args.cpus, organism_name, PASA_tmp,
pasa_db=args.pasa_db, pasa_alignment_overlap=args.pasa_alignment_overlap, aligners=args.aligners)
# no trinity seqs, so running PASA with only long reads
elif lib.checkannotations(longReadFA):
runPASAtrain(genome, os.path.abspath(longReadFA), os.path.abspath(longReadClean),
os.path.abspath(
allGFF3), stringtieGTF, args.stranded, args.max_intronlen, args.cpus,
organism_name, PASA_tmp, pasa_db=args.pasa_db, pasa_alignment_overlap=args.pasa_alignment_overlap, aligners=args.aligners)
else:
lib.log.info("Existing PASA output found: {:}".format(PASA_tmp))
# Refine PASA models (there are many overlapping transcripts run kallisto and choose best model at each location)
KallistoAbundance = os.path.join(tmpdir, 'kallisto.tsv')
if all(v is None for v in trim_reads):
kallistoreads = norm_reads
else:
kallistoreads = trim_reads
if all(v is None for v in kallistoreads) and lib.checkannotations(longReadClean):
# use minimap to count reads
lib.log.info(
'Generating relative expression values to PASA transcripts')
PASAtranscripts = os.path.join(tmpdir, 'transcripts.fa')
cmd = [os.path.join(
PASA, 'misc_utilities', 'gff3_file_to_proteins.pl'), PASA_tmp, genome, 'cDNA']
if not lib.checkannotations(PASAtranscripts):
lib.runSubprocess2(cmd, '.', lib.log, PASAtranscripts)
PASAdict = pasa_transcript2gene(PASAtranscripts)
minimapBAM = os.path.join(tmpdir, 'long-reads_transcripts.bam')
minimap_cmd = ['minimap2', '-ax' 'map-ont', '-t',
str(args.cpus), '--secondary=no', PASAtranscripts, longReadClean]
cmd = [os.path.join(parentdir, 'aux_scripts', 'sam2bam.sh'), " ".join(
minimap_cmd), str(args.cpus // 2), minimapBAM]
if not lib.checkannotations(minimapBAM):
lib.runSubprocess(cmd, '.', lib.log)
if not lib.checkannotations(KallistoAbundance):
lib.mapCount(minimapBAM, PASAdict, KallistoAbundance)
else:
if not lib.checkannotations(KallistoAbundance):
runKallisto(PASA_tmp, genome, kallistoreads, args.stranded, args.cpus, os.path.join(
tmpdir, 'getBestModel'), KallistoAbundance)
else:
lib.log.info(
"Existing Kallisto output found: {:}".format(KallistoAbundance))
# parse Kallisto results with PASA GFF
getBestModel(PASA_tmp, genome, KallistoAbundance, PASA_gff,
pasa_alignment_overlap=args.pasa_alignment_overlap)
# collect final output files
BAMfinal = os.path.join(tmpdir, 'funannotate_train.coordSorted.bam')
TranscriptFinal = os.path.join(
tmpdir, 'funannotate_train.trinity-GG.fasta')
LongFinal = os.path.join(tmpdir, 'funannotate_long-reads.fasta')
TranscriptAlignments = os.path.join(
tmpdir, 'funannotate_train.transcripts.gff3')
# remove symlinks if from previous run
for x in [BAMfinal, TranscriptFinal, LongFinal, TranscriptAlignments]:
lib.SafeRemove(x)
if lib.checkannotations(allBAM):
os.symlink(os.path.realpath(allBAM), os.path.abspath(BAMfinal))
if longReadFA:
os.symlink(os.path.realpath(longReadClean), os.path.abspath(LongFinal))
if lib.checkannotations(allGFF3):
os.symlink(os.path.realpath(allGFF3),
os.path.abspath(TranscriptAlignments))
if lib.checkannotations(trinity_transcripts):
os.symlink(os.path.realpath(trinity_transcripts),
os.path.abspath(TranscriptFinal))
lib.log.info('PASA database name: {:}'.format(
organism_name.replace('-', '_')))
if args.strain:
        lib.log.info('Trinity/PASA has completed, you are now ready to run funannotate predict, for example:\n\n\
funannotate predict -i {:} \\\n\
-o {:} -s "{:}" --strain {:} --cpus {:}\n'.format(args.input, args.out, organism, args.strain, args.cpus))
elif args.isolate:
        lib.log.info('Trinity/PASA has completed, you are now ready to run funannotate predict, for example:\n\n\
funannotate predict -i {:} \\\n\
-o {:} -s "{:}" --isolate {:} --cpus {:}\n'.format(args.input, args.out, organism, args.isolate, args.cpus))
else:
        lib.log.info('Trinity/PASA has completed, you are now ready to run funannotate predict, for example:\n\n\
funannotate predict -i {:} \\\n\
-o {:} -s "{:}" --cpus {:}\n'.format(args.input, args.out, organism, args.cpus))
print("-------------------------------------------------------")
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "jguiditta/atkinson",
"score": 3
} |
#### File: unit/config/test_search.py
```python
import os
from unittest.mock import patch
import pytest
from atkinson.config import search
class CheckAvailMock(object):
"""Class to control how paths are checked"""
def __init__(self, expected):
"""Constructor"""
self.expected = expected
def check(self, full_path):
"""Method to match paths that we want"""
return full_path in self.expected
def test_config_path_no_override():
"""Test we only get our paths"""
expected = list(search.DEFAULT_CONFIG_PATHS)
assert list(search.config_search_paths()) == expected
def test_config_path_override():
"""Test if a single override is added"""
expected = list(search.DEFAULT_CONFIG_PATHS)
expected.append('/opt')
assert list(search.config_search_paths('/opt')) == expected
def test_config_path_override_list():
"""Test if a list of overrides is added"""
expected = list(search.DEFAULT_CONFIG_PATHS)
expected.extend(['/opt', '/usr/share'])
assert list(search.config_search_paths(['/opt', '/usr/share'])) == expected
def test_config_path_with_user():
"""Test if a list that contains a path with ~ is expanded"""
expected = list(search.DEFAULT_CONFIG_PATHS)
expected.extend([os.path.join(os.environ['HOME'], 'my_configs')])
assert list(search.config_search_paths('~/my_configs')) == expected
def test_conf_path_list_user():
"""Test override list has a path with ~"""
expected = list(search.DEFAULT_CONFIG_PATHS)
expected.extend([os.path.join(os.environ['HOME'], 'my_configs')])
assert list(search.config_search_paths(['~/my_configs'])) == expected
def test_default_returned_all():
"""Test config returned from ~/.atkinson"""
with patch('atkinson.config.search._check_available') as aval_mock:
aval_mock.return_value = True
expected = [os.path.join(x, 'config.yml')
for x in search.DEFAULT_CONFIG_PATHS]
assert list(search.get_config_files()) == expected
@pytest.mark.parametrize('expected_path', list(search.DEFAULT_CONFIG_PATHS))
def test_default_slots(expected_path):
"""Test config returned from each default path individually"""
with patch('atkinson.config.search._check_available') as aval_mock:
file_name = 'config.yml'
expected = os.path.join(expected_path, file_name)
check = CheckAvailMock(expected)
aval_mock.side_effect = check.check
assert list(search.get_config_files()) == [expected]
def test_default_not_found():
"""Test config can't be found in the default paths"""
with patch('atkinson.config.search._check_available') as aval_mock:
aval_mock.return_value = False
assert list(search.get_config_files()) == []
@pytest.mark.parametrize('expected_path', list(search.DEFAULT_CONFIG_PATHS))
def test_custom_returned_slot(expected_path):
"""Test custom config file name can be returned from the default paths"""
with patch('atkinson.config.search._check_available') as aval_mock:
file_name = 'my_config.yml'
expected = [os.path.join(expected_path, file_name)]
check = CheckAvailMock(expected)
aval_mock.side_effect = check.check
assert list(search.get_config_files(filenames=file_name)) == expected
def test_default_override():
"""Test default config from override"""
with patch('atkinson.config.search._check_available') as aval_mock:
file_name = 'config.yml'
override = '~/my_config_dir'
expected = [os.path.join(os.path.expanduser(override), file_name)]
check = CheckAvailMock(expected)
aval_mock.side_effect = check.check
assert list(search.get_config_files(overrides=override)) == expected
def test_default_override_not_found():
"""Test an override is given but the file is not found.
Fall back to the defaults.
"""
with patch('atkinson.config.search._check_available') as aval_mock:
my_overrides = ['~/my_config_dir']
file_name = 'config.yml'
expected = [os.path.join(os.path.expanduser(x), file_name)
for x in my_overrides]
check = CheckAvailMock(expected)
aval_mock.side_effect = check.check
actual = list(search.get_config_files(overrides=my_overrides))
assert actual == expected
def test_default_second_override():
"""Test overrides are given, the file is found in the second."""
with patch('atkinson.config.search._check_available') as aval_mock:
my_overrides = ['~/my_config_dir', '~/my_second_configs']
file_name = 'config.yml'
expected = [os.path.join(os.path.expanduser(x), file_name)
for x in my_overrides]
check = CheckAvailMock(expected)
aval_mock.side_effect = check.check
actual = list(search.get_config_files(overrides=my_overrides))
assert actual == expected
def test_several_confs_no_override():
""" Test that several configs can be found"""
with patch('atkinson.config.search._check_available') as aval_mock:
file_names = ['configA.yml', 'configB.yml']
expected = [os.path.join('/etc/atkinson', x) for x in file_names]
check = CheckAvailMock(expected)
aval_mock.side_effect = check.check
assert list(search.get_config_files(filenames=file_names)) == expected
def test_several_confs_mix_override():
"""Test that several configs can be found in overrides and defaults"""
with patch('atkinson.config.search._check_available') as aval_mock:
file_names = ['configA.yml', 'configB.yml']
override = '/opt/configs'
expected = ['/etc/atkinson/configB.yml', '/opt/configs/configA.yml']
check = CheckAvailMock(expected)
aval_mock.side_effect = check.check
actual = list(search.get_config_files(filenames=file_names,
overrides=override))
assert actual == expected
def test_override_extra_defaults():
"""Test an extra filename, override and default filename
returns the correct order"""
with patch('atkinson.config.search._check_available') as aval_mock:
override = '/opt/configs'
expected = ['/opt/configs/config.yml', '/opt/configs/extra.yml']
check = CheckAvailMock(expected)
aval_mock.side_effect = check.check
actual = list(search.get_config_files(filenames='extra.yml',
overrides=override))
assert actual == expected
```
#### File: unit/logging/test_logging.py
```python
import logging
from unittest.mock import MagicMock
from atkinson.logging import logger
def test_gets_default_logger():
"""
GIVEN we request a logger with the default atkinson logger
WHEN we specify the root log level
THEN we get back a default python logger, with that level set.
"""
mylogger = logger.getLogger({'driver': 'atkinson.logging.drivers.default',
'root': {'level': 'DEBUG'}})
assert isinstance(mylogger, logging.Logger)
assert mylogger.getEffectiveLevel() == logging.DEBUG
def test_no_logger_specified():
"""
GIVEN we request a logger without specifying any details
THEN we get back a default python logger with log level = WARNING
"""
mylogger = logger.getLogger()
assert isinstance(mylogger, logging.Logger)
assert mylogger.getEffectiveLevel() == logging.WARNING
def test_driver_not_found():
"""
GIVEN we request a logger with a driver that cannot be found
THEN we get back a default python logger with log level = WARNING
"""
logging.Logger.error = MagicMock()
mylogger = logger.getLogger({'driver': 'foo.bar.baz'})
mylogger.error.assert_called_with(
"Loading the default driver, No module named 'foo'")
assert isinstance(mylogger, logging.Logger)
assert mylogger.getEffectiveLevel() == logging.WARNING
``` |
{
"source": "jguiditta/flask-container-scaffold",
"score": 3
} |
#### File: src/flask_container_scaffold/logging.py
```python
import logging
from flask import has_request_context, request
class FlaskRequestFormatter(logging.Formatter):
"""
A Formatter logging class to add IP information to the log records.
Usage example:
from flask_container_scaffold.logging import FlaskRequestFormatter
dictConfig({
'version': 1,
'formatters': {
'default': {
'()': FlaskRequestFormatter,
'format': '[%(asctime)s] %(remote_addr)s %(levelname)s: %(message)s',
},
},
...
})
"""
def get_ip_from_forwarded(self, field):
# RFC 7239 defines the following format for the Forwarded field:
# for=12.34.56.78;host=example.com;proto=https, for=23.45.67.89
# In testing, the first IP has consistently been the real user IP.
forwarded = field.split(",")[0]
for value in forwarded.split(";"):
if value.startswith("for="):
return value.split("=")[1]
else:
return None
def format(self, record):
if has_request_context():
# HTTP_FORWARDED seems to be the most reliable way to get the
# user real IP.
forwarded = request.environ.get('HTTP_FORWARDED')
if forwarded:
ip = self.get_ip_from_forwarded(forwarded)
record.remote_addr = ip or request.remote_addr
else:
record.remote_addr = request.remote_addr
else:
record.remote_addr = "-"
return super().format(record)
```
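A fuller wiring sketch may help here; only the `formatters` block comes from the class docstring above, while the handler, root logger, and route are illustrative additions assuming a plain Flask app.
```python
# Hypothetical end-to-end wiring of FlaskRequestFormatter (handler/root/route are invented).
from logging.config import dictConfig
from flask import Flask
from flask_container_scaffold.logging import FlaskRequestFormatter

dictConfig({
    'version': 1,
    'formatters': {
        'default': {
            '()': FlaskRequestFormatter,
            'format': '[%(asctime)s] %(remote_addr)s %(levelname)s: %(message)s',
        },
    },
    'handlers': {
        'console': {'class': 'logging.StreamHandler', 'formatter': 'default'},
    },
    'root': {'level': 'INFO', 'handlers': ['console']},
})

app = Flask(__name__)

@app.route('/')
def index():
    # remote_addr is filled in by the formatter from HTTP_FORWARDED or request.remote_addr
    app.logger.info('request served')
    return 'ok'
```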
#### File: src/flask_container_scaffold/util.py
```python
from toolchest.yaml import parse
# TODO: extract this method out to toolchest library.
def load_yaml(filename='config.yml', logger=None):
"""
Convenience wrapper around toolchest.yaml::parse to allow you to parse a
file by path+name
:param filename: A yaml file to be parsed
:param logger: Optional logger for potential errors
:return: A dictionary formed out of the yaml data
"""
config = {}
with open(filename, 'r') as file_handle:
config = parse(file_handle, logger=logger)
return config
``` |
{
"source": "jguilhermeam/cnn-iets",
"score": 3
} |
#### File: cnn-iets/evaluation/metrics.py
```python
class Metrics:
def __init__(self):
self.precision = 0
self.recall = 0
self.f_measure = 0
def calculate_f_measure(self):
numerator = 2*self.precision*self.recall
denominator = self.precision+self.recall
try:
self.f_measure = numerator / denominator
except ZeroDivisionError:
pass
``` |
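A quick usage sketch of the class above (the precision and recall values are invented and the import path simply mirrors the file location):
```python
from evaluation.metrics import Metrics

m = Metrics()
m.precision = 0.9
m.recall = 0.6
m.calculate_f_measure()
print(round(m.f_measure, 2))  # 0.72, the harmonic mean of precision and recall
```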
{
"source": "jguilhermeam/ondux",
"score": 2
} |
#### File: ondux/evaluation/metrics.py
```python
class Metrics:
'''
Represent the metrics used for experimental evaluation
Metrics have the following properties:
precision: A number representing the precision.
recall: A number representing the recall.
f_measure: A number representing the f_measure.
'''
def __init__(self):
self.precision = 0
self.recall = 0
self.f_measure = 0
```
#### File: ondux/learning/inverted_index.py
```python
from collections import defaultdict
class InvertedIndex:
"""A InvertedIndex has the following properties:
Attributes:
inverted_k_base: A dict mapping each term of the
Knowledge Base to the attributes in which it occurs.
"""
def __init__(self, k_base):
"""Return a Knowledge Base object"""
self.inverted_k_base = defaultdict()
self.create_inverted_k_base(k_base)
def create_inverted_k_base(self, k_base):
'''Create an inverted dict from the Knowledge Base'''
for attribute in k_base:
for occ in k_base[attribute]:
self.inverted_k_base.setdefault(
occ.term, []).append((attribute, occ.frequency))
```
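A tiny synthetic example may clarify the shape `create_inverted_k_base` expects: a dict from attribute names to occurrence objects exposing `.term` and `.frequency`. The sample data below is invented.
```python
from collections import namedtuple
from learning.inverted_index import InvertedIndex

Occurrence = namedtuple('Occurrence', ['term', 'frequency'])
k_base = {
    'title': [Occurrence('neural', 3), Occurrence('networks', 2)],
    'author': [Occurrence('hinton', 5)],
}
index = InvertedIndex(k_base)
print(index.inverted_k_base['neural'])  # [('title', 3)]
print(index.inverted_k_base['hinton'])  # [('author', 5)]
```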
#### File: ondux/matching/matching.py
```python
import logging
from pprint import pprint
from learning.knowledge_base import KnowledgeBase
from utils import functions as F
from .content_based import ContentBasedFeatures as CBF
logger = logging.getLogger(__name__)
def match_blocks(blocking_list, k_base):
'''Associate each block generated in the Blocking step
with an attribute represented in the knowledge base'''
matching_blocks = []
for blocks in blocking_list:
labeled_blocks = []
for block in blocks:
labeled_blocks.append(classify_block(block, k_base))
matching_blocks.append(labeled_blocks)
return matching_blocks
def classify_block(block, k_base):
'''Classify a block based on content-based features
extracted from Knowledge Base'''
attribute_score = {}
max_score = 0
label = "none"
attr_list = k_base.get_attributes()
for attr in attr_list:
if block.value.isdigit():
score = CBF.numeric_matching(block.value, attr, k_base)
else:
score = CBF.attribute_frequency(block.value, attr, k_base)
attribute_score[attr] = score
if score > max_score:
max_score = score
label = attr
block.matching_score = attribute_score
block.label = label
return block
``` |
{
"source": "jguillaumes/MeteoClient",
"score": 3
} |
#### File: MeteoClient/tools/weatherSetTime.py
```python
import time
import sys
from weatherLib import logMessage,saveData,\
connect_wait_ES,\
connect_wait_BT,\
openFile
from datetime import datetime
import calendar
#w_address = "00:14:03:06:45:72"
w_address = "00:21:13:02:54:4C"
w_service = "00001101-0000-1000-8000-00805f9b34fb"
sock = connect_wait_BT(address=w_address, service=w_service)
def getLine():
line = ""
onLoop = True
while onLoop:
byte = sock.recv(1) # Get byte from socket
if byte == b'\r': # Carriage return?
byte = sock.recv(1) # Consume LF
if byte != b'\n': # IF not LF, big trouble: discard line
line = ""
else:
onLoop = False
else:
line = line + byte.decode()
return line
try:
d = datetime.utcnow()
timeString = d.strftime("%Y%m%d%H%M%S")
l = getLine()
print(l)
sock.send("TIME " + timeString + "\r\n")
l = getLine()
while l != 'OK-000':
print(l)
l = getLine()
print(l)
sock.send("BYE ")
sock.close()
sys.exit(0)
except KeyboardInterrupt:
print("Closing socket...")
logMessage(level="INFO", message="Ending process, closing BT socket.")
sock.send("BYE ")
sock.close()
sys.exit(0)
except Exception as e:
msg = "Exception: {0:s}".format(repr(e))
print(msg)
logMessage(level="CRIT", message=msg)
print("Unexpected error, trying to close...")
sock.send("BYE ")
sock.close()
raise
```
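For reference, the command framing the script sends can be distilled into a small helper; this is an illustrative extraction of what the code above already does (UTC timestamp, `YYYYMMDDHHMMSS`, CRLF terminator), not a function provided by weatherLib.
```python
from datetime import datetime

def build_time_command(now=None):
    """Build the TIME command the station expects: UTC, YYYYMMDDHHMMSS, CRLF-terminated."""
    now = now or datetime.utcnow()
    return "TIME " + now.strftime("%Y%m%d%H%M%S") + "\r\n"

print(repr(build_time_command(datetime(2020, 1, 2, 3, 4, 5))))  # 'TIME 20200102030405\r\n'
```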
#### File: MeteoClient/tools/weatherTest.py
```python
import weatherLib as wl
import sys
tracebacks = True
def exception_handler(exception_type, exception, traceback, debug_hook=sys.excepthook):
if tracebacks:
debug_hook(exception_type, exception, traceback)
else:
print ("%s: %s" % (exception_type.__name__, exception))
sys.excepthook = exception_handler
#es_hosts = [ 'elastic00.jguillaumes.dyndns.org','elastic01.jguillaumes.dyndns.org','elastic02.jguillaumes.dyndns.org']
es_hosts = ['localhost']
conn = wl.connect_wait_ES(hostlist=es_hosts)
testfile = open('weather0.dat', 'r')
for line in testfile:
wl.saveData(conn=conn, line=line)
```
#### File: MeteoClient/weatherLib/weatherQueue.py
```python
import os
import sqlite3
import configparser
import threading
import calendar
import pkg_resources
from time import sleep
from weatherLib.weatherUtil import WLogger,parseLine
_SELECT_TSA = 'select maxtsa from tsas where day = ?'
_INSERT_QUEUE = 'insert into queue(id, timeReceived, data, isES, isDB) ' + \
'values(?,strftime(\'%Y-%m-%dT%H:%M:%f+00:00\',\'now\'),?,0,0)'
_INSERT_DAY = 'insert into tsas(day, maxtsa) values(?,1)'
_UPDATE_TSA = 'update tsas set maxtsa = ? where day = ?'
_SELECT_DB = 'select id,data,isDB from queue where isDB = 0 order by isDB,id'
_UPDATE_DB = 'update queue set isDB = 1 where id = ?'
_SELECT_ES = 'select id,data,isDB from queue where isES = 0 order by isES,id'
_UPDATE_ES = 'update queue set isES = 1 where id = ?'
_PURGE_QUEUE = 'delete from queue where isDB=1 and isES=1'
_COUNT_QUEUE = 'select count(*) from queue where isDB=1 and isES=1'
class WeatherQueue(object):
"""
Weather measurements queue.
Implemented on a sqlite3 database
"""
def __init__(self,dbdir):
"""
Initialize the queue database connection and, if necessary,
create the database. Also create the lock object that will
be used to synchronize access
"""
self.logger = WLogger()
self.theLock = threading.Lock()
self.curDay = 0
self.curTSA = 0
ini_file = pkg_resources.resource_filename(__name__,'./database/wQueue.ini')
config = configparser.ConfigParser()
config.read([ini_file])
tableDDL = config['queueDatabase']['table']
tsasDDL = config['queueDatabase']['control']
indexESDDL = config['queueDatabase']['indexES']
indexDBDDL = config['queueDatabase']['indexDB']
dbFile = os.path.join(dbdir,'wQueue.db')
try:
self.theConn = sqlite3.connect(dbFile,check_same_thread=False)
self.theConn.isolation_level = 'IMMEDIATE'
self.theConn.execute(tableDDL)
self.theConn.execute(indexESDDL)
self.theConn.execute(indexDBDDL)
self.theConn.execute(tsasDDL)
self.theConn.commit()
self.logger.logMessage(level="INFO",message="Queue database opened at {0:s}".format(dbFile))
except:
self.logger.logException('Error initializing queue database')
def pushLine(self,line):
"""
Push a line into the queue.
This function blocks until the database is not locked
"""
stamp,_,_,_,_,_,_,_,_,_,_,_ = parseLine(line)
datestamp = calendar.timegm(stamp.date().timetuple())
theTsa = 1
with self.theLock:
try:
result = self.theConn.execute(_SELECT_TSA, [datestamp])
resCol = result.fetchone()
if resCol is None:
self.theConn.execute(_INSERT_DAY, [datestamp])
else:
theTsa = resCol[0] + 1
self.theConn.execute(_UPDATE_TSA, [theTsa, datestamp])
fullTsa = (stamp.year * 10000 +
stamp.month * 100 +
stamp.day) * 1000000 + theTsa
self.theConn.execute(_INSERT_QUEUE, [fullTsa,line])
self.theConn.commit()
except:
self.logger.logException('Error inserting line into the queue database')
self.theConn.rollback()
def getDbQueue(self):
"""
Get all the queue lines NOT marked as inserted into the database.
(isDB == 0)
"""
with self.theLock:
try:
result = self.theConn.execute(_SELECT_DB)
queueContent = result.fetchall()
return queueContent
except:
self.logger.logException('Error fetching DB queue')
self.theConn.rollback()
return None
def markDbQueue(self, theId):
"""
Mark a queue entry as inserted into the database
Parameters:
- theId: row identifier to mark
"""
with self.theLock:
with self.theConn:
self.theConn.execute(_UPDATE_DB, [theId])
self.theConn.commit()
self.logger.logMessage(level='DEBUG',
message = 'Queue entry {0} marked as DB-done'.format(theId))
def getESQueue(self):
"""
Get all the queue lines NOT marked as indexed in elasticsearch.
(isES == 0)
"""
with self.theLock:
try:
result = self.theConn.execute(_SELECT_ES)
queueContent = result.fetchall()
return queueContent
except:
self.logger.logException('Error fetching ES queue')
self.theConn.rollback()
return None
def markESQueue(self, theId):
"""
Mark a queue entry as indexed in elasticsearch
Parameters:
- theId: row identifier to mark
"""
with self.theLock:
with self.theConn:
self.theConn.execute(_UPDATE_ES, [theId])
self.theConn.commit()
self.logger.logMessage(level='DEBUG',
message = 'Queue entry {0} marked as ES-done'.format(theId))
def purgeQueue(self):
with self.theLock:
with self.theConn as conn:
result = conn.execute(_COUNT_QUEUE)
r = result.fetchone()
count = r[0]
self.logger.logMessage(message="About to purge {0} queue entries.".format(count))
conn.execute(_PURGE_QUEUE)
conn.commit()
self.logger.logMessage(message="Queue purged.")
class QueueJanitorThread(threading.Thread):
"""
Class to implement a thread to do maintenance tasks in the queue
database.
It will awake itself periodically to delete the queue elements
which have already been processed.
"""
_logger = WLogger()
def __init__(self,queue,period=60):
super(QueueJanitorThread, self).__init__()
self.theQueue = queue
self.thePeriod = period
self._stopSwitch = False
self.name = 'QueueJanitorThread'
self._pending = False
QueueJanitorThread._logger.logMessage("Janitor configured to run every {0} seconds".format(period))
def stop(self):
self._stopSwitch = True
def run(self):
"""
Run method.
It creates a timer object and schedules it according to the configured
perdiod.
The method runs an infinite loop with 1-second delays to check if the
termination flag (_stopSwitch) has been raised. In this case it cancels
the timer request (if pending) and ends.
"""
theTimer = None
self._pending = False
QueueJanitorThread._logger.logMessage("Starting thread {0}.".format(self.getName()), level="INFO")
while not self._stopSwitch:
if not self._pending:
theTimer = threading.Timer(self.thePeriod,self.doCleanup)
theTimer.name = "JanitorTimer"
self._pending = True
theTimer.start()
sleep(1)
theTimer.cancel()
QueueJanitorThread._logger.logMessage("Thread {0} stopped by request.".format(self.getName()), level="INFO")
def doCleanup(self):
"""
This method is scheduled inside a Timer object by the run() loop.
"""
self.theQueue.purgeQueue()
self._pending = False
``` |
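On the consumer side the queue is meant to be drained with a fetch/process/mark loop; the sketch below is hypothetical (`store_fn` stands in for whatever persists a reading) but only uses the methods defined above.
```python
def drain_to_database(queue, store_fn):
    """Hand every not-yet-persisted WeatherQueue row to store_fn and mark it as DB-done."""
    rows = queue.getDbQueue() or []  # rows with isDB == 0, or None if the query failed
    for row_id, data, _ in rows:
        store_fn(data)
        queue.markDbQueue(row_id)
    return len(rows)
```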
{
"source": "JGuillaumin/swa-tf",
"score": 2
} |
#### File: swa-tf/swa_tf/moving_free_batch_normalization.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import utils as tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.layers.normalization import BatchNormalization
from tensorflow.python.ops import gen_control_flow_ops
class MovingFreeBatchNormalization(BatchNormalization):
def build(self, input_shape):
super(MovingFreeBatchNormalization, self).build(input_shape)
self.built = False
# all shape assertions and the standard beta/gamma/moving_* variables are handled by the parent
# build(); re-open `built` so the extra raw-statistics variables below can be added
input_shape = tensor_shape.TensorShape(input_shape)
ndims = len(input_shape)
# Raise parameters of fp16 batch norm to fp32
if self.dtype == dtypes.float16 or self.dtype == dtypes.bfloat16:
param_dtype = dtypes.float32
else:
param_dtype = self.dtype or dtypes.float32
axis_to_dim = {x: input_shape[x].value for x in self.axis}
if len(axis_to_dim) == 1 and self.virtual_batch_size is None:
# Single axis batch norm (most common/default use-case)
param_shape = (list(axis_to_dim.values())[0],)
else:
# Parameter shape is the original shape but with 1 in all non-axis dims
param_shape = [axis_to_dim[i] if i in axis_to_dim
else 1 for i in range(ndims)]
if self.virtual_batch_size is not None:
# When using virtual batches, add an extra dim at index 1
param_shape.insert(1, 1)
for idx, x in enumerate(self.axis):
self.axis[idx] = x + 1 # Account for added dimension
try:
# Disable variable partitioning when creating the moving mean and variance
if hasattr(self, '_scope') and self._scope:
partitioner = self._scope.partitioner
self._scope.set_partitioner(None)
else:
partitioner = None
# internal statistics fitted during a pre-inference step
self.mean = self.add_variable(
name='mean',
shape=param_shape,
dtype=param_dtype,
initializer=self.moving_mean_initializer,
trainable=False)
self.variance = self.add_variable(
name='variance',
shape=param_shape,
dtype=param_dtype,
initializer=self.moving_variance_initializer,
trainable=False)
self.n_updates = self.add_variable(
name='n_updates',
shape=[],
dtype=param_dtype,
initializer=init_ops.zeros_initializer(),
trainable=False)
finally:
if partitioner:
self._scope.set_partitioner(partitioner)
self.built = True
def _assign_moving_average(self, variable, value, momentum):
with ops.name_scope(None, 'AssignMovingAvg',
[variable, value, momentum]) as scope:
decay = ops.convert_to_tensor(1.0 - momentum, name='decay')
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
update_delta = (variable - value) * decay
return state_ops.assign_sub(variable, update_delta, name=scope)
def _update_statistics(self, variable, value, n_updates):
with ops.name_scope(None, 'UpdateStatistics',
[variable, value, n_updates]) as scope:
with ops.colocate_with(variable):
stat = variable * n_updates + value
stat /= n_updates + 1
return state_ops.assign(variable, stat, name=scope)
def _fused_batch_norm(self, inputs, training, use_moving_statistics):
"""Returns the output of fused batch norm."""
beta = self.beta if self.center else self._beta_const
gamma = self.gamma if self.scale else self._gamma_const
def _fused_batch_norm_training():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
epsilon=self.epsilon,
data_format=self._data_format)
# use_moving_statistics==True use moving_mean and moving_variance, else mean and variance
mean = tf_utils.smart_cond(use_moving_statistics, lambda: self.moving_mean, lambda: self.mean)
variance = tf_utils.smart_cond(use_moving_statistics, lambda: self.moving_variance, lambda: self.variance)
# these variables will be used in _fused_batch_norm_inference(), thanks to python closure
def _fused_batch_norm_inference():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
mean=mean,
variance=variance,
epsilon=self.epsilon,
is_training=False,
data_format=self._data_format)
output, mean, variance = tf_utils.smart_cond(training, _fused_batch_norm_training, _fused_batch_norm_inference)
# if training == True: mean and variance returned are mean and variance of the current batch
# elif training == False: mean and variance return are (self.mean, self.variance) or
# (self.moving_mean, self.moving_variance) depending of the value of use_moving_statistics
if not self._bessels_correction_test_only:
# Remove Bessel's correction to be consistent with non-fused batch norm.
# Note that the variance computed by fused batch norm is
# with Bessel's correction.
sample_size = math_ops.cast(
array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
variance *= factor
training_value = tf_utils.constant_value(training)
if training_value is None:
momentum = tf_utils.smart_cond(training,
lambda: self.momentum,
lambda: 1.0)
else:
momentum = ops.convert_to_tensor(self.momentum)
if training_value or training_value is None:
# if training, first create operations which update self.mean and self.variance
mean_update = self._update_statistics(self.mean, mean, self.n_updates)
variance_update = self._update_statistics(self.variance, variance, self.n_updates)
with ops.control_dependencies([mean_update, variance_update]):
update_n_updates = state_ops.assign_add(self.n_updates, 1., )
# add this combination of operations to a specific collection 'UPDATE_BN_OPS'
ops.add_to_collection('UPDATE_BN_OPS', update_n_updates)
# operations to reset bn statistics
reset_mean = state_ops.assign(self.mean, array_ops.zeros_like(self.mean))
reset_variance = state_ops.assign(self.variance, array_ops.zeros_like(self.variance))
reset_n_updates = state_ops.assign(self.n_updates, 0.)
with ops.control_dependencies([reset_mean, reset_variance, reset_n_updates]):
reset_bn = gen_control_flow_ops.no_op("ResetBatchNormStats")
ops.add_to_collection('RESET_BN_OPS', reset_bn)
# to keep the classical behavior of the Batch Norm !
# update moving averages and add operations to tf.GraphKeys.UPDATE_OPS
# these operation must be run when optimizing the network
moving_mean_update = self._assign_moving_average(self.moving_mean, mean, momentum)
moving_variance_update = self._assign_moving_average(self.moving_variance, variance, momentum)
self.add_update(moving_mean_update, inputs=True)
self.add_update(moving_variance_update, inputs=True)
return output
def call(self, inputs, training=None, use_moving_statistics=True):
"""
:param inputs: input features
:param training: boolean or boolean Tensor (with shape []) which determines the current training phase
:param use_moving_statistics: boolean or boolean Tensor (with shape []) which selects the statistics to use.
When training==True (or the Tensor evaluates to True), statistics (mean and variance) are computed from the inputs.
When training==False and use_moving_statistics==True, the forward pass uses the moving statistics (updated
with operations defined in GraphKeys.UPDATE_OPS).
When training==False and use_moving_statistics==False, it uses the raw statistics (updated with operations
from the 'UPDATE_BN_OPS' collection). The 'RESET_BN_OPS' collection contains operations to reset these
variables between inferences.
"""
in_eager_mode = context.executing_eagerly()
if self.virtual_batch_size is not None:
# Virtual batches (aka ghost batches) can be simulated by reshaping the
# Tensor and reusing the existing batch norm implementation
original_shape = [-1] + inputs.shape.as_list()[1:]
expanded_shape = [self.virtual_batch_size, -1] + original_shape[1:]
# Will cause errors if virtual_batch_size does not divide the batch size
inputs = array_ops.reshape(inputs, expanded_shape)
def undo_virtual_batching(outputs):
outputs = array_ops.reshape(outputs, original_shape)
return outputs
if self.fused:
outputs = self._fused_batch_norm(inputs, training=training, use_moving_statistics=use_moving_statistics)
if self.virtual_batch_size is not None:
# Currently never reaches here since fused_batch_norm does not support
# virtual batching
outputs = undo_virtual_batching(outputs)
return outputs
# Compute the axes along which to reduce the mean / variance
input_shape = inputs.get_shape()
ndims = len(input_shape)
reduction_axes = [i for i in range(ndims) if i not in self.axis]
if self.virtual_batch_size is not None:
del reduction_axes[1] # Do not reduce along virtual batch dim
# Broadcasting only necessary for single-axis batch norm where the axis is
# not the last dimension
broadcast_shape = [1] * ndims
broadcast_shape[self.axis[0]] = input_shape[self.axis[0]].value
def _broadcast(v):
if (v is not None and
len(v.get_shape()) != ndims and
reduction_axes != list(range(ndims - 1))):
return array_ops.reshape(v, broadcast_shape)
return v
scale, offset = _broadcast(self.gamma), _broadcast(self.beta)
def _compose_transforms(scale, offset, then_scale, then_offset):
if then_scale is not None:
scale *= then_scale
offset *= then_scale
if then_offset is not None:
offset += then_offset
return (scale, offset)
# Determine a boolean value for `training`: could be True, False, or None.
training_value = tf_utils.constant_value(training)
if training_value is not False:
if self.adjustment:
adj_scale, adj_bias = self.adjustment(array_ops.shape(inputs))
# Adjust only during training.
adj_scale = tf_utils.smart_cond(training,
lambda: adj_scale,
lambda: array_ops.ones_like(adj_scale))
adj_bias = tf_utils.smart_cond(training,
lambda: adj_bias,
lambda: array_ops.zeros_like(adj_bias))
scale, offset = _compose_transforms(adj_scale, adj_bias, scale, offset)
# Some of the computations here are not necessary when training==False
# but not a constant. However, this makes the code simpler.
keep_dims = self.virtual_batch_size is not None or len(self.axis) > 1
# mean and variance of the current batch
mean, variance = nn.moments(inputs, reduction_axes, keep_dims=keep_dims)
mean = tf_utils.smart_cond(training,
lambda: mean,
lambda: tf_utils.smart_cond(use_moving_statistics,
lambda: self.moving_mean,
lambda: self.mean))
variance = tf_utils.smart_cond(training,
lambda: variance,
lambda: tf_utils.smart_cond(use_moving_statistics,
lambda: self.moving_variance,
lambda: self.variance))
if self.renorm:
r, d, new_mean, new_variance = self._renorm_correction_and_moments(
mean, variance, training)
# When training, the normalized values (say, x) will be transformed as
# x * gamma + beta without renorm, and (x * r + d) * gamma + beta
# = x * (r * gamma) + (d * gamma + beta) with renorm.
r = _broadcast(array_ops.stop_gradient(r, name='renorm_r'))
d = _broadcast(array_ops.stop_gradient(d, name='renorm_d'))
scale, offset = _compose_transforms(r, d, scale, offset)
else:
new_mean, new_variance = mean, variance
if self.virtual_batch_size is not None:
# This isn't strictly correct since in ghost batch norm, you are
# supposed to sequentially update the moving_mean and moving_variance
# with each sub-batch. However, since the moving statistics are only
# used during evaluation, it is more efficient to just update in one
# step and should not make a significant difference in the result.
new_mean = math_ops.reduce_mean(mean, axis=1, keepdims=True)
new_variance = math_ops.reduce_mean(variance, axis=1, keepdims=True)
def _do_update(var, value):
if in_eager_mode and not self.trainable:
return
return self._assign_moving_average(var, value, self.momentum)
moving_mean_update = tf_utils.smart_cond(
training,
lambda: _do_update(self.moving_mean, new_mean),
lambda: self.moving_mean)
moving_variance_update = tf_utils.smart_cond(
training,
lambda: _do_update(self.moving_variance, new_variance),
lambda: self.moving_variance)
if not context.executing_eagerly():
self.add_update(moving_mean_update, inputs=True)
self.add_update(moving_variance_update, inputs=True)
mean_update = self._update_statistics(self.mean, mean, self.n_updates)
variance_update = self._update_statistics(self.variance, variance, self.n_updates)
with ops.control_dependencies([mean_update, variance_update]):
# update n_updates only after updating self.mean and self.variance
update_n_updates = state_ops.assign_add(self.n_updates, 1.)
ops.add_to_collection('UPDATE_BN_OPS', update_n_updates)
reset_mean = state_ops.assign(self.mean, array_ops.zeros_like(self.mean))
reset_variance = state_ops.assign(self.variance, array_ops.zeros_like(self.variance))
reset_n_updates = state_ops.assign(self.n_updates, 0.)
with ops.control_dependencies([reset_mean, reset_variance, reset_n_updates]):
reset_bn = gen_control_flow_ops.no_op("ResetBatchNormStats")
ops.add_to_collection('RESET_BN_OPS', reset_bn)
else:
# training == False
mean = tf_utils.smart_cond(use_moving_statistics, lambda: self.moving_mean, lambda: self.mean)
variance = tf_utils.smart_cond(use_moving_statistics, lambda: self.moving_variance, lambda: self.variance)
mean = math_ops.cast(mean, inputs.dtype)
variance = math_ops.cast(variance, inputs.dtype)
if offset is not None:
offset = math_ops.cast(offset, inputs.dtype)
outputs = nn.batch_normalization(inputs,
_broadcast(mean),
_broadcast(variance),
offset,
scale,
self.epsilon)
# If some components of the shape got lost due to adjustments, fix that.
outputs.set_shape(input_shape)
if self.virtual_batch_size is not None:
outputs = undo_virtual_batching(outputs)
return outputs
def moving_free_batch_normalization(inputs,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
moving_mean_initializer=init_ops.zeros_initializer(),
moving_variance_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
training=False,
trainable=True,
use_moving_statistics=True,
name=None,
reuse=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=None,
virtual_batch_size=None,
adjustment=None):
"""
:param inputs: input tensor
:param axis: An `int`, the axis that should be normalized (typically the features
axis). For instance, after a `Convolution2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
:param momentum: Momentum for the moving average.
:param epsilon: Small float added to variance to avoid dividing by zero.
:param center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
:param scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
:param beta_initializer: Initializer for the beta weight.
:param gamma_initializer: Initializer for the gamma weight.
:param moving_mean_initializer: Initializer for the moving mean and the raw mean (when not using the moving
statistics).
:param moving_variance_initializer: Initializer for the moving variance and the raw variance (when not using the
moving statistics).
:param beta_regularizer: Optional regularizer for the beta weight.
:param gamma_regularizer: Optional regularizer for the gamma weight.
:param beta_constraint: An optional projection function to be applied to the `beta`
weight after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
:param gamma_constraint: An optional projection function to be applied to the
`gamma` weight after being updated by an `Optimizer`.
:param training: Either a Python boolean, or a TensorFlow boolean scalar tensor
(e.g. a placeholder). Whether to return the output in training mode
(normalized with statistics of the current batch) or in inference mode
(normalized with moving statistics). **NOTE**: make sure to set this
parameter correctly, or else your training/inference will not work
properly.
:param trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
:param use_moving_statistics: Either a Python boolean, or a TensorFlow boolean scalar tensor (e.g. a placeholder).
Whether to use moving statistics or computed statistics in inference mode (training==False).
:param name: String, the name of the layer.
:param reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
:param renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
:param renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
:param renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
:param fused: if `None` or `True`, use a faster, fused implementation if possible.
If `False`, use the system recommended implementation.
:param virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
which means batch normalization is performed across the whole batch. When
`virtual_batch_size` is not `None`, instead perform "Ghost Batch
Normalization", which creates virtual sub-batches which are each
normalized separately (with shared gamma, beta, and moving statistics).
Must divide the actual batch size during execution.
:param adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example, if axis==-1,
`adjustment = lambda shape: (
tf.random_uniform(shape[-1:], 0.93, 1.07),
tf.random_uniform(shape[-1:], -0.1, 0.1))`
will scale the normalized value by up to 7% up or down, then shift the
result by up to 0.1 (with independent scaling and bias for each feature
but shared across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied. Cannot be specified if
virtual_batch_size is specified.
:return: Output tensor, corresponding to the normalized neural activation
"""
layer = MovingFreeBatchNormalization(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
beta_constraint=beta_constraint,
gamma_constraint=gamma_constraint,
renorm=renorm,
renorm_clipping=renorm_clipping,
renorm_momentum=renorm_momentum,
fused=fused,
trainable=trainable,
virtual_batch_size=virtual_batch_size,
adjustment=adjustment,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs, training=training, use_moving_statistics=use_moving_statistics)
# Aliases
MovingFreeBatchNorm = MovingFreeBatchNormalization
moving_free_batch_norm = moving_free_batch_normalization
``` |
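A rough TF1-style driver may make the intended workflow clearer; the shapes, the empty batch list and the commented-out steps are illustrative, but the collection names come straight from the layer above: train normally (running `GraphKeys.UPDATE_OPS`), then reset and re-fit the raw statistics through `RESET_BN_OPS`/`UPDATE_BN_OPS`, and finally evaluate with `use_moving_statistics=False`.
```python
import tensorflow as tf
from swa_tf.moving_free_batch_normalization import moving_free_batch_normalization

x = tf.placeholder(tf.float32, [None, 32, 32, 16])
training = tf.placeholder(tf.bool, [])
use_moving_stats = tf.placeholder(tf.bool, [])
h = moving_free_batch_normalization(x, training=training,
                                     use_moving_statistics=use_moving_stats)

update_bn_ops = tf.group(*tf.get_collection('UPDATE_BN_OPS'))
reset_bn_ops = tf.group(*tf.get_collection('RESET_BN_OPS'))
pre_inference_batches = []  # fill with numpy arrays of shape [batch, 32, 32, 16]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # 1) train as usual, running tf.GraphKeys.UPDATE_OPS alongside the optimizer
    # 2) re-fit the raw statistics with a pre-inference pass
    sess.run(reset_bn_ops)
    for batch in pre_inference_batches:
        sess.run(update_bn_ops, {x: batch, training: True, use_moving_stats: False})
    # 3) evaluate with the freshly fitted statistics:
    # sess.run(h, {x: eval_batch, training: False, use_moving_stats: False})
```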
{
"source": "jgukelberger/fdint",
"score": 2
} |
#### File: fdint/scripts/gen__nonparabolic_pyx.py
```python
import os
import sys
fpath = os.path.join(os.path.dirname(__file__), '../fdint/_nonparabolic.pyx')
templates_dir = os.path.join(os.path.dirname(__file__), 'templates/')
import numpy
INF = numpy.inf
with open(fpath, 'w') as f:
# Generate `nonparabolic`, etc.
for i in xrange(1):
k2 = str(i).replace('-','m')
f.write('''
@cython.cdivision(True)
cdef inline double nonparabolic(double phi, double alpha):''')
for phi_min, phi_max, ext in [(-INF, -2e0, '_lt_m2'),
(-2e0, 0e0, '_m2_to_0'),
( 0e0, 2e0, '_0_to_2'),
( 2e0, 5e0, '_2_to_5'),
( 5e0, 10e0, '_5_to_10'),
(10e0, 20e0, '_10_to_20'),
(20e0, 40e0, '_20_to_40'),
(40e0, INF, '_gt_40')]:
if phi_max != INF:
#TODO: binary search optimization
f.write('''
if phi < {phi_max:.1f}:
return nonparabolic{ext}(phi, 2.0*alpha)'''.format(ext=ext,phi_max=phi_max))
else:
f.write('''
return nonparabolic{ext}(phi, 2.0*alpha)
'''.format(ext=ext,phi_max=phi_max))
# Generate `nonparabolic_lt_m2`, etc.
for i in xrange(1,2):
k2 = str(i).replace('-','m')
for phi_min, phi_max, ext in [(-INF, -2e0, '_lt_m2'),
(-2e0, 0e0, '_m2_to_0'),
( 0e0, 2e0, '_0_to_2'),
( 2e0, 5e0, '_2_to_5'),
( 5e0, 10e0, '_5_to_10'),
(10e0, 20e0, '_10_to_20'),
(20e0, 40e0, '_20_to_40'),
(40e0, INF, '_gt_40')]:
f.write('''
@cython.cdivision(True)
cdef inline double nonparabolic{ext}(double phi, double beta):'''
''.format(ext=ext))
for m, k in enumerate(xrange(i, 22, 2)):
# m is the order of the approximation
if m == 0:
continue # skip 0th order
if m > 9:
break
#TODO: binary search optimization
if phi_max != INF:
f.write('''
if(beta <= BS1h{ext}__{m} and beta <= BS3h{ext}__{m}):
return nonparabolic{ext}__{m}(phi, beta)'''.format(ext=ext, m=m))
if m % 2 == 1:
last_odd_m = m
if phi_max != INF:
f.write('''
warnings.warn('nonparabolic: less than 24 bits of accuracy',
RuntimeWarning)
return nonparabolic{ext}__{m}(phi, beta)
'''.format(ext=ext, m=last_odd_m))
else:
f.write('''
warnings.warn('nonparabolic: 24 bits of accuracy not guaranteed',
RuntimeWarning)
return nonparabolic{ext}__{m}(phi, beta)
'''.format(ext=ext, m=last_odd_m))
# Generate `nonparabolic_lt_m2`, etc.
for phi_min, phi_max, ext in [(-INF, -2e0, '_lt_m2'),
(-2e0, 0e0, '_m2_to_0'),
( 0e0, 2e0, '_0_to_2'),
( 2e0, 5e0, '_2_to_5'),
( 5e0, 10e0, '_5_to_10'),
(10e0, 20e0, '_10_to_20'),
(20e0, 40e0, '_20_to_40'),
(40e0, INF, '_gt_40')]:
for m, _ in enumerate(xrange(i, 22, 2)):
# m is the order of the approximation
if m == 0:
continue # skip 0th order
if m > 9:
break
f.write('''
@cython.cdivision(True)
cdef inline double nonparabolic{ext}__{m}(double phi, double beta):
'''
''.format(ext=ext, m=m))
# f1h=fd1h_lt_m2(phi), etc.
for n, nk2 in enumerate(xrange(1, 22, 2)):
nk2 = str(nk2).replace('-','m')
if n > m+1:
break
f.write(' cdef double f{nk2}h=fd{nk2}h{ext}(phi)\n'
''.format(nk2=nk2, ext=ext))
# gf1h=..., gf3h=...
for i in xrange(1,4,2):
k2 = str(i).replace('-','m')
for n, nk2 in enumerate(xrange(i, 22, 2)):
if n > m:
break
nk2 = str(nk2).replace('-','m')
if n == 0:
f.write(' cdef double gf{k2}h=( G0 *f{nk2}h\n'
''.format(k2=k2, nk2=nk2, ext=ext))
else:
mstr = str(m).replace('10','A')
nstr = str(n).replace('10','A')
f.write(' +beta*(G{m}{n}*f{nk2}h\n'
''.format(nk2=nk2, ext=ext,
m=mstr,
n=nstr,
))
f.write(' )'+')'*m+'\n')
f.write(' return gf1h+beta*gf3h\n')
```
#### File: jgukelberger/fdint/setup.py
```python
import sys
import os.path
from setuptools import setup, Extension
import numpy
try:
from Cython.Build import cythonize
USE_CYTHON = True
except:
if len(sys.argv) > 1 and 'sdist' in sys.argv[1:]:
raise RuntimeError('Cython is required to build a source distribution.')
USE_CYTHON = False
def no_cythonize(extensions, **_ignore):
for extension in extensions:
sources = []
for sfile in extension.sources:
path, ext = os.path.splitext(sfile)
if ext in ('.pyx', '.py'):
if extension.language == 'c++':
ext = '.cpp'
else:
ext = '.c'
sfile = path + ext
sources.append(sfile)
extension.sources[:] = sources
return extensions
ext = '.pyx' if USE_CYTHON else '.c'
extensions = [Extension("fdint._fdint", ["fdint/_fdint"+ext]),
Extension("fdint.fd", ["fdint/fd"+ext]),
Extension("fdint.dfd", ["fdint/dfd"+ext]),
Extension("fdint.ifd", ["fdint/ifd"+ext]),
Extension("fdint.gfd", ["fdint/gfd"+ext]),
Extension("fdint.dgfd", ["fdint/dgfd"+ext]),
Extension("fdint.scfd", ["fdint/scfd"+ext]),]
# read in __version__
exec(open('fdint/version.py').read())
metadata = dict(
name='fdint',
version=__version__, # read from version.py
description = 'A free, open-source python package for quickly and '
'precisely approximating Fermi-Dirac integrals.',
long_description=open('README.rst').read(),
url='http://scott-maddox.github.io/fdint',
author='<NAME>',
author_email='<EMAIL>',
license='BSD',
packages=['fdint',
'fdint.tests',
'fdint.examples'],
package_dir={'fdint': 'fdint'},
data_files=['fdint/__init__.pxd',
'fdint/_fdint.pxd',
'fdint/scfd.pxd',],
test_suite='fdint.tests',
setup_requires=['numpy'],
install_requires=['numpy'],
# zip_safe=True,
# use_2to3=True,
include_dirs=[numpy.get_include()],
)
if USE_CYTHON:
metadata['ext_modules'] = cythonize(extensions)
else:
metadata['ext_modules'] = no_cythonize(extensions)
setup(**metadata)
``` |
{
"source": "jgulickson/Oanda-REST-V20",
"score": 2
} |
#### File: Oanda-REST-V20/oanda_ep/status.py
```python
try:
import json
from common import communicate
except ImportError:
raise ImportError("Import failed to load in status.py.")
#
# Status Class
#
class ServiceStatus:
def __init__(self):
self._url = None
self._response = None
self.description = None
self.level = None
self.status = None
self.id = None
#
# Get Service Status
# http://developer.oanda.com/rest-live-v20/health/
#
def get_service_status(self, hostname, account_id, headers, timeout, query):
self._url = communicate.create_http_url(hostname, "status", "get_service_status", account_id, query)
self._response = communicate.send_http_request("get", self._url, headers, timeout, None).json()
print("get_service_status completed")
#
# Store Service Status
# http://developer.oanda.com/rest-live-v20/health/
#
def store_service_status(self):
try:
self.description = str(self._response["current-event"]["status"]["description"]).strip()
self.level = str(self._response["current-event"]["status"]["level"]).strip().upper()
self.status = str(self._response["current-event"]["status"]["id"]).strip().lower()
self.id = str(self._response["id"]).strip().lower()
except KeyError:
raise KeyError("Specified key does not exist in service status _response.")
except ValueError:
raise ValueError("Service status _response is not valid.")
print("store_service_status completed")
#
# Validate Service Status
# http://developer.oanda.com/rest-live-v20/health/
#
def validate_service_status(self):
#
# TODO (Improve validation logic)
#
assert (self.description is not None), "Description is not valid."
assert (self.level is not None), "Level is not valid."
assert (self.status in ("up", "down")), "Status is not valid."
assert (self.id in ("fxtrade-practice-rest-api", "fxtrade-rest-api")), "Service ID is not valid."
print("validate_service_status completed")
``` |
{
"source": "jgung/tf-nlp",
"score": 2
} |
#### File: tf-nlp/test/test_readers.py
```python
import unittest
import pkg_resources
from tfnlp.common.constants import LABEL_KEY
from tfnlp.readers import conll_2003_reader, conll_2005_reader, conllx_reader, MultiConllReader, ConllReader
class TestChunk(unittest.TestCase):
def test_multi_reader(self):
filepath = pkg_resources.resource_filename(__name__, "resources/conllx.txt")
multireader = MultiConllReader([conllx_reader(), ConllReader({0: "pos"})], ['.dep', '.pos'])
instances = list(multireader.read_file(filepath))
self.assertEqual(2, len(instances))
self.assertTrue("pos" in instances[0])
self.assertEqual("XX", instances[0]["pos"][0])
def test_reader_single(self):
filepath = pkg_resources.resource_filename(__name__, "resources/conll03-test.txt")
instances = list(conll_2003_reader().read_file(filepath))
self.assertEqual(3, len(instances))
self.assertEqual(["O", "B-ORG", "I-ORG", "I-ORG", "O", "O", "O", "O"], instances[0][LABEL_KEY])
self.assertEqual(["B-LOC", "I-LOC", "O"], instances[1][LABEL_KEY])
def test_srl_reader(self):
filepath = pkg_resources.resource_filename(__name__, "resources/conll05-test.txt")
instances = list(conll_2005_reader().read_file(filepath))
self.assertEqual(6, len(instances))
self.assertEqual(
['B-A1', 'I-A1', 'I-A1', 'I-A1', 'B-AM-MOD', 'O', 'B-V', 'B-A2', 'I-A2', 'I-A2', 'I-A2', 'B-AM-TMP', 'I-AM-TMP', 'O',
'B-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV',
'I-AM-ADV', 'O'], instances[0][LABEL_KEY])
self.assertEqual(
['B-A1', 'I-A1', 'I-A1', 'O', 'B-V', 'B-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1',
'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'O', 'B-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV',
'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV',
'I-AM-ADV', 'I-AM-ADV', 'O'], instances[1][LABEL_KEY])
self.assertEqual(['B-A1', 'I-A1', 'I-A1', 'O', 'O', 'O', 'B-V', 'B-A4', 'I-A4', 'I-A4', 'I-A4', 'I-A4', 'B-A3', 'I-A3',
'I-A3', 'I-A3', 'I-A3', 'I-A3', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O',
'O', 'O', 'O', 'O'], instances[2][LABEL_KEY])
self.assertEqual(
['B-A1', 'I-A1', 'I-A1', 'I-A1', 'I-A1', 'I-A1', 'I-A1', 'I-A1', 'I-A1', 'O', 'B-V', 'B-C-A1', 'I-C-A1', 'I-C-A1',
'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1',
'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'O'], instances[3][LABEL_KEY])
self.assertEqual(
['B-A1', 'I-A1', 'I-A1', 'I-A1', 'I-A1', 'I-A1', 'I-A1', 'I-A1', 'I-A1', 'O', 'O', 'O', 'B-V', 'O', 'B-AM-ADV',
'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV',
'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'O'], instances[4][LABEL_KEY])
self.assertEqual(
['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-A1', 'I-A1',
'I-A1', 'I-A1', 'B-V', 'B-AM-TMP', 'B-AM-LOC', 'I-AM-LOC', 'I-AM-LOC', 'I-AM-LOC', 'I-AM-LOC', 'O'],
instances[5][LABEL_KEY])
def test_srl_reader_phrases(self):
filepath = pkg_resources.resource_filename(__name__, "resources/conll05-test.txt")
instances = list(conll_2005_reader(phrase=True).read_file(filepath))
self.assertEqual(6, len(instances))
self.assertEqual(
['B-A1', 'I-A1', 'B-AM-MOD', 'O', 'B-V', 'B-A2', 'I-A2', 'B-AM-TMP', 'O', 'B-AM-ADV', 'I-AM-ADV', 'I-AM-ADV',
'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'O'], instances[0][LABEL_KEY])
self.assertEqual(
['B-A1', 'O', 'B-V', 'B-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'O', 'B-AM-ADV',
'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV',
'I-AM-ADV', 'O'], instances[1][LABEL_KEY])
self.assertEqual(
['B-A1', 'O', 'O', 'O', 'B-V', 'B-A4', 'I-A4', 'B-A3', 'I-A3', 'I-A3', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O',
'O', 'O', 'O', 'O'], instances[2][LABEL_KEY])
self.assertEqual(
['B-A1', 'I-A1', 'I-A1', 'I-A1', 'O', 'B-V', 'B-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1',
'I-C-A1', 'I-C-A1', 'I-C-A1', 'I-C-A1', 'O'], instances[3][LABEL_KEY])
self.assertEqual(
['B-A1', 'I-A1', 'I-A1', 'I-A1', 'O', 'O', 'O', 'B-V', 'O', 'B-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV',
'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'I-AM-ADV', 'O'], instances[4][LABEL_KEY])
self.assertEqual(['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-A1', 'B-V', 'B-AM-TMP', 'B-AM-LOC', 'I-AM-LOC', 'O'],
instances[5][LABEL_KEY])
```
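Outside the test harness the readers are driven the same way; a hedged sketch with an invented data path:
```python
from tfnlp.common.constants import LABEL_KEY
from tfnlp.readers import conll_2005_reader

reader = conll_2005_reader()
for instance in reader.read_file('data/conll05/dev.txt'):  # path is hypothetical
    # one instance per (sentence, predicate) pair, with BIO role labels under LABEL_KEY
    print(len(instance[LABEL_KEY]), instance[LABEL_KEY][:5])
```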
#### File: tfnlp/common/eval.py
```python
import os
import re
from collections import defaultdict
from typing import Iterable, Tuple, Dict, List
import numpy as np
import tensorflow as tf
from tensorflow.compat.v1 import logging
from tensorflow.compat.v1 import trainable_variables
from tensorflow.python.lib.io import file_io
from tfnlp.common.bert import BERT_SUBLABEL, BERT_CLS, BERT_SEP
from tfnlp.common.chunk import chunk
from tfnlp.common.conlleval import conll_eval_lines
from tfnlp.common.parsing import nonprojective
from tfnlp.common.srleval import evaluate
SUMMARY_FILE = 'eval-summary'
EVAL_LOG = 'eval.log'
PREDICTIONS_FILE = 'predictions.txt'
GOLD_FILE = 'gold.txt'
def conll_eval(gold_batches, predicted_batches, indices, output_file=None):
"""
Run the CoNLL-2003 evaluation script on provided predicted sequences.
:param gold_batches: list of gold label sequences
:param predicted_batches: list of predicted label sequences
:param indices: order of sequences
:param output_file: optional output file name for predictions
:return: tuple of (overall F-score, script_output)
"""
def get_lines():
for gold_seq, predicted_seq, index in sorted(zip(gold_batches, predicted_batches, indices), key=lambda k: k[2]):
for i, (label, prediction) in enumerate(zip(gold_seq, predicted_seq)):
if label in [BERT_SUBLABEL, BERT_CLS, BERT_SEP]:
continue
if prediction == BERT_SUBLABEL:
prediction = 'O'
res = "_ {} {}".format(label, prediction)
yield res
yield "" # sentence break
if output_file:
with file_io.FileIO(output_file, 'w') as output:
for line in get_lines():
output.write(line + '\n')
result = conll_eval_lines(get_lines(), raw=True).to_conll_output()
return float(re.split('\\s+', re.split('\n', result)[1].strip())[7]), result
def conll_srl_eval(gold_batches, predicted_batches, markers, ids):
"""
Run the CoNLL-2005 evaluation script on provided predicted sequences.
:param gold_batches: list of gold label sequences
:param predicted_batches: list of predicted label sequences
:param markers: list of predicate marker sequences
:param ids: list of sentence indices
:return: tuple of (overall F-score, script_output, confusion_matrix)
"""
gold_props = _convert_to_sentences(labels=gold_batches, pred_indices=markers, sentence_ids=ids)
pred_props = _convert_to_sentences(labels=predicted_batches, pred_indices=markers, sentence_ids=ids)
return evaluate(gold_props, pred_props)
def _convert_to_sentences(labels: List[Iterable[str]],
pred_indices: List[int],
sentence_ids: List[int]) -> List[Dict[int, List[str]]]:
sentences = []
for predicates, props_by_predicate in _get_predicates_and_props(labels, pred_indices, sentence_ids):
current_sentence = defaultdict(list)
props = [v for k, v in sorted(props_by_predicate.items(), key=lambda x: x[0])]
for tok, predicate in enumerate(predicates):
current_sentence[0].append(predicate)
for i, prop in enumerate(props):
current_sentence[i + 1].append(prop[tok])
sentences.append(current_sentence)
return sentences
def write_props_to_file(output_file,
labels: List[Iterable[str]],
markers: List[int],
sentence_ids: List[int]):
"""
Write PropBank predictions to a file.
:param output_file: output file
:param labels: lists of labels
:param markers: predicate markers
:param sentence_ids: sentence indices
"""
with file_io.FileIO(output_file, 'w') as output_file:
for predicates, props_by_predicate in _get_predicates_and_props(labels, markers, sentence_ids):
# sorting to ensure proposition columns are in correct order (by appearance of predicate in sentence)
prop_list = [arg for _, arg in sorted(props_by_predicate.items(), key=lambda item: item[0])]
line = ''
for tok, predicate in enumerate(predicates):
line += '%s %s\n' % (predicate, ' '.join([prop[tok] for prop in prop_list]))
output_file.write(line + '\n')
def _get_predicates_and_props(labels: List[Iterable[str]],
pred_indices: List[int],
sentence_ids: List[int]) -> Iterable[Tuple[Iterable[str], Dict[int, List[str]]]]:
prev_sent_idx = -1 # previous sentence's index
predicates = [] # list of '-' or 'x', with one per token ('x' indicates the token is a predicate)
props_by_predicate = {} # dict from predicate indices to list of predicted or gold argument labels (1 per token)
for labels, pred_idx, curr_sent_idx in sorted(zip(labels, pred_indices, sentence_ids), key=lambda x: x[2]):
filtered_labels = []
for label in labels:
if label == BERT_SUBLABEL:
continue
filtered_labels.append(label)
if prev_sent_idx != curr_sent_idx: # either first sentence, or a new sentence
prev_sent_idx = curr_sent_idx
if predicates:
yield predicates, props_by_predicate
predicates = ["-"] * len(filtered_labels)
props_by_predicate = {}
predicates[pred_idx] = 'x' # official eval script requires predicate to be a character other than '-'
props_by_predicate[pred_idx] = chunk(filtered_labels, conll=True) # assign SRL labels for this predicate
if predicates:
yield predicates, props_by_predicate
def append_prediction_output(identifier, header, line, detailed, output_path, confusions=None):
output_dir = os.path.dirname(output_path)
summary_file = os.path.join(output_dir, '%s.%s.tsv' % (SUMMARY_FILE, identifier))
eval_log = os.path.join(output_dir, EVAL_LOG)
exists = tf.gfile.Exists(summary_file) and tf.gfile.Exists(eval_log)
if not exists:
with file_io.FileIO(summary_file, 'w') as summary:
summary.write(header)
summary.write('\n')
with file_io.FileIO(eval_log, 'w') as log:
log.write('%s\n\n' % output_dir)
with file_io.FileIO(summary_file, 'a') as summary:
summary.write(line)
summary.write('\n')
with file_io.FileIO(eval_log, 'a') as log:
log.write('\nID: %s\n' % output_path)
log.write(str(detailed) + '\n')
if confusions:
log.write('\n%s\n\n' % str(confusions))
def get_parse_prediction(arc_prob_matrix, rel_prob_tensor, rel_feat=None):
arc_preds = nonprojective(arc_prob_matrix)
arc_preds_one_hot = np.zeros([rel_prob_tensor.shape[0], rel_prob_tensor.shape[2]])
arc_preds_one_hot[np.arange(len(arc_preds)), arc_preds] = 1.
rel_preds = np.argmax(np.einsum('nrb,nb->nr', rel_prob_tensor, arc_preds_one_hot), axis=1)
if rel_feat:
rel_preds = [rel_feat.index_to_feat(rel) for rel in rel_preds]
return arc_preds, rel_preds
def to_conllx_line(index, word, arc_pred, rel_pred):
# ID FORM LEMMA CPOS POS FEAT HEAD DEPREL PHEAD PDEPREL
fields = ['_'] * 10
fields[0] = str(index + 1)
fields[1] = word
fields[2] = word
fields[6] = str(arc_pred)
fields[7] = rel_pred
return fields
def to_conll09_line(index, word, arc_pred, rel_pred):
# ID FORM LEMMA PLEMMA POS PPOS FEAT PFEAT HEAD PHEAD DEPREL PDEPREL FILLPRED PRED APREDs
fields = ['_'] * 15
fields[0] = str(index + 1)
fields[1] = word
fields[2] = word
fields[8] = str(arc_pred)
fields[9] = str(arc_pred)
fields[10] = rel_pred
fields[11] = rel_pred
return fields
def write_parse_result_to_file(sentence_heads, sentence_rels, file, line_func=to_conllx_line, words=None):
if not words or len(words) == 0:
words = ['x'] * (len(sentence_rels) - 1)
for index, (word, arc_pred, rel_pred) in enumerate(zip(words, sentence_heads[1:], sentence_rels[1:])):
fields = line_func(index, word, arc_pred, rel_pred)
file.write('\t'.join(fields) + '\n')
file.write('\n')
def log_trainable_variables():
"""
Log every trainable variable name and shape and return the total number of trainable variables.
:return: total number of trainable variables
"""
all_weights = {variable.name: variable for variable in trainable_variables()}
total_size = 0
weights = []
for variable_name in sorted(list(all_weights)):
variable = all_weights[variable_name]
weights.append("%s\tshape %s" % (variable.name[:-2].ljust(80), str(variable.shape).ljust(20)))
variable_size = int(np.prod(np.array(variable.shape.as_list())))
total_size += variable_size
weights.append("Total trainable variables size: %d" % total_size)
logging.log_first_n(logging.INFO, "Trainable variables:\n%s\n", 1, '\n'.join(weights))
return total_size
```
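A toy call to `conll_eval` (all label sequences invented) shows the expected input shape: parallel lists of gold and predicted BIO sequences plus the original sentence order.
```python
from tfnlp.common.eval import conll_eval

gold = [['B-ORG', 'I-ORG', 'O'], ['B-LOC', 'O']]
pred = [['B-ORG', 'O', 'O'], ['B-LOC', 'O']]
f1, report = conll_eval(gold, pred, indices=[0, 1])
print(f1)      # overall chunk F1 (a percentage parsed from the conlleval report)
print(report)  # the usual per-entity-type conlleval breakdown
```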
#### File: tfnlp/common/parsing.py
```python
import numpy as np
def find_cycles(edges):
vertices = np.arange(len(edges))
indices = np.zeros_like(vertices) - 1
lowlinks = np.zeros_like(vertices) - 1
stack = []
onstack = np.zeros_like(vertices, dtype=bool)
current_index = 0
cycles = []
# ===========================================================================
def strong_connect(_vertex, _current_index):
indices[_vertex] = _current_index
lowlinks[_vertex] = _current_index
stack.append(_vertex)
_current_index += 1
onstack[_vertex] = True
for vertex_ in np.where(edges == _vertex)[0]:
if indices[vertex_] == -1:
_current_index = strong_connect(vertex_, _current_index)
lowlinks[_vertex] = min(lowlinks[_vertex], lowlinks[vertex_])
elif onstack[vertex_]:
lowlinks[_vertex] = min(lowlinks[_vertex], indices[vertex_])
if lowlinks[_vertex] == indices[_vertex]:
cycle = []
vertex_ = -1
while vertex_ != _vertex:
vertex_ = stack.pop()
onstack[vertex_] = False
cycle.append(vertex_)
if len(cycle) > 1:
cycles.append(np.array(cycle))
return _current_index
# ===========================================================================
for vertex in vertices:
if indices[vertex] == -1:
current_index = strong_connect(vertex, current_index)
return cycles
def find_roots(edges):
return np.where(edges[1:] == 0)[0] + 1
def make_root(probs, root):
probs = np.array(probs)
probs[1:, 0] = 0
probs[root, :] = 0
probs[root, 0] = 1
probs /= np.sum(probs, axis=1, keepdims=True)
return probs
def greedy(probs):
edges = np.argmax(probs, axis=1)
cycles = True
while cycles:
cycles = find_cycles(edges)
for cycle_vertices in cycles:
# Get the best heads and their probabilities
cycle_edges = edges[cycle_vertices]
cycle_probs = probs[cycle_vertices, cycle_edges]
# Get the second-best edges and their probabilities
probs[cycle_vertices, cycle_edges] = 0
backoff_edges = np.argmax(probs[cycle_vertices], axis=1)
backoff_probs = probs[cycle_vertices, backoff_edges]
probs[cycle_vertices, cycle_edges] = cycle_probs
# Find the node in the cycle that the model is the least confident about and its probability
new_root_in_cycle = np.argmax(backoff_probs / cycle_probs)
new_cycle_root = cycle_vertices[new_root_in_cycle]
# Set the new root
# noinspection PyUnresolvedReferences
probs[new_cycle_root, cycle_edges[new_root_in_cycle]] = 0
edges[new_cycle_root] = backoff_edges[new_root_in_cycle]
return edges
def score_edges(probs, edges):
return np.sum(np.log(probs[np.arange(1, len(probs)), edges[1:]]))
def nonprojective(probs):
probs *= 1 - np.eye(len(probs)).astype(np.float32)
# ensure head/dummy token is assigned itself as a head
probs[0] = 0
probs[0, 0] = 1
probs /= np.sum(probs, axis=1, keepdims=True)
edges = greedy(probs)
roots = find_roots(edges)
best_edges = edges
best_score = -np.inf
if len(roots) > 1:
for root in roots:
_probs = make_root(probs, root)
_edges = greedy(_probs)
score = score_edges(_probs, _edges)
if score > best_score:
best_edges = _edges
best_score = score
return best_edges
```
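A toy head-probability matrix makes the convention concrete: row `i` holds the head distribution for token `i`, column 0 is the artificial ROOT, and `nonprojective` returns one head index per token. The probabilities below are invented.
```python
import numpy as np
from tfnlp.common.parsing import nonprojective

probs = np.array([
    [1.0, 0.0, 0.0, 0.0],  # row 0 (ROOT) is overwritten inside nonprojective()
    [0.1, 0.0, 0.8, 0.1],  # token 1: most likely headed by token 2
    [0.7, 0.2, 0.0, 0.1],  # token 2: most likely attached to ROOT
    [0.1, 0.1, 0.8, 0.0],  # token 3: most likely headed by token 2
], dtype=np.float32)
print(nonprojective(probs))  # [0 2 0 2]
```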
#### File: tfnlp/common/utils.py
```python
import json
import os
import pickle
from tensorflow.python.lib.io import file_io
class Params(dict):
def __init__(self, **kwargs):
super().__init__()
for key, val in kwargs.items():
setattr(self, key, val)
self[key] = val
def __setattr__(self, key, value):
super().__setattr__(key, value)
self[key] = value
def convert_to_attributes(dictionary):
for key, val in dictionary.items():
if isinstance(val, dict):
dictionary[key] = convert_to_attributes(val)
elif isinstance(val, list):
result = []
for entry in val:
if isinstance(entry, dict):
result.append(convert_to_attributes(entry))
else:
result.append(entry)
dictionary[key] = result
return Params(**dictionary)
def read_jsons(json_string):
"""
Read a JSON string as an attribute dictionary--all dicts are recursively converted to attribute dictionaries.
:param json_string: JSON string
:return: attribute dictionary for input JSON
"""
json_dict = json.loads(json_string)
return convert_to_attributes(json_dict)
def read_json(json_path, as_params=True):
"""
Read a JSON file as an attribute dictionary--all dicts are recursively converted to attribute dictionaries.
:param json_path: path to JSON file
:param as_params: convert result into a `Params` object with key values accessible through attributes
:return: attribute dictionary for input JSON
"""
with file_io.FileIO(json_path, 'r') as lines:
json_dict = json.load(lines)
if not as_params:
return json_dict
return convert_to_attributes(json_dict)
def set_default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
def write_json(value, json_path):
with file_io.FileIO(json_path, 'w') as json_out:
json_out.write(json.dumps(value, indent=4, sort_keys=True, default=set_default))
def serialize(serializable, out_path, out_name=None):
if out_name:
out_name = out_name if out_name.endswith(".pkl") else "{}.pkl".format(out_name)
path = os.path.join(out_path, out_name) if out_name else out_path
parent_path = os.path.abspath(os.path.join(path, os.path.pardir))
try:
os.makedirs(parent_path)
except OSError:
if not os.path.isdir(parent_path):
raise
if os.path.exists(path):
raise AssertionError("Pre-existing vocabulary file at %s" % path)
with file_io.FileIO(path, mode="wb") as out_file:
pickle.dump(serializable, out_file)
def deserialize(in_path, in_name=None):
if in_name:
in_name = in_name if in_name.endswith(".pkl") else "{}.pkl".format(in_name)
path = os.path.join(in_path, in_name) if in_name else in_path
with file_io.FileIO(path, mode="rb") as in_file:
return pickle.load(in_file)
def binary_np_array_to_unicode(np_string_array):
if isinstance(np_string_array, list):
return np_string_array
return [bstr.decode('utf-8') for bstr in np_string_array.tolist()]
```
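A brief sketch of the attribute-dictionary helpers above; the configuration string is made up for illustration:
```python
config_json = '{"optimizer": {"name": "adam", "lr": 0.001}, "layers": [64, 64]}'

params = read_jsons(config_json)
print(params.optimizer.name)    # 'adam'   -- nested dicts become Params
print(params.optimizer['lr'])   # 0.001    -- dict-style access still works
print(params.layers)            # [64, 64] -- lists are preserved
```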
#### File: tfnlp/layers/reduce.py
```python
import math
import tensorflow as tf
class ReduceFunc(object):
def apply(self, tensor):
pass
class Mean(ReduceFunc):
def apply(self, tensor):
return tf.reduce_mean(tensor, axis=-2) # reduce along sequence dimension
class ConvNet(ReduceFunc):
def __init__(self, input_size, kernel_size, num_filters, max_length):
"""
Initialize 1D CNN with max-over-time pooling reduction op.
:param input_size: input/channel dimensionality
:param kernel_size: size of 1D window, length of first dimension of each filter
:param num_filters: number of [kernel_size, num_channels] filters, the dimensionality of output
:param max_length: maximum length of 3rd dimension of input tensors
"""
super(ConvNet, self).__init__()
self.input_size = input_size
self.kernel_size = kernel_size
self.filters = num_filters
self.sequence_length = max_length
def apply(self, tensor):
return self.max_over_time_pooling_cnn(tensor=tensor, input_size=self.input_size,
sequence_length=self.sequence_length,
num_filters=self.filters, kernel_size=self.kernel_size)
@staticmethod
def max_over_time_pooling_cnn(tensor, input_size, num_filters, kernel_size, sequence_length=None):
"""
Return a 1D CNN with max-over-time pooling.
:param input_size: channel dimensionality
:param tensor: 4D input tensor: [batch_size, time_steps, sequence_length, num_channels]
:param num_filters: number of [kernel_size, num_channels] filters, the dimensionality of output
:param kernel_size: size of 1D window, length of first dimension of each filter
:param sequence_length: number of time steps (3rd dimension of input tensor)
:return: 3D tensor [batch_size, time_steps, filters]
"""
shape = tf.shape(tensor)
if tensor.shape.ndims == 4:
flatten = True
elif tensor.shape.ndims == 3:
flatten = False
else:
raise ValueError('Expecting 3 or 4-dimensional Tensor as input, got %s dims' % tensor.shape.ndims)
# flatten sequences for input
if flatten:
tensor = tf.reshape(tensor, shape=[-1, sequence_length, input_size])
limit = math.sqrt(3.0 / num_filters)
initializer = tf.random_uniform_initializer(-limit, limit)
tensor = tf.layers.conv1d(tensor, filters=num_filters, kernel_size=kernel_size, activation=tf.nn.relu,
kernel_initializer=initializer)
if flatten:
tensor = tf.layers.max_pooling1d(tensor, pool_size=sequence_length - kernel_size + 1, strides=1)
tensor = tf.reshape(tensor, shape=[-1, shape[1], num_filters])
else:
tensor = tf.reduce_max(tensor, axis=1)
return tensor
```
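A minimal sketch of the `ConvNet` reducer applied to a 4D character-embedding tensor; it assumes TF 1.x graph mode (matching the `tf.layers`/`tf.placeholder` API above) and the sizes are illustrative:
```python
import tensorflow as tf

# [batch, time_steps, max_word_len, char_dim]: per-token character embeddings
chars = tf.placeholder(tf.float32, shape=[None, None, 20, 32])

reducer = ConvNet(input_size=32, kernel_size=3, num_filters=100, max_length=20)
word_repr = reducer.apply(chars)  # -> [batch, time_steps, 100]
```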
#### File: tfnlp/model/model.py
```python
from collections import OrderedDict
import tensorflow as tf
import tensorflow_estimator as tfe
from tensorflow.python.estimator.export.export_output import PredictOutput
from tensorflow.python.saved_model import signature_constants
from tfnlp.common import constants
from tfnlp.common.config import train_op_from_config
from tfnlp.common.eval import log_trainable_variables
from tfnlp.common.training_utils import assign_ema_weights
from tfnlp.layers.heads import ClassifierHead, TaggerHead, TokenClassifierHead, BiaffineSrlHead
from tfnlp.layers.layers import encoder, embedding, get_embedding_input
from tfnlp.model.parser import ParserHead
def build(features, mode, params):
config = params.config
training = mode == tfe.estimator.ModeKeys.TRAIN
encoder_configs = {enc.name: enc for enc in config.encoders}
head_configs = {head.name: head for head in config.heads}
inputs = {feat: embedding(features, feat_conf, training) for feat, feat_conf in params.extractor.features.items()}
heads = {}
encoders = {}
def get_head(_head_config):
if _head_config.name in heads:
return heads[_head_config.name]
head_encoder = get_encoder(encoder_configs[_head_config.encoder])
head = model_head(_head_config, head_encoder, features, mode, params)
heads[_head_config.name] = head
return head
def get_encoder(_encoder_config):
if _encoder_config.name in encoders:
return encoders[_encoder_config.name]
# build encoder recursively
encoder_features = OrderedDict()
for encoder_input in _encoder_config.inputs:
if encoder_input in inputs:
# input from embedding/feature input
encoder_features[encoder_input] = inputs[encoder_input]
elif encoder_input in encoder_configs:
# input from another encoder
encoder_config = encoder_configs[encoder_input]
encoder_features[encoder_input] = get_encoder(encoder_config)
elif encoder_input in head_configs:
# input from a model head
head_config = head_configs[encoder_input]
head = get_head(head_config)
weights = None
if training and head_config.teacher_forcing:
predictions = features[head.name]
else:
if head_config.weighted_embedding:
weights = head.scores
predictions = head.predictions
encoder_features[encoder_input] = get_embedding_input(predictions, head.extractor, training, weights=weights)
else:
raise ValueError('Missing encoder input: %s' % encoder_input)
result = encoder(features, list(encoder_features.values()), mode, _encoder_config)
encoders[_encoder_config.name] = result
return result
return [get_head(head) for head in config.heads]
def multi_head_model_fn(features, mode, params):
config = params.config
heads = build(features, mode, params)
# combine losses
loss = None
if mode in [tfe.estimator.ModeKeys.TRAIN, tfe.estimator.ModeKeys.EVAL]:
# compute loss for each target
losses = [head.weight * head.loss for head in heads]
# just compute mean over losses (possibly consider a more sophisticated strategy?)
loss = losses[0] if len(losses) == 1 else tf.reduce_mean(tf.stack(losses))
dependencies = []
# optionally setup exponential moving average of parameters
if config.ema_decay > 0:
dependencies.append(_exponential_moving_average_op(mode, config.ema_decay))
else:
dependencies.append(tf.no_op())
with tf.control_dependencies(dependencies):
# make sure we have properly assigned averaged variables if we are evaluating
if mode == tfe.estimator.ModeKeys.TRAIN:
log_trainable_variables()
train_op = train_op_from_config(config, loss)
return tfe.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# EVAL/PREDICT -----------------------------------------------------------------------------------------------------------
# combine predictions
predictions = {}
if mode in [tfe.estimator.ModeKeys.EVAL, tfe.estimator.ModeKeys.PREDICT]:
for head in heads:
predictions[head.name] = head.predictions
# combine evaluation hooks and metrics
eval_metric_ops = {}
evaluation_hooks = []
if mode == tfe.estimator.ModeKeys.EVAL:
for head in heads:
eval_metric_ops.update(head.metric_ops)
evaluation_hooks.extend(head.evaluation_hooks)
# combine export outputs
export_outputs = None
if mode == tfe.estimator.ModeKeys.PREDICT:
export_outputs = {}
combined_outputs = {}
for head in heads:
export_outputs[head.name] = PredictOutput(head.export_outputs)
combined_outputs.update(head.export_outputs)
# combined signature with all relevant outputs
export_outputs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = PredictOutput(combined_outputs)
return tfe.estimator.EstimatorSpec(mode=mode,
predictions=predictions,
loss=loss,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs,
evaluation_hooks=evaluation_hooks)
def model_head(config, inputs, features, mode, params):
"""
Initialize a model head from a given configuration.
:param config: head configuration
:param inputs: output from encoder (e.g. biLSTM), input to head
:param features: all model inputs
:param mode: Estimator mode type (TRAIN, EVAL, or PREDICT)
:param params: HParams input to Estimator
:return: initialized model head
"""
heads = {
constants.CLASSIFIER_KEY: ClassifierHead,
constants.TAGGER_KEY: TaggerHead,
constants.NER_KEY: TaggerHead,
constants.SRL_KEY: TaggerHead,
constants.BIAFFINE_SRL_KEY: BiaffineSrlHead,
constants.TOKEN_CLASSIFIER_KEY: TokenClassifierHead,
constants.PARSER_KEY: ParserHead
}
if config.type not in heads:
raise AssertionError('Unsupported head type: %s' % config.type)
head = heads[config.type](inputs=inputs, config=config, features=features, params=params,
training=mode == tfe.estimator.ModeKeys.TRAIN)
if mode == tfe.estimator.ModeKeys.TRAIN:
head.training()
elif mode == tfe.estimator.ModeKeys.EVAL:
head.evaluation()
elif mode == tfe.estimator.ModeKeys.PREDICT:
head.prediction()
return head
def _exponential_moving_average_op(mode, ema_decay):
ema = tf.train.ExponentialMovingAverage(ema_decay, num_updates=tf.train.get_global_step(), zero_debias=True)
ema_op = ema.apply(tf.trainable_variables())
tf.logging.debug("Using EMA for variables: %s" % str([v.name for v in tf.trainable_variables()]))
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, ema_op)
# only use EMA averages when evaluating
ema_dep = tf.cond(tf.equal(mode, tfe.estimator.ModeKeys.TRAIN),
lambda: tf.no_op(),
lambda: assign_ema_weights(ema))
return ema_dep
```
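A hedged sketch of wiring `multi_head_model_fn` into an Estimator, mirroring the `tfe.estimator` namespace used above; `hparams` and the model directory are placeholders, and `hparams` must provide the `config`/`extractor` attributes the model function reads:
```python
import tensorflow_estimator as tfe

# `hparams` is assumed to expose .config (encoders, heads, ema_decay, ...) and
# .extractor, as accessed by multi_head_model_fn above.
estimator = tfe.estimator.Estimator(
    model_fn=multi_head_model_fn,
    model_dir='/tmp/tfnlp-model',  # placeholder path
    params=hparams,
)
# estimator.train(input_fn=train_input_fn)  # input_fn defined elsewhere
```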
#### File: tfnlp/model/parser.py
```python
import os
import tensorflow as tf
from tensorflow.compat.v1 import Variable
import tfnlp.common.constants as constants
from tfnlp.cli.evaluators import DepParserEvaluator
from tfnlp.common.config import append_label
from tfnlp.common.eval_hooks import ParserEvalHook
from tfnlp.layers.heads import ModelHead
from tfnlp.layers.layers import get_encoder_input
from tfnlp.layers.util import select_logits, bilinear, get_shape, mlp
class ParserHead(ModelHead):
def __init__(self, inputs, config, features, params, training):
super().__init__(inputs, config, features, params, training)
self.arc_predictions = None
self.arc_logits = None
self.rel_logits = None
self.n_steps = None
self.arc_targets = None
self.mask = None
self.arc_probs = None
self.rel_probs = None
self.predictions = None
self.lens = self.features[constants.LENGTH_KEY] + 1 # plus one for sentinel
def _all(self):
inputs = get_encoder_input(self.inputs)
input_shape = get_shape(inputs) # (b x n x d), d == output_size
self.n_steps = input_shape[1] # n
# apply 2 arc and 2 rel MLPs to each output vector (1 for representing dependents, 1 for heads)
def _mlp(size, name):
return mlp(inputs, input_shape, self.config.mlp_dropout, size, self._training, name, n_splits=2)
dep_arc_mlp, head_arc_mlp = _mlp(self.config.arc_mlp_size, name="arc_mlp") # (bn x d), where d == arc_mlp_size
dep_rel_mlp, head_rel_mlp = _mlp(self.config.rel_mlp_size, name="rel_mlp") # (bn x d), where d == rel_mlp_size
# apply binary biaffine classifier for arcs
with tf.variable_scope("arc_bilinear_logits"):
self.arc_logits = bilinear(dep_arc_mlp, head_arc_mlp, 1, self.n_steps, include_bias2=False) # (b x n x n)
self.arc_predictions = tf.argmax(self.arc_logits, axis=-1) # (b x n)
# apply variable class biaffine classifier for rels
with tf.variable_scope("rel_bilinear_logits"):
num_labels = self.extractor.vocab_size() # r
self.rel_logits = bilinear(dep_rel_mlp, head_rel_mlp, num_labels, self.n_steps) # (b x n x r x n)
def _train_eval(self):
self.mask = tf.sequence_mask(self.lens, name="padding_mask")
# compute combined arc and rel losses (both via softmax cross entropy)
def compute_loss(logits, targets, name):
with tf.variable_scope(name):
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets)
losses = tf.boolean_mask(losses, self.mask)
return tf.reduce_mean(losses)
self.arc_targets = tf.identity(self.features[constants.HEAD_KEY], name=constants.HEAD_KEY)
arc_loss = compute_loss(self.arc_logits, self.arc_targets, "arc_bilinear_loss")
_rel_logits = select_logits(self.rel_logits, self.arc_targets, self.n_steps)
rel_loss = compute_loss(_rel_logits, self.targets, "rel_bilinear_loss")
arc_loss = self.config.get('arc_loss_weight', 1) * arc_loss
rel_loss = self.config.get('rel_loss_weight', 1) * rel_loss
self.loss = arc_loss + rel_loss
self.metric = Variable(0, name=append_label(constants.OVERALL_KEY, self.name), dtype=tf.float32, trainable=False)
def _eval_predict(self):
# compute relations, and arc/prob probabilities for use in MST algorithm
self.arc_probs = tf.nn.softmax(self.arc_logits) # (b x n)
self.rel_probs = tf.nn.softmax(self.rel_logits, axis=2) # (b x n x r x n)
_rel_logits = select_logits(self.rel_logits, self.arc_predictions, self.n_steps) # (b x n x r)
self.predictions = tf.argmax(_rel_logits, axis=-1) # (b x n)
def _evaluation(self):
# compute metrics, such as UAS, LAS, and LA
arc_correct = tf.boolean_mask(tf.to_int32(tf.equal(self.arc_predictions[:, 1:], self.arc_targets[:, 1:])),
self.mask[:, 1:])
rel_correct = tf.boolean_mask(tf.to_int32(tf.equal(self.predictions[:, 1:], self.targets[:, 1:])),
self.mask[:, 1:])
n_arc_correct = tf.cast(tf.reduce_sum(arc_correct), tf.int32)
n_rel_correct = tf.cast(tf.reduce_sum(rel_correct), tf.int32)
correct = arc_correct * rel_correct
n_correct = tf.cast(tf.reduce_sum(correct), tf.int32)
n_tokens = tf.cast(tf.reduce_sum(self.lens - 1), tf.int32) # minus 1 for sentinel
self.metric_ops = {
constants.UNLABELED_ATTACHMENT_SCORE: tf.metrics.mean(n_arc_correct / n_tokens),
constants.LABEL_SCORE: tf.metrics.mean(n_rel_correct / n_tokens),
constants.LABELED_ATTACHMENT_SCORE: tf.metrics.mean(n_correct / n_tokens),
}
overall_score = tf.identity(self.metric)
self.metric_ops[append_label(constants.OVERALL_KEY, self.name)] = (overall_score, overall_score)
overall_key = append_label(constants.OVERALL_KEY, self.name)
# https://github.com/tensorflow/tensorflow/issues/20418 -- metrics don't accept variables, so we create a tensor
eval_placeholder = tf.placeholder(dtype=tf.float32, name='update_%s' % overall_key)
self.evaluation_hooks = []
hook = ParserEvalHook(
{
constants.ARC_PROBS: self.arc_probs,
constants.REL_PROBS: self.rel_probs,
constants.LENGTH_KEY: self.lens, # plus one for the sentinel
constants.HEAD_KEY: self.features[constants.HEAD_KEY],
constants.DEPREL_KEY: self.features[constants.DEPREL_KEY],
constants.SENTENCE_INDEX: self.features[constants.SENTENCE_INDEX]
},
evaluator=DepParserEvaluator(
target=self.extractor,
output_path=os.path.join(self.params.job_dir, self.name + '.dev'),
script_path=self.params.script_path
),
eval_update=tf.assign(self.metric, eval_placeholder),
eval_placeholder=eval_placeholder,
output_dir=self.params.job_dir
)
self.evaluation_hooks.append(hook)
def _prediction(self):
self.export_outputs = {constants.REL_PROBS: self.rel_probs,
constants.ARC_PROBS: self.arc_probs}
``` |
{
"source": "jgunstone/xlsxtemplater",
"score": 3
} |
#### File: xlsxtemplater/xlsxtemplater/templater.py
```python
import pandas as pd
import os
import copy
import subprocess
from dataclasses import asdict
from xlsxtemplater.utils import open_file, jobno_fromdir, get_user, date
from xlsxtemplater.templaterdefs import *
def create_meta(fpth):
di = {}
di['JobNo'] = jobno_fromdir(fpth)
di['Date'] = date()
di['Author'] = get_user()
return di
def create_readme(toexcel: ToExcel) -> SheetObj:
"""
creates a readme dataframe from the metadata in the dataobject definitions
"""
def notes_from_sheet(sheet: SheetObj):
di = {
'sheet_name':sheet.sheet_name,
'xlsx_params':str(type(sheet.xlsx_params)),
'xlsx_exporter': str(sheet.xlsx_exporter)
}
di.update(sheet.notes)
return di
li = [notes_from_sheet(sheet) for sheet in toexcel.sheets]
df = pd.DataFrame.from_records(li).set_index('sheet_name')
df = df.reset_index()
di = {
'sheet_name': 'readme',
'xlsx_exporter': df_to_sheet_table,
'xlsx_params': params_readme(df),
'df': df,
}
readme = from_dict(data_class=SheetObj,data=di)
return readme
def create_sheet_objs(data_object, fpth) -> ToExcel:
'''
pass a dataobject and return a ToExcel object
this function interprets the user input and tidies it into the correct format.
'''
def default(df, counter):
di_tmp = {
'sheet_name': 'Sheet{0}'.format(counter),
#'xlsx_exporter': df_to_sheet_table,
#'xlsx_params': None,
'df': df,
}
counter += 1
return di_tmp, counter
def add_notes(di, fpth):
if 'notes' not in di.keys():
di['notes'] = {}
di['notes'].update(create_meta(fpth))
return di
counter = 1
lidi = []
if type(data_object) == pd.DataFrame:
# then export the DataFrame with the default exporter (i.e. as a table to sheet_name = Sheet1)
di, counter = default(data_object, counter)
di = add_notes(di, fpth)
lidi.append(di)
if type(data_object) == list:
# then iterate through the list. 1no sheet / item in list
for l in data_object:
if type(l) == pd.DataFrame:
# then export the DataFrame with the default exporter (i.e. as a table to sheet_name = Sheet#)
di, counter = default(l, counter)
di = add_notes(di, fpth)
lidi.append(di)
elif type(l) == dict:
# then export the DataFrame with the exporter defined by the dict
l = add_notes(l, fpth)
#l = add_defaults(l)
lidi.append(l)
else:
print('you need to pass a list of dataframes or dicts for this function to work')
if type(data_object) == dict:
data_object = add_notes(data_object, fpth)
lidi.append(data_object)
sheets = [from_dict(data_class=SheetObj,data=l) for l in lidi] # defaults are added here if not previously specified
toexcel = ToExcel(sheets=sheets)
return toexcel
def object_to_excel(toexcel: ToExcel, fpth: str, file_properties: FileProperties):
"""
Args:
toexcel: ToExcel object
fpth:
file_properties: FileProperties object
Returns:
fpth
"""
# initiate xlsxwriter
writer = pd.ExcelWriter(fpth, engine='xlsxwriter')
workbook = writer.book
for sheet in toexcel.sheets:
sheet.xlsx_exporter(sheet.df, writer, workbook, sheet.sheet_name, sheet.xlsx_params)
workbook.set_properties(asdict(file_properties))
# save and close the workbook
writer.save()
return fpth
def to_excel(data_object,
fpth,
file_properties: FileProperties = None,
openfile: bool = True,
make_readme: bool = True) -> str:
"""
function to output dataobject (list of dicts of dataframes and associated metadata)
to excel in nicely formatted tables.
Args:
data_object (list of dicts): gets converted to a templaterdefs.ToExcel object, which is a list
of templaterdefs.SheetObj's. any dict keys not in SheetObj definition will be ignored.
min required is [{'df':df}]
fpth (str filepath): of xlsx output
file_properties: FileProperties obj defining metadata
openfile: bool
make_readme: creates a readme header sheet. Defaults to True. Avoid changing unless
necessary as it is required for the from_excel command.
Returns:
fpth: of output excel file
Example:
# a vanilla example
df = pd.DataFrame.from_dict({'col1':[0,1],'col2':[1,2]})
li = [{
'sheet_name': 'df',
#'xlsx_exporter': sheet_table, # don't pass xlsx_exporter to get default
#'xlsx_params': params_ifctemplate(), # don't pass xlsx_params to get default
'df': df,
'notes':{
'a note': 'a note',
'how many notes?':'as many as you like',
'what types?':'numbers and strings only',
'how are they shown?':'as fields in the readme sheet'
}
}]
to_excel(li, fpth, openfile=True,make_readme=True)
"""
toexcel = create_sheet_objs(data_object, fpth)
if make_readme:
readme = create_readme(toexcel) # get sheet meta data
# create metadata to make the readme worksheet
toexcel.sheets.insert(0, readme)
if file_properties is None:
file_properties = FileProperties()
object_to_excel(toexcel, fpth, file_properties)
if openfile:
open_file(fpth)
return fpth
# TODO: make to_json function that outputs the same data to a json file.
if __name__ == '__main__':
if __debug__ == True:
fdir = os.path.join('test_data')
fpth = os.path.join(fdir,'bsDataDictionary_Psets.xlsx')
df = pd.read_excel(fpth)
#fpth = wdir + '\\' + 'bsDataDictionary_Psets-processed.xlsx'
#df1 = pd.read_excel(fpth,sheet_name='1_PropertySets')
di = {
'sheet_name': 'IfcProductDataTemplate',
'xlsx_exporter': df_to_sheet_table,
'xlsx_params': params_ifctemplate(),
'df': df,
}
li = [di]
fpth = os.path.join(fdir,'bsDataDictionary_Psets-out.xlsx')
to_excel(li, fpth, openfile=False)
print('{} --> written to excel'.format(fpth))
from utils import from_excel
li = from_excel(fpth)
if li is not None:
print('{} --> read from excel'.format(fpth))
``` |
{
"source": "jgurhem/Graph_Generator",
"score": 3
} |
#### File: graph_help/colorschemes/DarkColorScheme.py
```python
from .DefaultColorScheme import DefaultColorScheme
class DarkColorScheme(DefaultColorScheme):
def __init__(self):
self.colors = dict()
self.colors['background'] = 'black'
self.colors['edge'] = 'white'
self.colors['fontcolor'] = 'black'
self.colors['initv'] = 'grey65'
self.colors['initm'] = 'grey65'
self.colors['inv'] = 'red'
self.colors['pmv'] = 'magenta'
self.colors['pmm1'] = 'blue'
self.colors['pmm2'] = 'blue'
self.colors['pmm_d'] = 'darkgreen'
self.colors['pmv_d'] = 'darkolivegreen3'
self.colors['sls'] = 'cyan3'
```
#### File: Graph_Generator/graph_help/Graph_dot.py
```python
from .Node import AbstractNode
from .AbstractGraph import AbstractGraph
from . import colorschemes as cs
import pygraphviz as pgv
class Graph(AbstractGraph):
def __init__(self, gname):
self.G = pgv.AGraph(strict=False, directed=True)
self.gname=gname
self.fontsize = 10
self.colorscheme = cs.DefaultColorScheme.DefaultColorScheme()
def set_fontsize(self, fontsize):
self.fontsize = fontsize
def set_colorscheme(self, colorscheme):
if colorscheme == 'dark':
self.colorscheme = cs.DarkColorScheme.DarkColorScheme()
else:
self.colorscheme = cs.DefaultColorScheme.DefaultColorScheme()
self.G.graph_attr.update(bgcolor = self.colorscheme['background'])
def graph_print(self):
print(self.G.string())
def graph_write(self):
self.G.write(self.gname + ".dot")
def __add_node(self, node, op):
self.G.add_node(node.get_id(), label=node.get_id(), color=self.colorscheme[op], style='filled', fontcolor=self.colorscheme['fontcolor'], op=op, fontsize=self.fontsize)
def __add_dependency(self, fro, to):
self.G.add_edge(fro.get_id(), to.get_id(), color=self.colorscheme['edge'], penwidth=4, arrowsize=4)
def op_vector_init(self, v):
super(Graph, self).op_vector_init(v)
self.__add_node(v, 'initv')
def op_matrix_init(self, m):
super(Graph, self).op_matrix_init(m)
self.__add_node(m, 'initm')
def op_matrix_inv(self, m_in, m_inv):
super(Graph, self).op_matrix_inv(m_in, m_inv)
self.__add_node(m_inv, 'inv')
self.__add_dependency(m_in, m_inv)
def op_pmv(self, m, v):
super(Graph, self).op_pmv(m, v)
v_incr = v.incr_last_coord()
self.__add_node(v_incr, 'pmv')
self.__add_dependency(v, v_incr)
self.__add_dependency(m, v_incr)
def op_pmm1(self, m1, m2):
"""m1 = m1 * m2"""
super(Graph, self).op_pmm1(m1, m2)
m_incr = m1.incr_last_coord()
self.__add_node(m_incr, 'pmm1')
self.__add_dependency(m1, m_incr)
self.__add_dependency(m2, m_incr)
def op_pmm2(self, m1, m2):
"""m2 = m1 * m2"""
super(Graph, self).op_pmm2(m1, m2)
m_incr = m2.incr_last_coord()
self.__add_node(m_incr, 'pmm2')
self.__add_dependency(m1, m_incr)
self.__add_dependency(m2, m_incr)
def op_pmm_d(self, A, B, C):
"""C = C - A * B"""
super(Graph, self).op_pmm_d(A, B, C)
m_incr = C.incr_last_coord()
self.__add_node(m_incr, 'pmm_d')
self.__add_dependency(A, m_incr)
self.__add_dependency(B, m_incr)
self.__add_dependency(C, m_incr)
def op_pmv_d(self, A, b, c):
"""c = c - A * b"""
super(Graph, self).op_pmv_d(A, b, c)
v_incr = c.incr_last_coord()
self.__add_node(v_incr, 'pmv_d')
self.__add_dependency(A, v_incr)
self.__add_dependency(b, v_incr)
self.__add_dependency(c, v_incr)
def op_sls(self, m, v):
super(Graph, self).op_sls(m, v)
v_incr = v.incr_last_coord()
self.__add_node(v_incr, 'sls')
self.__add_dependency(v, v_incr)
self.__add_dependency(m, v_incr)
def graph_print_dep(self, show=['']):
for n in self.G.nodes():
if not n.attr['op'] in show and show != ['']: continue
print()
print(n + " -- " + n.attr['op'])
for e in self.G.edges(n):
if e[0] == n:
print(" -> " + e[1] + " -- " + self.G.get_node(e[1]).attr['op'])
else:
print(" <- " + e[0] + " -- " + self.G.get_node(e[0]).attr['op'])
```
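A small driver sketch for the dot backend above; it only exercises methods shown in this file and assumes the colorscheme classes support item access as used in `__add_node`:
```python
g = Graph('example_graph')
g.set_fontsize(12)
g.set_colorscheme('dark')  # any other value falls back to the default scheme

# The op_* methods expect node objects (from graph_help.Node) providing
# get_id() and incr_last_coord(); they are omitted from this sketch.
g.graph_write()  # writes 'example_graph.dot'
```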
#### File: Graph_Generator/graph_help/ParseInputArg.py
```python
from optparse import OptionParser
def parse_input_arg():
parser = OptionParser()
parser.add_option("-n", "--n", dest="N", type="int", help="number of blocks", default=4)
parser.add_option("-s", "--s", dest="show", type="str", help="list of operations shown in dependencies, default shows all operations", default="")
parser.add_option("--pdep", "--print_dependencies", dest="pdep", action="store_true", help="print dependencies", default=False)
parser.add_option("--pdot", "--print_dot_graph", dest="pdot", action="store_true", help="print dot graph", default=True)
parser.add_option("--no-pdot", "--no-print_dot_graph", dest="pdot", action="store_false", help="print dot graph", default=True)
parser.add_option("--wdot", "--write_dot_graph", dest="wdot", action="store_true", help="write dot graph in '<graph_name>.dot'", default=False)
parser.add_option("--backend", dest="backend", action="store", help="Supported backends : dot and Pegasus", default="dot", type=str)
parser.add_option("--datasize", dest="datasize", action="store", help="Size of the global matrix", default=16, type=int)
parser.add_option("--fontsize", dest="fontsize", help="Dot graph font size", default=10, type=int)
parser.add_option("--colorscheme", dest="colorscheme", help="Dot graph color scheme", default='default', type=str)
parser.add_option("--bin_dir", dest="bin_dir", action="store", help="Bin directory containing the applications", default="", type=str)
(options, args) = parser.parse_args()
return options
``` |
{
"source": "jgurhem/PJR",
"score": 2
} |
#### File: jgurhem/PJR/best_case.py
```python
import sys
import core.DictHelper as dh
import core.ParseInputArg as pia
import core.JsonToCmd as jtc
parser = pia.Parser()
parser.add_filter()
parser.add_not_show()
parser.add_option('--best_case', action='store', dest="best_case_value", default="time_calc", help='time value used to sort the cases')
parser.add_option('--cmd', action='store_true', dest="to_cmd", default=False, help='print commands to perform the test')
parser.add_option('--add-cmd', action='store_true', dest="add_cmd", default=False, help='print supplementary commands to perform the test')
parser.add_option('-N', '--number_show', action='store', dest="number_values_shown", type=int, default=0, help="show N best cases, default : show all (N=0)")
parser.add_option('--prefix', action='store', dest="prefix", type=str, default="", help="command prefix")
parser.add_option('--suffix', action='store', dest="suffix", type=str, default="", help="command suffix")
in_var = parser.get_options()
def my_key(i):
t = []
t.append(i["lang"])
t.append(float(i["nb_nodes"]))
t.append(float(i[in_var.best_case_value].get_mean()))
i["tmp_key_sort_best_case"] = (i["lang"], i["nb_nodes"])
return t
def isCasePerformed(in_md, case):
for d in in_md:
if (d["lang"], d["nb_nodes"], d["datasize"], d["nb_blocks"]) == (case["lang"], case["nb_nodes"], case["datasize"], case["nb_blocks"]):
return True
return False
input_res = dh.read_json_file(sys.argv[1], in_var.filter_dict, "val")
input_res = sorted(input_res, key = my_key)
in_var.not_show.append("tmp_key_sort_best_case")
old_d = None
counter = dict()
for d in input_res:
new_d = dict()
counter[d["tmp_key_sort_best_case"]] = counter.get(d["tmp_key_sort_best_case"], 0) + 1
if in_var.number_values_shown != 0 and counter.get(d["tmp_key_sort_best_case"], 0) > in_var.number_values_shown: continue
for k, v in d.items():
if k in in_var.not_show: continue
new_d[k] = v
print(new_d)
if in_var.to_cmd:
print(in_var.prefix + jtc.dict_to_cmd(d) + in_var.suffix)
if in_var.add_cmd and old_d !=None and d["tmp_key_sort_best_case"] == old_d["tmp_key_sort_best_case"]:
d_copy = dict()
for k, v in d.items():
d_copy[k] = v
d_copy["nb_blocks"] = str(int((int(d["nb_blocks"]) + int(old_d["nb_blocks"])) / 2))
if not isCasePerformed(input_res, d_copy):
print(in_var.prefix + jtc.dict_to_cmd(d_copy) + in_var.suffix)
old_d = d
```
#### File: PJR/core/ParseInputArg.py
```python
from optparse import OptionParser
class InputError(Exception):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
def add_args_to_dict(option, opt, value, parser):
my_dict = getattr(parser.values, option.dest)
split = value.split(':')
if len(split) != 2:
raise InputError(f'Got "-f {value}" as input value but expect -f filter_key:filter_value1,filter_value2')
my_dict[split[0]] = split[1].split(',')
def get_comma_separated_args(option, opt, value, parser):
setattr(parser.values, option.dest, value.split(','))
class Parser():
def __init__(self):
self.parser = OptionParser()
self.add_option = self.parser.add_option
def add_option_list(self, *args, **kwargs):
self.add_option(*args, type='string', action='callback', callback=get_comma_separated_args, dest=kwargs['dest'], default=list())
def add_filter(self):
self.add_option('-f', '--filter', type='string', action='callback', callback=add_args_to_dict, dest="filter_dict", default=dict())
def add_not_show(self):
self.add_option('-s', '--not_show', type='string', action='callback', callback=get_comma_separated_args, dest = 'not_show', default=list())
def add_dark_background(self):
self.add_option('--dark_background', action='store_true', dest = 'dark_background', default = False)
def get_options(self):
(options, args) = self.parser.parse_args()
return options
```
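A quick illustration of how the `-f/--filter` and `-s/--not_show` callbacks accumulate values, following the `import core.ParseInputArg as pia` convention used in `best_case.py`; the command line is made up:
```python
import sys
import core.ParseInputArg as pia

parser = pia.Parser()
parser.add_filter()
parser.add_not_show()

sys.argv = ['prog', '-f', 'lang:MPI,HPX', '-f', 'nodes:2', '-s', 'logfile']
opts = parser.get_options()
print(opts.filter_dict)  # {'lang': ['MPI', 'HPX'], 'nodes': ['2']}
print(opts.not_show)     # ['logfile']
```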
#### File: PJR/core/Value.py
```python
class Value:
def __init__(self, val):
if isinstance(val, Value):
self.__n = val.__n
self.__sum = val.__sum
self.__min = val.__min
self.__max = val.__max
else:
self.__n = 1
self.__sum = val
self.__min = val
self.__max = val
def __str__(self):
s = '<n=' + str(self.__n)
s += f'; sum= {self.__sum:.3f}'
s += f'; mean= {self.get_mean():.3f}'
s += f'; min= {self.__min:.3f}'
s += f'; max= {self.__max:.3f}'
s += '>'
return s
def __repr__(self):
return self.__str__()
def add(self, val):
if isinstance(val, Value):
self.__n += val.__n
self.__sum += val.__sum
self.__min = min(self.__min, val.__min)
self.__max = max(self.__max, val.__max)
else:
self.__n += 1
self.__sum += val
self.__min = min(self.__min, val)
self.__max = max(self.__max, val)
def get_mean(self):
return self.__sum / self.__n
def get_min(self):
return self.__min
def get_max(self):
return self.__max
def get_val(self):
return self
``` |
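A worked sketch of the running-statistics accumulator above; the numbers are arbitrary:
```python
from core.Value import Value

v = Value(2.0)
v.add(4.0)
v.add(6.0)
print(v)  # <n=3; sum= 12.000; mean= 4.000; min= 2.000; max= 6.000>

# Two accumulators can be merged, e.g. when combining repeated runs.
other = Value(10.0)
other.add(v)
print(other.get_mean())  # (10 + 12) / 4 = 5.5
```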
{
"source": "jgurhem/TBSLA",
"score": 3
} |
#### File: tools/common/argparse.py
```python
import argparse
def init_parser():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--op", dest="op", help="Operation", type=str, required=True, choices=['spmv', 'a_axpx', 'spmv_no_redist'])
parser.add_argument("--format", dest="format", help="Matrix format", type=str, required=True)
parser.add_argument("--matrixtype", dest="matrixtype", help="Matrix used as input", type=str, required=True)
parser.add_argument("--matrixfolder", dest="matrixfolder", help="Folder containing the matrices", type=str, default=".")
return parser
def init_pagerank():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--matrix_dim", dest="matrix_dim", help="Number of rows and columns in the matrix", type=int, required=True)
parser.add_argument("--GC", dest="GC", help="Number of columns in the process grid", type=int, required=True)
parser.add_argument("--GR", dest="GR", help="Number of rows in the process grid", type=int, required=True)
parser.add_argument("--C", dest="C", help="Number of diagonals", type=int, default=10)
parser.add_argument("--format", dest="format", help="Matrix format", type=str, required=True)
parser.add_argument("--personalized_nodes", dest="personalized_nodes", help="List of personalized nodes in double quotes and separated by spaces", type=str, required=False, default="")
return parser
def init_mpiomp():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--threads", dest="threads", help="Number of threads per MPI process", type=int, required=True)
parser.add_argument("--tpc", dest="tpc", help="Threads per cores", type=int, default=1)
return parser
def init_omp():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--threads", dest="threads", help="Number of threads", type=int, required=True)
parser.add_argument("--tpc", dest="tpc", help="Threads per cores", type=int, default=1)
return parser
def init_yml():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--CPT", dest="CPT", help="Cores per task", type=int, required=True)
parser.add_argument("--BGR", dest="BGR", help="Number of submatrices in the row dimension", type=int, required=True)
parser.add_argument("--BGC", dest="BGC", help="Number of submatrices in the column dimension", type=int, required=True)
parser.add_argument("--LGR", dest="LGR", help="Number of processes for the row dimension in the tasks", type=int, required=True)
parser.add_argument("--LGC", dest="LGC", help="Number of processes for the column dimension in the tasks", type=int, required=True)
parser.add_argument("--compile", dest="compilation", help="Compile app and components before submitting script", action='store_const', default='False', const='True')
return parser
def add_submit():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--nodes", dest="nodes", help="Nodes used", type=int, required=True)
parser.add_argument("--lang", dest="lang", help="Language", type=str, required=True)
parser.add_argument("--wall-time", dest="walltime", help="Wall time in minutes", type=int, default=60)
return parser
def add_common(required=False):
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--resfile", dest="resfile", help="Result file where the performances timings are stored.", type=str, default="results.json", required=required)
parser.add_argument("--machine", dest="machine", help="configuration", type=str, default="_default", required=required)
parser.add_argument("--timeout", dest="timeout", help="Timeout for the run of an application in seconds.", type=int, default=60, required=required)
parser.add_argument("--dry", dest="dry", help="Do not submit the application", action='store_const', default="False", const="True")
return parser
def add_gcgr():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--GC", dest="GC", help="Number of columns in the process grid", type=int, required=True)
parser.add_argument("--GR", dest="GR", help="Number of rows in the process grid", type=int, required=True)
return parser
def add_qs():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--Q", dest="Q", help="Probability of column perturbation with cqmat", type=float, default=0.1)
parser.add_argument("--S", dest="S", help="Seed to generate cqmat", type=int, default=10)
return parser
def add_c():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--NC", dest="NC", help="Number of columns", type=int, required=True)
parser.add_argument("--NR", dest="NR", help="Number of rows", type=int, required=True)
parser.add_argument("--C", dest="C", help="Number of diagonals", type=int, default=10)
return parser
```
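These helpers all disable `add_help` so they can be combined as `parents` of a submit-script parser; a hedged sketch of the composition (argument values are examples only, and the helper functions are assumed to be in scope):
```python
import argparse

parser = argparse.ArgumentParser(parents=[init_pagerank(), add_submit(), add_common()])
args = parser.parse_args([
    '--matrix_dim', '1000', '--GR', '4', '--GC', '2', '--format', 'CSR',
    '--nodes', '2', '--lang', 'MPI',
])
print(args.matrix_dim, args.GR, args.GC, args.nodes, args.lang)  # 1000 4 2 2 MPI
```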
#### File: TBSLA/tools/gen_submit_pagerank_cmd.py
```python
import os
import sys
import argparse
import math
Ns = 1
Ne = 4
#N = 32000
N = 800000
#N = 1500000
#N = 3000000
C = 300
machine = 'Poincare'
matrix_format = {'COO', 'CSR', 'ELL', 'SCOO'}
#matrix_format = {'COO', 'CSR', 'ELL', 'SCOO', 'DENSE'}
#matrix_format = {'COO', 'CSR', 'ELL', 'DENSE'}
def decomp(n):
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
i = Ns
while i <= Ne:
print('# nb nodes : ', i)
factors = decomp(i * 16)
for mf in matrix_format:
g2 = 1
for f in factors:
g1 = int(i * 16 / g2)
if g1 == 0: continue
print(f'python tools/submit_pagerank.py --timeout 400 --matrix_dim {N} --format {mf} --nodes {i} --C {C} --machine {machine} --lang MPI --wall-time 1:00:00 --GR {g1} --GC {g2}')
if g1 != g2:
print(f'python tools/submit_pagerank.py --timeout 400 --matrix_dim {N} --format {mf} --nodes {i} --C {C} --machine {machine} --lang MPI --wall-time 1:00:00 --GR {g2} --GC {g1}')
g2 *= f
i *= 2
```
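`decomp` returns the prime factorization that drives the process-grid sweep; a small worked example:
```python
print(decomp(16))  # [2, 2, 2, 2]
print(decomp(48))  # [2, 2, 2, 2, 3]

# With 1 node * 16 cores the loop above walks g2 through 1, 2, 4, 8,
# pairs it with g1 = 16 // g2, and also emits the transposed grid when g1 != g2.
```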
#### File: tools/machine/Ruche.py
```python
from datetime import datetime
import os
LOAD_ENV_BEFORE_SUBMIT=False
def get_cores_per_node(args):
return 40
def get_sockets_per_node(args):
return 1
def get_mpirun(args):
return "srun "
def get_mpirun_options_hpx(args):
return f"-n {args.nodes}"
def get_mpirun_options_mpiomp(args):
return f"-n {int(args.nodes * get_cores_per_node(args) / args.threads)}"
def get_submit_cmd(args):
return "sbatch"
def get_env(args):
env = """
module purge
#module load jemalloc/5.2.1/intel-19.0.3.199
module load gcc/9.2.0/gcc-4.8.5
#module load openmpi/4.0.2/gcc-9.2.0
module load openmpi/3.1.5/gcc-9.2.0
module load cmake/3.16.2/intel-19.0.3.199
module load python/3.7.6/intel-19.0.3.199
export PATH=$PATH:${HOME}/install/tbsla/bin
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${HOME}/install/hpx/2b91d44/release/lib64:${HOME}/install/boost/1.69.0/release/lib:${HOME}/install/hwloc/2.0.4/lib:${HOME}/install/tbsla/lib:${HOME}/install/tbsla/lib64
"""
return env
def get_header(args):
ncores = (get_cores_per_node(args) if args.lang != "HPX" else 1) * args.nodes
header = f"""\
#!/bin/bash
#SBATCH -p cpu_med
#SBATCH --nodes={args.nodes}
#SBATCH --ntasks-per-node={get_cores_per_node(args)}
#SBATCH --exclusive
#SBATCH --output logs/tbsla_%x.%J.out
export TBSLA_LOG_FILE=logs/tbsla_$SLURM_JOB_NAME.$SLURM_JOB_ID.out
"""
header += get_env(args)
header += "\n"
return header
def get_additional_info(args):
dic = dict()
dic['log_file'] = os.environ['TBSLA_LOG_FILE']
return dic
def post_processing(args):
s = ""
return s
def post_run_cmd(args):
s = ""
return s
```
#### File: TBSLA/tools/stats.py
```python
import json
import sys
def get_stats(json_input_file):
stats = dict()
with open(json_input_file) as fp:
for cnt, line in enumerate(fp):
line = line.strip()
if not line.startswith("{"): continue
mydict = json.loads(line)
key = (mydict["lang"], mydict["format"], mydict["nodes"])
if key not in stats:
stats[key] = dict()
if mydict["success"] not in stats[key]:
stats[key][mydict["success"]] = 0
stats[key][mydict["success"]] += 1
return stats
if len(sys.argv) == 2:
r = get_stats(sys.argv[1])
print("(lang, format, nodes)")
for k in sorted(list(r.keys())):
print(k, " -> true : ", r[k].get("true", 0), " false : ",r[k].get("false", 0))
else:
print(sys.argv[0], "json_file_name")
``` |
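The script expects one JSON object per line; a tiny sketch of the input format and the aggregation `get_stats` produces (field values invented):
```python
import json, tempfile

runs = [
    {"lang": "MPI", "format": "CSR", "nodes": 2, "success": "true"},
    {"lang": "MPI", "format": "CSR", "nodes": 2, "success": "false"},
]
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fp:
    fp.write('\n'.join(json.dumps(r) for r in runs))

print(get_stats(fp.name))  # {('MPI', 'CSR', 2): {'true': 1, 'false': 1}}
```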
{
"source": "jgurtowski/ectools",
"score": 3
} |
#### File: jgurtowski/ectools/bases_outsideclr.py
```python
import sys
from itertools import imap
# clear range files contain lines like: {read [tab] clr_start [tab] clr_end}
if not len(sys.argv) == 3:
print "bases_outsideclr.py db.clr in.uncov.regions"
sys.exit(1)
def cdb_line_to_record(line):
arr = line.strip().split()
return (arr[0], (int(arr[1]),int(arr[2])))
cdbfh = open(sys.argv[1])
rfh = open(sys.argv[2])
cdb = dict(imap(cdb_line_to_record , cdbfh))
for line in rfh:
arr = line.strip().split()
rname = arr[0]
if not cdb.has_key(rname):
sys.stderr.write("Error: could not find %s in db\n" % rname)
continue
(clr_start, clr_stop) = cdb[rname]
bases_outside_clr = 0
for uncov_start, uncov_end in imap(lambda x: tuple(map(int,x.split(","))), arr[1:]):
bases_outside_clr += max(0,uncov_end - clr_stop) - max(0,uncov_start-clr_stop)
bases_outside_clr += max(0,clr_start - uncov_start) - max(0,clr_start-uncov_end)
print "\t".join(map(str,[rname,clr_start,clr_stop,bases_outside_clr,"\t".join(arr[1:])]))
cdbfh.close()
rfh.close()
```
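A worked example of the per-region arithmetic above, pulled into a standalone helper for illustration (coordinates invented):
```python
def bases_outside(clr_start, clr_stop, uncov_start, uncov_end):
    """Same formula as the loop above: bases of the uncovered region outside the clear range."""
    outside = max(0, uncov_end - clr_stop) - max(0, uncov_start - clr_stop)
    outside += max(0, clr_start - uncov_start) - max(0, clr_start - uncov_end)
    return outside

print(bases_outside(10, 90, 80, 120))  # 30 bases extend past the clear end
print(bases_outside(10, 90, 0, 25))    # 10 bases precede the clear start
```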
#### File: jgurtowski/ectools/log.py
```python
from misc import passFunc
def logger(output_fh):
if not output_fh:
return passFunc
def _log(msg):
'''Logs to output_fh'''
output_fh.write(msg)
output_fh.write("\n")
return _log
``` |
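A minimal usage sketch; when no handle is given, `logger` returns `passFunc`, presumably a no-op, so callers can log unconditionally:
```python
import sys

log = logger(sys.stderr)  # each call writes the message plus a newline
log("starting correction step")

quiet = logger(None)      # no-op logger
quiet("this goes nowhere")
```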
{
"source": "jgurtowski/pbcore_python",
"score": 3
} |
#### File: pbcore_python/tests/test_pbcore_io_FastaIO.py
```python
from nose.tools import assert_equal, assert_true, assert_false
from pbcore import data
from pbcore.io import FastaReader, FastaWriter, FastaRecord
from StringIO import StringIO
class TestFastaRecord:
def setup(self):
self.name = "chr1|blah|blah"
self.sequence = "GATTACA" * 20
self.expected__str__ = \
">chr1|blah|blah\n" \
"GATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATT\n" \
"ACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAG\n" \
"ATTACAGATTACAGATTACA"
self.record = FastaRecord(self.name, self.sequence)
def test__init__(self):
assert_equal(self.name, self.record.name)
assert_equal(self.sequence, self.record.sequence)
def test__str__(self):
assert_equal(self.expected__str__, str(self.record))
def test_fromString(self):
recordFromString = FastaRecord.fromString(self.expected__str__)
assert_equal(self.name, recordFromString.name)
assert_equal(self.sequence, recordFromString.sequence)
def test_md5(self):
assert_equal("67fc75ce599ed0ca1fc8ed2dcbccc95d",
self.record.md5)
def test_eq(self):
name = 'r1'
seq = 'ACGT'
r1 = FastaRecord(name, seq)
r2 = FastaRecord(name, seq)
assert_true(r1 == r2)
def test_not_equal(self):
r1 = FastaRecord('r1', 'ACGT')
r2 = FastaRecord('r2', 'ACGT')
r3 = FastaRecord('r1', 'ACGT')
assert_true(r1 != r2)
assert_false(r1 != r3)
class TestFastaReader:
def test_readFasta(self):
f = FastaReader(data.getFasta())
entries = list(f)
assert_equal(48, len(entries))
assert_equal("ref000001|EGFR_Exon_2", entries[0].name)
assert_equal("TTTCTTCCAGTTTGCCAAGGCACGAGTAACAAGCTCACGCAGTTGGGCACTTT"
"TGAAGATCATTTTCTCAGCCTCCAGAGGATGTTCAATAACTGTGAGGTGGTCC"
"TTGGGAATTTGGAAATTACCTATGTGCAGAGGAATTATGATCTTTCCTTCTTA"
"AAGGTTGGTGACTTTGATTTTCCT",
entries[0].sequence)
assert_equal("e3912e9ceacd6538ede8c1b2adda7423",
entries[0].md5)
def test_dosLineEndingsFasta(self):
f = FastaReader(data.getDosFormattedFasta())
entries = list(f)
for e in entries:
assert_true("\r" not in e.name)
assert_equal(16, len(e.sequence))
class TestFastaWriter:
def setup(self):
self.fasta1 = StringIO(
">chr1|blah|blah\n" \
"GATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATT\n" \
"ACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAG\n" \
"ATTACAGATTACAGATTACA\n")
self.fasta2 = StringIO(self.fasta1.getvalue() + "\n" + \
">chr2|blah|blah\n" \
"GATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATT\n" \
"ACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAG\n" \
"ATTACAGATTACAGATTACA\n")
def test_writeFasta1(self):
f = StringIO()
w = FastaWriter(f)
for record in FastaReader(self.fasta1):
w.writeRecord(record)
assert_equal(self.fasta1.getvalue(), f.getvalue())
def test_writeFasta2(self):
f = StringIO()
w = FastaWriter(f)
for record in FastaReader(self.fasta1):
w.writeRecord(record.name, record.sequence)
assert_equal(self.fasta1.getvalue(), f.getvalue())
``` |
{
"source": "jgust/SublimeCscope",
"score": 2
} |
#### File: sublime_cscope/commands/index.py
```python
import sublime
import sublime_plugin
from .. import indexer
class ScRefreshAllCommand(sublime_plugin.WindowCommand):
def run(self):
indexer.refresh()
```
#### File: sublime_cscope/commands/query.py
```python
import linecache as lc
import functools
import math
import os
import sublime
import sublime_plugin
from ...SublimeCscope import DEBUG, PACKAGE_NAME
from ..cscope_runner import CscopeQueryCommand
from ..cscope_results import CscopeResultsToBuffer, CscopeResultsToQuickPanel
# Results to buffer constants
RTB_CSCOPE_ACTIONS = {
'find_symbol': 'the symbol{}',
'find_definition': 'the definition of{}',
'find_callees': 'functions called by{}',
'find_callers': 'functions calling{}',
'find_string': 'the occurrences of string{}',
'find_egrep_pattern': 'the occurrences of egrep pattern{}',
'find_files_including': 'files #including{}'
}
RTB_HEADER = PACKAGE_NAME + ' results for {}\n'
RTB_MATCH_OR_MATCHES = ['match', 'matches']
RTB_IN_OR_ACCROSS = ['in', 'across']
RTB_FOOTER = '\n{0:d} {2} {3} {1:d} files\n'
RTB_LINE_PREFIX = '{0:>5}'
RTB_PRE_POST_MATCH = RTB_LINE_PREFIX + ' {1}'
RTB_MATCH = RTB_LINE_PREFIX + ': {1}'
RTB_CONTEXT_LINES = 2
class ScQueryCommand(sublime_plugin.TextCommand):
@property
def action(self):
raise NotImplementedError()
@property
def search_term(self):
if len(self.view.sel()) != 1:
return None
s = self.view.sel()[0]
if s.b == s.a:
selected_word_reg = self.view.word(self.view.sel()[0])
else:
selected_word_reg = s
selected_word = self.view.substr(selected_word_reg).strip()
return selected_word
def is_enabled(self):
from ..indexer import get_db_location
return bool(get_db_location(self.view.window()))
def run_with_input(self, input_str, results_to_buffer=False):
if input_str:
query_command = CscopeQueryCommand(self.action, input_str, win=self.view.window())
query_command.run()
results = query_command.results.get_sorted_results(sort_by=self.view.file_name())
if not results:
return
if results_to_buffer:
CscopeResultsToBuffer.generate_results(self.action,
input_str,
results,
win=self.view.window())
else:
CscopeResultsToQuickPanel.generate_results(self.action,
input_str,
results,
win=self.view.window())
else:
print(PACKAGE_NAME + ' - Unable to run query since no input was given.')
def run_in_background(self, input_str, rtb=False):
runner_lambda = lambda: self.run_with_input(input_str, results_to_buffer=rtb)
sublime.set_timeout_async(runner_lambda, 5)
def run(self, edit, results_to_buffer=False):
search_term = self.search_term
run_cb = functools.partial(self.run_in_background, rtb=results_to_buffer)
if search_term:
run_cb(search_term)
else:
panel_text = (PACKAGE_NAME + ' - Find ' + RTB_CSCOPE_ACTIONS[self.action]).format(':')
self.view.window().show_input_panel(panel_text, '', run_cb, None, None)
class ScFindSymbolCommand(ScQueryCommand):
@ScQueryCommand.action.getter
def action(self):
return 'find_symbol'
class ScFindDefinitionCommand(ScQueryCommand):
@ScQueryCommand.action.getter
def action(self):
return 'find_definition'
class ScFindCalleesCommand(ScQueryCommand):
@ScQueryCommand.action.getter
def action(self):
return 'find_callees'
class ScFindCallersCommand(ScQueryCommand):
@ScQueryCommand.action.getter
def action(self):
return 'find_callers'
class ScFindStringCommand(ScQueryCommand):
@ScQueryCommand.action.getter
def action(self):
return 'find_string'
class ScFindEgrepPatternCommand(ScQueryCommand):
@ScQueryCommand.action.getter
def action(self):
return 'find_egrep_pattern'
class ScFindFilesIncludingCommand(ScQueryCommand):
@ScQueryCommand.action.getter
def action(self):
return 'find_files_including'
@ScQueryCommand.search_term.getter
def search_term(self):
st = super().search_term
if not st:
return st
if st.startswith('#include'):
st = st[len('#include'):]
st = st.lstrip(' "<').rstrip(' ">')
root, ext = os.path.splitext(st)
if not ext:
st = os.extsep.join([st, 'h'])
if DEBUG:
print("Querying for h-file: %s" % st)
return st
class ScWriteQueryResultsCommand(sublime_plugin.TextCommand):
"""
Internal command that writes query results to the Results buffer.
"""
def run(self, edit, action='', search_term='', results=[]):
start_pos = self.view.size()
current_pos = start_pos
regions = []
highlight_search_term = (action != 'find_callees')
current_pos = self.write_header(edit, current_pos, action, search_term)
prev_file = None
prev_line = 0
file_count = 0
for res in results:
tmp_file = prev_file
prev_file, prev_line, current_pos, result_pos = self.write_result(edit, current_pos,
prev_file, prev_line,
res)
if tmp_file != prev_file:
file_count += 1
if result_pos > 0:
if highlight_search_term:
reg = self.view.find(search_term, result_pos)
else:
_, _, func, _ = res
reg = self.view.find(func, result_pos)
if reg:
regions.append(reg)
current_pos = self.write_context_lines(edit, current_pos, prev_file,
prev_line+1, RTB_CONTEXT_LINES)
current_pos = self.write_footer(edit, current_pos, len(results), file_count)
all_regions = self.view.get_regions(PACKAGE_NAME)
all_regions.extend(regions)
if all_regions:
self.view.add_regions(PACKAGE_NAME, all_regions, 'text', '', sublime.DRAW_NO_FILL)
def write_header(self, edit, pos, action, search_term):
search_term_str = ' "' + search_term + '"'
header = RTB_HEADER.format(RTB_CSCOPE_ACTIONS[action].format(search_term_str))
if pos > 0:
header = '\n' + header
return pos + self.view.insert(edit, pos, header)
def write_footer(self, edit, pos, results_total, files_total):
footer = RTB_FOOTER.format(results_total, files_total,
RTB_MATCH_OR_MATCHES[int(results_total > 1)],
RTB_IN_OR_ACCROSS[int(files_total > 1)])
return pos + self.view.insert(edit, pos, footer)
def write_context_lines(self, edit, pos, file_name,
start_line, num_ctx_lines = RTB_CONTEXT_LINES):
if num_ctx_lines > 0:
for line_num in range(start_line, start_line + num_ctx_lines):
line = lc.getline(file_name, line_num)
if line:
pos += self.view.insert(edit, pos, RTB_PRE_POST_MATCH.format(line_num, line))
return pos
def write_result(self, edit, pos, prev_file, prev_line, result):
fn, ln, func, _ = result
if fn != prev_file:
lc.checkcache(fn)
prev_line = 0
pos += self.view.insert(edit, pos, '\n{}:\n'.format(fn))
matched_line = lc.getline(fn,ln)
result_pos = 0
if not matched_line:
print("{} Could not find line {:d} in file {}" +
" while writing results to buffer.".format(PACKAGE_NAME, ln, fn))
return (fn, ln, pos, result_pos)
line_diff = ln - prev_line - 1
if prev_line > 0:
if line_diff > RTB_CONTEXT_LINES:
pos = self.write_context_lines(edit, pos, fn, prev_line + 1)
separator_dots = math.ceil(math.log(ln, 10))
sep = (RTB_LINE_PREFIX + '\n').format('.' * separator_dots)
pos += self.view.insert(edit, pos, sep)
ctx_lines = min(line_diff, RTB_CONTEXT_LINES)
pos = self.write_context_lines(edit, pos, fn, ln - ctx_lines, ctx_lines)
result_pos = pos
pos += self.view.insert(edit, pos, RTB_MATCH.format(ln, matched_line))
return (fn, ln, pos, result_pos)
```
#### File: SublimeCscope/sublime_cscope/event_listener.py
```python
import sublime
import sublime_plugin
from ..SublimeCscope import DEBUG
from . import indexer
# These commands should trigger a state change event in the indexer
PROJECT_COMMANDS = ('prompt_add_folder',
'prompt_open_project_or_workspace',
'prompt_switch_project_or_workspace',
'prompt_select_workspace',
'open_recent_project_or_workspace')
class EventListener(sublime_plugin.EventListener):
"""Monitors events from the editor and tries to figure out
when it is meaningful to notify the indexers"""
def __init__(self):
super().__init__()
self._curr_active_window = 0
self._project_command_in_progres = []
self._last_saved_buffer = None
self._last_closed_buffer = None
def _check_active_window(self):
curr_active_window = sublime.active_window().id()
# don't notify any change the first time
if self._curr_active_window == 0:
self._curr_active_window = curr_active_window
return False
prev_active_window = self._curr_active_window
self._curr_active_window = curr_active_window
#A change in active window can mean that a new window was created,
#a window was closed or the user switched between windows.
if prev_active_window != curr_active_window:
return True
return False
def _clear_last_saved_buffer(self):
self._last_saved_buffer = None
def _clear_last_closed_buffer(self):
self._last_closed_buffer = None
def _find_open_file(self, file_name):
for win in sublime.windows():
if win.find_open_file(file_name):
return True
return False
def on_post_save(self, view):
self._check_active_window()
file_name = view.file_name()
if not view.is_scratch() and file_name:
# ignore multiple calls for the same buffer for 1 second.
if file_name != self._last_saved_buffer:
self._last_saved_buffer = file_name
indexer.buffer_promoted(file_name)
sublime.set_timeout_async(self._clear_last_saved_buffer, 1000)
def on_close(self, view):
self._check_active_window()
file_name = view.file_name()
if not view.is_scratch() and file_name:
# only send buffer demoted if all views into the buffer have been
# closed.
if file_name != self._last_closed_buffer and not self._find_open_file(file_name):
self._last_closed_buffer = file_name
indexer.buffer_demoted(file_name)
sublime.set_timeout_async(self._clear_last_closed_buffer, 1000)
def on_activated(self, view):
focus_changed = self._check_active_window()
window_id = view.window().id() if view.window() else 0
proj_command_complete = False
if window_id in self._project_command_in_progres:
proj_command_complete = True
self._project_command_in_progres.remove(window_id)
if window_id and (focus_changed or proj_command_complete):
indexer.window_state_changed()
def on_window_command(self, win, cmd_name, args):
self._check_active_window()
if not win.id():
return
# if DEBUG:
# print("Got window command: %s" % cmd_name)
if cmd_name in PROJECT_COMMANDS:
if win.id() not in self._project_command_in_progres:
self._project_command_in_progres.append(win.id())
else:
print("Got command %s from win: %d while other already in progress")
elif cmd_name == 'refresh_folder_list':
indexer.refresh(win)
elif cmd_name == 'remove_folder':
indexer.window_state_changed()
```
#### File: SublimeCscope/sublime_cscope/indexer.py
```python
import os
import sys
import stat
import fnmatch
import threading
import traceback
from queue import Queue
from threading import Thread, Event
from collections import defaultdict
from functools import wraps, partial, reduce
from itertools import filterfalse, chain
import sublime
# The primary DB is indexed on the fly by cscope
# and can therefore only contain a limited amount of files
# before the indexing time becomes noticable. For projects of
# size up to TWO_TIER_THRESHOLD we keep all the files in the primary SB
PRIMARY_DB = 'primary'
# For projects larger than TWO_TIER_THRESHOLD, we use a two tier solution
# instead. The primary DB is still indexed on the fly but now only contains
# files that are open in the editor and have been modified since the last
# indexing run of the secondary DB. The secondary DB will then contain all
# the files in the project, but will be indexed less frequently so it will
# most likely be out of date, for the files being modified. That is ok since
# the primary DB will hold up to date information for those files.
SECONDARY_DB = 'secondary'
from ..SublimeCscope import DEBUG, PACKAGE_NAME
from . import settings
from . import cscope_runner
DEBUG_DECORATORS = False
DEBUG_INDEXERCONFIG = False
DB_FOLDER_POSTFIX = '-' + PACKAGE_NAME.lower()
TWO_TIER_THRESHOLD = 50
# The global dict of indexers
# There should be one per project or workspace
_indexers = {}
_indexers_by_win = {}
class ActorQuit(Exception):
pass
class UnhandledMessageException(Exception):
pass
class ActorCommandMsg():
def __init__(self, action, wait_for_result=False, result_callback=None):
self._action = action
self._result = Queue() if wait_for_result else None
self._result_callback = result_callback
def _set_result(self, result):
if self._result:
self._result.put(result)
elif isinstance(result, Exception):
raise result
elif self._result_callback:
self._result_callback(result)
def result(self):
if self._result:
res = self._result.get()
if isinstance(res, Exception):
raise res
return res
else:
return None
def run(self):
try:
res = self._action()
except Exception as e:
res = e
finally:
self._set_result(res)
# Decorator that hides the details of sending messages to Actors
def send_msg(func):
@wraps(func)
def wrapper(self, *args, **kwds):
result_cb = None
is_sync = False
send_always = False
#make sure the Actor is started
self.start()
if not self._is_started():
raise AssertionError("Actor %s is not running" % self.__class__)
is_external = bool(self._thread_id and self._thread_id != threading.get_ident())
#strip away any arguments aimed for the decorator
if kwds:
result_cb = kwds.pop('result_callback', None)
is_sync = kwds.pop('wait_for_result', False)
send_always = kwds.pop('send_always', False)
#deadly combo, that will cause a deadlock in the actor
if send_always and is_sync and not is_external:
raise AssertionError("You can't send a message to yourself and wait for the result!")
if send_always or is_external:
action = lambda: func(self, *args, **kwds)
msg = ActorCommandMsg(action, wait_for_result=is_sync, result_callback=result_cb)
if DEBUG_DECORATORS:
print("Sending %s msg: %s" % ('sync' if is_sync else 'async', func.__name__))
self.send(msg)
return msg.result()
if DEBUG_DECORATORS: print("Calling %s directly" % func.__name__)
return func(self, *args, **kwds)
return wrapper
class ActorBase:
def __init__(self):
self._mailbox = Queue()
self._started = Event()
self._terminated = Event()
self._thread_id = 0
self.recv_count = 0
def send(self, msg):
self._mailbox.put(msg)
def recv(self):
msg = self._mailbox.get()
self.recv_count += 1
if msg is ActorQuit:
raise ActorQuit()
return msg
def _close(self):
self.send(ActorQuit)
def _join(self):
self._terminated.wait()
def _bootstrap(self):
try:
self._thread_id = threading.get_ident()
self._started.set()
self._run()
except ActorQuit:
pass
finally:
self._thread_id = 0
self._started.clear()
self._terminated.set()
def _run(self):
while True:
msg = self.recv()
if isinstance(msg, ActorCommandMsg):
msg.run()
else:
self.handle_message(msg)
def _is_started(self):
return self._started.is_set() and not self._terminated.is_set()
def handle_message(self, msg):
raise UnhandledMessageException(msg)
def quit(self):
self._close()
self._join()
def start(self):
if self._is_started():
return
self._terminated.clear()
t = Thread(target=self._bootstrap)
t.daemon = True
t.start()
class Indexer(ActorBase):
""" The indexers maintains the cscope indexes
The Indexer is responsible for maintaining an up-to-date
cscope index of the project it is associated with.
"""
def __init__(self):
super().__init__()
self._crawler = Crawler()
self._crawl_in_progress = False
self._partial_crawl_queue = []
self._index_timestamp = None
self._two_tier_mode = False
self._file_index = {}
self._promotion_set = set()
self._demotion_set = set()
self._config = None
self._force_rebuild_db = False
def start(self):
super().start()
self._crawler.start()
def quit(self):
self._crawler.quit()
super().quit()
def _reset_results(self):
self._two_tier_mode = False
self._partial_crawl_queue.clear()
self._file_index.clear()
self._promotion_set.clear()
self._demotion_set.clear()
def _count_files(self, file_index):
return reduce(lambda tot, i: tot + len(i['files']), file_index.values(), 0)
def _write_file_list(self, files, file_name):
# Only try to create our own folder
if not os.path.exists(os.path.dirname(file_name)):
os.mkdir(os.path.dirname(file_name))
with open(file_name, mode='wt', encoding='utf-8') as file_list:
flist = ['"' + f + '"' if ' ' in f else f for f in files]
flist.append('\n')
file_list.write('\n'.join(flist))
def _gen_index(self, full_update=True):
success = False
try:
primary_list = os.path.join(self._config.db_location, PRIMARY_DB + '.files')
secondary_list = os.path.join(self._config.db_location, SECONDARY_DB + '.files')
#generate the file list
files = []
for v in self._file_index.values():
if v['files']:
files.extend(map(lambda f: os.path.join(v['path'], f), v['files']))
if self._two_tier_mode:
if self._promotion_set:
self._write_file_list(self._promotion_set, primary_list)
elif os.path.exists(primary_list):
os.remove(primary_list)
if full_update:
self._write_file_list(files, secondary_list)
cscope_runner.generate_index(self._config.db_location,
_find_window_from_indexer(self),
force_rebuild=self._force_rebuild_db)
self._force_rebuild_db = False
else:
self._write_file_list(files, primary_list)
if os.path.exists(secondary_list):
os.remove(secondary_list)
success = True
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print("%s: Generating index for project: %s caused an exception")
print(''.join('!! ' + line for line in lines))
return success
@send_msg
def _perform_crawl(self, partial_crawl=False):
start_path = None
if not self._config or not self._config.is_complete:
return
if self._crawl_in_progress:
print("Project: '%s' refresh is already in progress" % self._config.db_location)
return
elif partial_crawl:
#try to find a starting point that includes all paths in
#self._partial_crawl_queue
start_path = os.path.commonprefix(self._partial_crawl_queue)
if start_path.endswith(os.path.sep):
start_path = start_path[:-1]
if start_path and not os.path.exists(start_path):
start_path = os.path.dirname(start_path)
base_path, _ = self._config.find_base_path(start_path)
if start_path and not base_path:
start_path = None
if DEBUG:
if start_path:
print("Performing partial refresh starting from %s for project: %s" %
(start_path, self._config.db_location))
else:
print("Performing full refresh for project: %s" % self._config.db_location)
self._partial_crawl_queue.clear()
self._crawl_in_progress = True
self._crawler.crawl(self._config,
user_data=start_path,
start_path=start_path,
result_callback=self._crawl_result_ready)
@send_msg
def _crawl_result_ready(self, result):
self._crawl_in_progress = False
crawl_res, partial_update = result
if DEBUG:
print("Crawl results received. Found %d files" % self._count_files(crawl_res))
if self._count_files(crawl_res) > TWO_TIER_THRESHOLD:
if not self._two_tier_mode:
if partial_update:
print("%s: A partial update of project: %s resulted in threshold exceeded. "
"Performing full update." %
(PACKAGE_NAME, os.path.dirname(self._config.db_location)))
self._perform_crawl()
return
else:
if DEBUG: print("Threshold exceeded, switching to two tier mode")
self._reset_results()
self._two_tier_mode = True
elif not partial_update and self._two_tier_mode:
if DEBUG: print("%s: Project: %s. Project size is below threshold. "
"Reverting back to one tier mode" %
(PACKAGE_NAME, os.path.dirname(self._config.db_location)))
self._reset_results()
file_index = {}
if partial_update:
# Extract the relevant subset to compare
            for k, v in list(self._file_index.items()):
                if v['path'].startswith(partial_update):
                    file_index[k] = v
                    del self._file_index[k]
else:
file_index = self._file_index
self._file_index = {}
self._partial_crawl_queue.clear()
partial_update = ''
if (file_index != crawl_res):
if DEBUG:
print("Crawl of project: %s contained changes." %
os.path.dirname(self._config.db_location))
self._file_index.update(crawl_res)
if self._gen_index():
#remove files from the demotion list
tmp = {f for f in self._demotion_set if f.startswith(partial_update)}
self._demotion_set -= tmp
self._promotion_set -= tmp
                # Perform any pending partial crawls
if self._partial_crawl_queue:
self._perform_crawl(partial_crawl=True, send_always=True)
@send_msg
def refresh(self):
self._force_rebuild_db = True
self._perform_crawl()
@send_msg
def set_config(self, config):
if config and config != self._config:
if DEBUG: print("New config received. Refreshing project %s" % config.db_location)
self._config = config
self.refresh()
@send_msg
def promote_buffer(self, file_path):
if file_path in self._promotion_set:
return
base, name = os.path.split(file_path)
st = os.stat(base)
if st.st_ino in self._file_index:
# in case the folder exists in the index under a different name
# use that name instead
base = self._file_index[st.st_ino]['path']
file_path = os.path.join(base, name)
if file_path in self._promotion_set:
return
if not self._config.file_matches(base, name):
return
if DEBUG: print("Promoting: %s" % file_path)
if self._two_tier_mode:
self._promotion_set.add(os.path.join(base, name))
self._gen_index(full_update=False)
elif not name in self._file_index.get(st.st_ino, {}).get('files',[]):
# file not found in index
self._perform_crawl()
@send_msg
def demote_buffer(self, file_path):
if file_path not in self._promotion_set:
return
if file_path in self._demotion_set:
return
if DEBUG: print("Demoting: %s" % file_path)
self._demotion_set.add(file_path)
self._partial_crawl_queue.append(os.path.dirname(file_path))
self._perform_crawl(True, send_always=True)
class Crawler(ActorBase):
""" The Crawler scans the project folders for files to index. """
@send_msg
def crawl(self, config, user_data, start_path=None):
result = defaultdict(dict)
if start_path:
base_path, follow_syms = config.find_base_path(start_path)
folders_to_search = [(start_path, base_path, follow_syms)]
else:
folders_to_search = [(base_path, base_path, follow_syms) for
base_path, follow_syms in config.base_paths()]
for start, base, follow_syms in folders_to_search:
os_walk = partial(os.walk, followlinks=follow_syms)
os_stat = partial(os.stat, follow_symlinks=follow_syms)
file_matcher = partial(config.file_matches, base_path=base)
folder_matcher = partial(config.folder_matches, base_path=base)
visited_files = set()
self._crawl_one_subfolder(start, result,
os_walk, os_stat,
file_matcher, folder_matcher,
visited_files)
return (result, user_data)
def _crawl_one_subfolder(self, start_path, result, os_walk,
os_stat, file_matcher,
folder_matcher, visited_files):
start_path = os.path.normpath(start_path)
if DEBUG: print("Starting to crawl folder: %s" % start_path)
prev = None
prev_inode = 0
for current, subdirs, files in os_walk(start_path):
inode = prev_inode
if current != prev:
prev = current
inode = os_stat(current).st_ino
            if inode in result:
                raise AssertionError("Inode %d already seen. path: %s == %s" %
                                     (inode, current, result[inode]['path']))
result[inode]['path'] = current
result[inode]['magic'] = 0
result[inode]['files'] = []
self._process_files(current, files, result[inode],
os_stat, file_matcher, visited_files)
self._process_subfolders(current, subdirs, os_stat,
folder_matcher, result.keys())
def _process_files(self, path, files, result,
os_stat, file_matcher, visited_files):
for f in files:
try:
st = os_stat(os.path.join(path, f))
except (FileNotFoundError, OSError) as e:
print("%s: %s" % (PACKAGE_NAME, e))
continue
if st.st_ino in visited_files:
if DEBUG: print("File %s was already visited" % os.path.join(path, f))
continue
if file_matcher(path, f, st.st_mode):
result['files'].append(f)
result['magic'] += st.st_size + st.st_mtime
visited_files.add(st.st_ino)
def _process_subfolders(self, path, subdirs, os_stat,
folder_matcher, visited_folders):
filtered_subdirs = []
for d in subdirs:
try:
st = os_stat(os.path.join(path, d))
except (FileNotFoundError, OSError) as e:
print("%s: %s" % (PACKAGE_NAME, e))
continue
if st.st_ino in visited_folders:
if DEBUG: print("File %s was already visited" % os.path.join(path, d))
continue
if folder_matcher(path, d, st.st_mode):
filtered_subdirs.append(d)
subdirs.clear()
subdirs.extend(filtered_subdirs)
class IndexerConfig():
def __init__(self, window):
self._is_complete = False
self._file_exts = None
self._db_location = get_db_location(window)
if not self._db_location:
return
self._file_exts = _set_from_sorted_list(settings.get('index_file_extensions', window))
if not self._file_exts:
print("%s: The list of file extensions to index was empty. \
Please check your settings." % PACKAGE_NAME)
return
self._search_std_incl_folders = settings.get('search_std_include_folders', window)
self._std_incl_folders = _set_from_sorted_list(settings.get('std_include_folders', window))
self._folder_configs = {}
self._index_blacklist = set()
global_folder_exclude = []
global_folder_include = []
global_file_exclude = []
global_file_include = []
if window.active_view():
s = window.active_view().settings()
self._index_blacklist = _set_from_sorted_list(s.get('index_exclude_patterns', []))
global_folder_exclude = s.get('folder_exclude_patterns', [])
global_folder_include = s.get('folder_include_patterns', [])
global_file_exclude = s.get('file_exclude_patterns', [])
global_file_include = s.get('file_include_patterns', [])
proj_data = window.project_data()
for folder in proj_data['folders']:
folder_path = folder['path']
if not folder_path:
                continue
if not os.path.isabs(folder_path):
base_path, _ = os.path.split(self._db_location)
if DEBUG:
print("Found relative folder: %s. prepending %s" %
(folder_path, base_path + os.path.sep))
folder_path = os.path.join(base_path, folder_path)
folder_config = {}
folder_config['follow_symlinks'] = folder.get('follow_symlinks', True)
folder_config['file_whitelist'] = _set_from_sorted_list(global_file_include + \
folder.get('file_include_patterns',[]))
folder_config['file_blacklist'] = _set_from_sorted_list(global_file_exclude + \
folder.get('file_exclude_patterns',[]))
folder_config['folder_whitelist'] = _set_from_sorted_list(global_folder_include + \
folder.get('folder_include_patterns',[]))
folder_config['folder_blacklist'] = _set_from_sorted_list(global_folder_exclude + \
folder.get('folder_exclude_patterns',[]))
self._folder_configs[folder_path] = folder_config
        # For the config to be considered complete (i.e. usable) we need at least
        # one file extension and one folder.
self._is_complete = len(self._file_exts) > 0 and len(self._folder_configs) > 0
@property
def is_complete(self):
return self._is_complete
@property
def file_exts(self):
return self._file_exts
@property
def db_location(self):
return self._db_location
@property
def search_std_incl_folders(self):
return self._search_std_incl_folders
@property
def std_incl_folders(self):
return self._std_incl_folders
def __eq__(self, r):
res = True
if self is r:
return True
elif not isinstance(r, self.__class__):
res = NotImplemented
else:
keys_to_cmp = [
'_is_complete',
'_db_location',
'_file_exts',
'_folder_configs',
'_index_blacklist',
'_search_std_incl_folders',
'_std_incl_folders'
]
ldict = self.__dict__
rdict = r.__dict__
results = list(filterfalse(lambda k: ldict.get(k, None) == rdict.get(k, None),
keys_to_cmp))
# if results is empty, all keys evaluated to equal
res = bool(not results)
if DEBUG_INDEXERCONFIG and not res:
for key in results:
print("%s failed: '%s' != '%s'" %
(key, ldict.get(key, None), rdict.get(key, None)))
return res
def __ne__(self, r):
res = self.__eq__(r)
if res is NotImplemented:
return res
return not res
def _is_whitelisted_file(self, base_path, dirpath, file_name):
_, ext = os.path.splitext(file_name)
if not ext in self._file_exts:
return False
full_name = os.path.join(dirpath, file_name)
include_patterns = self._folder_configs[base_path]['file_whitelist']
# if the list is empty then all files are allowed
if not include_patterns:
return True
for pattern in include_patterns:
if fnmatch.fnmatch(file_name, pattern):
return True
if fnmatch.fnmatch(full_name, pattern):
return True
return False
def _is_blacklisted_file(self, base_path, dirpath, file_name):
exclude_patterns = self._folder_configs[base_path]['file_blacklist']
        # if the list is empty then no files are excluded
if not exclude_patterns:
return False
full_name = os.path.join(dirpath, file_name)
for pattern in exclude_patterns:
if fnmatch.fnmatch(file_name, pattern):
return True
if fnmatch.fnmatch(full_name, pattern):
return True
for pattern in self._index_blacklist:
if fnmatch.fnmatch(file_name, pattern):
return True
if fnmatch.fnmatch(full_name, pattern):
return True
return False
def _is_whitelisted_folder(self, base_path, dirpath, folder):
include_patterns = self._folder_configs[base_path]['folder_whitelist']
        # if the list is empty then all folders are allowed
if not include_patterns:
return True
full_path = os.path.join(dirpath, folder)
for pattern in include_patterns:
if fnmatch.fnmatch(folder, pattern):
return True
if fnmatch.fnmatch(full_path, pattern):
return True
return False
def _is_blacklisted_folder(self, base_path, dirpath, folder):
exclude_patterns = self._folder_configs[base_path]['folder_blacklist']
        # if the list is empty then no folders are excluded
if not exclude_patterns:
return False
full_path = os.path.join(dirpath, folder)
for pattern in exclude_patterns:
if fnmatch.fnmatch(folder, pattern):
return True
if fnmatch.fnmatch(full_path, pattern):
return True
for pattern in self._index_blacklist:
if fnmatch.fnmatch(folder, pattern):
return True
if fnmatch.fnmatch(full_path, pattern):
return True
return False
def find_base_path(self, dirpath):
not_found = (None, None)
if not dirpath:
return not_found
for bp in self._folder_configs.keys():
if dirpath.startswith(bp):
return (bp, self._folder_configs[bp]['follow_symlinks'])
if DEBUG:
print("No base path found for '%s' in (%s)" % (dirpath, self._folder_configs.keys()))
return not_found
def base_paths(self):
return tuple((key, self._folder_configs[key]['follow_symlinks'])
for key in self._folder_configs.keys())
def file_matches(self, dirpath, file_name, st_mode=0, base_path=None):
if not base_path:
base_path, follow_symlinks = self.find_base_path(dirpath)
if not base_path:
return False
st_mode = os.stat(os.path.join(dirpath, file_name),
follow_symlinks=follow_symlinks).st_mode
if not stat.S_ISREG(st_mode):
return False
if not self._is_whitelisted_file(base_path, dirpath, file_name):
return False
if self._is_blacklisted_file(base_path, dirpath, file_name):
return False
return True
def folder_matches(self, dirpath, folder, st_mode=0, base_path=None):
if not base_path:
base_path, follow_symlinks = self.find_base_path(dirpath)
if not base_path:
return False
            st_mode = os.stat(os.path.join(dirpath, folder),
                              follow_symlinks=follow_symlinks).st_mode
if not stat.S_ISDIR(st_mode):
return False
if not self._is_whitelisted_folder(base_path, dirpath, folder):
return False
if self._is_blacklisted_folder(base_path, dirpath, folder):
return False
return True
# The folder where we store cscope indexes for workspaces (since they have no project
# folder associated with them.)
def _get_tmp_db_folder():
return os.path.join(sublime.cache_path(), PACKAGE_NAME, 'workspace_tmp')
def _get_proj_name(view_or_window):
proj_name = None
win = view_or_window
if hasattr(view_or_window, 'window'):
win = view_or_window.window()
# we are only interested in windows with folders open
if win and win.folders():
proj_name = win.project_file_name()
        # if the window doesn't have a proj_name, generate a dummy one
if not proj_name:
proj_name = os.path.join(_get_tmp_db_folder(), 'workspace_' + str(win.id()))
return proj_name
def _set_from_sorted_list(l):
if not l:
return set()
l.sort()
return set(l)
def _disassociate_window(proj_file, win):
indexer_data = _indexers.get(proj_file, None)
if indexer_data:
indexer_data['windows'].remove(win)
if not indexer_data['windows']:
return True
return False
def _trim_indexers():
for key, indexer_data in list(_indexers.items()):
# remove indexers that are not associated with any windows
if not indexer_data['windows']:
indexer = _indexers.pop(key)['indexer']
indexer.quit()
def _find_window_from_proj_file(proj_file):
win = None
if proj_file in _indexers:
indexer_data = _indexers[proj_file]
windows = [w for w in sublime.windows() if w.id() in indexer_data['windows']]
if windows:
win = windows[0]
return win
def _find_window_from_indexer(indexer):
win = None
for proj_file, indexer_data in _indexers.items():
if indexer is indexer_data['indexer']:
win = _find_window_from_proj_file(proj_file)
return win
# The module level API
def get_db_location(win):
if not win:
return None
proj_name = _get_proj_name(win)
if not proj_name:
return None
path, name_ext = os.path.split(proj_name)
if not os.path.exists(path):
print("%s: Path: %s does not exist. Will not attempt to index project: %s"
% (PACKAGE_NAME, path, proj_name))
return None
name, ext = os.path.splitext(name_ext)
db_location = os.path.join(path, name + DB_FOLDER_POSTFIX)
if os.path.isfile(db_location):
print("%s: Path: %s already exists but is not a folder. \
Will not attempt to index project: %s" % (PACKAGE_NAME, db_location, proj_name))
return None
return db_location
def refresh(win=None, explicit_refresh=True):
"""
Refresh the file tree of the indexer belonging to window
if win is None refresh all indexers.
"""
tmp_folder = _get_tmp_db_folder()
if os.path.isfile(tmp_folder):
print("%s: %s exists but is not a folder. Removing" % (PACKAGE_NAME, tmp_folder))
os.remove(tmp_folder)
if not os.path.exists(tmp_folder):
print("%s: Creating tmp folder: %s." % (PACKAGE_NAME, tmp_folder))
os.makedirs(tmp_folder, exist_ok=True)
windows = [win] if win else sublime.windows()
indexer_win_pair = [(_get_proj_name(win), win) for win in windows
if _get_proj_name(win)]
for proj_file, win in indexer_win_pair:
# in case the window is being reused with a new project,
# disassociate from the old project
if win.id() in _indexers_by_win and _indexers_by_win[win.id()] != proj_file:
_disassociate_window(_indexers_by_win[win.id()], win.id())
indexer_data = _indexers.setdefault(proj_file, {})
indexer = indexer_data.setdefault('indexer', Indexer())
indexer_cfg = IndexerConfig(win)
if indexer_cfg != indexer_data.get('config', None):
# Since there is a change in the config
# The indexer will do an implicit refresh
explicit_refresh = False
indexer.set_config(indexer_cfg)
indexer_data['config'] = indexer_cfg
indexer_windows = indexer_data.setdefault('windows', [])
if not win.id() in indexer_windows:
indexer_windows.append(win.id())
_indexers_by_win[win.id()] = proj_file
indexer.start()
if explicit_refresh:
indexer.refresh()
def buffer_promoted(file_path):
"""
The file located at 'file_path' has been opened and modified and should
therefore be promoted to the indexers' active list.
"""
# Special case were the file is a project file
if file_path in _indexers:
sublime.set_timeout_async(lambda: settings_changed(file_path), 1000)
return
# Notify all indexers that the buffer should be promoted
# The indexers will ignore this call if the buffer doesn't belong to their
# project
for indexer_data in _indexers.values():
indexer_data['indexer'].promote_buffer(file_path)
if DEBUG: print("buffer_promoted: '%s'" % file_path)
def buffer_demoted(file_path):
"""
The file located at 'file_path' has been closed and should therefore
be demoted to the indexers' passive list.
"""
#ignore any project files being closed
if file_path in _indexers:
return
for indexer_data in _indexers.values():
indexer_data['indexer'].demote_buffer(file_path)
def window_state_changed():
"""
Called every time there is a significant state change in the currently
open windows and we need to take action.
"""
# look for any indexers to close
curr_windows = {win.id() for win in sublime.windows()}
old_windows = _indexers_by_win.keys()
obsolete_windows = old_windows - curr_windows
for key in obsolete_windows:
proj_file = _indexers_by_win.pop(key)
_disassociate_window(proj_file, key)
# implicitly refresh all active windows
refresh(explicit_refresh=False)
# Remove orphan indexers
_trim_indexers()
def settings_changed(proj_file=None):
"""
Called each time our settings object
(or project file) has been modified
"""
if proj_file and proj_file in _indexers:
# A specific project file was modified.
# Notify the indexer if the config differs.
indexer_data = _indexers[proj_file]
indexer = indexer_data['indexer']
config = indexer_data['config']
win = _find_window_from_proj_file(proj_file)
if not win:
return
new_config = IndexerConfig(win)
if new_config != config:
indexer.set_config(new_config)
indexer_data['config'] = new_config
else:
# implicitly refresh all active windows
refresh(explicit_refresh=False)
def quit():
"""Closes all indexers and removes them."""
_indexers_by_win.clear()
for indexer_data in _indexers.values():
indexer_data.setdefault('windows',[]).clear()
_trim_indexers()
```
#### File: SublimeCscope/sublime_cscope/settings.py
```python
import sublime
from ..SublimeCscope import PACKAGE_NAME
SETTING_DEFAULTS = {
'index_file_extensions': [
".c",
".cc",
".cpp",
".h",
".hpp",
".l",
".y",
".py",
".rb",
".java"
],
'cscope_path': None,
'search_std_include_folders': False,
'extra_include_folders': [],
'tmp_folder': [],
'maximum_results': 1000
}
def load_settings():
return sublime.load_settings(PACKAGE_NAME + '.sublime-settings')
def get(key, view_or_window):
default = SETTING_DEFAULTS.get(key, None)
#first lookup the setting in project if it exists
#(prefixed by 'sublimecscope_')
win = view_or_window
if hasattr(view_or_window, 'window'):
win = view_or_window.window()
proj_settings = win.project_data().get('settings', None) if win else None
proj_settings_key = 'sublimecscope_' + key
if proj_settings and proj_settings_key in proj_settings:
return proj_settings[proj_settings_key]
#Otherwise look in our own settings
return load_settings().get(key, default)
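# Example (illustrative, not from the original file): a project can override
# any of the defaults above by prefixing the key with 'sublimecscope_' inside
# the "settings" block of its .sublime-project file, e.g.
#
#     { "settings": { "sublimecscope_maximum_results": 500 } }
#
# get("maximum_results", window) then returns 500 instead of the default 1000.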
``` |
{
"source": "jguszr/mondaystats",
"score": 3
} |
#### File: jguszr/mondaystats/getData.py
```python
import requests
from requests.auth import HTTPBasicAuth
import json
import objectpath
import pandas as pd
import matplotlib.pyplot as plt
global KEY_TOKEN
KEY_TOKEN = "<KEY>"
def get_all_boards():
print("get_boards")
boards = requests.get("https://api.monday.com:443/v1/boards.json?per_page=12&only_globals=true&order_by_latest=false&api_key="+ str(KEY_TOKEN))
return json.loads(boards.content)
def get_board_by_name(board_name, json_content):
print("get_board_by_name(board_name, jsonc_ontent):")
for board in json_content:
if board["name"]==board_name:
return board
return None
def get_board_pulses(board_id):
print("get_board_pulses(board_id):")
page = 0
lst_of_pulses = []
while True:
pulses = requests.get("https://api.monday.com:443/v1/boards/"+str(board_id)+"/pulses.json?page="+str(page)+"&per_page=25&order_by=updated_at_desc&api_key="+str(KEY_TOKEN))
if len(pulses.content) != 2:
lst_of_pulses.append(pulses.content)
else:
break
page +=1
return lst_of_pulses
def get_board_ids(board_id):
print("def get_board_ids(board_id):")
resp = requests.get("https://api.monday.com:443/v1/boards/"+str(board_id)+"/groups.json?show_archived=false&api_key="+str(KEY_TOKEN))
if len(resp.content)!=2:
return json.loads(resp.content)
def prepare_data():
mb = get_board_by_name("MainBoard",get_all_boards())
x = get_board_pulses(mb["id"])
lst_of_boards = []
lst_of_groups = get_board_ids(mb["id"])
for i in x:
lst_of_boards.append(json.loads(i))
lst = []
for f in lst_of_boards:
for d in f :
rec = {}
rec["name"] = d["pulse"]["name"]
rec["created_at"] = d["pulse"]["created_at"]
rec["updated_at"] = d["pulse"]["updated_at"]
rec["group_id"] = [x["title"] for x in lst_of_groups if x["id"]== d["board_meta"]["group_id"] ][0]
## handling column_values
rec["Assignee"] = None
rec["Priority"] = None
rec["Status"] = None
rec["Estimado"] = None
rec["Realizado"] = None
rec["Plataformas"] = None
for c in d["column_values"]:
try:
if rec["Assignee"] == None:
rec["Assignee"] = handle_internal_value(c, "Assignee", "name")
if rec["Priority"] == None:
rec["Priority"] = handle_internal_value(c, "Priority", "index")
if rec["Status"] == None:
rec["Status"] = handle_internal_value(c, "Status", "index")
if rec["Estimado"] == None:
rec["Estimado"] = handle_internal_value(c, "Estimado", "")
if rec["Realizado"] == None:
rec["Realizado"] = handle_internal_value(c, "Realizado", "")
if rec["Plataformas"] == None:
rec["Plataformas"] = handle_internal_value(c, "Plataformas", "")
except:
continue
lst.append(rec)
return pd.DataFrame(lst)
def handle_internal_value(c, required_title, returning_field):
if c["title"]==required_title:
if returning_field=="":
return c["value"]
else:
try:
return c["value"][returning_field]
except:
raise
def handle_ds(data):
data["Estimado"] = pd.to_numeric(data["Estimado"])
data["Realizado"] = pd.to_numeric(data["Realizado"])
return data
#ds = handle_ds(prepare_data())
#ds.to_csv("complete.csv")
#print(ds.head(20))
``` |
{
"source": "jguterl/electronvolt",
"score": 4
} |
#### File: jguterl/electronvolt/electronvolt.py
```python
from math import pi
from math import e as euler # prevent duplicating elementary charge
from math import log as ln # log defaults to ln
from math import exp, sin, cos, tan, asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh, degrees, radians
# %% unit converter
class Unit:
def __init__(self, d):
self.d = {unit:power for unit, power in d.items() if power} # remove zero power
def __repr__(self): # for commandline convenience. __str__ redirects here
terms = []
for unit, power in self.d.items():
if power == 1:
terms.append(unit)
else:
terms.append('{0}**{1}'.format(unit, power))
return ' * '.join(terms)
def __eq__(self, other):
return self.d == other.d
def __mul__(self, other):
units = set(self.d) | set(other.d)
d = {unit : self.d.get(unit, 0) + other.d.get(unit, 0) for unit in units}
return Unit(d)
def __truediv__(self, other):
units = set(self.d) | set(other.d)
d = {unit : self.d.get(unit, 0) - other.d.get(unit, 0) for unit in units}
return Unit(d)
def __pow__(self, p):
result = {unit : power * p for unit, power in self.d.items()}
return Unit(result)
def __bool__(self):
return bool(self.d)
# %% physical quantity calculator
class Quantity:
def __init__(self, value, unit):
self.value = value
self.unit = unit
def __repr__(self):
if not self.unit:
return repr(self.value)
return '{0} * {1}'.format(self.value, self.unit)
def __eq__(self, other):
return self.value == other.value and self.unit == other.unit
def __lt__(self, other):
return self.value < other.value and self.unit == other.unit
def __gt__(self, other):
return self.value > other.value and self.unit == other.unit
def __le__(self, other):
return self.value <= other.value and self.unit == other.unit
    def __ge__(self, other):
        return self.value >= other.value and self.unit == other.unit
def __add__(self, other):
if isinstance(other, (int, float)): # handles 1*kg/kg + 1
return self + Quantity(other, Unit({})) # implicit-ish recursion
assert self.unit == other.unit, "Addition undefined between '{0}' and '{1}'".format(self.unit, other.unit)
return Quantity(self.value + other.value, self.unit)
def __radd__(self, other):
return self + other
def __sub__(self, other):
if isinstance(other, (int, float)): # handles 1*kg/kg - 1
return self - Quantity(other, Unit({}))
assert self.unit == other.unit, "Subtraction undefined between '{0}' and '{1}'".format(self.unit, other.unit)
return Quantity(self.value - other.value, self.unit)
def __neg__(self):
return Quantity(-self.value, self.unit)
def __rsub__(self, other):
return - (self - other) # self.__rsub__(other) becomes -self.__sub__(other)
def __mul__(self, other): # both kg*10 and kg*m works
if isinstance(other, (int, float)): # not considering 0*kg == 0
return self * Quantity(other, Unit({}))
return Quantity(self.value * other.value, self.unit * other.unit)
def __rmul__(self, other): # handles 10*kg
return self * other
def __truediv__(self, other):
if isinstance(other, (int, float)):
return self / Quantity(other, Unit({}))
return Quantity(self.value / other.value, self.unit / other.unit)
def __rtruediv__(self, other):
return (self / other) ** -1
def __pow__(self, exponent):
return Quantity(self.value ** exponent, self.unit ** exponent)
def __rpow__(self, base): # handles euler ** (1*s/s)
assert not self.unit, "Exponent of '{}' is undefined".format(self.unit)
return base ** self.value
def __contains__(self, other):
return self.unit == other.unit # sloppy. not doing .convert()
def __bool__(self):
return bool(self.value)
def __float__(self): # handles exp(1*s/s)
assert not self.unit, "Conversion undefined from '{}' to ''".format(self.unit)
return float(self.value)
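# usage sketch (illustrative addition, not part of the original file): a
# Quantity pairs a numeric value with a Unit and arithmetic keeps the units
# consistent; adding incompatible quantities raises an AssertionError
_example_speed = Quantity(3, Unit({'m': 1})) / Quantity(1, Unit({'s': 1}))
assert _example_speed == Quantity(3.0, Unit({'m': 1, 's': -1}))
del _example_speed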
# %% trigonometric functions
# sin, provided by math, input in radians
# cos
# tan
csc = lambda x : 1 / sin(x)
sec = lambda x : 1 / cos(x)
cot = lambda x : 1 / tan(x)
# asin, return in radians
# acos
# atan
acsc = lambda x : asin(1 / x)
asec = lambda x : acos(1 / x)
acot = lambda x : atan(1 / x)
sind = lambda x : sin(radians(x)) # input in degrees
cosd = lambda x : cos(radians(x))
tand = lambda x : tan(radians(x))
cscd = lambda x : csc(radians(x))
secd = lambda x : sec(radians(x))
cotd = lambda x : cot(radians(x))
asind = lambda x : degrees(asin(x)) # return in degrees
acosd = lambda x : degrees(acos(x))
atand = lambda x : degrees(atan(x))
acscd = lambda x : degrees(acsc(x))
asecd = lambda x : degrees(asec(x))
acotd = lambda x : degrees(acot(x))
# sinh, hyperbolic functions
# cosh
# tanh
csch = lambda x : 1 / sinh(x)
sech = lambda x : 1 / cosh(x)
coth = lambda x : 1 / tanh(x)
# asinh
# acosh
# atanh
acsch = lambda x : asinh(1 / x)
asech = lambda x : acosh(1 / x)
acoth = lambda x : atanh(1 / x)
# %% prefixes
yotta = 1e24
zetta = 1e21
exa = 1e18
peta = 1e15
tera = 1e12
giga = 1e9
mega = 1e6
kilo = 1e3
hecto = 1e2
deca = 1e1
deci = 1e-1
centi = 1e-2
milli = 1e-3
micro = 1e-6
nano = 1e-9
pico = 1e-12
femto = 1e-15
atto = 1e-18
zepto = 1e-21
yocto = 1e-24
hundred = hecto
thousand = kilo
million = mega
billion = giga
trillion = tera
# %% units and constants
s = Quantity(1, Unit({'s' : 1}))
m = Quantity(1, Unit({'m' : 1}))
kg = Quantity(1, Unit({'kg' : 1}))
A = Quantity(1, Unit({'A' : 1}))
K = Quantity(1, Unit({'K' : 1}))
mol = Quantity(1, Unit({'mol' : 1}))
cd = Quantity(1, Unit({'cd' : 1}))
minute = 60 * s
hour = 60 * minute
day = 24 * hour
week = 7 * day
year = 365.25 * day # average year
ms = milli * s
us = micro * s # microsecond
ns = nano * s
km = kilo * m
dm = deci * m
cm = centi * m
mm = milli * m
um = micro * m # micrometer
nm = nano * m
fm = femto * m # fermi
Hz = s**-1 # Hertz
kHz = kilo * Hz
MHz = mega * Hz
GHz = giga * Hz
THz = tera * Hz
g = 9.80665 * m / s**2 # gravitational acceleration
N = kg * m / s**2 # Newton
Pa = N / m**2 # Pascal
J = N * m # Joule
W = J / s # Watt
h = 6.62607015e-34 * J * s # Planck constant
hbar = h / (2 * pi) # reduced Planck constant
NA = 6.02214076e23 * mol**-1 # Avogadro constant
kB = 1.380649e-23 * J / K # Boltzmann constant
R = NA * kB # ideal gas constant
C = A * s # Coulomb
V = J / C # Volt
F = C / V # Farad
Ohm = V / A # Ohm
T = V * s / m**2 # Tesla
Wb = T * m**2 # Weber
H = Ohm * s # henry, unit of inductance
c = 299792458 * m / s # speed of light
mu0 = 1.25663706212e-6 * H / m # vacuum magnetic permeability
epsilon0 = 1 / (mu0 * c**2) # vacuum electric permittivity
k = 1 / (4 * pi * epsilon0) # Coulomb constant
e = 1.602176634e-19 * C # elementary charge
inch = 25.4 * mm # symbol in is a python keyword
foot = 12 * inch
yard = 3 * foot
mile = 1760 * yard
lb = 0.45359237 * kg # pound mass
lbf = lb * g # pound force
slug = lbf * s**2 / foot # imperial unit of mass
blob = lbf * s**2 / inch # imperial unit of mass
kph = km / hour # kilometer per hour
mph = mile / hour # miles per hour
gram = kg / kilo # gram
L = dm**3 # liter
psi = 6.894757 * kilo * Pa # pound per square inch
kWh = kilo * W * hour # kilowatt-hour
me = 9.1093837015e-31 * kg # electron mass
mp = 1.67262192369e-27 * kg # proton mass
mn = 1.67492749804e-27 * kg # neutron mass
u = 1.66053906660e-27 * kg # atomic mass unit, 1/12 atomic mass of carbon 12
mH = 1.007825 * u # atomic mass of hydrogen
mHe = 4.002602 * u # atomic mass of helium
sigma = pi**2 * kB**4 / (60 * hbar**3 * c**2) # Stefan-Boltzmann constant
a0 = 4 * pi * epsilon0 * hbar**2 / (me * e**2) # Bohr radius
hground = - me * e**4 / (8 * h**2 * epsilon0**2) # hydrogen ground state energy
alpha = e**2 / (4 * pi * epsilon0 * hbar * c) # fine structure constant
Rinfty = alpha**2 * me * c / (2 * h) # Rydberg constant
Bq = s**-1 # Becquerel
Ci = 3.7e10 * Bq # Curie, radioactive decay
mCi = milli * Ci # millicurie
uCi = micro * Ci # microcurie
eV = e * V # electronvolt
keV = kilo * eV # kilo-electronvolt
MeV = mega * eV # mega-electronvolt
GeV = giga * eV # giga-electronvolt
TeV = tera * eV # tera-electronvolt
eVpc = eV / c # electronvolt per speed of light
MeVpc = mega * eVpc # mega-electronvolt per c
eVpc2 = eV / c**2 # electronvolt per c squared
MeVpc2 = mega * eVpc2 # mega-electronvolt per c squared
G = 6.67430e-11 * m**3 * kg**-1 * s**-2 # gravitational constant
au = 149597870700 * m # astronomical unit
ly = c * year # light year
pc = au / radians(1/3600) # parsec
Mpc = mega * pc # megaparsec
H0 = 72 * km/s / Mpc # Hubble parameter
# %% testings
assert 1 * m != 1 * s
assert 2 * kg + 2 * kg < 5 * kg
assert N / C == V / m
assert kg * c**2 in J
assert pc > ly
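# two additional illustrative checks (added; approximate reference values with
# loose tolerances): hydrogen ground state is about -13.6 eV and the thermal
# energy k_B*T at roughly room temperature (293 K) is about 25 meV
assert abs(float(hground / eV) + 13.6) < 0.1
assert abs(float(kB * 293 * K / eV) - 0.025) < 0.003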
# %% print constant table at import
table = ''
for v, q in globals().copy().items():
if isinstance(q, (int, float)):
if v == 'pi':
table += '\nMath Constants\n'
elif v == 'yotta':
table += '\nMetric Prefixes\n'
elif v == 'hundred':
table += '\nCommon Prefixes\n'
table += '{:<15}{:.9g}\n'.format(v, q)
if isinstance(q, Quantity):
if v == 's':
table += '\nSI Base Units\n'
elif v == 'minute':
table += '\nTime\n'
elif v == 'km':
table += '\nLength\n'
elif v == 'Hz':
table += '\nFrequency\n'
elif v == 'g':
table += '\nClassical Mechanics\n'
elif v == 'h':
table += '\nThermodynamics\n'
elif v == 'C':
table += '\nElectromagnetism\n'
elif v == 'inch':
table += '\nImperial Units\n'
elif v == 'kph':
table += '\nCommon Units\n'
elif v == 'me':
table += '\nAtomic Physics\n'
elif v == 'sigma':
table += '\nQuantum Mechanics\n'
elif v == 'Bq':
table += '\nRadioactive Decays\n'
elif v == 'eV':
table += '\nNuclear Physics\n'
elif v == 'G':
table += '\nCosmology\n'
table += '{:<15}{:<25.9g}{}\n'.format(v, q.value, repr(q.unit))
print(table)
# %% new cell
``` |
{
"source": "JGU-VC/activation-pattern-analysis",
"score": 2
} |
#### File: activations/DS-act-stats/__init__.py
```python
import os
import torch.nn.functional as F
import torch
import numpy as np
import torch.nn as nn
def reset_checks(state):
for mode_i, (mode, dataset) in enumerate(zip(state["net_mode"],state["dataset_mode"])):
state["last_measured"][mode_i] = 0
def init(state,event):
event.rng_save()
if state["dummy"]:
state["skip"] = True
# generate new copy of net
# nothing shall be done to the original net while evaluating
# TODO: this is only needed for batch-statistics. can this be improved performance-wise?
if "train" in state["net_mode"]:
state["current_id"] = 0
state["net_cached"] = event.send_net_to_device(event.init_net())
# add queries
queries = state["activation_stats.queries"]
if "HA" in state["query"] or "MAL" in state["query"] or "HAgL" in state["query"]:
query = {
"save_current_counts": False,
"with_grad": False,
"with_labels": False,
"histdecay": 1.0,
"use_global_stats": False,
"active": False,
"additional_id": "validation",
}
queries.append(query)
if "HAL" in state["query"] or "MAL" in state["query"] or "HAgL" in state["query"]:
query = {
"save_current_counts": False,
"with_grad": False,
"with_labels": True,
"histdecay": 1.0,
"use_global_stats": False,
"active": False,
"additional_id": "validation",
}
queries.append(query)
state["queries"] = queries
assert len(state["dataset_mode"]) == 1
state["dataloader"] = event.dataloader(state["dataset_mode"][0], use_cache=False, deterministic=True, with_transform=False)
# check if non-allowed mode used
if len(set(state["net_mode"])) != len(state["net_mode"]):
raise ValueError("Currrently only one dataset_mode per net_mode allowed.")
# evaluate pretraned network for since_final mode
if state["since_final"]:
home = state["current_dir"] if "current_dir" in state else "."
ckpt_file = os.path.join(home, "ckpts", state["tag"]+".ckpt")
checkpoint = torch.load(ckpt_file)
# sanity check
# checkpoint = torch.load(ckpt_file)
# state["main.net"].load_state_dict(checkpoint['state_dict'])
# validate over full dataset
for mode_i, (mode, dataset) in enumerate(zip(state["net_mode"],state["dataset_mode"])):
if mode == "train":
net = state["net_cached"]
net.load_state_dict(checkpoint['state_dict'])
net.train()
relus = [m for m in net.modules() if "activation" in str(type(m))]
for id, relu in enumerate(relus):
for query in state["queries"]:
relu.reset_stats(query, only_global_stats=True)
relu.set_stats_active(query["id"], True)
event.validate(valloader=state["dataloader"], evalmode=mode=="eval", plot=False, dummy=state["dummy"])
for id, relu in enumerate(relus):
for query in state["queries"]:
relu.set_stats_active(query["id"], False)
q_id = state["queries"][0]["id"]
relu.stats[q_id]["global_stats_final"] = relu.stats[q_id]["global_stats"]
del relu.stats[q_id]["global_stats"]
event.rng_restore()
# event.set_seed()
state["current_id"] = 0
event.rng_restore()
def after_step(state, event, result=None, *args, **kwargs):
event.rng_save()
progress = state["main.current_batch"] / state["main.num_batches"]
progress += state["main.current_epoch"]
check = progress // state["every_epoch"]
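    # query shorthands used below (A = activation patterns, L = class labels):
    # HA = H(A), HAL = H(A,L), HAgL = H(A|L), MAL = MI(A;L)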
query_texts = {
"HA": "Entropy",
"HAL": "H(A,L)",
"HAgL": "H(A|L)",
"MAL": "MI(A;L)"
}
at_least_one_measured = False
for mode_i, (mode, dataset) in enumerate(zip(state["net_mode"],state["dataset_mode"])):
if check and check > state["last_measured"][mode_i] // state["every_epoch"] or state["last_measured"][mode_i] < 0:
at_least_one_measured = True
state["last_measured"][mode_i] = progress
net = state["main.net"]
title = "[%sm|%sd]" % (mode[0],dataset[:2])
# get cached network if train mode is used
# TODO: try this without net_cached (batch norms update gets in the way a.t.m.)
if mode == "train":
state["net_cached"].load_state_dict(state["main.net"].state_dict())
net = state["net_cached"]
net.train()
# validate over full dataset
relus = [m for m in net.modules() if "activation" in str(type(m))]
for id, relu in enumerate(relus):
for query in state["queries"]:
relu.reset_stats(query, only_global_stats=True)
relu.set_stats_active(query["id"], True)
event.validate(valloader=state["dataloader"], evalmode=mode=="eval", plot=False, dummy=state["dummy"])
for id, relu in enumerate(relus):
for query in state["queries"]:
relu.set_stats_active(query["id"], False)
if state["dummy"]:
continue
# plot
n_cls = state["dataset.num_classes"]
ε = 1e-14
for id, relu in enumerate(relus):
q_id = state["queries"][0]["id"]
# get counts from relu
counts = relu.stats[q_id]["global_stats"]
counts_labels = relu.stats[state["queries"][1]["id"]]["global_stats"] if len(state["queries"]) > 1 else None
hashmap_size = len(counts)
num_data = counts.sum()
counts_nonzero = counts[counts >= ε]
max_counts = counts.max()
if counts_labels is not None:
counts_labels = counts_labels[counts_labels >= ε]
len_counts_nonzero = len(counts_nonzero)
# ---------------- #
# entropy measures #
# ---------------- #
for query in state["query"]:
# calculate entropy
max_H = event.calc_max_H_from_counts(counts_nonzero, num_data, relu.num_filters, counts_with_labels=counts_labels, mode=query, n_cls=n_cls)
entropy = event.calc_H_from_counts(counts_nonzero, num_data, counts_with_labels=counts_labels, mode=query, n_cls=n_cls)
# plot measures
query_text = query_texts[query]
event.optional.plot_scalar2d(entropy, id, title=title+" "+query_text)
event.optional.plot_scalar2d(entropy/max_H, id, title=title+" %% max %s" % query_text)
relu.entropy_perc = (entropy/max_H).item()
relu.entropy = entropy.item()
relu.max_H = max_H
event.optional.plot_scalar2d(max_counts, id, title=title+" Count most frequent Pattern")
event.optional.plot_scalar2d(1.0*max_counts/num_data, id, title=title+" % Count of most frequent Pattern")
del counts_nonzero
# ---------------- #
# hashmap measures #
# ---------------- #
# histogram measures
event.optional.plot_scalar2d(len_counts_nonzero/num_data.double(), id, title=title+" % Num Patterns")
event.optional.plot_scalar2d(len_counts_nonzero/hashmap_size, id, title=title+" % Hashmap Filled")
event.optional.plot_scalar2d(len_counts_nonzero, id, title=title+" Num Patterns")
if not hasattr(relu, 'init_DS_act_stats'):
event.optional.plot_bar(num_data, id, title="Num Data")
event.optional.plot_bar(int(str(counts.dtype)[-2:]), id, title="Hashmap dtype")
event.optional.plot_bar(len(counts), id, title="Hashmap size")
relu.init_DS_act_stats = True
if state["plot.since_final"] and "global_stats_final" not in relu.stats[q_id]:
raise ValueError("No final statistics found. Rerun with DS-act-stats.save_final module instead.")
if state["plot.since_init"] and "global_stats_init" in relu.stats[q_id] or state["plot.since_last"] and "global_stats_last" in relu.stats[q_id]:
hasbin = relu.stats[q_id]["global_stats"] > 0
hasbin_t = relu.stats[q_id]["global_stats"] > state["threshold"]
hasbin_sum = hasbin.sum()
hasbin_t_sum = hasbin_t.sum()
# count changes since initialization
if state["plot.since_init"] and "global_stats_init" in relu.stats[q_id]:
hasbin_init = relu.stats[q_id]["global_stats_init"] > 0
assert len(hasbin) == len(hasbin_init)
same_as_init = (hasbin_init & hasbin).sum()
hasbin_init_sum = hasbin_init.sum()
changes_since_init = ((~hasbin_init & hasbin).int() - (hasbin_init & ~hasbin).int()).sum()
del hasbin_init
hasbin_init_t = relu.stats[q_id]["global_stats_init"] > state["threshold"]
same_as_init_t = (hasbin_init_t & hasbin_t).sum()
hasbin_init_t_sum = hasbin_init_t.sum()
changes_since_init_t = ((~hasbin_init_t & hasbin_t).int() - (hasbin_init_t & ~hasbin_t).int()).sum()
if state["plot.absolute_stats"]:
event.optional.plot_scalar2d(changes_since_init, id, title=title+"[sinceInit] Patterns Changed")
if state["plot.relative_stats"]:
event.optional.plot_scalar2d(1.0*same_as_init/(hasbin_sum + hasbin_init_sum - same_as_init), id, title=title+"[sinceInit] JI(init,current)")
event.optional.plot_scalar2d(1.0*(same_as_init_t+ε)/(hasbin_t_sum + hasbin_init_t_sum - same_as_init_t + ε), id, title=title+"[sinceInit][>T] JI(init,current)")
wJI = (torch.min(counts,relu.stats[q_id]["global_stats_init"]).sum().item()/torch.max(counts,relu.stats[q_id]["global_stats_init"]).sum().item())
event.optional.plot_scalar2d(1.0*wJI, id, title=title+"[sinceInit] wJI(init,current)")
event.optional.plot_scalar2d(1.0*wJI/state["every_epoch"], id, title=title+"[sinceInit] rwJI(init,current)")
event.optional.plot_scalar2d(1.0*changes_since_init/( len_counts_nonzero ), id, title=title+"[sinceInit] % Patterns Changed")
# count changes since last statistics
if state["plot.since_last"] and "global_stats_last" in relu.stats[q_id]:
hasbin_last = relu.stats[q_id]["global_stats_last"] > 0
assert len(hasbin) == len(hasbin_last)
hasbin_last_sum = hasbin_last.sum()
same_as_last = (hasbin_last & hasbin).sum()
changes_since_last = ((~hasbin_last & hasbin).int() - (hasbin_last & ~hasbin).int()).sum()
changeH_max = torch.log2(1.0*(hasbin | hasbin_last).sum())
# change entropy (not a real entropy)
# change = 1.0*(relu.stats[q_id]["global_stats_last"] - relu.stats[q_id]["global_stats"]).abs()
# hasbin_sum_weighted = (change*hasbin).sum()
# hasbin_last_sum_weighted = (change*hasbin_last).sum()
# same_as_last_weighted = (change*(hasbin_last & hasbin)).sum()
# kullback-leibler
# counts = relu.stats[q_id]["global_stats"]
# counts_last = relu.stats[q_id]["global_stats_last"]
# KL = (counts/num_data * torch.log2(counts/(counts_last+ε)+ε)).sum().item()
# KL2 = (counts_last/num_data * torch.log2(counts_last/(counts+ε)+ε)).sum().item()
# event.optional.plot_scalar2d(KL, id, title=title+"[sinceLast] KL1")
# event.optional.plot_scalar2d(KL2, id, title=title+"[sinceLast] KL2")
# weighted IJ
# JI_last_current_weighted = (1.0*same_as_last_weighted/(hasbin_sum_weighted + hasbin_last_sum_weighted - same_as_last_weighted)).item()
# relu.changeH = changeH.item()
# relu.changeH_perc = changeH.item()/changeH_max.item()
# relu.changeH_perc = JI_last_current_weighted
# weighted IJ
counts_curr = relu.stats[q_id]["global_stats"]
counts_last = relu.stats[q_id]["global_stats_last"]
JI_last_current_weighted = 1.0*torch.min(counts_curr,counts_last).sum().item()/torch.max(counts_curr,counts_last).sum().item()
relu.weighted_JI = JI_last_current_weighted
# relu.changeH = changeH.item()
# relu.changeH_perc = changeH.item()/changeH_max.item()
# relu.changeH_perc = JI_last_current_weighted
del hasbin_last
# change Entropy
# # changeH = 1.0*(relu.stats[q_id]["global_stats_last"] - relu.stats[q_id]["global_stats"]).abs()
# # changeH /= changeH.sum() + 1e-20
# # changeH = - (changeH * torch.log2(changeH + 1e-20)).sum()
# # changeH /= torch.max(relu.stats[q_id]["global_stats_last"], relu.stats[q_id]["global_stats"]) + 1e-20
# changeH = 1.0*(relu.stats[q_id]["global_stats_last"] - relu.stats[q_id]["global_stats"]).abs()
# changeH /= changeH.sum() + 1e-20
# # changeH = 1.0*(changeH - changeH.min()) / (changeH.max() - changeH.min())
# changeH = - (changeH * torch.log2(changeH + 1e-20)).sum()
hasbin_last_t = relu.stats[q_id]["global_stats_last"] > state["threshold"]
same_as_last_t = (hasbin_last_t & hasbin_t).sum()
hasbin_last_t_sum = hasbin_last_t.sum()
changes_since_last_t = ((~hasbin_last_t & hasbin_t).int() - (hasbin_last_t & ~hasbin_t).int()).sum()
JI_last_current = (1.0*same_as_last/(hasbin_sum + hasbin_last_sum - same_as_last)).item()
JIT_last_current = (1.0*(same_as_last_t + ε)/(hasbin_t_sum + hasbin_last_t_sum - same_as_last_t + ε)).item()
relu.JI_last_current = JI_last_current
relu.JIT_last_current = JIT_last_current
if state["plot.absolute_stats"]:
event.optional.plot_scalar2d(changes_since_last, id, title=title+"[sinceLast] Patterns Changed")
if state["plot.relative_stats"]:
event.optional.plot_scalar2d(JI_last_current, id, title=title+"[sinceLast] JI(last,current)")
                            event.optional.plot_scalar2d(JIT_last_current, id, title=title+"[sinceLast][>T] JI(last,current)")
event.optional.plot_scalar2d(JI_last_current_weighted, id, title=title+"[sinceLast] wJI(last,current)")
event.optional.plot_scalar2d(JI_last_current_weighted/state["every_epoch"], id, title=title+"[sinceLast] rwJI(last,current)")
event.optional.plot_scalar2d(1.0*changes_since_last/( len_counts_nonzero ), id, title=title+"[sinceLast] % Patterns Changed")
# event.optional.plot_scalar2d(changeH/changeH_max, id, title=title+"[sinceLast] % Weighted JI")
event.optional.plot_scalar2d(JI_last_current_weighted*relu.entropy, id, title=title+"[sinceLast] wJI·Entropy")
event.optional.plot_scalar2d(JI_last_current_weighted*relu.entropy_perc, id, title=title+"[sinceLast] wJI·relEntropy")
if state["plot.since_final"] and "global_stats_final" in relu.stats[q_id] or state["plot.since_last"] and "global_stats_last" in relu.stats[q_id]:
hasbin = relu.stats[q_id]["global_stats"] > 0
hasbin_t = relu.stats[q_id]["global_stats"] > state["threshold"]
hasbin_sum = hasbin.sum()
hasbin_t_sum = hasbin_t.sum()
# count changes since final
if state["plot.since_final"] and "global_stats_final" in relu.stats[q_id]:
hasbin_final = relu.stats[q_id]["global_stats_final"] > 0
assert len(hasbin) == len(hasbin_final)
same_as_final = (hasbin_final & hasbin).sum()
hasbin_final_sum = hasbin_final.sum()
changes_since_final = ((~hasbin_final & hasbin).int() - (hasbin_final & ~hasbin).int()).sum()
del hasbin_final
hasbin_final_t = relu.stats[q_id]["global_stats_final"] > state["threshold"]
same_as_final_t = (hasbin_final_t & hasbin_t).sum()
hasbin_final_t_sum = hasbin_final_t.sum()
changes_since_final_t = ((~hasbin_final_t & hasbin_t).int() - (hasbin_final_t & ~hasbin_t).int()).sum()
if state["plot.absolute_stats"]:
event.optional.plot_scalar2d(changes_since_final, id, title=title+"[sinceFinal] Patterns Changed")
if state["plot.relative_stats"]:
event.optional.plot_scalar2d(1.0*same_as_final/(hasbin_sum + hasbin_final_sum - same_as_final), id, title=title+"[sinceFinal] JI(final,current)")
event.optional.plot_scalar2d(1.0*(same_as_final_t+ε)/(hasbin_t_sum + hasbin_final_t_sum - same_as_final_t + ε), id, title=title+"[sinceFinal][>T] JI(final,current)")
wJI = (torch.min(counts,relu.stats[q_id]["global_stats_final"]).sum().item()/torch.max(counts,relu.stats[q_id]["global_stats_final"]).sum().item())
event.optional.plot_scalar2d(1.0*wJI, id, title=title+"[sinceFinal] wJI(final,current)")
event.optional.plot_scalar2d(1.0*wJI/state["every_epoch"], id, title=title+"[sinceFinal] rwJI(final,current)")
event.optional.plot_scalar2d(1.0*changes_since_final/( len_counts_nonzero ), id, title=title+"[sinceFinal] % Patterns Changed")
# save hashmap stats for change stats
if state["plot.since_init"] and "global_stats_init" not in relu.stats[q_id]:
relu.stats[q_id]["global_stats_init"] = counts
if state["plot.since_last"]:
if "global_stats_last" in relu.stats[q_id]:
del relu.stats[q_id]["global_stats_last"]
relu.stats[q_id]["global_stats_last"] = counts
if at_least_one_measured:
event.optional.actstats_done()
event.rng_restore()
return result, args, kwargs
def register(mf):
mf.register_default_module("relu", required_event='activation_layer')
mf.load("..activation_stats")
mf.load("validate")
mf.register_defaults({
"threshold": 10,
"query": ["HA"],
# "query": ["HA","HAgL","HAL","MAL"],
"net_mode": ["train"],
"dataset_mode": ["train"],
"every_epoch": 0.2,
"plot.since_init": True,
"plot.since_last": True,
"plot.since_final": False,
"plot.absolute_stats": True,
"plot.relative_stats": True,
"plot.test": True,
"dummy": False,
})
mf.register_helpers({
"last_measured": [-1,-1],
"queries": [
# example query
# {
# "with_grad": True,
# "with_labels": True,
# "histdecay": 1.0,
# "use_global_stats": True, # use collected stats if exist
# }
],
})
mf.register_event('after_step', after_step, unique=False)
mf.register_event('after_training', reset_checks, unique=False)
mf.register_event('after_training', after_step, unique=False)
mf.register_event('init', init, unique=False)
```
#### File: nonstandard_variants/pyramidnet20/__init__.py
```python
def register(mf):
mf.register_default_module("basicblock_pyramid", required_event="resblock")
mf.load("grp.model.resnet.cifar_variants.pyramidnet_defaults")
mf.set_scope("grp.model.resnet")
mf.overwrite_defaults({
"num_blocks": 3 * [3], # first_layer + (3 blocks * 18 reslayers * 2 convs) + final_layer = 110 layers
}, scope="grp.model.resnet")
```
#### File: sotacifar10/base/__init__.py
```python
def register(mf):
# this is a default training/validate/testing experiment
mf.register_default_module(["train", "validate", "test"], required_event="main", overwrite_globals={
"main.epochs": 200,
})
# although we are configuring for cifar10 here, this can also be overwritten
mf.register_default_module("cifar10", required_event="dataset")
# setting defaults for the absoulte minimum setup, e.g. model, activations, normalization, ...
mf.register_default_module("resnet56", required_event="init_net")
mf.register_default_module("batchnorm", required_event="normalization_layer")
mf.register_default_module("relu", required_event="activation_layer")
mf.register_default_module("conv2d", required_event="filter_layer")
mf.register_default_module("classifier", required_event="classifier_layer")
mf.register_default_module("optimizer", required_event="step")
mf.register_default_module("gpu", required_event="send_net_to_device")
# Augmentation
mf.register_default_module("augment", required_event="dataset_transform", overwrite_globals={
"data.augment.flip": True,
"data.augment.cropsize": 32,
"data.augment.croppadding": 4,
"data.augment.rotationdeg": 0,
})
mf.overwrite_globals({
"data.cpuloader.batchsize": 128,
# Validation
"data.cpuloader.val_prop": 0.01,
"data.cpuloader.drop_last": False,
})
```
#### File: main/test/__init__.py
```python
from tqdm import tqdm
import torch
def test(state, event, *args, testloader=None, net=None, evalmode=True, plot=True, **kwargs):
# breakpoint()
del args, kwargs # unused
if not testloader:
testloader = event.dataloader("test")
if not net:
if "main.net" in state:
net = state["main.net"]
else:
# get network
net = state["net"] = event.init_net()
# send to device
net = event.send_net_to_device(net)
# get criterion
criterion = event.init_loss()
criterion = event.send_loss_to_device(criterion)
# optional events (typically optimizer, learning rate scheduler, etc.)
event.optional.before_testing()
# testing loop
event._mf.print_heading("Testing Loop ...")
state["num_batches"] = len(testloader)
if evalmode:
net.eval()
acc_1 = event.Welford()
acc_5 = event.Welford()
with torch.no_grad():
if "tqdm_batch" not in state:
state["tqdm_batch"] = tqdm(total=len(testloader), position=0, desc="Test", dynamic_ncols=False)
for state["current_batch"], data in enumerate(testloader):
_inputs = event.send_data_to_device(data[0])
_labels = event.send_labels_to_device(data[1])
state["main.labels"] = _labels
output = net(_inputs)
state["examples_seen"] += len(_inputs)
_, pred = output.topk(5, 1, largest=True, sorted=True)
_labels = _labels.view(_labels.size(0), -1).expand_as(pred)
correct = pred.eq(_labels).float()
# compute top-1/top-5
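                # correct[:, :k].sum(1) is 1 when the true label is among the
                # top-k predictions for a sample and 0 otherwise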
correct_5 = correct[:, :5].sum(1).cpu().numpy()
correct_1 = correct[:, :1].sum(1).cpu().numpy()
[acc_1(c) for c in correct_1] # pylint: disable=expression-not-assigned
[acc_5(c) for c in correct_5] # pylint: disable=expression-not-assigned
state["tqdm_batch"].update(1)
state["tqdm_batch"].reset()
state["tqdm_batch"].clear()
event.optional.after_testing()
state["test_accuracy"] = acc_1.mean
net.train()
if plot:
event.optional.plot_scalar(acc_1.mean, 0, title="test_acc_1")
event.optional.plot_scalar(acc_5.mean, 0, title="test_acc_5")
event.optional.reset_dataloader("test")
    # pre-delete tqdm object (garbage collection raises an exception due to a bug in tqdm related to `dynamic_ncols=False`)
# if "tqdm_batch" in state:
# del state["tqdm_batch"]
def register(mf):
mf.set_scope("..")
mf.register_helpers({
"current_epoch": 0,
"current_batch": 0,
"num_batches": 0,
"examples_seen": 0
})
mf.load('Welford')
mf.register_event('main', test)
mf.register_event('test', test)
```
#### File: main/validate/__init__.py
```python
from tqdm import tqdm
import torch
from miniflask import outervar
def validate(state, event, *args, valloader=None, net=outervar, evalmode=True, plot=True, dummy=False, **kwargs):
del args, kwargs # unused
if not valloader:
valloader = event.dataloader("val")
if valloader is not None:
if evalmode:
net.eval()
acc_1 = event.Welford()
acc_5 = event.Welford()
with torch.no_grad():
if "tqdm_batch" not in state:
state["tqdm_batch"] = tqdm(total=len(valloader), position=3, desc="Validation", dynamic_ncols=False)
for _, data in enumerate(valloader):
if dummy:
continue
_inputs = event.send_data_to_device(data[0])
_labels = event.send_labels_to_device(data[1])
state["main.labels"] = _labels
output = net(_inputs)
_, pred = output.topk(5, 1, largest=True, sorted=True)
_labels = _labels.view(_labels.size(0), -1).expand_as(pred)
correct = pred.eq(_labels).float()
# compute top-1/top-5
correct_5 = correct[:, :5].sum(1).cpu().numpy()
correct_1 = correct[:, :1].sum(1).cpu().numpy()
[acc_1(c) for c in correct_1] # pylint: disable=expression-not-assigned
[acc_5(c) for c in correct_5] # pylint: disable=expression-not-assigned
state["tqdm_batch"].update(1)
state["tqdm_batch"].reset()
state["tqdm_batch"].clear()
state["val_accuracy"] = acc_1.mean
net.train()
if plot:
event.optional.plot_scalar(acc_1.mean, title="validation_acc_1")
event.optional.plot_scalar(acc_5.mean, title="validation_acc_5")
event.optional.reset_dataloader("val")
def register(mf):
mf.load('Welford')
mf.register_event('after_epoch', validate)
# mf.register_event('after_training', validate)
mf.register_event('validate', validate)
```
#### File: blocks/util/__init__.py
```python
from enum import Enum
from torch import nn
class ShortcutOption(Enum):
A = 0
B = 1
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super().__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
def register(mf):
mf.set_scope("...")
mf.register_helpers({
"ShortcutOption": ShortcutOption,
"LambdaLayer": LambdaLayer,
})
```
#### File: imgnet_variants/pyramidnet_defaults/__init__.py
```python
def register(mf):
mf.load("...")
mf.load("...blocks.util")
mf.set_scope("...")
mf.overwrite_defaults({
"num_planes": [64, 64 + 300], # alpha=300
"strides": [2, 2, 2, 2],
"plane_increase_strategy": mf.state["PlaneIncreaseStrategy"].ADD,
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
"replace_stride_with_dilation": [False, False, False],
# first conv layer
"first_conv_filters": 64,
"first_conv_kernel_size": 7,
"first_conv_padding": 3,
"first_conv_stride": 2,
"first_conv_activation": True,
"first_conv_max_pool": True,
# PyramidNets always use shortcut type A
"option": mf.state["ShortcutOption"].A,
}, scope="model.resnet")
```
#### File: util/classifier/__init__.py
```python
from torch.nn import Linear
def classifier_layer(state, in_channels, out_channels=None, bias=None, **kwargs):
del kwargs
if out_channels is None:
out_channels = state["dataset.num_classes"]
if bias is None:
bias = state["bias"]
return Linear(in_channels, out_channels, bias=bias)
def register(mf):
mf.register_defaults({
"bias": True
})
mf.register_event("classifier_layer", classifier_layer, unique=True)
mf.register_event("classifier_layer_cls", lambda: Linear, unique=True)
```
#### File: scheduler/multistep/__init__.py
```python
import torch
def after_init_optimizer(state, optimizer, net, parameters, *args, **kwargs):
del net, parameters # unused
state["scheduler"] = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=state["milestones"], last_epoch=state["main.start_epoch"] - 1, gamma=state["gamma"])
return optimizer, args, kwargs
def scheduler_step(state, *args, **kwargs):
del args, kwargs # unused
state["scheduler"].step()
def register(mf):
mf.register_defaults({
"milestones": [10, 20],
"gamma": 0.1,
})
mf.register_event('after_init_optimizer', after_init_optimizer, unique=False)
mf.register_event('init_scheduler', after_init_optimizer)
mf.register_event('after_epoch', scheduler_step, unique=False)
mf.register_event('scheduler_step', scheduler_step, unique=False)
```
#### File: log/visdom/plot_scalar.py
```python
import numpy as np
def plot_scalar(state, Y, X=None, title="Scalar", **kwargs):
if X is None:
X = state["main.examples_seen"]
# plotname & title
pname = "scalar-" + title
kwargs["title"] = title
# assure right format
if not isinstance(Y, np.ndarray):
Y = np.array(Y).reshape([-1])
if not isinstance(X, np.ndarray):
X = np.array(X).reshape([-1])
# helper object
append = pname in state["WINDOWS"] and len(X) == 1
state["WINDOWS"][pname] = {}
# shall append ?
update_kwarg = dict(update="append") if append else {}
# create/update plot
state["vis"].line(
Y=Y,
X=X,
win=pname,
opts=kwargs,
**update_kwarg
)
```
#### File: log/visdom/plot_scatter.py
```python
def plot_scatter(state, X, Y=None, title="Scatter", update=False, **kwargs):
'''X is a Nx2 or Nx3 Tensor that specifies the locations of N points in the scatter plot.
Y is a N Tensor that contains discrete labels.
See https://github.com/facebookresearch/visdom#visscatter for all possible parameters.'''
# plotname & title
pname = "scalar-" + title
kwargs["title"] = title
# helper object
append = pname in state["WINDOWS"]
obj = state["WINDOWS"][pname] = {}
del obj # unused
# shall append ?
update_kwarg = dict(update="append") if append and update else {}
# create/update plot
state["vis"].scatter(
Y=Y,
X=X,
win=pname,
opts=kwargs,
**update_kwarg
)
```
#### File: util/progressfile/__init__.py
```python
from os import environ
from miniflask import outervar
def write_progress(state, tqdm_epoch=outervar):
d = tqdm_epoch.format_dict
elapsed = tqdm_epoch.format_interval(d['elapsed'])
progress = "%.2f%%" % (d['n'] / d['total'] * 100)
remaining = tqdm_epoch.format_interval(d["elapsed"] * max((d["total"] or 0) - d["n"], 0) / max(d["n"], 1))  # elapsed scaled by remaining/done steps
with open(state["progressfile"], "w") as f:
f.write(progress + "\n")
f.write(elapsed + "\n")
f.write(remaining)
def register(mf):
if "PROGRESSFILE" in environ:
mf.register_event("before_epoch", write_progress, unique=False)
mf.register_helpers({
"progressfile": environ['PROGRESSFILE']
})
```
#### File: utils/EMA/__init__.py
```python
import torch.nn as nn
# How to apply exponential moving average decay for variables?
# https://discuss.pytorch.org/t/how-to-apply-exponential-moving-average-decay-for-variables/10856/2
class EMA(nn.Module):
def __init__(self, mu):
super().__init__()
self.mu = mu
def forward(self, current, last_average):
if last_average is None:
return current
new_average = self.mu * current + (1 - self.mu) * last_average
return new_average
class AVG:
def __init__(self):
self.sum = 0
self.counter = 0
def reset(self):
self.__init__()
def add_value(self, value):
self.counter += 1
self.sum += value
def get_avg(self):
return self.sum / self.counter
# ema = EMA(0.999)
# current = Variable(torch.rand(5),requires_grad=True)
# average = Variable(torch.zeros(5),requires_grad=True)
# average = ema(current, average)
def register(mf):
mf.register_event('EMA', EMA, unique=True)
mf.register_event('AVG', AVG, unique=True)
```
#### File: divider/plottypes/1dscalar.py
```python
import re
import json
from subprocess import Popen, PIPE
import numpy as np
from util.names import Jaccard2last_mean_over_time, train_H_over_time
from util.extract import get_data, get_expname, compile_filename
def register(parser):
parser.add_argument('files', type=str, nargs='+', help='number of files')
parser.add_argument('scalarname', type=str, help='plotname')
def moving_average(a, n=3) :
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
def plot(plt, args):
print(args.files)
plotname = args.scalarname
if plotname.endswith(".json") or plotname.endswith(".bin"):
raise ValueError("No plotname specified.")
for full_filename in args.files:
expdir = "/".join(full_filename.split("/")[:-1])
filename = full_filename.split("/")[-1]
expname = filename[:-5]
jq = lambda cmd: Popen("jq '%s' %s " % (cmd,full_filename), shell=True, stdout=PIPE, stderr=PIPE).communicate()[0].decode('utf-8')
jq_json = lambda cmd: json.loads(jq(cmd))
jq_array = lambda cmd: np.array(jq_json(cmd))
keys = jq_json('.jsons | keys')
mode_data = re.compile(r".*scalar2d-\[(\w+\|\w+)\].*").match(",".join(keys))[1]
x_type = "%i"
if "flr" in filename and "mcmc" in filename:
name_re = compile_filename("flr-mcmcstats-{word}-{value}-{value}_{value}")
def name_match_fn(d,m):
d["net"], d["perlayer"], d["initlr"], d["seed"] = m[1], m[2], m[3], m[4]
data = get_data(args.files, name_re, name_match_fn, expname=expdir+"/"+expname, exclude_unfinished=True, cache=True)
else:
# print(keys)
test_acc = float(jq('.jsons["scalar-test_acc_1"].content.data[-1].y[-1]'))
print(filename, test_acc)
if plotname == "meanji":
data = jq_json('.jsons["scalar2d-['+mode_data+'][sinceLast] JI(last,current)"].content.data[0]')
x = data["x"]
x = np.array(x, dtype=int)
y = np.mean(data["z"],0)
elif plotname == "dLdJI":
any_d = next(iter(data.values()))
any_len = len(any_d["scalar-loss"]["y"])
losses = np.stack([d["scalar-loss"]["y"] for d in data.values() if len(d["scalar-loss"]["y"]) == any_len])
jis = [Jaccard2last_mean_over_time(d) for d in data.values()]
min_len = np.min([ji.shape[0] for ji in jis])
jis = [ji[:min_len] for ji in jis]
jis = np.stack(jis)
ji_x = np.array(any_d["scalar2d-[tm|trd][sinceLast] JI(last,current)"]["x"],dtype=int)
ji_y = np.mean(np.array(any_d["scalar2d-[tm|trd][sinceLast] JI(last,current)"]["z"]),0)
ji_x = ji_x[:min_len]
ji_y = ji_y[:min_len]
# ji_y = np.stack([d["scalar-learning rate"]["y"] for d in data.values() if len(d["scalar-loss"]["y"]) == any_len])
# jis = np.array([ji_y])
# ji_x = any_d["scalar-learning rate"]["x"]
loss_x = np.array(any_d["scalar-loss"]["x"],dtype=int)
losses_var = losses.var(0)
jis_mean = jis.mean(0)
jis_mean = np.interp(loss_x, ji_x, jis_mean)
x, y = loss_x[1:], losses_var[1:]/jis_mean[1:]**2
# x = np.linspace(0.2*len(y),len(y),len(y))/len(y)
# x = ji_y
x = np.interp(loss_x, ji_x, ji_y)
x = x[1:]
x_type = "%.8f"
y = moving_average(y, n=25)
x = moving_average(x, n=25)
elif plotname == "dEdJI":
any_d = next(iter(data.values()))
any_len = len(any_d["scalar-loss"]["y"])
losses = np.stack([d["scalar-loss"]["y"] for d in data.values() if len(d["scalar-loss"]["y"]) == any_len])
jis = [train_H_over_time(d) for d in data.values()]
min_len = np.min([ji.shape[0] for ji in jis])
jis = [ji[:min_len] for ji in jis]
jis = np.stack(jis)
ji_x = np.array(any_d["scalar2d-[tm|trd] % max Entropy"]["x"],dtype=int)
ji_y = np.mean(np.array(any_d["scalar2d-[tm|trd] % max Entropy"]["z"]),0)
ji_x = ji_x[:min_len]
ji_y = ji_y[:min_len]
# ji_y = np.stack([d["scalar-learning rate"]["y"] for d in data.values() if len(d["scalar-loss"]["y"]) == any_len])
# jis = np.array([ji_y])
# ji_x = any_d["scalar-learning rate"]["x"]
loss_x = np.array(any_d["scalar-loss"]["x"],dtype=int)
losses_var = losses.var(0)
jis_mean = jis.mean(0)
jis_mean = np.interp(loss_x, ji_x, jis_mean)
x, y = loss_x[1:], losses_var[1:]/jis_mean[1:]**2
# x = np.linspace(0.2*len(y),len(y),len(y))/len(y)
# x = ji_y
x = np.interp(loss_x, ji_x, ji_y)
x = x[1:]
x_type = "%.8f"
y = moving_average(y, n=25)
x = moving_average(x, n=25)
else:
data = jq_json(".jsons[\"scalar-"+plotname+"\"].content.data[0]")
x = data["x"]
y = data["y"]
plt.plot(x,y)
# np.savetext("paper/fantasticlr/data/%s.csv" % plotname)
# np.savetxt("paper/fantasticlr-cifar10/data/%s-%s.csv" % (expname,plotname), np.array([x,y]).T, header="x y", fmt=" ".join([x_type,'%.8f']), comments="")
y2 = moving_average(y, n=25)
x2 = moving_average(x, n=25)
# np.savetxt("paper/fantasticlr-cifar10/data/%s-%s-smooth.csv" % (expname,plotname), np.array([x2,y2]).T, header="x y", fmt=" ".join([x_type,'%.8f']), comments="")
if plotname == "dLdJI" or plotname == "dEdJI":
break
fontsize = 2
plt.tight_layout()
# plt.legend()
plt.title(plotname)
# np.savetxt("/tmp/scalar1d-%s.txt" % (plotname), [x,y])
# plt.savefig("paper/fantasticlr/img/scalar1d-%s.pdf" % (plotname))
plt.show()
# plt.savefig("paper/fantasticlr/img/scalar1d-%s.pdf" % (plotname))
# save as csv
# np.savetxt("paper/measures/data/%s-%s.csv" % (filename,plotname), data, header="x y z", fmt=" ".join(['%s','%s','%.8f']))
```
#### File: divider/plottypes/2dscalar.py
```python
import sys
import re
import numpy as np
from os import makedirs
from os.path import sep as path_sep
from colored import fg, attr
from util.names import networks, datasets, train_H_plot, test_acc
from util.extract import get_data, get_expname, compile_filename
def register(parser):
parser.add_argument('--label', type=str, help='label')
parser.add_argument('file', type=str, nargs=1, help='number of files')
parser.add_argument('scalarname', type=str, help='plotname')
def plot(plt, args):
plotname = args.scalarname
expdir = "/".join(args.file[0].split("/")[:-1])
filename = args.file[0].split("/")[-1]
expname = filename.split("_")[0]
expname2 = "_".join(filename.split("/")[-1].split("_")[1:])
name_re = compile_filename("{word}-{word}_{word}(_.*|\.json)")
def name_match_fn(d,m):
d["expname"], d["mode"], d["net"] = m[1], m[2], m[3]
data = get_data(args.file, name_re, name_match_fn, exclude_unfinished=False, cache=True)
# print(data.keys())
try:
d = data[filename]
except:
print(fg('red'),filename,"not found",attr('reset'))
return
real_plotname = "scalar2d-[%s]%s" % (d["mode_data"], plotname if plotname.startswith("[") else " "+plotname)
# print(d.keys())
# print(d[plotname].keys())
x, y, z, z_min, z_max = [d[real_plotname][k] for k in ["x","y","z","zmin","zmax"]]
x, y, z = np.array(x), np.array(y), np.array(z)
z_min, z_max = z.min(), z.max()
print(fg('green'),plotname, z_min, z_max,attr('reset'))
print(filename, "Accuracy",test_acc(d))
if plotname == "Entropy":
cmap = 'magma'
cmap = 'inferno'
z_min, z_max = 0, 21.30
elif plotname == "% max Entropy":
cmap = 'magma'
cmap = 'inferno'
# z_min, z_max = 0, 21.30
elif plotname == "[sinceLast] % Patterns Changed":
z_mean = z.mean(1, keepdims=True)
z_max = z.max(1, keepdims=True)
z_min = z.min(1, keepdims=True)
# z = z / z_mean
# z = (z - z_min)/ (z_max - z_min)
# z_helper = z[:,0:]
# z_min, z_max = z_helper.min(), z_helper.max()
# z_min, z_max = -10, 4
z_min, z_max = -0.5, 0.5
# z = np.sign(z)*np.log(np.abs(z)+1e-7)
# z_min, z_max = z.min(), z.max()
# z_min, z_max = -1, 1
# cmap = 'rainbow'
print(z_min, z_max)
cmap = 'viridis'
elif plotname == "[sinceLast] JI(last,current)":
cmap = 'viridis'
z_min, z_max = 0, 1
elif plotname == "[sinceInit] JI(init,current)":
cmap = 'viridis'
# z = np.log(z+1e-4)
z_min, z_max = z.min(), z.max()
# print(z_min, z_max)
# z_min, z_max = -8, -0.05
elif plotname == "% Num Patterns":
cmap = 'twilight_shifted'
z_min, z_max = 0, 1
elif plotname == "% Count of most frequent Pattern":
cmap = 'RdBu_r'
z_min, z_max = 0, 0.376202
elif plotname == "% Hashmap Filled":
cmap = 'RdBu_r'
# z_min, z_max = 0, 0.376202
else:
cmap = 'RdBu'
plotname = plotname.replace("%","percent")
filename = filename.replace(".json","")
# plt.plot(x,y,z)
# plt.imshow(a, cmap='hot', interpolation='nearest')
fontsize = 2
fig, ax = plt.subplots(1,1, figsize=(5,5))
p = ax.pcolormesh(x, y, z, shading='nearest', cmap=cmap, vmin=z_min, vmax=z_max, linewidth=0, rasterized=True)
# ax.set_xlabel('x-label', fontsize=fontsize)
# ax.set_ylabel('y-label', fontsize=fontsize)
# ax.set_title('Title', fontsize=fontsize)
# ax.set_xticklabels(x, fontsize=fontsize)
# ax.set_yticklabels(y, fontsize=fontsize)
ax.set_xticklabels([], fontsize=fontsize)
ax.set_yticklabels([], fontsize=fontsize)
ax.set_xticks([])
ax.set_yticks([])
plt.tight_layout()
fig.colorbar(p)
if not args.save:
plt.show()
return
if not args.label:
raise ValueError("Label missing")
label = args.label
netname = networks[d["net"]]
makedirs("paper_plots/measures-%s/img/" % label, exist_ok=True)
# plt.savefig("paper_plots/measures%s/img/%s-%s.pdf" % (label,filename,plotname))
# save as csv
x, y = np.meshgrid(np.array(x).astype(int),np.array(y).astype(int))
z = np.array(z)
data = np.stack([x,y,z]).reshape([3,-1]).T
makedirs("paper_plots/measures-%s/data/%s/%s" % (label, d["mode"], netname), exist_ok=True)
print("paper_plots/measures-%s/data/%s/%s/%s-%s.csv" % (label,d["mode"],netname,filename,plotname))
np.savetxt("paper_plots/measures-%s/data/%s/%s/%s-%s.csv" % (label,d["mode"],netname,filename,plotname), data, header="x y z", fmt=" ".join(['%s','%s','%.8f']))
```
#### File: divider/plottypes/acc_vs_entropy.py
```python
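# NOTE: this fragment references helpers (word_re, value_re, get_data, get_name,
# train_H_mean, train_acc, test_acc) that the sibling plottypes modules pull in from
# util.names / util.extract; their exact origins here are assumptions, so only the
# unambiguous imports are added.
import re
import matplotlib.pyplot as plt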
def acc_vs_entropy():
name_re = re.compile(("_".join(["{word}"]*5+["{value}"]*3+["{word}","{value}"])+".*").format(word=word_re,value=value_re))
def name_match_fn(d,m):
d["expname"], d["net"], d["norm"], d["activation"], d["optimizer"], d["learning_rate"], d["weight_decay"], d["momentum"], d["scheduler"], d["bias"] = m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8], m[9], m[10]
data = get_data(name_re, name_match_fn)
ax_x = 2
ax_y = 1
fig, axs = plt.subplots(ax_y,ax_x, figsize=(10,5))
axs = axs.flat
global_color = "net"
global_color2 = "net"
plots = [(train_H_mean,train_acc, global_color),(train_H_mean,test_acc, global_color)]#, (train_H_mean,train_acc, global_color2),(train_H_mean,test_acc, global_color2)]
for ax, (x_label,y_label, c_label) in zip(axs,plots):
x = [x_label(d) for d in data.values()]
y = [y_label(d) for d in data.values()]
labels = [d[c_label] for d in data.values()]
labels_unique = list(set(labels))
colors = [labels_unique.index(c) for c in labels]
scatter = ax.scatter(x, y, c=colors, label=labels)
ax.set_xlabel(get_name(x_label))
ax.set_ylabel(get_name(y_label))
ax.set_title('colored by %s' % (c_label))
ax.legend(scatter.legend_elements()[0], labels_unique, loc="lower right")
plt.show()
```
#### File: divider/plottypes/lrscaling.py
```python
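# NOTE: this fragment references the same helpers as the sibling plottypes modules
# (word_re, value_re, get_data, get_name, networks, datasets and the metric helpers,
# presumably from util.names / util.extract); only the unambiguous imports are added.
import re
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA  # assumed source of the PCA used below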
def lr_scale_scalar_plots(expname, net):
name_re = re.compile("%s-%s__%s\.json" % (word_re,word_re,value_re))
def name_match_fn(d,m):
d["expname"], d["net"], d["lrscale"] = m[1], m[2], m[3]
data = get_data(name_re, name_match_fn)
print(expname)
dataset = datasets[expname.split("-")[-1]]
print(dataset)
L = locals()
L.update(globals())
# plt.style.use('ggplot')
keys = list(data.keys())
lrscales = sorted(list(set([d["lrscale"] for d in data.values()])), key=lambda s: 1/float(s[1:]) if s.startswith(":") else float(s))
scatter = None
# lrscales = lrscales[len(lrscales)//4:3*len(lrscales)//4:2]
thresh0 = 20
thresh = 30
every = 4
lrscales = list(filter(lambda s: int(s[1:])%every or int(s[1:])<=thresh0 if s.startswith(":") else int(s)%every or int(s)<=thresh0, lrscales))
lrscales = list(filter(lambda s: int(s[1:])<=thresh if s.startswith(":") else int(s)<=thresh, lrscales))
print(lrscales)
C, ci = {}, 0
for l in lrscales:
# C[l] = np.log(1/float(l[1:]) if l.startswith(":") else float(l))
C[l] = 1/float(l[1:]) if l.startswith(":") else float(l)
ci += 1
cmax = max(C.values())
cmin = min(C.values())
ax_x = 2
# plots = [(train_H_mean,train_acc),(train_H_mean,test_acc),(train_step,train_H_mean_per_step),(train_step,Jaccard2last_max),(train_step,Jaccard2last_mean),(train_step,Jaccard2last_min)]
# plots = [(train_H_correlation,train_acc),(train_step,Jaccard2last_mean)]
# plots = [(train_H_mean,train_acc),(train_step,Jaccard2last_mean)]
plots = [(train_H_mean,train_acc),(train_step,Jaccard2last_median)]
# plots = [(train_H_mean,train_acc),(train_step,Jaccard2last_max)]
# plots = [(train_H_mean,train_acc),(train_step,Jaccard2last_mean_thresh)]
ax_y = len(plots)//ax_x
fig, axs = plt.subplots(ax_y,ax_x, figsize=(10,2.5), gridspec_kw={'width_ratios': [1, 3]})
data_keys = ["%s-%s__lrscale=%s.json" % (expname,net,l) for l in lrscales]
data_keys = list(filter(lambda k: k in data, data_keys))
for_csv = {
"lr": [],
}
for plot1, plot2 in plots:
for_csv[get_name(plot1)] = []
for_csv[get_name(plot2)] = []
for ax, (x_label,y_label) in zip(axs.T.flat,plots):
ax.set_title(networks[net])
if not x_label or not y_label:
continue
x = [x_label(data[key]) for key in data_keys]
y = [y_label(data[key]) for key in data_keys]
color = [C[data[key]["lrscale"]] for key in data_keys]
label = [data[key]["lrscale"] for key in data_keys]
# get correlation
if x_label == train_H_correlation:
x = np.stack(x).reshape([len(data_keys),-1])
y = np.stack(y).reshape([-1,1])
D = x
# D = np.concatenate([x,y],1)
D -= np.mean(D,0)
pca = PCA(1)
D = pca.fit_transform(D)
x = D
# c = plt.cm.viridis((color-cmin)/(cmax-cmin))
scatter = ax.scatter(x,y,c=color,label=label)
# ax.plot(x,y, '*:')#,c=c,label=l)
camelnames = {
"train_H_mean": "mean Activation Entropy",
"train_acc": "Train Accuracy",
"test": "Test Accuracy",
"Jaccard2last_median": "median Jaccard Index",
"train_step": "Training Time"
}
ax.set_xlabel(camelnames[get_name(x_label)])
ax.set_ylabel(camelnames[get_name(y_label)])
fig.subplots_adjust(left=0.1,right=1.1,bottom=0.1,wspace=0.3, hspace=None)
hlines_at = set()
for key, l, lr in zip(data_keys, label, color):
c = plt.cm.viridis((lr-cmin)/(cmax-cmin))
# any_len = len(any_d["scalar-loss"]["y"])
# losses = np.stack([d["scalar-loss"]["y"] for d in data.values() if len(d["scalar-loss"]["y"]) == any_len])
# jis = np.stack([Jaccard2last_mean_over_time(d) for d in data.values()])
# ji_x = np.array(any_d["scalar2d-[tm|trd][sinceLast] JI(last,current)"]["x"],dtype=np.int)
# ji_y = np.mean(np.array(any_d["scalar2d-[tm|trd][sinceLast] JI(last,current)"]["z"]),0)
# loss_x = np.array(any_d["scalar-loss"]["x"],dtype=np.int)
x2 = None
y2 = None
if x_label == train_H_mean:
x2 = train_H_over_time(data[key])
if x_label == train_step:
x2 = train_step_over_time(data[key])
if y_label == train_acc:
y2 = train_acc_over_time(data[key])
elif y_label == Δ_acc:
y2 = Δ_acc_over_time(data[key])
elif y_label == Jaccard2last_max:
y2 = Jaccard2last_max_over_time(data[key])
# y2 = deriv(y2)
# x2 = x2[1:]
elif y_label == Jaccard2last_min:
y2 = Jaccard2last_min_over_time(data[key])
# y2 = deriv(y2)
# x2 = x2[1:]
elif y_label == Jaccard2last_mean:
y2 = Jaccard2last_mean_over_time(data[key])
elif y_label == Jaccard2last_mean_thresh:
y2 = Jaccard2last_mean_over_time_thres(data[key])
# y2 = deriv(y2)
# x2 = x2[1:]
elif y_label == Jaccard2last_median:
y2 = Jaccard2last_median_over_time(data[key])
elif y_label == train_H_mean_per_step:
# x2 = [0]+x2
y2 = train_H_over_time(data[key])
else:
continue
if x2 is not None and y2 is not None:
f = len(y2)//len(x2)
f2 = len(x2)//len(y2)
if f >= 1:
# assert f*len(x2) == len(y2)
# y2 = y2[::f]
y2 = y2[::f][:len(x2)]
if f2 >= 1:
# assert f2*len(y2) == len(x2)
# x2 = x2[::f2]
x2 = x2[::f2][:len(y2)]
ax.plot(x2,y2,color=c,alpha=0.5)
# save data to be stored in csv file
if x_label == train_H_mean:
x2 = x2[1:]
if y_label == train_acc:
y2 = y2[1:]
if x_label == plots[0][0]:
for_csv["lr"].append(lr)
for_csv[get_name(x_label)].append(x2)
for_csv[get_name(y_label)].append(y2)
if x_label == train_step:
# determine learning rate drops
scalarlr_tag = 'scalar-learning rate' if 'scalar-learning rate' in data[key] else 'scalar-learning rate (group 0)'
scalarlr_train_steps = np.array(data[key][scalarlr_tag]['x'])
scalarlr_values = np.array(data[key][scalarlr_tag]['y'])
scalarlr_changes_at = np.where(scalarlr_values[:-1] != scalarlr_values[1:])[0]
scalarlr_changes_at = set(scalarlr_changes_at).difference(hlines_at)
if len(scalarlr_changes_at):
print(scalarlr_changes_at)
ax.vlines(scalarlr_train_steps[list(scalarlr_changes_at)], ymin=0, ymax=1)
hlines_at.update(scalarlr_changes_at)
# f = fig.add_subplot(111, frameon=False)
# plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
# plt.grid(False)
# plt.tight_layout()
if scatter:
plt.colorbar(scatter, ax=axs.ravel().tolist(), norm=matplotlib.colors.LogNorm())
# plt.title("%s. (Colored by log(learning rate))" % networks[net])
# plt.rcParams['figure.figsize'] = (10.0, 7.0)
# plt.rcParams['axes.titlesize'] = "small"
# font = {'family' : 'normal',
# 'weight' : 'normal',
# 'size' : 12}
# rc('font', **font)
# plt.savefig("%s-%s.png" % (expname, net), dpi=200)
plt.title(networks[net])
for key, data in for_csv.items():
if key == "lr":
continue
if key == "train_H_correlation":
continue
if key == "train_acc":
continue
np.savetxt("paper/lrscaling-%s/data/%s-%s.csv" % (dataset,net,key), np.stack(data).T, header=" ".join(str(s) for s in for_csv["lr"]), fmt=" ".join(['%.5f']*len(for_csv["lr"])))
plt.savefig("paper/lrscaling-%s/img/lrscaling-%s.pdf" % (dataset,net))
# plt.show()
```
#### File: divider/plottypes/scaletable.py
```python
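# NOTE: this fragment references word_re, value_re and get_data (presumably from
# util.names / util.extract, as in the sibling plottypes modules); only the
# unambiguous imports are added.
import re
import numpy as np
import matplotlib.pyplot as plt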
def scale_table():
name_re = re.compile(("_".join(["{word}"]*5+["{value}"]*3+["{word}","{value}"]+["{value}"]*3)+".*\.json").format(word=word_re,value=value_re))
def name_match_fn(d,m):
d["expname"], d["net"], d["norm"], d["activation"], d["optimizer"], d["learning_rate"], d["weight_decay"], d["momentum"], d["scheduler"], d["bias"], d["lrscale"], d["momscale"], d["bsscale"] = m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8], m[9], m[10], m[11], m[12], m[13]
data = get_data(name_re, name_match_fn)
scalar2d = "scalar2d-[tm|trd][sinceLast] JI(last,current)"
# scalar2d = "scalar2d-[tm|trd] % max Entropy"
# scalar2d = "scalar2d-[tm|trd][sinceLast][>T] JI(last,current)"
data = dict(filter(lambda elem: scalar2d in elem[1], data.items()))
keys = list(data.keys())
lrscales = sorted(list(set([d["lrscale"] for d in data.values()])), key=lambda s: float(s))
bsscales = sorted(list(set([d["bsscale"] for d in data.values()])), key=lambda s: float(s))
momscales = sorted(list(set([d["momscale"] for d in data.values()])), key=lambda s: float(s))
# ax_x = 7
# ax_y = 7
# plots = np.random.choice(keys,ax_x*ax_y)
# fig, axs = plt.subplots(ax_y,ax_x, figsize=(10,5))
vars = ["lrscales", "momscales"]
# vars = ["lrscales", "bsscales"]
# vars = ["momscales", "bsscales"]
var_x = lrscales if "lrscales" in vars else momscales
var_y = bsscales if "bsscales" in vars else momscales
str_x = "learning rate scaling" if "lrscales" in vars else "momentum scaling"
str_y = "batch size scaling" if "bsscales" in vars else "momentum scaling"
lr_str = "%s" if "lrscales" in vars else "1.0"
mom_str = "%s" if "momscales" in vars else "1.0"
bs_str = "%s" if "bsscales" in vars else "1.0"
ax_x = len(var_x)
ax_y = len(var_y)
plots = ["scalings-c10_convnet_batchnorm_relu_sgd_lr=std_wd=std_mom=std_multistep_bias=std_lrscale=%s_momscale=%s_bsscale=%s.json" % (lr_str,mom_str,bs_str) % (l,m) for m in var_y for l in var_x]
# plots = ["scalings-c10_resnet20,basicblock_pre_batchnorm_relu_sgd_lr=std_wd=std_mom=std_multistep_bias=std_lrscale=%s_momscale=%s_bsscale=%s.json" % (lr_str,mom_str,bs_str) % (l,m) for l in var_x for m in var_y]
fig, axs = plt.subplots(ax_y,ax_x, figsize=(10,5))
min_z = min([np.array(data[key][scalar2d]["z"]).min() for key in plots if key in data])
max_z = max([np.array(data[key][scalar2d]["z"]).max() for key in plots if key in data])
for ax, key in zip(axs.flat,plots):
if key not in data:
print(key,"not found")
continue
d = data[key]
ax.pcolormesh(d[scalar2d]["x"],d[scalar2d]["y"],d[scalar2d]["z"], vmin=min_z, vmax=max_z)
# ax.set_title("lr=%s mom=%s bs=%s" % (d["lrscale"], d["momscale"], d["bsscale"]))
ax.tick_params(
axis='both', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the top edge are off
right=False, # ticks along the top edge are off
labelbottom=False,
labelleft=False
) # labels along the bottom edge are off
fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
for i in range(ax_x):
plt.setp(axs[-1, i], xlabel=var_x[i])
for i in range(ax_y):
plt.setp(axs[i, 0], ylabel=var_y[i])
plt.grid(False)
plt.xlabel(str_x)
plt.ylabel(str_y)
plt.title("Value: \"%s\" for %s-vs-%s" % (scalar2d, str_x, str_y))
plt.show()
```
#### File: util/miniflask/setup.py
```python
import codecs
import os
import re
import setuptools
NAME = "miniflask"
PACKAGES = setuptools.find_packages(where="src")
META_PATH = os.path.join("src", NAME, "__init__.py")
KEYWORDS = [NAME, "plugin-engine", "plugin-system"]
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Environment :: Console",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Operating System :: Unix",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
INSTALL_REQUIRES = [
"colored"
]
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""
Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(
r"^__{meta}__\s+=\s+['\"]([^'\"]*)['\"]".format(meta=meta),
META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
if __name__ == "__main__":
setuptools.setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=find_meta("url"),
version=find_meta("version"),
author=find_meta("author"),
maintainer=find_meta("author"),
long_description=read("README.md"),
long_description_content_type="text/markdown",
packages=PACKAGES,
package_dir={"": "src"},
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
install_requires=INSTALL_REQUIRES,
python_requires='>=3.6',
)
```
#### File: tests/outervar/test_outervar.py
```python
from pathlib import Path
import miniflask # noqa: E402
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
def test_outervar():
event = mf.event
mf.load("module1")
var_a = 42
mf.event.main()
del event, var_a # now unused
```
#### File: modules/modulenomfargs/__init__.py
```python
def func(x, **kwargs):
del kwargs # unused
return x
def register(mf):
mf.register_event('func', func, unique=True)
```
#### File: models/dosomething/__init__.py
```python
from miniflask.exceptions import StateKeyError
def dosomething(state, event):
del event # unused
print("in event: variable =", state["variable"])
if "new_variable" in state:
print("in event: new_variable =", state["new_variable"])
def main(state, event):
state["new_variable"] = 42
del state["new_variable"]
print("before event", state["variable"])
with state.temporary({
"variable": 42
}):
event.dosomething()
print("after event", state["variable"])
try:
_ = state["new_variable"]
print("variable 'new_variable' should not exist")
except StateKeyError:
pass
with state.temporary({
"new_variable": 12345
}):
event.dosomething()
try:
_ = state["new_variable"]
print("variable 'new_variable' should not exist")
except StateKeyError:
pass
def register(mf):
mf.register_defaults({
"variable": 0
})
mf.register_event('dosomething', dosomething)
mf.register_event('main', main)
```
#### File: tests/temporary/test_temporary.py
```python
from pathlib import Path
import miniflask # noqa: E402
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
def test_temporary():
mf.run(modules=["dosomething"], argv=[])
```
#### File: datasets/imagenet/__init__.py
```python
def dataset(state, event):
del state, event # unused
return "imagenet"
def register(mf):
mf.register_event("dataset", dataset, unique=True)
```
#### File: tests/used_case1/test_used_case1.py
```python
from pathlib import Path
import miniflask # noqa: E402
mf = miniflask.init(
module_dirs=str(Path(__file__).parent / "modules"),
debug=True
)
def test_used_case1():
mf.run(argv=[])
``` |
{
"source": "jgvhabets/dyskinesia_neurophys",
"score": 3
} |
#### File: code/lfpecog_features/feats_spectral_baseline.py
```python
import os
from typing import Any
import json
import numpy as np
from scipy.signal import welch, cwt, morlet2
class EphyBaseLevel:
""" Create base data per level """
def __init__(
self,
runClass,
dtype,
level,
row,
base_ind
):
self.level = level
self.rawsig = getattr(runClass, f'{dtype}_arr')[
row, base_ind[0]:base_ind[1]
]
fs = getattr(runClass, f'{dtype}_Fs')
self.psd_512 = welch(
self.rawsig,
fs=fs,
nperseg=512, noverlap=256,
)
self.psd_256 = welch(
self.rawsig,
fs=fs,
nperseg=256, noverlap=128,
)
w = 8 # morlet2 omega0 (wavelet width parameter)
base_f = np.linspace(1, fs / 2, 100)
widths = (fs * w) / (2 * base_f * np.pi)
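# note: widths maps the target frequencies to CWT scales (s = w * fs / (2 * pi * f)),
# following the scipy morlet2/cwt convention for frequency-targeted wavelets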
time = np.arange(len(self.rawsig))
scp_cfs = cwt(
self.rawsig, morlet2,
widths=widths, w=w, dtype='complex128'
)
self.wav = {
'time': time,
'freq': base_f,
'psd': np.abs(scp_cfs)
}
self.wavlog = {
'time': time,
'freq': base_f,
'psd': np.log10(np.abs(scp_cfs))
}
def __repr__(self):
return (
f'{self.__class__.__name__} Class '
f'for {self.level}')
class EphyBaseData:
"""Create data per ecog/ lfp-L / lfp-R"""
def __init__(self, runClass, runname, dtype):
self.dtype = dtype
base_ind_f = os.path.join( # make sure projectpath is cwd
'data/analysis_derivatives/'
'base_spectral_run_indices.json'
)
with open(base_ind_f) as jsonfile:
base_ind = json.load(jsonfile)
sub = runClass.sub
ses = runClass.ses
base_ind = base_ind[sub][ses][dtype]
for row, level in enumerate(
getattr(runClass, f'{dtype}_names')
):
# iterate all levels, skip first time-row
if np.logical_and(row == 0, level == 'time'):
continue
setattr(
self,
level, # key of attr
EphyBaseLevel(
runClass,
dtype,
level,
row,
base_ind
)
)
def __repr__(self):
return (
f'{self.__class__.__name__} Class '
f'for {self.dtype}')
class EphyBase():
'''Baseline creation for spectral analyses'''
def __init__(self, runClass, runname: str):
self.runClass = runClass.runs[runname]
self.runname = runname
self.ecog = EphyBaseData(
runClass=self.runClass,
runname=self.runname,
dtype='ecog',
)
self.lfp_left = EphyBaseData(
runClass=self.runClass,
runname=self.runname,
dtype='lfp_left',
)
self.lfp_right = EphyBaseData(
runClass=self.runClass,
runname=self.runname,
dtype='lfp_right',
)
def __repr__(self):
return (f'{self.__class__.__name__}: '
f'Main EphysBaseline Class')
```
#### File: code/lfpecog_features/handTap_detect.py
```python
from xmlrpc.client import Boolean
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
from scipy.signal import find_peaks
def handTapDetector(
SubClass,
buffsec: float=.05,
TAPthr: float=5e-7,
MOVthr: float=2e-7,
runs_excl: list=[],
min_len=0.1,
check_plots=False,
plot_annot=False,
plotdir=None,
savedir=None,
):
'''
Function to detect blocks of movement in handtapping
runs. Thresholds the accelerometer signal vector magnitude
(SVM): samples above threshold are grouped into blocks,
consecutive above-threshold samples closer than a fixed gap
are merged into one block, and blocks shorter than min_len
are discarded.
Arguments:
- SubClass: Class with imported preproc-data
- buffsec: seconds to include in a buff-block
- TAPthr: SVM threshold above which a sample counts as a tap
- MOVthr: SVM threshold above which a sample counts as (other) movement
- runs_excl: if runs have to be skipped
- min_len (float): min block length in seconds
- check_plots (Boolean): make plots to check
algorithm or not
- plot_annot: if video-annotated times should be
plotted -> gives file_path here, otherwise False
- plotdir: directory where to save plot if created
- savedir: directory where to save taps
Returns:
- RunMoveBlocks (dict): contains dict's per run
with active blocks, contains the ACC-timestamps.
PM: FOR SUB-008 THIS WAS INDEX-FINGER TAPPING!
'''
RunMovBlocks = {}
for run in SubClass.runs_incl:
if run in runs_excl: continue
print(f'\nStart {run}')
# time = SubClass.runs[run].acc_right_arr[0, :]
sideblocks = {}
# calculate signal vector magnitudes
for side in ['left', 'right']:
sideblocks[side] = {}
s_acc = f'acc_{side}_arr'
svm = np.sqrt(
getattr(SubClass.runs[run], s_acc)[1, :]**2 +
getattr(SubClass.runs[run], s_acc)[2, :]**2 +
getattr(SubClass.runs[run], s_acc)[3, :]**2
) # calculate sign vector magnitude
accFs = getattr(SubClass.runs[run], f'acc_{side}_Fs')
min_len_n = min_len / (1 / accFs) # n of min samples in tap
iState = {
'Taps': np.where(svm > TAPthr)[0],
'Moves': np.where(svm > MOVthr)[0]
}
for i in iState['Taps']: # iterate every tap index
# find Tap-i in Mov-i's, and delete: prevent doubles
idel = np.where(iState['Moves'] == i)
iState['Moves'] = np.delete(iState['Moves'], idel)
gaps = 0.5 # seconds which ends a tap block
gap_n = gaps / (1 / accFs) # n of samples in gap
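# e.g. with accFs = 200 Hz (hypothetical value), gap_n = 100 samples: above-threshold
# samples more than 0.5 s apart start a new block, and blocks shorter than
# min_len_n samples are dropped below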
for state in iState: sideblocks[side][state] = {}
for state in iState:
blockN = 0
block = []
for i, idiff in zip(
iState[state][:-1], np.diff(iState[state])
):
if idiff < gap_n:
# add consecut i's betw 2 i's in seconds!)
iadd = list(np.linspace(
start=i,
stop=i + idiff - 1,
num=idiff,
) / accFs)
block.extend(iadd)
else:
if len(block) > min_len_n:
sideblocks[side][state][blockN] = block
blockN += 1
block = []
# Check Tap-patterns (needs timestamp for annotation-check)
newTaps, extraMoves = tap_pattern_checker(
run=run, side=side,
tapblocks=sideblocks[side]['Taps'],
acc_og=getattr(SubClass.runs[run], s_acc),
accFs=accFs,
plot=check_plots,
plotdir=plotdir,
)
sideblocks[side]['Taps'] = newTaps
starti = len(sideblocks[side]['Moves'].keys())
for movb in extraMoves:
sideblocks[side]['Moves'][starti] = extraMoves[movb]
starti += 1
RunMovBlocks[run] = sideblocks
# add blocks in timestamps next to second-floats
# load tsv with start timestamps of neurophys+acc recording
bids_dir = ('/Users/jeroenhabets/OneDrive - Charité - Uni'
'versitätsmedizin Berlin/BIDS_Berlin_ECOG_LFP/rawdata')
sub = f'sub-{SubClass.runs[run].sub}'
ses = f'ses-{SubClass.runs[run].ses}'
scans = os.path.join(
bids_dir, sub, ses, f'{sub}_{ses}_scans.tsv'
)
scans = pd.read_csv(scans, sep='\t')
# convert detected second-timestamps into pd-timestamps
dopatime = run[-6:]
# find matching starttime in scans.tsv
for i in range(scans.shape[0]):
if dopatime in scans['filename'][i]:
RunMovBlocks[run]['starttime'] = pd.Timestamp(
scans['acq_time'][i]
)
# add timedeltas to starttime
for side in ['left', 'right']:
for state in ['Taps', 'Moves']:
RunMovBlocks[run][f'{side}_{state}_stamps'] = {}
for block in RunMovBlocks[run][side][state]:
ds = []
for t in RunMovBlocks[run][side][state][block]:
ds.append(pd.Timedelta(t, 'sec') )
RunMovBlocks[run][f'{side}_{state}_stamps'][block
] = [RunMovBlocks[run]['starttime'] + d for d in ds]
if check_plots:
check_plots_handTapDetect(
SubClass,
RunMovBlocks,
run,
plotdir,
plot_annot,
fignamedetail=(f'buff{str(buffsec)[2:]}_Tap'
f'{str(TAPthr)[:1]}_{str(TAPthr)[-2:]}_Mov'
f'{str(MOVthr)[:1]}_{str(MOVthr)[-2:]}_'
f'gap{gaps * 1000}'
)
)
if savedir:
tap_saver(RunMovBlocks, savedir, sub)
return RunMovBlocks
def check_plots_handTapDetect(
SubClass, RunMovBlocks, run,
plotdir, plot_annot, fignamedetail,
):
print(f'PLOTTING FIGURE {run} .....')
# create range with timestamps along acc-array, instead of
# floats of seconds since start (use for x-axis plot)
tstart = RunMovBlocks[run]['starttime'] # first timestamp in array
nsamples = getattr(SubClass.runs[run],'acc_left_arr').shape[-1]
arr_fs = getattr(SubClass.runs[run],'acc_left_Fs')
tend = tstart + pd.Timedelta(1 / arr_fs, unit='s') * nsamples
timeax = pd.date_range(
start=tstart, end=tend, freq=f'{1000 / arr_fs}ms')[:-1]
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
for side in ['acc_left_arr', 'acc_right_arr']:
for r, axis in zip([1, 2, 3], ['X', 'Y', 'Z']):
ax.plot(
# getattr(SubClass.runs[run], side)[0, :],
timeax,
getattr(SubClass.runs[run], side)[r, :].T,
label=f'{axis} {side[4:-4]}',
)
ylim1=-3e-6
ylim2=5e-6
# clrs = {'left': 'steelblue', 'right': 'y'} # colors for sides
alpha=.8
alpha2=.2
kwparms = {
'left_Moves': {
'color': 'springgreen',
'alpha': alpha2,
},
'left_Taps': {
'color': 'green',
'alpha': alpha,
},
'right_Moves': {
'color': 'gold',
'alpha': alpha2,
},
'right_Taps': {
'color': 'purple',
'alpha': alpha,
}
}
for side in ['left', 'right']:
# color detected states
for state in ['Taps', 'Moves']:
for n, b in enumerate(RunMovBlocks[run][side][state]):
if n == 0: # add legend-label only once
ax.fill_between(
RunMovBlocks[run][f'{side}_{state}_stamps'][b],
y1=ylim1, y2=ylim2,
label=f'{state} {side} (Acc-detect.)',
**kwparms[f'{side}_{state}'],
)
else:
ax.fill_between(
RunMovBlocks[run][f'{side}_{state}_stamps'][b],
y1=ylim1, y2=ylim2,
**kwparms[f'{side}_{state}'],
)
# add manual annotations
if plot_annot:
annot = np.load(plot_annot, allow_pickle=True).item()
try:
ax.scatter(
annot[run][f'{side}_stamps'],
[4e-6] * len(annot[run][f'{side}_stamps']),
c=kwparms[f'{side}_Taps']['color'], edgecolor='k',
s=100, alpha=.5, marker='*',
label=f'Tap {side} (Video-ann.)',
)
except KeyError:
print('No video-annotations for ', run)
pass
ax.set_ylabel('ACC (m/s/s)')
ax.set_xlabel('Time (sec)')
ax.set_ylim(ylim1, ylim2,)
n_xticks = 7
xticks = timeax[::len(timeax) // n_xticks]
xtlabels = timeax[::len(timeax) // n_xticks].strftime('%X')
ax.set_xticks(xticks)
ax.set_xticklabels(xtlabels)
if plot_annot:
ax.set_title(f'Tap Acc-Detection vs Video-Annotation\n'
f'{run} ({SubClass.runs[run].sub} '
f'{SubClass.runs[run].ses})', size=14)
else:
ax.set_title(f'Tap Detection {run}\n'
f'({SubClass.runs[run].sub} '
f'{SubClass.runs[run].ses})', size=14)
ax.legend(
loc='upper left', bbox_to_anchor=(-.1, -.13),
ncol=4, frameon=False, fontsize=12,
)
plt.tight_layout(pad=.1)
sub = SubClass.runs[run].sub
fname = f'ACC_TapvsMov_detection_{sub}_{run}_{fignamedetail}.png'
plt.savefig(os.path.join(plotdir, fname),
dpi=150, facecolor='w',)
plt.close()
"""
copy function, include transfer from taps to move if not TAP-pattern
"""
def tap_pattern_checker(
run, side, tapblocks, acc_og, accFs, tapAxis='y',
posThr=1e-7, negThr=1e-7, plot=False, plotdir=None,
):
newTaps = {} # true pattern tap-blocks: new Tap Dict
extraMoves={} # false pattern tap-blocks: convert to moves
tap_i = 0 # indices to fill new dicts
mov_i = 0
smooth = False
i_incl = 24
if plot:
fig, axes=plt.subplots(i_incl // 4, 4, figsize=(12,16),
sharey='row')
axes = axes.flatten()
for b in np.arange(len(tapblocks)):
if b >= i_incl: plot = False
peakDict = {
'pos': {
'Thr': posThr,
'dir': 1,
'ind': [],
'top': []
},
'neg': {
'Thr': negThr,
'dir': -1,
'ind': [],
'top': []
}
}
try:
i0 = tapblocks[b][0] * 200
i1 = tapblocks[b][-1] * 200
except KeyError:
# print(f'Block {b} no more tap blocks')
continue
acc = {
'x': acc_og[1, int(i0):int(i1)],
'y': acc_og[2, int(i0):int(i1)],
'z': acc_og[3, int(i0):int(i1)]
}
acc['svm'] = np.sqrt(acc['x']**2 + acc['y']**2 + acc['z']**2)
for sig in acc.keys():
# smoothinng
if smooth:
acc[sig] = pd.Series(acc[sig]).rolling(3).mean()
if plot: axes[b].plot(acc[sig], alpha=.5, label=sig)
for p in peakDict:
peaks = find_peaks(
peakDict[p]['dir'] * acc[tapAxis],
# height=peakDict[p]['Thr'] * .1,
width=1,
distance=25,
prominence=peakDict[p]['Thr'],
wlen=40,
)
if len(peaks[0]) > 0:
if plot:
axes[b].scatter(
peaks[0],
peakDict[p]['dir'] * peaks[1]['prominences'],
# label=f'{p} peaks'
)
peakDict[p]['ind'].extend(peaks[0])
peakDict[p]['top'].extend(peaks[1]['prominences'])
# check pos-neg-neg-pos pattern
peakFound = False
try:
# taps longer than 1 sec -> moves
if len(acc[tapAxis]) > accFs:
extraMoves[mov_i] = tapblocks[b]
mov_i += 1
# check tap-double-sinusoid (+ - - +)
elif sum(np.logical_and(
peakDict['neg']['ind'] > peakDict['pos']['ind'][0],
peakDict['neg']['ind'] < peakDict['pos']['ind'][-1]
)) >= 2: # if there are 2 neg peaks between 2 pos peaks
peakFound = True
newTaps[tap_i] = tapblocks[b]
tap_i += 1
else: # other pattern -> moves
extraMoves[mov_i] = tapblocks[b]
mov_i += 1
except IndexError:
extraMoves[mov_i] = tapblocks[b]
mov_i += 1
if plot:
# axes[b].set_xticks(np.arange(0, len(x), 100), size=10)
# axes[b].set_xticklabels(np.arange(i0, i0 + len(x), 100) / 200, size=10)
# axes[b].set_xlabel('Time (seconds)', size=10)
axes[b].set_ylim(-2e-6, 2e-6)
axes[b].set_ylabel('ACC', size=10)
if b == 0:
axes[b].legend(fontsize=16, ncol=6, frameon=False,
bbox_to_anchor=(0.5, 1.3), loc='upper left')
# add peak detect as color
if peakFound:
axes[b].fill_between(
alpha=.1, color='green',
x=np.arange(len(acc['x'])), y1=-2e-6, y2=2e-6, )
else:
axes[b].fill_between(
alpha=.1, color='red',
x=np.arange(len(acc['x'])), y1=-2e-6, y2=2e-6, )
if plot:
fname = f'TapChecker_{run[-6:]}_{side}_scr'
if smooth: fname += 'smooth'
plt.savefig(
os.path.join(plotdir, fname),
dpi=150, facecolor='w',
)
plt.close()
return newTaps, extraMoves
def tap_saver(
blocks, savedir, sub
):
os.makedirs(savedir, exist_ok=True)
dict_name = f'taps_moves_{sub}'
np.save(
os.path.join(savedir, dict_name),
blocks
)
# TODO: add text file with parameters of blocks
# # Load annotation dict
# video_taps = np.load(os.path.join(deriv_dir, f'{dict_name}.npy'),
# allow_pickle=True).item()
return f'Tap and Moves blocks-dictionary saved for {sub}'
``` |
{
"source": "jgwak1/Hybrid-MBMF",
"score": 2
} |
#### File: Hybrid-MBMF/rl_algorithm/hybrid.py
```python
from stable_baselines3.ddpg import DDPG
from stable_baselines3.sac import SAC
from stable_baselines3.td3 import TD3
import gym
import torch
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutReturn, Schedule, TrainFreq, TrainFrequencyUnit
from stable_baselines3.common.evaluation import evaluate_policy
from typing import NewType, Union, Optional, Dict, Any
from collections import defaultdict
import numpy as np
# internal imports
from rl_algorithm.data_buffer import DataBuffer
from rl_algorithm.model.DR_Model import DynamicsRewardModel
from utils.misc import Test
class hybrid_mbmf:
def __init__(self,
env: gym.Env,
AC_Type: Union[DDPG, SAC, TD3], # Assume the AC agents used for MBAC and MFAC are the same off-policy algorithm.
AC_kwargs: Dict = {},
training_steps: int = 1000,
learning_starts: int = 100,
gradient_steps: int = 100,
train_freq = (1, "episode"), # (1, "step"),
seed = 0,
) -> None:
# for training
self.training_steps = training_steps
self.seed = seed
self.learning_starts = learning_starts
self.gradient_steps = gradient_steps
self.train_freq = TrainFreq(train_freq[0], TrainFrequencyUnit(train_freq[1])) # for compatibility with SB3 train()
# training environment
self.env = DummyVecEnv([lambda: env]) # for compatiblity with SB3 train()
# Model-Free Actor-Critic RL Algorithms that are each trained with only real-data, and both real-data and virtual-data.
self.AC_Type = AC_Type
self.offpolicy_kwargs = {
"policy": "MlpPolicy",
"env": self.env,
"learning_starts": self.learning_starts,
"gradient_steps": self.gradient_steps,
"seed": self.seed,
"verbose": 2
}
self.offpolicy_kwargs.update( AC_kwargs )
self.MFAC = self.AC_Type(**self.offpolicy_kwargs)
self.MBAC = self.AC_Type(**self.offpolicy_kwargs)
# T,R Model
self.Model = DynamicsRewardModel( env = env )
# Following 'DataBuffers' are "external" databuffers which each store real data and virtual data.
# The AC-RL Agents above have their own "internal" databuffers ('rollout-buffer' or 'replay-buffer') used for their learning.
# whether to depends
#self.NewRealData = DataBuffer(buffer_size=int(1e4), observation_space=env.observation_space, action_space= env.action_space)
self.RealDataBuffer = DataBuffer(buffer_size=int(1e4), observation_space=env.observation_space, action_space= env.action_space)
self.VirtualDataBuffer = DataBuffer(buffer_size=int(1e4), observation_space=env.observation_space, action_space= env.action_space)
return
def MBMFAction(self, current_obs, current_timestep):
'''
Selects between actions suggested by MFAC and MBAC.
[2022-03-25]
I think I should instance-level-override the collect_rollout() of self.MFAC object.
This function can be used somewhere in the function-body when overriding collcet_rollout.
Goal is to avoid complicating things and try not to mess too much of SB3 code.
'''
pass
#def SortOut(self, virtual_data_batch: DataBuffer):
def SortOut(self,
real_obs: torch.Tensor, action: torch.Tensor,
virtual_nextobs: torch.Tensor, virtual_reward: torch.Tensor) -> bool: # returns bool of better-than-nothing or worse-than-nothing
'''
[ Google Docs "Hybrid-MBMF Algorithm Design" ]
Educated-Guess whether the virtual-data is better-than-nothing for training or not. '''
# Compare
# Coach's [ Q_{MF}(s,a) ] ------ (1)
# and
# Boxer's [ r + E_{s’~Μ(s,a)} [ γ * Q_{MF}(next_s, π_{MF}(next_s) ) ] ------ (2)
is_better_than_nothing = False
# (1) Q_{MF}(s,a)
#real_obs = real_obs.to(dtype=torch.float32)
#action = torch.from_numpy(action).to(dtype=torch.float32)
# self.MFAC.critic doc:
'''
# Get current Q-values estimates for each critic network
current_q_values = self.critic(replay_data.observations, replay_data.actions)
'''
'''
Error is coming from
[ torch.modules.nn.flatten.py ]
def forward(self, input: Tensor) -> Tensor:
return input.flatten(self.start_dim, self.end_dim) <<<< here
"real_obs" has to be as following;
tensor([[-0.0615, 0.0281, -0.2272, -0.0502, 0.0960, 0.2686, -0.0184, -0.4265,
-0.8214, 1.1791, 2.2890, 1.8804, -9.0074, -4.5512, 1.0007, -8.1646,
-5.9016]])
In other words, it should be "torch.Size([1, 17])" instead of torch.size([17]), when did tensorobj.shape
'''
# Returned "Coach_Q" is 2 q-values from 2 q-networks
'''
< From source: stable-baselines.common.policies.ContinuousCritic >
def forward(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, ...]:
# Learn the features extractor using the policy loss only
# when the features_extractor is shared with the actor
with th.set_grad_enabled(not self.share_features_extractor):
features = self.extract_features(obs)
qvalue_input = th.cat([features, actions], dim=1)
return tuple(q_net(qvalue_input) for q_net in self.q_networks)
# By default, it creates two critic networks used to reduce overestimation
thanks to clipped Q-learning (cf TD3 paper).
'''
# For some AC methods (like TD3),
# two critic networks are used to reduce overestimation. (a.k.a., clipped Q-learning)
# we can just get the minimum of those two.
Coach_Q = min( self.MFAC.critic( real_obs, action ) )# Get "Q_{MF}( real_obs, action )"
Coach_Q = Coach_Q.item()
# (2) r + E_{s’~Μ(s,a)} [ γ * Q_{MF}(next_s, π_{MF}(next_s) )
discount_rate = self.MFAC.gamma
virtual_reward_scalar = virtual_reward.item()
next_Q = min( self.MFAC.critic( virtual_nextobs, self.MFAC.actor( virtual_nextobs ) ) )
next_Q_scalar = next_Q.item()
Boxer_Q = virtual_reward_scalar + ( discount_rate * next_Q_scalar )
print(f"SortOut\n\tCoach_Q: {Coach_Q}\n\tBoxer_Q: {Boxer_Q}")
''' Distinguish "better than nothing" and "worse than nothing" from a 'VirtualDataBatch'. '''
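# Sketch of the comparison (assumption, not in the original code): treat the virtual
# transition as better-than-nothing when the model-bootstrapped one-step estimate
# matches or exceeds the critic's own estimate.
is_better_than_nothing = Boxer_Q >= Coach_Q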
return is_better_than_nothing
def Improve(self, real_obs, action, virtual_nextobs, virtual_reward) -> Any:
''' Improve the worse-than-nothing virtual data '''
pass
def Learn_Dev(self) -> None:
'''
[2022-03-26]
FIRST THINK HOW TO INCORPORATE THE MODEL VIRTUAL DATA GENERATION HERE.
[1]
Override self.MFAC.collect_rollouts() at instance-level,
so that we can just simply insert(incorporate) the "Action-Select" part to the
existing collect_rollouts() so that I don't mess things up in there?
[ Refer to: https://stackoverflow.com/questions/394770/override-a-method-at-instance-level ]
OR
Make a child-class for class of self.MFAC, and override collect_rollouts()
'''
'''
[2022-03-26]
PSEUDO-CODE for "Hybrid-MBMF"
1. Initialize:
(1) MFAC { Actor, Critic, Buffer(for Only Real-Data) } <--- " Support-Policy "
(2) MBAC { Actor, Critic, Buffer(for Both Real-Data AND Virtual-Data)} <--- " Target-Policy "
(3) MODEL { P_{θ}(s’,r | s,a) }
(4) Virtual-Data-Buffer # Basically, Real-Data-Buffer is MFAC::Buffer
2. For N epochs do:
3.
4. MFAC interacts with MDP, and collects Real-Data. { Have MFAC very explorative in the beginning. }
5. Train MFAC with Real-Data.
6. Train MODEL using Accumulated Real-Data w/ Supervised Learning.
7. For M steps do:
8. Generate Virtual-Data using MODEL.
(1) Sample s_{t} from MFAC::Buffer uniformly at random.
(2) Apply 1 Random Action to sampled s_{t} making it the new s_{t}
{ Reasoning: To have Virtual-Data not starting from the state we already have as Real-Data, but still near. }
(3) From new s_{t}, step MODEL using MFAC::Actor.
9.
10. For M steps do:
11. SortOut better-than-nothing from generated Virtual-Data using MFAC's Critic. { MFAC is Support-Policy }
12. Improve the worse-than-nothing virtual data?
13. Train MBAC with sorted-out/imporved data.
'''
# Setups
_setup_learn_args = {
"total_timesteps": self.training_steps, "eval_env": self.env,
"callback": None, "eval_freq": -1, "n_eval_episodes": 5,
"log_path": None, "reset_num_timesteps": True
}
total_timesteps_MBAC, callback_MBAC = self.MBAC._setup_learn( **_setup_learn_args )
total_timesteps_MFAC, callback_MFAC = self.MFAC._setup_learn( **_setup_learn_args )
Epochs = 30
Interaction_Steps = 3000
VirtualData_Steps = 3000
# For N epochs do:
for N in range(Epochs): # Try to equate 1 "Epoch" to 1 self.Model.Train function call
# MFAC interacts with MDP, and collects Real-Data.
# { Have MFAC very explorative in the beginning. }
self.MFAC.collect_rollouts(callback = callback_MFAC,
env = self.env,
learning_starts = self.learning_starts,
train_freq = self.train_freq,
replay_buffer = self.MFAC.replay_buffer
)
#replay_buffer = self.RealDataBuffer) # Could collect to our RealDataBuffer instead of "self.MFAC.replay_buffer"
# But this would need training self.MFAC would need to put into self.MFAC.replay_buffer
# This seems redundant.
# :param train_freq: How much experience to collect
# by doing rollouts of current policy.
# Either ``TrainFreq(<n>, TrainFrequencyUnit.STEP)``
# or ``TrainFreq(<n>, TrainFrequencyUnit.EPISODE)``
# with ``<n>`` being an integer greater than 0.
# Train MFAC with Real-Data.
self.MFAC.train(gradient_steps = self.gradient_steps, batch_size = 100)
# TODO: Train MODEL using Accumulated Real-Data w/ Supervised Learning
self.Model.Train( samples_buffer = self.MFAC.replay_buffer, epoch = N ) # could also pass epoch in to here.
# synchronize MBAC replaybuffer and MFAC replaybuffer
self.MBAC.replay_buffer = self.MFAC.replay_buffer
for M in range(VirtualData_Steps):
# TODO:
# [1] Generate Virtual-Data using MODEL.
# (1-1) Sample s_{t} from MFAC::Buffer uniformly at random.
# (1-2) From sampled s_{t}, apply 1 Random Action with the MODEL, generating one virtual-data.
# => { Reasoning: To have Virtual-Data starting from the state which we already have in Real-Data but with different action.; near-distribution virtual data } #
# [2] SortOut better-than-nothing from generated Virtual-Data using MFAC's Critic. { MFAC is Support-Policy }
# [3] Improve the worse-than-nothing virtual data?
# [4] Train MBAC with sorted-out/improved data.
''' Implementation '''
#[1] Generate Virtual-Data using MODEL.
# (1-1) Sample s_{t} from MFAC::Buffer uniformly at random.
# [Reference]: https://github.com/DLR-RM/stable-baselines3/blob/master/stable_baselines3/common/buffers.py
RealData_sample = self.MFAC.replay_buffer.sample(batch_size = 1)
# (1-2) From sampled s_{t}, apply 1 Random Action with the MODEL, generating one virtual-data.
# => { Reasoning: To have Virtual-Data starting from the state which we already have in Real-Data but with different action.; near-distribution virtual data }
sampled_realobs = RealData_sample.observations.to(dtype=torch.float32)
random_action = torch.from_numpy( self.env.action_space.sample() )
random_action = torch.unsqueeze(random_action, axis = 0)
#raw_virtual_data = self.Model.predict(observation= sampled_realobs[0], action= random_action)
raw_virtual_data = self.Model.predict(observation= sampled_realobs, action= random_action)
virtual_nextobs = raw_virtual_data.squeeze()[0:-1].unsqueeze(dim=0) # first to second-last
virtual_reward = raw_virtual_data.squeeze()[-1].unsqueeze(dim=0) # last
#[2] SortOut better-than-nothing from generated Virtual-Data using MFAC's Critic. { MFAC is Support-Policy }
Better_than_Nothing = self.SortOut(real_obs = sampled_realobs, action = random_action,
virtual_nextobs = virtual_nextobs, virtual_reward = virtual_reward)
if Better_than_Nothing:
pass
else:
#[3] Improve the worse-than-nothing virtual data?
pass
#
# self.MBAC.replay_buffer.extend( virtual_data )
#
#[4] Train MBAC with sorted-out/improved data.
#self.MBAC.replay_buffer.extend() # extend: Add a new batch of transitions to the buffer
self.MBAC.train(gradient_steps = self.gradient_steps, batch_size = 100)
pass
'''
currstep = 1
while currstep <= self.training_steps:
# [2022-03-26] Write based on the pseudo-code above.
if currstep > 0 and currstep > self.learning_starts:
# MFAC being trained with real-data
self.MFAC.train(gradient_steps = self.gradient_steps, batch_size = 100)
# MBAC trained by a batch sampled form a buffer that contains both virtual-data and real-data
self.MBAC.train(gradient_steps = self.gradient_steps, batch_size = 100)
currstep += 1
'''
# eval and Test
mean_reward, std_reward = evaluate_policy(self.MFAC, env = self.env, n_eval_episodes= 10)
print("mean_reward:{}\nstd_reward:{}".format(mean_reward, std_reward))
Test( self.MFAC, self.env )
return
def Learn(self) -> None:
'''
[Refer to]
"MBPO w/ DeepRL" Pseudo-code (Page 6 of "When to Trust Your Model-Based Policy Optimization"; <NAME>, et al. NIPS 2019)
1. Initialize target-policy, Model 'P_{θ}(s’,r | s,a)', real-dataset 'D_real', model-dataset 'D_virtual'.
2. For N epochs do:
3. Train Model on D_real via maximum likelihood.
4. For E steps do:
5. Take action in environment according to target-policy;
7. Add experience to D_real.
8. for M model-rollouts do:
9. Sample s_{t} uniformly from D_real
10. Perform k-step model-rollout starting from s_{t} using target-policy, and add to D_virtual.
11.
12. for G gradient updates do:
13. Update target-policy parameters with model-data (D_virtual).
"MBPO Github Repo (by Authors)" : https://github.com/JannerM/mbpo
--> Note that MBPO uses an ensemble of models
'''
'''
[2022-03-24 NOTES]
TODO:
(4) Model proto-type
- How to derive the Model's input and output layers from input_env.
'''
#eps= 1
#eps_rewsum = defaultdict(float)
_setup_learn_args = {
"total_timesteps": self.training_steps, "eval_env": self.env,
"callback": None, "eval_freq": -1, "n_eval_episodes": 5,
"log_path": None, "reset_num_timesteps": True
}
total_timesteps_MBAC, callback_MBAC = self.MBAC._setup_learn( **_setup_learn_args )
total_timesteps_MFAC, callback_MFAC = self.MFAC._setup_learn( **_setup_learn_args )
currstep = 1
while currstep < self.training_steps:
'''
[2022-03-25]
*** Plans ***
[1]
Override self.MFAC.collect_rollouts() at instance level,
so that we can simply insert (incorporate) the "Action-Select" part into the
existing collect_rollouts() without messing things up in there? (A sketch of this follows after this note.)
[ Refer to: https://stackoverflow.com/questions/394770/override-a-method-at-instance-level ]
[2]
Maybe could incorporate the "Model's virtual-data generation" also in the overriding of collect_rollouts()?
Virtual-data will anyways be added to MBAC's replay-buffer.
Question is:
what should be the (s,a) of virtual-data?
'''
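# [Illustrative sketch only, assumed usage, not original code] Overriding a bound method at
# instance level, as mentioned in plan [1] above, could look like:
#   import types
#   def collect_rollouts_with_action_select(self, *args, **kwargs):
#       # ... insert the "Action-Select" logic here, then delegate to the original method ...
#       return type(self).collect_rollouts(self, *args, **kwargs)
#   self.MFAC.collect_rollouts = types.MethodType(collect_rollouts_with_action_select, self.MFAC)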
# MFAC interacting with the environment and collecting real-data
self.MFAC.collect_rollouts(env = self.env, learning_starts = self.learning_starts,
callback = callback_MFAC,
train_freq = self.train_freq, replay_buffer = self.MFAC.replay_buffer)
if currstep > 0 and currstep > self.learning_starts:
# MFAC being trained with real-data
self.MFAC.train(gradient_steps = self.gradient_steps, batch_size = 100)
currstep += 1
# eval and Test
mean_reward, std_reward = evaluate_policy(self.MFAC, env = self.env, n_eval_episodes= 10)
print("mean_reward:{}\nstd_reward:{}".format(mean_reward, std_reward))
Test( self.MFAC, self.env )
'''
********************* FOLLOWING IS OLD WORK THAT DOESN'T WORK *************************************************************
obs = self.env.reset()
for currstep in range( self.training_steps ):
action = self.MBMFAction(obs, currstep)[0]
s = obs
obs, reward, done, info = self.env.step( action )
self.MFAC.replay_buffer.add(obs=s, action = action, next_obs= obs, reward= reward, done = done, infos= [{'info': info}] )
print("*"*80)
print("[episode: {} | step: {}]\n\nobs: {}\n\nnext_obs: {}\n\naction: {}\n\nreward: {}\n\ndone: {}\n\n".format(eps, currstep, s, obs, action, reward, done))
# Perhaps store transitions as below rather than above?
# https://github.com/DLR-RM/stable-baselines3/blob/00ac43b0a90852eddec31e6f36cac7089c235614/stable_baselines3/common/off_policy_algorithm.py#L521
# Store data in replay buffer (normalized action and unnormalized observation)
#self.MFAC._store_transition(self.MFAC.replay_buffer, np.array([action]), np.array([obs]), np.array([reward]), np.array([done]), np.array([info]))
#self.VirtualDataBuffer
eps_rewsum[str(eps)]+=reward
# train MFAC with only RealData
#self.MFAC.replay_buffer = self.RealDataBuffer # perhaps for MFAC, could just add to its own replay_buffer.
#if self.MFAC.replay_buffer.size() > 100:
# print("Now we are going to train at step:{} ".format(currstep))
self.MFAC.train(gradient_steps = 100, batch_size = 100) # gradient_steps = 1000 would mimic what .learn() does.
# train Model ( Dynamics-Reward Model: P_{θ}(s’,r | s,a) )
# train MBAC with RealData and VirtualData
if done:
eps+=1
obs = self.env.reset()
'''
return None
# Gets
def get_rl_info(self):
''' returns rl info in dict '''
return dict( rl_agent = str(self.AC_Type), rl_env= str(self.env), params= self.kwargs, training_steps= self.training_steps )
# Save and Load
def save_hybrid(self):
''' save components of hybrid_mbmf perhaps as pkl if all serializable '''
pass
@classmethod
def load_hybrid(cls):
''' perhaps load from saved_pkl and distribute each component to member-vars '''
pass
``` |
{
"source": "jgwak/McRecon",
"score": 2
} |
#### File: McRecon/models/gan_mask_net.py
```python
import numpy as np
# Theano
import theano
import theano.tensor as tensor
from models.net import Net, tensor5
from lib.config import cfg
from lib.layers import TensorProductLayer, ConvLayer, PoolLayer, Unpool3DLayer, \
LeakyReLU, SoftmaxWithLoss3D, Conv3DLayer, InputLayer, FlattenLayer, \
FCConv3DLayer, TanhLayer, SigmoidLayer, ComplementLayer, AddLayer, \
EltwiseMultiplyLayer, RaytracingLayer, DimShuffleLayer, Pool3DLayer, \
DifferentiableStepLayer, SubtractLayer, InstanceNoiseLayer, \
get_trainable_params
class GANMaskNet(Net):
def network_definition(self):
# (multi_views, self.batch_size, 3, self.img_h, self.img_w),
self.x = tensor5()
self.is_x_tensor4 = False
img_w = self.img_w
img_h = self.img_h
n_gru_vox = 4
n_vox = self.n_vox
n_convfilter = [96, 128, 256, 256, 256, 256]
n_fc_filters = [1024, 2]
n_deconvfilter = [128, 128, 128, 128, 96, 2]
n_conv_advfilter = [32, 128, 128, 128, 32]
n_fc_advfilter = [1024, 2]
input_shape = (self.batch_size, 3, img_w, img_h)
voxel_shape = (self.batch_size, n_vox, n_vox, n_vox)
# To define weights, define the network structure first
x = InputLayer(input_shape)
conv1a = ConvLayer(x, (n_convfilter[0], 7, 7), param_type='generator')
conv1b = ConvLayer(conv1a, (n_convfilter[0], 3, 3), param_type='generator')
pool1 = PoolLayer(conv1b)
conv2a = ConvLayer(pool1, (n_convfilter[1], 3, 3), param_type='generator')
conv2b = ConvLayer(conv2a, (n_convfilter[1], 3, 3), param_type='generator')
conv2c = ConvLayer(pool1, (n_convfilter[1], 1, 1), param_type='generator')
pool2 = PoolLayer(conv2c)
conv3a = ConvLayer(pool2, (n_convfilter[2], 3, 3), param_type='generator')
conv3b = ConvLayer(conv3a, (n_convfilter[2], 3, 3), param_type='generator')
conv3c = ConvLayer(pool2, (n_convfilter[2], 1, 1), param_type='generator')
pool3 = PoolLayer(conv3b)
conv4a = ConvLayer(pool3, (n_convfilter[3], 3, 3), param_type='generator')
conv4b = ConvLayer(conv4a, (n_convfilter[3], 3, 3), param_type='generator')
pool4 = PoolLayer(conv4b)
conv5a = ConvLayer(pool4, (n_convfilter[4], 3, 3), param_type='generator')
conv5b = ConvLayer(conv5a, (n_convfilter[4], 3, 3), param_type='generator')
conv5c = ConvLayer(pool4, (n_convfilter[4], 1, 1), param_type='generator')
pool5 = PoolLayer(conv5b)
conv6a = ConvLayer(pool5, (n_convfilter[5], 3, 3), param_type='generator')
conv6b = ConvLayer(conv6a, (n_convfilter[5], 3, 3), param_type='generator')
pool6 = PoolLayer(conv6b)
flat6 = FlattenLayer(pool6)
fc7 = TensorProductLayer(flat6, n_fc_filters[0], param_type='generator')
# Set the size to be 256x4x4x4
s_shape = (self.batch_size, n_gru_vox, n_deconvfilter[0], n_gru_vox, n_gru_vox)
# Dummy 3D grid hidden representations
prev_s = InputLayer(s_shape)
t_x_s_update = FCConv3DLayer(prev_s, fc7, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3), param_type='generator')
t_x_s_reset = FCConv3DLayer(prev_s, fc7, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3), param_type='generator')
reset_gate = SigmoidLayer(t_x_s_reset)
rs = EltwiseMultiplyLayer(reset_gate, prev_s)
t_x_rs = FCConv3DLayer(rs, fc7, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3), param_type='generator')
def recurrence(x_curr, prev_s_tensor, prev_in_gate_tensor):
# Scan function cannot use compiled function.
input_ = InputLayer(input_shape, x_curr)
conv1a_ = ConvLayer(input_, (n_convfilter[0], 7, 7), params=conv1a.params)
rect1a_ = LeakyReLU(conv1a_)
conv1b_ = ConvLayer(rect1a_, (n_convfilter[0], 3, 3), params=conv1b.params)
rect1_ = LeakyReLU(conv1b_)
pool1_ = PoolLayer(rect1_)
conv2a_ = ConvLayer(pool1_, (n_convfilter[1], 3, 3), params=conv2a.params)
rect2a_ = LeakyReLU(conv2a_)
conv2b_ = ConvLayer(rect2a_, (n_convfilter[1], 3, 3), params=conv2b.params)
rect2_ = LeakyReLU(conv2b_)
conv2c_ = ConvLayer(pool1_, (n_convfilter[1], 1, 1), params=conv2c.params)
res2_ = AddLayer(conv2c_, rect2_)
pool2_ = PoolLayer(res2_)
conv3a_ = ConvLayer(pool2_, (n_convfilter[2], 3, 3), params=conv3a.params)
rect3a_ = LeakyReLU(conv3a_)
conv3b_ = ConvLayer(rect3a_, (n_convfilter[2], 3, 3), params=conv3b.params)
rect3_ = LeakyReLU(conv3b_)
conv3c_ = ConvLayer(pool2_, (n_convfilter[2], 1, 1), params=conv3c.params)
res3_ = AddLayer(conv3c_, rect3_)
pool3_ = PoolLayer(res3_)
conv4a_ = ConvLayer(pool3_, (n_convfilter[3], 3, 3), params=conv4a.params)
rect4a_ = LeakyReLU(conv4a_)
conv4b_ = ConvLayer(rect4a_, (n_convfilter[3], 3, 3), params=conv4b.params)
rect4_ = LeakyReLU(conv4b_)
pool4_ = PoolLayer(rect4_)
conv5a_ = ConvLayer(pool4_, (n_convfilter[4], 3, 3), params=conv5a.params)
rect5a_ = LeakyReLU(conv5a_)
conv5b_ = ConvLayer(rect5a_, (n_convfilter[4], 3, 3), params=conv5b.params)
rect5_ = LeakyReLU(conv5b_)
conv5c_ = ConvLayer(pool4_, (n_convfilter[4], 1, 1), params=conv5c.params)
res5_ = AddLayer(conv5c_, rect5_)
pool5_ = PoolLayer(res5_)
conv6a_ = ConvLayer(pool5_, (n_convfilter[5], 3, 3), params=conv6a.params)
rect6a_ = LeakyReLU(conv6a_)
conv6b_ = ConvLayer(rect6a_, (n_convfilter[5], 3, 3), params=conv6b.params)
rect6_ = LeakyReLU(conv6b_)
res6_ = AddLayer(pool5_, rect6_)
pool6_ = PoolLayer(res6_)
flat6_ = FlattenLayer(pool6_)
fc7_ = TensorProductLayer(flat6_, n_fc_filters[0], params=fc7.params)
rect7_ = LeakyReLU(fc7_)
prev_s_ = InputLayer(s_shape, prev_s_tensor)
t_x_s_update_ = FCConv3DLayer(
prev_s_,
rect7_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
params=t_x_s_update.params)
t_x_s_reset_ = FCConv3DLayer(
prev_s_,
rect7_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
params=t_x_s_reset.params)
update_gate_ = SigmoidLayer(t_x_s_update_)
comp_update_gate_ = ComplementLayer(update_gate_)
reset_gate_ = SigmoidLayer(t_x_s_reset_)
rs_ = EltwiseMultiplyLayer(reset_gate_, prev_s_)
t_x_rs_ = FCConv3DLayer(
rs_, rect7_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3), params=t_x_rs.params)
tanh_t_x_rs_ = TanhLayer(t_x_rs_)
gru_out_ = AddLayer(
EltwiseMultiplyLayer(update_gate_, prev_s_),
EltwiseMultiplyLayer(comp_update_gate_, tanh_t_x_rs_))
return gru_out_.output, update_gate_.output
s_update, r_update = theano.scan(recurrence,
sequences=[self.x[:, :, :3]], # along with images, feed in the index of the current frame
outputs_info=[tensor.zeros_like(np.zeros(s_shape),
dtype=theano.config.floatX),
tensor.zeros_like(np.zeros(s_shape),
dtype=theano.config.floatX)])
update_all = s_update[-1]
s_all = s_update[0]
s_last = s_all[-1]
gru_s = InputLayer(s_shape, s_last)
unpool7 = Unpool3DLayer(gru_s)
conv7a = Conv3DLayer(unpool7, (n_deconvfilter[1], 3, 3, 3),
param_type='generator')
rect7a = LeakyReLU(conv7a)
conv7b = Conv3DLayer(rect7a, (n_deconvfilter[1], 3, 3, 3),
param_type='generator')
rect7 = LeakyReLU(conv7b)
res7 = AddLayer(unpool7, rect7)
unpool8 = Unpool3DLayer(res7)
conv8a = Conv3DLayer(unpool8, (n_deconvfilter[2], 3, 3, 3),
param_type='generator')
rect8a = LeakyReLU(conv8a)
conv8b = Conv3DLayer(rect8a, (n_deconvfilter[2], 3, 3, 3),
param_type='generator')
rect8 = LeakyReLU(conv8b)
res8 = AddLayer(unpool8, rect8)
unpool9 = Unpool3DLayer(res8)
conv9a = Conv3DLayer(unpool9, (n_deconvfilter[3], 3, 3, 3),
param_type='generator')
rect9a = LeakyReLU(conv9a)
conv9b = Conv3DLayer(rect9a, (n_deconvfilter[3], 3, 3, 3),
param_type='generator')
rect9 = LeakyReLU(conv9b)
conv9c = Conv3DLayer(unpool9, (n_deconvfilter[3], 1, 1, 1),
param_type='generator')
res9 = AddLayer(conv9c, rect9)
conv10a = Conv3DLayer(res9, (n_deconvfilter[3], 3, 3, 3),
param_type='generator')
rect10a = LeakyReLU(conv10a)
conv10b = Conv3DLayer(rect10a, (n_deconvfilter[3], 3, 3, 3),
param_type='generator')
rect10 = LeakyReLU(conv10b)
conv10c = Conv3DLayer(rect10a, (n_deconvfilter[3], 3, 3, 3),
param_type='generator')
res10 = AddLayer(conv10c, rect10)
conv11 = Conv3DLayer(res10, (n_deconvfilter[4], 3, 3, 3),
param_type='generator')
conv12 = Conv3DLayer(conv11, (n_deconvfilter[5], 3, 3, 3),
param_type='generator')
voxel_loss = SoftmaxWithLoss3D(conv12.output)
reconstruction = voxel_loss.prediction()
voxel_input = InputLayer(voxel_shape, reconstruction[:, :, 1])
rend = RaytracingLayer(voxel_input, self.camera, img_w, img_h,
self.pad_x, self.pad_y)
# Discriminator network starts here.
disc_input = InputLayer(voxel_shape)
disc_padded = DimShuffleLayer(disc_input, (0, 1, 'x', 2, 3))
conv15 = Conv3DLayer(disc_padded, (n_conv_advfilter[0], 3, 3, 3),
param_type='discriminator')
conv16 = Conv3DLayer(conv15, (n_conv_advfilter[0], 3, 3, 3),
param_type='discriminator')
pool16 = Pool3DLayer(conv16) # b x 16 x c x 16 x 16
conv17 = Conv3DLayer(pool16, (n_conv_advfilter[1], 3, 3, 3),
param_type='discriminator')
conv18 = Conv3DLayer(conv17, (n_conv_advfilter[1], 3, 3, 3),
param_type='discriminator')
pool18 = Pool3DLayer(conv18) # b x 8 x c x 8 x 8
conv19 = Conv3DLayer(pool18, (n_conv_advfilter[2], 3, 3, 3),
param_type='discriminator')
conv20 = Conv3DLayer(conv19, (n_conv_advfilter[2], 3, 3, 3),
param_type='discriminator')
pool20 = Pool3DLayer(conv20) # b x 4 x c x 4 x 4
conv21 = Conv3DLayer(pool20, (n_conv_advfilter[3], 3, 3, 3),
param_type='discriminator')
conv22 = Conv3DLayer(conv21, (n_conv_advfilter[3], 3, 3, 3),
param_type='discriminator')
pool22 = Pool3DLayer(conv22) # b x 2 x c x 2 x 2
conv23 = Conv3DLayer(pool22, (n_conv_advfilter[4], 3, 3, 3),
param_type='discriminator')
conv24 = Conv3DLayer(conv23, (n_conv_advfilter[4], 1, 1, 1),
param_type='discriminator')
flat24 = FlattenLayer(conv24)
fc24 = TensorProductLayer(flat24, n_fc_advfilter[1],
param_type='discriminator')
def get_discriminator(data_centered, use_dropout):
conv15_ = Conv3DLayer(data_centered,
(n_conv_advfilter[0], 3, 3, 3), params=conv15.params)
rect15_ = LeakyReLU(conv15_)
conv16_ = Conv3DLayer(rect15_, (n_conv_advfilter[0], 3, 3, 3),
params=conv16.params)
rect16_ = LeakyReLU(conv16_)
pool16_ = Pool3DLayer(rect16_) # b x 16 x c x 16 x 16
conv17_ = Conv3DLayer(pool16_, (n_conv_advfilter[1], 3, 3, 3),
params=conv17.params)
rect17_ = LeakyReLU(conv17_)
conv18_ = Conv3DLayer(rect17_, (n_conv_advfilter[1], 3, 3, 3),
params=conv18.params)
rect18_ = LeakyReLU(conv18_)
pool18_ = Pool3DLayer(rect18_) # b x 8 x c x 8 x 8
conv19_ = Conv3DLayer(pool18_, (n_conv_advfilter[2], 3, 3, 3),
params=conv19.params)
rect19_ = LeakyReLU(conv19_)
conv20_ = Conv3DLayer(rect19_, (n_conv_advfilter[2], 3, 3, 3),
params=conv20.params)
rect20_ = LeakyReLU(conv20_)
pool20_ = Pool3DLayer(rect20_) # b x 4 x c x 4 x 4
conv21_ = Conv3DLayer(pool20_, (n_conv_advfilter[3], 3, 3, 3),
params=conv21.params)
rect21_ = LeakyReLU(conv21_)
conv22_ = Conv3DLayer(rect21_, (n_conv_advfilter[3], 3, 3, 3),
params=conv22.params)
rect22_ = LeakyReLU(conv22_)
pool22_ = Pool3DLayer(rect22_) # b x 2 x c x 2 x 2
conv23_ = Conv3DLayer(pool22_, (n_conv_advfilter[4], 3, 3, 3),
params=conv23.params)
rect23_ = LeakyReLU(conv23_)
conv24_ = Conv3DLayer(rect23_, (n_conv_advfilter[4], 1, 1, 1),
params=conv24.params)
flat24_ = FlattenLayer(conv24_)
fc24_ = TensorProductLayer(flat24_, n_fc_advfilter[1],
params=fc24.params)
return SoftmaxWithLoss3D(fc24_.output, axis=1)
voxel_padded = DimShuffleLayer(voxel_input, (0, 1, 'x', 2, 3))
if cfg.TRAIN.STABILIZER == 'diffstep':
voxel_stabilized = DifferentiableStepLayer(voxel_padded,
backprop=cfg.TRAIN.DIFF_BACKPROP)
elif cfg.TRAIN.STABILIZER == 'noise':
voxel_stabilized = InstanceNoiseLayer(voxel_padded,
std=self.noise * cfg.TRAIN.NOISE_MAXSTD)
elif cfg.TRAIN.STABILIZER == 'ignore':
voxel_stabilized = voxel_padded
else:
raise NotImplementedError
voxel_centered = SubtractLayer(voxel_stabilized, 0.5)
gt_input = InputLayer(voxel_shape, self.y[:, :, 1])
gt_padded = DimShuffleLayer(gt_input, (0, 1, 'x', 2, 3))
if cfg.TRAIN.STABILIZER == 'diffstep':
gt_stabilized = gt_padded
elif cfg.TRAIN.STABILIZER == 'noise':
gt_stabilized = InstanceNoiseLayer(gt_padded,
std=self.noise * cfg.TRAIN.NOISE_MAXSTD)
elif cfg.TRAIN.STABILIZER == 'ignore':
gt_stabilized = gt_padded
else:
raise NotImplementedError
gt_centered = SubtractLayer(gt_stabilized, 0.5)
# Discriminator 1: takes fake voxel as input.
discriminator_fake_loss = get_discriminator(voxel_centered, True)
# Discriminator 2: takes real voxel as input.
discriminator_real_loss = get_discriminator(gt_centered, True)
# Discriminator 3: takes generated voxel as input, doesn't use dropout.
discriminator_fake_test = get_discriminator(voxel_centered, False)
# Discriminator 4: takes real voxel as input, doesn't use dropout.
discriminator_real_test = get_discriminator(gt_centered, False)
assert not r_update, 'Unexpected update in the RNN.'
label_shape = np.zeros((self.batch_size, 1))
fake_label = tensor.zeros_like(label_shape, dtype=theano.config.floatX)
real_label = tensor.ones_like(label_shape, dtype=theano.config.floatX)
all_fake = tensor.concatenate((real_label, fake_label), axis=1)
all_real = tensor.concatenate((fake_label, real_label), axis=1)
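# Loss composition: the generator's adversarial term scores how well fake voxels are classified
# as real; the mask term is a binary cross-entropy between the raytraced silhouette and the
# foreground mask taken from the 4th input channel, weighted 100x in the generator loss.
# The discriminator loss averages its error on fake (generated) and real (ground-truth) voxels.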
self.voxel_loss = discriminator_fake_test.loss(all_real)
self.mask_loss = tensor.nnet.nnet.binary_crossentropy(
tensor.clip(rend.output[:, :, 0], 1e-7, 1.0 - 1e-7),
tensor.gt(self.x[:, :, 3], 0.).astype(theano.config.floatX)).mean()
self.discriminator_loss = (discriminator_fake_loss.loss(all_fake) +
discriminator_real_loss.loss(all_real)) / 2.
self.generator_loss = self.voxel_loss + self.mask_loss * 100
self.error = voxel_loss.error(self.y)
self.error_F = discriminator_fake_test.error(all_fake)
self.error_R = discriminator_real_test.error(all_real)
self.generator_params = get_trainable_params()['generator']
self.discriminator_params = get_trainable_params()['discriminator']
self.all_params = self.generator_params + self.discriminator_params
self.load_params = self.all_params
self.output = reconstruction
self.activations = [rend.output[:, :, 0]]
``` |
{
"source": "jgwashburn/dexcom_reader",
"score": 3
} |
#### File: dexcom_reader/dexcom_reader/util.py
```python
import constants
import datetime
import os
import platform
import plistlib
import re
import subprocess
def ReceiverTimeToTime(rtime):
return constants.BASE_TIME + datetime.timedelta(seconds=rtime)
def linux_find_usbserial(vendor, product):
DEV_REGEX = re.compile('^tty(USB|ACM)[0-9]+$')
for usb_dev_root in os.listdir('/sys/bus/usb/devices'):
device_name = os.path.join('/sys/bus/usb/devices', usb_dev_root)
if not os.path.exists(os.path.join(device_name, 'idVendor')):
continue
idv = open(os.path.join(device_name, 'idVendor')).read().strip()
if idv != vendor:
continue
idp = open(os.path.join(device_name, 'idProduct')).read().strip()
if idp != product:
continue
for root, dirs, files in os.walk(device_name):
for option in dirs + files:
if DEV_REGEX.match(option):
return os.path.join('/dev', option)
def osx_find_usbserial(vendor, product):
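# Recursively walks the plist dumped by `ioreg -a`, looking for an entry whose idVendor and
# idProduct match, then follows IORegistryEntryChildren down to the IODialinDevice path
# (e.g. /dev/tty.usbmodem...).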
def recur(v):
if hasattr(v, '__iter__') and 'idVendor' in v and 'idProduct' in v:
if v['idVendor'] == vendor and v['idProduct'] == product:
tmp = v
while True:
if 'IODialinDevice' not in tmp and 'IORegistryEntryChildren' in tmp:
tmp = tmp['IORegistryEntryChildren']
elif 'IODialinDevice' in tmp:
return tmp['IODialinDevice']
else:
break
if type(v) == list:
for x in v:
out = recur(x)
if out is not None:
return out
elif type(v) == dict or issubclass(type(v), dict):
for x in v.values():
out = recur(x)
if out is not None:
return out
sp = subprocess.Popen(['/usr/sbin/ioreg', '-k', 'IODialinDevice',
'-r', '-t', '-l', '-a', '-x'],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, _ = sp.communicate()
plist = plistlib.readPlistFromString(stdout)
return recur(plist)
def find_usbserial(vendor, product):
"""Find the tty device for a given usbserial devices identifiers.
Args:
vendor: (int) something like 0x0000
product: (int) something like 0x0000
Returns:
String, like /dev/ttyACM0 or /dev/tty.usb...
"""
if platform.system() == 'Linux':
vendor, product = [('%04x' % (x)).strip() for x in (vendor, product)]
return linux_find_usbserial(vendor, product)
elif platform.system() == 'Darwin':
return osx_find_usbserial(vendor, product)
else:
raise NotImplementedError('Cannot find serial ports on %s'
% platform.system())
``` |
{
"source": "jgweir/hammr",
"score": 2
} |
#### File: commands/os/os.py
```python
import shlex
from hurry.filesize import size
from texttable import Texttable
from ussclicore.argumentParser import ArgumentParser, ArgumentParserError
from argparse import RawTextHelpFormatter
from ussclicore.cmd import Cmd, CoreGlobal
from ussclicore.utils import generics_utils, printer
from hammr.utils.hammr_utils import *
class Os(Cmd, CoreGlobal):
"""List all the OSes the user has access to. Includes, name, version, architecture and release date. You can also search for available packages"""
cmd_name="os"
def __init__(self):
super(Os, self).__init__()
def arg_list(self):
doParser = ArgumentParser(prog=self.cmd_name+" list", add_help = True, description="Displays all the operating systems available to use by the use")
return doParser
def do_list(self, args):
try:
#call UForge API
printer.out("Getting distributions for ["+self.login+"] ...")
distributions = self.api.Users(self.login).Distros.Getall()
distributions = distributions.distributions
if distributions is None or not hasattr(distributions, "distribution"):
printer.out("No distributions available")
else:
table = Texttable(800)
table.set_cols_dtype(["t","t","t","t","t", "t"])
table.header(["Id", "Name", "Version", "Architecture", "Release Date", "Profiles"])
distributions = generics_utils.order_list_object_by(distributions.distribution, "name")
for distribution in distributions:
profiles = self.api.Distributions(distribution.dbId).Profiles.Getall()
profiles = profiles.distribProfiles.distribProfile
if len(profiles) > 0:
profile_text=""
for profile in profiles:
profile_text+=profile.name+"\n"
table.add_row([distribution.dbId, distribution.name, distribution.version, distribution.arch, distribution.releaseDate.strftime("%Y-%m-%d %H:%M:%S") if distribution.releaseDate is not None else "", profile_text])
else:
table.add_row([distribution.dbId, distribution.name, distribution.version, distribution.arch, distribution.releaseDate.strftime("%Y-%m-%d %H:%M:%S") if distribution.releaseDate is not None else "", "-"])
print table.draw() + "\n"
printer.out("Found "+str(len(distributions))+" distributions")
return 0
except ArgumentParserError as e:
printer.out("ERROR: In Arguments: "+str(e), printer.ERROR)
self.help_list()
except Exception as e:
return handle_uforge_exception(e)
def help_list(self):
doParser = self.arg_list()
doParser.print_help()
def arg_search(self):
doParser = ArgumentParser(prog=self.cmd_name+" search", add_help = True, description="Search packages from an OS", formatter_class=RawTextHelpFormatter)
mandatory = doParser.add_argument_group("mandatory arguments")
mandatory.add_argument('--id', dest='id', required=True, help="Os id")
mandatory.add_argument('--pkg', dest='pkg', required=True, help='''\
Regular expression of the package:\n\
"string" : search all packages which contains "string"\n\
"*string": search all packages which end with "string"\n\
"string*": search all packages which start with "string"''')
return doParser
def do_search(self, args):
try:
#add arguments
doParser = self.arg_search()
doArgs = doParser.parse_args(shlex.split(args))
#if the help command is called, parse_args returns None object
if not doArgs:
return 2
#call UForge API
printer.out("Search package '"+doArgs.pkg+"' ...")
distribution = self.api.Distributions(doArgs.id).Get()
printer.out("for OS '"+distribution.name+"', version "+distribution.version)
pkgs = self.api.Distributions(distribution.dbId).Pkgs.Getall(Query="name=="+doArgs.pkg)
pkgs = pkgs.pkgs.pkg
if pkgs is None or len(pkgs) == 0:
printer.out("No package found")
else:
table = Texttable(800)
table.set_cols_dtype(["t","t","t","t","t","t","t"])
table.header(["Name", "Version", "Arch", "Release", "Build date", "Size", "FullName"])
pkgs = generics_utils.order_list_object_by(pkgs, "name")
for pkg in pkgs:
table.add_row([pkg.name, pkg.version, pkg.arch, pkg.release, pkg.pkgBuildDate.strftime("%Y-%m-%d %H:%M:%S"), size(pkg.size), pkg.fullName])
print table.draw() + "\n"
printer.out("Found "+str(len(pkgs))+" packages")
except ArgumentParserError as e:
printer.out("ERROR: In Arguments: "+str(e), printer.ERROR)
self.help_search()
except Exception as e:
return handle_uforge_exception(e)
def help_search(self):
doParser = self.arg_search()
doParser.print_help()
```
#### File: commands/template/template.py
```python
import time
import tarfile
import os.path
import ntpath
import shutil
import shlex
from junit_xml import TestSuite, TestCase
from texttable import Texttable
from ussclicore.argumentParser import ArgumentParser, ArgumentParserError
from ussclicore.cmd import Cmd, CoreGlobal
from progressbar import AnimatedMarker, Bar, BouncingBar, Counter, ETA, \
FileTransferSpeed, FormatLabel, Percentage, \
ProgressBar, ReverseBar, RotatingMarker, \
SimpleProgress, Timer
from uforge.objects.uforge import *
from ussclicore.utils import generics_utils, printer, progressbar_widget
from hammr.utils.hammr_utils import *
from hammr.utils.bundle_utils import *
from hammr.utils import constants
from hammr.utils import generate_utils
from os.path import realpath
class Template(Cmd, CoreGlobal):
"""Create a template based on your configuration file. List, delete, clone or export/import existing templates. Generate an image from your configuration file. Validate your configuration file before building your template"""
cmd_name="template"
def __init__(self):
super(Template, self).__init__()
def arg_list(self):
doParser = ArgumentParser(prog=self.cmd_name+" list", add_help = True, description="Displays all the created templates")
return doParser
def do_list(self, args):
try:
#call UForge API
printer.out("Getting templates for ["+self.login+"] ...")
appliances = self.api.Users(self.login).Appliances().Getall()
appliances = appliances.appliances
if appliances is None or not hasattr(appliances, 'appliance'):
printer.out("No template")
else:
images = self.api.Users(self.login).Images.Getall()
images = images.images
table = Texttable(800)
table.set_cols_dtype(["t","t","t","t","t","t","t","t","t","t"])
table.header(["Id", "Name", "Version", "OS", "Created", "Last modified", "# Imgs", "Updates", "Imp", "Shared"])
appliances = generics_utils.order_list_object_by(appliances.appliance, "name")
for appliance in appliances:
nbImage=0
if images is not None and hasattr(images, 'image'):
for image in images.image:
if hasattr(image, 'parentUri') and image.parentUri == appliance.uri:
nbImage+=1
table.add_row([appliance.dbId, appliance.name, str(appliance.version), appliance.distributionName+" "+appliance.archName,
appliance.created.strftime("%Y-%m-%d %H:%M:%S"), appliance.lastModified.strftime("%Y-%m-%d %H:%M:%S"), nbImage, appliance.nbUpdates, "X" if appliance.imported else "", "X" if appliance.shared else ""])
print table.draw() + "\n"
printer.out("Found "+str(len(appliances))+" templates")
return 0
except ArgumentParserError as e:
printer.out("ERROR: In Arguments: "+str(e), printer.ERROR)
self.help_list()
except Exception as e:
return handle_uforge_exception(e)
def help_list(self):
doParser = self.arg_list()
doParser.print_help()
def arg_export(self):
doParser = ArgumentParser(prog=self.cmd_name+" export", add_help = True, description="Exports a template by creating an archive (compressed tar file) that includes the template configuration file")
mandatory = doParser.add_argument_group("mandatory arguments")
mandatory.add_argument('--id', dest='id', required=True, help="the ID of the template to export")
optional = doParser.add_argument_group("optional arguments")
optional.add_argument('--file', dest='file', required=False, help="destination path where to store the template configuration file on the local filesystem")
optional.add_argument('--outputFormat', dest='output_format', required=False, help="output format (yaml or json) of the template file to export (yaml is the default one)")
return doParser
def do_export(self, args):
try:
#add arguments
doParser = self.arg_export()
doArgs = doParser.parse_args(shlex.split(args))
#if the help command is called, parse_args returns None object
if not doArgs:
return 2
#call UForge API
printer.out("Exporting template with id ["+doArgs.id+"] :")
myAppliance = self.api.Users(self.login).Appliances(doArgs.id).Get()
if myAppliance is None or type(myAppliance) is not Appliance:
printer.out("No template")
else:
output_format="yaml"
if doArgs.output_format is not None:
output_format=doArgs.output_format
applianceExport = self.api.Users(self.login).Appliances(myAppliance.dbId).Exports().Export(output_format)
status = applianceExport.status
progress = ProgressBar(widgets=[Percentage(), Bar()], maxval=100).start()
while not (status.complete or status.error):
progress.update(status.percentage)
status = self.api.Users(self.login).Appliances(myAppliance.dbId).Exports(applianceExport.dbId).Status.Get()
time.sleep(2)
progress.finish()
if status.error:
printer.out("Export error: "+status.message+"\n"+status.errorMessage, printer.ERROR)
if status.detailedError:
printer.out(status.detailedErrorMsg)
else:
printer.out("Downloading archive...")
if doArgs.file is None:
file = open("archive.tar.gz", "w")
else:
file = open(doArgs.file, "w")
self.api.Users(self.login).Appliances(myAppliance.dbId).Exports(applianceExport.dbId).Downloads.Download(streamingResponseFile=file)
file.close()
#Delete export archive on the server
self.api.Users(self.login).Appliances(myAppliance.dbId).Exports(applianceExport.dbId).Delete()
printer.out("Download complete of file ["+file.name+"]", printer.OK)
return 0
except IOError as e:
printer.out("File error: "+str(e), printer.ERROR)
except ArgumentParserError as e:
printer.out("ERROR: In Arguments: "+str(e), printer.ERROR)
self.help_export()
except Exception as e:
return handle_uforge_exception(e)
def help_export(self):
doParser = self.arg_export()
doParser.print_help()
def arg_import(self):
doParser = ArgumentParser(prog=self.cmd_name+" import", add_help = True, description="Creates a template from an archive")
mandatory = doParser.add_argument_group("mandatory arguments")
mandatory.add_argument('--file', dest='file', required=True, help="the path of the archive")
optional = doParser.add_argument_group("optional arguments")
optional.add_argument('-f', '--force', dest='force', action='store_true', help='force template creation (delete template/bundle if already exist)', required = False)
optional.add_argument('-r', '--rbundles', dest='rbundles', action='store_true', help='if a bundle already exists, use it in the new template. Warning: this option ignores the content of the bundle described in the template file', required = False)
optional.add_argument('--usemajor', dest='use_major', action='store_true', help='use the distribution major version if it exists', required = False)
optional.set_defaults(force=False)
optional.set_defaults(use_major=False)
return doParser
def do_import(self, args):
try:
#add arguments
doParser = self.arg_import()
doArgs = doParser.parse_args(shlex.split(args))
#if the help command is called, parse_args returns None object
if not doArgs:
return 2
#call UForge API
return self.import_stack(doArgs.file, True, doArgs.force, doArgs.rbundles, doArgs.use_major)
except ArgumentParserError as e:
printer.out("In Arguments: "+str(e)+"\n", printer.ERROR)
self.help_import()
except Exception as e:
return handle_uforge_exception(e)
def help_import(self):
doParser = self.arg_import()
doParser.print_help()
def arg_validate(self):
doParser = ArgumentParser(prog=self.cmd_name+" validate", add_help = True, description="Validates the syntax of a template configuration file")
mandatory = doParser.add_argument_group("mandatory arguments")
mandatory.add_argument('--file', dest='file', required=True, help="the yaml/json template configuration file")
return doParser
def do_validate(self, args):
try:
#add arguments
doParser = self.arg_validate()
doArgs = doParser.parse_args(shlex.split(args))
#if the help command is called, parse_args returns None object
if not doArgs:
return 2
file = generics_utils.get_file(doArgs.file)
if file is None:
return 2
template=validate(file)
if template is None:
return 2
print "OK : Syntax of template file [" + realpath(file) + "] is ok"
return 0
except ArgumentParserError as e:
printer.out("ERROR: In Arguments: "+str(e), printer.ERROR)
self.help_validate()
def help_validate(self):
doParser = self.arg_validate()
doParser.print_help()
def arg_create(self):
doParser = ArgumentParser(prog=self.cmd_name+" create", add_help = True, description="Create a new template and save to the UForge server")
mandatory = doParser.add_argument_group("mandatory arguments")
mandatory.add_argument('--file', dest='file', required=True, help="yaml/json file containing the template content")
optional = doParser.add_argument_group("optional arguments")
optional.add_argument('--archive-path', dest='archive_path', required=False, help="path where to store the archive of the created template. If provided, hammr creates an archive of the created template, equivalent to running template export")
optional.add_argument('-f', '--force', dest='force', action='store_true', help='force template creation (delete template/bundle if already exist)', required = False)
optional.add_argument('-r', '--rbundles', dest='rbundles', action='store_true', help='if a bundle already exists, use it in the new template. Warning: this option ignores the content of the bundle described in the template file', required = False)
optional.add_argument('--usemajor', dest='use_major', action='store_true', help='use the distribution major version if it exists', required = False)
optional.set_defaults(force=False)
optional.set_defaults(use_major=False)
return doParser
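# Usage sketch (hypothetical file name): "template create --file my-template.yml --force"
# packages my-template.yml plus any referenced configs/bundles into an archive and imports it,
# which is equivalent to running "template import" on the generated archive.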
def do_create(self, args):
try:
#add arguments
doParser = self.arg_create()
doArgs = doParser.parse_args(shlex.split(args))
#if the help command is called, parse_args returns None object
if not doArgs:
return 2
#--
#get file (remote or local)
file = generics_utils.get_file(doArgs.file)
if file is None:
return 2
#validate parsing and mandatory fields
template = validate(file)
if template is None:
return 2
isJsonFile= check_extension_is_json(file)
if "builders" in template:
template["builders"]=None
archive_files=[]
if "config" in template["stack"]:
for config in template["stack"]["config"]:
#add to list of file to tar
if "source" in config:
file_tar_path=constants.FOLDER_CONFIGS + os.sep + generics_utils.remove_URI_forbidden_char(ntpath.basename(config["source"]))
archive_files.append([file_tar_path,config["source"]])
#changing source path to archive related source path
config["source"]=file_tar_path
else:
printer.out("No source file found in config", printer.ERROR)
return 2
try:
checkList = []
if "bundles" in template["stack"]:
for bundle in template["stack"]["bundles"]:
if "files" in bundle:
for files in bundle["files"]:
checkList,archive_files = recursivelyAppendToArchive(bundle, files, "", checkList, archive_files)
else:
printer.out("No files section found for bundle", printer.ERROR)
return 2
if "license" in bundle and "source" in bundle["license"]:
#add to list of file to tar
file_tar_path=constants.FOLDER_BUNDLES + os.sep + generics_utils.remove_URI_forbidden_char(bundle["name"]) + os.sep + generics_utils.remove_URI_forbidden_char(ntpath.basename(bundle["license"]["source"]))
archive_files.append([file_tar_path,bundle["license"]["source"]])
#changing source path to archive related source path
bundle["license"]["source"]=file_tar_path
if "sourceLogo" in bundle:
#add to list of file to tar
file_tar_path=constants.FOLDER_BUNDLES + os.sep + generics_utils.remove_URI_forbidden_char(bundle["name"]) + os.sep + generics_utils.remove_URI_forbidden_char(ntpath.basename(bundle["sourceLogo"]))
archive_files.append([file_tar_path,bundle["sourceLogo"]])
#changing source path to archive related source path
bundle["sourceLogo"]=file_tar_path
except ValueError as ve:
printer.out(str(ve), printer.ERROR)
return 2
except KeyError as e:
printer.out("Error in bundle", printer.ERROR)
return 2
if "source_logo" in template["stack"]:
#add to list of file to tar
file_tar_path=constants.FOLDER_LOGO + os.sep + generics_utils.remove_URI_forbidden_char(ntpath.basename(template["stack"]["source_logo"]))
archive_files.append([file_tar_path,template["stack"]["source_logo"]])
#changing source path to archive related source path
template["stack"]["source_logo"]=file_tar_path
if os.path.isdir(constants.TMP_WORKING_DIR):
#delete tmp dir
shutil.rmtree(constants.TMP_WORKING_DIR)
os.mkdir(constants.TMP_WORKING_DIR)
if isJsonFile:
fileName = constants.TEMPLATE_JSON_FILE_NAME
newFileName = constants.TEMPLATE_JSON_NEW_FILE_NAME
else:
fileName = constants.TEMPLATE_YAML_FILE_NAME
newFileName = constants.TEMPLATE_YAML_NEW_FILE_NAME
archive_files = dump_data_in_file(template, archive_files, isJsonFile, fileName, newFileName)
if doArgs.archive_path is not None:
tar_path = doArgs.archive_path
else:
tar_path = constants.TMP_WORKING_DIR+os.sep+"archive.tar.gz"
tar = tarfile.open(tar_path, "w|gz")
for file_tar_path,file_global_path in archive_files:
if not os.path.isdir(file_global_path):
file = generics_utils.get_file(file_global_path, constants.TMP_WORKING_DIR+os.sep+os.path.basename(file_global_path))
if file is None:
printer.out("Downloaded bunlde file not found", printer.ERROR)
return 2
tar.add(file, arcname=file_tar_path)
else:
tar.add(file_global_path, arcname=file_tar_path)
tar.close()
#archive is created, doing import
r = self.import_stack(tar_path, False, doArgs.force, doArgs.rbundles, doArgs.use_major)
if r != 0:
return r
#delete tmp dir
shutil.rmtree(constants.TMP_WORKING_DIR)
return 0
except OSError as e:
printer.out("OSError: "+str(e), printer.ERROR)
except IOError as e:
printer.out("File error: "+str(e), printer.ERROR)
except ArgumentParserError as e:
printer.out("In Arguments: "+str(e), printer.ERROR)
self.help_create()
except Exception as e:
return handle_uforge_exception(e)
def help_create(self):
doParser = self.arg_create()
doParser.print_help()
def arg_build(self):
do_parser = ArgumentParser(prog=self.cmd_name+" build", add_help = True, description="Builds a machine image from the template")
mandatory = do_parser.add_argument_group("mandatory arguments")
mandatory.add_argument('--file', dest='file', required=True, help="yaml/json file providing the builder parameters")
optional = do_parser.add_argument_group("optional arguments")
optional.add_argument('--id',dest='id',required=False, help="id of the template to build")
optional.add_argument('--junit',dest='junit',required=False, help="name of junit XML output file")
optional.add_argument('--simulate', dest='simulated', action='store_true', help='Simulate the generation (only the Checking Dependencies process will be executed)', required = False)
optional.add_argument('--force', dest='forced', action='store_true', help='Force the checking dependencies', required = False)
return do_parser
def do_build(self, args):
try:
#add arguments
do_parser = self.arg_build()
do_args = do_parser.parse_args(shlex.split(args))
#if the help command is called, parse_args returns None object
if not do_args:
return 2
if do_args.id:
template = validate(do_args.file)
else :
template=validate_builder_file_with_no_template_id(do_args.file)
if template is None:
return 2
if do_args.id:
my_appliance = self.api.Users(self.login).Appliances().Getall(Query="dbId=="+do_args.id)
my_appliance = my_appliance.appliances.appliance
else:
#Get the template which corresponds to the template file
my_appliance = self.api.Users(self.login).Appliances().Getall(Query="name=='"+template["stack"]["name"]+"';version=='"+template["stack"]["version"]+"'")
my_appliance = my_appliance.appliances.appliance
if my_appliance is None or len(my_appliance)!=1:
printer.out("No template found on the plateform")
return 0
my_appliance=my_appliance[0]
r_install_profile = self.api.Users(self.login).Appliances(my_appliance.dbId).Installprofile("").Getdeprecated()
if r_install_profile is None:
printer.out("No installation found on the template '"+template["stack"]["name"]+"'", printer.ERROR)
return 0
try:
i=1
test_results=[]
for builder in template["builders"]:
try:
printer.out("Generating '"+builder["type"]+"' image ("+str(i)+"/"+str(len(template["builders"]))+")")
test = None
if do_args.junit is not None:
test = TestCase('Generation '+builder["type"])
test_results.append(test)
start_time = time.time()
format_type = builder["type"]
target_format = generate_utils.get_target_format_object(self.api, self.login, format_type)
if target_format is None:
printer.out("Builder type unknown: "+format_type, printer.ERROR)
return 2
myimage = image()
myinstall_profile = installProfile()
if r_install_profile.partitionAuto:
if "installation" in builder:
if "swapSize" in builder["installation"]:
myinstall_profile.swapSize = builder["installation"]["swapSize"]
if "diskSize" in builder["installation"]:
myinstall_profile.diskSize = builder["installation"]["diskSize"]
else:
myinstall_profile.swapSize = r_install_profile.swapSize
myinstall_profile.diskSize = r_install_profile.partitionTable.disks.disk[0].size
func = getattr(generate_utils, "generate_"+generics_utils.remove_special_chars(target_format.format.name), None)
if func:
myimage,myinstall_profile = func(myimage, builder, myinstall_profile, self.api, self.login)
else:
printer.out("Builder type unknown: "+format_type, printer.ERROR)
return 2
if myimage is None:
return 2
myimage.targetFormat = target_format
myimage.installProfile = myinstall_profile
if do_args.simulated is not None and do_args.simulated:
myimage.simulated=True
if do_args.forced is not None and do_args.forced:
myimage.forceCheckingDeps=True
r_image = self.api.Users(self.login).Appliances(my_appliance.dbId).Images().Generate(myimage)
status = r_image.status
status_widget = progressbar_widget.Status()
status_widget.status = status
widgets = [Bar('>'), ' ', status_widget, ' ', ReverseBar('<')]
progress = ProgressBar(widgets=widgets, maxval=100).start()
while not (status.complete or status.error or status.cancelled):
status_widget.status = status
progress.update(status.percentage)
status = self.api.Users(self.login).Appliances(my_appliance.dbId).Images(r_image.dbId).Status.Get()
time.sleep(2)
status_widget.status = status
progress.finish()
if status.error:
printer.out("Generation '"+builder["type"]+"' error: "+status.message+"\n"+status.errorMessage, printer.ERROR)
if status.detailedError:
printer.out(status.detailedErrorMsg)
if do_args.junit is not None:
test.elapsed_sec=time.time() - start_time
test.add_error_info("Error", status.message+"\n"+status.errorMessage)
elif status.cancelled:
printer.out("Generation '"+builder["type"]+"' canceled: "+status.message, printer.WARNING)
if do_args.junit is not None:
test.elapsed_sec=time.time() - start_time
test.add_failure_info("Canceled", status.message)
else:
printer.out("Generation '"+builder["type"]+"' ok", printer.OK)
printer.out("Image URI: "+r_image.uri)
printer.out("Image Id : "+generics_utils.extract_id(r_image.uri))
if do_args.junit is not None:
test.elapsed_sec=time.time() - start_time
#the downloadUri already contains downloadKey at the end
if r_image.downloadUri is not None:
test.stdout=self.api.getUrl() +"/"+r_image.downloadUri
i+=1
except Exception as e:
if is_uforge_exception(e):
print_uforge_exception(e)
if do_args.junit is not None and "test_results" in locals() and len(test_results)>0:
test=test_results[len(test_results)-1]
test.elapsed_sec=time.time() - start_time
test.add_error_info("Error", get_uforge_exception(e))
else:
raise
if do_args.junit is not None:
test_name = my_appliance.distributionName+" "+my_appliance.archName
ts = TestSuite("Generation "+test_name, test_results)
with open(do_args.junit, 'w') as f:
TestSuite.to_file(f, [ts], prettyprint=False)
return 0
except KeyError as e:
printer.out("unknown error in template file", printer.ERROR)
except ArgumentParserError as e:
printer.out("ERROR: In Arguments: "+str(e), printer.ERROR)
self.help_build()
except KeyboardInterrupt:
printer.out("\n")
if generics_utils.query_yes_no("Do you want to cancel the job ?"):
if 'my_appliance' in locals() and 'r_image' in locals() and hasattr(my_appliance, 'dbId') and hasattr(r_image, 'dbId'):
self.api.Users(self.login).Appliances(my_appliance.dbId).Images(r_image.dbId).Status.Cancel()
else:
printer.out("Impossible to cancel", printer.WARNING)
else:
printer.out("Exiting command")
except Exception as e:
print_uforge_exception(e)
if do_args.junit is not None and "test_results" in locals() and len(test_results)>0:
test=test_results[len(test_results)-1]
if "start_time" in locals():
elapse=time.time() - start_time
else:
elapse=0
test.elapsed_sec=elapse
test.add_error_info("Error", get_uforge_exception(e))
else:
return 2
finally:
if "do_args" in locals() and do_args.junit is not None and "test_results" in locals() and len(test_results)>0:
if "my_appliance" in locals():
test_name = my_appliance.distributionName+" "+my_appliance.archName
else:
test_name = ""
ts = TestSuite("Generation "+test_name, test_results)
with open(do_args.junit, 'w') as f:
TestSuite.to_file(f, [ts], prettyprint=False)
def help_build(self):
do_parser = self.arg_build()
do_parser.print_help()
def import_stack(self, file, isImport, isForce, rbundles, isUseMajor):
try:
if isImport:
printer.out("Importing template from ["+file+"] archive ...")
else:
if constants.TMP_WORKING_DIR in str(file):
printer.out("Creating template from temporary ["+file+"] archive ...")
else:
printer.out("Creating template from ["+file+"] archive ...")
file = open(file, "r")
# The following code could not be used for the moment
# appImport = applianceImport()
# appImport.imported = isImport
# appImport.forceRw = isForce
# appImport.reuseBundles = rbundles
# appImport.useMajor = isUseMajor
# appImport = self.api.Users(self.login).Imports.Import(appImport)
appImport = self.api.Users(self.login).Imports.Import(Imported=isImport, Force=isForce, Reusebundles=rbundles, Usemajor=isUseMajor)
if appImport is None:
if isImport:
printer.out("error importing appliance", printer.ERROR)
else:
printer.out("error creating appliance", printer.ERROR)
return 2
else:
status = self.api.Users(self.login).Imports(appImport.dbId).Uploads.Upload(file)
progress = ProgressBar(widgets=[Percentage(), Bar()], maxval=100).start()
while not (status.complete or status.error):
progress.update(status.percentage)
status = self.api.Users(self.login).Imports(appImport.dbId).Status.Get()
time.sleep(2)
progress.finish()
if status.error:
if isImport:
printer.out("Template import: "+status.message+"\n"+status.errorMessage, printer.ERROR)
if status.detailedError:
printer.out(status.detailedErrorMsg)
else:
printer.out("Template create: "+status.message+"\n"+status.errorMessage, printer.ERROR)
else:
if isImport:
printer.out("Template import: DONE", printer.OK)
else:
printer.out("Template create: DONE", printer.OK)
#get appliance import
appImport = self.api.Users(self.login).Imports(appImport.dbId).Get()
printer.out("Template URI: "+appImport.referenceUri)
printer.out("Template Id : "+generics_utils.extract_id(appImport.referenceUri))
return 0
except IOError as e:
printer.out("File error: "+str(e), printer.ERROR)
return 2
except Exception as e:
return handle_uforge_exception(e)
def arg_delete(self):
doParser = ArgumentParser(prog=self.cmd_name+" delete", add_help = True, description="Deletes an existing template")
mandatory = doParser.add_argument_group("mandatory arguments")
mandatory.add_argument('--id', dest='id', required=True, help="the ID of the template to delete")
optional = doParser.add_argument_group("optional arguments")
optional.add_argument('--no-confirm',dest='no_confirm',action='store_true', required=False, help="do not print confirmation dialog")
optional.set_defaults(no_confirm=False)
return doParser
def do_delete(self, args):
try:
#add arguments
doParser = self.arg_delete()
doArgs = doParser.parse_args(shlex.split(args))
#if the help command is called, parse_args returns None object
if not doArgs:
return 2
#call UForge API
printer.out("Searching template with id ["+doArgs.id+"] ...")
myAppliance = self.api.Users(self.login).Appliances(doArgs.id).Get()
if myAppliance is None or type(myAppliance) is not Appliance:
printer.out("Template not found")
else:
table = Texttable(800)
table.set_cols_dtype(["t","t","t","t","t","t","t","t","t","t"])
table.header(["Id", "Name", "Version", "OS", "Created", "Last modified", "# Imgs", "Updates", "Imp", "Shared"])
table.add_row([myAppliance.dbId, myAppliance.name, str(myAppliance.version), myAppliance.distributionName+" "+myAppliance.archName,
myAppliance.created.strftime("%Y-%m-%d %H:%M:%S"), myAppliance.lastModified.strftime("%Y-%m-%d %H:%M:%S"), len(myAppliance.imageUris.uri),myAppliance.nbUpdates, "X" if myAppliance.imported else "", "X" if myAppliance.shared else ""])
print table.draw() + "\n"
if doArgs.no_confirm:
self.api.Users(self.login).Appliances(myAppliance.dbId).Delete()
printer.out("Template deleted", printer.OK)
elif generics_utils.query_yes_no("Do you really want to delete template with id "+str(myAppliance.dbId)):
self.api.Users(self.login).Appliances(myAppliance.dbId).Delete()
printer.out("Template deleted", printer.OK)
return 0
except ArgumentParserError as e:
printer.out("ERROR: In Arguments: "+str(e), printer.ERROR)
self.help_delete()
except Exception as e:
return handle_uforge_exception(e)
def help_delete(self):
doParser = self.arg_delete()
doParser.print_help()
def arg_clone(self):
doParser = ArgumentParser(prog=self.cmd_name+" clone", add_help = True, description="Clones the template. The clone is copying the meta-data of the template")
mandatory = doParser.add_argument_group("mandatory arguments")
mandatory.add_argument('--id', dest='id', required=True, help="the ID of the template to clone")
mandatory.add_argument('--name', dest='name', required=True, help="the name to use for the new cloned template")
mandatory.add_argument('--version', dest='version', required=True, help="the version to use for the cloned template")
return doParser
def do_clone(self, args):
try:
#add arguments
doParser = self.arg_clone()
doArgs = doParser.parse_args(shlex.split(args))
#if the help command is called, parse_args returns None object
if not doArgs:
return 2
#call UForge API
printer.out("Clonnig template with id ["+doArgs.id+"] ...")
myAppliance = appliance()
myAppliance.name = doArgs.name
myAppliance.version = doArgs.version
rAppliance = self.clone_appliance(doArgs.id, myAppliance)
if type(rAppliance) is Appliance:
printer.out("Clonned successfully", printer.OK)
else:
printer.out("Clone error", printer.ERROR)
except ArgumentParserError as e:
printer.out("ERROR: In Arguments: "+str(e), printer.ERROR)
self.help_clone()
except Exception as e:
return handle_uforge_exception(e)
def clone_appliance(self, id, appliance):
return self.api.Users(self.login).Appliances(id).Clones.Clone(appliance)
def help_clone(self):
doParser = self.arg_clone()
doParser.print_help()
```
#### File: unit/utils/test_hammr_utils.py
```python
__author__ = 'UshareSoft'
import unittest
import json
import yaml
import paramiko
from mock import patch
from tests.unit.utils.file_utils import find_relative_path_for
from hammr.utils import constants
from hammr.utils import hammr_utils
from uforge.application import Api
class TestFiles(unittest.TestCase):
def test_pythonObjectFromYamlParsingShouldBeTheSameAsJsonParsing(self):
# Given
json_path = find_relative_path_for("tests/integration/data/test-parsing.json")
yaml_path = find_relative_path_for("tests/integration/data/test-parsing.yml")
# When
json_data = json.load(open(json_path))
yaml_data = yaml.load(open(yaml_path))
# Then
self.assertEqual(json_data, yaml_data,
'Error : python object made from json parsing is different from yaml parsing')
@patch("hammr.utils.hammr_utils.validate_configurations_file")
@patch("hammr.utils.hammr_utils.check_extension_is_json")
def test_validate_if_check_extension_is_json_return_true(self, mock_check_extension_is_json,
mock_validate_configurations_file):
# Given
json_path = "test.json"
mock_check_extension_is_json.return_value = True
# When
hammr_utils.validate(json_path)
# Then
mock_validate_configurations_file.assert_called_with(json_path, isJson=True)
@patch("hammr.utils.hammr_utils.validate_configurations_file")
@patch("hammr.utils.hammr_utils.check_extension_is_json")
def test_validate_if_check_extension_is_json_return_false(self, mock_check_extension_is_json,
mock_validate_configurations_file):
# Given
json_path = "test.json"
mock_check_extension_is_json.return_value = False
# When
hammr_utils.validate(json_path)
# Then
mock_validate_configurations_file.assert_called_with(json_path, isJson=False)
def test_check_extension_is_json_return_true_if_extension_is_json(self):
# Given
json_path = "test.json"
# When
is_json = hammr_utils.check_extension_is_json(json_path)
# Then
self.assertTrue(is_json)
def test_check_extension_is_json_return_false_if_extension_is_yaml(self):
# Given
yaml_path = "test.yaml"
# When
is_json = hammr_utils.check_extension_is_json(yaml_path)
# Then
self.assertFalse(is_json)
def test_check_extension_is_json_return_false_if_extension_is_yml(self):
# Given
yml_path = "test.yml"
# When
is_json = hammr_utils.check_extension_is_json(yml_path)
# Then
self.assertFalse(is_json)
def test_check_extension_raise_exception_if_extension_is_not_supported(self):
# Given
unsupported_extension_path = "test.uss"
# When
# Then
self.assertRaises(Exception, hammr_utils.check_extension_is_json, unsupported_extension_path)
def test_extract_appliance_id_return_correct_id_for_correct_uri(self):
# Given
tested_uri = "users/14/appliances/12/whatever/8/testing"
# When
appliance_id = hammr_utils.extract_appliance_id(tested_uri)
# Then
self.assertEqual(12, appliance_id)
def test_extract_appliance_id_return_none_for_non_appliance_uri(self):
# Given
tested_uri = "users/myuser/scannedinstances/18/scans/15/testing"
# When
appliance_id = hammr_utils.extract_appliance_id(tested_uri)
# Then
self.assertIsNone(appliance_id)
def test_extract_scan_id_return_correct_id_for_correct_uri(self):
# Given
tested_uri = "users/14/scannedinstances/12/scans/108/whatever/18/testing"
# When
scan_id = hammr_utils.extract_scan_id(tested_uri)
# Then
self.assertEqual(108, scan_id)
def test_extract_scan_id_return_none_for_non_scan_uri(self):
# Given
tested_uri = "users/14/appliances/12/whatever/8/testing"
# When
scan_id = hammr_utils.extract_scan_id(tested_uri)
# Then
self.assertIsNone(scan_id)
def test_extract_scannedinstance_id_return_correct_id_for_correct_uri(self):
# Given
tested_uri = "users/14/scannedinstances/120/scans/108/whatever/18/testing"
# When
scannedinstance_id = hammr_utils.extract_scannedinstance_id(tested_uri)
# Then
self.assertEqual(120, scannedinstance_id)
def test_extract_scannedinstance_id_return_non_for_nonscan_uri(self):
# Given
tested_uri = "users/14/appliances/12/whatever/8/testing"
# When
scannedinstance_id = hammr_utils.extract_scannedinstance_id(tested_uri)
# Then
self.assertIsNone(scannedinstance_id)
def test_is_uri_based_on_scan_return_true_for_scan_uri(self):
# Given
tested_uri = "users/myuser/scannedinstances/120/scans/108/images/12"
# When
is_scan_uri = hammr_utils.is_uri_based_on_scan(tested_uri)
# Then
self.assertTrue(is_scan_uri)
def test_is_uri_based_on_scan_return_false_for_appliance_uri(self):
# Given
tested_uri = "users/14/appliances/102/images/8"
# When
is_scan_uri = hammr_utils.is_uri_based_on_scan(tested_uri)
# Then
self.assertFalse(is_scan_uri)
def test_is_uri_based_on_appliance_return_true_for_app_uri(self):
# Given
tested_uri = "users/14/appliances/102/images/8"
# When
is_appliance_uri = hammr_utils.is_uri_based_on_appliance(tested_uri)
# Then
self.assertTrue(is_appliance_uri)
def test_is_uri_based_on_appliance_return_false_for_scan_uri(self):
# Given
tested_uri = "users/myuser/scannedinstances/120/scans/108/images/12"
# When
is_appliance_uri = hammr_utils.is_uri_based_on_appliance(tested_uri)
# Then
self.assertFalse(is_appliance_uri)
@patch("ussclicore.utils.download_utils.Download")
@patch("ussclicore.utils.generics_utils.get_uforge_url_from_ws_url")
@patch("os.mkdir")
def test_download_binary_in_local_temp_dir_download_with_good_url_and_directory(self, mock_mkdir, mock_get_uforge_url, mock_download):
# Given
api = Api("url", username="username", password="password", headers=None,
disable_ssl_certificate_validation=False, timeout=constants.HTTP_TIMEOUT)
mock_get_uforge_url.return_value = "/url"
# When
local_binary_path = hammr_utils.download_binary_in_local_temp_dir(api, "/tmp/local/temp/dir", "/uri/binary", "binaryName")
# Then
mock_download.assert_called_with("/url/uri/binary", "/tmp/local/temp/dir/binaryName", not api.getDisableSslCertificateValidation())
self.assertEqual(local_binary_path, "/tmp/local/temp/dir/binaryName")
@patch("paramiko.SFTPClient.from_transport")
@patch("paramiko.SFTPClient")
@patch("paramiko.Transport")
@patch("paramiko.SSHClient.connect")
def test_upload_binary_to_client_use_put_from_paramiko_SFTPClient(self, mock_connect, mock_transport, mock_sftp_client, mock_paramiko_from_transport):
# Given
mock_paramiko_from_transport.return_value = mock_sftp_client
# When
hammr_utils.upload_binary_to_client("hostname", 22, "username", "password",
"/tmp/local/temp/dir/binaryName", "/tmp/uri/binary", None)
# Then
mock_transport.assert_called_with(("hostname", 22))
mock_sftp_client.put.assert_called_with("/tmp/local/temp/dir/binaryName", "/tmp/uri/binary")
mock_connect.assert_called_with("hostname", 22, "username", "password", None)
@patch("paramiko.SSHClient.exec_command")
def test_launch_binary_call_exec_command_with_given_command(self, mock_exec_command):
# Given
mock_exec_command.return_value = "stdin", "stdout", "stderr"
# When
hammr_utils.launch_binary(paramiko.SSHClient(), "command to launch")
# Then
mock_exec_command.assert_called_with("command to launch")
def test_validate_builder_file_with_no_template_id_return_None_when_stack_is_missing(self):
# Given
yaml_path = find_relative_path_for("tests/integration/data/publish_builder.yml")
# When
data = hammr_utils.validate_builder_file_with_no_template_id(yaml_path)
# Then
self.assertEqual(data, None)
def test_validate_builder_file_with_no_template_id_return_None_when_builder_is_missing(self):
# Given
yaml_path = find_relative_path_for("tests/integration/data/test-parsing.yml")
# When
data = hammr_utils.validate_builder_file_with_no_template_id(yaml_path)
# Then
self.assertEqual(data, None)
def test_validate_validate_builder_file_with_no_template_id_return_data_when_stack_and_builder_are_not_missing(self):
# Given
json_path = find_relative_path_for("tests/integration/data/templatePXE.json")
# When
data = hammr_utils.validate_builder_file_with_no_template_id(json_path)
# Then
self.assertNotEqual(data, None)
def test_validate_builder_file_with_no_template_id_return_None_when_stack_and_builder_are_missing(self):
# Given
yaml_path = find_relative_path_for("tests/integration/data/deploy_aws.yml")
# When
data = hammr_utils.validate_builder_file_with_no_template_id(yaml_path)
# Then
self.assertEqual(data, None)
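# ---------------------------------------------------------------------------
# Illustrative sketch only -- an assumption, not the real hammr_utils
# implementation. The URI helpers exercised above could be built on a regex
# such as the following, returning the numeric id (or None for non-scan URIs):
#
#   _SCAN_URI = re.compile(r'scannedinstances/(?P<instance>\d+)/scans/(?P<scan>\d+)')
#
#   def extract_scannedinstance_id(uri):
#       match = _SCAN_URI.search(uri)
#       return int(match.group('instance')) if match else None
# ---------------------------------------------------------------------------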
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jgwerner/create-helm-template-poc",
"score": 2
} |
#### File: create-helm-template-poc/templategenerator/__init__.py
```python
import os
from flask import Flask
project_dir = os.path.dirname(os.path.abspath(__file__))
def create_app():
"""Creates flask app and pushes context.
Returns:
app: the Flask application object
"""
app = Flask(__name__)
app.app_context().push()
return app
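# Example usage (illustrative only; the actual entry point is not shown in this file):
#
#   app = create_app()
#   app.run(host='0.0.0.0', port=5000)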
``` |
{
"source": "jgwerner/django",
"score": 2
} |
#### File: illumidesk/components/stripe.py
```python
from django.conf import settings
def get_stripe_public_key():
return settings.STRIPE_LIVE_PUBLIC_KEY if settings.STRIPE_LIVE_MODE else settings.STRIPE_TEST_PUBLIC_KEY
def get_stripe_secret_key():
return settings.STRIPE_LIVE_SECRET_KEY if settings.STRIPE_LIVE_MODE else settings.STRIPE_TEST_SECRET_KEY
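# Illustrative usage (assumed call site, not part of this module): the secret key is
# typically handed to the stripe client before making API calls, e.g.
#
#   import stripe
#   stripe.api_key = get_stripe_secret_key()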
```
#### File: illumidesk/teams/decorators.py
```python
from functools import wraps
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from .roles import user_can_access_team, user_can_administer_team
from .models import Team
def login_and_team_required(view_func):
return _get_decorated_function(view_func, user_can_access_team)
def team_admin_required(view_func):
return _get_decorated_function(view_func, user_can_administer_team)
def _get_decorated_function(view_func, permission_test_function):
@wraps(view_func)
def _inner(request, team_slug, *args, **kwargs):
user = request.user
if not user.is_authenticated:
return HttpResponseRedirect('{}?next={}'.format(reverse('account_login'), request.path))
else:
team = get_object_or_404(Team, slug=team_slug)
if permission_test_function(user, team):
request.team = team
return view_func(request, team_slug, *args, **kwargs)
else:
# treat not having access to a team like a 404 to avoid accidentally leaking information
return render(request, '404.html', status=404)
return _inner
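# Illustrative usage (assumed view, not part of this module): team-scoped views take
# (request, team_slug) and can rely on request.team being set by the decorator, e.g.
#
#   @login_and_team_required
#   def team_home(request, team_slug):
#       return render(request, 'teams/home.html', {'team': request.team})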
```
#### File: illumidesk/teams/forms.py
```python
from allauth.account.forms import SignupForm
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from illumidesk.teams.util import get_next_unique_team_slug
from .models import Team, Invitation
class TeamSignupForm(SignupForm):
invitation_id = forms.CharField(widget=forms.HiddenInput(), required=False)
team_name = forms.CharField(
label=_("Team Name"),
max_length=100,
widget=forms.TextInput(attrs={'placeholder': _('My Team Name')}),
required=False,
)
def clean_team_name(self):
team_name = self.cleaned_data['team_name']
invitation_id = self.cleaned_data.get('invitation_id')
# if invitation is not set then team name is required
if not invitation_id and not team_name:
raise forms.ValidationError(_('Team Name is required!'))
return team_name
def clean_invitation_id(self):
invitation_id = self.cleaned_data.get('invitation_id')
if invitation_id:
try:
invite = Invitation.objects.get(id=invitation_id)
if invite.is_accepted:
raise forms.ValidationError(_(
'It looks like that invitation link has expired. '
'Please request a new invitation or sign in to continue.'
))
except (Invitation.DoesNotExist, ValidationError):
# ValidationError is raised if the ID isn't a valid UUID, which should be treated the same
# as not found
raise forms.ValidationError(_(
'That invitation could not be found. '
'Please double check your invitation link or sign in to continue.'
))
return invitation_id
def save(self, request):
invitation_id = self.cleaned_data['invitation_id']
team_name = self.cleaned_data['team_name']
user = super().save(request)
if invitation_id:
assert not team_name
else:
slug = get_next_unique_team_slug(team_name)
team = Team.objects.create(name=team_name, slug=slug)
team.members.add(user, through_defaults={'role': 'admin'})
team.save()
return user
class TeamChangeForm(forms.ModelForm):
class Meta:
model = Team
fields = ('name', 'slug')
```
#### File: illumidesk/teams/invitations.py
```python
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
def send_invitation(invitation):
email_context = {
'invitation': invitation,
}
send_mail(
subject="You're invited to {}!".format(settings.PROJECT_METADATA['NAME']),
message=render_to_string('illumidesk/email/invitation.txt', context=email_context),
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[invitation.email],
fail_silently=False,
html_message=render_to_string('illumidesk/email/invitation.html', context=email_context),
)
def process_invitation(invitation, user):
invitation.team.members.add(user, through_defaults={'role': invitation.role})
invitation.is_accepted = True
invitation.accepted_by = user
invitation.save()
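# Illustrative call sites (assumed, not shown in this module): send_invitation(invitation)
# is typically called right after a team admin creates an Invitation, while
# process_invitation(invitation, user) runs once the invited user signs up or logs in.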
```
#### File: teams/templatetags/team_tags.py
```python
from django import template
from illumidesk.teams.roles import user_can_access_team
from illumidesk.teams.roles import user_can_administer_team
register = template.Library()
@register.filter
def is_member_of(user, team):
return user_can_access_team(user, team)
@register.filter
def is_admin_of(user, team):
return user_can_administer_team(user, team)
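# Illustrative template usage (assumed template, not part of this module):
#
#   {% load team_tags %}
#   {% if request.user|is_member_of:team %} ... {% endif %}
#   {% if request.user|is_admin_of:team %} ... {% endif %}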
```
#### File: illumidesk/users/forms.py
```python
from django import forms
from django.contrib.auth.forms import get_user_model
from django.contrib.auth.forms import UserChangeForm
from django.contrib.auth.forms import UserCreationForm
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import IllumiDeskUser
User = get_user_model()
class UserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = User
class UserCreationForm(UserCreationForm):
error_message = UserCreationForm.error_messages.update(
{'duplicate_username': _('This username has already been taken.')}
)
class Meta(UserCreationForm.Meta):
model = User
def clean_username(self):
username = self.cleaned_data['username']
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise ValidationError(self.error_messages['duplicate_username'])
class IllumiDeskUserChangeForm(UserChangeForm):
email = forms.EmailField(required=True)
class Meta:
model = IllumiDeskUser
fields = ('email', 'first_name', 'last_name')
class UploadAvatarForm(forms.Form):
avatar = forms.FileField()
``` |
{
"source": "jgwerner/illumidesk.next",
"score": 2
} |
#### File: illumidesk/lti13/handlers.py
```python
import os
import json
from pathlib import Path
import pem
from Crypto.PublicKey import RSA
from jupyterhub.handlers import BaseHandler
from illumidesk.authenticators.utils import LTIUtils
from illumidesk.lti13.auth import get_jwk
from tornado import web
from urllib.parse import urlencode
from urllib.parse import quote
class LTI13ConfigHandler(BaseHandler):
"""
Handles JSON configuration file for LTI 1.3
"""
async def get(self) -> None:
"""
Gets the JSON config which is used by LTI platforms
to install the external tool.
- The extensions key contains settings for specific vendors, such as canvas,
moodle, edx, among others.
- The tool uses public settings by default. Users that wish to install the tool with
private settings should either copy/paste the json or toggle the application to private
after it is installed with the platform.
- Usernames are obtained by first attempting to get and normalize values sent when
tools are installed with public settings. If private, the username is set using the
        anonymized user data when requests are sent with private installation settings.
"""
lti_utils = LTIUtils()
self.set_header('Content-Type', 'application/json')
# get the origin protocol
protocol = lti_utils.get_client_protocol(self)
self.log.debug('Origin protocol is: %s' % protocol)
# build the full target link url value required for the jwks endpoint
target_link_url = f'{protocol}://{self.request.host}/'
self.log.debug('Target link url is: %s' % target_link_url)
keys = {
'title': 'IllumiDesk',
'scopes': [
'https://purl.imsglobal.org/spec/lti-ags/scope/lineitem',
'https://purl.imsglobal.org/spec/lti-ags/scope/lineitem.readonly',
'https://purl.imsglobal.org/spec/lti-ags/scope/result.readonly',
'https://purl.imsglobal.org/spec/lti-ags/scope/score',
'https://purl.imsglobal.org/spec/lti-nrps/scope/contextmembership.readonly',
'https://canvas.instructure.com/lti/public_jwk/scope/update',
'https://canvas.instructure.com/lti/data_services/scope/create',
'https://canvas.instructure.com/lti/data_services/scope/show',
'https://canvas.instructure.com/lti/data_services/scope/update',
'https://canvas.instructure.com/lti/data_services/scope/list',
'https://canvas.instructure.com/lti/data_services/scope/destroy',
'https://canvas.instructure.com/lti/data_services/scope/list_event_types',
'https://canvas.instructure.com/lti/feature_flags/scope/show',
'https://canvas.instructure.com/lti/account_lookup/scope/show',
],
'extensions': [
{
'platform': 'canvas.instructure.com',
'settings': {
'platform': 'canvas.instructure.com',
'placements': [
{
'placement': 'course_navigation',
'message_type': 'LtiResourceLinkRequest',
'windowTarget': '_blank',
'target_link_uri': target_link_url,
'custom_fields': {
'email': '$Person.email.primary',
'lms_user_id': '$User.id',
}, # noqa: E231
},
{
'placement': 'assignment_selection',
'message_type': 'LtiResourceLinkRequest',
'target_link_uri': target_link_url,
},
],
},
'privacy_level': 'public',
}
],
'description': 'IllumiDesk Learning Tools Interoperability (LTI) v1.3 tool.',
'custom_fields': {
'email': '$Person.email.primary',
'lms_user_id': '$User.id',
}, # noqa: E231
'public_jwk_url': f'{target_link_url}hub/lti13/jwks',
'target_link_uri': target_link_url,
'oidc_initiation_url': f'{target_link_url}hub/oauth_login',
}
self.write(json.dumps(keys))
class LTI13JWKSHandler(BaseHandler):
"""
Handler to serve our JWKS
"""
def get(self) -> None:
"""
- This method requires that the LTI13_PRIVATE_KEY environment variable
is set with the full path to the RSA private key in PEM format.
"""
if not os.environ.get('LTI13_PRIVATE_KEY'):
raise EnvironmentError('LTI13_PRIVATE_KEY environment variable not set')
key_path = os.environ.get('LTI13_PRIVATE_KEY')
# check the pem permission
if not os.access(key_path, os.R_OK):
            self.log.error(f'The pem file {key_path} cannot be read')
raise PermissionError()
private_key = pem.parse_file(key_path)
public_key = RSA.import_key(private_key[0].as_text()).publickey().exportKey()
self.log.debug('public_key is %s' % public_key)
jwk = get_jwk(public_key)
self.log.debug('the jwks is %s' % jwk)
keys_obj = {'keys': []}
keys_obj['keys'].append(jwk)
# we do not need to use json.dumps because tornado is converting our dict automatically and adding the content-type as json
# https://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write
self.write(keys_obj)
class FileSelectHandler(BaseHandler):
@web.authenticated
async def get(self):
"""Return a sorted list of notebooks recursively found in shared path"""
user = self.current_user
auth_state = await user.get_auth_state()
self.log.debug('Current user for file select handler is %s' % user.name)
# decoded = self.authenticator.decoded
self.course_id = auth_state['course_id']
self.grader_name = f'grader-{self.course_id}'
self.grader_root = Path(
'/home',
self.grader_name,
)
self.course_root = self.grader_root / self.course_id
self.course_shared_folder = Path('/shared', self.course_id)
link_item_files = []
notebooks = list(self.course_shared_folder.glob('**/*.ipynb'))
notebooks.sort()
for f in notebooks:
fpath = str(f.relative_to(self.course_shared_folder))
self.log.debug('Getting files fpath %s' % fpath)
if fpath.startswith('.') or f.name.startswith('.'):
self.log.debug('Ignoring file %s' % fpath)
continue
# generate the assignment link that uses gitpuller
user_redirect_path = quote('/user-redirect/git-pull', safe='')
assignment_link_path = f'?next={user_redirect_path}'
urlpath_workspace = f'tree/{self.course_id}/{fpath}'
self.log.debug(f'urlpath_workspace:{urlpath_workspace}')
query_params_for_git = [
('repo', f'/home/jovyan/shared/{self.course_id}'),
('branch', 'master'),
('urlpath', urlpath_workspace),
]
encoded_query_params_without_safe_chars = quote(urlencode(query_params_for_git), safe='')
url = f'https://{self.request.host}/{assignment_link_path}?{encoded_query_params_without_safe_chars}'
self.log.debug('URL to fetch files is %s' % url)
link_item_files.append(
{
'path': fpath,
'content_items': json.dumps(
{
"@context": "http://purl.imsglobal.org/ctx/lti/v1/ContentItem",
"@graph": [
{
"@type": "LtiLinkItem",
"@id": url,
"url": url,
"title": f.name,
"text": f.name,
"mediaType": "application/vnd.ims.lti.v1.ltilink",
"placementAdvice": {"presentationDocumentTarget": "frame"},
}
],
}
),
}
)
self.log.debug('Rendering file-select.html template')
html = self.render_template(
'file_select.html',
files=link_item_files,
action_url=auth_state['launch_return_url'],
)
self.finish(html)
```
#### File: illumidesk/apis/test_nbgrader_service_helper.py
```python
import os
from pathlib import Path
import shutil
import pytest
from unittest.mock import patch
from illumidesk.apis.nbgrader_service import NbGraderServiceHelper
from illumidesk.apis.nbgrader_service import nbgrader_format_db_url
class TestNbGraderServiceBaseHelper:
def setup_method(self, method):
"""
Setup method to initialize objects/properties used for the tests
"""
self.course_id = 'PS- ONE'
self.sut = NbGraderServiceHelper(self.course_id)
def test_course_id_required_otherwise_raises_an_error(self):
"""
        Does the initializer raise a ValueError for an empty course_id?
"""
with pytest.raises(ValueError):
NbGraderServiceHelper('')
def test_course_id_is_normalized_in_the_constructor(self):
"""
        Is the course_id value normalized?
"""
assert self.sut.course_id == 'ps-one'
@patch('shutil.chown')
@patch('os.makedirs')
@patch('illumidesk.apis.nbgrader_service.Gradebook')
def test_create_assignment_in_nbgrader_uses_the_assignment_name_normalized(
self, mock_gradebook, mock_makedirs, mock_chown
):
"""
        Is the assignment created with a normalized name?
"""
self.sut.create_assignment_in_nbgrader('LAB 1')
assert mock_gradebook.return_value.__enter__.return_value.update_or_create_assignment.called
assert mock_gradebook.return_value.__enter__.return_value.update_or_create_assignment.call_args[0][0] == 'lab1'
@patch('os.makedirs')
@patch('pathlib.Path.mkdir')
@patch('illumidesk.apis.nbgrader_service.Gradebook')
def test_create_assignment_in_nbgrader_method_fixes_source_directory_permissions(
self, mock_gradebook, mock_path_mkdir, mock_makedirs
):
"""
        Is the assignment source directory created and fixed with the correct file permissions?
"""
with patch.object(shutil, 'chown') as mock_chown:
self.sut.create_assignment_in_nbgrader('lab-abc')
source_dir = os.path.abspath(Path(self.sut.course_dir, 'source'))
mock_chown.assert_any_call(source_dir, user=10001, group=100)
@patch('os.makedirs')
@patch('pathlib.Path.mkdir')
@patch('illumidesk.apis.nbgrader_service.Gradebook')
def test_create_assignment_in_nbgrader_method_fixes_assignment_directory_permissions(
self, mock_gradebook, mock_path_mkdir, mock_makedirs
):
"""
        Is the assignment directory fixed with the correct file permissions?
"""
with patch.object(shutil, 'chown') as mock_chown:
self.sut.create_assignment_in_nbgrader('lab-abc')
assignment_dir = os.path.abspath(Path(self.sut.course_dir, 'source', 'lab-abc'))
mock_chown.assert_any_call(assignment_dir, user=10001, group=100)
@patch('shutil.chown')
@patch('pathlib.Path.mkdir')
@patch('illumidesk.apis.nbgrader_service.Gradebook')
def test_add_user_to_nbgrader_gradebook_raises_error_when_empty(self, mock_gradebook, mock_path_mkdir, mock_chown):
"""
        Does the add_user_to_nbgrader_gradebook method reject an empty username or lms user id?
"""
with pytest.raises(ValueError):
self.sut.add_user_to_nbgrader_gradebook(username='', lms_user_id='abc123')
with pytest.raises(ValueError):
self.sut.add_user_to_nbgrader_gradebook(username='user1', lms_user_id='')
class TestNbGraderServiceHelper:
def test_nbgrader_format_db_url_method_uses_env_vars_to_get_db_url(self, monkeypatch):
monkeypatch.setattr('illumidesk.apis.nbgrader_service.nbgrader_db_host', 'test_host')
monkeypatch.setattr('illumidesk.apis.nbgrader_service.nbgrader_db_password', '<PASSWORD>')
monkeypatch.setattr('illumidesk.apis.nbgrader_service.nbgrader_db_user', 'test_user')
monkeypatch.setattr('illumidesk.apis.nbgrader_service.org_name', 'org-dummy')
assert nbgrader_format_db_url('Course 1') == 'postgresql://test_user:test_pwd@test_host:5432/org-dummy_course1'
```
#### File: illumidesk/authenticators/test_setup_course_hook.py
```python
import json
import os
from jupyterhub.auth import Authenticator
import pytest
from tornado.web import RequestHandler
from tornado.httpclient import AsyncHTTPClient
from unittest.mock import AsyncMock
from unittest.mock import patch
from illumidesk.apis.jupyterhub_api import JupyterHubAPI
from illumidesk.apis.announcement_service import AnnouncementService
from illumidesk.apis.nbgrader_service import NbGraderServiceHelper
from illumidesk.authenticators.authenticator import LTI11Authenticator
from illumidesk.authenticators.authenticator import LTI13Authenticator
from illumidesk.authenticators.authenticator import setup_course_hook
from illumidesk.authenticators.utils import LTIUtils
@pytest.mark.asyncio
async def test_setup_course_hook_is_assigned_to_lti11_authenticator_post_auth_hook():
"""
Does the setup course hook get assigned to the post_auth_hook for the LTI11Authenticator?
"""
authenticator = LTI11Authenticator(post_auth_hook=setup_course_hook)
assert authenticator.post_auth_hook == setup_course_hook
@pytest.mark.asyncio
async def test_setup_course_hook_is_assigned_to_lti13_authenticator_post_auth_hook():
"""
Does the setup course hook get assigned to the post_auth_hook for the LTI13Authenticator?
"""
authenticator = LTI13Authenticator(post_auth_hook=setup_course_hook)
assert authenticator.post_auth_hook == setup_course_hook
@pytest.mark.asyncio()
async def test_setup_course_hook_raises_environment_error_with_missing_org(
monkeypatch, make_auth_state_dict, setup_course_hook_environ, make_mock_request_handler
):
"""
Is an environment error raised when the organization name is missing when calling
the setup_course_hook function?
"""
monkeypatch.setenv('ORGANIZATION_NAME', '')
local_authenticator = Authenticator(post_auth_hook=setup_course_hook)
local_handler = make_mock_request_handler(RequestHandler, authenticator=local_authenticator)
local_authentication = make_auth_state_dict()
with pytest.raises(EnvironmentError):
await local_authenticator.post_auth_hook(local_authenticator, local_handler, local_authentication)
@pytest.mark.asyncio()
async def test_setup_course_hook_calls_normalize_strings(
auth_state_dict,
setup_course_environ,
setup_course_hook_environ,
make_mock_request_handler,
make_http_response,
mock_nbhelper,
):
"""
Does the setup_course_hook return normalized strings for the username and the course_id?
"""
local_authenticator = Authenticator(post_auth_hook=setup_course_hook)
local_handler = make_mock_request_handler(RequestHandler, authenticator=local_authenticator)
local_authentication = auth_state_dict
with patch.object(LTIUtils, 'normalize_string', return_value='intro101') as mock_normalize_string:
with patch.object(JupyterHubAPI, 'add_student_to_jupyterhub_group', return_value=None):
with patch.object(
AsyncHTTPClient, 'fetch', return_value=make_http_response(handler=local_handler.request)
):
_ = await setup_course_hook(local_authenticator, local_handler, local_authentication)
assert mock_normalize_string.called
@pytest.mark.asyncio()
async def test_setup_course_hook_raises_json_decode_error_without_client_fetch_response(
monkeypatch,
setup_course_environ,
setup_course_hook_environ,
make_auth_state_dict,
make_mock_request_handler,
make_http_response,
mock_nbhelper,
):
"""
    Does the setup course hook raise a json decode error if the response from the setup course
microservice is null or empty?
"""
local_authenticator = Authenticator(post_auth_hook=setup_course_hook)
local_handler = make_mock_request_handler(RequestHandler, authenticator=local_authenticator)
local_authentication = make_auth_state_dict()
with patch.object(JupyterHubAPI, 'add_student_to_jupyterhub_group', return_value=None):
with patch.object(
AsyncHTTPClient, 'fetch', return_value=make_http_response(handler=local_handler.request, body=None)
):
with pytest.raises(json.JSONDecodeError):
await setup_course_hook(local_authenticator, local_handler, local_authentication)
@pytest.mark.asyncio()
async def test_setup_course_hook_calls_add_student_to_jupyterhub_group_when_role_is_learner(
setup_course_environ,
setup_course_hook_environ,
make_auth_state_dict,
make_http_response,
make_mock_request_handler,
mock_nbhelper,
):
"""
Is the jupyterhub_api add student to jupyterhub group function called when the user role is
the learner role?
"""
local_authenticator = Authenticator(post_auth_hook=setup_course_hook)
local_handler = make_mock_request_handler(RequestHandler, authenticator=local_authenticator)
local_authentication = make_auth_state_dict()
with patch.object(
JupyterHubAPI, 'add_student_to_jupyterhub_group', return_value=None
) as mock_add_student_to_jupyterhub_group:
with patch.object(AsyncHTTPClient, 'fetch', return_value=make_http_response(handler=local_handler.request)):
result = await setup_course_hook(local_authenticator, local_handler, local_authentication)
assert mock_add_student_to_jupyterhub_group.called
@patch('shutil.chown')
@patch('pathlib.Path.mkdir')
@patch('illumidesk.apis.nbgrader_service.Gradebook')
@pytest.mark.asyncio()
async def test_setup_course_hook_calls_add_user_to_nbgrader_gradebook_when_role_is_learner(
mock_mkdir,
mock_chown,
mock_gradebook,
monkeypatch,
setup_course_environ,
setup_course_hook_environ,
make_auth_state_dict,
make_mock_request_handler,
make_http_response,
):
"""
Is the jupyterhub_api add user to nbgrader gradebook function called when the user role is
the learner role?
"""
local_authenticator = Authenticator(post_auth_hook=setup_course_hook)
local_handler = make_mock_request_handler(RequestHandler, authenticator=local_authenticator)
local_authentication = make_auth_state_dict()
with patch.object(JupyterHubAPI, 'add_student_to_jupyterhub_group', return_value=None):
with patch.object(
NbGraderServiceHelper, 'add_user_to_nbgrader_gradebook', return_value=None
) as mock_add_user_to_nbgrader_gradebook:
with patch.object(
AsyncHTTPClient, 'fetch', return_value=make_http_response(handler=local_handler.request)
):
await setup_course_hook(local_authenticator, local_handler, local_authentication)
assert mock_add_user_to_nbgrader_gradebook.called
@pytest.mark.asyncio()
async def test_setup_course_hook_calls_add_instructor_to_jupyterhub_group_when_role_is_instructor(
monkeypatch,
setup_course_environ,
setup_course_hook_environ,
make_auth_state_dict,
make_mock_request_handler,
make_http_response,
mock_nbhelper,
):
"""
Is the jupyterhub_api add instructor to jupyterhub group function called when the user role is
the instructor role?
"""
local_authenticator = Authenticator(post_auth_hook=setup_course_hook)
local_handler = make_mock_request_handler(RequestHandler, authenticator=local_authenticator)
local_authentication = make_auth_state_dict(user_role='Instructor')
with patch.object(
JupyterHubAPI, 'add_instructor_to_jupyterhub_group', return_value=None
) as mock_add_instructor_to_jupyterhub_group:
with patch.object(AsyncHTTPClient, 'fetch', return_value=make_http_response(handler=local_handler.request)):
await setup_course_hook(local_authenticator, local_handler, local_authentication)
assert mock_add_instructor_to_jupyterhub_group.called
@pytest.mark.asyncio()
async def test_setup_course_hook_calls_add_instructor_to_jupyterhub_group_when_role_is_TeachingAssistant(
monkeypatch,
setup_course_environ,
setup_course_hook_environ,
make_auth_state_dict,
make_mock_request_handler,
make_http_response,
mock_nbhelper,
):
"""
Is the jupyterhub_api add instructor to jupyterhub group function called when the user role is
    the TeachingAssistant role?
"""
local_authenticator = Authenticator(post_auth_hook=setup_course_hook)
local_handler = make_mock_request_handler(RequestHandler, authenticator=local_authenticator)
local_authentication = make_auth_state_dict(user_role='urn:lti:role:ims/lis/TeachingAssistant')
with patch.object(
JupyterHubAPI, 'add_instructor_to_jupyterhub_group', return_value=None
) as mock_add_instructor_to_jupyterhub_group:
with patch.object(AsyncHTTPClient, 'fetch', return_value=make_http_response(handler=local_handler.request)):
await setup_course_hook(local_authenticator, local_handler, local_authentication)
assert mock_add_instructor_to_jupyterhub_group.called
@pytest.mark.asyncio()
async def test_setup_course_hook_does_not_call_add_student_to_jupyterhub_group_when_role_is_instructor(
setup_course_environ,
setup_course_hook_environ,
make_auth_state_dict,
make_http_response,
make_mock_request_handler,
mock_nbhelper,
):
"""
    Is the jupyterhub_api add student to jupyterhub group function not called when the user role is
    the instructor role?
"""
local_authenticator = Authenticator(post_auth_hook=setup_course_hook)
local_handler = make_mock_request_handler(RequestHandler, authenticator=local_authenticator)
local_authentication = make_auth_state_dict(user_role='Instructor')
with patch.object(
JupyterHubAPI, 'add_student_to_jupyterhub_group', return_value=None
) as mock_add_student_to_jupyterhub_group:
with patch.object(
JupyterHubAPI, 'add_instructor_to_jupyterhub_group', return_value=None
) as mock_add_instructor_to_jupyterhub_group:
with patch.object(
AsyncHTTPClient, 'fetch', return_value=make_http_response(handler=local_handler.request)
):
await setup_course_hook(local_authenticator, local_handler, local_authentication)
assert not mock_add_student_to_jupyterhub_group.called
assert mock_add_instructor_to_jupyterhub_group.called
@pytest.mark.asyncio()
async def test_setup_course_hook_does_not_call_add_instructor_to_jupyterhub_group_when_role_is_learner(
setup_course_environ,
setup_course_hook_environ,
make_auth_state_dict,
make_http_response,
make_mock_request_handler,
mock_nbhelper,
):
"""
Is the jupyterhub_api add instructor to jupyterhub group function not called when the user role is
the learner role?
"""
local_authenticator = Authenticator(post_auth_hook=setup_course_hook)
local_handler = make_mock_request_handler(RequestHandler, authenticator=local_authenticator)
local_authentication = make_auth_state_dict()
with patch.object(JupyterHubAPI, 'add_student_to_jupyterhub_group', return_value=None):
with patch.object(
JupyterHubAPI, 'add_instructor_to_jupyterhub_group', return_value=None
) as mock_add_instructor_to_jupyterhub_group:
with patch.object(
AsyncHTTPClient,
'fetch',
return_value=make_http_response(handler=local_handler.request),
):
await setup_course_hook(local_authenticator, local_handler, local_authentication)
assert not mock_add_instructor_to_jupyterhub_group.called
@pytest.mark.asyncio()
async def test_setup_course_hook_initialize_data_dict(
setup_course_environ,
setup_course_hook_environ,
make_auth_state_dict,
make_http_response,
make_mock_request_handler,
mock_nbhelper,
):
"""
    Is the data dictionary correctly initialized when the org env-var is properly set, and is it consistent with the
course id value in the auth state?
"""
local_authenticator = Authenticator(post_auth_hook=setup_course_hook)
local_handler = make_mock_request_handler(RequestHandler, authenticator=local_authenticator)
local_authentication = make_auth_state_dict()
expected_data = {
'org': 'test-org',
'course_id': 'intro101',
'domain': '127.0.0.1',
}
with patch.object(JupyterHubAPI, 'add_student_to_jupyterhub_group', return_value=None):
with patch.object(AsyncHTTPClient, 'fetch', return_value=make_http_response(handler=local_handler.request)):
result = await setup_course_hook(local_authenticator, local_handler, local_authentication)
assert expected_data['course_id'] == result['auth_state']['course_id']
assert expected_data['org'] == os.environ.get('ORGANIZATION_NAME')
assert expected_data['domain'] == local_handler.request.host
@pytest.mark.asyncio()
async def test_setup_course_hook_calls_announcement_service_when_is_new_setup(
setup_course_hook_environ,
make_auth_state_dict,
make_http_response,
make_mock_request_handler,
mock_nbhelper,
):
"""
    Is the announcement service called on a new setup?
"""
local_authenticator = Authenticator(post_auth_hook=setup_course_hook)
local_handler = make_mock_request_handler(RequestHandler, authenticator=local_authenticator)
local_authentication = make_auth_state_dict()
response_args = {'handler': local_handler.request, 'body': {'is_new_setup': True}}
with patch.object(JupyterHubAPI, 'add_student_to_jupyterhub_group', return_value=None):
with patch.object(
AsyncHTTPClient,
'fetch',
side_effect=[
make_http_response(**response_args),
None,
], # noqa: E231
):
AnnouncementService.add_announcement = AsyncMock(return_value=None)
await setup_course_hook(local_authenticator, local_handler, local_authentication)
assert AnnouncementService.add_announcement.called
@pytest.mark.asyncio()
async def test_is_new_course_initiates_rolling_update(
setup_course_environ,
setup_course_hook_environ,
make_auth_state_dict,
make_http_response,
make_mock_request_handler,
mock_nbhelper,
):
"""
If the course is a new setup does it initiate a rolling update?
"""
local_authenticator = Authenticator(post_auth_hook=setup_course_hook)
local_handler = make_mock_request_handler(RequestHandler, authenticator=local_authenticator)
local_authentication = make_auth_state_dict()
response_args = {'handler': local_handler.request, 'body': {'is_new_setup': True}}
with patch.object(JupyterHubAPI, 'add_student_to_jupyterhub_group', return_value=None):
with patch.object(
AsyncHTTPClient,
'fetch',
side_effect=[
make_http_response(**response_args),
None,
], # noqa: E231
) as mock_client:
AnnouncementService.add_announcement = AsyncMock(return_value=None)
await setup_course_hook(local_authenticator, local_handler, local_authentication)
assert mock_client.called
mock_client.assert_any_call(
'http://setup-course:8000/rolling-update',
headers={'Content-Type': 'application/json'},
body='',
method='POST',
)
mock_client.assert_any_call(
'http://setup-course:8000',
headers={'Content-Type': 'application/json'},
body='{"org": "test-org", "course_id": "intro101", "domain": "127.0.0.1"}',
method='POST',
)
```
#### File: tests/illumidesk/conftest.py
```python
from io import StringIO
import json
import jwt
from nbgrader.api import Course
import pytest
import os
import secrets
import time
import uuid
from Crypto.PublicKey import RSA
from illumidesk.grades.sender_controlfile import LTIGradesSenderControlFile
from illumidesk.authenticators.utils import LTIUtils
from oauthlib.oauth1.rfc5849 import signature
from tornado.web import Application
from tornado.web import RequestHandler
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPResponse
from tornado.httputil import HTTPHeaders
from tornado.httputil import HTTPServerRequest
from typing import Dict
from typing import List
from unittest.mock import patch
from unittest.mock import Mock
@pytest.fixture(scope='module')
def auth_state_dict():
authenticator_auth_state = {
'name': 'student1',
'auth_state': {
'course_id': 'intro101',
'course_lineitems': 'my.platform.com/api/lti/courses/1/line_items',
'lms_user_id': '185d6c59731a553009ca9b59ca3a885100000',
'user_role': 'Learner',
},
}
return authenticator_auth_state
@pytest.fixture(scope='module')
def app():
class TestHandler(RequestHandler):
def get(self):
self.write("test")
def post(self):
self.write("test")
application = Application(
[
(r'/', TestHandler),
]
) # noqa: E231
return application
@pytest.fixture(scope='function')
def mock_nbhelper():
with patch('shutil.chown'):
with patch('pathlib.Path.mkdir'):
with patch('illumidesk.apis.nbgrader_service.Gradebook'):
with patch.multiple(
'illumidesk.apis.nbgrader_service.NbGraderServiceHelper',
# __init__=lambda x, y: None,
update_course=Mock(return_value=None),
create_database_if_not_exists=Mock(),
add_user_to_nbgrader_gradebook=Mock(return_value=None),
create_assignment_in_nbgrader=Mock(return_value=None),
get_course=Mock(
return_value=Course(
id='123', lms_lineitems_endpoint='canvas.docker.com/api/lti/courses/1/line_items'
)
),
) as mock_nb:
yield mock_nb
@pytest.fixture(scope='function')
def jupyterhub_api_environ(monkeypatch):
"""
    Set the environment variables used by the JupyterHub API
"""
monkeypatch.setenv('JUPYTERHUB_API_TOKEN', str(uuid.uuid4()))
monkeypatch.setenv('JUPYTERHUB_API_URL', 'https://localhost/hub/api')
monkeypatch.setenv('JUPYTERHUB_ADMIN_USER', 'admin')
@pytest.fixture(scope='function')
def lti11_config_environ(monkeypatch, pem_file):
"""
    Set the environment variables used for the LTI 1.1 configuration
"""
monkeypatch.setenv('LTI_CONSUMER_KEY', 'ild_test_consumer_key')
monkeypatch.setenv('LTI_SHARED_SECRET', 'ild_test_shared_secret')
@pytest.fixture(scope='function')
def lti13_config_environ(monkeypatch, pem_file):
"""
    Set the environment variables used for the LTI 1.3 configuration
"""
monkeypatch.setenv('LTI13_PRIVATE_KEY', pem_file)
monkeypatch.setenv('LTI13_TOKEN_URL', 'https://my.platform.domain/login/oauth2/token')
monkeypatch.setenv('LTI13_ENDPOINT', 'https://my.platform.domain/api/lti/security/jwks')
monkeypatch.setenv('LTI13_CLIENT_ID', 'https://my.platform.domain/login/oauth2/token')
monkeypatch.setenv('LTI13_AUTHORIZE_URL', 'https://my.platform.domain/api/lti/authorize_redirect')
@pytest.fixture(scope='function')
def lti11_complete_launch_args():
"""
    Valid set of arguments for a complete LTI 1.1 launch request.
"""
args = {
'oauth_callback': ['about:blank'.encode()],
'oauth_consumer_key': ['my_consumer_key'.encode()],
'oauth_signature_method': ['HMAC-SHA1'.encode()],
'oauth_timestamp': ['1585947271'.encode()],
'oauth_nonce': ['01fy8HKIASKuD9gK9vWUcBj9fql1nOCWfOLPzeylsmg'.encode()],
'oauth_signature': ['abc123'.encode()],
'oauth_version': ['1.0'.encode()],
'context_id': ['888efe72d4bbbdf90619353bb8ab5965ccbe9b3f'.encode()],
'context_label': ['intro101'.encode()],
'context_title': ['intro101'.encode()],
'custom_canvas_assignment_title': ['test-assignment'.encode()],
'custom_canvas_user_login_id': ['student1'.encode()],
'custom_worskpace_type': ['foo'.encode()],
'ext_roles': ['urn:lti:instrole:ims/lis/Learner'.encode()],
'launch_presentation_document_target': ['iframe'.encode()],
'launch_presentation_height': ['1000'.encode()],
'launch_presentation_locale': ['en'.encode()],
'launch_presentation_return_url': [
            'https://illumidesk.instructure.com/courses/161/external_content/success/external_tool_redirect'.encode()
],
'launch_presentation_width': ['1000'.encode()],
'lis_outcome_service_url': [
'http://www.imsglobal.org/developers/LTI/test/v1p1/common/tool_consumer_outcome.php?b64=MTIzNDU6OjpzZWNyZXQ='.encode()
],
        'lis_person_contact_email_primary': ['<EMAIL>'.encode()],
'lis_person_name_family': ['Bar'.encode()],
'lis_person_name_full': ['<NAME>'.encode()],
'lis_person_name_given': ['Foo'.encode()],
'lti_message_type': ['basic-lti-launch-request'.encode()],
'lis_result_sourcedid': ['feb-123-456-2929::28883'.encode()],
'lti_version': ['LTI-1p0'.encode()],
'resource_link_id': ['888efe72d4bbbdf90619353bb8ab5965ccbe9b3f'.encode()],
'resource_link_title': ['IllumiDesk'.encode()],
'roles': ['Learner'.encode()],
'tool_consumer_info_product_family_code': ['canvas'.encode()],
'tool_consumer_info_version': ['cloud'.encode()],
'tool_consumer_instance_contact_email': ['<EMAIL>'.encode()],
'tool_consumer_instance_guid': ['srnuz6h1U8kOMmETzoqZTJiPWzbPXIYkAUnnAJ4u:test-lms'.encode()],
'tool_consumer_instance_name': ['myorg'.encode()],
'user_id': ['185d6c59731a553009ca9b59ca3a885100000'.encode()],
'user_image': ['https://lms.example.com/avatar-50.png'.encode()],
}
return args
@pytest.fixture(scope='function')
def lti13_login_params():
"""
Creates a dictionary with k/v's that emulates an initial login request.
"""
client_id = '125900000000000085'
iss = 'https://platform.vendor.com'
login_hint = '<PASSWORD>'
target_link_uri = 'https://edu.example.com/hub'
lti_message_hint = '<KEY>'
params = {
'client_id': [client_id.encode()],
'iss': [iss.encode()],
'login_hint': [login_hint.encode()],
'target_link_uri': [target_link_uri.encode()],
'lti_message_hint': [lti_message_hint.encode()],
}
return params
@pytest.fixture(scope='function')
def lti13_auth_params():
"""
Creates a dictionary with k/v's that emulates a login request.
"""
client_id = '125900000000000081'
redirect_uri = 'https://acme.illumidesk.com/hub/oauth_callback'
lti_message_hint = '<KEY>'
login_hint = '<PASSWORD>'
    state = '<KEY>'
nonce = '38048502278109788461591832959'
params = {
'response_type': ['id_token'.encode()],
'scope': ['openid'.encode()],
'client_id': [client_id.encode()],
'redirect_uri': [redirect_uri.encode()],
'response_mode': ['form_post'.encode()],
'lti_message_hint': [lti_message_hint.encode()],
'prompt': ['none'.encode()],
'login_hint': [login_hint.encode()],
'state': [state.encode()],
'nonce': [nonce.encode()],
}
return params
@pytest.fixture(scope='function')
def lti13_auth_params_dict(lti13_auth_params):
"""
Return the initial LTI 1.3 authorization request as a dict
"""
utils = LTIUtils()
args = utils.convert_request_to_dict(lti13_auth_params)
return args
@pytest.fixture(scope='function')
def lti13_login_params_dict(lti13_login_params):
"""
Return the initial LTI 1.3 authorization request as a dict
"""
utils = LTIUtils()
args = utils.convert_request_to_dict(lti13_login_params)
return args
@pytest.fixture
def mock_jhub_user(request):
"""
    Creates an authenticated user mock by returning a wrapper function that lets us customize its creation
Usage:
user_mocked = mock_jhub_user(environ={'USER_ROLE': 'Instructor'})
or
user_mocked = mock_jhub_user()
or
user_mocked = mock_jhub_user(environ={'USER_ROLE': 'Instructor'}, auth_state=[])
"""
def _get_with_params(environ: dict = None, auth_state: list = []) -> Mock:
"""
wrapper function that accept environment and auth_state
Args:
auth_state: Helps with the `the get_auth_state` method
"""
mock_user = Mock()
mock_spawner = Mock()
# define the mock attrs
spawner_attrs = {'environment': environ or {}}
mock_spawner.configure_mock(**spawner_attrs)
attrs = {
'name': 'user1',
'spawner': mock_spawner,
"get_auth_state.side_effect": auth_state or [],
}
mock_user.configure_mock(**attrs)
return mock_user
return _get_with_params
@pytest.fixture
def pem_file(tmp_path):
"""
    Create a test private key file used with LTI 1.3 request/response flows
"""
key = RSA.generate(2048)
key_path = f'{tmp_path}/private.key'
with open(key_path, 'wb') as content_file:
content_file.write(key.exportKey('PEM'))
return key_path
@pytest.fixture
def grades_controlfile_reset_file_loaded():
"""
    Set flag to false to reload the control file used in the LTIGradesSenderControlFile class
"""
LTIGradesSenderControlFile.FILE_LOADED = False
@pytest.fixture(scope='function')
def setup_jupyterhub_db(monkeypatch):
"""
    Set the environment variables used for the JupyterHub database connection.
"""
monkeypatch.setenv('POSTGRES_JUPYTERHUB_DB', 'jupyterhub')
monkeypatch.setenv('POSTGRES_JUPYTERHUB_HOST', 'jupyterhub-db')
monkeypatch.setenv('POSTGRES_JUPYTERHUB_PORT', '5432')
monkeypatch.setenv('POSTGRES_JUPYTERHUB_USER', 'foobar')
monkeypatch.setenv('POSTGRES_JUPYTERHUB_PASSWORD', '<PASSWORD>')
@pytest.fixture(scope='function')
def setup_jupyterhub_config_base(monkeypatch):
"""
    Set the base environment variables used in the JupyterHub configuration.
"""
monkeypatch.setenv('JUPYTERHUB_BASE_URL', '/')
monkeypatch.setenv('JUPYTERHUB_SHUTDOWN_ON_LOGOUT', 'True')
monkeypatch.setenv('JUPYTERHUB_ADMIN_USER', 'admin0')
monkeypatch.setenv('SPAWNER_CPU_LIMIT', '0.5')
monkeypatch.setenv('SPAWNER_MEM_LIMIT', '2G')
monkeypatch.setenv('DOCKER_SPAWN_CMD', 'single_user_test.sh')
monkeypatch.setenv('DOCKER_NETWORK_NAME', 'test-network')
monkeypatch.setenv('EXCHANGE_DIR', '/path/to/exchange')
monkeypatch.setenv('DOCKER_NOTEBOOK_DIR', '/home/saturn')
monkeypatch.setenv('NB_NON_GRADER_UID', '1000')
@pytest.fixture(scope='function')
def setup_image_environ(monkeypatch):
"""
    Set the environment variables used to identify the end user image.
"""
monkeypatch.setenv('DOCKER_END_USER_IMAGE', 'standard_image')
monkeypatch.setenv('SPAWNER_CPU_LIMIT', '2G')
monkeypatch.setenv('SPAWNER_MEM_LIMIT', '1.0')
@pytest.fixture(scope='function')
def setup_course_environ(monkeypatch, tmp_path, jupyterhub_api_environ):
"""
    Set the environment variables used in the Course class
"""
monkeypatch.setenv('MNT_ROOT', str(tmp_path))
monkeypatch.setenv('NB_GRADER_UID', '10001')
monkeypatch.setenv('NB_GID', '100')
monkeypatch.setenv('DOCKER_GRADER_IMAGE', 'grader-image')
monkeypatch.setenv('USER_ROLE', 'Grader')
@pytest.fixture(scope='function')
def setup_course_hook_environ(monkeypatch, jupyterhub_api_environ):
"""
Set the environment variables used in the setup_course_hook function
"""
monkeypatch.setenv('ANNOUNCEMENT_SERVICE_PORT', '8889')
monkeypatch.setenv('DOCKER_SETUP_COURSE_SERVICE_NAME', 'setup-course')
monkeypatch.setenv('DOCKER_SETUP_COURSE_PORT', '8000')
monkeypatch.setenv('ORGANIZATION_NAME', 'test-org')
@pytest.fixture(scope='function')
def pre_spawn_hook_environ(monkeypatch, jupyterhub_api_environ):
"""
    Set the environment variables used in the pre_spawn_hook function
"""
monkeypatch.setenv('NB_GID', '100')
monkeypatch.setenv('NB_NON_GRADER_UID', '1000')
@pytest.fixture(scope='function')
def setup_utils_environ(monkeypatch, tmp_path):
"""
    Set the environment variables used in the SetupUtils class
"""
monkeypatch.setenv('JUPYTERHUB_SERVICE_NAME', 'jupyterhub')
monkeypatch.setenv('ILLUMIDESK_DIR', '/home/foo/illumidesk_deployment')
@pytest.fixture(scope='function')
def test_quart_client(monkeypatch, tmp_path):
"""
    Set the env-vars required by the quart-based application
"""
monkeypatch.setenv('JUPYTERHUB_CONFIG_PATH', str(tmp_path))
    # important that the environment reads the JUPYTERHUB_CONFIG_PATH variable before
# app initialization
from illumidesk.setup_course.app import app
return app.test_client()
@pytest.fixture(scope='function')
def make_mock_request_handler() -> RequestHandler:
"""
Sourced from https://github.com/jupyterhub/oauthenticator/blob/master/oauthenticator/tests/mocks.py
"""
def _make_mock_request_handler(
handler: RequestHandler, uri: str = 'https://hub.example.com', method: str = 'GET', **settings: dict
) -> RequestHandler:
"""Instantiate a Handler in a mock application"""
application = Application(
hub=Mock(
base_url='/hub/',
server=Mock(base_url='/hub/'),
),
cookie_secret=os.urandom(32),
db=Mock(rollback=Mock(return_value=None)),
**settings,
)
request = HTTPServerRequest(
method=method,
uri=uri,
connection=Mock(),
)
handler = RequestHandler(
application=application,
request=request,
)
handler._transforms = []
return handler
return _make_mock_request_handler
@pytest.fixture(scope='function')
def make_http_response() -> HTTPResponse:
async def _make_http_response(
handler: RequestHandler,
code: int = 200,
reason: str = 'OK',
headers: HTTPHeaders = HTTPHeaders({'content-type': 'application/json'}),
effective_url: str = 'http://hub.example.com/',
body: Dict[str, str] = {'foo': 'bar'},
) -> HTTPResponse:
"""
Creates an HTTPResponse object from a given request. The buffer key is used to
add data to the response's body using an io.StringIO object. This factory method assumes
the body's buffer is an encoded JSON string.
This awaitable factory method requires a tornado.web.RequestHandler object with a valid
request property, which in turn requires a valid jupyterhub.auth.Authenticator object. Use
a dictionary to represent the StringIO body in the response.
Example:
response_args = {'handler': local_handler.request, 'body': {'code': 200}}
http_response = await factory_http_response(**response_args)
Args:
handler: tornado.web.RequestHandler object.
code: response code, e.g. 200 or 404
reason: reason phrase describing the status code
headers: HTTPHeaders (response header object), use the dict within the constructor, e.g.
{"content-type": "application/json"}
effective_url: final location of the resource after following any redirects
body: dictionary that represents the StringIO (buffer) body
Returns:
            A tornado.httpclient.HTTPResponse object
"""
dict_to_buffer = StringIO(json.dumps(body)) if body is not None else None
return HTTPResponse(
request=handler,
code=code,
reason=reason,
headers=headers,
effective_url=effective_url,
buffer=dict_to_buffer,
)
return _make_http_response
@pytest.fixture(scope='function')
def http_async_httpclient_with_simple_response(request, make_http_response, make_mock_request_handler):
"""
    Creates a patch of the AsyncHTTPClient.fetch method, useful when other tests make http requests
"""
local_handler = make_mock_request_handler(RequestHandler)
test_request_body_param = request.param if hasattr(request, 'param') else {'message': 'ok'}
with patch.object(
AsyncHTTPClient,
'fetch',
return_value=make_http_response(handler=local_handler.request, body=test_request_body_param),
):
yield AsyncHTTPClient()
@pytest.fixture(scope='function')
def make_auth_state_dict() -> Dict[str, str]:
"""
Creates an authentication dictionary with default name and auth_state k/v's
"""
def _make_auth_state_dict(
username: str = 'foo', course_id: str = 'intro101', lms_user_id: str = 'abc123', user_role: str = 'Learner'
):
return {
'name': username,
'auth_state': {
'course_id': course_id,
'lms_user_id': lms_user_id,
'user_role': user_role,
}, # noqa: E231
}
return _make_auth_state_dict
@pytest.fixture(scope='function')
def make_lti11_basic_launch_request_args() -> Dict[str, str]:
def _make_lti11_basic_launch_args(
oauth_consumer_key: str = 'my_consumer_key',
oauth_consumer_secret: str = 'my_shared_secret',
):
oauth_timestamp = str(int(time.time()))
oauth_nonce = secrets.token_urlsafe(32)
args = {
'lti_message_type': 'basic-lti-launch-request',
'lti_version': 'LTI-1p0'.encode(),
'resource_link_id': '88391-e1919-bb3456',
'oauth_consumer_key': oauth_consumer_key,
'oauth_timestamp': str(int(oauth_timestamp)),
'oauth_nonce': str(oauth_nonce),
'oauth_signature_method': 'HMAC-SHA1',
'oauth_callback': 'about:blank',
'oauth_version': '1.0',
'user_id': '123123123',
}
extra_args = {'my_key': 'this_value'}
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
launch_url = 'http://jupyterhub/hub/lti/launch'
args.update(extra_args)
base_string = signature.signature_base_string(
'POST',
signature.base_string_uri(launch_url),
signature.normalize_parameters(signature.collect_parameters(body=args, headers=headers)),
)
args['oauth_signature'] = signature.sign_hmac_sha1(base_string, oauth_consumer_secret, None)
return args
return _make_lti11_basic_launch_args
@pytest.fixture(scope='function')
def make_lti11_success_authentication_request_args():
def _make_lti11_success_authentication_request_args(
lms_vendor: str = 'canvas', role: str = 'Instructor'
) -> Dict[str, str]:
"""
        Return valid request arguments sent from the LMS to our tool (when the authentication steps were successful)
"""
args = {
'oauth_callback': ['about:blank'.encode()],
'oauth_consumer_key': ['my_consumer_key'.encode()],
'oauth_signature_method': ['HMAC-SHA1'.encode()],
'oauth_timestamp': ['1585947271'.encode()],
'oauth_nonce': ['01fy8HKIASKuD9gK9vWUcBj9fql1nOCWfOLPzeylsmg'.encode()],
'oauth_signature': ['abc123'.encode()],
'oauth_version': ['1.0'.encode()],
'context_id': ['888efe72d4bbbdf90619353bb8ab5965ccbe9b3f'.encode()],
'context_label': ['intro101'.encode()],
'context_title': ['intro101'.encode()],
'course_lineitems': ['my.platform.com/api/lti/courses/1/line_items'.encode()],
'custom_canvas_assignment_title': ['test-assignment'.encode()],
'custom_canvas_course_id': ['616'.encode()],
'custom_canvas_enrollment_state': ['active'.encode()],
'custom_canvas_user_id': ['1091'.encode()],
'custom_canvas_user_login_id': ['<EMAIL>'.encode()],
'ext_roles': ['urn:lti:instrole:ims/lis/Learner'.encode()],
'launch_presentation_document_target': ['iframe'.encode()],
'launch_presentation_height': ['1000'.encode()],
'launch_presentation_locale': ['en'.encode()],
'launch_presentation_return_url': [
                'https://illumidesk.instructure.com/courses/161/external_content/success/external_tool_redirect'.encode()
],
'launch_presentation_width': ['1000'.encode()],
'lis_outcome_service_url': [
'http://www.imsglobal.org/developers/LTI/test/v1p1/common/tool_consumer_outcome.php?b64=MTIzNDU6OjpzZWNyZXQ='.encode()
],
'lis_person_contact_email_primary': ['<EMAIL>'.encode()],
'lis_person_name_family': ['Bar'.encode()],
'lis_person_name_full': ['<NAME>'.encode()],
'lis_person_name_given': ['Foo'.encode()],
'lti_message_type': ['basic-lti-launch-request'.encode()],
'lis_result_sourcedid': ['feb-123-456-2929::28883'.encode()],
'lti_version': ['LTI-1p0'.encode()],
'resource_link_id': ['888efe72d4bbbdf90619353bb8ab5965ccbe9b3f'.encode()],
'resource_link_title': ['IllumiDesk'.encode()],
'roles': [role.encode()],
'tool_consumer_info_product_family_code': [lms_vendor.encode()],
'tool_consumer_info_version': ['cloud'.encode()],
'tool_consumer_instance_contact_email': ['<EMAIL>'.encode()],
'tool_consumer_instance_guid': ['srnuz6h1U8kOMmETzoqZTJiPWzbPXIYkAUnnAJ4u:test-lms'.encode()],
'tool_consumer_instance_name': ['myorg'.encode()],
'user_id': ['185d6c59731a553009ca9b59ca3a885100000'.encode()],
'user_image': ['https://lms.example.com/avatar-50.png'.encode()],
}
return args
return _make_lti11_success_authentication_request_args
@pytest.fixture(scope='function')
def make_lti13_resource_link_request() -> Dict[str, str]:
"""
Returns valid json after decoding JSON Web Token (JWT) for resource link launch (core).
"""
jws = {
'https://purl.imsglobal.org/spec/lti/claim/message_type': 'LtiResourceLinkRequest',
'https://purl.imsglobal.org/spec/lti/claim/version': '1.3.0',
'https://purl.imsglobal.org/spec/lti/claim/resource_link': {
'id': '<KEY>',
'description': None,
'title': None,
'validation_context': None,
'errors': {'errors': {}},
},
'aud': '125900000000000071',
'azp': '125900000000000071',
'https://purl.imsglobal.org/spec/lti/claim/deployment_id': '847:b81accac78543cb7cd239f3792bcfdc7c6efeadb',
'exp': 1589843421,
'iat': 1589839821,
'iss': 'https://canvas.instructure.com',
'nonce': '125687018437687229621589839822',
'sub': '8171934b-f5e2-4f4e-bdbd-6d798615b93e',
'https://purl.imsglobal.org/spec/lti/claim/target_link_uri': 'https://edu.example.com/hub',
'https://purl.imsglobal.org/spec/lti/claim/context': {
'id': 'b81accac78543cb7cd239f3792bcfdc7c6efeadb',
'label': 'intro101',
'title': 'intro101',
'type': ['http://purl.imsglobal.org/vocab/lis/v2/course#CourseOffering'],
'validation_context': None,
'errors': {'errors': {}},
},
'https://purl.imsglobal.org/spec/lti/claim/tool_platform': {
'guid': 'srnuz6h1U8kOMmETzoqZTJiPWzbPXIYkAUnnAJ4u:canvas-lms',
'name': 'IllumiDesk',
'version': 'cloud',
'product_family_code': 'canvas',
'validation_context': None,
'errors': {'errors': {}},
},
'https://purl.imsglobal.org/spec/lti/claim/launch_presentation': {
'document_target': 'iframe',
'height': 400,
'width': 800,
'return_url': 'https://illumidesk.instructure.com/courses/147/external_content/success/external_tool_redirect',
'locale': 'en',
'validation_context': None,
'errors': {'errors': {}},
},
'locale': 'en',
'https://purl.imsglobal.org/spec/lti/claim/roles': [
'http://purl.imsglobal.org/vocab/lis/v2/institution/person#Student',
'http://purl.imsglobal.org/vocab/lis/v2/membership#Learner',
'http://purl.imsglobal.org/vocab/lis/v2/system/person#User',
],
'https://purl.imsglobal.org/spec/lti/claim/custom': {'email': '<EMAIL>'},
'errors': {'errors': {}},
'https://purl.imsglobal.org/spec/lti-ags/claim/endpoint': {
'scope': [
'https://purl.imsglobal.org/spec/lti-ags/scope/lineitem',
'https://purl.imsglobal.org/spec/lti-ags/scope/lineitem.readonly',
'https://purl.imsglobal.org/spec/lti-ags/scope/result.readonly',
'https://purl.imsglobal.org/spec/lti-ags/scope/score',
],
'lineitems': 'https://illumidesk.instructure.com/api/lti/courses/147/line_items',
'validation_context': None,
'errors': {'errors': {}},
},
'picture': 'https://canvas.instructure.com/images/messages/avatar-50.png',
'email': '<EMAIL>',
'name': '<NAME>',
'given_name': 'Foo',
'family_name': 'Bar',
'https://purl.imsglobal.org/spec/lti/claim/lis': {
'person_sourcedid': None,
'course_offering_sourcedid': None,
'validation_context': None,
'errors': {'errors': {}},
},
'https://purl.imsglobal.org/spec/lti-nrps/claim/namesroleservice': {
'context_memberships_url': 'https://illumidesk.instructure.com/api/lti/courses/147/names_and_roles',
'service_versions': [
'2.0',
], # noqa: E231
'validation_context': None,
'errors': {'errors': {}},
}, # noqa: E231
}
return jws
@pytest.fixture(scope='function')
def make_lti13_resource_link_request_privacy_enabled() -> Dict[str, str]:
"""
Returns valid json after decoding JSON Web Token (JWT) for resource link launch (core)
when Privacy is enabled.
"""
jws = {
'https://purl.imsglobal.org/spec/lti/claim/message_type': 'LtiResourceLinkRequest',
'https://purl.imsglobal.org/spec/lti/claim/version': '1.3.0',
'https://purl.imsglobal.org/spec/lti/claim/resource_link': {
'id': '41c4731cc7668aef2eddeeb99132ba6239d8e058',
'description': None,
'title': None,
'validation_context': None,
'errors': {'errors': {}},
},
'https://purl.imsglobal.org/spec/lti-ags/claim/endpoint': {
'scope': [
'https://purl.imsglobal.org/spec/lti-ags/scope/lineitem',
'https://purl.imsglobal.org/spec/lti-ags/scope/lineitem.readonly',
'https://purl.imsglobal.org/spec/lti-ags/scope/result.readonly',
'https://purl.imsglobal.org/spec/lti-ags/scope/score',
],
'lineitems': 'https://illumidesk.instructure.com/api/lti/courses/169/line_items',
'validation_context': None,
'errors': {'errors': {}},
},
'aud': '125900000000000094',
'azp': '125900000000000094',
'https://purl.imsglobal.org/spec/lti/claim/deployment_id': '905:41c4731cc7668aef2eddeeb99132ba6239d8e058',
'exp': 1595538982,
'iat': 1595535382,
'iss': 'https://canvas.instructure.com',
'nonce': '02cb8d54c4e0f7678a3c5bcbff70095349d18994cf727c1f5efe7724e40ca766',
'sub': '8171934b-f5e2-4f4e-bdbd-6d798615b93e',
'https://purl.imsglobal.org/spec/lti/claim/target_link_uri': 'https://test.illumidesk.com/',
'https://purl.imsglobal.org/spec/lti/claim/context': {
'id': '41c4731cc7668aef2eddeeb99132ba6239d8e058',
'label': 'intro101',
'title': 'intro101',
'type': ['http://purl.imsglobal.org/vocab/lis/v2/course#CourseOffering'],
'validation_context': None,
'errors': {'errors': {}},
},
'https://purl.imsglobal.org/spec/lti/claim/tool_platform': {
'guid': 'srnuz6h1U8kOMmETzoqZTJiPWzbPXIYkAUnnAJ4u:canvas-lms',
'name': 'Illumidesk',
'version': 'cloud',
'product_family_code': 'canvas',
'validation_context': None,
'errors': {'errors': {}},
},
'https://purl.imsglobal.org/spec/lti/claim/launch_presentation': {
'document_target': 'iframe',
'height': 400,
'width': 800,
'return_url': 'https://illumidesk.instructure.com/courses/169/external_content/success/external_tool_redirect',
'locale': 'en',
'validation_context': None,
'errors': {'errors': {}},
},
'locale': 'en',
'https://purl.imsglobal.org/spec/lti/claim/roles': [
'http://purl.imsglobal.org/vocab/lis/v2/institution/person#Administrator',
'http://purl.imsglobal.org/vocab/lis/v2/institution/person#Instructor',
'http://purl.imsglobal.org/vocab/lis/v2/membership#Instructor',
'http://purl.imsglobal.org/vocab/lis/v2/system/person#User',
],
'https://purl.imsglobal.org/spec/lti/claim/custom': {'lms_user_id': 4},
'https://purl.imsglobal.org/spec/lti-nrps/claim/namesroleservice': {
'context_memberships_url': 'https://illumidesk.instructure.com/api/lti/courses/169/names_and_roles',
'service_versions': ['2.0'],
'validation_context': None,
'errors': {'errors': {}},
},
'errors': {'errors': {}},
}
return jws
@pytest.fixture(scope='function')
def make_lti13_platform_jwks() -> Dict[str, List[Dict[str, str]]]:
def _make_lti13_platform_jwks():
"""
Valid response when retrieving jwks from the platform.
"""
jwks = {
"keys": [
{
"kty": "RSA",
"e": "AQAB",
"n": "<KEY>",
"kid": "2020-03-01T00:00:01Z",
"alg": "RS256",
"use": "sig",
},
{
"kty": "RSA",
"e": "AQAB",
"n": "<KEY>",
"kid": "2020-04-01T00:00:04Z",
"alg": "RS256",
"use": "sig",
},
{
"kty": "RSA",
"e": "AQAB",
"n": "<KEY>",
"kid": "2020-05-01T00:00:01Z",
"alg": "RS256",
"use": "sig",
},
]
}
return jwks
return _make_lti13_platform_jwks
@pytest.fixture(scope='function')
def build_lti13_jwt_id_token() -> str:
def _make_lti13_jwt_id_token(json_lti13_launch_request: Dict[str, str]):
"""
Returns a valid jwt lti13 id token from a json
We can use the `make_lti13_resource_link_request` or `make_lti13_resource_link_request_privacy_enabled`
fixture to create the json then call this method.
"""
encoded_jwt = jwt.encode(json_lti13_launch_request, 'secret', algorithm='HS256')
return encoded_jwt
return _make_lti13_jwt_id_token
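# Hedged usage sketch (hypothetical test, not part of the original conftest):
# combine a claims fixture with the factory above to produce and verify a token.
#
#     def test_id_token_roundtrip(make_lti13_resource_link_request_privacy_enabled,
#                                 build_lti13_jwt_id_token):
#         claims = make_lti13_resource_link_request_privacy_enabled
#         token = build_lti13_jwt_id_token(claims)
#         decoded = jwt.decode(token, 'secret', algorithms=['HS256'])
#         assert decoded['iss'] == 'https://canvas.instructure.com'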
```
{
"source": "jgwerner/jupyter-pgweb-proxy",
"score": 2
}
#### File: jupyter-pgweb-proxy/jupyter_pgweb_proxy/__init__.py
```python
import os
import shutil
import logging
logger = logging.getLogger(__name__)
logger.setLevel('INFO')
def setup_pgweb():
"""Setup commands and icon paths and return a dictionary compatible
with jupyter-server-proxy.
"""
def _get_icon_path():
return os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'icons', 'pgweb.svg'
)
# Make sure executable is in $PATH
def _get_pgweb_command(port):
executable = 'pgweb'
if not shutil.which(executable):
            raise FileNotFoundError('Cannot find pgweb executable in $PATH')
# Create working directory
home_dir = os.environ.get('HOME') or '/home/jovyan'
working_dir = f'{home_dir}/pgweb'
if not os.path.exists(working_dir):
os.makedirs(working_dir)
logger.info("Created directory %s" % working_dir)
else:
logger.info("Directory %s already exists" % working_dir)
return ['pgweb', '--bind=0.0.0.0', '--listen=' + str(port)]
return {
'command': _get_pgweb_command,
'timeout': 20,
'new_browser_tab': True,
'launcher_entry': {
'title': 'pgweb',
'icon_path': _get_icon_path()
},
}
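# Hedged usage sketch (assumed packaging, not shown in this file): jupyter-server-proxy
# normally discovers setup_pgweb() via an entry point such as
#
#     entry_points={
#         'jupyter_serverproxy_servers': [
#             'pgweb = jupyter_pgweb_proxy:setup_pgweb',
#         ]
#     }
#
# and then calls the returned dict's 'command' callable with a free port number.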
```
{
"source": "jgwerner/nbexchange",
"score": 2
}
#### File: alembic/versions/20190402_add_collected_action.py
```python
from datetime import datetime
from enum import Enum
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "20190202"
down_revision = "d500457efb3b"
branch_labels = None
depends_on = None
class NewAssignmentActions(Enum):
released = "released"
fetched = "fetched"
submitted = "submitted"
removed = "removed"
collected = "collected"
class OldAssignmentActions(Enum):
released = "released"
fetched = "fetched"
submitted = "submitted"
removed = "removed"
def upgrade():
with op.batch_alter_table("action") as batch_op:
batch_op.alter_column(
"action",
"action",
existing_type=sa.Enum(OldAssignmentActions, name="assignmentactions"),
type_=sa.Enum(NewAssignmentActions, name="assignmentactions"),
)
def downgrade():
with op.batch_alter_table("action") as batch_op:
batch_op.alter_column(
"action",
"action",
existing_type=sa.Enum(NewAssignmentActions, name="assignmentactions"),
type_=sa.Enum(OldAssignmentActions, name="assignmentactions"),
)
```
#### File: alembic/versions/2805bf7747e5_change_feedback_timestamp_column_type_.py
```python
from datetime import datetime
import sqlalchemy as sa
from alembic import op
from sqlalchemy import orm
# revision identifiers, used by Alembic.
from sqlalchemy.engine.reflection import Inspector
revision = "<KEY>"
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
from datetime import datetime
from sqlalchemy import Column, DateTime, ForeignKey, Integer, Unicode
from nbexchange.models import Base
def try_convert(datestr, default):
try:
return datetime.fromisoformat(datestr)
except:
try:
return datetime.strptime(datestr, "%Y-%m-%d %H:%M:%S.%f %Z")
except:
try:
return datetime.strptime(datestr, "%Y-%m-%d %H:%M:%S.%f").isoformat()
except:
return default
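# Illustrative inputs for the fallback chain above (assumed values, not from the
# original migration): ISO strings parse directly, the legacy
# "%Y-%m-%d %H:%M:%S.%f %Z" form is caught by the second branch, and anything
# unparseable falls back to the supplied default (typically the row's created_at).
#
#     try_convert("2020-01-01T10:00:00", None)              # -> datetime(2020, 1, 1, 10, 0)
#     try_convert("2020-01-01 10:00:00.000000 UTC", None)   # -> datetime(2020, 1, 1, 10, 0)
#     try_convert("not-a-date", row_created_at)             # -> row_created_at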
def upgrade():
from nbexchange.models import Feedback as FeedbackNew
class FeedbackOld(Base):
__tablename__ = "feedback"
id = Column(Integer(), primary_key=True, autoincrement=True)
notebook = None
notebook_id = Column(
Integer(), ForeignKey("notebook.id", ondelete="CASCADE"), index=True
)
instructor = None
instructor_id = Column(
Integer, ForeignKey("user.id", ondelete="CASCADE"), index=True
)
student = None
student_id = Column(
Integer, ForeignKey("user.id", ondelete="CASCADE"), index=True
)
location = Column(
Unicode(200), nullable=True
) # Location for the file of this action
checksum = Column(Unicode(200), nullable=True) # Checksum for the feedback file
timestamp = Column(Unicode(12), nullable=False)
created_at = Column(DateTime, default=datetime.utcnow)
bind = op.get_bind()
inspector = Inspector.from_engine(bind)
tables = inspector.get_table_names()
if "feedback_2" not in tables:
FeedbackNew.__table__.create(bind)
session = orm.Session(bind=bind)
if "feedback" in tables:
feedbacks = [
FeedbackNew(
notebook_id=feedback.notebook_id,
instructor_id=feedback.instructor_id,
student_id=feedback.student_id,
location=feedback.location,
checksum=feedback.checksum,
timestamp=try_convert(feedback.timestamp, feedback.created_at),
created_at=feedback.created_at,
)
for feedback in session.query(FeedbackOld)
]
session.add_all(feedbacks)
session.commit()
def downgrade():
from nbexchange.models import Feedback as FeedbackOld
class FeedbackNew(Base):
__tablename__ = "feedback_2"
id = Column(Integer(), primary_key=True, autoincrement=True)
notebook = None
notebook_id = Column(
Integer(), ForeignKey("notebook.id", ondelete="CASCADE"), index=True
)
instructor = None
instructor_id = Column(
Integer, ForeignKey("user.id", ondelete="CASCADE"), index=True
)
student = None
student_id = Column(
Integer, ForeignKey("user.id", ondelete="CASCADE"), index=True
)
location = Column(Unicode(200), nullable=True)
checksum = Column(Unicode(200), nullable=True) # Checksum for the feedback file
timestamp = Column(DateTime(timezone=True), nullable=False)
created_at = Column(DateTime(timezone=True), default=datetime.utcnow)
bind = op.get_bind()
session = orm.Session(bind=bind)
inspector = Inspector.from_engine(bind)
tables = inspector.get_table_names()
if "feedback" not in tables:
FeedbackOld.__table__.create(bind)
if "feedback_2" in tables:
feedbacks = [
FeedbackOld(
notebook_id=feedback.notebook_id,
instructor_id=feedback.instructor_id,
student_id=feedback.student_id,
location=feedback.location,
checksum=feedback.checksum,
timestamp=feedback.timestamp.isoformat(),
created_at=feedback.created_at,
)
for feedback in session.query(FeedbackNew)
]
session.add_all(feedbacks)
session.commit()
```
#### File: handlers/auth/naas_user_handler.py
```python
import logging
import os
import jwt
from tornado import web
from nbexchange.handlers.auth.user_handler import BaseUserHandler
class NaasUserHandler(BaseUserHandler):
jwt_key = os.environ.get("SECRET_KEY")
def get_current_user(self, request: web.RequestHandler) -> dict:
cookies = dict()
# Pass through cookies
for name in request.request.cookies:
cookies[name] = request.get_cookie(name)
if "noteable_auth" not in cookies:
logging.debug(
f"No noteable_auth cookie found - got {','.join(request.request.cookies)}"
)
return None
encoded = cookies["noteable_auth"]
result = jwt.decode(encoded, self.jwt_key, algorithms=["HS256"])
# TODO this _ to - transformation is unfortunate but the alternatives are also bad
# Due to changes in the API in aug/sept 2020 the username was transformed for the UI to appear
# as 1-xyz instead of 1_xyz. This was due to K8S only supporting DNS compatible characters for some reasources
# which _ isn't. The other nice benefit was to get rid of %2F in places. Unfortunately nbexchange used this
# same API and its username format was changed at the same time.
# The username is used in the path to user assignment submissions and is recorded in the nbexchange database
# and on the NFS filesystem. Changing this back would require these usernames are reformatted from their
# 1-xyz format back to 1_xyz
transformed_username = result["username"].replace("_", "-", 1)
## We need to strip out forward slashes from the username. If not, the created paths will be invalid
transformed_username = transformed_username.replace("/", "-")
return {
"name": transformed_username,
"full_name": result.get("n_fn", ""),
"course_id": result["n_cid"],
"course_title": result["n_cnm"],
"course_role": result["n_rl"],
"org_id": result["n_oid"],
"cust_id": result["n_cust_id"],
}
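# Worked example of the transform above (illustrative value, not from the source):
# only the first "_" becomes "-", and any "/" is replaced so the name is path-safe.
#
#     name = "1_kiz/extra"
#     assert name.replace("_", "-", 1).replace("/", "-") == "1-kiz-extra"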
```
#### File: nbexchange/handlers/base.py
```python
import functools
import os
import re
from typing import Awaitable, Callable, Optional
from urllib.parse import unquote, unquote_plus
from tornado import web
from tornado.log import app_log
from nbexchange.database import scoped_session
from nbexchange.models.courses import Course
from nbexchange.models.subscriptions import Subscription
from nbexchange.models.users import User
def authenticated(
method: Callable[..., Optional[Awaitable[None]]]
) -> Callable[..., Optional[Awaitable[None]]]:
"""Decorate methods with this to require that the user be logged in.
If the user is not logged in, raise a 403 error
"""
@functools.wraps(method)
def wrapper( # type: ignore
self: web.RequestHandler, *args, **kwargs
) -> Optional[Awaitable[None]]:
if not self.current_user:
raise web.HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
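# Hedged usage sketch (hypothetical handler, not part of this module): the decorator
# wraps a tornado handler method so unauthenticated requests are rejected with a 403
# before the body runs.
#
#     class ProfileHandler(BaseHandler):
#         @authenticated
#         def get(self):
#             self.write({"user": self.get_current_user()})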
class BaseHandler(web.RequestHandler):
"""An nbexchange base handler"""
# register URL patterns
urls = []
def __init__(self, application, request, **kwargs):
super(BaseHandler, self).__init__(application, request, **kwargs)
self.set_header("Content-type", "application/json")
# Root location for data to be written to
@property
def base_storage_location(self):
return self.settings["base_storage_location"]
@property
def user_plugin(self):
return self.settings["user_plugin"]
@property
def max_buffer_size(self):
return self.settings["max_buffer_size"]
def get_current_user(self):
return self.user_plugin.get_current_user(self)
@property
def nbex_user(self):
hub_user = self.get_current_user()
hub_username = hub_user.get("name")
full_name = hub_user.get("full_name")
current_course = hub_user.get("course_id")
current_role = hub_user.get("course_role")
course_title = hub_user.get("course_title", "no_title")
org_id = hub_user.get("org_id", 1)
if not (current_course and current_role):
return
self.org_id = org_id
with scoped_session() as session:
user = User.find_by_name(db=session, name=hub_username, log=self.log)
if user is None:
self.log.debug(
f"New user details: name:{hub_username}, org_id:{org_id}"
)
user = User(name=hub_username, org_id=org_id)
session.add(user)
if user.full_name != full_name:
user.full_name = full_name
course = Course.find_by_code(
db=session, code=current_course, org_id=org_id, log=self.log
)
if course is None:
self.log.debug(
f"New course details: code:{current_course}, org_id:{org_id}"
)
course = Course(org_id=org_id, course_code=current_course)
if course_title:
self.log.debug(f"Adding title {course_title}")
course.course_title = course_title
session.add(course)
# Check to see if we have a subscription (for this course)
self.log.debug(
f"Looking for subscription for: user:{user.id}, course:{course.id}, role:{current_role}"
)
subscription = Subscription.find_by_set(
db=session, user_id=user.id, course_id=course.id, role=current_role
)
if subscription is None:
self.log.debug(
f"New subscription details: user:{user.id}, course:{course.id}, role:{current_role}"
)
subscription = Subscription(
user_id=user.id, course_id=course.id, role=current_role
)
session.add(subscription)
courses = {}
for subscription in user.courses:
                if subscription.course.course_code not in courses:
courses[subscription.course.course_code] = {}
courses[subscription.course.course_code][subscription.role] = 1
model = {
"kind": "user",
"id": user.id,
"name": user.name,
"org_id": user.org_id,
"current_course": current_course,
"current_role": current_role,
"courses": courses,
}
return model
@property
def log(self):
"""I can't seem to avoid typing self.log"""
return self.settings.get("log", app_log)
    def param_decode(self, value):
        # Decode the URL-encoded value; assign the result back so the decoded
        # string (not the original encoded one) is what gets returned.
        value = unquote(value) if re.search("%20", value) else unquote_plus(value)
        return value
def get_params(self, param_list):
return_params = []
for param in param_list:
value = (
self.request.arguments[param][0].decode("utf-8")
if param in self.request.arguments
else None
)
value = self.param_decode(value) if value else None
return_params.append(value)
return return_params
class Template404(BaseHandler):
"""Render nbexchange's 404 template"""
urls = [".*"]
def prepare(self):
raise web.HTTPError(404)
```
#### File: nbexchange/models/subscriptions.py
```python
from sqlalchemy import Column, ForeignKey, Integer, Unicode, UniqueConstraint
from sqlalchemy.orm import relationship
from nbexchange.models import Base
class Subscription(Base):
"""Table to map multiple users to a single course
# assume some main objects
usr = User(hubuser_id = self.get_current_user().id
username = self.get_current_user().username
)
crs = Course(org_id=1, course_code=$couurse_code)
# create a new subscription, hard-coded linking
subscr = Subscription(role='Student', user_id = usr.id)
subscr.course_id = crs.id
"""
__tablename__ = "subscription"
__table_args__ = (UniqueConstraint("user_id", "course_id", "role"),)
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(Integer, ForeignKey("user.id", ondelete="CASCADE"), index=True)
course_id = Column(Integer, ForeignKey("course.id", ondelete="CASCADE"), index=True)
role = Column(Unicode(50), nullable=False)
# These are the relationship handles: a specific subscription has a single user to a single course
user = relationship("User", back_populates="courses")
course = relationship("Course", back_populates="subscribers")
@classmethod
def find_by_pk(cls, db, pk, log=None):
"""Find a subscription by Primary Key.
Returns None if not found.
"""
if log:
log.debug(f"Subscription.find_by_pk - pk:{pk}")
if pk is None:
raise ValueError(f"Primary Key needs to be defined")
if isinstance(pk, int):
return db.query(cls).filter(cls.id == pk).first()
else:
raise TypeError(f"Primary Keys are required to be Ints")
@classmethod
def find_by_set(cls, db, user_id, course_id, role, log=None):
"""Find a subscription by user, course, and role.
Returns None if not found.
"""
if log:
log.debug(
f"Subscription.find_by_set - user_id:{user_id}, course_id:{course_id}, role:{role}"
)
return (
db.query(cls)
.filter(
cls.user_id == user_id, cls.course_id == course_id, cls.role == role
)
.first()
)
def __repr__(self):
return f"Subscription for user {self.user_id} to course {self.course_id} as a {self.role}"
```
#### File: nbexchange/plugin/collect.py
```python
import io
import json
import os
import re
import shutil
import tarfile
from urllib.parse import quote_plus
import nbgrader.exchange.abc as abc
from nbgrader.api import Gradebook, MissingEntry
from .exchange import Exchange
class ExchangeCollect(abc.ExchangeCollect, Exchange):
def do_copy(self, src, dest):
pass
def init_src(self):
pass
def init_dest(self):
pass
def download(self, submission, dest_path):
self.log.debug(f"ExchangeCollect.download - record {submission} to {dest_path}")
r = self.api_request(
f"collection?course_id={quote_plus(self.course_id)}&assignment_id={quote_plus(self.coursedir.assignment_id)}&path={quote_plus(submission['path'])}"
)
self.log.debug(
f"Got back {r.status_code} {r.headers['content-type']} after file download"
)
tgz = r.content
try:
tar_file = io.BytesIO(tgz)
with tarfile.open(fileobj=tar_file) as handle:
handle.extractall(path=dest_path)
except Exception as e: # TODO: exception handling
if hasattr(e, "message"):
self.fail(e.message)
else:
self.fail(e)
def do_collect(self):
"""
Downloads submitted files
If coursedir.student_id, then we're only looking for that user"""
# Get a list of submissions
url = f"collections?course_id={quote_plus(self.course_id)}&assignment_id={quote_plus(self.coursedir.assignment_id)}"
if self.coursedir.student_id != "*":
url = url + f"&user_id={quote_plus(self.coursedir.student_id)}"
r = self.api_request(url)
self.log.debug(f"Got back {r} when listing collectable assignments")
try:
data = r.json()
except json.decoder.JSONDecodeError:
self.log.error(f"Got back an invalid response when listing assignments")
return []
if not data["success"]:
self.fail("Error looking for assignments to collect")
submissions = data["value"]
self.log.debug(
f"ExchangeCollect.do_collection found the following items: {submissions}"
)
if len(submissions) == 0:
self.log.warning(
f"No submissions of '{self.coursedir.assignment_id}' for course '{self.course_id}' to collect"
)
else:
self.log.debug(
f"Processing {len(submissions)} submissions of '{self.coursedir.assignment_id}' for course '{self.course_id}'"
)
for submission in submissions:
student_id = submission["student_id"]
full_name = submission.get("full_name") or ""
if " " in full_name:
first_name, last_name = full_name.rsplit(" ", 1)
else:
first_name, last_name = (
full_name,
"",
) # TODO: should we prefer first or last name here?
if student_id:
local_dest_path = self.coursedir.format_path(
self.coursedir.submitted_directory,
student_id,
self.coursedir.assignment_id,
)
if not os.path.exists(os.path.dirname(local_dest_path)):
os.makedirs(os.path.dirname(local_dest_path))
self.log.debug(
f"ExchangeCollect.do_collection - collection dest : {local_dest_path}"
)
take_a_copy = False
updated_version = False
if os.path.isdir(local_dest_path):
existing_timestamp = self.coursedir.get_existing_timestamp(
local_dest_path
)
existing_timestamp = (
existing_timestamp.strftime(self.timestamp_format)
if existing_timestamp
else None
)
new_timestamp = submission["timestamp"]
if self.update and (
existing_timestamp is None or new_timestamp > existing_timestamp
):
take_a_copy = True
updated_version = True
else:
take_a_copy = True
if take_a_copy:
if updated_version:
self.log.info(
f"Updating submission: {student_id} {self.coursedir.assignment_id}"
)
# clear existing
shutil.rmtree(local_dest_path)
else:
self.log.info(
f"Collecting submission: {student_id} {self.coursedir.assignment_id}"
)
with Gradebook(
self.coursedir.db_url, self.coursedir.course_id
) as gb:
try:
gb.update_or_create_student(
student_id, first_name=first_name, last_name=last_name
)
except MissingEntry:
self.log.info(
f"Unable to update: {student_id} with first_name={first_name}, last_name={last_name}"
)
self.download(submission, local_dest_path)
else:
if self.update:
self.log.info(
f"No newer submission to collect: {student_id} {self.coursedir.assignment_id}"
)
else:
self.log.info(
f"Submission already exists, use --update to update: {student_id} {self.coursedir.assignment_id}"
)
def copy_files(self):
self.do_collect()
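# Hedged sketch of the two listing queries built in do_collect (URLs are illustrative):
#
#     collections?course_id=course_2&assignment_id=assign_1                 # every student
#     collections?course_id=course_2&assignment_id=assign_1&user_id=1-kiz   # a single student
#
# Each returned submission is then downloaded via the "collection" endpoint and
# unpacked under the coursedir's submitted/<student_id>/<assignment_id>/ layout.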
```
#### File: nbexchange/plugin/fetch_assignment.py
```python
import glob
import io
import os
import shutil
import tarfile
import tempfile
from urllib.parse import quote_plus
import nbgrader.exchange.abc as abc
from nbgrader.api import new_uuid
from traitlets import Bool
from .exchange import Exchange
class ExchangeFetchAssignment(abc.ExchangeFetchAssignment, Exchange):
def _load_config(self, cfg, **kwargs):
if "ExchangeFetch" in cfg:
self.log.warning(
"Use ExchangeFetchAssignment in config, not ExchangeFetch. Outdated config:\n%s",
"\n".join(
"ExchangeFetch.{key} = {value!r}".format(key=key, value=value)
for key, value in cfg.ExchangeFetch.items()
),
)
cfg.ExchangeFetchAssignment.merge(cfg.ExchangeFetch)
del cfg.ExchangeFetch
super(ExchangeFetchAssignment, self)._load_config(cfg, **kwargs)
# where the downloaded files are placed
def init_src(self):
self.log.debug(
f"ExchangeFetch.init_src using {self.course_id} {self.coursedir.assignment_id}"
)
location = os.path.join(
"/tmp/",
new_uuid(),
self.course_id,
self.coursedir.assignment_id,
"assignment.tar.gz",
)
os.makedirs(os.path.dirname(location), exist_ok=True)
self.src_path = location
self.log.debug(f"ExchangeFetch.init_src ensuring {self.src_path}")
# where in the user tree
def init_dest(self):
if self.path_includes_course:
root = os.path.join(self.course_id, self.coursedir.assignment_id)
else:
root = self.coursedir.assignment_id
self.dest_path = os.path.abspath(os.path.join(self.assignment_dir, root))
# Lets check there are no notebooks already in the dest_path dir
if (
os.path.isdir(self.dest_path)
and glob.glob(self.dest_path + "/*.ipynb")
and not self.replace_missing_files
):
self.fail(
f"You already have notebook documents in directory: {root}. Please remove them before fetching again"
)
else:
os.makedirs(os.path.dirname(self.dest_path + "/"), exist_ok=True)
self.log.debug(f"ExchangeFetch.init_dest ensuring {self.dest_path}")
def download(self):
self.log.debug(f"Download from {self.service_url}")
r = self.api_request(
f"assignment?course_id={quote_plus(self.course_id)}&assignment_id={quote_plus(self.coursedir.assignment_id)}"
)
self.log.debug(
f"Got back {r.status_code} {r.headers['content-type']} after file download"
)
tgz = r.content
try:
tar_file = io.BytesIO(tgz)
with tarfile.open(fileobj=tar_file) as handle:
handle.extractall(path=self.src_path)
except Exception as e: # TODO: exception handling
self.fail(str(e))
def copy_if_missing(self, src, dest, ignore=None):
filenames = sorted(os.listdir(src))
if ignore:
bad_filenames = ignore(src, filenames)
filenames = sorted(list(set(filenames) - bad_filenames))
for filename in filenames:
srcpath = os.path.join(src, filename)
destpath = os.path.join(dest, filename)
relpath = os.path.relpath(destpath, os.getcwd())
if not os.path.exists(destpath):
if os.path.isdir(srcpath):
self.log.warning("Creating missing directory '%s'", relpath)
os.mkdir(destpath)
else:
self.log.warning("Replacing missing file '%s'", relpath)
shutil.copy(srcpath, destpath)
if os.path.isdir(srcpath):
self.copy_if_missing(srcpath, destpath, ignore=ignore)
def do_copy(self, src, dest):
"""Copy the src dir to the dest dir omitting the self.coursedir.ignore globs."""
self.download()
if os.path.isdir(self.dest_path):
self.copy_if_missing(
src, dest, ignore=shutil.ignore_patterns(*self.coursedir.ignore)
)
else:
shutil.copytree(
src, dest, ignore=shutil.ignore_patterns(*self.coursedir.ignore)
)
# clear tmp having downloaded file
shutil.rmtree(self.src_path)
def copy_files(self):
self.log.debug(f"Source: {self.src_path}")
self.log.debug(f"Destination: {self.dest_path}")
self.do_copy(self.src_path, self.dest_path)
self.log.debug(f"Fetched as: {self.course_id} {self.coursedir.assignment_id}")
```
#### File: nbexchange/tests/test_handlers_auth.py
```python
import logging
from abc import ABCMeta
import pytest
from nbexchange.handlers.auth.user_handler import BaseUserHandler
logger = logging.getLogger(__file__)
logger.setLevel(logging.ERROR)
@pytest.mark.gen_test
def test_base_abstract_class(app):
BaseUserHandler.__abstractmethods__ = set()
assert isinstance(BaseUserHandler, ABCMeta)
```
#### File: nbexchange/tests/test_plugin_release_assignment.py
```python
import datetime
import logging
import os
import re
import shutil
from shutil import copyfile
import pytest
from mock import patch
from nbgrader.coursedir import CourseDirectory
from nbgrader.exchange import ExchangeError
from nbgrader.utils import make_unique_key, notebook_hash
import nbexchange
from nbexchange.plugin import Exchange, ExchangeReleaseAssignment
from nbexchange.tests.utils import get_feedback_file
logger = logging.getLogger(__file__)
logger.setLevel(logging.ERROR)
release_dir = "release_test"
source_dir = "source_test"
notebook1_filename = os.path.join(
os.path.dirname(__file__), "data", "assignment-0.6.ipynb"
)
notebook1_file = get_feedback_file(notebook1_filename)
notebook2_filename = os.path.join(
os.path.dirname(__file__), "data", "assignment-0.6-2.ipynb"
)
notebook2_file = get_feedback_file(notebook2_filename)
def test_release_assignment_methods_init_src(plugin_config, tmpdir, caplog):
plugin_config.CourseDirectory.root = "/"
plugin_config.CourseDirectory.source_directory = str(
tmpdir.mkdir(source_dir).realpath()
)
plugin_config.CourseDirectory.release_directory = str(
tmpdir.mkdir(release_dir).realpath()
)
plugin_config.CourseDirectory.assignment_id = "assign_1"
plugin = ExchangeReleaseAssignment(
coursedir=CourseDirectory(config=plugin_config), config=plugin_config
)
# No release file, no source file
with pytest.raises(ExchangeError) as e_info:
plugin.init_src()
assert "Assignment not found at:" in str(e_info.value)
# No release, source file exists
os.makedirs(
os.path.join(plugin_config.CourseDirectory.source_directory, "assign_1"),
exist_ok=True,
)
copyfile(
notebook1_filename,
os.path.join(
plugin_config.CourseDirectory.source_directory, "assign_1", "release.ipynb"
),
)
with pytest.raises(ExchangeError) as e_info:
plugin.init_src()
assert re.match(
r"Assignment found in '.+' but not '.+', run `nbgrader assign` first.",
str(e_info.value),
)
# release file exists
os.makedirs(
os.path.join(plugin_config.CourseDirectory.release_directory, "assign_1"),
exist_ok=True,
)
copyfile(
notebook1_filename,
os.path.join(
plugin_config.CourseDirectory.release_directory, "assign_1", "release.ipynb"
),
)
with open(
os.path.join(
plugin_config.CourseDirectory.release_directory, "assign_1", "timestamp.txt"
),
"w",
) as fp:
fp.write("2020-01-01 00:00:00.0 UTC")
plugin.init_src()
assert re.search(
r"test_release_assignment_method0/release_test/./assign_1$", plugin.src_path
)
assert os.path.isdir(plugin.src_path)
@pytest.mark.gen_test
def test_release_assignment_methods_the_rest(plugin_config, tmpdir, caplog):
plugin_config.CourseDirectory.root = "/"
plugin_config.CourseDirectory.release_directory = str(
tmpdir.mkdir(release_dir).realpath()
)
plugin_config.CourseDirectory.assignment_id = "assign_1"
plugin = ExchangeReleaseAssignment(
coursedir=CourseDirectory(config=plugin_config), config=plugin_config
)
os.makedirs(
os.path.join(plugin_config.CourseDirectory.release_directory, "assign_1"),
exist_ok=True,
)
copyfile(
notebook1_filename,
os.path.join(
plugin_config.CourseDirectory.release_directory, "assign_1", "release.ipynb"
),
)
with open(
os.path.join(
plugin_config.CourseDirectory.release_directory, "assign_1", "timestamp.txt"
),
"w",
) as fp:
fp.write("2020-01-01 00:00:00.0 UTC")
plugin.init_src()
plugin.init_dest()
with pytest.raises(AttributeError) as e_info:
foo = plugin.dest_path
assert (
str(e_info.value)
== "'ExchangeReleaseAssignment' object has no attribute 'dest_path'"
)
file = plugin.tar_source()
assert len(file) > 1000
plugin.get_notebooks()
assert plugin.notebooks == ["release"]
@pytest.mark.gen_test
def test_release_assignment_normal(plugin_config, tmpdir):
plugin_config.CourseDirectory.root = "/"
plugin_config.CourseDirectory.release_directory = str(
tmpdir.mkdir(release_dir).realpath()
)
plugin_config.CourseDirectory.assignment_id = "assign_1"
os.makedirs(
os.path.join(plugin_config.CourseDirectory.release_directory, "assign_1"),
exist_ok=True,
)
copyfile(
notebook1_filename,
os.path.join(
plugin_config.CourseDirectory.release_directory, "assign_1", "release.ipynb"
),
)
with open(
os.path.join(
plugin_config.CourseDirectory.release_directory, "assign_1", "timestamp.txt"
),
"w",
) as fp:
fp.write("2020-01-01 00:00:00.0 UTC")
plugin = ExchangeReleaseAssignment(
coursedir=CourseDirectory(config=plugin_config), config=plugin_config
)
def api_request(*args, **kwargs):
assert args[0] == (f"assignment?course_id=no_course" f"&assignment_id=assign_1")
assert kwargs.get("method").lower() == "post"
assert kwargs.get("data").get("notebooks") == ["release"]
assert "assignment" in kwargs.get("files")
assert "assignment.tar.gz" == kwargs.get("files").get("assignment")[0]
assert len(kwargs.get("files").get("assignment")[1]) > 0
return type(
"Request",
(object,),
{"status_code": 200, "json": (lambda: {"success": True})},
)
with patch.object(Exchange, "api_request", side_effect=api_request):
plugin.start()
print(f"plugin.src_path: {plugin.src_path}")
assert re.search(
r"test_release_assignment_normal0/release_test/./assign_1$", plugin.src_path
)
@pytest.mark.gen_test
def test_release_assignment_several_normal(plugin_config, tmpdir):
plugin_config.CourseDirectory.root = "/"
plugin_config.CourseDirectory.release_directory = str(
tmpdir.mkdir(release_dir).realpath()
)
plugin_config.CourseDirectory.assignment_id = "assign_1"
os.makedirs(
os.path.join(plugin_config.CourseDirectory.release_directory, "assign_1"),
exist_ok=True,
)
copyfile(
notebook1_filename,
os.path.join(
plugin_config.CourseDirectory.release_directory,
"assign_1",
"release1.ipynb",
),
)
with open(
os.path.join(
plugin_config.CourseDirectory.release_directory, "assign_1", "timestamp.txt"
),
"w",
) as fp:
fp.write("2020-01-01 00:00:00.0 UTC")
copyfile(
notebook1_filename,
os.path.join(
plugin_config.CourseDirectory.release_directory,
"assign_1",
"release1.ipynb",
),
)
copyfile(
notebook2_filename,
os.path.join(
plugin_config.CourseDirectory.release_directory,
"assign_1",
"release2.ipynb",
),
)
plugin = ExchangeReleaseAssignment(
coursedir=CourseDirectory(config=plugin_config), config=plugin_config
)
def api_request(*args, **kwargs):
assert args[0] == (f"assignment?course_id=no_course" f"&assignment_id=assign_1")
assert kwargs.get("method").lower() == "post"
assert kwargs.get("data").get("notebooks") == ["release1", "release2"]
assert "assignment" in kwargs.get("files")
assert "assignment.tar.gz" == kwargs.get("files").get("assignment")[0]
assert len(kwargs.get("files").get("assignment")[1]) > 0
return type(
"Request",
(object,),
{"status_code": 200, "json": (lambda: {"success": True})},
)
with patch.object(Exchange, "api_request", side_effect=api_request):
plugin.start()
@pytest.mark.gen_test
def test_release_assignment_fail(plugin_config, tmpdir):
plugin_config.CourseDirectory.root = "/"
plugin_config.CourseDirectory.release_directory = str(
tmpdir.mkdir(release_dir).realpath()
)
plugin_config.CourseDirectory.assignment_id = "assign_1"
os.makedirs(
os.path.join(plugin_config.CourseDirectory.release_directory, "assign_1"),
exist_ok=True,
)
copyfile(
notebook1_filename,
os.path.join(
plugin_config.CourseDirectory.release_directory,
"assign_1",
"feedback.ipynb",
),
)
with open(
os.path.join(
plugin_config.CourseDirectory.release_directory, "assign_1", "timestamp.txt"
),
"w",
) as fp:
fp.write("2020-01-01 00:00:00.0 UTC")
plugin = ExchangeReleaseAssignment(
coursedir=CourseDirectory(config=plugin_config), config=plugin_config
)
def api_request(*args, **kwargs):
return type(
"Request",
(object,),
{
"status_code": 200,
"json": (lambda: {"success": False, "note": "failure note"}),
},
)
with patch.object(Exchange, "api_request", side_effect=api_request):
with pytest.raises(ExchangeError) as e_info:
plugin.start()
assert str(e_info.value) == "failure note"
@pytest.mark.gen_test
def test_release_oversize_blocked(plugin_config, tmpdir):
plugin_config.CourseDirectory.root = "/"
plugin_config.CourseDirectory.release_directory = str(
tmpdir.mkdir(release_dir).realpath()
)
plugin_config.CourseDirectory.assignment_id = "assign_1"
os.makedirs(
os.path.join(plugin_config.CourseDirectory.release_directory, "assign_1"),
exist_ok=True,
)
copyfile(
notebook1_filename,
os.path.join(
plugin_config.CourseDirectory.release_directory, "assign_1", "release.ipynb"
),
)
with open(
os.path.join(
plugin_config.CourseDirectory.release_directory, "assign_1", "timestamp.txt"
),
"w",
) as fp:
fp.write("2020-01-01 00:00:00.0 UTC")
plugin = ExchangeReleaseAssignment(
coursedir=CourseDirectory(config=plugin_config), config=plugin_config
)
# Set the max-buffer-size to 50 bytes
plugin.max_buffer_size = 50
def api_request(*args, **kwargs):
assert args[0] == (f"assignment?course_id=no_course" f"&assignment_id=assign_1")
assert kwargs.get("method").lower() == "post"
assert kwargs.get("data").get("notebooks") == ["release"]
assert "assignment" in kwargs.get("files")
assert "assignment.tar.gz" == kwargs.get("files").get("assignment")[0]
assert len(kwargs.get("files").get("assignment")[1]) > 0
return type(
"Request",
(object,),
{"status_code": 200, "json": (lambda: {"success": True})},
)
with patch.object(Exchange, "api_request", side_effect=api_request):
with pytest.raises(ExchangeError) as e_info:
plugin.start()
assert (
str(e_info.value)
== "Assignment assign_1 not released. The contents of your assignment are too large:\nYou may have data files, temporary files, and/or working files that should not be included - try deleting them."
)
```
#### File: nbexchange/tests/utils.py
```python
import asyncio
import base64
import io
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from urllib.parse import urljoin
import pytest
import requests
from nbexchange.models.actions import Action
from nbexchange.models.assignments import Assignment as AssignmentModel
from nbexchange.models.courses import Course
from nbexchange.models.feedback import Feedback
from nbexchange.models.notebooks import Notebook
from nbexchange.models.subscriptions import Subscription
from nbexchange.models.users import User
user_kiz = {"name": "1-kiz"}
user_bert = {"name": "1-bert"}
user_kiz_instructor = {
"name": "1-kiz",
"course_id": "course_2",
"course_role": "Instructor",
"course_title": "A title",
}
user_kiz_student = {
"name": "1-kiz",
"course_id": "course_2",
"course_role": "Student",
"course_title": "A title",
}
user_zik_student = {
"name": "1-zik",
"course_id": "course_2",
"course_role": "Student",
"course_title": "A title",
"full_name": "<NAME>",
}
user_brobbere_instructor = {
"name": "1-brobbere",
"course_id": "course_2",
"course_role": "Instructor",
"course_title": "A title",
}
user_brobbere_student = {
"name": "1-brobbere",
"course_id": "course_2",
"course_role": "Student",
}
user_lkihlman_instructor = {
"name": "1-lkihlman",
"course_id": "course_1",
"course_role": "Instructor",
"course_title": "A title",
}
user_lkihlman_student = {
"name": "1-lkihlman",
"course_id": "course_1",
"course_role": "Student",
}
def tar_source(filename):
import tarfile
tar_file = io.BytesIO()
with tarfile.open(fileobj=tar_file, mode="w:gz") as tar_handle:
tar_handle.add(filename, arcname=".")
tar_file.seek(0)
return tar_file.read()
def api_request(self, url, method="GET", *args, **kwargs):
headers = {}
if method == "GET":
get_req = partial(requests.get, url, headers=headers)
return get_req(*args, **kwargs)
elif method == "POST":
post_req = partial(requests.post, url, headers=headers)
return post_req(*args, **kwargs)
elif method == "DELETE":
delete_req = partial(requests.delete, url, headers=headers)
return delete_req(*args, **kwargs)
else:
raise NotImplementedError(f"HTTP Method {method} is not implemented")
def get_files_dict(filename):
import tarfile
tar_file = io.BytesIO()
with tarfile.open(fileobj=tar_file, mode="w:gz") as tar_handle:
tar_handle.add(filename, arcname=".")
tar_file.seek(0)
tar_file = tar_file.read()
files = {"assignment": ("assignment.tar.gz", tar_file)}
return files
def get_feedback_dict(filename):
with open(filename) as feedback_file:
files = {"feedback": ("feedback.html", feedback_file.read())}
return files
def get_feedback_file(filename):
with open(filename, "rb") as feedback_file:
files = base64.b64encode(feedback_file.read())
return files
class _AsyncRequests:
"""Wrapper around requests to return a Future from request methods
A single thread is allocated to avoid blocking the IOLoop thread.
"""
def __init__(self):
self.executor = ThreadPoolExecutor(1)
real_submit = self.executor.submit
self.executor.submit = lambda *args, **kwargs: asyncio.wrap_future(
real_submit(*args, **kwargs)
)
def __getattr__(self, name):
requests_method = getattr(requests, name)
return lambda *args, **kwargs: self.executor.submit(
requests_method, *args, **kwargs
)
# async_requests.get = requests.get returning a Future, etc.
async_requests = _AsyncRequests()
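# Hedged usage sketch (assumed asyncio/tornado test context, not from the source):
# every method returns an awaitable wrapping the corresponding blocking requests call.
#
#     async def fetch_status(url):
#         response = await async_requests.get(url)
#         return response.status_code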
class AsyncSession(requests.Session):
"""requests.Session object that runs in the background thread"""
def request(self, *args, **kwargs):
return async_requests.executor.submit(super().request, *args, **kwargs)
# fixture to clear the database completely
@pytest.fixture
def clear_database(db):
"""Clears the database.
requires the db handler
"""
db.query(Action).delete()
db.query(AssignmentModel).delete()
db.query(Course).delete()
db.query(Feedback).delete()
db.query(Notebook).delete()
db.query(Subscription).delete()
db.query(User).delete()
```
{
"source": "JGX020/picture-search-talk",
"score": 2
}
#### File: JGX020/picture-search-talk/app.py
```python
import sqlite3
import os
import uuid
import histsimilar
import client
import time
import datetime
import random
import textreplace
import json
import dygeneratehtml
import geo
from flask import Flask, render_template,session, g,abort,flash, request, redirect, url_for, \
send_from_directory,jsonify
app = Flask(__name__)
UPLOAD_FOLDER = 'uploads/'
UPLOADFILE_FOLDER='uploadsfile'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif','mp4'])
app.config['UPLOAD_FOLDER'] = UPLOADFILE_FOLDER
app.config['UPLOADFILE_FOLDER'] = UPLOAD_FOLDER
app.config['STATIC_FOLDER'] = UPLOAD_FOLDER
path = r'testpic/TEST%d/%d.JPG'
DATABASE = 'sqldb.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = '<PASSWORD>'
rootdir = r'F:\flask-file-upload-example-master\uploads'  # raw string so the backslashes are kept literally
filename = "database/test.txt"
filename1 = "database/test1.txt"
filename2 = "database/test2.txt"
f1 = open("database/test1.txt")
fp=open('database/testtopic1.txt','w')
lii=histsimilar.data(filename)
#dict=[{'imgurl':'../uploads/6f7088b4.jpg','title':'1aaa'},{'imgurl':'../uploads/7bf7191e.jpg','title':'2bbb'},{'imgurl':'../uploads/4b721df0.jpg','title':'s9999'}]
app.config.from_object(__name__)
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def get_connection():
db = getattr(g, '_db', None)
if db is None:
db = g._db = connect_db()
return db
def get_conn(path):
conn = sqlite3.connect(path)
if os.path.exists(path) and os.path.isfile(path):
print('s:[{}]'.format(path))
return conn
else:
conn = None
print('n:[:memory:]')
return sqlite3.connect(':memory:')
def get_cursor(conn):
if conn is not None:
return conn.cursor()
else:
return get_conn('').cursor()
def drop_table(conn, table):
if table is not None and table != '':
sql = 'DROP TABLE IF EXISTS ' + table
if SHOW_SQL:
print('sql:[{}]'.format(sql))
cu = get_cursor(conn)
cu.execute(sql)
conn.commit()
print('[{}]!'.format(table))
close_all(conn, cu)
else:
        print('the [{}] is empty or equal None!'.format(table))
def create_table(conn, sql):
if sql is not None and sql != '':
cu = get_cursor(conn)
if SHOW_SQL:
print('sql:[{}]'.format(sql))
cu.execute(sql)
conn.commit()
print('[student]!')
close_all(conn, cu)
else:
print('the [{}] is empty or equal None!'.format(sql))
def close_all(conn, cu):
    # close the cursor first, then release the connection
    try:
        if cu is not None:
            cu.close()
    finally:
        if conn is not None:
            conn.close()
def save(conn, sql, data):
if sql is not None and sql != '':
if data is not None:
cu = get_cursor(conn)
for d in data:
if SHOW_SQL:
print('sql:[{}],s:[{}]'.format(sql, d))
cu.execute(sql, d)
conn.commit()
close_all(conn, cu)
else:
print('the [{}] is empty or equal None!'.format(sql))
def fetchall(conn, sql):
if sql is not None and sql != '':
cu = get_cursor(conn)
if SHOW_SQL:
print('sql:[{}]'.format(sql))
cu.execute(sql)
        r = [dict(id=row[0], title=row[1], url=row[2], address=row[3]) for row in cu.fetchall()]
if len(r) > 0:
return r
#for e in range(len(r)):
# print(r[e])
else:
print('the [{}] is empty or equal None!'.format(sql))
def fetchone(conn, sql, data):
if sql is not None and sql != '':
if data is not None:
#Do this instead
d = (data,)
cu = get_cursor(conn)
if SHOW_SQL:
print('tsql:[{}],c:[{}]'.format(sql, data))
cu.execute(sql, d)
r = cu.fetchall()
if len(r) > 0:
for e in range(len(r)):
print(r[e])
else:
print('the [{}] equal None!'.format(data))
else:
print('the [{}] is empty or equal None!'.format(sql))
def update(conn, sql, data):
if sql is not None and sql != '':
if data is not None:
cu = get_cursor(conn)
for d in data:
if SHOW_SQL:
print('tsql:[{}],c:[{}]'.format(sql, d))
cu.execute(sql, d)
conn.commit()
close_all(conn, cu)
else:
print('the [{}] is empty or equal None!'.format(sql))
def delete(conn, sql, data):
if sql is not None and sql != '':
if data is not None:
cu = get_cursor(conn)
for d in data:
if SHOW_SQL:
print('tsql:[{}],c:[{}]'.format(sql, d))
cu.execute(sql, d)
conn.commit()
close_all(conn, cu)
else:
print('the [{}] is empty or equal None!'.format(sql))
def drop_table_test():
print('ss...')
conn = get_conn(DB_FILE_PATH)
drop_table(conn, TABLE_NAME)
def create_table_test():
print('cs...')
create_table_sql = '''CREATE TABLE `pic` (
`id` int(11) NOT NULL,
`title` varchar(20) NOT NULL,
`url` varchar(4) DEFAULT NULL,
`address` int(11) DEFAULT NULL,
PRIMARY KEY (`id`)
)'''
conn = get_conn(DB_FILE_PATH)
create_table(conn, create_table_sql)
def save_test():
print('bs...')
    save_sql = '''INSERT INTO pic values (?, ?, ?, ?)'''  # four columns: id, title, url, address
data = [(1, 'Hongten', '../uploads/6f7088b4.jpg', ''),
(2, 'Tom', '../uploads/6f7088b4.jpg', ''),
(3, 'Jake', '../uploads/6f7088b4.jpg', ''),
(4, 'Cate', '../uploads/6f7088b4.jpg', '')]
conn = get_conn(DB_FILE_PATH)
save(conn, save_sql, data)
def fetchall_test():
print('sd...')
fetchall_sql = '''SELECT * FROM pic'''
conn = get_conn(DB_FILE_PATH)
fetchall(conn, fetchall_sql)
def fetchone_test():
print('select a data from database...')
fetchone_sql = 'SELECT * FROM student WHERE ID = ? '
data = 1
conn = get_conn(DB_FILE_PATH)
fetchone(conn, fetchone_sql, data)
def update_test():
print('update data...')
update_sql = 'UPDATE student SET name = ? WHERE ID = ? '
data = [('HongtenAA', 1),
('HongtenBB', 2),
('HongtenCC', 3),
('HongtenDD', 4)]
conn = get_conn(DB_FILE_PATH)
update(conn, update_sql, data)
def delete_test():
print('delete data...')
delete_sql = 'DELETE FROM student WHERE NAME = ? AND ID = ? '
data = [('HongtenAA', 1),
('HongtenCC', 3)]
conn = get_conn(DB_FILE_PATH)
delete(conn, delete_sql, data)
def init():
global DB_FILE_PATH
DB_FILE_PATH = 'F:\sqlite-shell-win32-x86-3090200\\sqldb.db'
global TABLE_NAME
TABLE_NAME = 'student'
global SHOW_SQL
SHOW_SQL = True
print('show_sql : {}'.format(SHOW_SQL))
drop_table_test()
create_table_test()
save_test()
def main():
init()
#delete_test()
#fetchall_test()
fetchall_test()
print('#' * 50)
fetchone_test()
print('#' * 50)
update_test()
fetchall_test()
print('#' * 50)
#app = Flask(__name__)
# delete_test()
# fetchall_test()
def connect_db():
return sqlite3.connect('F:\sqlite-shell-win32-x86-3090200\\sqldb.db')
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
def generate_unique_filename(filename):
return str(uuid.uuid4())[:8] + '.' + filename.rsplit('.', 1)[1]
def similar():
#path = r'testpic/TEST%d/%d.JPG'
for i in xrange(1, 10):
print 'test_case_%d: %.3f%%'%(i, \
histsimilar.calc_similar_by_path('testpic/TEST%d/%d.JPG'%(i, 1), 'testpic/TEST%d/%d.JPG'%(i, 2))*100)
def list():
for root,dirs,files in os.walk(r'F:\\flask-file-upload-example-master\\uploads'):
for file in files:
print root + os.sep + file
def equals(dict):
list="[{'imgurl':'"+dict[1]['imgurl']+"','title':'"+dict[1]['title']+"','name':'"+dict[1]['name']+"'}]"
return eval(list)
def data(f1):
#f = open("database/test.txt")
dict=f1.read()
#f.close()
return eval(dict)
@app.route('/', methods=['GET', 'POST'])
def index():
# histsimilar.data()
#dict=data(f1)
#histsimilar.list2('F:\\flask-file-upload-example-master\\uploads',histsimilar.data(filename))
#similar()
#dict=[{'imgurl':'../uploads/6f7088b4.jpg','title':'1aaa'},{'imgurl':'../uploads/7bf7191e.jpg','title':'2bbb'},{'imgurl':'../uploads/4b721df0.jpg','title':'s9999'}]
#list()
#init()
#delete_test()
#fetchall_test()
#r=fetchall_test()
ftext=open('iptest.txt').read()
histsimilar.write('iptest.txt',ftext+','+request.remote_addr)
if request.method == 'POST':
file = request.files['file']
#print lii.append({'imgurl':'','title':'','name':'12.jpg'})
if file and allowed_file(file.filename):
file.save(os.path.join(app.config['UPLOAD_FOLDER'], "a.jpg"))
return render_template('index.html',dict=histsimilar.list2('F:\\flask-file-upload-example-master\\uploads'))
return render_template('index.html',dict=histsimilar.data(filename),user_ip = request.remote_addr)
@app.route('/loginup', methods=['GET', 'POST'])
def loginup():
if request.method == 'POST':
return render_template('index.html',dict=histsimilar.data(filename),username=client.selname(request.form['username'],request.form['password']),ipadd='/space?unicode='+request.form['username'])
@app.route('/regisiterup', methods=['GET', 'POST'])
def regisiterup():
if request.method == 'POST':
client.insertmes('(\''+request.form['username']+'\',\''+request.form['password']+'\',\''+request.form['email']+'\')')
return render_template('index.html',dict=histsimilar.data(filename),user_ip = request.remote_addr)
@app.route('/space', methods=['GET', 'POST'])
def space():
if request.method == 'POST':
file = request.files['file']
dygeneratehtml.replace()
#print lii.append({'imgurl':'','title':'','name':'12.jpg'})
if file and allowed_file(file.filename):
file.save(os.path.join(app.config['UPLOADFILE_FOLDER'],file.filename))
if file and allowed_file(file.filename):
client.send_file2 ('validate.txt','172.16.58.3')
if file and allowed_file(file.filename):
client.send_file ('uploadsfile/'+file.filename,'172.16.58.3')
return render_template('space.html',hrefadd='ftp://'+request.args.get('fileadd')+'/'+file.filename)
if open('temp/tmp.txt').read()=='true' and request.args.get('unicode')=='jgx020': #advanced
return render_template('space.html',hrefadd=client.seladdr('jgx020')[3:len(open('temp/tmp2.txt').read())-4])
else:
return render_template('404.html')
@app.route('/fileuploadsys')
def fileuploadsys():
return render_template('fileuploadsys.html')
@app.route('/map',methods=['POST', 'GET'])
def map():
if request.method =='POST':
if geo.position(request.form['position']) is not None:
return render_template('map.html',longs=geo.position(request.form['position']).split(':')[0],lati=geo.position(request.form['position']).split(':')[1])
else:
return render_template('404.html')
return render_template('map.html',longs='116.391248',lati='39.9059631')
@app.route('/uploadstext',methods=['POST', 'GET'])
def uploadtext():
if request.method == 'POST':
histsimilar.writecontent(request.form['content'])
if request.args.get('i') is not None:
b=int(request.args.get('i'))
if len(histsimilar.writelist())/9+1==b:
return render_template('editor.html',content1=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-9]).read(),content2=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-8]).read(),content3=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-7]).read(),content4=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-6]).read(),content5=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-5]).read(),content6=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-4]).read(),content7=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-3]).read(),content8=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-2]).read(),content9=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-1]).read())
return render_template('editor.html',content1=open('database/'+histsimilar.writelist()[b*9-9]).read(),content2=open('database/'+histsimilar.writelist()[b*9-8]).read(),content3=open('database/'+histsimilar.writelist()[b*9-7]).read(),content4=open('database/'+histsimilar.writelist()[b*9-6]).read(),content5=open('database/'+histsimilar.writelist()[b*9-5]).read(),content6=open('database/'+histsimilar.writelist()[b*9-4]).read(),content7=open('database/'+histsimilar.writelist()[b*9-3]).read(),content8=open('database/'+histsimilar.writelist()[b*9-2]).read(),content9=open('database/'+histsimilar.writelist()[b*9-1]).read())
return render_template('index.html',dict=histsimilar.data(filename))
@app.route('/jump', methods=['GET', 'POST'])
def find():
if request.method == 'POST':
file = request.files['file']
#print lii.append({'imgurl':'','title':'','name':'12.jpg'})
if file and allowed_file(file.filename):
file.save(os.path.join(app.config['UPLOAD_FOLDER'], histsimilar.filesave(time.strftime('%Y-%m-%d %H-%M-%S',time.localtime(int(time.time())))+".jpg")))
return render_template('index.html',dict=histsimilar.list2('F:\\flask-file-upload-example-master\\uploads'))
@app.route('/login',methods=['GET','POST'])
def login():
return render_template('login.html')
@app.route('/direct',methods=['POST', 'GET'])
def derictfile():
return render_template('editor.html',img_from_args=request.args.get('img'),page=request.args.get('i'),content1=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-9]).read(),content2=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-8]).read(),content3=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-7]).read(),content4=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-6]).read(),content5=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-5]).read(),content6=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-4]).read(),content7=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-3]).read(),content8=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-2]).read(),content9=open('database/'+histsimilar.writelist()[int(request.args.get('i'))*9-1]).read())
@app.route('/right',methods=['POST', 'GET'])
def rights():
return render_template('editor.html',data=json.dumps(histsimilar.addlistup(0)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/down',methods=['POST', 'GET'])
def downs():
return render_template('editor.html',datad=json.dumps(histsimilar.addlistdown(0)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/right1',methods=['POST', 'GET'])
def rights1():
return render_template('editor.html',datad=json.dumps(histsimilar.addlistdown(0)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/down1',methods=['POST', 'GET'])
def downs1():
return render_template('editor.html',datad1=json.dumps(histsimilar.addlistdown(1)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/right2',methods=['POST', 'GET'])
def rights2():
return render_template('editor.html',data2=json.dumps(histsimilar.addlistup(2)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/down2',methods=['POST', 'GET'])
def downs2():
return render_template('editor.html',datad2=json.dumps(histsimilar.addlistdown(2)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/right3',methods=['POST', 'GET'])
def rights3():
return render_template('editor.html',data3=json.dumps(histsimilar.addlistup(3)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/down3',methods=['POST', 'GET'])
def downs3():
return render_template('editor.html',datad3=json.dumps(histsimilar.addlistdown(3)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/right4',methods=['POST', 'GET'])
def rights4():
return render_template('editor.html',data4=json.dumps(histsimilar.addlistup(4)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/down4',methods=['POST', 'GET'])
def downs4():
return render_template('editor.html',datad4=json.dumps(histsimilar.addlistdown(4)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/right5',methods=['POST', 'GET'])
def rights5():
return render_template('editor.html',data5=json.dumps(histsimilar.addlistup(5)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/down5',methods=['POST', 'GET'])
def downs5():
return render_template('editor.html',datad5=json.dumps(histsimilar.addlistdown(5)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/right6',methods=['POST', 'GET'])
def rights6():
return render_template('editor.html',data6=json.dumps(histsimilar.addlistup(6)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/down6',methods=['POST', 'GET'])
def downs6():
return render_template('editor.html',datad6=json.dumps(histsimilar.addlistdown(6)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/right7',methods=['POST', 'GET'])
def rights7():
return render_template('editor.html',data7=json.dumps(histsimilar.addlistup(7)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/down7',methods=['POST', 'GET'])
def downs7():
return render_template('editor.html',datad7=json.dumps(histsimilar.addlistdown(7)),img_from_args=request.args.get('img'),page=request.args.get('i'),content1=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-9]['text'],content2=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-8]['text'],content3=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-7]['text'],content4=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-6]['text'],content5=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-5]['text'],content6=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-4]['text'],content7=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-3]['text'],content8=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-2]['text'],content9=histsimilar.data0('database/test4.txt')[int(request.args.get('i'))*9-1]['text'])
@app.route('/register',methods=['GET','POST'])
def register():
return render_template('register.html')
@app.route('/pagejump',methods=['POST', 'GET'])
def index2():
    if request.args.get('pages') is None:
        return render_template('index.html',dict=histsimilar.data2(filename,2))
    else:
        return render_template('index.html',dict=histsimilar.data2(filename,request.args.get('pages')))
@app.route('/_add_numbersss')
def add_numbersss():
a = request.args.get('a', 0, type=int)
b = request.args.get('b', 0, type=int)
return jsonify(result=a + b)
@app.route('/_add_numbers',methods=['POST', 'GET'])
def add_numbers():
textreplace.replace(open('database/test1.txt').readlines())
return render_template('index.html',dict=histsimilar.data(filename))
if __name__ == '__main__':
app.run(host="0.0.0.0",port=80)
```
#### File: JGX020/picture-search-talk/curve.py
```python
from PIL import Image
def sketch(img, threshold):
if threshold < 0: threshold = 0
if threshold > 100: threshold = 100
width, height = img.size
img = img.convert('L') # convert to grayscale mode
pix = img.load() # get pixel matrix
for w in xrange(width):
for h in xrange(height):
if w == width-1 or h == height-1:
continue
src = pix[w, h]
dst = pix[w+1, h+1]
diff = abs(src - dst)
if diff >= threshold:
pix[w, h] = 0
else:
pix[w, h] = 255
return img
if __name__ == "__main__":
import sys, os
path = os.path.dirname(__file__) + os.sep.join(['', 'uploads', '4.jpg'])
threshold = 15
if len(sys.argv) == 2:
try:
threshold = int(sys.argv[1])
except ValueError:
path = sys.argv[1]
elif len(sys.argv) == 3:
path = sys.argv[1]
threshold = int(sys.argv[2])
img = Image.open(path)
img = sketch(img, threshold)
img.save(os.path.splitext(path)[0]+'.sketch.jpg', 'JPEG')
```
#### File: JGX020/picture-search-talk/geo.py
```python
from geopy.geocoders import Nominatim
def position(pname):
geolocator = Nominatim()
location = geolocator.geocode(pname)
if location is None:
return None
print((location.latitude,location.longitude))
strs=str(location.longitude)+':'+str(location.latitude)
return strs
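# Illustrative call, kept as a comment since it needs network access to the
# Nominatim service; the place name below is a made-up example.
# print(position('Berlin'))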
```
#### File: JGX020/picture-search-talk/histsimilar.py
```python
import Image
import os
import time
import datetime
from decimal import *
fp1=open('database/test3.txt','w')
fp=open('database/test1.txt','w')
up=eval(open('database/test4.txt').read())
down=eval(open('database/test4.txt').read())
fp.truncate()
fp1.truncate()
def make_regalur_image(img, size = (256, 256)):
return img.resize(size).convert('RGB')
def split_image(img, part_size = (64, 64)):
w, h = img.size
pw, ph = part_size
assert w % pw == h % ph == 0
return [img.crop((i, j, i+pw, j+ph)).copy() \
for i in xrange(0, w, pw) \
for j in xrange(0, h, ph)]
def hist_similar(lh, rh):
assert len(lh) == len(rh)
return sum(1 - (0 if l == r else float(abs(l - r))/max(l, r)) for l, r in zip(lh, rh))/len(lh)
def calc_similar(li, ri):
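    # Average the per-tile histogram similarity over the 4x4 grid of 64x64
    # tiles produced by split_image on a 256x256 image (hence the 16.0 divisor).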
# return hist_similar(li.histogram(), ri.histogram())
return sum(hist_similar(l.histogram(), r.histogram()) for l, r in zip(split_image(li), split_image(ri))) / 16.0
def calc_similar_by_path(lf, rf):
li, ri = make_regalur_image(Image.open(lf)), make_regalur_image(Image.open(rf))
return calc_similar(li, ri)
def make_doc_data(lf, rf):
li, ri = make_regalur_image(Image.open(lf)), make_regalur_image(Image.open(rf))
li.save(lf + '_regalur.png')
ri.save(rf + '_regalur.png')
fd = open('stat.csv', 'w')
fd.write('\n'.join(l + ',' + r for l, r in zip(map(str, li.histogram()), map(str, ri.histogram()))))
# print >>fd, '\n'
# fd.write(','.join(map(str, ri.histogram())))
fd.close()
import ImageDraw
li = li.convert('RGB')
draw = ImageDraw.Draw(li)
for i in xrange(0, 256, 64):
draw.line((0, i, 256, i), fill = '#ff0000')
draw.line((i, 0, i, 256), fill = '#ff0000')
li.save(lf + '_lines.png')
def list(filename):
for root,dirs,files in os.walk(filename):
for file in files:
print 'test_case_: %.3f%%'%( \
calc_similar_by_path('uploads/1.jpg', 'uploads'+os.sep + file)*100)
#print 'uploads'+os.sep + file
def dicts(dicts):
list="{'imgurl':'"+dicts['imgurl']+"','title':'"+dicts['title']+"','name':'"+dicts['name']+"'}"
return eval(list)
def clean(file):
file.truncate()
def list2(file):
list=[]
for dicts in data0('database/test.txt'):
r=calc_similar_by_path('uploads/'+open('database/temp.txt').read(), 'uploads'+os.sep + dicts['name'])*100
# list="[{'imgurl':'"+dict[1]['imgurl']+"','title':'"+dict[1]['title']+"','name':'"+dict[1]['name']+"'}]"
# b=20.000
#print '%.3f%%'(calc_similar_by_path('uploads/1.JPG', 'uploads'+os.sep + dict[0]['name']))
if Decimal(r)>=Decimal(50.000):
list.append(dicts)
print list
fp=open('database/test1.txt','w')
fp1=open('database/test3.txt','w')
if len(list)>9:
fp1.write(str(list))
if len(list)<=9:
fp.write(str(list))
return list[0:9]
def data0(filename):
return eval(open(filename).read())
def filesave(filetxt):
open('database/temp.txt','w').write(filetxt)
print filetxt
return filetxt
def data(filename):
#f = open(filename)
dict = open(filename).read()
#f.close()
lines=eval(dict)
if len(lines)>=9:
return lines[0:9]
else:
return lines[0:len(lines)]
def writecontent(content):
filesfile=open('database/test5.txt')
filestr=filesfile.read()
files=filestr.split(',')
b =len(files)
for file in files:
b=b-1
if len(open('database/'+file).read())==0:
open('database/'+file,'w').write(content)
if b>0:
break
open('database/'+str(time.time())+'.txt','w') #
open('database/test5.txt','a').write(','+str(time.time())+'.txt')
filesfile.close()
def writecontent1(content,i):
    open('database/testtopic'+str(i*9-7)+'.txt','w').write(content)
def writecontent2(content,i):
    open('database/testtopic'+str(i*9-6)+'.txt','w').write(content)
def writecontent3(content,i):
    open('database/testtopic'+str(i*9-5)+'.txt','w').write(content)
def writecontent4(content,i):
    open('database/testtopic'+str(i*9-4)+'.txt','w').write(content)
def writecontent5(content,i):
    open('database/testtopic'+str(i*9-3)+'.txt','w').write(content)
def writecontent6(content,i):
    open('database/testtopic'+str(i*9-2)+'.txt','w').write(content)
def writecontent7(content,i):
    open('database/testtopic'+str(i*9-1)+'.txt','w').write(content)
def writecontent8(content,i):
    open('database/testtopic'+str(i*9)+'.txt','w').write(content)
def addlistup(i):
up[i]['up']=int(up[i]['up'])+1
if up[i]['up']>up[i]['down']:
up[i]['value']='1'
else:
up[i]['value']='0'
write('database/test4.txt',str(up))
return up[i]['up']
def addlistdown(i):
down[i]['down']=int(down[i]['down'])+1
if down[i]['up']>down[i]['down']:
down[i]['value']='1'
else:
down[i]['value']='0'
write('database/test4.txt',str(down))
return down[i]['down']
def writelist():
filesfile1=open('database/test5.txt')
filestr1=filesfile1.read()
file1=filestr1.split(',')
filesfile1.close()
return file1
def writecontentcount(content,i):
    if len(open('database/testtopic'+str(int(i)*9-8)+'.txt').read()) == 0:
        writecontent(content)
    elif len(open('database/testtopic'+str(int(i)*9-7)+'.txt').read()) == 0:
        writecontent1(content,i)
    elif len(open('database/testtopic'+str(int(i)*9-6)+'.txt').read()) == 0:
        writecontent2(content,i)
    elif len(open('database/testtopic'+str(int(i)*9-5)+'.txt').read()) == 0:
        writecontent3(content,i)
    elif len(open('database/testtopic'+str(int(i)*9-4)+'.txt').read()) == 0:
        writecontent4(content,i)
    elif len(open('database/testtopic'+str(int(i)*9-3)+'.txt').read()) == 0:
        writecontent5(content,i)
    elif len(open('database/testtopic'+str(int(i)*9-2)+'.txt').read()) == 0:
        writecontent6(content,i)
    elif len(open('database/testtopic'+str(int(i)*9-1)+'.txt').read()) == 0:
        writecontent7(content,i)
    elif len(open('database/testtopic'+str(int(i)*9)+'.txt').read()) == 0:
        writecontent8(content,i)
def data2(filename,i):
#f = open(filename)
dict = open(filename).read()
dict1=open("database/test3.txt").read()
dict3=open("database/test1.txt").read()
#f.close()
lines=eval(dict)
t=9*int(i)
t2=int(t)-9
if dict1!="" and len(eval(dict1))>t:
return eval(dict1)[t2:t]
if dict1!="" and len(eval(dict1))<=t:
return eval(dict1)[t2:len(eval(dict1))]
if dict3!="" and len(eval(dict3))>t:
return eval(dict3)[t2:t]
if dict3!="" and len(eval(dict3))<=t:
return lines[t2:len(eval(dict3))]
if len(lines)>t:
return lines[t2:t]
if len(lines)<=t:
return lines[t2:len(lines)]
def write(filename,txt):
file_object = open(filename, 'w')
file_object.write(txt)
file_object.close( )
if __name__ == '__main__':
path = r'testpic/TEST%d/%d.jpg'
for i in xrange(1, 10):
print 'test_case_%d: %.3f%%'%(i, \
calc_similar_by_path('testpic/TEST%d/%d.jpg'%(i, 1), 'testpic/TEST%d/%d.jpg'%(i, 2))*100)
# make_doc_data('test/TEST4/1.JPG', 'test/TEST4/2.JPG')
``` |
{
"source": "JGX020/python-hadoop-web-restful-api",
"score": 3
} |
#### File: python-hadoop-web-restful-api/mysqlautosave/mysqlautosave.py
```python
import os
def exportfrommysql(ip,password,database,savefile):
os.system('mysqldump -h'+ip+' -uroot -p'+password+' -d '+database+'>/root/'+savefile)
def importfrommysql(ip,password,database,savefile):
    os.system('mysql -h'+ip+' -uroot -p'+password+' '+database+' </root/'+savefile)
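# Illustrative calls (host, password, database and file names are placeholders):
# exportfrommysql('127.0.0.1', 'secret', 'mydb', 'mydb_backup.sql')
# importfrommysql('127.0.0.1', 'secret', 'mydb', 'mydb_backup.sql')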
``` |
{
"source": "jgyllinsky/How-to-Learn-from-Little-Data",
"score": 3
} |
#### File: MANN/Utils/Images.py
```python
import tensorflow as tf
import numpy as np
import scipy.misc
import matplotlib.pyplot as plt
import os
import random
from scipy.ndimage import rotate,shift
from scipy.misc import imread,imresize
def get_shuffled_images(paths, labels, nb_samples=None):
if nb_samples is not None:
sampler = lambda x: random.sample(x, nb_samples)
else:
sampler = lambda x:x
images = [(i, os.path.join(path, image)) for i,path in zip(labels,paths) for image in sampler(os.listdir(path)) ]
random.shuffle(images)
return images
def time_offset_label(labels_and_images):
labels, images = zip(*labels_and_images)
time_offset_labels = (None,) + labels[:-1]
return zip(images, time_offset_labels)
def load_transform(image_path, angle=0., s=(0,0), size=(20,20)):
#Load the image
original = imread(image_path, flatten=True)
#Rotate the image
rotated = np.maximum(np.minimum(rotate(original, angle=angle, cval=1.), 1.), 0.)
#Shift the image
shifted = shift(rotated, shift=s)
#Resize the image
    resized = np.asarray(imresize(shifted, size=size), dtype=np.float32) / 255 #Note here we coded manually as np.float32, it should be tf.float32
#Invert the image
inverted = 1. - resized
max_value = np.max(inverted)
if max_value > 0:
inverted /= max_value
return inverted
```
#### File: MANN/Utils/similarities.py
```python
import tensorflow as tf
def cosine_similarity(x, y, eps=1e-6):
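    # Shape assumption (inferred from the batched matmul below): x is
    # [batch, m, d] and y is [batch, n, d]; the result is a [batch, m, n]
    # matrix of cosine similarities, with eps guarding against division by zero.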
z = tf.batch_matmul(x, tf.transpose(y, perm=[0,2,1]))
z /= tf.sqrt(tf.multiply(tf.expand_dims(tf.reduce_sum(tf.multiply(x,x), 2), 2),tf.expand_dims(tf.reduce_sum(tf.multiply(y,y), 2), 1)) + eps)
return z
``` |
{
"source": "jg-you/cascading_detection",
"score": 2
} |
#### File: cdtools/LCA/detection.py
```python
import subprocess
import locale
import yaml
import logging
from os import path, remove
from ..utilities import DeleteCommas, drange
from ..generic import PurgeClusters
def Detect(edgeListPath, tmpDirectory, minSize,
nwSize=0, uniqueId="tmp", verbose=False):
'''
Interface of the LCA algorithm.
"nwSize" is useless but kept for polymorphism purposes.
'''
log = logging.getLogger("cascading_logger")
# LCA configurations.
configurationFile = path.join(path.dirname(__file__), "LCA_conf.yml")
conf = yaml.load(open(configurationFile).read())
T_increment = conf['threshold_increments']
min_T = conf['min_threshold']
max_T = conf['max_threshold']
ignoreSingleton = conf["ignore_singleton"]
# Compute Jaccard coefficients.
CalcJaccards(edgeListPath, tmpDirectory, uniqueId)
# Identify maximal density.
optimal_T = min_T
optimal_D = 0
optimalClustersPath = ""
discardList = []
for T in drange(min_T, max_T, T_increment):
D, clustersPath = Cluster(T, edgeListPath, tmpDirectory,
uniqueId, ignoreSingleton)
if D > optimal_D:
optimal_D = D
optimal_T = T
discardList.append(optimalClustersPath)
optimalClustersPath = clustersPath
else:
discardList.append(clustersPath)
# Determine if further detection should be applied:
log.info("optimal_T=" + str(optimal_T) + " optimal_D=" + str(optimal_D))
if optimal_T == min_T and optimal_D < 1:
log.info("LCA.Detect(): Too sparse. Rejecting.")
tooSparse = True
discardList.append(optimalClustersPath)
else:
log.info("LCA.Detect(): Dense enough. Proceeding.")
tooSparse = False
DeleteCommas(optimalClustersPath)
PurgeClusters(optimalClustersPath, minSize)
# Cleanup: remove unused files (more portable than writing to /dev/null).
discardList.append(path.join(tmpDirectory, uniqueId + ".stats"))
discardList.append(path.join(tmpDirectory, uniqueId + ".jaccs"))
Cleanup(discardList)
return optimalClustersPath, tooSparse
def CalcJaccards(edgeListPath, tmpDirectory, uniqueId):
    ''' Compute Jaccard coefficients using a C++ subroutine. '''
log = logging.getLogger("cascading_logger")
try:
absPath = path.dirname(path.abspath(__file__))
calcJaccCall = [path.join(absPath, "bin", "LCA_calc"),
edgeListPath,
path.join(tmpDirectory, uniqueId + ".jaccs")]
subprocess.check_call(calcJaccCall)
except subprocess.CalledProcessError:
log.error("Unexpected error with ", calcJaccCall)
def Cluster(T, edgeListPath, tmpDirectory, uniqueId, ignoreSingleton):
''' Cluster edges that are more similar than a threshold value together.'''
log = logging.getLogger("cascading_logger")
    T_str = str(T).replace(".", "_")  # Underscores are safer than "." in file names.
fileJacc = path.join(tmpDirectory, uniqueId + ".jaccs")
fileClus = path.join(tmpDirectory, T_str + uniqueId + ".dat")
fileStat = path.join(tmpDirectory, uniqueId + ".stats")
# Cluster edges call
absPath = path.dirname(path.abspath(__file__))
clusterCall = [path.join(absPath, "bin", "LCA_cluster"),
edgeListPath, fileJacc, fileClus, fileStat, str(T)]
try:
# Execute clustering function
proc = subprocess.Popen(clusterCall, stdout=subprocess.PIPE)
# Fetch density from stdout (WILL break on LCA_cluster API change)
#
# NOTE: This fragile API forces one to read and strip a specific line
# of the stdout. The density of the partition is found on line 2,
# formatted as:
# D = FLOAT_VALUE
# whereas it is found on line 4 if one does not consider singletons.
if ignoreSingleton:
D_lineNo = 4
else:
D_lineNo = 2
proc_out, proc_err = proc.communicate()
encodingLocale = None
try:
encodingLocale = locale.getdefaultlocale()[1]
except:
pass
if encodingLocale is None:
encodingLocale = 'UTF-8'
tmp = proc_out.decode(encodingLocale).split("\n")
current_D = float(tmp[D_lineNo].replace(" ", "").replace("D=", ""))
except subprocess.CalledProcessError:
log.error("Unexpected error with ", clusterCall)
return current_D, fileClus
def Cleanup(discardList):
''' Cleanup temporary files after the detection algorithm is done.'''
for f in discardList:
try:
remove(f)
except:
pass
```
#### File: jg-you/cascading_detection/configure.py
```python
import logging
import yaml
from os import chdir, listdir, mkdir, path, remove
from shutil import move
import subprocess
import argparse
def main():
absBase = path.dirname(path.abspath(__file__))
# Options parser.
prs = argparse.ArgumentParser(description='Setup directories and compile\
subroutines for the cascading\
detection meta-algorithm.')
prs.add_argument('-c', '--configuration', type=str, dest='confPath',
default=path.join(absBase, "conf/configuration.yml"),
help='Relative path of the global configuration file.')
prs.add_argument('-n', '--no_compile', action='store_true',
help='Do not compile subroutines.')
args = prs.parse_args()
# Load the script configuration.
conf = yaml.load(open(args.confPath).read())
# Initialize logging.
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] %(asctime)s : %(message)s',
datefmt='%d-%m-%y %H:%M:%S')
log = logging.getLogger("setup")
    # Create the temporary and result directories.
log.info("Setup the temporary directory: " + conf['temporary_dir'] + ".")
try:
mkdir(conf['temporary_dir'], 0o755)
log.info(conf['temporary_dir'] + " created with 755 privilege.")
except OSError:
log.info(conf['temporary_dir'] + " already exists.")
pass
log.info("Setup the cluster directory: " + conf['result_dir'] + ".")
try:
mkdir(conf['result_dir'], 0o755)
log.info(conf['result_dir'] + " created with 755 privilege.")
except OSError:
log.info(conf['result_dir'] + " already exists.")
pass
# Compile cpp subroutines.
if not args.no_compile:
# GCE
bpth = path.join(absBase, "cdtools", "GCE", "src", "build")
try:
compilerCall = ["make"]
log.info("Compiling: " + str(compilerCall) + "(GCE)")
log.info("Entering " + bpth)
chdir(bpth)
returnVal = subprocess.check_call(compilerCall)
move(path.join(bpth, "GCECommunityFinder"),
path.join(absBase, "cdtools", "GCE", "bin", "GCE"))
log.info("Exit value: " + str(returnVal))
except subprocess.CalledProcessError as exc:
log.error("Compiler error!")
log.error(exc)
pass
finally:
log.info("Cleaning up.")
dellist = [path.join(bpth, x) for x in listdir(bpth)]
dellist.remove(path.join(bpth, "makefile"))
for f in dellist:
remove(f)
log.info("Entering " + absBase)
chdir(absBase)
# LCA
bpth = path.join(absBase, "cdtools", "LCA")
try:
compilerCall = [conf["cpp_compiler"],
conf["compiler_flag"],
path.join(bpth, "src/calcAndWrite_Jaccards.cpp"),
"-o", path.join(bpth, "bin/LCA_calc")]
log.info("Compiling: " + str(compilerCall))
returnVal = subprocess.check_call(compilerCall)
log.info("Exit value: " + str(returnVal))
except subprocess.CalledProcessError as exc:
log.error("Compiler error!")
log.error(exc)
pass
try:
compilerCall = [conf["cpp_compiler"],
conf["compiler_flag"],
path.join(bpth, "src/clusterJaccsFile.cpp"),
"-o", path.join(bpth, "bin/LCA_cluster")]
log.info("Compiling: " + str(compilerCall))
returnVal = subprocess.check_call(compilerCall)
log.info("Exit value: " + str(returnVal))
except subprocess.CalledProcessError as exc:
log.error("Compiler error!")
log.error(exc)
pass
if __name__ == "__main__":
main()
``` |
{
"source": "JGZL/breeze",
"score": 2
} |
#### File: breeze/callback_plugins/log_back.py
```python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import time
import json
import http.client
from collections import MutableMapping
from ansible.module_utils._text import to_bytes
from ansible.plugins.callback import CallbackBase
# NOTE: in Ansible 1.2 or later general logging is available without
# this plugin, just set ANSIBLE_LOG_PATH as an environment variable
# or log_path in the DEFAULTS section of your ansible configuration
# file. This callback is an example of per hosts logging for those
# that want it.
class CallbackModule(CallbackBase):
"""
logs playbook results, per host, to 127.0.0.1:8080
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'log_plays'
CALLBACK_NEEDS_WHITELIST = True
TIME_FORMAT = "%b %d %Y %H:%M:%S"
MSG_FORMAT = "%(now)s - %(category)s - %(data)s\n\n"
def __init__(self):
super(CallbackModule, self).__init__()
self.task = dict()
self.data = dict()
self.stage = ""
def log(self, host, category, data):
state = "processing"
if isinstance(data, MutableMapping):
if "state" in data.keys():
state = data["state"]
if "changed" in data.keys():
self.data["changed"] = data["changed"]
else:
self.data["changed"] = False
if "msg" in data.keys():
self.data["msg"] = data["msg"]
else:
self.data["msg"] = ""
now = time.strftime(self.TIME_FORMAT, time.localtime())
self.task["state"] = category
h1 = http.client.HTTPConnection('127.0.0.1:8080')
h1.request(
"POST",
"/v1/notify",
json.dumps(
dict(
time=now,
data=self.data,
task=self.task,
host=host,
state=state,
stage=self.stage
)
)
)
# V2 METHODS, by default they call v1 counterparts if possible
def v2_on_any(self, *args, **kwargs):
self.on_any(args, kwargs)
def v2_runner_on_failed(self, result, ignore_errors=False):
host = result._host.get_name()
self.log(host, 'failed', result._result)
def v2_runner_on_ok(self, result):
host = result._host.get_name()
self.log(host, 'ok', result._result)
def v2_runner_on_skipped(self, result):
host = result._host.get_name()
self.log(host, 'skipped', self._get_item_label(getattr(result._result, 'results', {})))
def v2_runner_on_unreachable(self, result):
host = result._host.get_name()
self.log(host, 'unreachable', result._result)
# FIXME: not called
def v2_runner_on_async_poll(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
# FIXME, get real clock
clock = 0
self.runner_on_async_poll(host, result._result, jid, clock)
# FIXME: not called
def v2_runner_on_async_ok(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_ok(host, result._result, jid)
# FIXME: not called
def v2_runner_on_async_failed(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_failed(host, result._result, jid)
def v2_playbook_on_start(self, playbook):
name, _ = os.path.basename(os.path.dirname(playbook._basedir)).split('-')
self.stage = name
def v2_playbook_on_notify(self, handler, host):
self.playbook_on_notify(host, handler)
def v2_playbook_on_no_hosts_matched(self):
self.playbook_on_no_hosts_matched()
def v2_playbook_on_no_hosts_remaining(self):
self.playbook_on_no_hosts_remaining()
def v2_playbook_on_task_start(self, task, is_conditional):
self.task['name'] = task.name
self.log("all", "starting", dict())
# FIXME: not called
def v2_playbook_on_cleanup_task_start(self, task):
pass # no v1 correspondence
def v2_playbook_on_handler_task_start(self, task):
pass # no v1 correspondence
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default)
# FIXME: not called
def v2_playbook_on_import_for_host(self, result, imported_file):
host = result._host.get_name()
self.playbook_on_import_for_host(host, imported_file)
# FIXME: not called
def v2_playbook_on_not_import_for_host(self, result, missing_file):
host = result._host.get_name()
self.playbook_on_not_import_for_host(host, missing_file)
def v2_playbook_on_play_start(self, play):
self.playbook_on_play_start(play.name)
def v2_playbook_on_stats(self, stats):
state = ""
if len(stats.failures) > 0 or len(stats.dark) > 0:
state = "failed"
else:
state = "ok"
self.task['name'] = "ending"
self.log("all", state, dict(state=state))
def v2_on_file_diff(self, result):
if 'diff' in result._result:
host = result._host.get_name()
self.on_file_diff(host, result._result['diff'])
def v2_playbook_on_include(self, included_file):
pass # no v1 correspondence
def v2_runner_item_on_ok(self, result):
pass
def v2_runner_item_on_failed(self, result):
pass
def v2_runner_item_on_skipped(self, result):
pass
def v2_runner_retry(self, result):
pass
``` |
{
"source": "jh0ker/from_twitter_to_telegram",
"score": 3
} |
#### File: jh0ker/from_twitter_to_telegram/twitterSays.py
```python
import os
import time
import pickle
from twython import Twython
from telegram import Bot
from SETTINGS import *
api = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
bot = Bot(telegram_token)
def first_run():
file_exists = os.path.exists('sav.p')
if not file_exists:
user_timeline = api.get_user_timeline(screen_name=user_name, count=2)
tweet_id = user_timeline[-1]['id']
file_pickle(tweet_id)
def get_timeline(latest_tweet_id):
return api.get_user_timeline(screen_name=user_name, since_id=latest_tweet_id)
def read_latest_id():
line = file_unpickle()
if len(str(line)) < 2:
return 0
else:
return line
def send_message(msg):
bot.send_message(chat_id=channel_name, text=msg)
def file_pickle(var):
pickle.dump(var, open("sav.p", "wb"))
def file_unpickle():
saved = pickle.load(open('sav.p', "rb"))
return saved
def main():
latest_tweet_id = read_latest_id()
user_timeline = get_timeline(latest_tweet_id)
for tweet in reversed(user_timeline):
if tweet['text']:
print(tweet['text'])
send_message(tweet['text'])
time.sleep(4)
latest_tweet_id = tweet['id']
file_pickle(latest_tweet_id)
first_run()
main()
``` |
{
"source": "jh0nnyb3g00d/DALLE-pytorch",
"score": 4
} |
#### File: jh0nnyb3g00d/DALLE-pytorch/Vocabulary.py
```python
class Vocabulary:
#PAD_token = 0 # Used for padding short sentences
#SOS_token = 1 # Start-of-sentence token
#EOS_token = 2 # End-of-sentence token
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "PAD", 1: "SOS", 2: "EOS"}
self.num_words = 3
self.num_sentences = 0
self.longest_sentence = 0
def add_word(self, word):
if word not in self.word2index:
# First entry of word into vocabulary
self.word2index[word] = self.num_words
self.word2count[word] = 1
self.index2word[self.num_words] = word
self.num_words += 1
else:
# Word exists; increase word count
self.word2count[word] += 1
def add_sentence(self, sentence):
sentence_len = 0
for word in sentence.split(' '):
sentence_len += 1
self.add_word(word)
if sentence_len > self.longest_sentence:
# This is the longest sentence
self.longest_sentence = sentence_len
# Count the number of sentences
self.num_sentences += 1
def to_word(self, index):
return self.index2word[index]
def to_index(self, word):
return self.word2index[word]
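# Minimal usage sketch (illustrative; the corpus sentence is a made-up example).
if __name__ == "__main__":
    voc = Vocabulary("demo")
    voc.add_sentence("the quick brown fox jumps over the lazy dog")
    print(voc.num_words, voc.longest_sentence, voc.to_index("fox"))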
``` |
{
"source": "jH0ward/s3-app-cache",
"score": 2
} |
#### File: s3-app-cache/s3_app_cache/cache_config.py
```python
import boto3
import os
class CacheConfig(object):
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super(CacheConfig, cls).__new__(cls)
cls._instance._initialized = False
return cls._instance
def __init__(
self,
s3_address: str = None,
boto_profile: str = None,
bucket_name: str = None,
cache_location: str = None,
s3_client=None,
):
if self._initialized:
return
self.s3_address = s3_address or os.environ.get("S3_APP_CACHE_PATH")
self.boto_profile = boto_profile or os.environ.get(
"S3_APP_CACHE_PROFILE", "default"
)
self.bucket_name = bucket_name or self.s3_address.split("/")[2]
self.cache_location = cache_location or "/".join(self.s3_address.split("/")[3:])
assert (
self.cache_location
), "S3_APP_CACHE_PATH env var should be of form s3://<bucket-name>/a/path/to/cache/"
self._initialized = True
self.s3_client = s3_client
print("Done INIT")
def set_cache_location(self, path: str):
self.cache_location = path
def set_bucket_name(self, bucket_name: str):
self.bucket_name = bucket_name
def set_boto_profile(self, profile_name):
self.boto_profile = profile_name
self.set_s3_client_from_profile(profile_name)
def set_s3_client_from_profile(self, profile_name):
session = boto3.session.Session(profile_name=profile_name)
s3 = session.client("s3")
self.s3_client = s3
def set_s3_client(self, _client):
self.s3_client = _client
def get_s3_client(self):
if self.s3_client is None:
self.set_s3_client_from_profile(self.boto_profile)
return self.s3_client
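# Minimal usage sketch (illustrative; the bucket path below is a placeholder and
# no AWS call is made until get_s3_client() is invoked).
if __name__ == "__main__":
    cfg = CacheConfig(s3_address="s3://example-bucket/app-cache/")
    print(cfg.bucket_name, cfg.cache_location)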
``` |
{
"source": "Jh123x/Bin-there-done-that",
"score": 3
} |
#### File: Jh123x/Bin-there-done-that/img_rec.py
```python
import cv2 #Computer vision capabilities
import time #Add time delay
import multiprocessing #Process frames in another thread
import numpy as np #numpy functions
import socket #For the use of networking capabilities
class Server():
def __init__(self, hostname = 'localhost' ,port = 1234):
'''Load the server object'''
self.port = port
self.hostname = hostname
self.connected = False
self.client = None
#Bind the socket go the ip
self.start()
def start(self):
'''Start the server object'''
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind((self.hostname,self.port))
print("Server started:\nIP:", self.hostname, "\nPort:", self.port)
def listen(self):
'''Listen to the socket'''
self.socket.listen(5)
while(True):
if(not self.connected):
self.client,self.addr = self.socket.accept()
print(f"Client {self.addr} has connected")
self.connected = True
else:
return
def send(self,message):
"""Send a message to the client"""
print(f"{str(time.ctime(int(time.time())))}: {message}")
data = "".join(str(message) + "\n").encode()
try:
self.client.send(data)
except:
print("Client not available")
print("Conection Closed")
self.client.close()
self.connected = False
self.client = None
self.addr = None
return True
def close(self):
'''Closes the connection to the client and remove client from the connected client list'''
if(self.client):
self.client.close()
else:
pass
def __del__(self):
"""Destructor"""
self.close()
def update(video, queue):
    '''Update the next few frames to fill up the queue'''
    #Capture object
    cap = cv2.VideoCapture(1)
    #Loop to update the queues
    while cap.isOpened():
        #If the thread should be stopped return
        if video.stopped or cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # otherwise, ensure the queue has room in it
        if not queue.full():
            #Read the next frame from the file
            item = cap.read()
            # Check if the frame is valid
            if not item[0]:
                return
            # add the frame to the queue
            queue.put(item)
    #Release the capture device
    cap.release()
class VideoC():
def __init__(self, queue):
'''Initialiser for the capture'''
self.prev = []
self.time = 0
self.count = 0
self.queue = queue
self.stopped = False
self.server = Server('192.168.1.47',1234)
def read(self):
'''Get the next frame in the queue'''
if(self.queue.qsize() > 0):
return self.queue.get()
else:
return False,[]
def process(self, frame):
'''Process the frame'''
#If there is a previous frame captured
if len(self.prev) > 0:
#Gray previous frame
pframe = cv2.cvtColor(self.prev,cv2.COLOR_BGR2GRAY)
#Gray current frame
cframe = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
#Getting the difference in the frame in terms of gray scale
frame = cv2.absdiff(pframe,cframe)
#Return the frame
return frame
else:
#If there is no previous frame, there is no need to process it
# self.time = time.time()
return frame
def display(self):
'''Display what is being captured'''
#Loop to display the frames 1 by 1
while True:
#Reading the next frame from the input
ret,frame = self.read()
#Check if the btn q is pressed (Quit if the button us pressed)
if cv2.waitKey(1) & 0xFF == ord('q'): #The 0xFF is needed when dealing with 64 bit operating system
break
#If there is an image that is captured (Ret will return true if there is input and False if otherwise)
if ret:
#Do modifications to the frame here/ Other operations
nframe = self.process(frame)
#Show the original image
cv2.imshow('Video', frame)
#Show the Img delta
cv2.imshow('Delta', nframe)
#Print the x midpt
# print(self.calculate(nframe))
#Listen for the arduino
self.server.listen()
#Send the midpt to the arduino
sector = self.calculate(nframe)
if(self.server.send(sector)):
self.queue_clear()
#Set the current frame as prev frame
self.count += 1
if(self.count % 4 == 0):
self.prev = frame
self.count = 0
#To close the camera properly
self.quit()
def queue_clear(self):
"""Empty the queue"""
while not self.queue.empty():
self.queue.get()
def calculate(self,frame):
"""Calculate the midpoint of the white pixels in frame"""
white_pixels = np.where(frame > [100])
#If there are pixels get the coord of the first white and the last white
try:
left_white = white_pixels[1][0]
right_white = white_pixels[1][-1]
return int(round((right_white + left_white)/640*3,0))
except:
return -1
def quit(self):
'''Release all of the cams which are active'''
#Stop the thread
self.stopped = True
#Close all of the windows
cv2.destroyAllWindows()
def __del__(self):
'''Destructor for the VideoC object'''
self.quit()
def main():
'''Main function to run'''
#Queue
queue = multiprocessing.Manager().Queue(120)
#Create the video capture object
vc = VideoC(queue)
#Create a separate process to render the frames into a queue
p = multiprocessing.Process(target = update, args= (vc,queue))
p.start()
#Display the video frame by frame
vc.display()
#Check if it is the main file before running
if __name__ == '__main__':
main()
``` |
{
"source": "Jh123x/BrainF-Interpreter",
"score": 3
} |
#### File: BrainF-Interpreter/core/Command.py
```python
import sys
def inc_pointer(array: list, current_ptr: int) -> int:
"""Increment the pointer"""
while len(array) <= current_ptr + 1:
array.append(0)
return current_ptr + 1
def dec_pointer(_, current_ptr: int) -> int:
"""Decrement the pointer"""
return current_ptr - 1
def inc_data(array: list, current_ptr: int) -> None:
"""Increment the data"""
array[current_ptr] += 1
def dec_data(array: list, current_ptr: int) -> None:
"""Decrement the data"""
array[current_ptr] -= 1
def output_byte(array: list, current_ptr: int) -> None:
"""Output the byte at the data"""
print(chr(array[current_ptr]), end="")
def input_byte(array: list, current_ptr: int) -> None:
"""Input a byte and store it at the data"""
curr_char = sys.stdin.read(1)
array[current_ptr] = ord(curr_char)
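# Illustrative dispatch table (an assumption for demonstration; the real wiring
# of Brainfuck symbols to these handlers lives in the Environment class, which
# is defined elsewhere in this package).
COMMANDS = {
    '>': inc_pointer,
    '<': dec_pointer,
    '+': inc_data,
    '-': dec_data,
    '.': output_byte,
    ',': input_byte,
}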
```
#### File: BrainF-Interpreter/core/Interpreter.py
```python
from . import Environment
class Interpreter(object):
def __init__(self, code:str, verbose:bool = False) -> None:
"""The Interpreter to run the code"""
self.environment = Environment()
self.code = code
self.index = 0
self.verbose = verbose
def _normal_cmd(self, command:str) -> None:
"""Execute normal commands that doesn't branch / loop"""
if command.isspace():
self.index += 1
return
try:
self.environment.parse_command(command)
except ValueError as exp:
print(f"Error at {self.index + 1}: {exp}")
self.index = len(self.code)
self.index += 1
def _find_closing_bracket(self, start_index:int) -> int:
"""Finds the index of the next closing bracket"""
curr_index = start_index
curr_token = self.code[curr_index]
while(curr_token != ']'):
if curr_token == ']':
self._find_closing_bracket(curr_index)
curr_index += 1
if curr_index > len(self.code):
raise ValueError('Unmatched brackets')
curr_token = self.code[curr_index]
return curr_index
def loop(self, code:str) -> None:
"""
The code fragment to loop
Excluding the brackets
"""
while(self.environment.array[self.environment.current_ptr] != 0):
for cmd in code:
self.environment.parse_command(cmd)
self.index += len(code) + 2
def run(self, code: str = None) -> None:
"""Run the code"""
        if code is not None:
            self.code = code
            self.index = 0
while(self.index < len(self.code)):
current_token = self.code[self.index]
if self.verbose:
print(f"{self.index}: {current_token}")
# Loop conditional
if current_token == '[':
# Look for the end of the loop
closing_index = self._find_closing_bracket(self.index)
assert self.code[closing_index] == ']'
loop_fragment = self.code[self.index + 1: closing_index]
self.loop(loop_fragment)
else:
self._normal_cmd(current_token)
``` |
{
"source": "Jh123x/COMP-550-Algorithms-and-Analysis",
"score": 4
} |
#### File: COMP-550-Algorithms-and-Analysis/Basic/MergeSort.py
```python
from typing import List
def merge_sort(array: List[int], start: int, end: int) -> None:
"""Merge sort algorithm"""
if start < end:
mid = (start + end) // 2
merge_sort(array, start, mid)
merge_sort(array, mid + 1, end)
merge(array, start, mid, end)
return array
def merge(array: List[int], start: int, mid: int, end: int) -> None:
"""Merging subroutine"""
left = array[start:mid + 1]
right = array[mid + 1:end + 1]
left.append(float("inf"))
right.append(float("inf"))
i = j = 0
for k in range(start, end + 1):
if left[i] <= right[j]:
array[k] = left[i]
i += 1
else:
array[k] = right[j]
j += 1
if __name__ == "__main__":
array = [5, 3, 1, 2, 4]
merge_sort(array, 0, len(array) - 1)
print(array)
```
#### File: COMP-550-Algorithms-and-Analysis/Data Structures/test_ufds.py
```python
from UnionFindDisjointSet import UFDS
def test_ufds_for_items():
ufds = UFDS(['a', 'b', 'c', 'd'])
ufds.union('a', 'b')
ufds.union('c', 'd')
assert ufds.get_top('a') == 'a', f"{ufds.get_top('a')} != a"
assert ufds.get_top('b') == 'a', f"{ufds.get_top('b')} != a"
assert ufds.get_top('c') == 'c', f"{ufds.get_top('c')} != c"
assert ufds.get_top('d') == 'c', f"{ufds.get_top('d')} != c"
ufds.union('b', 'd')
assert ufds.get_top('a') == 'a', f"{ufds.get_top('a')} != a"
assert ufds.get_top('b') == 'a', f"{ufds.get_top('b')} != a"
assert ufds.get_top('c') == 'a', f"{ufds.get_top('c')} != a"
assert ufds.get_top('d') == 'a', f"{ufds.get_top('d')} != a"
```
#### File: COMP-550-Algorithms-and-Analysis/Dynamic Programming/LongestCommonSubseq.py
```python
from typing import List
# Define directions
DIAGONAL = (-1, -1)
UP = (0, -1)
LEFT = (-1, 0)
def trace_table(backtrack_table, seq1, seq2) -> List[str]:
"""Trace back the backtrack table to find the longest common subsequence"""
i, j = len(seq1)-1, len(seq2)-1
lcs = []
while i >= 0 and j >= 0:
dx, dy = backtrack_table[i][j]
if dx + dy == -2:
lcs.append(seq1[i])
# break
i += dx
j += dy
return lcs[::-1]
def longest_common_subsequence(seq1, seq2):
"""Find the longest common subsequence between 2 strings"""
count_table = [[0] * (len(seq2)+1) for _ in range(len(seq1)+1)]
backtrack_table = [[(0, 0)] * (len(seq2)) for _ in range(len(seq1))]
for i in range(1, len(seq1) + 1):
for j in range(1, len(seq2) + 1):
# If the word is found
if (seq1[i-1] == seq2[j-1]):
count_table[i][j] = count_table[i-1][j-1] + 1
                backtrack_table[i-1][j-1] = DIAGONAL
# If left > up
elif count_table[i-1][j] >= count_table[i][j-1]:
count_table[i][j] = count_table[i-1][j]
backtrack_table[i-1][j-1] = LEFT
# Otherwise up
else:
count_table[i][j] = count_table[i][j-1]
backtrack_table[i-1][j-1] = UP
return count_table[-1][-1], trace_table(backtrack_table, seq1, seq2)
if __name__ == '__main__':
X = ["A", "T", "C", "A", "C", "C", "T", "A", "T", "C", "A", "C", "C", "T"]
Y = ["A", "T", "A", "A", "C", "T", "A", "T", "A", "A", "C", "T"]
print(longest_common_subsequence(X, Y))
```
#### File: COMP-550-Algorithms-and-Analysis/Dynamic Programming/test_rod_cutting_max_pieces.py
```python
from RodCuttingMaxPieces import rod_cutting_max_pieces
def test_trivial_case():
pieces = [1]
length = 16
result, dp = rod_cutting_max_pieces(pieces, length)
assert result == length, f"{dp}"
def test_case_1():
piece_size = [2, 3, 8]
length = 15
result, dp = rod_cutting_max_pieces(piece_size, length)
assert result == 7, f"{dp}"
def test_case_2():
piece_size = [2, 3, 5]
length = 9
result, dp = rod_cutting_max_pieces(piece_size, length)
assert result == 4, f"{dp}"
```
#### File: COMP-550-Algorithms-and-Analysis/Flow/test_ford_fulkerson.py
```python
from FordFulkerson import ford_fulkerson
def test_fordfulkerson_trivial_graph():
graph = {
"s": {"t": 10}
}
assert ford_fulkerson(graph, "s", "t") == 10
def test_fordfulkerson_no_flow():
graph = {
"s": {"a": 0},
"t": {}
}
assert ford_fulkerson(graph, "s", "t") == 0
def test_fordfulkerson_inf_flow():
graph = {
"s": {}
}
assert ford_fulkerson(graph, "s", "s") == float('inf')
def test_fordfulkerson_random():
graph1 = {
"s": {"a": 10, "c": 10},
"a": {"b": 4, "c": 2, "d": 8},
"b": {"t": 10},
"c": {"d": 9},
"d": {"b": 6, "t": 10},
"t": {}
}
graph2 = {
"s": {'a': 100, "b": 100},
"a": {'c': 7},
"b": {"c": 3},
"c": {"t": 1},
"t": {}
}
graph3 = {
"s": {"a": 3, "b": 2},
"a": {"t": 2, "b": 5},
"b": {"t": 3},
"t": {}
}
graph4 = {
"s": {"t": 20}
}
graphs = [
graph1,
graph2,
graph3,
graph4
]
results = [
19,
1,
5,
20
]
for graph, expected in zip(graphs, results):
r = ford_fulkerson(graph, "s", "t")
assert r == expected, f"Expected: {expected} - Got: {r}"
```
#### File: COMP-550-Algorithms-and-Analysis/Greedy/HuffmanCompression.py
```python
from math import log2
import time
from typing import Optional
from heapq import heapify, heappush, heappop
class Node(object):
def __init__(self, freq: int, value: Optional[str] = None, left: 'Node' = None, right: 'Node' = None) -> None:
"""Node object for representing the huffman tree"""
# Check if it is a valid node
if None in (left, right) and value is None:
raise ValueError("Either nodes or value must be defined")
if None not in (left, right) and value is not None:
raise ValueError(
f"Cannot define both left or right and value. Value: {value}, Left: {left}, Right: {right}")
self.value = value
self.freq = freq
self.left = left
self.right = right
def merge(self, other: 'Node') -> 'Node':
"""Merge other node with current node"""
return Node(self.freq + other.freq, left=self, right=other)
def get_compressed_string(self, letter: str) -> Optional[str]:
"""Get the compressed string based on the tree"""
# Leaf node
if self.value is not None:
return None if letter != self.value else ''
# Not a leaf
lresult = self.left.get_compressed_string(letter)
if lresult is not None:
return "0" + lresult
rresult = self.right.get_compressed_string(letter)
if rresult is not None:
return "1" + rresult
return None
def __eq__(self, __o: object) -> bool:
if type(__o) != type(self):
return False
        return self.value == __o.value and self.freq == __o.freq
def __lt__(self, __o: object) -> bool:
if type(__o) != type(self):
raise ValueError(f"Cannot compare {type(self)} with {type(__o)}")
return self.freq < __o.freq
def __repr__(self) -> str:
# Leaf
if self.value is not None:
return f"\tLeaf Node: '{self.value}': {self.freq}\n"
# Node
return f"""Node {self.freq}:\n{self.left}{self.right}"""
def get_frequency(text: str) -> dict:
"""Get word frequency from text"""
freq_dict = {}
for letter in text:
freq_dict[letter] = freq_dict.get(letter, 0) + 1
return freq_dict
def huffman_tree(text: str) -> Node:
"""Text data compression"""
frequency = get_frequency(text)
if len(frequency) == 0:
return None
# Min heap with key (freq, value)
min_heap = list(
map(
lambda x: Node(x[1], x[0]),
frequency.items()
)
)
heapify(min_heap)
# Merge nodes
while len(min_heap) > 1:
min1 = heappop(min_heap)
min2 = heappop(min_heap)
new_node = min1.merge(min2)
heappush(min_heap, new_node)
return min_heap[0]
def huffman_compression_dict(text: str) -> dict[str, str]:
"""Compressions using huffman tree"""
# Get tree
tree = huffman_tree(text)
# Get set of letters
letter_set = set(text)
# Encoding dict
encoding_d = {}
# Get encoding
for letter in letter_set:
result = tree.get_compressed_string(letter)
if result is None:
raise ValueError(f"Value not found in huffman tree {letter}")
encoding_d[letter] = result
return encoding_d
def conventional_compression_dict(text: str) -> dict[str, str]:
"""Compress the string using bits to represent them"""
letters = list(set(text))
# Get number of bits required to represent each character
bits_required = log2(len(letters))
if int(bits_required) != bits_required:
bits_required = int(bits_required) + 1
# Make encoding dict
encode_d = {}
# Assign bits to index
for index, letter in enumerate(letters):
encoded_val = bin(index)[2:]
# Padding
if len(encoded_val) < bits_required:
encoded_val = "0" * \
(bits_required - len(encoded_val)) + encoded_val
encode_d[letter] = encoded_val
return encode_d
def compress(compress_dict: dict[str, str], text: str) -> str:
"""Compression routine"""
# Compression
acc = []
for letter in text:
acc.append(compress_dict[letter])
return ''.join(acc)
def decompress(compress_dict: dict[str, str], encoded_text: str) -> str:
"""Decompression routine"""
# Invert dictionary
decompress_dict = dict(map(lambda x: x[::-1], compress_dict.items()))
index = 0
length = 1
buffer = []
    while index < len(encoded_text):
        if index + length > len(encoded_text):
            raise ValueError(f"Error decoding at index {index}: {buffer}")
        curr = encoded_text[index: index + length]
        if curr not in decompress_dict:
            length += 1
            continue
        buffer.append(decompress_dict[curr])
        index += length
        length = 1
return ''.join(buffer)
if __name__ == "__main__":
text = "Hello world! This is the power of huffman encoding."
print(f"Text to encode: {text}\n")
start_time = time.time_ns()
huffman_dict = huffman_compression_dict(text)
huffman = compress(huffman_dict, text)
huffman_time = time.time_ns() - start_time
start_time = time.time_ns()
conventional_dict = conventional_compression_dict(text)
conven = compress(conventional_dict, text)
conven_time = time.time_ns() - start_time
print(
f"Length of huffman compression result = {len(huffman)} Time taken: {huffman_time}ns")
print(
f"Length of conventional compression result = {len(conven)} Time taken: {conven_time}ns")
print(f"Bits saved: {len(conven) - len(huffman)}")
print(
f"Huffman decompress to text string: {decompress(huffman_dict, huffman)}")
print(
f"Conventional decompress to text string: {decompress(conventional_dict, conven)}")
```
#### File: COMP-550-Algorithms-and-Analysis/Greedy/Prims.py
```python
import heapq
def prims(graph: dict[str, dict[str, int]]) -> list:
"""Prim's algorithm"""
source = list(graph.keys())[0]
arr = list(map(lambda x: (x[1], source, x[0]), graph[source].items()))
heapq.heapify(arr)
    visited = {source}
edges = []
while len(visited) < len(graph):
# Pop the top
weight, from_edge, to_edge = heapq.heappop(arr)
# Check if edge is visited
if to_edge in visited:
continue
# Add edge to tree
visited.add(to_edge)
edges.append((from_edge, to_edge, weight))
# Add Adjacent edges
for new_to_edge, weight in graph[to_edge].items():
new_add = (weight, to_edge, new_to_edge)
heapq.heappush(arr, new_add)
return edges
if __name__ == '__main__':
graph = {
'A': {'B': 5, 'D': 5, 'E': 7},
'B': {'A': 5, 'C': 4, 'D': 8, 'E': 2},
'C': {'B': 4, 'D': 8, 'F': 4},
'D': {'A': 5, 'B': 8, 'C': 8, 'F': 9, 'E': 3},
'E': {'A': 7, 'B': 2, 'D': 3, 'F': 2},
'F': {'C': 4, 'D': 9, 'E': 2}
}
result = prims(graph)
print(result)
```
#### File: COMP-550-Algorithms-and-Analysis/Greedy/test_huffman.py
```python
from HuffmanCompression import huffman_compression_dict, compress, decompress
def test_huffman_compression_empty_string():
assert huffman_compression_dict("") == {}
def test_huffman_compression_single_char():
assert huffman_compression_dict("aaab") == {'a': '1', 'b': '0'}
def test_huffman_compression_multiple_char():
assert huffman_compression_dict("aaabbbccccddddd") == {'a': '01', 'b': '00', 'c':'10', 'd': '11'}
def test_compress_decompress():
text = "Hello World!"
huffman_dict = huffman_compression_dict(text)
huffman = compress(huffman_dict, text)
decompressed = decompress(huffman_dict, huffman)
assert decompressed == text
```
#### File: COMP-550-Algorithms-and-Analysis/Hashing/CukooHashMap.py
```python
from typing import Any, List, Tuple
from CukooHashSet import CukooSet
Pair = Tuple[str, str]
class CukooHashMap(CukooSet):
def __init__(self, curr: List[Tuple[str, str]] = {}) -> None:
"""A Hash map based on Cukoo hashing
:param curr: Must be a list of tuples of Strings
"""
for index, item in enumerate(curr):
if len(item) == 2:
continue
raise ValueError(f"Invalid input at index {index}: {item}")
super().__init__(curr)
def __getitem__(self, key: str) -> Any:
"""Get the value of the key"""
h1 = super()._get_hash1_of(key)
h2 = super()._get_hash2_of(key)
if h1 is None or h2 is None:
return None
        if self.t1[h1] is not None and self.t1[h1][0] == key:
            return self.t1[h1][1]
        if self.t2[h2] is not None and self.t2[h2][0] == key:
            return self.t2[h2][1]
        return None
def _get_hash1_of(self, pair: Tuple[str, Any]) -> int:
return super()._get_hash1_of(pair[0])
def _get_hash2_of(self, pair: Tuple[str, Any]) -> int:
return super()._get_hash2_of(pair[0])
def insert(self, pair: Tuple[str, Any]) -> bool:
"""Insert the key and value into the map"""
key, value = pair
if key in self:
h1 = super()._get_hash1_of(key)
h2 = super()._get_hash2_of(key)
            if self.t1[h1] is not None and self.t1[h1][0] == key:
self.t1[h1] = (key, value)
return True
self.t2[h2] = (key, value)
return True
return super().insert((key, value))
def remove(self, key: str) -> bool:
"""Remove key value pair"""
if key not in self:
return False
h1 = super()._get_hash1_of(key)
h2 = super()._get_hash2_of(key)
self.just_rehash = False
self.size -= 1
if self.t1[h1] is not None and self.t1[h1][0] == key:
self.t1[h1] = None
return True
self.t2[h2] = None
return True
def __initialize__(self, curr: List[Tuple[str, Any]]) -> None:
"""Initialize the map with the given list"""
for k, v in curr:
super().insert((k, v))
def _contains(self, pair: str):
"""Check if the map contains the key"""
return super()._contains(pair[0])
def __contains__(self, key: str):
"""Check if the map contains the key"""
h1 = super()._get_hash1_of(key)
h2 = super()._get_hash2_of(key)
if h1 is None or h2 is None:
return False
v1, v2 = self.t1[h1], self.t2[h2]
return v1 is not None and v1[0] == key or v2 is not None and v2[0] == key
def __len__(self):
"""Get the size of the map"""
return super().__len__()
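# Minimal usage sketch (illustrative; assumes the CukooSet base class imported
# above initializes the tables from the given pairs).
if __name__ == "__main__":
    cmap = CukooHashMap([("a", "1"), ("b", "2")])
    cmap.insert(("c", "3"))
    print(cmap["a"], "c" in cmap, len(cmap))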
```
#### File: COMP-550-Algorithms-and-Analysis/Hashing/test_cukooset.py
```python
from CukooHashSet import CukooSet
def test_set_add_1_element():
"""Add 1 element to CukooSet"""
mySet = CukooSet()
assert mySet.insert("1")
def test_set_search_1_element():
"""Searching for element in cukoo set"""
mySet = CukooSet()
assert mySet.insert("1")
assert mySet._contains("1")
def test_set_add_many_and_search_many():
"""Add many items"""
items = [str(i) for i in range(100)]
mySet = CukooSet()
for index, item in enumerate(items):
assert mySet.insert(item)
assert len(mySet) == index + 1
for item in items:
assert mySet._contains(item)
assert item in mySet
for index, item in enumerate(items):
assert mySet.remove(item)
assert not mySet._contains(item)
assert item not in mySet
assert len(mySet) == len(items) - index - 1
```
#### File: COMP-550-Algorithms-and-Analysis/Selection/QuickSelect.py
```python
from random import randint
import time
## Exerpt from merge sort ###################
from typing import List
def merge_sort(array: List[int], start: int, end: int) -> None:
"""Merge sort algorithm"""
if start < end:
mid = (start + end) // 2
merge_sort(array, start, mid)
merge_sort(array, mid + 1, end)
merge(array, start, mid, end)
return array
def merge(array: List[int], start: int, mid: int, end: int) -> None:
"""Merging subroutine"""
left = array[start:mid + 1]
right = array[mid + 1:end + 1]
left.append(float("inf"))
right.append(float("inf"))
i = j = 0
for k in range(start, end + 1):
if left[i] <= right[j]:
array[k] = left[i]
i += 1
else:
array[k] = right[j]
j += 1
#############################################
def get_median(arr: list[int]) -> int:
"""Get the median of a list"""
if len(arr) == 1:
return arr[0]
sorted_arr = sorted(arr)
size = len(arr)
if size % 2 == 0:
return sorted_arr[size // 2 - 1]
return sorted_arr[size // 2]
def get_pivot(arr: list, lower: int, higher: int) -> int:
"""Getting a good pivot"""
acc = []
for i in range(lower, higher + 1, 5):
end = min(higher + 1, i + 5)
        # Check that each partition has at most 5 elements
assert end - i <= 5, (i, end, higher, lower)
a = arr[i:end]
if len(a) == 0:
continue
acc.append(a)
medians = []
for grp in acc:
me = get_median(grp)
medians.append(me)
if len(medians) > 5:
return get_pivot(medians, 0, len(medians) - 1)
return get_median(medians)
def swap(arr: list[int], index1: int, index2: int) -> None:
assert index1 < len(arr) and index2 < len(arr), (index1, index2, len(arr))
arr[index1], arr[index2] = arr[index2], arr[index1]
def quick_select(arr: list, lower: int, higher: int, target: int) -> int:
"""O(n) Order selection algorithm"""
# Base Case
if lower == higher == target - 1:
return arr[lower]
if higher - lower + 1 <= 5:
return sorted(arr)[target - 1]
median = get_pivot(arr, lower, higher)
# Partition
left_index = lower + 1
right_index = higher
while(left_index <= right_index):
curr = arr[left_index]
if curr <= median and (arr[lower] == median or curr < median):
left_index += 1
elif curr > median:
swap(arr, left_index, right_index)
right_index -= 1
else:
swap(arr, lower, left_index)
swap(arr, lower, right_index)
# Recursion
if target == right_index + 1:
return median
if target < right_index:
return quick_select(arr, lower, right_index - 1, target)
return quick_select(arr, right_index + 1, higher, target)
if __name__ == '__main__':
for size in range(1, 500):
arr = [randint(0, 100) for _ in range(size)]
total_sort_time = 0
total_select_time = 0
for target in range(size):
array = arr.copy()
# Sort and select time
sort_start = time.time()
sorted_arr = merge_sort(array, 0, len(arr) - 1)
total_sort_time += time.time() - sort_start
# Select Time
start_time = time.time()
select_target = quick_select(arr, 0, size - 1, target)
total_select_time += time.time() - start_time
            assert select_target == sorted_arr[target - 1], "Failed at {}".format(target)
print(
f"Size: {size}, Sort Time: {total_sort_time / size}, Avg Select Time: {total_select_time / size}")
```
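A short usage sketch for `quick_select`, assuming `QuickSelect.py` is importable; `target` is the 1-indexed rank (k-th smallest element), and the routine partitions its input list in place.
```python
from QuickSelect import quick_select

data = [9, 1, 8, 2, 7, 3, 6, 4, 5, 0]

# Pass a copy: quick_select rearranges the list while partitioning.
# target is 1-indexed, so target=3 asks for the third smallest element.
third_smallest = quick_select(data.copy(), 0, len(data) - 1, 3)
print(third_smallest)  # expected: 2
```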
#### File: COMP-550-Algorithms-and-Analysis/Selection/test_median.py
```python
from FindMedianOf2SortedArrays import median
def test_find_median_trivial():
arr1 = [1]
arr2 = [2]
n = 1
assert median(arr1, 0, arr2, 0, n) == 1
def test_find_median_single_node():
arr1 = [1, 2]
arr2 = [2, 3]
n = len(arr1)
assert median(arr1, 0, arr2, 0, n) == 2
def test_find_median_random():
cases = [
# a1 , a2, n, ans #
([1], [2], 1, 1),
([41, 42], [43, 44], 2, 42),
([2, 3, 6], [1, 4, 5], 3, 3),
([1, 2, 3, 4, 5], [6, 7, 8, 9, 10], 5, 5),
([4, 5, 6, 10, 11, 12, 13], [1, 2, 3, 7, 8, 9, 14], 7, 7),
]
for index, (arr1, arr2, n, ans) in enumerate(cases):
result = median(arr1, 0, arr2, 0, n)
assert result == ans, f"Case {index+1} failed. Expected {ans} but got {result}"
``` |
{
"source": "Jh123x/Discord-Bot",
"score": 3
} |
#### File: Jh123x/Discord-Bot/redditbot.py
```python
import random, praw, urllib.request, os
from PIL import Image
class RedditBot:
def __init__(self,username,password, secret, id, default_sub = 'memes'):
#Initialise the cache
self.cache = set()
#Set the counter
self.count = 0
#Authenticate with reddit
self.reddit = praw.Reddit(client_id=id,
client_secret=secret,
password=password,
user_agent='Redditbot',
username=username)
#Set the bot to be only able to read posts
self.reddit.read_only = True
#Set the default subreddit to r/memes
self.subreddit = self.reddit.subreddit(default_sub)
#Load the memes into cache
self.load_meme()
def sub_exist(self, name :str) -> bool:
try:
self.reddit.subreddits.search_by_name(name, exact = True)
except:
return False
else:
return True
def set_subreddit(self, subreddit : str) -> bool:
'''Set the subreddit for the reddit bot'''
        #Check that the subreddit exists before switching to it
if(self.sub_exist(subreddit)):
if(subreddit != self.subreddit.display_name):
#Change the subreddit
self.subreddit = self.reddit.subreddit(subreddit)
print("Subreddit changed to", subreddit)
#Reset the cache to be loaded again later when memes are fetched
self.cache = set()
self.load_meme()
return True
return False
def load_cache(self):
print("Loading Cache")
#Get the 25 posts from the subreddit based on the 3 categories and add them to cache
memes = [self.subreddit.hot(limit = 3),self.subreddit.top(limit = 3),self.subreddit.new(limit = 14)]
self.cache = self.cache.union(*map(lambda x: set(map(lambda y : y.url,x)),memes))
        #If the cache is still empty, then the subreddit is empty
if(len(self.cache) == 0):
print("Empty sub")
return False
#Else the cache is successfully reloaded
print('Cache Loaded')
return True
def load_meme(self) -> bool:
'''Return the image file for the meme'''
#If the cache is empty, fetch the memes
if(not len(self.cache)):
self.load_cache()
#Choose a random post from the cache
post = random.sample(self.cache, 1)[0]
#Remove the current post from the cache
self.cache.discard(post)
#Check if the temp folder exists
if(not os.path.isdir('temp')):
            #If it does not exist, make the path
os.mkdir('temp')
try:
#Open the URL and store the meme to temp/temp.png
with urllib.request.urlopen(post) as url:
with open('temp/temp.png','wb') as file:
file.write(url.read())
except ConnectionError as exp:
print("There was a connection error:", exp)
except PermissionError as exp:
print("There was a permission error:", exp)
except Exception as exp:
print("Something went wrong: ", exp)
else:
print('Meme fetched')
try:
Image.open('temp/temp.png')
except Exception as exp:
if(self.count == 10):
print("The sub doesnt have pictures")
return False
print(exp)
self.load_meme()
self.count += 1
else:
self.count = 0
return True
return False
def main():
'''Main function to test the bot'''
pass
#Run the main command when the file is run as the main
if(__name__ == '__main__'):
main()
``` |
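A usage sketch for the bot above; every credential string is a hypothetical placeholder, and a valid Reddit API client id/secret is assumed. Constructing the bot already pre-loads one random post into `temp/temp.png`.
```python
from redditbot import RedditBot

# All credential values below are placeholders, not real secrets
bot = RedditBot(username="my_user",
                password="my_password",
                secret="my_client_secret",
                id="my_client_id",
                default_sub="memes")

# Switch subreddits; set_subreddit returns False if the name does not exist
if bot.set_subreddit("wholesomememes"):
    # Fetch another random cached post and write it to temp/temp.png
    bot.load_meme()
```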
{
"source": "Jh123x/DYOM-Object-Detection-Web-Service",
"score": 3
} |
#### File: Server/functions/object_detect.py
```python
import torch
from PIL import Image
# Create the Model
model = torch.hub.load('ultralytics/yolov5', 'yolov5m')
def draw_bounding_boxes(image_binary: bytes) -> Image:
"""Generate the image with the bounding boxes"""
image = Image.open(image_binary)
# Run the detector to generate the boxes
img = model(image)
# Load the bounding boxes into the img
img.render()
# Transform image
result = Image.fromarray(img.imgs[0])
return result
``` |
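A sketch of how the helper above might be called. `draw_bounding_boxes` hands its argument straight to `PIL.Image.open`, so an opened binary file works; the file name is a placeholder, and importing the module triggers the torch.hub model download.
```python
from object_detect import draw_bounding_boxes

# street.jpg is a placeholder path for any local image
with open("street.jpg", "rb") as fh:
    annotated = draw_bounding_boxes(fh)

# The result is a PIL image with the detections drawn on it
annotated.save("street_annotated.jpg")
```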
{
"source": "Jh123x/Language-Tester",
"score": 4
} |
#### File: files/GUI/gui.py
```python
import tkinter
from tkinter import filedialog
from tkinter import messagebox as msg
from .. import Tester
class Window(tkinter.Frame):
def __init__(self, master = None, debug:bool = True):
"""Constructor for the main gui window"""
#Call the super class
super().__init__(master)
#Store variables
self.master = master
#Create dictionary to store buttons and labels
self.buttons = {}
self.labels = {}
self.answer_var = tkinter.StringVar()
self.filename = None
self.debug = debug
#Create the widgets
self.create_widgets()
def create_widgets(self) -> None:
"""Create the widgets on the GUI"""
#Create the main label
self.labels['main'] = tkinter.Label(text = "Language Tester")
#Grid the main label
self.labels['main'].grid(row = 0, column = 5, columnspan = 10)
#Create an entry box for the person to key in the answer
self.entry = tkinter.Entry(textvariable = self.answer_var)
#Grid the entry box
self.entry.grid(row = 5, column = 2, padx = 10, pady = 10, columnspan = 7)
#Create a browse button for the user to browse files
self.browse = tkinter.Button(text = "Browse", command = self.browse_file)
#Grid the browse button
self.browse.grid(row = 1, column = 10)
#Generate a question label
self.labels['question'] = tkinter.Label(text = "Please Load a CSV file")
self.labels['question'].grid(row = 1, column = 0, columnspan = 10)
#Next question button
self.buttons['next'] = tkinter.Button(text = "Next Question", command = self.generate_question)
self.buttons['next'].grid(row = 5, column = 10)
#Create the submit buttonm
self.buttons['submit'] = tkinter.Button(text = "Submit", command = self.handle)
self.buttons['submit'].grid(row = 5, column = 9)
#Create the check label
self.labels['check'] = tkinter.Label(text = "")
self.labels['check'].grid(row = 2, column = 0, columnspan = 5)
#Create the accuracy label
self.labels['accuracy'] = tkinter.Label(text = "Accuracy: 0.0%")
self.labels['accuracy'].grid(row = 2, column = 5, columnspan = 5)
def browse_file(self) -> str:
"""Allows the user to browse a file and return the filepath"""
#Get the name of the file chosen
self.filename = filedialog.askopenfilename(title = "Select A File", filetype = (("CSV files","*.csv"),))
#If the user did not choose a file
if self.filename == '':
#Do nothing
return
#Check if there are any errors
try:
#Create an instance of the tester
self.tester = Tester(self.filename, debug = self.debug)
#Generate the next question
self.generate_question()
except Exception as exp:
#Print an error message
if self.debug:
print(f"Error: {exp}")
#Set it onto the screen
self.labels['question']['text'] = f"Error: {exp}"
    def file_loaded(self) -> bool:
        """Checks whether the file is loaded properly"""
        return bool(self.filename)
def handle(self) -> None:
"""Handle the question checking"""
if not self.file_loaded():
self.show_filenotloaded()
return
#If the answer is correct
if self.check_answer():
#Add the number of correct tests
self.tester.add_correct()
#Add a Label to show the user is correct
self.labels['check']['text'] = f"You are correct"
else:
if self.debug:
print(f"Answer is incorrect")
#Add a Label to show the user is correct
self.labels['check']['text'] = f"You are incorrect"
#Update accuracy
self.labels['accuracy']['text'] = f"Accuracy: {self.tester.get_accuracy()}"
#Generates next question
self.generate_question()
def clear_entry(self):
"""Clear the entry"""
self.entry.delete(0, tkinter.END)
def generate_question(self) -> None:
"""Check the submission of the user"""
if self.file_loaded():
#Check the submission
self.key,self.answer = self.tester.generate_test()
#Load the key into the label
self.labels['question']['text'] = f"What is the translation for: {self.key}"
#Clear the entry:
self.clear_entry()
else:
#Create warning box to ask user to browse for a valid data file
self.show_filenotloaded()
def check_answer(self) -> bool:
"""Check if the answer that is keyed is correct"""
#Get the answer from the text variable
ans = self.answer_var.get()
if self.debug:
print(f"Answer {self.answer}, User answer {ans}")
#Compare it with the answer from the database and return
return ans.strip().lower() in self.answer.strip().lower().split()
def show_filenotloaded(self) -> None:
msg.showwarning("Please Select a valid file", "Please select a valid csv file for the database\n")
``` |
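A sketch of how the window might be launched, assuming the package layout implied by the relative imports (`Window` in `files/GUI/gui.py`, `Tester` exported by the `files` package):
```python
import tkinter
from files.GUI.gui import Window   # import path assumed from the repo layout

root = tkinter.Tk()
root.title("Language Tester")

# The widgets attach to the default root; Browse loads a CSV of word pairs
app = Window(master=root, debug=False)
app.mainloop()
```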
{
"source": "Jh123x/Orbital",
"score": 3
} |
#### File: ai_invader/model/actor_critic.py
```python
import torch
import torch.nn as nn
from torch.distributions import Categorical
from .basemodel import BaseModel
class Actor(BaseModel):
def __init__(self, input_shape, num_actions):
super(Actor,self).__init__(input_shape,num_actions)
self.fc = nn.Sequential(
nn.Linear(self.feature_size(),512),
nn.LeakyReLU(),
nn.Linear(512,self.num_actions),
nn.Softmax(dim=1)
)
def forward(self,x):
x = self.features(x)
x = x.view(x.size(0),-1)
x = self.fc(x)
dist = Categorical(x)
return dist
class Critic(BaseModel):
def __init__(self, input_shape, num_actions):
super(Critic, self).__init__(input_shape,num_actions)
self.fc = nn.Sequential(
nn.Linear(self.feature_size(),512),
nn.LeakyReLU(),
nn.Linear(512,1)
)
def forward(self,x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
```
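A sketch of an advantage actor-critic style update built from the two heads above; the input shape, action count, and one-step return are illustrative, and `BaseModel` is assumed to provide the convolutional `features` module and `feature_size()` used in the code.
```python
import torch
import torch.optim as optim
# Import path assumed from the repository layout
from ai_invader.model.actor_critic import Actor, Critic

input_shape, num_actions = (4, 84, 84), 6          # illustrative Atari-style frame stack
actor = Actor(input_shape, num_actions)
critic = Critic(input_shape, num_actions)
optimizer = optim.Adam(list(actor.parameters()) + list(critic.parameters()), lr=1e-4)

state = torch.zeros(1, *input_shape)                # dummy observation batch
dist = actor(state)                                 # Categorical distribution over actions
action = dist.sample()
value = critic(state)                               # state-value estimate, shape (1, 1)

# Pretend the environment returned a one-step return of 1.0 for this state
advantage = torch.tensor([1.0]) - value.squeeze(1)

actor_loss = -(dist.log_prob(action) * advantage.detach()).mean()
critic_loss = advantage.pow(2).mean()

optimizer.zero_grad()
(actor_loss + critic_loss).backward()
optimizer.step()
```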
#### File: gym_game/envs/AI_game.py
```python
import os
import sys
import pygame
import numpy as np
import matplotlib.pyplot as plt
try:
from .classes import *
except ImportError:
from classes import *
def get_curr_path():
"""Get the path to the current file depending on state of application"""
#Return correct directory
return sys.executable if getattr(sys, 'frozen', False) else __file__
def map_dir(*args):
"""Map the abs path for the files in the folder"""
return list_dir(form_abs_path(get_curr_path(), os.path.join(*args)))
class PyGame_2D(object):
def __init__(self, settings:str, t:str = 'Play'):
"""Pygame_2d object for AI to be trained on"""
#Read the configuration file for space invaders
all_cfg = read_all(form_abs_path(__file__,settings))
#Main configurations
config = all_cfg['Space Invaders']
config['icon_img_path'] = form_abs_path(__file__, config['icon_img_path'])
#Load all
d = {}
d["bullet_img_paths"] = map_dir("images", "bullets")
d["player_img_paths"] = map_dir("images", "player")
d["enemy_img_paths"] = map_dir("images", "enemies")
d["background_img_paths"] = map_dir("images", "backgrounds")
d["explosion_img_paths"] = map_dir("images", "explosions")
d["menu_music_paths"] = map_dir("sounds", "menu_music")
d["powerup_img_path"] = map_dir("images", "powerups")
d["mothership_img_path"] = map_dir("images", "bosses", "mothership")
d["trophy_img_path"] = map_dir("images", "trophys")
d["scout_img_path"] = map_dir("images", "bosses", "scout")
d["brute_img_path"] = map_dir("images", "bosses", "brute")
d["crabs_img_path"] = map_dir("images", "bosses", "crabs")
d["story_img_path"] = map_dir("images", "story assets")
d["place_holder_path"] = map_dir("images", "place_holder")
#Load the other sprites
d["player_img_paths"] = list_dir(form_abs_path(__file__, "images/player"))
d["enemy_img_paths"] = list_dir(form_abs_path(__file__, "images/enemies"))
d["explosion_img_paths"] = list_dir(form_abs_path(__file__, "images/explosions"))
d["powerup_img_path"] = list_dir(form_abs_path(__file__,"images/powerups"))
d["mothership_img_path"] = list_dir(form_abs_path(__file__,"images/bosses/mothership"))
#Load sprites
load_sprites((Player, Bullet, EnemyShip, Background, Explosion, MotherShip, VictoryScreen, Scout, Brute, Crabs),
(d['player_img_paths'], d['bullet_img_paths'], d['enemy_img_paths'], d['background_img_paths'], d['explosion_img_paths'],
d['mothership_img_path'], d['trophy_img_path'], d['scout_img_path'], d['brute_img_path'], d['crabs_img_path']))
load_sprites_dict((StoryTemplate, PowerUp, MobInstructionsScreen),
(d['story_img_path'], d['powerup_img_path'], d['place_holder_path']))
#Load sounds
self.sound = Sound({}, False, False, False)
        #Load the sounds into the relevant Sprites
Bullet.sound = self.sound
#Add explosion sound
Explosion.sound = self.sound
#Initialise pygame
pygame.init()
pygame.font.init()
self.screen_width = 600
self.screen_height = 800
self.fps = 60
#Init screen
self.screen = pygame.display.set_mode((self.screen_width, self.screen_height))
#Init playscreen
self.state = self.create_screen()
self.written = False
#Set fps
self.clock = pygame.time.Clock()
#Player Object
self.player = self.state.player1
self.nextState = -1
def create_screen(self) -> Screen:
"""Create the playscreen by default"""
return PlayScreen(self.screen_width, self.screen_height, self.screen, 5, self.fps, Difficulty(4))
def mainloop(self) -> None:
"""Mainloop"""
running = True
while running:
#Clock the fps
self.clock.tick(self.fps)
#Fill background with black
self.screen.fill((0,0,0))
#Draw the play screen
self.nextState = self.state.handle()
#Draw the hitbox
self.state.draw_hitboxes()
#Print score if the game is over
if self.nextState == State.GAMEOVER:
#Print the score and quit the game
print(f"Score: {self.state.get_score()}")
#Update the display with the screen
pygame.display.update()
#print(self.get_space_boolean())
#If the state is quit or player closes the game
for item in pygame.event.get():
if item.type == pygame.QUIT:
running = False
    def action(self, number:int) -> None:
        """Performs the action based on the number
            0: Shoot
            1: Move left
            2: Move right
            3: Do nothing
            4: Move left and shoot
            5: Move right and shoot
        """
        self.get_action()[number]()
def get_wave(self):
return self.state.get_wave()
    def move_shoot(self, b:bool) -> bool:
        '''
        To encompass both move_left and shoot, and move_right and shoot action
        from Original Space Invaders Gym Environment
        '''
        if b:
            self.player.move_left()
        else:
            self.player.move_right()
        return self.player.shoot()
    def get_action(self) -> tuple:
        """List of actions that the player can take (tuple of functions)"""
        return (self.player.shoot,
                self.player.move_left,
                self.player.move_right,
                lambda : 1,
                lambda : self.move_shoot(True),
                lambda : self.move_shoot(False))
    def get_space(self):
        """
        Returns the pixel space of the screen
        Performs preliminary preprocessing by drawing the hitboxes before capturing the pixels
        """
#load the hitbox
self.state.draw_hitboxes()
#Returns the array
return pygame.surfarray.array3d(self.state.surface)
def show_space(self):
"""Show the space in a matplotlib diagram"""
image_transp = np.transpose(self.get_space())
plt.imshow(image_transp, interpolation='none')
plt.show()
def is_over(self) -> bool:
'''Returns if game state is over or quit'''
return self.state.is_over()
def get_score(self) -> int:
"""Get the current score"""
return self.state.get_score()
def reset(self) -> None:
'''Wrapper method for reseting the screen'''
self.state.reset()
def get_player(self) -> tuple:
'''Get the player character position -- for Debugging Purposes'''
return (self.player.get_x(),self.player.get_y())
def get_player_lives(self):
"""Get the number of lives of the player"""
return self.player.get_lives()
def get_enemies(self) -> tuple:
'''Get positions of each enemy'''
return tuple(map(lambda e: (e.get_x(),e.get_y()), self.state.get_enemies()))
def handle(self):
'''Draws the hitboxes of each enemy after updating the state of the enemy'''
self.state.handle()
self.state.draw_hitboxes()
def close(self):
# self.state.close()
pass
if __name__ == '__main__':
settings = "settings.cfg"
game = PyGame_2D(settings)
game.mainloop()
```
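A sketch of how an agent loop could drive the wrapper above, assuming the game assets and `settings.cfg` are available and the module's relative imports resolve (e.g. when run from inside `gym_game/envs`). The action indices follow the `action()` docstring.
```python
import random
from AI_game import PyGame_2D   # import path assumed; see the note above

game = PyGame_2D("settings.cfg")

# Drive a few frames with random basic actions: 0 shoot, 1 left, 2 right, 3 no-op
for _ in range(100):
    game.action(random.randrange(4))
    game.handle()                  # advance the play screen and redraw hitboxes
    frame = game.get_space()       # (width, height, 3) pixel array of the frame
    if game.is_over():
        break

print("Final score:", game.get_score())
```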
#### File: classes/Game/game.py
```python
import pygame
import datetime
import asyncio
import os
from pygame.locals import *
from . import *
#Initialise pygame
pygame.init()
#Initialise the font
pygame.font.init()
#Initialise the sound
pygame.mixer.init()
def load_sprites(obj_list:list, paths:list):
"""Load the sprites for each of the items in parallel"""
#Run functions concurrently
for i, obj in enumerate(obj_list):
#Add image sprites to each class concurrently
asyncio.run(add_to_sprite(obj, paths[i]))
def load_sprites_dict(obj_list:list, paths:list) -> None:
"""Load the sprites as dict"""
#Run functions concurrently
for i, obj in enumerate(obj_list):
#Add image sprites to each class concurrently
asyncio.run(add_to_sprite_dict(obj, paths[i]))
async def add_to_sprite(obj, sprite_path:list) -> None:
"""Add the pygame image to the object"""
#For each object load the image and append it to the object
for path in sprite_path:
#Append the sprite
obj.sprites.append(pygame.image.load(path))
async def add_to_sprite_dict(obj, sprite_path:list) -> None:
"""Add the pygame image to the object"""
#For each object load the image and append it to the object
for path in sprite_path:
#Added the name to the sprite dict
obj.sprites[os.path.basename(path)[:-4]] = (pygame.image.load(path))
async def load_sound(sound_path:str, settings:int, volume:float, debug:bool) -> Sound:
"""Load the sound object"""
return Sound(dict(map(lambda x: (x[0], pygame.mixer.Sound(x[1])), sound_path.items())), bool(int(settings)), volume, debug)
class GameWindow(object):
def __init__(self, sensitivity:int, maxfps:int, game_width:int, game_height:int, icon_img_path:str, player_img_paths:tuple,
enemy_img_paths:tuple, bullet_img_paths:tuple, background_img_paths:tuple, explosion_img_paths:tuple,
db_path:str, sound_path:dict, bg_limit:int, menu_music_paths:tuple, powerup_img_path:tuple, mothership_img_path:tuple,
trophy_img_path:tuple, scout_img_path:tuple, brute_img_path:tuple, screenshot_path:str, story_img_path:str, crabs_img_path:str,
place_holder_path:str, wave:int = 1, debug:bool = False):
"""The Main window for the Space defenders game"""
#Load sprites
load_sprites((Player, Bullet, EnemyShip, Background, Explosion, MotherShip, VictoryScreen, Scout, Brute, Crabs),
(player_img_paths, bullet_img_paths, enemy_img_paths, background_img_paths, explosion_img_paths, mothership_img_path, trophy_img_path, scout_img_path, brute_img_path, crabs_img_path))
load_sprites_dict((StoryTemplate, PowerUp, MobInstructionsScreen),
(story_img_path, powerup_img_path, place_holder_path))
#Store debug variable
self.debug = debug
#Storing the path for screenshots
self.screenshot_dir = screenshot_path
#Load setting menu settings
self.settingsdb = SettingsDB(db_path)
self.settings_data = dict(map(lambda x: x[1:], self.settingsdb.fetch_all()))
#Load sounds
self.sound = asyncio.run(load_sound(sound_path, self.settings_data['music'], float(self.settings_data['volume']), self.debug))
#Set the title
pygame.display.set_caption("Space Invaders")
#Load and set the Icon
icon = pygame.image.load(icon_img_path)
pygame.display.set_icon(icon)
#Set the dimensions
self.main_screen = pygame.display.set_mode((game_width,game_height), pygame.DOUBLEBUF | pygame.HWSURFACE, 32)
#Initialise the pygame vars
self.clock = pygame.time.Clock()
self.score = 0
self.fps = maxfps
self.sensitivity = sensitivity
self.game_width = game_width
self.game_height = game_height
#Starting State
self.state = State.MENU
#Check if highscore is written
self.written = False
#Load the highscores
self.score_board = ScoreBoard(db_path)
#Set difficulty
difficulty = int(self.settings_data['difficulty'])
self.difficulty = Difficulty(difficulty if difficulty < 6 else 6)
#Load the sounds into the game
pygame.mixer.music.load(menu_music_paths[0])
#Create the background object
self.bg = Background(int(self.settings_data['background']), game_width, game_height, bg_limit, debug)
#Create the Menu Screen objects
self.instructions = InstructionScreen(game_width, game_height, self.main_screen, debug = self.debug)
self.menu = MenuScreen(game_width, game_height, self.main_screen, debug = self.debug)
self.play_menu = PlayModeScreen(game_width, game_height, self.main_screen, debug)
self.highscore = HighscoreScreen(game_width, game_height, self.main_screen, self.score_board.fetch_all(), debug = self.debug)
self.pvp_menu = PVPInstructionsScreen(game_width, game_height, self.main_screen, debug)
self.inst_menu = InstructionsMenuScreen(game_width, game_height, self.main_screen, debug)
self.settings = SettingsScreen(game_width, game_height, self.main_screen, self.fps, self.sound, self.bg, self.difficulty, debug)
self.one_player_menu = OnePlayerModeScreen(game_width, game_height, self.main_screen, debug)
self.story_mode = StoryModeScreen(game_width, game_height, self.main_screen, debug)
self.powerup_instructions = PowerupInstructionsScreen(game_width, game_height, self.main_screen, self.fps, debug)
self.mobs_instructions = MobInstructionsScreen(game_width, game_height, self.main_screen, self.fps, debug)
self.ai_menu = AIMenuScreen(game_width, game_height, self.main_screen, debug)
#Create playing screens
self.play = PlayScreen(game_width, game_height, self.main_screen, sensitivity, maxfps, self.difficulty, 3, debug = self.debug)
self.two_player = TwoPlayerScreen(game_width, game_height, self.main_screen, self.debug)
self.pvp = LocalPVPScreen(game_width, game_height, self.main_screen, sensitivity, maxfps, 3, debug)
self.classic = ClassicScreen(game_width, game_height, self.main_screen, sensitivity, maxfps, self.difficulty, debug = self.debug)
self.coop = CoopScreen(game_width, game_height, self.main_screen, sensitivity, maxfps, self.difficulty, 3, debug)
self.ai_vs = AIPVPScreen(game_width, game_height, self.main_screen, sensitivity, maxfps, 3, debug)
self.online = OnlinePVPScreen(game_width, game_height, self.main_screen, sensitivity, maxfps, 3, debug)
self.ai_coop = AICoopScreen(game_width, game_height, self.main_screen, sensitivity, maxfps, self.difficulty, 3, debug)
#Create the stages
self.tutorial = TutorialScreen(game_width, game_height, self.main_screen, sensitivity, maxfps, debug)
self.stage1 = Stage1Screen(game_width, game_height, self.main_screen, sensitivity, maxfps, debug)
self.stage2 = Stage2Screen(game_width, game_height, self.main_screen, sensitivity, maxfps, debug)
self.stage3 = Stage3Screen(game_width, game_height, self.main_screen, sensitivity, maxfps, debug)
self.stage4 = Stage4Screen(game_width, game_height, self.main_screen, sensitivity, maxfps, debug)
self.stage5 = Stage5Screen(game_width, game_height, self.main_screen, sensitivity, maxfps, debug)
self.stage6 = Stage6Screen(game_width, game_height, self.main_screen, sensitivity, maxfps, debug)
#Dynamic screens
self.stage_gameover = StageGameoverScreen(game_width, game_height, self.main_screen, debug)
self.victory = None
self.newhighscore = None
self.pause = None
self.pvp_gameover = None
self.game_over = None
self.stage_pause = None
#Sort the highscore
self.highscore.sort_scores()
#Store the variables
self.popup = None
self.prev = State.NONE
self.cooldown = self.fps // 5
#Store the different screens in state
self.screens = {
State.MENU:self.menu,
State.PLAYMODE:self.play_menu,
State.PLAY:self.play,
State.HIGHSCORE:self.highscore,
State.INSTRUCTIONS_MENU: self.inst_menu,
State.INSTRUCTIONS:self.instructions,
State.PVP_INSTRUCTIONS: self.pvp_menu,
State.TWO_PLAYER_MENU: self.two_player,
State.AI_COOP: self.ai_coop,
State.AI_VS: self.ai_vs,
State.PVP: self.pvp,
State.CLASSIC: self.classic,
State.SETTINGS: self.settings,
State.COOP: self.coop,
State.ONLINE: self.online,
State.TUTORIAL: self.tutorial,
State.ONE_PLAYER_MENU: self.one_player_menu,
State.STORY_MENU: self.story_mode,
State.STAGE1:self.stage1,
State.STAGE2:self.stage2,
State.STAGE3:self.stage3,
State.STAGE4:self.stage4,
State.STAGE5:self.stage5,
State.STAGE6:self.stage6,
State.POWERUP_INSTRUCTIONS: self.powerup_instructions,
State.MOBS_INSTRUCTIONS: self.mobs_instructions,
State.STAGE_GAMEOVER: self.stage_gameover,
State.STAGE_PAUSE: self.stage_pause,
State.AI_MENU: self.ai_menu
}
#Store the different states the menu has
self.states = {
State.NEWHIGHSCORE:self.handle_newhighscore,
State.GAMEOVER:self.handle_gameover,
State.PAUSE:self.handle_pause,
State.TWO_PLAYER_GAMEOVER:self.handle_two_player_gameover,
State.TWO_PLAYER_PAUSE: self.handle_two_player_pause,
State.ONLINE: self.handle_online,
State.QUIT:self.__del__,
State.VICTORY: self.handle_victory,
State.STAGE_PAUSE: self.handle_stage_pause,
}
#Load sound state:
self.sound_state = self.sound.get_state()
#Initialise the music
pygame.mixer.music.load(menu_music_paths[0])
#Play the music if sound is enabled
if self.sound_state:
#Loop forever
pygame.mixer.music.play(-1)
        #Load the sounds into the relevant Sprites
Bullet.sound = self.sound
#Add explosion sound
Explosion.sound = self.sound
#Add Powerup sound
PowerUp.sound = self.sound
#Add pause sound
TwoPlayerPauseScreen.sound = self.sound
PauseScreen.sound = self.sound
GameoverScreen.sound = self.sound
TwoPlayerGameoverScreen.sound = self.sound
def handle_victory(self) -> State:
"""Handle the drawing of the victory screen"""
#Get the cleared stage
stage = self.screens[self.prev]
#Get the name of the stage cleared
cleared_stage = stage.get_stage_name()
#Reset the cleared stage
stage.reset()
#If there is no victory screen or it is a different victory screen
if not self.victory or self.victory.get_stage_name() != cleared_stage:
self.victory = VictoryScreen(self.game_width, self.game_height, self.main_screen, cleared_stage, self.sound)
#Handle the victory screen
return self.victory.handle()
def handle_online(self) -> State:
"""Handle the online game"""
#Currently waiting for a place to host the server
self.popup = Popup(320, 40, "Under Construction", self.fps, self.game_width//2 - 80, self.game_height//2, self.main_screen,font = Screen.end_font, debug = self.debug)
#Return playmode for now until server is found
return State.PLAYMODE
#Handle the online mode
# return self.online.handle()
def handle_two_player_pause(self) -> State:
"""Handle the PVP pause screen"""
#Check based on previous state
prev_screen = self.screens[self.prev]
scores = self.screens[self.prev].get_scores()
#Create the pause screen
self.two_player_pause = TwoPlayerPauseScreen(self.game_width, self.game_height, self.main_screen, *scores, self.prev, self.debug)
#Return the function
state = self.two_player_pause.handle()
#If new state is menu state
if state == State.MENU:
#Reset the state
prev_screen.reset()
#Return menu state
return state
#If it goes back to the game
elif state != State.TWO_PLAYER_PAUSE:
#Return previous state
return self.prev
#Otherwise return the state
return state
def handle_newhighscore(self) -> State:
"""Handle the displaying of the highscore screen"""
#If there is a new highscore that is different from before
if not self.newhighscore or self.newhighscore.get_score() != self.play.get_score():
#Create the new highscore screen
self.newhighscore = NewhighscoreScreen(self.game_width, self.game_height, self.main_screen, self.play.get_score())
#Get the next state
state = self.newhighscore.handle()
#If the state is gameover
if state == State.GAMEOVER:
#Debugging information
if self.debug:
print(f"Name:{NewhighscoreScreen.get_name()}\nScore:{self.play.get_score()}")
#Update the score of the player
self.highscore.update_score(self.newhighscore.get_name(), self.play.get_score())
#Clear the input box
NewhighscoreScreen.inputbox.clear()
#Mark as highscore written
self.written = True
#Return the next state
return state
def handle_pause(self) -> State:
"""Handle the displaying of the pause screen"""
#Get the correct score from the correct state
score = self.screens[self.prev].get_score()
#Create the pause screen if it is not already created
if not self.pause or self.pause.get_score() != score:
self.pause = PauseScreen(self.game_width,self.game_height, self.main_screen, score, self.prev, self.debug)
#Handle the pause screen
state = self.pause.handle()
#If it is exiting out of the pause state
if state != State.PAUSE and state != self.prev:
#Reset the screen
self.screens[self.prev].reset()
#Return the next state
return state
def handle_stage_pause(self) -> State:
"""Handle the pause screen for stages"""
#Get the current stage
stage = self.screens[self.prev].get_stage_name()
#Create the pause screen if it is not already created
if not self.stage_pause or self.stage_pause.get_stage() != stage:
self.stage_pause = StagePauseScreen(self.game_width,self.game_height, self.main_screen, self.prev, self.debug)
#Handle the pause screen
state = self.stage_pause.handle()
#If it is exiting out of the pause state
if state != State.STAGE_PAUSE and state != self.prev:
#Reset the screen
self.screens[self.prev].reset()
#Return the next state
return state
def handle_two_player_gameover(self) -> State:
"""Handle the PVP gameover screen"""
#Set variables based on previous state
prev_screen = self.screens[self.prev]
scores = prev_screen.get_scores()
#Generate gameover screen if it is not found
if not self.pvp_gameover or self.pvp_gameover.get_scores() != scores:
self.pvp_gameover = TwoPlayerGameoverScreen(self.game_width, self.game_height, self.main_screen, *scores)
#Get next state
state = self.pvp_gameover.handle()
#If the gameover screen is over
if state != State.TWO_PLAYER_GAMEOVER:
#Reset the environment
prev_screen.reset()
#Return the state
return state
def handle_gameover(self) -> State:
"""Handle the displaying of the gameover screen"""
#Check previous state
if self.prev == State.PLAY or self.prev == State.NEWHIGHSCORE:
#If it is a new highscore
if self.highscore.beat_highscore(self.play.get_score()) and not self.written:
#Go to the new highscore state
return State.NEWHIGHSCORE
#Create the gameover screen
self.game_over = GameoverScreen(self.game_width,self.game_height, self.main_screen, self.play.get_score(), self.debug)
#Check the state given by gameover
state = self.game_over.handle()
#If player is going back
if state == State.MENU:
#Mark written as false
self.written = False
#Reset the play screen
self.play.reset()
#If it is classic mode
else:
#Get the score of the previous screen
score = self.screens[self.prev].get_score()
#Create the gameover screen
if not self.game_over or self.game_over.get_score() != score:
self.game_over = GameoverScreen(self.game_width,self.game_height, self.main_screen, score, self.debug)
#Check the state given by gameover
state = self.game_over.handle()
#If player is going back
if state == State.MENU:
#Mark written as false
self.written = False
#Reset the play screen
self.screens[self.prev].reset()
#Return the state
return state
def get_state(self) -> State:
"""Return the state the game is in"""
return self.state
async def screenshot(self) -> None:
"""Take a screenshot
Runs in parallel to the game to reduce lag while screenshotting in game
"""
#Check if the screenshot folder is not there
if not os.path.isdir(self.screenshot_dir):
#Create the screenshot_dir folder
os.mkdir(self.screenshot_dir)
#Save a screenshot named based on date and time
name = os.path.join(self.screenshot_dir, f'{datetime.datetime.now().strftime("%d_%m_%Y_%H_%M_%S")}.png')
#Play the screenshot sound
self.sound.play('screenshot')
#Print debug message
if self.debug:
print(f"Saved at: {name}")
#Save the image
pygame.image.save(self.main_screen, name)
def check_keypresses(self) -> None:
"""Check global keypresses within the game"""
#Get the keys which are pressed
keys = pygame.key.get_pressed()
#Check each key individually
if keys[K_x] and self.state != State.NEWHIGHSCORE:
#Run the screenshot in parallel
asyncio.run(self.screenshot())
#Create a 1 second popup saying screenshot is taken
self.popup = Popup(20*8, 30, "Screenshot taken", self.fps, self.game_width//2, 15, self.main_screen, font = Screen.font, debug = self.debug)
def fill_background(self) -> None:
"""Set the background"""
#If the background is present
if self.bg.is_present():
#Fill it with the background img
self.bg.update(self.main_screen)
#Otherwise
else:
#Fill the background to black
self.main_screen.fill(BLACK)
def find_method(self, prev) -> State:
"""Find the appropriate method to execute"""
#Try to find the method in self.states
f = self.states.get(prev, None)
#If method is found execute it else call the default handle
return f() if f else self.screens.get(self.state).handle()
def check_sound(self):
"""Check if bg should be playing"""
#Check if background music should be playing
if self.sound.get_state() != self.sound_state:
#Get the state of sound
self.sound_state = self.sound.get_state()
#If sound is enabled
if self.sound_state:
#Play the music
pygame.mixer.music.play(-1)
#Otherwise
else:
#Pause the music
pygame.mixer.music.pause()
def update(self) -> None:
"""Update the main screen"""
#Set the background
self.fill_background()
#Check sound
self.check_sound()
#Save previous state
prev = self.state
#If on cooldown
if self.cooldown:
#Lower cooldown
self.cooldown -= 1
#Run the state
self.find_method(prev)
#Otherwise
else:
#Check if there is new state
self.state = self.find_method(prev)
#If the state is different
if prev != self.state:
#Play the click sound
self.sound.play('click')
#Set the self.prev state
self.prev = prev
#Reset the cooldown
self.cooldown = self.fps//5
#Reset Popups
self.popup = None
#Check popups
if self.popup:
#Update popups
self.popup.update()
def post_process(self):
"""Do the post processing"""
#Get the screen it is using
screen = self.screens.get(self.state, None)
#Do post process for the screen
if screen:
#Call the post process method
screen.post_process()
def mainloop(self) -> None:
"""The mainloop to load the screen of the game"""
#Print the mainloop run based on debug mode
if self.debug:
print("Running the main loop")
#Loop variables
running = True
#Mainloop for pygame GUI
while running:
#Set the FPS
self.clock.tick(self.fps)
#Update game states
self.update()
#Check Global keypresses
self.check_keypresses()
#Update the display with the screen
pygame.display.update()
#Do post processes
self.post_process()
#If the state is quit or player closes the game
if self.state == State.QUIT or pygame.QUIT in tuple(map(lambda x: x.type, pygame.event.get())):
#Set running to false
running = False
#Play the exit sound
self.sound.play('exit')
def __del__(self) -> None:
"""Destructor for the game window. Closes all the relavent processes"""
#Add the new highscores into DB
self.score_board.add_all(*self.highscore.get_scores())
#Remove all entries beyond 5
self.score_board.remove_all(*self.highscore.get_removed())
#Close the score board
self.score_board.__del__()
#Get the settings that was saved and save it to the Database
self.settingsdb.update('volume',str(self.settings.get_volume()))
self.settingsdb.update('background',str(self.settings.get_bg_no()))
self.settingsdb.update('music',int(self.settings.get_music_enabled()))
self.settingsdb.update('difficulty', int(self.settings.get_difficulty_no()))
#Close the settingsdb
self.settingsdb.__del__()
#Quit the game
pygame.quit()
#Debug message
if self.debug:
print("Closed Game Window")
```
#### File: Game/Screens/NewhighscoreScreen.py
```python
import pygame
from pygame.locals import *
from . import Screen
from .. import InputBox, State, WHITE
class NewhighscoreScreen(Screen):
#Input box for the player to key in his name
inputbox = InputBox(300, 400, 30, Screen.end_font)
def __init__(self, screen_width:int, screen_height:int, screen, score:int, debug:bool = False):
"""Main screen for the new highscore screen"""
#Call the superclass
super().__init__(screen_width, screen_height, State.NEWHIGHSCORE, screen, 0, 0, debug)
#Define new variables
self.score = score
#Start pixel to start drawing
start_px = 100
#Tell the user he has a new high score
self.write(self.title_font, WHITE, f"NEW HIGH SCORE", self.screen_width//2, start_px)
#Write the score the user got
self.write(self.end_font, WHITE, f"Score: {self.score}", self.screen_width//2, start_px + self.screen_height // 10)
#Tell the user to key in his name
self.write(self.font, WHITE, f"Please enter your name and press enter", self.screen_width//2, start_px + self.screen_height//5)
#Draw the sprites
self.draw()
def get_score(self) -> int:
"""Get the score of the player"""
return self.score
@staticmethod
def get_name() -> str:
"""Get the name of that was keyed into the inputbox"""
return NewhighscoreScreen.inputbox.get_text()
def draw(self) -> None:
"""Draw the Screen onto the Surface"""
#Draw the inputbox
NewhighscoreScreen.inputbox.blit(self.screen)
def handle(self) -> State:
"""Handles the new highscore and get user's name from input"""
#Check each keydown event in pygame event queue
for event in tuple(filter(lambda x: x.type==pygame.KEYDOWN, pygame.event.get())):
#Check if it is the backspace key
if event.key == K_BACKSPACE:
#Do backspace on the inputbox
NewhighscoreScreen.inputbox.backspace()
#Check if the player hit any other key
elif event.key != K_RETURN:
#Add the character to the inputbox
NewhighscoreScreen.inputbox.add(event.unicode)
#If the player hit the return key
else:
#Return Gameover State
return State.GAMEOVER
#Update the Inputbox
NewhighscoreScreen.inputbox.update()
#Draw the sprites
self.draw()
#Update the surface
self.update()
#Check if the player wants to pause or quit
if self.check_quit():
return State.QUIT
#Return the Current game state
return State.NEWHIGHSCORE
```
#### File: Game/Screens/OnlinePVPScreen.py
```python
import pygame
import random
from . import LocalPVPScreen
from .. import Network, State, WHITE, Bullet
from pygame.locals import *
class OnlinePVPScreen(LocalPVPScreen):
def __init__(self, screen_width:int, screen_height:int, screen, sensitivity:int, fps:int, player_lives:int = 3, debug:bool = False):
"""The main class for the online PVP screen"""
#Call the superclass
super().__init__(screen_width, screen_height, screen, sensitivity, fps, player_lives, debug)
#Set the state to the correct state
self.set_state(State.ONLINE)
#Reset the screen to default
self.reset()
def reset(self) -> None:
"""Reset the online screen"""
#Set the seed to none
self.seed = None
#Set player shot to none
self.shot = False
        #Track whether this client is the first player
self.first = None
#Reset variables
self.network = None
self.waiting = True
self.disconnected = False
#Call the superclass reset
super().reset()
def create_network(self):
"""Create the network for the player to be hosted on"""
#Create the network
self.network = Network("192.168.127.12", 8080)
def pack_player_data(self):
"""Pack the data into the correct form to be sent"""
return (self.player2.get_x(), self.p2_score, self.shot)
def communicate(self):
"""Communicate with the server"""
#Send information on current player
data = self.network.send(self.pack_player_data())
#If player is waiting
if self.waiting:
#Check if player should continue to wait
self.waiting = data['waiting']
#If the data is empty
if not self.waiting:
#Unpack the data
p1_x, self.p1_score, p1_shot = data['data']
#If not sure which player he is get from network
self.first = data['isfirst']
            #Set the coordinate for player 1 (the opponent)
            self.player1.set_coord((p1_x,self.player1.get_y()))
            #If player 1 shot
            if p1_shot:
                #Make player 1 shoot
                self.player1.shoot()
#If there is no seed get the seed
self.set_seed(data['seed'])
def set_seed(self, seed:int):
"""Set the seed for the random"""
if self.seed == None:
self.seed = seed
random.seed(seed)
def generate_random_no(self):
"""Generate a random number from the server"""
return random.random()
def generate_direction(self):
"""Generate the direction the bullet was suppose to go"""
#If this is the first player
if self.first:
#Give the direction given by the superclass
return super().generate_direction()
#Otherwise
else:
            #Mirror the direction given by the superclass for the second player
            return 1 - super().generate_direction()
def shoot_bullet(self, enemy):
"""Modified shoot bullet for online"""
#If the set is non-empty
if enemy:
            #Get the x_coord of the enemy
x_coord = enemy.get_x()
#Get direction of bullet
direction = self.bullet_direction()
#Add bullet to the mob_bullets
self.mob_bullet.add(Bullet(self.sensitivity, x_coord, self.screen_height//2, direction, self.screen_width, self.screen_height, self.debug))
    def check_keypresses(self) -> bool:
        """Check the keys which are pressed"""
        #Get all the keys which are currently pressed
        keys = pygame.key.get_pressed()
        if not self.player2.is_destroyed():
            #Check the local player's (player 2) keys
            if keys[K_a]:
                #Move player 2 to the left
                self.player2.move_left()
            if keys[K_d]:
                #Move player 2 to the right
                self.player2.move_right()
            if keys[K_SPACE]:
                #Let player 2 shoot
                self.shot = self.player2.shoot()
        #No screen change is triggered by keypresses
        return False
def handle(self) -> State:
"""Handle the drawing of the screen"""
#If it is not connected to the network
if not self.network:
#Create the network
self.create_network()
#Communicate with network
try:
self.communicate()
#If there is a value error
except ValueError:
#Opponent has disconnected
print("Opponent disconnected")
#Close the network
self.network.close()
#Go to next state
return State.TWO_PLAYER_GAMEOVER
#Otherwise there is an error in communication
except Exception as exp:
print(f"Error communicating: {exp}")
#Draw the loading screen
if self.waiting:
#Draw loading screen
self.write_main(self.end_font, WHITE, f"Loading", self.screen_width // 2, self.screen_height//2)
self.back_rect = self.write_main(self.end_font, WHITE, f"Back", self.screen_width // 2, self.screen_height//2 + self.screen_height//15)
#Check if player clicked the back button
if self.check_clicked(self.back_rect):
#Close the network
self.close_network()
#Return to the playmode screen
return State.PLAYMODE
#Otherwise return current state
return self.state
#If player disconnected go to gameover screen
elif self.disconnected:
#Close the network
self.close_network()
#Return next state
return State.TWO_PLAYER_GAMEOVER
#Otherwise draw the game
else:
#Set waiting to false
if self.waiting:
self.waiting = False
#Call the superclass handle
state = super().handle()
            #If the next state is gameover
if state == State.TWO_PLAYER_GAMEOVER:
#Close the network
self.close_network()
#Return next state
return state
def close_network(self):
"""Close the network and reset variable"""
#If there is a socket previously
if self.network:
self.network.close()
#Reset variables
self.network = None
self.waiting = True
#Reset game
self.reset()
def __del__(self):
"""Destructor for the object"""
#If the network is connected
if self.network:
#Close the network
self.close_network()
```
#### File: Game/Screens/Popup.py
```python
import pygame
from . import Screen
from .. import State, WHITE
class Popup(Screen):
def __init__(self, popup_width:int, popup_height:int, sentence:str, tick_life:int, initial_x:int, initial_y:int, screen, font = False, debug:bool = False):
"""Main Popup class"""
#Store variables
self.ttl = tick_life
self.sentence = sentence
#Call the Screen superclass init
super().__init__(popup_width, popup_height, State.NONE, screen, initial_x - (len(sentence) * 5), initial_y, debug)
#Fill itself black
self.set_background((0,0,0))
#If no font is set
if not font:
#Default to screen.font
font = self.font
#Render the words for the popup
self.write(font, WHITE, sentence, popup_width // 2, popup_height//2)
def update(self):
"""Update function for the popup"""
#If time to live > 0
if self.ttl:
#Reduce the ttl of the popup
self.ttl -= 1
#Call the superclass update
super().update()
#Return itself
return self
else:
return None
```
#### File: Game/Screens/StoryModeScreen.py
```python
from . import Screen
from .. import WHITE, State, Direction
class StoryModeScreen(Screen):
videos = []
def __init__(self, screen_width:int, screen_height:int, screen, debug:bool = False):
"""Constructor for Story mode screen"""
#Call the superclass
super().__init__(screen_width, screen_height, State.STORY_MENU, screen, 0, 0, debug)
#Draw the title
self.write(self.title_font, WHITE, "Story modes", self.screen_width /2, self.screen_height /5)
#Store the current number of stages
self.stages = 6
#Create list to store the buttons
self.video_buttons = []
#Put a video for the StoryMode:
self.create_videos()
#Draw the back button
self.back = self.write(self.end_font, WHITE, "Back", self.screen_width // 2, self.screen_height / 1.2)
def get_stages(self) -> int:
"""Get the total number of stages in the story mode"""
return self.stages
def create_videos(self) -> None:
"""Create videos buttons for the videos"""
#Iterate through all the stage videos
for i in range(1,self.get_stages()//2+1):
#Append the button to the buttons list for checking later
self.video_buttons.append(self.write(self.end_font, WHITE, f"Stage {i} ", self.screen_width // 2, self.screen_height // 2 + (40*i) - 30, Direction.RIGHT))
for i in range(1, self.get_stages() - self.get_stages()//2 + 1):
#Append the button to the buttons list for checking later
self.video_buttons.append(self.write(self.end_font, WHITE, f" Stage {self.get_stages()//2 + i}", self.screen_width // 2, self.screen_height // 2 + (40*i - self.get_stages() // 2 + 1) - 30, Direction.LEFT))
def check_mousepress(self) -> State:
"""Check the mouse press of the user"""
#Check if the user clicked the back button
if self.check_clicked(self.back):
#Return menu state
return State.ONE_PLAYER_MENU
#Loop through the the video buttons
for index,rect in enumerate(self.video_buttons):
if self.check_clicked(rect):
return State(100 + index)
return self.state
def handle(self) -> None:
"""Handle the drawing of the screen"""
#Call the superclass update
super().update()
#Check mousepress of user
return self.check_mousepress()
```
#### File: Game/Screens/TwoPlayerPauseScreen.py
```python
import pygame
from pygame.locals import *
from . import Screen
from .. import WHITE, State, Direction
class TwoPlayerPauseScreen(Screen):
#Check if the paused sound is played
sound = None
played = False
def __init__(self, screen_width:int, screen_height:int, screen, p1_score:int, p2_score:int, prev_state:State, debug:bool = False):
"""Main class for PVP pause screen"""
#Call the superclass
super().__init__(screen_width, screen_height, State.TWO_PLAYER_PAUSE, screen, 0, 0, debug)
#Play the pause sound
if TwoPlayerPauseScreen.sound and not TwoPlayerPauseScreen.played:
#Play the sound
TwoPlayerPauseScreen.sound.play('pause')
TwoPlayerPauseScreen.played = True
#Store the vars
self.p1 = p1_score
self.p2 = p2_score
self.prev = prev_state
#Draw the header
self.write(self.title_font, WHITE, "Paused", screen_width//2, screen_height//5)
#First pixel used for alignment
first_pixel = screen_height // 2
#Draw the player 1 score
self.write(self.end_font, WHITE, f"Player 1: {p1_score}", screen_width//4, first_pixel, Direction.LEFT)
#Draw the player 2 score
self.write(self.end_font, WHITE, f"Player 2: {p2_score}", screen_width//4, first_pixel + self.screen_height//15, Direction.LEFT)
#Draw the instructions to unpause
self.write(self.end_font, WHITE, "Press P to unpause", self.screen_width//4, first_pixel + self.screen_height//7.5, Direction.LEFT)
#Draw the instructions to quit
self.write(self.end_font, WHITE, "Escape to quit", self.screen_width//4, first_pixel + self.screen_height//5, Direction.LEFT)
def get_scores(self) -> tuple:
"""Return the score of the 2 players"""
return self.p1,self.p2
def update_keypresses(self) -> State:
"""Check for the keypresses within the pause screen
Arguments:
No arguments:
Returns:
No return
"""
#Getting the keys which are pressed
keys = pygame.key.get_pressed()
#Return the play state if the player unpause his game
if keys[K_p]:
TwoPlayerPauseScreen.played = False
return self.prev
#If the player press the escape key, quit the game
elif keys[K_ESCAPE]:
TwoPlayerPauseScreen.played = False
return State.MENU
#Otherwise
else:
#Return the current state if the player has not unpaused
return State.TWO_PLAYER_PAUSE
def handle(self) -> State:
"""Handle the drawing of the pause screen"""
#Update the screen
self.update()
#Check keypresses
return self.update_keypresses()
```
#### File: Game/Screens/TwoPlayerScreen.py
```python
import pygame
from pygame.locals import *
from . import Screen
from .. import WHITE, State
class TwoPlayerScreen(Screen):
def __init__(self, screen_width:int, screen_height:int, screen, debug:bool = False):
"""Constructor for the main class for the 2 player screen"""
#Call the superclass screen
super().__init__(screen_width, screen_height, State.TWO_PLAYER_MENU, screen, 0, 0, debug)
#Draw the first pixel used for alignment
first_pixel = self.screen_height // 2
#Draw the header
self.write(self.title_font, WHITE, "2 player modes", self.screen_width // 2 ,self.screen_height // 5)
#Draw the modes
#Draw the Player vs player mode
self.local_vs = self.write(self.end_font, WHITE, "Player VS Player", self.screen_width // 2, first_pixel)
#Draw the Player and Player Coop
self.coop = self.write(self.end_font, WHITE, "2 Player Coop", self.screen_width // 2, first_pixel + self.screen_height // 15)
#Draw the back button
self.back = self.write(self.end_font, WHITE, "Back", self.screen_width // 2, self.screen_height // 1.2)
#Popup
self.popup = None
def check_mouse_press(self) -> State:
"""Check the mouse press of the user"""
        #Check if the person clicked on Player VS Player
if self.check_clicked(self.local_vs):
#Return the local PVP state
return State.PVP
#If the player clicked on the back key
elif self.check_clicked(self.back):
#Return the previous screen
return State.PLAYMODE
#If the person clicked on coop
elif self.check_clicked(self.coop):
#Return the Coop mode
return State.COOP
#Otherwise
return State.TWO_PLAYER_MENU
def handle(self) -> State:
"""Handle the drawing of the 2 players screen"""
#If there is a popup
if self.popup:
#Update the popup
self.popup.update()
#Update the screen
self.update()
#Check the mouse press and return the correct state
return self.check_mouse_press()
```
#### File: Game/Sprites/Explosion.py
```python
import pygame
from . import ImageObject
class Explosion(ImageObject):
#Init for the sound
sound = None
#To store the sprites for the explosions
sprites = []
def __init__(self, tick_life:int, initial_x:int, initial_y:int, game_width:int, game_height:int, image_no:int = 0, debug:bool = False):
"""The main class for the explosion"""
#Get the correct image of the explosion
if image_no < len(Explosion.sprites):
image = Explosion.sprites[image_no]
else:
image = Explosion.sprites[0]
#Call the superclass method
super().__init__(initial_x, initial_y, game_width, game_height, image, debug)
#Play it
self.sound.play('explosion')
#Set the time to live for the explosion
self.tts = tick_life
def update(self):
"""Update the explosion"""
#If the explosion still has time to live
if self.tts:
#Decrease time to live
self.tts -= 1
#Otherwise kill it
else:
self.kill()
#Call the superclass update method
super().update()
```
#### File: Game/Stages/Stage1Screen.py
```python
from . import StoryTemplate
from .. import State, ImageObject, Direction, WHITE
class Stage1Screen(StoryTemplate):
def __init__(self, screen_width:int, screen_height:int, screen, sensitivity:int, max_fps:int, debug:bool):
"""The constructor for the Stage 1 screen"""
#Call the superclass init method
super().__init__(screen_width, screen_height, screen, State(100), sensitivity, max_fps, 0, debug)
#Commander brief image
self.bg = ImageObject(300, 285, 600, 570, StoryTemplate.sprites["commander_brief"], debug)
#Image of figure head
self.dill_bates = ImageObject(300, 210, 217, 217, StoryTemplate.sprites['bates'], debug)
self.dill_bates.scale(217,217)
#Image of the commander
self.commander = ImageObject(300, 210, 217, 217, StoryTemplate.sprites['silloette_commander'], debug)
self.commander.scale(217,217)
#Textbox
self.tb = ImageObject(300, 685, 600, 230, StoryTemplate.sprites['textbox'], debug)
def draw_bg(self):
"""Draw the background"""
#Draw the commander brief
self.bg.draw(self.screen)
#Draw the textbox
self.tb.draw(self.screen)
def pre_cutscene(self):
"""The pre_cutscene for the class"""
#Insert the Icon for the char speaking
self.dill_bates.draw(self.screen)
#Draw the background
self.draw_bg()
#Draw the next button
self.next_btn = self.write_main(self.end_font, WHITE, "Next", 580, self.tb.rect.top - 30, Direction.RIGHT)
#Lower cd of click if it is still on cooldown
if self.click_cd:
self.click_cd -= 1
#Check if the next button is clicked
if self.check_clicked(self.next_btn) and not self.click_cd:
#Increment the clicks
self.clicks += 1
#Reset the cooldown
self.click_cd = self.fps//5
#Write the character name text
self.write_main(self.end_font, WHITE, "<NAME>", 33, self.tb.rect.top + 15, Direction.LEFT)
#Pixel vars for alignment
first_px = self.tb.rect.top + 75
left_px = 40
if self.clicks == 0:
#Write the character speech text
self.render_speech(first_px, left_px, ("Commander, the enemy is at our doorstep, and we are in dire straits,",
"the enemy has surrounded Earth and is threatening our very survival",
"The enemies here are the cannon fodder of their invasion.",
"However we cannot underestimate their strength."))
elif self.clicks == 1:
#Write part 2 of the speech
self.render_speech(first_px, left_px, ("The enemies here are the cannon fodder of their invasion. ",
"However we cannot underestimate their strength.",
"As we are unable to access our main weapon caches ",
"on our Moon Base either..."))
else:
#Reset the clicks
self.clicks = 0
#Move to the next scene
self.next_scene()
#Return the current state
return self.state
def post_cutscene(self):
"""The post cutscene for stage 1"""
#Lower cd of click if it is still on cooldown
if self.click_cd:
self.click_cd -= 1
#Check if the next button is clicked
if self.check_clicked(self.next_btn) and not self.click_cd:
#Increment the clicks
self.clicks += 1
#Reset the cooldown
self.click_cd = self.fps//5
#If it is the dill_bates scene
if self.clicks <= 2:
#Insert the Icon for the char speaking
self.dill_bates.draw(self.screen)
else:
#Insert the icon for the commander
self.commander.draw(self.screen)
#Draw the background
self.draw_bg()
#Draw the next button
self.next_btn = self.write_main(self.end_font, WHITE, "Next", 580, self.tb.rect.top - 30, Direction.RIGHT)
#Pixels for alignment
first_px = self.tb.rect.top + 75
left_px = 40
if self.clicks == 0:
#Write the character name text
self.write_main(self.end_font, WHITE, "<NAME>", 33, self.tb.rect.top + 15, Direction.LEFT)
#Write the character speech text
self.render_speech(first_px, left_px, ("Good job clearing the way. Now we can prepare to",
"take our Moon Base. "))
elif self.clicks == 1:
#Write the character name text
self.write_main(self.end_font, WHITE, "<NAME>", 33, self.tb.rect.top + 15, Direction.LEFT)
#Write the character speech text
self.render_speech(first_px, left_px, ("However, there is something weird about the remains",
"of these invaders.",))
elif self.clicks == 2:
#Write the character name text
self.write_main(self.end_font, WHITE, "<NAME>", 33, self.tb.rect.top + 15, Direction.LEFT)
#Write the character speech text
self.render_speech(first_px, left_px, ("They seem to be made of some kind of biochemical alloy",
"we had been researching on Pluto..."))
elif self.clicks == 3:
#Write the character name text
self.write_main(self.end_font, WHITE, "Commander", 33, self.tb.rect.top + 15, Direction.LEFT)
#Write the character speech text
self.render_speech(first_px, left_px, ("After all these years, war… war never changes",))
else:
#Reset the clicks
self.clicks = 0
#Move to the next scene
return self.get_victory_state()
#Return the current state
return self.state
def play(self):
"""The playing stage for the game"""
#Call the superclass play
return super().play()
def win_condition(self):
"""The win condition of the player"""
return self.wave == 4
```
#### File: gym_game/envs/classic_env.py
```python
import gym
import sys
import pygame
from gym import spaces
import numpy as np
import logging
from .classes import *
from .custom_env import CustomEnv
np.set_printoptions(threshold=sys.maxsize)
logging.basicConfig(level=logging.DEBUG, format = '%(asctime)s - %(levelname)s - %(message)s')
logging.disable(logging.DEBUG)
class ClassicEnv(CustomEnv):
def __init__(self, settings = "settings.cfg"):
super().__init__(settings,'classic')
def create_screen(self):
"""Create the classic screen"""
return ClassicScreen(self.screen_width, self.screen_height, self.screen, 5, self.fps, Difficulty(4))
```
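ClassicEnv only overrides create_screen, so the environment lifecycle comes entirely from CustomEnv, which is not shown here. A minimal driver sketch, assuming CustomEnv follows the standard gym.Env interface (reset/step/action_space/close); treat those calls as assumptions rather than confirmed API:
```python
# Hypothetical driver loop; assumes CustomEnv exposes gym.Env-style
# reset()/step()/action_space/close(), which the excerpt above does not show.
from gym_game.envs.classic_env import ClassicEnv  # module path taken from the file header

env = ClassicEnv("settings.cfg")
obs = env.reset()
done = False
while not done:
    action = env.action_space.sample()            # random policy, purely for illustration
    obs, reward, done, info = env.step(action)
env.close()
```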
#### File: Game/Instructions/InstructionsMenuScreen.py
```python
from .. import State, WHITE, MenuTemplate
class InstructionsMenuScreen(MenuTemplate):
def __init__(self, screen_width: int, screen_height: int, screen, debug: bool = False):
"""Main Instructions menu screen"""
# A list to store the rects
self.rects = []
# Initialise the effects
self.effects = (
State.INSTRUCTIONS, State.PVP_INSTRUCTIONS, State.POWERUP_INSTRUCTIONS, State.MOBS_INSTRUCTIONS, State.MENU)
# Call the superclass
super().__init__(screen_width, screen_height, State.INSTRUCTIONS_MENU, screen, debug)
def write_lines(self) -> None:
"""Write the lines for the instruction menus"""
# Draw the header
self.header = self.write(self.title_font, WHITE, "Instructions", self.screen_width // 2,
self.screen_height // 5)
# For alignment
first_px = self.screen_height // 2
# Draw the endless mode button
self.rects.append(self.write(self.end_font, WHITE, "Single Player Modes", self.screen_width // 2, first_px))
# Draw the PVP mode Instructions
self.rects.append(self.write(self.end_font, WHITE, "2 Player Modes", self.screen_width // 2,
first_px + self.screen_height // 15))
# Draw the Powerup instructions
self.rects.append(
self.write(self.end_font, WHITE, "Powerups", self.screen_width // 2, first_px + self.screen_height // 7.5))
# Draw the sprites instructions
self.rects.append(
self.write(self.end_font, WHITE, "Enemies", self.screen_width // 2, first_px + self.screen_height // 5))
# Draw the back button
self.rects.append(self.write(self.end_font, WHITE, "Back", self.screen_width // 2, self.screen_height // 1.2))
def get_rects(self) -> tuple:
"""Get the rects on the screen"""
return tuple(self.rects)
def get_effects(self) -> tuple:
"""Get the effect on the screen"""
return self.effects
```
#### File: Game/Instructions/MobInstructionScreen.py
```python
import pygame
from . import PowerupInstructionsScreen
from .. import State, WHITE, ImageObject, Scout, EnemyShip, Crabs, Brute, MotherShip
class MobInstructionsScreen(PowerupInstructionsScreen):
# Store the sprites
sprites_dict = {}
# Store the description
description = {
'enemyship': ("Normal Enemy",
"The typical foot soldier, the color of the enemy represents its health, randomly shoots bullets"),
'brute': ("Brute", "Moves straight down, shoots downwards too. They plough through everything in their path"),
'scout': ("Scout",
"Moves diagonally from left to right, shoot bullets that moves straight down. Known to juke bullets like a badass"),
'mothership': ("Mother Ship",
"Moves rapidly from left to right at the top of the screen, does not shoot. Sometimes we are not sure what they are doing here"),
'crabs': ("Crabs",
"Moves diagonally and fires bullets diagonally as well. Warning diagonal bullets bounces off the wall"),
's-net': ("???", "Unknown information about the ship. All we know is that it is controlling all the chaos here")
}
def __init__(self, screen_width: int, screen_height: int, screen, fps, debug: bool = False):
"""Main constructor for Mob instruction screen"""
# Call the superclass constructor
super().__init__(screen_width, screen_height, screen, fps, debug)
# Set the current state
self.set_state(State.MOBS_INSTRUCTIONS)
def preprocess(self):
"""Load other variables"""
# Init the sprites
self.items = (('enemyship',
EnemyShip(0, self.screen_width // 2, self.screen_height // 5 + self.screen_height // 15, 1,
self.screen_width, self.screen_height, None, None, self.debug)),
('mothership',
MotherShip(self.screen_width // 2, self.screen_height // 5 + self.screen_height // 15,
self.screen_width, self.screen_height, 0, self.debug)),
('brute', Brute(0, self.screen_width // 2, self.screen_height // 5 + self.screen_height // 15,
self.screen_width, self.screen_height, None, self.debug)),
('scout', Scout(0, self.screen_width // 2, self.screen_height // 5 + self.screen_height // 15, 1,
self.screen_width, self.screen_height, None, self.debug)),
('crabs', Crabs(0, self.screen_width // 2, self.screen_height // 5 + self.screen_height // 15, 1,
self.screen_width, self.screen_height, None, self.debug)),
('s-net',
ImageObject(self.screen_width // 2, self.screen_height // 5 + self.screen_height // 15, 50, 50,
self.sprites_dict['unknown'], self.debug)))
# Load the current page
self.page = 1
self.total_pages = len(self.items)
def write_header(self):
"""Write the header"""
# Draw the header
self.header = self.write(self.title_font, WHITE, "Enemies", self.screen_width // 2, self.screen_height // 5)
```
#### File: Game/Instructions/PowerupInstructionsScreen.py
```python
from .. import State, PowerUp, WHITE, GREY, ImageObject, MenuTemplate
class PowerupInstructionsScreen(MenuTemplate):
description = {
'bullet_up': ("Increase bullet speed", "Increases the speed of the bullets"),
'bullet_attack_up': ("Increase bullet Damage", "Increases the damage of the bullets"),
"debuff_bullet": ("Decrease bullet Damage", "Decreases the damage of the bullet"),
"deflector": ("Deflector", "Shift all enemies up"),
"emp": ("EMP Bomb", "Destroy x lives of enemies (x is based on current wave)"),
"hp_up": ("1 up", "Increases life of player"),
"shield_up": ("Shields up", "Create blocks to shield player")
}
def __init__(self, screen_width: int, screen_height: int, screen, fps, debug: bool = False):
"""Constructor for the powerup instructions screen"""
# Store the fps
self.fps = fps
# Call the superclass
super().__init__(screen_width, screen_height, State.POWERUP_INSTRUCTIONS, screen, debug)
def preprocess(self):
"""Load other variables which will be used later"""
# Load the powerups on the screen
self.items = tuple(map(lambda x: (x[0], ImageObject(self.screen_width // 2,
self.screen_height // 5 + self.screen_height // 15, 50, 50,
x[1], self.debug)), PowerUp.sprites_dict.items()))
# Load the current page
self.page = 1
self.total_pages = len(self.items)
def write_lines(self):
"""Write the header"""
# Draw the header
self.header = self.write(self.title_font, WHITE, "Power Ups", self.screen_width // 2, self.screen_height // 5)
# Draw the back button
self.back = self.write(self.end_font, WHITE, "Back", self.screen_width // 2, self.screen_height // 1.2)
# Write data onto the main screen
self.main_write()
def get_rects(self) -> tuple:
"""Get the rects within the game"""
return (self.prev, self.back, self.next)
def get_effects(self) -> tuple:
"""Get the effects within the game"""
return (self.dec_page, self._back, self.inc_page)
def _back(self):
"""The command to run when the player backs"""
# Reset the page when player backs
self.page = 1
# Return the instructions menu screen
return State.INSTRUCTIONS_MENU
def inc_page(self):
"""Function to be executed when player clicks next"""
# If current page is less than next page
if self.page < self.total_pages:
# Increment the page by 1
self.page += 1
# Return the current state
return self.state
def dec_page(self):
"""Function to execute when player clicks prev"""
# If it is more than page 1
if self.page > 1:
# Decrease the page by 1
self.page -= 1
# Return the current state
return self.state
def insert_description(self, first_px, description: str) -> None:
"""Writes the description for the powerup"""
# Shift by screen_height //7.5 from first pixel
        first_px += self.screen_height // 7.5
# Splits the words
words = description.split()
written = 0
# While loop to write content
while written < len(words):
# Accumulator
curr = []
# While the character limit is not exceeded
while sum(map(lambda x: len(x), curr)) < 20 and written < len(words):
# Add words to list
curr.append(words[written])
# Increase written words
written += 1
# Write the accumulated words
self.write_main(self.end_font, WHITE, " ".join(curr), self.screen_width // 2, first_px)
# Increment first_px to next pt
first_px += self.screen_height // 15
def main_write(self):
"""Writing the main information onto the screen"""
# First pixel used for alignment
first_px = self.screen_height // 5 + self.screen_height // 15 + 50
# Unpack powerup sprites
name, img = self.items[self.page - 1]
# Draw the powerups name
self.write_main(self.end_font, WHITE, f"{self.description[name][0]}", self.screen_width // 2, first_px)
# Draw the description
self.insert_description(first_px, self.description[name][1])
# Draw the icon
img.draw(self.screen)
# Draw page related items
self.draw_pages()
def draw_pages(self):
"""Draw page related items"""
# Draw the next button
if self.page < self.total_pages:
self.next = self.write_main(self.end_font, WHITE, "Next", self.screen_width // (4 / 3),
self.screen_height // 1.2)
else:
self.next = self.write_main(self.end_font, GREY, "Next", self.screen_width // (4 / 3),
self.screen_height // 1.2)
# Draw the prev button
if self.page > 1:
self.prev = self.write_main(self.end_font, WHITE, "Prev", self.screen_width // 4, self.screen_height // 1.2)
else:
self.prev = self.write_main(self.end_font, GREY, "Prev", self.screen_width // 4, self.screen_height // 1.2)
# Draw the pages
self.write_main(self.end_font, WHITE, f"{self.page} / {self.total_pages}", self.screen_width // 2,
self.screen_height // 1.1)
def handle(self) -> State:
"""Handle the drawing of the PVP instructions screen"""
# Do the main writing on the screen
self.main_write()
# Call the superclass handle
return super().handle()
```
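The roughly 20-character word wrapping inside insert_description is easier to follow in isolation. Below is a standalone sketch of the same accumulation loop; wrap_words is a hypothetical helper used only for illustration, not part of the game code:
```python
# Standalone illustration of insert_description()'s wrapping loop.
# `wrap_words` is a hypothetical helper, not part of the game's API.
def wrap_words(description: str, limit: int = 20) -> list:
    words = description.split()
    lines, written = [], 0
    while written < len(words):
        curr = []
        # Keep adding words until the per-line character budget is exceeded
        while sum(len(w) for w in curr) < limit and written < len(words):
            curr.append(words[written])
            written += 1
        lines.append(" ".join(curr))
    return lines

print(wrap_words("Destroy x lives of enemies (x is based on current wave)"))
# ['Destroy x lives of enemies', '(x is based on current wave)']
```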
#### File: Game/Misc/Difficulty.py
```python
from . import Difficulty_enum
class Difficulty(object):
def __init__(self, difficulty: int):
"""Constructor for the difficulty class"""
# Load the enum
self.difficulty = Difficulty_enum(difficulty)
# Load the variables
self.load()
def load(self) -> None:
"""Load the value and name value for the difficulty"""
# Load the value
self.value = self.difficulty.value
# Load the name
self.name = self.difficulty.name
def toggle(self) -> None:
"""Toggles the difficulty"""
# Check if it is not the max difficulty
if self.difficulty.value < 6:
# Set the difficulty to 1 higher
self.difficulty = Difficulty_enum(self.difficulty.value + 1)
# Otherwise
else:
# Reset it to 1
self.difficulty = Difficulty_enum(1)
# Reload the value and name
self.load()
def get_multiplier(self, value: int) -> int:
"""Get multiplier for the current difficulty
Minimum multiplier: 1
"""
# Get multiplier
mul = self.value / 2
        # If the multiplied value is above 1
if mul * value > 1:
# Return the multiplier with the value
return int(mul * value)
else:
# Otherwise return 1
return 1
```
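A short usage sketch for the class above. The import path mirrors the file header, and Difficulty_enum is assumed to define levels valued 1 through 6, as the toggle() bounds imply:
```python
# Hypothetical usage; import path and enum range are assumptions noted above.
from Game.Misc.Difficulty import Difficulty

d = Difficulty(4)
# get_multiplier scales a value by difficulty/2 and never returns less than 1
print(d.name, d.get_multiplier(3))   # int(4/2 * 3) = 6
d.toggle()                           # step to the next difficulty, wrapping back to 1 after 6
print(d.name, d.get_multiplier(3))   # int(5/2 * 3) = 7
```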
#### File: Game/Popups/Popup.py
```python
import pygame
from .. import State, WHITE, Screen, ImageObject
class Popup(Screen):
def __init__(self, popup_width: int, popup_height: int, sentence: str, tick_life: int, initial_x: int,
initial_y: int, screen, font=False, debug: bool = False):
"""Main Popup class"""
# Store variables
self.ttl = tick_life
self.sentence = sentence
self.image = []
        # Call the Screen superclass init
        super().__init__(popup_width, popup_height, State.NONE, screen, initial_x - popup_width // 2, initial_y,
                         debug)
        # Fill itself black
        self.set_background((0, 0, 0))
        # If no font is set
        if not font:
            # Default to screen.font
            font = self.font
        # Measure the sentence only after a valid font is guaranteed
        width, height = font.size(sentence)
        # Render the words for the popup
        self.write(font, WHITE, sentence, popup_width // 2, popup_height // 2)
def add_sprite(self, image, x: int, y: int):
"""Add the sprite to x,y in the popup"""
self.image.append(ImageObject(x, y, 50, 50, image))
def update(self):
"""Update function for the popup"""
# If time to live > 0
if self.ttl:
# Draw the images
for img in self.image:
img.draw(self.screen)
# Reduce the ttl of the popup
self.ttl -= 1
# Call the superclass update
super().update()
# Return itself
return self
else:
return None
```
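A minimal sketch of how a Popup might be driven from a frame loop, assuming pygame is initialised and Popup is importable from this module path; the window size, font, and 2-second lifetime are illustrative values only:
```python
# Hypothetical usage; all concrete values here are assumptions for illustration.
import pygame
from Game.Popups.Popup import Popup  # path taken from the file header

pygame.init()
screen = pygame.display.set_mode((600, 800))
font = pygame.font.Font(None, 24)

popup = Popup(300, 60, "Achievement unlocked!", 60 * 2, 300, 100, screen, font=font)

# Inside the frame loop: update() returns the popup while it is alive, None afterwards
if popup:
    popup = popup.update()
```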
#### File: Game/Screens/ClassicScreen.py
```python
import random
import pygame
from pygame.locals import *
from . import Screen
from .. import *
class ClassicScreen(Screen):
def __init__(self, screen_width: int, screen_height: int, screen, sensitivity: int, max_fps: int,
difficulty: Difficulty,
tracker, wave: int = 1, player_lives: int = 3, debug: bool = False):
"""Constructor for Classic screen for the game"""
# Call the superclass init
super().__init__(screen_width, screen_height, State.CLASSIC, screen, 0, 0, debug)
# Store the variables
self.p1_score = 0
self.wave = wave - 1
self.sensitivity = int(sensitivity * self.screen_width / 600)
self.fps = max_fps
self.difficulty = difficulty
self.player_lives = player_lives
self.over = False
# Define a set of constants in the set
self.session_stats = {'sf': 0, 'en_k': 0, 'el_k': 0, 'sl': 0, 'pu': 0, 'mpu': 0, 'ek_c': 0, 'ek_e': 0,
'tut_n_clr': 0, 'st_1_clr': 0,
'st_2_clr': 0, 'st_3_clr': 0, 'st_4_clr': 0, 'st_5_clr': 0, 'st_6_clr': 0, 'coop': 0,
'pvp': 0, 'aivs': 0,
'aicoop': 0}
        self.max_stat = {'ek_c', 'mpu', 'ek_e'}
self.main_stats = {}
self.tracker = tracker
# Create the groups
# Bullets shot by player
self.player1_bullet = pygame.sprite.Group()
# Bullet from Mobs
self.mob_bullet = pygame.sprite.Group()
# Enemyships
self.enemies = EnemyShips(self.state)
self.other_enemies = pygame.sprite.Group()
# Blocks
self.blocks = pygame.sprite.Group()
# Explosions
self.explosions = pygame.sprite.Group()
# Spawn the players
self.spawn_players()
# Initialise values for tracker
self._killed = None
self.max_kill = None
# Set resetted to false
self.resetted = False
# Reset the variables
self.reset()
def spawn_players(self):
"""Spawn the players"""
# Create the player
self.player1 = Player(self.sensitivity, self.screen_width, self.screen_height, self.screen_width // 2,
self.screen_height - 50, 3, self.fps, self.player1_bullet, Direction.UP, self.debug)
def reset(self) -> None:
"""Reset the play screen and variables"""
# Update the tracker
self.update_trackers()
# Reset the session tracker
for key in self.session_stats.keys():
# Reset the value
self.session_stats[key] = 0
# If already resetted
if self.resetted:
# Do nothing
return
# Store mothership cooldown
self.ms_cooldown = 0
self._shots = 0
# Reset the over
self.over = False
# Zero the score and the wave
self.p1_score = 0
self.wave = 0
# Set resetted to True
self.resetted = True
# Empty the sprite groups
self.player1_bullet.empty()
self.mob_bullet.empty()
self.enemies.empty()
self.other_enemies.empty()
self.explosions.empty()
# Reset the blocks to original block group
self.blocks = BlockGroup(self.screen_width, self.screen_height, self.screen_width, self.screen_height // 1.2,
self.screen, 5, self.player1.get_height() + 10)
# Reset the player
self.player1.reset()
# Fetch achievement stats
self.fetch_stats()
def fetch_stats(self, keys: tuple = None) -> dict:
"""Fetch the stats"""
# If there are no keys
if not keys:
# Call the classic trackers
keys = ("sf", "en_k", "sl", "el_k", "ek_c")
# For each item
for key in keys:
# Store the stat in the screen
self.main_stats[key] = self.tracker.get_stat(key)
def get_hitboxes(self) -> list:
"""Get a list of hitboxes of mobs"""
return [(self.player1.get_coord()), tuple(x.get_coord() for x in self.player1_bullet),
tuple(x.get_coord() for x in self.enemies), tuple(x.get_coord() for x in self.mob_bullet)]
def get_enemies(self) -> tuple:
"""Get a tuple of the enemies"""
return tuple(self.enemies)
def comparator(self) -> int:
"""Variable used for comparison"""
return self.get_score()
def update_keypresses(self) -> None:
"""Update the screen based on what the player has pressed"""
# Get all the number of keys
keys = pygame.key.get_pressed()
# Check if they want to pause game
if keys[K_p] or keys[K_ESCAPE]:
return True
if not self.player1.is_destroyed():
# Check player 1 keys
if keys[K_a]:
# Move player 1 to the left
self.player1.move_left()
if keys[K_d]:
# Move player 1 to the right
self.player1.move_right()
if keys[K_SPACE]:
# Let the player shoot
self.player1.shoot()
def spawn_enemy_bullets(self) -> None:
"""Spawns a bullet randomly for the enemy"""
# Check if the enemy can shoot randomly
rand = self.generate_random_no() * self.fps * 4
        # If the roll misses the threshold, the mob does not shoot this frame
if rand > 10:
# Return and no need to shoot
return
# Get a random bullet for the entity to shoot
if len(self.enemies):
# Get a random enemy
enemy = self.get_random_enemy()
else:
# Set enemy to none
enemy = None
# Make the mob shoot the bullet a random direction
self.shoot_bullet(enemy)
def bullet_direction(self) -> Direction:
"""Generate bullet direction for the mob"""
return Direction.DOWN
def shoot_bullet(self, enemy):
"""Make the mob shoot the bullet a random direction"""
# If the set is non-empty
if enemy:
# Get direction of bullet
direction = self.bullet_direction()
# Make the mob shoot
enemy.shoot(direction)
def get_hitboxes_copy(self) -> pygame.Surface:
"""Get a copy of the hitboxes surface"""
# Create a surface
surface = pygame.Surface((self.screen_width, self.screen_height))
# Fill the screen with black
surface.fill((0, 0, 0))
# Draw on the surface
self.draw_hitboxes(surface)
# Return the surface that is drawn on
return surface
def get_entities(self):
"""Get the entities for AI to process"""
return self.enemies, self.other_enemies, self.mob_bullet
def draw_hitboxes(self, screen=None):
"""Draw hitboxes for players and objects"""
# Check if screen is none
        if screen is None:
# Set to self.surface if it is none
screen = self.surface
# Draw the hitbox for the blocks at the bottom
for sprite in self.blocks:
pygame.draw.rect(screen, (0, 255, 0), sprite.rect, 0)
# Draw hitbox for the enemies
for sprite in self.enemies:
c = (sprite.get_lives()) * 3
pygame.draw.rect(screen, (200, 5 * c, 5 * c), sprite.rect, 0)
# Draw the hitbox for the bosses
for sprite in self.other_enemies:
pygame.draw.rect(screen, (150, 150, 150), sprite.rect, 0)
# Draw hitbox for the bullets
for sprite in self.player1_bullet:
pygame.draw.rect(screen, (100, 255, 0), sprite.rect, 0)
for sprite in self.mob_bullet:
pygame.draw.rect(screen, (25, 0, 255), sprite.rect, 0)
# Draw the hitbox for the player
pygame.draw.rect(screen, (55, 255, 10 * self.player1.get_lives()), self.player1.rect, 0)
def update(self) -> None:
"""Update the player and the sprites"""
# If the game was resetted
if self.resetted:
# set resetted as false
self.resetted = False
# Reset the surface
self.reset_surface()
# Update the player position
self.player1.update()
# Update the enemy group
self.enemies.update()
# Update the other sprites group
self.other_enemies.update()
# Attempts to spawn the mothership
self.randomly_spawn_mothership()
# Update the explosions group
self.explosions.update()
# Spawn bullets for enemies
self.spawn_enemy_bullets()
# Update the position of all bullets
self.player1_bullet.update()
self.mob_bullet.update()
# Draw the sprites
self.draw_sprites()
# Call superclass update
super().update()
def draw_sprites(self):
"""Draw the sprites"""
# Draw bullets
self.player1_bullet.draw(self.screen)
self.mob_bullet.draw(self.screen)
# Draw the enemy
self.enemies.draw(self.screen)
# Draw the other_mobs
self.other_enemies.draw(self.screen)
# Draw the explosions
self.explosions.draw(self.screen)
# Draw player object
self.player1.draw(self.screen)
# Draw the block
self.blocks.draw(self.screen)
def get_score(self) -> int:
"""Gets the score of the player in the current state"""
return self.p1_score
def generate_random_no(self) -> int:
"""Generates a random float from 0 to 1"""
# Generate random number
return random.random()
def wave_random(self) -> int:
"""Generate a random number for the life of the enemy"""
# Generate wave random
num = int(self.difficulty.get_multiplier(self.generate_random_no() * self.wave))
return num if num >= 1 else 1
def get_random_enemy(self) -> EnemyShip:
"""Get a random enemy"""
# Get a list of enemies
lst = tuple(self.enemies)
# Randomly choose 1 of them
return lst[int(self.generate_random_no() * (len(lst) - 1))]
def check_block_collision(self):
"""Check collisions of the blocks"""
# Check if the player or the enemies shot the blocks
pygame.sprite.groupcollide(self.player1_bullet, self.blocks, True, True)
pygame.sprite.groupcollide(self.blocks, self.mob_bullet, True, True)
def check_other_mob_collision(self) -> int:
"""Check collisions for special mobs"""
# Get list of ships collided
sprites = list(pygame.sprite.groupcollide(self.player1_bullet, self.other_enemies, True, False).values())
# If there are ships
if sprites:
# Get the first ship
ship = sprites[0][0]
# Destroy the ship 1 time
ship.destroy(self.player1.get_bullet_power())
            # Remove the ship from the group if it has 0 lives
if ship.is_destroyed():
# Spawn an explosion in its place
self.spawn_explosion(ship.get_x(), ship.get_y())
# Remove ship from all groups
ship.kill()
self.accumulate('en_k', 1)
self.accumulate('el_k', 1)
# Return the points of the ship
return ship.get_points()
# return 0 score
return 0
def accumulate(self, k, v):
'''Accumulate a stat in the dictionary'''
self.session_stats[k] += v
if k in self.main_stats:
self.main_stats[k] += v
def update_trackers(self):
'''Update stat tracker stats that we want to add'''
        # Update relevant stats
for k, v in self.main_stats.items():
if k in self.max_stat:
self.tracker.set_max_value(k, self.session_stats[k])
else:
self.tracker.set_value(k, v)
# Check if any achievements was achieved
self.update_achievement()
def handle_threshold(self) -> None:
''' Handle updating threshold value for given statistics -> throws popup on screen'''
if self.session_stats['en_k'] == self.main_stats['ek_c'] + 1 and self.session_stats['ek_c'] == 0:
self.tracker.add_popup("This is the highest kills you got!")
if self.session_stats['en_k'] >= self.main_stats['ek_c'] + 1:
self.session_stats['ek_c'] = self.session_stats['en_k']
def check_collisions(self):
"""Check the objects which collided"""
# Check block collisions
self.check_block_collision()
# Check if player has collided with bullets
bullet_collide = pygame.sprite.spritecollide(self.player1, self.mob_bullet, True)
# If the set is not empty reduce player life
if len(bullet_collide) > 0:
# If it is in debug mode, print the event
if self.debug:
print("Player hit")
# Destroy 1 of the player's life
self.player1.destroy()
# Update Statistic Tracker to track 1 Ship destroyed
self.accumulate('sl', 1)
# Add explosion to the player's position
self.spawn_explosion(self.player1.get_x(), self.player1.get_y())
# Remove bullets that collide with one another
pygame.sprite.groupcollide(self.player1_bullet, self.mob_bullet, True, True)
# Get list of ships destroyed
ships = list(pygame.sprite.groupcollide(self.player1_bullet, self.enemies, True, False).values())
# Initialise score
score = self.check_other_mob_collision()
# Initialise ship to None
ship = None
# If the list of collision is non-empty
if ships:
# Get the ship it collided with
ship = ships[0][0]
# Destroy the first ship in the list (Ensures 1 bullet kill 1 ship only)
ship.destroy(self.player1.get_bullet_power())
if self.debug:
print(f"Ship destroyed")
            # Remove the ship from the group if it has 0 lives
if ship.is_destroyed():
# Spawn an explosion in its place
self.spawn_explosion(ship.get_x(), ship.get_y())
# Remove the ship from all groups
ship.kill()
# Track Statistics for killing ship
self.accumulate('en_k', 1)
# Remove sprites that collide with bullets and return the sum of all the scores
score = ship.get_points()
# Add score to player score
self.p1_score += score
# Returns destroyed ship
return ship
def update_achievement(self):
"""Updates achievements for the game"""
for key, value in self.main_stats.items():
self.tracker.check_unlocked(key, value)
def spawn_explosion(self, x: int, y: int) -> None:
"""Spawn an explosion at specified x and y coordinate"""
# Spawn an explosion
self.explosions.add(Explosion(self.fps // 4, x, y, self.screen_width, self.screen_height, 0, self.debug))
def spawn_enemies(self, number: int) -> None:
"""Spawn n enemies into the game"""
# Make the enemies into rows of 6
for j in range(number // 6 if number // 6 < 5 else 5):
self.enemies.add([EnemyShip(self.sensitivity,
self.screen_width // 4 + i * self.screen_width // 10 * self.screen_width // 600,
self.screen_height // 10 + EnemyShip.sprites[
0].get_height() * j * self.screen_height // 800,
self.wave_random(), self.screen_width, self.screen_height, Direction.DOWN,
self.mob_bullet, self.debug) for i in range(6)])
def randomly_spawn_mothership(self) -> bool:
"""Spawns a mothership randomly, returns if mothership is spawned"""
# If the mothership does not exist and the random roll hits
if not self.ms_cooldown and self.generate_random_no() < 1 / 900:
# Create the mothership
self.other_enemies.add(MotherShip(0, 50, self.screen_width, self.screen_height, 500))
# Set the cooldown for the mothership
self.ms_cooldown = self.fps * 3
# Return True to signify that mothership spawned
return True
# If mothership is still under cooldown
elif self.ms_cooldown:
# Reduce cooldown by 1
self.ms_cooldown -= 1
# return False if a mothership does not spawn
return False
def enemy_touched_bottom(self) -> bool:
"""Check if enemy touched the bottom of the screen"""
return any(
filter(lambda x: (x.get_y() + x.get_height() // 2) > (self.screen_height - self.player1.get_height() - 100),
self.enemies)) or \
any(filter(
lambda x: (x.get_y() + x.get_height() // 2) > (self.screen_height - self.player1.get_height() - 100),
self.other_enemies))
def is_over(self) -> bool:
"""Checks if the game is over"""
return self.over
def draw_letters(self) -> None:
"""Draw the letters on the screen"""
# Draw the score
self.write_main(self.font, WHITE, f"Score : {self.p1_score}", 10, 10, Direction.LEFT)
# Draw the live count
self.write_main(self.font, WHITE, f"Lives : {self.player1.get_lives()}", self.screen_width - 10, 10,
Direction.RIGHT)
# Draw the wave number
self.write_main(self.font, WHITE, f"Wave : {self.wave}", self.screen_width // 2, 15)
def get_pause_state(self) -> State:
"""Get the pause state for the game"""
return State.PAUSE
def get_gameover_state(self) -> State:
"""Get the gameover state for the game"""
return State.GAMEOVER
def end_game(self) -> None:
"""Ends the game and updates relevent statistics"""
self.over = True
def handle(self) -> State:
"""Handle the drawing of the classic screen"""
# If it was initially resetted
if self.resetted:
# Set resetted to False
self.resetted = False
# If player is destroyed, go to gameover state
if self.player1.is_destroyed():
# Set the game to be over
self.end_game()
self.update_trackers()
# Return the gameover state
return self.get_gameover_state()
# Check if any of the enemies touched the bottom of the screen
if self.enemy_touched_bottom():
# If it is debugging mode, print out what happened
if self.debug:
print("Alienship hit the player")
# Set the game to be over
self.end_game()
# Update Trackers for the game variables
self.update_trackers()
# If so it is gameover for the player
return self.get_gameover_state()
# Spawn if there are no enemies
if not len(self.enemies):
# Increase the wave number
self.wave += 1
# Reset wave powerup
self.power_up_numbers = 0
# Spawn the aliens
self.spawn_enemies(int(6 * self.wave))
if self._shots < len(self.player1_bullet):
self.accumulate('sf', len(self.player1_bullet) - self._shots)
# Get the number of player 1 bullets
self._shots = len(self.player1_bullet)
# Check object collisions
self.check_collisions()
# Update the max_killed if threshold is reached
self.handle_threshold()
# Return a list of achievement achieved
self.update_achievement()
# Draw the letters on the screen
self.draw_letters()
# Check the player keypress
if self.update_keypresses():
return self.get_pause_state()
# Update the moving objs
self.update()
# Check if player wants to quit
if self.check_quit():
self.update_trackers()
return State.QUIT
# Return play state
return self.state
```
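The per-enemy life roll in wave_random combines the wave number, a random draw, and the difficulty multiplier. A worked example with assumed inputs (the random draw and difficulty value are illustrative, not values from the game):
```python
# Worked example of the wave_random() arithmetic; roll and difficulty are assumed inputs.
wave = 4
difficulty_value = 4                 # e.g. Difficulty(4)
roll = 0.6                           # a sample output of random.random()
mul = difficulty_value / 2           # Difficulty.get_multiplier's scaling factor
life = int(mul * (roll * wave))      # int(2.0 * 2.4) = 4
life = life if life >= 1 else 1      # wave_random never returns less than 1
print(life)                          # 4
```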
#### File: Game/Screens/CoopScreen.py
```python
import random
import pygame
from pygame.locals import *
from . import PlayScreen
from .. import State, Player, Direction, WHITE, Explosion, Difficulty
class CoopScreen(PlayScreen):
def __init__(self, screen_width: int, screen_height: int, screen, sensitivity: int, fps: int,
difficulty: Difficulty, tracker, player_lives: int = 3, debug: bool = False):
"""Main Coop screen for local cooperative mode"""
# Bullet groups for player 2
self.player2_bullet = pygame.sprite.Group()
# Call the super class
super().__init__(screen_width, screen_height, screen, sensitivity, fps, difficulty, tracker, 1, player_lives,
0.1, debug)
# Set to the correct state
self.set_state(State.COOP)
# Set the state of the enemy ship
self.enemies.set_state(self.state)
def comparator(self):
"""Return variable used for comparison"""
return self.get_score()
def handle_threshold(self) -> None:
"""Not tracking stats for the Coop screen"""
pass
def fetch_stats(self, key: tuple = None) -> dict:
"""Fetch stats for Coop screen"""
# If no key is found
if not key:
key = ("coop",)
# Call the superclass fetch stats
return super().fetch_stats(key)
def reset(self) -> None:
"""Reset the environment"""
# If the environment is already resetted
if self.resetted:
# Do nothing
return
# Reset player 2 score
self.p2_score = 0
# Reset player model
self.player2.reset()
# Empty groups
self.player2_bullet.empty()
# Call the superclass
super().reset()
def update(self) -> None:
"""Update the movement of the sprites"""
# Update player 2 information
self.player2.update()
self.player2_bullet.update()
self.player2_bullet.draw(self.screen)
self.player2.draw(self.screen)
# Call the superclass update
super().update()
def draw_hitboxes(self, screen=None) -> None:
"""Draw hitboxes for players and objects"""
# Check if screen is none
        if screen is None:
# Set to self.surface if it is none
screen = self.surface
# Draw player 2 bullets
for sprite in self.player2_bullet:
pygame.draw.rect(screen, (100, 255, 0), sprite.rect, 0)
# Draw the player 2
pygame.draw.rect(screen, (55, 255, 10 * self.player2.get_lives()), self.player2.rect, 0)
# Call the superclass draw
super().draw_hitboxes(screen)
def spawn_players(self) -> None:
"""Spawn the players"""
# Recreate the players
self.player1 = Player(self.sensitivity, self.screen_width, self.screen_height, self.screen_width // (3 / 2),
self.screen_height - 50, self.player_lives, self.fps, self.player1_bullet, Direction.UP,
self.debug)
self.player2 = Player(self.sensitivity, self.screen_width, self.screen_height, self.screen_width // 3,
self.screen_height - 50, self.player_lives, self.fps, self.player2_bullet, Direction.UP,
self.debug)
def check_players_collision(self) -> None:
"""Check collisions for players"""
        # Check if a mob bullet hit player 1
bullet_hit_m = len(pygame.sprite.spritecollide(self.player1, self.mob_bullet, True))
if bullet_hit_m > 0 and not self.player1.is_destroyed():
self.player1.destroy()
self.explosions.add(Explosion(self.fps // 4, self.player1.get_x(), self.player1.get_y(), self.screen_width,
self.screen_height, 0, self.debug))
        # Check if a mob bullet hit player 2
bullet_hit_m = len(pygame.sprite.spritecollide(self.player2, self.mob_bullet, True))
if bullet_hit_m > 0 and not self.player2.is_destroyed():
self.player2.destroy()
self.explosions.add(Explosion(self.fps // 4, self.player2.get_x(), self.player2.get_y(), self.screen_width,
self.screen_height, 0, self.debug))
def get_score(self) -> int:
"""Return the combined score of the players"""
return super().get_score() + self.p2_score
def get_gameover_state(self) -> State:
"""Returns the gameover mode for this state"""
return State.GAMEOVER
def bullet_direction(self) -> Direction:
"""Set the bullet direction to always go down"""
return Direction.DOWN
def update_keypresses(self) -> bool:
"""Check the keys which are pressed"""
# Get all the number of keys
keys = pygame.key.get_pressed()
# If player 2 is not destroyed
if not self.player2.is_destroyed():
# Check player 2 keys
if keys[K_LEFT]:
self.player2.move_left()
if keys[K_RIGHT]:
self.player2.move_right()
if keys[K_0]:
self.player2.shoot()
# Call the superclass update keypress
return super().update_keypresses()
def check_powerups(self, ship) -> None:
"""Check if the powerups should be spawned"""
# If powerups are not disabled
if self.powerup_chance > 0:
# Roll for chance of powerup spawning
if self.generate_random_no() < self.powerup_chance:
# Spawn the powerup
super().spawn_powerups(ship.get_x(), ship.get_y())
# If the power has not spawned during the wave
elif self.powerup_numbers == 0 and len(self.enemies) == 0:
# Spawn the powerup
super().spawn_powerups(ship.get_x(), ship.get_y())
def check_player_mob_collision(self, player_bullet):
"""Check the collision between the enemies and the players"""
# Check collision of mobs with player 1 bullet
ships = list(pygame.sprite.groupcollide(player_bullet, self.enemies, True, False).values()) + list(
pygame.sprite.groupcollide(player_bullet, self.other_enemies, True, False).values())
# Initialise the points the player got so far
pts = 0
# If the list is non-empty
if ships:
# Iterate through the ship
for ship in ships[0]:
# Destroy the ship
ship.destroy()
# If the ship is destroyed
if ship.is_destroyed():
# Spawn an explosion in its place
self.explosions.add(
Explosion(self.fps // 4, ship.get_x(), ship.get_y(), self.screen_width, self.screen_height, 0,
self.debug))
# Remove the ship from all groups
ship.kill()
# Remove sprites that collide with bullets and return the sum of all the scores
pts += ship.get_points()
# Check if powerups can spawn
self.check_powerups(ship)
# Return with the points the player got
return pts
def check_block_collision(self) -> None:
"""Check collisions with blocks"""
# Check if the players or the enemies shot the blocks
pygame.sprite.groupcollide(self.player1_bullet, self.blocks, True, True)
pygame.sprite.groupcollide(self.blocks, self.mob_bullet, True, True)
pygame.sprite.groupcollide(self.player2_bullet, self.blocks, True, True)
def check_collisions(self) -> None:
"""Check collisions method for Coop"""
# Check collisions of bullets
pygame.sprite.groupcollide(self.player1_bullet, self.player2_bullet, True, True)
pygame.sprite.groupcollide(self.player1_bullet, self.mob_bullet, True, True)
pygame.sprite.groupcollide(self.player2_bullet, self.mob_bullet, True, True)
# Check collisions of players
self.check_players_collision()
# Check powerup collisions
self.check_powerup_collision()
# Check collision of mobs with player 1 bullet
self.p1_score += self.check_player_mob_collision(self.player1_bullet)
# Check collision of mobs with player 2 bullet
self.p2_score += self.check_player_mob_collision(self.player2_bullet)
# Check block collisions
self.check_block_collision()
def check_powerup_collision(self):
"""Check the collisions of the powerups"""
# Call superclass check powerup collisions
super().check_powerup_collision()
# Check the same for player 2
if len(self.powerups):
# Check if player hit the powerups
hit = pygame.sprite.groupcollide(self.player2_bullet, self.powerups, True, True)
# If player hit the powerups
if len(hit):
# For each list of powerups hit
for l in hit.values():
# For each power up in powerup list
for p in l:
# Mutate player and current screen
p.get_ability()(self, self.player2)
def draw_letters(self) -> None:
"""Draw the words on the screen"""
# Draw the wave number
self.write_main(self.font, WHITE, f"Wave: {self.wave}", self.screen_width // 2, 20)
# Draw the lives of player 1
self.write_main(self.font, WHITE, f"P1 Lives: {self.player1.get_lives()}", self.screen_width - 10, 10,
Direction.RIGHT)
# Draw score of player 1
self.write_main(self.font, WHITE, f"P1 Score: {self.p1_score}", 10, 10, Direction.LEFT)
# Draw the lives of player 2
self.write_main(self.font, WHITE, f"P2 Lives: {self.player2.get_lives()}", self.screen_width - 10, 30,
Direction.RIGHT)
# Draw score of player 2
self.write_main(self.font, WHITE, f"P2 Score: {self.p2_score}", 10, 30, Direction.LEFT)
def handle(self) -> State:
"""Handle the drawing of the screen"""
# Check if both players are destroyed
if self.player2.is_destroyed():
# Mark the game as over
self.over = True
# add a game played to the tracker
self.update_trackers()
# Return the gameover state
return self.get_gameover_state()
# Otherwise return the current state
return super().handle()
```
#### File: Game/Screens/HighscoreScreen.py
```python
from . import MenuTemplate
from .. import State, WHITE, Direction
class HighscoreScreen(MenuTemplate):
def __init__(self, screen_width: int, screen_height: int, screen, scoreboard, debug: bool = False):
"""Constructor for the Highscore screen"""
        #Store the scoreboard
self.scoreboard = scoreboard
#Check if it is refreshed
self.refreshed = False
#Refresh the board
self.refresh()
# Keep track of the scores removed
self.removed = []
# Call the superclass
super().__init__(screen_width, screen_height, State.HIGHSCORE, screen, debug)
def refresh(self):
"""Refresh the board"""
if self.refreshed:
return
        # Fetch the scores from the scoreboard, sorted in descending order
self.scores = sorted(self.scoreboard.fetch_all(), key=lambda x: x[-1], reverse=True)
self.refreshed = True
def update_score(self, name: str, score: int) -> None:
"""Update the new player's name and score into the score board"""
# Add the score to the score board
self.scores.append((None, name, score))
# Sort the scores
self.sort_scores()
# Redraw the sprites
self.draw()
def sort_scores(self) -> None:
"""Sort the scores stored internally"""
# Sort the score
self.scores.sort(key=lambda x: x[-1], reverse=True)
# Remove the other scores while there are more than 5 of them
while len(self.scores) > 5:
self.removed.append(self.scores.pop())
def beat_highscore(self, score: int) -> bool:
"""Check if the highscore was beaten"""
return len(self.scores) < 5 or score > self.scores[-1][-1]
def get_scores(self) -> tuple:
"""Get the high score list"""
return tuple(self.scores)
def get_removed(self) -> tuple:
"""Get a list of people removed from the database"""
return tuple(self.removed)
def write_lines(self) -> None:
"""Draws the highscore screen onto the predefined surface"""
# Reset the screen
super().reset_surface()
# Start pixel to print the score
start_px = 200
# Draw the button for back
self.end_rect = self.write(self.end_font, WHITE, "Back", self.screen_width // 2,
self.screen_height // 2 + self.screen_height // 3)
# Draw the highscore header
self.write(self.title_font, WHITE, f"High Scores", self.screen_width // 2, 100)
# Draw the scores of the players
for index, item in enumerate(self.scores[:5]):
# Draw the first half of the scoreboard
self.write(self.end_font, WHITE, f"{index + 1}. {item[1]}".ljust(15, ' '), self.screen_width // 4,
start_px + self.screen_height // (15 / (index + 1)), Direction.LEFT)
# Draw the 2nd half of the scoreboard
self.write(self.end_font, WHITE, f"{item[2]:<5}", self.screen_width // 1.6,
start_px + self.screen_height // (15 / (index + 1)), Direction.LEFT)
def _back(self):
"""Define a back function"""
self.refreshed = False
return State.STAT_MENU_SCREEN
def get_rects(self):
"""Get the rects in the highscore screen"""
return (self.end_rect,)
def get_effects(self):
"""Get the effects of the rect in the highscore screen"""
return (self._back,)
```
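The top-5 trimming in sort_scores keeps the board bounded and records everything that falls off. A standalone sketch with made-up rows shaped like the (id, name, score) tuples used above:
```python
# Standalone sketch of sort_scores()'s top-5 trimming; rows are made up for illustration.
scores = [(1, "AAA", 900), (2, "BBB", 700), (3, "CCC", 500), (4, "DDD", 300), (5, "EEE", 100)]
removed = []
scores.append((None, "NEW", 600))        # update_score() appends the new entry first
scores.sort(key=lambda x: x[-1], reverse=True)
while len(scores) > 5:
    removed.append(scores.pop())         # the lowest entries drop off the board
print([s[1] for s in scores])            # ['AAA', 'BBB', 'NEW', 'CCC', 'DDD']
print([s[1] for s in removed])           # ['EEE']
```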
#### File: Game/Screens/MenuScreen.py
```python
import pygame
from pygame.locals import *
from . import MenuTemplate
from .. import State, WHITE, ImageObject, Direction
class MenuScreen(MenuTemplate):
def __init__(self, screen_width: int, screen_height: int, screen, debug: bool = False):
"""Constructor for the Main Menu screen"""
# Call the superclass
super().__init__(screen_width, screen_height, State.MENU, screen, debug)
def write_lines(self) -> None:
"""Write lines for the menu"""
# Draw the title
self.write(self.title_font, WHITE, "Space Invaders", self.screen_width // 2, self.screen_height // 5)
# Draw the Play button
self.rect_play = self.write(self.end_font, WHITE, "Play", self.screen_width // 2, self.screen_height // 2)
#Draw the highscore button
self.stats = self.write(self.end_font, WHITE, "Statistics", self.screen_width//2, self.screen_height//15 + self.screen_height//2)
#Draw the instructions button
self.rect_instruction = self.write(self.end_font, WHITE, "Game Info", self.screen_width//2, self.screen_height//7.5 + self.screen_height//2)
# Draw the settings button
self.rect_settings = self.write(self.end_font, WHITE, "Settings", self.screen_width // 2,
self.screen_height // 5 + self.screen_height // 2)
#Draw the quit button
self.rect_end = self.write(self.end_font, WHITE, "Quit", self.screen_width//2, self.screen_height//1.2)
#Get the dimensions of the credits
text = "Created by <NAME>"
_, text_height = self.font.size(text)
#Draw the credits
self.write(self.font, WHITE, text, 0, self.screen_height - text_height, direction = Direction.LEFT)
def get_rects(self):
"""Get the rects for the Menu Screen"""
return (self.rect_play, self.stats, self.rect_instruction, self.rect_settings, self.rect_end)
def get_effects(self):
"""Get the effects for Menu Screen"""
return (State.PLAYMODE, State.STAT_MENU_SCREEN, State.INSTRUCTIONS_MENU, State.SETTINGS, State.QUIT)
```
#### File: Game/Screens/PauseScreen.py
```python
import pygame
from pygame.locals import *
from . import MenuTemplate
from .. import State, WHITE
class PauseScreen(MenuTemplate):
# Check if the pause sound has been played
sound = None
def __init__(self, screen_width: int, screen_height: int, screen, score: int, previous_state: State,
debug: bool = False):
"""Main class for the pause screen"""
# Store the score
self.p1_score = score
# Store the previous state
self.previous_state = previous_state
# Call the superclass
super().__init__(screen_width, screen_height, State.PAUSE, screen, debug)
# Set sound played to false
self.played = False
# Set a cd
self.cd = 20
# Write the lines
self.write_lines()
def write_lines(self):
"""Write the lines required for Pause screen"""
# Draw the title of the pause screen
self.write(self.title_font, WHITE, "Paused", self.screen_width // 2, self.screen_height // 5)
# Draw the score of the person currently
self.write(self.subtitle_font, WHITE, f"Score: {self.p1_score}", self.screen_width // 2,
self.screen_height // 2)
# Draw the instructions on how to quit/unpause
self.write(self.end_font, WHITE, f"Click on the button or the shortcut", self.screen_width // 2,
self.screen_height // 2 + self.screen_height // 15)
# Draw the instructions to unpause
self.unpause = self.write(self.end_font, WHITE, "Unpause (P)", self.screen_width // 2,
self.screen_height // 7.5 + self.screen_height // 2)
# Draw the instructions to quit
self.quit = self.write(self.end_font, WHITE, "Quit (Esc)", self.screen_width // 2,
self.screen_height // 5 + self.screen_height // 2)
def get_score(self) -> int:
"""Get the score displayed for the pause screen"""
return self.p1_score
def comparator(self) -> int:
"""Comparison function"""
return self.get_score()
def get_rects(self):
return (self.quit, self.unpause)
def get_effects(self):
return (State.MENU, self.previous_state)
def update_keypresses(self) -> State:
"""Check for the keypresses within the pause screen"""
# Getting the keys which are pressed
keys = pygame.key.get_pressed()
# If the button is still under cooldown
if not self.cd:
# Return the play state if the player unpause his game
if keys[K_p]:
self.played = False
self.cd = 20
return self.previous_state
# If the player press the escape key, quit the game
if keys[K_ESCAPE]:
self.played = False
self.cd = 20
return State.MENU
# Return the current state if the player has not unpaused
return super().update_keypresses()
def handle(self) -> State:
"""Handles the drawing of the pause screen"""
# If there is a pausescreen sound and it has not played
if not self.played and self.sound:
# Play the pause screen sound
self.sound.play('pause')
self.played = True
# If the cooldown is still there
if self.cd:
self.cd -= 1
# Call the superclass handle
return super().handle()
```
#### File: Game/Screens/PlayModesScreen.py
```python
import pygame
from pygame.locals import *
from . import MenuTemplate
from .. import State, WHITE
class PlayModeScreen(MenuTemplate):
def __init__(self, screen_width: int, screen_height: int, screen, debug: bool = False):
"""Main screen for the different play modes"""
# Call the super class
super().__init__(screen_width, screen_height, State.PLAYMODE, screen, debug)
def write_lines(self) -> None:
"""Write the lines for the Play Mode screen"""
# First pixel for alignment
first_pixel = self.screen_height // 2
# Draw the Header
self.header = self.write(self.title_font, WHITE, "Modes", self.screen_width // 2, self.screen_height // 5)
# Draw the rectangles for the different game modes
# Rect for tutorial
self.tutorial = self.write(self.end_font, WHITE, "Tutorial", self.screen_width // 2,
first_pixel + self.screen_height // 5)
# Rect for AI modes
self.ai_modes = self.write(self.end_font, WHITE, "AI Modes", self.screen_width // 2,
first_pixel + self.screen_height // 7.5)
# Rect for the single player mode
self.one_player = self.write(self.end_font, WHITE, "1 Player Modes", self.screen_width // 2, first_pixel)
# 2 Player mode (2 player mode menu)
self.two_player = self.write(self.end_font, WHITE, "2 Player Modes", self.screen_width // 2,
first_pixel + self.screen_height // 15)
# Back button
self.back = self.write(self.end_font, WHITE, "Back", self.screen_width // 2, self.screen_height // 1.2)
def get_rects(self):
# Store all the buttons
return (self.one_player, self.two_player, self.ai_modes, self.tutorial, self.back)
def get_effects(self):
# Get the desired output states
return (State.ONE_PLAYER_MENU, State.TWO_PLAYER_MENU, State.AI_MENU, State.TUTORIAL, State.MENU)
```
#### File: Game/Screens/SettingsScreen.py
```python
from . import MenuTemplate
from .. import State, WHITE, Direction, Sound, Background, Difficulty, GREY
class SettingsScreen(MenuTemplate):
def __init__(self, screen_width: int, screen_height: int, screen, fps: int, sound: Sound, background: Background,
difficulty: Difficulty, volume: float, tracker, debug: bool = False):
"""Constructor for the settings screen"""
# Store the variables
self.fps = fps
self.sound = sound
self.bgr = background
self.difficulty = difficulty
self.tracker = tracker
# Call the superclass
super().__init__(screen_width, screen_height, State.SETTINGS, screen, debug)
def write_lines(self) -> None:
"""Write the lines for the settings screen"""
# Draw the Header
self.write(self.title_font, WHITE, "Settings", self.screen_width // 2, self.screen_height // 5)
#Draw the reset button
self.reset_btn = self.write(self.end_font, WHITE, "Reset all", self.screen_width // 2, self.screen_height //1.2 - self.screen_height//15)
# Draw the back button
self.back = self.write(self.end_font, WHITE, "Back", self.screen_width // 2, self.screen_height // 1.2)
# Write the updated stats
self.write_main_words()
def write_main_words(self) -> None:
"""Write the main lines"""
# First pixel used for alignment
first_pixel = self.screen_height // 2
# Draw the different settings options
self.background = self.write_main(self.end_font, WHITE, f"Background: {self.get_bg_no()}",
self.screen_width // 4, first_pixel, Direction.LEFT)
self.music = self.write_main(self.end_font, WHITE, f"Music: {'On' if self.get_music_enabled() else 'Off'}",
self.screen_width // 4, first_pixel + self.screen_height // 15, Direction.LEFT)
self.difficulty_rect = self.write_main(self.end_font, WHITE, f"Difficulty: {self.get_difficulty().title()}",
self.screen_width // 4, first_pixel + self.screen_height // 7.5,
Direction.LEFT)
# If sound is enabled
if self.get_music_enabled():
# Write to the screen that the music is enabled
self.sound_btn = self.write_main(self.end_font, WHITE, f"Sound: {int(self.get_volume() * 100)}",
self.screen_width // 4, first_pixel + self.screen_height // 5,
Direction.LEFT)
# Otherwise
else:
# Write that the music is disabled
self.sound_btn = self.write_main(self.end_font, GREY, f"Sound: {int(self.get_volume() * 100)}",
self.screen_width // 4, first_pixel + self.screen_height // 5,
Direction.LEFT)
def get_rects(self):
"""Return the rects of the rect"""
return (self.background, self.music, self.difficulty_rect, self.sound_btn, self.reset_btn, self.back)
def get_effects(self):
"""Return the effects for the rects"""
return (self.cycle_background, self.toggle_sound, self.toggle_difficulty, self.toggle_volume, State.RESET_SCREEN, State.MENU)
def get_bg_no(self) -> int:
"""Get the current background that the user chose"""
return self.bgr.get_bg_no()
def get_music_enabled(self) -> bool:
"""Get the state of music"""
return self.sound.get_state()
def get_difficulty(self) -> str:
"""Get the difficulty name"""
return self.difficulty.name
def get_difficulty_no(self) -> int:
"""Get the difficulty number"""
return self.difficulty.value
def get_volume(self) -> float:
"""Return the volume of the sound"""
return self.sound.get_volume()
def cycle_background(self) -> State:
"""Cycle the background"""
self.bgr.cycle()
return self.state
def toggle_sound(self) -> State:
"""Toggle the sound"""
self.sound.toggle()
return self.state
def toggle_difficulty(self) -> State:
self.difficulty.toggle()
return self.state
def toggle_volume(self) -> State:
self.sound.volume_toggle()
return self.state
def handle(self) -> State:
"""Handle the drawing of the settings screen"""
# Write the main words
self.write_main_words()
# Call the superclass handle
return super().handle()
```
#### File: Game/Sprites/Crabs.py
```python
import random
from . import EnemyShip, Bullet
from .. import Direction
class Crabs(EnemyShip):
# Sprites for the Crabs
sprites = []
def __init__(self, sensitivity: int, initial_x: int, initial_y: int, lives: int, game_width: int, game_height: int,
bullet_grp, debug: bool):
"""The main class for Crabs"""
# Call the superclass init with 1.2 times the sensitivity to make it move faster
super().__init__(sensitivity, initial_x, initial_y, lives, game_width, game_height, None, bullet_grp, debug)
# Scale according to the fps
self.scale(50 * game_width // 600, 50 * game_height // 800)
# Store the x_velocity and y_velocity (To be fine tuned later)
self.delta_x = self.sensitivity // 4
self.delta_y = self.sensitivity // 4
# Set the score for the crabs
self.set_points(200 * lives)
def shoot(self, direction: Direction = None):
"""Lets the mob shoot"""
# If the direction is not set
if not direction:
# Choose a random btm left/ right direction
direction = random.choice([Direction.BOTTOM_LEFT, Direction.BOTTOM_RIGHT])
# Add the bullet to the bullet group
self.bullet_grp.add(
Bullet(self.sensitivity * 1.5, self.get_center()[0], self.get_y(), direction, self.game_width,
self.game_height, self.debug))
def update(self) -> None:
"""Overridden update class for the scout boss"""
# If the object has touched the edge
if self.touch_edge():
# Change x direction
self.delta_x = -self.delta_x
# Update the position of the ship
self.move(self.delta_x, self.delta_y)
# Call the superclass update
return super().update(1)
```
#### File: Game/Sprites/Explosion.py
```python
import pygame
from . import ImageObject
class Explosion(ImageObject):
# Init for the sound
sound = None
# To store the sprites for the explosions
sprites = []
def __init__(self, tick_life: int, initial_x: int, initial_y: int, game_width: int, game_height: int,
image_no: int = 0, debug: bool = False):
"""The main class for the explosion"""
# Get the correct image of the explosion
if image_no < len(Explosion.sprites):
image = Explosion.sprites[image_no]
else:
image = Explosion.sprites[0]
# Call the superclass method
super().__init__(initial_x, initial_y, game_width, game_height, image, debug)
        # Play the explosion sound
self.sound.play('explosion')
# Set the time to live for the explosion
self.tts = tick_life
def update(self):
"""Update the explosion"""
# If the explosion still has time to live
if self.tts:
# Decrease time to live
self.tts -= 1
# Otherwise kill it
else:
self.kill()
# Call the superclass update method
super().update()
```
#### File: Game/Sprites/Player.py
```python
import pygame
from . import MovingObject, Bullet
from .. import Direction
class Player(MovingObject):
    # Class attribute to store the sprites
sprites = []
def __init__(self, sensitivity: int, game_width: int, game_height: int, initial_x: int, initial_y: int,
init_life: int,
                 fps: int, bullet_grp: pygame.sprite.Group, bullet_direction: Direction,
debug: bool = False, isAI: bool = False):
"""Main class for the player object"""
# Store the items
self.is_AI = isAI
self.bullet_direction = bullet_direction
# Call the superclass
super().__init__(sensitivity, initial_x, initial_y, game_width, game_height, Player.sprites[-1], debug)
# Scale the image to 50x50
self.scale(50 * game_width // 600, 50 * game_height // 800)
        # Invincibility when it just spawned
self.invincible = fps
# Player bullet group
self.bullet_grp = bullet_grp
# If the life is not valid set it to 3 by default
if init_life <= 0:
init_life = 3
# Initial amount of life
self.init_life = init_life
        # Set rotation to 0
self.rotation = 0
# Store game variables
self.fps = fps
# Reset player character
self.reset()
def reset(self) -> None:
"""Reset the player stats to original stats"""
# Reset life
self.life = self.init_life
# Reset shooting cooldown
self.maxcooldown = self.fps // 2.5
# Keep track of bullet cooldown
self.cooldown = 0
# Reset position
self.x = self.initial_x
self.y = self.initial_y
# Reset the bullet power
self.bullet_power = 1
# Rerender rect
self.changed = True
        # Give the player 1 second of invincibility
self.invincible = self.fps
def isInvincible(self) -> bool:
"""Check if the player is invincible"""
return self.invincible > 0
def increase_bullet_power(self, inc: int):
"""Increase the player bullet power by inc"""
self.bullet_power += inc
def get_bullet_power(self) -> int:
"""Return the bullet power of the player"""
return self.bullet_power
def add_lifes(self, no: int) -> None:
"""Adds life to the player"""
assert no > 0
self.life += no
def isAI(self) -> bool:
"""Check if it is an ai instance of the Player"""
return self.is_AI
def on_cooldown(self) -> bool:
"""Check if shooting is on cooldown"""
return self.cooldown > 0
def shoot(self) -> bool:
"""Lets the player shoot a bullet if the player is not on cooldown"""
# If the player is not on cooldown
if not self.on_cooldown():
# Add the bullet to the bullet group
self.bullet_grp.add(
Bullet(self.sensitivity * 1.5, self.get_center()[0], self.get_y(), self.bullet_direction,
self.game_width, self.game_height, self.debug))
# Reset the cooldown
self.cooldown = self.maxcooldown
# Return True if the player has shot
return True
# Return false if player fails to shoot
return False
def move_up(self) -> None:
"""Do not allow the player to move up"""
pass
def move_down(self) -> None:
"""Do not allow the player to move down"""
pass
def move_left(self) -> None:
"""Move the player to the left"""
# If the player is not at the leftmost part of the screen
if self.x > self.image.get_width() // 8:
# allow the player to move left
super().move_left()
# Otherwise print debug message
elif self.debug:
print("Hit left most")
def move_right(self) -> None:
"""Move the player right"""
# If the player is not at the right most
if self.x <= self.game_width:
# allow the player to move right
super().move_right()
# Otherwise print debug message
elif self.debug:
print("Hit right most")
def is_destroyed(self) -> bool:
"""Returns whether the player is destroyed"""
return self.get_lives() == 0
def destroy(self, lives: int = 1) -> None:
"""Destroys the ship 1 time"""
# If the player is not invincible
if not self.invincible:
# Reduce the life of the player
if self.life < lives:
self.life = 0
else:
self.life -= lives
# Make the player invincible for 1 second
self.invincible = self.fps
def get_lives(self) -> int:
"""Get the number of lives left"""
return self.life
def rotate(self, angle: int):
"""Store the rotation to be updated when sprite changes"""
# Store the angle rotation
self.rotation = angle
# Call the super rotate class method
return super().rotate(self.rotation)
def update(self) -> None:
"""Update the position of the player"""
# If the player is invincible
if self.invincible > 0:
# Reduce invincibility amount
self.invincible -= 1
# If the player gun is on cooldown
if self.cooldown > 0:
# Reduce cooldown
self.cooldown -= 1
# Load the Image of the player based on his life
self.image = Player.sprites[
self.get_lives() - 1 if self.get_lives() < len(Player.sprites) else len(Player.sprites) - 1]
# Rotate the corresponding image
self.rotate(self.rotation)
# Scale the image to 50x50
self.scale(50 * self.game_width // 600, 50 * self.game_height // 800)
# Call the super update
return super().update()
```
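A note on construction order: `Player.sprites` is a class-level list that must already contain one surface per life before a `Player` is built, since `__init__` reads `Player.sprites[-1]` and `update()` indexes by the remaining lives. A minimal sketch, assuming the import paths implied by the `File` headers and that `Direction` has an `UP` member (neither is shown in this file):
```python
import pygame
from Game.Sprites.Player import Player   # import paths assumed from the File headers above
from Game import Direction               # Direction.UP below is an assumed member

pygame.init()
pygame.display.set_mode((600, 800))
Player.sprites = [pygame.Surface((50, 50)) for _ in range(3)]  # placeholder surface per remaining life
bullets = pygame.sprite.Group()
player = Player(sensitivity=5, game_width=600, game_height=800, initial_x=300, initial_y=740,
                init_life=3, fps=60, bullet_grp=bullets, bullet_direction=Direction.UP, debug=True)
```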
#### File: Game/Stages/Stage5Screen.py
```python
from . import StoryTemplate
from .. import State, ImageObject, Direction, WHITE, Crabs, Brute, Scout, AchievmentTracker
class Stage5Screen(StoryTemplate):
def __init__(self, screen_width: int, screen_height: int, screen, sensitivity: int, max_fps: int,
tracker: AchievmentTracker, debug: bool):
"""The constructor for the Stage 3 screen"""
# Call the superclass init method
super().__init__(screen_width, screen_height, screen, State(104), sensitivity, max_fps, 0.2, tracker, debug)
# Commander brief image
self.bg = ImageObject(self.screen_width // 2, int(self.screen_height * 285 / 800), 600, 570,
StoryTemplate.sprites_dict["commander_brief"], debug)
self.bg.scale(self.screen_width, int(self.screen_height * 57 / 80))
# Image of figure head
self.alon_dusk = ImageObject(self.screen_width // 2, int(self.screen_height * 210 / 800), 217, 217,
StoryTemplate.sprites_dict['alon_sama'], debug)
self.alon_dusk.scale(int(217 * screen_width // 600), int(217 * screen_height // 800))
# Image of the commander
self.commander = ImageObject(self.screen_width // 2, int(self.screen_height * 210 / 800), 217, 217,
StoryTemplate.sprites_dict['silloette_commander'], debug)
self.commander.scale(int(217 * screen_width // 600), int(217 * screen_height // 800))
# Textbox
self.tb = ImageObject(self.screen_width // 2, int(self.screen_height * 685 / 800), 600, 230,
StoryTemplate.sprites_dict['textbox'], debug)
self.tb.scale(self.screen_width, int(self.screen_height * 23 / 80))
def draw_bg(self):
"""Draw the background"""
# Draw the commander brief
self.bg.draw(self.screen)
# Draw the textbox
self.tb.draw(self.screen)
def pre_cutscene(self):
"""The pre_cutscene for the class"""
# Lower cd of click if it is still on cooldown
if self.click_cd:
self.click_cd -= 1
# Insert the Icon for the char speaking
if self.clicks <= 1:
self.alon_dusk.draw(self.screen)
else:
self.commander.draw(self.screen)
# Draw the background
self.draw_bg()
# Draw the next button
self.next_btn = self.write_main(self.end_font, WHITE, "Next", (580 / 600 * self.screen_width),
self.tb.rect.top - 30, Direction.RIGHT)
# Check if the next button is clicked
if self.check_clicked(self.next_btn) and not self.click_cd:
# Increment the clicks
self.clicks += 1
# Reset the cooldown
self.click_cd = self.fps // 5
# Pixel vars for alignment
first_px = self.tb.rect.top + 75
left_px = 40
if self.clicks == 0:
# Write the character name text
self.write_main(self.end_font, WHITE, "<NAME>", 33, self.tb.rect.top + 15, Direction.LEFT)
# Write the character speech text
self.render_speech(first_px, left_px,
["The main forces of the enemy are here, and they have strange tricks",
"up their sleeves."])
elif self.clicks == 1:
# Write the character name text
self.write_main(self.end_font, WHITE, "<NAME>", 33, self.tb.rect.top + 15, Direction.LEFT)
# Write part 2 of the speech
self.render_speech(first_px, left_px, ["Be wary of the CRABS who attack in unorthodox ways,",
"and break our formations. We must break through with ",
"a full frontal assault!"])
elif self.clicks == 2:
# Write the character name text
self.write_main(self.end_font, WHITE, "Commander", 33, self.tb.rect.top + 15, Direction.LEFT)
# Write part 3 of the speech
self.render_speech(first_px, left_px,
["I will crush their will to fight and end this invasion in their tracks"])
else:
# Reset the clicks
self.clicks = 0
# Move to the next scene
self.next_scene()
# Return the current state
return self.state
def post_cutscene(self):
"""The post cutscene for stage 3"""
# Lower cd of click if it is still on cooldown
if self.click_cd:
self.click_cd -= 1
# Insert the Icon for the char speaking
if self.clicks <= 2:
self.alon_dusk.draw(self.screen)
else:
self.commander.draw(self.screen)
# Draw the background
self.draw_bg()
# Draw the next button
self.next_btn = self.write_main(self.end_font, WHITE, "Next", (580 / 600 * self.screen_width),
self.tb.rect.top - 30, Direction.RIGHT)
# Check if the next button is clicked
if self.check_clicked(self.next_btn) and not self.click_cd:
# Increment the clicks
self.clicks += 1
# Reset the cooldown
self.click_cd = self.fps // 5
# Pixels for alignment
first_px = self.tb.rect.top + 75
left_px = 40
# Drawing of the speech
if self.clicks == 0:
# Write the character name text
self.write_main(self.end_font, WHITE, "<NAME>", 33, self.tb.rect.top + 15, Direction.LEFT)
# Write the character speech text
self.render_speech(first_px, left_px, ["Strange, results of the analysis of the enemy have come back,",
"and it appears that they all come from a common source in Pluto."])
elif self.clicks == 1:
# Write the character name text
self.write_main(self.end_font, WHITE, "<NAME>", 33, self.tb.rect.top + 15, Direction.LEFT)
# Write part 2 of speech
self.render_speech(first_px, left_px, ["This means that the threat… is not from an alien at all! ", ])
elif self.clicks == 2:
# Write the character name text
self.write_main(self.end_font, WHITE, "<NAME>", 33, self.tb.rect.top + 15, Direction.LEFT)
# Write part 3 of speech
self.render_speech(first_px, left_px, ["It points towards instead that something in Pluto is manufacturing",
"these Invading Creatures and sending them against us."])
elif self.clicks == 3:
# Write the character name text
self.write_main(self.end_font, WHITE, "Commander", 33, self.tb.rect.top + 15, Direction.LEFT)
# Write part 4 of the speech
self.render_speech(first_px, left_px, ["I have a bad feeling about this, you should consolidate the",
"main forces here before proceeding."])
elif self.clicks == 4:
# Write the character name text
self.write_main(self.end_font, WHITE, "Commander", 33, self.tb.rect.top + 15, Direction.LEFT)
# Write part 5 of the speech
self.render_speech(first_px, left_px, ["I will be the vanguard and set up an encampment first."])
else:
# Reset the clicks
self.clicks = 0
# Move to the next scene
self.next_scene()
# Move to the next scene
return self.get_victory_state()
# Return the current state
return self.state
def spawn_enemies(self, number: int) -> None:
"""Spawn enemies if the conditions are met"""
# Check the spawning of mobs based on the wave
if self.wave == 1:
rows = 2
elif self.wave >= 2 and self.wave < 5:
rows = 3
elif self.wave == 5:
rows = 1
elif self.wave >= 6 and self.wave < 8:
rows = 4
else:
rows = 5
return super().spawn_enemies(rows * 6)
def _spawn_brute(self, x):
"""Spawn a brute at position X, Y"""
# Add a brute to the enemies group
self.other_enemies.add(
Brute(self.sensitivity, x, self.screen_height // 10, self.screen_width, self.screen_height, self.mob_bullet,
self.debug))
def _spawn_scout(self, x):
"""Spawn a scout at position x and y"""
# Add the scout to the other enemies grp
self.other_enemies.add(
Scout(self.sensitivity, x, self.screen_height // 10, 1, self.screen_width, self.screen_height,
self.mob_bullet, self.debug))
def _spawn_crabs(self, x):
# Add the scout to the other enemies grp
self.other_enemies.add(
Crabs(self.sensitivity, x, self.screen_height // 10, 1, self.screen_width, self.screen_height,
self.mob_bullet, self.debug))
def spawn_scout(self):
"""Spawn scouts"""
# Spawn scouts for the appropriate wave
if self.wave == 3:
self._spawn_scout(self.screen_width // 2)
elif self.wave >= 7:
for i in range(1, 3):
self._spawn_scout(self.screen_width // (3 / i))
def spawn_brute(self):
"""Spawn brutes do not spawn brutes"""
if self.wave == 2:
self._spawn_brute(self.screen_width // 2)
elif self.wave == 5:
for i in range(1, 3):
self._spawn_brute(self.screen_width // (3 / i))
def spawn_crabs(self):
"""Spawn crabs"""
if self.wave == 4:
self._spawn_crabs(self.screen_width // 3)
self._spawn_crabs(self.screen_width // (3 / 2))
elif self.wave == 5 or self.wave == 6:
for i in range(1, 5):
self._spawn_crabs(self.screen_width // (4 / i))
elif self.wave == 8:
for i in range(1, 9):
self._spawn_crabs(self.screen_width // (8 / i))
def play(self):
"""The playing stage for the game"""
# Call the superclass play
return super().play()
def win_condition(self):
"""The win condition of the player"""
return self.wave == 9
```
#### File: Game/Stages/StoryTemplate.py
```python
from .. import PlayScreen, State, Difficulty, Direction, WHITE, AchievmentTracker
class StoryTemplate(PlayScreen):
# Store the sprites to be used for stories
sprites_dict = {}
def __init__(self, screen_width: int, screen_height: int, screen, state: State, sensitivity: int, max_fps: int,
powerup_chance: float, tracker: AchievmentTracker, debug: bool):
"""The template for the stage to be built on"""
#Set the state
self.something = state
# Call the superclass
super().__init__(screen_width, screen_height, screen, sensitivity, max_fps, Difficulty(3),
tracker, 1, 3, powerup_chance, debug)
# Reset the game
self.reset()
def fetch_stats(self, keys: tuple = None):
if not keys:
keys = ('ek_e', 'en_k', 'el_k', 'pu', 'sf', f'st_{self.get_stage()}_clr')
return super().fetch_stats(keys)
def handle_threshold(self) -> None:
pass
def draw_letters(self) -> None:
"""Draw the letters on the screen"""
# Draw the score
self.write_main(self.font, WHITE, self.get_stage_name(), 10, 10, Direction.LEFT)
# Draw the live count
self.write_main(self.font, WHITE, f"Lives : {self.player1.get_lives()}", self.screen_width - 10, 10,
Direction.RIGHT)
# Draw the wave number
self.write_main(self.font, WHITE, f"Wave : {self.wave}", self.screen_width // 2, 15)
def reset(self) -> None:
"""Reset the state of the game"""
# Set the state
self.set_state(self.something)
# Set the number of clicks to 0
self.clicks = 0
# Set the cooldown to max
self.click_cd = self.fps // 5
# Reset the flag to play the cutscenes
self.curr = 0
# Call the superclass reset
return super().reset()
def render_speech(self, first_px: int, left_px: int, speech: tuple) -> None:
"""Render the speech onto the screen"""
# Loop through the speech
for index, text in enumerate(speech):
# Break if the speech is more than 6 lines
if index == 6:
break
# Render each line of speech 15 pixels apart
self.write_main(self.font, WHITE, text, left_px, first_px + index * 15, Direction.LEFT)
def next_scene(self):
"""Increment the scene counter to the next scene"""
self.curr += 1
def win_condition(self) -> bool:
"""Check if the player has won"""
raise NotImplementedError("Please override this method")
def get_stage(self) -> int:
"""Return the stage number"""
return self.state.value - 99
def get_stage_name(self):
"""Get the name of the stage"""
return f"Stage {self.get_stage()}"
def pre_cutscene(self):
"""Plays the precutscene for the story"""
raise NotImplementedError("Please override the pre_cutscene method")
def post_cutscene(self):
"""Plays the postcutscene for the story"""
raise NotImplementedError("Please override the post_cutscene method")
def play(self):
"""Stage where the player plays the game"""
return super().handle()
def get_victory_state(self):
"""Get the state of the game when the player wins"""
return State.VICTORY
def get_pause_state(self):
"""Get the pause state of the stages"""
return State.STAGE_PAUSE
def get_gameover_state(self):
"""Get the state of the game when the player loses"""
return State.STAGE_GAMEOVER
def comparator(self):
"""Variable used for comparison"""
return self.get_stage()
def update_trackers(self, win=False):
'''
If there is a win, add one to the clear state and continue normally with tracked stats
'''
if win:
text = f'st_{self.get_stage()}_clr'
self.accumulate(text, 1)
return super().update_trackers()
def handle(self):
"""Handles the playing out of the screen"""
# If player is destroyed or enemies hit the bottom, go to gameover state
if self.player1.is_destroyed() or self.enemy_touched_bottom():
# Cause the game to end
self.reset()
# update the statistic values
self.update_trackers()
# Return the gameover state
return self.get_gameover_state()
# Otherwise if player wins
elif self.win_condition():
self.update_trackers(True)
# Go to next state
self.next_scene()
self.wave = 0
# If it is the pre_cutscene stage
if self.curr == 0:
return self.pre_cutscene()
# If it is the play state
elif self.curr == 1:
return self.play()
# If it is the post_cutscene state
else:
return self.post_cutscene()
```
#### File: Orbital/Space_invaders Server/CheckableQueue.py
```python
import queue
#Main class for the checkable queue
class CheckableQueue(queue.Queue): # or OrderedSetQueue
def __contains__(self, item):
"""Check if the item is inside the queue"""
with self.mutex:
return item in self.queue
```
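A short usage sketch (assuming the module is importable under its file name): the overridden `__contains__` lets callers ask whether an item is queued without draining the queue, at the cost of an O(n) scan under the queue's mutex.
```python
from CheckableQueue import CheckableQueue   # module name assumed from the file name above

q = CheckableQueue()
q.put("192.168.1.50")
print("192.168.1.50" in q)  # True  -- the `in` test delegates to __contains__
print("10.0.0.1" in q)      # False
```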
#### File: Orbital/Space_invaders Server/server.py
```python
import socketserver
import logging
import pickle
import random
#Configure logging format
logging.basicConfig(level=logging.CRITICAL, format = '%(asctime)s - %(levelname)s - %(message)s')
class Request_Handle(socketserver.BaseRequestHandler):
player = {}
pair = {}
first = set()
waiting_list = set()
random = random.random()
@staticmethod
def remove_from_all(player):
"""Remove player from all vars"""
del Request_Handle.player[player]
del Request_Handle.pair[player]
Request_Handle.first.remove(player)
Request_Handle.waiting_list.remove(player)
def handle(self):
"""Method to handle the client"""
#Handle a single request from the client
data = pickle.loads(self.request.recv(2048))
player = self.client_address[0]
#Get id of the player
logging.critical(f"{self.client_address[0]} wrote: {data}")
logging.debug(f"Request handle data: {Request_Handle.player}")
#If data is empty
if not data:
#Remove the players
Request_Handle.remove_from_all(player)
self.request.close()
return
#If player is new player
if player not in Request_Handle.player:
#Place data into dict
Request_Handle.player[player] = data
#Check if there is anyone waiting
if len(Request_Handle.waiting_list) > 0:
#Get the first player and remove from the list
partner = Request_Handle.waiting_list.pop()
#Form the players as pairs
Request_Handle.pair[partner] = player
Request_Handle.pair[player] = partner
#Otherwise
else:
#Add the player to the waiting list
Request_Handle.waiting_list.add(player)
Request_Handle.first.add(player)
#If player is waiting
if player in Request_Handle.waiting_list:
#Msg is waiting
msg = {'waiting':True}
#If player is not waiting send his partner data over and pairing exist
else:
#Get the partner
partner = Request_Handle.pair[player]
#If the data is blank
if not Request_Handle.player[partner]:
#Partner has disconnected
msg = {'disconnected': True}
else:
#Send the partner data
msg = {'data':Request_Handle.player[partner],
'waiting':False,
'seed':Request_Handle.random,
'isfirst': player in Request_Handle.first,
'disconnected': False}
#Send the msg
self.request.sendall(pickle.dumps(msg))
self.request.close()
class Server(socketserver.TCPServer):
def __init__(self, address, request_handle):
super().__init__(address, request_handle)
if __name__ == '__main__':
ip = '192.168.1.215'
port = 9999
server = Server((ip,port), Request_Handle)
logging.critical(f"Starting server on: {ip}:{port}")
server.serve_forever()
```
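The handler reads exactly one pickled payload per connection and answers with a pickled dict (`waiting`, or `data`/`seed`/`isfirst`/`disconnected` once paired). A minimal client sketch under those assumptions; the host, port, and payload contents are placeholders, not part of the original project:
```python
import pickle
import socket

def poll_server(host: str = "127.0.0.1", port: int = 9999, payload: dict = None) -> dict:
    """Send one pickled payload and return the server's pickled reply."""
    payload = payload or {"x": 300, "shoot": False}   # any truthy, picklable game state (placeholder)
    with socket.create_connection((host, port)) as sock:
        sock.sendall(pickle.dumps(payload))
        return pickle.loads(sock.recv(2048))

# reply = poll_server()
# if reply.get("waiting"):
#     print("No partner yet -- poll again next frame")
```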
#### File: Space_Invaders/tests/BaseObjectTest.py
```python
import os
import sys
import unittest
import pygame
# Add the project root to sys.path so the classes package can be imported
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from classes import *
class BaseObjectTester(unittest.TestCase):
def setUp(self):
return super().setUp()
def tearDown(self):
return super().tearDown()
def testCase1(self):
"""
Test case 1: Check get coord method
"""
obj = BaseObject(0, 0, False)
assert obj.get_coord() == (0, 0)
def testCase2(self):
"""
Test case 2: Check the set coord method
"""
obj = BaseObject(0, 0, False)
obj.set_coord((1, 2))
assert obj.get_coord() == (1, 2)
def testCase3(self):
"""
Test case 3: Check the get_x method
"""
obj = BaseObject(0, 0, False)
assert obj.get_x() == 0
def testCase4(self):
"""
Test case 4: Check the get_y method
"""
obj = BaseObject(0, 1, False)
assert obj.get_y() == 1
# Main function to run the tests
if __name__ == "__main__":
unittest.main()
```
#### File: Space_Invaders/tests/ColorTest.py
```python
import os
import sys
import unittest
# Add the project root to sys.path so the classes package can be imported
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from classes import *
class ColorTest(unittest.TestCase):
"""Testing the if the colors are accurate"""
def setUp(self):
return super().setUp()
def tearDown(self):
return super().tearDown()
def testCase1(self):
"""
Test case 1: White color
"""
assert WHITE == (255, 255, 255), "White color is off"
def testCase2(self):
"""
Test case 2: Grey color
"""
assert GREY == (60, 60, 60), "Grey color is off"
def testCase3(self):
"""
Test case 3: Black color
"""
assert BLACK == (0, 0, 0), "Black color is off"
def testCase4(self):
"""
Test case 4: Red color
"""
assert RED == (255, 0, 0), "Red color is off"
def testCase5(self):
"""
Test case 5: Blue color
"""
assert BLUE == (0, 0, 255), "Blue color is off"
def testCase6(self):
"""
Test case 6: Green color
"""
assert GREEN == (0, 128, 0), "Green color is off"
def testCase7(self):
"""
Test case 7: Lime color
"""
assert LIME == (0, 255, 0), "Lime color is off"
def testCase8(self):
"""
Test case 8: Yellow color
"""
assert YELLOW == (255, 255, 0), "Yellow color is off"
if __name__ == "__main__":
unittest.main()
```
#### File: Space_Invaders/tests/DifficultyTest.py
```python
import os
import sys
import unittest
# Add the project root to sys.path so the classes package can be imported
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from classes import *
class DifficultyTest(unittest.TestCase):
def setUp(self):
return super().setUp()
def tearDown(self):
return super().tearDown()
def testCase1(self):
"""
Test case 1: Check if get multiplier returns an int
"""
self.difficulty = Difficulty(1)
assert type(self.difficulty.get_multiplier(1)) == int
def testCase2(self):
"""
Test case 2: Check if the value is correct
"""
# Check the value for range between [1, 6]
for i in range(1, 7):
assert Difficulty(i).value == i
def testCase3(self):
"""
Test case 3: Check if the names are correct
"""
names = ["CASUAL", "EASY", "MEDIUM", "HARD", "IMPOSSIBLE", "OUTRAGEOUS"]
for i in range(1, 7):
d = Difficulty(i)
assert d.name == names[i - 1], f"Name {d.name} is not {names[i - 1]}"
if __name__ == "__main__":
unittest.main()
```
#### File: Space_Invaders/tests/settingstest.py
```python
import os
import sys
import unittest
# Add the project root to sys.path so the classes package can be imported
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from classes import *
# Relevant functions to be tested
class SettingsTester(unittest.TestCase):
def setUp(self) -> None:
"""Set up for main class"""
# Call the superclass setup
super().setUp()
# Read the config file from the settings
self.config = read_settings(form_abs_path(__file__, "../settings.cfg"), "Space Invaders")
def tearDown(self):
"""Tear down for the main class"""
# Remove the config
del self.config
# Nothing to Teardown
return super().tearDown()
def testConfigA(self):
"""
Test if config file is converted to a dictionary
"""
assert type(self.config) == dict, "Read setting not returning a dictionary"
def testConfigB(self):
"""
Test For integer parsing and converting to correct type
"""
assert type(self.config['sensitivity']) == int, "Not parsing and converting digit correctly"
def testConfigC(self):
"""
Test for boolean parsing and converting to correct type
"""
assert type(self.config['debug']) == bool, "Not parsing string to boolean correctly"
def testConfigD(self):
"""
Test for string parsing properly
"""
assert type(self.config['icon_img_path']) == str, "Not parsing string to correct type"
def testConfigE(self):
"""
Test if the game_width is correct
"""
assert type(self.config['game_width']) == int, "Not parsing the game width correctly"
def testConfigF(self):
"""
Test if the game_height is loaded correctly
"""
assert type(self.config['game_height']) == int, "Not parsing the game height correctly"
def testConfigG(self):
"""
Test if the maxfps is loaded correctly
"""
assert type(self.config['maxfps']) == int
class AbsPathTest(unittest.TestCase):
def testConfig1(self) -> None:
"""
Test case 1: "C:\\hello1\\hello2" and "hello3\\hello4"
"""
path1 = os.path.join("C:", "hello1", "hello2")
path2 = os.path.join("hello3", "hello4")
test1 = os.path.join(os.path.dirname(path1), path2)
test2 = form_abs_path(path1, path2)
assert test1 == test2, f"{test1} and {test2} are different"
class numFilesTest(unittest.TestCase):
def testConfig1(self) -> None:
"""
Test case 1: Number of files in current folder
"""
assert len(os.listdir(os.path.dirname(__file__))) == len(
list_dir(os.path.dirname(__file__))), "Results are not the same for the 2 functions"
class ConvertTypeTest(unittest.TestCase):
def testConfig1(self) -> None:
"""
Test case 1: Converting ("hello","3")
"""
result = convertType(("hello", "3"))
assert type(result[0]) == str and type(result[1]) == int, '''("hello","3") is not converted correctly'''
def testConfig2(self) -> None:
"""
Test case 2: Converting ("Hello", "world")
"""
result = convertType(("Hello", "world"))
assert type(result[0]) == str and type(result[1]) == str, '''("Hello", "world") is not converted correctly'''
def testConfig3(self) -> None:
"""
Test case 3: Converting (3, "$")
"""
result = convertType((3, "$"))
assert type(result[0]) == int and type(result[1]) == str, '''(3, "$") is not converted correctly'''
def testConfig4(self) -> None:
"""
Test case 4: Converting (3, "False")
"""
result = convertType((3, "False"))
assert type(result[0]) == int and type(result[1]) == bool, '''(3, "False") is not converted correctly'''
def testConfig5(self) -> None:
"""
Test case 5: Converting ("False", "tRuE")
"""
result = convertType(("False", "tRuE"))
assert type(result[0]) == str and type(result[1]) == bool, '''("False", "tRuE") is not converted correctly'''
def testConfig6(self) -> None:
"""
Test case 6: Converting ("True", "12346")
"""
result = convertType(("True", "12346"))
assert type(result[0]) == str, """True is not converted correctly"""
assert type(result[1]) == int, """'12346' is not converted correctly"""
def testConfig7(self) -> None:
"""
Test case 7: Converting ()
"""
result = convertType(())
assert result == (), '''() is not converted correctly'''
def testConfig8(self) -> None:
"""
Test case 8: Converting (1,2,3,4)
"""
result = convertType((1, 2, 3, 4))
assert result == (1, 2, 3, 4), '''(1,2,3,4) is not converted correctly'''
# Main function
if __name__ == "__main__":
# Runs all of the tests defined above
unittest.main()
``` |
{
"source": "Jh123x/PDF-Combiner",
"score": 4
} |
#### File: Jh123x/PDF-Combiner/main.py
```python
from PyPDF2 import PdfFileMerger, PdfFileReader #For PDF functions
import argparse
import os #For getting files
class PDFConcat(object):
def __init__(self, path_name:str):
"""Constructor for the PDFConcat obj"""
#Store the paths
self.path_name = path_name
def get_pdf_names(self) -> list:
"""Return the names of the files in the input folder"""
return [f for f in os.listdir(self.path_name) if os.path.isfile(os.path.join(self.path_name, f))]
def concat(self) -> None:
"""Concatenate the PDF files"""
mergedPDF = PdfFileMerger()
for filename in self.get_pdf_names():
print(f"Merging {filename}")
mergedPDF.append(PdfFileReader(os.path.join(self.path_name,filename)))
print("Writing to output file")
mergedPDF.write("Output.pdf")
def main() -> None:
"""The main function to run the file"""
#Create the PDFConcat obj
pdfconcat = PDFConcat('input')
#Create the PDF file
pdfconcat.concat()
if __name__ == '__main__':
main()
``` |
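`argparse` is imported above but never used; a hedged sketch of how `main()` could expose the input folder as a flag (the flag names are assumptions, not part of the original script):
```python
# Possible replacement for main() that actually uses the argparse import:
def main() -> None:
    parser = argparse.ArgumentParser(description="Concatenate every PDF in a folder into Output.pdf")
    parser.add_argument("-i", "--input", default="input", help="folder containing the PDFs to merge")
    args = parser.parse_args()
    PDFConcat(args.input).concat()
```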
{
"source": "jh16g15/fpga-scripts",
"score": 2
} |
#### File: jh16g15/fpga-scripts/gen_instance.py
```python
import os
import sys
import argparse
import pathlib
def parse_vhdl_file(filename):
print("Found " + filename)
found_entity = False
in_entity = False
in_generics = False
in_ports = False
generic_list = []
port_list = []
template = []
entity_name = ""
with open(filename) as f:
for line in f.readlines():
line = filter_vhdl_comment(line)
# Wait until we find an entity declaration
if "entity" in line.lower() and (not in_generics) and (not in_ports):
if found_entity == True:
print("ERROR: found multiple entites in " + filename)
else:
found_entity = True
in_entity = True
template_filename = get_template_name(filename)
entity_name = get_entity_name(line)
print("Creating template " + template_filename +
" for entity " + entity_name)
# There are several ways to end our entity declaration
#
# 1. end;
# 2. end entity;
# 3. end $NAME;
# 4. end ;
##
first_word = get_first_word(line.lower())
if ("end" == first_word or "end;" == first_word) and in_entity:
in_generics = False
in_ports = False
in_entity = False
break
# TODO: handle generic/port region start on the same line as a generic/port declaration
if in_entity:
if "generic" in line.lower() and (not in_generics) and (not in_ports):
# move into finding the Generics
in_generics = True
in_ports = False
continue # nothing more to do with this line
if "port" in line.lower() and (not in_ports):
# move into finding the Ports
in_generics = False
in_ports = True
continue # nothing more to do with this line
if in_generics and not in_ports:
name = get_generic_port_name(line)
if len(name) != 0: # ignore empty strings
print("Found Generic: " + name)
generic_list.append(name)
if in_ports and not in_generics:
name = get_generic_port_name(line)
if len(name) != 0: # ignore empty strings
print("Found Port: " + name)
port_list.append(name)
# after we have finished parsing the file
template = gen_template(
src_name=filename, entity_name=entity_name, generics=generic_list, ports=port_list)
save_list_to_file(template, get_template_name(filename))
def get_generic_port_name(line):
# print("old: " + line)
# grab everything before the ':' so we don't get the
line = line.split(":")[0]
line = line.split(")")[0] # now we only have the port name, ");" or ")"
line = line.strip() # remove leading/trailing whitespace
# print("new: " + line)
return line
def get_entity_name(line):
line = line.split(" ")
i = 0
for word in line:
if word == "entity":
return line[i + 1]
i = i + 1
return "UNKNOWN_ENTITY"
def gen_template(src_name, entity_name, generics, ports, library="work"):
instance_name = "u_" + entity_name
template = []
template.append(
"-- Autogenerated Instantiation Template for " + str(entity_name) + "\n")
template.append(
"-- Generated by gen_instance.py (available at https://github.com/jh16g15/fpga-scripts)" + "\n")
template.append("-- Original Source File: " +
str(os.getcwd()) + "/" + src_name + "\n")
template.append("" + "\n")
template.append(instance_name + " : entity " +
library + "." + entity_name + "\n")
# template.append(" -- Generics Found: " + str(generics) + "\n")
# template.append(" -- Ports Found: " + str(ports) + "\n")
num_generics = len(generics)
if num_generics != 0:
template.append(" Generic map (" + "\n")
for i, generic in enumerate(generics):
if i == num_generics - 1: # final generic has no comma
template.append(" " + generic + " => " + generic + "\n")
else:
template.append(" " + generic +
" => " + generic + "," + "\n")
template.append(" )" + "\n")
num_ports = len(ports)
if num_ports != 0:
template.append(" Port map (" + "\n")
for i, port in enumerate(ports):
if i == num_ports - 1: # final port has no comma
template.append(" " + port + " => " + port + "\n")
else:
template.append(" " + port + " => " + port + "," + "\n")
template.append(" );" + "\n")
template.append("" + "\n")
return template
def save_list_to_file(items, filename):
with open(filename, "w") as f_out:
f_out.writelines(items)
def get_template_name(src_name):
base_name = src_name.split(".vhd")
return "instantiation_templates/" + base_name[0] + "_template.vho"
def filter_vhdl_comment(line):
# print("input line: " + line)
line = line.split("--", 1)
# print("output line: " + line[0])
return line[0]
def get_first_word(line):
line = line.split(" ")
return line[0].strip() # remove newlines on the end
# def try_funcs():
# get_generic_port_name(" ); ")
# # parse_vhdl_file("/home/joehi/fpga-scripts/example_src/cpu_regs.vhd")
# # tmp_list = []
# # print(str(tmp_list))
# # tmp_list.append(None)
# # print(str(tmp_list))
# # input("Continue:")
def main(args):
# try_funcs()
print(str(args))
parser = argparse.ArgumentParser(
description='generate VHDL Instantiation Templates from a folder of source files')
# nargs = ? means could be 0 or 1 arg provided
parser.add_argument(
'dir', nargs='?', default=os.getcwd(), type=pathlib.Path)
# 'dir', nargs='?', default="/home/joehi/fpga-scripts/example_src", type=pathlib.Path) # for the VSCode debugger
args = parser.parse_args()
src_dir = args.dir
print("Current Working Dir: " + os.getcwd())
print("Generating Instantiation Templates for " + str(src_dir))
os.chdir(src_dir)
print("Updated Working Dir: " + os.getcwd())
# TODO: Add a "clean function to delete the old folder and its contents"
# make new folder in $WD
os.makedirs("instantiation_templates", exist_ok=True)
src_dir = pathlib.Path.resolve(src_dir)
print("src dir = " + str(src_dir))
dest_dir = pathlib.Path.joinpath(src_dir, "instantiation_templates")
print("dest dir = " + str(dest_dir))
#
for filename in os.listdir(): # uses current working directory (src_dir)
if filename.endswith(".vhd"):
parse_vhdl_file(filename)
else:
print("INFO: Skipped " + str(filename) + " as not a VHDL file")
if __name__ == "__main__":
sys.exit(main(sys.argv)) # pass the args
``` |
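A small programmatic driver sketch that bypasses `main()`; the `example_src` folder and `cpu_regs.vhd` file names are taken from the commented-out debug paths above and are assumptions about the checked-out repo layout:
```python
import os
from gen_instance import parse_vhdl_file   # assumes the script above is importable as a module

os.chdir("example_src")
os.makedirs("instantiation_templates", exist_ok=True)
parse_vhdl_file("cpu_regs.vhd")   # writes instantiation_templates/cpu_regs_template.vho
```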
{
"source": "jh199897/hello",
"score": 3
} |
#### File: hello/test/login.py
```python
a = 10
b = 20
c = 30
num = 23
nn = 300
num1 = 22
def index():
return index
``` |
{
"source": "jh20s/stupid-week-2021",
"score": 3
} |
#### File: jh20s/contest#225/1743.py
```python
from typing import List
from collections import defaultdict
class Solution:
def restoreArray(self, adjacentPairs: List[List[int]]) -> List[int]:
h = defaultdict(list)
for pair in adjacentPairs: # map each element to its neighbours
h[pair[0]].append(pair[1])
h[pair[1]].append(pair[0])
arr = [0] * (len(adjacentPairs) + 1)
extremes = [] # stores the values of the first and last elements
for key, value in h.items():
if len(value) == 1: # extreme elements have only one neighbour
extremes.append(key)
arr[0], arr[-1] = extremes
curr = h[arr[0]][0]
for i in range(1, len(arr) - 1):
# at every step, find the next neighbour and proceed
arr[i] = curr
first, second = h[curr]
# check which neighbour is the previous element
curr = second if first == arr[i - 1] else first
return arr
```
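A small worked example: the pairs (2,1), (3,4), (3,2) describe the chain 1-2-3-4, and 1 and 4 each appear in only one pair, so they become the two ends.
```python
# Either orientation of the chain is a valid answer.
print(Solution().restoreArray([[2, 1], [3, 4], [3, 2]]))   # [1, 2, 3, 4] (or its reverse)
```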
#### File: jh20s/contest#225/1744.py
```python
from typing import List
from itertools import accumulate
class Solution:
def canEat(self, candiesCount, queries):
A = [0] + list(accumulate(candiesCount))
return [A[t] // c <= d < A[t + 1] for t, d, c in queries]
```
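Here `A` is the prefix sum of `candiesCount`, so a query `(t, d, c)` is feasible exactly when eating 1 to `c` candies a day for `d + 1` days can land inside type `t`'s slice of the prefix sum. A quick worked example:
```python
# candiesCount = [7, 4, 5, 3, 8] gives prefix sums A = [0, 7, 11, 16, 19, 27].
# (0, 2, 2): 0 // 2 = 0 <= 2 < 7 -> True;  (4, 2, 4): 19 // 4 = 4 <= 2 is False -> False.
print(Solution().canEat([7, 4, 5, 3, 8], [[0, 2, 2], [4, 2, 4], [2, 13, 1000000000]]))
# -> [True, False, True]
```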
#### File: 14/parkinhyo/7569.py
```python
from typing import List
from collections import deque
from sys import stdin
class Solution:
def tomato(self, M: int, N: int, H: int, graph: List[List[List[int]]]):
queue = deque()
count = 0
answer = 0
visited = [[[False] * (M + 1) for i in range(N + 1)] for j in range(H + 1)]
# print(graph)
tomato_complete = False
for h in range(H):
for n in range(N):
if 0 not in graph[h][n]:
tomato_complete = True
if tomato_complete:
print(0)
return
for h in range(H):
for n in range(N):
for m in range(M):
if graph[h][n][m] == 1:
queue.append([h, n, m, count])
if graph[h][n][m] == -1:
visited[h][n][m] = True
if not queue:
print(-1)
return
while queue:
h, n, m, count = queue.popleft()
if not visited[h][n][m]:
visited[h][n][m] = True
answer = max(answer, count)
count += 1
if h - 1 >= 0:
if graph[h - 1][n][m] == 0:
graph[h - 1][n][m] = 1
queue.append([h - 1, n, m, count])
if h + 1 < H:
if graph[h + 1][n][m] == 0:
graph[h + 1][n][m] = 1
queue.append([h + 1, n, m, count])
if n - 1 >= 0:
if graph[h][n - 1][m] == 0:
graph[h][n - 1][m] = 1
queue.append([h, n - 1, m, count])
if n + 1 < N:
if graph[h][n + 1][m] == 0:
graph[h][n + 1][m] = 1
queue.append([h, n + 1, m, count])
if m - 1 >= 0:
if graph[h][n][m - 1] == 0:
graph[h][n][m - 1] = 1
queue.append([h, n, m - 1, count])
if m + 1 < M:
if graph[h][n][m + 1] == 0:
graph[h][n][m + 1] = 1
queue.append([h, n, m + 1, count])
tomato_false = False
for h in range(H):
for n in range(N):
if 0 in graph[h][n]:
tomato_false = True
if tomato_false:
print(-1)
return
print(answer)
# [[[0, -1, 0, 0, 0], [-1, -1, 0, 1, 1], [0, 0, 0, 1, 1]]]
# [[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# [[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]]]
# Solution().tomato(4, 3, 2, [[[1,1,1,1],[1,1,1,1],[1,1,1,1]],[[1,1,1,1],[-1,-1,-1,-1],[1,1,1,-1]]])
graph = []
M, N, H = map(int, stdin.readline().split())
for h in range(H):
tmp = []
for n in range(N):
tmp.append(list(map(int, stdin.readline().split())))
graph.append(tmp)
Solution().tomato(M, N, H, graph)
```
#### File: parkinhyo/Algorithm/1.py
```python
def solution(a, b):
date = {1: 31, 2: 29, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
myDict = {1: "FRI", 2: "SAT", 3: "SUN", 4: "MON", 5: "TUE", 6: "WED", 0: "THU"}
day = 0
for mon in range(1, a):
day += date[mon]
day += b
return myDict[day % 7]
```
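Quick sanity check: for 2016-05-24, the first four months contribute 31 + 29 + 31 + 30 = 121 days, so day 145 maps to index 5 in the lookup table.
```python
print(solution(5, 24))   # "TUE"
```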
#### File: parkinhyo/Algorithm/2.py
```python
def solution(numbers, hand):
curLeft = '*'
curRight = '#'
midDicttwo = {1: ['1', '3', '5'], 2: ['4', '8', '6'], 3: ['7', '9', '0'], 4: ['*', '#']}
midDicttfive = {1: ['2', '4', '6', '8'], 2: ['1', '3', '7', '9', '0'], 3: ['*', '#']}
midDictteight = {1: ['5', '7', '0', '9'], 2: ['2', '4', '6', '*', '#'], 3: ['1', '3']}
midDicttzero = {1: ['8', '*', '#'], 2: ['5', '7', '9'], 3: ['2', '4', '6'], 4: ['1', '3']}
left = [1, 4, 7]
right = [3, 6, 9]
mid = [2, 5, 8, 0]
answer = ''
for number in numbers:
if number in left:
answer += 'L'
curLeft = str(number)
elif number in right:
answer += 'R'
curRight = str(number)
if number in mid:
if number == 2:
rightDistance = 0
leftDistance = 0
for key in midDicttwo.keys():
if curRight in midDicttwo[key]:
rightDistance = key
if curLeft in midDicttwo[key]:
leftDistance = key
if rightDistance > leftDistance:
answer += 'L'
curLeft = str(number)
elif rightDistance < leftDistance:
answer += 'R'
curRight = str(number)
else:
if hand == 'left':
answer += 'L'
curLeft = str(number)
else:
answer += 'R'
curRight = str(number)
elif number == 5:
rightDistance = 0
leftDistance = 0
for key in midDicttfive.keys():
if curRight in midDicttfive[key]:
rightDistance = key
if curLeft in midDicttfive[key]:
leftDistance = key
if rightDistance > leftDistance:
answer += 'L'
curLeft = str(number)
elif rightDistance < leftDistance:
answer += 'R'
curRight = str(number)
else:
if hand == 'left':
answer += 'L'
curLeft = str(number)
else:
answer += 'R'
curRight = str(number)
elif number == 8:
rightDistance = 0
leftDistance = 0
for key in midDictteight.keys():
if curRight in midDictteight[key]:
rightDistance = key
if curLeft in midDictteight[key]:
leftDistance = key
if rightDistance > leftDistance:
answer += 'L'
curLeft = str(number)
elif rightDistance < leftDistance:
answer += 'R'
curRight = str(number)
else:
if hand == 'left':
answer += 'L'
curLeft = str(number)
else:
answer += 'R'
curRight = str(number)
elif number == 0:
rightDistance = 0
leftDistance = 0
for key in midDicttzero.keys():
if curRight in midDicttzero[key]:
rightDistance = key
if curLeft in midDicttzero[key]:
leftDistance = key
if rightDistance > leftDistance:
answer += 'L'
curLeft = str(number)
elif rightDistance < leftDistance:
answer += 'R'
curRight = str(number)
else:
if hand == 'left':
answer += 'L'
curLeft = str(number)
else:
answer += 'R'
curRight = str(number)
return answer
``` |
{
"source": "jh247247/Firefly",
"score": 2
} |
#### File: Firefly/monitor/firefly.py
```python
from pymongo import MongoClient
import json
from bson import json_util
from flask import request
import flask_restful as fr
import time
fireflies = MongoClient()['redo']['firefly']
# TODO: add list api for fireflies.
class firefly_res(fr.Resource):
def get(self, firefly_id):
# grab the data from the database, send it back out
# maybe authenticate? idk
lookup = fireflies.find_one({"fireflyId" : firefly_id}, {'_id' : False})
if lookup is not None:
return lookup
return 204
def put(self, firefly_id):
dat = {"usrData" : request.form}
dat["fireflyId"] = firefly_id
dat["timestamp"] = int(time.time())
fireflies.update_one({"fireflyId":firefly_id}, {'$set': dict(dat)}, upsert=True)
return dat
class fireflylist_res(fr.Resource):
def get(self):
return {'ids':fireflies.distinct('fireflyId')}
def setup(api):
api.add_resource(fireflylist_res,'/firefly')
api.add_resource(firefly_res,'/firefly/<string:firefly_id>')
if __name__ == '__main__':
print('WRITE TESTS FOR MEH')
```
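A hedged client-side sketch of the three routes registered in `setup()`, assuming the Flask app from the accompanying `main.py` is running locally on the default port; the firefly id and form fields are placeholders:
```python
import requests

base = "http://localhost:5000"                                      # default Flask port assumed
requests.put(f"{base}/firefly/lamp-01", data={"brightness": "80"})  # upsert user data for one firefly
print(requests.get(f"{base}/firefly/lamp-01").json())               # single firefly document
print(requests.get(f"{base}/firefly").json())                       # {'ids': [...]} listing
```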
#### File: Firefly/monitor/main.py
```python
from flask import Flask
import flask_restful as fr
import core, mqtt
import threading
app = Flask(__name__)
api = fr.Api(app)
def flask_run():
app.run(debug=True,
host='0.0.0.0')
mqtt_thread = threading.Thread(target=mqtt.run, args = ())
if __name__ == '__main__':
core.api_setup(api)
# TODO: make sure these threads quit when we do?
mqtt_thread.start()
flask_run()
``` |
{
"source": "jh247247/FIT3036-Project",
"score": 4
} |
#### File: FIT3036-Project/sim/Daisy.py
```python
import random
class Daisy:
MAX_GROWTH = 30
TEMP_FUDGE = 0.05
# TODO: add optimal temps for types of daisy
def __init__(self, parentTile, albedo, optTemp):
self.growth = 0
self.living = True
self.albedo = albedo
self.age = 0
self.optTemp = optTemp
self.parent = parentTile
def grow(self):
self.growth += 1
def update(self):
"""This should be called at every time interval"""
self.age += 1
# TODO: Growth dependent on tile incident radiation
# TODO: Death dependent on age and temp at tile
# TODO: death dependent on things other than age...
if self.age/self.MAX_GROWTH + \
abs(self.parent.temp-self.optTemp)*self.TEMP_FUDGE \
> random.random():
self.die()
def die(self):
self.living = False
```
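A standalone check of the mortality rule above: the death probability grows with `age / MAX_GROWTH` plus the temperature deviation scaled by `TEMP_FUDGE`, so a daisy 2.5 degrees off its optimum cannot outlive roughly 27 ticks. The `SimpleNamespace` tile and import path are assumptions for illustration.
```python
import types
from Daisy import Daisy   # assumes sim/ is on the path

tile = types.SimpleNamespace(temp=25.0)          # stand-in for a Tile with a .temp attribute
daisy = Daisy(tile, albedo=0.25, optTemp=22.5)
while daisy.living and daisy.age < 100:
    daisy.update()
print(f"daisy died after {daisy.age} ticks")
```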
#### File: FIT3036-Project/sim/InterfaceHandler.py
```python
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
# handles the interface to the user in the "sidebar"
class InterfaceHandler(QScrollArea):
def __init__(self):
super().__init__()
self.setMaximumWidth(300)
self.layout = QVBoxLayout(self)
self.layout.setAlignment(Qt.AlignTop)
self.setLayout(self.layout);
def addWidget(self, widget):
# make a nice seperator between new widgets
if(self.layout.count() != 0):
sep = QFrame()
sep.setFrameShape(QFrame.HLine)
sep.setFrameShadow(QFrame.Sunken)
self.layout.addWidget(sep)
self.layout.addWidget(widget)
```
#### File: FIT3036-Project/sim/Sun.py
```python
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class Sun(QWidget):
def __init__(self,args):
super().__init__()
# radiation value of 1 means that if albedo is 0.5,
# temperature should eventually become 22.5
self.radiation = args.radiation[0]
self.delta = args.delta[0]
self.initUI()
if self.delta is not 0:
self.deltaEnable.setChecked(True)
def initUI(self):
# make it so the widget doesnt take up the entire screen
self.setMaximumWidth(300)
self.mainLayout = QVBoxLayout(self);
self.radSpinner = QDoubleSpinBox(self)
self.radSpinner.setDecimals(4)
self.radSpinner.setValue(1) # set the default radiation value
self.radSpinner.setSingleStep(0.0001)
self.radSpinner.valueChanged.connect(self.handleRadSpinner)
self.deltaSpinner = QDoubleSpinBox(self)
self.deltaSpinner.setDecimals(4)
self.deltaSpinner.setSingleStep(0.0001)
self.deltaSpinner.setMinimum(-1)
self.deltaSpinner.setMaximum(1)
self.deltaSpinner.valueChanged.connect(self.handleDeltaSpinner)
self.deltaEnable = QCheckBox(self)
self.deltaEnable.setText("Enable radiation delta")
# text that we don't need to keep a reference to
title = QLabel("<b>Sun Options<\b>",self)
self.radText = QLabel("Current Radiation: " + str(round(self.radiation,4)),self)
seedRadText = QLabel("Seed radiation:",self)
deltaText = QLabel("Change per tick",self)
# add widgets to layout
self.mainLayout.addWidget(title)
self.mainLayout.addWidget(self.radText)
self.mainLayout.addWidget(seedRadText)
self.mainLayout.addWidget(self.radSpinner)
self.mainLayout.addWidget(deltaText)
self.mainLayout.addWidget(self.deltaSpinner)
self.mainLayout.addWidget(self.deltaEnable)
def handleRadSpinner(self, val):
self.radiation = val
def handleDeltaSpinner(self,val):
self.delta = val
def update(self):
if self.deltaEnable.isChecked():
self.radiation += self.delta
self.radText.setText("Current Radiation: " + str(round(self.radiation,4)))
```
#### File: FIT3036-Project/sim/World.py
```python
import sys, random
import threading
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from Tile import Tile
from Sun import Sun
import DaisyFactory
class World(QWidget):
SIZE_X = 35
SIZE_Y = 35
START_TEMP = 22.5
BOLTZMANN_CONSTANT = 5.670373e-8
SUN_WATTMETER_PER_UNIT = 3458.62
def __init__(self, sun, args):
super().__init__()
self.sun = sun
self.tick = 0
self.stop_tick = args.stop_tick
if args.no_gui is True:
self.tick_time = 0
else:
self.tick_time = 0.001
# calculate start temp from sun
self.avgTemp = args.temp[0]
self.avgAlbedo = 0
# init temp from given val
self.worldTiles = [[Tile(self, args.temp[0], (x,y)) \
for x in range(self.SIZE_X)] \
for y in range(self.SIZE_Y)]
self.worldLock = threading.Lock()
self.initOptionsUI()
DaisyFactory.setWorld(self)
if args.iblack is not 0:
self.enableInvasiveBlack.setChecked(True)
self.invasiveBlackTemp.setValue(args.iblack)
if args.iwhite is not 0:
self.enableInvasiveWhite.setChecked(True)
self.invasiveWhiteTemp.setValue(args.iwhite)
self.threadRunning = True
self.update()
def closeEvent(self, event):
self.threadRunning = False
def initOptionsUI(self):
self.avgTempLabel = QLabel("Average temp: " + str(self.START_TEMP))
self.enableInvasiveBlack = QCheckBox("Enable invasive black")
self.enableInvasiveBlack.stateChanged.connect(DaisyFactory.setInvasiveBlack)
self.enableInvasiveWhite = QCheckBox("Enable invasive white")
self.enableInvasiveWhite.stateChanged.connect(DaisyFactory.setInvasiveWhite)
self.invasiveBlackTemp = QDoubleSpinBox()
self.invasiveBlackTemp.valueChanged.connect(DaisyFactory.setInvasiveBlackTemp)
self.invasiveBlackTemp.setValue(32.5) # TODO: not hardcode defaults
self.invasiveWhiteTemp = QDoubleSpinBox()
self.invasiveWhiteTemp.valueChanged.connect(DaisyFactory.setInvasiveWhiteTemp)
self.invasiveWhiteTemp.setValue(12.5) # TODO: not hardcode defaults
self.resetButton = QPushButton("Reset Simulation")
self.resetButton.released.connect(self.resetWorld)
def resetWorld(self):
self.worldLock.acquire()
self.worldTiles = [[Tile(self, World.START_TEMP, (x,y)) \
for x in range(self.SIZE_X)] \
for y in range(self.SIZE_Y)]
self.worldLock.release()
self.tick = 0
def updateOptionsUI(self):
self.avgTempLabel.setText("Average temp: " + str(round(self.avgTemp,4)))
def update(self):
self.worldLock.acquire()
# calculate average temperature and albedo of world
self.avgTemp = 0
self.avgAlbedo = 0
for i in range(self.SIZE_X):
for j in range(self.SIZE_Y):
self.avgTemp += self.worldTiles[i][j].temp
self.avgAlbedo += self.worldTiles[i][j].getAlbedo()
self.avgTemp /= self.SIZE_X*self.SIZE_Y
self.avgAlbedo /= self.SIZE_X*self.SIZE_Y
# calculate emission temp of world (i.e: temp of world if
# daisies do not die and left to stabilize at current radiation)
self.expectedTemp = (self.sun.radiation*self.SUN_WATTMETER_PER_UNIT/ \
(4*self.BOLTZMANN_CONSTANT)* \
(self.avgAlbedo)) \
**(1/4)
self.emissionTemp = (self.sun.radiation*self.SUN_WATTMETER_PER_UNIT/ \
(4*self.BOLTZMANN_CONSTANT)* \
(0.5)) \
**(1/4)
print(str(self.avgTemp) + " , " + \
str(self.sun.radiation) + \
" , "+ str(self.tick) \
+ " , " + str(1-self.avgAlbedo) \
+ " , " + str(self.emissionTemp-273)
+ " , " + str(self.expectedTemp-273))
self.tick += 1
if self.stop_tick is not 0 and self.tick > self.stop_tick:
self.threadRunning = False
quit() # could be more elegant..
tempTileArr = []
# let the tiles update their temps
for i in range(self.SIZE_X):
tempTileArr.extend(self.worldTiles[i])
random.shuffle(tempTileArr)
for t in tempTileArr:
t.update(self.sun.radiation,self.emissionTemp-273)
deltaTempTiles = [[0 for x in range(self.SIZE_X)] \
for y in range(self.SIZE_Y)]
# let adjacent tiles affect one another
for i in range(self.SIZE_X):
for j in range(self.SIZE_Y):
deltaTempTiles[i][j] += self.worldTiles[i][j].temp - \
self.worldTiles[i-1][j].temp
deltaTempTiles[i][j] += self.worldTiles[i][j].temp - \
self.worldTiles[i][j-1].temp
deltaTempTiles[i][j] += self.worldTiles[i][j].temp - \
self.worldTiles[(i+1)%self.SIZE_X][j].temp
deltaTempTiles[i][j] += self.worldTiles[i][j].temp - \
self.worldTiles[i][(j+1)%self.SIZE_Y].temp
deltaTempTiles[i][j] /= 4 # TODO: fix factor...
deltaTempTiles[i][j] *= 0.2
for i in range(self.SIZE_X):
for j in range(self.SIZE_Y):
self.worldTiles[i][j].temp -= deltaTempTiles[i][j]
self.worldLock.release()
self.sun.update()
self.updateOptionsUI()
if self.threadRunning is True:
threading.Timer(self.tick_time,self.update).start()
def draw(self, qp):
size = self.size()
incX = size.width()/self.SIZE_X
incY = size.height()/self.SIZE_Y
# let the tiles draw themselves
self.worldLock.acquire()
for i in range(self.SIZE_X):
for j in range(self.SIZE_Y):
self.worldTiles[i][j].draw(qp,i*incX,j*incY,
incX+1,incY+1)
self.worldLock.release()
def paintEvent(self, e):
qp = QPainter()
qp.begin(self)
self.draw(qp)
qp.end()
super().update()
def getOptionsWidget(self):
layout = QVBoxLayout()
layout.addWidget(QLabel("<b>World Options<\b>"))
layout.addWidget(self.avgTempLabel)
layout.addWidget(self.enableInvasiveBlack)
layout.addWidget(QLabel("Invasive black optimal temp"))
layout.addWidget(self.invasiveBlackTemp)
layout.addWidget(self.enableInvasiveWhite)
layout.addWidget(QLabel("Invasive white optimal temp"))
layout.addWidget(self.invasiveWhiteTemp)
layout.addWidget(self.resetButton)
container = QWidget()
container.setLayout(layout)
return container
def getTile(self,coords):
return self.worldTiles[coords[0]%self.SIZE_X][coords[1]%self.SIZE_Y]
``` |
{
"source": "jh27539/osf.io",
"score": 2
} |
#### File: api_tests/registries_moderation/test_submissions.py
```python
import pytest
import datetime
from api.base.settings.defaults import API_BASE
from api.providers.workflows import Workflows
from osf.utils.workflows import RequestTypes, RegistrationModerationTriggers, RegistrationModerationStates
from osf_tests.factories import (
AuthUserFactory,
RegistrationFactory,
RegistrationProviderFactory,
NodeRequestFactory,
EmbargoFactory,
RetractionFactory,
)
from tests.base import get_default_metaschema
from osf.models import NodeRequest
from osf.migrations import update_provider_auth_groups
@pytest.mark.django_db
class TestRegistriesModerationSubmissions:
@pytest.fixture()
def moderator(self):
return AuthUserFactory()
@pytest.fixture()
def moderator_wrong_provider(self):
user = AuthUserFactory()
provider = RegistrationProviderFactory()
update_provider_auth_groups()
provider.schemas.add(get_default_metaschema())
provider.get_group('moderator').user_set.add(user)
provider.save()
return user
@pytest.fixture()
def provider(self, moderator):
provider = RegistrationProviderFactory()
update_provider_auth_groups()
provider.schemas.add(get_default_metaschema())
provider.get_group('moderator').user_set.add(moderator)
provider.reviews_workflow = Workflows.PRE_MODERATION.value
provider.save()
return provider
@pytest.fixture()
def admin(self, provider):
user = AuthUserFactory()
provider.get_group('admin').user_set.add(user)
provider.save()
return user
@pytest.fixture()
def registration_with_withdraw_request(self, provider):
registration = RegistrationFactory(provider=provider)
NodeRequest.objects.create(
request_type=RequestTypes.WITHDRAWAL.value,
target=registration,
creator=registration.creator
)
return registration
@pytest.fixture()
def access_request(self, provider):
request = NodeRequestFactory(request_type=RequestTypes.ACCESS.value)
request.target.provider = provider
request.target.save()
return request
@pytest.fixture()
def reg_creator(self):
return AuthUserFactory()
@pytest.fixture()
def registration(self, provider, reg_creator):
return RegistrationFactory(provider=provider, creator=reg_creator)
@pytest.fixture()
def embargo_registration(self, provider, reg_creator):
one_month_from_now = datetime.datetime.now() + datetime.timedelta(days=30)
embargo = EmbargoFactory(end_date=one_month_from_now, user=reg_creator)
registration = embargo.target_registration
registration.provider = provider
registration.update_moderation_state()
registration.save()
return registration
@pytest.fixture()
def retract_registration(self, provider, reg_creator):
retract = RetractionFactory(user=reg_creator)
registration = retract.target_registration
registration.provider = provider
registration.update_moderation_state()
registration.save()
return registration
@pytest.fixture()
def provider_requests_url(self, provider):
return f'/{API_BASE}providers/registrations/{provider._id}/requests/'
@pytest.fixture()
def registration_requests_url(self, registration_with_withdraw_request):
return f'/{API_BASE}registrations/{registration_with_withdraw_request._id}/requests/'
@pytest.fixture()
def registrations_url(self, provider):
return f'/{API_BASE}providers/registrations/{provider._id}/registrations/'
@pytest.fixture()
def registration_detail_url(self, registration):
return f'/{API_BASE}registrations/{registration._id}/'
@pytest.fixture()
def registration_log_url(self, registration):
return f'/{API_BASE}registrations/{registration._id}/logs/'
@pytest.fixture()
def provider_actions_url(self, provider):
return f'/{API_BASE}providers/registrations/{provider._id}/actions/'
@pytest.fixture()
def registration_actions_url(self, registration):
return f'/{API_BASE}registrations/{registration._id}/actions/'
@pytest.fixture()
def embargo_registration_actions_url(self, embargo_registration):
return f'/{API_BASE}registrations/{embargo_registration._id}/actions/'
@pytest.fixture()
def retract_registration_actions_url(self, retract_registration):
return f'/{API_BASE}registrations/{retract_registration._id}/actions/'
@pytest.fixture()
def actions_payload_base(self):
payload = {
'data': {
'attributes': {
},
'relationships': {
'target': {
'data': {
'type': 'registrations'
}
}
},
'type': 'registration-actions'
}
}
return payload
def test_get_provider_requests(self, app, provider_requests_url, registration_with_withdraw_request, access_request, moderator, moderator_wrong_provider):
resp = app.get(provider_requests_url, expect_errors=True)
assert resp.status_code == 401
resp = app.get(provider_requests_url, auth=moderator_wrong_provider.auth, expect_errors=True)
assert resp.status_code == 403
resp = app.get(provider_requests_url, auth=moderator.auth)
assert resp.status_code == 200
assert len(resp.json['data']) == 2
resp = app.get(f'{provider_requests_url}?filter[request_type]=withdrawal', auth=moderator.auth)
assert resp.status_code == 200
assert len(resp.json['data']) == 1
assert resp.json['data'][0]['relationships']['target']['data']['id'] == registration_with_withdraw_request._id
def test_get_registration_requests(self, app, registration_requests_url, registration_with_withdraw_request, access_request, moderator, moderator_wrong_provider):
resp = app.get(registration_requests_url, expect_errors=True)
assert resp.status_code == 401
resp = app.get(registration_requests_url, auth=moderator_wrong_provider.auth, expect_errors=True)
assert resp.status_code == 403
resp = app.get(registration_requests_url, auth=moderator.auth)
assert resp.status_code == 200
assert len(resp.json['data']) == 1
resp = app.get(f'{registration_requests_url}?filter[request_type]=withdrawal', auth=moderator.auth)
assert resp.status_code == 200
assert len(resp.json['data']) == 1
assert resp.json['data'][0]['relationships']['target']['data']['id'] == registration_with_withdraw_request._id
def test_get_registrations(self, app, registrations_url, registration, moderator, moderator_wrong_provider):
resp = app.get(registrations_url, expect_errors=True)
assert resp.status_code == 401
resp = app.get(registrations_url, auth=moderator_wrong_provider.auth, expect_errors=True)
assert resp.status_code == 403
resp = app.get(registrations_url, auth=moderator.auth)
assert resp.status_code == 200
assert len(resp.json['data']) == 1
assert resp.json['data'][0]['id'] == registration._id
assert resp.json['data'][0]['attributes']['reviews_state'] == RegistrationModerationStates.INITIAL.db_name
assert resp.json['data'][0]['relationships']['requests']
assert resp.json['data'][0]['relationships']['review_actions']
def test_get_registrations_reviews_state_filter(self, app, registrations_url, registration, moderator):
resp = app.get(f'{registrations_url}?filter[reviews_state]=initial', auth=moderator.auth)
assert resp.status_code == 200
assert len(resp.json['data']) == 1
assert resp.json['data'][0]['id'] == registration._id
resp = app.get(f'{registrations_url}?filter[reviews_state]=accepted', auth=moderator.auth)
assert resp.status_code == 200
assert len(resp.json['data']) == 0
# RegistrationFactory auto-approves the initial RegistrationApproval
registration.update_moderation_state()
resp = app.get(f'{registrations_url}?filter[reviews_state]=accepted&meta[reviews_state_counts]=true', auth=moderator.auth)
assert resp.status_code == 200
assert len(resp.json['data']) == 1
assert resp.json['data'][0]['id'] == registration._id
assert resp.json['data'][0]['attributes']['reviews_state'] == RegistrationModerationStates.ACCEPTED.db_name
assert resp.json['meta']['reviews_state_counts']['accepted'] == 1
@pytest.mark.enable_quickfiles_creation
def test_get_registration_actions(self, app, registration_actions_url, registration, moderator):
resp = app.get(registration_actions_url, expect_errors=True)
assert resp.status_code == 401
resp = app.get(registration_actions_url, auth=moderator.auth)
assert resp.status_code == 200
assert len(resp.json['data']) == 0
registration.is_public = True
retraction = registration.retract_registration(
user=registration.creator, justification='because')
retraction.approve(
user=registration.creator,
token=retraction.token_for_user(registration.creator, 'approval')
)
registration.save()
resp = app.get(registration_actions_url, auth=moderator.auth)
assert len(resp.json['data']) == 1
assert resp.json['data'][0]['attributes']['trigger'] == RegistrationModerationTriggers.REQUEST_WITHDRAWAL.db_name
assert resp.json['data'][0]['relationships']['creator']['data']['id'] == registration.creator._id
@pytest.mark.enable_quickfiles_creation
def test_get_provider_actions(self, app, provider_actions_url, registration, moderator):
resp = app.get(provider_actions_url, expect_errors=True)
assert resp.status_code == 401
resp = app.get(provider_actions_url, auth=moderator.auth)
assert resp.status_code == 200
assert len(resp.json['data']) == 0
registration.require_approval(user=registration.creator)
approval = registration.registration_approval
approval.approve(
user=registration.creator,
token=approval.token_for_user(registration.creator, 'approval')
)
resp = app.get(provider_actions_url, auth=moderator.auth)
assert len(resp.json['data']) == 1
assert resp.json['data'][0]['attributes']['trigger'] == RegistrationModerationTriggers.SUBMIT.db_name
assert resp.json['data'][0]['relationships']['creator']['data']['id'] == registration.creator._id
def test_registries_moderation_permission(self, app, registration_detail_url, registration, moderator, moderator_wrong_provider):
# Moderators should be able to view registration details once the registration is pending
registration.moderation_state = RegistrationModerationStates.PENDING.db_name
registration.save()
resp = app.get(registration_detail_url, expect_errors=True)
assert resp.status_code == 401
resp = app.get(registration_detail_url, auth=moderator_wrong_provider.auth, expect_errors=True)
assert resp.status_code == 403
resp = app.get(registration_detail_url, auth=moderator.auth)
assert resp.status_code == 200
def test_registries_moderation_permission_log(self, app, registration_log_url, registration, moderator, moderator_wrong_provider):
# Moderators should be able to view registration logs once the registration is pending
registration.moderation_state = RegistrationModerationStates.PENDING.db_name
registration.save()
resp = app.get(registration_log_url, expect_errors=True)
assert resp.status_code == 401
resp = app.get(registration_log_url, auth=moderator_wrong_provider.auth, expect_errors=True)
assert resp.status_code == 403
resp = app.get(registration_log_url, auth=moderator.auth)
assert resp.status_code == 200
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_accept(self, app, registration, moderator, registration_actions_url, actions_payload_base, reg_creator):
registration.require_approval(user=registration.creator)
registration.registration_approval.accept()
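        # Admin approval moves the submission into PENDING moderation;
        # the moderator then accepts it through the actions endpoint below.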
registration.refresh_from_db()
assert registration.moderation_state == RegistrationModerationStates.PENDING.db_name
actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
actions_payload_base['data']['attributes']['comment'] = 'Best registration Ive ever seen'
actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth)
assert resp.status_code == 201
assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
registration.refresh_from_db()
assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_reject_moderator(self, app, registration, reg_creator, moderator, registration_actions_url, actions_payload_base):
registration.require_approval(user=registration.creator)
registration.registration_approval.accept()
registration.refresh_from_db()
assert registration.moderation_state == RegistrationModerationStates.PENDING.db_name
actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.REJECT_SUBMISSION.db_name
actions_payload_base['data']['attributes']['comment'] = 'Worst registration Ive ever seen'
actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth)
assert resp.status_code == 201
assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.REJECT_SUBMISSION.db_name
registration.refresh_from_db()
assert registration.moderation_state == RegistrationModerationStates.REJECTED.db_name
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_embargo(self, app, embargo_registration, moderator, provider, embargo_registration_actions_url, actions_payload_base, reg_creator):
assert embargo_registration.moderation_state == RegistrationModerationStates.INITIAL.db_name
embargo_registration.sanction.accept()
embargo_registration.refresh_from_db()
assert embargo_registration.moderation_state == RegistrationModerationStates.PENDING.db_name
actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
actions_payload_base['data']['attributes']['comment'] = 'Looks good! (Embargo)'
actions_payload_base['data']['relationships']['target']['data']['id'] = embargo_registration._id
resp = app.post_json_api(embargo_registration_actions_url, actions_payload_base, auth=moderator.auth)
assert resp.status_code == 201
assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
embargo_registration.refresh_from_db()
assert embargo_registration.moderation_state == RegistrationModerationStates.EMBARGO.db_name
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_embargo_reject(self, app, embargo_registration, moderator, provider, embargo_registration_actions_url, actions_payload_base, reg_creator):
assert embargo_registration.moderation_state == RegistrationModerationStates.INITIAL.db_name
embargo_registration.sanction.accept()
embargo_registration.refresh_from_db()
assert embargo_registration.moderation_state == RegistrationModerationStates.PENDING.db_name
actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.REJECT_SUBMISSION.db_name
actions_payload_base['data']['attributes']['comment'] = 'Looks good! (Embargo)'
actions_payload_base['data']['relationships']['target']['data']['id'] = embargo_registration._id
resp = app.post_json_api(embargo_registration_actions_url, actions_payload_base, auth=moderator.auth)
assert resp.status_code == 201
assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.REJECT_SUBMISSION.db_name
embargo_registration.refresh_from_db()
assert embargo_registration.moderation_state == RegistrationModerationStates.REJECTED.db_name
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_withdraw_accept(self, app, retract_registration, moderator, retract_registration_actions_url, actions_payload_base, provider):
retract_registration.sanction.accept()
retract_registration.refresh_from_db()
assert retract_registration.moderation_state == RegistrationModerationStates.PENDING_WITHDRAW.db_name
actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_WITHDRAWAL.db_name
actions_payload_base['data']['attributes']['comment'] = 'Bye bye'
actions_payload_base['data']['relationships']['target']['data']['id'] = retract_registration._id
resp = app.post_json_api(retract_registration_actions_url, actions_payload_base, auth=moderator.auth)
assert resp.status_code == 201
assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.ACCEPT_WITHDRAWAL.db_name
retract_registration.refresh_from_db()
assert retract_registration.moderation_state == RegistrationModerationStates.WITHDRAWN.db_name
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_withdraw_reject(self, app, retract_registration, moderator, retract_registration_actions_url, actions_payload_base, provider):
retract_registration.sanction.accept()
retract_registration.refresh_from_db()
assert retract_registration.moderation_state == RegistrationModerationStates.PENDING_WITHDRAW.db_name
actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.REJECT_WITHDRAWAL.db_name
actions_payload_base['data']['attributes']['comment'] = 'Bye bye'
actions_payload_base['data']['relationships']['target']['data']['id'] = retract_registration._id
resp = app.post_json_api(retract_registration_actions_url, actions_payload_base, auth=moderator.auth)
assert resp.status_code == 201
assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.REJECT_WITHDRAWAL.db_name
retract_registration.refresh_from_db()
assert retract_registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_force_withdraw(self, app, registration, moderator, registration_actions_url, actions_payload_base, provider, reg_creator):
registration.require_approval(user=registration.creator)
registration.registration_approval.accept()
registration.registration_approval.accept(user=moderator) # Gotta make it Accepted
registration.refresh_from_db()
assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name
actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.FORCE_WITHDRAW.db_name
actions_payload_base['data']['attributes']['comment'] = 'Bye bye'
actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth)
assert resp.status_code == 201
assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.FORCE_WITHDRAW.db_name
registration.refresh_from_db()
assert registration.moderation_state == RegistrationModerationStates.WITHDRAWN.db_name
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_accept_errors(self, app, registration, moderator, registration_actions_url, actions_payload_base, reg_creator):
registration.require_approval(user=registration.creator)
        # Moderator can't submit
actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
actions_payload_base['data']['attributes']['comment'] = 'Submitting registration'
actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth, expect_errors=True)
assert resp.status_code == 403
registration.refresh_from_db()
assert registration.moderation_state == RegistrationModerationStates.INITIAL.db_name
registration.registration_approval.accept()
registration.refresh_from_db()
assert registration.moderation_state == RegistrationModerationStates.PENDING.db_name
        # Admin contributor can't approve
actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
actions_payload_base['data']['attributes']['comment'] = 'Best registration Ive ever seen'
actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=reg_creator.auth, expect_errors=True)
assert resp.status_code == 403
registration.refresh_from_db()
assert registration.moderation_state == RegistrationModerationStates.PENDING.db_name
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_withdraw_admin_cant_accept(self, app, retract_registration, reg_creator, retract_registration_actions_url, actions_payload_base, provider):
retract_registration.sanction.accept()
actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_WITHDRAWAL.db_name
actions_payload_base['data']['attributes']['comment'] = 'Bye bye'
actions_payload_base['data']['relationships']['target']['data']['id'] = retract_registration._id
resp = app.post_json_api(retract_registration_actions_url, actions_payload_base, auth=reg_creator.auth, expect_errors=True)
assert resp.status_code == 403
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_embargo_admin_cant_accept(self, app, embargo_registration, provider, embargo_registration_actions_url, actions_payload_base, reg_creator):
embargo_registration.require_approval(user=embargo_registration.creator)
embargo_registration.registration_approval.accept()
embargo_registration.refresh_from_db()
assert embargo_registration.moderation_state == RegistrationModerationStates.INITIAL.db_name
actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
actions_payload_base['data']['attributes']['comment'] = 'Looks good! (Embargo)'
actions_payload_base['data']['relationships']['target']['data']['id'] = embargo_registration._id
resp = app.post_json_api(embargo_registration_actions_url, actions_payload_base, auth=reg_creator.auth, expect_errors=True)
assert resp.status_code == 403
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_admin_cant_force_withdraw(self, app, registration, moderator, registration_actions_url, actions_payload_base, provider, reg_creator):
registration.require_approval(user=registration.creator)
registration.registration_approval.accept()
registration.refresh_from_db()
assert registration.moderation_state == RegistrationModerationStates.PENDING.db_name
actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
actions_payload_base['data']['attributes']['comment'] = 'Best registration Ive ever seen'
actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth)
assert resp.status_code == 201
assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
registration.refresh_from_db()
assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name
actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.FORCE_WITHDRAW.db_name
actions_payload_base['data']['attributes']['comment'] = 'Bye bye'
actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=reg_creator.auth, expect_errors=True)
assert resp.status_code == 403
@pytest.mark.parametrize(
'moderator_trigger',
[
RegistrationModerationTriggers.ACCEPT_SUBMISSION,
RegistrationModerationTriggers.REJECT_SUBMISSION
]
)
@pytest.mark.enable_quickfiles_creation
def test_post_submission_action_persists_comment(self, app, registration, moderator, registration_actions_url, actions_payload_base, moderator_trigger):
assert registration.actions.count() == 0
registration.require_approval(user=registration.creator)
registration.registration_approval.accept()
moderator_comment = 'inane comment'
actions_payload_base['data']['attributes']['trigger'] = moderator_trigger.db_name
actions_payload_base['data']['attributes']['comment'] = moderator_comment
actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth)
assert resp.json['data']['attributes']['comment'] == moderator_comment
persisted_action = registration.actions.get(trigger=moderator_trigger.db_name)
assert persisted_action.comment == moderator_comment
@pytest.mark.parametrize(
'moderator_trigger',
[
RegistrationModerationTriggers.ACCEPT_WITHDRAWAL,
RegistrationModerationTriggers.REJECT_WITHDRAWAL,
]
)
@pytest.mark.enable_quickfiles_creation
def test_post_withdrawal_action_persists_comment(self, app, registration, moderator, registration_actions_url, actions_payload_base, moderator_trigger):
assert registration.actions.count() == 0
registration.is_public = True
registration.retract_registration(user=registration.creator)
registration.retraction.accept()
moderator_comment = 'inane comment'
actions_payload_base['data']['attributes']['trigger'] = moderator_trigger.db_name
actions_payload_base['data']['attributes']['comment'] = moderator_comment
actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth)
assert resp.json['data']['attributes']['comment'] == moderator_comment
persisted_action = registration.actions.get(trigger=moderator_trigger.db_name)
assert persisted_action.comment == moderator_comment
@pytest.mark.enable_quickfiles_creation
def test_post_force_withdraw_action_persists_comment(self, app, registration, moderator, registration_actions_url, actions_payload_base):
assert registration.actions.count() == 0
registration.is_public = True
registration.update_moderation_state() # implicit ACCEPTED state from RegistrationFactory
moderator_comment = 'inane comment'
force_withdraw_trigger = RegistrationModerationTriggers.FORCE_WITHDRAW.db_name
actions_payload_base['data']['attributes']['trigger'] = force_withdraw_trigger
actions_payload_base['data']['attributes']['comment'] = moderator_comment
actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth)
expected_comment = 'Force withdrawn by moderator: ' + moderator_comment
assert resp.json['data']['attributes']['comment'] == expected_comment
persisted_action = registration.actions.get(trigger=force_withdraw_trigger)
assert persisted_action.comment == expected_comment
```
#### File: osf/models/registrations.py
```python
import logging
import datetime
import html
from future.moves.urllib.parse import urljoin
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from guardian.models import (
GroupObjectPermissionBase,
UserObjectPermissionBase,
)
from dirtyfields import DirtyFieldsMixin
from framework.auth import Auth
from framework.exceptions import PermissionsError
from osf.utils.fields import NonNaiveDateTimeField
from osf.utils.permissions import ADMIN, READ, WRITE
from osf.exceptions import NodeStateError, DraftRegistrationStateError
from website.util import api_v2_url
from website import settings
from website.archiver import ARCHIVER_INITIATED
from osf.metrics import RegistriesModerationMetrics
from osf.models import (
Embargo,
EmbargoTerminationApproval,
DraftRegistrationApproval,
DraftRegistrationContributor,
Node,
OSFUser,
RegistrationApproval,
RegistrationSchema,
Retraction,
)
from osf.models.action import RegistrationAction
from osf.models.archive import ArchiveJob
from osf.models.base import BaseModel, ObjectIDMixin
from osf.models.draft_node import DraftNode
from osf.models.node import AbstractNode
from osf.models.mixins import (
EditableFieldsMixin,
Loggable,
GuardianMixin,
)
from osf.models.nodelog import NodeLog
from osf.models.provider import RegistrationProvider
from osf.models.mixins import RegistrationResponseMixin
from osf.models.tag import Tag
from osf.models.validators import validate_title
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.utils.workflows import (
RegistrationModerationStates,
RegistrationModerationTriggers,
SanctionStates,
SanctionTypes
)
import osf.utils.notifications as notify
logger = logging.getLogger(__name__)
class Registration(AbstractNode):
WRITABLE_WHITELIST = [
'article_doi',
'description',
'is_public',
'node_license',
'category',
]
provider = models.ForeignKey(
'RegistrationProvider',
related_name='registrations',
null=True,
on_delete=models.SET_NULL
)
registered_date = NonNaiveDateTimeField(db_index=True, null=True, blank=True)
# This is a NullBooleanField because of inheritance issues with using a BooleanField
# TODO: Update to BooleanField(default=False, null=True) when Django is updated to >=2.1
external_registration = models.NullBooleanField(default=False)
registered_user = models.ForeignKey(OSFUser,
related_name='related_to',
on_delete=models.SET_NULL,
null=True, blank=True)
# TODO: Consider making this a FK, as there can be one per Registration
registered_schema = models.ManyToManyField(RegistrationSchema)
registered_meta = DateTimeAwareJSONField(default=dict, blank=True)
registered_from = models.ForeignKey('self',
related_name='registrations',
on_delete=models.SET_NULL,
null=True, blank=True)
# Sanctions
registration_approval = models.ForeignKey('RegistrationApproval',
related_name='registrations',
null=True, blank=True,
on_delete=models.SET_NULL)
retraction = models.ForeignKey('Retraction',
related_name='registrations',
null=True, blank=True,
on_delete=models.SET_NULL)
embargo = models.ForeignKey('Embargo',
related_name='registrations',
null=True, blank=True,
on_delete=models.SET_NULL)
embargo_termination_approval = models.ForeignKey('EmbargoTerminationApproval',
related_name='registrations',
null=True, blank=True,
on_delete=models.SET_NULL)
files_count = models.PositiveIntegerField(blank=True, null=True)
moderation_state = models.CharField(
max_length=30,
choices=RegistrationModerationStates.char_field_choices(),
default=RegistrationModerationStates.INITIAL.db_name
)
@staticmethod
def find_failed_registrations():
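        # A registration is considered stuck if its archive job was initiated
        # before the timeout window and is still in the ARCHIVER_INITIATED state.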
expired_if_before = timezone.now() - settings.ARCHIVE_TIMEOUT_TIMEDELTA
node_id_list = ArchiveJob.objects.filter(sent=False, datetime_initiated__lt=expired_if_before, status=ARCHIVER_INITIATED).values_list('dst_node', flat=True)
root_nodes_id = AbstractNode.objects.filter(id__in=node_id_list).values_list('root', flat=True).distinct()
stuck_regs = AbstractNode.objects.filter(id__in=root_nodes_id, is_deleted=False)
return stuck_regs
@property
def registration_schema(self):
# For use in RegistrationResponseMixin
if self.registered_schema.exists():
return self.registered_schema.first()
return None
def get_registration_metadata(self, schema):
# Overrides RegistrationResponseMixin
registered_meta = self.registered_meta or {}
return registered_meta.get(schema._id, None)
@property
def file_storage_resource(self):
# Overrides RegistrationResponseMixin
return self.registered_from
@property
def registered_schema_id(self):
schema = self.registration_schema
return schema._id if schema else None
@property
def is_registration(self):
"""For v1 compat."""
return True
@property
def is_stuck_registration(self):
return self in self.find_failed_registrations()
@property
def is_collection(self):
"""For v1 compat."""
return False
@property
def archive_job(self):
return self.archive_jobs.first() if self.archive_jobs.count() else None
@property
def sanction(self):
root = self._dirty_root
sanction = (
root.retraction or
root.embargo_termination_approval or
root.embargo or
root.registration_approval
)
if sanction:
return sanction
else:
return None
@property
def is_registration_approved(self):
root = self._dirty_root
if root.registration_approval is None:
return False
return root.registration_approval.is_approved
@property
def is_pending_embargo(self):
root = self._dirty_root
if root.embargo is None:
return False
return root.embargo.is_pending_approval
@property
def is_pending_embargo_for_existing_registration(self):
""" Returns True if Node has an Embargo pending approval for an
        existing registration. This is used specifically to ensure
registrations pre-dating the Embargo feature do not get deleted if
their respective Embargo request is rejected.
"""
root = self._dirty_root
if root.embargo is None:
return False
return root.embargo.pending_registration
@property
def is_retracted(self):
root = self._dirty_root
if root.retraction is None:
return False
return root.retraction.is_approved
@property
def is_pending_registration(self):
root = self._dirty_root
if root.registration_approval is None:
return False
return root.registration_approval.is_pending_approval
@property
def is_pending_retraction(self):
root = self._dirty_root
if root.retraction is None:
return False
return root.retraction.is_pending_approval
@property
def is_pending_embargo_termination(self):
root = self._dirty_root
if root.embargo_termination_approval is None:
return False
return root.embargo_termination_approval.is_pending_approval
@property
def is_embargoed(self):
"""A Node is embargoed if:
- it has an associated Embargo record
- that record has been approved
- the node is not public (embargo not yet lifted)
"""
root = self._dirty_root
if root.is_public or root.embargo is None:
return False
return root.embargo.is_approved
@property
def embargo_end_date(self):
root = self._dirty_root
if root.embargo is None:
return False
return root.embargo.embargo_end_date
@property
def archiving(self):
job = self.archive_job
return job and not job.done and not job.archive_tree_finished()
@property
def is_moderated(self):
if not self.provider:
return False
return self.provider.is_reviewed
@property
def _dirty_root(self):
"""Equivalent to `self.root`, but don't let Django fetch a clean copy
when `self == self.root`. Use when it's important to reflect unsaved
state rather than database state.
"""
if self.id == self.root_id:
return self
return self.root
    @property
    def date_withdrawn(self):
return getattr(self.root.retraction, 'date_retracted', None)
@property
def withdrawal_justification(self):
return getattr(self.root.retraction, 'justification', None)
def can_view(self, auth):
if super().can_view(auth):
return True
if not auth or not auth.user or not self.is_moderated:
return False
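        # Moderators of this registration's provider may view it while it is in
        # a pending or embargoed state, even though it is not public.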
moderator_viewable_states = {
RegistrationModerationStates.PENDING.db_name,
RegistrationModerationStates.PENDING_WITHDRAW.db_name,
RegistrationModerationStates.EMBARGO.db_name,
RegistrationModerationStates.PENDING_EMBARGO_TERMINATION.db_name,
}
user_is_moderator = auth.user.has_perm('view_submissions', self.provider)
if self.moderation_state in moderator_viewable_states and user_is_moderator:
return True
return False
def _initiate_approval(self, user, notify_initiator_on_complete=False):
end_date = timezone.now() + settings.REGISTRATION_APPROVAL_TIME
self.registration_approval = RegistrationApproval.objects.create(
initiated_by=user,
end_date=end_date,
notify_initiator_on_complete=notify_initiator_on_complete
)
self.save() # Set foreign field reference Node.registration_approval
admins = self.get_admin_contributors_recursive(unique_users=True)
for (admin, node) in admins:
self.registration_approval.add_authorizer(admin, node=node)
self.registration_approval.save() # Save approval's approval_state
return self.registration_approval
def require_approval(self, user, notify_initiator_on_complete=False):
if not self.is_registration:
raise NodeStateError('Only registrations can require registration approval')
if not self.is_admin_contributor(user):
raise PermissionsError('Only admins can initiate a registration approval')
approval = self._initiate_approval(user, notify_initiator_on_complete)
self.registered_from.add_log(
action=NodeLog.REGISTRATION_APPROVAL_INITIATED,
params={
'node': self.registered_from._id,
'registration': self._id,
'registration_approval_id': approval._id,
},
auth=Auth(user),
save=True,
)
self.update_moderation_state()
def _initiate_embargo(self, user, end_date, for_existing_registration=False,
notify_initiator_on_complete=False):
"""Initiates the retraction process for a registration
:param user: User who initiated the retraction
:param end_date: Date when the registration should be made public
"""
end_date_midnight = datetime.datetime.combine(
end_date,
datetime.datetime.min.time()
).replace(tzinfo=end_date.tzinfo)
self.embargo = Embargo.objects.create(
initiated_by=user,
end_date=end_date_midnight,
for_existing_registration=for_existing_registration,
notify_initiator_on_complete=notify_initiator_on_complete
)
self.update_moderation_state()
self.save() # Set foreign field reference Node.embargo
admins = self.get_admin_contributors_recursive(unique_users=True)
for (admin, node) in admins:
self.embargo.add_authorizer(admin, node)
self.embargo.save() # Save embargo's approval_state
return self.embargo
def embargo_registration(self, user, end_date, for_existing_registration=False,
notify_initiator_on_complete=False):
"""Enter registration into an embargo period at end of which, it will
be made public
:param user: User initiating the embargo
:param end_date: Date when the registration should be made public
:raises: NodeStateError if Node is not a registration
:raises: PermissionsError if user is not an admin for the Node
:raises: ValidationError if end_date is not within time constraints
"""
if not self.is_admin_contributor(user) and not user.has_perm('accept_submissions', self.provider):
raise PermissionsError('Only admins may embargo a registration')
if not self._is_embargo_date_valid(end_date):
if (end_date - timezone.now()) >= settings.EMBARGO_END_DATE_MIN:
raise ValidationError('Registrations can only be embargoed for up to four years.')
raise ValidationError('Embargo end date must be at least three days in the future.')
self.embargo = self._initiate_embargo(user, end_date,
for_existing_registration=for_existing_registration,
notify_initiator_on_complete=notify_initiator_on_complete)
self.registered_from.add_log(
action=NodeLog.EMBARGO_INITIATED,
params={
'node': self.registered_from._id,
'registration': self._id,
'embargo_id': self.embargo._id,
},
auth=Auth(user),
save=True,
)
if self.is_public:
self.set_privacy('private', Auth(user))
def request_embargo_termination(self, user):
"""Initiates an EmbargoTerminationApproval to lift this Embargoed Registration's
embargo early."""
if not self.is_embargoed:
raise NodeStateError('This node is not under active embargo')
if not self.root == self:
raise NodeStateError('Only the root of an embargoed registration can request termination')
approval = EmbargoTerminationApproval(
initiated_by=user,
embargoed_registration=self,
)
admins = [admin for admin in self.root.get_admin_contributors_recursive(unique_users=True)]
for (admin, node) in admins:
approval.add_authorizer(admin, node=node)
approval.save()
approval.ask(admins)
self.embargo_termination_approval = approval
self.update_moderation_state()
self.save()
return approval
def terminate_embargo(self, forced=False):
"""Handles the completion of an Embargoed registration.
Adds a log to the registered_from Node.
:param bool forced: False if the embargo is expiring,
True if the embargo is being terminated early
"""
if not self.is_embargoed:
raise NodeStateError('This node is not under active embargo')
action = NodeLog.EMBARGO_COMPLETED if not forced else NodeLog.EMBARGO_TERMINATED
self.registered_from.add_log(
action=action,
params={
'project': self._id,
'node': self.registered_from._id,
'registration': self._id,
},
auth=None,
save=True
)
self.embargo.mark_as_completed()
for node in self.node_and_primary_descendants():
node.set_privacy(
self.PUBLIC,
auth=None,
log=False,
save=True
)
return True
def get_contributor_registration_response_keys(self):
"""
Returns the keys of the supplemental responses whose answers
contain author information
:returns QuerySet
"""
return self.registration_schema.schema_blocks.filter(
block_type='contributors-input', registration_response_key__isnull=False,
).values_list('registration_response_key', flat=True)
def copy_registered_meta_and_registration_responses(self, draft, save=True):
"""
Sets the registration's registered_meta and registration_responses from the draft.
If contributor information is in a question, build an accurate bibliographic
contributors list on the registration
"""
if not self.registered_meta:
self.registered_meta = {}
registration_metadata = draft.registration_metadata
registration_responses = draft.registration_responses
bibliographic_contributors = ', '.join(
draft.branched_from.visible_contributors.values_list('fullname', flat=True)
)
contributor_keys = self.get_contributor_registration_response_keys()
for key in contributor_keys:
if key in registration_metadata:
registration_metadata[key]['value'] = bibliographic_contributors
if key in registration_responses:
registration_responses[key] = bibliographic_contributors
self.registered_meta[self.registration_schema._id] = registration_metadata
self.registration_responses = registration_responses
if save:
self.save()
def _initiate_retraction(self, user, justification=None, moderator_initiated=False):
"""Initiates the retraction process for a registration
:param user: User who initiated the retraction
:param justification: Justification, if given, for retraction
"""
self.retraction = Retraction.objects.create(
initiated_by=user,
justification=justification or None, # make empty strings None
state=Retraction.UNAPPROVED,
)
self.save()
if not moderator_initiated:
admins = self.get_admin_contributors_recursive(unique_users=True)
for (admin, node) in admins:
self.retraction.add_authorizer(admin, node)
self.retraction.save() # Save retraction approval state
return self.retraction
def retract_registration(self, user, justification=None, save=True, moderator_initiated=False):
"""Retract public registration. Instantiate new Retraction object
and associate it with the respective registration.
"""
if not self.is_public and not (self.embargo_end_date or self.is_pending_embargo):
raise NodeStateError('Only public or embargoed registrations may be withdrawn.')
if self.root_id != self.id:
raise NodeStateError('Withdrawal of non-parent registrations is not permitted.')
if moderator_initiated:
justification = 'Force withdrawn by moderator: ' + justification
if not self.is_moderated:
raise ValueError('Forced retraction is only supported for moderated registrations.')
if not user.has_perm('withdraw_submissions', self.provider):
raise PermissionsError(
f'User {user} does not have moderator privileges on Provider {self.provider}')
retraction = self._initiate_retraction(
user, justification, moderator_initiated=moderator_initiated)
self.retraction = retraction
self.registered_from.add_log(
action=NodeLog.RETRACTION_INITIATED,
params={
'node': self.registered_from._id,
'registration': self._id,
'retraction_id': retraction._id,
},
auth=Auth(user),
)
# Automatically accept moderator_initiated retractions
if moderator_initiated:
self.retraction.approval_stage = SanctionStates.PENDING_MODERATION
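            # Skip the contributor-approval stage; the moderator's accept below
            # completes the withdrawal immediately.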
self.retraction.accept(user=user, comment=justification)
self.refresh_from_db() # grab updated state
if save:
self.update_moderation_state()
self.save()
return retraction
def delete_registration_tree(self, save=False):
logger.debug('Marking registration {} as deleted'.format(self._id))
self.is_deleted = True
self.deleted = timezone.now()
for draft_registration in DraftRegistration.objects.filter(registered_node=self):
# Allow draft registration to be submitted
if draft_registration.approval:
draft_registration.approval = None
draft_registration.save()
if not getattr(self.embargo, 'for_existing_registration', False):
self.registered_from = None
if save:
self.save()
self.update_search()
for child in self.nodes_primary:
child.delete_registration_tree(save=save)
def update_files_count(self):
# Updates registration files_count at archival success or
# at the end of forced (manual) archive for restarted (stuck or failed) registrations.
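        # Temporarily disable auto_now on 'modified' so this bookkeeping save
        # does not bump the registration's last-modified timestamp.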
field = AbstractNode._meta.get_field('modified')
field.auto_now = False
self.files_count = self.files.filter(deleted_on__isnull=True).count()
self.save()
field.auto_now = True
def update_moderation_state(self, initiated_by=None, comment=''):
'''Derive the RegistrationModerationState from the state of the active sanction.
:param models.User initiated_by: The user who initiated the state change;
used in reporting actions.
:param str comment: Any comment moderator comment associated with the state change;
used in reporting Actions.
'''
from_state = RegistrationModerationStates.from_db_name(self.moderation_state)
active_sanction = self.sanction
if active_sanction is None: # Registration is ACCEPTED if there are no active sanctions.
to_state = RegistrationModerationStates.ACCEPTED
else:
to_state = RegistrationModerationStates.from_sanction(active_sanction)
if to_state is RegistrationModerationStates.UNDEFINED:
# An UNDEFINED state is expected from a rejected retraction.
# In other cases, report the error.
if active_sanction.SANCTION_TYPE is not SanctionTypes.RETRACTION:
logger.warning(
'Could not update moderation state from unsupported sanction/state '
'combination {sanction}.{state}'.format(
sanction=active_sanction.SANCTION_TYPE,
state=active_sanction.approval_stage.name)
)
# Use other underlying sanctions to compute the state
if self.embargo:
to_state = RegistrationModerationStates.from_sanction(self.embargo)
elif self.registration_approval:
to_state = RegistrationModerationStates.from_sanction(self.registration_approval)
else:
to_state = RegistrationModerationStates.ACCEPTED
self._write_registration_action(from_state, to_state, initiated_by, comment)
self.moderation_state = to_state.db_name
self.save()
def _write_registration_action(self, from_state, to_state, initiated_by, comment):
'''Write a new RegistrationAction on relevant state transitions.'''
trigger = RegistrationModerationTriggers.from_transition(from_state, to_state)
if trigger is None:
return # Not a moderated event, no need to write an action
initiated_by = initiated_by or self.sanction.initiated_by
if not comment and trigger is RegistrationModerationTriggers.REQUEST_WITHDRAWAL:
comment = self.withdrawal_justification or '' # Withdrawal justification is null by default
action = RegistrationAction.objects.create(
target=self,
creator=initiated_by,
trigger=trigger.db_name,
from_state=from_state.db_name,
to_state=to_state.db_name,
comment=comment
)
action.save()
RegistriesModerationMetrics.record_transitions(action)
moderation_notifications = {
RegistrationModerationTriggers.SUBMIT: notify.notify_submit,
RegistrationModerationTriggers.ACCEPT_SUBMISSION: notify.notify_accept_reject,
RegistrationModerationTriggers.REJECT_SUBMISSION: notify.notify_accept_reject,
RegistrationModerationTriggers.REQUEST_WITHDRAWAL: notify.notify_moderator_registration_requests_withdrawal,
RegistrationModerationTriggers.REJECT_WITHDRAWAL: notify.notify_reject_withdraw_request,
RegistrationModerationTriggers.ACCEPT_WITHDRAWAL: notify.notify_withdraw_registration,
RegistrationModerationTriggers.FORCE_WITHDRAW: notify.notify_withdraw_registration,
}
notification = moderation_notifications.get(trigger)
if notification:
notification(
resource=self,
user=initiated_by,
action=action,
states=RegistrationModerationStates
)
def add_tag(self, tag, auth=None, save=True, log=True, system=False):
if self.retraction is None:
super(Registration, self).add_tag(tag, auth, save, log, system)
else:
raise NodeStateError('Cannot add tags to withdrawn registrations.')
def add_tags(self, tags, auth=None, save=True, log=True, system=False):
if self.retraction is None:
super(Registration, self).add_tags(tags, auth, save, log, system)
else:
raise NodeStateError('Cannot add tags to withdrawn registrations.')
def remove_tag(self, tag, auth, save=True):
if self.retraction is None:
super(Registration, self).remove_tag(tag, auth, save)
else:
raise NodeStateError('Cannot remove tags of withdrawn registrations.')
def remove_tags(self, tags, auth, save=True):
if self.retraction is None:
super(Registration, self).remove_tags(tags, auth, save)
else:
raise NodeStateError('Cannot remove tags of withdrawn registrations.')
class Meta:
# custom permissions for use in the OSF Admin App
permissions = (
('view_registration', 'Can view registration details'),
)
class DraftRegistrationLog(ObjectIDMixin, BaseModel):
""" Simple log to show status changes for DraftRegistrations
Also, editable fields on registrations are logged.
field - _id - primary key
field - date - date of the action took place
field - action - simple action to track what happened
field - user - user who did the action
"""
date = NonNaiveDateTimeField(default=timezone.now)
action = models.CharField(max_length=255)
draft = models.ForeignKey('DraftRegistration', related_name='logs',
null=True, blank=True, on_delete=models.CASCADE)
user = models.ForeignKey('OSFUser', db_index=True, null=True, blank=True, on_delete=models.CASCADE)
params = DateTimeAwareJSONField(default=dict)
SUBMITTED = 'submitted'
REGISTERED = 'registered'
APPROVED = 'approved'
REJECTED = 'rejected'
EDITED_TITLE = 'edit_title'
EDITED_DESCRIPTION = 'edit_description'
CATEGORY_UPDATED = 'category_updated'
CONTRIB_ADDED = 'contributor_added'
CONTRIB_REMOVED = 'contributor_removed'
CONTRIB_REORDERED = 'contributors_reordered'
PERMISSIONS_UPDATED = 'permissions_updated'
MADE_CONTRIBUTOR_VISIBLE = 'made_contributor_visible'
MADE_CONTRIBUTOR_INVISIBLE = 'made_contributor_invisible'
AFFILIATED_INSTITUTION_ADDED = 'affiliated_institution_added'
AFFILIATED_INSTITUTION_REMOVED = 'affiliated_institution_removed'
CHANGED_LICENSE = 'license_changed'
TAG_ADDED = 'tag_added'
TAG_REMOVED = 'tag_removed'
def __repr__(self):
return ('<DraftRegistrationLog({self.action!r}, date={self.date!r}), '
'user={self.user!r} '
'with id {self._id!r}>').format(self=self)
class Meta:
ordering = ['-created']
get_latest_by = 'created'
def get_default_id():
from django.apps import apps
RegistrationProvider = apps.get_model('osf', 'RegistrationProvider')
return RegistrationProvider.get_default().id
class DraftRegistration(ObjectIDMixin, RegistrationResponseMixin, DirtyFieldsMixin,
BaseModel, Loggable, EditableFieldsMixin, GuardianMixin):
# Fields that are writable by DraftRegistration.update
WRITABLE_WHITELIST = [
'title',
'description',
'category',
'node_license',
]
URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/drafts/{draft_id}'
# Overrides EditableFieldsMixin to make title not required
title = models.TextField(validators=[validate_title], blank=True, default='')
_contributors = models.ManyToManyField(OSFUser,
through=DraftRegistrationContributor,
related_name='draft_registrations')
affiliated_institutions = models.ManyToManyField('Institution', related_name='draft_registrations')
node_license = models.ForeignKey('NodeLicenseRecord', related_name='draft_registrations',
on_delete=models.SET_NULL, null=True, blank=True)
datetime_initiated = NonNaiveDateTimeField(auto_now_add=True)
datetime_updated = NonNaiveDateTimeField(auto_now=True)
deleted = NonNaiveDateTimeField(null=True, blank=True)
# Original Node a draft registration is associated with
branched_from = models.ForeignKey('AbstractNode', related_name='registered_draft',
null=True, on_delete=models.CASCADE)
initiator = models.ForeignKey('OSFUser', null=True, on_delete=models.CASCADE)
provider = models.ForeignKey(
'RegistrationProvider',
related_name='draft_registrations',
null=False,
on_delete=models.CASCADE,
default=get_default_id,
)
# Dictionary field mapping question id to a question's comments and answer
# {
# <qid>: {
# 'comments': [{
# 'user': {
# 'id': <uid>,
# 'name': <name>
# },
# value: <value>,
# lastModified: <datetime>
# }],
# 'value': <value>
# }
# }
registration_metadata = DateTimeAwareJSONField(default=dict, blank=True)
registration_schema = models.ForeignKey('RegistrationSchema', null=True, on_delete=models.CASCADE)
registered_node = models.ForeignKey('Registration', null=True, blank=True,
related_name='draft_registration', on_delete=models.CASCADE)
approval = models.ForeignKey('DraftRegistrationApproval', null=True, blank=True, on_delete=models.CASCADE)
# Dictionary field mapping extra fields defined in the RegistrationSchema.schema to their
# values. Defaults should be provided in the schema (e.g. 'paymentSent': false),
# and these values are added to the DraftRegistration
# TODO: Use "FIELD_ALIASES"?
_metaschema_flags = DateTimeAwareJSONField(default=dict, blank=True)
notes = models.TextField(blank=True)
# For ContributorMixin
guardian_object_type = 'draft_registration'
READ_DRAFT_REGISTRATION = 'read_{}'.format(guardian_object_type)
WRITE_DRAFT_REGISTRATION = 'write_{}'.format(guardian_object_type)
ADMIN_DRAFT_REGISTRATION = 'admin_{}'.format(guardian_object_type)
# For ContributorMixin
base_perms = [READ_DRAFT_REGISTRATION, WRITE_DRAFT_REGISTRATION, ADMIN_DRAFT_REGISTRATION]
groups = {
'read': (READ_DRAFT_REGISTRATION,),
'write': (READ_DRAFT_REGISTRATION, WRITE_DRAFT_REGISTRATION,),
'admin': (READ_DRAFT_REGISTRATION, WRITE_DRAFT_REGISTRATION, ADMIN_DRAFT_REGISTRATION,)
}
group_format = 'draft_registration_{self.id}_{group}'
class Meta:
permissions = (
('read_draft_registration', 'Can read the draft registration'),
('write_draft_registration', 'Can edit the draft registration'),
('admin_draft_registration', 'Can manage the draft registration'),
)
def __repr__(self):
return ('<DraftRegistration(branched_from={self.branched_from!r}) '
'with id {self._id!r}>').format(self=self)
def get_registration_metadata(self, schema):
# Overrides RegistrationResponseMixin
return self.registration_metadata
@property
def file_storage_resource(self):
# Overrides RegistrationResponseMixin
return self.branched_from
# lazily set flags
@property
def flags(self):
if not self._metaschema_flags:
self._metaschema_flags = {}
meta_schema = self.registration_schema
if meta_schema:
schema = meta_schema.schema
flags = schema.get('flags', {})
dirty = False
for flag, value in flags.items():
if flag not in self._metaschema_flags:
self._metaschema_flags[flag] = value
dirty = True
if dirty:
self.save()
return self._metaschema_flags
@flags.setter
def flags(self, flags):
self._metaschema_flags.update(flags)
@property
def branched_from_type(self):
if isinstance(self.branched_from, (DraftNode, Node)):
return self.branched_from.__class__.__name__
else:
raise DraftRegistrationStateError
@property
def url(self):
return self.URL_TEMPLATE.format(
node_id=self.branched_from._id,
draft_id=self._id
)
@property
def _primary_key(self):
return self._id
@property
def absolute_url(self):
return urljoin(settings.DOMAIN, self.url)
@property
def absolute_api_v2_url(self):
        # Old draft registration URL - use new endpoints, through draft registration
node = self.branched_from
branched_type = self.branched_from_type
if branched_type == 'DraftNode':
path = '/draft_registrations/{}/'.format(self._id)
elif branched_type == 'Node':
path = '/nodes/{}/draft_registrations/{}/'.format(node._id, self._id)
return api_v2_url(path)
# used by django and DRF
def get_absolute_url(self):
return self.absolute_api_v2_url
@property
def requires_approval(self):
return self.registration_schema.requires_approval
@property
def is_pending_review(self):
return self.approval.is_pending_approval if (self.requires_approval and self.approval) else False
@property
def is_approved(self):
if self.requires_approval:
if not self.approval:
return bool(self.registered_node)
else:
return self.approval.is_approved
else:
return False
@property
def is_rejected(self):
if self.requires_approval:
if not self.approval:
return False
else:
return self.approval.is_rejected
else:
return False
@property
def status_logs(self):
""" List of logs associated with this node"""
return self.logs.all().order_by('date')
@property
def log_class(self):
# Override for EditableFieldsMixin
return DraftRegistrationLog
@property
def state_error(self):
# Override for ContributorMixin
return DraftRegistrationStateError
@property
def contributor_class(self):
# Override for ContributorMixin
return DraftRegistrationContributor
def get_contributor_order(self):
# Method needed for ContributorMixin
return self.get_draftregistrationcontributor_order()
def set_contributor_order(self, contributor_ids):
# Method needed for ContributorMixin
return self.set_draftregistrationcontributor_order(contributor_ids)
@property
def contributor_kwargs(self):
# Override for ContributorMixin
return {'draft_registration': self}
@property
def contributor_set(self):
# Override for ContributorMixin
return self.draftregistrationcontributor_set
@property
def order_by_contributor_field(self):
# Property needed for ContributorMixin
return 'draftregistrationcontributor___order'
@property
def admin_contributor_or_group_member_ids(self):
# Overrides ContributorMixin
# Draft Registrations don't have parents or group members at the moment, so this is just admin group member ids
# Called when removing project subscriptions
return self.get_group(ADMIN).user_set.filter(is_active=True).values_list('guids___id', flat=True)
@property
def creator(self):
# Convenience property for testing contributor methods, which are
# shared with other items that have creators
return self.initiator
@property
def is_public(self):
# Convenience property for sharing code with nodes
return False
@property
def log_params(self):
# Override for EditableFieldsMixin
return {
'draft_registration': self._id,
}
@property
def visible_contributors(self):
# Override for ContributorMixin
return OSFUser.objects.filter(
draftregistrationcontributor__draft_registration=self,
draftregistrationcontributor__visible=True
).order_by(self.order_by_contributor_field)
@property
def contributor_email_template(self):
# Override for ContributorMixin
return 'draft_registration'
@property
def institutions_url(self):
# For NodeInstitutionsRelationshipSerializer
path = '/draft_registrations/{}/institutions/'.format(self._id)
return api_v2_url(path)
@property
def institutions_relationship_url(self):
# For NodeInstitutionsRelationshipSerializer
path = '/draft_registrations/{}/relationships/institutions/'.format(self._id)
return api_v2_url(path)
def update_search(self):
# Override for AffiliatedInstitutionMixin, not sending DraftRegs to search
pass
def can_view(self, auth):
"""Does the user have permission to view the draft registration?
Checking permissions directly on the draft, not the node.
"""
if not auth:
return False
return auth.user and self.has_permission(auth.user, READ)
def can_edit(self, auth=None, user=None):
"""Return if a user is authorized to edit this draft_registration.
Must specify one of (`auth`, `user`).
:param Auth auth: Auth object to check
:param User user: User object to check
:returns: Whether user has permission to edit this draft_registration.
"""
if not auth and not user:
raise ValueError('Must pass either `auth` or `user`')
if auth and user:
raise ValueError('Cannot pass both `auth` and `user`')
user = user or auth.user
return (user and self.has_permission(user, WRITE))
def get_addons(self):
# Override for ContributorMixin, Draft Registrations don't have addons
return []
# Override Taggable
def add_tag_log(self, tag, auth):
self.add_log(
action=DraftRegistrationLog.TAG_ADDED,
params={
'draft_registration': self._id,
'tag': tag.name
},
auth=auth,
save=False
)
@property
def license(self):
if self.node_license_id:
return self.node_license
return None
@property
def all_tags(self):
"""Return a queryset containing all of this draft's tags (incl. system tags)."""
# Tag's default manager only returns non-system tags, so we can't use self.tags
return Tag.all_tags.filter(draftregistration_tagged=self)
@property
def system_tags(self):
"""The system tags associated with this draft registration. This currently returns a list of string
names for the tags, for compatibility with v1. Eventually, we can just return the
QuerySet.
"""
return self.all_tags.filter(system=True).values_list('name', flat=True)
@classmethod
def create_from_node(cls, user, schema, node=None, data=None, provider=None):
if not provider:
provider = RegistrationProvider.get_default()
if provider.is_default:
# If the default provider doesn't have schemas specified yet, allow all schemas
if provider.schemas.exists():
provider.validate_schema(schema)
else:
provider.validate_schema(schema)
if not node:
# If no node provided, a DraftNode is created for you
node = DraftNode.objects.create(creator=user, title='Untitled')
if not (isinstance(node, Node) or isinstance(node, DraftNode)):
raise DraftRegistrationStateError()
draft = cls(
initiator=user,
branched_from=node,
registration_schema=schema,
registration_metadata=data or {},
provider=provider,
)
draft.save()
draft.copy_editable_fields(node, Auth(user), save=True, contributors=False)
draft.update(data)
return draft
def get_root(self):
return self
def copy_contributors_from(self, resource):
"""
        Copies the contributors from the resource (including permissions and visibility)
into this draft registration.
Visibility, order, draft, and user are stored in DraftRegistrationContributor table.
Permissions are stored in guardian tables (use add_permission)
"""
contribs = []
current_contributors = self.contributor_set.values_list('user_id', flat=True)
for contrib in resource.contributor_set.all():
if contrib.user.id not in current_contributors:
permission = contrib.permission
new_contrib = DraftRegistrationContributor(
draft_registration=self,
_order=contrib._order,
visible=contrib.visible,
user=contrib.user
)
contribs.append(new_contrib)
self.add_permission(contrib.user, permission, save=True)
DraftRegistrationContributor.objects.bulk_create(contribs)
def update_metadata(self, metadata):
changes = []
# Prevent comments on approved drafts
if not self.is_approved:
for question_id, value in metadata.items():
old_value = self.registration_metadata.get(question_id)
if old_value:
old_comments = {
comment['created']: comment
for comment in old_value.get('comments', [])
}
new_comments = {
comment['created']: comment
for comment in value.get('comments', [])
}
old_comments.update(new_comments)
metadata[question_id]['comments'] = sorted(
old_comments.values(),
key=lambda c: c['created']
)
if old_value.get('value') != value.get('value'):
changes.append(question_id)
else:
changes.append(question_id)
self.registration_metadata.update(metadata)
# Write to registration_responses also (new workflow)
registration_responses = self.flatten_registration_metadata()
self.registration_responses.update(registration_responses)
return changes
def update_registration_responses(self, registration_responses):
"""
New workflow - update_registration_responses. This should have been
validated before this method is called. If writing to registration_responses
field, persist the expanded version of this to Draft.registration_metadata.
"""
registration_responses = self.unescape_registration_file_names(registration_responses)
self.registration_responses.update(registration_responses)
registration_metadata = self.expand_registration_responses()
self.registration_metadata = registration_metadata
return
def unescape_registration_file_names(self, registration_responses):
if registration_responses.get('uploader', []):
for upload in registration_responses.get('uploader', []):
upload['file_name'] = html.unescape(upload['file_name'])
return registration_responses
def submit_for_review(self, initiated_by, meta, save=False):
approval = DraftRegistrationApproval(
meta=meta
)
approval.save()
self.approval = approval
self.add_status_log(initiated_by, DraftRegistrationLog.SUBMITTED)
if save:
self.save()
def register(self, auth, save=False, child_ids=None):
node = self.branched_from
if not self.title:
raise NodeStateError('Draft Registration must have title to be registered')
# Create the registration
registration = node.register_node(
schema=self.registration_schema,
auth=auth,
draft_registration=self,
child_ids=child_ids,
provider=self.provider
)
self.registered_node = registration
self.add_status_log(auth.user, DraftRegistrationLog.REGISTERED)
self.copy_contributors_from(node)
if save:
self.save()
registration.save()
return registration
def approve(self, user):
self.approval.approve(user)
self.refresh_from_db()
self.add_status_log(user, DraftRegistrationLog.APPROVED)
self.approval.save()
def reject(self, user):
self.approval.reject(user)
self.add_status_log(user, DraftRegistrationLog.REJECTED)
self.approval.save()
def add_status_log(self, user, action):
params = {
'draft_registration': self._id,
        }
log = DraftRegistrationLog(action=action, user=user, draft=self, params=params)
log.save()
def validate_metadata(self, *args, **kwargs):
"""
Validates draft's metadata
"""
return self.registration_schema.validate_metadata(*args, **kwargs)
def validate_registration_responses(self, *args, **kwargs):
"""
Validates draft's registration_responses
"""
return self.registration_schema.validate_registration_responses(*args, **kwargs)
def add_log(self, action, params, auth, save=True):
"""
Tentative - probably need to combine with add_status_log
"""
user = auth.user if auth else None
params['draft_registration'] = params.get('draft_registration') or self._id
log = DraftRegistrationLog(
action=action, user=user,
params=params, draft=self
)
log.save()
return log
# Overrides ContributorMixin
def _add_related_source_tags(self, contributor):
# The related source tag behavior for draft registration is currently undefined
# Therefore we don't add any source tags to it
pass
def save(self, *args, **kwargs):
        if 'old_subjects' in kwargs:
kwargs.pop('old_subjects')
return super(DraftRegistration, self).save(*args, **kwargs)
def update(self, fields, auth=None, save=True):
"""Update the draft registration with the given fields.
:param dict fields: Dictionary of field_name:value pairs.
:param Auth auth: Auth object for the user making the update.
:param bool save: Whether to save after updating the object.
"""
if not fields: # Bail out early if there are no fields to update
return False
for key, value in fields.items():
if key not in self.WRITABLE_WHITELIST:
continue
if key == 'title':
self.set_title(title=value, auth=auth, save=False, allow_blank=True)
elif key == 'description':
self.set_description(description=value, auth=auth, save=False)
elif key == 'category':
self.set_category(category=value, auth=auth, save=False)
elif key == 'node_license':
self.set_node_license(
{
'id': value.get('id'),
'year': value.get('year'),
'copyrightHolders': value.get('copyrightHolders') or value.get('copyright_holders', [])
},
auth,
save=save
)
if save:
updated = self.get_dirty_fields()
self.save()
return updated
class DraftRegistrationUserObjectPermission(UserObjectPermissionBase):
"""
Direct Foreign Key Table for guardian - User models - we typically add object
perms directly to Django groups instead of users, so this will be used infrequently
"""
content_object = models.ForeignKey(DraftRegistration, on_delete=models.CASCADE)
class DraftRegistrationGroupObjectPermission(GroupObjectPermissionBase):
"""
Direct Foreign Key Table for guardian - Group models. Makes permission checks faster.
This table gives a Django group a particular permission to a DraftRegistration.
For example, every time a draft reg is created, an admin, write, and read Django group
are created for the draft reg. The "write" group has write/read perms to the draft reg.
Those links are stored here: content_object_id (draft_registration_id), group_id, permission_id
"""
content_object = models.ForeignKey(DraftRegistration, on_delete=models.CASCADE)
@receiver(post_save, sender='osf.DraftRegistration')
def create_django_groups_for_draft_registration(sender, instance, created, **kwargs):
if created:
instance.update_group_permissions()
initiator = instance.initiator
if instance.branched_from.contributor_set.filter(user=initiator).exists():
initiator_node_contributor = instance.branched_from.contributor_set.get(user=initiator)
initiator_visibility = initiator_node_contributor.visible
initiator_order = initiator_node_contributor._order
DraftRegistrationContributor.objects.get_or_create(
user=initiator,
draft_registration=instance,
visible=initiator_visibility,
_order=initiator_order
)
else:
DraftRegistrationContributor.objects.get_or_create(
user=initiator,
draft_registration=instance,
visible=True,
)
instance.add_permission(initiator, ADMIN)
``` |
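The `post_save` receiver above is standard Django signal wiring: one-time setup runs only when `created` is true. Below is a minimal, self-contained sketch of the same pattern, assuming a configured Django project and a hypothetical `Profile` model (the model name and handler are illustrative, not taken from the snippet above):

```python
from django.db.models.signals import post_save
from django.dispatch import receiver

from myapp.models import Profile  # hypothetical model in a configured Django app


@receiver(post_save, sender=Profile)
def profile_created(sender, instance, created, **kwargs):
    # Runs after every save; 'created' is True only for the initial INSERT,
    # which is where one-time setup (like the group creation above) belongs.
    if created:
        print('set up defaults for', instance.pk)
```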
{
"source": "jh3ex/cirp",
"score": 3
} |
#### File: envs/production_system/ProductionGraph.py
```python
import numpy as np
import pandas as pd
from Buffer import Buffer
from Product import Product
from Machine import Machine
class ProductionGraph:
def __init__(self, adj_list, machines, buffers, incoming_buffer, completed_buffer):
"""
Construct a graph for production system
Parameters
----------
adj_list : list like
Adjacency list.
buffer_cap : list like or array
Buffer capacity for each machine, null for end-of-line machine
stepsize : float
Stepsize for simulation
Returns
-------
None.
"""
self.adj_list = adj_list
self.n_machine = len(machines)
# Generate all matrices
self.__matrices()
self.machines = machines
self.buffers = buffers
self.incoming_buffer = incoming_buffer
self.completed_buffer = completed_buffer
def __matrices(self):
"""
Create adjacency matrix
Returns
-------
adj_matrix: Adjacency matrix
deg_matrix: Degree matrix
"""
self.adj_matrix = np.zeros((self.n_machine, self.n_machine), dtype=int)
self.deg_matrix = np.zeros((self.n_machine, self.n_machine), dtype=int)
for i in range(self.n_machine):
self.adj_matrix[i, self.adj_list[i]] = 1
self.deg_matrix[i, i] = len(self.adj_list[i])
return
def adjacency_matrix(self):
return self.adj_matrix
def degree_matrix(self):
return self.deg_matrix
def initialize(self, sim_duration, stepsize, random_seed):
"""
Set the initial conditions
Parameters
----------
sim_duration : float
Simulation duration.
random_seed : int
random seed for simulation.
Returns
-------
None.
"""
# Random seed for simulation
self.RD = np.random.RandomState(seed=random_seed)
# Simulation time horizon
self.sim_duration = sim_duration
self.stepsize = stepsize
# Total steps given stepsize
self.total_step = int(self.sim_duration / self.stepsize)
# Initialize everything
for m in self.machines:
m.initialize(self.RD)
for b in self.buffers:
b.initialize()
self.incoming_buffer.initialize()
self.completed_buffer.initialize()
self.time = 0
self.terminate = False
def run(self, parameters):
"""
Run the system for one time step (stepsize)
Returns
-------
None.
"""
assert not self.terminate, "The simulation is terminated"
self.time += self.stepsize
output_before, yield_before = self.completed_buffer.output_and_yield()
parameter_request = [None] * self.n_machine
for i, m in enumerate(self.machines):
# Iterates over all machines
status, product = m.quote()
if status == "processing":
m.processing(self.stepsize)
elif status == "to release":
for b in m.buffer_down:
if b.put(product, self.time):
m.release()
break
elif status == "to load":
for b in m.buffer_up:
product = b.take()
if product is not None:
existing_feature = m.load(product)
parameter_request[i] = existing_feature
break
elif status == "awaiting parameter":
m.set_process_parameter(parameters[i])
output_after, yield_after = self.completed_buffer.output_and_yield()
output_step = output_after - output_before
yield_step = yield_after - yield_before
self.terminate = (self.time >= self.sim_duration)
return parameter_request, output_step, yield_step, self.terminate
def get_node_feature(self, i):
return self.machines[i].get_node_feature()
def get_yield(self):
return self.completed_buffer.output_and_yield()
if __name__ == "__main__":
pass
```
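For reference, the adjacency/degree construction in `ProductionGraph.__matrices` can be reproduced standalone. The sketch below uses a hypothetical three-machine serial line; the machine count and adjacency list are made up for illustration:

```python
import numpy as np

# Hypothetical serial line: machine 0 feeds machine 1, machine 1 feeds machine 2,
# machine 2 feeds nothing (end of line). Mirrors ProductionGraph.__matrices.
adj_list = [[1], [2], []]
n_machine = len(adj_list)

adj_matrix = np.zeros((n_machine, n_machine), dtype=int)
deg_matrix = np.zeros((n_machine, n_machine), dtype=int)
for i in range(n_machine):
    adj_matrix[i, adj_list[i]] = 1        # mark downstream machines
    deg_matrix[i, i] = len(adj_list[i])   # out-degree on the diagonal

print(adj_matrix)
# [[0 1 0]
#  [0 0 1]
#  [0 0 0]]
print(deg_matrix)
# [[1 0 0]
#  [0 1 0]
#  [0 0 0]]
```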
#### File: envs/production_system/production.py
```python
from envs.multiagentenv import MultiAgentEnv
from envs.production_system.Buffer import Buffer
from envs.production_system.GrindingRF import GrindingRF
from envs.production_system.GrindingCB import GrindingCB
from envs.production_system.Product import Product
from envs.production_system.IncomingBuffer import IncomingBuffer
import numpy as np
class production_discrete(MultiAgentEnv):
#class production_discrete():
def __init__(self, **env_args):
"""
Env parameters
"""
self.args = env_args
# self.adj = self.args["adj"]
self.stepsize = self.args["stepsize"]
self.episode_limit = self.args["episode_limit"]
self.sim_duration = self.args["sim_duration"]
# Reward setting
self.yield_reward = self.args["yield_reward"]
self.defect_reward = self.args["defect_reward"]
self.template_product = Product(n_feature=self.args["n_feature"],
n_process=self.args["n_stage"],
index=0)
self._build_buffers()
self._build_machines()
self._build_actions()
self.n_agents = len(self.machines)
# self.seed = self.args["seed"]
# self.RD = np.random.RandomState(seed=self.seed)
def _build_actions(self):
v_ops = self.args["actions"]["v"]
w_ops = self.args["actions"]["w"]
a_ops = self.args["actions"]["a"]
self.n_actions = len(v_ops) * len(w_ops) * len(a_ops) + 1
self.action_to_param = [[0, 0, 0]]
for v in v_ops:
for w in w_ops:
for a in a_ops:
self.action_to_param.append([v, w, a])
return
def _build_buffers(self):
# Build buffers
self.buffers = {}
self.buffers["incoming_buffer"] = IncomingBuffer(self.template_product, 1/self.args["obs_scale"].get("b_up", 1.0))
self.buffers["completed_buffer"] = GrindingCB(self.args["q_star"], 1/self.args["obs_scale"].get("b_down", 1.0))
for b in self.args["buffers"]:
self.buffers[b] = Buffer(self.args["buffers"][b])
return
def _build_machines(self):
# Build machines
self.machines = []
for idx, name in enumerate(self.args["machines"]):
m = self.args["machines"][name]
self.machines.append(GrindingRF(p1=m["p1"],
p2=m["p2"],
p3=m["p3"],
p4=m["p4"],
p5=m["p5"] * self.args["p5_scale"],
features=None,
stage=m["stage"],
buffer_up=[self.buffers[x] for x in m["buffer_up"]],
buffer_down=[self.buffers[x] for x in m["buffer_down"]],
MTTR=m["MTTR"],
MTBF=m["MTBF"],
n_product_feature=self.args["n_feature"],
name=name))
return
def _action_check(self, actions):
avail_actions = self.get_avail_actions()
for idx, a in enumerate(actions):
assert avail_actions[idx][a] == 1, "At time [{}], agent {} is given an infeasible action {}".format(self.time, idx, a)
def _last_action(self, actions):
if self.args["obs_last_action"]:
if self.args["last_action_one_hot"]:
self.last_action = [[0] * self.n_actions for _ in range(self.n_agents)]
for idx, a in enumerate(actions):
self.last_action[idx][a] = 1
else:
self.last_action = []
for a in actions:
self.last_action.append(self.action_to_param[a])
def step(self, actions):
""" Returns reward, terminated, info """
# raise NotImplementedError
# Get the output and yield before this step
# self._action_check(actions)
self.steps += 1
output_before, yield_before = self.buffers["completed_buffer"].output_and_yield()
self._last_action(actions)
decision_time = False
while not decision_time:
self.time += self.stepsize
for idx, m in enumerate(self.machines):
# Iterate over all machines
# Quote machine current product and status
status, product = m.quote(self.stepsize)
if status == "processing":
m.processing(self.stepsize)
elif status == "to release":
for b in m.buffer_down:
if b.put(product, self.time):
m.release()
break
elif status == "to load":
for b in m.buffer_up:
product = b.take()
if product is not None:
m.load(product)
# parameter_request[idx] = existing_feature
decision_time = True
break
elif status == "awaiting parameter":
parameters = self.action_to_param[actions[idx]]
m.set_process_parameter(parameters)
self.output, self.yields = self.buffers["completed_buffer"].output_and_yield()
output_step = self.output - output_before
yield_step = self.yields - yield_before
defect_step = output_step - yield_step
reward = self.yield_reward * yield_step + self.defect_reward * defect_step
if self.args["reward_scale"]:
reward *= self.args["reward_scale_rate"]
# self.episode_return += reward
terminated = (self.steps >= self.episode_limit) or (self.time > self.sim_duration)
# terminated = (self.time >= self.args["sim_duration"])
info = {}
if terminated:
info["output"] = self.output
info["yield"] = self.yields
info["duration"] = self.time
info["yield_rate"] = self.yields/self.time
# info["reward"] = self.episode_return
return reward, terminated, info
def get_obs(self):
""" Returns all agent observations in a list """
# raise NotImplementedError
obs_all = []
for agent_id in range(self.n_agents):
obs_all.append(self.get_obs_agent(agent_id))
return obs_all
def get_obs_agent(self, agent_id):
""" Returns observation for agent_id """
# raise NotImplementedError
obs = []
node_feature, need_decision = self.machines[agent_id].get_node_feature()
stage_one_hot = [0] * self.args["n_stage"]
stage_one_hot[node_feature["stage"]] = 1
node_feature["stage"] = stage_one_hot
for key, value in node_feature.items():
scale = self.args["obs_scale"].get(key, 1)
if isinstance(value, list):
obs += [v * scale for v in value]
else:
obs.append(value * scale)
if need_decision:
obs.append(1)
else:
obs.append(0)
if self.args["obs_last_action"]:
if self.args["last_action_one_hot"]:
obs += self.last_action[agent_id]
else:
scale = self.args["obs_scale"].get("actions", [1, 1, 1])
for idx, action in enumerate(self.last_action[agent_id]):
obs.append(action * scale[idx])
if self.args["obs_agent_id"]:
obs.append(agent_id / self.n_agents)
return np.array(obs, dtype=np.float32)
def get_obs_size(self):
""" Returns the shape of the observation """
# raise NotImplementedError
# Features from machine
size = self.machines[0].get_feature_size() + self.args["n_stage"] - 1
# Stage is one-hot after processing
# Features
size += self.template_product.n_feature
size += 1 # Include decision or not
if self.args["obs_last_action"]:
if self.args["last_action_one_hot"]:
size += self.n_actions
else:
size += len(self.action_to_param[0])
if self.args["obs_agent_id"]:
size += 1
return size
def get_state(self):
# raise NotImplementedError
if self.args["obs_instead_of_state"]:
obs_all = self.get_obs()
state = [item for sublist in obs_all for item in sublist]
return state
else:
raise NotImplementedError
def get_state_size(self):
""" Returns the shape of the state"""
# raise NotImplementedError
if self.args["obs_instead_of_state"]:
return len(self.machines) * self.get_obs_size()
else:
raise NotImplementedError
def get_avail_actions(self):
# raise NotImplementedError
avail_actions = [self.get_avail_agent_actions(agent_id) for agent_id in range(self.n_agents)]
return avail_actions
def get_avail_agent_actions(self, agent_id):
""" Returns the available actions for agent_id """
# raise NotImplementedError
obs, need_decision = self.machines[agent_id].get_node_feature()
# assert not need_decision, "At time {}, agent {} needs decision".format(self.time, agent_id)
if need_decision:
avail_agent_actions = [0] + [1] * (self.n_actions - 1)
else:
avail_agent_actions = [1] + [0] * (self.n_actions - 1)
return avail_agent_actions
def get_total_actions(self):
""" Returns the total number of actions an agent could ever take """
# TODO: This is only suitable for a discrete 1 dimensional action space for each agent
# raise NotImplementedError
return self.n_actions
def reset(self, seed=None):
""" Returns initial observations and states"""
# raise NotImplementedError
# Random seed for simulation
if seed:
np.random.seed(seed)
# Simulation time horizon
self.stepsize = self.args["stepsize"]
self.steps = 0
self.time = 0.0
# self.episode_return = 0.0
self.output, self.yields = 0, 0
if self.args["obs_last_action"]:
if self.args["last_action_one_hot"]:
self.last_action = [[0] * self.n_actions for _ in range(self.n_agents)]
else:
self.last_action = [self.action_to_param[0]] * self.n_agents
# Initialize everything
for m in self.machines:
m.initialize()
for b in self.buffers:
self.buffers[b].initialize()
# self.incoming_buffer.initialize()
# self.completed_buffer.initialize()
def get_stats(self):
# Add per warning during running
self.stats = {"output": self.output,
"yield": self.yields,
"duration": self.time,
"yield_rate": self.yields/self.time}
return self.stats
def render(self):
raise NotImplementedError
def close(self):
# raise NotImplementedError
pass
def seed(self):
raise NotImplementedError
def save_replay(self):
raise NotImplementedError
def get_env_info(self):
env_info = {"state_shape": self.get_state_size(),
"obs_shape": self.get_obs_size(),
"n_actions": self.get_total_actions(),
"n_agents": self.n_agents,
"episode_limit": self.episode_limit}
return env_info
if __name__ == "__main__":
from types import SimpleNamespace as SN
import yaml
import numpy as np
with open('D:/OneDrive/Script/graph/pymarl_adaptive_graph/src/config/envs/production.yaml', 'r') as f:
_config = yaml.load(f)
env_config = _config['env_args']
env = production_discrete(**env_config)
env.reset()
terminated = False
while not terminated:
rand_action = np.random.rand(env.n_agents, env.n_actions)
print('At time [{}], available actions are {}'.format(env.time, env.get_avail_actions()))
print("state is {}".format(env.get_state()))
logits = rand_action * np.array(env.get_avail_actions())
p = logits / np.sum(logits, axis=1, keepdims=True)
rand_action = np.argmax(p, axis=1)
r, terminated, info = env.step(rand_action)
print(r)
print(info)
print(env.get_env_info())
```
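The action space built by `_build_actions` is the Cartesian product of the `v`/`w`/`a` option lists plus one reserved no-op action. A small sketch of that enumeration with hypothetical option values (the real values come from the environment YAML config):

```python
from itertools import product

# Hypothetical process-parameter options (illustrative only).
v_ops = [0.1, 0.2]
w_ops = [10, 20]
a_ops = [1]

# Action 0 is the reserved "no decision needed" action, mirroring _build_actions.
action_to_param = [[0, 0, 0]] + [list(p) for p in product(v_ops, w_ops, a_ops)]
n_actions = len(action_to_param)

print(n_actions)           # 5 == len(v_ops) * len(w_ops) * len(a_ops) + 1
print(action_to_param[3])  # [0.2, 10, 1]
```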
#### File: envs/smart_man_sim/core_2.py
```python
import time
import numpy as np
import random
import math
from absl import logging
from .scenes import get_map_params
"""
Machines only have two actions in this batch of experiments: run, maintain
"""
class Transition():
"""
machine health states transition rules:
-There are 4 health states: pre-mature, mature, slightly damaged, severely damaged
"""
def __init__(self, tran_matrix, length=40, schedule=False):
self._init_matrix(tran_matrix)
self.schedule = schedule
self.length = length
if schedule:
self.init = np.array([[[1., 0, 0, 0],
[0, 1., 0, 0],
[0, 0, 1., 0],
[0, 0, 0, 1.]],
[[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]]])
self.decay = (self.init - self.T) / length
def _init_matrix(self, tran_matrix):
"""
T = [[[], [], [], []],
[[], [], [], []],
[[], [], [], []]]
"""
self.T = tran_matrix
assert len(self.T.shape) == 3 and self.T.shape[0] == 2 and self.T.shape[1] == 4 and self.T.shape[2] == 4
def transit(self, init_state, action, steps=None):
if not self.schedule:
T_a = self.T[action]
p = T_a[init_state]
next_state = np.random.choice(4, 1, p=p)[0]
return next_state
else:
if steps[init_state] > self.length:
steps = self.length
T = self.init - self.decay * steps
T_a = T[action]
p = T_a[init_state] + 1e-9
p /= np.sum(p)
next_state = np.random.choice(4, 1, p=p)[0]
return next_state
ACTION = {0: 1000,
1: 0,
2: (12, 3)}
class Machine():
def __init__(self, id, T, cell_id, config):
self.T = T
self.id = id
self.cell_id = cell_id
self.init_h_state()
self.under_m = False
self.deque = 0
self.anti_jobs = 0
self.action = None
self.m_cd = 0
self.ACTION = config['actions']
self.COST = config['costs']
self.h_history = np.zeros(4)
self.restart = 0
self.breakdown = False
def step(self, action):
if action == 0:
assert self.h_state != 3
self.action = action
n = self.ACTION[self.action]
self.request_parts(n)
self.state_time[self.h_state] += 1
elif action == 1:
self.action = action
self.request_parts(0)
self.register_m()
self.state_time[self.h_state] += 1
else:
raise ValueError('action [%d] is out of range'%action)
self.h_history[self.h_state] += 1
def init_h_state(self, random=False):
self.h_state = 0
self.state_time = [0] * 4
@property
def health(self):
return self.h_state, self.state_time[self.h_state]
def request_parts(self, n):
self.request_jobs = n
def recieve_parts(self, n):
self.anti_jobs = n
def register_m(self):
self.under_m = True
self.anti_jobs = 0
self.request_jobs = 0
if self.m_cd == 0:
self.m_cd = max(1, math.floor(np.random.normal(12, 3)))
def proceed(self):
if not self.under_m:
self.deque = self.anti_jobs
self.anti_jobs = 0
self.request_jobs = 0
new_h_state = self.T.transit(self.h_state, self.action, self.state_time)
self.h_state = new_h_state
else:
self.m_cd -= 1
self.deque = self.anti_jobs
if self.m_cd == 0:
self.under_m = False
new_h_state = self.T.transit(self.h_state, self.action, self.state_time)
self.h_state = new_h_state
self.state_time = [0] * 4
self.restart += 1
@property
def valid_actions(self):
if self.h_state == 3 or self.under_m:
valid_actions = [0., 1.]
else:
valid_actions = [1] * 2
return valid_actions
@property
def cost(self):
if self.action == 0 and self.h_state != 3:
cost = self.COST[0]
# print('Agent {} took action {} and incur {} cost'.format(self.id, self.action, cost))
elif self.h_state == 3 and self.state_time[3] == 0:
cost = self.COST[-1]
if self.action == 0:
cost += self.COST[0]
# print('Agent {} took action {}, and breakdown, and incur {} cost'.format(self.id, self.action, cost))
else:
raise ValueError('self.action cannot take {}'.format(self.action))
elif self.action == 1:
cost = self.COST[1]
# print('Agent {} took action {} and incur {} cost'.format(self.id, self.action, cost))
else:
raise ValueError("Our agent going to wrong state, action pair {}{}".format(self.h_state, self.action))
return cost
class Cell():
def __init__(self, id):
self.id = id
self.deque = 0
self.anti_jobs = 0
self.p_cell = None
self.f_cell = None
def add_machines(self, m_list):
self.machines = m_list
def add_cells(self, p_cell=None, f_cell=None):
self.p_cell = p_cell
self.f_cell = f_cell
if self.p_cell:
self.p_cell.f_cell = self
if self.f_cell:
self.f_cell.p_cell = self
def assign_jobs(self):
self.deque = 0
if not self.p_cell:
assert self.anti_jobs >= 0, 'anti_jobs {} should be always greater than 0'.format(self.anti_jobs)
recieve_requests = np.sum(list(map(lambda x: x.request_jobs, self.machines)))
self.anti_jobs += recieve_requests
for machine in self.machines:
machine.recieve_parts(machine.request_jobs)
assert self.anti_jobs >= np.sum(list(map(lambda x: x.anti_jobs, self.machines))), 'anti_jobs is {}, machines in cell {} actually get {}'.format(self.anti_jobs, self.id, np.sum(list(map(lambda x: x.anti_jobs, self.machines))))
if self.f_cell:
self.f_cell.anti_jobs += np.sum(list(map(lambda x: x.anti_jobs, self.machines)))
else:
if self.anti_jobs > 0:
recieve_requests = np.sum(list(map(lambda x: x.request_jobs, self.machines)))
if self.anti_jobs >= recieve_requests:
for machine in self.machines:
machine.recieve_parts(machine.request_jobs)
if self.f_cell:
self.f_cell.anti_jobs += np.sum(list(map(lambda x: x.anti_jobs, self.machines)))
else:
request_jobs = np.array(list(map(lambda x: x.request_jobs, self.machines)), dtype=np.float32)
jobs_pool = np.zeros_like(self.machines, dtype=np.float32)
while np.sum(request_jobs - jobs_pool) > 0:
p = (request_jobs - jobs_pool) / np.sum(request_jobs - jobs_pool)
idx = np.random.choice(len(self.machines), 1, p=p)[0]
jobs_pool[idx] += 1.
for idx, machine in enumerate(self.machines):
machine.recieve_parts(jobs_pool[idx])
if self.f_cell:
self.f_cell.anti_jobs += np.sum(list(map(lambda x: x.anti_jobs, self.machines)))
def proceed(self):
for m in self.machines:
m.proceed()
self.deque = np.sum(list(map(lambda x: x.deque, self.machines)))
self.anti_jobs -= self.deque
# assert self.anti_jobs >= 0, 'anti_jobs is {}, and deques is {}'.format(self.anti_jobs, self.deque)
@property
def buffer_size(self):
return self.anti_jobs
class Simulation():
def __init__(self, map_name):
self._initialize(map_name)
def _initialize(self, map_name):
config = get_map_params(map_name)
cell_ids = config['cells']
machine_ids = config['machines']
transitions = config['transitions']
self.sale_price = config['sale_price']
machine_ids = np.array(machine_ids).reshape([len(cell_ids), -1])
self.machines = []
self.cells = []
for i in range(machine_ids.shape[0]):
cell_id = i
self.cells.append(Cell(cell_id))
transition = transitions[i]
T = Transition(transition, schedule=False)
for j in range(machine_ids.shape[1]):
machine_id = machine_ids[i, j]
self.machines.append(Machine(machine_id, T, cell_id, config))
self.cells[-1].add_machines(self.machines[-machine_ids.shape[1]:])
if i > 0:
p_cell = self.cells[i-1]
self.cells[-1].add_cells(p_cell)
def step(self, actions):
for idx, machine in enumerate(self.machines):
machine.step(actions[idx])
for cell in self.cells:
cell.assign_jobs()
for cell in self.cells:
cell.proceed()
def get_avail_agent_actions(self, agent_id):
return self.machines[agent_id].valid_actions
@property
def products(self):
final_cell = self.cells[-1]
products = final_cell.deque
return products
@property
def profit(self):
products = self.products
cost = np.sum(list(map(lambda x: x.cost, self.machines)))
return products * self.sale_price - cost
def get_buffers_agent(self, agent_id):
total_buffer = np.sum(list(map(lambda x:x.buffer_size, self.cells)))
if total_buffer == 0:
return 0., 0.
agent = self.machines[agent_id]
cell_id = agent.cell_id
front_buffer = self.cells[cell_id].buffer_size
following_buffer = 0
if cell_id + 1 < len(self.cells) -1:
following_buffer = self.cells[cell_id + 1].buffer_size
return front_buffer / total_buffer, following_buffer / total_buffer
def get_cost_agent(self, agent_id):
return self.machines[agent_id].cost
if __name__ == '__main__':
from scenes import get_map_params
import matplotlib.pyplot as plt
import numpy as np
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
def pie_plot(*args, **kw_args):
labels = kw_args['labels']
colors = tableau20[:len(labels)]
sizes = args
def func(pct, allvals):
absolute = pct/100.*np.sum(allvals)
return "{:.1f}%\n({:.1f} unit time)".format(pct, absolute)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, colors=colors, labels=labels, autopct=lambda pct:func(pct, sizes),
shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
map_name = 226
sim = Simulation(map_name)
sim.step([0]*6)
num = 0
profit = 0
while num < 128:
actions = []
for i in range(6):
action_p = np.array([1., 0.])
valid_actions = sim.get_avail_agent_actions(i)
# print('valid_action is {}'.format(valid_actions))
action_p = np.array(valid_actions, dtype=np.float32) * (action_p + 1e-9)
p = action_p / np.sum(action_p)
# print('p is {}'.format(p))
action = np.random.choice(2, 1, p=p)[0]
actions.append(action)
sim.step(actions)
print("Actions are {}".format(actions))
print("States are {}".format([machine.h_state for machine in sim.machines]))
num += 1
print(sim.profit)
profit += sim.profit
cells = []
for m in sim.machines:
cells.append(m.h_history/m.restart)
print("States history are {}".format(m.h_history/m.restart))
cells = np.asarray(cells)
print(np.mean(cells[:3], axis=0))
#pie_plot(*list(np.mean(cells[:3], axis=0)),
# **{'labels': ['pre-mature', 'mature', 'slightly-worn', 'severely-worn']})
print(np.mean(cells[3:], axis=0))
#pie_plot(*list(np.mean(cells[3:], axis=0)),
# **{'labels': ['pre-mature', 'mature', 'slightly-worn', 'severely-worn']})
print(profit)
```
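The `Transition` class above samples the next health state from the row of a per-action transition matrix. The sketch below reproduces that sampling step with a hypothetical 2-action × 4-state tensor; the probabilities are illustrative only:

```python
import numpy as np

# Hypothetical transition tensor T[action][state] -> probabilities over 4 health states.
# Action 0 = "run" (machine slowly degrades), action 1 = "maintain" (reset to state 0).
T = np.array([
    [[0.9, 0.1, 0.0, 0.0],
     [0.0, 0.9, 0.1, 0.0],
     [0.0, 0.0, 0.9, 0.1],
     [0.0, 0.0, 0.0, 1.0]],
    [[1.0, 0.0, 0.0, 0.0],
     [1.0, 0.0, 0.0, 0.0],
     [1.0, 0.0, 0.0, 0.0],
     [1.0, 0.0, 0.0, 0.0]],
])

def transit(state, action, rng=np.random):
    # Same sampling step as Transition.transit (non-scheduled case).
    return rng.choice(4, p=T[action][state])

state = 0
for _ in range(5):
    state = transit(state, action=0)
print("state after 5 'run' steps:", state)
print("state after one 'maintain':", transit(state, action=1))  # always 0
```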
#### File: envs/smart_man_sim/core.py
```python
import time
import numpy as np
import random
import math
from absl import logging
from .scenes import get_map_params
from functools import partial
import math
class Transition():
"""
machine health states transition rules:
-There are 4 health states: pre-mature, mature, slightly damaged, severely damaged
"""
def __init__(self, tran_matrix, length=10, exp=False, schedule=False):
self._init_matrix(tran_matrix)
self.schedule = schedule
self.length = length
self.exp = exp
if schedule:
if not self.exp:
self.init = np.ones_like(self.T)
self.init[self.T==0] = 0
self.decay = (self.init - self.T) / length
else:
raise NotImplementedError
# self.init = np.ones_like(self.T)
# print(self.T.shape, self.init.shape)
# self.init[self.T==0] = 0
# self.T[self.T == 0] = 1
# self.exp_scaling = (-1) * self.length / np.log(self.T)
def _init_matrix(self, tran_matrix):
"""
T = [[[], [], [], []],
[[], [], [], []],
[[], [], [], []]]
"""
self.T = tran_matrix
def transit(self, init_state, action, steps=None):
if not self.schedule:
T_a = self.T[action]
p = T_a[init_state]
next_state = np.random.choice(4, 1, p=p)[0]
return next_state
else:
if not self.exp:
# if steps[init_state] > self.length:
# steps = self.length
T = self.init - self.decay * np.array(steps)
T_a = T[action]
p = T_a[init_state]
p /= np.sum(p)
next_state = np.random.choice(4, 1, p=p)[0]
# print('The state is {} and the probability of not trans is {}, we trans {}, the action is {}'.format(init_state, p, trans, action))
return next_state
else:
raise NotImplementedError
# T = np.exp(- np.array(steps) / self.exp_scaling)
# T_a = T[action]
# p = T_a[init_state]
# if p == 1.:
# p = 0
# else:
# p = max(self.T[action, init_state], p)
# trans = np.random.choice(2, 1, p=[p, 1-p])
# # print('The state is {} and the probability of not trans is {}, we trans {}, the action is {}'.format(init_state, p, trans, action))
# if trans:
# next_state = init_state + 1
# if next_state > 3:
# next_state %= 3
# else:
# next_state = init_state
# return next_state
class Continue_Transition():
def __init__(self, dist, first_params, second_params, lower_bounds):
assert type(dist) == str, "dist must be string"
self.dist_name = dist
self.first_params = first_params
self.second_params = second_params
self.lower_bounds = lower_bounds
if dist == 'log-normal':
self.dist = np.random.lognormal
elif dist == 'exponential':
self.dist = np.random.exponential
elif dist == 'gamma':
self.dist = np.random.gamma
elif dist == 'static':
self.dist = None
else:
raise ValueError("{} is not a predefined distributions, which has to be in [log-normal, exponential, gamma]".format(dist))
def init_trans(self, init_state):
first_param = self.first_params[init_state]
second_param = self.second_params[init_state]
lower_bound = self.lower_bounds[init_state]
if self.dist_name == 'log-normal':
mean = first_param
sigma = second_param
self.end_time = max(lower_bound, math.ceil(self.dist(mean, sigma)))
elif self.dist_name == 'exponential':
offset = first_param
scale = second_param
self.end_time = max(lower_bound, math.ceil(offset + self.dist(scale)))
elif self.dist_name == 'gamma':
shape = first_param
scale = second_param
self.end_time = max(lower_bound, math.ceil(self.dist(shape, scale)))
elif self.dist_name == 'static':
shape = first_param
scale = second_param
self.end_time = first_param
else:
raise ValueError("{} is not a predefined distributions, which has to be in [log-normal, exponential, gamma]".format(self.dist_name))
steps = random.randint(0, self.end_time-1)
self.end_time -= steps
return steps
def transit(self, init_state, action, steps=None):
if steps[init_state] == 0:
raise ValueError("wrong steps!")
if init_state != 3:
if steps[init_state] == 1:
first_param = self.first_params[init_state]
second_param = self.second_params[init_state]
lower_bound = self.lower_bounds[init_state]
if self.dist_name == 'log-normal':
mean = first_param
sigma = second_param
self.end_time = max(lower_bound, math.ceil(self.dist(mean, sigma)))
elif self.dist_name == 'exponential':
offset = first_param
scale = second_param
self.end_time = max(lower_bound, math.ceil(offset + self.dist(scale)))
elif self.dist_name == 'gamma':
shape = first_param
scale = second_param
self.end_time = max(lower_bound, math.ceil(self.dist(shape, scale)))
elif self.dist_name == 'static':
shape = first_param
self.end_time = first_param
else:
raise ValueError("{} is not a predefined distributions, which has to be in [log-normal, exponential, gamma]".format(self.dist_name))
if action != 1:
self.end_time -= 1
else:
self.end_time -= .75
if self.end_time <= 0:
init_state += 1
if init_state > 3:
init_state %= 3
# else:
# if init_state != 3:
# self.end_time -= 1
# if self.end_time == 0:
# init_state += 1
# if init_state > 3:
# init_state %= 3
else:
init_state = 0
return init_state
ACTION = {0: 1000,
1: 0,
2: (12, 3)}
class Machine():
def __init__(self, id, T, cell_id, config, time_base=False):
self.T = T
self.id = id
self.cell_id = cell_id
self.under_m = False
self.deque = 0
self.anti_jobs = 0
self.action = None
self.m_cd = 0
self.ACTION = config['actions']
self.COST = config['costs']
self.restart = 0
self.h_tracker = [[],[],[],[]]
self.init_time_base = time_base
if time_base:
self.counter = self.init_time_base
def reset(self, random_init):
self.under_m = False
self.deque = 0
self.anti_jobs = 0
self.action = None
self.m_cd = 0
self.h_tracker = [[],[],[],[]]
self.random_init = random_init
self.init_h_state(random_init)
if self.init_time_base:
self.counter = self.init_time_base
def step(self, action):
# print("machine {} takes action {} with {} time in health state {}".format(self.id, action, self.state_time[self.h_state], self.h_state))
if action == 0:
if self.init_time_base:
self.counter -= 1
assert self.h_state != 3
self.action = action
n = self.ACTION[self.action]
self.request_parts(n)
self.state_time[self.h_state] += 1
elif action == 1:
if self.init_time_base:
self.counter -= 1
assert self.h_state != 3
self.action = action
n = self.ACTION[self.action]
self.request_parts(n)
self.state_time[self.h_state] += 1
elif action == 2:
# assert self.h_state != 0 and self.h_state != 1
self.action = action
self.request_parts(0)
self.register_m()
self.state_time[self.h_state] += 1
else:
raise ValueError('action [%d] is out of range'%action)
def init_h_state(self, random_init):
if type(self.T) == Continue_Transition:
if random_init:
# print("Machine {} is random inited".format(self.id))
self.h_state = random.randint(0, 3)
steps = self.T.init_trans(self.h_state)
self.state_time = [0] * 4
self.state_time[self.h_state] = steps
if self.h_state == 3:
self.register_m()
steps = random.randint(0, self.m_cd-1)
self.m_cd -= steps
assert self.m_cd > 0
self.state_time = [0] * 4
self.state_time[self.h_state] = steps
else:
self.h_state = 0
self.state_time = [0] * 4
else:
raise ValueError("We currently only support continuous transitions")
@property
def health(self):
return self.h_state, self.state_time[self.h_state]
def request_parts(self, n):
self.request_jobs = n
def recieve_parts(self, n):
self.anti_jobs = n
def register_m(self):
self.under_m = True
self.anti_jobs = 0
self.h_state = 3
self.request_jobs = 0
if self.m_cd == 0:
self.m_cd = max(1, math.floor(np.random.normal(12, 3)))
def proceed(self):
if not self.under_m:
self.deque = self.anti_jobs
self.anti_jobs = 0
self.request_jobs = 0
new_h_state = self.T.transit(self.h_state, self.action, self.state_time)
if new_h_state != self.h_state:
self.h_tracker[self.h_state].append(self.state_time[self.h_state])
self.h_state = new_h_state
else:
self.m_cd -= 1
assert self.m_cd >= 0, 'self.m_cd value is {}'.format(self.m_cd)
self.deque = self.anti_jobs
if self.m_cd == 0:
self.under_m = False
new_h_state = self.T.transit(self.h_state, self.action, self.state_time)
assert new_h_state != self.h_state, 'new state {} should be different from the original state {}'.format(new_h_state, self.h_state)
if new_h_state != self.h_state:
self.h_tracker[self.h_state].append(self.state_time[self.h_state])
self.h_state = new_h_state
if self.init_time_base:
self.counter = self.init_time_base
self.state_time = [0] * 4
self.restart += 1
@property
def valid_actions(self):
if not self.init_time_base:
if self.h_state == 3 or self.under_m:
valid_actions = [0., 0., 1.]
elif self.h_state == 0 or self.h_state == 1:
valid_actions = [1., 1., 1.]
elif self.h_state == 2:
valid_actions = [1., 1., 1.]
else:
raise ValueError("we are in wrong {} state".format(self.h_state))
else:
if self.counter == 0:
valid_actions = [0., 0., 1.]
else:
if self.h_state == 3 or self.under_m:
valid_actions = [0., 0., 1.]
elif self.h_state == 0 or self.h_state == 1:
valid_actions = [1., 1., 0.]
elif self.h_state == 2:
valid_actions = [1., 1., 1.]
else:
raise ValueError("we are in wrong {} state".format(self.h_state))
return valid_actions
@property
def cost(self):
if self.action == 0 and self.h_state != 3:
cost = self.COST[0]
# print('Agent {} took action {} and incur {} cost'.format(self.id, self.action, cost))
elif self.action == 1 and self.h_state != 3:
cost = self.COST[1]
# print('Agent {} took action {} and incur {} cost'.format(self.id, self.action, cost))
elif self.h_state == 3 and self.state_time[3] == 0:
cost = self.COST[-1]
if self.action == 0:
cost += self.COST[0]
# print('Agent {} took action {}, and breakdown, and incur {} cost'.format(self.id, self.action, cost))
elif self.action == 1:
cost += self.COST[1]
# print('Agent {} took action {}, and breakdown, and incur {} cost'.format(self.id, self.action, cost))
else:
raise ValueError('self.action cannot take {}, the current state time is {}, state is {}, m_cd is {}, under_m is {}'.format(self.action, self.state_time, self.h_state, self.m_cd, self.under_m))
elif self.action == 2:
cost = self.COST[2]
# print('Agent {} took action {} and incur {} cost'.format(self.id, self.action, cost))
else:
raise ValueError("Our agent going to wrong state, action pair {}{}".format(self.h_state, self.action))
return cost
# class Machine():
# def __init__(self, id, T, cell_id, config, time_base=False):
# self.T = T
# self.id = id
# self.cell_id = cell_id
# self.under_m = False
# self.deque = 0
# self.anti_jobs = 0
# self.action = None
# self.m_cd = 0
# self.ACTION = config['actions']
# self.COST = config['costs']
# self.h_history = np.zeros_like(self.COST)
# self.restart = 0
# # self.h_tracker = [[],[],[],[]]
# self.init_h_state()
# self.init_time_base = time_base
# if time_base:
# self.counter = self.init_time_base
#
#
# def step(self, action):
# if action == 0:
# if self.init_time_base:
# self.counter -= 1
# assert self.h_state != 3
# self.action = action
# n = self.ACTION[self.action]
# self.request_parts(n)
# self.state_time[self.h_state] += 1
# elif action == 1:
# if self.init_time_base:
# self.counter -= 1
# assert self.h_state != 3
# self.action = action
# n = self.ACTION[self.action]
# self.request_parts(n)
# self.state_time[self.h_state] += 1
# elif action == 2:
# self.action = action
# self.request_parts(0)
# self.register_m()
# self.state_time[self.h_state] += 1
# else:
# raise ValueError('action [%d] is out of range'%action)
# self.h_history[self.h_state] += 1
#
# def init_h_state(self):
# if type(self.T) == Continue_Transition:
# self.h_state = random.randint(0, 3)
# steps = self.T.init_trans(self.h_state)
# self.state_time = [0] * 4
# self.state_time[self.h_state] = steps
# if self.h_state == 3:
# self.register_m()
# steps = random.randint(0, self.m_cd-1)
# self.m_cd -= steps
# self.state_time = [0] * 4
# self.state_time[self.h_state] = steps
# else:
# self.h_state = 0
# self.state_time = [0] * 4
#
# @property
# def health(self):
# return self.h_state, self.state_time[self.h_state]
#
# def request_parts(self, n):
# self.request_jobs = n
#
# def recieve_parts(self, n):
# self.anti_jobs = n
#
# def register_m(self):
# self.under_m = True
# self.anti_jobs = 0
# self.h_state = 3
# self.request_jobs = 0
# if self.m_cd == 0:
# self.m_cd = math.floor(np.random.normal(12, 3))
#
# def proceed(self):
# if not self.under_m:
# self.deque = self.anti_jobs
# self.anti_jobs = 0
# self.request_jobs = 0
# new_h_state = self.T.transit(self.h_state, self.action, self.state_time)
# # if new_h_state != self.h_state:
# # self.h_tracker[self.h_state].append(self.state_time[self.h_state])
# self.h_state = new_h_state
# else:
# self.m_cd -= 1
# self.deque = self.anti_jobs
# if self.m_cd == 0:
# self.under_m = False
# new_h_state = self.T.transit(self.h_state, self.action, self.state_time)
# # if new_h_state != self.h_state:
# # self.h_tracker[self.h_state].append(self.state_time[self.h_state])
# self.h_state = new_h_state
# if self.init_time_base:
# self.counter = self.init_time_base
# self.state_time = [0] * 4
# self.restart += 1
#
# @property
# def valid_actions(self):
# if self.h_state == 3 or self.under_m:
# valid_actions = [0., 0., 1.]
# else:
# if self.init_time_base:
# if self.counter == 0:
# valid_actions = [0., 0., 1.]
# valid_actions = [1] * 3
# return valid_actions
#
# @property
# def cost(self):
# if self.action == 0 and self.h_state != 3:
# cost = self.COST[0]
# # print('Agent {} took action {} and incur {} cost'.format(self.id, self.action, cost))
# elif self.action == 1 and self.h_state != 3:
# cost = self.COST[1]
# # print('Agent {} took action {} and incur {} cost'.format(self.id, self.action, cost))
# elif self.h_state == 3 and self.state_time[3] == 0:
# cost = self.COST[-1]
# if self.action == 0:
# cost += self.COST[0]
# # print('Agent {} took action {}, and breakdown, and incur {} cost'.format(self.id, self.action, cost))
# elif self.action == 1:
# cost += self.COST[1]
# # print('Agent {} took action {}, and breakdown, and incur {} cost'.format(self.id, self.action, cost))
# else:
# raise ValueError('self.action cannot take {}'.format(self.action))
# elif self.action == 2:
# cost = self.COST[2]
# # print('Agent {} took action {} and incur {} cost'.format(self.id, self.action, cost))
# else:
# raise ValueError("Our agent going to wrong state, action pair {}{}".format(self.h_state, self.action))
# return cost
class Cell():
def __init__(self, id):
self.id = id
self.deque = 0
self.anti_jobs = 0
self.p_cell = None
self.f_cell = None
def add_machines(self, m_list):
self.machines = m_list
def add_cells(self, p_cell=None, f_cell=None):
self.p_cell = p_cell
self.f_cell = f_cell
if self.p_cell:
self.p_cell.f_cell = self
if self.f_cell:
self.f_cell.p_cell = self
def assign_jobs(self):
self.deque = 0
if not self.p_cell:
assert self.anti_jobs >= 0, 'anti_jobs {} should be always greater than 0'.format(self.anti_jobs)
recieve_requests = np.sum(list(map(lambda x: x.request_jobs, self.machines)))
self.anti_jobs += recieve_requests
for machine in self.machines:
machine.recieve_parts(machine.request_jobs)
assert self.anti_jobs >= np.sum(list(map(lambda x: x.anti_jobs, self.machines))), 'anti_jobs is {}, machines in cell {} actually get {}'.format(self.anti_jobs, self.id, np.sum(list(map(lambda x: x.anti_jobs, self.machines))))
if self.f_cell:
self.f_cell.anti_jobs += np.sum(list(map(lambda x: x.anti_jobs, self.machines)))
else:
if self.anti_jobs > 0:
recieve_requests = np.sum(list(map(lambda x: x.request_jobs, self.machines)))
if self.anti_jobs >= recieve_requests:
for machine in self.machines:
machine.recieve_parts(machine.request_jobs)
if self.f_cell:
self.f_cell.anti_jobs += np.sum(list(map(lambda x: x.anti_jobs, self.machines)))
else:
request_jobs = np.array(list(map(lambda x: x.request_jobs, self.machines)), dtype=np.float32)
jobs_pool = np.zeros_like(self.machines, dtype=np.float32)
while np.sum(request_jobs - jobs_pool) > 0:
p = (request_jobs - jobs_pool) / np.sum(request_jobs - jobs_pool)
idx = np.random.choice(len(self.machines), 1, p=p)[0]
jobs_pool[idx] += 1.
for idx, machine in enumerate(self.machines):
machine.recieve_parts(jobs_pool[idx])
if self.f_cell:
self.f_cell.anti_jobs += np.sum(list(map(lambda x: x.anti_jobs, self.machines)))
def proceed(self):
for m in self.machines:
m.proceed()
self.deque = np.sum(list(map(lambda x: x.deque, self.machines)))
self.anti_jobs -= self.deque
# assert self.anti_jobs >= 0, 'anti_jobs is {}, and deques is {}'.format(self.anti_jobs, self.deque)
@property
def buffer_size(self):
return self.anti_jobs
def reset(self):
self.deque = 0
self.anti_jobs = 0
class Simulation():
def __init__(self, map_name, time_base=False):
self._initialize(map_name, time_base)
def _initialize(self, map_name, time_base):
config = get_map_params(map_name)
cell_ids = config['cells']
machine_ids = config['machines']
self.sale_price = config['sale_price']
continuous = config['continuous_trans']
machine_ids = np.array(machine_ids).reshape([len(cell_ids), -1])
self.machines = []
self.cells = []
for i in range(machine_ids.shape[0]):
cell_id = i
self.cells.append(Cell(cell_id))
for j in range(machine_ids.shape[1]):
machine_id = machine_ids[i, j]
if not continuous:
transition = config['transitions'][i]
T = Transition(transition, schedule=False)
else:
T = Continue_Transition(config['dist'], config['first_params'], config['second_params'], config['lower_bounds'])
self.machines.append(Machine(machine_id, T, cell_id, config, time_base))
self.cells[-1].add_machines(self.machines[-machine_ids.shape[1]:])
if i > 0:
p_cell = self.cells[i-1]
self.cells[-1].add_cells(p_cell)
def reset(self, random_init_sim):
for cell in self.cells:
cell.reset()
random_list = [0] * len(self.machines)
if random_init_sim:
for ele in random_init_sim:
random_list[ele] = 1
for idx, machine in enumerate(self.machines):
machine.reset(random_list[idx])
def step(self, actions):
for idx, machine in enumerate(self.machines):
machine.step(actions[idx])
for cell in self.cells:
cell.assign_jobs()
for cell in self.cells:
cell.proceed()
def get_avail_agent_actions(self, agent_id):
return self.machines[agent_id].valid_actions
@property
def products(self):
final_cell = self.cells[-1]
products = final_cell.deque
return products
@property
def profit(self):
products = self.products
cost = np.sum(list(map(lambda x: x.cost, self.machines)))
return products * self.sale_price - cost
def get_buffers_agent(self, agent_id):
total_buffer = np.sum(list(map(lambda x:x.buffer_size, self.cells)))
if total_buffer == 0:
return 0., 0.
agent = self.machines[agent_id]
cell_id = agent.cell_id
front_buffer = self.cells[cell_id].buffer_size
following_buffer = 0
if cell_id + 1 < len(self.cells) -1:
following_buffer = self.cells[cell_id+1].buffer_size
return front_buffer / total_buffer, following_buffer / total_buffer
def get_cost_agent(self, agent_id):
return self.machines[agent_id].cost
if __name__ == '__main__':
from scenes import get_map_params
import matplotlib.pyplot as plt
import numpy as np
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
def pie_plot(*args, **kw_args):
labels = kw_args['labels']
colors = tableau20[:len(labels)]
sizes = args
def func(pct, allvals):
absolute = pct/100.*np.sum(allvals)
return "{:.1f}%\n({:.1f} unit time)".format(pct, absolute)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, colors=colors, labels=labels, autopct=lambda pct:func(pct, sizes),
shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
map_name = 263
profit = 0
sim = Simulation(map_name)
for i in range(20):
random_init_sim = list(np.random.choice(6, 3, replace=False))
# print(random_init_sim)
print("At iteration {}".format(i))
sim.reset(random_init_sim)
# sim.step([0]*6)
num = 0
while num < 64:
actions = []
for i in range(6):
action_p = np.array([1., 0., 0.])
valid_actions = sim.get_avail_agent_actions(i)
# print('valid_action is {}'.format(valid_actions))
action_p = np.array(valid_actions, dtype=np.float32) * (action_p + 1e-9)
p = action_p / np.sum(action_p)
# print('p is {}'.format(p))
action = np.random.choice(3, 1, p=p)[0]
actions.append(action)
sim.step(actions)
# print("Actions are {}".format(actions))
# print("States are {}".format([machine.h_state for machine in sim.machines]))
num += 1
# print(sim.profit)
profit += sim.profit
# cells = []
# for m in sim.machines:
# cells.append(m.h_history/m.restart)
# print("States history are {}".format(m.h_history/m.restart))
#
for i in range(4):
h = np.concatenate(list(map(lambda x:np.array(x.h_tracker[i]), sim.machines[:3])))
# if i == 1:
# print(h)
print("Health state %d has mean %.3f, std %.3f"%(i, np.mean(h), np.std(h)))
for i in range(4):
h = np.concatenate(list(map(lambda x:np.array(x.h_tracker[i]), sim.machines[3:])))
print("Health state %d has mean %.3f, std %.3f"%(i, np.mean(h), np.std(h)))
# print(np.mean(cells[3:], axis=0))
# #pie_plot(*list(np.mean(cells[3:], axis=0)),
# # **{'labels': ['pre-mature', 'mature', 'slightly-worn', 'severely-worn']})
print(profit)
```
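`Continue_Transition` draws how long a machine dwells in each health state from a configured distribution and clamps it to a per-state lower bound. A standalone sketch of the `log-normal` branch with hypothetical parameters:

```python
import math
import numpy as np

# Hypothetical per-state parameters. first_params is the mean of the underlying
# normal, so typical dwell times are roughly e**mean steps.
first_params = [3.0, 2.5, 2.0, 1.0]
second_params = [0.3, 0.3, 0.3, 0.3]
lower_bounds = [5, 4, 3, 1]  # never dwell fewer steps than this

def sample_sojourn(state, rng=np.random):
    # Mirrors the 'log-normal' branch of Continue_Transition: draw a duration,
    # round it up, and clamp it to the per-state lower bound.
    raw = rng.lognormal(mean=first_params[state], sigma=second_params[state])
    return max(lower_bounds[state], math.ceil(raw))

print([sample_sojourn(s) for s in range(4)])
```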
#### File: envs/smart_man_sim/core_traffic.py
```python
import time
import numpy as np
import random
import math
from absl import logging
from .scenes import get_map_params
from functools import partial
import math
class Vehicle():
def __init__(self, id, x_init, y_init, x_goal, y_goal, **mid_xys):
self.id = id
self.x_init = x_init
self.y_init = y_init
self.mid_points_dict = mid_xys
def gas(self):
assert self.x_mid == self.x_mid or self.y_mid
def brake(self):
# do nothing here
return
class TrafficJunction:
def __init__(self, difficulty='easy'):
self._init_board(difficulty)
def _init_board(self, difficulty):
if difficulty == "easy":
#N, W
self.init_pos_dict = {'W': (0, 3), 'N': (3, 0)}
self.goal_pos_dict = {'E': (6, 3), 'S': (3, 6)}
self.mid_point_dict = {'WE': [], 'NS': []}
else:
#NW, NE, WN, WS, EN, ES, SW, SE
self.init_pos_dict = {'NW': (0, 4), 'NE':(0, 12),
'WN': (5, 0), 'WS': (13, 0),
'EN': (4, 17), 'ES': (12, 17),
'SW': (17, 5), 'SE': (17, 13)}
self.goal_pos_dict = {'NW': (0, 5), 'NE':(0, 13),
'WN': (4, 0), 'WS': (12, 0),
'EN': (5, 17), 'ES': (13, 17),
'SW': (17, 4), 'SE': (17, 12)}
self.mid_point_dict = {'NWNE': [(5, 4), (5, 13)], 'NWWN': [(4, 4)], 'NWWS': [(12, 4)], 'NWEN': [(5, 4)], 'NWES': [(13, 4)], 'NWSW': [], 'NWSE':[(13, 4), (13, 12)],
'NENW': [(4, 12), (4, 5)], 'NEWN': [(4, 12)], 'NEWS': [(12, 12)], 'NEEN': [(5, 12)], 'NEES': [(13, 12)], 'NESW': [(12, 12), (12, 4)], 'NESE':[],
'WNNW': [(5, 5)], 'WNWE': [(5, 13)], 'WNWS': [(5, 4), (12, 4)], 'WNEN': [], 'WNES': [(5, 12), (13, 12)], 'WNSW': [(5, 4)], 'WNSE':[(5, 12)],
'WSNW': [(13, 5)], 'WSNE': [(13, 13)], 'WSWN': [(13, 5), (4, 5)], 'WSEN': [(13, 13), (5, 13)], 'WSES': [], 'WSSW': [(13, 4)], 'WSSE':[(13, 12)],
'ENNW': [(4, 5)], 'ENNE': [(4, 13)], 'ENWN': [], 'ENWS': [(4, 4), (12, 4)], 'ENES': [(4, 12), (13, 12)], 'ENSW': [(4, 4)], 'ENSE':[(4, 12)],
'ESNW': [(12, 5)], 'ESNE': [(12, 13)], 'ESWS': [], 'ESWN': [(12, 5), (4, 5)], 'ESEN': [(12, 13), (5, 13)], 'ESSW': [(12, 4)], 'ESSE':[(12, 12)],
'SWNW': [], 'SWNE': [(5, 5), (5, 13)], 'SWWS': [(12, 5)], 'SWWN': [(4, 5)], 'SWEN': [(5, 5)], 'SWES': [(13, 5)], 'SWSE':[(13, 5), (13, 12)],
'SENW': [(4, 13), (4, 5)], 'SENE': [], 'SEWS': [(12, 13)], 'SEWN': [(4, 13)], 'SEEN': [(5, 13)], 'SEES': [(13, 13)], 'SESW':[(12, 13),(12, 4)]}
def _add_vehicle(self):
``` |
{
"source": "JH456/super-duper-spork",
"score": 4
} |
#### File: super-duper-spork/super_duper_spork/super_duper_spork.py
```python
from sys import argv
import wiki_summarize
def prettify_text(text, max_line_length):
"""
Adds line breaks to a string so that each line is at most max_line_length characters long.
Will also add in pipes to pretty print the output.
Keyword arguments:
text -- a string of words separated by spaces to pretty print.
max_line_length -- int
"""
words = text.split(' ')
line_length = 2
line = '| '
pretty_text = ''
for word in words:
if line_length + len(word) + 2 <= max_line_length:
line_length += len(word) + 1
line += word + " "
else:
pretty_text += line + ' ' * (max_line_length - 1 - line_length) + '|' + '\n'
line = '| ' + word + ' '
line_length = len(word) + 3
if line != '':
pretty_text += (line + ' ' * (max_line_length - 1 - line_length) + '|')
return pretty_text
def print_article_summary(markup):
sections = wiki_summarize.extract_sections(markup)
for title, text in sections:
print('+' + '-' * 78 + '+')
print(prettify_text(title, 80))
print('+' + '-' * 78 + '+')
print(prettify_text(wiki_summarize.summarize_text(text), 80))
print('+' + '-' * 78 + '+')
def print_disambiguation(markup):
print('+' + '-' * 78 + '+')
print(prettify_text("Your search may refer to:", 80))
print('+' + '-' * 78 + '+')
for suggestion in wiki_summarize.get_disambiguation_results(markup):
print(prettify_text("- " + suggestion, 80))
print('+' + '-' * 78 + '+')
def print_search_suggestions(markup):
print('+' + '-' * 78 + '+')
print(prettify_text("No article found! Here are some suggestions:", 80))
print('+' + '-' * 78 + '+')
for suggestion in wiki_summarize.get_search_results(markup):
print(prettify_text("- " + suggestion, 80))
print('+' + '-' * 78 + '+')
if __name__ == '__main__':
if len(argv) != 2:
print(__doc__)
else:
markup = wiki_summarize.fetch_article(argv[1])
if wiki_summarize.is_page_search_results(markup):
print_search_suggestions(markup)
elif wiki_summarize.is_page_disambiguation(markup):
print_disambiguation(markup)
else:
print_article_summary(markup)
```
#### File: super-duper-spork/super_duper_spork/wiki_summarize.py
```python
from bs4 import BeautifulSoup
import urllib.request
import nltk
import string
import re
search_url = 'http://wikipedia.org/w/index.php?search='
def fetch_article(search_term):
"""
Fetches an article from wikipedia using urllib.
Keyword arguments:
search_term -- the search term.
Returns:
A String containing the page markup.
"""
def prepare_search_term(search_term):
return search_term.replace(' ', '+')
search_term = prepare_search_term(search_term)
response = urllib.request.urlopen(search_url + search_term)
return response.read()
def is_page_search_results(markup):
"""
Determines if the given markup is for a search results page.
Keyword arguments:
markup -- a string containing the markup for the page
Returns:
True if the page is a search results page.
"""
soup = BeautifulSoup(markup, 'html.parser')
return soup.title.string.find('Search results') >= 0
def is_page_disambiguation(markup):
"""
Determines if a page is a disambiguation page.
Keyword arguments:
markup -- a string containing the markup for the page
Returns:
True if the page is a disambiguation page.
"""
soup = BeautifulSoup(markup, 'html.parser')
lis = soup.select("#mw-normal-catlinks li a")
return any(a.string.find('Disambiguation') >= 0 for a in lis)
def get_disambiguation_results(markup):
"""
Gets the disambiguation results from a disambiguation page.
Keyword arguments:
markup -- a string containing the markup for the page
Returns:
An array of strings that are pages the search term could refer to.
"""
soup = BeautifulSoup(markup, 'html.parser')
anchorTags = soup.select('.mw-parser-output ul li a')
anchorTags = filter(lambda a: 'title' in a.attrs, anchorTags)
return map(lambda x: x.attrs['title'], anchorTags)
def get_search_results(markup):
"""
Gets the search results from a search results page.
Keyword arguments:
markup -- a string containing the markup for the page
Returns:
An array of strings that are search suggestions. At most 20 in length.
"""
soup = BeautifulSoup(markup, 'html.parser')
anchorTags = soup.select('div.mw-search-result-heading a')
return list(map(lambda x: x.attrs['title'], anchorTags))
def extract_sections(markup):
"""
Gets a list of tuples in the form (title, text) for each article section.
Returns:
[(string, string)]
"""
soup = BeautifulSoup(markup, 'html.parser')
contents = soup.select_one('.mw-parser-output').contents
text = []
title = soup.select_one('#firstHeading').get_text()
title_text_map = []
for content in contents:
if content.name == 'h2':
title_text_map.append((title, ''.join(text)))
text = []
title = content.get_text()
elif content.name == 'p':
p_text = content.get_text()
p_text = p_text.replace('.', '. ')
text.append(p_text)
return list(filter(lambda e: e[1] != '', title_text_map))
def summarize_text(text):
"""
Summarizes text
Keyword arguments:
text -- string
Returns:
A string which is the summarized text.
"""
def clean(words):
stop_words = set(nltk.corpus.stopwords.words('english'))
return [word for word in words
if word.lower() not in string.punctuation
and word.lower() not in stop_words]
words = nltk.word_tokenize(text)
sentences = list(nltk.sent_tokenize(text))
clean_words = clean(words)
count_map = {}
for word in clean_words:
if word in count_map.keys():
count_map[word] += 1
else:
count_map[word] = 1
sentence_importance = []
i = 0
for sentence in sentences:
sentence_words = clean(nltk.word_tokenize(sentence))
importance = 0
for word in sentence_words:
importance += count_map[word]
sentence_importance.append((i, importance))
i += 1
sentence_importance = sorted(sentence_importance, key=lambda x: x[1],
reverse=True)
important_sentences = []
for j in range(min(3, len(sentence_importance))):
important_sentences.append(sentence_importance[j])
important_sentences = sorted(important_sentences, key=lambda x: x[0])
summary = ''
for sent in important_sentences:
summary += sentences[sent[0]]
return summary.replace('.', '. ').replace('  ', ' ')
``` |
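`summarize_text` scores each sentence by the summed frequency of its (cleaned) words and keeps the top three in their original order. The dependency-free sketch below illustrates the same scoring idea without NLTK; it is a simplification, not the module's implementation:

```python
import re

def tiny_summarize(text, keep=2):
    # Frequency-scoring sketch: count word frequencies, score each sentence by the
    # sum of its word counts, then return the top sentences in their original order.
    sentences = [s.strip() for s in re.split(r'(?<=[.!?])\s+', text) if s.strip()]
    counts = {}
    for w in re.findall(r'\w+', text.lower()):
        counts[w] = counts.get(w, 0) + 1
    scored = sorted(
        enumerate(sentences),
        key=lambda pair: sum(counts.get(w, 0) for w in re.findall(r'\w+', pair[1].lower())),
        reverse=True,
    )[:keep]
    return ' '.join(s for _, s in sorted(scored))

print(tiny_summarize(
    "Python is popular. Python is used for scripting. Cats are nice. "
    "Python powers many web services.", keep=2))
```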
{
"source": "jh88/enas",
"score": 2
} |
#### File: src/nasbench101/child.py
```python
from nasbench import api
class Child():
INPUT = 'input'
OUTPUT = 'output'
OPS = ['conv1x1-bn-relu', 'conv3x3-bn-relu', 'maxpool3x3']
def __init__(self, nasbench, **kwargs):
self.nasbench = nasbench
def get_model_spec(self, arc):
ops = [self.INPUT]
matrix = [[0 for _ in range(7)] for _ in range(7)]
for i in range(5):
arc_index = arc[i * 2]
arc_op = arc[i * 2 + 1]
ops.append(self.OPS[arc_op])
matrix[arc_index][i + 1] = 1
ops.append(self.OUTPUT)
for row in matrix:
if not sum(row):
row[-1] = 1
model_spec = api.ModelSpec(matrix=matrix, ops=ops)
return model_spec
def get_accuracies(self, arc):
model_spec = self.get_model_spec(arc)
data = self.nasbench.query(model_spec)
return data
def build_valid_rl(self, key='test_accuracy'):
data = self.get_accuracies(self.sample_arc)
self.accuracy = data[key]
self.train_acc = data['train_accuracy']
self.valid_acc = data['validation_accuracy']
self.test_acc = data['test_accuracy']
def connect_controller(self, controller_model):
self.sample_arc = controller_model.sample_arc
```
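`Child.get_model_spec` decodes the controller's 10-element sample into a 7×7 adjacency matrix plus an op list for NASBench-101: each of the five intermediate nodes is described by a (predecessor index, op index) pair, and any node without an outgoing edge is wired to the output. The sketch below performs the same decoding without the `nasbench` dependency, using a made-up `arc`:

```python
# Decode a sampled architecture the same way Child.get_model_spec does.
INPUT, OUTPUT = 'input', 'output'
OPS = ['conv1x1-bn-relu', 'conv3x3-bn-relu', 'maxpool3x3']

arc = [0, 1, 1, 0, 1, 2, 2, 1, 4, 0]  # hypothetical controller sample

ops = [INPUT]
matrix = [[0] * 7 for _ in range(7)]
for i in range(5):
    arc_index, arc_op = arc[2 * i], arc[2 * i + 1]
    ops.append(OPS[arc_op])
    matrix[arc_index][i + 1] = 1  # connect the chosen predecessor to node i+1
ops.append(OUTPUT)

# Any node with no outgoing edge is wired to the output node.
for row in matrix:
    if not sum(row):
        row[-1] = 1

for row in matrix:
    print(row)
print(ops)
```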
#### File: src/nasbench101/main.py
```python
from nasbench import api
import tensorflow as tf
from src.nasbench101.child import Child
from src.nasbench101.controller import Controller
def get_ops(nasbench):
controller_model = Controller(
num_branches=4,
num_cells=7,
lstm_size=32,
lstm_num_layers=2,
tanh_constant=None,
op_tanh_reduce=1.0,
temperature=None,
lr_init=1e-3,
lr_dec_start=0,
lr_dec_every=100,
lr_dec_rate=0.9,
l2_reg=0,
entropy_weight=None,
clip_mode=None,
grad_bound=None,
use_critic=False,
bl_dec=0.999,
optim_algo="adam",
sync_replicas=False,
num_aggregate=None,
num_replicas=None,
name="controller",
)
child_model = Child(nasbench)
child_model.connect_controller(controller_model)
controller_model.build_trainer(child_model)
controller_ops = {
"train_step": controller_model.train_step,
"loss": controller_model.loss,
"train_op": controller_model.train_op,
"lr": controller_model.lr,
"grad_norm": controller_model.grad_norm,
"reward": controller_model.reward,
"optimizer": controller_model.optimizer,
"baseline": controller_model.baseline,
"entropy": controller_model.sample_entropy,
"sample_arc": controller_model.sample_arc,
"skip_rate": controller_model.skip_rate
}
child_ops = {
'train_acc': child_model.train_acc,
'valid_acc': child_model.valid_acc,
'test_acc': child_model.test_acc
}
ops = {
'controller': controller_ops,
'child': child_ops
}
return ops
def train(nasbench, epoch=2):
g = tf.Graph()
with g.as_default():
ops = get_ops(nasbench)
child_ops = ops['child']
controller_ops = ops['controller']
with tf.train.SingularMonitoredSession() as sess:
for i in range(epoch):
run_ops = [
controller_ops['sample_arc'],
controller_ops['reward']
]
arc, acc = sess.run(run_ops)
print('epoch: {}\narc: {}\nacc: {}'.format(i, arc, acc))
def main():
nasbench = api.NASBench('nasbench_only108.tfrecord')
train(nasbench, epoch=1)
``` |
{
"source": "jh88/fbnet",
"score": 2
} |
#### File: fbnet/fbnet/lookup_table.py
```python
import json
import tensorflow as tf
from time import perf_counter as timer
from tqdm import tqdm
def get_lookup_table(super_net, inputs_shape=(1, 32, 32, 3), n=10):
lookup_table = []
x = tf.random.uniform(inputs_shape, minval=0, maxval=1)
for layer in tqdm(super_net):
if isinstance(layer, list):
lookup_table.append([get_latency(block, x, n) for block in layer])
x = layer[0](x)
else:
lookup_table.append(None)
x = layer(x)
return lookup_table
def timeit(op, x):
t0 = timer()
x = op(x)
return timer() - t0
def get_latency(op, x, n=10, init=True):
if init:
op(x)
return sum(timeit(op, x) for _ in range(n)) / n * 1e6
def save(data, filename):
with open(filename, 'w') as f:
json.dump(data, f)
def read(filename):
with open(filename, 'r') as f:
return json.load(f)
```
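As a rough usage sketch (the import path and the toy super net below are assumptions; the real super net comes from the FBNet model definition), a latency table can be built from any list where searchable blocks are lists of candidate ops and fixed layers are plain callables:
```python
import tensorflow as tf
from fbnet.lookup_table import get_lookup_table, save, read  # assumed import path
# toy "super net": one searchable block with two candidate ops, one fixed layer
super_net = [
    [tf.keras.layers.Conv2D(8, 3, padding='same'),
     tf.keras.layers.Conv2D(8, 5, padding='same')],
    tf.keras.layers.GlobalAveragePooling2D(),
]
table = get_lookup_table(super_net, inputs_shape=(1, 32, 32, 3), n=5)
save(table, 'lookup_table.json')
print(read('lookup_table.json'))   # [[latency_op1_us, latency_op2_us], None]
```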
#### File: fbnet/fbnet/utils.py
```python
import tensorflow as tf
from tensorflow.keras import backend as K
def channel_shuffle(inputs, group, data_format='channels_last'):
if data_format == 'channels_first':
_, c_in, h, w = inputs.shape.as_list()
x = tf.reshape(inputs, [-1, group, c_in // group, h * w])
x = tf.transpose(x, [0, 2, 1, 3])
x = tf.reshape(x, [-1, c_in, h, w])
else:
_, h, w, c_in = inputs.shape.as_list()
x = tf.reshape(inputs, [-1, h * w, group, c_in // group])
x = tf.transpose(x, [0, 1, 3, 2])
x = tf.reshape(x, [-1, h, w, c_in])
return x
def exponential_decay(initial_value, decay_rate, decay_steps, step):
return initial_value * decay_rate ** (step / decay_steps)
def gumbel_softmax(logits, tau, axis=-1):
shape = K.int_shape(logits)
# Gumbel(0, 1)
if len(shape) == 1:
gumbels = K.log(tf.random.gamma(shape, 1))
else:
gumbels = K.log(
tf.random.gamma(shape[:-1], [1 for _ in range(shape[-1])])
)
# Gumbel(logits, tau)
gumbels = (logits + gumbels) / tau
y_soft = K.softmax(gumbels, axis=axis)
return y_soft
def latency_loss(latency, alpha=0.2, beta=0.6):
return alpha * K.pow(K.log(latency), beta)
```
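A few quick checks of the helpers above (shapes and values chosen arbitrarily; the import path is an assumption):
```python
import tensorflow as tf
from fbnet.utils import channel_shuffle, gumbel_softmax, exponential_decay
x = tf.random.uniform((2, 8, 8, 4))
y = channel_shuffle(x, group=2)             # channels permuted, shape unchanged
print(y.shape)                              # (2, 8, 8, 4)
logits = tf.zeros((3, 5))                   # 3 searchable blocks, 5 candidate ops each
weights = gumbel_softmax(logits, tau=5.0)   # soft one-hot sample per block
print(tf.reduce_sum(weights, axis=-1))      # each row sums to ~1
print(exponential_decay(5.0, 0.9, decay_steps=10, step=20))   # 5.0 * 0.9**2 = 4.05
```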
#### File: jh88/fbnet/test_tflite_model.py
```python
import numpy as np
import tensorflow as tf
from time import perf_counter as timer
def main():
x = np.load('data/cifar_test_x.npy')
y = np.load('data/cifar_test_y.npy').flatten()
interpreter = tf.lite.Interpreter(model_path='data/fbnet.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
pred = []
t0 = timer()
for i in range(len(x)):
interpreter.set_tensor(input_details[0]['index'], x[i:i+1])
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
pred.append(output_data.argmax())
t = timer() - t0
print('total time: {:.2f}s, average: {:.2f}ms'.format(t, t * 1000 / len(x)))
print('accuracy: {}/{}'.format(sum(y == pred), len(x)))
return output_data
if __name__ == '__main__':
main()
``` |
{
"source": "jha929/py-hangul-checker",
"score": 3
} |
#### File: py-hangul-checker/tests/test_hangul_checker.py
```python
import unittest
from hangul_checker.hangul_checker import KoreanSpellChecker
class TestKoreanSpellChecker(unittest.TestCase):
def setUp(self):
self.korean_spell_checker = KoreanSpellChecker()
def tearDown(self):
del self.korean_spell_checker
def test_check_spelling(self):
sentence_in = u'부담갖지 말고 드세요!'
sentence_out = self.korean_spell_checker.check_spelling(sentence_in)
self.assertEqual(sentence_out, u'부담 갖지 말고 드세요!')
def test_check_sentence_len(self):
sentence_in = u'가' * 501
self.assertRaises(
Exception,
lambda: self.korean_spell_checker.check_spelling(sentence_in)
)
``` |
{
"source": "jha929/pyMODI",
"score": 3
} |
#### File: examples/creation_examples/dodge.py
```python
import modi
from playscii import GameManager, GameObject
from random import randint
import time
"""
This example requires the playscii package to be installed
"""
PLAYER_RENDER = " O \n" \
"/|\\ \n" \
"/ \\"
MODI_RENDER = "------\n" \
"|MODI|\n" \
"______"
class DodgeManager(GameManager):
def __init__(self, controller):
super().__init__((50, 20))
self.player = self.GameObject(
pos=(25, 2),
render=PLAYER_RENDER)
self.fire = self.GameObject(render=MODI_RENDER)
self.gyro = controller.gyros[0]
self.button = controller.buttons[0]
def setup(self):
self.set_title("PyMODI Dodge")
self.add_object(self.player)
self.add_object(self.fire)
self.fire.x, self.fire.y = 25, 20
def update(self):
pitch = self.gyro.pitch
if pitch < -5 and self.player.x < 48:
self.player.x += 30 * self.delta_time
elif pitch > 5 and self.player.x > 0:
self.player.x -= 30 * self.delta_time
self.fire.y -= 15 * self.delta_time
if self.fire.y < 0:
self.fire.x, self.fire.y = randint(0, 40), 25
if self.fire.y < 3 and (self.fire.x - 4 <= self.player.x <=
self.fire.x + 4):
self.set_title("GAME OVER")
self.set_flag('quit', True)
class GameObject(GameObject):
def update(self):
pass
if __name__ == "__main__":
bundle = modi.MODI(3)
game_manager = DodgeManager(bundle)
game_manager.start()
time.sleep(3)
```
#### File: module/output_module/display.py
```python
from modi.module.output_module.output_module import OutputModule
class Display(OutputModule):
TEXT = 17
CLEAR = 21
VARIABLE = 22
SET_HORIZONTAL = 25
SET_VERTICAL = 26
def __init__(self, id_, uuid, msg_send_q):
super().__init__(id_, uuid, msg_send_q)
self._text = ""
@property
def text(self):
return self._text
@text.setter
def text(self, text: str) -> None:
"""Clears the display and show the input string on the display.
Returns the json serialized signal sent to the module
to display the text
:param text: Text to display.
:type text: str
:return: None
"""
self.clear()
self._set_property(
self._id,
Display.TEXT,
str(text)[:27] + '\0', # 27 characters can be shown on the display
OutputModule.STRING
)
self._text = text
def show_variable(self, variable: float, position_x: int,
position_y: int) -> None:
"""Clears the display and show the input variable on the display.
Returns the json serialized signal sent to
the module to display the text
:param variable: variable to display.
:type variable: float
:param position_x: x coordinate of the desired position
:type position_x: int
:param position_y: y coordinate of the desired position
:type position_y: int
:return: None
"""
self._set_property(
self._id,
Display.VARIABLE,
(variable, position_x, position_y),
OutputModule.DISPLAY_VAR,
)
self._text += str(variable)
def set_horizontal(self, offset) -> None:
"""Set the horizontal offset on the screen
:param offset: offset in pixels
:type offset: float
:return: None
"""
self._set_property(
self.id,
Display.SET_HORIZONTAL, (offset, ),
OutputModule.FLOAT,
)
def set_vertical(self, offset) -> None:
"""Set the vertical offset on the screen
:param offset: offset in pixels
:type offset: float
:return: None
"""
self._set_property(
self.id,
Display.SET_VERTICAL, (offset, ),
OutputModule.FLOAT,
)
def clear(self) -> None:
"""Clear the screen.
:return: None
"""
self._set_property(
self._id,
Display.CLEAR,
(0, 0),
OutputModule.RAW
)
self._text = ""
```
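A hypothetical usage sketch of this Display module; it assumes a connected MODI network exposing the display as `bundle.displays[0]`, mirroring how gyros and buttons are accessed in the dodge example above:
```python
import time
import modi

bundle = modi.MODI(1)                 # assumes one connected module bundle
display = bundle.displays[0]          # assumed accessor, as with gyros/buttons
display.text = "Hello MODI"           # clears, then shows up to 27 characters
time.sleep(2)
display.show_variable(3.14, 0, 20)    # show a float at (x=0, y=20)
time.sleep(2)
display.set_vertical(10)              # shift the content down by 10 pixels
time.sleep(2)
display.clear()
```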
#### File: modi/util/msgutil.py
```python
import json
import struct
from base64 import b64encode, b64decode
from typing import Tuple
def parse_message(command: int, source: int, destination: int,
byte_data: Tuple =
(None, None, None, None, None, None, None, None)):
message = dict()
message['c'] = command
message['s'] = source
message['d'] = destination
message['b'] = __encode_bytes(byte_data)
message['l'] = len(byte_data)
return json.dumps(message, separators=(",", ":"))
def __extract_length(begin: int, src: Tuple) -> int:
length = 1
for i in range(begin + 1, len(src)):
if not src[i]:
length += 1
else:
break
return length
def __encode_bytes(byte_data: Tuple):
idx = 0
data = bytearray(len(byte_data))
while idx < len(byte_data):
if not byte_data[idx]:
idx += 1
elif byte_data[idx] >= 256:
length = __extract_length(idx, byte_data)
data[idx: idx + length] = int.to_bytes(
byte_data[idx], byteorder='little', length=length, signed=True
)
idx += length
elif byte_data[idx] < 0:
data[idx: idx + 4] = int.to_bytes(
int(byte_data[idx]), byteorder='little', length=4, signed=True
)
idx += 4
elif byte_data[idx] < 256:
data[idx] = int(byte_data[idx])
idx += 1
return b64encode(bytes(data)).decode('utf8')
def decode_message(message: str):
message = json.loads(message)
command = message['c']
source = message['s']
destination = message['d']
data = message['b']
length = message['l']
return command, source, destination, data, length
def unpack_data(data: str, structure: Tuple = (1, 1, 1, 1, 1, 1, 1, 1)):
data = bytearray(b64decode(data.encode('utf8')))
idx = 0
result = []
for size in structure:
result.append(int.from_bytes(data[idx:idx + size], byteorder='little'))
idx += size
return result
def parse_data(values, data_type: str) -> Tuple:
data = []
if data_type == 'int':
for value in values:
if value >= 0:
data += int.to_bytes(int(value), byteorder='little', length=2)
else:
data += int.to_bytes(
int(value), byteorder='little', length=4, signed=True
)
elif data_type == 'float':
for value in values:
data += struct.pack("f", float(value))
elif data_type == 'string':
data = map(ord, str(values))
elif data_type == 'raw':
data = values
elif data_type == 'display_var':
data = struct.pack("f", float(values[0])) + bytearray(
[values[1], 0x00, values[2], 0x00])
return tuple(data)
def decode_data(data: str) -> float:
return round(struct.unpack("f", bytes(unpack_data(data)[:4]))[0], 2)
```
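A minimal round-trip sketch using the helpers above; the command, source and destination values are made up for illustration:
```python
from modi.util.msgutil import parse_message, decode_message, unpack_data

raw = parse_message(0x04, 0, 10, byte_data=(2, None, None, None, 5, None, None, None))
print(raw)                       # JSON string as sent over the transport
c, s, d, data, length = decode_message(raw)
print(c, s, d, length)           # 4 0 10 8
print(unpack_data(data))         # [2, 0, 0, 0, 5, 0, 0, 0]
```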
#### File: tests/task/test_can_task.py
```python
import json
import unittest
from queue import Queue
from modi.task.can_task import CanTask
from modi.util.msgutil import parse_message
class MockCan:
def __init__(self):
self.recv_buffer = Queue()
def recv(self, timeout):
json_pkt = parse_message(0x03, 0, 1)
return CanTask.compose_can_msg(json.loads(json_pkt))
def send(self, item):
self.recv_buffer.put(item)
class TestCanTask(unittest.TestCase):
"""Tests for 'CanTask' class"""
def setUp(self):
"""Set up test fixtures, if any."""
self.can_task = CanTask()
self.can_task._bus = MockCan()
def tearDown(self):
"""Tear down test fixtures, if any."""
del self.can_task
CanTask._instances.clear()
def test_recv(self):
"""Test _recv_data method"""
self.assertEqual(self.can_task.recv(), parse_message(0x03, 0, 1))
def test_send(self):
"""Test _send_data method"""
json_pkt = parse_message(0x03, 0, 1)
self.can_task.send(json_pkt)
self.assertEqual(self.can_task.bus.recv_buffer.get().data,
CanTask.compose_can_msg(json.loads(json_pkt)).data
)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "JhaAjeet/Uber-Data-Analysis_DataScience",
"score": 3
} |
#### File: JhaAjeet/Uber-Data-Analysis_DataScience/Data Analysis gui.py
```python
from tkinter import*
import pandas
import seaborn
root = Tk()
root.geometry('300x300')
l = Label(root, text='Uber Data Analysis')
l.pack()
def Action():
global data
data = pandas.read_csv('C:/Users/hp/Documents/PROJECT/Uber_Data_Analysis 1/uber-raw-data-apr14.csv')
print(data.head())
b1 = Button(root, text='data',command=Action)
b1.pack()
root.mainloop()
``` |
{
"source": "JhaAman/lihax",
"score": 3
} |
#### File: lihax/MachineLearning/potential_field.py
```python
import math, socket, struct, numpy as np, sys
# Get the gradient of the potential of an obstacle
# particle at (ox, oy) with the origin at (mx, my)
# Get the potential of an obstacle particle at (ox, oy)
# with the origin at (mx, my)
def potential(mx, my, ox, oy):
return 1.0 / ((mx - ox)**2 + (my - oy)**2)**0.5
class PotentialField():
def __init__(self):
#socket initialization
self.host_ip = socket.gethostname()
self.receiving_port = 5510
self.sending_port = 6510
self.sockR = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sockS = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sockS.connect((self.host_ip, self.sending_port))
self.sockR.bind((self.host_ip, self.receiving_port))
# cumulative speed - used to build up momentum
self.speed_c = 0
def grad(self,dist, mx, my, ox, oy):
c = -1/((mx - ox)**2 + (my - oy)**2)**1.5
return c*(mx - ox), c*(my - oy)
# calculate the total gradient from an array of lidar ranges
# with origin at (my_x, my_y)
def calc_gradient(self, ranges, my_x, my_y):
gradient_x = 0 # sum of dU/dx
gradient_y = 0 # sum of dU/dy
# ignore the edges of the lidar FOV, usually noisy
for i in range(len(ranges) - 180, 180, -1):
r = ranges[i]
deg = -(270.0/1080) * i # convert index of range to degree of range
deg += 225 # lidar FOV starts at -45 deg
px = r * math.cos(math.radians(deg)) # convert from polar to x coord
py = r * math.sin(math.radians(deg)) # convert from polar to y coord
gx, gy = self.grad(r, my_x, my_y, px, py) # compute gradient at rectangular coordinates
# add point's gradient into sum
gradient_x += gx
gradient_y += gy
return (gradient_x, gradient_y)
# lidar subscriber callback
def receive_lidar(self, STEER_BIAS=0, PUSH_MULTIPLIER=19.7, STEER_GRAD_PROPORTION=20.0, SPEED_GRAD_PROPORTION=-0.001, MOMENTUM_MU=0.95, UPDATE_INFLUENCE=0.11, REVERSE_SPEED_MULTIPLIER=-2.3, MIN_SPEED_CLAMP=-0.9, MAX_SPEED_CLAMP=1.0):
while True:
packet = self.sockR.recvfrom(65565)[0]
ranges = struct.unpack("1080f", packet)
# compute gradient sums from lidar ranges
grad_x, grad_y = self.calc_gradient(ranges, 0, 0)
grad_x += STEER_BIAS * self.grad(0.1, 0, 0, 0.1, 0)[0]
# place repelling particle behind origin (the car) to
# push the car forward. 14 is a multiplier to give more push.
grad_y += PUSH_MULTIPLIER * self.grad(0.1, 0, 0, 0, -0.1)[1]
# magnitude of gradient (euclidian dist)
grad_magnitude = math.sqrt(grad_x**2 + grad_y**2)
# steering proportional to potential gradient w.r.t. x
steer = grad_x / STEER_GRAD_PROPORTION # OR? math.atan2(grad_x, grad_y)
# the speed update at this instance: proportional to gradient magnitude
# and sign depends of sign of gradient w.r.t y
speed = (SPEED_GRAD_PROPORTION * grad_magnitude * np.sign(grad_y))*100-194
# update the cumulative momentum using the speed update at this instance.
# speed_c is multiplied by some constant < 1 to simulate friction and
# speed is multiplied by some constant > 0 to determine the influence of the
# speed update at this instance.
self.speed_c = MOMENTUM_MU*self.speed_c + UPDATE_INFLUENCE * speed
# if speed is less than -1, clamp it. also, the steering is multiplied
# by a negative constant < -1 to make it back out in a way that
# orients the car in the direction it would want to turn if it were
# not too close.
speed_now = self.speed_c
if self.speed_c < 0:
if self.speed_c > -0.2:
speed_now = -0.7
steer *= REVERSE_SPEED_MULTIPLIER
# print("reversing")
if self.speed_c < MIN_SPEED_CLAMP:
speed_now = MIN_SPEED_CLAMP
elif self.speed_c > MAX_SPEED_CLAMP:
# if speed is greater than 1, clamp it
speed_now = MAX_SPEED_CLAMP
# create and publish drive message using steer and speed_c
# print "Speed: " + str(speed)
# print "Speed c: " + str(self.speed_c)
# print "Speed now: " + str(speed_now)
message = struct.pack("2f", speed_now, -steer)
self.sockS.send(message)
self.sockR.close()
self.sockS.close()
print "STOPPED!!!"
sys.exit(1)
pf = PotentialField()
pf.receive_lidar()
```
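As a quick numerical sanity check of the math above (kept separate from the script, which binds UDP sockets and starts running at import), the analytic gradient should match a finite-difference approximation of the potential; the unused `dist` argument of `grad()` is dropped here:
```python
def potential(mx, my, ox, oy):
    return 1.0 / ((mx - ox)**2 + (my - oy)**2)**0.5

def grad(mx, my, ox, oy):
    c = -1.0 / ((mx - ox)**2 + (my - oy)**2)**1.5
    return c * (mx - ox), c * (my - oy)

eps = 1e-6
gx, gy = grad(0.0, 0.0, 1.0, 2.0)
fd_gx = (potential(eps, 0, 1, 2) - potential(-eps, 0, 1, 2)) / (2 * eps)
fd_gy = (potential(0, eps, 1, 2) - potential(0, -eps, 1, 2)) / (2 * eps)
print(gx, fd_gx)   # both ~0.0894
print(gy, fd_gy)   # both ~0.1789
```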
#### File: lihax/MachineLearning/winter_potential.py
```python
import math
import socket
import struct
import numpy as np
# simple class to contain the node's variables and code
class PotentialField:
# class constructor; subscribe to topics and advertise intent to publish
def __init__(self):
# initialize potential field variables
self.charge_laser_particle = 0.07
self.charge_forward_boost = 25.0
self.boost_distance = 0.5
self.p_speed = 0.05
self.p_steering = 1.0
#socket initialization
self.host_ip = socket.gethostname()
self.receiving_port = 5510
self.sending_port = 6510
self.sockR = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sockS = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sockS.connect((self.host_ip, self.sending_port))
self.sockR.bind((self.host_ip, self.receiving_port))
self.speedHist = 1.0
def scan_callback(self):
while True:
packet = self.sockR.recvfrom(65565)[0]
ranges = struct.unpack("1080f", packet)[180:901]
# Create potential gradients for middle 180 degrees of laser scan particles
scan_rad_angles = np.radians(0.25 * np.arange(1081, dtype=float)) # 0.25 deg per step; angle_min offset still missing?
scan_rad_angles = scan_rad_angles[180:901]
scan_x_unit_vectors = -np.cos(scan_rad_angles)
scan_y_unit_vectors = -np.sin(scan_rad_angles)
scan_x_components = (self.charge_laser_particle * scan_x_unit_vectors) / np.square(ranges)
scan_y_components = (self.charge_laser_particle * scan_y_unit_vectors) / np.square(ranges)
# Add the potential for the point behind the robot (to give it a kick)
kick_x_component = np.ones(1) * self.charge_forward_boost / self.boost_distance**2.0
kick_y_component = np.zeros(1)
# Add together the gradients to create a global gradient showing the robot which direction to travel in
total_x_component = np.sum(scan_x_components) + kick_x_component
total_y_component = np.sum(scan_y_components) + kick_y_component
# Now, create a steering command to send to the vesc.
steering_angle = (self.p_steering * np.sign(total_x_component) * math.atan2(total_y_component, total_x_component))
speed = (self.p_speed * np.sign(total_x_component) * math.sqrt(total_x_component**2 + total_y_component**2))
speed = self.kickOut(speed)
# send to socket
message = struct.pack("2f", speed, steering_angle)
self.sockS.send(message)
self.sockR.close()
def kickOut(self, speed):
self.speedHist = (self.speedHist * .75) + (abs(speed) * .25)
print(self.speedHist)
if self.speedHist < 0.2:
return -1.0
else:
return speed
pf = PotentialField()
pf.scan_callback()
``` |
{
"source": "jhaapako/tcf",
"score": 3
} |
#### File: tcf/examples/test_efi_http_boot.py
```python
import os
import socket
import urllib.parse
import tcfl.biosl
import tcfl.tc
import tcfl.tl
class _test(tcfl.pos.tc_pos_base):
# tcfl.pos.tc_pos_base.deploy_10_flash(self, target) does the BIOS
# flashing for us
# ignore tcfl.pos.tc_pos_base template actions
def deploy_50(self, ic, target):
pass
# ignore tcfl.pos.tc_pos_base template actions
def start_50(self, ic, target):
pass
def eval(self, target):
IMG_URL = os.environ.get("IMG_URL", None)
if IMG_URL == None:
raise tcfl.tc.blocked_e("No IMG_URL environment given")
# Resolve the URL in the client
#
# While the BIOS and iPXE can resolve DNS, sometimes they are not
# as robust, so let's have it resolved here, since we have better
# capabilities.
url = urllib.parse.urlparse(IMG_URL)
url_resolved = url._replace(
netloc = url.hostname.replace(url.hostname,
socket.gethostbyname(url.hostname)))
target.power.cycle()
tcfl.biosl.boot_network_http(
target, r"HTTP %(ID)s", url_resolved.geturl())
# expect something to happen:
## >>Start HTTP Boot over IPv4.....^M$
## Station IP address is 10.291.183.9^M$
## ^M$
## URI: http://file.server/boot.img^M$
## File Size: 110100480 Bytes^M$
## Downloading...1%^M Downloading...2%^M Downloading...
## Downloading...100
target.expect("Start HTTP Boot")
# let's just expect one Downloading because dep on the file
# size not sure how many progress' will be printed
target.expect("Downloading...")
```
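For reference, the hostname-resolution trick used in *eval* can be exercised on its own; the URL below is a placeholder and the snippet needs working DNS:
```python
import socket
import urllib.parse

url = urllib.parse.urlparse("http://www.example.com/boot.img")   # placeholder URL
url_resolved = url._replace(
    netloc = url.hostname.replace(url.hostname,
                                  socket.gethostbyname(url.hostname)))
print(url_resolved.geturl())   # e.g. http://<resolved-ip>/boot.img
```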
#### File: tcf/examples/test_pos_deploy_N.py
```python
import os
import sys
import tcfl
import tcfl.tc
import tcfl.tl
import tcfl.pos
TARGETS = int(os.environ.get('TARGETS', 4))
MODE = os.environ.get('MODE', 'one-per-type')
@tcfl.tc.interconnect("ipv4_addr", mode = MODE)
@tcfl.tc.target('pos_capable', count = TARGETS)
class _test(tcfl.tc.tc_c):
image_requested = None
def configure_00(self):
if self.image_requested == None:
if not 'IMAGE' in os.environ:
raise tcfl.tc.blocked_e(
"No image to install specified, set envar IMAGE")
self.image_requested = os.environ["IMAGE"]
# select the targets that can be flashed...this is basically
# all of them (target, target1, target2...) except for the
# interconnect (ic).
self.roles = []
for role, target in self.target_group.targets.items():
if 'pos_capable' in target.rt:
self.roles.append(role)
def deploy_50(self, ic):
ic.power.cycle()
@self.threaded
def _target_pos_deploy(target, ic):
return target.pos.deploy_image(ic, self.image_requested)
self.run_for_each_target_threaded(
_target_pos_deploy, (ic, ), targets = self.roles)
def start_00(self, ic):
ic.power.on()
@self.threaded
def _target_start(target):
target.pos.boot_normal()
# higher timeout, some VM implementations take more when
# you spin up 80 of them at the same time...
target.shell.up(user = 'root', timeout = 120)
self.run_for_each_target_threaded(
_target_start, targets = self.roles)
def eval(self):
@self.threaded
def _target_eval(target):
target.shell.run("echo 'I ''booted'", "I booted")
self.run_for_each_target_threaded(
_target_eval, targets = self.roles)
def teardown(self):
tcfl.tl.console_dump_on_failure(self)
```
#### File: tcf/examples/test_pos_list_images.py
```python
import os
import subprocess
import tcfl.tc
import tcfl.tl
import tcfl.pos
image_env = os.environ.get("IMAGE", None)
def _target_ic_kws_get(target, ic, kw, default = None):
return target.kws.get(kw, ic.kws.get(kw, default))
@tcfl.tc.interconnect(mode = "all")
@tcfl.tc.target("pos_capable")
class _(tcfl.tc.tc_c):
"""
List images available in a POS rsync server
"""
def eval(self, ic, target):
ic.power.on()
rsync_server = _target_ic_kws_get(
target, ic, 'pos.rsync_server',
_target_ic_kws_get(target, ic, 'pos_rsync_server', None))
self.report_info("POS rsync server: %s" % rsync_server)
rsync_host = rsync_server.split("::", 1)[0]
if rsync_host == ic.rtb.parsed_url.hostname:
rsync_port = ic.tunnel.add(873, ic.kws['ipv4_addr'])
else:
rsync_port = 873
output = subprocess.check_output(
[ 'rsync', '--port', str(rsync_port), rsync_server + '/' ],
close_fds = True, stderr = subprocess.PIPE)
# output looks like:
#
# drwxrwxr-x 4,096 2018/10/19 00:41:04 .
# drwxr-xr-x 4,096 2018/10/11 06:24:44 clear:live:25550
# dr-xr-xr-x 4,096 2018/04/24 23:10:02 fedora:cloud-base-x86-64:28
# drwxr-xr-x 4,096 2018/10/11 20:52:34 rtk::114
# ...
#
# so we parse for 5 fields, take last
imagel = tcfl.pos.image_list_from_rsync_output(output)
for image in imagel:
print(ic.fullid, ":".join(image))
if image_env:
image_match = tcfl.pos.image_select_best(image_env, imagel, ic)
self.report_info("Image '%s' (from env IMAGE) matches: %s"
% (image_env, ":".join(image_match)), level = 1)
```
#### File: tcf/examples/test_qemu_bios_N.py
```python
import collections
import os
import re
import subprocess
import sys
import threading
import commonl
import tcfl
import tcfl.tc
import tcfl.tl
import tcfl.pos
TARGETS = int(os.environ.get('TARGETS', 4))
MODE = os.environ.get('MODE', 'one-per-type')
@tcfl.tc.interconnect("ipv4_addr", mode = MODE)
@tcfl.tc.target('pos_capable and interfaces.images.bios.instrument', count = TARGETS)
class _test(tcfl.tc.tc_c):
def configure_00(self):
if not 'EDK2_DIR' in os.environ:
raise tcfl.tc.skip_e(
"please export env EDK2_DIR pointing to path of "
"configured, built or ready-to-build tree", dict(level = 0))
self.builddir = os.environ["EDK2_DIR"]
# select the targets that can be flashed with the images
# interface all of them (target, target1, target2...) except
# for the interconnect (ic).
self.roles = []
for role, target in self.target_group.targets.items():
if 'images' in target.rt['interfaces']:
self.roles.append(role)
def build_00(self):
# Modify the BIOS vendor string to showcase a change
#
# Backslashes here are killing us; the original C code is
#
## #define TYPE0_STRINGS \
## "EFI Development Kit II / OVMF\0" /* Vendor */ \
## "0.0.0\0" /* BiosVersion */ \
## "02/06/2015\0" /* BiosReleaseDate */
#
# So we need to replace all in the Vendor string until the \0,
# but we need to escape that \\ for both python and the
# shell. bleh.
self.shcmd_local(
r"sed -i"
" '/Vendor/s|.*\\\\0\"|\"I am the vendor now\\\\0\"|'"
" '%s/OvmfPkg/SmbiosPlatformDxe/SmbiosPlatformDxe.c'"
% self.builddir)
#
# Build the new BIOS
#
# I lifted the build instructions of the Fedora 29 spec file
# and simplified to the max, but I only know them to work on
# this git version; complain otherwise
rev = subprocess.check_output(
"git -C '%s' rev-parse HEAD" % self.builddir,
shell = True).decode()
if rev.strip() != "cb5f4f45ce1fca390b99dae5c42b9c4c8b53deea":
self.report_info(
"WARNING!! WARNING!!! These build process only verified to"
" workwith git version cb5f4f45ce, found %s" % rev,
level = 0)
env = dict(
EDK_DIR = self.builddir,
GCC5_X64_PREFIX = "x86_64-linux-gnu-",
CC_FLAGS = "-t GCC5 -n 4 --cmd-len=65536 -b DEBUG --hash" ,
EDK_TOOLS_PATH = os.path.join(self.builddir, "BaseTools"),
)
env['OVMF_FLAGS'] = "%(CC_FLAGS)s -FD_SIZE_2MB" % env
self.report_pass("re/building BaseTools in %s" % self.builddir)
self.shcmd_local(
"cd %s;"
" source ./edksetup.sh;"
" ${MAKE:-make} -C BaseTools -j4" % self.builddir, env = env)
# remove -Werror from the configuratio, as there are warnings
# that otherwise kill the build
self.shcmd_local(
"sed -i -e 's/-Werror//' '%s/Conf/tools_def.txt'" % self.builddir)
self.report_pass("re/building OVMF in %s" % self.builddir)
self.shcmd_local(
"cd %(EDK_DIR)s;"
" source ./edksetup.sh;"
" build -a X64 -t GCC5 -p OvmfPkg/OvmfPkgX64.dsc"
% env, env = env)
self.report_pass("built BIOS")
def deploy_50(self, ic):
class rtb_data_c(object):
def __init__(self):
self.lock = threading.Lock()
self.remote_file = None
rtb_datas = collections.defaultdict(rtb_data_c)
# Flash the new BIOS before power cycling; make sure we upload
# the file only once per server
local_file = os.path.join(
self.builddir,
"Build/OvmfX64/DEBUG_GCC5/FV/OVMF_CODE.fd")
@self.threaded
def _target_bios_flash(target, testcase, local_file):
# upload only once to each server
with self.lock:
rtb_data = rtb_datas[target.rtb]
with rtb_data.lock:
if not rtb_data.remote_file:
rtb_data.remote_file = \
"OVMF_CODE.fd-" + self.kws['tc_hash']
target.report_info("uploading", level = 0)
target.store.upload(rtb_data.remote_file, local_file)
target.report_info("uploaded", level = 0)
target.images.flash({ "bios" : rtb_data.remote_file },
upload = False)
self.report_info("flashing BIOS", dlevel = -1)
self.run_for_each_target_threaded(
_target_bios_flash, (self, local_file, ), targets = self.roles)
self.report_pass("flashed BIOS", dlevel = -1)
def start_00(self, ic):
ic.power.on() # need the network to boot POS
@self.threaded
def _target_start(target):
target.pos.boot_to_pos()
self.run_for_each_target_threaded(
_target_start, targets = self.roles)
def eval(self):
@self.threaded
def _target_eval(target):
target.shell.run("dmidecode -t bios",
re.compile("Vendor:.*I am the vendor now"))
self.run_for_each_target_threaded(
_target_eval, targets = self.roles)
```
#### File: tcf/tcfl/report_mariadb.py
```python
import functools
import os
import threading
import logging
import mariadb
import commonl
import tcfl.tc
class driver_summary(tcfl.tc.report_driver_c):
"""Report a summary of test case execution to an SQL database
Summaries report only aggregated data and not detailed testcase
execution data. Read on.
:param str hostname: *USER:PASSWORD@HOSTNAME*
- *USER:PASSWORD* has to have schema powers to create tables
and modify columns.
*PASSWORD* can be given as described
in :func:`commonl.split_user_pwd_hostname` to obtain the
password from keyrings or other locations.
:param str database: name of the database in the given host where
data is to be stored.
:param str password: (optional) password to use to connect,
overriding whatever is set in *hostname*. Note it will be
passed through :func:`commonl.password_get`, thus it can use
the *FILE* and *KEYRING* methods to describe it.
:param int port: (optional; default 3307) port where the database
server is listening on.
:param bool ssl: (optional; default *True*) use SSL or not.
:param dict mariadb_extra_opts: (optional) extra options to the
MadriaDB connection initialization
(:meth:`mariadb.connect`). This takes the form of a dictionary
keyed by string (valid Python identifiers):
>>> mariadb_extra_opts = {
>>> "read_timeout": 3,
>>> "ssl_cert": "/path/to/SSL.crt"
>>> }
:param str table_name_prefix: (optional; default none) Name to
prefix the created tables with.
This is usually set to be the name of a pipeline which is
running testcases on logically grouped hardware (eg: all
testcases running performance tests on HW type A), so that the
tables reported can be differentiated by the hardware types.
Design
^^^^^^
Entry point is :meth:`report`, which is called by the reporting
API when the testcase reports any message or data.
The driver will accumulate all the data in memory until a
*COMPLETION* message is received, marking the end of the testcase
execution, and then it will flush it to the database. This is done
to avoid impacting testcase timing due to
possible networking issues.
The reporting will take data from the testcase execution and put it in
different tables in the SQL database.
- tables will be prefixed with the configured *table_name_prefix*
and *prefix_bare* (if given).
- tables will have a primary key corresponding to the *Run
ID* given to *tcf run* with *-i* or *--runid*. If no *RunID* has
been given, *no RunID* is chosen.
This means that data coming from testcases executed with a RunID
may override data from the same testcase exectued in the past
with the same RunID.
- columns will be added dynamically when not present.
Note any table name or column which is longer than 64 chars will
be encoded as *fieldid:10CHARHASH* -- this is because MariaDB limits
identifier names to 64 characters.
In case of the table name, there is the limitation on putting
together *TABLE_NAME_PREFIX PREFIX_BARE [NAME]*--if
*TABLE_NAME_PREFIX PREFIX_BARE* is longer than 45 chars, we'll
still break the limit (FIXME: assert this)
Currently, the following tables will be created:
- Summary: contains summaries about how many testcases were
executed and how many passed, failed, errored, blocked or
skipped
Data from more recent testcases with same RunIDs accumulate from
previous execution using the same RunID.
- History: a column for each executed testcase and their result on
each run. This will be a letter (see :data:`tcfl.tc.valid_results`):
- *P*: Passed
- *F*: Failed
- *E*: Errored
- *B*: Blocked
- *S*: Skipped
Data from more recent testcases with same RunIDs overwrite
previous execution using the same RunID
- data reported with :meth:`target.report_data
<tcfl.tc.reporter_c.report_data>` or :meth:`self.report_data
<tcfl.tc.reporter_c.report_data>`: these will be put in a table
named after the data domain, with the data name on each column.
Data from more recent testcases with same RunIDs overwrite
previous execution using the same RunID
Note data is not segregated by target; it is up to the execution
pipeline and testcases to either define proper domains so
reports don't collide or are overriden.
Tables and columns will be created dynamically when they do not
exist, since much of the columns details come from the testcases
and it can't be known ahead of time what they will be.
Assumptions
^^^^^^^^^^^
- Python 3 dictionaries are ordered by insertion order (true as of
v3.6+)
- RunIDs fit in the length of a column identifier
- SQL injection! All `{FIELD}` references (between backticks) are
not protected, so all the calls that feed stuff like table names
and column names HAVE to have been passed by
_id_maybe_encode_cache(), which will encode backticks away (to
avoid SQL injections) and also replace too long names with a
hashed field ID.
Note the table names we do it in the call sites, so it doesn't
have to be done on each SQL statement expansion.
System setup / Requirements
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Install the MariaDB connector::
$ pip3 install --user mariadb
PENDING
^^^^^^^
- Create more config options that allows to give more info about data
types or defaults in class specific fashion (eg: to give the history
table a varchar(255)); suggest default_type to complement
index_column, index_value.
"""
def __init__(self, hostname, database, password = None,
port = 3307, ssl = True,
table_name_prefix = "", mariadb_extra_opts = None):
assert isinstance(hostname, str)
assert isinstance(database, str)
assert password == None or isinstance(password, str)
assert isinstance(port, int)
assert isinstance(ssl, bool)
assert isinstance(table_name_prefix, str)
if mariadb_extra_opts:
commonl.assert_dict_key_strings(mariadb_extra_opts, "mariadb_extra_opts")
self.mariadb_extra_opts = mariadb_extra_opts
else:
self.mariadb_extra_opts = {}
self.user, self.password, self.host = \
commonl.split_user_pwd_hostname(hostname)
if password:
self.password = commonl.password_get(self.host,
self.user, password)
self.port = port
self.database = database
self.ssl = ssl
self.table_name_prefix_raw = table_name_prefix
self.table_name_prefix = self._sql_id_esc(table_name_prefix)
self.docs = {}
tcfl.tc.report_driver_c.__init__(self)
@staticmethod
def _sql_id_esc(key):
# we'll enclose every table/column identifier in backticks, so
# we have to encode them in the value to avoid injection
# attacks
return key.replace("`", "``")
#: Map of Python types to SQL types
#:
#: Used when auto-creating columns, which kinda limits what kind
#: of data we can feed to _table_row_*()
sql_type_by_python_type = {
bytes: "binary",
bool: "boolean",
#datetime: "datetime",
float: "float",
str: "text", # I guess limited... to 65k in MariaDB
float: "double",
int: "int",
bytes: "varbinary",
}
defaults_map = {
"Blocked": 0,
"Errored": 0,
"Failed": 0,
"Passed": 0,
"RunID": "no RunID",
"RunID-TestcaseName": "NULL",
# key length is fixed to 10 in _id_maybe_encode_cache()
"FieldID": "NULL",
# because we use this as primary key for the History tables
"Testcase name": "no testcase",
"Skipped": 0,
"Total Count": 0,
}
sql_type_by_field = {
"Blocked": "int",
"Errored": "int",
"Failed": "int",
"Passed": "int",
# because we use this as primary key for most tables
"RunID": "varchar(255)",
# there is the possibility that we won't fit RunID+TestcaseName
# in this limit...
"RunID-TestcaseName": "varchar(1024)",
# key length is fixed to 10 in _id_maybe_encode_cache()
"FieldID": "varchar(11)",
# because we use this as primary key for the History tables
"Testcase name": "varchar(255)",
"Skipped": "int",
"Total Count": "int",
}
def sql_type(self, field, value):
sql_type = self.sql_type_by_field.get(field, None)
if not sql_type:
sql_type = self.sql_type_by_python_type.get(type(value), None)
return sql_type
@functools.lru_cache(maxsize = 200)
def _connection_get_cache(self, _tls, _made_in_pid):
# we don't use _tls and _made_in_pid; they are just there for
# functools.lru_cache to do its caching magic and have them
# pinned to a thread or a PID (based on if this is being
# executed in a thread pool or in a process pool).
connection = mariadb.connect(
user = self.user, password = self.password,
host = self.host, port = self.port,
database = self.database,
ssl = self.ssl,
**self.mariadb_extra_opts)
connection.auto_reconnect = True
return connection
def _connection_get(self):
# Return a connection to the databse
#
# We cache them by process and thread, so we always reuse them
# (since they are costly) and only have one of each.
tls = threading.get_ident()
connection = self._connection_get_cache(tls, os.getpid())
return connection
def _table_create(self, cursor, table_name,
defaults = False, index_column = None,
**fields):
# Create a table with the given fields
#
# Maybe set the field's values as defaults.
#
# Set as primary key the index_column (if given); this will
# normally be the RunID and there is a limitation in that it
# can't be an unlimited length SQL type; hence we set it in
# self.sql_types_by_field to be a vachar(255) (if anyone sets
# a RunID longer than 255, ... their problem. FIXME: needs
# runtime verification)
#
# The SQL command is basically
#
# create table TABLENAME (
# FIELD1 TYPE1 [default DEFAULT1],
# FIELD2 TYPE2 [default DEFAULT2],
# ...,
# [primary key ( FIELDx );
#
if defaults:
cmd = f"create table if not exists `{table_name}` ( " \
+ ", ".join(
f"`{field}` {self.sql_type(field, value)} default ?"
for field, value in fields.items()
) \
+ ( f", primary key (`{index_column}`)" if index_column else "" ) \
+ " );"
values = tuple(self.defaults_map.get(column, None)
for column in fields)
cursor.execute(cmd, values)
else:
cmd = f"create table if not exists `{table_name}` ( " \
+ ", ".join(
f"`{field}` {self.sql_type(field, value)}"
for field, value in fields.items()
) \
+ (
f", primary key (`{index_column}`)"
if index_column else ""
) \
+ " );"
cursor.execute(cmd)
def _table_columns_update(self, cursor, table_name,
defaults = False, **fields):
# Add missing columns to a table, maybe setting defaults
#
# First list the current columns
cmd = \
f"select column_name" \
f" from information_schema.columns" \
f" where table_name = '{table_name}'"
cursor.execute(cmd)
columns = set(row[0] for row in cursor)
fields_wanted = fields.keys()
columns_missing = fields_wanted - columns
if not columns_missing:
return
# add new missing new columns
if defaults:
cmd = \
f"alter table `{table_name}` add ( " \
+ f", ".join(
f" `{column}` {self.sql_type(column, fields[column])}"
f" default {self.defaults_map.get(column, None)} "
for column in columns_missing
) + f" );"
else:
cmd = \
f"alter table `{table_name}` add ( " \
+ f", ".join(
f" `{column}` {self.sql_type(column, fields[column])}"
for column in columns_missing
) + " );"
cursor.execute(cmd)
def _table_row_insert(self, cursor, table_name, **fields):
# insert all the values in the specific columns
#
## insert into TABLENAME ( FIELD1, FIELD2...) data ( VALUE1, VALUE2...)
#
# note we use %s placeholders for the values, to let the
# python itnerface type them properly and pass execute() a
# tuple with the values
cmd = \
f"insert into `{table_name}` ( " \
" `" + "`, `".join(fields.keys()) + "` )" \
" values ( " + " , ".join("?" for _ in fields.values()) + " );"
cursor.execute(cmd, tuple(fields.values()))
@functools.lru_cache(maxsize = 2048)
def _id_maybe_encode_cache(self, _tls, _made_in_pid, identifier, max_len):
"""
If an identifier is longer than the maximum, convert it and
register it.
Register it in the *Field IDs* table so we can later refer to
it as needed.
:param str identifier: identifier to check and maybe convert
:return str: the identifier itself (if shorter than *max_len*)
or the encoded name if it was longer.
"""
if len(identifier) >= max_len:
fieldid = commonl.mkid(identifier, 10)
self.table_row_update("Field IDs", "FieldID", fieldid,
**{ "Field Name": identifier })
return "fieldid:" + fieldid
return self._sql_id_esc(identifier)
def _id_maybe_encode(self, identifier, max_len = 32):
return self._id_maybe_encode_cache(
threading.get_ident(), os.getpid(),
identifier, max_len)
def _table_name_prepare(self, table_name, prefix_bare):
prefix_esc = self.table_name_prefix + self._sql_id_esc(prefix_bare)
prefix_len = len(self.table_name_prefix_raw) + len(prefix_bare)
_table_name = prefix_esc + self._id_maybe_encode(table_name, 64 - prefix_len)
return _table_name.strip() # table names can't start/end w space
def table_row_update(self, table_name, index_column, index_value,
prefix_bare = "", **fields):
# insert/update fields in a table
#
# Use the index value of the index column to find the row to
# update or insert a new one if not present.
#
# If the table does not exist, create it; if any column is
# missing, add them.
_table_name = self._table_name_prepare(table_name, prefix_bare)
connection = self._connection_get()
with connection.cursor() as cursor:
while True:
# Now try to insert the row
#
# - if the primary key is duplicated, update the
# values
#
# Seriously this SQL thing...
#
# insert into TABLENAME (
# INDEX_COLUMN, FIELD1, FIELD2 ...)
# values ( INDEX_VALUE, VALUE1, VALUE2 ... )
# on duplicate key update
# FIELD1 = value(FIELD1),
# FIELD2 = value(FIELD2),
# ...;
#
# If there is no row with INDEX_COLUMN with
# INDEX_VALUE, insert it with those FIELDs,
# otherwise, update it. Clear as mud--especially the
# code.
#
# Thanks https://stackoverflow.com/a/41894298
#
# - if we get errors because the table or columns
# still do not exist, fix'em and
# try again
try:
cmd = \
f"insert into `{_table_name}` (`{index_column}`, " \
+ ", ".join(
f"`{self._sql_id_esc(column)}`"
for column in fields
) \
+ " ) values ( ?, " + ", ".join(
"?"
for column in fields
) + " ) on duplicate key update " \
+ ", ".join(
f"`{self._sql_id_esc(column)}` = values(`{self._sql_id_esc(column)}`)"
for column in fields
) + ";"
values = ( index_value, ) + tuple(fields.values())
cursor.execute(cmd, values)
# In theory python MariaDB does autocommit, but I
# guess not?
connection.commit()
break # success, get out of the retry loop
except mariadb.ProgrammingError as e:
# if the database doesn't exist, the error will read
#
## mariadb.ProgrammingError: Table 'DBNAME.TABLENAME table' doesn't exist
#
# if there is a better way, I am all ears
if not str(e).endswith(f"Table '{self.database}.{_table_name}' doesn't exist"):
raise
# ops, the table does not exist, create it with
# these fields; guess the types from the field
# values and retry; but we want to have FIRST the
# index column -- we rely on python3 keeping
# insertion order for dictionaries
f = { index_column: index_value }
f.update(fields)
try:
self._table_create(cursor, _table_name,
defaults = True,
index_column = index_column,
**f)
connection.commit()
except mariadb.OperationalError as e:
if str(e).endswith(f"Table '{_table_name}' already exists"):
# someone did it already, retry
continue
raise
continue # now try to insert/update again
except mariadb.OperationalError as e:
# If any column does not exist, we'll get
#
## mariadb.OperationalError: Unknown column ...blablah
#
# if there is a better way, I am all ears
if not str(e).startswith("Unknown column"):
raise
self._table_columns_update(cursor, _table_name, **fields)
f = { index_column: index_value }
f.update(fields)
continue
def table_row_inc(self, table_name, index_column, index_value,
prefix_bare = "", **fields):
# Increment by one the listed fileds in the row matching
# index_value
#
# If the row does not exist, add it with the given fields set
# to one.
_table_name = self._table_name_prepare(table_name, prefix_bare)
connection = self._connection_get()
with connection.cursor() as cursor:
while True:
# Now try to insert the row; if we get errors because
# the table or columns still do not exist, fix'em and
# try again
try:
f = list(fields.keys())
f.remove(index_column) # no need to increase this field
# increase by one values of the specified columns in the row
# whose primary key (index_column) has the given index_value
#
## insert into TABLENAME (INDEX_COLUMN, FIELD1, FIELD2...)
## values (INDEX_VALUE, 1, 1, ...)
## on duplicate key update
## FIELD1 = value(FIELD1) + 1,
## FIELD2 = value(FIELD2) + 1,
## ...;
#
# note we use ? placeholders for the values, to let the
# python interface type them properly and pass execute() a
# tuple with the values
cmd = \
f"insert into `{_table_name}` (`{index_column}`, " \
+ ", ".join(f"`{column}`" for column in f) \
+ " ) values ( ?" + ", 1" * len(f) \
+ " ) on duplicate key update " \
+ ", ".join(
f"`{column}` = `{column}` + 1"
for column in f
) + ";"
cursor.execute(cmd, (index_value, ))
# In theory python MariaDB does autocommit, but I
# guess not?
connection.commit()
break # success, get out of the retry loop
except mariadb.ProgrammingError as e:
# if the database doesn't exist, the error will read
#
## mariadb.ProgrammingError: Table 'DBNAME.TABLENAME table' doesn't exist
#
# if there is a better way, I am all ears
if not str(e).endswith(f"Table '{self.database}.{_table_name}' doesn't exist"):
raise
# ops, the table does not exist, create it with
# these fields; guess the types from the field
# values and retry
try:
self._table_create(cursor, _table_name,
index_column = index_column,
**fields)
connection.commit()
except mariadb.OperationalError as e:
if str(e).endswith(f"Table '{_table_name}' already exists"):
# someone did it already, retry
continue
raise
# now insert the row, we can't increase because we
# know there was nothing -- FIXME: what about if
# someone tried before us?
try:
self._table_row_insert(cursor, _table_name, **fields)
connection.commit()
except mariadb.OperationalError as e:
# see next, it is the same
if not str(e).startswith("Unknown column"):
raise
continue # just retry and let if fail
# note we break vs continue, because we already inserted
break
except mariadb.OperationalError as e:
# If any column does not exist, we'll get
#
## mariadb.OperationalError: Unknown column ...blablah
#
# if there is a better way, I am all ears
if not str(e).startswith("Unknown column"):
raise
# note we set defauls to True; this will take
# the defaults based on the field name from
# defaults_map defined above.
self._table_columns_update(cursor, _table_name,
defaults = True, **fields)
connection.commit()
continue
def report(self, reporter, tag, ts, delta,
level, message,
alevel, attachments):
# Entry point for the reporting driver from the reporting API
#
# We filter the messages we report, since we only do
# summaries--thus we skip anything we are not interested in,
# then we collect data and on COMPLETION (end of test case),
# upload data to the database.
# We only do summaries, so skip anything that is not reporting
# data or testcase completions
if tag != "DATA" and not message.startswith("COMPLETION"):
return
# skip global reporter, not meant to be used here
if reporter == tcfl.tc.tc_global:
return
runid = reporter.kws.get('runid', "no RunID")
hashid = reporter.kws.get('tc_hash', None)
if not hashid: # can't do much if we don't have this
return
if not runid:
runid = "no RunID"
# Extract the target name where this message came from (if the
# reporter is a target)
if isinstance(reporter, tcfl.tc.target_c):
tc_name = reporter.testcase.name
target = reporter
elif isinstance(reporter, tcfl.tc.tc_c):
tc_name = reporter.name
target = None
else:
raise AssertionError(
"reporter is not tcfl.tc.{tc,target}_c but %s" % type(reporter))
doc = self.docs.setdefault((runid, hashid, tc_name),
dict(data = {}))
if tag == "DATA":
# DATA tags indicate KPIs or similar, which are store in a
# table called "PREFIX DATA DOMAIN"; for now we just store
# them and then we report them upon COMPLETION
domain = attachments['domain']
assert isinstance (domain, str), \
"data domain name '%s' is a %s, need a string" \
% (domain, type(domain).__name__)
name = attachments['name']
assert isinstance(name, str), \
"data name '%s' is a %s, need a string" \
% (name, type(name).__name__)
value = attachments['value']
doc['data'].setdefault(domain, {})
if isinstance(value, str):
# fix bad UTF8
value = commonl.mkutf8(value)
# append target name to the column -- otherwise summaries
# lose that information
if target and target.fullid not in name:
name = name + f" ({target.fullid})"
doc['data'][domain][name] = value
return
if message.startswith("COMPLETION"):
# The *tag* for COMPLETION says what was the final result
data = {
# we need to store this in the table; we only want to
# set the fields we want to increase -- so works
# table_row_inc()
"RunID": runid,
"Total Count": 1,
}
result = None
if tag == "PASS":
data['Passed'] = 1
result = "P"
elif tag == "FAIL":
data['Failed'] = 1
result = "F"
elif tag == "ERRR":
data['Errored'] = 1
result = "E"
elif tag == "BLCK":
data['Blocked'] = 1
result = "B"
elif tag == "SKIP":
data['Skipped'] = 1
result = "S"
# we specify runid here to filter which row we want to
# update/create, since if it is already existing we want
# to update the count
# No need to encode here, all the field names are valid SQL
try:
self.table_row_inc("Summary", "RunID", runid, **data)
except mariadb.Error as e:
logging.error(f"Summary: {tc_name}:{hashid}: MariaDB error: %s" % e)
# Record a mapping of runid-testcasename -> hashid; this
# is needed so we can refer to various things that use the
# hashid, like for example reports
# (report-RUNID:HASHID.ANYTHING)
#
try:
self.table_row_update(
"HashIDs", "RunID-TestcaseName", runid + "##" + tc_name,
**{ 'HashID': hashid })
except mariadb.Error as e:
logging.error(f"HashIDs: {tc_name}:{hashid}: MariaDB error: {str(e)}")
# Update --id-extra KEY=VALUE
if reporter.runid_extra:
# Note we might be overriding existing values--in
# theory we shouldn't because reporters.runid_extra
# should be always all the same.
try:
self.table_row_update("Summary", "RunID", runid,
**reporter.runid_extra)
except mariadb.Error as e:
logging.error(f"HashIDs: {tc_name}:{hashid}: MariaDB error: {str(e)}")
# Any field name over 64 chars will make SQL (at least
# MariaDB) complain sooo..encoding time; we have a table
# mapping field name by hash to a name
# Flush the collected KPI data
# note the test case name is not used here at all, just
# the domain
for domain, data in doc['data'].items():
# convert subdictionaries and lists into columns
data_flat = {}
for key, value in commonl.dict_to_flat(data):
key = self._id_maybe_encode(key)
data_flat[key] = value
try:
self.table_row_update(domain, "RunID", runid,
prefix_bare = "DATA ", **data_flat)
except mariadb.Error as e:
logging.error(f"domain: {tc_name}:{hashid}: MariaDB error: {str(e)}")
# Add to the table of executed testcases/results
# We need to index by test case and column by RunID. Why?
# because if we index by RunId and column by testcase
# name...SQL complains our test case names get too long
# (they do) for column IDs; if we encode those so they
# are short, it complains that we have too many.... welp.
if result:
# FIXME: add more info so we can do a link to
# result
try:
self.table_row_update(
"History", "Testcase name", tc_name,
**{ self._id_maybe_encode(runid, max_len = 63): result })
except mariadb.Error as e:
logging.error(f"History: {tc_name}:{hashid}: MariaDB error: {str(e)}")
```
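A hypothetical configuration snippet showing how this driver could be registered with the reporting API; the hostname, credentials and database name are placeholders, and `report_driver_c.add()` is the registration hook used elsewhere in tcfl:
```python
import tcfl.tc
import tcfl.report_mariadb   # assumed import path for this module

tcfl.tc.report_driver_c.add(
    tcfl.report_mariadb.driver_summary(
        "reporter:PASSWORD@db.example.com",   # placeholder credentials/host
        "tcf_results",                        # placeholder database name
        table_name_prefix = "PipelineA "))
```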
#### File: tcf/tcfl/report_taps.py
```python
from . import tc
class driver(tc.report_driver_c):
def __init__(self):
tc.report_driver_c.__init__(self)
self.count = 1
def report(self, reporter, tag, ts, delta,
level, message,
alevel, attachments):
"""
Report into TAPS driver
Note this driver only produces messages upon completion.
"""
# Ignore extremely chatty messages -- these are mainly the failed
# to acquire (busy, retrying) ones, which can add up to a truckload
# of messages
if tag == "INFO" and level >= 6:
return
# this only reports the final completion status
if not message.startswith("COMPLETION"):
return
assert isinstance(reporter, tc.tc_c)
testcase = reporter
# translate from TCF result to TAPS result
tag_prefix = ""
tag_suffix = ""
if tag == "PASS":
result = "ok"
elif tag == "FAIL":
result = "not ok"
tag_suffix = "(failed)"
elif tag == "ERRR":
result = "not ok"
tag_suffix = "(errored)"
elif tag == "BLCK":
result = "not ok"
tag_suffix = "(blocked)"
elif tag == "SKIP":
result = "not ok"
tag_prefix = "# skip"
else:
result = "not ok"
if reporter.target_group:
location = "@ " + reporter.target_group.name
else:
location = ""
print("%s %d %s %s/%s %s %s %s\n" \
% (result, self.count, tag_prefix, tag, testcase.ident(),
reporter.name, location, tag_suffix))
self.count += 1
```
#### File: tcf/tcfl/target_ext_power.py
```python
import collections
import json
import re
import commonl
from . import tc
from . import msgid_c
class extension(tc.target_extension_c):
"""
Extension to :py:class:`tcfl.tc.target_c` to interact with the
server's power control interface.
Use as:
>>> target.power.on()
>>> target.power.off()
>>> target.power.cycle()
>>> target.power.get()
>>> target.power.list()
"""
def __init__(self, target):
if 'power' in target.rt.get('interfaces', []):
return
raise self.unneeded
def get(self):
"""
Return a target's power status, *True* if powered, *False*
otherwise.
A target is considered *on* when all of its power rail
components are on; fake power components report power state as
*None* and those are not taken into account.
A more detailed picture of the target's power state can be
obtained with :meth:list.
"""
state, _, _ = self.list()
return state
def list(self):
# FIXME: add component to make it faster when we only need one component
"""
Return a list of a target's power rail components and their status
:returns tuple(state, substate, data):
- state: *True* on, *False* off, *None* not available
- substate: "normal", "full", "inconsistent"; if
inconsistent, it would be a good idea to power cycle
- data: dictionary keyed by
component name listing their state and other flags about
them:
.. code-block:: python
{
"NAME1": {
"state": STATE1,
["explicit": "on|off|both" ]
},
"NAME2": {
"state": STATE2,
["explicit": "on|off|both" ]
},
...
}
- *state*: *True* if powered, *False* if not, *None* if not
applicable, for fake power controls
- *explicit*: (see :ref:`ttbd_power_explicit`) if missing,
not explicit, will be turned on/off normally:
- *on*: only powered on if explicitly named
- *off*: only powered off if explicitly named
- *both*: only powered on/off if explicitly named
"""
self.target.report_info("listing", dlevel = 2)
r = self.target.ttbd_iface_call(
"power", "list", method = "GET",
# extra time, since power ops can take long
timeout = 60)
if 'power' in r:
data = collections.OrderedDict()
# backwards compat
#
## [
## [ NAME1, STATE2 ],
## [ NAME2, STATE2 ],
## ...
## ]
#
for i in r.get('power', []):
data[i[0]] = dict(state = i[1])
substate = 'normal' # older doesn't support substates
state = all(i['state'] in (True, None) for i in list(data.values()))
elif isinstance(r, collections.Mapping):
# proper response format
#
## {
## NAME1: { state: STATE1, [explicit: "on|off|both" ] },
## NAME2: { state: STATE2, [explicit: "on|off|both" ] },
## ...
## }
#
# FIXME: verify the format
state = r['state']
substate = r['substate']
data = r['components']
else:
raise AssertionError("can't parse response")
self.target.report_info("listed", dlevel = 2)
return state, substate, data
@staticmethod
def _estimated_duration_get(data, operation):
return data.get(
'estimated_duration_%s' % operation,
data.get('estimated_duration', 0))
@staticmethod
def _compute_duration(target, component, operation):
timeout = 0
if component:
data = target.rt.get('interfaces', {})\
.get('power', {})\
.get(component, None)
if data:
timeout += extension._estimated_duration_get(data, operation)
else:
# collect all the timeouts from the different components
# to get an estimate on what to wait
for name, data in target.rt.get('interfaces', {})\
.get('power', {})\
.items():
if isinstance(data, dict):
# components are dictionaries, the rest are not components
timeout += extension._estimated_duration_get(data, operation)
return timeout
def off(self, component = None, explicit = False):
"""
Power off a target or parts of its power rail
:param str component: (optional) name of component to
power off, defaults to whole target's power rail
"""
if component != None:
assert isinstance(component, str)
component_s = f" component {component}"
else:
component_s = ""
assert isinstance(explicit, bool)
target = self.target
target.report_info("powering off" + component_s, dlevel = 1)
timeout = 60 + self._compute_duration(target, component, "off")
if timeout > 120:
target.report_info(
"WARNING: long power-off--estimated duration %s seconds"
% timeout)
target.ttbd_iface_call(
"power", "off", component = component, explicit = explicit,
timeout = timeout)
target.report_info("powered off" + component_s)
def on(self, component = None, explicit = False):
"""
Power on a target or parts of its power rail
:param str component: (optional) name of component to
power on, defaults to whole target's power rail
"""
if component != None:
assert isinstance(component, str)
component_s = f" component {component}"
else:
component_s = ""
assert isinstance(explicit, bool)
target = self.target
target.report_info("powering on" + component_s, dlevel = 1)
timeout = 60 + self._compute_duration(target, component, "on")
if timeout > 120:
target.report_info(
"WARNING: long power-on--estimated duration %s seconds"
% timeout)
target.ttbd_iface_call(
"power", "on", component = component, explicit = explicit,
# extra time, since power ops can take long
timeout = timeout)
target.report_info("powered on" + component_s)
if hasattr(target, "console"):
target.console._set_default()
def cycle(self, component = None, wait = None, explicit = False):
"""
Power cycle a target or one of its components
:param float wait: (optional) seconds to wait before powering on
:param str component: (optional) name of component to
power-cycle, defaults to whole target's power rail
"""
assert wait == None or wait >= 0
if component != None:
assert isinstance(component, str)
component_s = f" component {component}"
else:
component_s = ""
assert isinstance(explicit, bool)
target = self.target
target.report_info("power cycling" + component_s, dlevel = 1)
timeout = 60 \
+ self._compute_duration(target, component, "on") \
+ self._compute_duration(target, component, "off")
if timeout > 120:
target.report_info(
"WARNING: long power-cycle--estimated duration %s seconds"
% timeout)
target.ttbd_iface_call(
"power", "cycle",
component = component, wait = wait, explicit = explicit,
timeout = timeout)
target.report_info("power cycled" + component_s)
if hasattr(target, "console"):
target.console._set_default()
def reset(self):
"""
Reset a target
This interface is **deprecated**.
"""
self.target.report_info("resetting", dlevel = 1)
self.target.report_info("DEPRECATED: reset()", level = 0)
# reset is deprecated at the server level
self.target.ttbd_iface_call(
"power", "cycle",
# extra time, since power ops can take long
timeout = 60)
self.target.report_info("reset")
if hasattr(self.target, "console"):
self.target.console._set_default()
def sequence(self, sequence, timeout = None):
"""
Execute a sequence of power actions on a target
:param str sequence: a list of pairs:
>>> ( OPERATION, ARGUMENT )
*OPERATION* is a string that can be:
- *on*, *off* or *cycle*; *ARGUMENT* is a string being:
- *all*: do the operation on all the components except
:ref:`explicit <ttbd_power_explicit>` ones
- *full*: perform the operation on all the components
including the :ref:`explicit <ttbd_power_explicit>` ones
- *COMPONENT NAME*: perform the operation only on the given
component
- *wait*: *ARGUMENT* is a number describing how many seconds
to wait
For example:
>>> [ ( 'off', 'full' ), ( 'wait', 2 ), ( 'on', 'all' ) ]
powers off every single component of the power rail, waits
two seconds and then powers on all the components needed for
normal system's power on.
:param float timeout: (optional) maximum seconds to wait
before giving up; defaults to a value calculated from how
many *wait* operations are given or, if none, to the default
set in
:meth:`tcfl.tc.target_c.ttbd_iface_call`.
"""
kwargs = {}
if timeout != None:
kwargs['timeout'] = timeout
# FIXME: compute length for timeout
self.target.report_info("running sequence: %s" % (sequence, ), dlevel = 1)
self.target.ttbd_iface_call("power", "sequence", method = "PUT",
sequence = sequence, **kwargs)
self.target.report_info("ran sequence: %s" % (sequence, ))
def _healthcheck(self):
target = self.target
target.power.off()
power = target.power.get()
if power != False:
raise tc.failed_e("power should be False, reported %s" % power)
target.report_pass("power is reported correctly as %s" % power)
target.power.on()
power = target.power.get()
state, substate, components = target.power.list()
if power != True:
raise tc.failed_e("power should be True, reported %s" % power,
dict(state = state, substate = substate,
components = components, power = power))
target.report_pass("power is reported correctly as %s" % power)
target.power.cycle()
components = target.power.list()
target.report_pass("power components listed",
dict(components = components))
target.power.off()
power = target.power.get()
if power != False:
raise tc.failed_e("power should be False, reported %s" % power)
target.report_pass("power is reported correctly as %s" % power)
def _cmdline_power_off(args):
tc.tc_global = tc.tc_c("cmdline", "", "builtin")
tc.report_driver_c.add( # FIXME: hack console driver
tc.report_console.driver(0, None))
with msgid_c("cmdline"):
for target_name in args.targets:
target = tc.target_c.create_from_cmdline_args(
args, target_name, iface = "power",
extensions_only = [ 'power' ])
target.power.off(args.component, explicit = args.explicit)
def _cmdline_power_on(args):
tc.tc_global = tc.tc_c("cmdline", "", "builtin")
tc.report_driver_c.add( # FIXME: hack console driver
tc.report_console.driver(0, None))
with msgid_c("cmdline"):
for target_name in args.targets:
target = tc.target_c.create_from_cmdline_args(
args, target_name, iface = "power",
extensions_only = [ 'power' ])
target.power.on(args.component, explicit = args.explicit)
def _cmdline_power_cycle(args):
tc.tc_global = tc.tc_c("cmdline", "", "builtin")
tc.report_driver_c.add( # FIXME: hack console driver
tc.report_console.driver(0, None))
with msgid_c("cmdline"):
for target_name in args.targets:
target = tc.target_c.create_from_cmdline_args(
args, target_name, iface = "power",
extensions_only = [ 'power' ])
target.power.cycle(
wait = float(args.wait) if args.wait else None,
component = args.component, explicit = args.explicit)
def _cmdline_power_reset(args):
with msgid_c("cmdline"):
for target_name in args.targets:
target = tc.target_c.create_from_cmdline_args(
args, iface = "power", extensions_only = [ 'power' ])
target.power.reset()
def _cmdline_power_list(args):
with msgid_c("cmdline"):
target = tc.target_c.create_from_cmdline_args(
args, iface = "power", extensions_only = [ 'power' ])
state, substate, components = target.power.list()
def _state_to_str(state):
if state == True:
return 'on'
if state == False:
return 'off'
if state == None:
return "n/a"
return "BUG:unknown-state"
if args.verbosity < 2:
_state = _state_to_str(state)
print("overall: %s (%s)" % (_state, substate))
for component, data in components.items():
state = data['state']
explicit = data.get('explicit', None)
_state = _state_to_str(state)
if explicit and args.verbosity == 0:
continue
if not explicit:
explicit = ""
else:
explicit = " (explicit/" + explicit + ")"
print(" %s: %s%s" % (component, _state, explicit))
elif args.verbosity == 2:
r = dict(state = state, substate = substate, components = components)
commonl.data_dump_recursive(r, prefix = target.fullid)
else: # args.verbosity >= 3:
r = dict(state = state, substate = substate, components = components)
print(json.dumps(r, skipkeys = True, indent = 4))
def _cmdline_power_get(args):
with msgid_c("cmdline"):
target = tc.target_c.create_from_cmdline_args(
args, iface = "power", extensions_only = [ 'power' ])
r = target.power.get()
print("%s: %s" % (target.id, 'on' if r == True else 'off'))
# this is a very loose match in the format, so we can easily support
# new functionalities in the server
_sequence_valid_regex = re.compile(
r"^("
r"(?P<wait>wait):(?P<time>[\.0-9]+)"
r"|"
r"(?P<action>\w+):(?P<component>[ /\w]+)"
r")$")
def _cmdline_power_sequence(args):
with msgid_c("cmdline"):
target = tc.target_c.create_from_cmdline_args(
args, iface = "power", extensions_only = [ 'power' ])
sequence = []
total_wait = 0
for s in args.sequence:
m = _sequence_valid_regex.match(s)
if not m:
raise ValueError("%s: invalid specification, see --help" % s)
gd = m.groupdict()
if gd['wait'] == 'wait':
time_to_wait = float(gd['time'])
sequence.append(( 'wait', time_to_wait))
total_wait += time_to_wait
else:
sequence.append(( gd['action'], gd['component']))
target.power.sequence(sequence,
timeout = args.timeout + 1.5 * total_wait)
def _cmdline_setup(arg_subparser):
ap = arg_subparser.add_parser(
"power-on",
help = "Power on target's power rail (or individual components)")
ap.add_argument(
"--component", "-c",
metavar = "COMPONENT", action = "store", default = None,
help = "Operate only on the given component of the power rail")
ap.add_argument(
"--explicit", "-e",
action = "store_true", default = False,
help = "Operate also on all the explicit components; "
" explicit components are only powered on if"
" --explicit is given or if they are explicitly selected"
" with --component")
ap.add_argument(
"targets",
metavar = "TARGET", action = "store", nargs = "+", default = None,
help = "Names of targets to power on")
ap.set_defaults(func = _cmdline_power_on)
ap = arg_subparser.add_parser(
"power-off",
help = "Power off target's power rail (or individual components)")
ap.add_argument(
"--component", "-c", metavar = "COMPONENT",
action = "store", default = None,
help = "Operate only on the given component of the power rail")
ap.add_argument(
"--explicit", "-e",
action = "store_true", default = False,
help = "Operate also on all the explicit components; "
" explicit components are only powered off if"
" --explicit is given or if they are explicitly selected"
" with --component")
ap.add_argument(
"targets",
metavar = "TARGET", action = "store", nargs = "+", default = None,
help = "Names of targets to power off")
ap.set_defaults(func = _cmdline_power_off)
ap = arg_subparser.add_parser(
"power-cycle",
help = "Power cycle target's power rail (or individual components)")
ap.add_argument(
"--explicit", "-e",
action = "store_true", default = False,
help = "Operate also on all the explicit components; explicit"
" components are only power cycled if --explicit is given or"
" if they are explicitly selected with --component")
ap.add_argument(
"-w", "--wait",
metavar = "SECONDS", action = "store", default = None,
help = "How long to wait between power off and power on;"
" default to server configuration")
ap.add_argument(
"--component", "-c", metavar = "COMPONENT",
action = "store", default = None,
help = "Operate only on the given component of the power rail")
ap.add_argument(
"targets",
metavar = "TARGET", action = "store", nargs = "+", default = None,
help = "Names of targets to power cycle")
ap.set_defaults(func = _cmdline_power_cycle)
ap = arg_subparser.add_parser(
"power-sequence",
help = "Execute a power sequence")
ap.add_argument(
"target",
metavar = "TARGET", action = "store",
help = "Names of target to execute the sequence on")
ap.add_argument(
"sequence",
metavar = "STEP", action = "store", nargs = "+",
help = "sequence steps (list {on,off,cycle}:{COMPONENT,all,full}"
" or wait:SECONDS; *all* means all components except explicit ones,"
" *full* means all components including explicit ones")
ap.add_argument("-t", "--timeout",
action = "store", default = 60, type = int,
help = "timeout in seconds [%(default)d, plus "
" all the waits +50%%]")
ap.set_defaults(func = _cmdline_power_sequence)
ap = arg_subparser.add_parser(
"power-ls",
help = "List power rail components and their state")
ap.add_argument(
"-v", dest = "verbosity", action = "count", default = 0,
help = "Increase verbosity of information to display "
"(default displays state of non-explicit components,"
" -v adds component flags and lists explicit components,"
" -vv python dictionary, -vvv JSON format)")
ap.add_argument(
"target", metavar = "TARGET", action = "store", default = None,
help = "Name of target")
ap.set_defaults(func = _cmdline_power_list)
ap = arg_subparser.add_parser(
"power-get",
help = "Print target's power state."
"A target is considered *on* when all of its power rail"
"components are on; fake power components report power state as"
"*n/a* and those are not taken into account.")
ap.add_argument(
"target",
metavar = "TARGET", action = "store", default = None,
help = "Target")
ap.set_defaults(func = _cmdline_power_get)
```
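The following is a minimal usage sketch of the client-side power extension above. It is hedged: it assumes an already created and acquired `target` object (for example from `tcfl.tc.target_c.create_from_cmdline_args()`), and the component names and values are placeholders.
```python
# power off everything (including explicit components), wait two
# seconds and power on the normal (non-explicit) rail
target.power.sequence([ ( 'off', 'full' ), ( 'wait', 2 ), ( 'on', 'all' ) ])

# inspect the resulting state; list() returns the overall state, a
# substate and a per-component dictionary, as used by
# _cmdline_power_list() above
state, substate, components = target.power.list()
print("overall: %s (%s)" % ('on' if state else 'off', substate))
for component, data in components.items():
    print(" %s: %s" % (component, data['state']))
```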
#### File: tcf/tcfl/target_ext_tunnel.py
```python
import pprint
from . import msgid_c
import commonl
from . import tc
class tunnel(tc.target_extension_c):
"""
Extension to :py:class:`tcfl.tc.target_c` to create IP tunnels to
targets with IP connectivity.
Use by indicating a default IP address to use for interconnect
*ic* or explicitly indicating it in the :meth:`add` function:
>>> target.tunnel.ip_addr = target.addr_get(ic, "ipv4")
>>> target.tunnel.add(PORT)
>>> target.tunnel.remove(PORT)
>>> target.tunnel.list()
Note that for tunnels to work, the target has to be acquired and
IP has to be up on it, which might requires it to be connected to
some IP network (it can be a TCF interconnect or any other
network).
"""
def __init__(self, target):
self.target = target
# Tunnels can always be added, even if the target is not in an
# interconnect
self.ip_addr = None
def _ip_addr_get(self, ip_addr):
# FIXME: this shall validate the IP address using python-ipaddress
if ip_addr:
return ip_addr
target = self.target
interconnects = list(target.rt.get('interconnects', {}).keys())
boot_ic = target.rt.get('pos_boot_interconnect', None)
if boot_ic:
# if the boot interconnect is available in the list of
# interconnect, arrange the list so it is the first one we
# try
if boot_ic in interconnects:
interconnects.remove(boot_ic)
interconnects = [ boot_ic ] + interconnects
for ic_name in interconnects:
ic = target.rt['interconnects'][ic_name]
ipv4_addr = ic.get('ipv4_addr', None)
if ipv4_addr:
return ipv4_addr
ipv6_addr = ic.get('ipv6_addr', None)
if ipv6_addr:
return ipv6_addr
raise RuntimeError(
"Cannot identify any IPv4 or IPv6 address to use; "
"please set it in "
"`TARGET.tunnel.ip_addr = TARGET.addr_get(ic, \"ipv4\")` "
"or pass it explicitly")
def add(self, port, ip_addr = None, protocol = None):
"""
Set up a TCP/UDP/SCTP v4 or v6 tunnel to the target
A local port of the given protocol in the server is forwarded
to the target's port. Teardown with :meth:`remove`.
If the tunnel already exists, it is not recreated, but the
port it uses is returned.
Example: redirect target's TCP4 port 3000 to a port in the server
that provides ``target`` (target.kws['server']).
>>> server_port = target.tunnel.add(3000)
>>> server_name = target.rtb.parsed_url.hostname
>>> server_name = target.kws['server'] # alternatively
Now connecting to ``server_name:server_port`` takes you to the
target's port 3000.
:param int port: port to redirect to
:param str ip_addr: (optional) target's IP address to use (it
must be listed in the target's tags *ipv4_address* or
*ipv6_address*).
:param str protocol: (optional) Protocol to tunnel:
{udp,sctp,tcp}[{4,6}] (defaults to TCP v4)
:returns int local_port: port on the server to connect to in
order to access the target.
"""
if protocol == None:
protocol = 'tcp'
else:
assert isinstance(protocol, str), \
"protocol shall be a string; got %s" % type(protocol)
assert isinstance(port, int)
target = self.target
ip_addr = self._ip_addr_get(ip_addr)
r = self.target.ttbd_iface_call("tunnel", "tunnel",
ip_addr = ip_addr,
protocol = protocol,
port = port,
method = "PUT")
server_port = r['result']
if isinstance(server_port, str):
# COMPAT: work around server with unfixed bug
server_port = int(server_port)
self.target.report_info(
"%s tunnel added from %s:%d to %s:%d" % (
protocol,
target.rtb.parsed_url.hostname, server_port,
ip_addr, port)
)
return server_port
def remove(self, port, ip_addr = None, protocol = None):
"""
Tear down a TCP/UDP/SCTP v4 or v6 tunnel to the target
previously created with :meth:`add`.
:param int port: port to redirect to
:param str ip_addr: (optional) target's IP address to use (it
must be listed in the target's tags *ipv4_address* or
*ipv6_address*).
:param str protocol: (optional) Protocol to tunnel:
{udp,sctp,tcp}[{4,6}] (defaults to v4 and to TCP)
"""
if protocol == None:
protocol = 'tcp'
else:
assert isinstance(protocol, str), \
"protocol shall be a string; got %s" % type(protocol)
assert isinstance(port, int)
ip_addr = self._ip_addr_get(ip_addr)
self.target.ttbd_iface_call("tunnel", "tunnel",
ip_addr = ip_addr,
protocol = protocol,
port = port,
method = "DELETE")
def list(self):
"""
List existing IP tunnels
:returns: dictionary keyed by server port of each existing
tunnels:
.. code-block:: python
{
SERVER-PORT1: {
"protocol": 'tcp', # tcp, udp, sctp, ...
"ip_addr": "A.B.C.D",
"port": NNN
},
...
}
*SERVER-PORT* is the same port returned by the :meth:`add`
call, so that the endpoint for the tunnel would be the
server's hostname (available at
*target.rtb.parsed_url.hostname*) and the *SERVER-PORT*.
"""
d = dict()
r = self.target.properties_get("interfaces.tunnel")
# we get: interfaces: { tunnel: { LOCALPORT: { DATA },
# LOCALPORT: { DATA }...}
r = r.get("interfaces", {}).get("tunnel", {})
for local_port, data in r.items():
try:
# we get local_port as a string because it is a field
# name in the database (versus a value)
d[int(local_port)] = dict(
protocol = data['protocol'],
ip_addr = data['ip_addr'],
port = data['port']
)
except KeyError as e:
pass # ignore, bad data stored
if not d: # COMPAT
# if we didn't find anything in the inventory, maybe this is
# an old style server, try calling the old deprecated
# method
try:
r = self.target.ttbd_iface_call("tunnel", "list", method = "GET")
for protocol, ip_addr, port, local_port in r['result']:
d[int(local_port)] = dict(
protocol = protocol,
ip_addr = ip_addr,
port = int(port))
return d
except tc.error_e as e:
if not 'unsupported' in str(e):
raise
return d
def _healthcheck(self):
target = self.target
interconnects = target.rt.get('interconnects', {})
if interconnects == {}:
target.report_skip("skipping tunnel healthcheck,"
" no IP connectivity in configuration, ")
return
target = self.target
tunnels = target.tunnel.list()
n_existing_tunnels = len(tunnels)
server_port_22 = target.tunnel.add(22)
target.report_pass("added tunnel to port 22")
tunnels = target.tunnel.list()
if len(tunnels) != n_existing_tunnels + 1:
raise tc.failed_e(
"list() lists %d tunnels; expected 1" % len(tunnels),
dict(tunnels = tunnels))
target.report_pass("list() lists only one tunnel")
if server_port_22 not in tunnels \
or tunnels[server_port_22].get("port", None) != 22:
raise tc.failed_e(
"list() didn't report target port as 22 as requested",
dict(tunnels = tunnels))
target.report_pass("list() reports tunnel to 22 as requested")
server_port_23 = target.tunnel.add(23)
target.report_pass("added tunnel to port 23")
tunnels = target.tunnel.list()
if len(tunnels) != n_existing_tunnels + 2:
raise tc.failed_e(
"list() lists %d tunnels; expected 2" % len(tunnels),
dict(tunnels = tunnels))
target.report_pass("list() lists two tunnels")
if server_port_22 not in tunnels \
or tunnels[server_port_22].get("port", None) != 22:
raise tc.failed_e(
"list() didn't report target port as 22 as requested",
dict(tunnels = tunnels))
if server_port_23 not in tunnels \
or tunnels[server_port_23].get("port", None) != 23:
raise tc.failed_e(
"list() didn't report target port as 23 as requested",
dict(tunnels = tunnels))
target.report_pass("list() reports tunnel to 22 and 23 as requested")
target.tunnel.remove(22)
target.report_pass("removed tunnel to port 22")
tunnels = target.tunnel.list()
if len(tunnels) != n_existing_tunnels + 1:
raise tc.failed_e(
"list() lists %d tunnels; expected 1" % len(tunnels),
dict(tunnels = tunnels))
# leftover tunnel is the one to port 23
if server_port_23 not in tunnels \
or tunnels[server_port_23].get("port", None) != 23:
raise tc.failed_e(
"list() didn't report target port as 23 as requested",
dict(tunnels = tunnels))
target.report_pass("list() lists only tunnel to port 23")
target.tunnel.remove(23)
target.report_pass("removed tunnel to port 23")
tunnels = target.tunnel.list()
if len(tunnels) != n_existing_tunnels + 0:
raise tc.failed_e(
"list() reports %d tunnels; expected none" % len(tunnels),
dict(tunnels = tunnels))
target.report_pass("no tunnels listed after removing all")
# can't really test the tunnel because we don't know if the
# target is listening, has a real IP interface, etc...this is
# a very basic healthcheck on the server side
def _cmdline_tunnel_add(args):
with msgid_c("cmdline"):
target = tc.target_c.create_from_cmdline_args(args, iface = "tunnel")
server_port = target.tunnel.add(args.port, args.ip_addr, args.protocol)
print("%s:%d" % (target.rtb.parsed_url.hostname, server_port))
def _cmdline_tunnel_remove(args):
with msgid_c("cmdline"):
target = tc.target_c.create_from_cmdline_args(args, iface = "tunnel")
target.tunnel.remove(args.port, args.ip_addr, args.protocol)
def _cmdline_tunnel_list(args):
with msgid_c("cmdline"):
target = tc.target_c.create_from_cmdline_args(args, iface = "tunnel")
for local_port, data in target.tunnel.list().items():
print("%s %s:%s %s:%s" % (
data['protocol'],
target.rtb.parsed_url.hostname, local_port,
data['ip_addr'], data['port']
))
def cmdline_setup(argsp):
ap = argsp.add_parser("tunnel-add", help = "create an IP tunnel")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.add_argument("port", metavar = "PORT", action = "store", type = int,
help = "Port to tunnel to")
ap.add_argument("protocol", metavar = "PROTOCOL", action = "store",
nargs = "?", default = None, type = str,
help = "Protocol to tunnel {tcp,udp,sctp}[{4,6}] "
"(defaults to TCPv4)")
ap.add_argument("ip_addr", metavar = "IP-ADDR", action = "store",
nargs = "?", default = None, type = str,
help = "target's IP address to tunnel to "
"(default is the first IP address the target declares)")
ap.set_defaults(func = _cmdline_tunnel_add)
ap = argsp.add_parser("tunnel-rm",
help = "remove an existing IP tunnel")
commonl.argparser_add_aka(argsp, "tunnel-rm", "tunnel-remove")
commonl.argparser_add_aka(argsp, "tunnel-rm", "tunnel-delete")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.add_argument("port", metavar = "PORT", action = "store", type = int,
help = "Port to tunnel to")
ap.add_argument("protocol", metavar = "PROTOCOL", action = "store",
nargs = "?", default = None,
help = "Protocol to tunnel {tcp,udp,sctp}[{4,6}] "
"(defaults to tcp and to IPv4)")
ap.add_argument("ip_addr", metavar = "IP-ADDR", action = "store",
nargs = "?", default = None,
help = "target's IP address to tunnel to "
"(default is the first IP address the target declares)")
ap.set_defaults(func = _cmdline_tunnel_remove)
ap = argsp.add_parser("tunnel-ls", help = "List existing IP tunnels")
ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
default = None, help = "Target's name or URL")
ap.set_defaults(func = _cmdline_tunnel_list)
```
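A short, hedged sketch of driving the tunnel extension above from test code; it assumes an acquired `target` with IP connectivity and something listening on the target's TCP port 22, and only adds the standard library `socket` module:
```python
import socket

server_port = target.tunnel.add(22)          # TCPv4 tunnel by default
server_name = target.rtb.parsed_url.hostname
# connecting to SERVER_NAME:SERVER_PORT now reaches the target's port 22
with socket.create_connection((server_name, server_port), timeout = 10) as s:
    banner = s.recv(80)                      # e.g. an SSH banner, if sshd runs
target.tunnel.remove(22)
```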
#### File: tests/bitrotten/conf_test_things.py
```python
import ttbl
import ttbl.config
raise ImportError("bitrotten")
class example_target(ttbl.test_target):
pass
class example_thing(ttbl.test_target):
pass
class example_thing_plugger(ttbl.thing_plugger_mixin):
def __init__(self, _name):
ttbl.thing_plugger_mixin.__init__(self)
@staticmethod
def plug(target, thing):
target.log.debug("thing %s plugged to %s", thing.id, target.id)
@staticmethod
def unplug(target, thing):
target.log.debug("thing %s unplugged from %s", thing.id, target.id)
def example_target_add(name):
ttbl.config.target_add(example_thing(name))
example_target_add('target')
ttbl.config.target_add(example_thing('thing1'))
ttbl.config.target_add(example_thing('thing2'))
ttbl.config.target_add(example_thing('thing3'))
ttbl.test_target.get('target').thing_add('thing1', example_thing_plugger('p1'))
ttbl.test_target.get('target').thing_add('thing2', example_thing_plugger('p2'))
ttbl.test_target.get('target').thing_add('thing3', example_thing_plugger('p3'))
```
#### File: tests/bitrotten/test_fullrun.py
```python
import filecmp
import getpass
import logging
import os
import random
import re
import requests
import shutil
import signal
import sys
import tempfile
import time
import ttbl
import commonl.testing
import unittest
# I bet there is a better way to do this...but we need the symbol to
# be in the logging module so that it is not included in the "function
# that called this" by the logging's internals.
# For debugging, levels are D2: 9, D3: 8, D4:7 ...
import logging
setattr(logging, "logc", logging.root.critical)
setattr(logging, "logx", logging.root.exception)
setattr(logging, "loge", logging.root.error)
setattr(logging, "logw", logging.root.warning)
setattr(logging, "logi", logging.root.info)
setattr(logging, "logd", logging.root.debug)
setattr(logging, "logdl", logging.root.log)
from logging import logc, loge, logx, logw, logi, logd, logdl
class test_run(unittest.TestCase, commonl.testing.test_ttbd_mixin):
"""
Runs a full test run: acquire/image-set/power-on/check/power-off.
"""
@classmethod
def setUpClass(cls):
commonl.testing.test_ttbd_mixin.setUpClass(cls.configfile())
@classmethod
def tearDownClass(cls):
commonl.testing.test_ttbd_mixin.tearDownClass()
target = "viper-qemu-a"
@classmethod
def configfile(cls):
return """\
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
import ttbl.tt_qemu as tt_qemu
ttbl.config.target_add(tt_qemu.tt_qemu("%s", [ 'x86' ],
_tags = {
'bsps': {
'x86': dict(board='qemu_x86',
qemu_cmdline=
"qemu-system-i386 -m 32 -cpu qemu32 -no-reboot -nographic -display none " \
"-net none -clock dynticks -no-acpi -balloon none " \
"-L /usr/share/qemu/bios.bin " \
"-bios bios.bin -machine type=pc-0.14 " \
"-nodefaults -serial stdio"),
'arm': dict(board='qemu_cortex_m3',
qemu_cmdline=
"qemu-system-arm -cpu cortex-m3 " \
"-machine lm3s6965evb -nographic " \
"-nodefaults -serial stdio")
}
}))
""" \
% cls.target
def test_00_acquire(self):
sp = commonl.subpython(
self.srcdir + "/tcf -vvv --config-path : --url http://localhost:%d "
"acquire %s"
% (self.port, self.target))
self.assertEqual(sp.join(), 0, msg = sp.output_str)
def test_01_images_flash(self):
kernel = commonl.testing.test_ttbd_mixin.srcdir + "/tests/data/philosophers-uk-generic_pc.elf"
image = os.path.basename(kernel)
shutil.copy(kernel, self.wd)
# FIXME copy file
sp = commonl.subpython(
self.srcdir + "/tcf --config-path : --url http://localhost:%d "
"images-flash %s kernel:%s"
% (self.port, self.target, image))
self.assertEqual(sp.join(), 0, msg = sp.output_str)
def test_02_power_on(self):
sp = commonl.subpython(
self.srcdir + "/tcf --config-path : --url http://localhost:%d "
"power-on %s"\
% (self.port, self.target))
self.assertEqual(sp.join(), 0, msg = sp.output_str)
logi("letting it run three seconds")
time.sleep(3)
def test_03_console_read_all(self):
sp = commonl.subpython(
self.srcdir + "/tcf --config-path : --url http://localhost:%d "
"console-read --all --filter-ansi %s"\
% (self.port, self.target))
self.assertEqual(sp.join(), 0, msg = sp.output_str)
regex = re.compile("^Philosopher [0-9] (EATING|THINKING)")
count = 0
for _line in sp.stdout_lines:
for line in _line.split("\n"):
line = line.strip()
m = regex.match(line)
if m:
count += 1
self.assertGreater(count, 3, msg = "not enough matches found")
def test_04_power_off(self):
sp = commonl.subpython(
self.srcdir + "/tcf --config-path : --url http://localhost:%d "
"power-off %s"\
% (self.port, self.target))
self.assertEqual(sp.join(), 0, msg = sp.output_str)
@unittest.expectedFailure
def test_05_store_delete(self):
sp = commonl.subpython(
self.srcdir + "/tcf --config-path : --url http://localhost:%d "
"store-delete %s %s"\
% (self.port, self.image))
self.assertEqual(sp.join(), 0, msg = sp.output_str)
def test_06_release(self):
sp = commonl.subpython(
self.srcdir + "/tcf --config-path : --url http://localhost:%d "
"release %s"\
% (self.port, self.target))
self.assertEqual(sp.join(), 0, msg = sp.output_str)
if __name__ == "__main__":
commonl.testing.logging_init(sys.argv)
unittest.main(failfast = True)
```
#### File: tcf/tests/conf_test_allocate_hook.py
```python
class interface(ttbl.tt_interface):
def _allocate_hook(self, target, iface_name, allocdb):
target.fsdb.set("test_property", iface_name + " " + allocdb.allocid)
target = ttbl.test_target("t0")
target.interface_add(
"sample",
interface()
)
ttbl.config.target_add(target)
```
#### File: tcf/tests/conf_test_alloc_release_on_rm.py
```python
class fake_if(ttbl.tt_interface):
def _target_setup(self, target, _):
target.fsdb.set("release_hook_called", None)
def _release_hook(self, target, _force):
target.fsdb.set("release_hook_called", True)
t0 = ttbl.test_target('t0')
ttbl.config.target_add(t0)
t0.interface_add("fake", fake_if())
```
#### File: tcf/tests/conf_test_console_read_write_loopback.py
```python
import ttbl
import ttbl.console
class console_loopback_c(ttbl.console.generic_c):
def enable(self, target, component):
write_file_name = os.path.join(target.state_dir,
"console-%s.write" % component)
# ensure it exists
with open(write_file_name, "w") as wf:
wf.write("")
# now symlink the read to the write file, so what we write is
# read right away
os.symlink(
write_file_name,
os.path.join(target.state_dir, "console-%s.read" % component),
)
ttbl.console.generic_c.enable(self, target, component)
target = ttbl.test_target("t0")
ttbl.config.target_add(target)
console_loopback = console_loopback_c()
target.interface_add("console", ttbl.console.interface(
c1 = console_loopback,
c2 = console_loopback,
c3 = console_loopback,
c4 = console_loopback,
))
```
#### File: tcf/tests/conf_test_daemon_c.py
```python
import commonl
import ttbl.power
class daemon_test_c(ttbl.power.daemon_c):
def verify(self, target, component, cmdline_expanded):
return True
target = ttbl.test_target("t0")
target.interface_add(
"power",
ttbl.power.interface(
c0 = daemon_test_c(cmdline = [ "/usr/bin/sleep", "20d" ]),
# this just blocks reading but socat starts and blocks, which
# is enought to test
socat = ttbl.power.socat_pc("TCP-LISTEN:11234", "TCP-LISTEN:11234"),
)
)
ttbl.config.target_add(target)
```
#### File: tcf/tests/test_alloc_basic.py
```python
import json
import os
import commonl.testing
import tcfl
import tcfl.tc
srcdir = os.path.dirname(__file__)
ttbd = commonl.testing.test_ttbd(config_files = [
# strip to remove the compiled/optimized version -> get source
os.path.join(srcdir, "conf_%s" % os.path.basename(__file__.rstrip('cd')))
])
class _test(commonl.testing.shell_client_base):
"""
"""
def eval_00(self):
self.ttbd = ttbd
self.mk_tcf_config()
self.run_local("SERVER=%s %s/../ttbd/allocation-raw-test.sh" %
(ttbd.url, os.path.abspath(srcdir)))
```
#### File: tcf/tests/test_cmdline_environment.py
```python
import os
import shutil
import tcfl
import tcfl.tc
class _test(tcfl.tc.tc_c):
"""
Test both methods of running and asking for subcases (with commas
or hashes) work
"""
def eval(self):
# copy the cmdline_environment_run.py file to temp as test_ ->
# we do it like this so when we 'tcf run tests/', we don't
# run that one.
tmp_testcasename = os.path.join(self.tmpdir, "test_cmdline_environment_run.py")
shutil.copy(os.path.join(self.kws['srcdir_abs'], "cmdline_environment_run.py"), tmp_testcasename)
tcf_path = os.path.join(self.kws['srcdir_abs'], os.path.pardir, "tcf")
output = self.run_local(
tcf_path
+ f" -e VAR1=value1 -e VAR2=value2 run -vvv {tmp_testcasename}")
## PASS0/suzhzlE#1 .../test_subcase_basic.py @localic-localtg [+0.0s]: SUBCASES=subcase1:subcase2
## PASS0/ toplevel @local [+2.4s]: 1 tests (1 passed, 0 error, 0 failed, 0 blocked, 0 skipped, in 0:00:00.025167) - passed
#
# extract that subcase list from SUBCASES=XYZ...
if 'on the environment as expected' in output:
raise tcfl.tc.error_e("can't find environment variables expected output",
dict(output = output), level = 1, alevel = 0)
```
#### File: tcf/tests/test_debug_loopback.py
```python
import os
import commonl.testing
import tcfl
import tcfl.tc
srcdir = os.path.dirname(__file__)
ttbd = commonl.testing.test_ttbd(config_files = [
# strip to remove the compiled/optimized version -> get source
os.path.join(srcdir, "conf_%s" % os.path.basename(__file__.rstrip('cd')))
])
@tcfl.tc.target(ttbd.url_spec)
class _test(tcfl.tc.tc_c):
@staticmethod
def eval(target):
target.debug.start()
i = target.debug.list()['debug0'].get('state', None)
assert i == 'started', "info reports %s" % i
target.debug.stop()
i = target.debug.list()['debug0']
assert i == None, "info reports %s, expected None" % i
target.debug.halt()
i = target.debug.list()['debug0'].get('state', None)
assert i == 'halted', "info reports %s" % i
target.debug.resume()
i = target.debug.list()['debug0'].get('state', None)
assert i == 'resumed', "info reports %s" % i
target.debug.reset()
i = target.debug.list()['debug0'].get('state', None)
assert i == 'reset', "info reports %s" % i
target.debug.reset_halt()
i = target.debug.list()['debug0'].get('state', None)
assert i == 'reset_halted', "info reports %s" % i
def teardown_90_scb(self):
ttbd.check_log_for_issues(self)
@tcfl.tc.target(ttbd.url_spec)
class release_hooks(tcfl.tc.tc_c):
"""
If we start debugging, when the target is released the debugging
is stopped, signalling that
ttbd.ttbl.debug.interface._release_hook() has run
"""
def eval(self, target):
target.debug.start()
i = target.debug.list()['debug0'].get('state', None)
assert i == 'started', "info reports %s" % i
state = target.property_get("debug_state")
if state != "started":
raise tcfl.tc.error_e(
f"debug_state '{state}', expected 'started'")
target.release()
state = target.property_get("debug_state")
if state != "stopped":
raise tcfl.tc.failed_e(
f"release hook was not called; state '{state}',"
" expected 'stopped'")
self.report_pass("release hook was called")
def teardown_90_scb(self):
ttbd.check_log_for_issues(self)
```
#### File: tcf/tests/test_expect_empty.py
```python
import time
import tcfl.tc
class _none(tcfl.tc.tc_c):
"""
When expecting nothing, we we still get the top level timeout
"""
def eval(self):
ts0 = time.time()
r = self.expect(timeout = 3)
ts = time.time()
assert ts - ts0 >= 3 and ts - ts0 - 3 < 0.5, \
"empty wait of three seconds took %.fs instead" % (ts - ts0)
assert r == {}, \
"expected empty r, got %s" % r
self.report_pass("empty expect took expected %.fs" % (ts - ts0),
dict(r = r))
class _delay_expectation_c(tcfl.tc.expectation_c):
"""
Expectator for tcfl.tc.expect() that just delays @delay seconds
before returning success in detecting
"""
def __init__(self, delay, **kwargs):
# we rely on a short poll period to adjust the timeouts below
tcfl.tc.expectation_c.__init__(self, None, 0.1, **kwargs)
self.delay = delay
def poll_context(self):
return ""
def poll(self, testcase, run_name, buffers_poll):
# first time we poll, take a timestamp
if buffers_poll.get('ts0', None) == None:
buffers_poll['ts0'] = time.time()
def detect(self, testcase, run_name, buffers_poll, buffers):
ts0 = buffers_poll['ts0']
ts = time.time()
if ts - ts0 > self.delay:
testcase.report_pass("%s: detected after %.fs" % (
self.name, ts - ts0))
return { "message": "detected after %.fs" % (ts - ts0) }
return None
def flush(self, testcase, run_name, buffers_poll, buffers, results):
pass
class _all_zero_timeouts(tcfl.tc.tc_c):
"""
When waiting for multiple zero timeouts, we still get the top
level timeout
"""
def eval(self):
ts0 = time.time()
r = self.expect(
_delay_expectation_c(1, timeout = 0),
_delay_expectation_c(2, timeout = 0),
# note we won't get to detect this one because we timeout
# right at 3 seconds
_delay_expectation_c(3, timeout = 0),
_delay_expectation_c(4, timeout = 0),
_delay_expectation_c(5, timeout = 0),
timeout = 3)
ts = time.time()
assert ts - ts0 >= 3 and ts - ts0 - 3 < 0.5, \
"empty wait of three seconds took %.fs instead" % (ts - ts0)
assert len(r) == 2, \
"expected two expectations found, got %d (%s)" % (len(r), r)
self.report_pass("empty expect took expected %.fs" % (ts - ts0),
dict(r = r))
class _mix_zero_timeouts(tcfl.tc.tc_c):
"""
When waiting for a combination of zero timeouts and non-zero
timeouts, it returns the first found expectation.
"""
def eval(self):
ts0 = time.time()
r = self.expect(
_delay_expectation_c(3, timeout = 3.5),
_delay_expectation_c(10, timeout = 0),
timeout = 5)
ts = time.time()
assert len(r) == 1, \
"expected one delays found, got %s" % r
assert ts - ts0 >= 3 and ts - ts0 - 3 < 0.5, \
"expect expected of 3 seconds, took %.fs instead" % (ts - ts0)
self.report_pass("expect took expected %.fs and returned "
"%d detected expectation" % (ts - ts0, len(r)),
dict(r = r))
class _no_zero_timeouts(tcfl.tc.tc_c):
"""
When all expectations have non-zero timeouts, expect() waits until
every one of them is detected (or a timeout expires) and returns them all.
"""
def eval(self):
ts0 = time.time()
r = self.expect(
_delay_expectation_c(1, timeout = 3.5),
_delay_expectation_c(2, timeout = 3.5),
_delay_expectation_c(3, timeout = 3.5),
timeout = 3.5)
ts = time.time()
assert len(r) == 3, \
"expected two delays found, got %s" % r
assert ts - ts0 >= 3 and ts - ts0 - 3 < 0.5, \
"expect expected of 3 seconds, took %.fs instead" % (ts - ts0)
self.report_pass("expect took expected %.fs and returned "
"%d detected expectation" % (ts - ts0, len(r)),
dict(r = r))
```
#### File: tcf/tests/test_images_flash_shell.py
```python
import os
import commonl.testing
import tcfl.tc
srcdir = os.path.dirname(__file__)
ttbd = commonl.testing.test_ttbd(
config_files = [
# strip to remove the compiled/optimized version -> get source
os.path.join(srcdir, "conf_%s" % os.path.basename(__file__.rstrip('cd')))
],
errors_ignore = [
"Traceback"
])
@tcfl.tc.target(ttbd.url_spec + " and t0")
class _test(tcfl.tc.tc_c):
"""
Verify that different drivers that shall pass/fail do what is
expected, exercising the checks in :class:`ttbl.images.flash_shell_cmd_c`
"""
def eval(self, target):
target.images.flash({ "image_works_0_0": __file__ })
self.report_pass("driver 0-0 expected to pass passes")
target.images.flash({ "image_works_1_1": __file__ })
self.report_pass("driver 1-1 expected to pass passes")
try:
target.images.flash({ "image_fails_0_3": __file__ })
raise tcfl.tc.failed_e(
"flash() didn't raise error when flashing 0_3 as expected")
except tcfl.tc.error_e:
self.report_pass("driver 0-3 expected to fail fails")
try:
target.images.flash({ "image_fails_3_2": __file__ })
raise tcfl.tc.failed_e(
"flash() didn't raise error when flashing 3_2 as expected")
except tcfl.tc.error_e as e:
self.report_pass("driver 3_2 expected to fail fails")
def teardown_90_scb(self):
ttbd.check_log_for_issues(self)
```
#### File: tcf/tests/test_images_parallel_retry.py
```python
import os
import time
import commonl.testing
import tcfl.tc
srcdir = os.path.dirname(__file__)
ttbd = commonl.testing.test_ttbd(
config_files = [
# strip to remove the compiled/optimized version -> get source
os.path.join(srcdir, "conf_%s" % os.path.basename(__file__.rstrip('cd')))
],
errors_ignore = [
"Traceback"
])
@tcfl.tc.target(ttbd.url_spec + ' and t0')
class flashes0(tcfl.tc.tc_c):
@staticmethod
def eval(target):
target.images.flash({ "image0": __file__ })
def teardown_90_scb(self):
ttbd.check_log_for_issues(self)
@tcfl.tc.target(ttbd.url_spec + ' and t0')
class flashes2(tcfl.tc.tc_c):
@staticmethod
def eval(target):
target.images.flash({ "image2": __file__ })
def teardown_90_scb(self):
ttbd.check_log_for_issues(self)
```
#### File: tcf/tests/test_power_fake.py
```python
import os
import commonl.testing
import tcfl.tc
srcdir = os.path.dirname(__file__)
ttbd = commonl.testing.test_ttbd(config_files = [
# strip to remove the compiled/optimized version -> get source
os.path.join(srcdir, "conf_%s" % os.path.basename(__file__.rstrip('cd')))
])
@tcfl.tc.target(ttbd.url_spec + " and t0")
class _test(tcfl.tc.tc_c):
@staticmethod
def eval(target):
target.power.on()
r = target.power.get()
assert r == True, "power state is %s" % r
target.power.off()
r = target.power.get()
assert r == False, "power state is %s" % r
target.power.cycle()
r = target.power.get()
assert r == True, "power state is %s" % r
target.power.off()
r = target.power.get()
assert r == False, "power state is %s" % r
target.power.reset()
r = target.power.get()
assert r == True, "power state is %s" % r
def teardown_90_scb(self):
ttbd.check_log_for_issues(self)
```
#### File: tcf/tests/test_properties.py
```python
import collections
import json
import os
import commonl.testing
import tcfl
import tcfl.tc
srcdir = os.path.dirname(__file__)
ttbd = commonl.testing.test_ttbd(config_files = [
# strip to remove the compiled/optimized version -> get source
os.path.join(srcdir, "conf_%s" % os.path.basename(__file__.rstrip('cd')))
])
@tcfl.tc.target(ttbd.url_spec)
class _test(tcfl.tc.tc_c):
def eval(self, target):
n = dict(
T = True,
F = False,
i = 3,
f = 3.0,
s = "string",
)
n_nested = dict(n)
n_nested['d'] = n
d = dict(
a = n_nested,
b = n_nested,
c = n_nested,
)
target.properties_set(d)
def _property_check(name, original):
with self.subcase(name):
val = target.property_get(name)
if val != original:
self.report_fail("%s mismatch" % name,
dict(original = original, obtained = val))
else:
self.report_pass("%s matches" % name)
with self.subcase("property_check"):
_property_check('a', d['a'])
_property_check('b', d['b'])
_property_check('c', d['c'])
# get nested attributes
_property_check('a.d', d['a']['d'])
_property_check('a.d.i', d['a']['d']['i'])
def _unexistant_property_check(prop_name):
val = target.property_get(prop_name)
if val != None:
self.report_fail(
f"unexistant property '{prop_name}':"
f" expected None; got {val}")
else:
self.report_pass(
f"unexistant property '{prop_name}': returns None")
with self.subcase("unexistant_property"):
for prop_name in [
"a.d.this doesnt exist",
"this doesnt exist",
"this.doesnt.exist" ]:
with self.subcase(prop_name):
_unexistant_property_check(prop_name)
def teardown_90_scb(self):
ttbd.check_log_for_issues(self)
def field_get_verify(r, property_name, do_raise = False):
# unfold a.b.c.d which returns { a: { b: { c: { d: value } } } }
propertyl = property_name.split(".")
acc = []
for prop_name in propertyl:
r = r.get(prop_name, None)
acc.append(prop_name)
if r == None:
if do_raise:
raise tcfl.tc.failed_e(
'field %s is non-existing' % ".".join(acc), dict(r))
val = None
break
else:
val = r
return val
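# Illustrative example (made-up dictionary) of how field_get_verify()
# unfolds a dotted property name over a nested dictionary:
#
#   r = { "interfaces": { "power": { "c0": { "state": True } } } }
#   field_get_verify(r, "interfaces.power.c0.state")   # -> True
#   field_get_verify(r, "interfaces.missing")          # -> None
#   field_get_verify(r, "interfaces.missing", True)    # raises failed_e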
@tcfl.tc.target(ttbd.url_spec)
class _test_sorted(tcfl.tc.tc_c):
"""The power components listed by the properties need to be sorted
according to their declaration order in the configuration
"""
power_rail = [
"IOC/YK23406-2",
"ADB/YK23406-3",
"main/sp7/8",
"wait /dev/tty-gp-64b-soc",
"serial0_soc",
"wait /dev/tty-gp-64b-ioc",
"serial1_ioc",
]
def eval(self, target):
r = target.rtb.send_request(
"GET", "targets/" + target.id,
data = { "projection": json.dumps(["interfaces.power" ]) },
raw = True)
# Even though JSON and python dicts are unordered, the server
# provides the responses in the right order
rt = json.loads(r.text, object_pairs_hook = collections.OrderedDict)
power_impls = field_get_verify(rt, "interfaces.power")
power_impl_list = list(power_impls.keys())
if power_impl_list != self.power_rail:
raise tcfl.tc.failed_e("server didn't keep the power-rail order",
dict(
reported_rail = power_impl_list,
sorted_rail = self.power_rail,
))
self.report_pass("server kept power-rail order",
dict(
reported_rail = power_impls.keys(),
sorted_rail = self.power_rail,
))
def teardown_90_scb(self):
ttbd.check_log_for_issues(self)
```
#### File: tcf/tests/test_report_subcase.py
```python
import tcfl
import tcfl.tc
class _test(tcfl.tc.tc_c):
"""
Exercise subcase reporting
"""
def eval(self):
self.report_info("something happened", subcase = "sub1")
self.report_info("something happened", subcase = "sub1")
self.report_info("something happened", subcase = "sub2")
self.report_pass("this one passed", subcase = "sub2")
self.report_fail("this one failed", subcase = "sub3")
self.report_error("this one errored", subcase = "sub4")
self.report_blck("this one blocked", subcase = "sub5")
self.report_skip("this one failed", subcase = "sub6")
self.report_data("domain", "name", "value", subcase = "sub7")
raise tcfl.tc.pass_e("exception passed", dict(subcase = "sub8"))
with tcfl.msgid_c(subcase = "sub9"):
self.report_fail("9 failed")
@tcfl.tc.subcase('somename')
def eval_10(self):
self.report_info("method subcase with name")
@tcfl.tc.subcase() # take it from the method name
def eval_10_some_name(self):
self.report_info("method subcase with default name")
@tcfl.tc.subcase()
def eval_20(self):
with tcfl.msgid_c(subcase = 'deeper'):
raise AssertionError("bleh, asserting a failure")
@tcfl.tc.subcase()
def eval_40(self):
self.report_pass("I kept executing methods because"
" they were marked subcases")
```
#### File: tcf/tests/test_subcase_basic.py
```python
import tcfl
class _test(tcfl.tc.tc_c):
def eval_00(self):
self.report_pass(f"SUBCASES={':'.join(self.subcases)}", level = 0)
```
#### File: tcf/tests/test_tunnels_released.py
```python
import os
import socket
import commonl.testing
import tcfl
import tcfl.tc
srcdir = os.path.dirname(__file__)
ttbd = commonl.testing.test_ttbd(config_files = [
# strip to remove the compiled/optimized version -> get source
os.path.join(srcdir, "conf_%s" % os.path.basename(__file__.rstrip('cd')))
])
@tcfl.tc.target(ttbd.url_spec)
class release_hooks(tcfl.tc.tc_c):
"""
We allocate a target, create tunnels and then we release it; when
released, the tunnels are destroyed.
"""
def eval(self, target):
target.tunnel.add(22, "127.0.0.1", 'tcp')
self.report_pass("release hooks were called on target release")
def teardown_90_scb(self):
ttbd.check_log_for_issues(self)
```
#### File: tcf/tests/test_utf8_invalid.py
```python
import tcfl.tc
class _test(tcfl.tc.tc_c):
"""
Report a string with invalid UTF-8 and see what happens.
All the paths should succeed and ignore/map the bad UTF-8 to a
character.
"""
def eval(self):
self.report_info("pass bad UTF-8\xff",
dict(message = "pass bad UTF-8\xff"))
```
#### File: tcf/ttbd/capture-example.py
```python
import random
import sys
import time
import ttbl.capture_cli
def _f(rng):
# note testcase test/test-capture-streams.py relies on this
# message
print(f"DEBUG: capture-example sampling {time.time()}",
file = sys.stderr)
# Sample
errors = {}
data = {}
data['r1'] = rng.randint(0, 1000000)
data['r2'] = rng.randint(0, 1000000)
data['r3'] = rng.randint(0, 1000000)
return data, errors
def _xlat(d, data):
d['r1'] = data['r1']/1000000
d['r2'] = data['r2']/1000000
d['r3'] = data['r3']/1000000
ttbl.capture_cli.main(
sys.argv[1], _f, _xlat,
# define a random number generator we'll pass to _f via
# ttbl.capture_cli.main
random.Random(int(sys.argv[2])),
period_s = float(sys.argv[3]))
```
#### File: tcf/ttbd/conf_00_lib_capture.py
```python
import signal
import ttbl.capture
#: A capturer to take screenshots from a v4l device using ffmpeg
#:
#: Note the fields are target's tags and others specified in
#: :class:`ttbl.capture.generic_snapshot` and
#: :class:`ttbl.capture.generic_stream`.
#:
#: To use:
#:
#: - define a target
#:
#: - physically connect the capture interface to it and to the
#: server
#:
#: - Create a *udev* configuration so the capture device exposes
#: itself as */dev/video-TARGETNAME-INDEX*.
#:
#: This requires creating a *udev* configuration so that the v4l
#: device gets recognized and an alias created, which can be
#: accomplished by dropping a udev rule in */etc/udev/rules.d* such
#: as::
#:
#: SUBSYSTEM == "video4linux", ACTION == "add", \
#: KERNEL=="video*", \
#: ENV{ID_SERIAL_SHORT} == "SOMESERIALNUMBER", \
#: SYMLINK += "video-nuc-01A-$attr{index}"
#:
#: note some USB devices don't offer a serial number, then you
#: can use a device path, such as::
#:
#: ENV{ID_PATH} == "pci-0000:00:14.0-usb-0:2.1:1.0", \
#:
#: this is shall be a last resort, as then moving cables to
#: different USB ports will change the paths and you will have to
#: reconfigure.
#:
#: See :ref:`methods to find device information <find_usb_info>`
#:
#: - add the configuration snippet::
#:
#: ttbl.test_target.get(TARGETNAME).interface_add(
#: "capture",
#: ttbl.capture.interface(
#: screen = "hdmi0_screenshot",
#: screen_stream = "hdmi0_vstream",
#: hdmi0_screenshot = capture_screenshot_ffmpeg_v4l,
#: hdmi0_vstream = capture_vstream_ffmpeg_v4l,
#: ))
#:
#: Note in this case we have used an
#:
#: This has been tested with:
#:
#: - https://www.agptek.com/AGPTEK-USB-3-0-HDMI-HD-Video-Capture-1089-212-1.html
#:
#: Which shows in USB as::
#:
#: 3-2.2.4 1bcf:2c99 ef 3.10 5000MBit/s 512mA 4IFs (VXIS Inc ezcap U3 capture)
#: 3-2.2.4:1.2 (IF) 01:01:00 0EPs (Audio:Control Device) snd-usb-audio sound/card5
#: 3-2.2.4:1.0 (IF) 0e:01:00 1EP (Video:Video Control) uvcvideo video4linux/video5 video4linux/video4 input/input15
#: 3-2.2.4:1.3 (IF) 01:02:00 0EPs (Audio:Streaming) snd-usb-audio
#: 3-2.2.4:1.1 (IF) 0e:02:00 1EP (Video:Video Streaming) uvcvideo
#:
#: Note this also can be used to capture video of the HDMI stream
#: using capture_vstream_ffmpeg_v4l and audio played over HDMI via
#: an exposed ALSA interface (see capture_astream_ffmpeg_v4l below).
capture_screenshot_ffmpeg_v4l = ttbl.capture.generic_snapshot(
"screenshot:/dev/video-%(id)s-0",
"ffmpeg -i /dev/video-%(id)s-0"
# -ss .50 to let the capturer warm up; 0 will come a
# black frame always
" -ss 0.5 -frames 1 -c:v png -f image2pipe "
"-y %(output_file_name)s",
mimetype = "image/png", extension = ".png"
)
#: A capturer to take screenshots from VNC
#:
#: Note the fields are target's tags and others specified in
#: :class:`ttbl.capture.generic_snapshot` and
#: :class:`ttbl.capture.generic_stream`.
#:
#: Deprecated in favour of :func:`mk_capture_screenshot_vnc`
capture_screenshot_vnc = ttbl.capture.generic_snapshot(
# don't set the port for the name, otherwise the UPID keeps
# changing
"VNC %(id)s@%(vnc-host)s",
# need to make sure vnc-host/port are defined in the target's tags
# needs the .png, otherwise it balks at guessing extensions
# don't do -q, otherwise when it fails, it fails silently; for
# QEMU, it is *localhost*.
"gvnccapture %(vnc-host)s:%(vnc-port)s %(output_file_name)s",
mimetype = "image/png",
extension = ".png"
)
def mk_capture_screenshot_vnc(name):
"""
Create a VNC screenshot capturer that captures off a VNC source
declared in inventory entry *vnc.NAME*
Note the fields are target's tags and others specified in
:class:`ttbl.capture.generic_snapshot` and
:class:`ttbl.capture.generic_stream`.
to use, add in a :ref:`server configuration file
<ttbd_configuration>` to any target that offers a VNC source:
>>> target.interface_add("capture", ttbl.capture.interface(
>>> vnc0_screenshot = mk_capture_screenshot_vnc("vnc0"),
>>> screen = "vnc0_screenshot",
>>> ))
"""
assert isinstance(name, str)
# note the %(FIELD)s will be mapped to entries in the target's
# inventory when the capture is going to be done, so if name is
# ABC, it will capture off vnc.ABC,host
return ttbl.capture.generic_snapshot(
# don't set the port for the name, otherwise the UPID keeps
# changing
f"VNC %(id)s@%(vnc.{name}.host)s",
# need to make sure vnc-host/port are defined in the target's tags
# needs the .png, otherwise it balks at guessing extensions
# don't do -q, otherwise when it fails, it fails silently; for
# QEMU, it is *localhost*.
f"gvnccapture %(vnc.{name}.host)s:%(vnc.{name}.port)s %(output_file_name)s",
mimetype = "image/png",
extension = ".png"
)
#: Capture a screenshot off VNC port declared in inventory *vnc.vnc0*
capture_screenshot_vnc0 = mk_capture_screenshot_vnc("vnc0")
#: Capture video off a v4l device using ffmpeg
#:
#: See capture_screenshot_ffmpeg_v4l for setup instructions, as they
#: are common.
capture_vstream_ffmpeg_v4l = ttbl.capture.generic_stream(
"video:/dev/video-%(id)s-0",
"ffmpeg -y -nostdin -i /dev/video-%(id)s-0"
" -flush_packets" # disable some buffering
" -f avi -qscale:v 10 -y %(stream_filename)s",
mimetype = "video/avi", extension = ".avi",
wait_to_kill = 4, use_signal = signal.SIGINT # flushes ffmpeg
)
#: Capture audio off an Alsa device using ffmpeg
#:
#: See capture_screenshot_ffmpeg_v4l for setup instructions, as they
#: are similar.
#:
#: Note the udev setup instructions for Alsa devices are slightly
#: different; instead of *SYMLINKS* we have to set *ATTR{id}*::
#:
#: SUBSYSTEM == "sound", ACTION == "add", \
#: ENV{ID_PATH} == "pci-0000:00:14.0-usb-0:2.1:1.2", \
#: ATTR{id} = "TARGETNAME"
#:
#: Once this configuration is completed, udev is reloaded (*sudo
#: udevadm control --reload-rules*) and the
#: device is triggered (with *udevadm trigger /dev/snd/controlCX* or
#: the machine restarted), */proc/asound* should contain a symlink to
#: the actual card::
#:
#: $ ls /proc/asound/ -l
#: total 0
#: dr-xr-xr-x. 3 root root 0 Jun 21 21:52 card0
#: dr-xr-xr-x. 7 root root 0 Jun 21 21:52 card4
#: ..
#: lrwxrwxrwx. 1 root root 5 Jun 21 21:52 TARGETNAME -> card4
#: ...
#:
#: Device information for Alsa devices (Card 0, Card 1, etc...) can be
#: found with::
#:
#: $ udevadm info /dev/snd/controlC0
#: P: /devices/pci0000:00/0000:00:1f.3/sound/card0/controlC0
#: N: snd/controlC0
#: S: snd/by-path/pci-0000:00:1f.3
#: E: DEVLINKS=/dev/snd/by-path/pci-0000:00:1f.3
#: E: DEVNAME=/dev/snd/controlC0
#: E: DEVPATH=/devices/pci0000:00/0000:00:1f.3/sound/card0/controlC0
#: E: ID_PATH=pci-0000:00:1f.3
#: E: ID_PATH_TAG=pci-0000_00_1f_3
#: E: MAJOR=116
#: E: MINOR=11
#: E: SUBSYSTEM=sound
#: E: TAGS=:uaccess:
#: E: USEC_INITIALIZED=30391111
#:
#: As indicated in capture_screenshot_ffmpeg_v4l, using
#: *ENV{ID_SERIAL_SHORT}* is preferred if available.
capture_astream_ffmpeg_v4l = ttbl.capture.generic_stream(
"audio:%(id)s",
"ffmpeg -f alsa -i sysdefault:%(id)s"
" -f avi -qscale:v 10 -y %(output_file_name)s",
mimetype = "audio/wav"
)
#:
#: Capture HDMI Audio from an AGPTEK USB 3.0 HDMI HD Video Capture
#:
#: - https://www.agptek.com/AGPTEK-USB-3-0-HDMI-HD-Video-Capture-1089-212-1.html
#:
#: We can't use a generic ALSA capturer because there seem to be
#: glitches in the device
#:
capture_agptek_hdmi_astream = ttbl.capture.generic_stream(
"hdmi0-audio:%(id)s",
"ffmpeg -f alsa -i sysdefault:%(id)s-hdmi"
" -f avi -qscale:v 10 -y %(output_file_name)s",
mimetype = "audio/wav",
pre_commands = [
# somehow the adapter doesn't work right unless "reset" it
# with the USB kernel interface.
#
# This gets the path in the
# /sys sysfs filesystem of /dev/video-%(id)s-0 (with 'udevadm
# info') that yields something like:
#
# $ udevadm info /dev/video-%(id)s-0 -q path
# /devices/pci0000:00/0000:00:14.0/usb1/1-4/1-4.2/1-4.2:1.0/video4linux/video0
#
# three levels up (removing 1-4.2:1.0/video4linux/video0) gets
# us to the top level USB device information node:
#
# /devices/pci0000:00/0000:00:14.0/usb1/1-4/1-4.2
#
# so in /sys/devices/pci0000:00/0000:00:14.0/usb1/1-4/1-4.2
# there is a file called 'authorized' that will force the USB
# device to be disconnected or connected to the
# system. Writing 0 we soft-disconnect it, writing 1 we ask
# for it to be connected.
"echo 0 > /sys/$(udevadm info video-%(id)s-0 -q path)/../../../authorized",
"sleep 0.5s",
"echo 1 > /sys/$(udevadm info video-%(id)s-0 -q path)/../../../authorized",
"sleep 1s",
# vtop HW has "Digital In" for an input name
# FIXME: we have issues with the spaces, somewhere it is being
# split?
"amixer -c %(id)s-hdmi sset 'Digital In' 75%%"
]
)
#: Capture audio with the USB capturer VTOP/JMTEK 0c76:161e
#:
#: https://www.amazon.com/Digital-Audio-Capture-Windows-10-11/dp/B019T9KS04
#:
#: This is for capturing audio on the audio grabber connected to the
#: main builtin sound output of the target (usually identified as
#: *front* by the Linux driver subsystem), which UDEV has configured
#: to be called TARGETNAME-front::
#:
#: SUBSYSTEM == "sound", ACTION == "add", \
#: ENV{ID_PATH} == "pci-0000:00:14.0-usb-0:2.3.1:1.0", \
#: ATTR{id} = "TARGETNAME-front"
#:
capture_front_astream_vtop_0c76_161e = ttbl.capture.generic_stream(
"audio:%(id)s-front",
"ffmpeg -f alsa -i sysdefault:%(id)s-front"
" -f wav -qscale:v 10 -y %(output_file_name)s",
mimetype = "audio/wav",
# vtop HW has Mic for an input name
pre_commands = [ "amixer -c %(id)s-front sset Mic 75%%" ]
)
```
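As a hedged configuration sketch (the target name and the capturer selection are placeholders), this is how the capturers defined above would typically be wired into a target from a server configuration file, following the `ttbl.capture.interface` pattern already shown in the docstrings:
```python
import ttbl
import ttbl.capture

target = ttbl.test_target.get("TARGETNAME")
target.interface_add(
    "capture",
    ttbl.capture.interface(
        # aliases pointing to the default snapshot/stream capturers
        screen = "hdmi0_screenshot",
        screen_stream = "hdmi0_vstream",
        hdmi0_screenshot = capture_screenshot_ffmpeg_v4l,
        hdmi0_vstream = capture_vstream_ffmpeg_v4l,
        vnc0_screenshot = mk_capture_screenshot_vnc("vnc0"),
    ))
```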
#### File: tcf/ttbd/conf_00_lib.py
```python
import copy
import errno
import logging
import os
import random
import re
import shutil
import subprocess
import time
import ipaddress
import netifaces
import commonl
import ttbl
import ttbl.dhcp
import ttbl.pc
import ttbl.pc_ykush
import ttbl.rsync
import ttbl.socat
import ttbl.usbrly08b
class vlan_pci(ttbl.power.impl_c):
"""Power controller to implement networks on the server side.
This allows to:
- connect a server to a test network (NUT) to provide services
suchs as DHCP, HTTP, network tapping, proxying between NUT and
upstream networking, etc
- connect virtual machines running inside virtual networks in the
server to physical virtual networks.
This behaves as a power control implementation that when turned:
- on: sets up the interfaces, brings them up, starts capturing
- off: stops all the network devices, making communication impossible.
**Capturing with tcpdump**
FIXME: deprecate and replace with capture interface
Can be enabled setting the target's property *tcpdump*::
$ tcf property-set TARGETNAME tcpdump FILENAME
this will have the target dump all traffic capture to a file
called *FILENAME* in the daemon file storage area for the user who
owns the target. The file can then be recovered with::
$ tcf store-download FILENAME
*FILENAME* must be a valid file name, with no directory
components.
.. note:: Note this requires the property *tcpdump* being
registered in the configuration with
>>> ttbl.test_target.properties_user.add('tcpdump')
so normal users can set/unset it.
Example configuration (see :ref:`naming networks <bp_naming_networks>`):
>>> target = ttbl.test_target("nwa")
>>> target.interface_add("power", ttbl.power.interface(vlan_pci()))
>>> ttbl.config.interconnect_add(
>>> target,
>>> tags = {
>>> 'ipv4_addr': '192.168.97.1',
>>> 'ipv4_prefix_len': 24,
>>> 'ipv6_addr': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b',
>>> 'ipv6_prefix_len': 104,
>>> 'mac_addr': '02:61:00:00:00:01',
>>> })
Now QEMU targets (for example), can declare they are part of this
network and upon start, create a tap interface for themselves::
$ ip tuntap add IFNAME mode tap
$ ip link set IFNAME up master bnwa
$ ip link set IFNAME promisc on up
which then is given to QEMU as::
-device virtio-net-pci,netdev=nwa,mac=MACADDR,romfile=
-netdev tap,id=nwa,script=no,ifname=IFNAME
(targets implemented by
:func:`conf_00_lib_pos.target_qemu_pos_add` and
:py:func:`conf_00_lib_mcu.target_qemu_zephyr_add` with VMs
implement this behaviour).
If a tag named *mac_addr* is given, containing the MAC address
of a physical interface in the system, then it will be taken over
as the point of connection to external targets. Connectivity from
any virtual machine in this network will be extended to said
network interface, effectively connecting the physical and virtual
targets.
.. warning:: PHYSICAL mode (mac_addr) not re-tested
.. warning:: DISABLE Network Manager's (or any other network
manager) control of this interface, otherwise it will
interfere with it and network will not operate.
Follow :ref:`these steps <howto_nm_disable_control>`
System setup:
- *ttbd* must be ran with CAP_NET_ADMIN so it can create network
interfaces. For that, either add to systemd's
``/etc/systemd/system/[email protected]``::
CapabilityBoundingSet = CAP_NET_ADMIN
AmbientCapabilities = CAP_NET_ADMIN
or as root, give ttbd the capability::
# setcap cap_net_admin+pie /usr/bin/ttbd
- *udev*'s */etc/udev/rules.d/ttbd-vlan*::
SUBSYSTEM == "macvtap", ACTION == "add", DEVNAME == "/dev/tap*", \
GROUP = "ttbd", MODE = "0660"
This is needed so the tap devices can be accessed by user
*ttbd*, which is the user that runs the daemon.
Remember to reload *udev*'s configuration with `udevadm control
--reload-rules`.
This is already taken care by the RPM installation.
**Fixture setup**
- Select a network interface to use (it can be a USB or PCI
interface); find out its MAC address with *ip link show*.
- add the tag *mac_addr* with said address to the tags of the
target object that represents the network to which said
interface is to be connected; for example, for a network called
*nwc*
>>> target = ttbl.test_target("nwa")
>>> target.interface_add("power", ttbl.power.interface(vlan_pci()))
>>> ttbl.config.interconnect_add(
>>> target,
>>> tags = {
>>> 'ipv4_addr': '192.168.97.1',
>>> 'ipv4_prefix_len': 24,
>>> 'ipv6_addr': 'fdf8:f53e:61e4::18',
>>> 'ipv6_prefix_len': 104,
>>> 'mac_addr': "a0:ce:c8:00:18:73",
>>> })
or for an existing network (such as the configuration's default
*nwa*):
.. code-block:: python
# eth dongle mac 00:e0:4c:36:40:b8 is assigned to NWA
ttbl.test_target.get('nwa').tags_update(dict(mac_addr = '00:e0:4c:36:40:b8'))
Furthermore, default networks *nwa*, *nwb* and *nwc* are defined
to have a power control rail (versus an individual power
controller), so it is possible to add another power controller
to, for example, power on or off a network switch:
.. code-block:: python
ttbl.test_target.get('nwa').pc_impl.append(
ttbl.pc.dlwps7("http://USER:PASSWORD@sp5/8"))
This creates a power controller to switch on or off plug #8 on
a Digital Loggers Web Power Switch named *sp5* and makes it part
of the *nwa* power control rail. Thus, when powered on, it will
bring the network up and also turn on the network switch.
- add the tag *vlan* to also be a member of an ethernet VLAN
network (requires also a *mac_addr*):
>>> target = ttbl.test_target("nwa")
>>> target.interface_add("power", ttbl.power.interface(vlan_pci()))
>>> ttbl.config.interconnect_add(
>>> target,
>>> tags = {
>>> 'ipv4_addr': '192.168.97.1',
>>> 'ipv4_prefix_len': 24,
>>> 'ipv6_addr': 'fdf8:f53e:61e4::18',
>>> 'ipv6_prefix_len': 104,
>>> 'mac_addr': "a0:ce:c8:00:18:73",
>>> 'vlan': 30,
>>> })
in this case, all packets on the interface described by MAC address
*a0:ce:c8:00:18:73* tagged with VLAN ID *30* will be part of this network.
- lastly, for each target connected to that network, update its
tags to indicate it:
.. code-block:: python
ttbl.test_target.get('TARGETNAME-NN').tags_update(
{
'ipv4_addr': "192.168.10.30",
'ipv4_prefix_len': 24,
'ipv6_addr': "fdf8:f53e:61e4::18",
'ipv6_prefix_len': 104,
},
ic = 'nwc')
By convention, the server is .1, the QEMU Linux virtual machines
are set from .2 to .10 and the QEMU Zephyr virtual machines from
.30 to .45. Physical targets are set to start at 100.
Note the networks for targets and infrastructure :ref:`have to be
kept separated <separated_networks>`.
"""
def __init__(self):
ttbl.power.impl_c.__init__(self)
@staticmethod
def _if_rename(target):
if 'mac_addr' in target.tags:
# We do have a physical device, so we are going to first,
# rename it to match the IC's name (so it allows targets
# to find it to run IP commands to attach to it)
ifname = commonl.if_find_by_mac(target.tags['mac_addr'])
if ifname == None:
raise ValueError("Cannot find network interface with MAC '%s'"
% target.tags['mac_addr'])
if ifname != target.id:
subprocess.check_call("ip link set %s down" % ifname,
shell = True)
subprocess.check_call("ip link set %s name b%s"
% (ifname, target.id), shell = True)
@staticmethod
def _get_mode(target):
if 'vlan' in target.tags and 'mac_addr' in target.tags:
# we are creating ethernet vlans, so we do not own the
# device exclusively and will create new links
return 'vlan'
elif 'vlan' in target.tags and 'mac_addr' not in target.tags:
raise RuntimeError("vlan ID specified without a mac_addr")
elif 'mac_addr' in target.tags:
# we own the device exclusively
return 'physical'
else:
return 'virtual'
def on(self, target, _component):
# Bring up the lower network interface; lower is called
# whatever (if it is a physical device) or _bNAME; bring it
# up, make it promiscuous
mode = self._get_mode(target)
if mode == 'vlan':
# our lower is a physical device, our upper is a device
# which will tag for eth vlan %(vlan)s
ifname = commonl.if_find_by_mac(target.tags['mac_addr'],
physical = True)
if not commonl.if_present("b%(id)s" % target.kws):
# Do create the new interface only if not already
# created, otherwise daemons that are already running
# will stop operating
# This function might be being called to re-establish a
# half-baked operating state.
kws = dict(target.kws)
kws['ifname'] = ifname
subprocess.check_call(
"/usr/sbin/ip link add"
" link %(ifname)s name b%(id)s"
" type vlan id %(vlan)s"
#" protocol VLAN_PROTO"
#" reorder_hdr on|off"
#" gvrp on|off mvrp on|off loose_binding on|off"
% kws, shell = True)
subprocess.check_call( # bring lower up
"/usr/sbin/ip link set dev %s up promisc on" % ifname,
shell = True)
elif mode == 'physical':
ifname = commonl.if_find_by_mac(target.tags['mac_addr'])
subprocess.check_call( # bring lower up
"/usr/sbin/ip link set dev %s up promisc on" % ifname,
shell = True)
self._if_rename(target)
elif mode == 'virtual':
# We create a bridge, to serve as lower
if not commonl.if_present("b%(id)s" % target.kws):
# Do create the new interface only if not already
# created, otherwise daemons that are already running
# will stop operating
# This function might be being called to re-establish a
# half-baked operating state.
commonl.if_remove_maybe("b%(id)s" % target.kws)
subprocess.check_call(
"/usr/sbin/ip link add"
" name b%(id)s"
" type bridge"
% target.kws, shell = True)
subprocess.check_call( # bring lower up
"/usr/sbin/ip link set"
" dev b%(id)s"
" up promisc on"
% target.kws, shell = True)
else:
raise AssertionError("Unknown mode %s" % mode)
# Configure the IP addresses for the top interface
subprocess.check_call( # clean up existing address
"/usr/sbin/ip add flush dev b%(id)s "
% target.kws, shell = True)
subprocess.check_call( # add IPv6
# if this fails, check Network Manager hasn't disabled ipv6
# sysctl -a | grep disable_ipv6 must show all to 0
"/usr/sbin/ip addr add"
" %(ipv6_addr)s/%(ipv6_prefix_len)s dev b%(id)s "
% target.kws, shell = True)
subprocess.check_call( # add IPv4
"/usr/sbin/ip addr add"
" %(ipv4_addr)s/%(ipv4_prefix_len)d"
" dev b%(id)s" % target.kws, shell = True)
# Bring up the top interface, which sets up the routing
subprocess.check_call(
"/usr/sbin/ip link set dev b%(id)s up promisc on"
% target.kws, shell = True)
target.fsdb.set('power_state', 'on')
# Start tcpdump on the network?
#
# The value of the tcpdump property, if not None, is the
# filename we'll capture to.
tcpdump = target.fsdb.get('tcpdump')
if tcpdump:
assert not os.path.sep in tcpdump \
and tcpdump != "" \
and tcpdump != os.path.pardir \
and tcpdump != os.path.curdir, \
"Bad filename for TCP dump capture '%s' specified as " \
" value to property *tcpdump*: must not include directory components" % tcpdump
# per ttbd:make_ticket(), colon splits the real username
# from the ticket
owner = target.owner_get().split(":")[0]
assert owner, "BUG? target not owned on power on?"
capfile = os.path.join(target.files_path, owner, tcpdump)
# Because it is in the user's area,
# we assume the user knows what he is doing to overwrite it,
# so we'll remove any first
commonl.rm_f(capfile)
pidfile = os.path.join(target.state_dir, "tcpdump.pid")
logfile = os.path.join(target.state_dir, "tcpdump.log")
cmdline = [
"/usr/sbin/tcpdump", "-U",
"-i", "b%(id)s" % target.kws,
"-w", capfile
]
try:
logf = open(logfile, "a")
target.log.info("Starting tcpdump with: %s", " ".join(cmdline))
p = subprocess.Popen(
cmdline, shell = False, cwd = target.state_dir,
close_fds = True, stdout = logf,
stderr = subprocess.STDOUT)
except OSError as e:
raise RuntimeError("tcpdump failed to start: %s" % e)
ttbl.daemon_pid_add(p.pid) # FIXME: race condition if it died?
with open(pidfile, "w") as pidfilef:
pidfilef.write("%d" % p.pid)
pid = commonl.process_started( # Verify it started
pidfile, "/usr/sbin/tcpdump",
verification_f = os.path.exists,
verification_f_args = ( capfile, ),
timeout = 20, tag = "tcpdump", log = target.log)
if pid == None:
raise RuntimeError("tcpdump failed to start after 20s")
def off(self, target, component):
# Kill tcpdump, if it was started
pidfile = os.path.join(target.state_dir, "tcpdump.pid")
commonl.process_terminate(pidfile, tag = "tcpdump",
path = "/usr/sbin/tcpdump")
# remove the top level device
mode = self._get_mode(target)
if mode == 'physical':
# bring down the lower device
ifname = commonl.if_find_by_mac(target.tags['mac_addr'])
subprocess.check_call(
# flush the IP addresses, bring it down
"/usr/sbin/ip add flush dev %s; "
"/usr/sbin/ip link set dev %s down promisc off"
% (ifname, ifname),
shell = True)
elif mode == 'vlan':
commonl.if_remove_maybe("b%(id)s" % target.kws)
# nothing else; we removed the upper (the vlan device) and on the
# lower, a physical device, we do nothing, as others might be using it
pass
elif mode == 'virtual':
commonl.if_remove_maybe("b%(id)s" % target.kws)
else:
raise AssertionError("Unknown mode %s" % mode)
target.fsdb.set('power_state', 'off')
@staticmethod
def _find_addr(addrs, addr):
for i in addrs:
if i['addr'] == addr:
return i
return None
def get(self, target, _component):
# we know we have created an interface named bNWNAME, so let's
# check it is there
if not os.path.isdir("/sys/class/net/b" + target.id):
return False
mode = self._get_mode(target)
# FIXME: check bNWNAME exists and is up
if mode == 'vlan':
pass
elif mode == 'physical':
pass
elif mode == 'virtual':
pass
else:
raise AssertionError("Unknown mode %s" % mode)
# Verify IP addresses are properly assigned
iface_name = "b" + target.id
addrs = netifaces.ifaddresses(iface_name)
if 'ipv4_addr' in target.kws:
addrs_ipv4 = addrs.get(netifaces.AF_INET, None)
if addrs_ipv4 == None:
target.log.info(
"vlan_pci/%s: off because no ipv4 addresses are assigned"
% iface_name)
return False # IPv4 address not set
addr = self._find_addr(addrs_ipv4, target.kws['ipv4_addr'])
if addr == None:
target.log.info(
"vlan_pci/%s: off because ipv4 address %s not assigned"
% (iface_name, target.kws['ipv4_addr']))
return False # IPv4 address mismatch
prefixlen = ipaddress.IPv4Network(
str('0.0.0.0/' + addr['netmask'])).prefixlen
if prefixlen != target.kws['ipv4_prefix_len']:
target.log.info(
"vlan_pci/%s: off because ipv4 prefix is %s; expected %s"
% (iface_name, prefixlen, target.kws['ipv4_prefix_len']))
return False # IPv4 prefix mismatch
if 'ipv6_addr' in target.kws:
addrs_ipv6 = addrs.get(netifaces.AF_INET6, None)
if addrs_ipv6 == None:
target.log.info(
"vlan_pci/%s: off because no ipv6 address is assigned"
% iface_name)
return False # IPv6 address not set
addr = self._find_addr(addrs_ipv6, target.kws['ipv6_addr'])
if addr == None:
target.log.info(
"vlan_pci/%s: off because ipv6 address %s not assigned"
% (iface_name, target.kws['ipv6_addr']))
return False # IPv6 address mismatch
prefixlen = ipaddress.IPv6Network(str(addr['netmask'])).prefixlen
if prefixlen != target.kws['ipv6_prefix_len']:
target.log.info(
"vlan_pci/%s: off because ipv6 prefix is %s; expected %s"
% (iface_name, prefixlen, target.kws['ipv6_prefix_len']))
return False # IPv6 prefix mismatch
return True
# FIXME: replace tcpdump with an interconnect capture interface
# declare the property we want normal users to be able to set
ttbl.test_target.properties_user.add('tcpdump')
```
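The *vlan_pci.get()* method above reports the network as *on* only when the *bNAME* bridge exists and carries the expected addresses. A standalone sketch of the same IPv4 check, using the *netifaces*/*ipaddress* calls the class already relies on (the interface name and address below are illustrative assumptions):
```python
# Hedged sketch: check a bridge interface for an expected IPv4 address and
# prefix length, mirroring the address verification done in vlan_pci.get().
import ipaddress
import netifaces

def bridge_has_ipv4(ifname, expected_addr, expected_prefix_len):
    # raises ValueError if the interface does not exist
    addrs = netifaces.ifaddresses(ifname).get(netifaces.AF_INET, [])
    for entry in addrs:
        if entry['addr'] != expected_addr:
            continue
        prefix_len = ipaddress.IPv4Network(
            "0.0.0.0/" + entry['netmask']).prefixlen
        return prefix_len == expected_prefix_len
    return False

# hypothetical example: network target 'nwa' is backed by bridge 'bnwa'
print(bridge_has_ipv4("bnwa", "192.168.97.1", 24))
```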
#### File: ttbd/hw-healthmonitor/ttbd-hw-healthmonitor.py
```python
import argparse
import bisect
import collections
import logging
import os
import pprint
import re
import select
import subprocess
import time
import systemd.journal
import systemd.daemon
import ttbl._install
import commonl
usb_root_regex = re.compile("^(?P<bus>[0-9]+)-(?P<port>[0-9]+)$")
def _usb_special_case(path):
if not path.startswith("/sys/bus/usb/drivers"):
return path
filename = os.path.basename(path)
# we only can workaround root-ports, which look like
# /sys/bus/usb/drivers/usb/3-2
match = usb_root_regex.match(filename)
if not match:
return path
# Sometimes /sys/bus/usb/drivers/usb/3-2 (for example) doesn't
# exist because it has been blown to pieces somehow, but there is
# a:
#
# $ find /sys/ -iname usb3-port2
# /sys/devices/pci0000:80/0000:80:01.0/0000:81:00.0/0000:82:00.2/usb3/3-0:1.0/usb3-port2
#
# so if it doesn't exist, we are going to use that one
if os.path.exists(path):
return path
# $ readlink -e /sys/bus/usb/drivers/usb/3-2
# /sys/devices/pci0000:80/0000:80:01.0/0000:81:00.0/0000:82:00.2/usb3/3-2
#
# and when it doesn't exist
#
# $ find /sys/ -iname usb3-port2
# /sys/devices/pci0000:80/0000:80:01.0/0000:81:00.0/0000:82:00.2/usb3/3-0:1.0/usb3-port2
def _find(filename):
for parent, dirs, _files in os.walk("/sys"):
if filename in dirs: # usb3-port2 is a dir
# return just
# /sys/devices/pci0000:80/0000:80:01.0/0000:81:00.0/0000:82:00.2/usb3/3-0:1.0,
# so it is at the same level as
# /sys/devices/pci0000:80/0000:80:01.0/0000:81:00.0/0000:82:00.2/usb3/3-2.
logging.info("%s: doesn't exist, but %s does, dead controller",
path, parent)
return parent
return None
gd = match.groupdict()
return _find("usb" + gd['bus'] + "-port" + gd['port'])
def _driver_rebind(bus_name, driver_name, device_name, strip_generations):
global _ttbd_hw_health_monitor_driver_rebind_path
# let's start by composing the /sys path from the arguments
path = os.path.join("/", "sys", "bus", bus_name, "drivers", driver_name,
device_name)
_path = _usb_special_case(path)
if _path == None:
logging.error("%s: doesn't exist, can't rebind", path)
return
path = _path
if strip_generations:
# Strip children from the device path up because we want to
# rebind a parent device, not the children
#
# For example, for /sys/bus/usb/devices/3-4.1:1.0
# parent is 3-4.1
# grandpa is 3-4
# great-grandpa is usb3
# great-great-grandpa is 0000:05.00.2
#
# we know because
#
# $ readlink -e /sys/bus/usb/devices/3-4.1:1.0
# /sys/devices/pci0000:00/0000:00:03.0/0000:04:00.0/0000:05:00.2/usb3/3-4/3-4.1/3-4.1:1.0
assert strip_generations > 0
# Now see where that points to, that's the
# $ readlink -e /sys/bus/usb/devices/3-4.1:1.0
# /sys/devices/pci0000:00/0000:00:03.0/0000:04:00.0/0000:05:00.2/usb3/3-4/3-4.1/3-4.1:1.0
real_path = os.path.realpath(path).split(os.path.sep)
# So now chop strip-generations on the right, that's our new device
# /sys/devices/pci0000:00/0000:00:03.0/0000:04:00.0/0000:05:00.2
stripped_path = real_path[:-strip_generations]   # list of path components
# got device name
new_device_name = stripped_path[-1]
# Now let's get what bus and driver this device is attached to
# by following /DEVICEPATH/driver
#
# /sys/devices/pci0000:00/0000:00:03.0/0000:04:00.0/0000:05:00.2/driver
driver_path = os.path.realpath(os.path.join(*(
[ "/" ] + stripped_path +[ "driver" ])))
# this will give /sys/bus/BUSNAME/drivers/DRIVERNAME, so let's split
# it and extract the data
driver_path_components = driver_path.split("/")
new_bus_name = driver_path_components[3]
new_driver_name = driver_path_components[5]
logging.info("%s/%s/%s: stripped %d generations yields %s/%s/%s",
bus_name, driver_name, device_name,
strip_generations,
new_bus_name, new_driver_name, new_device_name)
device_name = new_device_name
driver_name = new_driver_name
bus_name = new_bus_name
cmdline = [
"sudo", "-n",
_ttbd_hw_health_monitor_driver_rebind_path,
bus_name, driver_name, device_name
]
try:
logging.info("%s/%s/%s: rebinding with command '%s'",
bus_name, driver_name, device_name,
" ".join(cmdline))
output = subprocess.check_output(cmdline, stderr = subprocess.STDOUT)
except subprocess.CalledProcessError as cpe:
logging.error("%s/%s/%s: rebinding with command '%s' failed: %s",
bus_name, driver_name, device_name,
" ".join(cpe.cmd), cpe.output)
return # well, nothing we can really do...
logging.warning("%s/%s/%s: rebound with command '%s': %s",
bus_name, driver_name, device_name,
" ".join(cmdline), output)
def action_driver_rebind(bus_name, driver_name, device_name,
condition, entry, strip_generations = 0):
"""
Rebind a device to it's driver to reset it
A device that is in a hosed state will be re-bound to its driver
to try to reset it and bring it back to life.
:param str bus_name: name of bus in */sys/bus*
:param str driver_name: name of driver in
*/sys/bus/BUS_NAME/drivers*
:param str device_name: name of the device in
*/sys/bus/BUS_NAME/drivers/DRIVER_NAME*
:param str condition: condition in the configuration given to
:func:`config_watch_add` that caused this call
:param dict entry: Systemd journal entry that caused this call
"""
ts = entry.get('__REALTIME_TIMESTAMP', None)
logging.error("%s: ACTION: reloading driver due to '%s' @ %s",
device_name, condition, ts)
_driver_rebind(bus_name, driver_name, device_name, strip_generations)
_thresholds = collections.defaultdict(list)
def action_driver_rebind_threshold(bus_name, driver_name, device_name,
condition, entry,
max_hits, period, strip_generations = 0):
"""
Rebind a device to its driver to reset it if a condition happens often
When the condition is reported more than *max_hits* time in
*period* seconds, then the device will be reset via driver
rebinding.
See :func:`action_driver_rebind` for information on the common
parameters
:param int period: (in second) amount of time to monitor
:param int max_hits: maximum number of occurrences of the
condition that can happen in a period after which we'd rebind
the device.
"""
global _thresholds
logging.debug("%s/%s/%s rebind_threshold: considering entry %s",
bus_name, driver_name, device_name,
entry)
ts = entry.get('__REALTIME_TIMESTAMP', None)
threshold_name = device_name + "/" + condition
threshold = _thresholds[threshold_name]
bisect.insort(threshold, ts)
ts0 = threshold[0]
tse = threshold[-1]
while (tse - ts0).total_seconds() > period:
# the hits we currently have in the list span a time longer
# than the period, so remove the older ones until we are
# within the period
threshold.pop(0)
ts0 = threshold[0]
logging.warning(
"%s/%s/%s: current queue has %d (max %d) hits "
"in %.1f minutes (max %.1f) for '%s'",
bus_name, driver_name, device_name, len(threshold), max_hits,
(tse - ts0).total_seconds() / 60, period / 60, condition)
if len(threshold) > max_hits:
logging.error("%s/%s/%s: ACTION: reload driver due to: '%s' @ %s "
"causing %d (max %d) hits in %.1f minutes (max %.1f)",
bus_name, driver_name, device_name,
condition, ts,
len(threshold), max_hits,
(tse - ts0).total_seconds() / 60, period / 60)
_driver_rebind(bus_name, driver_name, device_name, strip_generations)
# we start checking from scratch
_thresholds[threshold_name] = []
_kernel_device_regex = re.compile(r"^\+usb:(?P<busno>[0-9]+)-(?P<devno>[0-9]+)(\.[0-9]+)*$")
def action_driver_rebind_threshold_kernel_device(
bus_name, driver_name, device_name,
condition, entry,
max_hits, period, strip_generations = 0):
"""
This is the same as action_driver_rebind_threshold(), but adapted
to the case when the actual /sys/bus/usb/devices/M-N disappears
due to a root port failure.
In this case we get a kernel device name +usb:BUSNUMBER-DEVICENO
(eg: +usb:3-2) which we have to translate to controller
/sys/bus/usb/devices/usb3.
Now we can't just replace it with 3-2 because in some cases that
sysfs node has disappeared.
Note the slight change in configuration language:
>>> config_watch_add("usb", "usb", re.compile("[0-9]+-[0-9]+$"), {
>>> # Case happened where /sys/bus/usb/devices/3-2 disappeared but:
>>>
>>> # May 03 20:44:51 HOSTNAME kernel: usb 3-2: device descriptor read/64, error -110
>>> # Apr 27 22:44:02 ... kernel: usb 3-2: clear tt 4 (85c0) error -71
>>> # Just reload the thing if we get more than five in a minute
>>> 'device descriptor read/64, error -110': (
>>> # 2 is the number of generations we want to strip from the
>>> # device path; because 3-2's parent is usb3, whose
>>> # parent is the actual PCI device we need to reset
>>> action_driver_rebind_threshold_kernel_device, 5, 60, 2
>>> )},
>>> kernel_device = re.compile("\+usb:[0-9]+-[0-9]+$"))
Note the trailing *kernel_device* argument, a regex used to latch
on a kernel device name dynamically.
"""
match = _kernel_device_regex.match(device_name)
if not match:
raise AssertionError("device name %s does not match +usb:M-N[.O]*"
% device_name)
busno = match.groupdict()['busno']
# so now we have /sys/bus/usb/devices/usbBUSNO
realpath = os.path.realpath("/sys/bus/usb/devices/usb" + busno)
if not os.path.exists(realpath):
logging.error("%s: doesn't exist -- can't do anything", realpath)
return
# which is a symlink to /sys/devices/pci0000:00/0000:00:14.0/usb3
parent_dev = os.path.dirname(realpath)
# which is a symlink to /sys/devices/pci0000:00/0000:00:14.0 and
# it's driver is
driver_path = os.path.realpath(parent_dev + "/driver")
# /sys/bus/pci/drivers/xhci_hcd
# ok, so extract now to [ '', 'sys', 'bus', 'pci', 'drivers', 'xhci_hcd', ... ]
_driver_path_parts = driver_path.split('/')
# bus_name = pci, driver_name = xhci_hcd,
# device_name = 0000:00:14.0
_bus_name = _driver_path_parts[3]
_driver_name = _driver_path_parts[5]
_device_name = os.path.basename(parent_dev)
logging.warning("%s/%s/%s mapped to %s/%s/%s",
bus_name, driver_name, device_name,
_bus_name, _driver_name, _device_name)
# and let the other function do it for us
action_driver_rebind_threshold(_bus_name, _driver_name, _device_name,
condition, entry, max_hits, period)
_watch_rules = []
def config_watch_add(bus_name, driver_name, device_name, actions):
r"""
:param str bus_name: name of bus in */sys/bus* to watch
:param str driver_name: name of driver in
*/sys/bus/BUS_NAME/drivers* to watch
:param str device_name: device under
/sys/bus/BUS_NAME/drivers/DRIVER_NAME to watch; if *None*, watch
all of them
:param dict actions: dictionary describing actions to do; key is a
substring of a message, value is a function to call or a tuple
that starts with a function to call and the rest are arguments
to add
The action function has to follow this prototype:
>>> def action_function(bus_name, driver_name, device_name,
condition, entry, *args, **kwargs:
thus, when called, bus_name, driver_name and device_name are all
the names of the entity that is causing it; condition is the
condition string that was matched (the key) and *entry* is the
journal entry which matched. *\*args* and *\*\*kwargs* are the
extra arguments given in the *actions* value tuple.
"""
assert isinstance(bus_name, str)
assert isinstance(driver_name, str)
if device_name:
if isinstance(device_name, str):
_device_name = "/" + device_name
elif isinstance(device_name, re.Pattern):
_device_name = "/" + device_name.pattern
else:
raise AssertionError(
"'device_name' must be string or regex, found %s"
% type(device_name).__name__)
else:
_device_name = ""
assert isinstance(actions, dict)
global _watch_rules
_actions = {}
origin = commonl.origin_get(2)
# verify arguments and transform all the actions to a unique
# form (all have to be a list)
for condition, action in actions.items():
assert isinstance(condition, str), \
"Key passed as condition is not a string"
try:
action_fn = action[0]
_actions[condition] = action
except TypeError:
action_fn = action
_actions[condition] = [ action_fn ]
assert callable(action_fn), \
"Argument passed as action function to condition '%s' " \
"is not callable" % condition
driver_path = os.path.join("/sys/bus", bus_name, "drivers", driver_name)
if not os.path.isdir(driver_path):
logging.warning(
"%s/%s%s @%s: driver path does not exist, will not monitor",
bus_name, driver_name, _device_name, origin)
return
_watch_rules.append((
bus_name, driver_name, device_name, _actions, origin
))
logging.info("%s/%s%s @%s: will monitor",
bus_name, driver_name, _device_name, origin)
# Given a journal entry, check it against the list of stuff we have to
# watch for. note an entry comes as:
#
# {'MESSAGE': u'usb 2-1.2.4: reset full-speed USB device number 17 using ehci-pci',
# 'PRIORITY': 6,
# 'SYSLOG_FACILITY': 0,
# 'SYSLOG_IDENTIFIER': u'kernel',
# '_BOOT_ID': UUID('dc527a86-fa21-4085-bac2-ed4eccf83d0b'),
# '_HOSTNAME': u'some.host.domain',
# '_KERNEL_DEVICE': u'c189:144',
# '_KERNEL_SUBSYSTEM': u'usb',
# '_MACHINE_ID': UUID('2c766c91-79da-41ab-bb1a-2c903adf2211'),
# '_SOURCE_MONOTONIC_TIMESTAMP': datetime.timedelta(2, 43626, 293600),
# '_TRANSPORT': u'kernel',
# '_UDEV_DEVNODE': u'/dev/bus/usb/002/000',
# '_UDEV_SYSNAME': u'2-1.2.4',
# '__CURSOR': 's=9228bb40b9d140a585632aaeaf6c60e5;i=1987771;b=dc527a86fa214085bac2ed4eccf83d0b;m=3263f58acd;t=56c7f257761cc;x=d1b8e5236bc5e591',
# '__MONOTONIC_TIMESTAMP': (datetime.timedelta(2, 43625, 401037),
# UUID('dc527a86-fa21-4085-bac2-ed4eccf83d0b')),
# '__REALTIME_TIMESTAMP': datetime.datetime(2018, 5, 19, 0, 0, 28, 780492)}
#
def _entry_matched(entry, bus_name, driver_name, devname, actions, origin):
msg = entry['MESSAGE']
if '__REALTIME_TIMESTAMP' in entry:
ts = " " + str(entry['__REALTIME_TIMESTAMP'])
else:
ts = ""
# Device messages usually start with 'DRIVERNAME DEVICE: msg', so
# if we have a driver name, we try to match against that
_driver_name = msg.split(None, 1)[0]
if driver_name:
if isinstance(driver_name, str) \
and driver_name == _driver_name:
logging.debug("%s/%s: match on driver name @%s",
driver_name, devname, origin)
elif isinstance(driver_name, re.Pattern) \
and driver_name.match(_driver_name):
logging.debug("%s/%s: match on driver name @%s",
driver_name, devname, origin)
else:
# No driver match
logging.debug("%s: mismatch on driver name (%s vs %s requested) "
"@%s", devname, _driver_name, driver_name, origin)
return
else:
driver_name = _driver_name
found_actions = False
for condition, action in actions.items():
if condition in msg:
action_fn = action[0]
_args = action[1:]
try:
if logging.getLogger().getEffectiveLevel() < logging.DEBUG:
entry_info = ": %s" % pprint.pformat(entry)
else:
entry_info = ""
found_actions = True
if args.dry_run:
logging.error(
"[dry run]%s ACTION %s (%s, %s, %s, %s) @%s%s",
ts, action_fn, bus_name, devname, condition, _args,
origin, entry_info)
else:
logging.info("%s/%s/%s:%s matched entry%s",
bus_name, driver_name, devname, ts,
entry_info)
action_fn(bus_name, driver_name, devname,
condition, entry, *_args)
except Exception as e: # pylint: disable = broad-except
logging.exception(
"%s/%s/%s:%s action function raised uncaught "
"exception: %s",
bus_name, driver_name, devname, ts, e)
if not found_actions:
logging.debug("%s/%s/%s: mismatch on actions @%s",
bus_name, driver_name, devname, origin)
# Given a journal entry, check it against the list of stuff we have to
# watch for. note an entry comes as:
#
# {'MESSAGE': u'usb 2-1.2.4: reset full-speed USB device number 17 using ehci-pci',
# 'PRIORITY': 6,
# 'SYSLOG_FACILITY': 0,
# 'SYSLOG_IDENTIFIER': u'kernel',
# '_BOOT_ID': UUID('dc527a86-fa21-4085-bac2-ed4eccf83d0b'),
# '_HOSTNAME': u'some.host.domain',
# '_KERNEL_DEVICE': u'c189:144',
# '_KERNEL_SUBSYSTEM': u'usb',
# '_MACHINE_ID': UUID('2c766c91-79da-41ab-bb1a-2c903adf2211'),
# '_SOURCE_MONOTONIC_TIMESTAMP': datetime.timedelta(2, 43626, 293600),
# '_TRANSPORT': u'kernel',
# '_UDEV_DEVNODE': u'/dev/bus/usb/002/000',
# '_UDEV_SYSNAME': u'2-1.2.4',
# '__CURSOR': 's=9228bb40b9d140a585632aaeaf6c60e5;i=1987771;b=dc527a86fa214085bac2ed4eccf83d0b;m=3263f58acd;t=56c7f257761cc;x=d1b8e5236bc5e591',
# '__MONOTONIC_TIMESTAMP': (datetime.timedelta(2, 43625, 401037),
# UUID('dc527a86-fa21-4085-bac2-ed4eccf83d0b')),
# '__REALTIME_TIMESTAMP': datetime.datetime(2018, 5, 19, 0, 0, 28, 780492)}
#
def _check_entry(entry):
msg = entry['MESSAGE']
_device_name = entry.get('_UDEV_SYSNAME', None)
_kernel_name = entry.get('_KERNEL_DEVICE', None)
bus_name = None
driver_name = None
device_name = None
actions = None
origin = None
while not _device_name and not _kernel_name:
# If the entry has no device message, then let's try to
# extract it from the message, things like:
#
# usb 3-2-port1: cannot reset (err = -110)',
regex_usb = re.compile("usb (?P<devname>[0-9]+-[0-9]+)-.*:")
m = regex_usb.match(msg)
if m:
_device_name = m.groupdict()['devname']
if _device_name:
logging.warning("guessed USB device %s from message (had "
"no entry for it)", _device_name)
break
logging.debug("ignored deviceless entry: %s",
pprint.pformat(entry))
return
for bus_name, driver_name, device_name, actions, origin \
in _watch_rules:
if device_name and _device_name:
if isinstance(device_name, str) \
and device_name == _device_name:
logging.debug("%s: match on device name @%s",
_device_name, origin)
devname = _device_name
_entry_matched(entry, bus_name, driver_name,
devname, actions, origin)
continue
elif isinstance(device_name, re.Pattern) \
and device_name.match(_device_name):
logging.debug("%s: match on device name @%s",
_device_name, origin)
devname = _device_name
_entry_matched(entry, bus_name, driver_name,
devname, actions, origin)
continue
if device_name and _kernel_name:
# lookup by kernel device name (for example, for USB
# they look like +usb:3-2
if isinstance(device_name, str) \
and device_name == _kernel_name:
logging.debug("%s: match on kernel name @%s",
_kernel_name, origin)
devname = _kernel_name
_entry_matched(entry, bus_name, driver_name,
devname, actions, origin)
continue
elif isinstance(device_name, re.Pattern) \
and device_name.match(_kernel_name):
logging.debug("%s: match on kernel name @%s",
_kernel_name, origin)
devname = _kernel_name
_entry_matched(entry, bus_name, driver_name,
devname, actions, origin)
continue
# Support for -v option to increase verbosity
def _logging_verbosity_inc(level):
if level == 0:
return
if level > logging.DEBUG:
delta = 10
else:
delta = 1
return level - delta
class _action_increase_level(argparse.Action):
def __init__(self, option_strings, dest, default = None, required = False,
nargs = None, **kwargs):
super(_action_increase_level, self).__init__(
option_strings, dest, nargs = 0, required = required,
**kwargs)
#
# Python levels are 50, 40, 30, 20, 10 ... (debug) 9 8 7 6 5 ... :)
def __call__(self, parser, namespace, values, option_string = None):
if namespace.level == None:
namespace.level = logging.ERROR
namespace.level = _logging_verbosity_inc(namespace.level)
logging.addLevelName(50, "C")
logging.addLevelName(40, "E")
logging.addLevelName(30, "W")
logging.addLevelName(20, "I")
logging.addLevelName(10, "D")
# Initialize command line argument parser
arg_parser = argparse.ArgumentParser(
description = __doc__,
formatter_class = argparse.RawDescriptionHelpFormatter)
arg_parser.set_defaults(level = logging.ERROR)
arg_parser.add_argument("-v", "--verbose",
dest = "level",
action = _action_increase_level, nargs = 0,
help = "Increase verbosity")
arg_parser.add_argument("--config-path",
action = "store", dest = "config_path",
default = "/etc/ttbd-hw-healthmonitor",
help = "Path from where to load conf_*.py "
"configuration files (in alphabetic order)")
arg_parser.add_argument("-b", "--bootid",
action = 'store', default = None,
help = "select bootid (from journalctl --list-boots)")
arg_parser.add_argument("--seek-realtime",
action = 'store', default = False,
help = "check from the given time")
arg_parser.add_argument("--seek-head",
action = 'store_true', default = False,
help = "check from the beginning of the boot")
arg_parser.add_argument("-n", "--dry-run",
action = 'store_true', default = False,
help = "only show what it would do")
args = arg_parser.parse_args()
logging.basicConfig(
level = args.level,
format = "%(levelname)s: %(message)s")
#
# Read configuration and decide what to watch
#
_ttbd_hw_health_monitor_driver_rebind_path = \
commonl.ttbd_locate_helper("ttbd-hw-healthmonitor-driver-rebind.py",
ttbl._install.share_path,
log = logging)
logging.debug("Found helper %s", _ttbd_hw_health_monitor_driver_rebind_path)
args.config_path = os.path.expanduser(args.config_path)
if args.config_path != "":
commonl.config_import([ args.config_path ], re.compile("^conf[-_].*.py$"))
journal = systemd.journal.Reader()
journal.log_level(systemd.journal.LOG_INFO)
logging.debug("opened journal")
systemd.daemon.notify("READY=1")
journal.this_boot(args.bootid)
journal.this_machine()
logging.debug("journal: filtering for kernel messages")
journal.add_match(_TRANSPORT = "kernel")
# We don't filter per-subsystem, because some of the messages (like
# USB's "cannot reset") are not bound to it
poller = select.poll()
poller.register(journal, journal.get_events())
# Enter directly to iterate to consume all the records since we booted
if args.seek_head:
journal.seek_head()
elif args.seek_realtime:
journal.seek_realtime(time.mktime(time.strptime(
args.seek_realtime, "%Y-%m-%d %H:%M:%S")))
else:
journal.seek_tail()
_bark_ts0 = time.time()
def _bark_periodically(period, msg):
global _bark_ts0
ts = time.time()
if ts - _bark_ts0 > period: # every five seconds, bark
_bark_ts0 = ts
systemd.daemon.notify("WATCHDOG=1")
if msg:
logging.debug("currently checking: %s", msg)
else:
logging.debug("currently checking")
first_run = True
while True:
if not first_run:
poller.poll(5000)
if journal.process() != systemd.journal.APPEND:
continue
first_run = False
logging.debug("polled")
_bark_periodically(5, "main loop")
for _entry in journal:
logging.log(8, "entry %s", pprint.pformat(_entry))
_check_entry(_entry)
if '__REALTIME_TIMESTAMP' in _entry:
_bark_periodically(5, _entry.get('__REALTIME_TIMESTAMP'))
else:
_bark_periodically(5, _entry)
```
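The daemon above loads watch rules from *conf_\*.py* files in *--config-path* and evaluates them against kernel journal entries. A hedged sketch of such a configuration file follows; the driver name, device regex, condition strings and thresholds are illustrative assumptions, not recommended values:
```python
# conf_50_example.py -- hypothetical watch rules for ttbd-hw-healthmonitor;
# config_watch_add() and the action_* functions are provided by the daemon's
# namespace when it imports this file.
import re

config_watch_add(
    "usb", "usb", re.compile("[0-9]+-[0-9]+$"),
    {
        # rebind the device if this shows up more than 5 times in 60 seconds
        "device descriptor read/64, error -110": (
            action_driver_rebind_threshold, 5, 60,
        ),
        # a single occurrence of this message triggers an immediate rebind
        "device not accepting address": (
            action_driver_rebind,
        ),
    })
```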
#### File: ttbd/ttbl/auth_ldap.py
```python
import collections
import logging
import time
import urllib.parse
import ldap
import commonl
import ttbl
class authenticator_ldap_c(ttbl.authenticator_c):
"""Use LDAP to authenticate users
To configure, create a config file that looks like:
>>> import ttbl.auth_ldap
>>>
>>> add_authenticator(ttbl.auth_ldap.authenticator_ldap_c(
>>> "ldap://URL:PORT",
>>> roles = {
>>> ....
>>> 'role1': { 'users': [ "john", "lamar", ],
>>> 'groups': [ "Occupants of building 3" ]
>>> },
>>> 'role2': { 'users': [ "anthony", "mcclay" ],
>>> 'groups': [ "Administrators",
>>> "Knights who say ni" ]
>>> },
>>> }))
The *roles* dictionary determines *who gets to be an admin* or who gets
access to XYZ resources.
This will make that *john*, *lamar* and any user on the group
*Occupants of building 3* to have the role *role1*.
Likewise for *anthony*, *mcclay* and any user who is a member of
either the group *Administrators* or the group *Knights who say
ni*, they are given role *role2*.
If *'groups'* is *None*, it means the role is given to anyone; they
don't have to be part of any group. This allows granting the role
to anyone who can authenticate with LDAP.
"""
def __init__(self, url, roles = None):
"""
:param str url: URL of the LDAP server
:param dict roles: map of roles to users and groups
"""
if not roles:
roles = {}
assert isinstance(url, str)
assert isinstance(roles, dict)
u = urllib.parse.urlparse(url)
if u.scheme == "" or u.netloc == "":
raise ValueError("%s: malformed LDAP URL?" % url)
self.url = u.geturl()
for role in roles:
if not isinstance(role, str):
raise ValueError("role specification keys must be strings")
for tag in roles[role]:
if not tag in ('users', 'groups'):
raise ValueError(
"subfield for role must be 'users' or 'groups'")
if roles[role][tag] != None:
if not isinstance(roles[role][tag], list):
raise ValueError(
"value of role[%s][%s] must be a "
"list of strings or None" % (role, tag))
for value in roles[role][tag]:
if not isinstance(value, str):
raise ValueError("members of role[%s][%s] must "
"be strings; '%s' is not" %
(role, tag, value))
self.conn = None
self.roles = roles
if ttbl.config.ssl_enabled_check_disregard == False \
and ttbl.config.ssl_enabled == False:
raise RuntimeError("LDAP can't run as HTTPS is disabled")
def __repr__(self):
return self.url
ldap_field_set = set()
def ldap_login_hook(self, record):
"""
Function called by :meth:`login` once a user is authenticated
successfully on LDAP
This function does nothing and is meant for being overloaded
in an inherited class to implement any extra needed
functionality, eg:
>>> class my_auth_c(ttbl.auth_ldap.authenticator_ldap_c):
>>>
>>>     def ldap_login_hook(self, records):
>>>         record = records[0]
>>>         dn = record[0]
>>>         fields = record[1]
>>>
>>>         data = {}
>>>         dnl = ldap.dn.explode_dn(dn)
>>>         for i in dnl:
>>>             if i.startswith("CN="):
>>>                 # CN=Lastname\\, Name -> unescape the value
>>>                 data['name'] = i.split("=", 1)[1].replace("\\", "")
>>>             if i.startswith("DC=") and 'domain' not in data:
>>>                 # we take the first DC= component and store as domain,
>>>                 # ignore the rest from a string such as
>>>                 # u'DC=subdomain2', u'DC=subdomain3', u'DC=company', u'DC=com'
>>>                 data['domain'] = i.split("DC=")[1].replace("\\", "")
>>>
>>>         data['login'] = fields['sAMAccountName'][0]
>>>         return data
:param list record: All the records matching the email given
to :meth:login are passed in *record*, which has the
following structure:
>>> [
>>> ( DN, DICT-OF-FIELDS ),
>>> ( DN1, DICT-OF-FIELDS ),
>>> ( DN2, DICT-OF-FIELDS ),
>>> ...
>>> ]
In most properly configured LDAPs, there will be just ONE
entry for the matching user.
*DN* is a string containing the distinguished name, and
might look as::
CN=Lastnames\\, Firstnames,OU=ORGUNIT,DC=DOMAIN1,DC=DOMAIN2,...
eg::
CN=Doe\\, <NAME>,OU=Staff,DC=company,DC=com
Dict of fields is a dictionary of all the fields listed in
:data:`ldap_field_set` plus *sAMAccountName*, *mail* and
*memberOf* (which :meth:`login` below always queries).
Each field is a list of values; in some cases there might be only
one, in others, multiple.
:returns: dictionary keyed by strings of fields we want the
user database to contain; only int, float, bool and strings
are allowed. Key name *roles* is reserved.
"""
return {}
def login(self, email, password, **kwargs):
"""
Validate a email|token/password combination and pull which roles it
has assigned
:returns: set listing the roles the token/password combination
has according to the configuration
:rtype: set
:raises: authenticator_c.invalid_credentials_e if the
token/password is not valid
:raises: authenticator_c.error_e if any kind of error
during the process happens
"""
assert isinstance(email, str)
assert isinstance(password, str)
# Always use a new connection, so it doesn't get invalidated
# after a while
self.conn = ldap.initialize(self.url)
self.conn.set_option(ldap.OPT_REFERRALS, 0)
# let the connection die reasonably fast so a new one is
# re-opened if the peer killed it.
self.conn.set_option(ldap.OPT_NETWORK_TIMEOUT, 5)
record = None
# First bind to LDAP and search the user's email
try:
ldap_fields = set([ 'sAMAccountName', 'mail', 'memberOf' ])
# add anything else the admin has said
ldap_fields.update(self.ldap_field_set)
self.conn.simple_bind_s(email, password.encode('utf8'))
# search_s is picky in the field list; has to be a list,
# can't be a set
record = self.conn.search_s(
"", ldap.SCOPE_SUBTREE, 'mail=%s' % email, list(ldap_fields))
self.conn.unbind_s()
except ldap.INVALID_CREDENTIALS as e:
raise self.invalid_credentials_e(
"%s: invalid credentials for LDAP %s: %s"
% (email, self, e))
except Exception as e:
logging.exception(e)
raise self.error_e(
"%s: generic error in LDAP %s: %s"
% (email, self, e))
token_roles = set()
# So the token/password combination exists and is valid, so
# now let's see what roles we need to assign the user
# depending on if it shows in the user list for that role
for role_name, role in list(self.roles.items()):
if email in role.get('users', []):
token_roles.add(role_name)
# Now let's check groups -- pull the LDAP groups from the record
data = {}
# get data from record
# record is a list of entries returned by conn.search_s
# conn.search_s -> conn.search_ext_s
# "Each result tuple is of the form (dn, attrs) where dn is a
# string containing the DN (distinguished name) of the entry,
# and attrs is a dictionary containing the attributes
# associated with the entry." from
# http://www.python-ldap.org/doc/html/ldap.html#ldap.LDAPObject.search_ext_s
if record and record[0] and record[0][1]:
data_record = record[0][1]
for key in list(data_record.keys()):
# here we extract the information from the dict, which is a list;
# if we have only 1 element, we remove the list object
data[key] = data_record[key][0] \
if len(data_record[key]) == 1 else data_record[key]
# the intent for memberOf here is to strip all the LDAP fields from
# each group entry and save only the group name; the name is between
# CN=%GROUP NAME%,DC=...
groups = []
for group in data_record.get('memberOf', []):
group = group.decode('utf-8')
tmp = group.split(",")
for i in tmp:
if i.startswith('CN='):
group_name = i.replace('CN=', '')
groups.append(group_name)
break
groups = set(groups)
# Given the group list @groups, check which more roles we
# need to add based on group membership
for role_name, role in self.roles.items():
role_groups = role.get('groups', [])
if role_groups == None:
# any valid user can take this role
token_roles.add(role_name)
elif set(role_groups) & groups:
# the LDAP records describes groups that are also in
# the list of acceptable groups for this role
token_roles.add(role_name)
data = self.ldap_login_hook(record)
assert isinstance(data, dict), \
"%s.ldap_login_hook() returned '%s', expected dict" \
% (type(self).__name__, type(data))
assert 'roles' not in data, \
"%s.ldap_login_hook() returned a dictionary with a 'roles'" \
" field, which is reserved" % type(self).__name__
data['roles'] = token_roles
return data
class ldap_map_c(object):
"""General LDAP mapper
This objects allows maps and caches entities in an LDAP database, to
speed up looking up of values from other values.
For example, to get the *sAMAccountName* based on the *displayName*:
>>> account_name = self.lookup("Some User", 'displayName', 'sAMAccountName')
looks up an LDAP entity that has *Some User* as field
*displayName* and returns its *sAMAccountName*.
This does the same, but caches the results so that the next
time it looks it up, it doesn't need to hit LDAP:
>>> account_name = self.lookup_cached("Some User", 'displayName', 'sAMAccountName')
this object caches the objects, as we assume LDAP behaves
mostly as a read-only database.
:param str url: URL of the LDAP server; in the form
*ldap[s]://[BIND_USERNAME[:BIND_PASSWORD]]@HOST:PORT*.
The bind username and password can be specified in the
arguments below (eg: when either contains a *@* or *:*); they
follow the same rules for password discovery.
:param str bind_username: (optional) login for binding to
LDAP; might not be needed in all setups.
:param str bind_password: (optional) password to binding to
LDAP; might not be needed in all setups.
Will be handled by :func:`commonl.password_get`, so
passwords such as:
- *KEYRING* will ask the accounts keyring for the password
for service *url* for username *bind_username*
- *KEYRING:SERVICE* will ask the accounts keyring for the password
for service *SERVICE* for username *bind_username*
- *FILENAME:PATH* will read the password from filename *PATH*.
otherwise is considered a hardcoded password.
:param int max_age: (optional) number of seconds each cached
entry is to live. Once an entry is older than this, the LDAP
server is queried again for that entry.
"""
class error_e(ValueError):
pass
class invalid_credentials_e(error_e):
pass
def __init__(self, url,
bind_username = None, bind_password = None,
max_age = 200):
assert isinstance(url, str)
assert bind_username == None or isinstance(bind_username, str)
assert bind_password == None or isinstance(bind_password, str)
assert max_age > 0
url_parsed = urllib.parse.urlparse(url)
if url_parsed.scheme != "ldap" or url_parsed.netloc == "":
raise ValueError("%s: malformed LDAP URL?" % url)
self.url = commonl.url_remove_user_pwd(url_parsed)
if bind_username == None:
self.bind_username = url_parsed.username
else:
self.bind_username = bind_username
if bind_password == None:
self.bind_password = url_parsed.password
else:
self.bind_password = <PASSWORD>
self.bind_password = <PASSWORD>(self.url, self.bind_username,
self.bind_password)
# dictionary of [ field_lookup, field_report ] = {
# VALUE: ( TIMESTAMP, REPORTED_FIELD ),
# }
self._cache = collections.defaultdict(dict)
self.conn = None
#: maximum number of seconds an entry will live in the cache
#: before it is considered old and refetched from the servers.
self.max_age = max_age
def _conn_setup(self):
if self.conn:
return
self.conn = ldap.initialize(self.url)
self.conn.set_option(ldap.OPT_REFERRALS, 0)
# let the connection die reasonably fast so a new one is
# re-opened if the peer killed it.
self.conn.set_option(ldap.OPT_NETWORK_TIMEOUT, 5)
self.conn.simple_bind_s(self.bind_username, self.bind_password)
def lookup(self, what, field_lookup, field_report):
"""
Lookup the first LDAP record whose field *field_lookup*
contains a value of *what*
:returns: the list of matching records (each a *( dn, fields )*
tuple, as returned by the LDAP search); empty if nothing
matched.
"""
assert isinstance(what, str)
assert isinstance(field_lookup, str)
assert isinstance(field_report, str)
while True: # retry
try:
self._conn_setup()
record = self.conn.search_s(
"", ldap.SCOPE_SUBTREE, '%s=%s' % (field_lookup, what),
[ field_lookup, field_report ])
return record
except ldap.INVALID_CREDENTIALS as e:
raise self.invalid_credentials_e(
"%s: invalid credentials for LDAP %s=%s: %s"
% (self.url, field_lookup, what, e))
except (ldap.error, ldap.CONNECT_ERROR) as e:
logging.warning("LDAP: connection error, retrying: %s / %s",
type(e), e)
# ok, reinit the connection, rebind, retry
self.conn = None
continue
except Exception as e:
logging.exception("error %s: %s", type(e), e)
raise self.error_e(
"%s: generic error in LDAP searching %s=%s: %s"
% (self.url, field_lookup, what, e))
def lookup_cached(self, value, field_lookup, field_report):
"""
Same operation as :meth:`lookup`; however, it caches the
result, so if the last lookup is younger than :data:`max_age`,
said result is used. Otherwise a new LDAP lookup is done and
the value cached.
"""
assert isinstance(value, str)
assert isinstance(field_lookup, str)
assert isinstance(field_report, str)
cache = self._cache[( field_lookup, field_report)]
if value in cache:
# hit in the cache
ts, mapped_value = cache[value]
now = time.time()
if now - ts < self.max_age:
return mapped_value # still fresh, use it
# cache entry is stale, delete it and lookup
del cache[value]
records = self.lookup(value, field_lookup, field_report)
# returns a list of records, so let's check each, although
# most likely it'll be only one
now = time.time()
for _dn, record in records:
# displayName is also a list of names, so we match on one
# that contains exactly the name we are looking for
mapped_value = record[field_report][0]
if value in record[field_lookup]:
cache[value] = ( now, mapped_value )
return mapped_value
# nothing found, so bomb it.
return None
```
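Beyond the authenticator, *ldap_map_c* can be used on its own for cached attribute lookups. A hedged usage sketch; the server URL, bind account and password file are illustrative assumptions (the field names are the ones documented above):
```python
# Hedged sketch: cached LDAP attribute lookups with ldap_map_c.
import ttbl.auth_ldap

ldap_map = ttbl.auth_ldap.ldap_map_c(
    "ldap://ldap.example.com:389",          # hypothetical server
    bind_username = "svc-ttbd",             # hypothetical bind account
    # FILENAME:PATH tells commonl.password_get() to read the password from a file
    bind_password = "FILENAME:/etc/ttbd-production/ldap.pwd",
    max_age = 200)

# the first call queries LDAP; repeats within max_age seconds use the cache
account = ldap_map.lookup_cached("Some User", "displayName", "sAMAccountName")
print(account)
```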
#### File: ttbd/ttbl/auth_userdb.py
```python
import errno
import hashlib
import os
import stat
import ttbl
class driver(ttbl.authenticator_c):
"""Authenticate users from a local database directory
To configure, create a database directory where a file per user
will be created, describing the user's roles and hashed password;
ensure the group *ttbd* can read it and also new files created on
it, and it is not available to other users (mode *02770*)::
# install -g ttbd -m 2770 -d /etc/ttbd-production/userdb
Configure in any server :ref:`configuration file
<ttbd_configuration>` such as
``/etc/ttbd-production/conf_04_auth.py``:
>>> import ttbl.auth_userdb
>>> import ttbl.config
>>>
>>> ttbl.config.add_authenticator(ttbl.auth_userdb.driver("/etc/ttbd-production/userdb"))
Restart to read the configuration::
# systemctl restart ttbd@production
- Add or modify new users with::
$ ttbd-passwd -p /etc/ttbd-production/userdb user1
Password for user1:
$ ttbd-passwd -p /etc/ttbd-production/userdb user2 password2
$ ttbd-passwd -p /etc/ttbd-production/userdb user3 password3 -r admin
Note how the password can be specified on the command line or
queried in stdin. Roles can be added with the *-r* option. See
*--help* for more options. After this::
$ ls -l /etc/ttbd-production/userdb
total 12
-rw-r----- 1 LOGIN ttbd 86 May 13 21:37 user1
-rw-r----- 1 LOGIN ttbd 86 May 13 21:38 user2
-rw-r----- 1 LOGIN ttbd 92 May 13 21:38 user3
$ cat /etc/ttbd-alloc/userdb/user3
user,admin:sha256:892147:64:13bbb3e5deaa8f42fa10b233278c7b480549d7c7cfa085bf9203f867c7ec3af2
- Delete users USERNAME by removing file
*/etc/ttbd-production/userdb/USERNAME*
The database can be placed in any directory wanted/needed.
The fields are in a single line, separated by colons:
- list of user roles (simple strings with chars *[_a-zA-Z0-9]*,
separated by commas; see :ref:`access control
<target_access_control>` for a description on roles.
- algorithm used to hash (from the list of names reported by
python's *hashlib.algorithms_available*).
- salt value (integer)
- hexdigest len (integer)
- hexdigest (obtained by hashing a string composed of joining the
salt as a string, the username and the password with the hashing
algorithm), converting to a hex representation and taking the
first *hexdigest len* characters of it.
"""
def __init__(self, userdb):
"""
:param str userdb: path to directory where the user database
is stored (one file per user)
"""
assert isinstance(userdb, str)
if not os.path.isdir(userdb):
raise AssertionError(
"auth_userdb: %s: path is not a directory" % userdb)
st = os.stat(userdb)
if st.st_mode & stat.S_IRWXO:
raise AssertionError(
"auth_userdb: %s: path is accessible by other than"
" user/group (%04o); fix with 'chmod o= %s'"
% (userdb, st.st_mode, userdb))
#: path for the user database
#:
#: This is a directory,
self.userdb_path = userdb
def __repr__(self):
return "user database @%s" % self.userdb_path
def login(self, username, password, **kwargs):
"""
Validate a username/password combination and pull which roles it
has assigned in the user db :attr:`userdb_path`
:param str username: name of user to validate
:param str password: <PASSWORD> validate
:returns set: set listing the roles the token/password combination
has according to the configuration
:raises: :exc:`ttbl.authenticator_c.invalid_credentials_e` if the
token/password is not valid
:raises: :exc:`ttbl.authenticator_c.error_e` if any kind of error
during the process happens
"""
assert isinstance(username, str)
assert isinstance(password, str)
data_path = os.path.join(self.userdb_path, username)
try:
with open(data_path, "r") as f:
data = f.read()
except IOError as e:
if e.errno == errno.ENOENT:
raise self.unknown_user_e("unknown user '%s'" % username)
raise
# ttbd-passwd generates five fields separated by :
datal = data.strip().split(":")
if len(datal) != 5:
raise self.unknown_user_e(
"invalid user '%s': corrupted DB?" % username)
try:
roles = datal[0].split(",")
algorithm = datal[1]
salt = datal[2]
digest_len = int(datal[3])
hashed_password = datal[4]
except Exception as e:
raise self.unknown_user_e(
"invalid user '%s': corrupted data? %s" % (username, e))
hashed_password_input = hashlib.new(
algorithm,
(salt + username + password).encode("utf-8"))
hashed_password_input = hashed_password_input.hexdigest()[:digest_len]
if hashed_password_input != hashed_password:
raise self.invalid_credentials_e(
"invalid password for user '%s'" % username)
return set(roles)
```
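For reference, the entry format that *login()* verifies can be reproduced in a few lines. This is a hedged sketch of what *ttbd-passwd* conceptually stores, not the tool itself; the salt and digest-length choices are illustrative assumptions:
```python
# Hedged sketch: build a user database entry in the documented
# roles:algorithm:salt:digest_len:digest format, matching what
# driver.login() above recomputes and compares against.
import hashlib
import random

def make_entry(username, password, roles = ("user",),
               algorithm = "sha256", digest_len = 64):
    salt = str(random.randint(0, 999999))    # illustrative salt choice
    digest = hashlib.new(
        algorithm, (salt + username + password).encode("utf-8")
    ).hexdigest()[:digest_len]
    return ":".join([ ",".join(roles), algorithm, salt,
                      str(digest_len), digest ])

# the result would be written to USERDB_DIR/USERNAME, readable by group ttbd only
print(make_entry("user3", "password3", roles = ("user", "admin")))
```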
#### File: ttbd/ttbl/dhcp.py
```python
import os
import pwd
import shutil
import stat
import subprocess
import commonl
import ttbl
import ttbl.config
import ttbl.power
import ttbl.pxe
#: Directory where the TFTP tree is located
tftp_dir = "/var/lib/tftpboot"
tftp_prefix = "ttbd" + ttbl.config.instance_suffix
def template_rexpand(text, kws):
"""
Expand Python keywords in a template repeatedly until none are
left.
if there are substitution fields in the config text,
replace them with the keywords; repeat until there are none left
(as some of the keywords might bring in new substitution keys).
Stop after ten iterations
"""
assert isinstance(text, str)
assert isinstance(kws, dict)
count = 0
while '%(' in text:
text = text % kws
count += 1
if count > 9:
raise RuntimeError('after ten iterations could not resolve '
'all configuration keywords')
return text
def _tag_get_from_ic_target(kws, tag, ic, target, default = ""):
# get first from the target
if tag in target.tags:
value = target.tags[tag]
elif tag in ic.tags:
value = ic.tags[tag]
else:
value = default
kws[tag] = value % kws
# FIXME: use daemon_pc
class pci(ttbl.power.impl_c):
class error_e(Exception):
pass
class start_e(error_e):
pass
dhcpd_path = "/usr/sbin/dhcpd"
"""
This class implements a power control unit that can be made part
of a power rail for a network interconnect.
When turned on, it starts a DHCP server to provide addresses on
the network.
With a configuration such as::
import ttbl.dhcp
ttbl.test_target.get('nwa').pc_impl.append(
ttbl.dhcp.pci("fc00:db20:35b:7399::5", "fc00:e968:6179::de52:7100", 24,
"fc00:e968:6179::de52:7100", "fc00:db20:35b:7399::5", ip_mode = 6)
)
It would start a DHCP IPv6 server on fc00:db20:35b:7399::5, network
fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/24 serving IPv6 addresses from :2 to :fe.
"""
def __init__(self,
if_addr,
if_net,
if_len,
ip_addr_range_bottom,
ip_addr_range_top,
mac_ip_map = None,
allow_unmapped = False,
debug = False,
ip_mode = 4):
assert ip_mode in (4, 6)
ttbl.power.impl_c.__init__(self)
self.allow_unmapped = allow_unmapped
if mac_ip_map == None:
self._mac_ip_map = {}
else:
self._mac_ip_map = mac_ip_map
# FIXME: move to power_on_do, to get this info from target's tags
self._params = dict(
ip_mode = ip_mode,
tftp_prefix = tftp_prefix,
if_net = if_net,
if_addr = if_addr,
if_len = if_len,
ip_addr_range_bottom = ip_addr_range_bottom,
ip_addr_range_top = ip_addr_range_top,
dhcp_architecture_types = self._mk_pxe_arch_type_config(),
)
self.ip_mode = ip_mode
if ip_mode == 4:
self._params['if_netmask'] = commonl.ipv4_len_to_netmask_ascii(if_len)
if allow_unmapped:
self._params["allow_known_clients"] = "allow known clients;"
else:
self._params["allow_known_clients"] = "# all clients allowed"
self.debug = debug
self.log = None
self.target = None # we find this when power_*_do() is called
self.state_dir = None
self.pxe_dir = None
self.dhcpd_pidfile = None
@staticmethod
def _mk_pxe_arch_type_config():
# Given information in the ttbl.pxe.architecture member of this
# class, generate a block of DHCP config language that looks
# like:
#
# if option architecture-type = 00:00 {
# filename "%(tftp_prefix)s/lpxelinux.0";
# } elsif option architecture-type = 00:09 {
# filename "%(tftp_prefix)s/efi-x86_64/syslinux.efi";
# } elsif option architecture-type = 00:07 {
# filename "%(tftp_prefix)s/efi-x86_64/syslinux.efi";
# } elsif option architecture-type = 00:06 {
# filename "%(tftp_prefix)s/efi-x86/syslinux.efi";
# } else {
# filename "%(tftp_prefix)s/lpxelinux.0";
# }
first = True
res = ""
for arch_name, arch_data in ttbl.pxe.architectures.items():
if first:
if_s = "if"
first = False
else:
if_s = "} elsif"
rfc_code = arch_data['rfc_code']
boot_filename = arch_data['boot_filename']
res += """\
%s option architecture-type = %s {
filename "%s/%s/%s";
""" % (if_s, rfc_code, tftp_prefix, arch_name, boot_filename)
res += """\
} else {
filename "%s/lpxelinux.0";
}
""" % tftp_prefix
return res
def _dhcp_conf_write(self, f):
kws = dict(self._params)
# generate DHCP configuration file based on hackish templating
self.log.info(
"%(if_name)s: IPv%(ip_mode)d addr/net/mask "
"%(if_addr)s/%(if_net)s/%(if_len)s", self._params)
if self.ip_mode == 4:
# We only do PXE over ipv4
# FIXME: make it so using pxelinux is a configuration template
# (likewise on the tftp side, so we can switch to EFI boot or
# whatever we want)
# %(dhcp_architecture_types)s is the output of
# _mk_pxe_arch_type_config()
f.write("""\
option space pxelinux;
option pxelinux.magic code 208 = string;
option pxelinux.configfile code 209 = text;
option pxelinux.pathprefix code 210 = text;
option pxelinux.reboottime code 211 = unsigned integer 32;
# To be used in the pxeclients class
option architecture-type code 93 = unsigned integer 16;
subnet %(if_net)s netmask %(if_netmask)s {
pool {
%(allow_known_clients)s
range %(ip_addr_range_bottom)s %(ip_addr_range_top)s;
}
class "pxeclients" {
match if substring (option vendor-class-identifier, 0, 9) = "PXEClient";
# http://www.syslinux.org/wiki/index.php?title=PXELINUX#UEFI
%(dhcp_architecture_types)s
# Point to the TFTP server, which is the same as this
next-server %(if_addr)s;
}
}
""" % self._params)
else:
f.write("""\
# This one line must be outside any bracketed scope
option architecture-type code 93 = unsigned integer 16;
subnet6 %(if_net)s/%(if_len)s {
range6 %(ip_addr_range_bottom)s %(ip_addr_range_top)s;
class "pxeclients" {
match if substring (option vendor-class-identifier, 0, 9) = "PXEClient";
# http://www.syslinux.org/wiki/index.php?title=PXELINUX#UEFI
%(dhcp_architecture_types)s
# Point to the TFTP server, which is the same as this
# next-server %(if_addr)s;
}
}
""" % self._params)
# Now, enumerate the targets that are in this local
# configuration and figure out what's their IP address in
# this network; create a hardcoded entry for them.
#
# FIXME: This leaves a gap, as targets in other servers could
# be connected to this network. Sigh.
for target in ttbl.test_target.known_targets():
interconnects = target.tags.get('interconnects', {})
ic = self.target
boot_ic = target.tags.get('pos_boot_interconnect', None)
if boot_ic == None:
ic.log.info('%s: target has no "pos_boot_interconnect" '
'tag/property defined, ignoring' % target.id)
continue
# FIXME: these two checks shall be consistency done when
# the target is being added
if not boot_ic in target.tags['interconnects']:
raise RuntimeError('%s: target does not belong to the '
'boot interconnect "%s" defined in tag '
'"pos_boot_interconnect"'
% (target.id, boot_ic))
boot_ic_target = ttbl.test_target(boot_ic)
if boot_ic_target == None:
raise RuntimeError('%s: this target\'s boot interconnect %s '
'defined in "pos_boot_interconnect" tag '
'is not available in this server'
% (target.id, boot_ic))
if not 'bsp' in target.tags:
bsps = list(target.tags.get('bsps', {}).keys())
if bsps:
kws['bsp'] = sorted(bsps)[0]
kws.update(dict(
ipv4_gateway = ic.tags.get('ipv4_gateway', ""),
ipv4_netmask = commonl.ipv4_len_to_netmask_ascii(
ic.tags['ipv4_prefix_len']),
name = target.id,
))
# There might be a prefix to the path to the boot kernel and
# initrd; we let the target override it and default to the
# network's or nothing
# FIXME: need v6 nfs_server and http_url
_tag_get_from_ic_target(kws, 'pos_http_url_prefix', ic, target)
_tag_get_from_ic_target(kws, 'pos_nfs_server', ic, target)
_tag_get_from_ic_target(kws, 'pos_nfs_path', ic, target)
for ic_id, interconnect in list(interconnects.items()):
if '#' in ic_id:
real_ic_id, instance = ic_id.split("#", 1)
kws['hostname'] = target.id + "-" + instance
else:
real_ic_id = ic_id
kws['hostname'] = target.id
if real_ic_id != self.target.id:
continue
kws['mac_addr'] = interconnect.get('mac_addr', None)
kws['ipv4_addr'] = interconnect.get('ipv4_addr', None)
kws['ipv6_addr'] = interconnect.get('ipv6_addr', None)
if self.ip_mode == 4:
config = """\
host %(hostname)s {
hardware ethernet %(mac_addr)s;
fixed-address %(ipv4_addr)s;
option host-name "%(hostname)s";
# note how we are forcing NFSv4, as it might default to v2
# FIXME: parameter?
# Also soft mounts, more resilient for our use so we can
# recover in some cases more easily
option root-path "%(pos_nfs_server)s:%(pos_nfs_path)s,soft,nfsvers=4";
}
"""
else:
config = """\
host %(hostname)s {
hardware ethernet %(mac_addr)s;
fixed-address6 %(ipv6_addr)s;
option host-name "%(hostname)s";
# note how we are forcing NFSv4, as it might default to v2
# FIXME: parameter?
# Also soft mounts, more resilient for our use so we can
# recover in some cases more easily
# FIXME: pos_nfs_server6?
option root-path "%(pos_nfs_server)s:%(pos_nfs_path)s,soft,nfsvers=4";
}
"""
f.write(template_rexpand(config, kws))
def _dhcpd_start(self):
# Fire up the daemons
dhcpd_leases_name = os.path.join(self.state_dir, "dhcpd.leases")
# Create the leases file if it doesn't exist
with open(dhcpd_leases_name, 'a'):
# touch the access/modify time to now
os.utime(dhcpd_leases_name, None)
if self.ip_mode == 4:
ip_mode = "-4"
else:
ip_mode = "-6"
args = [
# Requires CAP_NET_BIND_SERVICE CAP_NET_ADMIN
#"strace", "-f", "-s2048", "-o/tmp/kk.log",
"dhcpd", "-d", "-q",
# Run it in foreground, so the process group owns it and
# kills it when exiting
"-f",
ip_mode,
"-cf", os.path.join(self.state_dir, "dhcpd.conf"),
"-lf", dhcpd_leases_name,
"-pf", self.dhcpd_pidfile,
self._params['if_name'],
]
logfile_name = os.path.join(self.state_dir, "dhcpd.log")
so = open(logfile_name, "wb")
try:
subprocess.Popen(args, shell = False, cwd = self.state_dir,
close_fds = True,
stdout = so, stderr = subprocess.STDOUT)
except OSError as e:
raise self.start_e("DHCPD failed to start: %s" % e)
pid = commonl.process_started(
self.dhcpd_pidfile, self.dhcpd_path,
verification_f = os.path.exists,
verification_f_args = (self.dhcpd_pidfile,),
tag = "dhcpd", log = self.log)
# systemd might complain with
#
# Supervising process PID which is not our child. We'll most
# likely not notice when it exits.
#
# Can be ignored
if pid == None:
raise self.start_e("dhcpd failed to start")
ttbl.daemon_pid_add(pid) # FIXME: race condition if it died?
def _init_for_process(self, target):
# These are the entry points we always need to initialize, we
# might be in a different process
if self.log == None:
self.log = target.log
self.state_dir = os.path.join(target.state_dir,
"dhcpd-%d" % self.ip_mode)
self.pxe_dir = os.path.join(tftp_dir, tftp_prefix)
self.dhcpd_pidfile = os.path.join(self.state_dir, "dhcpd.pid")
def on(self, target, _component):
"""
Start DHCPd servers on the network interface
described by `target`
"""
if self.target == None:
self.target = target
else:
assert self.target == target
# FIXME: detect @target is an ipv4 capable network, fail otherwise
self._init_for_process(target)
# Create runtime directories where we place everything based
# on the information in ttbl.pxe.architectures
shutil.rmtree(self.state_dir, ignore_errors = True)
os.makedirs(self.state_dir)
ttbl.pxe.setup_tftp_root(os.path.join(tftp_dir, tftp_prefix))
# We set the parameters in a dictionary so we can use it to
# format strings
# FUGLY; relies on ttbl.conf_00_lib.vlan_pci renaming the
# network interfaces like this.
self._params['if_name'] = "b" + target.id
# FIXME: if we get the parameters from the network here, we
# have target -- so we don't need to set them on init
with open(os.path.join(self.state_dir, "dhcpd.conf"), "w") as f:
self._dhcp_conf_write(f)
# FIXME: before start, filter out leases file, anything in the
# leases dhcpd.leases file that has a "binding state active"
# shall be kept ONLY if we still have that client in the
# configuration...or sth like that.
# FIXME: rm old leases file, overwrite with filtered one
self._dhcpd_start()
def off(self, target, _component):
if self.target == None:
self.target = target
else:
assert self.target == target
self._init_for_process(target)
commonl.process_terminate(self.dhcpd_pidfile,
path = self.dhcpd_path, tag = "dhcpd")
def get(self, target, _component):
if self.target == None:
self.target = target
else:
assert self.target == target
self._init_for_process(target)
dhcpd_pid = commonl.process_alive(self.dhcpd_pidfile, self.dhcpd_path)
if dhcpd_pid != None:
return True
else:
return False
# power_on_pre_pos_setup has moved!
def power_on_pre_pos_setup(target):
target.log.warning(
"UPDATE configuration: power_on_pre_pos_setup has moved to ttbl.pxe")
ttbl.pxe.power_on_pre_pos_setup(target)
```
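
For context, a minimal configuration sketch of how a driver like the one above could be wired into an interconnect target's power rail; only the constructor keywords (`if_addr`, `if_net`, `if_len`, `ip_addr_range_bottom`, `ip_addr_range_top`, `ip_mode`) come from the code above, while the module/class name `ttbl.dhcp.pc`, the component name `dhcpd`, the target name and all addresses are assumptions for illustration. Registration mirrors the `interface_add()` pattern used elsewhere in this tree.

```python
import ttbl
import ttbl.config
import ttbl.dhcp    # assumed module name for the driver above
import ttbl.power

# an interconnect target representing the test network
interconnect = ttbl.test_target("nw-a")
interconnect.interface_add(
    "power",
    ttbl.power.interface(
        # serve DHCP/PXE on the interconnect's interface when the
        # network target is powered on; addresses are examples only
        dhcpd = ttbl.dhcp.pc(
            if_addr = "192.168.97.1",
            if_net = "192.168.97.0",
            if_len = 24,
            ip_addr_range_bottom = "192.168.97.10",
            ip_addr_range_top = "192.168.97.20",
            ip_mode = 4),
    ))
# tags consumed by the DHCP configuration generator above
ttbl.config.target_add(
    interconnect,
    tags = dict(ipv4_addr = "192.168.97.1", ipv4_prefix_len = 24))
```
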
#### File: ttbd/ttbl/images.py
```python
import codecs
import collections
import copy
import errno
import hashlib
import json
import numbers
import os
import subprocess
import time
import serial
import commonl
import ttbl
import ttbl.store
class impl_c(ttbl.tt_interface_impl_c):
"""Driver interface for flashing with :class:`interface`
Power control on different components can be done before and after
flashing; the process is executed in the following order:
- pre power sequence of power components
- flash
- post power sequence of power components
:param list(str) power_cycle_pre: (optional) before flashing,
power cycle the target. Argument is a list of power rail
component names.
- *None* (default) do not power cycle
- *[]*: power cycle all components
- *[ *COMP*, *COMP* .. ]*: list of power components to power
cycle
From this list, anything in :data:power_exclude will be
excluded.
:param list(str) power_sequence_pre: (optional) FIXME
:param list(str) power_sequence_post: (optional) FIXME
:param list(str) consoles_disable: (optional) before flashing,
disable consoles and then re-enable them. Argument is a list of
console names that need disabling and then re-enabling.
:param int estimated_duration: (optional; default 60) seconds the
imaging process is believed to take. This can let the client
know how long to wait for before declaring it took too long due
to other issues out of server's control (eg: client to server
problems).
:param str log_name: (optional, defaults to image name)
string to use to generate the log file name (*flash-NAME.log*);
this is useful for drivers that are used for multiple images,
where it is not clear which one will it be called to flash to.
"""
def __init__(self,
power_sequence_pre = None,
power_sequence_post = None,
consoles_disable = None,
log_name = None,
estimated_duration = 60):
assert isinstance(estimated_duration, int)
assert log_name == None or isinstance(log_name, str)
commonl.assert_none_or_list_of_strings(
consoles_disable, "consoles_disable", "console name")
# validation of this one by ttbl.images.interface._target_setup
self.power_sequence_pre = power_sequence_pre
self.power_sequence_post = power_sequence_post
if consoles_disable == None:
consoles_disable = []
self.parallel = False # this class can't do parallel
self.consoles_disable = consoles_disable
self.estimated_duration = estimated_duration
self.log_name = log_name
ttbl.tt_interface_impl_c.__init__(self)
def target_setup(self, target, iface_name, component):
target.fsdb.set(
"interfaces.images." + component + ".estimated_duration",
self.estimated_duration)
if self.power_sequence_pre:
target.power.sequence_verify(
target, self.power_sequence_pre,
f"flash {component} pre power sequence")
if self.power_sequence_post:
target.power.sequence_verify(
target, self.power_sequence_post,
f"flash {component} post power sequence")
def flash(self, target, images):
"""
Flash *images* onto *target*
:param ttbl.test_target target: target where to flash
:param dict images: dictionary keyed by image type of the
files (in the servers's filesystem) that have to be
flashed.
The implementation assumes, per configuration, that this
driver knows how to flash the images of the given type (hence
why it was configured) and shall abort if given an unknown
type.
If multiple images are given, they shall be (when possible)
flashed all at the same time.
"""
assert isinstance(target, ttbl.test_target)
assert isinstance(images, dict)
raise NotImplementedError
def flash_read(self, target, image, file_name, image_offset = 0, read_bytes = None):
"""
Read a flash image
:param ttbl.test_target target: target where to flash
:param str image: name of the image to read.
:param str file_name: name of file where to dump the image;
the implementation shall overwrite it by any means
necessary. Parent directories can be assumed to exist.
:param int image_offset: (optional, defaults to zero) offset in
bytes from which to start reading relative to the image's
beginning.
:param int read_bytes: (optional, default all) number of bytes to
read from offset.
If the implementation does not support reading, it can raise a
NotImplementedError (maybe we need a better exception).
"""
assert isinstance(target, ttbl.test_target)
assert isinstance(image, str)
raise NotImplementedError("reading not implemented")
class impl2_c(impl_c):
"""
Flasher interface implementation that is capable of execution of
multiple flashings in parallel.
The flashing infrastructure will call :meth:flash_start to get the
flashing process started and then call :meth:flash_check_done
periodically until it finishes; if it exceeds the declared timeout
in :attr:estimated_duration, it will be killed with
:meth:flash_kill, otherwise, execution will be verified with
:meth:flash_check_done.
Falls back to serial execution if *parallel = False* (default) or
needed by the infrastructure for other reasons.
:param float check_period: (optional; defaults to 2) interval in
seconds in which we check how the flashing operation is going by
calling :meth:flash_check_done.
:param bool parallel: (optional; defaults to *False*) execute in
parallel or serially.
If enabled for parallel execution, no flasher specific pre/post
power sequences will be run, only the global ones specifed as
arguments to the :class:ttbl.images.interface.
:param int retries: (optional; defaults to 3) how many times to
retry before giving up, on failure.
Note you can/should overload :meth:`flash_post_check` so that on
failure (if it returns anything but *None*) you might perform a
recovery action.
Other parameters as :class:ttbl.images.impl_c
.. note:: Rules!!!
- Don't store stuff in self, use *context* (this is to
allow future expansion)
"""
def __init__(self, check_period = 2, parallel = False, retries = 3, **kwargs):
assert isinstance(check_period, numbers.Real) and check_period > 0.5, \
"check_period must be a positive number of seconds " \
"greater than 0.5; got %s" % type(check_period)
assert isinstance(parallel, bool), \
"parallel must be a bool; got %s" % type(parallel)
assert isinstance(retries, int) and retries >= 0, \
"retries must be >= 0; got %s" % type(retries)
self.check_period = check_period
self.retries = retries
impl_c.__init__(self, **kwargs)
# otherwise it is overridden
self.parallel = parallel
def flash_start(self, target, images, context):
"""
Start the flashing process
:param ttbl.test_target target: target where to operate
:param dict images: dictionary keyed by image type with the
filenames to flash on each image type
:param dict context: dictionary where to store state; any
key/value can be stored in there for use of the driver.
- *ts0*: *time.time()* when the process started
This is meant to be a non blocking call, just background start
the flashing process, record in context needed tracking
information and return.
Do not use Python threads or multiprocessing, just fork().
"""
raise NotImplementedError
def flash_check_done(self, target, images, context):
"""
Check if the flashing process has completed
Same arguments as :meth:flash_start.
Eg: check the PID in *context['pid']* saved by
:meth:flash_start is still alive and corresponds to the same
path. See :class:flash_shell_cmd_c for an example.
"""
raise NotImplementedError
def flash_kill(self, target, images, context, msg):
"""
Kill a flashing process that has gone astray, timedout or others
Same arguments as :meth:flash_start.
:param str msg: message from the core on why this is being killed
Eg: kill the PID in *context['pid']* saved by
:meth:flash_start. See :class:flash_shell_cmd_c for an example.
"""
raise NotImplementedError
def flash_post_check(self, target, images, context):
"""
Check execution logs after a process successfully completes
Other arguments as :meth:flash_start.
Eg: check that the logfile of a flasher doesn't contain any
telltale signs of errors. See :class:flash_shell_cmd_c for an example.
"""
raise NotImplementedError
# subclasses shall return None if all is ok, or a dict with
# diagnostics info (eg: { "message": ... }) on failure
class interface(ttbl.tt_interface):
"""Interface to flash a list of images (OS, BIOS, Firmware...) that
can be uploaded to the target server and flashed onto a target.
Any image type can be supported, it is up to the configuration to
set the image types and the driver that can flash them. E.g.:
>>> target.interface_add(
>>> "images",
>>> ttbl.images.interface({
>>> "kernel-x86": ttbl.openocd.pc(),
>>> "kernel-arc": "kernel-x86",
>>> "rom": ttbl.images.dfu_c(),
>>> "bootloader": ttbl.images.dfu_c(),
>>> })
>>> )
Aliases can be specified that will refer to another type; in
that case it is implied that images that are aliases will all be
flashed in a single call. Thus in the example above, trying to
flash an image of each type would yield three calls:
- a single *ttbl.openocd.pc.flash()* call would be done for images
*kernel-x86* and *kernel-arc*, so they would be flashed at the
same time.
- a single *ttbl.images.dfu_c.flash()* call for *rom*
- a single *ttbl.images.dfu_c.flash()* call for *bootloader*
If *rom* were an alias for *bootloader*, there would be a single
call to *ttbl.images.dfu_c.flash()*.
The imaging procedure might take control over the target, possibly
powering it on and off (if power control is available). Thus,
after flashing no assumptions shall be made and the safest one is
to call (in the client) :meth:`target.power.cycle
<tcfl.target_ext_power.extension.cycle>` to ensure the right
state.
Whenever an image is flashed in a target's flash destination, a
SHA512 hash of the file flashed is exposed in metadata
*interfaces.images.DESTINATION.last_sha512*. This can be used to
determine if we really want to flash (if you want to assume the
flash doesn't change) or to select where do we want to run
(because you want an specific image flashed).
"""
def __init__(self, *impls,
# python2 doesn't support this combo...
#power_sequence_pre = None,
#power_sequence_post = None,
**kwimpls):
# FIXME: assert
self.power_sequence_pre = kwimpls.pop('power_sequence_pre', None)
self.power_sequence_post = kwimpls.pop('power_sequence_post', None)
ttbl.tt_interface.__init__(self)
self.impls_set(impls, kwimpls, impl_c)
def _target_setup(self, target, iface_name):
if self.power_sequence_pre:
target.power.sequence_verify(target, self.power_sequence_pre,
"flash pre power sequence")
if self.power_sequence_post:
target.power.sequence_verify(target, self.power_sequence_post,
"flash post power sequence")
def _release_hook(self, target, _force):
pass
def _hash_record(self, target, images):
# update SHA512s of the images we flashed (if successful)
# so we can use this to select where we want to run
#
# The name is not a fully good reference, but still helpful
# sometimes; the name can change though, but the content stays
# the same, hence the hash is the first reference one.
#
# note this gives the same result as:
#
## $ sha512sum FILENAME
#
for image_type, name in list(images.items()):
ho = commonl.hash_file(hashlib.sha512(), name)
target.fsdb.set(
"interfaces.images." + image_type + ".last_sha512",
ho.hexdigest()
)
target.fsdb.set(
"interfaces.images." + image_type + ".last_name",
name
)
def _flash_parallel_do(self, target, parallel, image_names):
# flash a parallel-capable flasher in a serial fashion; when
# something fails, repeat it right away if it has retries
contexts = {}
estimated_duration = 0
check_period = 4
all_images = [ ]
for impl, images in list(parallel.items()):
context = dict()
context['ts0'] = time.time()
context['retry_count'] = 1 # 1 based, nicer for human display
contexts[impl] = context
estimated_duration = max(impl.estimated_duration, estimated_duration)
check_period = min(impl.check_period, check_period)
all_images += images.keys()
target.log.info("%s: flashing %s", target.id, image_names[impl])
impl.flash_start(target, images, context)
ts = ts0 = time.time()
done = set()
done_impls = set()
while ts - ts0 < estimated_duration:
target.timestamp() # timestamp so we don't idle...
time.sleep(check_period)
for impl, images in parallel.items():
if impl in done_impls: # already completed? skip
continue
context = contexts[impl]
retry_count = context['retry_count']
ts = time.time()
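# only poll an implementation once its own check period has
# elapsed since the flashing started; then ask it if it is done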
if ts - ts0 > impl.check_period \
and impl.flash_check_done(target, images, context) == True:
# says it is done, let's verify it
r = impl.flash_post_check(target, images, context)
if r == None:
# success! we are done in this one
self._hash_record(target, images)
done.update(images.keys())
done_impls.add(impl)
target.log.warning(
"%s/%s: flashing completed; done_impls: %s",
target.id, image_names[impl], done_impls)
elif retry_count <= impl.retries:
# failed, retry?
context['retry_count'] += 1
target.log.warning(
"%s/%s: flashing failed, retrying %d/%d: %s",
target.id, image_names[impl],
context['retry_count'], impl.retries, r)
impl.flash_start(target, images, context)
else:
# failed, out of retries, error as soon as possible
msg = "%s/%s: flashing failed %d times, aborting: %s" % (
target.id, image_names[impl], retry_count, r)
target.log.error(msg)
for _impl, _images in parallel.items():
_impl.flash_kill(target, _images, contexts[_impl], msg)
raise RuntimeError(msg)
ts = time.time()
if len(done_impls) == len(parallel):
target.log.info("flashed images" + " ".join(image_names.values()))
return
else:
msg = "%s/%s: flashing failed: timedout after %ds" \
% (target.id, " ".join(all_images), estimated_duration)
for impl, images in list(parallel.items()):
impl.flash_kill(target, images, contexts[impl], msg)
raise RuntimeError(msg)
def _flash_consoles_disable(self, target, parallel, image_names):
# in some flashers, the flashing occurs over a
# serial console we might be using, so we can
# disable it -- we'll re-enable it on exit (or not).
# This has to be done after the power-cycle, as it might
# be enabling consoles
# FIXME: move this for parallel too?
for impl in parallel:
for console_name in impl.consoles_disable:
target.log.info(
"flasher %s/%s: disabling console %s to allow flasher to work",
target.id, image_names[impl], console_name)
target.console.put_disable(
target, ttbl.who_daemon(),
dict(component = console_name),
None, None)
def _flash_consoles_enable(self, target, parallel, image_names):
for impl in parallel:
for console_name in impl.consoles_disable:
target.log.info(
"flasher %s/%s: enabling console %s after flashing",
target.id, image_names[impl], console_name)
target.console.put_enable(
target, ttbl.who_daemon(),
dict(component = console_name),
None, None)
def _flash_parallel(self, target, parallel, power_sequence_pre, power_sequence_post):
if power_sequence_pre:
target.power.sequence(target, power_sequence_pre)
image_names = { }
for impl, images in parallel.items():
image_names[impl] = ",".join([ i + ":" + images[i] for i in images ])
try:
target.log.info("flasher %s/%s: starting",
target.id, image_names[impl])
self._flash_consoles_disable(target, parallel, image_names)
self._flash_parallel_do(target, parallel, image_names)
finally:
target.log.info("flasher %s/%s: done",
target.id, image_names[impl])
self._flash_consoles_enable(target, parallel, image_names)
# note the post sequence is not run in case of flashing error,
# this is intended; things might be in a weird state, so a
# full power cycle might be needed
if power_sequence_post:
target.power.sequence(target, power_sequence_post)
def put_flash(self, target, who, args, _files, user_path):
images = self.arg_get(args, 'images', dict)
with target.target_owned_and_locked(who):
# look at all the images we are asked to flash and
# classify them, depending on what implementation will
# handle them
#
# We'll give a single call to each implementation with all
# the images it has to flash in the same order they are
# given to us (hence the OrderedDict)
#
# Note we DO resolve aliases here (imagetype whole
# implementation is a string naming another
# implementation); the implementations do not know / have
# to care about it; if the user specifies *NAME-AKA:FILEA
# NAME:FILEB*, then FILEB will be flashed and FILEA
# ignored; if they do *NAME:FILEB NAME-AKA:FILEA*, FILEA
# will be flashed.
#
# flashers that work serially are bucketed separately from the
# ones that can do parallel
serial = collections.defaultdict(collections.OrderedDict)
parallel = collections.defaultdict(collections.OrderedDict)
for img_type, img_name in images.items():
# validate image types (from the keys) are valid from
# the components and aliases
impl, img_type_real = self.impl_get_by_name(img_type,
"image type")
if not os.path.isabs(img_name):
# file comes from the user's storage
file_name = os.path.join(user_path, img_name)
else:
# file from the system (mounted FS or similar);
# double check it is allowed
for path, path_translated in ttbl.store.paths_allowed.items():
if img_name.startswith(path):
img_name = img_name.replace(path, path_translated, 1)
break
else:
raise PermissionError(
"%s: absolute image path tries to read from"
" a location that is not allowed" % img_name)
file_name = img_name
# we need to lock, since other processes might be
# trying to decompress the file at the same time
# We want to have the lock in another directory
# because the source directory where the file name
# might be might not be writable to us
lock_file_name = os.path.join(
target.state_dir,
"images.flash.decompress."
+ commonl.mkid(file_name)
+ ".lock")
with ttbl.process_posix_file_lock_c(lock_file_name):
# if a decompressor crashed, we have no way to
# tell if the decompressed file is correct or
# truncated and thus corrupted -- we need manual
# intervention for that
real_file_name = commonl.maybe_decompress(file_name)
if impl.parallel:
parallel[impl][img_type_real] = real_file_name
else:
serial[impl][img_type_real] = real_file_name
if real_file_name.startswith(user_path):
# modify the mtime, so the file storage cleanup knows
# we are still using this file and does not attempt
# to clean it up too soon
commonl.file_touch(real_file_name)
target.timestamp()
# iterate over the real implementations only
for impl, subimages in serial.items():
# Serial implementation we just fake like it is
# parallel, but with a single implementation at the
# same time
self._flash_parallel(target, { impl: subimages },
impl.power_sequence_pre,
impl.power_sequence_post)
# FIXME: collect diagnostics here of what failed only if
# 'admin' or some other role?
if parallel:
self._flash_parallel(target, parallel,
self.power_sequence_pre,
self.power_sequence_post)
return {}
def get_flash(self, target, who, args, _files, user_path):
image = self.arg_get(args, 'image', str)
image_offset = self.arg_get(args, 'image_offset', int,
allow_missing = True, default = 0)
read_bytes = self.arg_get(args, 'read_bytes', int,
allow_missing = True, default = None)
file_name = "FIXME_temp"
with target.target_owned_and_locked(who):
impl, img_type_real = self.impl_get_by_name(image, "image type")
# FIXME: file_name needs making safe
real_file_name = os.path.join(user_path, file_name)
# FIXME: make parent dirs of real_file_name
# FIXME: should we lock so we don't try to write also? or
# shall that be left to the impl?
# we write the content to the user's storage area, that
# gets cleaned up regularly
if self.power_sequence_pre:
target.power.sequence(target, self.power_sequence_pre)
impl.flash_read(target, img_type_real, real_file_name,
image_offset, read_bytes)
if self.power_sequence_post:
target.power.sequence(target, self.power_sequence_post)
return dict(stream_file = real_file_name)
# FIXME: save the names of the last flashed in fsdb so we can
# query them? relative to USERDIR or abs to system where allowed
def get_list(self, _target, _who, _args, _files, _user_path):
return dict(
aliases = self.aliases,
result = list(self.aliases.keys()) + list(self.impls.keys()))
class arduino_cli_c(impl_c):
"""Flash with the `Arduino CLI <https://www.arduino.cc/pro/cli>`
For example:
>>> target.interface_add(
>>> "images",
>>> ttbl.images.interface(**{
>>> "kernel-arm": ttbl.images.arduino_cli_c(),
>>> "kernel": "kernel-arm",
>>> })
>>> )
:param str serial_port: (optional) File name of the device node
representing the serial port this device is connected
to. Defaults to */dev/tty-TARGETNAME*.
:param str sketch_fqbn: (optional) name of FQBN to be used to
program the board (will be passed on the *--fqbn* arg to
*arduino-cli upload*).
Other parameters described in :class:ttbl.images.impl_c.
*Requirements*
- Needs a connection to the USB programming port, represented as a
serial port (TTY)
.. _arduino_cli_setup:
- *arduino-cli* has to be available in the path variable :data:`path`.
To install Arduino-CLI::
$ wget https://downloads.arduino.cc/arduino-cli/arduino-cli_0.9.0_Linux_64bit.tar.gz
# tar xf arduino-cli_0.9.0_Linux_64bit.tar.gz -C /usr/local/bin
The boards that are going to be used need to be pre-downloaded;
thus, if the board FQBN *XYZ* will be used and the daemon will
be running as user *ttbd*::
# sudo -u ttbd arduino-cli core update-index
# sudo -u ttbd arduino-cli core install XYZ
Each user that will compile for such board needs to do the same
- target declares *sketch_fqbn* in the tags/properties for the BSP
corresponding to the image. Eg; for *kernel-arm*::
$ ~/t/alloc-tcf.git/tcf get arduino-mega-01 -p bsps
{
"bsps": {
"arm": {
"sketch_fqbn": "arduino:avr:mega:cpu=atmega2560"
}
}
}
Corresponds to a configuration in the:
.. code-block:: python
target.tags_update(dict(
bsps = dict(
arm = dict(
sketch_fqbn = "arduino:avr:mega:cpu=atmega2560",
),
),
))
- TTY devices need to be properly configured permission wise for
the flasher to work; it will tell the *console* subsystem to
disable the console so it can have exclusive access to the
console to use it for flashing; eg, with a udev rule such as::
SUBSYSTEM == "tty", ENV{ID_SERIAL_SHORT} == "95730333937351308131", \
SYMLINK += "tty-arduino-mega-01"
"""
def __init__(self, serial_port = None, sketch_fqbn = None,
**kwargs):
assert serial_port == None or isinstance(serial_port, str)
assert sketch_fqbn == None or isinstance(sketch_fqbn, str)
self.serial_port = serial_port
self.sketch_fqbn = sketch_fqbn
impl_c.__init__(self, **kwargs)
self.upid_set("Arduino CLI Flasher", serial_port = serial_port)
#: Path to *arduino-cli*
#:
#: Change with
#:
#: >>> ttbl.images.arduino_cli_c.path = "/usr/local/bin/arduino-cli"
#:
#: or for a single instance that then will be added to config:
#:
#: >>> imager = ttbl.images.arduino_cli_c.path(SERIAL)
#: >>> imager.path = "/usr/local/bin/arduino-cli"
path = "/usr/local/bin/arduino-cli"
def flash(self, target, images):
assert len(images) == 1, \
"only one image suported, got %d: %s" \
% (len(images), " ".join("%s:%s" % (k, v)
for k, v in list(images.items())))
image_name = list(images.values())[0]
if self.serial_port == None:
serial_port = "/dev/tty-%s" % target.id
else:
serial_port = self.serial_port
# remember this only handles one image type
bsp = list(images.keys())[0].replace("kernel-", "")
sketch_fqbn = self.sketch_fqbn
if sketch_fqbn == None:
# get the Sketch FQBN from the tags for the BSP
sketch_fqbn = target.tags.get('bsps', {}).get(bsp, {}).get('sketch_fqbn', None)
if sketch_fqbn == None:
raise RuntimeError(
"%s: configuration error, needs to declare a tag"
" bsps.BSP.sketch_fqbn for BSP %s or a sketch_fqbn "
"to the constructor"
% (target.id, bsp))
# Arduino Dues and others might need a flash erase
if sketch_fqbn in [ "arduino:sam:arduino_due_x_dbg" ]:
# erase the flash by opening the serial port at 1200bps
target.log.debug("erasing the flash")
with serial.Serial(port = serial_port, baudrate = 1200):
time.sleep(0.25)
target.log.info("erased the flash")
# now write it
cmdline = [
self.path,
"upload",
"--port", serial_port,
"--fqbn", sketch_fqbn,
"--verbose",
"--input", image_name
]
target.log.info("flashing image with: %s" % " ".join(cmdline))
try:
subprocess.check_output(
cmdline, stdin = None, cwd = "/tmp",
stderr = subprocess.STDOUT)
target.log.info("ran %s" % (" ".join(cmdline)))
except subprocess.CalledProcessError as e:
target.log.error("flashing with %s failed: (%d) %s"
% (" ".join(cmdline),
e.returncode, e.output))
raise
target.log.info("flashed image")
class bossac_c(impl_c):
"""Flash with the `bossac <https://github.com/shumatech/BOSSA>`_ tool
>>> target.interface_add(
>>> "images",
>>> ttbl.images.interface(**{
>>> "kernel-arm": ttbl.images.bossac_c(),
>>> "kernel": "kernel-arm",
>>> })
>>> )
:param str serial_port: (optional) File name of the device node
representing the serial port this device is connected
to. Defaults to */dev/tty-TARGETNAME*.
:param str console: (optional) name of the target's console tied
to the serial port; this is needed to disable it so this can
flash. Defaults to *serial0*.
Other parameters described in :class:ttbl.images.impl_c.
*Requirements*
- Needs a connection to the USB programming port, represented as a
serial port (TTY)
- *bossac* has to be available in the path variable :data:`path`.
- (for Arduino Due) uses the bossac utility built on the *arduino*
branch from https://github.com/shumatech/BOSSA/tree/arduino::
# sudo dnf install -y gcc-c++ wxGTK-devel
$ git clone https://github.com/shumatech/BOSSA.git bossac.git
$ cd bossac.git
$ git checkout -f 1.6.1-arduino-19-gae08c63
$ make -k
$ sudo install -o root -g root bin/bossac /usr/local/bin
- TTY devices need to be properly configured permission wise for
bossac to work; for such, choose a Unix group which can get
access to said devices and add udev rules such as::
# Arduino2 boards: allow reading USB descriptors
SUBSYSTEM=="usb", ATTR{idVendor}=="2a03", ATTR{idProduct}=="003d", \
GROUP="GROUPNAME", MODE = "660"
# Arduino2 boards: allow reading serial port
SUBSYSTEM == "tty", ENV{ID_SERIAL_SHORT} == "SERIALNUMBER", \
GROUP = "GROUPNAME", MODE = "0660", \
SYMLINK += "tty-TARGETNAME"
For Arduino Due and others, the theory of operation is quite
simple. According to
https://www.arduino.cc/en/Guide/ArduinoDue#toc4, the Due will
erase the flash if you open the programming port at 1200bps and
then start a reset process and launch the flash when you open the
port at 115200. This is not so clear in the URL above, but this is
what experimentation found.
So for flashing, we'll take over the console, set the serial
port to 1200bps, wait a wee bit and then call bossac.
"""
def __init__(self, serial_port = None, console = None, **kwargs):
assert serial_port == None or isinstance(serial_port, str)
assert console == None or isinstance(console, str)
impl_c.__init__(self, **kwargs)
self.serial_port = serial_port
self.console = console
self.upid_set("bossac jtag", serial_port = serial_port)
#: Path to *bossac*
#:
#: Change with
#:
#: >>> ttbl.images.bossac_c.path = "/usr/local/bin/bossac"
#:
#: or for a single instance that then will be added to config:
#:
#: >>> imager = ttbl.images.bossac_c.path(SERIAL)
#: >>> imager.path = "/usr/local/bin/bossac"
path = "/usr/bin/bossac"
def flash(self, target, images):
assert len(images) == 1, \
"only one image suported, got %d: %s" \
% (len(images), " ".join("%s:%s" % (k, v)
for k, v in list(images.items())))
image_name = list(images.values())[0]
if self.serial_port == None:
serial_port = "/dev/tty-%s" % target.id
else:
serial_port = self.serial_port
if self.console == None:
console = "serial0"
else:
console = self.console
target.power.put_cycle(target, ttbl.who_daemon(), {}, None, None)
# give up the serial port, we need it to flash
# we don't care it is off because then we are switching off
# the whole thing and then someone else will power it on
target.console.put_disable(target, ttbl.who_daemon(),
dict(component = console), None, None)
# erase the flash by opening the serial port at 1200bps
target.log.debug("erasing the flash")
with serial.Serial(port = serial_port, baudrate = 1200):
time.sleep(0.25)
target.log.info("erased the flash")
# now write it
cmdline = [
self.path,
"-p", os.path.basename(serial_port),
"-e", # Erase current
"-w", # Write a new one
"-v", # Verify,
"-b", # Boot from Flash
image_name
]
target.log.info("flashing image with: %s" % " ".join(cmdline))
try:
subprocess.check_output(
cmdline, stdin = None, cwd = "/tmp",
stderr = subprocess.STDOUT)
target.log.info("ran %s" % (" ".join(cmdline)))
except subprocess.CalledProcessError as e:
target.log.error("flashing with %s failed: (%d) %s"
% (" ".join(cmdline),
e.returncode, e.output))
raise
target.power.put_off(target, ttbl.who_daemon(), {}, None, None)
target.log.info("flashed image")
class dfu_c(impl_c):
"""Flash the target with `DFU util <http://dfu-util.sourceforge.net/>`_
>>> target.interface_add(
>>> "images",
>>> ttbl.images.interface(**{
>>> "kernel-x86": ttbl.images.dfu_c(),
>>> "kernel-arc": "kernel-x86",
>>> "kernel": "kernel-x86",
>>> })
>>> )
:param str usb_serial_number: target's USB Serial Number
Other parameters described in :class:ttbl.images.impl_c.
*Requirements*
- Needs a connection to the USB port that exposes a DFU
interface upon boot
- Uses the dfu-utils utility, available for most (if not all)
Linux distributions
- Permissions to use USB devices in */dev/bus/usb* are needed;
*ttbd* usually runs with group *root*, which shall be
enough.
- In most cases, needs power control for proper operation, but
some MCU boards will reset on their own afterwards.
Note the tags to the target must include, on each supported
BSP, a tag named *dfu_interface_name* listing the name of the
*altsetting* of the DFU interface to which the image for said
BSP needs to be flashed.
This can be found, when the device exposes the DFU interfaces
with the *lsusb -v* command; for example, for a tinyTILE
(output summarized for clarity)::
$ lsusb -v
...
Bus 002 Device 110: ID 8087:0aba Intel Corp.
Device Descriptor:
bLength 18
bDescriptorType 1
...
Interface Descriptor:
bInterfaceClass 254 Application Specific Interface
bInterfaceSubClass 1 Device Firmware Update...
iInterface 4 x86_rom
Interface Descriptor:
bInterfaceClass 254 Application Specific Interface
bInterfaceSubClass 1 Device Firmware Update...
iInterface 5 x86_boot
Interface Descriptor:
bInterfaceClass 254 Application Specific Interface
bInterfaceSubClass 1 Device Firmware Update
iInterface 6 x86_app
Interface Descriptor:
bInterfaceClass 254 Application Specific Interface
bInterfaceSubClass 1 Device Firmware Update
iInterface 7 config
Interface Descriptor:
bInterfaceClass 254 Application Specific Interface
bInterfaceSubClass 1 Device Firmware Update
iInterface 8 panic
Interface Descriptor:
bInterfaceClass 254 Application Specific Interface
bInterfaceSubClass 1 Device Firmware Update
iInterface 9 events
Interface Descriptor:
bInterfaceClass 254 Application Specific Interface
bInterfaceSubClass 1 Device Firmware Update
iInterface 10 logs
Interface Descriptor:
bInterfaceClass 254 Application Specific Interface
bInterfaceSubClass 1 Device Firmware Update
iInterface 11 sensor_core
Interface Descriptor:
bInterfaceClass 254 Application Specific Interface
bInterfaceSubClass 1 Device Firmware Update
iInterface 12 ble_core
In this case, the three cores available are x86 (x86_app), arc
(sensor_core) and ARM (ble_core).
*Example*
A Tiny Tile can be connected, without exposing a serial console:
>>> target = ttbl.test_target("ti-01")
>>> target.interface_add(
>>> "power",
>>> ttbl.power.interface({
>>> ( "USB present",
>>> ttbl.pc.delay_til_usb_device("5614010001031629") ),
>>> })
>>> )
>>> target.interface_add(
>>> "images",
>>> ttbl.images.interface(**{
>>> "kernel-x86": ttbl.images.dfu_c("5614010001031629"),
>>> "kernel-arm": "kernel-x86",
>>> "kernel-arc": "kernel-x86",
>>> "kernel": "kernel-x86"
>>> })
>>> )
>>> ttbl.config.target_add(
>>> target,
>>> tags = {
>>> 'bsp_models': { 'x86+arc': ['x86', 'arc'], 'x86': None, 'arc': None},
>>> 'bsps' : {
>>> "x86": dict(zephyr_board = "tinytile",
>>> zephyr_kernelname = 'zephyr.bin',
>>> dfu_interface_name = "x86_app",
>>> console = ""),
>>> "arm": dict(zephyr_board = "arduino_101_ble",
>>> zephyr_kernelname = 'zephyr.bin',
>>> dfu_interface_name = "ble_core",
>>> console = ""),
>>> "arc": dict(zephyr_board = "arduino_101_sss",
>>> zephyr_kernelname = 'zephyr.bin',
>>> dfu_interface_name = 'sensor_core',
>>> console = "")
>>> },
>>> },
>>> target_type = "tinytile"
>>> )
"""
def __init__(self, usb_serial_number, **kwargs):
assert usb_serial_number == None \
or isinstance(usb_serial_number, str)
impl_c.__init__(self, **kwargs)
self.usb_serial_number = usb_serial_number
self.upid_set("USB DFU flasher", usb_serial_number = usb_serial_number)
#: Path to the dfu-tool
#:
#: Change with
#:
#: >>> ttbl.images.dfu_c.path = "/usr/local/bin/dfu-tool"
#:
#: or for a single instance that then will be added to config:
#:
#: >>> imager = ttbl.images.dfu_c.path(SERIAL)
#: >>> imager.path = "/usr/local/bin/dfu-tool"
path = "/usr/bin/dfu-tool"
def flash(self, target, images):
cmdline = [ self.path, "-S", self.usb_serial_number ]
# for each image we are writing to a different interface, we
# add a -a IFNAME -D IMGNAME to the commandline, so we can
# flash multiple images in a single shot
for image_type, image_name in images.items():
# FIXME: we shall make sure all images are like this?
if not image_type.startswith("kernel-"):
raise RuntimeError(
"Unknown image type '%s' (valid: kernel-{%s})"
% (image_type, ",".join(list(target.tags['bsps'].keys()))))
bsp = image_type.replace("kernel-", "")
tags_bsp = target.tags.get('bsps', {}).get(bsp, None)
if tags_bsp == None:
raise RuntimeError(
"Unknown BSP %s from image type '%s' (valid: %s)"
% (bsp, image_type, " ".join(list(target.tags['bsps'].keys()))))
dfu_if_name = tags_bsp.get('dfu_interface_name', None)
if dfu_if_name == None:
raise RuntimeError(
"Misconfigured target: image type %s (BSP %s) has "
"no 'dfu_interface_name' key to indicate which DFU "
"interface shall it flash"
% (image_type, bsp))
cmdline += [ "-a", dfu_if_name, "-D", image_name ]
# Power cycle the board so it goes into DFU mode; it then
# stays there for five seconds (FIXME: all of them?)
target.power.put_cycle(target, ttbl.who_daemon(), {}, None, None)
# let's do this
try:
target.log.info("flashing image with: %s" % " ".join(cmdline))
subprocess.check_output(cmdline, cwd = "/tmp",
stderr = subprocess.STDOUT)
target.log.info("flashed with %s: %s" % (" ".join(cmdline)))
except subprocess.CalledProcessError as e:
target.log.error("flashing with %s failed: (%d) %s" %
(" ".join(cmdline), e.returncode, e.output))
raise
target.power.put_off(target, ttbl.who_daemon(), {}, None, None)
target.log.info("flashed image")
class fake_c(impl2_c):
"""
Fake flashing driver (mainly for testing the interfaces)
>>> flasher = ttbl.images.fake_c()
>>> target.interface_add(
>>> "images",
>>> ttbl.images.interface(**{
>>> "kernel-BSP1": flasher,
>>> "kernel-BSP2": flasher,
>>> "kernel": "kernel-BSPNAME"
>>> })
>>> )
Parameters like :class:ttbl.images.impl_c.
"""
def __init__(self, **kwargs):
impl2_c.__init__(self, **kwargs)
self.upid_set("Fake test flasher", _id = str(id(self)))
def flash_start(self, target, images, context):
target.fsdb.set(f"fake-{'.'.join(images.keys())}-{context}.ts0", time.time())
def flash_check_done(self, target, images, context):
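# fake completion: report done once most of the declared
# estimated_duration has elapsed since flash_start() recorded ts0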
ts0 = target.fsdb.get(f"fake-{'.'.join(images.keys())}-{context}.ts0", None)
ts = time.time()
return ts - ts0 > self.estimated_duration - self.check_period
def flash_kill(self, target, images, context, msg):
target.fsdb.set(f"fake-{'.'.join(images.keys())}-{context}.state", "started", None)
def flash_post_check(self, target, images, context):
return None
def flash(self, target, images):
for image_type, image in images.items():
target.log.info("%s: flashing %s" % (image_type, image))
time.sleep(self.estimated_duration)
target.log.info("%s: flashed %s" % (image_type, image))
target.log.info("%s: flashing succeeded" % image_type)
class esptool_c(impl_c):
"""
Flash a target using Espressif's *esptool.py*
>>> target.interface_add(
>>> "images",
>>> ttbl.images.interface(**{
>>> "kernel-xtensa": ttbl.images.esptool_c(),
>>> "kernel": "kernel-xtensa"
>>> })
>>> )
:param str serial_port: (optional) File name of the device node
representing the serial port this device is connected
to. Defaults to */dev/tty-TARGETNAME*.
:param str console: (optional) name of the target's console tied
to the serial port; this is needed to disable it so this can
flash. Defaults to *serial0*.
Other parameters described in :class:ttbl.images.impl_c.
*Requirements*
- The ESP-IDF framework, of which ``esptool.py`` is used to
flash the target; to install::
$ cd /opt
$ git clone --recursive https://github.com/espressif/esp-idf.git
(note the ``--recursive``!! it is needed so all the submodules
are picked up)
configure path to it globally by setting
:attr:`path` in a /etc/ttbd-production/conf_*.py file:
.. code-block:: python
import ttbl.tt
ttbl.images.esptool_c.path = "/opt/esp-idf/components/esptool_py/esptool/esptool.py"
- Permissions to use USB devices in */dev/bus/usb* are needed;
*ttbd* usually runs with group *root*, which shall be
enough.
- Needs power control for proper operation; FIXME: pending to
make it operate without power control, using ``esptool.py``.
The base code will convert the *ELF* image to the required
*bin* image using the ``esptool.py`` script. Then it will
flash it via the serial port.
"""
def __init__(self, serial_port = None, console = None, **kwargs):
assert serial_port == None or isinstance(serial_port, str)
assert console == None or isinstance(console, str)
impl_c.__init__(self, **kwargs)
self.serial_port = serial_port
self.console = console
self.upid_set("ESP JTAG flasher", serial_port = serial_port)
#: Path to *esptool.py*
#:
#: Change with
#:
#: >>> ttbl.images.esptool_c.path = "/usr/local/bin/esptool.py"
#:
#: or for a single instance that then will be added to config:
#:
#: >>> imager = ttbl.images.esptool_c.path(SERIAL)
#: >>> imager.path = "/usr/local/bin/esptool.py"
path = "__unconfigured__ttbl.images.esptool_c.path__"
def flash(self, target, images):
assert len(images) == 1, \
"only one image suported, got %d: %s" \
% (len(images), " ".join("%s:%s" % (k, v)
for k, v in list(images.items())))
if self.serial_port == None:
serial_port = "/dev/tty-%s" % target.id
else:
serial_port = self.serial_port
if self.console == None:
console = "serial0"
else:
console = self.console
cmdline_convert = [
self.path,
"--chip", "esp32",
"elf2image",
]
cmdline_flash = [
self.path,
"--chip", "esp32",
"--port", serial_port,
"--baud", "921600",
"--before", "default_reset",
# with no power control, at least it starts
"--after", "hard_reset",
"write_flash", "-u",
"--flash_mode", "dio",
"--flash_freq", "40m",
"--flash_size", "detect",
"0x1000",
]
image_type = 'kernel'
image_name = list(images.values())[0]
image_name_bin = image_name + ".bin"
try:
cmdline = cmdline_convert + [ image_name,
"--output", image_name_bin ]
target.log.info("%s: converting with %s"
% (image_type, " ".join(cmdline)))
s = subprocess.check_output(cmdline, cwd = "/tmp",
stderr = subprocess.STDOUT)
except subprocess.CalledProcessError as e:
target.log.error("%s: converting image with %s failed: (%d) %s"
% (image_type, " ".join(cmdline),
e.returncode, e.output))
raise
target.power.put_cycle(target, ttbl.who_daemon(), {}, None, None)
# give up the serial port, we need it to flash
# we don't care it is off because then we are switching off
# the whole thing and then someone else will power it on
target.console.put_disable(target, ttbl.who_daemon(),
dict(component = console), None, None)
try:
cmdline = cmdline_flash + [ image_name_bin ]
target.log.info("%s: flashing with %s"
% (image_type, " ".join(cmdline)))
s = subprocess.check_output(cmdline, cwd = "/tmp",
stderr = subprocess.STDOUT)
target.log.info("%s: flashed with %s: %s"
% (image_type, " ".join(cmdline), s))
except subprocess.CalledProcessError as e:
target.log.error("%s: flashing with %s failed: (%d) %s"
% (image_type, " ".join(cmdline),
e.returncode, e.output))
raise
target.power.put_off(target, ttbl.who_daemon(), {}, None, None)
target.log.info("%s: flashing succeeded" % image_type)
class flash_shell_cmd_c(impl2_c):
"""
General flashing template that can use a command line tool to
flash (possibly in parallel)
:param list(str) cmdline: list of strings composing the command to
call; first is the path to the command, that can be overriden
with the *path* argument
>>> [ "/usr/bin/program", "arg1", "arg2" ]
all the components have to be strings; they will be templated
using *%(FIELD)s* from the target's metadata, including the
following fields:
- *cwd*: directory where the command is being executed
- *image.TYPE*: *NAME* (for all the images to be flashed, the
file we are flashing)
- *image.#<N>*: *NAME* (for all the images to be flashed, the
file we are flashing), indexed by number in declaration order.
This is mostly used when there is only one image, so we do not
need to know the name of the image (*image.#0*).
- *image_types*: all the image types being flashed separated
with "-".
- *pidfile*: Name of the PID file
- *logfile_name*: Name of the log file
:param str cwd: (optional; defaults to "/tmp") directory from
where to run the flasher program
:param str path: (optional, defaults to *cmdline[0]*) path to the
flashing program
:param dict env_add: (optional) variables to add to the environment when
running the command
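For example, a minimal sketch (the flashing tool and its arguments
are made up for illustration; only the *%(FIELD)s* templating
described above is real):
>>> flasher = ttbl.images.flash_shell_cmd_c(
>>>     [ "/usr/local/bin/some-flasher",        # hypothetical tool
>>>       "--port", "/dev/tty-%(id)s",          # target's serial port
>>>       "--write", "%(image.#0)s" ],          # the (single) image file
>>>     estimated_duration = 30)
>>> target.interface_add(
>>>     "images",
>>>     ttbl.images.interface(firmware = flasher))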
"""
def __init__(self, cmdline, cwd = "/tmp", path = None, env_add = None,
**kwargs):
commonl.assert_list_of_strings(cmdline, "cmdline", "arguments")
assert cwd == None or isinstance(cwd, str)
assert path == None or isinstance(path, str)
self.p = None
if path == None:
path = cmdline[0]
self.path = path
self.cmdline = cmdline
self.cwd = cwd
if env_add:
commonl.assert_dict_of_strings(env_add, "env_add")
self.env_add = env_add
else:
self.env_add = {}
impl2_c.__init__(self, **kwargs)
def flash_start(self, target, images, context):
kws = dict(target.kws)
context['images'] = images
# make sure they are sorted so they are always listed the same
image_types = "-".join(sorted(images.keys()))
kws['image_types'] = image_types
if self.log_name:
kws['log_name'] = self.log_name
else:
kws['log_name'] = image_types
# this allows a class inheriting this to set kws before calling us
context.setdefault('kws', {}).update(kws)
kws = context['kws']
count = 0
for image_name, image in images.items():
kws['image.' + image_name] = image
kws['image.#%d' % count ] = image
count += 1
pidfile = "%(path)s/flash-%(image_types)s.pid" % kws
context['pidfile'] = kws['pidfile'] = pidfile
cwd = self.cwd % kws
context['cwd'] = kws['cwd'] = cwd
logfile_name = "%(path)s/flash-%(log_name)s.log" % kws
# hack so what the log file reading console (if defined) can
# be restarted properly
if hasattr(target, "console"):
console_name = "log-flash-" + kws['log_name']
if console_name in target.console.impls:
ttbl.console.generation_set(target, console_name)
context['logfile_name'] = kws['logfile_name'] = logfile_name
cmdline = []
count = 0
try:
for i in self.cmdline:
# some older Linux distros complain if this string is unicode
cmdline.append(str(i % kws))
count += 1
except KeyError as e:
message = "configuration error? can't template command line #%d," \
" missing field or target property: %s" % (count, e)
target.log.error(message)
raise RuntimeError(message)
cmdline_s = " ".join(cmdline)
context['cmdline'] = cmdline
context['cmdline_s'] = cmdline_s
if self.env_add:
env = dict(os.environ)
env.update(self.env_add)
else:
env = os.environ
ts0 = time.time()
context['ts0'] = ts0
try:
target.log.info("flashing %s image with: %s",
image_types, " ".join(cmdline))
with open(logfile_name, "w+") as logf:
self.p = subprocess.Popen(
cmdline, env = env, stdin = None, cwd = cwd,
bufsize = 0, # output right away, to monitor
stderr = subprocess.STDOUT, stdout = logf)
with open(pidfile, "w+") as pidf:
pidf.write("%s" % self.p.pid)
target.log.debug("%s: flasher PID %s file %s",
image_types, self.p.pid, pidfile)
except subprocess.CalledProcessError as e:
target.log.error("flashing with %s failed: (%d) %s"
% (cmdline_s, e.returncode, e.output))
raise
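# catch an immediate failure: if the process has already exited,
# dump its log and error out right away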
self.p.poll()
if self.p.returncode != None:
msg = "flashing with %s failed to start: (%s->%s) %s" % (
cmdline_s, self.p.pid, self.p.returncode, logfile_name)
target.log.error(msg)
with open(logfile_name) as logf:
for line in logf:
target.log.error('%s: logfile: %s', image_types, line)
raise RuntimeError(msg)
# this is needed so SIGCHLD the process and it doesn't become
# a zombie
ttbl.daemon_pid_add(self.p.pid) # FIXME: race condition if it died?
target.log.debug("%s: flasher PID %s started (%s)",
image_types, self.p.pid, cmdline_s)
return
def flash_check_done(self, target, images, context):
ts = time.time()
ts0 = context['ts0']
target.log.debug("%s: [+%.1fs] flasher PID %s checking",
context['kws']['image_types'], ts - ts0, self.p.pid)
self.p.poll()
if self.p.returncode == None:
r = False
else:
r = True
ts = time.time()
target.log.debug(
"%s: [+%.1fs] flasher PID %s checked %s",
context['kws']['image_types'], ts - ts0, self.p.pid, r)
return r
def flash_kill(self, target, images, context, msg):
ts = time.time()
ts0 = context['ts0']
target.log.debug(
"%s: [+%.1fs] flasher PID %s terminating due to timeout",
context['kws']['image_types'], ts - ts0, self.p.pid)
commonl.process_terminate(context['pidfile'], path = self.path)
def _log_file_read(self, context, max_bytes = 2000):
try:
with open(context['logfile_name'], 'rb') as logf:
try:
# SEEK to -MAX_BYTES or if EINVAL (too big), leave it
# at beginning of file
logf.seek(-max_bytes, 2)
except IOError as e:
if e.errno != errno.EINVAL:
raise
return logf.read().decode('utf-8')
except IOError as e:
if e.errno != errno.ENOENT:
raise
return "<no logls recorded>"
def flash_post_check(self, target, images, context,
expected_returncode = 0):
"""
Check for execution result.
:param int expected_returncode: (optional, default 0)
returncode the command has to return on success. If *None*,
don't check it.
"""
if expected_returncode != None and self.p.returncode != expected_returncode:
msg = "flashing with %s failed, returned %s: %s" % (
context['cmdline_s'], self.p.returncode,
self._log_file_read(context))
target.log.error(msg)
return { "message": msg }
return
# unreachable example code for subclasses: look for errors in the logfile
try:
with codecs.open(context['logfile_name'], errors = 'ignore') as logf:
for line in logf:
if 'Fail' in line:
msg = "flashing with %s failed, issues in logfile" % (
context['cmdline_s'])
target.log.error(msg)
return { "message": msg }
except IOError as e:
if e.errno != errno.ENOENT:
raise
class quartus_pgm_c(flash_shell_cmd_c):
"""
Flash using Intel's Quartus PGM tool
This allows to flash images to an Altera MAX10, using the Quartus
tools, freely downloadable from http://dl.altera.com.
Exports the following interfaces:
- power control (using any AC power switch, such as the
:class:`Digital Web Power Switch 7 <ttbl.pc.dlwps7>`)
- serial console
- image (in hex format) flashing (using the Quartus Prime tools
package)
Multiple instances at the same time are supported; however, due to
the JTAG interface not exporting a serial number, addressing has
to be done by USB path, which is risky (as it will change when the
cable is plugged to another port or might be enumerated in a
different number).
:param str device_id: USB serial number of the USB device to use
(USB-BlasterII or similar)
:param dict image_map:
:param str name: (optional; default 'Intel Quartus PGM #<DEVICEID>')
instrument's name.
:param dict args: (optional) dictionary of extra command line options to
*quartus_pgm*; these are expanded with the target keywords with
*%(FIELD)s* templates, with fields being the target's
:ref:`metadata <finding_testcase_metadata>`:
FIXME: move to common flash_shell_cmd_c
:param dict jtagconfig: (optional) jtagconfig --setparam commands
to run before starting.
These are expanded with the target keywords with
*%(FIELD)s* templates, with fields being the target's
:ref:`metadata <finding_testcase_metadata>` and then run as::
jtagconfig --setparam CABLENAME KEY VALUE
Other parameters described in :class:ttbl.images.impl_c.
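**Example**
A minimal configuration sketch (the device ID and the *image_map*
values are made up; the integer presumably selects the device's
position in the JTAG chain, per the *--operation=...@N* comments in
the constructor below):
>>> target.interface_add(
>>>     "images",
>>>     ttbl.images.interface(**{
>>>         "fw": ttbl.images.quartus_pgm_c(
>>>             "USB-BLASTERII-SERIAL", image_map = { "fw": 1 }),
>>>     })
>>> )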
**Command line reference**
https://www.intel.com/content/dam/www/programmable/us/en/pdfs/literature/manual/tclscriptrefmnl.pdf
Section Quartus_PGM (2-50)
**System setup**
- Download and install Quartus Programmer::
$ wget http://download.altera.com/akdlm/software/acdsinst/20.1std/711/ib_installers/QuartusProgrammerSetup-20.1.0.711-linux.run
# chmod a+x QuartusProgrammerSetup-20.1.0.711-linux.run
# ./QuartusProgrammerSetup-20.1.0.711-linux.run --unattendedmodeui none --mode unattended --installdir /opt/quartus --accept_eula 1
- if installing to a different location than */opt/quartus*,
adjust the value of :data:`path` in a FIXME:ttbd configuration
file.
**Troubleshooting**
When it fails to flash, the error log is reported in the server in
a file called *flash-COMPONENTS.log* in the target's state
directory (FIXME: we need a better way for this--the admin shall
be able to read it, but not the users as it might leak sensitive
information?).
Common error messages:
- *Error (213019): Can't scan JTAG chain. Error code 87*
Also seen when manually running in the server::
$ /opt/quartus/qprogrammer/bin/jtagconfig
1) USB-BlasterII [3-1.4.4.3]
Unable to read device chain - JTAG chain broken
In many cases this has been:
- a powered off main board: power it on
- a misconnected USB-BlasterII: reconnect properly
- a broken USB-BlasterII: replace unit
- *Error (209012): Operation failed*
this usually happens when flashing one component of a multiple
component chain; the log might read something like::
Info (209060): Started Programmer operation at Mon Jul 20 12:05:22 2020
Info (209017): Device 2 contains JTAG ID code 0x038301DD
Info (209060): Started Programmer operation at Mon Jul 20 12:05:22 2020
Info (209016): Configuring device index 2
Info (209017): Device 2 contains JTAG ID code 0x018303DD
Info (209007): Configuration succeeded -- 1 device(s) configured
Info (209011): Successfully performed operation(s)
Info (209061): Ended Programmer operation at Mon Jul 20 12:05:22 2020
Error (209012): Operation failed
Info (209061): Ended Programmer operation at Mon Jul 20 12:05:22 2020
Error: Quartus Prime Programmer was unsuccessful. 1 error, 0 warnings
This case has been found to be because the **--bgp** option is
needed (which seems to map to the *Enable Realtime ISP
programming* in the Quartus UI, *quartus_pgmw*)
- *Warning (16328): The real-time ISP option for Max 10 is
selected. Ensure all Max 10 devices being programmed are in user
mode when requesting this programming option*
Followed by:
*Error (209012): Operation failed*
This case comes when a previous flashing process was interrupted
half way or the target is corrupted.
It needs a special one-time recovery; currently the
workaround seems to be running the flashing without the *--bgp*
switch, which as of now is hardcoded.
FIXME: move the --bgp and --mode=JTAG switches to the args (vs
hardcoded) so a recovery target can be implemented as
NAME-nobgp
"""
#: Path to *quartus_pgm*
#:
#: We need to use an ABSOLUTE PATH if the tool is not in the
#: normal search path (which it usually isn't).
#:
#: Change by setting, in a :ref:`server configuration file
#: <ttbd_configuration>`:
#:
#: >>> ttbl.images.quartus_pgm_c.path = "/opt/quartus/qprogrammer/bin/quartus_pgm"
#:
#: or for a single instance that then will be added to config:
#:
#: >>> imager = ttbl.images.quartus_pgm_c(...)
#: >>> imager.path = "/opt/quartus/qprogrammer/bin/quartus_pgm"
path = "/opt/quartus/qprogrammer/bin/quartus_pgm"
path_jtagconfig = "/opt/quartus/qprogrammer/bin/jtagconfig"
def __init__(self, device_id, image_map, args = None, name = None,
jtagconfig = None,
**kwargs):
assert isinstance(device_id, str)
commonl.assert_dict_of_ints(image_map, "image_map")
commonl.assert_none_or_dict_of_strings(jtagconfig, "jtagconfig")
assert name == None or isinstance(name, str)
self.device_id = device_id
self.image_map = image_map
self.jtagconfig = jtagconfig
if args:
commonl.assert_dict_of_strings(args, "args")
self.args = args
else:
self.args = {}
cmdline = [
"stdbuf", "-o0", "-e0", "-i0",
self.path,
# FIXME: move this to args, enable value-less args (None)
"--bgp", # Real time background programming
"--mode=JTAG", # this is a JTAG
"-c", "%(device_path)s", # will resolve in flash_start()
# in flash_start() call we'll map the image names to targets
# to add these
#
#'--operation=PVB;%(image.NAME)s@1',
#'--operation=PVB;%(image.NAME)s@2',
#...
# (P)rogram (V)erify, (B)lank-check
#
# note like this we can support burning multiple images into the
# same chain with a single call
]
if args:
for arg, value in args.items():
if value != None:
cmdline += [ arg, value ]
# we do this because in flash_start() we need to add
# --operation as we find images we are supposed to flash
self.cmdline_orig = cmdline
flash_shell_cmd_c.__init__(self, cmdline, cwd = '%(file_path)s',
**kwargs)
if name == None:
name = "Intel Quartus PGM %s" % device_id
self.upid_set(name, device_id = device_id)
def flash_start(self, target, images, context):
# Finalize preparing the command line for flashing the images
# find the device path; quartus_pgm doesn't seem to be able to
# address by serial and expects a cable name as 'PRODUCT NAME
# [PATH]', like 'USB BlasterII [1-3.3]'; we can't do this on
# object creation because the USB path might change when we power
# it on/off (rare, but could happen).
usb_path, _vendor, product = ttbl.usb_serial_to_path(self.device_id)
port = target.fsdb.get("jtagd.tcp_port")
context['kws'] = {
# HACK: we assume all images are in the same directory, so
# we are going to cwd there (see in __init__ how we set
# cwd to %(file_path)s). Reason is some of our paths might
# include @, which the tool considers illegal as it uses
# it to separate arguments--see below --operation
'file_path': os.path.dirname(list(images.values())[0]),
'device_path': "%s on localhost:%s [%s]" % (product, port, usb_path)
# flash_shell_cmd_c.flash_start() will add others
}
# for each image we are burning, map it to a target name in
# the cable (@NUMBER)
# make sure we don't modify the originals
cmdline = copy.deepcopy(self.cmdline_orig)
for image_type, filename in images.items():
target_index = self.image_map.get(image_type, None)
# pass only the relative filename, as we are going to
# change working dir into the path (see above in
# context[kws][file_path])
cmdline.append("--operation=PVB;%s@%d" % (
os.path.basename(filename), target_index))
# now set it for flash_shell_cmd_c.flash_start()
self.cmdline = cmdline
if self.jtagconfig:
for option, value in self.jtagconfig.items():
cmdline = [
self.path_jtagconfig,
"--setparam", "%s [%s]" % (product, usb_path),
option, value
]
target.log.info("running per-config: %s" % " ".join(cmdline))
subprocess.check_output(
cmdline, shell = False, stderr = subprocess.STDOUT)
flash_shell_cmd_c.flash_start(self, target, images, context)
class sf100linux_c(flash_shell_cmd_c):
"""Flash Dediprog SF100 and SF600 with *dpcmd* from
https://github.com/DediProgSW/SF100Linux
:param str dediprog_id: ID of the dediprog to use (when multiple
are available); this can be found by running *dpdmd --detect* with
super user privileges (ensure they are connected)::
# dpcmd
DpCmd Linux 1.11.2.01 Engine Version:
Last Built on May 25 2018
Device 1 (SF611445): detecting chip
By reading the chip ID, the chip applies to [ MX66L51235F ]
MX66L51235F chip size is 67108864 bytes.
in here, *Device 1* has ID *SF611445*. It is recommended to do
this step only on an isolated machine to avoid confusions with
other devices connected.
:param int timeout: (optional) seconds to give the flashing
process to run; if exceeded, it will raise an exception. This
usually depends on the size of the binary being flashed and the
speed of the interface.
:param str mode: (optional; default "--batch") flashing mode, this
can be:
- *--prog*: programs without erasing
- *--auto*: erase and update only sectors that changed
- *--batch*: erase and program
- *--erase*: erase
:param dict args: dictionary of extra command line options to
*dpcmd*; these are expanded with the target keywords with
*%(FIELD)s* templates, with fields being the target's
:ref:`metadata <finding_testcase_metadata>`:
.. code-block:: python
args = {
# extra command line arguments for dpcmd
'dediprog:id': 435,
}
Other parameters described in :class:`ttbl.images.impl_c`.
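**Configuration example**
A minimal sketch of how an instance might be added to a target's
*images* interface; the Dediprog ID and the image type name
*bios* are illustrative placeholders:
.. code-block:: python
   target.interface_add(
       "images",
       ttbl.images.interface(
           bios = ttbl.images.sf100linux_c(
               "SF611445",         # from dpcmd --detect
               timeout = 120,      # larger SPI images need more time
               mode = "--batch"),  # erase and program
       ))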
**System setup**
*dpcmd* is not packaged by most distributions, needs to be
manually built and installed.
1. build and install *dpcmd*::
$ git clone https://github.com/DediProgSW/SF100Linux sf100linux.git
$ make -C sf100linux.git
$ sudo install -o root -g root \
sf100linux.git/dpcmd sf100linux.git/ChipInfoDb.dedicfg \
/usr/local/bin
Note *dpcmd* needs to always be invoked with the full path
(*/usr/local/bin/dpcmd*) so it will pick up the location of its
database; otherwise it will fail to list, detect or operate.
2. (optionally, if installed in another location) configure the
path of *dpcmd* by setting :data:`path`.
**Detecting a Dediprog**
Dediprogs' USB serial numbers are often all the same, so for a
power-on sequence to wait until the device is detected by the
system after it has been plugged in (eg: with a
:class:`ttbl.pc_ykush.ykush` connector)
:class:`ttbl.pc.delay_til_usb_device` is usually not enough. In
such case, we can use *dpcmd* to do the detection for us:
.. code-block:: python
connector = ttbl.pc_ykush.ykush(ykush, port, explicit = 'on')
detector = ttbl.power.delay_til_shell_cmd_c(
[
# run over timeout, so if the command gets stuck due
# to HW, we can notice it and not get stuck -- so if
# it can't detect in five seconds, kill it
"/usr/bin/timeout", "--kill-after=1", "5",
ttbl.images.sf100linux_c.path,
"--detect", "--device", dediprog_id
],
"dediprog %s is detected" % dediprog_id,
explicit = 'on')
and then the power rail must include both:
.. code-block:: python
target.interface_add("power", ttbl.power.interface([
( "flasher connect", connector ),
( "flasher detected", detector ),
...
])
A console can be added to watch progress with::
target.interface_impl_add("console", "log-flash-IMAGENAME",
ttbl.console.logfile_c("flash-IMAGENAME.log"))
"""
def __init__(self, dediprog_id, args = None, name = None, timeout = 60,
sibling_port = None,
path = None,
mode = "--batch", **kwargs):
assert isinstance(dediprog_id, str)
assert isinstance(timeout, int)
assert path == None or isinstance(path, str)
assert mode in [ "--batch", "--auto", "--prog", "--erase" ]
commonl.assert_none_or_dict_of_strings(args, "args")
self.timeout = timeout
if path:
self.path = path
# FIXME: verify path works +x
# file_name and file_path are set in flash_start()
self.dediprog_id = dediprog_id
if sibling_port:
cmdline = [
self.path,
mode, "%(file_name)s",
]
self.sibling_port = sibling_port
else:
cmdline = [
self.path,
"--device", dediprog_id,
mode, "%(file_name)s",
]
self.sibling_port = None
if args:
for arg, value in args.items():
cmdline += [ arg, value ]
# when flashing, CD to where the image is, otherwise dpcmd
# crashes on very long filenames :/ workaround
flash_shell_cmd_c.__init__(self, cmdline, cwd = '%(file_path)s',
**kwargs)
if name == None:
name = "Dediprog SF[16]00 " + dediprog_id
self.upid_set(name, dediprog_id = dediprog_id)
def flash_start(self, target, images, context):
if len(images) != 1:
# yeah, this should be done in flash_start() but
# whatever...I don't feel like overriding it.
raise RuntimeError(
"%s: Configuration BUG: %s flasher supports only one image"
" but it has been called to flash %d images (%s)" % (
target.id, type(self),
len(images), ", ".join(images.keys())))
# WORKAROUND for dpcmd crashing when the filename is too long;
# we chdir into where the image is and run with a basename
context['kws'] = {
# note this only works with #1 image
'file_path': os.path.dirname(list(images.values())[0]),
'file_name': os.path.basename(list(images.values())[0]),
}
if self.sibling_port:
devpath, busnum, devnum = ttbl.usb_device_by_serial(
self.dediprog_id, self.sibling_port,
"busnum", "devnum")
if devpath == None or busnum == None or devnum == None:
raise RuntimeError(
"%s: cannot find Dediprog flasher connected to"
" as sibling in port #%d of USB device %s" % (
target.id, self.dediprog_id, self.sibling_port))
# dpcmd can use these two variables to filter who do we
# use
self.env_add["DPCMD_USB_BUSNUM"] = busnum
self.env_add["DPCMD_USB_DEVNUM"] = devnum
flash_shell_cmd_c.flash_start(self, target, images, context)
#: Path to *dpcmd*
#:
#: We need to use an ABSOLUTE PATH, as *dpcmd* relies on it to
#: find its database.
#:
#: Change by setting, in a :ref:`server configuration file
#: <ttbd_configuration>`:
#:
#: >>> ttbl.images.sf100linux_c.path = "/usr/local/bin/dpcmd"
#:
#: or for a single instance that then will be added to config:
#:
#: >>> imager = ttbl.images.sf100linux_c.path(...)
#: >>> imager.path = "/opt/bin/dpcmd"
path = "/usr/local/bin/dpcmd"
def flash_post_check(self, target, images, context):
"""
Checks the process returned with no errors
Looks further in the log file to ensure that is the case
"""
if len(images) != 1:
# yeah, this should be done in flash_start() but
# whatever...I don't feel like overriding it.
raise RuntimeError(
"%s: Configuration BUG: %s flasher supports only one image"
" but it has been called to flash %d images (%s)" % (
target.id, type(self),
len(images), ", ".join(images.keys())))
return flash_shell_cmd_c.flash_post_check(self, target, images, context)
def flash_read(self, _target, _image, file_name, image_offset = 0, read_bytes = None):
"""
Reads data from the SPI and writes them to 'file_name'
"""
cmdline = [ self.path, "--device", self.dediprog_id,
"-r", file_name, "-a", str(image_offset) ]
if read_bytes != None:
cmdline += [ "-l", str(read_bytes) ]
subprocess.check_output(cmdline, shell = False)
```
#### File: ttbd/ttbl/noyito.py
```python
import logging
import os
import subprocess
import serial
import commonl
import ttbl._install
import ttbl.capture
import ttbl.power
class reader_pc(ttbl.power.daemon_c):
"""
Implement a multiplexor so multiple users can read the Noyito's serial port
The Noyito reports at 2 Hz the value of all the channels on the serial
port; we will have multiple capturers, belonging to different
users, taking its output.
This multiplexor with ncat takes the serial port output and pipes
it to a Unix domain socket.
- use ncat because if there are no readers on the domain socket, it
doesn't even open the serial port
- supports N readers without conflicts or buffering issues.
This then allows creating another capture device,
:class:`channel_c`, which just takes the data from a single
channel (to enforce separation between users pulling data from
separate channels).
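For illustration, the multiplexed stream can also be inspected by
hand by connecting to the Unix socket the daemon creates in the
target's state directory (the */var/lib/ttbd* prefix below is an
assumption, it depends on the server configuration)::
  $ ncat -U /var/lib/ttbd/TARGETNAME/data_acquisition_1-ncat.socket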
**Target setup**
This has to be added to the targets's power rail as an explicit
off component:
>>> target.interface_impl_add(
>>> "power",
>>> "data_acquisition_1",
>>> ttbl.noyito.reader_pc(
>>> "/dev/serial/by-path/pci-0000:00:14.0-usb-0:3.1.6:1.0-port0",
>>> explicit = "off"
>>> )
>>> )
(unfortunately, it lacks a serial number that would ease handling
multiple devices); see :class:`commonl.usb_path_by_sibling_late_resolve`
and similar to map based on other devices with a USB serial #.
"""
def __init__(self, serial_device, **kwargs):
assert isinstance(serial_device, str)
ttbl.power.daemon_c.__init__(
self,
cmdline = [
"/usr/bin/ncat",
"--listen", "--keep-open",
"-U", '%(path)s/%(component)s-ncat.socket'
],
check_path = "/usr/bin/ncat",
**kwargs)
self.serial_device = serial_device
self.stdin = None
self.upid_set(f"Noyito 12-bit 10 channel ADC @{serial_device}",
serial_device = serial_device)
def on(self, target, component):
# open serial port to set the baud rate, then ncat gets
# started and it keeps the setting; default is 9600 8n1 no
# flow control, so we explicitly set what the device needs 115200.
with serial.Serial(self.serial_device, 115200) as f:
self.stdin = f
kws = dict(target.kws)
kws['name'] = 'ncat'
kws['component'] = component
commonl.rm_f(os.path.join(target.state_dir,
f"{component}-ncat.socket"))
ttbl.power.daemon_c.on(self, target, component)
def verify(self, target, component, cmdline_expanded):
kws = dict(target.kws)
kws.update(self.kws)
# bring in runtime properties (override the rest)
kws.update(target.fsdb.get_as_dict())
kws['component'] = component
return commonl.process_alive(self.pidfile % kws, self.check_path) != None
class channel_c(ttbl.capture.impl_c):
def __init__(self, noyito_component, noyito_obj, channels, **kwargs):
"""
:param dict channels: channels to capture, keyed by channel number
(1-10); each value is a dictionary of per-channel settings passed
straight to :mod:`ttbl.noyito-capture`, eg:
- *mode=bool* or *mode=onoff*, with *cutoff=1.3*: interpret the
signal as a boolean value, cutting off at 1.3 Volts
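For illustration only (the component name, capturer name and
settings are placeholders), a channel capturer could be added to
the target's *capture* interface, taking its data from channel 3
of the multiplexor declared with :class:`reader_pc`:
>>> target.interface_add(
>>>     "capture",
>>>     ttbl.capture.interface(
>>>         voltage3 = ttbl.noyito.channel_c(
>>>             "data_acquisition_1", noyito_obj,
>>>             { 3: { "name": "voltage", "mode": "onoff", "cutoff": "1.3" } })))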
"""
assert isinstance(noyito_component, str)
assert isinstance(channels, dict), \
"channels: expected a dictionary, got %s" % type(channels)
ttbl.capture.impl_c.__init__(
self, False, mimetype = "application/json",
**kwargs)
self.noyito_component = noyito_component
self.upid = noyito_obj.upid
self.capture_program = commonl.ttbd_locate_helper(
"noyito-capture.py", ttbl._install.share_path,
log = logging, relsrcpath = ".")
self.channell = []
for channel, data in channels.items():
assert isinstance(channel, int) and channel > 0 and channel <= 10, \
"channel: channel descriptor has to be an integer 0-10," \
" got %s" % type(channel)
# matches ttbd/noyito-capture.py.transform.mode
mode = data.get('mode', None)
assert mode in ( None, 'mode', 'bool', 'onoff' ), \
"channel mode has to be one of: None, mode, bool, onoff; " \
" got %s" % mode
name = data.get('mode', str(channel))
assert isinstance(name, str), \
"name: expected a string; got %s" % type(name)
l = [ "%s" % channel ]
for name, val in data.items():
l.append("%s=%s" % (name, val))
self.channell.append(":".join(l))
def start(self, target, capturer, path):
# power on the serial port capturer
target.power.put_on(target, ttbl.who_daemon(),
{ "component": self.noyito_component },
None, None )
stream_filename = capturer + ".data.json"
log_filename = capturer + ".capture.log"
pidfile = "%s/capture-%s.pid" % (target.state_dir, capturer)
logf = open(os.path.join(path, log_filename), "w+")
p = subprocess.Popen(
[
"stdbuf", "-e0", "-o0",
self.capture_program,
os.path.join(path, stream_filename),
"%s/%s-ncat.socket" % (target.state_dir, capturer),
] + self.channell,
bufsize = -1,
close_fds = True,
shell = False,
stderr = subprocess.STDOUT, stdout = logf.buffer,
)
with open(pidfile, "w+") as pidf:
pidf.write("%s" % p.pid)
ttbl.daemon_pid_add(p.pid)
return True, {
"default": stream_filename,
"log": log_filename
}
def stop(self, target, capturer, path):
pidfile = "%s/capture-%s.pid" % (target.state_dir, capturer)
commonl.process_terminate(pidfile, tag = "capture:" + capturer,
wait_to_kill = 2)
```
#### File: ttbd/ttbl/openocd.py
```python
raise ImportError("openocd driver is bitrotten and cannot be used")
#
# FIXME: clarify the whole target vs target_id in the addrmap, it is
# currently a mess
# FIXME: add a configuration methodology for
# .addrmap_add()
# .board_add()
import codecs
import contextlib
import errno
import logging
import os
import re
import socket
import time
import traceback
import types
import pexpect
try:
import pexpect.fdpexpect
except ImportError:
# RHEL 7 -> fdpexpect is a separate module, not a submod of pexpect
import fdpexpect
pexpect.fdpexpect = fdpexpect
# FIXME: remove -> cleanup
try:
from pexpect.exceptions import TIMEOUT as pexpect_TIMEOUT
from pexpect.exceptions import EOF as pexpect_EOF
except ImportError:
from pexpect import TIMEOUT as pexpect_TIMEOUT
from pexpect import EOF as pexpect_EOF
import commonl
import ttbl
import ttbl.debug
import ttbl.images
import ttbl.power
# FIXME: rename to address_maps
addrmaps = {
# Address maps for each (target/BSP) in a board
# Each entry is named after the target/BSP inside the board.
# target_id is the ID of the target we select for doing
# operations; if there is none, there is only one and we
# don't have to waste time running target selection commands.
# target is the ID of the target we use to write/read (might
# be different than 'target_id' for some boards).
'quark_se_a101': {
'rom': dict(load_addr=0xffffe400, target = None),
'bootloader': dict(load_addr=0x40000000, target = None),
'x86': dict(load_addr=0x40010000, target = 1, target_id = 0),
'arc': dict(load_addr=0x40034000, target = 1, target_id = 1),
},
'quark_se': {
# QMSI v1.1.0 firmware
'rom': dict(load_addr=0xffffe000, target = None),
'x86': dict(load_addr=0x40030000, target = 1, target_id = 0),
'arc': dict(load_addr=0x40000000, target = 1, target_id = 1),
},
'quark_d2000_crb': {
'x86': dict(load_addr=0x00180000, target = 0, target_id = 0),
'bootloader': dict(load_addr=0x00000000, target = None),
'rom': dict(load_addr=0x00000000, target = None),
},
'stm32f1': {
'arm': dict(load_addr = 0x08000000),
},
'quark_x1000': {
'x86': dict(load_addr = 0x00000000),
},
'frdm_k64f': {
'arm': dict(load_addr = 0x00000000),
},
'nrf5x': {
'arm': dict(target_id = 0),
},
'sam_e70_xplained': {
'arm': dict(load_addr = 0x00000000),
},
'sam_v71_xplained': {
'arm': dict(load_addr = 0x00000000),
},
'snps_em_sk': {
'arc': dict(load_addr = 0x00000000),
},
}
# FIXME: move to board[name].akas?
board_synonyms = {
'quark_se_devboard': 'quark_se_ctb',
"nrf51_blenano": "nrf51",
"nrf51_pca10028": "nrf51",
"nrf52840_pca10056": "nrf52840",
"nrf52_blenano2": "nrf52",
"nrf52_pca10040": "nrf52",
}
#: Board description dictionary
#:
#: This is a dictionary keyed by board / MCU name; when the
#: OpenOCD driver is loaded, it is given this name and the entry
#: is opened to get some operation values.
#:
#: Each entry is another dictionary of key/value where key is a
#: string, value is whatever.
#:
#: FIXME: many missing
#:
#: - :data:`hack_reset_halt_after_init
#: <ttbl.flasher.openocd_c.hack_reset_halt_after_init>`
boards = {
'arduino_101': dict(
addrmap = 'quark_se_a101',
targets = [ 'x86', 'arc' ],
interface = 'interface/ftdi/flyswatter2.cfg',
board = None,
# Well, Quarks are sometimes this quirky and the best way
# to reset halt them is by issuing a reset and then a
# reset halt.
reset_halt_command = "reset; reset halt",
hack_reset_after_power_on = True,
config = """
interface ftdi
ftdi_serial "%(serial_string)s"
# Always needed, or openocd fails -100
ftdi_vid_pid 0x0403 0x6010
source [find board/quark_se.cfg]
quark_se.quark configure -event gdb-attach {
reset halt
gdb_breakpoint_override hard
}
quark_se.quark configure -event gdb-detach {
resume
shutdown
}
"""
),
'galileo': dict(
addrmap = 'quark_x1000',
targets = [ 'x86' ],
config = """
interface ftdi
# Always needed, or openocd fails -100
ftdi_vid_pid 0x0403 0x6010
ftdi_serial "%(serial_string)s"
source [find board/quark_x10xx_board.cfg]
"""
),
'qc10000_crb' : dict(
addrmap = 'quark_se',
targets = [ 'x86', 'arc' ],
# Well, Quarks are sometimes this quirky and the best way
# to reset halt them is by issuing a reset and then a
# reset halt.
reset_halt_command = "reset; reset halt",
hack_reset_after_power_on = True,
config = """
interface ftdi
ftdi_serial "%(serial_string)s"
# Always needed, or openocd fails -100
ftdi_vid_pid 0x0403 0x6010
ftdi_channel 0
ftdi_layout_init 0x0010 0xffff
ftdi_layout_signal nTRST -data 0x0100 -oe 0x0100
source [find board/quark_se.cfg]
"""
),
# OpenOCD 0.8
'quark_d2000_crb_v8' : dict(
addrmap = 'quark_d2000_crb',
targets = [ 'x86' ],
#board = 'quark_d2000_onboard',
config = """
interface ftdi
ftdi_serial "%(serial_string)s"
# Always needed, or openocd fails -100
ftdi_vid_pid 0x0403 0x6014
ftdi_channel 0
ftdi_layout_init 0x0000 0x030b
ftdi_layout_signal nTRST -data 0x0100 -noe 0x0100
ftdi_layout_signal nSRST -data 0x0200 -oe 0x0200
# default frequency but this can be adjusted at runtime
#adapter_khz 1000
adapter_khz 6000
reset_config trst_only
source [find target/quark_d2000.cfg]
"""
),
# OpenOCD 0.10
'quark_d2000_crb' : dict(
addrmap = 'quark_d2000_crb',
targets = [ 'x86' ],
#board = 'quark_d2000_onboard',
config = """
interface ftdi
ftdi_serial "%(serial_string)s"
# Always needed, or openocd fails -100
ftdi_vid_pid 0x0403 0x6014
ftdi_channel 0
ftdi_layout_init 0x0000 0x030b
ftdi_layout_signal nTRST -data 0x0100 -noe 0x0100
ftdi_layout_signal nSRST -data 0x0200 -oe 0x0200
# default frequency but this can be adjusted at runtime
#adapter_khz 1000
adapter_khz 6000
reset_config trst_only
source [find target/quark_d20xx.cfg]
"""
),
'quark_se_ctb': dict(
addrmap = 'quark_se',
targets = [ 'x86', 'arc' ],
interface = None,
board = 'quark_se',
hack_reset_after_power_on = True,
config = """
interface ftdi
ftdi_serial "%(serial_string)s"
# Always needed, or openocd fails -100
ftdi_vid_pid 0x0403 0x6010
# oe_n 0x0200
# rst 0x0800
ftdi_channel 0
ftdi_layout_init 0x0000 0xffff
ftdi_layout_signal nTRST -data 0x0100 -oe 0x0100
"""
),
#
# This requires openocd v0.10.0 (pre-development as of 5/9/16)
#
'frdm_k64f': dict(
addrmap = 'frdm_k64f',
targets = [ 'arm' ],
target_id_names = { 0: 'k60.cpu'},
interface = None,
board = None,
write_command = "flash write_image erase %(file)s %(address)s",
config = """\
interface cmsis-dap
cmsis_dap_serial %(serial_string)s
source [find target/k60.cfg]
"""
),
'nrf51': dict(
addrmap = 'nrf5x', # Only to describe targets
targets = [ 'arm' ],
interface = None,
board = None,
write_command = "program %(file)s verify",
config = """\
source [find interface/jlink.cfg]
jlink serial %(serial_string)s
transport select swd
set WORKAREASIZE 0
source [find target/nrf51.cfg]
"""
),
'nrf52': dict(
addrmap = 'nrf5x', # Only to describe targets
targets = [ 'arm' ],
interface = None,
board = None,
write_command = "program %(file)s verify",
# We use the nrf51's config, works better
config = """
source [find interface/jlink.cfg]
jlink serial %(serial_string)s
transport select swd
set WORKAREASIZE 0
source [find target/nrf51.cfg]
"""
),
'nrf52840': dict(
addrmap = 'nrf5x', # Only to describe targets
targets = [ 'arm' ],
interface = None,
board = None,
write_command = "program %(file)s verify",
# We use the nrf51's config, works better
config = """
source [find interface/jlink.cfg]
jlink serial %(serial_string)s
transport select swd
set WORKAREASIZE 0
source [find target/nrf51.cfg]
"""
),
#
# This requires openocd v0.10.0 (pre-development as of 5/9/16)
#
'sam_e70_xplained': dict(
addrmap = 'sam_e70_xplained',
targets = [ 'arm' ],
target_id_names = { 0: 'atsame70q21.cpu'},
interface = None,
board = None,
write_command = "flash write_image erase %(file)s %(address)s",
config = """\
interface cmsis-dap
cmsis_dap_serial %(serial_string)s
source [find target/atsamv.cfg]
"""
),
#
# This requires openocd v0.10.0 (pre-development as of 5/9/16)
#
'sam_v71_xplained': dict(
addrmap = 'sam_v71_xplained',
targets = [ 'arm' ],
target_id_names = { 0: 'samv71.cpu'},
interface = None,
board = None,
write_command = "flash write_image erase %(file)s %(address)s",
config = """\
interface cmsis-dap
cmsis_dap_serial %(serial_string)s
source [find target/atsamv.cfg]
"""
),
'snps_em_sk': dict(
addrmap = 'snps_em_sk',
targets = [ 'arc' ],
target_id_names = { 0: 'arc-em.cpu'},
interface = None,
board = None,
config = """\
interface ftdi
ftdi_serial "%(serial_string)s"
# Always needed, or openocd fails -100
ftdi_vid_pid 0x0403 0x6014
source [find board/snps_em_sk.cfg]
"""
),
'': dict(
addrmap = '',
interface = None,
board = '',
config = ""
),
}
class action_logadapter_c(logging.LoggerAdapter):
"""
"""
def __init__(self, logger, extra):
logging.LoggerAdapter.__init__(self, logger, extra)
# this will be set later by the _test_target_link() method
self.prefix = ""
def process(self, msg, kwargs):
return 'OpenOCD/%s: %s: %s ' % (self.prefix, self.action, msg), kwargs
class pc(ttbl.power.daemon_c, ttbl.images.impl_c, ttbl.debug.impl_c):
"""
:param str serial: serial number of the target board; this is
usually a USB serial number.
:param str board: name of the board we are connecting against;
this has to be defined in :data:`boards` or
:data:`board_synonyms`.
:param bool debug: (optional) run OpenOCD in debugging mode,
printing extra information to the log (default *False*).
*target ID*
OpenOCD will operate on targets (different from TCF's targets);
these might be one or more CPUs in the debugged system. Each has an
ID, which by default is zero.
*component to OpenOCD target mapping*
Each component configured in the target addition maps to an
OpenOCD target in *boards[X][targets]*.
**OLD OLD**
This is a flasher object that uses OpenOCD to provide flashing
and GDB server support.
The object starts an OpenOCD instance (that runs as a daemon)
-- it does this behaving as a power-control implementation
that is plugged at the end of the power rail.
.. note: OpenOCD will crash randomly for unknown reasons; this
implementation makes the power system think the target is
off when OpenOCD has crashed, so it can be restarted.
To execute commands, it connects to the daemon via TCL and
runs them using the ``'capture "OPENOCDCOMMAND"'`` TCL command
(FIXME: is there a better way?). The telnet port is open for
manual debugging (check your firewall! **no passwords!**); the GDB
ports are also available.
The class knows the configuration settings for different
boards (as given in the `board_name` parameter). It is also
possible to point it to specific OpenOCD paths when different
builds / versions need to be used.
Note how entry points from the flasher_c class all start with
underscore. Functions ``__SOMETHING()`` are those that have to be
called with a ``_expect_mgr`` context taken [see comments
on ``__send_command`` for the reason].
:param str board_name: name of the board to use, to select
proper configuration parameters. Needs to be declared in
*ttbl.flasher.openocd_c._boards*.
When starting OpenOCD, run a reset halt immediately after.
This is used when flashing, as we power cycle before to try
to have the target in a proper state--we want to avoid it
running any code that might alter the state again.
Now, this is used in combination with another setting,
board specific, that says if the reset has to be done or
not in method :meth:_power_on_do_openocd_verify().
But why? Because some Quark SE targets, when put in deep
sleep mode, OpenOCD is unable to reset halt them, returning
something like:
> reset halt
JTAG tap: quark_se.cltap tap/device found: 0x0e765013 (mfg: 0x009 (Intel), part: 0xe765, ver: 0x0)
Enabling arc core tap
JTAG tap: quark_se.arc-em enabled
Enabling quark core tap
JTAG tap: quark_se.quark enabled
target is still running!
target running, halt it first
quark_se_target_reset could not write memory
in procedure 'reset' called at file "command.c", line 787
So what we are trying to do, and it is a *horrible hack*,
is to hopefully catch the CPU before it gets into that
mode, and when it does, it bails out if it fails to reset
and restarts OpenOCD and maybe (maybe) it at some point
will get it.
Now, this is by NO MEANS a proper fix. The right fix would
be for OpenOCD to be able to reset in any circumstance
(which it doesn't). An alternative would be to find some
kind of memory location OpenOCD can write to that will take
the CPU out of whichever state it gets stuck at which we
can run when we see that.
Zephyr's sample samples/board/quark_se/power_mgr is very
good at making this happen.
"""
def __init__(self, serial, board, debug = False,
openocd_path = "/usr/bin/openocd",
openocd_scripts = "/usr/share/openocd/scripts"):
assert isinstance(serial, str)
assert isinstance(board, str)
assert isinstance(debug, bool)
assert isinstance(openocd_path, str)
assert isinstance(openocd_scripts, str)
self.serial = serial
self.board_name = board_synonyms.get(board, board)
if not self.board_name in boards:
raise ValueError("OpenOCD: unknown board '%s' (expected %s %s)" %
(self.board_name,
" ".join(list(boards.keys())),
" ".join(list(board_synonyms.keys()))))
self.debug = debug
self.board = boards[self.board_name]
if 'addrmap' in self.board:
self.addrmap = addrmaps[boards[self.board_name]['addrmap']]
else:
self.addrmap = None
self.openocd_path = openocd_path
self.openocd_scripts = openocd_scripts
self.log = None
# FIXME: Expose all these tags/fsdb properties too or move
# them to config?
#: FIXME
self.hard_recover_rest_time = None
#: FIXME:
self.hack_reset_after_power_on = False
#: Immediately after running the OpenOCD initialization
#: sequence, reset halt the board.
#:
#: This is meant to be used when we know we are power cycling
#: before flashing. The board will start running as soon as we
#: power it on, thus we ask OpenOCD to stop it immediately
#: after initializing. There is still a big window of time on
#: which the board can get itself in a bad state by running
#: its own code.
#:
#: (bool, default False)
self.hack_reset_halt_after_init = 0
#: Immediately after running the OpenOCD initialization
#: sequence, reset the board.
#:
#: This is meant to be used for hacking some boards that don't
#: start properly OpenOCD unless this is done.
#:
#: (bool, default False)
self.hack_reset_after_init = 0
# the fields openocd-* are set in on() as target properties
cmdline = [
openocd_path,
"--log_output", "%(path)s/%(component)s-%(name)s.log",
"-c", 'tcl_port %(openocd-tcl-port)d',
"-c", 'telnet_port %(openocd-telnet-port)d',
"-c", 'gdb_port %(openocd-gdb-port)d',
"-s", self.openocd_scripts
]
if debug:
cmdline += [ "-d" ]
# add a specific config file for the interface
interface_cfg_file = self.board.get('interface', None)
if interface_cfg_file != None:
cmdline += [ "-f", interface_cfg_file ]
# if board is defined, load OPENOCD_SCRIPTS/board/BOARD.cfg
board_cfg_file = self.board.get('board', None)
if board_cfg_file != None:
openocd_scripts = self.openocd_scripts
if openocd_scripts == None:
openocd_scripts = ""
cmdline += [
"-f", os.path.join(openocd_scripts, "board",
board_cfg_file + ".cfg")
]
ttbl.power.daemon_c.__init__(self, cmdline = cmdline)
ttbl.images.impl_c.__init__(self)
ttbl.debug.impl_c.__init__(self)
self.upid_set("OpenOCD supported JTAG",
usb_serial_number = self.serial)
class error(Exception):
# FIXME: rename to exception
pass
# Something that went wrong interacting with OpenOCD, not other errors
class expect_connect_e(error):
pass
#
# Power interface
#
def verify(self, target, component, cmdline_expanded):
self.log = target.log
# ttbl.power.daemon_c -> verify if the process has started
# returns *True*, *False*
#
# A full check would connect to OpenOCD's TCL port and query the
# target state; for now we only do a minimal check (see the DEBUG
# note below).
# :returns: *True* if the OpenOCD daemon seems to be running, *False* otherwise
# :raises: anything on errors
# DEBUG: check only the logfile exists, third command line field
# this is the log file name, that has been expanded already by
# the daemon_c class calling start
return os.path.exists(cmdline_expanded[2])
def __pending(self):
if False:
# Try up to 4 seconds to start properly -- experimentation has
# shown that if it fails with ECONNREFUSED the first two
# times, it means the thing has crashed
timedout = False
crashed = False
try:
# List targets to determine if we have a good
# initialization
self.log.action = "init verification (%d/%d)" \
% (count + 1, top)
with self._expect_mgr():
self.__send_command(
"init command JTAG (%d/%d)" % (count + 1, top), "init")
if self.board.get("hack_reset_after_power_on", False) \
and self.hack_reset_after_power_on:
# This board needs this hack because we are
# power cycling to flash
self._power_on_reset_hack(count, top)
hack_reset_after_init = self.board.get(
"hack_reset_after_init", self.hack_reset_after_init)
for cnt in range(hack_reset_after_init):
try:
self.__target_reset(
"for reset after init [%d/%d]"
% (cnt + 1, hack_reset_after_init))
break
except self.error as e:
if cnt >= hack_reset_after_init:
raise
logging.error(
"[%d/%d: error resetting, retrying: %s",
cnt, hack_reset_after_init, e)
else:
assert False # Should never get here
hack_reset_halt_after_init = self.board.get(
"hack_reset_halt_after_init",
self.hack_reset_halt_after_init)
for cnt in range(hack_reset_halt_after_init):
try:
self.__target_reset_halt(
"for reset/halt after init [%d/%d]"
% (cnt + 1, hack_reset_halt_after_init))
break
except self.error as e:
if cnt >= hack_reset_halt_after_init:
raise
logging.error(
"[%d/%d: error reset halting, retrying: %s",
cnt, hack_reset_halt_after_init, e)
else:
assert False # Should never get here
r = self.__send_command(
"init verification JTAG (%d/%d)" % (count + 1, top),
"targets",
[
re.compile(
r" [0-9]+\* .*(halted|reset|running|unknown)"),
# this is bad news
re.compile(
r" [0-9]+\* .*(tap-disabled)"),
])
if r == 1:
self.log.error("OpenOCD can't connect to the target"
" (tap is disabled)")
self._log_output()
return False
return True
except OSError as e:
if e.errno == errno.ECONNREFUSED and timedout:
self.log.error("connection refused afer a timeout; "
"crashed?")
self._log_output()
return False
if e.errno == errno.ECONNREFUSED and crashed:
self.log.error("connection refused afer an EOF; crashed?")
self._log_output()
return False
except self.error_timeout as e:
timedout = True
self.log.error("timedout, retrying: %s" % e)
except self.error_eof as e:
if crashed == True:
self.log.error("EOF again, seems crashed?")
self._log_output()
return False
crashed = True
self.log.error("EOF, retrying: %s" % e)
except self.error as e:
self.log.error("retrying: %s" % e)
def on(self, target, component):
self.log = target.log
self.log.action = "openocd start"
# Well, reusing the TCP port range is creating plenty of
# problems, as when we kill it and try to restart it, the
# sockets are lingering and it fails to reopen it...
#
# So we'll go random -- if it fails, it'll be restarted
# with another one
tcp_port_base = commonl.tcp_port_assigner(
2 + len(self.board['targets']) - 1,
ttbl.config.tcp_port_range)
# these are so the command line can be substituted
target.fsdb.set("openocd-serial-string", self.serial)
target.fsdb.set("openocd-tcp-port", tcp_port_base + 1)
target.fsdb.set("openocd-telnet-port", tcp_port_base)
target.fsdb.set("openocd-gdb-port", tcp_port_base + 2)
self.cmdline_extra = []
# configuration text for the board itself
#
# this can be read anytime, but can only be written once we
# know the target and thus it has to happen in the on() method
if self.board['config']:
name = os.path.join(target.state_dir,
"openocd-board-%s.cfg" % component)
with open(name, "w") as cfgf:
cfgf.write(self.board['config'] % kws)
self.cmdline_extra += [ "-f", name ]
ttbl.power.daemon_c.on(self, target, component)
#
# Still not tested
#
def _pattern_or_str(self, expect):
if hasattr(expect, "pattern"):
waiting_for = expect.pattern
elif isinstance(expect, str):
waiting_for = expect
else: # Iterable?
try:
waiting_for = []
for e in expect:
waiting_for.append(self._pattern_or_str(e))
except:
waiting_for = expect
return waiting_for
@contextlib.contextmanager
def _expect_mgr(self):
"""
Open up a socket to the OpenOCD TCL port and start an expect
object to talk to it.
This is a context manager; upon return, kill it all.
"""
def read_nonblocking_patched(self, size = 1, timeout = None):
try:
return self.read_nonblocking_original(size, timeout)
except OSError as e:
if e.args[0] == errno.EAGAIN:
return ""
raise
except:
raise
self.p = None
self.sk = None
self.pid = None
self.pid_s = None
tcp_port_base = -1
try:
try:
self.pid_s = self.tt.fsdb.get("openocd.pid")
if self.pid_s == None:
raise self.error("can't find OpenOCD's pid")
self.pid = int(self.pid_s)
tcp_port_base = int(self.tt.fsdb.get("openocd.port"))
self.log.debug("connecting to openocd pid %d port %d"
% (self.pid, tcp_port_base + 1))
self.sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# TCL connection!
self.sk.settimeout(5)
self.sk.connect(("localhost", tcp_port_base + 1))
self.p = pexpect.fdpexpect.fdspawn(
self.sk.fileno(),
# Open logfile with no codec anything, this seems to
# yield the best result to avoid UnicodeErrors; we
# open it, however, as utf-8,errors=replace
# Append to log file, so we can tell the full story
logfile = open(self.log_name + ".expect", "ab"),
timeout = 5)
# FDexpect seems to have a bug where an EAGAIN is just
# floated up instead of waiting
self.p.read_nonblocking_original = self.p.read_nonblocking
self.p.read_nonblocking = types.MethodType(
read_nonblocking_patched, self.p)
except (Exception, OSError) as e:
s = "expect init (pid %s port %d) failed: %s" \
% (self.pid_s, tcp_port_base + 1, e)
if type(e) == Exception: # Code BUG?
s += "\n" + traceback.format_exc()
self.log.warning(s)
raise self.expect_connect_e(s)
yield
finally:
# Make sure the cleanup is always executed no matter what
if self.p != None:
# Some pexpect versions don't close this file properly
if self.p.logfile:
del self.p.logfile
del self.p
if self.sk != None:
try:
self.sk.shutdown(socket.SHUT_RDWR)
except Exception as e:
self.log.warning("Error shutting down socket: %s", e)
self.sk.close()
del self.sk
def _log_error_output(self, msg = "n/a"):
self.log.error("Error condition: " + msg)
if self.p != None:
for line in self.p.before.splitlines():
self.log.error("output[before]: "
+ line.encode('utf-8').strip())
# FIXME: not really needed, it adds too much blub
#with codecs.open(self.log_name + ".expect", "r", encoding = 'utf-8',
# errors = 'replace') as inf:
# for line in inf:
# self.log.error("output: " + line.strip())
def _log_output(self):
with codecs.open(self.log_name, "r", encoding = 'utf-8',
errors = 'replace') as inf:
for line in inf:
self.log.error("log: " + line.strip())
def __send_command(self, action, command, expect = None,
timeout = 3):
"""
:param str|list|regex expect: what to expect. Don't use open
ended regular expressions (eg: "something.*") as
that would capture the last character that
expect sends (the transmission terminator) and
this function would not be able to find it.
:param int timeout: Default timeout for normal command
execution; commands that take longer to execute (like
memory writes, etc), shall increase it
Note this creates/connects a socket and an expect object with
each command we send. Sounds (and is) quite a lot of overhead,
but it has proven to work much more dependably than keeping an
expect object and socket around.
Note this has to be called from within a 'with
self._expect_mgr' block. If you run multiple commands, you
might want to use a single block for them, otherwise OpenOCD
runs out of sockets (and doesn't recycle them fast enough) and
connections are rejected, resulting in EOF
errors. Annoying. As we kill the OpenOCD process when we power
off the target, this seems to be good enough to not run out of sockets.
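For illustration, a sketch of the typical calling pattern used
elsewhere in this class (command strings and the expected regex
are examples, not a fixed API):
>>> with self._expect_mgr():
>>>     self.__send_command("halt target", "halt")
>>>     self.__send_command("check targets", "targets",
>>>                         re.compile(r" [0-9]+\* .*halted"))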
"""
self.log.action = action
waiting_for = self._pattern_or_str(expect)
r = None
try:
self.log.debug("running: %s" % command)
if command:
self.p.send(('capture "' + command + '"\x1a').encode("utf-8"))
self.p.timeout = timeout # spec in the fn does not work
if expect != None:
self.log.debug("waiting for response [%.fs]: %s",
timeout, expect)
r = self.p.expect(expect, timeout = timeout)
self.log.debug("got response: %d", r)
self.log.debug("waiting for response terminator [%.fs]", timeout)
waiting_for = "response terminator"
self.p.expect("\x1a")
self.log.info("completed, r = %s" % r)
except pexpect_TIMEOUT as e:
self.log.error("timeout waiting for '%s'" % waiting_for)
self._log_error_output()
raise self.error_timeout("%s: failed (timeout)" % self.log.action)
except pexpect_EOF as e:
self.log.error("can't find '%s' (EOF)" % waiting_for)
self._log_error_output()
# Is OpenOCD alive at this point?
try:
# note __send_command() is only called inside a 'with
# expect_mgr()' block, which will have initialized self.pid*
os.kill(self.pid, 0)
except OSError as e:
self.log.info("openocd[%d]: might be dead", self.pid)
raise self.error_eof("%s: failed (EOF)" % self.log.action)
except Exception as e:
msg = "unknown error: %s\n%s" % (e, traceback.format_exc())
self._log_error_output(msg)
raise RuntimeError("%s: failed (exception): %s"
% (self.log.action, e))
return r
def _per_target_setup(self, _target, _component):
# FIXME: not sure we'll really need this anymore
if self.serial:
self.log.prefix = "%s[%s]" % (self.board_name, self.serial)
else:
self.log.prefix = "%s" % (self.board_name)
def _to_target_id(self, component):
# each component is described in the address map and might
# have a target id assigned; if it is not assigned, we assume
# it is zero
return self.addrmap[component].get('target_id', 0)
def __target_id_select(self, target_id, for_what = ""):
# This doesn't give good output...
if 'target_id_names' in self.board:
target_id_name = self.board['target_id_names'].get(
target_id, "%d" % target_id)
else:
target_id_name = "%d" % target_id
self.__send_command("set target %s %s" % (target_id, for_what),
"targets %s" % target_id_name)
# ... so we look for TARGET* in the targets output
# ' 1* quark_se.arc-em arc32 little quark_se.arc-em halted'
self.__send_command(
"check target %s selected %s" % (target_id_name, for_what),
"targets",
re.compile(r" %d\* .*(halted|reset|running)" % target_id))
def __target_id_halt(self, target_id, for_what = ""):
try:
self.__target_id_select(target_id, for_what)
# This doesn't give good output...
# ... so we look for halted in the @targets output:
## 1* quark_se.arc-em arc32 little quark_se.arc-em halted
self.__send_command("halt target %d %s" % (target_id, for_what),
"halt")
r = self.__send_command(
"check target %d halted %s" % (target_id, for_what),
"targets",
[
re.compile(r" %d\* .*(halted|reset)" % target_id),
# Bug? it is not timing out, so we catch others here
re.compile(r" %d\* .*" % target_id),
])
if r != 0:
msg = "halt target #%d %s: failed; got r = %d" \
% (target_id, for_what, r)
self._log_error_output(msg)
raise self.error(msg)
return True
except self.error:
self.log.error("halt target %d %s: failed" % (target_id, for_what))
raise
def __target_reset(self, for_what):
# this is used by the power on sequence, the imaging sequence
# and the debug reset sequence
# Expects being in a _expect_mgr() block
r = self.__send_command("target reset/run %s" % for_what,
"reset run",
[
"could not halt target",
# Freedom Boards k64f
"MDM: Chip is unsecured. Continuing.",
"target running",
"", # nucleo-f103rb
])
if r == 0:
self._log_error_output()
raise self.error("Cannot reset %s (r %d)" % (for_what, r))
def __target_reset_halt(self, for_what = ""):
# called from _power_on_do_openocd_verify
# called from _target_reset_halt
# this assumes we are inside a 'with self._expect_mgr():' block
self.log.action = "target reset halt init"
command = self.board.get('reset_halt_command', "reset halt")
r = self.__send_command(
"target reset/halt %s" % for_what,
command,
[
"target state: halted",
"target halted due",
# ARC (seen w/ EM Starter Kit's) driver reports this
"JTAG tap: arc-em.cpu tap/device found:",
# Freedom Boards k64f
"MDM: Chip is unsecured. Continuing.",
# Errors
"could not halt target",
"timed out while waiting for target halted",
"Not halted",
])
if r > 3:
msg = "Cannot reset/halt %s (r %d)" % (for_what, r)
self._log_error_output(msg)
raise self.error("Cannot reset/halt %s (r %d)" % (for_what, r))
def tt_flasher_target_reset_halt(self, target, _components):
# - called from tt_flasher.images_do_set
# - FIXME: move self.flasher.target_reset_halt -> _target_reset_halt
tries = 1
tries_max = 2
# FIXME: current limitation, can't access the tags from the
# constructor as the ones we add in target_add() aren't there
# yet.
wait = \
float(self.tags.get('hard_recover_rest_time', 2))
while tries <= tries_max:
# The Arduino101 gets so stuck sometimes
try:
self.flasher.target_reset_halt(for_what)
break
except self.flasher.error:
pass
try_s = "%d/%d" % (tries, tries_max)
time.sleep(2)
try:
self.flasher.target_reset("[recover reset #1 %s] " % try_s
+ for_what)
except self.flasher.error:
pass
try:
self.flasher.target_reset_halt("[retry %s] " % try_s
+ for_what)
break
except self.flasher.error:
pass
# In some targets, this fails because maybe we just
# power-cycled and the JTAG said it was ready but it
# is really not ready...when that happens, just
# power-cycle again.
# well, that didn't work either; bring the big guns,
# power cycle it and try the whole thing again
wait_s = (1 + 2.0 * tries/tries_max) * wait
self.log.info("Failed to reset/halt, power-cycle (%.2fs) "
"and retrying (try %d/%d)"
% (wait_s, tries, tries_max))
self.power_cycle(self.owner_get(), wait_s)
tries += 1
else:
# FIXME: pass the exception we get or the log or something
raise self.error("Can't reset/halt the target")
def __target_id_resume(self, target_id, for_what = ""):
try:
self.__target_id_select(target_id, for_what)
# This doesn't give good output...
r = self.__send_command(
"target#%d: resume %s" % (target_id, for_what),
"resume",
[
"",
"Target not halted",
])
if r != 0:
self._log_error_output()
raise self.error("target#%d: resume %s failed: %s"
% (target_id, for_what, r))
# ... so we'd look at targets output, but by the time we
# look it might have transitioned to another state, so
# we'll just pray it works...
except self.error:
self._log_error_output()
self.log.error("target#%d: cannot resume %s"
% (target_id, for_what))
raise
#
# Debugging interface
#
def debug_start(self, target, components):
# not much to do
pass
def debug_stop(self, target, components):
# not much to do
pass
def debug_halt(self, target, components):
self.log = target.log
self.log.action = "target halt"
with self._expect_mgr():
for component in components:
# this operates on each component
self.__target_id_halt(self._to_target_id(component),
"debug halt")
def debug_reset(self, target, _components):
self.log = target.log
self.log.action = "target reset"
if set(_components) != set(self.components):
raise NotImplementedError(
"OpenOCD can only reset all components at the same time (%s)"
% (",".join(self.components)))
self.log.action = "target reset"
with self._expect_mgr():
# this operates on all targets at the same time
return self.__target_reset("debug reset")
def debug_reset_halt(self, target, _components):
self.log = target.log
self.log.action = "target reset halt"
if set(_components) != set(self.components):
raise NotImplementedError(
"OpenOCD can only reset/halt all components at the same time (%s)"
% (",".join(self.components)))
self.log.action = "target reset halt"
with self._expect_mgr():
# this operates on all targets at the same time
return self.__target_reset_halt("debug reset halt")
def debug_resume(self, target, _components):
self.log = target.log
self.log.action = "target resume"
if set(_components) != set(self.components):
raise NotImplementedError(
"OpenOCD can only reset/halt all components at the same time (%s)"
% (",".join(self.components)))
self.log.action = "target resume"
with self._expect_mgr():
for component in components:
# this operates on each component
self.__target_id_resume(self._to_target_id(component),
"debug resume")
def debug_list(self, target, components):
# FIXME: self.flasher should be providing this information, this
# is breaking segmentation
count = 2 # port #0 is for telnet, #1 for TCL
tcp_port_base_s = self.fsdb.get("openocd.port")
if tcp_port_base_s == None:
return "Debugging information not available, power on?"
tcp_port_base = int(tcp_port_base_s)
s = "OpenOCD telnet server: %s %d\n" \
% (socket.getfqdn('0.0.0.0'), tcp_port_base)
for target in self.flasher.board['targets']:
s += "GDB server: %s: tcp:%s:%d\n" % (target,
socket.getfqdn('0.0.0.0'),
tcp_port_base + count)
count +=1
if self.fsdb.get('powered') != None:
s += "Debugging available as target is ON"
else:
s += "Debugging not available as target is OFF"
return s
def debug_command(self, target, component):
self.log = target.log
self.log.action = "command run"
with self._expect_mgr():
self.__send_command("command from user", cmd)
return self.p.before
# Wrap actual reset with retries
def target_reset(self, for_what = ""):
tries = 1
tries_max = 5
# FIXME: current limitation, can't access the tags from the
# constructor as the ones we add in target_add() aren't there
# yet.
wait = \
float(self.tags.get('hard_recover_rest_time', 10))
while tries <= tries_max:
# The Arduino101 gets so stuck sometimes
try:
self.flasher.target_reset(for_what)
break
except self.flasher.error:
pass
# Try again
try:
self.flasher.target_reset(for_what)
break
except self.flasher.error:
pass
# Bring the big guns, power cycle it
if wait != None:
wait_s = tries * wait
self.log.info("Failed to reset/run, power-cycle (%.2fs) "
"and retrying (try %d/%d)"
% (wait_s, tries, tries_max))
self.power_cycle(self.owner_get(), wait_s)
tries += 1
else:
# FIXME: pass the exception we get or the log or something
raise self.error("Can't reset/run the target")
#
# Images interface
#
def _image_write(self, image_type, file_name, timeout_factor = 1,
verify = True):
if not image_type in self.addrmap:
raise ValueError("%s: unknown image type" % image_type)
load_addr = self.addrmap[image_type].get('load_addr', None)
target = self.addrmap[image_type].get('target', None)
fsize = os.stat(file_name).st_size
timeout = 10 + fsize * 0.001 # Proportional to size
timeout *= timeout_factor
self.log.action = "image write init"
with self._expect_mgr():
try:
# Note we assume the targets are already stopped
if target != None:
self.__target_id_select(target, "for writing")
# Verify target is halted
self.__send_command(
"check target %d is halted for writing" % (target),
"targets",
re.compile(r" %d\* .*(halted|reset)" % target))
write_command = self.board.get(
'write_command', "load_image %(file)s 0x%(address)x")
# write_image says 'wrote', load_image 'downloaded'
# Not only that, write_image (for flash) reports
# rounded up sizes to some blocks, so we can't really
# match on the size. Or if we are loading ELF, the
# sizes reported are different. So yeah, just don't
# worry about them sizes.
self.__send_command("load image",
write_command % dict(file = file_name,
address = load_addr),
[ "downloaded [0-9]+ bytes",
"wrote [0-9]+ bytes from file"],
timeout)
if verify == True and self.board.get("verify", True):
# Same comment about sizes here
r = self.__send_command(
"verify image",
'verify_image %s 0x%08x' % (file_name, load_addr),
[
"verified [0-9]+ bytes",
"diff [0-9]+ address 0x[0-9a-z]+\. Was 0x[0-9a-z]+ instead of 0x[0-9a-z]+",
],
timeout)
if r != 0:
raise self.error("Cannot verify image (r %d)" % r)
except self.error as e:
self.log.error("can't write image: %s" % e)
raise
def images_do_set(self, images):
# FIXME: current limitation, can't access the tags from the
# constructor as the ones we add in target_add() aren't there
# yet.
wait = \
float(self.tags.get('hard_recover_rest_time', 10))
if self.fsdb.get("disable_power_cycle_before_flash") != 'True':
# Make sure the target is really fresh before flashing it
try:
# See the documentation for this on class flasher_c
# for why we have to do it.
self.flasher.hack_reset_after_power_on = True
self.power_cycle(self.owner_get(), wait = wait)
finally:
self.flasher.hack_reset_after_power_on = False
self.log.info("sleeping 2s after power cycle")
# HACK: For whatever the reason, we need to sleep before
# resetting/halt, seems some of the targets are not ready
# immediately after
time.sleep(2)
self.target_reset_halt(for_what = "for image flashing")
timeout_factor = self.tags.get('slow_flash_factor', 1)
verify = self.tags.get('flash_verify', 'True') == 'True'
# FIXME: replace this check for verifying which image types
# the flasher supports
for t, n in images.items():
if t == "kernel-x86":
it = "x86"
elif t == "kernel":
it = "x86"
elif t == "kernel-arc":
it = "arc"
elif t == "kernel-arm":
it = "arm"
elif t == "rom":
it = "rom"
elif t == "bootloader":
it = "bootloader"
else:
raise self.unsupported_image_e(
"%s: Unknown image type (expected "
"kernel|kernel-(x86,arc,arm), rom)"
% t)
try:
self.flasher.image_write(it, n, timeout_factor, verify)
except ValueError as e:
self.log.exception("flashing got exception: %s", e)
raise self.unsupported_image_e(e.message)
```
#### File: tcf/zephyr/setupl.py
```python
import glob
import os
import re
import site
import subprocess
import sys
import time
import distutils.command.install_data
import distutils.command.install_scripts
import distutils.command.install_lib
def mk_installs_py(base_dir, sysconfigdir, sharedir):
_sysconfigdir = os.path.join(sysconfigdir, "tcf").replace("\\", "\\\\")
_share_path = os.path.join(sharedir, "tcf").replace("\\", "\\\\")
with open(os.path.join(base_dir, "_install.py"), "w") as f:
f.write(f"""
#! /usr/bin/python3
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Note this file gets rewritten during installation
# By default, we point to the source
import os
import sys
# when running from source, we want the toplevel source dir
sysconfig_paths = [
"{_sysconfigdir}",
]
share_path = "{_share_path}"
"""
)
def mk_version_py(base_dir, version):
"""
Create a version.py file in a directory with whichever version
string is passed.
"""
with open(os.path.join(base_dir, "version.py"), "w") as f:
f.write("""\
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Generated by %s on %s
version_string = "%s"
""" % (__file__, time.asctime(), version))
def mk_windows_bat(base_dir, tcf_path):
"""
Create a windows .bat file to allow tcf to run without using "python tcf"
"""
with open(os.path.join(base_dir, "tcf.bat"), "w") as f:
f.write(f"""\
@py -3.9 {tcf_path}\\tcf %*
""")
def get_install_paths(
installer,
install, # = self.distribution.command_options.get('install', {}),
):
if 'user' in install:
# this means --user was given
installer.prefix = site.getuserbase()
sysconfigdir = os.path.join(installer.prefix, 'etc')
sharedir = os.path.join(installer.prefix, "share")
elif 'prefix' in install:
# this means --prefix was given
installer.prefix = install.get('prefix', (None, None))[1]
if sys.platform == "win32":
pass
else:
if installer.prefix == "/usr":
sysconfigdir = "/etc"
else:
sysconfigdir = os.path.join(installer.prefix, 'etc')
sharedir = os.path.join(installer.prefix, "share")
else:
if sys.platform == "win32":
sysconfigdir = 'C:\\ProgramData'
installer.prefix = 'C:\\Program Files\\'
sharedir = os.path.join(installer.prefix, "share")
else:
# these have to be absolute, otherwise they will be prefixed again
sysconfigdir = "/etc"
sharedir = os.path.join("/usr", "share")
installer.prefix = '/usr'
return sysconfigdir, sharedir
# Run a post-install on installed data file replacing paths as we need
class _install_data(distutils.command.install_data.install_data):
def run(self):
# Workaround that install_data doesn't respect --prefix
#
# If prefix is given (via --user or via --prefix), then
# extract it and add it to the paths in self.data_files;
# otherwise, default to /usr.
install = self.distribution.command_options.get('install', {})
sysconfigdir, _sharedir = get_install_paths(self, install)
new_data_files = []
for entry in self.data_files:
dest_path = entry[0].replace('@prefix@', self.prefix)
dest_path = dest_path.replace('@sysconfigdir@', sysconfigdir)
new_data_files.append((dest_path,) + entry[1:])
self.data_files = new_data_files
distutils.command.install_data.install_data.run(self)
# Run a post-install on installed data file replacing paths as we need
class _install_scripts(distutils.command.install_scripts.install_scripts):
def run(self):
install = self.distribution.command_options.get('install', {})
# Create a .bat file for windows to run tcf without invoking python first
if sys.platform == "win32":
# target_dir is the scripts folder in the python installation
target_dir = os.path.join(os.path.dirname(sys.executable),"Scripts")
# If --user is specified, need to change path to where the script is
if 'user' in install:
python_version = ''.join(str(i) for i in sys.version_info[:2])
python_folder = 'Python' + python_version
script_dir = os.path.join(site.USER_BASE, python_folder, "Scripts")
mk_windows_bat(target_dir, script_dir)
else:
mk_windows_bat(target_dir, target_dir)
distutils.command.install_scripts.install_scripts.run(self)
class _install_lib(distutils.command.install_lib.install_lib):
def run(self):
# Workaround that install_data doesn't respect --prefix
#
# If prefix is given (via --user or via --prefix), then
# extract it and add it to the paths in self.data_files;
# otherwise, default to /usr/local.
sysconfigdir, sharedir = get_install_paths(
self,
self.distribution.command_options.get('install', {}))
distutils.command.install_lib.install_lib.run(self)
# generate a new _install.py for an installed system
mk_installs_py(
os.path.join(self.install_dir, "tcfl"),
sysconfigdir, sharedir)
class _install_ttbd_lib(distutils.command.install_lib.install_lib):
def run(self):
# Workaround that install_data doesn't respect --prefix
#
# If prefix is given (via --user or via --prefix), then
# extract it and add it to the paths in self.data_files;
# otherwise, default to /usr/local.
sysconfigdir, sharedir = get_install_paths(
self,
self.distribution.command_options.get('install', {}))
distutils.command.install_lib.install_lib.run(self)
# generate a new _install.py for an installed system
mk_installs_py(
os.path.join(self.install_dir, "ttbl"),
sysconfigdir, sharedir)
# A glob that filters symlinks
def glob_no_symlinks(pathname):
l = []
for file_name in glob.iglob(pathname):
if not os.path.islink(file_name):
l.append(file_name)
return l
# Find which version string to settle on
version = None
try:
import tcfl.version
version = tcfl.version.version_string
except:
pass
if "VERSION" in os.environ:
version = os.environ['VERSION']
elif version:
""" already have something """
else:
_src = os.path.abspath(__file__)
_srcdir = os.path.dirname(_src)
try:
version = subprocess.check_output(
"git describe --tags --always --abbrev=7 --dirty".split(),
cwd = _srcdir, stderr = subprocess.PIPE, encoding = "utf-8")
# RPM versions can't have dashes (-), so replace them with dots (.)
version = version.strip().replace("-", ".")
if re.match("^v[0-9]+.[0-9]+", version):
version = version[1:]
except subprocess.CalledProcessError as _e:
print("Unable to determine %s (%s) version: %s"
% ("tcf", _srcdir, _e.output), file = sys.stderr)
version = "vNA"
except OSError as e:
# At this point, logging is still not initialized; don't
# crash, just report a dummy version
print("Unable to determine %s (%s) version "
" (git not installed?): %s" % ("tcf", _srcdir, e),
file = sys.stderr)
version = "vNA"
``` |