id (stringlengths 1–8) | text (stringlengths 6–1.05M) | dataset_id (stringclasses 1)
---|---|---
11392807
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 18:20:41 2019
@author: <EMAIL>
"""
from .sunrise import *
from .terminator import *
|
StarcoderdataPython
|
3412660
|
<gh_stars>10-100
import sys
import tqdm
import json
import rdflib
from rdflib import Namespace
from rdflib.namespace import RDF, RDFS, OWL
oboInOwl = Namespace('http://www.geneontology.org/formats/oboInOwl#')
def owl_to_json(owl_file, json_file):
ent_dict = dict()
g = rdflib.Graph()
g.parse(owl_file)
for s in tqdm.tqdm(g.subjects(RDF.type, OWL.Class)):
labels = list(g.objects(s, RDFS.label))
all_labels = [str(l) for l in labels]
synonyms = list(g.objects(s, oboInOwl['hasExactSynonym']))
all_labels += [str(s) for s in synonyms]
if all_labels:
ent_dict[s] = all_labels
with open(json_file, 'wb') as f:
f.write(json.dumps(ent_dict).encode('utf-8'))
if __name__ == '__main__':
owl_file = sys.argv[1]
output_path = sys.argv[2]
owl_to_json(owl_file, output_path)
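# Example invocation (hypothetical script and file names):
#   python owl_to_json.py my_ontology.owl my_ontology_labels.json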
|
StarcoderdataPython
|
9787451
|
from flask import Flask, render_template, request, jsonify
from ruamel import yaml
import mysql.connector as my
import os
import shutil
# Load settings (copy config.yml.sample to config.yml if config.yml does not exist)
sampleConfigPath = "config.yml.sample"
configPath = "config.yml"
if not os.path.exists(configPath):
shutil.copyfile(sampleConfigPath, configPath)
print(f"{configPath} was created from {sampleConfigPath}")
with open(configPath) as f:
config = yaml.safe_load(f)
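# Expected config.yml shape, inferred from the keys used below (values are placeholders, not from the source):
#   db:     {host: ..., name: ..., user: ..., pass: ...}
#   server: {host: ..., port: ..., debug: ...}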
app = Flask(__name__)
@app.route('/')
def hello():
props ={}
return render_template("./index.html", props=props)
@app.route('/search')
def search():
    query: str = request.args.get("query", "")
sort: str = request.args.get("sort")
cfg = config["db"]
con = my.connect(
host=cfg["host"], database=cfg["name"],
user=cfg["user"], password=cfg["pass"])
try:
cur = con.cursor()
try:
q: str = "select id, org, title, description, width, height, " \
"type, url, embeddable, thumbUrl, indexText, viewCount, likeCount, "\
"publishedAt from contents" \
" where LOWER(indexText) like %s"
if sort == "viewCount":
q += " order by viewCount desc"
elif sort == "likeCount":
q += " order by likeCount desc"
else:
q += " order by publishedAt desc"
cur.execute(q, ("%" + query.lower() + "%",))
result = list()
for r in cur:
result.append({"id": r[0], "org": r[1], "title": r[2],
"description": r[3], "width": r[4], "height": r[5],
"type": r[6], "url": r[7], "embeddable": r[8] != 0,
"thumbUrl": r[9], "indexText": r[10],
"viewCount": r[11], "likeCount": r[12],
"publishedAt": r[13].strftime("%Y-%m-%d")})
return jsonify(result)
finally:
cur.close()
finally:
con.close()
if __name__ == "__main__":
cfg = config["server"]
app.run(host=cfg["host"], port=cfg["port"], debug=cfg["debug"])
|
StarcoderdataPython
|
1934584
|
#!/usr/bin/env python3
from pgmpy.base import UndirectedGraph
from pgmpy.tests import help_functions as hf
import unittest
class TestUndirectedGraphCreation(unittest.TestCase):
def setUp(self):
self.graph = UndirectedGraph()
def test_class_init_without_data(self):
self.assertIsInstance(self.graph, UndirectedGraph)
def test_class_init_with_data_string(self):
self.G = UndirectedGraph([('a', 'b'), ('b', 'c')])
self.assertListEqual(sorted(self.G.nodes()), ['a', 'b', 'c'])
self.assertListEqual(hf.recursive_sorted(self.G.edges()),
[['a', 'b'], ['b', 'c']])
def test_add_node_string(self):
self.graph.add_node('a')
self.assertListEqual(self.graph.nodes(), ['a'])
def test_add_node_nonstring(self):
self.graph.add_node(1)
self.assertListEqual(self.graph.nodes(), [1])
def test_add_nodes_from_string(self):
self.graph.add_nodes_from(['a', 'b', 'c', 'd'])
self.assertListEqual(sorted(self.graph.nodes()),
['a', 'b', 'c', 'd'])
def test_add_nodes_from_non_string(self):
self.graph.add_nodes_from([1, 2, 3, 4])
def test_add_edge_string(self):
self.graph.add_edge('d', 'e')
self.assertListEqual(sorted(self.graph.nodes()), ['d', 'e'])
self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
[['d', 'e']])
self.graph.add_nodes_from(['a', 'b', 'c'])
self.graph.add_edge('a', 'b')
self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
[['a', 'b'], ['d', 'e']])
def test_add_edge_nonstring(self):
self.graph.add_edge(1, 2)
def test_add_edges_from_string(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c')])
self.assertListEqual(sorted(self.graph.nodes()), ['a', 'b', 'c'])
self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
[['a', 'b'], ['b', 'c']])
self.graph.add_nodes_from(['d', 'e', 'f'])
self.graph.add_edges_from([('d', 'e'), ('e', 'f')])
self.assertListEqual(sorted(self.graph.nodes()),
['a', 'b', 'c', 'd', 'e', 'f'])
self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
hf.recursive_sorted([('a', 'b'), ('b', 'c'),
('d', 'e'), ('e', 'f')]))
def test_add_edges_from_nonstring(self):
self.graph.add_edges_from([(1, 2), (2, 3)])
def test_number_of_neighbors(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c')])
self.assertEqual(len(self.graph.neighbors('b')), 2)
def tearDown(self):
del self.graph
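
# Conventional entry point (an assumed convenience) so the tests can be run directly with unittest:
if __name__ == '__main__':
    unittest.main()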
|
StarcoderdataPython
|
3392164
|
"""Fix whitespace issues."""
import os
import re
import argparse
def find_files(top, exts):
"""Return a list of file paths with one of the given extensions.
Args:
top (str): The top level directory to search in.
exts (tuple): a tuple of extensions to search for.
Returns:
a list of matching file paths.
"""
return [os.path.join(dirpath, name)
for dirpath, dirnames, filenames in os.walk(top)
for name in filenames
if name.endswith(exts)]
def trim(top, exts):
"""Trim whitespace from files.
Args:
top (str): The top level directory to operate in.
exts (tuple): A tuple of extensions to process.
"""
files = find_files(top, exts)
for item in files:
lines = []
with open(item, 'r') as f:
for line in f:
lines.append(re.sub(r'[ \t]+$', '', line))
with open(item, 'w') as f:
f.writelines(lines)
def tabs2spaces(top, exts, n=2):
"""Convert tabs to spaces in a set of files. Ignores tabs enclosed in quotes.
Args:
top (str): The top level directory to operate in.
exts (tuple): A tuple of extensions to process.
n (optional): The number of spaces to replace each tab with. Default is 2.
"""
files = find_files(top, exts)
for item in files:
lines = []
with open(item, 'r') as f:
for line in f:
lines.append(re.sub(r'\t', ' ' * n, line))
with open(item, 'w') as f:
f.writelines(lines)
def spaces2tabs(top, exts):
"""Raise an exception. All in good fun."""
raise Exception('Nope!')
def main():
"""CLI hook."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('f')
parser.add_argument('top')
    parser.add_argument('-n', type=int, help='number of spaces per tab (tabs2spaces only)')
parser.add_argument('exts', nargs=argparse.REMAINDER)
args = vars(parser.parse_args())
args['exts'] = tuple(args['exts'])
FNMAP = {
'trim': trim,
'tabs2spaces': tabs2spaces
}
fn = FNMAP[args['f']]
if args['n']:
fn(args['top'], args['exts'], args['n'])
else:
fn(args['top'], args['exts'])
if __name__ == '__main__':
main()
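# Example invocations (hypothetical script name and paths; remaining arguments become the extensions tuple):
#   python fix_whitespace.py trim ./src .py .md
#   python fix_whitespace.py -n 4 tabs2spaces ./src .py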
|
StarcoderdataPython
|
3456705
|
# -*- coding:utf-8 -*-
"""
@author: Alden
@email: <EMAIL>
@date: 2018/4/3
@version: 1.0.0.0
"""
class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
res = 0
is_positive = None
for i, v in enumerate(str):
if res == 0:
if v == " " and is_positive is None:
continue
elif v == "+" and is_positive is None:
is_positive = True
elif v == "-" and is_positive is None:
is_positive = False
elif v.isdigit():
res += int(v)
else:
return 0
else:
if v.isdigit():
res = res * 10 + int(v)
else:
break
is_positive = True if is_positive is None else is_positive
res = res if is_positive else res * -1
if res > 2 ** 31 - 1:
res = 2 ** 31 - 1
elif res < -1 * 2 ** 31:
res = -1 * 2 ** 31
return res
if __name__ == "__main__":
s = Solution()
    print(s.myAtoi(" 01"))
    print(s.myAtoi(" -11"))
    print(s.myAtoi(" 2147483648"))
    print(s.myAtoi(" a2147483648"))
    print(s.myAtoi("+-2"))
    print(s.myAtoi(" +0 123"))
|
StarcoderdataPython
|
1606448
|
# -*- coding: UTF-8 -*-
'''
@author: sintrb
'''
"""
PrintOnline Server.
This module refer to SimpleHTTPServer
"""
__version__ = "0.0.3"
import BaseHTTPServer
import SocketServer
import json
import os
import shutil
import socket
import sys
import urlparse
import cgi
import re
import inspect
import tempfile
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
reload(sys)
sys.setdefaultencoding("utf-8")
libdir = os.path.dirname(__file__)
if not libdir:
libdir = os.getcwd()
options = {
'tempdir':os.path.join(tempfile.gettempdir(), 'printonlie'),
'bind':'0.0.0.0',
'port':8000
}
class ApiException(Exception):
def __init__(self, res):
self.res = res
def ex(e, c=-1):
return ApiException({"code":c, "msg":e})
def get_printers():
try:
import win32print
return [{'name':d[2]} for d in win32print.EnumPrinters(win32print.PRINTER_ENUM_LOCAL)]
except:
return []
def get_files():
d = options['tempdir']
if os.path.exists(d) and os.path.isdir(d):
arr = []
for f in os.listdir(options['tempdir']):
if os.path.isfile(os.path.join(options['tempdir'], f)):
try:
arr.append({
'name':u'%s' % f.decode('windows-1252'),
'st_mtime':os.stat(os.path.join(d, f)).st_mtime
})
except Exception, e:
print e
arr.sort(key=lambda x:x['st_mtime'])
return arr
else:
return []
def del_file(f):
d = options['tempdir']
if os.path.exists(d) and os.path.isdir(d) and os.path.exists(os.path.join(d, f)):
os.remove(os.path.join(d, f))
def set_file(name, fp):
d = options['tempdir']
if not os.path.exists(d):
os.makedirs(d)
fn = os.path.join(d, name)
with open(fn, 'wb') as f:
f.write(fp.read())
def print_file(filename, printername):
import win32api, win32print
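    # note: the printername argument is discarded below; the system default printer is always used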
printername = win32print.GetDefaultPrinter()
device = '/d:"%s"' % printername
os.chdir(options['tempdir'])
# win32api.ShellExecute(0, "print", filename, device, options['tempdir'], 0)
# return True
try:
win32api.ShellExecute(0, "print", filename, device, options['tempdir'], 0)
return True
except Exception, e:
print e
return False
class PrintOnlineRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
server_version = "PrintOnline/" + __version__
protocol_version = "HTTP/1.1"
editortmpl = ''
def check_auth(self):
if not options.get('auth'):
return True
au = self.headers.getheader('authorization')
if au and len(au) > 6 and au.endswith(options.get('auth')):
return True
f = StringIO()
f.write('<center><h2>401 Unauthorized</h2></center>')
self.send_response(401, "Unauthorized")
self.send_header("Content-Type", "text/html")
self.send_header("Content-Length", str(f.tell()))
self.send_header("WWW-Authenticate", 'Basic realm="%s"' % (options.get('realm') or self.server_version))
self.send_header('Connection', 'close')
self.end_headers()
f.seek(0)
shutil.copyfileobj(f, self.wfile)
return False
def api_printers(self):
return get_printers()
def api_files(self):
return get_files()
def api_update(self):
return {
'files':self.api_files(),
'printers':self.api_printers()
}
def api_print(self, filename, printername):
return {'ok':print_file(filename, printername)}
def do_POST(self):
if not self.check_auth():
return
f = StringIO()
contenttype = 'text/html'
statuscode = 200
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD':'POST',
'CONTENT_TYPE':self.headers['Content-Type'],
})
uploadfile = form['file']
filename = uploadfile.filename
set_file(filename, uploadfile.file)
res = {
'name':filename
}
f.write(json.dumps(res))
self.send_response(statuscode)
self.send_header("Content-type", contenttype)
self.send_header("Content-Length", str(f.tell()))
self.send_header('Connection', 'close')
self.end_headers()
f.seek(0)
shutil.copyfileobj(f, self.wfile)
def do_GET(self):
if not self.check_auth():
return
self.path = self.path.replace('..', '')
url = urlparse.urlparse(self.path)
contenttype = 'text/html'
statuscode = 200
f = StringIO()
# print url
if url.path.startswith('/api/'):
try:
from urllib import unquote
contenttype = 'text/json'
apiname = 'api_%s' % (url.path.replace('/api/', ''))
if not hasattr(self, apiname):
                    raise ex('no such api: %s' % apiname)
param = dict([(r[0], unquote(r[1]).replace('+', ' ')) for r in re.findall('([^&^=]+)=([^&^=]*)', url.query)])
apifunc = getattr(self, apiname)
argspec = inspect.getargspec(apifunc)
kvargs = {}
funcagrs = argspec.args
defaults = argspec.defaults
if defaults:
for i, v in enumerate(funcagrs[-len(defaults):]):
kvargs[v] = defaults[i]
if len(funcagrs):
param['_param'] = param
argslen = len(funcagrs) - (len(defaults) if defaults else 0) - 1
missargs = []
for i, k in enumerate(funcagrs[1:]):
if k in param:
kvargs[k] = param[k]
elif i < argslen:
missargs.append(k)
if missargs:
                        raise ex('missing arguments: %s' % (', '.join(missargs)))
data = apifunc(**kvargs)
res = {'data':data, 'code':0}
except ApiException, e:
res = e.res
f.write(json.dumps(res))
else:
filepath = os.path.join(libdir, url.path.strip('/') or 'index.html')
tfilepath = os.path.join(options['tempdir'], url.path.strip('/'))
if os.path.exists(filepath) and os.path.isfile(filepath):
f.write(open(filepath, 'rb').read())
elif os.path.exists(tfilepath) and os.path.isfile(tfilepath):
f.write(open(tfilepath, 'rb').read())
contenttype = None
else:
print os.path.join(options['tempdir'], url.path.strip('/'))
statuscode = 404
f.write("404 not found")
self.send_response(statuscode)
if contenttype:
self.send_header("Content-type", contenttype)
self.send_header("Content-Length", str(f.tell()))
self.send_header('Connection', 'close')
self.end_headers()
f.seek(0)
shutil.copyfileobj(f, self.wfile)
class ThreadingHTTPServer(SocketServer.ThreadingTCPServer):
allow_reuse_address = 1 # Seems to make sense in testing environment
def server_bind(self):
"""Override server_bind to store the server name."""
SocketServer.TCPServer.server_bind(self)
host, port = self.socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
def start():
port = options['port'] if 'port' in options else 8000
server_address = (options['bind'], port)
httpd = ThreadingHTTPServer(server_address, PrintOnlineRequestHandler)
sa = httpd.socket.getsockname()
print "Temp Directory: %s" % options.get('tempdir')
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
def config(argv):
import getopt
opts, args = getopt.getopt(argv, "u:p:r:ht:")
for opt, arg in opts:
if opt == '-u':
options['username'] = arg
elif opt == '-p':
options['password'] = arg
elif opt == '-r':
options['realm'] = arg
elif opt == '-t':
options['tempdir'] = arg
elif opt == '-h':
print 'Usage: python -m PrintOnline [-u username] [-p password] [-r realm] [-t tempdir] [bindaddress:port | port]'
print 'Report bugs to <<EMAIL>>'
exit()
if options.get('username') and options.get('password'):
import base64
options['auth'] = base64.b64encode('%s:%s' % (options.get('username'), options.get('password')))
if len(args) > 0:
bp = args[0]
if ':' in bp:
options['bind'] = bp[0:bp.index(':')]
options['port'] = int(bp[bp.index(':') + 1:])
else:
options['bind'] = '0.0.0.0'
options['port'] = int(bp)
def main():
config(sys.argv[1:])
start()
def test():
config(sys.argv[1:])
print get_files()
if __name__ == '__main__':
# test()
main()
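# Example invocation (hypothetical values), following the usage string printed for -h above:
#   python -m PrintOnline -u admin -p secret -t ./print_temp 0.0.0.0:8000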
|
StarcoderdataPython
|
6617769
|
import luigi
import os
import pandas as pd
from db import extract
from db import sql
from forecast import util
import shutil
import luigi.contrib.hadoop
from sqlalchemy import create_engine
from pysandag.database import get_connection_string
from pysandag import database
from db import log
class IncPopulation(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return None
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
db_connection_string = database.get_connection_string('model_config.yml', 'in_db')
sql_in_engine = create_engine(db_connection_string)
in_query = getattr(sql, 'max_run_id')
db_run_id = pd.read_sql(in_query, engine, index_col=None)
# db_run_id = log.new_run(name='inc_run_log', run_id=db_run_id['max'].iloc[0])
run_id = pd.Series([db_run_id['id'].iloc[0]])
run_id.to_hdf('temp/data.h5', 'run_id', mode='a')
dem_sim_rates = extract.create_df('dem_sim_rates', 'dem_sim_rates_table',
rate_id=self.dem_id, index=None)
dem_sim_rates.to_hdf('temp/data.h5', 'dem_sim_rates', mode='a')
econ_sim_rates = extract.create_df('econ_sim_rates', 'econ_sim_rates_table',
rate_id=self.econ_id, index=None)
econ_sim_rates.to_hdf('temp/data.h5', 'econ_sim_rates', mode='a')
tables = util.yaml_to_dict('model_config.yml', 'db_tables')
in_query = getattr(sql, 'inc_pop') % (tables['inc_pop_table'], run_id[0])
in_query2 = getattr(sql, 'inc_mil_hh_pop') % (tables['population_table'], dem_sim_rates.base_population_id[0])
pop = pd.read_sql(in_query, engine, index_col=['age', 'race_ethn', 'sex', 'mildep'])
pop_mil = pd.read_sql(in_query2, sql_in_engine, index_col=['age', 'race_ethn', 'sex', 'mildep'])
pop = pop.join(pop_mil)
pop['persons'] = (pop['persons'] - pop['mil_mildep'])
pop = pop.reset_index(drop=False)
pop = pop[pop['age'] >= 18]
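        # bucket the adult population into the age categories used by the income-share rate tables below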
pop['age_cat'] = ''
pop.loc[pop['age'].isin(list(range(18, 25))), ['age_cat']] = '18_24'
pop.loc[pop['age'].isin(list(range(25, 35))), ['age_cat']] = '25_34'
pop.loc[pop['age'].isin(list(range(35, 45))), ['age_cat']] = '35_44'
pop.loc[pop['age'].isin(list(range(45, 55))), ['age_cat']] = '45_54'
pop.loc[pop['age'].isin(list(range(55, 60))), ['age_cat']] = '55_59'
pop.loc[pop['age'].isin(list(range(60, 65))), ['age_cat']] = '60_64'
pop.loc[pop['age'].isin(list(range(65, 75))), ['age_cat']] = '65_74'
pop.loc[pop['age'].isin(list(range(75, 103))), ['age_cat']] = '75_99'
pop = pd.DataFrame(pop['persons'].groupby([pop['yr'], pop['age_cat']]).sum())
pop.to_hdf('temp/data.h5', 'pop', mode='a')
class IncomeByType(luigi.Task):
econ = luigi.Parameter()
dem = luigi.Parameter()
@property
def priority(self):
return 3
def requires(self):
return IncPopulation(econ_id=self.econ, dem_id=self.dem)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
pop = pd.read_hdf('temp/data.h5', 'pop')
inc_type_rates = extract.create_df('inc_shares', 'inc_shares_table', rate_id=econ_sim_rates.inc1_id[0], index=['yr', 'age_cat'])
inc_type_rates = inc_type_rates.join(pop)
inc_type_rates['totals'] = (inc_type_rates['income'] * inc_type_rates['persons'] * inc_type_rates['share'])
inc_type_rates = inc_type_rates.reset_index(drop=False)
inc_type_rates['multiplier'] = 0
aigr_table = extract.create_df('aigr', 'aigr_table', rate_id=econ_sim_rates.aigr_id[0], index=None)
inc_type_rates.loc[inc_type_rates['yr'] > 2014, ['multiplier']] = (aigr_table.aigr[0] * (inc_type_rates['yr'] - 2014))
# pow(1.01, mil_wages.index.get_level_values('yr') - 2014)
inc_type_rates['totals'] = (inc_type_rates['totals'] + inc_type_rates['totals'] * inc_type_rates['multiplier'])
inc_type_rates = pd.DataFrame(inc_type_rates['totals'].groupby([inc_type_rates['yr'], inc_type_rates['income_type']]).sum())
inc_type_rates = inc_type_rates.reset_index(drop=False)
inc_type_rates = pd.pivot_table(inc_type_rates, values='totals',
index=['yr'],
columns=['income_type'])
# inc_type_rates.to_hdf('temp/data.h5', 'inc_type_rates', mode='a')
inc_type_rates.rename(columns={'intp': 'Interest'}, inplace=True)
inc_type_rates.rename(columns={'oip': 'Other'}, inplace=True)
inc_type_rates.rename(columns={'pap': 'Public_Assistance'}, inplace=True)
inc_type_rates.rename(columns={'retp': 'Retirement'}, inplace=True)
inc_type_rates.rename(columns={'ssip': 'Supplemental_Social_Security'}, inplace=True)
inc_type_rates.rename(columns={'ssp': 'Social_Security'}, inplace=True)
inc_type_rates.rename(columns={'semp': 'Selfemp_Income'}, inplace=True)
inc_type_rates = inc_type_rates[['Interest', 'Other', 'Public_Assistance', 'Retirement',
'Supplemental_Social_Security', 'Social_Security', 'Selfemp_Income']]
inc_type_rates.to_hdf('temp/data.h5', 'ue_income')
run_table = pd.read_hdf('temp/data.h5', 'run_id')
run_id = run_table[0]
inc_type_rates['run_id'] = run_id
inc_type_rates.to_sql(name='non_wage_income', con=engine, schema='defm', if_exists='append', index=True)
if __name__ == '__main__':
    if not os.path.exists('temp'):
        os.makedirs('temp')
luigi.run(main_task_cls=IncomeByType, cmdline_args=['--dem=1005', '--econ=1002'])
shutil.rmtree('temp')
|
StarcoderdataPython
|
249988
|
<filename>scripts/get_raw_sun_data.py
############
# Compute raw sun data using pvlib
#
# 2021-09-01
# <NAME>
#
# The data is about
# - 1MB for 2 days of data, for ~2000 sites, and takes about ~1 minute to compute
# - 6MB for 10 days of data, for ~2000 sites, and takes about ~1 minute to compute
# - 252MB for 365 days of data, for ~2000 sites, and takes about ~11 minutes (on a MacBook Pro)
# We decided to just go for one year of data.
# On 1st Jan 2019 and 2020, the biggest difference in elevation was 1 degree.
# More investigation has been done (link), and we are happy the difference is less than 1 degree,
# therefore it is OK to use 1 year of data for all the years.
############
import logging
import os
from datetime import datetime
from pathlib import Path
import pandas as pd
import nowcasting_dataset
from nowcasting_dataset.data_sources.gsp.eso import get_gsp_metadata_from_eso
from nowcasting_dataset.data_sources.sun.raw_data_load_save import (
get_azimuth_and_elevation,
save_to_zarr,
)
from nowcasting_dataset.geospatial import lat_lon_to_osgb
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# set up
BUCKET = Path("solar-pv-nowcasting-data")
PV_PATH = BUCKET / "PV/PVOutput.org"
PV_METADATA_FILENAME = PV_PATH / "UK_PV_metadata.csv"
# set up variables
local_path = os.path.dirname(nowcasting_dataset.__file__) + "/.."
metadata_filename = f"gs://{PV_METADATA_FILENAME}"
start_dt = datetime.fromisoformat("2019-01-01 00:00:00.000+00:00")
end_dt = datetime.fromisoformat("2020-01-01 00:00:00.000+00:00")
datestamps = pd.date_range(start=start_dt, end=end_dt, freq="5T")
# PV metadata
pv_metadata = pd.read_csv(metadata_filename, index_col="system_id")
pv_metadata = pv_metadata.dropna(subset=["longitude", "latitude"])
pv_metadata["location_x"], pv_metadata["location_y"] = lat_lon_to_osgb(
pv_metadata["latitude"], pv_metadata["longitude"]
)
pv_x = pv_metadata["location_x"]
pv_y = pv_metadata["location_y"]
# GSP Metadata
gsp_metadata = get_gsp_metadata_from_eso()
gsp_metadata = gsp_metadata.dropna(subset=["centroid_lon", "centroid_lat"])
gsp_x = gsp_metadata["centroid_x"]
gsp_y = gsp_metadata["centroid_y"]
# join all sites together
x_centers = list(pv_x.values) + list(gsp_x.values)
y_centers = list(pv_y.values) + list(gsp_y.values)
# compute azimuth and elevation for all sites
azimuth, elevation = get_azimuth_and_elevation(
x_centers=x_centers, y_centers=y_centers, datestamps=datestamps
)
azimuth = azimuth.astype(int)
elevation = elevation.astype(int)
# save it locally as well as in the cloud, in case the upload to the cloud fails
save_to_zarr(azimuth=azimuth, elevation=elevation, filename="./sun.zarr")
save_to_zarr(
azimuth=azimuth, elevation=elevation, filename="gs://solar-pv-nowcasting-data/Sun/v0/sun.zarr/"
)
# This has been uploaded to 'gs://solar-pv-nowcasting-data/Sun/v0'
|
StarcoderdataPython
|
123445
|
<reponame>imranq2/SparkAutoMapper.FHIR
from __future__ import annotations
from typing import Optional, TYPE_CHECKING, Union
from spark_auto_mapper_fhir.fhir_types.date_time import FhirDateTime
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.fhir_types.string import FhirString
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.base_types.fhir_backbone_element_base import (
FhirBackboneElementBase,
)
if TYPE_CHECKING:
pass
# id_ (string)
# extension (Extension)
# modifierExtension (Extension)
# collector (Reference)
from spark_auto_mapper_fhir.complex_types.reference import Reference
# Imports for References for collector
from spark_auto_mapper_fhir.resources.practitioner import Practitioner
from spark_auto_mapper_fhir.resources.practitioner_role import PractitionerRole
# collectedDateTime (dateTime)
# collectedPeriod (Period)
from spark_auto_mapper_fhir.complex_types.period import Period
# duration (Duration)
from spark_auto_mapper_fhir.complex_types.duration import Duration
# quantity (Quantity)
from spark_auto_mapper_fhir.complex_types.quantity import Quantity
# method (CodeableConcept)
from spark_auto_mapper_fhir.complex_types.codeable_concept import CodeableConcept
# End Import for References for method
# Import for CodeableConcept for method
from spark_auto_mapper_fhir.value_sets.fhir_specimen_collection_method import (
FHIRSpecimenCollectionMethodCode,
)
# End Import for CodeableConcept for method
# bodySite (CodeableConcept)
# End Import for References for bodySite
# Import for CodeableConcept for bodySite
from spark_auto_mapper_fhir.value_sets.snomedct_body_structures import (
SNOMEDCTBodyStructuresCode,
)
# End Import for CodeableConcept for bodySite
# fastingStatusCodeableConcept (CodeableConcept)
# End Import for References for fastingStatusCodeableConcept
# Import for CodeableConcept for fastingStatusCodeableConcept
from spark_auto_mapper_fhir.value_sets.v2_0916 import V2_0916
# End Import for CodeableConcept for fastingStatusCodeableConcept
# fastingStatusDuration (Duration)
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class SpecimenCollection(FhirBackboneElementBase):
"""
Specimen.Collection
A sample to be used for analysis.
"""
# noinspection PyPep8Naming
def __init__(
self,
*,
id_: Optional[FhirString] = None,
extension: Optional[FhirList[ExtensionBase]] = None,
modifierExtension: Optional[FhirList[ExtensionBase]] = None,
collector: Optional[Reference[Union[Practitioner, PractitionerRole]]] = None,
collectedDateTime: Optional[FhirDateTime] = None,
collectedPeriod: Optional[Period] = None,
duration: Optional[Duration] = None,
quantity: Optional[Quantity] = None,
method: Optional[CodeableConcept[FHIRSpecimenCollectionMethodCode]] = None,
bodySite: Optional[CodeableConcept[SNOMEDCTBodyStructuresCode]] = None,
fastingStatusCodeableConcept: Optional[CodeableConcept[V2_0916]] = None,
fastingStatusDuration: Optional[Duration] = None,
) -> None:
"""
A sample to be used for analysis.
:param id_: None
:param extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
:param modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
:param collector: Person who collected the specimen.
:param collectedDateTime: None
:param collectedPeriod: None
:param duration: The span of time over which the collection of a specimen occurred.
:param quantity: The quantity of specimen collected; for instance the volume of a blood sample,
or the physical measurement of an anatomic pathology sample.
:param method: A coded value specifying the technique that is used to perform the procedure.
:param bodySite: Anatomical location from which the specimen was collected (if subject is a
patient). This is the target site. This element is not used for environmental
specimens.
:param fastingStatusCodeableConcept: None
:param fastingStatusDuration: None
"""
super().__init__(
id_=id_,
extension=extension,
modifierExtension=modifierExtension,
collector=collector,
collectedDateTime=collectedDateTime,
collectedPeriod=collectedPeriod,
duration=duration,
quantity=quantity,
method=method,
bodySite=bodySite,
fastingStatusCodeableConcept=fastingStatusCodeableConcept,
fastingStatusDuration=fastingStatusDuration,
)
|
StarcoderdataPython
|
1792173
|
'''
meetings_member - handling for meetings member
====================================================================================
'''
# standard
from datetime import date, datetime
from traceback import format_exc, format_exception_only
from urllib.parse import urlencode
# pypi
from flask import request, flash, jsonify, current_app, url_for, g, redirect
from flask_security import current_user, logout_user, login_user
from flask.views import MethodView
from dominate.tags import div, h1, p, b
from dominate.util import raw
# homegrown
from . import bp
from ...model import db
from ...model import LocalInterest, LocalUser, Invite, ActionItem, Motion, MotionVote
from ...model import invite_response_all, INVITE_RESPONSE_ATTENDING, INVITE_RESPONSE_NO_RESPONSE, action_all
from ...model import motionvote_all, MOTIONVOTE_KEY_URLARG, INVITE_KEY_URLARG
from ...version import __docversion__
from ...meeting_evotes import get_evotes, generateevotes
from .meetings_common import MemberStatusReportBase, ActionItemsBase, MotionVotesBase, MotionsBase
from .meetings_common import motions_childelementargs, invite_statusreport
from .meetings_common import meeting_has_option, MEETING_OPTION_RSVP, MEETING_OPTION_HASSTATUSREPORTS
from .viewhelpers import localuser2user, user2localuser
from loutilities.tables import get_request_data
from loutilities.user.roles import ROLE_SUPER_ADMIN, ROLE_MEETINGS_ADMIN, ROLE_MEETINGS_MEMBER
from loutilities.user.tables import DbCrudApiInterestsRolePermissions
from loutilities.timeu import asctime
from loutilities.filters import filtercontainerdiv, filterdiv, yadcfoption
from loutilities.flask.user.views import SelectInterestsView
isodate = asctime('%Y-%m-%d')
displaytime = asctime('%Y-%m-%d %H:%M')
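# date/time formatters used by the form mappings below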
class ParameterError(Exception): pass
adminguide = 'https://members.readthedocs.io/en/{docversion}/meetings-member-guide.html'.format(docversion=__docversion__)
MEETINGS_MEMBER_ROLES = [ROLE_SUPER_ADMIN, ROLE_MEETINGS_ADMIN, ROLE_MEETINGS_MEMBER]
##########################################################################################
# memberstatusreport endpoint
##########################################################################################
def get_invite_response(dbrow):
invite = dbrow.invite
return invite.response
class MemberStatusReportView(MemberStatusReportBase):
# remove auth_required() decorator
decorators = []
def permission(self):
invitekey = request.args.get(INVITE_KEY_URLARG, None)
if invitekey:
permitted = True
invite = Invite.query.filter_by(invitekey=invitekey).one()
user = localuser2user(invite.user)
self.meeting = invite.meeting
self.interest = self.meeting.interest
if current_user != user:
# log out and in automatically
# see https://flask-security-too.readthedocs.io/en/stable/api.html#flask_security.login_user
logout_user()
login_user(user)
db.session.commit()
flash('you have been automatically logged in as {}'.format(current_user.name))
            # at this point, current_user is the target user (may have been switched via the invitekey)
            # check role permissions; permitted stays True (from above) unless determined otherwise
roles_accepted = [ROLE_SUPER_ADMIN, ROLE_MEETINGS_ADMIN, ROLE_MEETINGS_MEMBER]
allowed = False
for role in roles_accepted:
if current_user.has_role(role):
allowed = True
break
if not allowed:
permitted = False
# no invitekey, not permitted
else:
permitted = False
return permitted
def init(self):
# redirect to rsvp page if necessary
meeting = self.get_meeting()
if meeting.has_meeting_option(MEETING_OPTION_RSVP) and not meeting.has_meeting_option(MEETING_OPTION_HASSTATUSREPORTS):
args = urlencode(request.args)
return redirect('{}?{}'.format(url_for('admin.rsvp', interest=g.interest), args))
def beforequery(self):
# set user based on invite key
invite = self.get_invite()
self.theuser = localuser2user(invite.user)
super().beforequery()
self.queryparams['meeting_id'] = self.meeting.id
self.queryparams['invite_id'] = invite.id
def get_invite(self):
meeting = self.get_meeting()
invite = Invite.query.filter_by(meeting_id=meeting.id, user_id=user2localuser(current_user).id).one_or_none()
if not invite:
raise ParameterError('no invitation found for this meeting/user combination')
return invite
def format_pretablehtml(self):
meeting = self.get_meeting()
html = div()
with html:
self.custom_wording()
self.instructions()
h1('{} - {} - {}'.format(meeting.date, meeting.purpose, current_user.name), _class='TextCenter')
return html.render()
memberstatusreport_view = MemberStatusReportView(
templateargs={'adminguide': adminguide},
pagename=lambda: 'My {}'.format(invite_statusreport().title()),
endpoint='admin.memberstatusreport',
endpointvalues={'interest': '<interest>'},
rule='/<interest>/memberstatusreport',
)
memberstatusreport_view.register()
##########################################################################################
# mymeetings endpoint
##########################################################################################
class MyMeetingsView(DbCrudApiInterestsRolePermissions):
def beforequery(self):
self.queryparams['user'] = user2localuser(current_user)
def mymeetings_attended(row):
if meeting_has_option(row.meeting, MEETING_OPTION_RSVP):
today = date.today()
if row.meeting.date >= today:
return ''
else:
return 'yes' if row.attended else 'no'
else:
return ''
mymeetings_dbattrs = 'id,interest_id,meeting.purpose,meeting.date,response,attended,' \
'invitekey,meeting.gs_agenda,meeting.gs_status,meeting.gs_minutes'.split(',')
mymeetings_formfields = 'rowid,interest_id,purpose,date,response,attended,' \
'invitekey,gs_agenda,gs_status,gs_minutes'.split(',')
mymeetings_dbmapping = dict(zip(mymeetings_dbattrs, mymeetings_formfields))
mymeetings_formmapping = dict(zip(mymeetings_formfields, mymeetings_dbattrs))
mymeetings_formmapping['date'] = lambda row: isodate.dt2asc(row.meeting.date)
mymeetings_formmapping['response'] = lambda row: row.response if meeting_has_option(row.meeting, MEETING_OPTION_RSVP) else ''
mymeetings_formmapping['attended'] = mymeetings_attended
mymeetings_formmapping['gs_agenda'] = lambda row: row.meeting.gs_agenda if row.meeting.gs_agenda else ''
mymeetings_formmapping['gs_status'] = lambda row: row.meeting.gs_status if row.meeting.gs_status else ''
mymeetings_formmapping['gs_minutes'] = lambda row: row.meeting.gs_minutes if row.meeting.gs_minutes else ''
mymeetings_formmapping['meetingtype'] = lambda row: row.meeting.meetingtype.meetingtype
mymeetings_formmapping['location'] = lambda row: row.meeting.location if row.meeting.location else ''
# connects with beforetables.js meetings_statusreportwording(), the function parameter to googledoc() for gs_status
mymeetings_formmapping['statusreportwording'] = lambda row: row.meeting.meetingtype.statusreportwording
# connects with beforetables.js mystatus_statusreport() and the attr parameter to googledoc() for gs_status
mymeetings_formmapping['hideviewicon'] = lambda row: not (meeting_has_option(row.meeting, MEETING_OPTION_RSVP) or
meeting_has_option(row.meeting, MEETING_OPTION_HASSTATUSREPORTS))
mymeetings_view = MyMeetingsView(
roles_accepted=[ROLE_SUPER_ADMIN, ROLE_MEETINGS_ADMIN, ROLE_MEETINGS_MEMBER],
local_interest_model=LocalInterest,
app=bp, # use blueprint instead of app
db=db,
model=Invite,
version_id_col='version_id', # optimistic concurrency control
template='datatables.jinja2',
templateargs={'adminguide': adminguide},
pagename='My Meetings',
endpoint='admin.mymeetings',
endpointvalues={'interest': '<interest>'},
rule='/<interest>/mymeetings',
dbmapping=mymeetings_dbmapping,
formmapping=mymeetings_formmapping,
checkrequired=True,
clientcolumns=[
{'data': '', # needs to be '' else get exception converting options from meetings render_template
# TypeError: '<' not supported between instances of 'str' and 'NoneType'
'name': 'view-control',
'className': 'view-control shrink-to-fit',
'orderable': False,
'defaultContent': '',
'label': '',
'type': 'hidden', # only affects editor modal
'title': 'View',
         # see mymeetings_formmapping['hideviewicon'] to know when the view icon is disabled
'render': {'eval': 'render_icon("fas fa-eye", "hideviewicon")'},
},
{'data': 'date', 'name': 'date', 'label': 'Meeting Date',
'type': 'readonly'
},
{'data': 'meetingtype', 'name': 'meetingtype', 'label': 'Meeting Type',
'type': 'readonly'
},
{'data': 'purpose', 'name': 'purpose', 'label': 'Meeting Purpose',
'type': 'readonly'
},
{'data': 'location', 'name': 'location', 'label': 'Location',
'type': 'readonly'
},
{'data': 'response', 'name': 'response', 'label': 'RSVP',
'type': 'readonly'
},
{'data': 'attended', 'name': 'attended', 'label': 'Attended',
'className': 'TextCenter',
'type': 'readonly',
},
{'data': 'gs_agenda', 'name': 'gs_agenda', 'label': 'Agenda',
'type': 'googledoc', 'opts': {'text': 'Agenda'},
'render': {'eval': '$.fn.dataTable.render.googledoc( "Agenda" )'},
},
{'data': 'gs_status', 'name': 'gs_status', 'label': 'Status Report',
'type': 'googledoc', 'opts': {'text': 'Status Report'},
'render': {'eval': '$.fn.dataTable.render.googledoc( meetings_statusreportwording )'},
},
{'data': 'gs_minutes', 'name': 'gs_minutes_fdr', 'label': 'Minutes',
'type': 'googledoc', 'opts': {'text': 'Minutes'},
'render': {'eval': '$.fn.dataTable.render.googledoc( "Minutes" )'},
},
{'data': 'invitekey', 'name': 'invitekey', 'label': 'My Status Report',
'type': 'hidden',
'dt': {'visible': False},
},
],
idSrc='rowid',
buttons=[
{
'extend': 'edit',
'name': 'view-status',
'text': 'My Status Report',
'action': {'eval': 'mystatus_statusreport'},
'className': 'Hidden',
},
'csv',
],
dtoptions={
'scrollCollapse': True,
'scrollX': True,
'scrollXInner': "100%",
'scrollY': True,
'order': [['date:name', 'desc']],
},
)
mymeetings_view.register()
##########################################################################################
# myactionitems endpoint
##########################################################################################
class MyActionItemsView(DbCrudApiInterestsRolePermissions):
def beforequery(self):
self.queryparams['assignee'] = user2localuser(current_user)
myactionitems_dbattrs = 'id,interest_id,action,status,comments,meeting.date,agendaitem.title,agendaitem.agendaitem,update_time,updated_by'.split(',')
myactionitems_formfields = 'rowid,interest_id,action,status,comments,date,agendatitle,agendatext,update_time,updated_by'.split(',')
myactionitems_dbmapping = dict(zip(myactionitems_dbattrs, myactionitems_formfields))
myactionitems_formmapping = dict(zip(myactionitems_formfields, myactionitems_dbattrs))
myactionitems_formmapping['date'] = lambda row: isodate.dt2asc(row.meeting.date) if row.meeting else ''
# todo: should this be in tables.py? but see https://github.com/louking/loutilities/issues/25
myactionitems_dbmapping['meeting.date'] = '__readonly__'
myactionitems_dbmapping['agendaitem.title'] = '__readonly__'
myactionitems_dbmapping['agendaitem.agendaitem'] = '__readonly__'
myactionitems_formmapping['update_time'] = lambda row: displaytime.dt2asc(row.update_time)
myactionitems_dbmapping['update_time'] = lambda form: datetime.now()
myactionitems_formmapping['updated_by'] = lambda row: LocalUser.query.filter_by(id=row.updated_by).one().name
myactionitems_dbmapping['updated_by'] = lambda form: user2localuser(current_user).id
agendaitems_filters = filtercontainerdiv()
agendaitems_filters += filterdiv('agendaitems-external-filter-status', 'Status')
agendaitems_yadcf_options = [
yadcfoption('status:name', 'agendaitems-external-filter-status', 'multi_select', placeholder='Select statuses', width='200px'),
]
myactionitems_view = MyActionItemsView(
roles_accepted=[ROLE_SUPER_ADMIN, ROLE_MEETINGS_ADMIN, ROLE_MEETINGS_MEMBER],
local_interest_model=LocalInterest,
app=bp, # use blueprint instead of app
db=db,
model=ActionItem,
version_id_col='version_id', # optimistic concurrency control
template='datatables.jinja2',
templateargs={'adminguide': adminguide},
pretablehtml=agendaitems_filters.render(),
yadcfoptions=agendaitems_yadcf_options,
pagename='My Action Items',
endpoint='admin.myactionitems',
endpointvalues={'interest': '<interest>'},
rule='/<interest>/myactionitems',
dbmapping=myactionitems_dbmapping,
formmapping=myactionitems_formmapping,
checkrequired=True,
clientcolumns=[
{'data': 'date', 'name': 'date', 'label': 'Meeting Date',
'type': 'readonly'
},
{'data': 'action', 'name': 'action', 'label': 'Action',
'type': 'readonly'
},
{'data': 'agendatitle', 'name': 'agendatitle', 'label': 'Agenda Item',
'type': 'readonly',
'dt': {'visible': False},
},
{'data': 'agendatext', 'name': 'agendatext', 'label': '',
'type': 'display',
'dt': {'visible': False},
},
{'data': 'status', 'name': 'status', 'label': 'Status',
'type': 'select2',
'options': action_all,
},
{'data': 'comments', 'name': 'comments', 'label': 'Progress / Resolution',
'type': 'ckeditorClassic',
'fieldInfo': 'record your progress or how this was resolved',
'dt': {'visible': False},
},
{'data': 'update_time', 'name': 'update_time', 'label': 'Last Update',
'type': 'hidden',
},
{'data': 'updated_by', 'name': 'updated_by', 'label': 'Updated By',
'type': 'hidden',
},
],
idSrc='rowid',
buttons=[
'editRefresh',
'csv',
],
dtoptions={
'scrollCollapse': True,
'scrollX': True,
'scrollXInner': "100%",
'scrollY': True,
'order': [['date:name', 'desc']],
},
)
myactionitems_view.register()
##########################################################################################
# mymotionvotes endpoint
##########################################################################################
class MyMotionVotesView(DbCrudApiInterestsRolePermissions):
def beforequery(self):
self.queryparams['user'] = user2localuser(current_user)
mymotionvotes_dbattrs = 'id,interest_id,meeting.purpose,meeting.date,motion.motion,vote,motionvotekey'.split(',')
mymotionvotes_formfields = 'rowid,interest_id,purpose,date,motion,vote,motionvotekey'.split(',')
mymotionvotes_dbmapping = dict(zip(mymotionvotes_dbattrs, mymotionvotes_formfields))
mymotionvotes_formmapping = dict(zip(mymotionvotes_formfields, mymotionvotes_dbattrs))
mymotionvotes_formmapping['date'] = lambda row: isodate.dt2asc(row.meeting.date)
mymotionvotes_view = MyMotionVotesView(
roles_accepted=[ROLE_SUPER_ADMIN, ROLE_MEETINGS_ADMIN, ROLE_MEETINGS_MEMBER],
local_interest_model=LocalInterest,
app=bp, # use blueprint instead of app
db=db,
model=MotionVote,
version_id_col='version_id', # optimistic concurrency control
template='datatables.jinja2',
templateargs={'adminguide': adminguide},
pagename='My Motion Votes',
endpoint='admin.mymotionvotes',
endpointvalues={'interest': '<interest>'},
rule='/<interest>/mymotionvotes',
dbmapping=mymotionvotes_dbmapping,
formmapping=mymotionvotes_formmapping,
checkrequired=True,
clientcolumns=[
{'data': '', # needs to be '' else get exception converting options from meetings render_template
# TypeError: '<' not supported between instances of 'str' and 'NoneType'
'name': 'view-control',
'className': 'view-control shrink-to-fit',
'orderable': False,
'defaultContent': '',
'label': '',
'type': 'hidden', # only affects editor modal
'title': 'View',
'render': {'eval': 'render_icon("fas fa-eye")'},
},
{'data': 'date', 'name': 'date', 'label': 'Meeting Date',
'type': 'readonly'
},
{'data': 'purpose', 'name': 'purpose', 'label': 'Meeting Purpose',
'type': 'readonly'
},
{'data': 'motion', 'name': 'motion', 'label': 'Motion',
'type': 'readonly'
},
{'data': 'vote', 'name': 'vote', 'label': 'Vote',
'type': 'readonly',
},
{'data': 'motionvotekey', 'name': 'motionvotekey', 'label': 'My Motion Vote',
'type': 'hidden',
'dt': {'visible': False},
},
],
idSrc='rowid',
buttons=lambda: [
{
'extend': 'edit',
'name': 'view-motionvote',
'text': 'View Motion Vote',
'action': {'eval': 'mymotionvote_motionvote("{}")'.format(url_for('admin.motionvote', interest=g.interest))},
'className': 'Hidden',
},
'csv',
],
dtoptions={
'scrollCollapse': True,
'scrollX': True,
'scrollXInner': "100%",
'scrollY': True,
'order': [['date:name', 'desc']],
},
)
mymotionvotes_view.register()
##########################################################################################
# memberactionitems endpoint
###########################################################################################
class MemberActionItemsView(ActionItemsBase):
pass
memberactionitems_view = MemberActionItemsView(
roles_accepted=[ROLE_SUPER_ADMIN, ROLE_MEETINGS_ADMIN, ROLE_MEETINGS_MEMBER],
pagename='Action Items',
templateargs={'adminguide': adminguide},
endpoint='admin.memberactionitems',
rule='/<interest>/memberactionitems',
buttons=[
'csv'
],
)
memberactionitems_view.register()
##########################################################################################
# membermotionsvote endpoint
###########################################################################################
class MemberMotionVotesView(MotionVotesBase):
pass
membermotionvotes_view = MemberMotionVotesView(
roles_accepted=[ROLE_SUPER_ADMIN, ROLE_MEETINGS_ADMIN, ROLE_MEETINGS_MEMBER],
pagename='Motion Votes',
templateargs={'adminguide': adminguide},
endpoint='admin.membermotionvotes',
rule='/<interest>/membermotionvotes',
buttons=[
'csv'
],
)
membermotionvotes_view.register()
##########################################################################################
# membermotions endpoint
###########################################################################################
class MemberMotionsView(MotionsBase):
pass
membermotions_view = MemberMotionsView(
roles_accepted=[ROLE_SUPER_ADMIN, ROLE_MEETINGS_ADMIN, ROLE_MEETINGS_MEMBER],
pagename='Motions',
templateargs={'adminguide': adminguide},
endpoint='admin.membermotions',
rule='/<interest>/membermotions',
buttons=[
'csv'
],
childrowoptions={
'template': 'motion-child-row.njk',
'showeditor': False,
'group': 'interest',
'groupselector': '#metanav-select-interest',
'childelementargs': motions_childelementargs.get_childelementargs({
'motionvotes': membermotionvotes_view,
}),
},
)
membermotions_view.register()
##########################################################################################
# mymeetingrsvp api endpoint
##########################################################################################
class MyMeetingRsvpApi(MethodView):
def __init__(self):
self.roles_accepted = [ROLE_SUPER_ADMIN, ROLE_MEETINGS_ADMIN, ROLE_MEETINGS_MEMBER]
def permission(self):
'''
determine if current user is permitted to use the view
'''
# adapted from loutilities.tables.DbCrudApiRolePermissions
allowed = False
# must have invitekey query arg
if request.args.get(INVITE_KEY_URLARG, False):
for role in self.roles_accepted:
if current_user.has_role(role):
allowed = True
break
return allowed
def get(self):
try:
invitekey = request.args[INVITE_KEY_URLARG]
invite = Invite.query.filter_by(invitekey=invitekey).one()
options = [r for r in invite_response_all
                       # if there is no response yet, offer all options; otherwise offer everything except 'no response'
if invite.response == INVITE_RESPONSE_NO_RESPONSE or r != INVITE_RESPONSE_NO_RESPONSE]
return jsonify(status='success', response=invite.response, options=options)
except Exception as e:
exc = ''.join(format_exception_only(type(e), e))
output_result = {'status' : 'fail', 'error': 'exception occurred:\n{}'.format(exc)}
# roll back database updates and close transaction
db.session.rollback()
current_app.logger.error(format_exc())
return jsonify(output_result)
def post(self):
try:
# verify user can write the data, otherwise abort (adapted from loutilities.tables._editormethod)
if not self.permission():
db.session.rollback()
cause = 'operation not permitted for user'
return jsonify(error=cause)
invitekey = request.args[INVITE_KEY_URLARG]
response = request.form['response']
invite = Invite.query.filter_by(invitekey=invitekey).one()
invite.response = response
invite.attended = response == INVITE_RESPONSE_ATTENDING
db.session.commit()
output_result = {'status' : 'success'}
return jsonify(output_result)
except Exception as e:
exc = ''.join(format_exception_only(type(e), e))
output_result = {'status' : 'fail', 'error': 'exception occurred:\n{}'.format(exc)}
# roll back database updates and close transaction
db.session.rollback()
current_app.logger.error(format_exc())
return jsonify(output_result)
bp.add_url_rule('/<interest>/_mymeetingrsvp/rest', view_func=MyMeetingRsvpApi.as_view('_mymeetingrsvp'),
methods=['GET', 'POST'])
##########################################################################################
# motionvote endpoint
###########################################################################################
class MotionVoteView(SelectInterestsView):
# remove auth_required() decorator
decorators = []
def permission(self):
motionvotekey = request.args.get(MOTIONVOTE_KEY_URLARG, None)
if motionvotekey:
permitted = True
motionvote = MotionVote.query.filter_by(motionvotekey=motionvotekey).one()
user = localuser2user(motionvote.user)
if current_user != user:
# log out and in automatically
# see https://flask-security-too.readthedocs.io/en/stable/api.html#flask_security.login_user
logout_user()
login_user(user)
db.session.commit()
flash('you have been automatically logged in as {}'.format(current_user.name))
            # at this point, current_user is the target user (may have been switched via the motionvotekey)
            # check role permissions; permitted stays True (from above) unless determined otherwise
roles_accepted = [ROLE_SUPER_ADMIN, ROLE_MEETINGS_ADMIN, ROLE_MEETINGS_MEMBER]
allowed = False
for role in roles_accepted:
if current_user.has_role(role):
allowed = True
break
if not allowed:
permitted = False
# no motionvotekey, not permitted
else:
permitted = False
return permitted
def setdisplayonly(self):
motionvotekey = request.args.get(MOTIONVOTE_KEY_URLARG)
motionvote = MotionVote.query.filter_by(motionvotekey=motionvotekey).one()
today = date.today()
meetingdate = motionvote.meeting.date
return today > meetingdate
def getval(self):
motionvotekey = request.args.get(MOTIONVOTE_KEY_URLARG)
motionvote = MotionVote.query.filter_by(motionvotekey=motionvotekey).one()
return '"{}"'.format(motionvote.vote)
def putval(self, val):
motionvotekey = request.args.get(MOTIONVOTE_KEY_URLARG)
motionvote = MotionVote.query.filter_by(motionvotekey=motionvotekey).one()
motionvote.vote = val
db.session.commit()
def motionvote_preselecthtml():
motionvotekey = request.args.get(MOTIONVOTE_KEY_URLARG)
motionvote = MotionVote.query.filter_by(motionvotekey=motionvotekey).one()
meeting = motionvote.meeting
motion = motionvote.motion
user = motionvote.user
html = div()
with html:
h1('{} {}: {}\'s Vote'.format(meeting.date, meeting.purpose, user.name))
p(b('Motion'))
with div(style='margin-left: 1em;'):
raw(motion.motion)
if motion.comments:
raw(motion.comments)
return html.render()
motionvote_view = MotionVoteView(
roles_accepted=[ROLE_SUPER_ADMIN, ROLE_MEETINGS_ADMIN],
local_interest_model=LocalInterest,
app=bp,
pagename='motion vote',
displayonly=lambda: motionvote_view.setdisplayonly(),
templateargs={'adminguide': adminguide},
endpoint='admin.motionvote',
endpointvalues={'interest': '<interest>'},
preselecthtml=motionvote_preselecthtml,
rule='<interest>/motionvote',
selectlabel='Vote',
select2options={
'width': '200px',
'data': motionvote_all
},
)
motionvote_view.register()
#########################################################################################
# motionvote api endpoint
#########################################################################################
class MotionVoteApi(MethodView):
def __init__(self):
self.roles_accepted = [ROLE_SUPER_ADMIN, ROLE_MEETINGS_ADMIN, ROLE_MEETINGS_MEMBER]
def permission(self):
'''
determine if current user is permitted to use the view
'''
# adapted from loutilities.tables.DbCrudApiRolePermissions
allowed = False
        # must have motion_id query arg
if request.args.get('motion_id', False):
for role in self.roles_accepted:
if current_user.has_role(role):
allowed = True
break
return allowed
def get(self):
try:
# verify user can write the data, otherwise abort (adapted from loutilities.tables._editormethod)
if not self.permission():
db.session.rollback()
cause = 'operation not permitted for user'
return jsonify(error=cause)
motion_id = request.args['motion_id']
tolist = get_evotes(motion_id)
# set defaults
motion = Motion.query.filter_by(id=motion_id).one()
from_email = motion.meeting.organizer.email
subject = '[{} {}] Motion eVote Request'.format(
motion.meeting.purpose, motion.meeting.date)
message = ''
return jsonify(from_email=from_email, subject=subject, message=message, tolist=tolist)
except Exception as e:
exc = ''.join(format_exception_only(type(e), e))
output_result = {'status': 'fail', 'error': 'exception occurred:<br>{}'.format(exc)}
# roll back database updates and close transaction
db.session.rollback()
current_app.logger.error(format_exc())
return jsonify(output_result)
def post(self):
try:
# verify user can write the data, otherwise abort (adapted from loutilities.tables._editormethod)
if not self.permission():
db.session.rollback()
cause = 'operation not permitted for user'
return jsonify(error=cause)
# there should be one 'id' in this form data, 'keyless'
requestdata = get_request_data(request.form)
motion_id = request.args['motion_id']
from_email = requestdata['keyless']['from_email']
subject = requestdata['keyless']['subject']
message = requestdata['keyless']['message']
generateevotes(motion_id, from_email, subject, message)
self._responsedata = []
db.session.commit()
return jsonify(self._responsedata)
except Exception as e:
exc = ''.join(format_exception_only(type(e), e))
output_result = {'status' : 'fail', 'error': 'exception occurred:<br>{}'.format(exc)}
# roll back database updates and close transaction
db.session.rollback()
current_app.logger.error(format_exc())
return jsonify(output_result)
bp.add_url_rule('/<interest>/_motionvote/rest', view_func=MotionVoteApi.as_view('_motionvote'),
methods=['GET', 'POST'])
##########################################################################################
# rsvp endpoint
###########################################################################################
class RsvpView(SelectInterestsView):
# remove auth_required() decorator
decorators = []
def permission(self):
invitekey = request.args.get(INVITE_KEY_URLARG, None)
if invitekey:
permitted = True
invite = Invite.query.filter_by(invitekey=invitekey).one()
user = localuser2user(invite.user)
if current_user != user:
# log out and in automatically
# see https://flask-security-too.readthedocs.io/en/stable/api.html#flask_security.login_user
logout_user()
login_user(user)
db.session.commit()
flash('you have been automatically logged in as {}'.format(current_user.name))
            # at this point, current_user is the target user (may have been switched via the invitekey)
            # check role permissions; permitted stays True (from above) unless determined otherwise
roles_accepted = MEETINGS_MEMBER_ROLES
allowed = False
for role in roles_accepted:
if current_user.has_role(role):
allowed = True
break
if not allowed:
permitted = False
        # no invitekey, not permitted
else:
permitted = False
return permitted
def setdisplayonly(self):
invitekey = request.args.get(INVITE_KEY_URLARG)
invite = Invite.query.filter_by(invitekey=invitekey).one()
today = date.today()
meetingdate = invite.meeting.date
return today > meetingdate
def getval(self):
invitekey = request.args.get(INVITE_KEY_URLARG)
invite = Invite.query.filter_by(invitekey=invitekey).one()
return '"{}"'.format(invite.response)
def putval(self, val):
invitekey = request.args.get(INVITE_KEY_URLARG)
invite = Invite.query.filter_by(invitekey=invitekey).one()
invite.response = val
db.session.commit()
def rsvp_preselecthtml():
invitekey = request.args.get(INVITE_KEY_URLARG)
invite = Invite.query.filter_by(invitekey=invitekey).one()
meeting = invite.meeting
user = invite.user
html = div()
with html:
h1('{} {}: {}\'s RSVP'.format(meeting.date, meeting.purpose, user.name))
return html.render()
rsvp_view = RsvpView(
local_interest_model=LocalInterest,
app=bp,
pagename='rsvp',
displayonly=lambda: rsvp_view.setdisplayonly(),
templateargs={'adminguide': adminguide},
endpoint='admin.rsvp',
endpointvalues={'interest': '<interest>'},
preselecthtml=rsvp_preselecthtml,
rule='<interest>/rsvp',
selectlabel='RSVP',
select2options={
'width': '200px',
'data': invite_response_all
},
)
rsvp_view.register()
|
StarcoderdataPython
|
82473
|
import maya
from .get_posts import get_posts
def get_posts_for_dates(site_url, start_date, end_date):
# get the posts
posts_all = []
posts_selected = []
pages_tried_max = 100
    for i in range(1, pages_tried_max + 1):
        try:
            posts = get_posts(site_url, page_number=i)
            posts_all.extend(posts)
        except Exception:
            # assume a failed page request means there are no more pages to fetch
            break
for post in posts_all:
post_date = maya.when(post["date"], timezone="UTC")
if post_date >= start_date and post_date <= end_date:
posts_selected.append(post)
return posts_selected
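# Hedged usage sketch (the site URL and dates below are illustrative placeholders,
# not values from the original module):
#   start = maya.when("2020-01-01", timezone="UTC")
#   end = maya.when("2020-01-31", timezone="UTC")
#   january_posts = get_posts_for_dates("https://example.com", start, end)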
|
StarcoderdataPython
|
5081543
|
<gh_stars>0
import os
import pandas as pd
from get_traces import load_traces, get_traces
from highlights_state_selection import compute_states_importance, highlights
from get_trajectories import get_trajectory_images, create_video, trajectories_by_importance, states_to_trajectories
def create_highlights(args):
"""
load pre-trained agent from RL-Zoo.
retrieve execution traces and states.
Obtain trajectories and create highlights video.
"""
"""RL-Zoo"""
if args.load_traces:
traces, states = load_traces(args)
else:
traces, states = get_traces(args)
"""HIGHLIGHTS"""
data = {
'state': list(states.keys()),
'q_values': [x.observed_actions for x in states.values()]
}
q_values_df = pd.DataFrame(data)
"""importance by state"""
q_values_df = compute_states_importance(q_values_df, compare_to=args.state_importance)
highlights_df = q_values_df
state_importance_dict = dict(zip(highlights_df["state"], highlights_df["importance"]))
"""get highlights"""
if args.trajectory_importance == "single_state":
"""highlights importance by single state importance"""
summary_states = highlights(highlights_df, traces, args.summary_traj_budget, args.context_length,
args.minimum_gap)
summary_trajectories = states_to_trajectories(summary_states, state_importance_dict)
else:
"""highlights importance by trajectory"""
summary_trajectories = trajectories_by_importance(args.trajectory_importance, traces,
args.context_length, args.load_traces,
args.trajectories_file, state_importance_dict,
args.similarity_limit, args.summary_traj_budget)
if args.verbose: print('HIGHLIGHTS obtained')
"""make video"""
dir_name = os.path.join(args.video_dir, args.algo, args.state_importance +
"_state_importance", args.trajectory_importance)
get_trajectory_images(summary_trajectories, states, dir_name)
create_video(dir_name)
if args.verbose: print("HIGHLIGHTS Video Obtained")
return
def get_multiple_highlights(args):
# environments = ['SeaquestNoFrameskip-v4', 'MsPacmanNoFrameskip-v4']
algos = ['a2c', 'ppo2', 'acktr', 'dqn']
state_importance = ["second", "worst"]
trajectory_importance = ["avg", "max_minus_avg", "avg_delta", "max_min", "single_state"]
args.verbose = False
print("Starting Experiments:")
# for env in environments:
# print(f"\tEnvironment: {env}")
for algo in algos:
print(f"\t\tAlgorithm: {algo}")
args.algo = algo
args.traces_file = os.path.join(args.stt_dir, args.algo, "Traces:" + args.file_name)
args.state_file = os.path.join(args.stt_dir, args.algo, "States:" + args.file_name)
args.trajectories_file = os.path.join(args.stt_dir, args.algo, "Trajectories:" + args.file_name)
for s_i in state_importance:
args.load_traces = False # need to save new trajectories
print(f"\t\t\tState Importance: {s_i}")
args.state_importance = s_i
for t_i in trajectory_importance:
print(f"\t\t\t\tTrajectory Importance: {t_i}")
args.trajectory_importance = t_i
create_highlights(args)
print(f"\t\t\t\t....Completed")
args.load_traces = True # use saved trajectories
print("Experiments Completed")
|
StarcoderdataPython
|
6664265
|
#!/usr/bin/python
#
# Sigma Control API DUT (sniffer_get_field_value)
# Copyright (c) 2014, Qualcomm Atheros, Inc.
# All Rights Reserved.
# Licensed under the Clear BSD license. See README for more details.
import sys
import subprocess
import tshark
for arg in sys.argv:
if arg.startswith("FileName="):
filename = arg.split("=", 1)[1]
elif arg.startswith("SrcMac="):
srcmac = arg.split("=", 1)[1]
elif arg.startswith("FrameName="):
framename = arg.split("=", 1)[1].lower()
elif arg.startswith("FieldName="):
fieldname = arg.split("=", 1)[1].lower()
frame_filters = tshark.tshark_framenames()
if framename not in frame_filters:
print "errorCode,Unsupported FrameName"
sys.exit()
fields = tshark.tshark_fieldnames()
if fieldname not in fields:
print "errorCode,Unsupported FieldName"
sys.exit()
cmd = ['tshark', '-r', filename,
'-c', '1',
'-R', 'wlan.sa==' + srcmac + " and " + frame_filters[framename],
'-Tfields',
'-e', fields[fieldname]]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
data = proc.stdout.read().rstrip()
result = "SUCCESS" if len(data) > 0 else "FAIL"
print "CheckResult,%s,ReturnValue,%s" % (result, data)
|
StarcoderdataPython
|
5081081
|
<gh_stars>1-10
"""
Report Utility
Generates and saves a CT machine's report based off of audit data.
"""
import logging
from ctqa import logutil
# Explicitly disabling matplotlib to prevent log spam
logging.getLogger('matplotlib').setLevel(logging.WARNING)
import matplotlib
# Using simplified mpl backend due to exclusive png creation
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.pyplot import figure
import datetime
import numpy as np
import json
import os, sys
from ctqa import datautil
from ctqa import profileutil
from ctqa import notifications
#Logger init
logger = logging.getLogger(logutil.MAIN_LOG_NAME)
def generateReport(dataPath, config, title, upperlimit, lowerlimit, report_type="daily"):
"""
Retrieves audit data from the data path, organizes a site's data into a displayable format,
and creates a PNG graph at the passed save location.
"""
logger.debug("Generating report: " + title)
# Config variable assignment
savelocation = config.get("ReportLocation")
forecastdays = config.get("DaysToForecast")
if report_type == "daily":
graphdays = config.get("DailyReportDaysToGraph")
else:
graphdays = config.get("WeeklyReportDaysToGraph")
# Getting data
jsonData = datautil.load(dataPath)
if jsonData == -1:
print('Unable to load json data')
return -1
# Selecting center roi data and organizing
centerrois = []
centerdates = []
for date in jsonData['Homogeneity'].keys():
try:
centerrois.append(jsonData['Homogeneity'][date]['CENTER']['MEAN'])
centerdates.append(mdates.datestr2num(date))
except KeyError as e:
logger.error("Unable to find key when parsing Homogeneity data", exc_info=True)
# Loop through collected dates and omit any submitted before now - graphdays ago
temprois = []
tempdates = []
datenow = mdates.date2num(datetime.datetime.now())
for i in range(0, len(centerdates)):
if centerdates[i] > (datenow - graphdays):
temprois.append(centerrois[i])
tempdates.append(centerdates[i])
centerrois = temprois
centerdates = tempdates
months = mdates.MonthLocator() # every month
days = mdates.WeekdayLocator()
monthsFmt = mdates.DateFormatter('%Y-%m')
axes = plt.subplot()
plt.plot_date(x=centerdates, y=centerrois, fmt='o', label='Center ROI Means', zorder=10)
axes.xaxis.set_major_locator(months)
axes.xaxis.set_major_formatter(monthsFmt)
axes.xaxis.set_minor_locator(days)
# Setting axis bounds
plt.xlim((datenow - graphdays, datenow + forecastdays + 5))
# Calibration levels
plt.axhline(upperlimit, color='red', linewidth=1, label='Control Limits')
plt.axhline(lowerlimit, color='red', linewidth=1)
# Warning levels
plt.axhline(upperlimit/2, color='orange', linewidth=1)
plt.axhline(lowerlimit/2, color='orange', linewidth=1)
# Center line
plt.axhline(0, color='black', linewidth=1)
# Adding axes labels
plt.xlabel("Date")
plt.ylabel("ROI Mean Values")
# Rotating x-axis 45 degrees
plt.xticks(rotation=45)
# Setting image size
fig = plt.gcf()
# Dimensions for image defined in inches
    fig.set_size_inches(8,5,forward=True) # forward=True => Propagates changes to gui window
# Gathering points taken in the last month
lastdates = []
lastrois = []
for i in range(0, len(centerdates)):
if centerdates[i] > (datenow - 30):
lastdates.append(centerdates[i])
lastrois.append(centerrois[i])
# Fitting regression for decalibration prediction if we have enough data
# Aiming for at least 3 points of data in the last month.
forecastend = None
if len(lastdates) > 2:
# Fitting linear polynomial
fit = np.polyfit(lastdates, lastrois, 1) # Fitting to dates/rois to a single degree polynomial
# Plotting best fit line with a two week forecast
forecasttime = lastdates[(len(lastdates)-1)] + forecastdays
forecaststart = (lastdates[0]*fit[0]) + fit[1]
forecastend = (forecasttime*fit[0]) + fit[1] # y = mx + b
# Starting plot at the first value in lastdates
plt.plot_date(x=[lastdates[0], forecasttime], y=[forecaststart, forecastend], label='Forecast Trend', fmt="--o", zorder=5)
# Creating legend
handles, labels = axes.get_legend_handles_labels()
strdates = list(jsonData["Homogeneity"].keys())
if len(strdates) >= 1:
# Creating blank rectangle for date holder
blankrectangle = matplotlib.patches.Rectangle((0, 0), 1, 1, fc="w", fill=False, edgecolor='none', linewidth=0)
handles.append(blankrectangle)
# Getting last point date as string
strdates.sort()
lastdatepoint = strdates[len(strdates)-1]
lastdatepoint = "Last Point: %s/%s/%s" % (lastdatepoint[0:4], lastdatepoint[4:6], lastdatepoint[6:8])
labels.append(lastdatepoint)
axes.legend(handles, labels)
# Title
plt.title(title)
# Packing layout
plt.tight_layout()
# Setting location for export to reports folder, local to executable
file = title + '.png'
loc = os.path.abspath(savelocation)
# Ensuring the save location exists
os.makedirs(loc, exist_ok=True)
file_loc = os.path.join(loc, file)
# Saving png image to savelocation
plt.savefig(file_loc, dpi=300)
# Clearing plot. MUST DO THIS UNLESS YOU WANT THE OLD PLOT TO REMAIN FOR ANOTHER RUN
plt.close()
return forecastend
def regenerateReports(dataPath, config, profiles, report_type="daily"):
"""Finds all data folders and updates reports based on existing data"""
# Getting report names and paths to the data
pathitems = os.listdir(dataPath)
subnames = []
for item in pathitems:
itempath = os.path.join(dataPath, item)
if os.path.isdir(itempath):
subnames.append(item)
# Generating reports
for site in subnames:
# Put together data.json location
sitepath = os.path.join(dataPath, site)
# Getting site profile stats
siteprofile = profiles[site]
upperlimit = siteprofile.get("UpperHomogeneityLimit")
lowerlimit = siteprofile.get("LowerHomogeneityLimit")
# Generating daily or weekly reports
sitesplit = site.split("-")
shortitle = sitesplit[3] + '-' + sitesplit[2] + '-' + sitesplit[0]
if report_type == "daily":
# Get title from site name
title = 'DAILY-' + shortitle
# Create report
generateReport(sitepath, config, title, upperlimit, lowerlimit)
elif report_type == "weekly":
# Get title from site name
title = 'WEEKLY-' + shortitle
# Create report
generateReport(sitepath, config, title, upperlimit, lowerlimit, report_type="weekly")
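# Hedged usage sketch (paths, limits and the title below are assumptions for
# illustration; the config keys mirror those read by generateReport above):
#   config = {"ReportLocation": "./reports", "DaysToForecast": 14,
#             "DailyReportDaysToGraph": 90, "WeeklyReportDaysToGraph": 365}
#   generateReport("./data/EXAMPLE-SITE", config, "DAILY-EXAMPLE",
#                  upperlimit=4, lowerlimit=-4, report_type="daily")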
|
StarcoderdataPython
|
5041019
|
<reponame>navikt/dakan-api-graph
from typing import Dict
from typing import List
from pydantic import BaseModel
class Node(BaseModel):
id: str
label: str
properties: dict
class NodeResponse(Dict):
id: str
label: str
type: str
properties: dict
class PagedNodes(Dict):
page: int
total_pages: int
has_next_page: bool
max_items_per_page: int
total_items: int
data: List[Node]
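# Hedged usage sketch (field values below are illustrative, not from the original API):
if __name__ == "__main__":
    example_node = Node(id="n1", label="dataset", properties={"owner": "team-data"})
    print(example_node.json())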
|
StarcoderdataPython
|
8146205
|
import glob
import os
import gin
import MinkowskiEngine as ME
import open3d as o3d
from src.data.base_loader import *
from src.data.transforms import *
from src.utils.file import read_trajectory
@gin.configurable()
class ThreeDMatchPairDatasetBase(PairDataset):
OVERLAP_RATIO = None
DATA_FILES = None
def __init__(
self,
root,
phase,
voxel_size=0.05,
transform=None,
rotation_range=360,
random_rotation=True,
seed=0,
):
PairDataset.__init__(
self,
root,
phase,
voxel_size,
transform,
rotation_range,
random_rotation,
seed,
)
logging.info(f"Loading the subset {phase} from {root}")
subset_names = open(self.DATA_FILES[phase]).read().split()
for name in subset_names:
fname = name + "*%.2f.txt" % self.OVERLAP_RATIO
fnames_txt = glob.glob(root + "/" + fname)
assert (
len(fnames_txt) > 0
), f"Make sure that the path {root} has data {fname}"
for fname_txt in fnames_txt:
with open(fname_txt) as f:
content = f.readlines()
fnames = [x.strip().split() for x in content]
for fname in fnames:
self.files.append([fname[0], fname[1]])
logging.info(f"Loaded {len(self.files)} pairs")
def __getitem__(self, idx):
file0 = os.path.join(self.root, self.files[idx][0])
file1 = os.path.join(self.root, self.files[idx][1])
data0 = np.load(file0)
data1 = np.load(file1)
xyz0 = data0["pcd"]
xyz1 = data1["pcd"]
if self.random_rotation:
T0 = sample_random_trans(xyz0, self.randg, self.rotation_range)
T1 = sample_random_trans(xyz1, self.randg, self.rotation_range)
trans = T1 @ np.linalg.inv(T0)
xyz0 = self.apply_transform(xyz0, T0)
xyz1 = self.apply_transform(xyz1, T1)
else:
trans = np.identity(4)
# Voxelization
xyz0_th = torch.from_numpy(xyz0)
xyz1_th = torch.from_numpy(xyz1)
_, sel0 = ME.utils.sparse_quantize(xyz0_th / self.voxel_size, return_index=True)
_, sel1 = ME.utils.sparse_quantize(xyz1_th / self.voxel_size, return_index=True)
# Get features
npts0 = len(sel0)
npts1 = len(sel1)
feats_train0, feats_train1 = [], []
xyz0_th = xyz0_th[sel0]
xyz1_th = xyz1_th[sel1]
feats_train0.append(torch.ones((npts0, 1)))
feats_train1.append(torch.ones((npts1, 1)))
F0 = torch.cat(feats_train0, 1)
F1 = torch.cat(feats_train1, 1)
C0 = torch.floor(xyz0_th / self.voxel_size)
C1 = torch.floor(xyz1_th / self.voxel_size)
if self.transform:
C0, F0 = self.transform(C0, F0)
C1, F1 = self.transform(C1, F1)
extra_package = {"idx": idx, "file0": file0, "file1": file1}
return (
xyz0_th.float(),
xyz1_th.float(),
C0.int(),
C1.int(),
F0.float(),
F1.float(),
trans,
extra_package,
)
@gin.configurable()
class ThreeDMatchPairDataset03(ThreeDMatchPairDatasetBase):
OVERLAP_RATIO = 0.3
DATA_FILES = {
"train": "./datasets/splits/train_3dmatch.txt",
"val": "./datasets/splits/val_3dmatch.txt",
"test": "./datasets/splits/test_3dmatch.txt",
}
@gin.configurable()
class ThreeDMatchPairDataset05(ThreeDMatchPairDataset03):
OVERLAP_RATIO = 0.5
@gin.configurable()
class ThreeDMatchPairDataset07(ThreeDMatchPairDataset03):
OVERLAP_RATIO = 0.7
@gin.configurable()
class ThreeDMatchTestDataset:
"""3DMatch test dataset"""
DATA_FILES = {"test": "./datasets/splits/test_3dmatch.txt"}
CONFIG_ROOT = "./datasets/config/3DMatch"
TE_THRESH = 30
RE_THRESH = 15
def __init__(self, root):
self.root = root
subset_names = open(self.DATA_FILES["test"]).read().split()
self.subset_names = subset_names
self.files = []
for sname in subset_names:
traj_file = os.path.join(self.CONFIG_ROOT, sname, "gt.log")
assert os.path.exists(traj_file)
traj = read_trajectory(traj_file)
for ctraj in traj:
i = ctraj.metadata[0]
j = ctraj.metadata[1]
T_gt = ctraj.pose
self.files.append((sname, i, j, T_gt))
logging.info(f"Loaded {self.__class__.__name__} with {len(self.files)} data")
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
sname, i, j, T_gt = self.files[idx]
file0 = os.path.join(self.root, sname, f"cloud_bin_{i}.ply")
file1 = os.path.join(self.root, sname, f"cloud_bin_{j}.ply")
pcd0 = o3d.io.read_point_cloud(file0)
pcd1 = o3d.io.read_point_cloud(file1)
xyz0 = np.asarray(pcd0.points).astype(np.float32)
xyz1 = np.asarray(pcd1.points).astype(np.float32)
return sname, xyz0, xyz1, T_gt
class ThreeDLoMatchTestDataset(ThreeDMatchTestDataset):
"""3DLoMatch test dataset"""
SPLIT_FILES = {"test": "./datasets/splits/test_3dmatch.txt"}
CONFIG_ROOT = "./datasets/config/3DLoMatch"
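# Hedged usage sketch ("/data/threedmatch_test" is a placeholder root; the class
# expects the per-scene gt.log files and cloud_bin_*.ply clouds to exist there):
#   dataset = ThreeDMatchTestDataset("/data/threedmatch_test")
#   sname, xyz0, xyz1, T_gt = dataset[0]
#   print(sname, xyz0.shape, xyz1.shape, T_gt.shape)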
|
StarcoderdataPython
|
3503982
|
<reponame>twisted/quotient
from nevow.livetrial.testcase import TestCase
from nevow.athena import expose
from nevow.tags import div, directive
from nevow.loaders import stan
from axiom.store import Store
from xquotient.spam import Filter, HamFilterFragment
class PostiniConfigurationTestCase(TestCase):
"""
Tests for configuring Postini-related behavior.
"""
jsClass = u'Quotient.Test.PostiniConfigurationTestCase'
def setUp(self):
self.store = Store()
self.filter = Filter(store=self.store)
self.widget = HamFilterFragment(self.filter)
self.widget.setFragmentParent(self)
        return self.widget
expose(setUp)
def checkConfiguration(self):
"""
        Test that postini filtering has been turned on and that the threshold
has been set to 5.0.
"""
self.failUnless(self.filter.usePostiniScore)
self.assertEquals(self.filter.postiniThreshhold, 5.0)
expose(checkConfiguration)
|
StarcoderdataPython
|
6547964
|
<gh_stars>10-100
from hana_ml.algorithms.pal.naive_bayes import NaiveBayes
from hana_automl.algorithms.base_algo import BaseAlgorithm
class NBayesCls(BaseAlgorithm):
def __init__(self):
super(NBayesCls, self).__init__()
self.title = "NaiveBayesClassifier"
self.params_range = {
"alpha": (1e-2, 100),
"discretization": (0, 1),
}
def set_params(self, **params):
params["discretization"] = ["no", "supervised"][round(params["discretization"])]
# self.model = UnifiedClassification(func='NaiveBayes', **params)
self.tuned_params = params
self.model = NaiveBayes(**params)
def optunatune(self, trial):
alpha = trial.suggest_float("alpha", 1e-2, 100, log=True)
discretization = trial.suggest_categorical(
"discretization", ["no", "supervised"]
)
# model = UnifiedClassification(func='NaiveBayes', alpha=alpha, discretization=discretization)
model = NaiveBayes(alpha=alpha, discretization=discretization)
self.model = model
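# Hedged usage sketch (assumes a SAP HANA connection and training DataFrame exist in
# the caller; the fit argument names follow common hana_ml conventions):
#   algo = NBayesCls()
#   algo.set_params(alpha=1.0, discretization=0)  # 0 -> "no", 1 -> "supervised"
#   algo.model.fit(train_df, key="ID", label="TARGET")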
|
StarcoderdataPython
|
3592584
|
<gh_stars>0
import os
class Config(object):
API_ID = int(os.environ.get("API_ID"))
API_HASH = os.environ.get("API_HASH")
BOT_TOKEN = os.environ.get("BOT_TOKEN")
DATABASE_URL = os.environ.get("DATABASE_URL")
UPDATES_CHANNEL = os.environ.get("UPDATES_CHANNEL", None)
BIN_CHANNEL = int(os.environ.get("BIN_CHANNEL"))
|
StarcoderdataPython
|
1959948
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2019 Akumatic
#
# https://adventofcode.com/2019/day/8
def readFile() -> str:
with open(f"{__file__.rstrip('code.py')}input.txt", "r") as f:
return f.read()[:-1]
def getLayers(input: str, width: int, height: int) -> list:
layers = []
for i in range(0, len(input), width*height):
layers.append([input[i+width*x:i+width*(x+1)] for x in range(height)])
return layers
def getPicture(layers: list) -> str:
width, height = len(layers[0][0]), len(layers[0])
return "\n".join(["".join([getColor(layers, w, h) for w in range(width)]) for h in range(height)])
def getColor(layers: list, w: int, h: int) -> str:
for layer in layers:
if layer[h][w] != "2":
return layer[h][w]
return "2"
def part1(layers: list) -> int:
    min_cnt, min_layer = None, None
    for layer in layers:
        cnt = sum([l.count("0") for l in layer])
        if min_cnt is None or cnt < min_cnt:
            min_cnt, min_layer = cnt, layer
    return sum([l.count("1") for l in min_layer]) * sum([l.count("2") for l in min_layer])
def part2(layers: list) -> str:
picture = getPicture(layers)
return f"\n{picture.replace('0', ' ').replace('1', 'X')}"
def test():
assert getLayers("123456789012",3,2) == [["123","456"],["789","012"]]
assert getPicture(getLayers("0222112222120000",2,2)) == "01\n10"
if __name__ == "__main__":
test()
vals = getLayers(readFile(), 25, 6)
print(f"Part 1: {part1(vals)}")
print(f"Part 2: {part2(vals)}")
|
StarcoderdataPython
|
1778472
|
import os
import sys
import imp
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# Test for Torch
def torch(test_models, model_path, img_path):
results_o, results_d, op_sets = dict(), dict(), dict()
from PIL import Image
import torch
import torchvision.models as models
from torchvision import transforms
from torch.autograd import Variable
# Torch to IR
from ox.pytorch.pytorch_parser import PytorchParser
for model in test_models:
if 'inception' in model: image_size = 299
else: image_size = 224
image = Image.open(img_path)
transformation = transforms.Compose([
transforms.Resize((image_size, image_size)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
image_tensor = transformation(image).float()
image_tensor = image_tensor.unsqueeze_(0)
x = Variable(image_tensor)
inputshape = [3, image_size, image_size]
arch_filename = os.path.join(model_path, 'PyTorch', model+'.pth')
# test model
if 'resnet50' in model:
model_eval = models.resnet50()
elif 'inception' in model:
from models.torch import inception
model_eval = inception.inceptionresnetv2(pretrained=False)
elif 'shufflenet' in model:
from models.torch import shufflenet
model_eval = shufflenet.shufflenet()
elif 'fcn' in model:
from models.torch import fcn
model_eval = fcn.FCNs()
elif 'lstm' in model:
from models.torch import lstm
model_eval = lstm.Lstm()
model_eval.eval()
predict = model_eval(x).data.numpy()
preds = np.squeeze(predict)
print('\033[1;31;40m')
print(' Result of', model, ': ', np.argmax(preds))
print('\033[0m')
results_o[model] = preds
torch.save(model_eval, arch_filename)
# convert
IR_filename = os.path.join(model_path, 'IR', model+'_torch')
parser = PytorchParser(arch_filename, inputshape)
ops = parser.run(IR_filename)
op_sets[model] = ops
del parser
del PytorchParser
# IR to Torch
from ox.pytorch.pytorch_emitter import PytorchEmitter
for model in test_models:
if 'inception' in model: image_size = 299
else: image_size = 224
image = Image.open(img_path)
transformation = transforms.Compose([
transforms.Resize((image_size, image_size)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
image_tensor = transformation(image).float()
image_tensor = image_tensor.unsqueeze_(0)
x = Variable(image_tensor)
inputshape = [3, image_size, image_size]
arch_filename = os.path.join(model_path, 'IR', model+'_torch.pb')
weight_filename = os.path.join(model_path, 'IR', model+'_torch.npy')
converted_file = os.path.join(model_path, 'PyTorch', model+'_ox')
emitter = PytorchEmitter((arch_filename, weight_filename))
emitter.run(converted_file + '.py', converted_file + '.npy', 'test')
model_converted = imp.load_source('PytorchModel', converted_file + '.py').KitModel(converted_file + '.npy')
model_converted.eval()
predict = model_converted(x).data.numpy()
preds = np.squeeze(predict)
print('\033[1;31;40m')
print(' Result of ', model+'_ox : ', np.argmax(preds))
print('\033[0m')
results_d[model] = np.mean(np.power(results_o[model] - preds, 2))
del emitter
del PytorchEmitter
return results_d, op_sets
# Test for Tensorflow
def tensorflow(test_models, model_path, img_path):
results_o, results_d, op_sets = dict(), dict(), dict()
import tensorflow as tf
from PIL import Image
image = Image.open(img_path)
# Tensorflow to IR
from ox.tensorflow.tensorflow_parser import TensorflowParser
for model in test_models:
arch_filename = os.path.join(model_path, 'tensorflow', model, model+'.ckpt.meta')
weight_filename = os.path.join(model_path, 'tensorflow', model, model+'.ckpt')
# test model
if 'resnet50' in model:
img = np.array(image.resize((299, 299), Image.ANTIALIAS))
x = np.expand_dims(img, axis=0)
from models.tf import resnet50
preds = resnet50.test(x, model_path)
elif 'inception' in model:
img = np.array(image.resize((224, 224), Image.ANTIALIAS))
x = np.expand_dims(img, axis=0)
from models.tf import inception_v3
preds = inception_v3.test(x, model_path)
elif 'shufflenet' in model:
img = np.array(image.resize((224, 224), Image.ANTIALIAS))
x = np.expand_dims(img, axis=0)
from models.tf import shufflenet
preds = shufflenet.test(x, model_path)
elif 'fcn' in model:
img = np.array(image.resize((224, 224), Image.ANTIALIAS))
x = np.expand_dims(img, axis=0)
from models.tf import fcn
preds = fcn.test(x, model_path)
elif 'lstm' in model:
img = np.array(image.resize((224, 224), Image.ANTIALIAS))
x = np.expand_dims(img, axis=0)
from models.tf import lstm
preds = lstm.test(x, model_path)
print('\033[1;31;40m')
print(' Result of', model, ': ', np.argmax(preds))
print('\033[0m')
preds = np.squeeze(preds)
if 'fcn' in model: preds = np.array(preds).astype(np.int32)
results_o[model] = preds
import tensorflow.contrib.keras as keras
keras.backend.clear_session()
# parser
IR_filename = os.path.join(model_path, 'IR', model+'_tf')
parser = TensorflowParser(arch_filename, weight_filename, ["OX_output"])
ops = parser.run(IR_filename)
op_sets[model] = ops
del parser
del TensorflowParser
# IR to Tensorflow
from ox.tensorflow.tensorflow_emitter import TensorflowEmitter
for model in test_models:
arch_filename = os.path.join(model_path, 'IR', model+'_tf.pb')
weight_filename = os.path.join(model_path, 'IR', model+'_tf.npy')
converted_file = os.path.join(model_path, 'tensorflow', model, model+'_ox')
emitter = TensorflowEmitter((arch_filename, weight_filename))
emitter.run(converted_file + '.py', None, 'test')
# test model
if 'resnet' in model:
img = image.resize((299, 299), Image.ANTIALIAS)
else:
img = image.resize((224, 224), Image.ANTIALIAS)
img = np.array(img)
x = np.expand_dims(img, axis=0)
if 'lstm' in model:
x = np.reshape(x, (-1, 224 * 224 * 3))
model_converted = imp.load_source('TFModel', converted_file + '.py').KitModel(weight_filename)
input_tf, model_tf = model_converted
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
predict = sess.run(model_tf, feed_dict = {input_tf : x})
del model_converted
del sys.modules['TFModel']
preds = np.squeeze(predict)
if 'fcn' in model: preds = np.array(preds).astype(np.int32)
print('\033[1;31;40m')
print(' Result of ', model+'_ox : ', np.argmax(preds))
print('\033[0m')
results_d[model] = np.mean(np.power(results_o[model] - preds, 2))
del emitter
del TensorflowEmitter
return results_d, op_sets
def mk_dirs(path):
if not os.path.exists(path):
os.makedirs(path)
return True
return False
if __name__=='__main__':
test_models = ['resnet50', 'inception_v3', 'shufflenet', 'fcn', 'lstm']
model_path = './../models'
img_path = os.path.join('./', 'elephant.jpg')
# mkdirs
mk_dirs(os.path.join(model_path, 'IR'))
mk_dirs(os.path.join(model_path, 'tensorflow'))
mk_dirs(os.path.join(model_path, 'PyTorch'))
tf_err, tf_op_sets = tensorflow(test_models, model_path, img_path)
torch_err, torch_op_sets = torch(test_models, model_path, img_path)
for model in test_models:
print('Model: {}'.format(model))
print('- Error: tf ({}) | torch ({})'.format(tf_err[model], torch_err[model]))
print('- TF Ops: {}'.format(tf_op_sets[model]))
print('- Torch Ops: {}'.format(torch_op_sets[model]))
print('\n')
|
StarcoderdataPython
|
9643583
|
"""
Experimental code for reading Cozmo animations in .bin format.
Cozmo animations are stored in files/cozmo/cozmo_resources/assets/animations inside the Cozmo mobile application.
Animation data structures are declared in FlatBuffers format in files/cozmo/cozmo_resources/config/cozmo_anim.fbs .
"""
from typing import List, Tuple
from PIL import Image, ImageDraw
from . import CozmoAnim
from . import protocol_encoder
from . import lights
__all__ = [
"AnimClip",
"load_anim_clips",
]
class AnimKeyframe(object):
def __init__(self):
self.pkts = []
self.record_heading = False
self.face_animation = None
self.event_id = None # DEVICE_AUDIO_TRIGGER / ENERGY_DRAINCUBE_END / TAPPED_BLOCK
class AnimEye(object):
def __init__(self,
center: Tuple[float, float] = (10.0, 0.0),
radius: Tuple[float, float] = (1.22, 0.9),
unknown4: float = 0.0,
curv1: Tuple[float, float, float, float] = (0.5, 0.5, 0.5, 0.5),
curv2: Tuple[float, float, float, float] = (0.5, 0.5, 0.5, 0.5),
unknown13: Tuple[float, float, float, float, float, float] = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)):
self.center = center
self.radius = radius
self.unknown4 = unknown4
self.curv1 = curv1
self.curv2 = curv2
self.unknown13 = unknown13
class AnimFace(object):
def __init__(self,
angle: float = 0.0,
center: Tuple[float, float] = (0.0, 0.0),
scale: Tuple[float, float] = (0.0, 0.0),
left_eye: AnimEye = AnimEye(),
right_eye: AnimEye = AnimEye()):
self.angle = angle
self.center = center
self.scale = scale
self.left_eye = left_eye
self.right_eye = right_eye
class AnimClip(object):
def __init__(self, name: str):
self.name = name
self.keyframes = {}
def add_message(self, trigger_time: int, pkt: protocol_encoder.Packet) -> None:
if trigger_time not in self.keyframes:
self.keyframes[trigger_time] = []
self.keyframes[trigger_time].append(pkt)
def record_heading(self, trigger_time: int) -> None:
# TODO
pass
def face_animation(self, trigger_time: int, name: str) -> None:
# TODO
pass
def event(self, trigger_time: int, event_id: str) -> None:
# TODO
pass
def face(self, trigger_time: int, face: AnimFace) -> None:
# TODO
pass
def render_face(face: AnimFace) -> None:
CX = 63
CY = 31
SX = 2.25
SY = 2.25
RX = 15
RY = 20
im = Image.new("1", (128, 64), color=(0))
draw = ImageDraw.Draw(im)
# draw.line((0, 0) + im.size, fill=128)
# draw.line((0, im.size[1], im.size[0], 0), fill=128)
for eye in (face.left_eye, face.right_eye):
x1 = CX + SX * eye.center[0] - RX * eye.radius[0]
y1 = CY + SY * eye.center[1] - RY * eye.radius[1]
x2 = CX + SX * eye.center[0] + RX * eye.radius[0]
y2 = CY + SY * eye.center[1] + RY * eye.radius[1]
draw.ellipse(xy=((x1, y1), (x2, y2)), fill=1, outline=1, width=1)
im.show()
pass
def load_anim_clip(fbclip: CozmoAnim.AnimClip) -> AnimClip:
""" Convert a single Cozmo FlatBuffers animation clip into a PyCozmo AnimClip object. """
clip = AnimClip(fbclip.Name().decode("utf-8"))
fbkfs = fbclip.Keyframes()
# Convert HeadAngle key frames to messages
for i in range(fbkfs.HeadAngleKeyFrameLength()):
fbkf = fbkfs.HeadAngleKeyFrame(i)
# FIXME: Why can duration be larger than 255?
pkt = protocol_encoder.AnimHead(duration_ms=min(fbkf.DurationTimeMs(), 255),
variability_deg=fbkf.AngleVariabilityDeg(),
angle_deg=fbkf.AngleDeg())
trigger_time = fbkf.TriggerTimeMs()
clip.add_message(trigger_time, pkt)
# Convert LiftHeight key frames to messages
for i in range(fbkfs.LiftHeightKeyFrameLength()):
fbkf = fbkfs.LiftHeightKeyFrame(i)
# FIXME: Why can duration be larger than 255?
pkt = protocol_encoder.AnimLift(duration_ms=min(fbkf.DurationTimeMs(), 255),
variability_mm=fbkf.HeightVariabilityMm(),
height_mm=fbkf.HeightMm())
trigger_time = fbkf.TriggerTimeMs()
clip.add_message(trigger_time, pkt)
# Convert RecordHeading key frames to messages
for i in range(fbkfs.RecordHeadingKeyFrameLength()):
fbkf = fbkfs.RecordHeadingKeyFrame(i)
trigger_time = fbkf.TriggerTimeMs()
clip.record_heading(trigger_time)
# Convert TurnToRecordedHeading key frames to messages
for i in range(fbkfs.TurnToRecordedHeadingKeyFrameLength()):
fbkf = fbkfs.TurnToRecordedHeadingKeyFrame(i)
# TODO
trigger_time = fbkf.TriggerTimeMs()
duration_ms = fbkf.DurationTimeMs()
offset_deg = fbkf.OffsetDeg()
speed_degPerSec = fbkf.SpeedDegPerSec()
accel_degPerSec2 = fbkf.AccelDegPerSec2()
decel_degPerSec2 = fbkf.DecelDegPerSec2()
tolerance_deg = fbkf.ToleranceDeg()
numHalfRevs = fbkf.NumHalfRevs()
useShortestDir = fbkf.UseShortestDir()
# Convert BodyMotion key frames to messages
for i in range(fbkfs.BodyMotionKeyFrameLength()):
fbkf = fbkfs.BodyMotionKeyFrame(i)
trigger_time = fbkf.TriggerTimeMs()
# FIXME: What to do with duration?
duration_ms = fbkf.DurationTimeMs()
radius_mm = fbkf.RadiusMm().decode("utf-8")
try:
radius_mm = float(radius_mm)
except ValueError:
pass
pkt = protocol_encoder.AnimBody(speed=fbkf.Speed(), unknown1=32767)
clip.add_message(trigger_time, pkt)
# Convert BackpackLights key frames to messages
for i in range(fbkfs.BackpackLightsKeyFrameLength()):
fbkf = fbkfs.BackpackLightsKeyFrame(i)
trigger_time = fbkf.TriggerTimeMs()
# FIXME: What to do with duration?
duration_ms = fbkf.DurationTimeMs()
assert fbkf.LeftLength() == 4
left = lights.Color(rgb=(fbkf.Left(0), fbkf.Left(1), fbkf.Left(2)))
assert fbkf.FrontLength() == 4
front = lights.Color(rgb=(fbkf.Front(0), fbkf.Front(1), fbkf.Front(2)))
assert fbkf.MiddleLength() == 4
middle = lights.Color(rgb=(fbkf.Middle(0), fbkf.Middle(1), fbkf.Middle(2)))
assert fbkf.BackLength() == 4
back = lights.Color(rgb=(fbkf.Back(0), fbkf.Back(1), fbkf.Back(2)))
assert fbkf.RightLength() == 4
right = lights.Color(rgb=(fbkf.Right(0), fbkf.Right(1), fbkf.Right(2)))
pkt = protocol_encoder.AnimBackpackLights(colors=(left.to_int16(),
front.to_int16(), middle.to_int16(), back.to_int16(),
right.to_int16()))
clip.add_message(trigger_time, pkt)
# Convert FaceAnimation key frames to messages
for i in range(fbkfs.FaceAnimationKeyFrameLength()):
fbkf = fbkfs.FaceAnimationKeyFrame(i)
trigger_time = fbkf.TriggerTimeMs()
name = fbkf.AnimName().decode("utf-8")
clip.face_animation(trigger_time, name)
# Convert ProceduralFace key frames to messages
for i in range(fbkfs.ProceduralFaceKeyFrameLength()):
fbkf = fbkfs.ProceduralFaceKeyFrame(i)
# TODO
trigger_time = fbkf.TriggerTimeMs()
assert fbkf.LeftEyeLength() == 19
left_eye = AnimEye(center=(fbkf.LeftEye(0), fbkf.LeftEye(1)),
radius=(fbkf.LeftEye(2), fbkf.LeftEye(3)),
unknown4=fbkf.LeftEye(4),
curv1=(fbkf.LeftEye(5), fbkf.LeftEye(6), fbkf.LeftEye(7), fbkf.LeftEye(8)),
curv2=(fbkf.LeftEye(9), fbkf.LeftEye(10), fbkf.LeftEye(11), fbkf.LeftEye(12)),
unknown13=(fbkf.LeftEye(13), fbkf.LeftEye(14), fbkf.LeftEye(15),
fbkf.LeftEye(16), fbkf.LeftEye(17), fbkf.LeftEye(18)))
assert fbkf.RightEyeLength() == 19
right_eye = AnimEye(center=(fbkf.RightEye(0), fbkf.RightEye(1)),
radius=(fbkf.RightEye(2), fbkf.RightEye(3)),
unknown4=fbkf.RightEye(4),
curv1=(fbkf.RightEye(5), fbkf.RightEye(6), fbkf.RightEye(7), fbkf.RightEye(8)),
curv2=(fbkf.RightEye(9), fbkf.RightEye(10), fbkf.RightEye(11), fbkf.RightEye(12)),
unknown13=(fbkf.RightEye(13), fbkf.RightEye(14), fbkf.RightEye(15),
fbkf.RightEye(16), fbkf.RightEye(17), fbkf.RightEye(18)))
face = AnimFace(angle=fbkf.FaceAngle(),
center=(fbkf.FaceCenterX(), fbkf.FaceCenterY()),
scale=(fbkf.FaceScaleX(), fbkf.FaceScaleY()),
left_eye=left_eye,
right_eye=right_eye)
clip.face(trigger_time, face)
render_face(face)
# Convert RobotAudio key frames to messages
for i in range(fbkfs.RobotAudioKeyFrameLength()):
fbkf = fbkfs.RobotAudioKeyFrame(i)
# TODO
trigger_time = fbkf.TriggerTimeMs()
audio_event_ids = []
for j in range(fbkf.AudioEventIdLength()):
audio_event_ids.append(fbkf.AudioEventId(j))
volume = fbkf.Volume()
probabilities = []
for j in range(fbkf.ProbabilityLength()):
probabilities.append(fbkf.Probability(j))
has_alts = fbkf.HasAlts()
# Convert Event key frames to messages
for i in range(fbkfs.EventKeyFrameLength()):
fbkf = fbkfs.EventKeyFrame(i)
trigger_time = fbkf.TriggerTimeMs()
event_id = fbkf.EventId().decode("utf-8")
clip.event(trigger_time, event_id)
return clip
def load_anim_clips(fspec: str) -> List[AnimClip]:
""" Load one or more animation clips from a .bin file in Cozmo FlatBuffers format. """
with open(fspec, "rb") as f:
buf = f.read()
fbclips = CozmoAnim.AnimClips.AnimClips.GetRootAsAnimClips(buf, 0)
clips = []
for i in range(fbclips.ClipsLength()):
clip = load_anim_clip(fbclips.Clips(i))
clips.append(clip)
return clips
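# Hedged usage sketch ("anim_sleeping.bin" is a placeholder file name; real clip files
# live under cozmo_resources/assets/animations in the Cozmo mobile app package):
#   clips = load_anim_clips("anim_sleeping.bin")
#   for clip in clips:
#       print(clip.name, "->", len(clip.keyframes), "keyframe trigger times")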
|
StarcoderdataPython
|
6412395
|
Ambiente = {
'1': '1 - Produccion',
'2': '2 - Pruebas',
}
TipoDocumento = {
'11': '11 - Registro civil',
'12': '12 - Tarjeta de identidad',
'13': '13 - Cédula de ciudadanía',
'21': '21 - Tarjeta de extranjería',
'22': '22 - Cédula de extranjería',
'31': '31 - NIT',
'41': '41 - Pasaporte',
'42': '42 - Documento de identificación extranjero',
'47': '47 - PEP',
'50': '50 - NIT de otro país',
'91': '91 - NUIP * ',
}
class Dianbase():
@classmethod
def compute_check_digit(cls, rut: str, identification_type: str) -> str:
"""
@param rut(str): Rut without check digit
@param identification_type(str): Identification type
        @return result(int): Check digit of the rut
"""
result = 0
if identification_type == '31':
factor = [3, 7, 13, 17, 19, 23, 29, 37, 41, 43, 47, 53, 59, 67, 71]
            rut_ajustado = str(rut).rjust(15, '0')
total = sum(int(rut_ajustado[14-i]) * factor[i] for i in range(14)) % 11
if total > 1:
result = 11 - total
else:
result = total
else:
result = 0
return result
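# Hedged usage sketch (the NIT below is a made-up example, not real taxpayer data):
if __name__ == "__main__":
    dv = Dianbase.compute_check_digit('900123456', '31')
    print('NIT 900123456 -> DV', dv)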
|
StarcoderdataPython
|
5148087
|
import os
from ..discretization.modeltime import ModelTime
from ..discretization.structuredgrid import StructuredGrid
from ..mbase import BaseModel
from ..modflow import Modflow
from ..mt3d import Mt3dms
from ..pakbase import Package
from .swtvdf import SeawatVdf
from .swtvsc import SeawatVsc
class SeawatList(Package):
"""
List Package class
"""
def __init__(self, model, extension="list", listunit=7):
super().__init__(model, extension, "LIST", listunit)
return
def __repr__(self):
return "List package class"
def write_file(self):
# Not implemented for list class
return
class Seawat(BaseModel):
"""
SEAWAT Model Class.
Parameters
----------
modelname : str, default "swttest"
Name of model. This string will be used to name the SEAWAT input
that are created with write_model.
namefile_ext : str, default "nam"
Extension for the namefile.
modflowmodel : Modflow, default None
Instance of a Modflow object.
mt3dmodel : Mt3dms, default None
Instance of a Mt3dms object.
version : str, default "seawat"
Version of SEAWAT to use. Valid versions are "seawat" (default).
exe_name : str, default "swtv4"
The name of the executable to use.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
listunit : int, default 2
Unit number for the list file.
model_ws : str, default "."
Model workspace. Directory name to create model data sets.
Default is the present working directory.
external_path : str, optional
Location for external files.
verbose : bool, default False
Print additional information to the screen.
load : bool, default True
Load model.
silent : int, default 0
Silent option.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> m = flopy.seawat.swt.Seawat()
"""
def __init__(
self,
modelname="swttest",
namefile_ext="nam",
modflowmodel=None,
mt3dmodel=None,
version="seawat",
exe_name="swtv4",
structured=True,
listunit=2,
model_ws=".",
external_path=None,
verbose=False,
load=True,
silent=0,
):
super().__init__(
modelname,
namefile_ext,
exe_name,
model_ws,
structured=structured,
verbose=verbose,
)
# Set attributes
self.version_types = {"seawat": "SEAWAT"}
self.set_version(version)
self.lst = SeawatList(self, listunit=listunit)
self.glo = None
self._mf = None
self._mt = None
# If a MODFLOW model was passed in, then add its packages
self.mf = self
if modflowmodel is not None:
for p in modflowmodel.packagelist:
self.packagelist.append(p)
self._modelgrid = modflowmodel.modelgrid
else:
modflowmodel = Modflow()
# If a MT3D model was passed in, then add its packages
if mt3dmodel is not None:
for p in mt3dmodel.packagelist:
self.packagelist.append(p)
else:
mt3dmodel = Mt3dms()
# external option stuff
self.array_free_format = False
self.array_format = "mt3d"
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external = False
self.load = load
# the starting external data unit number
self._next_ext_unit = 3000
if external_path is not None:
assert (
model_ws == "."
), "ERROR: external cannot be used with model_ws"
# external_path = os.path.join(model_ws, external_path)
if os.path.exists(external_path):
print(f"Note: external_path {external_path} already exists")
# assert os.path.exists(external_path),'external_path does not exist'
else:
os.mkdir(external_path)
self.external = True
self.external_path = external_path
self.verbose = verbose
self.silent = silent
# Create a dictionary to map package with package object.
# This is used for loading models.
self.mfnam_packages = {}
for k, v in modflowmodel.mfnam_packages.items():
self.mfnam_packages[k] = v
for k, v in mt3dmodel.mfnam_packages.items():
self.mfnam_packages[k] = v
self.mfnam_packages["vdf"] = SeawatVdf
self.mfnam_packages["vsc"] = SeawatVsc
return
@property
def modeltime(self):
# build model time
data_frame = {
"perlen": self.dis.perlen.array,
"nstp": self.dis.nstp.array,
"tsmult": self.dis.tsmult.array,
}
self._model_time = ModelTime(
data_frame,
self.dis.itmuni_dict[self.dis.itmuni],
self.dis.start_datetime,
self.dis.steady.array,
)
return self._model_time
@property
def modelgrid(self):
if not self._mg_resync:
return self._modelgrid
if self.has_package("bas6"):
ibound = self.bas6.ibound.array
else:
ibound = None
# build grid
# self.dis should exist if modflow model passed
self._modelgrid = StructuredGrid(
self.dis.delc.array,
self.dis.delr.array,
self.dis.top.array,
self.dis.botm.array,
idomain=ibound,
lenuni=self.dis.lenuni,
proj4=self._modelgrid.proj4,
epsg=self._modelgrid.epsg,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
nlay=self.dis.nlay,
)
# resolve offsets
xoff = self._modelgrid.xoffset
if xoff is None:
if self._xul is not None:
xoff = self._modelgrid._xul_to_xll(self._xul)
else:
xoff = 0.0
yoff = self._modelgrid.yoffset
if yoff is None:
if self._yul is not None:
yoff = self._modelgrid._yul_to_yll(self._yul)
else:
yoff = 0.0
self._modelgrid.set_coord_info(
xoff,
yoff,
self._modelgrid.angrot,
self._modelgrid.epsg,
self._modelgrid.proj4,
)
self._mg_resync = not self._modelgrid.is_complete
return self._modelgrid
@property
def nlay(self):
if self.dis:
return self.dis.nlay
else:
return 0
@property
def nrow(self):
if self.dis:
return self.dis.nrow
else:
return 0
@property
def ncol(self):
if self.dis:
return self.dis.ncol
else:
return 0
@property
def nper(self):
if self.dis:
return self.dis.nper
else:
return 0
@property
def nrow_ncol_nlay_nper(self):
dis = self.get_package("DIS")
if dis:
return dis.nrow, dis.ncol, dis.nlay, dis.nper
else:
return 0, 0, 0, 0
def get_nrow_ncol_nlay_nper(self):
return self.nrow_ncol_nlay_nper
def get_ifrefm(self):
bas = self.get_package("BAS6")
if bas:
return bas.ifrefm
else:
return False
@property
def ncomp(self):
if self.btn:
return self.btn.ncomp
else:
return 1
@property
def mcomp(self):
if self.btn:
return self.btn.mcomp
else:
return 1
def _set_name(self, value):
# Overrides BaseModel's setter for name property
super()._set_name(value)
# for i in range(len(self.lst.extension)):
# self.lst.file_name[i] = self.name + '.' + self.lst.extension[i]
# return
def change_model_ws(self, new_pth=None, reset_external=False):
# if hasattr(self,"_mf"):
if self._mf is not None:
self._mf.change_model_ws(
new_pth=new_pth, reset_external=reset_external
)
# if hasattr(self,"_mt"):
if self._mt is not None:
self._mt.change_model_ws(
new_pth=new_pth, reset_external=reset_external
)
super().change_model_ws(new_pth=new_pth, reset_external=reset_external)
def write_name_file(self):
"""
Write the name file
Returns
-------
None
"""
# open and write header
fn_path = os.path.join(self.model_ws, self.namefile)
f_nam = open(fn_path, "w")
f_nam.write(f"{self.heading}\n")
# Write global file entry
if self.glo is not None:
if self.glo.unit_number[0] > 0:
f_nam.write(
"{:14s} {:5d} {}\n".format(
self.glo.name[0],
self.glo.unit_number[0],
self.glo.file_name[0],
)
)
# Write list file entry
f_nam.write(
"{:14s} {:5d} {}\n".format(
self.lst.name[0],
self.lst.unit_number[0],
self.lst.file_name[0],
)
)
# Write SEAWAT entries and close
f_nam.write(str(self.get_name_file_entries()))
if self._mf is not None:
# write the external files
for b, u, f in zip(
self._mf.external_binflag,
self._mf.external_units,
self._mf.external_fnames,
):
tag = "DATA"
if b:
tag = "DATA(BINARY)"
f_nam.write(f"{tag:14s} {u:5d} {f}\n")
# write the output files
for u, f, b in zip(
self._mf.output_units,
self._mf.output_fnames,
self._mf.output_binflag,
):
if u == 0:
continue
if b:
f_nam.write(f"DATA(BINARY) {u:5d} {f} REPLACE\n")
else:
f_nam.write(f"DATA {u:5d} {f}\n")
if self._mt is not None:
# write the external files
for b, u, f in zip(
self._mt.external_binflag,
self._mt.external_units,
self._mt.external_fnames,
):
tag = "DATA"
if b:
tag = "DATA(BINARY)"
f_nam.write(f"{tag:14s} {u:5d} {f}\n")
# write the output files
for u, f, b in zip(
self._mt.output_units,
self._mt.output_fnames,
self._mt.output_binflag,
):
if u == 0:
continue
if b:
f_nam.write(f"DATA(BINARY) {u:5d} {f} REPLACE\n")
else:
f_nam.write(f"DATA {u:5d} {f}\n")
# write the external files
for b, u, f in zip(
self.external_binflag, self.external_units, self.external_fnames
):
tag = "DATA"
if b:
tag = "DATA(BINARY)"
f_nam.write(f"{tag:14s} {u:5d} {f}\n")
# write the output files
for u, f, b in zip(
self.output_units, self.output_fnames, self.output_binflag
):
if u == 0:
continue
if b:
f_nam.write(f"DATA(BINARY) {u:5d} {f} REPLACE\n")
else:
f_nam.write(f"DATA {u:5d} {f}\n")
f_nam.close()
return
@classmethod
def load(
cls,
f,
version="seawat",
exe_name="swtv4",
verbose=False,
model_ws=".",
load_only=None,
):
"""
Load an existing model.
Parameters
----------
f : str
Path to SEAWAT name file to load.
version : str, default "seawat"
Version of SEAWAT to use. Valid versions are "seawat" (default).
exe_name : str, default "swtv4"
The name of the executable to use.
verbose : bool, default False
Print additional information to the screen.
model_ws : str, default "."
Model workspace. Directory name to create model data sets.
Default is the present working directory.
load_only : list of str, optional
Packages to load (e.g. ["lpf", "adv"]). Default None
means that all packages will be loaded.
Returns
-------
flopy.seawat.swt.Seawat
Examples
--------
>>> import flopy
>>> m = flopy.seawat.swt.Seawat.load(f)
"""
# test if name file is passed with extension (i.e., is a valid file)
if os.path.isfile(os.path.join(model_ws, f)):
modelname = f.rpartition(".")[0]
else:
modelname = f
# create instance of a seawat model and load modflow and mt3dms models
ms = cls(
modelname=modelname,
namefile_ext="nam",
modflowmodel=None,
mt3dmodel=None,
version=version,
exe_name=exe_name,
model_ws=model_ws,
verbose=verbose,
)
mf = Modflow.load(
f,
version="mf2k",
exe_name=None,
verbose=verbose,
model_ws=model_ws,
load_only=load_only,
forgive=False,
check=False,
)
mt = Mt3dms.load(
f,
version="mt3dms",
exe_name=None,
verbose=verbose,
model_ws=model_ws,
forgive=False,
)
# set listing and global files using mf objects
ms.lst = mf.lst
ms.glo = mf.glo
for p in mf.packagelist:
p.parent = ms
ms.add_package(p)
ms._mt = None
if mt is not None:
for p in mt.packagelist:
p.parent = ms
ms.add_package(p)
mt.external_units = []
mt.external_binflag = []
mt.external_fnames = []
ms._mt = mt
ms._mf = mf
# return model object
return ms
|
StarcoderdataPython
|
8125198
|
<filename>Toby/network.py
import keras
import tensorflow as tf
from keras import Input, Model
from keras.layers import (
Dense, Reshape, Flatten, LeakyReLU,
LayerNormalization, Dropout, BatchNormalization
)
def build_generator(latent_space, n_var, n_features=2,use_bias=False):
model = tf.keras.Sequential()
model.add(Dense(15, input_shape=(latent_space,), activation='relu', use_bias=use_bias))
model.add(BatchNormalization())
model.add(Dense(20, activation='relu')) # 10
model.add(BatchNormalization())
model.add(Dense((20), activation='relu')) # 25
model.add(BatchNormalization())
model.add(Dense(n_features, activation="tanh", use_bias=use_bias))
return model
def build_critic(n_var, use_bias=False):
model = tf.keras.Sequential()
model.add(Dense(25, input_shape=(n_var,), use_bias=use_bias))
# model.add(LayerNormalization())
model.add(LeakyReLU())
model.add(Dropout(0.3))
model.add(Dense(50))
model.add(LeakyReLU())
model.add(Dropout(0.3))
model.add(Dense(50))
model.add(LeakyReLU())
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(1))
return model
# 15, 5, 5 (generator for circle)
#3d dip data.
# def build_generator(latent_space, n_var):
#
# model = tf.keras.Sequential()
# model.add(Dense(n_var*15, input_shape=(latent_space,), use_bias=True))
# model.add(BatchNormalization())
# model.add(LeakyReLU())
# model.add(Dense(n_var*5, use_bias=True))
# model.add(BatchNormalization())
# model.add(LeakyReLU())
# model.add(Dense(n_var*5, use_bias=True))
# model.add(Dense(n_var, activation="tanh", use_bias=True))
#
# return model
#
# def build_critic(n_var):
#
# model = tf.keras.Sequential()
# model.add(Dense(n_var*5, use_bias=True))
# model.add(LayerNormalization())
# model.add(LeakyReLU())
# model.add(Dropout(0.2))
# model.add(Dense(n_var*15))
# model.add(LayerNormalization())
# model.add(LeakyReLU())
# model.add(Flatten())
# model.add(Dense(1))
#
# return model
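# Hedged usage sketch (latent_space=100 and n_var=2 are illustrative choices, not
# values fixed by the original training script):
if __name__ == "__main__":
    generator = build_generator(latent_space=100, n_var=2, n_features=2)
    critic = build_critic(n_var=2)
    generator.summary()
    critic.summary()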
|
StarcoderdataPython
|
1604048
|
print(100 - (int(input())%100))
|
StarcoderdataPython
|
6646865
|
<filename>pyimgsaliency/saliency_mbd.py<gh_stars>0
import math
import copy
# import sys
# import operator
# import networkx as nx
# import matplotlib.pyplot as plt
import numpy as np
import bottleneck as bn
from scipy.spatial.distance import cdist
from skimage.io import imread as skimage_imread
from skimage.util import img_as_float
from skimage.color import rgb2lab
from numba import jit
import numexpr as ne
# from skimage.segmentation import slic
# from scipy.optimize import minimize
# import pdb
@jit
def _raster_scan(img, l, u, d): # called by mbd method
n_rows = len(img)
n_cols = len(img[0])
for x in range(1, n_rows - 1):
for y in range(1, n_cols - 1):
ix = img[x][y]
d_ = d[x][y]
u1 = u[x - 1][y]
l1 = l[x - 1][y]
u2 = u[x][y - 1]
l2 = l[x][y - 1]
b1 = max(u1, ix) - min(l1, ix)
b2 = max(u2, ix) - min(l2, ix)
if d_ <= b1 and d_ <= b2:
continue
elif b1 < d_ and b1 <= b2:
d[x][y] = b1
u[x][y] = max(u1, ix)
l[x][y] = min(l1, ix)
else:
d[x][y] = b2
u[x][y] = max(u2, ix)
l[x][y] = min(l2, ix)
@jit
def _raster_scan_inv(img, l, u, d): # called by mbd method
n_rows = len(img)
n_cols = len(img[0])
for x in range(n_rows - 2, 1, -1):
for y in range(n_cols - 2, 1, -1):
ix = img[x][y]
d_ = d[x][y]
u1 = u[x + 1][y]
l1 = l[x + 1][y]
u2 = u[x][y + 1]
l2 = l[x][y + 1]
b1 = max(u1, ix) - min(l1, ix)
b2 = max(u2, ix) - min(l2, ix)
if d_ <= b1 and d_ <= b2:
continue
elif b1 < d_ and b1 <= b2:
d[x][y] = b1
u[x][y] = max(u1, ix)
l[x][y] = min(l1, ix)
else:
d[x][y] = b2
u[x][y] = max(u2, ix)
l[x][y] = min(l2, ix)
@jit
def mbd(img, num_iters):
if len(img.shape) != 2:
print('did not get 2d np array to fast mbd')
return None
if img.shape[0] <= 3 or img.shape[1] <= 3:
print('image is too small')
return None
# l = np.copy(img)
# u = np.copy(img)
d = np.full_like(img, fill_value=np.inf)
d[(0, -1), :] = 0
d[:, (0, -1)] = 0
# unfortunately, iterating over numpy arrays is very slow
img_list = img.tolist()
l_list = copy.deepcopy(img_list)
u_list = copy.deepcopy(img_list)
d_list = d.tolist()
for x in range(num_iters):
if x % 2 == 1:
_raster_scan(img_list, l_list, u_list, d_list)
else:
_raster_scan_inv(img_list, l_list, u_list, d_list)
return np.array(d_list)
def get_saliency_mbd(img, method='b', border_thickness_percent=0.1):
"""
Generate saliency map via minimum barrier detection.
Source: <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>.
"Minimum Barrier Salient Object Detection at 80 FPS."
:param img: either ndarray, image path string or lists of them.
:param method: string, 'b' for background map
:param border_thickness_percent: float, 10% in the paper
:return: ndarray representation of the mdb saliency map
"""
# convert input to an interable of ndarrays
if isinstance(img, str):
img_list = (skimage_imread(img), )
elif isinstance(img, list):
if isinstance(img[0], str):
img_list = [skimage_imread(im) for im in img]
else:
img_list = img
else:
img_list = (img, )
result = []
for img in img_list:
img_mean = np.mean(img, axis=2)
sal = mbd(img_mean, 3)
if 'b' == method: # get the background map
# paper uses 30px for an image of size 300px, so we use 10%
n_rows, n_cols = img.shape[:2]
img_size = math.sqrt(n_rows * n_cols)
border_thickness = int(img_size * border_thickness_percent)
img_lab = img_as_float(rgb2lab(img))
px_left = img_lab[:border_thickness, :, :]
px_right = img_lab[n_rows - border_thickness - 1:-1, :, :]
px_top = img_lab[:, :border_thickness, :]
px_bottom = img_lab[:, n_cols - border_thickness - 1:-1, :]
px_mean_left = np.mean(px_left, axis=(0, 1))
px_mean_right = np.mean(px_right, axis=(0, 1))
px_mean_top = np.mean(px_top, axis=(0, 1))
px_mean_bottom = np.mean(px_bottom, axis=(0, 1))
px_left = px_left.reshape((n_cols * border_thickness, 3))
px_right = px_right.reshape((n_cols * border_thickness, 3))
px_top = px_top.reshape((n_rows * border_thickness, 3))
px_bottom = px_bottom.reshape((n_rows * border_thickness, 3))
cov_left = np.cov(px_left.T)
cov_right = np.cov(px_right.T)
cov_top = np.cov(px_top.T)
cov_bottom = np.cov(px_bottom.T)
cov_left = np.linalg.inv(cov_left + np.eye(cov_left.shape[1]) * 1e-12)
cov_right = np.linalg.inv(cov_right + np.eye(cov_right.shape[1]) * 1e-12)
cov_top = np.linalg.inv(cov_top + np.eye(cov_top.shape[1]) * 1e-12)
cov_bottom = np.linalg.inv(cov_bottom + np.eye(cov_bottom.shape[1]) * 1e-12)
img_lab_unrolled = img_lab.reshape(img_lab.shape[0] * img_lab.shape[1], 3)
img_lab_shape = img_lab.shape[:2]
px_mean_left_2 = np.zeros((1, 3))
px_mean_left_2[0, :] = px_mean_left
u_left = cdist(img_lab_unrolled, px_mean_left_2, metric='mahalanobis', VI=cov_left)
u_left = u_left.reshape(img_lab_shape)
px_mean_right_2 = np.zeros((1, 3))
px_mean_right_2[0, :] = px_mean_right
u_right = cdist(img_lab_unrolled, px_mean_right_2, metric='mahalanobis', VI=cov_right)
u_right = u_right.reshape(img_lab_shape)
px_mean_top_2 = np.zeros((1, 3))
px_mean_top_2[0, :] = px_mean_top
u_top = cdist(img_lab_unrolled, px_mean_top_2, metric='mahalanobis', VI=cov_top)
u_top = u_top.reshape(img_lab_shape)
px_mean_bottom_2 = np.zeros((1, 3))
px_mean_bottom_2[0, :] = px_mean_bottom
u_bottom = cdist(img_lab_unrolled, px_mean_bottom_2, metric='mahalanobis', VI=cov_bottom)
u_bottom = u_bottom.reshape(img_lab_shape)
u_left_max = bn.nanmax(u_left)
if 0 != u_left_max:
u_left /= u_left_max
u_right_max = bn.nanmax(u_right)
if 0 != u_right_max:
u_right /= u_right_max
u_top_max = bn.nanmax(u_top)
if 0 != u_top_max:
u_top /= u_top_max
u_bottom_max = bn.nanmax(u_bottom)
if 0 != u_bottom_max:
u_bottom /= u_bottom_max
u_max = np.maximum.reduce([u_left, u_right, u_top, u_bottom])
u_final = ne.evaluate('(u_left + u_right + u_top + u_bottom) - u_max')
sal_max = bn.nanmax(sal)
if 0 != sal_max:
sal /= sal_max
u_final_max = bn.nanmax(u_final)
if 0 != u_final_max:
sal += u_final / u_final_max
else:
sal += u_final
# postprocessing
# # apply centeredness map
# s = np.mean(sal)
# alpha = 50.0
# delta = alpha * math.sqrt(s)
xv, yv = np.meshgrid(np.arange(sal.shape[1]), np.arange(sal.shape[0]))
w2, h2 = np.array(sal.shape) / 2
sal_max = bn.nanmax(sal)
if 0 != sal_max:
sal /= sal_max
sal = ne.evaluate('(1 - sqrt((xv - h2)**2 + (yv - w2)**2) / sqrt(w2**2 + h2**2)) * sal')
# # increase bg/fg contrast
sal_max = bn.nanmax(sal)
if 0 != sal_max:
sal /= sal_max
sal = ne.evaluate('255.0 / (1 + exp(-10 * (sal - 0.5)))')
result.append(sal)
    if len(result) == 1:
return result[0]
return result
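# Hedged usage sketch ("photo.jpg" is a placeholder path; both a single path and a
# list of paths are accepted, as handled above):
#   sal = get_saliency_mbd("photo.jpg")              # single ndarray saliency map
#   sal_maps = get_saliency_mbd(["a.jpg", "b.jpg"])  # list of saliency maps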
|
StarcoderdataPython
|
8013500
|
print("ece"<"csam")
|
StarcoderdataPython
|
8071490
|
#!/usr/bin/env python
# USAGE
# python real_time_object_detection.py
import sys
import configparser
import time
import numpy as np
import imutils
import cv2
import paho.mqtt.client as mqttClient
### Gather configuration parameters
def gather_arg():
conf_par = configparser.ConfigParser()
try:
conf_par.read('credentials.ini')
host= conf_par.get('camera', 'host')
broker = conf_par.get('mqtt', 'broker')
port = conf_par.getint('mqtt', 'port')
prototxt = conf_par.get('ssd', 'prototxt')
model = conf_par.get('ssd', 'model')
conf = conf_par.getfloat('ssd', 'conf')
except:
print('Missing credentials or input file!')
sys.exit(2)
return host, broker, port, prototxt, model, conf
### connect to MQTT Broker ###
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected to broker")
global Connected #Use global variable
Connected = True #Signal connection
else:
print("Connection failed")
(host, broker, port, prototxt, model, conf) = gather_arg()
Connected = False #global variable for the state of the connection
client = mqttClient.Client("Python") #create new instance
client.on_connect= on_connect #attach function to callback
client.connect(broker, port=port) #connect to broker
client.loop_start() #start the loop
while Connected != True: #Wait for connection
time.sleep(1.0)
# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
# load our serialized model from disk
print("[INFO] loading model...")
#net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
net = cv2.dnn.readNetFromCaffe(prototxt, model)
# initialize the video stream, allow the cammera sensor to warmup,
print("[INFO] starting video stream...")
#if args["source"] == "webcam":
#vs = cv2.VideoCapture('rtsp://192.168.128.29:9000/live')
vs = cv2.VideoCapture(host)
time.sleep(2.0)
detected_objects = []
# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
#if args["source"] == "webcam":
#ret, frame = vs.read()
ret, frame = vs.read()
#else:
#imgResp=urlopen(url)
#imgNp=np.array(bytearray(imgResp.read()),dtype=np.uint8)
#frame=cv2.imdecode(imgNp,-1)
frame = imutils.resize(frame, width=800)
# grab the frame dimensions and convert it to a blob
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
0.007843, (300, 300), 127.5)
# pass the blob through the network and obtain the detections and
# predictions
net.setInput(blob)
detections = net.forward()
# loop over the detections
for i in np.arange(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the `confidence` is
# greater than the minimum confidence
if confidence > conf:
# extract the index of the class label from the
# `detections`, then compute the (x, y)-coordinates of
# the bounding box for the object
idx = int(detections[0, 0, i, 1])
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# draw the prediction on the frame
label = "{}: {:.2f}%".format(CLASSES[idx],
confidence * 100)
print(label)
client.publish("python/test",label)
detected_objects.append(label)
cv2.rectangle(frame, (startX, startY), (endX, endY),
COLORS[idx], 2)
y = startY - 15 if startY - 15 > 15 else startY + 15
cv2.putText(frame, label, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
# show the output frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# cleanup
vs.release()
cv2.destroyAllWindows()
|
StarcoderdataPython
|
6588842
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django_dynamic_fixture import G
from django.contrib.auth.models import User
from resrc.tests.factories import UserFactory
from resrc.userprofile.models import Profile
from resrc.utils.templatetags.profile import profile
from resrc.utils.templatetags.gravatar import gravatar
class TemplateTagsTests(TestCase):
def test_profile_none(self):
'''Test the output of profile templatetag if profile does not exist'''
user = G(User)
self.assertEqual(None, profile(user))
def test_profile_existing(self):
'''Test the output of profile templatetag if profile does exist'''
user = G(User)
p = G(Profile, user=user)
self.assertEqual(p, profile(user))
def test_gravatar_default(self):
user = UserFactory()
user.save()
p = G(Profile, user=user)
p.email = '<EMAIL>'
p.save()
self.assertEqual(gravatar(p.email), \
'<img src="http://www.gravatar.com/avatar.php?size=80&gravatar_id=0d4907cea9d97688aa7a5e722d742f71" alt="gravatar" />')
self.assertEqual(gravatar(p.email, 80, 'Hello'), \
'<img src="http://www.gravatar.com/avatar.php?size=80&gravatar_id=0d4907cea9d97688aa7a5e722d742f71" alt="gravatar for Hello" />')
|
StarcoderdataPython
|
12853098
|
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django.utils.translation import ugettext_lazy as _
from .models import Ride
class RideForm(forms.ModelForm):
date = forms.DateField(
label=_('Date'),
widget=forms.DateInput(format=('%Y-%m-%d'),attrs={
'class': 'form-control input-group-alternative',
'type': 'date'
})
)
time = forms.TimeField(
label=_('Time'),
required=False,
input_formats=['%H:%M'],
widget=forms.TimeInput(format=('%H:%M'), attrs={
'class': 'form-control input-group-alternative',
'type': 'time'
})
)
description = forms.CharField(
label=_('Description'),
required=False,
help_text=_('Write here any additional information.'),
widget=forms.Textarea(attrs={
'class': 'form-control input-group-alternative',
})
)
class Meta:
model = Ride
fields = ('date', 'time', 'origin', 'destination', 'seats', 'price', 'description')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.add_input(
Submit('submit', _('Save Ride'), css_class='btn-block'))
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'input-group-alternative'
|
StarcoderdataPython
|
1689682
|
import os, copy, cProfile, pstats, io
import numpy as np
import gdspy as gp
import gds_tools as gdst
def profile(fnc):
"""A decorator that uses cProfile to profile a function"""
def inner(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
retval = fnc(*args, **kwargs)
pr.disable()
s = io.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
return retval
return inner
def RotMat(rad):
#==========================
# Generate rotation matrix \\
#=========================================================================
# Arguments: rad : radians to rotate about origin ||
#=========================================================================
return np.matrix([[np.cos(rad), -np.sin(rad)], [np.sin(rad), np.cos(rad)]])
def VecRot(rad, vec, origin = (0, 0)):
#=========================
# Perform vector rotation \\
#=========================================================================
# Arguments: rad : radians to rotate about origin ||
# vec : input vector (2-x-n list) ||
#=========================================================================
return (RotMat(rad).dot(np.array(vec) - np.array(origin)) + np.array(origin)).tolist()[0]
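# Illustrative example (added comment): a quarter-turn about the origin,
# VecRot(np.pi / 2, (1, 0)), returns approximately [0.0, 1.0].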
def instruction_parse(s, args = None):
#============================
# Simple instructions parser \\
#=========================================================================
# Parses a string and converts it to a dictionary. ||
# ||
# Arguments: s : input string ||
# args : dictionary with keys for variable placement ||
#=========================================================================
if args:
for a in args:
s = s.replace('{'+a+'}', str(args[a]))
dic = {}
key = ''
val = ''
pos = 'key'
for i, c in enumerate(s):
if c == ' ' or c == '\n' or c == '\t': # ignore whitespace
continue
elif c == ':':
pos = 'val'
elif c != ':' and c != ',' and pos == 'key':
key += c
elif c != ':' and c != ',' and pos == 'val':
val += c
elif c == ',':
pass # do nothing
else:
print('Error: unknown parameter, could not parse.')
return False
if c == ',' or (i + 1) == len(s):
val = eval(val.replace('pi', str(np.pi)))
dic[key] = float(val)
key = ''
val = ''
pos = 'key'
if (i + 1) == len(s):
break
return dic
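# Illustrative example (added comment):
# instruction_parse('w: {f}*pi, h: 3', args={'f': 2}) returns
# {'w': 6.283185307179586, 'h': 3.0}.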
def flatten(objectlist, endpoints, endpoint_dims, layer = 0):
#===========================
# Flatten a list of objects \\
#=========================================================================
# Flattening will cause all objects in the objectlist to be placed in ||
# one single layer and remove boundaries between them if there are any. ||
# All layer information will become lost! If you just want to combine ||
# structures while keeping layer information, use cluster() ||
# ||
# Arguments: objectlist : list of objects (GDStructure) ||
# endpoints : dictionary of new endpoints ||
# endpoint_dims : dictionary of new endpoint sizes ||
#=========================================================================
# Define function to allow for recursive walk through list and pick out all
# compound structures
def stacker(inlist):
outlist = []
for i in inlist:
if i.compound:
outlist += [i] + stacker(i.compound)
else:
outlist += [i]
return outlist
objectlist = stacker(objectlist)
ends = copy.deepcopy(endpoints)
epsz = copy.deepcopy(endpoint_dims)
objs = []
for i in objectlist:
objs.append(i.structure)
return gdst.classes.GDStructure(gp.boolean(objs, None, 'or', layer = layer), ends, epsz)
def lattice(cell, repeat, spacing):
#============================
# Generate a crystal lattice \\
#=========================================================================
# Arguments: cell : unit cell as gdspy Cell object ||
# repeat : (n_x, n_y) vector with amount of cells ||
# spacing : space between unit cells ||
#=========================================================================
array = gp.CellArray(cell, repeat[0], repeat[1], spacing)
ends = {'A': (0, spacing[1] * repeat[1] / 2), 'B': (spacing[0] * (repeat[0] - 1) / 2, spacing[1] * (repeat[1] - 1/2))}
epsz = {'A': 0, 'B': 0}
return gdst.classes.GDStructure(array, ends, epsz)
def lattice_cutter(lattice, objectlist, mode = 'and', layer = 0):
#=====================================
# Cut a lattice up using boolean \\
#=========================================================================
# Arguments: lattice : output of lattice() function ||
# objectlist : list of objects that intersect lattice ||
# (optional) mode : what boolean operation to apply ||
# (optional) layer : layer to put resulting structure on ||
#=========================================================================
if type(objectlist) is not type([]):
objectlist = [objectlist]
for i in objectlist:
if i.compound:
lattice = lattice_cutter(lattice, i.compound)
lattice.structure = gp.boolean(lattice.structure, i.structure, mode, layer = layer)
return lattice
def add(cell, elements, signal_from = None):
#================================
# Add structures to a gdspy cell \\
#=========================================================================
# Arguments: cell : gdspy cell object ||
# elements : list of GDStructure objects ||
#=========================================================================
if not isinstance(signal_from, list):
signal_from = [signal_from]
if elements not in signal_from:
signal_from.append(elements)
if not isinstance(elements, list):
elements = [elements]
for element in elements:
if isinstance(element, list):
gdst.add(cell, element)
else:
if isinstance(element, gdst.classes.GDSComponent):
for polygon in element.polygons:
cell.add(polygon)
for previous_component in element.previous:
if previous_component not in signal_from and element.previous:
signal_from.append(previous_component)
gdst.add(cell, previous_component, signal_from = signal_from)
for next_component in element.next:
if next_component not in signal_from and element.next:
signal_from.append(next_component)
gdst.add(cell, next_component, signal_from = signal_from)
else:
cell.add(element)
def mirror(p):
#============================
# Mirror points about y-axis \\
#=========================================================================
# Arguments: p : list of (x, y) points ||
#=========================================================================
for i, val in enumerate(p):
p[i] = (-val[0], val[1])
return p
def symm_coords(points, mirror_x = True, mirror_y = True):
if not isinstance(points, list):
points = [points]
output_points = copy.deepcopy(points)
if mirror_y:
for i, val in enumerate(points):
output_points.append((-val[0], val[1]))
if mirror_x:
for i, val in enumerate(points):
output_points.append((val[0], -val[1]))
if mirror_x and mirror_y:
for i, val in enumerate(points):
output_points.append((-val[0], -val[1]))
return output_points
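# Illustrative example (added comment): symm_coords((1, 2)) returns
# [(1, 2), (-1, 2), (1, -2), (-1, -2)].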
def save(cell, filename, unit = 1e-6, precision = 1e-9):
#=====================
# Save cell to a file \\
#=========================================================================
# Arguments: cell : gdspy cell object or a list of cells ||
# filename : filename to write to (relative path) ||
#=========================================================================
writer = gp.GdsWriter(filename, unit = unit, precision = precision)
if type(cell) == type([]):
for cell_item in cell:
writer.write_cell(cell_item)
else:
writer.write_cell(cell)
return writer.close()
def biquadratic_func(x):
return x ** 2 * (2 - x ** 2)
def rotate_reference_cell(reference, angle, center = (0, 0)):
dx = np.cos(angle) * (reference.origin[0] - center[0]) - np.sin(angle) * (reference.origin[1] - center[1]) + center[0]
dy = np.sin(angle) * (reference.origin[0] - center[0]) + np.cos(angle) * (reference.origin[1] - center[1]) + center[1]
angle_deg = np.degrees(angle)
reference.rotation += angle_deg
reference.translate(dx - reference.origin[0], dy - reference.origin[1])
def inside(points, cellref, dist, nop = 3, precision = 0.001):
#===============================
# Check points inside a cell \\
#=========================================================================
# Arguments: points : list of points to check ||
# cellref : gdspy cell reference object ||
# dist : distance from points to search ||
# nop : number of probe points within dist ||
# precision : gdspy.inside precision parameter ||
#=========================================================================
# Force uneven
if nop % 2 == 0:
nop += 1
search_ps = []
for p in points:
px = np.linspace(p[0] - dist/2, p[0] + dist/2, nop)
py = np.linspace(p[1] - dist/2, p[1] + dist/2, nop)
search_ps.append([[i, j] for i in px for j in py])
return gp.inside(search_ps, cellref, precision = precision)
def convert_to_dxf(filename):
print("-- Converting to DXF --")
# Convert GDS to DXF with Klayout
os.system('/Applications/klayout.app/Contents/MacOS/klayout -zz -rd input="{}.gds" -rd output="{}.dxf" -r convert.rb'.format(filename, filename))
def bounding_box_center(object):
bounding_box = object.get_bounding_box()
bounding_box_x = (bounding_box[1][0] + bounding_box[0][0]) / 2
bounding_box_y = (bounding_box[1][1] + bounding_box[0][1]) / 2
return (bounding_box_x, bounding_box_y)
def file_path_name(file_path_name_ext):
filename = os.path.basename(file_path_name_ext)
filepath = file_path_name_ext.replace(filename, "")
filename = filename.replace(".py","")
return filepath + filename
|
StarcoderdataPython
|
1970573
|
<reponame>nik-panekin/olx_scraper<filename>utils/tor_proxy.py
import subprocess
import time
import requests
TOR_EXECUTABLE_PATH = 'C:/Tor/Tor/tor.exe'
TOR_SOCKS_PROXIES = {
'http': 'socks5://127.0.0.1:9050',
'https': 'socks5://127.0.0.1:9050'
}
TOR_STARTUP_TIME = 15
HTTP_BIN_HOST = 'https://httpbin.org/'
class TorProxy():
def __init__(self, executable_path: str=TOR_EXECUTABLE_PATH):
self.executable_path = executable_path
self.process = None
def __del__(self):
self.terminate()
def restart(self, wait: bool=False) -> bool:
self.terminate()
self.process = subprocess.Popen(args=[self.executable_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if wait:
time.sleep(TOR_STARTUP_TIME)
def is_running(self) -> bool:
return self.process is not None and self.process.poll() is None
def terminate(self):
if self.is_running():
self.process.terminate()
def test_ok(self) -> bool:
if self.is_running():
try:
r = requests.get(HTTP_BIN_HOST, proxies=TOR_SOCKS_PROXIES)
except requests.exceptions.RequestException:
return False
if r.status_code != requests.codes.ok:
return False
return True
return False
def get_output(self) -> str:
if self.process is not None and self.process.poll() is not None:
return self.process.stdout.read().decode('ascii', 'ignore')
else:
return None
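# Illustrative usage sketch (not part of the original module); assumes a Tor
# executable is installed at TOR_EXECUTABLE_PATH.
if __name__ == '__main__':
    tor = TorProxy()
    tor.restart(wait=True)  # launch Tor and wait for it to bootstrap
    if tor.test_ok():
        print('Tor SOCKS proxy is reachable via', TOR_SOCKS_PROXIES['https'])
    else:
        print('Tor proxy check failed')
    tor.terminate()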
|
StarcoderdataPython
|
1639645
|
<reponame>KVSlab/vascularManipulationToolkit<filename>morphman/common/vmtk_wrapper.py
## Copyright (c) <NAME>, <NAME>. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
from os import path
from vmtk import vtkvmtk, vmtkscripts
# Global array names
from morphman.common.vtk_wrapper import read_polydata, write_polydata
radiusArrayName = 'MaximumInscribedSphereRadius'
surfaceNormalsArrayName = 'SurfaceNormalArray'
parallelTransportNormalsArrayName = 'ParallelTransportNormals'
groupIDsArrayName = "GroupIds"
abscissasArrayName = 'Abscissas'
blankingArrayName = 'Blanking'
branchClippingArrayName = 'BranchClippingArray'
def vmtk_smooth_centerline(centerlines, num_iter, smooth_factor):
"""
Wrapper for vmtkCenterlineSmoothing. Smooth centerlines with a moving average filter.
Args:
centerlines (vtkPolyData): Centerline to be smoothed.
num_iter (int): Number of smoothing iterations.
smooth_factor (float): Smoothing factor
Returns:
vtkPolyData: Smoothed version of input centerline
"""
centerline_smoothing = vmtkscripts.vmtkCenterlineSmoothing()
centerline_smoothing.Centerlines = centerlines
centerline_smoothing.NumberOfSmoothingIterations = num_iter
centerline_smoothing.SmoothingFactor = smooth_factor
centerline_smoothing.Execute()
centerlines_smoothed = centerline_smoothing.Centerlines
return centerlines_smoothed
def vmtk_compute_centerlines(end_point, inlet, method, outlet, pole_ids, resampling_step, surface, voronoi,
flip_normals=False, cap_displacement=None, delaunay_tolerance=None,
simplify_voronoi=False):
"""
Wrapper for vmtkCenterlines.
compute centerlines from a branching tubular surface. Seed points can be interactively selected on the surface,
or specified as the barycenters of the open boundaries of the surface.
Args:
end_point (int): Toggle append open profile barycenters to centerlines
surface (vktPolyData): Surface model
voronoi (vtkPolyData): Voronoi diagram based on previous centerlines (Optional)
inlet (ndarray): List of source point coordinates
method (str): Seed point selection method
outlet (ndarray): List of target point coordinates
pole_ids (ndarray): Pole ID list of Voronoi diagram (Optional)
resampling_step (float): Resampling step
flip_normals (float): Flip normals after outward normal computation
cap_displacement (float): Displacement of the center points of caps at open profiles along their normals
delaunay_tolerance (float): Tolerance for evaluating coincident points during Delaunay tessellation
simplify_voronoi (bool): Toggle simplification of Voronoi diagram
Returns:
"""
centerlines = vmtkscripts.vmtkCenterlines()
centerlines.Surface = surface
centerlines.SeedSelectorName = method
centerlines.AppendEndPoints = end_point
centerlines.Resampling = 1
centerlines.ResamplingStepLength = resampling_step
centerlines.SourcePoints = inlet
centerlines.TargetPoints = outlet
if voronoi is not None and pole_ids is not None:
centerlines.VoronoiDiagram = voronoi
centerlines.PoleIds = pole_ids
if flip_normals:
centerlines.FlipNormals = 1
if cap_displacement is not None:
centerlines.CapDisplacement = cap_displacement
if delaunay_tolerance is not None:
centerlines.DelaunayTolerance = delaunay_tolerance
if simplify_voronoi:
centerlines.SimplifyVoronoi = 1
centerlines.Execute()
centerlines_output = centerlines.Centerlines
return centerlines, centerlines_output
def vmtk_compute_centerline_sections(surface, centerlines):
"""
Wrapper for vmtk centerline sections.
Args:
surface (vtkPolyData): Surface to meassure area.
centerlines (vtkPolyData): centerline to measure along.
Returns:
line (vtkPolyData): centerline with the attributes
centerline_sections_area (vtkPolyData): sections along the centerline
"""
centerline_sections = vtkvmtk.vtkvmtkPolyDataCenterlineSections()
centerline_sections.SetInputData(surface)
centerline_sections.SetCenterlines(centerlines)
centerline_sections.SetCenterlineSectionAreaArrayName('CenterlineSectionArea')
centerline_sections.SetCenterlineSectionMinSizeArrayName('CenterlineSectionMinSize')
centerline_sections.SetCenterlineSectionMaxSizeArrayName('CenterlineSectionMaxSize')
centerline_sections.SetCenterlineSectionShapeArrayName('CenterlineSectionShape')
centerline_sections.SetCenterlineSectionClosedArrayName('CenterlineSectionClosed')
centerline_sections.Update()
centerlines_sections_area = centerline_sections.GetOutput()
line = centerline_sections.GetCenterlines()
return line, centerlines_sections_area
def vmtk_compute_geometric_features(centerlines, smooth, outputsmoothed=False, factor=1.0, iterations=100):
"""Wrapper for vmtk centerline geometry.
Args:
centerlines (vtkPolyData): Line to compute centerline geometry from.
smooth (bool): Turn on and off smoothing before computing the geometric features.
outputsmoothed (bool): Turn on and off the smoothed centerline.
factor (float): Smoothing factor.
iterations (int): Number of iterations.
Returns:
line (vtkPolyData): Line with geometry.
"""
geometry = vmtkscripts.vmtkCenterlineGeometry()
geometry.Centerlines = centerlines
if smooth:
geometry.LineSmoothing = 1
geometry.OutputSmoothedLines = outputsmoothed
geometry.SmoothingFactor = factor
geometry.NumberOfSmoothingIterations = iterations
geometry.FrenetTangentArrayName = "FrenetTangent"
geometry.FrenetNormalArrayName = "FrenetNormal"
geometry.FrenetBinormalArrayName = "FrenetBiNormal"
geometry.CurvatureArrayName = "Curvature"
geometry.TorsionArrayName = "Torsion"
geometry.TortuosityArrayName = "Tortuosity"
geometry.Execute()
return geometry.Centerlines
def vmtk_compute_centerline_attributes(centerlines):
""" Wrapper for centerline attributes.
Args:
centerlines (vtkPolyData): Line to investigate.
Returns:
line (vtkPolyData): Line with centerline atributes.
"""
attributes = vmtkscripts.vmtkCenterlineAttributes()
attributes.Centerlines = centerlines
attributes.NormalsArrayName = parallelTransportNormalsArrayName
attributes.AbscissaArrayName = abscissasArrayName
attributes.Execute()
centerlines = attributes.Centerlines
return centerlines
def vmtk_resample_centerline(centerlines, length):
"""Wrapper for vmtkcenterlineresampling
Args:
centerlines (vtkPolyData): line to resample.
length (float): resampling step.
Returns:
line (vtkPolyData): Resampled line.
"""
resampler = vmtkscripts.vmtkCenterlineResampling()
resampler.Centerlines = centerlines
resampler.Length = length
resampler.Execute()
resampled_centerline = resampler.Centerlines
return resampled_centerline
def vmtk_cap_polydata(surface, boundary_ids=None, displacement=0.0, in_plane_displacement=0.0):
"""Wrapper for vmtkCapPolyData.
Close holes in a surface model.
Args:
in_plane_displacement (float): Displacement of boundary baricenters, at section plane relative to the radius
displacement (float): Displacement of boundary baricenters along boundary normals relative to the radius.
boundary_ids (ndarray): Set ids of the boundaries to cap.
surface (vtkPolyData): Surface to be capped.
Returns:
surface (vtkPolyData): Capped surface.
"""
surface_capper = vtkvmtk.vtkvmtkCapPolyData()
surface_capper.SetInputData(surface)
surface_capper.SetDisplacement(displacement)
surface_capper.SetInPlaneDisplacement(in_plane_displacement)
if boundary_ids is not None:
surface_capper.SetBoundaryIds(boundary_ids)
surface_capper.Update()
return surface_capper.GetOutput()
def vmtk_smooth_surface(surface, method, iterations=800, passband=1.0, relaxation=0.01, normalize_coordinates=True,
smooth_boundary=True):
"""Wrapper for a vmtksurfacesmoothing.
Args:
smooth_boundary (bool): Toggle allow change of position of boundary points
normalize_coordinates (bool): Normalization of coordinates prior to filtering,
minimize spurious translation effects (Taubin only)
surface (vtkPolyData): Input surface to be smoothed.
method (str): Smoothing method.
iterations (int): Number of iterations.
passband (float): The passband for Taubin smoothing.
relaxation (float): The relaxation for laplace smoothing.
Returns:
surface (vtkPolyData): The smoothed surface.
"""
smoother = vmtkscripts.vmtkSurfaceSmoothing()
smoother.Surface = surface
smoother.NumberOfIterations = iterations
if method == "laplace":
smoother.RelaxationFactor = relaxation
elif method == "taubin":
smoother.PassBand = passband
if not normalize_coordinates:
smoother.NormalizeCoordinates = 0
if not smooth_boundary:
smoother.BoundarySmoothing = 0
smoother.Method = method
smoother.Execute()
surface = smoother.Surface
return surface
def vmtk_compute_voronoi_diagram(surface, filename, simplify_voronoi=False, cap_displacement=None, flip_normals=False,
check_non_manifold=False, delaunay_tolerance=0.001, subresolution_factor=1.0):
"""
Wrapper for vmtkDelaunayVoronoi. Creates a surface model's
corresponding Voronoi diagram.
Args:
subresolution_factor (float): Factor for removal of subresolution tetrahedra
flip_normals (bool): Flip normals after outward normal computation.
cap_displacement (float): Displacement of the center points of caps at open profiles along their normals
simplify_voronoi (bool): Use an alternative algorithm to compute the Voronoi diagram, reducing quality but improving speed
check_non_manifold (bool): Check the surface for non-manifold edges
delaunay_tolerance (float): Tolerance for evaluating coincident points during Delaunay tessellation
surface (vtkPolyData): Surface model
filename (str): Path where voronoi diagram is stored
Returns:
new_voronoi (vtkPolyData): Voronoi diagram
"""
if path.isfile(filename):
return read_polydata(filename)
voronoi = vmtkscripts.vmtkDelaunayVoronoi()
voronoi.Surface = surface
voronoi.RemoveSubresolutionTetrahedra = 1
voronoi.DelaunayTolerance = delaunay_tolerance
voronoi.SubresolutionFactor = subresolution_factor
if simplify_voronoi:
voronoi.SimplifyVoronoi = 1
if cap_displacement is not None:
voronoi.CapDisplacement = cap_displacement
if flip_normals:
voronoi.FlipNormals = 1
if check_non_manifold:
voronoi.CheckNonManifold = 1
voronoi.Execute()
new_voronoi = voronoi.VoronoiDiagram
write_polydata(new_voronoi, filename)
return new_voronoi
def vmtk_polyball_modeller(voronoi_diagram, poly_ball_size):
"""
Wrapper for vtkvmtkPolyBallModeller.
Create an image where a polyball or polyball line are evaluated as a function.
Args:
voronoi_diagram (vtkPolyData): Input Voronoi diagram representing surface model
poly_ball_size (list): Resolution of output
Returns:
vtkvmtkPolyBallModeller: Image where polyballs have been evaluated over a Voronoi diagram
"""
modeller = vtkvmtk.vtkvmtkPolyBallModeller()
modeller.SetInputData(voronoi_diagram)
modeller.SetRadiusArrayName(radiusArrayName)
modeller.UsePolyBallLineOff()
modeller.SetSampleDimensions(poly_ball_size)
modeller.Update()
return modeller
def vmtk_surface_connectivity(surface, method="largest", clean_output=True, closest_point=None):
"""
Wrapper for vmtkSurfaceConnectivity. Extract the largest connected region,
the closest point-connected region or the scalar-connected region from a surface
Args:
surface (vtkPolyData): Surface model
method (str): Connectivity method, either 'largest' or 'closest'
clean_output (bool): Clean the unused points in the output
closest_point (ndarray): Coordinates of the closest point
Returns:
vmtkSurfaceConnectivity: Filter for extracting largest connected region
"""
connector = vmtkscripts.vmtkSurfaceConnectivity()
connector.Surface = surface
connector.Method = method
if clean_output:
connector.CleanOutput = 1
if closest_point is not None:
connector.ClosestPoint = closest_point
connector.Execute()
return connector
def vmtk_branch_clipper(centerlines, surface, clip_value=0.0, inside_out=False, use_radius_information=True,
interactive=False):
"""
Wrapper for vmtkBranchClipper. Divide a surface in relation to its split and grouped centerlines.
Args:
centerlines (vtkPolyData): Input centerlines
surface (vtkPolyData): Input surface model
clip_value (float):
inside_out (bool): Get the inverse of the branch clipper output.
use_radius_information (bool): To use MISR info for clipping branches.
interactive (bool): Use interactive mode, requires user input.
Returns:
vmtkBranchClipper: Branch clipper used to divide a surface into regions.
"""
clipper = vmtkscripts.vmtkBranchClipper()
clipper.Surface = surface
clipper.Centerlines = centerlines
clipper.ClipValue = clip_value
clipper.RadiusArrayName = radiusArrayName
clipper.GroupIdsArrayName = groupIDsArrayName
clipper.BlankingArrayName = blankingArrayName
if inside_out:
clipper.InsideOut = 1
if not use_radius_information:
clipper.UseRadiusInformation = 0
if interactive:
clipper.Interactive = 1
clipper.Execute()
return clipper
def vmtk_endpoint_extractor(centerlines, number_of_end_point_spheres, number_of_gap_spheres=1):
"""
Wrapper for vmtkEndpointExtractor.
Find the endpoints of a split and grouped centerline
Args:
centerlines (vtkPolyData): Input centerlines.
number_of_end_point_spheres (float): Number of spheres to skip at endpoint
number_of_gap_spheres (float): Number of spheres to skip per gap.
Returns:
vmtkEndpointExtractor: Endpoint extractor based on centerline
"""
extractor = vmtkscripts.vmtkEndpointExtractor()
extractor.Centerlines = centerlines
extractor.RadiusArrayName = radiusArrayName
extractor.GroupIdsArrayName = groupIDsArrayName
extractor.BlankingArrayName = branchClippingArrayName
extractor.NumberOfEndPointSpheres = number_of_end_point_spheres
extractor.NumberOfGapSpheres = number_of_gap_spheres
extractor.Execute()
return extractor
def vmtk_compute_surface_normals(surface, auto_orient_normals=True, orient_normals=True,
compute_cell_normals=False, flip_normals=False):
"""
Wrapper for vmtkSurfaceNormals.
Computes the normals of the input surface.
Args:
surface (vtkPolyData): Input surface model
auto_orient_normals (bool): Try to auto orient normals outwards
orient_normals (bool): Try to orient normals so that neighboring points have similar orientations
compute_cell_normals (bool): Compute cell normals instead of point normals
flip_normals (bool): Flip normals after computing them
Returns:
vtkPolyData: Surface model with computed normals
"""
surface_normals = vmtkscripts.vmtkSurfaceNormals()
surface_normals.Surface = surface
surface_normals.NormalsArrayName = surfaceNormalsArrayName
if not auto_orient_normals:
surface_normals.AutoOrientNormals = 0
if not orient_normals:
surface_normals.Consistency = 0
if compute_cell_normals:
surface_normals.ComputeCellNormals = 1
if flip_normals:
surface_normals.FlipNormals = 1
surface_normals.Execute()
surface_with_normals = surface_normals.Surface
return surface_with_normals
def vmtk_compute_branch_extractor(centerlines):
"""
Wrapper for vmtkBranchExtractor.
Split and group centerlines along branches:
Args:
centerlines (vtkPolyData): Line to split into branches.
Returns:
vtkPolyData: Split centerline.
"""
brancher = vmtkscripts.vmtkBranchExtractor()
brancher.Centerlines = centerlines
brancher.RadiusArrayName = radiusArrayName
brancher.Execute()
centerlines_branched = brancher.Centerlines
return centerlines_branched
def vmtk_surface_curvature(surface, curvature_type="mean", absolute=False,
median_filtering=False, curvature_on_boundaries=False,
bounded_reciporcal=False, epsilon=1.0, offset=0.0):
"""Wrapper for vmtksurfacecurvature
Args:
surface (vtkPolyData): The input surface
curvature_type (str): The type of surface curvature to compute (mean | gaussian | maximum | minimum)
absolute (bool): Output the absolute value of the curvature
median_filtering (bool): Output curvature after median filtering to suppress numerical noise speckles
curvature_on_boundaries (bool): Turn on/off curvature on boundaries
bounded_reciporcal (bool): Output bounded reciprocal of the curvature
epsilon (float): Bounded reciprocal epsilon at the denominator
offset (float): Offset curvature by the specified value
Returns:
surface (vtkPolydata): Input surface with an point data array with curvature values
"""
curvature = vmtkscripts.vmtkSurfaceCurvature()
curvature.Surface = surface
curvature.CurvatureType = curvature_type
if absolute:
curvature.AbsoluteCurvature = 1
else:
curvature.AbsoluteCurvature = 0
if median_filtering:
curvature.MedianFiltering = 1
else:
curvature.MedianFiltering = 0
if curvature_on_boundaries:
curvature.CurvatureOnBoundaries = 1
else:
curvature.CurvatureOnBoundaries = 0
if bounded_reciporcal:
curvature.BoundedReciprocal = 1
else:
curvature.BoundedReciprocal = 0
curvature.Epsilon = epsilon
curvature.Offset = offset
curvature.Execute()
return curvature.Surface
def vmtk_surface_distance(surface1, surface2, distance_array_name="Distance",
distance_vectors_array_name="",
signed_distance_array_name="", flip_normals=False):
"""
Compute the pointwise minimum distance of the input surface from a reference surface
Args:
surface1 (vtkPolyData): Input surface
surface2 (vtkPolyData): Reference surface
distance_array_name (str): Name of distance array
distance_vectors_array_name (str): Name of distance array (of vectors)
signed_distance_array_name (str): Name of distance arrays signed as positive or negative
flip_normals (bool): Flip normals relative to reference surface
Returns:
surface (vtkPoyData): Output surface with distance info
"""
distance = vmtkscripts.vmtkSurfaceDistance()
distance.Surface = surface1
distance.ReferenceSurface = surface2
distance.DistanceArrayName = distance_array_name
distance.DistanceVectorsArrayName = distance_vectors_array_name
distance.SignedDistanceArrayName = signed_distance_array_name
if flip_normals:
distance.FlipNormals = 1
else:
distance.FlipNormals = 0
distance.Execute()
return distance.Surface
|
StarcoderdataPython
|
6498146
|
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2017 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from pymor.core.interfaces import ImmutableInterface
from pymor.discretizations.basic import DiscretizationBase
from pymor.operators.mpi import mpi_wrap_operator
from pymor.tools import mpi
from pymor.vectorarrays.mpi import MPIVectorSpace, _register_local_space
class MPIDiscretization(DiscretizationBase):
"""Wrapper class for MPI distributed |Discretizations|.
Given a single-rank implementation of a |Discretization|, this
wrapper class uses the event loop from :mod:`pymor.tools.mpi`
to allow an MPI distributed usage of the |Discretization|.
The underlying implementation needs to be MPI aware.
In particular, the discretization's
:meth:`~pymor.discretizations.interfaces.DiscretizationInterface.solve`
method has to perform an MPI parallel solve of the discretization.
Note that this class is not intended to be instantiated directly.
Instead, you should use :func:`mpi_wrap_discretization`.
Parameters
----------
obj_id
:class:`~pymor.tools.mpi.ObjectId` of the local
|Discretization| on each rank.
operators
Dictionary of all |Operators| contained in the discretization,
wrapped for use on rank 0. Use :func:`mpi_wrap_discretization`
to automatically wrap all operators of a given MPI-aware
|Discretization|.
products
See `operators`.
pickle_local_spaces
See :class:`~pymor.operators.mpi.MPIOperator`.
space_type
See :class:`~pymor.operators.mpi.MPIOperator`.
"""
def __init__(self, obj_id, operators, products=None,
pickle_local_spaces=True, space_type=MPIVectorSpace):
d = mpi.get_object(obj_id)
visualizer = MPIVisualizer(obj_id)
super().__init__(operators=operators, products=products,
visualizer=visualizer, cache_region=None, name=d.name)
self.obj_id = obj_id
local_spaces = mpi.call(_MPIDiscretization_get_local_spaces, obj_id, pickle_local_spaces)
if all(ls == local_spaces[0] for ls in local_spaces):
local_spaces = (local_spaces[0],)
self.solution_space = space_type(local_spaces)
self.build_parameter_type(d)
self.parameter_space = d.parameter_space
def _solve(self, mu=None):
return self.solution_space.make_array(
mpi.call(mpi.method_call_manage, self.obj_id, 'solve', mu=mu)
)
def __del__(self):
mpi.call(mpi.remove_object, self.obj_id)
def _MPIDiscretization_get_local_spaces(self, pickle_local_spaces):
self = mpi.get_object(self)
local_space = self.solution_space
if not pickle_local_spaces:
local_space = _register_local_space(local_space)
local_spaces = mpi.comm.gather(local_space, root=0)
if mpi.rank0:
return tuple(local_spaces)
class MPIVisualizer(ImmutableInterface):
def __init__(self, d_obj_id):
self.d_obj_id = d_obj_id
def visualize(self, U, d, **kwargs):
if isinstance(U, tuple):
U = tuple(u.obj_id for u in U)
else:
U = U.obj_id
mpi.call(_MPIVisualizer_visualize, self.d_obj_id, U, **kwargs)
def _MPIVisualizer_visualize(d, U, **kwargs):
d = mpi.get_object(d)
if isinstance(U, tuple):
U = tuple(mpi.get_object(u) for u in U)
else:
U = mpi.get_object(U)
d.visualize(U, **kwargs)
def mpi_wrap_discretization(local_discretizations, use_with=False, with_apply2=False,
pickle_local_spaces=True, space_type=MPIVectorSpace):
"""Wrap MPI distributed local |Discretizations| to a global |Discretization| on rank 0.
Given MPI distributed local |Discretizations| referred to by the
:class:`~pymor.tools.mpi.ObjectId` `local_discretizations`, return a new |Discretization|
which manages these distributed discretizations from rank 0. This
is done by first wrapping all |Operators| of the |Discretization| using
:func:`~pymor.operators.mpi.mpi_wrap_operator`.
Alternatively, `local_discretizations` can be a callable (with no arguments)
which is then called on each rank to instantiate the local |Discretizations|.
When `use_with` is `False`, an :class:`MPIDiscretization` is instantiated
with the wrapped operators. A call to
:meth:`~pymor.discretizations.interfaces.DiscretizationInterface.solve`
will then use an MPI parallel call to the
:meth:`~pymor.discretizations.interfaces.DiscretizationInterface.solve`
methods of the wrapped local |Discretizations| to obtain the solution.
This is usually what you want when the actual solve is performed by
an implementation in the external solver.
When `use_with` is `True`, :meth:`~pymor.core.interfaces.ImmutableInterface.with_`
is called on the local |Discretization| on rank 0, to obtain a new
|Discretization| with the wrapped MPI |Operators|. This is mainly useful
when the local discretizations are generic |Discretizations| as in
:mod:`pymor.discretizations.basic` and
:meth:`~pymor.discretizations.interfaces.DiscretizationInterface.solve`
is implemented directly in pyMOR via operations on the contained
|Operators|.
Parameters
----------
local_discretizations
:class:`~pymor.tools.mpi.ObjectId` of the local |Discretizations|
on each rank or a callable generating the |Discretizations|.
use_with
See above.
with_apply2
See :class:`~pymor.operators.mpi.MPIOperator`.
pickle_local_spaces
See :class:`~pymor.operators.mpi.MPIOperator`.
space_type
See :class:`~pymor.operators.mpi.MPIOperator`.
"""
if not isinstance(local_discretizations, mpi.ObjectId):
local_discretizations = mpi.call(mpi.function_call_manage, local_discretizations)
operators, products = mpi.call(_mpi_wrap_discretization_manage_operators, local_discretizations)
operators = {k: mpi_wrap_operator(v, with_apply2=with_apply2,
pickle_local_spaces=pickle_local_spaces, space_type=space_type) if v else None
for k, v in operators.items()}
products = {k: mpi_wrap_operator(v, with_apply2=with_apply2,
pickle_local_spaces=pickle_local_spaces, space_type=space_type) if v else None
for k, v in products.items()}
if use_with:
d = mpi.get_object(local_discretizations)
visualizer = MPIVisualizer(local_discretizations)
return d.with_(operators=operators, products=products, visualizer=visualizer, cache_region=None)
else:
return MPIDiscretization(local_discretizations, operators, products,
pickle_local_spaces=pickle_local_spaces, space_type=space_type)
def _mpi_wrap_discretization_manage_operators(obj_id):
d = mpi.get_object(obj_id)
operators = {k: mpi.manage_object(v) if v else None for k, v in sorted(d.operators.items())}
products = {k: mpi.manage_object(v) if v else None for k, v in sorted(d.products.items())} if d.products else {}
if mpi.rank0:
return operators, products
|
StarcoderdataPython
|
143037
|
'''Define the user model'''
from sqlalchemy import Column, Integer, String
from .base import Base
class User(Base):
'''User Table'''
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
display_name = Column(String(100), nullable=True)
username = Column(String(300), nullable=False, index=True)
email = Column(String(254), unique=True)
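# Illustrative usage (hypothetical engine/session setup, not part of this module):
#   from sqlalchemy.orm import Session
#   with Session(engine) as session:
#       session.add(User(username='ada', email='<EMAIL>', display_name='Ada'))
#       session.commit()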
|
StarcoderdataPython
|
3428917
|
<reponame>cuis15/FCFL
import numpy as np
import argparse
from utils import concave_fun_eval, create_pf, circle_points
from hco_search import hco_search
import matplotlib.pyplot as plt
from latex_utils import latexify
parser = argparse.ArgumentParser()
parser.add_argument('--n', type = int, default=20, help="dimension of the solution space")
parser.add_argument('--m', type = int, default=20, help="dimension of the objective space")
parser.add_argument('--eps0', type = float, default=0.6, help="constraint threshold epsilon")
parser.add_argument('--eps1', type = float, default=1e-4, help="tolerance parameter passed to hco_search")
parser.add_argument('--eps2', type = float, default=1e-4, help="tolerance parameter passed to hco_search")
parser.add_argument('--para_delta', type = float, default=0.1, help="delta parameter for hco_search")
parser.add_argument('--lr_delta', type = float, default=0.01, help="learning rate for delta")
parser.add_argument('--step_size', type = float, default=0.005, help="step size for the search")
parser.add_argument('--max_iters', type = int, default=700, help="maximum number of search iterations")
parser.add_argument('--grad_tol', type = float, default=1e-4, help="gradient tolerance for convergence")
parser.add_argument('--store_xs', type = bool, default=False, help="whether to store intermediate solutions")
args = parser.parse_args()
def case1_satisfyingMCF():
n = args.n # dim of solution space
m = args.m # dim of objective space
##construct x0
x0 = np.zeros(n)
x0[range(0, n, 2)] = -0.2
x0[range(1, n, 2)] = -0.2
eps_set = [0.8, 0.6, 0.4, 0.2]
color_set = ["c", "g", "orange", "b"]
latexify(fig_width=2.2, fig_height=1.8)
l0, _ = concave_fun_eval(x0)
max_iters = args.max_iters
relax = True
pf = create_pf()
fig = plt.figure()
fig.subplots_adjust(left=.12, bottom=.12, right=.9, top=.9)
label = 'Pareto\nFront' if relax else ''
plt.plot(pf[:, 0], pf[:, 1], lw=2.0, c='k', label=label)
label = r'$l(\theta^0)$'
plt.scatter([l0[0]], [l0[1]], c='r', s=40)
plt.annotate(label, xy = (l0[0]+0.03, l0[1]), xytext = (l0[0]+0.03, l0[1]))
for idx, eps0 in enumerate(eps_set):
if eps0 == 0.2:
eps_plot = np.array([[ i*0.1 * 0.903, 0.2] for i in range(11)])
plt.plot(eps_plot[:,0], eps_plot[:,1], color = "gray", label = r'$\epsilon$', lw=1, ls='--')
elif eps0 == 0.4:
eps_plot = np.array([[ i*0.1 * 0.807, 0.4] for i in range(11)])
plt.plot(eps_plot[:,0], eps_plot[:,1], color = "gray", lw=1, ls='--')
elif eps0 == 0.6:
eps_plot = np.array([[ i*0.1* 0.652, 0.6] for i in range(11)])
plt.plot(eps_plot[:,0], eps_plot[:,1], color = "gray", lw=1, ls='--')
elif eps0 == 0.8:
eps_plot = np.array([[ i*0.1 * 0.412, 0.8] for i in range(11)])
plt.plot(eps_plot[:,0], eps_plot[:,1], color = "gray", lw=1, ls='--')
else:
print("error eps0")
exit()
c = color_set[idx]
_, res = hco_search(concave_fun_eval, x=x0, deltas = [0.5, 0.5], para_delta = 0.5, lr_delta = args.lr_delta, relax=False, eps=[eps0, args.eps1, args.eps2], max_iters=max_iters,
n_dim=args.n, step_size=args.step_size, grad_tol=args.grad_tol, store_xs=args.store_xs)
ls = res['ls']
alpha = 1.0
zorder = 1
# plt.plot(ls[:, 0], ls[:, 1], c=c, lw=2.0, alpha=alpha, zorder=zorder)
plt.plot(ls[:, 0], ls[:, 1], c=c, lw=2.0)
print(ls[-1])
plt.scatter(ls[[-1], 0], ls[[-1], 1], c=c, s=40)
plt.xlabel(r'$l_1$')
plt.ylabel(r'$l_2$', rotation = "horizontal")
plt.legend(loc='lower left', handletextpad=0.3, framealpha=0.9)
ax = plt.gca()
ax.xaxis.set_label_coords(1.05, -0.02)
ax.yaxis.set_label_coords(-0.02, 1.02)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
plt.savefig('figures/satifying' + '.pdf')
plt.close()
def case2_violatingMCF():
n = args.n # dim of solution space
m = args.m # dim of objective space
##construct x0
x0 = np.zeros(n)
x0[range(0, n, 2)] = 0.3
x0[range(1, n, 2)] = -0.3
eps_set = [0.2, 0.4, 0.6, 0.8]
color_set = ["c", "g", "orange", "b"]
latexify(fig_width=2.2, fig_height=1.8)
l0, _ = concave_fun_eval(x0)
max_iters = args.max_iters
relax = True
pf = create_pf()
fig = plt.figure()
fig.subplots_adjust(left=.12, bottom=.12, right=.9, top=.9)
label = 'Pareto\nFront' if relax else ''
plt.plot(pf[:, 0], pf[:, 1], lw=2.0, c='k', label=label)
label = r'$l(\theta^0)$'
plt.scatter([l0[0]], [l0[1]], c='r', s=40)
plt.annotate(label, xy = (l0[0]+0.03, l0[1]), xytext = (l0[0]+0.03, l0[1]))
for idx, eps0 in enumerate(eps_set):
if eps0 == 0.2:
eps_plot = np.array([[ i*0.1 * 0.903, 0.2] for i in range(11)])
plt.plot(eps_plot[:,0], eps_plot[:,1], color = "gray", label = r'$\epsilon$', lw=1, ls='--')
elif eps0 == 0.4:
eps_plot = np.array([[ i*0.1 * 0.807, 0.4] for i in range(11)])
plt.plot(eps_plot[:,0], eps_plot[:,1], color = "gray", lw=1, ls='--')
elif eps0 == 0.6:
eps_plot = np.array([[ i*0.1* 0.652, 0.6] for i in range(11)])
plt.plot(eps_plot[:,0], eps_plot[:,1], color = "gray", lw=1, ls='--')
elif eps0 == 0.8:
eps_plot = np.array([[ i*0.1 * 0.412, 0.8] for i in range(11)])
plt.plot(eps_plot[:,0], eps_plot[:,1], color = "gray", lw=1, ls='--')
else:
print("error eps0")
exit()
c = color_set[idx]
_, res = hco_search(concave_fun_eval, x=x0, deltas = [0.5, 0.5], para_delta = 0.5, lr_delta = args.lr_delta, relax=False, eps=[eps0, args.eps1, args.eps2], max_iters=max_iters,
n_dim=args.n, step_size=args.step_size, grad_tol=args.grad_tol, store_xs=args.store_xs)
ls = res['ls']
alpha = 1.0
zorder = 1
plt.plot(ls[:, 0], ls[:, 1], c=c, lw=2.0, alpha=alpha, zorder=zorder)
print(ls[-1])
plt.scatter(ls[[-1], 0], ls[[-1], 1], c=c, s=40)
plt.xlabel(r'$l_1$')
plt.ylabel(r'$l_2$', rotation = "horizontal")
plt.legend(loc='lower left', handletextpad=0.3, framealpha=0.9)
ax = plt.gca()
ax.xaxis.set_label_coords(1.05, -0.02)
ax.yaxis.set_label_coords(-0.02, 1.02)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
plt.savefig('figures/violating' + '.pdf')
plt.close()
if __name__ == '__main__':
case1_satisfyingMCF()
case2_violatingMCF()
|
StarcoderdataPython
|
1697337
|
<reponame>Weiqi97/LilyPadz
from flask import Flask, request, render_template
from lilypadz.model.clustering import get_all_clustering_result, \
get_one_clustering_result
from lilypadz.model.small_series import get_ss_for_one_toad, \
get_ss_for_multiple_toads, get_ss_for_one_toad_sight, \
get_ss_for_multiple_toads_sight
# Set up the flask app with desired parameters.
app = Flask(
__name__,
static_folder="static",
template_folder="templates"
)
@app.route('/')
def index():
return render_template(
"index.html"
)
@app.route("/small_series", methods=["POST"])
def small_series():
options = request.json
if not options["sight"]:
if options["compare"]:
return get_ss_for_multiple_toads(
names=options["toads"].split("!"),
variable=options["variable"].split("!")
)
else:
return get_ss_for_one_toad(
name=options["toad"],
variable=options["variable"].split("!")
)
else:
if options["compare"]:
return get_ss_for_multiple_toads_sight(
names=options["toads"].split("!"),
variable=options["variable"].split("!")
)
else:
return get_ss_for_one_toad_sight(
name=options["toad"],
variable=options["variable"].split("!")
)
@app.route("/cluster", methods=["POST"])
def cluster():
options = request.json
if not options["sight"]:
if options["compare"]:
return get_all_clustering_result(
n_clusters=int(options["num_cluster"]),
names=options["toads"].split("!"),
variable=options["variable"].split("!")
)
else:
return get_one_clustering_result(
n_clusters=int(options["num_cluster"]),
name=options["toad"],
variable=options["variable"].split("!")
)
else:
if options["compare"]:
return get_all_clustering_result(
n_clusters=int(options["num_cluster"]),
names=options["toads"].split("!"),
variable=options["variable"].split("!")
)
else:
return get_one_clustering_result(
n_clusters=int(options["num_cluster"]),
name=options["toad"],
variable=options["variable"].split("!")
)
@app.route('/upload', methods=['POST'])
def upload_file():
# Get the file from flask.
file = request.files["file"]
# Save the file to proper location
file.save("lilypadz/data/dummy")
# Return a dummy message to front end.
return "GOOD"
|
StarcoderdataPython
|
3432349
|
from utils.db.mongo_orm import *
class Role(Model):
class Meta:
database = db
collection = 'role'
# Fields
_id = ObjectIdField()
name = StringField(unique=True)
description = StringField()
def __str__(self):
return "name:{} - description:{}".format(self.name, self.description)
if __name__ == "__main__":
pass
|
StarcoderdataPython
|
11390852
|
# -*- coding: utf-8 -*-
"""Implementation of the ``AbstractRepositoryBackend`` using the ``disk-objectstore`` as the backend."""
import contextlib
import shutil
from typing import BinaryIO, Iterable, Iterator, List, Optional, Tuple
from disk_objectstore import Container
from aiida.common.lang import type_check
from .abstract import AbstractRepositoryBackend
__all__ = ('DiskObjectStoreRepositoryBackend',)
class DiskObjectStoreRepositoryBackend(AbstractRepositoryBackend):
"""Implementation of the ``AbstractRepositoryBackend`` using the ``disk-object-store`` as the backend."""
def __init__(self, container):
type_check(container, Container)
self._container = container
def __str__(self) -> str:
"""Return the string representation of this repository."""
if self.is_initialised:
return f'DiskObjectStoreRepository: {self.container.container_id} | {self.container.get_folder()}'
return 'DiskObjectStoreRepository: <uninitialised>'
@property
def uuid(self) -> Optional[str]:
"""Return the unique identifier of the repository."""
if not self.is_initialised:
return None
return self.container.container_id
@property
def key_format(self) -> Optional[str]:
return self.container.hash_type
def initialise(self, **kwargs) -> None:
"""Initialise the repository if it hasn't already been initialised.
:param kwargs: parameters for the initialisation.
"""
self.container.init_container(**kwargs)
@property
def is_initialised(self) -> bool:
"""Return whether the repository has been initialised."""
return self.container.is_initialised
@property
def container(self) -> Container:
return self._container
def erase(self):
"""Delete the repository itself and all its contents."""
try:
shutil.rmtree(self.container.get_folder())
except FileNotFoundError:
pass
def _put_object_from_filelike(self, handle: BinaryIO) -> str:
"""Store the byte contents of a file in the repository.
:param handle: filelike object with the byte content to be stored.
:return: the generated fully qualified identifier for the object within the repository.
:raises TypeError: if the handle is not a byte stream.
"""
return self.container.add_streamed_object(handle)
def has_objects(self, keys: List[str]) -> List[bool]:
return self.container.has_objects(keys)
@contextlib.contextmanager
def open(self, key: str) -> Iterator[BinaryIO]:
"""Open a file handle to an object stored under the given key.
.. note:: this should only be used to open a handle to read an existing file. To write a new file use the method
``put_object_from_filelike`` instead.
:param key: fully qualified identifier for the object within the repository.
:return: yield a byte stream object.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
"""
super().open(key)
with self.container.get_object_stream(key) as handle:
yield handle # type: ignore[misc]
def iter_object_streams(self, keys: List[str]) -> Iterator[Tuple[str, BinaryIO]]:
with self.container.get_objects_stream_and_meta(keys) as triplets:
for key, stream, _ in triplets:
assert stream is not None
yield key, stream # type: ignore[misc]
def delete_objects(self, keys: List[str]) -> None:
super().delete_objects(keys)
self.container.delete_objects(keys)
def list_objects(self) -> Iterable[str]:
return self.container.list_all_objects()
def get_object_hash(self, key: str) -> str:
"""Return the SHA-256 hash of an object stored under the given key.
.. important::
A SHA-256 hash should always be returned,
to ensure consistency across different repository implementations.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
"""
if not self.has_object(key):
raise FileNotFoundError(key)
if self.container.hash_type != 'sha256':
return super().get_object_hash(key)
return key
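# Illustrative usage sketch (not part of the original module); assumes the
# ``disk_objectstore`` package and a writable scratch folder:
#   import io
#   from disk_objectstore import Container
#   backend = DiskObjectStoreRepositoryBackend(Container('/tmp/dostore'))
#   backend.initialise()
#   key = backend.put_object_from_filelike(io.BytesIO(b'some content'))
#   with backend.open(key) as handle:
#       assert handle.read() == b'some content'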
|
StarcoderdataPython
|
6639036
|
<reponame>Hopson97/AppleFall
import graphics as gfx
import common
import vector
import tiles
import apple as appleF
import math
import drawer
def createAndroid(window):
'''Creates the Android sprite (based on Dr. <NAME>'s code)'''
coords = drawer.loadSpriteVerticies("android")
body = gfx.Polygon(coords)
head = gfx.Circle(gfx.Point(30, 20), 20)
eye1 = gfx.Circle(gfx.Point(22, 7), 4)
eye2 = gfx.Circle(gfx.Point(37, 7), 4)
droidParts = [body, head, eye1, eye2]
eyes = [eye1, eye2]
for part in droidParts:
if part in eyes:
colour = "white"
else:
colour = "green"
part.setFill(colour)
part.setOutline(colour)
part.move(500, tiles.BASE_HEIGHT - 45)
part.draw(window)
return droidParts
def handleInput(key, velocity):
'''Adjust the horizontal velocity based on "a"/"d" key input'''
acceleration = 1.25
if key == "a":
if (velocity > 0):
velocity = -velocity / 2
velocity -= acceleration
elif key == "d":
if (velocity < 0):
velocity = -velocity / 2
velocity += acceleration
return velocity
def clampVelocity(velX):
clamp = 10
if (velX > clamp):
return clamp
elif (velX < -clamp):
return -clamp
else:
return velX
def movePlayer(sprite, amount):
for part in sprite:
part.move(amount, 0)
def tryCollideMissingTiles(playerVel, minIndex, maxIndex, isTilesActive):
'''Collides player with any tiles that might be missing'''
if playerVel < 0: #moving left
if not isTilesActive[minIndex]:
playerVel = 0.5
elif playerVel > 0:
if not isTilesActive[maxIndex]:
playerVel = -0.5
return playerVel
def tryCollideWindowEdges(playerVel, minX, maxX):
'''Collides player with window edges'''
if minX < 0:
playerVel = 0.5
elif maxX > common.WINDOW_WIDTH:
playerVel = -0.5
return playerVel
def tryCollideEdges(playerVel, minX, maxX, isTilesActive):
'''Collides player with the X-edges of the window, as well as inactive tiles'''
tileIndexMin = math.floor((minX + 15) / tiles.TILE_SIZE)
tileIndexMax = math.ceil ((maxX - 15) / tiles.TILE_SIZE) - 1
playerVel = tryCollideMissingTiles(playerVel, tileIndexMin, tileIndexMax, isTilesActive)
playerVel = tryCollideWindowEdges (playerVel, minX, maxX)
return playerVel * 0.91 #apply velocity dampening
def isTochingApple(apple, minX):
'''Returns True if the player is touching an apple'''
appleX = apple.getCenter().x
appleY = apple.getCenter().y
return vector.distance(minX + 30, tiles.BASE_HEIGHT - 20,
appleX, appleY) < appleF.DIAMETER
def shouldFireProjectile(window):
'''Returns true on mouse click (which is > 10px away from last click)'''
mousePoint = window.checkMouse()
if mousePoint != None:
x1 = shouldFireProjectile.oldPos.x
x2 = mousePoint.x
y1 = shouldFireProjectile.oldPos.y
y2 = mousePoint.y
if vector.distance(x1, y1, x2, y2) > 10:
shouldFireProjectile.oldPos = mousePoint
return True, mousePoint
return False, shouldFireProjectile.oldPos
shouldFireProjectile.oldPos = gfx.Point(-100, -100)
|
StarcoderdataPython
|
6415220
|
########## 6.8.7. Chi-squared kernel ##########
# The chi-squared kernel is a very popular choice for training non-linear SVMs in computer vision applications. It can be computed using chi2_kernel and then passed to an SVC with kernel="precomputed":
from sklearn.svm import SVC
from sklearn.metrics.pairwise import chi2_kernel
X = [[0, 1], [1, 0], [.2, .8], [.7, .3]]
y = [0, 1, 0, 1]
K = chi2_kernel(X, gamma=.5)
K
#array([[1. , 0.36787944, 0.89483932, 0.58364548],
# [0.36787944, 1. , 0.51341712, 0.83822343],
# [0.89483932, 0.51341712, 1. , 0.7768366 ],
# [0.58364548, 0.83822343, 0.7768366 , 1. ]])
svm = SVC(kernel='precomputed').fit(K, y)
svm.predict(K)
#array([0, 1, 0, 1])
# It can also be used directly as the kernel argument:
svm = SVC(kernel=chi2_kernel).fit(X, y)
svm.predict(X)
#array([0, 1, 0, 1])
# The chi-squared kernel is given by
# k(x, y) = \exp \left (-\gamma \sum_i \frac{(x[i] - y[i]) ^ 2}{x[i] + y[i]} \right )
# The data is assumed to be non-negative, and is often normalized to have an L1 norm of one. The normalization is rationalized with the connection to the chi-squared distance, which is a distance between discrete probability distributions.
# The chi-squared kernel is most commonly used on histograms (bags) of visual words.
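# Illustrative example (not in the original text): rows can be L1-normalized
# before computing the kernel; here X is already L1-normalized, so K is unchanged.
from sklearn.preprocessing import normalize
X_norm = normalize(X, norm='l1')
K_norm = chi2_kernel(X_norm, gamma=.5)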
## References:
## <NAME>. and <NAME>. and <NAME>. and <NAME>. Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 (https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf)
|
StarcoderdataPython
|
351837
|
import os
import tempfile
import unittest
import shutil
from typing import Dict
from atcodertools.client.atcoder import AtCoderClient
from atcodertools.client.models.contest import Contest
from atcodertools.client.models.problem import Problem
from atcodertools.common.language import CPP
from atcodertools.tools import submit
RESOURCE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"./resources/test_atcoder_client_mock/")
class MockResponse:
def __init__(self, text=None, url=None):
self.text = text
self.url = url
@classmethod
def response_from(cls, filename):
with open(filename, 'r') as f:
return MockResponse(text=f.read())
def fake_resp(filename: str):
return MockResponse.response_from(os.path.join(RESOURCE_DIR, filename))
def create_fake_request_func(get_url_to_resp: Dict[str, MockResponse] = None,
post_url_to_resp: Dict[str, MockResponse] = None,
):
def func(url, method="GET", **kwargs):
if method == "GET":
return get_url_to_resp.get(url)
return post_url_to_resp.get(url)
return func
def restore_client_after_run(func):
def test_decorated(*args, **kwargs):
client = AtCoderClient()
prev = client._request
func(*args, **kwargs)
client._request = prev
return test_decorated
class TestAtCoderClientMock(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.client = AtCoderClient()
@restore_client_after_run
def test_download_submission_list(self):
contest = Contest("arc001")
self.client._request = create_fake_request_func(
{
contest.get_my_submissions_url(1): fake_resp("my_submissions/1.html"),
contest.get_my_submissions_url(2): fake_resp("my_submissions/2.html"),
contest.get_my_submissions_url(3): fake_resp("my_submissions/3.html"),
contest.get_my_submissions_url(4): fake_resp("my_submissions/4.html")
}
)
submissions = self.client.download_submission_list(Contest("arc001"))
submission_ids = [x.submission_id for x in submissions]
self.assertEqual(50, len(submission_ids))
self.assertEqual(sorted(submission_ids, reverse=True), submission_ids)
@restore_client_after_run
def test_submit_source_code(self):
contest = Contest("arc001")
problem = Problem(contest, "A", "arc001_1")
self.client._request = create_fake_request_func(
{contest.get_submit_url(): fake_resp("submit/after_get.html")},
{contest.get_submit_url(): fake_resp("submit/after_post.html")}
)
# test two patterns (str, Language object) for parameter lang
for lang in [CPP, "C++14 (GCC 5.4.1)"]:
submission = self.client.submit_source_code(
contest, problem, lang, "x")
self.assertEqual(13269587, submission.submission_id)
self.assertEqual("arc001_1", submission.problem_id)
@restore_client_after_run
def test_login_success(self):
self.client._request = create_fake_request_func(
post_url_to_resp={
"https://atcoder.jp/login": fake_resp("after_login.html")
}
)
def fake_supplier():
return "@@@ invalid user name @@@", "@@@ password @@@"
self.client.login(credential_supplier=fake_supplier,
use_local_session_cache=False)
@restore_client_after_run
def test_check_logging_in_success(self):
setting_url = "https://atcoder.jp/home"
self.client._request = create_fake_request_func(
{setting_url: fake_resp("after_login.html")},
)
self.assertTrue(self.client.check_logging_in())
@restore_client_after_run
def test_check_logging_in_fail(self):
setting_url = "https://atcoder.jp/home"
self.client._request = create_fake_request_func(
{setting_url: fake_resp("before_login.html")}
)
self.assertFalse(self.client.check_logging_in())
@restore_client_after_run
def test_exec_on_submit(self):
global submitted_source_code
submitted_source_code = None
def create_fake_request_func_for_source(get_url_to_resp: Dict[str, MockResponse] = None,
post_url_to_resp: Dict[str,
MockResponse] = None,
):
global submitted_source_code
def func(url, method="GET", **kwargs):
global submitted_source_code
if method == "GET":
return get_url_to_resp.get(url)
submitted_source_code = kwargs["data"]['sourceCode']
return post_url_to_resp.get(url)
return func
contest = Contest("abc215")
self.client._request = create_fake_request_func_for_source(
{contest.get_submit_url(): fake_resp("submit/after_get.html")},
{contest.get_submit_url(): fake_resp("submit/after_post.html")}
)
test_dir = os.path.join(self.temp_dir, "exec_on_submit")
shutil.copytree(os.path.join(RESOURCE_DIR, "exec_on_submit"), test_dir)
config_path = os.path.join(test_dir, "config.toml")
submit.main('', ["--dir", test_dir, "--config",
config_path, "-f", "-u"], client=self.client)
self.assertTrue(os.path.exists(os.path.join(
test_dir, "exec_before_submit_is_completed")))
self.assertEqual(submitted_source_code, "Kyuuridenamida\n")
self.assertTrue(os.path.exists(os.path.join(
test_dir, "exec_after_submit_is_completed")))
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
1931444
|
def findDecision(obj): #obj[0]: Driving_to, obj[1]: Passanger, obj[2]: Weather, obj[3]: Temperature, obj[4]: Time, obj[5]: Coupon, obj[6]: Coupon_validity, obj[7]: Gender, obj[8]: Age, obj[9]: Maritalstatus, obj[10]: Children, obj[11]: Education, obj[12]: Occupation, obj[13]: Income, obj[14]: Bar, obj[15]: Coffeehouse, obj[16]: Carryaway, obj[17]: Restaurantlessthan20, obj[18]: Restaurant20to50, obj[19]: Direction_same, obj[20]: Distance
# {"feature": "Coupon", "instances": 204, "metric_value": 0.9822, "depth": 1}
if obj[5]>1:
# {"feature": "Coffeehouse", "instances": 148, "metric_value": 0.9291, "depth": 2}
if obj[15]>1.0:
# {"feature": "Education", "instances": 74, "metric_value": 0.7273, "depth": 3}
if obj[11]<=2:
# {"feature": "Time", "instances": 59, "metric_value": 0.5726, "depth": 4}
if obj[4]>0:
# {"feature": "Carryaway", "instances": 43, "metric_value": 0.6931, "depth": 5}
if obj[16]<=3.0:
# {"feature": "Children", "instances": 38, "metric_value": 0.5618, "depth": 6}
if obj[10]<=0:
# {"feature": "Maritalstatus", "instances": 25, "metric_value": 0.7219, "depth": 7}
if obj[9]>0:
# {"feature": "Income", "instances": 20, "metric_value": 0.469, "depth": 8}
if obj[13]>3:
return 'True'
elif obj[13]<=3:
# {"feature": "Driving_to", "instances": 8, "metric_value": 0.8113, "depth": 9}
if obj[0]<=0:
# {"feature": "Temperature", "instances": 4, "metric_value": 1.0, "depth": 10}
if obj[3]>55:
return 'False'
elif obj[3]<=55:
return 'True'
else: return 'True'
elif obj[0]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[9]<=0:
# {"feature": "Income", "instances": 5, "metric_value": 0.971, "depth": 8}
if obj[13]<=4:
# {"feature": "Age", "instances": 3, "metric_value": 0.9183, "depth": 9}
if obj[8]<=2:
return 'True'
elif obj[8]>2:
return 'False'
else: return 'False'
elif obj[13]>4:
return 'False'
else: return 'False'
else: return 'False'
elif obj[10]>0:
return 'True'
else: return 'True'
elif obj[16]>3.0:
# {"feature": "Driving_to", "instances": 5, "metric_value": 0.971, "depth": 6}
if obj[0]>0:
return 'False'
elif obj[0]<=0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[4]<=0:
return 'True'
else: return 'True'
elif obj[11]>2:
# {"feature": "Coupon_validity", "instances": 15, "metric_value": 0.9968, "depth": 4}
if obj[6]>0:
# {"feature": "Age", "instances": 9, "metric_value": 0.7642, "depth": 5}
if obj[8]<=3:
return 'False'
elif obj[8]>3:
return 'True'
else: return 'True'
elif obj[6]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[15]<=1.0:
# {"feature": "Occupation", "instances": 74, "metric_value": 0.9995, "depth": 3}
if obj[12]>2:
# {"feature": "Passanger", "instances": 63, "metric_value": 0.9911, "depth": 4}
if obj[1]<=1:
# {"feature": "Restaurantlessthan20", "instances": 48, "metric_value": 0.9377, "depth": 5}
if obj[17]<=2.0:
# {"feature": "Coupon_validity", "instances": 40, "metric_value": 0.9837, "depth": 6}
if obj[6]<=0:
# {"feature": "Time", "instances": 21, "metric_value": 0.9852, "depth": 7}
if obj[4]>0:
# {"feature": "Education", "instances": 18, "metric_value": 1.0, "depth": 8}
if obj[11]<=2:
# {"feature": "Restaurant20to50", "instances": 16, "metric_value": 0.9887, "depth": 9}
if obj[18]<=1.0:
# {"feature": "Bar", "instances": 14, "metric_value": 0.9403, "depth": 10}
if obj[14]<=2.0:
# {"feature": "Income", "instances": 13, "metric_value": 0.8905, "depth": 11}
if obj[13]<=4:
# {"feature": "Direction_same", "instances": 10, "metric_value": 0.971, "depth": 12}
if obj[19]<=0:
# {"feature": "Age", "instances": 6, "metric_value": 0.65, "depth": 13}
if obj[8]>0:
return 'False'
elif obj[8]<=0:
# {"feature": "Weather", "instances": 2, "metric_value": 1.0, "depth": 14}
if obj[2]>0:
return 'True'
elif obj[2]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[19]>0:
# {"feature": "Maritalstatus", "instances": 4, "metric_value": 0.8113, "depth": 13}
if obj[9]<=0:
return 'True'
elif obj[9]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[13]>4:
return 'False'
else: return 'False'
elif obj[14]>2.0:
return 'True'
else: return 'True'
elif obj[18]>1.0:
return 'True'
else: return 'True'
elif obj[11]>2:
return 'True'
else: return 'True'
elif obj[4]<=0:
return 'True'
else: return 'True'
elif obj[6]>0:
# {"feature": "Income", "instances": 19, "metric_value": 0.8315, "depth": 7}
if obj[13]>0:
# {"feature": "Maritalstatus", "instances": 17, "metric_value": 0.6723, "depth": 8}
if obj[9]<=2:
# {"feature": "Bar", "instances": 16, "metric_value": 0.5436, "depth": 9}
if obj[14]<=1.0:
return 'False'
elif obj[14]>1.0:
# {"feature": "Education", "instances": 5, "metric_value": 0.971, "depth": 10}
if obj[11]>0:
# {"feature": "Driving_to", "instances": 3, "metric_value": 0.9183, "depth": 11}
if obj[0]<=1:
# {"feature": "Weather", "instances": 2, "metric_value": 1.0, "depth": 12}
if obj[2]<=0:
return 'True'
elif obj[2]>0:
return 'False'
else: return 'False'
elif obj[0]>1:
return 'True'
else: return 'True'
elif obj[11]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[9]>2:
return 'True'
else: return 'True'
elif obj[13]<=0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[17]>2.0:
return 'False'
else: return 'False'
elif obj[1]>1:
# {"feature": "Age", "instances": 15, "metric_value": 0.8366, "depth": 5}
if obj[8]>3:
# {"feature": "Time", "instances": 8, "metric_value": 1.0, "depth": 6}
if obj[4]<=2:
# {"feature": "Restaurantlessthan20", "instances": 6, "metric_value": 0.9183, "depth": 7}
if obj[17]>2.0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.9183, "depth": 8}
if obj[7]<=0:
return 'True'
elif obj[7]>0:
return 'False'
else: return 'False'
elif obj[17]<=2.0:
return 'False'
else: return 'False'
elif obj[4]>2:
return 'True'
else: return 'True'
elif obj[8]<=3:
return 'True'
else: return 'True'
else: return 'True'
elif obj[12]<=2:
# {"feature": "Carryaway", "instances": 11, "metric_value": 0.4395, "depth": 4}
if obj[16]<=3.0:
return 'True'
elif obj[16]>3.0:
# {"feature": "Passanger", "instances": 2, "metric_value": 1.0, "depth": 5}
if obj[1]>1:
return 'False'
elif obj[1]<=1:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[5]<=1:
# {"feature": "Bar", "instances": 56, "metric_value": 0.9544, "depth": 2}
if obj[14]>0.0:
# {"feature": "Restaurant20to50", "instances": 32, "metric_value": 0.9972, "depth": 3}
if obj[18]<=2.0:
# {"feature": "Income", "instances": 28, "metric_value": 0.9963, "depth": 4}
if obj[13]>0:
# {"feature": "Restaurantlessthan20", "instances": 26, "metric_value": 0.9829, "depth": 5}
if obj[17]<=3.0:
# {"feature": "Driving_to", "instances": 22, "metric_value": 1.0, "depth": 6}
if obj[0]<=1:
# {"feature": "Education", "instances": 14, "metric_value": 0.9403, "depth": 7}
if obj[11]<=2:
# {"feature": "Time", "instances": 10, "metric_value": 1.0, "depth": 8}
if obj[4]<=3:
# {"feature": "Weather", "instances": 7, "metric_value": 0.8631, "depth": 9}
if obj[2]<=0:
return 'False'
elif obj[2]>0:
# {"feature": "Age", "instances": 3, "metric_value": 0.9183, "depth": 10}
if obj[8]>1:
return 'True'
elif obj[8]<=1:
return 'False'
else: return 'False'
else: return 'True'
elif obj[4]>3:
return 'True'
else: return 'True'
elif obj[11]>2:
return 'False'
else: return 'False'
elif obj[0]>1:
# {"feature": "Occupation", "instances": 8, "metric_value": 0.8113, "depth": 7}
if obj[12]<=7:
# {"feature": "Time", "instances": 4, "metric_value": 1.0, "depth": 8}
if obj[4]<=0:
# {"feature": "Age", "instances": 3, "metric_value": 0.9183, "depth": 9}
if obj[8]>2:
return 'True'
elif obj[8]<=2:
return 'False'
else: return 'False'
elif obj[4]>0:
return 'False'
else: return 'False'
elif obj[12]>7:
return 'True'
else: return 'True'
else: return 'True'
elif obj[17]>3.0:
return 'False'
else: return 'False'
elif obj[13]<=0:
return 'True'
else: return 'True'
elif obj[18]>2.0:
return 'True'
else: return 'True'
elif obj[14]<=0.0:
# {"feature": "Coffeehouse", "instances": 24, "metric_value": 0.65, "depth": 3}
if obj[15]>0.0:
# {"feature": "Education", "instances": 18, "metric_value": 0.3095, "depth": 4}
if obj[11]<=3:
return 'False'
elif obj[11]>3:
# {"feature": "Coupon_validity", "instances": 4, "metric_value": 0.8113, "depth": 5}
if obj[6]<=0:
return 'False'
elif obj[6]>0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[15]<=0.0:
# {"feature": "Passanger", "instances": 6, "metric_value": 1.0, "depth": 4}
if obj[1]<=1:
# {"feature": "Time", "instances": 4, "metric_value": 0.8113, "depth": 5}
if obj[4]<=3:
return 'False'
elif obj[4]>3:
return 'True'
else: return 'True'
elif obj[1]>1:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
|
StarcoderdataPython
|
3436053
|
from django.urls import path
from . import views
app_name = "upload"
urlpatterns = [path("", views.image_upload, name="image_upload")]
|
StarcoderdataPython
|
11279288
|
<gh_stars>1-10
from django.core.management.base import BaseCommand, CommandError
from cablegate.cable.models import Cable, CableMetadata
class Command(BaseCommand):
#args = '<poll_id poll_id ...>'
#help = 'Closes the specified poll for voting'
def handle(self, *args, **options):
for cable in Cable.objects.filter(cablemetadata=None):
meta = CableMetadata()
meta.cable_id = cable.id
meta.save()
# Update cache
meta.get_words_count()
meta.get_words_freqdist()
self.stdout.write('Done\n')
|
StarcoderdataPython
|
224808
|
<reponame>dwxrycb123/Akina3
import nonebot
from database.mysql import *
from database.tables import *
from models.model import *
from config import *
@nonebot.scheduler.scheduled_job('cron', day='*')
async def clear_command_times():
record = await table_user_command_times.select_record('TRUE')
for item in record:
await table_user_command_times.update_record(CMD_TIMES, item)
print('command times cleared!')
|
StarcoderdataPython
|
8067142
|
#!/usr/bin/env python
Copyright = """
Copyright 2020 © <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@Author : <NAME>
@Copyright: <NAME> & <NAME> © Copyright 2020
@INPIREDBYGF: <NAME> <3
"""
import datetime
import hashlib
class Block:
def __init__(self, previous_block_hash, data, timestamp):
self.previous_block_hash = previous_block_hash
self.data = data
self.timestamp = timestamp
self.hash = self.get_hash()
@staticmethod
def create_genesis_block():
return Block("0", "0", datetime.datetime.now())
def get_hash(self):
header_bin = (str(self.previous_block_hash) +
str(self.data) +
str(self.timestamp))
inner_hash = hashlib.sha256(header_bin.encode()).hexdigest().encode()
outer_hash = hashlib.sha256(inner_hash).hexdigest()
return outer_hash
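# Illustrative usage sketch: each new block stores the hash of the block before it. The
# variable name `chain` and the sample payloads are assumptions made for demonstration only.
if __name__ == "__main__":
    chain = [Block.create_genesis_block()]
    for payload in ["tx-1", "tx-2", "tx-3"]:
        chain.append(Block(chain[-1].hash, payload, datetime.datetime.now()))
    for block in chain:
        print(block.previous_block_hash, "->", block.hash)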
|
StarcoderdataPython
|
9657815
|
from collections import defaultdict, deque, Counter
from itertools import combinations, combinations_with_replacement, permutations
from functools import reduce
import math
# import numpy as np
from operator import add, delitem, mul, itemgetter, attrgetter
import re
DS = [[-1, 0], [1, 0], [0, 1], [0, -1]]
DS8 = DS + [[-1, -1], [1, -1], [-1, 1], [1, 1]]
def arounds_inside(x, y, diagonals, w, h):
    '''Return the neighbouring cells of (x, y), optionally including diagonals, that lie inside a w x h grid.'''
    ret = []
    for dx, dy in DS8 if diagonals else DS:
        nx, ny = x + dx, y + dy
        if 0 <= nx < w and 0 <= ny < h:
            ret.append((nx, ny))
    return ret
CHALLENGE_DAY = "12"
REAL = [10, 7]
SAMPLE_EXPECTED = 739785
if SAMPLE_EXPECTED:
SAMPLE = [4, 8]
die = 1
rolls = 0
def solve(raw):
spots = raw
# Debug here to make sure parsing is good.
scores = [0, 0]
global die
global rolls
die = 1
rolls = 0
def roll():
global die
global rolls
ret = die
die += 1
rolls += 1
if die > 100:
die = 1
return ret
def play(pi, scores):
move = roll() + roll() + roll()
next = ((spots[pi] - 1) + move) % 10 + 1
spots[pi] = next
scores[pi] += next
while True:
for pi in [0, 1]:
play(pi, scores)
# print(pi, scores)
if scores[pi] >= 1000:
return rolls * scores[(pi + 1) % 2]
return ret
if SAMPLE_EXPECTED != None:
sample = solve(SAMPLE)
if sample != SAMPLE_EXPECTED:
print("SAMPLE FAILED: ", sample, " != ", SAMPLE_EXPECTED)
assert sample == SAMPLE_EXPECTED
print("\n*** SAMPLE PASSED ***\n")
else:
print("Skipping sample")
solved = solve(REAL)
assert 874698 < solved
print(solved)
|
StarcoderdataPython
|
4821784
|
from flask import current_app
from app.default.default_values import types_sales
from sqlalchemy.orm import Session
from app.models.types_sales.type_sale import TypeSaleModel
def default_types_sales():
try:
session: Session = current_app.db.session
types_sales_found = TypeSaleModel.query.all()
if not types_sales_found:
type_sales_list = []
for type in types_sales:
type_sales_list.append(TypeSaleModel(**type))
session.add_all(type_sales_list)
session.commit()
except Exception as e:
raise e
|
StarcoderdataPython
|
11353757
|
# checking for already existing files
import os
# downloading/extracting mnist data
import gzip
from tqdm import tqdm # visualising progress
import numpy as np # loading data from buffer
from fetch.ml import Layer, Variable, Session
from fetch.ml import CrossEntropyLoss
import matplotlib.pyplot as plt
import line_profiler
DO_PLOTTING = 0
def plot_weights(layers):
for layer in layers:
plt.hist(layer.weights().data())
plt.show()
plt.close()
class MLearner():
def __init__(self):
self.data_url = 'http://yann.lecun.com/exdb/mnist/'
self.x_tr_filename = 'train-images-idx3-ubyte.gz'
self.y_tr_filename = 'train-labels-idx1-ubyte.gz'
self.x_te_filename = 't10k-images-idx3-ubyte.gz'
self.y_te_filename = 't10k-labels-idx1-ubyte.gz'
self.training_size = 2000
self.validation_size = 50
self.n_epochs = 30
self.batch_size = 50
self.alpha = 0.2
self.mnist_input_size = 784 # pixels in 28 * 28 mnist images
self.net = [10] # size of hidden layers
self.mnist_output_size = 10 # 10 possible characters to recognise
self.has_biases = True
self.activation_fn = 'Sigmoid' # LeakyRelu might be good?
self.layers = []
self.sess = None
self.initialise_network()
self.X_batch = self.sess.Variable([self.batch_size, 784], "X_batch")
self.Y_batch = self.sess.Variable([self.batch_size, 10], "Y_batch")
def initialise_network(self):
self.sess = Session()
# definition of the network layers
# for i in range(len(self.layers)):
# self.net.append(self.layers[i])
self.net.append(self.mnist_output_size)
self.layers.append(
self.sess.Layer(
self.mnist_input_size,
self.net[0],
self.activation_fn,
"input_layer"))
if len(self.net) > 2:
for i in range(len(self.net) - 2):
self.layers.append(
self.sess.Layer(
self.net[i],
self.net[i + 1],
self.activation_fn, "layer_" + str(i + 1)))
self.layers.append(
self.sess.Layer(
self.net[-1],
self.mnist_output_size, self.activation_fn, "output_layer"))
# switch off biases (since we didn't bother with them for the numpy
# example)
if not(self.has_biases):
for cur_layer in self.layers:
cur_layer.BiasesSetup(False)
# copy preinitialised numpy weights over into our layers
weights = np.load('weights.npy')
for i in range(len(weights)):
self.layers[i].weights().FromNumpy(weights[i])
# for j in range(len(weights[i])):
# self.layers[i].weights().Set(i, j, weights[i][j])
if DO_PLOTTING:
plot_weights(self.layers)
self.y_pred = self.layers[-1].Output()
return
def load_data(self, one_hot=True, reshape=None):
x_tr = self.load_images(
self.x_tr_filename, self.training_size, "X_train")
y_tr = self.load_labels(
self.y_tr_filename, self.training_size, "Y_train")
x_te = self.load_images(
self.x_te_filename, self.validation_size, "X_test")
y_te = self.load_labels(
self.y_te_filename, self.validation_size, "Y_test")
if one_hot:
y_tr_onehot = Session.Zeroes(
self.sess, [y_tr.size(), self.mnist_output_size])
y_te_onehot = Session.Zeroes(
self.sess, [y_te.size(), self.mnist_output_size])
for i in range(y_tr.size()):
y_tr_onehot[i, int(y_tr[i])] = 1
for i in range(y_te.size()):
y_te_onehot[i, int(y_te[i])] = 1
self.y_tr = y_tr_onehot
self.y_te = y_te_onehot
else:
self.y_tr = y_tr
self.y_te = y_te
if reshape:
x_tr, x_te = [x.reshape(*reshape) for x in (x_tr, x_te)]
self.x_tr = x_tr
self.x_te = x_te
def load_images(self, filename, data_size, name):
self.download(filename)
with gzip.open(filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
data = data.reshape(-1, 28 * 28) / 256
nd_data = self.sess.Variable([data_size, 784], name)
nd_data.FromNumpy(data[:data_size, :])
return nd_data
def load_labels(self, filename, data_size, name):
self.download(filename)
with gzip.open(filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
data.reshape(np.shape(data)[0], -1)
nd_data = self.sess.Variable([data_size, 1], name)
nd_data.FromNumpy(data[:data_size].reshape(data_size, -1))
# nd_data = Layer(data_size, 1, self.sess)
# nd_data.data().FromNumpy(data[:data_size].reshape(data_size, -1))
# return nd_data
return nd_data
def download(self, filename):
if not os.path.exists(filename):
from urllib.request import urlretrieve
print("Downloading %s" % filename)
urlretrieve(self.data_url + filename, filename)
return
# feed forward pass of a network
# take X input and the network (defined as a list of weights)
# no biases?
def feed_forward(self, X):
a = [X]
activate = True
for idx in range(len(self.layers)):
if (idx == (len(self.layers) - 1)):
activate = False
a.append(self.layers[idx].Forward(a[-1], activate))
return a
def calculate_loss(self, X, Y):
loss = CrossEntropyLoss(X, Y, self.sess)
return loss
def do_one_pred(self):
return self.sess.Predict(self.X_batch, self.layers[-1].Output())
def predict(self):
n_samples = self.y_te.shape()[0]
preds = []
if (self.batch_size < n_samples):
for cur_rep in range(0, n_samples, self.batch_size):
self.assign_batch(self.x_te, self.y_te, cur_rep)
preds.append(self.do_one_pred())
elif (self.batch_size > self.y_te.shape()[0]):
print("not implemented prediction padding yet")
raise NotImplementedError
else:
self.assign_batch(self.x_te, self.y_te, 0)
preds.append(self.do_one_pred())
return preds
@profile
def assign_batch(self, x, y, cur_rep):
self.X_batch.SetRange(
[[cur_rep, cur_rep + self.batch_size, 1], [0, 784, 1]], x)
self.Y_batch.SetRange(
[[cur_rep, cur_rep + self.batch_size, 1], [0, 10, 1]], y)
return
def print_accuracy(self, cur_pred):
max_pred = []
for item in cur_pred:
max_pred.extend(item.ArgMax(1))
gt = self.y_te.data().ArgMax(1)
sum_acc = 0
for i in range(self.y_te.shape()[0]):
sum_acc += (gt[i] == max_pred[i])
sum_acc /= (self.y_te.data().size() / 10)
print("\taccuracy: ", sum_acc)
return
@profile
def train(self):
self.sess.SetInput(self.layers[0], self.X_batch)
for i in range(len(self.layers) - 1):
self.sess.SetInput(self.layers[i + 1], self.layers[i].Output())
# loss calculation
self.loss = self.calculate_loss(self.layers[-1].Output(), self.Y_batch)
# epochs
for i in range(self.n_epochs):
print("epoch ", i, ": ")
# training batches
for j in tqdm(range(0, self.x_tr.shape()[0], self.batch_size)):
# # assign fresh data batch
self.assign_batch(self.x_tr, self.y_tr, j)
# # loss calculation
# loss = self.calculate_loss(self.layers[-1].Output(), self.Y_batch)
# back propagate
self.sess.BackProp(self.X_batch, self.loss, self.alpha, 1)
sum_loss = 0
for i in range(self.loss.size()):
sum_loss += self.loss.data()[i]
# # print("\n")
# # for j in range(1):
# # sys.stdout.write('{:0.13f}'.format(loss.data()[i]) + "\t")
print("sumloss: " + str(sum_loss))
cur_pred = self.predict()
self.print_accuracy(cur_pred)
return
|
StarcoderdataPython
|
5043008
|
<reponame>yuzhounaut/SpaceM
from . import FIJIcalls, manipulations
__all__ = ["FIJIcalls", "manipulations"]
|
StarcoderdataPython
|
1916351
|
<gh_stars>1-10
import nltk.chat.eliza as el
import nltk.chat.iesha as ie
import nltk.chat.suntsu as sun
import nltk.chat.zen as zen
import nltk.chat.rude as rude
from chatterbot import ChatBot
from chatterbot.training.trainers import ChatterBotCorpusTrainer
from chatterbot.utils import clean
from nltk.chat import util
from chatbot import chatbot
import sys, download, os
MODEL_PATH = "./save/model-try_1/model.ckpt"
MODEL_URL = "https://www.dropbox.com/s/hyxoj49uw0g4dn8/model.ckpt?dl=0"
nltk_bot_lookup = {"Sun Tsu": sun.suntsu_chat, "Eliza": el.eliza_chat, "*iesha*": ie.iesha_chat, "Chad": rude.rude_chat,
"Zen Master": zen.zen_chat}
class Bot:
def __init__(self, bot_name="eliza"):
if bot_name.lower() == "sun":
self._bot = util.Chat(sun.pairs, sun.reflections)
self._greetings = "Welcome, my child. Do you seek enlightenment?"
self._name = "<NAME>"
elif bot_name.lower() == "iesha":
self._bot = util.Chat(ie.pairs, ie.reflections)
self._greetings = "hi!! i'm iesha! who r u??!"
self._name = "*iesha*"
elif bot_name.lower() == "zen":
self._bot = util.Chat(zen.responses, zen.reflections)
self._greetings = "Look beyond mere words and letters - look into your mind"
self._name = "Zen Master"
elif bot_name.lower() == "rude":
self._bot = util.Chat(rude.pairs, rude.reflections)
self._greetings = "Wow, I look real fresh today. Hey!"
self._name = "Chad"
elif bot_name.lower() == "eliza":
self._bot = util.Chat(el.pairs, el.reflections)
self._greetings = "Hello. How are you feeling today?"
self._name = "Eliza"
elif bot_name.lower() == "deepqa":
if not os.path.isfile(MODEL_PATH) or os.path.getsize(download.MODEL_PATH) != 154616514:
self._bot = util.Chat(el.pairs, el.reflections)
self._greetings = "I am not ready yet. Reload me in 5 minutes."
self._name = "Jessica"
else:
self._bot = chatbot.Chatbot()
self._bot.main(['--modelTag', 'try_1', '--test', 'daemon', '--rootDir', '.'])
self._greetings = "Hi. I currently only understand sentences with < 10 words."
self._name = "Jessica"
else:
self._corpus_path = "CopyCat.db"
self._bot = ChatBot(bot_name,
storage_adapter="chatterbot.adapters.storage.JsonDatabaseAdapter",
logic_adapters=["chatterbot.adapters.logic.ClosestMatchAdapter"],
input_adapter="chatterbot.adapters.input.VariableInputTypeAdapter",
output_adapter="chatterbot.adapters.output.OutputFormatAdapter",
database=self._corpus_path)
self._bot.set_trainer(ChatterBotCorpusTrainer)
self._name = bot_name
self._greetings = "You are speaking to " + bot_name + "."
def respond(self, query):
query = clean.clean(clean.clean_whitespace(query))
if self._name in nltk_bot_lookup.keys():
return self._bot.respond(query)
elif not os.path.isfile(MODEL_PATH) or os.path.getsize(download.MODEL_PATH) != 154616514:
return str(self._greetings)
else:
return str(self._bot.get_response(query))
def greet(self):
return self._greetings
def setup(self, corpus="chatterbot.corpus.english"):
if self._name not in nltk_bot_lookup.keys() and self._name != "Jessica":
self._bot.train(corpus)
def name(self):
return self._name
def location(self):
return self._corpus_path
def main():
"""Speak to an NLTK ChatBot through CLI."""
try:
bot_type = sys.argv.pop()
except IndexError:
raise IndexError("No bot type specified.")
if bot_type.lower() in nltk_bot_lookup.keys():
print("Hi, you are chatting to NLTK's {0} Chatbot.\n"
"Simply press Ctrl-D to exit.\n".format(bot_type))
nltk_bot_lookup.get(bot_type)() # init NLTK-bot
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
12864054
|
import json
import os
import subprocess
import sys
TEST_FILENAME = "tmp_py_file"
TEST_FOLDER = "clone_tests"
TESTS = [
("clone!( => move || {})",
"If you have nothing to clone, no need to use this macro!"),
("clone!(|| {})",
"If you have nothing to clone, no need to use this macro!"),
("clone!(|a, b| {})",
"If you have nothing to clone, no need to use this macro!"),
("clone!(@strong self => move |x| {})",
"Can't use `self` as variable name. Try storing it in a temporary variable or rename it using `as`."),
("clone!(@strong self.v => move |x| {})",
"Field accesses are not allowed as is, you must rename it!"),
("clone!(@weak v => @default-return false, || {})",
"Closure needs to be \"moved\" so please add `move` before closure"),
("clone!(@weak v => @default-return false, |bla| {})",
"Closure needs to be \"moved\" so please add `move` before closure"),
("clone!(@weak v => default-return false, move || {})",
"Missing `@` before `default-return`"),
("clone!(@weak v => @default-return false move || {})",
"Missing comma after `@default-return`'s value"),
("clone!(@yolo v => move || {})",
"Unknown keyword, only `weak` and `strong` are allowed"),
("clone!(v => move || {})",
"You need to specify if this is a weak or a strong clone."),
]
def convert_to_string(s):
if s.__class__.__name__ == 'bytes':
return s.decode('utf-8')
return s
def exec_command(command):
child = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = child.communicate()
return (child.returncode == 0, convert_to_string(stdout), convert_to_string(stderr))
def run_test(code, expected_str):
with open("{}/{}.rs".format(TEST_FOLDER, TEST_FILENAME), 'w') as f:
f.write('extern crate glib;use glib::clone;use std::rc::Rc;fn main(){{let v = Rc::new(1);{};}}'.format(code))
code, stdout, stderr = exec_command([
"bash",
"-c",
"cd {} && cargo build --message-format json".format(TEST_FOLDER),
])
os.remove("{}/{}.rs".format(TEST_FOLDER, TEST_FILENAME))
if code is True:
return "This isn't supposed to compile!"
parts = stdout.split('}\n{')
compiler_message = None
for (pos, part) in enumerate(parts):
try:
if pos > 0:
part = "{" + part
if pos + 1 < len(parts):
part += "}"
x = json.loads(part)
if (x["reason"] != "compiler-message"
or x["message"]["message"] == "aborting due to previous error"):
continue
compiler_message = x["message"]["message"]
break
except Exception:
continue
if compiler_message is None:
return "Weird issue: no compiler-message found..."
if expected_str not in compiler_message:
return "`{}` not found in `{}`".format(expected_str, compiler_message)
return None
def run_tests():
print("About to start the tests on the clone! macro.")
print("It might be slow to run the first one since cargo has to build dependencies...")
print("")
errors = 0
with open('{}/Cargo.toml'.format(TEST_FOLDER), 'w') as f:
f.write("""[package]
name = "test"
version = "0.0.1"
authors = ["gtk-rs developers"]
[dependencies]
glib = {{ path = ".." }}
[[bin]]
name = "{0}"
path = "{0}.rs"
""".format(TEST_FILENAME))
for (code, expected_str) in TESTS:
sys.stdout.write('Running `{}`...'.format(code))
sys.stdout.flush()
err = run_test(code, expected_str)
if err is not None:
print(" FAILED\n{}".format(err))
errors += 1
else:
print(" OK")
print("Ran {} tests, got {} failure{}".format(len(TESTS), errors, "s" if errors > 1 else ""))
os.remove("{}/Cargo.toml".format(TEST_FOLDER))
os.remove("{}/Cargo.lock".format(TEST_FOLDER))
exec_command(['bash', '-c', 'rm -r {}/target'.format(TEST_FOLDER)])
return errors
if __name__ == "__main__":
sys.exit(run_tests())
|
StarcoderdataPython
|
5031005
|
<gh_stars>0
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
YOUR_PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT']
# [START bigquery_ibis_connect]
import ibis
conn = ibis.bigquery.connect(
project_id=YOUR_PROJECT_ID,
dataset_id='bigquery-public-data.stackoverflow')
# [END bigquery_ibis_connect]
# [START bigquery_ibis_table]
table = conn.table('posts_questions')
print(table)
# BigQueryTable[table]
# name: bigquery-public-data.stackoverflow.posts_questions
# schema:
# id : int64
# title : string
# body : string
# accepted_answer_id : int64
# answer_count : int64
# comment_count : int64
# community_owned_date : timestamp
# creation_date : timestamp
# favorite_count : int64
# last_activity_date : timestamp
# last_edit_date : timestamp
# last_editor_display_name : string
# last_editor_user_id : int64
# owner_display_name : string
# owner_user_id : int64
# post_type_id : int64
# score : int64
# tags : string
# view_count : int64
# [END bigquery_ibis_table]
# [START bigquery_ibis_table_not_exist]
try:
doesnt_exist = conn.table('doesnt_exist')
except Exception as exp:
print(str(exp))
# Not found: Table bigquery-public-data:stackoverflow.doesnt_exist
# [END bigquery_ibis_table_not_exist]
# [START bigquery_ibis_table_cross_project]
reddit_posts_table = conn.table('2018_05', database='fh-bigquery.reddit_posts')
# [END bigquery_ibis_table_cross_project]
# [START bigquery_ibis_type_error]
try:
table.answer_count.upper()
except AttributeError as exp:
print(str(exp))
# 'IntegerColumn' object has no attribute 'upper'
# [END bigquery_ibis_type_error]
# [START bigquery_ibis_projection]
projection = table['creation_date', 'answer_count']
# [END bigquery_ibis_projection]
# [START bigquery_ibis_transform_timestamp]
projection = projection.mutate(year=projection.creation_date.year())
# [END bigquery_ibis_transform_timestamp]
# [START bigquery_ibis_transform_integer]
has_answer_boolean = projection.answer_count > 0
# [END bigquery_ibis_transform_integer]
# [START bigquery_ibis_transform_boolean]
has_answer_int = has_answer_boolean.ifelse(1, 0)
# [END bigquery_ibis_transform_boolean]
# [START bigquery_ibis_aggregate]
total_questions = projection.count()
percentage_answered = has_answer_int.mean() * 100
# [END bigquery_ibis_aggregate]
# [START bigquery_ibis_group_by]
expression = projection.groupby('year').aggregate(
total_questions=total_questions,
percentage_answered=percentage_answered,
).sort_by(ibis.desc(projection.year))
# [END bigquery_ibis_group_by]
print('\nExecuting query:')
# [START bigquery_ibis_execute]
print(expression.execute())
# year total_questions percentage_answered
# 0 2018 997508 66.776307
# 1 2017 2318405 75.898732
# 2 2016 2226478 84.193197
# 3 2015 2219791 86.170365
# 4 2014 2164895 88.356987
# 5 2013 2060753 91.533241
# 6 2012 1645498 94.510659
# 7 2011 1200601 97.149261
# 8 2010 694410 99.060497
# 9 2009 343879 99.655402
# 10 2008 58399 99.871573
# [END bigquery_ibis_execute]
print('\nThe previous query used the following SQL:')
# [START bigquery_ibis_compile]
print(expression.compile())
# SELECT `year`, count(*) AS `total_questions`,
# (IEEE_DIVIDE(sum(CASE WHEN `answer_count` > 0 THEN 1 ELSE 0 END), count(*))) * 100 AS `percentage_answered`
# FROM (
# SELECT `creation_date`, `answer_count`,
# EXTRACT(year from `creation_date`) AS `year`
# FROM `bigquery-public-data.stackoverflow.posts_questions`
# ) t0
# GROUP BY 1
# ORDER BY `year` DESC
# [END bigquery_ibis_compile]
# print('\nExecuting UDF query:')
# [START bigquery_ibis_udf]
# @ibis.bigquery.udf(['double'], 'double')
# def example_udf(value):
# return value + 1.0
#
# test_column = ibis.literal(1, type='double')
# expression = example_udf(test_column)
#
# print(conn.execute(expression))
# [END bigquery_ibis_udf]
print('\nExecuting join query:')
# [START bigquery_ibis_joins]
edu_table = conn.table(
'international_education',
database='bigquery-public-data.world_bank_intl_education')
edu_table = edu_table['value', 'year', 'country_code', 'indicator_code']
country_table = conn.table(
'country_code_iso',
database='bigquery-public-data.utility_us')
country_table = country_table['country_name', 'alpha_3_code']
expression = edu_table.join(
country_table,
[edu_table.country_code == country_table.alpha_3_code])
print(conn.execute(
expression[edu_table.year == 2016]
# Adult literacy rate.
[edu_table.indicator_code == 'SE.ADT.LITR.ZS']
.sort_by([ibis.desc(edu_table.value)])
.limit(20)
))
# [END bigquery_ibis_joins]
|
StarcoderdataPython
|
5007902
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Handler class."""
import json
import logging
import os
import urlparse
import webapp2
from google.appengine.api import modules
from lib.crud import crud_handler
class RequestHandler(webapp2.RequestHandler):
"""Base Handler class with useful utility methods."""
def Respond(self, msg, status=200, *args):
logging.info(msg, *args)
self.response.write(msg % args)
self.response.status = status
def BadRequest(self, msg, *args):
self.Respond(msg, 400, *args)
def NotFound(self, msg, *args):
self.Respond(msg, 404, *args)
def Error(self, msg, *args):
self.Respond(msg, 500, *args)
def SendJson(self, json_dict, pretty_print=False, include_prefix=True):
"""Send a json dict as a response."""
if include_prefix:
self.response.write(crud_handler.JSON_PREFIX)
if pretty_print:
self.response.write(
json.dumps(json_dict, indent=2, separators=(',', ': '),
default=crud_handler.JsonPrinter))
else:
self.response.write(
json.dumps(json_dict, default=crud_handler.JsonPrinter))
def GetModuleUrl(self, module=None, url=None):
"""Get the url for the current page but on the given module."""
url_parts = list(urlparse.urlsplit(url or self.request.url))
url_parts[1] = modules.get_hostname(module=module)
# If we're on https we need to replace version.backend.datapipeline
# with version-dot-backend-dot-datapipeline
if url_parts[0] == 'https':
hostname_parts = url_parts[1].rsplit('.', 2)
hostname_parts[0] = hostname_parts[0].replace('.', '-dot-')
url_parts[1] = '.'.join(hostname_parts)
return str(urlparse.urlunsplit(url_parts))
def OpenResource(self, path):
"""Open up a file that is included with this app engine deployment.
NOTE: you cannot open a file that has been pushed as a static file.
Args:
path: the path relative to the app/ directory to open.
Returns:
a file object to the opened file (don't forget to use 'with').
"""
base_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
file_name = os.path.join(base_dir, path)
return open(file_name)
|
StarcoderdataPython
|
4947908
|
<reponame>wgslr/agh-compilation-theory
#!/usr/bin/python
from collections import defaultdict
from copy import copy
import AST
allowed_operations = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: "")))
allowed_operations["+"]["int"]["int"] = "int"
allowed_operations["+"]["float"]["int"] = "float"
allowed_operations["+"]["int"]["float"] = "float"
allowed_operations["+"]["vector"]["vector"] = "vector"
allowed_operations["+"]["matrix"]["matrix"] = "matrix"
allowed_operations["-"]["int"]["int"] = "int"
allowed_operations["-"]["float"]["int"] = "float"
allowed_operations["-"]["int"]["float"] = "float"
allowed_operations["-"]["vector"]["vector"] = "vector"
allowed_operations["-"]["matrix"]["matrix"] = "matrix"
allowed_operations["*"]["int"]["int"] = "int"
allowed_operations["*"]["float"]["int"] = "float"
allowed_operations["*"]["int"]["float"] = "float"
allowed_operations["*"]["vector"]["vector"] = "vector"
allowed_operations["*"]["matrix"]["matrix"] = "matrix"
allowed_operations["*"]["vector"]["int"] = "vector"
allowed_operations["*"]["int"]["vector"] = "vector"
allowed_operations["*"]["matrix"]["int"] = "matrix"
allowed_operations["*"]["int"]["matrix"] = "matrix"
allowed_operations["*"]["vector"]["float"] = "vector"
allowed_operations["*"]["float"]["vector"] = "vector"
allowed_operations["*"]["matrix"]["float"] = "matrix"
allowed_operations["*"]["float"]["matrix"] = "matrix"
allowed_operations["/"]["int"]["int"] = "float"
allowed_operations["/"]["float"]["int"] = "float"
allowed_operations["/"]["int"]["float"] = "float"
allowed_operations["/"]["vector"]["int"] = "vector"
allowed_operations["/"]["int"]["vector"] = "vector"
allowed_operations["/"]["matrix"]["int"] = "matrix"
allowed_operations["/"]["int"]["matrix"] = "matrix"
allowed_operations["/"]["vector"]["int"] = "vector"
allowed_operations["/"]["matrix"]["int"] = "matrix"
allowed_operations["/"]["vector"]["float"] = "vector"
allowed_operations["/"]["matrix"]["float"] = "matrix"
allowed_operations[".+"]["matrix"]["int"] = "matrix"
allowed_operations[".+"]["matrix"]["float"] = "matrix"
allowed_operations[".+"]["vector"]["int"] = "vector"
allowed_operations[".+"]["vector"]["float"] = "vector"
allowed_operations[".-"]["matrix"]["int"] = "matrix"
allowed_operations[".-"]["matrix"]["float"] = "matrix"
allowed_operations[".-"]["vector"]["int"] = "vector"
allowed_operations[".-"]["vector"]["float"] = "vector"
allowed_operations[".*"]["matrix"]["int"] = "matrix"
allowed_operations[".*"]["matrix"]["float"] = "matrix"
allowed_operations[".*"]["vector"]["int"] = "vector"
allowed_operations[".*"]["vector"]["float"] = "vector"
allowed_operations["./"]["matrix"]["int"] = "matrix"
allowed_operations["./"]["matrix"]["float"] = "matrix"
allowed_operations["./"]["vector"]["int"] = "vector"
allowed_operations["./"]["vector"]["float"] = "vector"
op_to_string = {
'+': 'ADD',
'-': 'SUB',
'*': 'MUL',
'/': 'DIV',
}
class NodeVisitor(object):
loop = 0
variables = defaultdict(lambda: None)
def visit(self, node):
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method)
return visitor(node)
class TypeChecker(NodeVisitor):
def visit_Instructions(self, node):
# print("visit_Instructions")
for n in node.nodes:
self.visit(n)
def visit_FlowKeyword(self, node):
# print("visit_Flowkeyword")
if self.loop == 0:
TypeChecker.print_error(node, "flow keyword {} outside loop".format(node.keyword))
def visit_Print(self, node):
# print("visit_Print")
pass
def visit_Return(self, node):
# print("visit_Return")
pass
def visit_String(self, node):
# print("visit_String")
pass
def visit_Matrix(self, node):
size1 = len(node.elements)
sizes = map(lambda x: len(x.elements), node.elements)
size2 = min(sizes)
if all(map(lambda x: x == size2, sizes)):
return self.Variable("matrix", [size1, size2])
else:
TypeChecker.print_error(node, "vectors with different sizes in matrix initialization")
return None
def visit_Vector(self, node):
# print("visit_Vector")
return self.Variable("vector", [len(node.elements)])
def visit_Reference(self, node):
# print("visit_Reference")
v = self.variables[node.name.name]
if not v:
TypeChecker.print_error(node, "undefined variable {}".format(node.name.name))
return None
if len(node.coords) > len(v.size):
TypeChecker.print_error(node, "too many dimensions in vector reference")
return None
error = False
for coord, size in zip(node.coords, v.size):
if isinstance(coord, AST.IntNum) and coord.value >= size:
TypeChecker.print_error(node, "reference {} is over vector size {}".format(coord.value, size))
error = True
if error:
return None
if len(v.size) - len(node.coords) == 0:
return TypeChecker.Variable("float")
else:
return TypeChecker.Variable("vector", [v.size[-1]])
def visit_FunctionCall(self, node):
# print("visit_FunctionCall")
arguments = node.arguments
if len(arguments) == 1:
arguments = [arguments[0], arguments[0]]
return TypeChecker.Variable("matrix", map(lambda x: x.value, arguments))
def visit_While(self, node):
# print("visit_While")
self.loop += 1
self.visit(node.body)
self.loop -= 1
def visit_For(self, node):
# print("visit_For")
self.loop += 1
self.visit(node.body)
self.loop -= 1
def visit_Range(self, node):
# print("visit_Range")
pass
def visit_Variable(self, node):
# print("visit_Variable")
return self.variables[node.name]
def visit_If(self, node):
# print("visit_if")
pass
def visit_BinExpr(self, node):
# print("visit_BinExpr")
var1 = self.visit(node.left)
var2 = self.visit(node.right)
if not var1:
TypeChecker.print_error(node, "undefined variable {}".format(node.left.name))
return None
if not var2:
TypeChecker.print_error(node, "undefined variable {}".format(node.right.name))
return None
op = node.op
newtype = allowed_operations[op[0]][var1.type][var2.type]
if newtype:
new_var = copy(var1)
new_var.type = newtype
return new_var
else:
TypeChecker.print_error(node, "cannot {} {} and {}".format(op_to_string[op], var1.type, var2.type))
return None
def visit_ArithmeticOperation(self, node):
# print("visit_ArithmeticOperation")
return self.visit_BinExpr(node)
def visit_Assignment(self, node):
# print("visit_Assignment")
var1 = self.visit(node.left)
var2 = self.visit(node.right)
if not var2:
return None
name = node.left.name
op = node.op
if op == "=":
self.variables[name] = self.Variable(var2.type, var2.size, name)
else:
if not var1:
TypeChecker.print_error(node, "undefined variable {}".format(name))
return None
newtype = allowed_operations[op[0]][var1.type][var2.type]
if newtype:
self.variables[name] = self.Variable(newtype, var2.size, name)
else:
TypeChecker.print_error(node, "cannot assign {} to {}".format(var2.type, var1.type))
def visit_IntNum(self, node):
# print("visit_IntNum")
return self.Variable("int")
def visit_FloatNum(self, node):
# print("visit_FloatNum")
return self.Variable("float")
def visit_UnaryExpr(self, node):
# print("visit_UnaryExpr")
pass
def visit_Comparison(self, node):
# print("visit_Comparison")
pass
def visit_Error(self, node):
# print("visit_Error")
pass
@staticmethod
def print_error(node, error):
print("Error in line {}: {}".format(node.lineno, error))
class Variable(object):
def __init__(self, type, size=[], name=""):
self.type = type
self.size = size
self.name = name
def __str__(self):
return 'Variable {}: {}, {}'.format(self.name, self.type, self.size)
|
StarcoderdataPython
|
11213330
|
#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by <NAME>
# Copyright (c) 2015 <NAME>
#
# License: MIT
#
"""This module exports the Scalastyle plugin class."""
from os import path
from SublimeLinter.lint import Linter, util
class Scalastyle(Linter):
"""Provides an interface to scalastyle."""
syntax = 'scala'
executable = 'java'
cmd = None
config_file = ('--config', 'scalastyle-config.xml')
regex = (
r'^(?:(?P<error>error)|(?P<warning>warning))'
r'(?: file=(?P<file>.+?))'
r'(?: message=(?P<message>.+?))'
r'(?: line=(?P<line>\d+))?'
r'(?: column=(?P<col>\d+))?$'
)
multiline = False
line_col_base = (1, 0)
tempfile_suffix = 'scala'
error_stream = util.STREAM_BOTH
word_re = r'^(\w+|([\'"]).+?\2)'
defaults = {
'jar_file': ''
}
inline_settings = None
inline_overrides = None
comment_re = None
def cmd(self):
"""Return the command line to execute."""
jar_file = self.get_jarfile_path()
return [self.executable_path, '-jar', jar_file]
def get_jarfile_path(self):
"""
Return the absolute path to the scalastyle jar file.
Expand user shortcut (~) and environment variables.
"""
settings = self.get_view_settings()
jar_file = settings.get('jar_file')
# Expand user directory shortcuts
jar_file = path.expanduser(jar_file)
# Expand environment variables
jar_file = path.expandvars(jar_file)
# Get canonical path
jar_file = path.realpath(jar_file)
return jar_file
def split_match(self, match):
"""
Return the components of the match.
We override this method so that errors with no line number can be displayed.
"""
match, line, col, error, warning, message, near = super().split_match(match)
if line is None and message:
line = 0
return match, line, col, error, warning, message, near
|
StarcoderdataPython
|
1770944
|
<filename>shim/shim.py
from helper import log, status, settings, sequence
from abc import abstractmethod, ABCMeta
from ctypes import *
import os
EXTENSION_NAME = "some extension"
class Shim(metaclass=ABCMeta):
def __init__(self):
lib = cdll.LoadLibrary(os.path.dirname(__file__) + "/main.so")
self.status = status.Status(lib)
self.sequence = sequence.Sequence(lib)
self.settings = settings.Settings(lib)
self.log = log.Log(lib)
"""
Install calls
"""
def pre_install(self):
self.log.info("BEGIN Install Extension: %s"%(EXTENSION_NAME))
self.status.transitioning("BEGIN Install Extension: %s"%(EXTENSION_NAME), "BEGIN Install Extension: %s"%(EXTENSION_NAME))
@abstractmethod
def install(self):
pass
def post_install(self):
self.log.info("END Install Extension %s"%(EXTENSION_NAME))
self.status.transitioning("END Install Extension: %s"%(EXTENSION_NAME), "END Install Extension: %s"%(EXTENSION_NAME))
"""
Enable calls
"""
def pre_enable(self):
shouldProcess = self.sequence.check_sequence_number()
self.log.info("BEGIN Enable Extension: %s"%(EXTENSION_NAME))
self.status.transitioning("BEGIN Enable Extension: %s"%(EXTENSION_NAME), "BEGIN Enable Extension: %s"%(EXTENSION_NAME))
# Get settings to return back to the user to use in application logic
self.settings = self.settings.get_settings()
@abstractmethod
def enable(self):
pass
def post_enable(self):
self.log.info("END Enable Extension: %s"%(EXTENSION_NAME))
self.status.success("END Enable Extension: %s"%(EXTENSION_NAME), "END Enable Extension: %s"%(EXTENSION_NAME))
"""
Disable calls
"""
def pre_disable(self):
self.log.info("BEGIN Disable Extension: %s"%(EXTENSION_NAME))
self.status.transitioning("BEGIN Disable Extension: %s"%(EXTENSION_NAME), "BEGIN Disable Extension: %s"%(EXTENSION_NAME))
@abstractmethod
def disable(self):
pass
def post_disable(self):
self.log.info("END Disable Extension %s"%(EXTENSION_NAME))
self.status.success("END Disable Extension: %s"%(EXTENSION_NAME), "END Disable Extension: %s"%(EXTENSION_NAME))
"""
Uninstall calls
"""
def pre_uninstall(self):
self.log.info("BEGIN Uninstall Extension: %s"%(EXTENSION_NAME))
self.status.transitioning("BEGIN Uninstall Extension: %s"%(EXTENSION_NAME), "BEGIN Uninstall Extensions: %s"%(EXTENSION_NAME))
@abstractmethod
def uninstall(self):
pass
def post_uninstall(self):
self.log.info("END Uninstall Extension: %s"%(EXTENSION_NAME))
self.status.transitioning("END Uninstall Extension: %s"%(EXTENSION_NAME), "END Uninstall Extension: %s"%(EXTENSION_NAME))
def on_timeout(self):
self.log.error("Extension install took too long for Extension: %s"%(EXTENSION_NAME))
self.status.error("Enabling failed for extension: %s"%(EXTENSION_NAME), "failed installing %s"%(EXTENSION_NAME))
|
StarcoderdataPython
|
37480
|
<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import random
if __name__ == '__main__':
ones = np.ones(30, dtype=np.uint8)
print(ones)
doubled = [x * 2 for x in ones]
doubled = ones * 2
print(doubled)
negatives = ones - doubled
print(negatives)
y = np.random.rand(30)
y *= 20
print(y)
x = range(0, 30)
print(x)
plt.plot(x, y, 'r')
plt.xlabel("number of people")
plt.ylabel("an associated piece of information")
plt.title("my graph")
plt.savefig("mafig.png")
plt.show()
|
StarcoderdataPython
|
9708562
|
<reponame>JPGarzonE/curso-de-python<gh_stars>0
def main():
print("F A C T O R I A L   C A L C U L A T O R")
numero = int( input("What is your number? ") )
resultado = factorial(numero)
print("The factorial of {} is {}".format(numero, resultado))
def factorial(numero):
if numero == 1 :
return 1
else:
return numero * factorial(numero-1)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1778509
|
<filename>openprocurement/search/update_orgs.py
# -*- coding: utf-8 -*-
import os
import sys
import fcntl
import signal
import logging.config
from datetime import datetime, timedelta
from ConfigParser import ConfigParser
from openprocurement.search.version import __version__
from openprocurement.search.engine import IndexEngine, logger
from openprocurement.search.index.orgs import OrgsIndex
from openprocurement.search.source.orgs import OrgsSource
from openprocurement.search.source.tender import TenderSource
from openprocurement.search.source.ocds import OcdsSource
from openprocurement.search.source.plan import PlanSource
from openprocurement.search.source.auction import AuctionSource
from openprocurement.search.utils import decode_bool_values, chage_process_user_group
engine = type('engine', (), {})()
def sigterm_handler(signo, frame):
logger.warning("Signal received %d", signo)
engine.should_exit = True
signal.alarm(2)
sys.exit(0)
class IndexOrgsEngine(IndexEngine):
def __init__(self, config, update_config):
self.patch_engine_config(config, update_config)
super(IndexOrgsEngine, self).__init__(config)
self.orgs_map = {}
def patch_engine_config(self, config, update_config):
config['slave_mode'] = None
config['start_wait'] = 0
config['tender_fast_client'] = False
config['plan_fast_client'] = False
config['auction_fast_client'] = False
config['tender_preload'] = int(1e6)
config['plan_preload'] = int(1e6)
config['ocds_preload'] = int(1e6)
config['auction_preload'] = int(1e6)
# update skip_until
update_days = update_config.get('update_days') or 30
date = datetime.now() - timedelta(days=int(update_days))
date = date.strftime("%Y-%m-%d")
logger.info("Patch config: update_days = %s -> skip_until = %s", update_days, date)
config['auction_skip_until'] = date
config['tender_skip_until'] = date
config['plan_skip_until'] = date
config['ocds_skip_until'] = date
def process_entity(self, entity):
code = None
try:
code = entity['identifier']['id']
if not code:
raise ValueError("No code")
if type(code) != str:
code = str(code)
if len(code) < 5 or len(code) > 15:
raise ValueError("Bad code")
except (KeyError, TypeError, ValueError):
return False
try:
self.index_by_type('org', entity)
except Exception as e:
logger.exception("Can't index %s: %s", code, str(e))
if code in self.orgs_map:
self.orgs_map[code] += 1
else:
self.orgs_map[code] = 1
return True
def process_source(self, source):
logger.info("Process source [%s]", source.doc_type)
source.client_user_agent += " update_orgs"
items_list = True
items_count = 0
flush_count = 0
while True:
if self.should_exit:
break
try:
save_count = items_count
items_list = source.items()
for meta in items_list:
if self.should_exit:
break
items_count += 1
item = source.get(meta)
entity = source.procuring_entity(item)
if entity:
self.process_entity(entity)
# log progress
if items_count % 100 == 0:
logger.info("[%s] Processed %d last %s orgs_found %d",
source.doc_type, items_count,
meta.get('dateModified'), len(self.orgs_map))
# flush orgs_map each 10k
if items_count - flush_count > 10000:
flush_count = items_count
self.flush_orgs_map()
except Exception as e:
logger.exception("Can't process_source: %s", str(e))
break
# prevent stop by skip_until before first 100 processed
if items_count < 100 and getattr(source, 'last_skipped', None):
logger.info("[%s] Processed %d last_skipped %s",
source.doc_type, items_count, source.last_skipped)
continue
elif items_count - save_count < 5:
break
# flush orgs ranks
self.flush_orgs_map()
def flush_orgs_map(self):
index_name = self.get_current_indexes()
logger.info("[%s] Flush orgs to index", index_name)
if not index_name:
return
iter_count = 0
update_count = 0
orgs_index = self.index_list[0]
orgs_index.process(allow_reindex=False)
doc_type = orgs_index.source.doc_type
map_len = len(self.orgs_map)
error_count = 0
for code, rank in self.orgs_map.iteritems():
if self.should_exit:
break
iter_count += 1
if iter_count % 1000 == 0:
logger.info("[%s] Updated %d / %d orgs %d%%",
index_name, update_count, iter_count,
int(100 * iter_count / map_len))
# don't update rare companies
if rank < 10:
continue
# get item
meta = {'id': code, 'doc_type': doc_type}
found = self.get_item(index_name, meta)
# if not found - ignore, but warn
if not found:
logger.warning("[%s] Code %s not found", index_name, str(code))
continue
# if rank not changed - ignore
if found['_source']['rank'] == rank:
continue
item = {
'meta': {
'id': found['_id'],
'doc_type': found['_type'],
'version': found['_version'] + 1,
},
'data': found['_source'],
}
item['data']['rank'] = rank
try:
self.index_item(index_name, item)
update_count += 1
except Exception as e:
logger.error("Fail index %s: %s", str(item), str(e))
error_count += 1
if error_count > 100:
logger.exception("%s", str(e))
break
# final info
logger.info("[%s] Updated %d / %d orgs %d%%",
index_name, update_count, iter_count,
int(100.0 * iter_count / map_len))
def main():
if len(sys.argv) < 2 or '-h' in sys.argv:
print("Usage: update_orgs etc/search.ini [custom_index_names]")
sys.exit(1)
parser = ConfigParser()
parser.read(sys.argv[1])
config = dict(parser.items('search_engine'))
config = decode_bool_values(config)
uo_config = dict(parser.items('update_orgs'))
if len(sys.argv) > 2:
config['index_names'] = sys.argv[2]
logging.config.fileConfig(sys.argv[1])
logger.info("Starting openprocurement.search.update_orgs v%s", __version__)
logger.info("Copyright (c) 2015-2018 <NAME> <<EMAIL>>")
# try get exclusive lock to prevent second start
lock_filename = uo_config.get('pidfile') or 'update_orgs.pid'
lock_file = open(lock_filename, "w")
fcntl.lockf(lock_file, fcntl.LOCK_EX + fcntl.LOCK_NB)
lock_file.write(str(os.getpid()) + "\n")
lock_file.flush()
signal.signal(signal.SIGTERM, sigterm_handler)
# signal.signal(signal.SIGINT, sigterm_handler)
try:
chage_process_user_group(config)
except Exception as e:
logger.error("Can't change process user: %s", str(e))
try:
global engine
engine = IndexOrgsEngine(config, uo_config)
source = OrgsSource(config)
index = OrgsIndex(engine, source, config)
# manualy reset and prevent second reset on first process_source
source.reset()
index.last_current_index = index.current_index
if config.get('tender_api_url', None):
source = TenderSource(config)
engine.process_source(source)
if config.get('ocds_dir', None):
source = OcdsSource(config)
engine.process_source(source)
if config.get('plan_api_url', None):
source = PlanSource(config)
engine.process_source(source)
if config.get('auction_api_url', None):
source = AuctionSource(config)
engine.process_source(source)
engine.flush_orgs_map()
except Exception as e:
logger.exception("Exception: %s", str(e))
finally:
lock_file.close()
os.remove(lock_filename)
logger.info("Shutdown")
return 0
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
11389262
|
<reponame>kamoljan/amazon-personalize-samples
import json
import boto3
import base64
import time
def lambda_handler(event, context):
# TODO implement
#### Attach Policy to S3 Bucket
s3 = boto3.client("s3")
policy = {
"Version": "2012-10-17",
"Id": "PersonalizeS3BucketAccessPolicy",
"Statement": [
{
"Sid": "PersonalizeS3BucketAccessPolicy",
"Effect": "Allow",
"Principal": {
"Service": "personalize.amazonaws.com"
},
"Action": [
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::{}".format(event['bucket']),
"arn:aws:s3:::{}/*".format(event['bucket'])
]
}
]
}
s3.put_bucket_policy(Bucket=event['bucket'], Policy=json.dumps(policy))
#### Create Personalize Role
iam = boto3.client("iam")
role_name = "PersonalizeRole"
assume_role_policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "personalize.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
create_role_response = iam.create_role(
RoleName = role_name,
AssumeRolePolicyDocument = json.dumps(assume_role_policy_document)
)
# AmazonPersonalizeFullAccess provides access to any S3 bucket with a name that includes "personalize" or "Personalize"
# if you would like to use a bucket with a different name, please consider creating and attaching a new policy
# that provides read access to your bucket or attaching the AmazonS3ReadOnlyAccess policy to the role
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonPersonalizeFullAccess"
iam.attach_role_policy(
RoleName = role_name,
PolicyArn = policy_arn
)
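# Illustrative alternative (not part of the original flow): if the bucket name
# does not contain "personalize", one option is to attach the AWS managed
# read-only S3 policy to the role instead, e.g.:
#   iam.attach_role_policy(
#       RoleName=role_name,
#       PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")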
time.sleep(60) # wait for a minute to allow IAM role policy attachment to propagate
role_arn = create_role_response["Role"]["Arn"]
print(role_arn)
return {
'statusCode': 200,
'role_arn':role_arn
#'body': json.dumps('Hello from Lambda!')
}
|
StarcoderdataPython
|
3271988
|
from skmultiflow.evaluation.metrics import metrics
import numpy as np
from skmultiflow.core.base_object import BaseObject
from skmultiflow.core.utils.data_structures import FastBuffer, FastComplexBuffer, ConfusionMatrix, MOLConfusionMatrix
from skmultiflow.core.utils.validation import check_weights
class ClassificationMeasurements(BaseObject):
""" ClassificationMeasurements
Class used to keep updated statistics about a classifier, in order
to be able to provide, at any given moment, any relevant metric about
that classifier.
It combines a ConfusionMatrix object, with some additional statistics,
to compute a range of performance metrics.
In order to keep statistics updated, the class won't require lots of
information, but two: the predictions and true labels.
At any given moment, it can compute the following statistics: performance,
kappa, kappa_t, kappa_m, majority_class and error rate.
Parameters
----------
targets: list
A list containing the possible labels.
dtype: data type (Default: numpy.int64)
The data type of the existing labels.
Examples
--------
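Illustrative usage sketch (the labels below are hypothetical, not taken
from the library's own documentation):
>>> measurements = ClassificationMeasurements()
>>> measurements.add_result(1, 1)   # true label 1, predicted 1
>>> measurements.add_result(1, 0)   # true label 1, predicted 0
>>> perf = measurements.get_performance()   # 1 correct out of 2 -> 0.5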
"""
def __init__(self, targets=None, dtype=np.int64):
super().__init__()
if targets is not None:
self.n_targets = len(targets)
else:
self.n_targets = 0
self.confusion_matrix = ConfusionMatrix(self.n_targets, dtype)
self.last_true_label = None
self.last_prediction = None
self.sample_count = 0
self.majority_classifier = 0
self.correct_no_change = 0
self.targets = targets
def reset(self, targets=None):
if targets is not None:
self.n_targets = len(targets)
else:
self.n_targets = 0
self.sample_count = 0
self.majority_classifier = 0
self.correct_no_change = 0
self.confusion_matrix.restart(self.n_targets)
def add_result(self, sample, prediction, weight=1.0):
""" add_result
Updates its statistics with the results of a prediction.
Parameters
----------
sample: int
The true label.
prediction: int
The classifier's prediction
"""
check_weights(weight)
true_y = self._get_target_index(sample, True)
pred = self._get_target_index(prediction, True)
self.confusion_matrix.update(true_y, pred)
self.sample_count += weight
if self.get_majority_class() == sample:
self.majority_classifier = self.majority_classifier + weight
if self.last_true_label == sample:
self.correct_no_change = self.correct_no_change + weight
self.last_true_label = sample
self.last_prediction = prediction
def get_last(self):
return self.last_true_label, self.last_prediction
def get_majority_class(self):
""" get_majority_class
Computes the true majority class.
Returns
-------
int
Returns the true majority class.
"""
if (self.n_targets is None) or (self.n_targets == 0):
return False
majority_class = 0
max_prob = 0.0
for i in range(self.n_targets):
sum = 0.0
for j in range(self.n_targets):
sum += self.confusion_matrix.value_at(i, j)
sum = sum / self.sample_count
if sum > max_prob:
max_prob = sum
majority_class = i
return majority_class
def get_performance(self):
""" get_performance
Computes the performance.
Returns
-------
float
Returns the performance.
"""
sum_value = 0.0
n, _ = self.confusion_matrix.shape()
for i in range(n):
sum_value += self.confusion_matrix.value_at(i, i)
try:
return sum_value / self.sample_count
except ZeroDivisionError:
return 0.0
def get_incorrectly_classified_ratio(self):
return 1.0 - self.get_performance()
def _get_target_index(self, target, add = False):
""" _get_target_index
Computes the index of an element in the self.targets list.
Also reshapes the ConfusionMatrix and adds new found targets
if add is True.
Parameters
----------
target: int
A class label.
add: bool
Either to add new found labels to the targets list or not.
Returns
-------
int
The target index in the self.targets list.
"""
if (self.targets is None) and add:
self.targets = []
self.targets.append(target)
self.n_targets = len(self.targets)
self.confusion_matrix.reshape(len(self.targets), len(self.targets))
elif (self.targets is None) and (not add):
return None
if ((target not in self.targets) and (add)):
self.targets.append(target)
self.n_targets = len(self.targets)
self.confusion_matrix.reshape(len(self.targets), len(self.targets))
for i in range(len(self.targets)):
if self.targets[i] == target:
return i
return None
def get_kappa(self):
""" get_kappa
Computes the Cohen's kappa coefficient.
Returns
-------
float
Returns the Cohen's kappa coefficient.
"""
p0 = self.get_performance()
pc = 0.0
n, l = self.confusion_matrix.shape()
for i in range(n):
row = self.confusion_matrix.row(i)
column = self.confusion_matrix.column(i)
sum_row = np.sum(row) / self.sample_count
sum_column = np.sum(column) / self.sample_count
pc += sum_row * sum_column
if pc == 1:
return 1
return (p0 - pc) / (1.0 - pc)
def get_kappa_t(self):
""" get_kappa_t
Computes the Cohen's kappa T coefficient. This measures the
temporal correlation between samples.
Returns
-------
float
Returns the Cohen's kappa T coefficient.
"""
p0 = self.get_performance()
if self.sample_count != 0:
pc = self.correct_no_change / self.sample_count
else:
pc = 0
if pc == 1:
return 1
return (p0 - pc) / (1.0 - pc)
def get_kappa_m(self):
""" get_kappa_t
Computes the Cohen's kappa M coefficient.
Returns
-------
float
Returns the Cohen's kappa M coefficient.
"""
p0 = self.get_performance()
if self.sample_count != 0:
pc = self.majority_classifier / self.sample_count
else:
pc = 0
if pc == 1:
return 1
return (p0 - pc) / (1.0 - pc)
@property
def _matrix(self):
return self.confusion_matrix._matrix
def get_info(self):
return 'ClassificationMeasurements: targets: ' + str(self.targets) + \
' - sample_count: ' + str(self.sample_count) + \
' - performance: ' + str(self.get_performance()) + \
' - kappa: ' + str(self.get_kappa()) + \
' - kappa_t: ' + str(self.get_kappa_t()) + \
' - kappa_m: ' + str(self.get_kappa_m()) + \
' - majority_class: ' + str(self.get_majority_class())
def get_class_type(self):
return 'collection'
class WindowClassificationMeasurements(BaseObject):
""" WindowClassificationMeasurements
This class will maintain a fixed sized window of the newest information
about one classifier. It can provide, as requested, any of the relevant
current metrics about the classifier, measured inside the window.
To keep track of statistics inside a window, the class will use a
ConfusionMatrix object, alongside FastBuffers, to simulate fixed sized
windows of the important classifier's attributes.
Its functionalities are somewhat similar to those of the
ClassificationMeasurements class. The difference is that the statistics
kept by this class are local, or partial, while the statistics kept by
the ClassificationMeasurements class are global.
At any given moment, it can compute the following statistics: performance,
kappa, kappa_t, kappa_m, majority_class and error rate.
Parameters
----------
targets: list
A list containing the possible labels.
dtype: data type (Default: numpy.int64)
The data type of the existing labels.
window_size: int (Default: 200)
The width of the window. Determines how many samples the object
can see.
Examples
--------
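Illustrative usage sketch (hypothetical labels, small window):
>>> wm = WindowClassificationMeasurements(window_size=3)
>>> for y_true, y_pred in [(0, 0), (1, 1), (1, 0)]:
...     wm.add_result(y_true, y_pred)
>>> perf = wm.get_performance()   # 2 correct out of 3 in the window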
"""
def __init__(self, targets=None, dtype=np.int64, window_size=200):
super().__init__()
if targets is not None:
self.n_targets = len(targets)
else:
self.n_targets = 0
self.confusion_matrix = ConfusionMatrix(self.n_targets, dtype)
self.last_class = None
self.targets = targets
self.window_size = window_size
self.true_labels = FastBuffer(window_size)
self.predictions = FastBuffer(window_size)
self.temp = 0
self.last_prediction = None
self.last_true_label = None
self.majority_classifier = 0
self.correct_no_change = 0
self.majority_classifier_correction = FastBuffer(window_size)
self.correct_no_change_correction = FastBuffer(window_size)
def reset(self, targets=None):
if targets is not None:
self.n_targets = len(targets)
else:
self.n_targets = 0
self.majority_classifier = 0
self.correct_no_change = 0
self.confusion_matrix.restart(self.n_targets)
self.majority_classifier_correction = FastBuffer(self.window_size)
self.correct_no_change_correction = FastBuffer(self.window_size)
def add_result(self, sample, prediction):
""" add_result
Updates its statistics with the results of a prediction. If needed it
will remove samples from the observation window.
Parameters
----------
sample: int
The true label.
prediction: int
The classifier's prediction
"""
true_y = self._get_target_index(sample, True)
pred = self._get_target_index(prediction, True)
old_true = self.true_labels.add_element(np.array([sample]))
old_predict = self.predictions.add_element(np.array([prediction]))
# Verify if its needed to decrease the count of any label
# pair in the confusion matrix
if (old_true is not None) and (old_predict is not None):
self.temp += 1
error = self.confusion_matrix.remove(self._get_target_index(old_true[0]), self._get_target_index(old_predict[0]))
self.correct_no_change += self.correct_no_change_correction.peek()
self.majority_classifier += self.majority_classifier_correction.peek()
# Verify if its needed to decrease the majority_classifier count
if (self.get_majority_class() == sample) and (self.get_majority_class() is not None):
self.majority_classifier += 1
self.majority_classifier_correction.add_element([-1])
else:
self.majority_classifier_correction.add_element([0])
# Verify if its needed to decrease the correct_no_change
if (self.last_true_label == sample) and (self.last_true_label is not None):
self.correct_no_change += 1
self.correct_no_change_correction.add_element([-1])
else:
self.correct_no_change_correction.add_element([0])
self.confusion_matrix.update(true_y, pred)
self.last_true_label = sample
self.last_prediction = prediction
def get_last(self):
return self.last_true_label, self.last_prediction
def get_majority_class(self):
""" get_majority_class
Computes the window/local true majority class.
Returns
-------
int
Returns the true window/local majority class.
"""
if (self.n_targets is None) or (self.n_targets == 0):
return None
majority_class = 0
max_prob = 0.0
for i in range(self.n_targets):
sum = 0.0
for j in range(self.n_targets):
sum += self.confusion_matrix.value_at(i, j)
sum = sum / self.true_labels.get_current_size()
if sum > max_prob:
max_prob = sum
majority_class = i
return majority_class
def get_performance(self):
""" get_performance
Computes the window/local performance.
Returns
-------
float
Returns the window/local performance.
"""
sum_value = 0.0
n, _ = self.confusion_matrix.shape()
for i in range(n):
sum_value += self.confusion_matrix.value_at(i, i)
try:
return sum_value / self.true_labels.get_current_size()
except ZeroDivisionError:
return 0.0
def get_incorrectly_classified_ratio(self):
return 1.0 - self.get_performance()
def _get_target_index(self, target, add=False):
""" _get_target_index
Computes the index of an element in the self.targets list.
Also reshapes the ConfusionMatrix and adds new found targets
if add is True.
Parameters
----------
target: int
A class label.
add: bool
Either to add new found labels to the targets list or not.
Returns
-------
int
The target index in the self.targets list.
"""
if (self.targets is None) and add:
self.targets = []
self.targets.append(target)
self.n_targets = len(self.targets)
self.confusion_matrix.reshape(len(self.targets), len(self.targets))
elif (self.targets is None) and (not add):
return None
if ((target not in self.targets) and (add)):
self.targets.append(target)
self.n_targets = len(self.targets)
self.confusion_matrix.reshape(len(self.targets), len(self.targets))
for i in range(len(self.targets)):
if self.targets[i] == target:
return i
return None
def get_kappa(self):
""" get_kappa
Computes the window/local Cohen's kappa coefficient.
Returns
-------
float
Returns the window/local Cohen's kappa coefficient.
"""
p0 = self.get_performance()
pc = 0.0
n, l = self.confusion_matrix.shape()
for i in range(n):
row = self.confusion_matrix.row(i)
column = self.confusion_matrix.column(i)
sum_row = np.sum(row) / self.true_labels.get_current_size()
sum_column = np.sum(column) / self.true_labels.get_current_size()
pc += sum_row * sum_column
if pc == 1:
return 1
return (p0 - pc) / (1.0 - pc)
def get_kappa_t(self):
""" get_kappa_t
Computes the window/local Cohen's kappa T coefficient. This measures
the temporal correlation between samples.
Returns
-------
float
Returns the window/local Cohen's kappa T coefficient.
"""
p0 = self.get_performance()
if self._sample_count != 0:
pc = self.correct_no_change / self._sample_count
else:
pc = 0
if pc == 1:
return 1
return (p0 - pc) / (1.0 - pc)
def get_kappa_m(self):
""" get_kappa_t
Computes the window/local Cohen's kappa M coefficient.
Returns
-------
float
Returns the window/local Cohen's kappa M coefficient.
"""
p0 = self.get_performance()
if self._sample_count != 0:
pc = self.majority_classifier / self._sample_count
else:
pc = 0
if pc == 1:
return 1
return (p0 - pc) / (1.0 - pc)
@property
def _matrix(self):
return self.confusion_matrix._matrix
@property
def _sample_count(self):
return self.true_labels.get_current_size()
def get_class_type(self):
return 'collection'
def get_info(self):
return 'WindowClassificationMeasurements: targets: ' + str(self.targets) + \
' - sample_count: ' + str(self._sample_count) + \
' - window_size: ' + str(self.window_size) + \
' - performance: ' + str(self.get_performance()) + \
' - kappa: ' + str(self.get_kappa()) + \
' - kappa_t: ' + str(self.get_kappa_t()) + \
' - kappa_m: ' + str(self.get_kappa_m()) + \
' - majority_class: ' + str(self.get_majority_class())
class MultiOutputMeasurements(BaseObject):
""" MultiOutputMeasurements
This class will keep updated statistics about a multi output classifier,
using a confusion matrix adapted to multi output problems, the
MOLConfusionMatrix, alongside other of the classifier's relevant
attributes.
The performance metrics for multi output tasks are different from those used
for normal classification tasks. Thus, the statistics provided by this class
are different from those provided by the ClassificationMeasurements and from
the WindowClassificationMeasurements.
At any given moment, it can compute the following statistics: hamming_loss,
hamming_score, exact_match and j_index.
Parameters
----------
targets: list
A list containing the possible labels.
dtype: data type (Default: numpy.int64)
The data type of the existing labels.
Examples
--------
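Illustrative usage sketch (hypothetical multi-label data):
>>> import numpy as np
>>> mo = MultiOutputMeasurements(targets=[0, 1, 2])
>>> mo.add_result(np.array([1, 0, 1]), np.array([1, 0, 0]))
>>> mo.add_result(np.array([0, 1, 1]), np.array([0, 1, 1]))
>>> h = mo.get_hamming_score()   # 5 of 6 labels correct -> ~0.83
>>> e = mo.get_exact_match()     # 1 of 2 samples fully correct -> 0.5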
"""
def __init__(self, targets=None, dtype=np.int64):
super().__init__()
if targets is not None:
self.n_targets = len(targets)
else:
self.n_targets = 0
self.confusion_matrix = MOLConfusionMatrix(self.n_targets, dtype)
self.last_true_label = None
self.last_prediction = None
self.sample_count = 0
self.targets = targets
self.exact_match_count = 0
self.j_sum = 0
def reset(self, targets=None):
if targets is not None:
self.n_targets = len(targets)
else:
self.n_targets = 0
self.sample_count = 0
self.confusion_matrix.restart(self.n_targets)
self.exact_match_count = 0
self.j_sum = 0
pass
def add_result(self, sample, prediction):
""" add_result
Updates its statistics with the results of a prediction.
Adds the result to the MOLConfusionMatrix and update exact_matches and
j-index sum counts.
Parameters
----------
sample: int
The true label.
prediction: int
The classifier's prediction
"""
self.last_true_label = sample
self.last_prediction = prediction
m = 0
if hasattr(sample, 'size'):
m = sample.size
elif hasattr(sample, 'append'):
m = len(sample)
self.n_targets = m
equal = True
for i in range(m):
self.confusion_matrix.update(i, sample[i], prediction[i])
# update exact_match count
if sample[i] != prediction[i]:
equal = False
# update exact_match
if equal:
self.exact_match_count += 1
# update j_index count
inter = sum((sample * prediction) > 0) * 1.
union = sum((sample + prediction) > 0) * 1.
if union > 0:
self.j_sum += inter / union
elif np.sum(sample) == 0:
self.j_sum += 1
self.sample_count += 1
def get_last(self):
return self.last_true_label, self.last_prediction
def get_hamming_loss(self):
""" get_hamming_loss
Computes the Hamming loss, which is the complement of the hamming
score metric.
Returns
-------
float
The hamming loss.
"""
return 1.0 - self.get_hamming_score()
def get_hamming_score(self):
""" get_hamming_score
Computes the hamming score, defined as the number of correctly classified
labels divided by the total number of labels classified.
Returns
-------
float
The hamming score.
"""
try:
return self.confusion_matrix.get_sum_main_diagonal() / (self.sample_count * self.n_targets)
except ZeroDivisionError:
return 0.0
def get_exact_match(self):
""" get_exact_match
Computes the exact match metric.
This is the most strict multi output metric, defined as the number of
samples that have all their labels correctly classified, divided by the
total number of samples.
Returns
-------
float
Returns the exact match metric.
"""
return self.exact_match_count / self.sample_count
def get_j_index(self):
""" get_j_index
Computes the Jaccard index, also known as the intersection over union
metric. It is calculated by dividing the number of correctly classified
labels by the union of predicted and true labels.
Returns
-------
float
The Jaccard index.
"""
return self.j_sum / self.sample_count
def get_total_sum(self):
return self.confusion_matrix.get_total_sum()
@property
def _matrix(self):
return self.confusion_matrix._matrix
@property
def _sample_count(self):
return self.sample_count
def get_info(self):
return 'MultiOutputMeasurements: targets: ' + str(self.targets) + \
' - sample_count: ' + str(self._sample_count) + \
' - hamming_loss: ' + str(self.get_hamming_loss()) + \
' - hamming_score: ' + str(self.get_hamming_score()) + \
' - exact_match: ' + str(self.get_exact_match()) + \
' - j_index: ' + str(self.get_j_index())
def get_class_type(self):
return 'collection'
class WindowMultiOutputMeasurements(BaseObject):
""" MultiOutputMeasurements
This class will maintain a fixed sized window of the newest information
about one classifier. It can provide, as requested, any of the relevant
current metrics about the classifier, measured inside the window.
This class will keep updated statistics about a multi output classifier,
using a confusion matrix adapted to multi output problems, the
MOLConfusionMatrix, alongside other of the classifier's relevant
attributes stored in ComplexFastBuffer objects, which will simulate
fixed sized windows.
Its functionalities are somewhat similar to those of the
MultiOutputMeasurements class. The difference is that the statistics
kept by this class are local, or partial, while the statistics kept by
the MultiOutputMeasurements class are global.
At any given moment, it can compute the following statistics: hamming_loss,
hamming_score, exact_match and j_index.
Parameters
----------
targets: list
A list containing the possible labels.
dtype: data type (Default: numpy.int64)
The data type of the existing labels.
window_size: int (Default: 200)
The width of the window. Determines how many samples the object
can see.
Examples
--------
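Illustrative usage sketch (hypothetical multi-label data, small window):
>>> import numpy as np
>>> wmo = WindowMultiOutputMeasurements(targets=[0, 1, 2], window_size=50)
>>> wmo.add_result(np.array([1, 0, 1]), np.array([1, 0, 0]))
>>> wmo.add_result(np.array([0, 1, 1]), np.array([0, 1, 1]))
>>> loss = wmo.get_hamming_loss()   # 1 wrong label out of 6 -> ~0.17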
"""
def __init__(self, targets=None, dtype=np.int64, window_size=200):
super().__init__()
if targets is not None:
self.n_targets = len(targets)
else:
self.n_targets = 0
self.confusion_matrix = MOLConfusionMatrix(self.n_targets, dtype)
self.last_true_label = None
self.last_prediction = None
self.targets = targets
self.window_size = window_size
self.true_labels = FastComplexBuffer(window_size, self.n_targets)
self.predictions = FastComplexBuffer(window_size, self.n_targets)
def reset(self, targets=None):
if targets is not None:
self.n_targets = len(targets)
else:
self.n_targets = 0
self.confusion_matrix.restart(self.n_targets)
self.exact_match_count = 0
self.j_sum = 0
self.true_labels = FastComplexBuffer(self.window_size, self.n_targets)
self.predictions = FastComplexBuffer(self.window_size, self.n_targets)
def add_result(self, sample, prediction):
""" add_result
Updates its statistics with the results of a prediction.
Adds the result to the MOLConfusionMatrix, and updates the
ComplexFastBuffer objects.
Parameters
----------
sample: int
The true label.
prediction: int
The classifier's prediction
"""
self.last_true_label = sample
self.last_prediction = prediction
m = 0
if hasattr(sample, 'size'):
m = sample.size
elif hasattr(sample, 'append'):
m = len(sample)
self.n_targets = m
for i in range(m):
self.confusion_matrix.update(i, sample[i], prediction[i])
old_true = self.true_labels.add_element(sample)
old_predict = self.predictions.add_element(prediction)
if (old_true is not None) and (old_predict is not None):
for i in range(m):
error = self.confusion_matrix.remove(old_true[0][i], old_predict[0][i])
def get_last(self):
return self.last_true_label, self.last_prediction
def get_hamming_loss(self):
""" get_hamming_loss
Computes the window/local Hamming loss, which is the complement of
the hamming score metric.
Returns
-------
float
The window/local hamming loss.
"""
return 1.0 - self.get_hamming_score()
def get_hamming_score(self):
""" get_hamming_score
Computes the window/local hamming score, defined as the number of
correctly classified labels divided by the total number of labels
classified.
Returns
-------
float
The window/local hamming score.
"""
return metrics.hamming_score(self.true_labels.get_queue(), self.predictions.get_queue())
def get_exact_match(self):
""" get_exact_match
Computes the window/local exact match metric.
This is the most strict multi output metric, defined as the number of
samples that have all their labels correctly classified, divided by the
total number of samples.
Returns
-------
float
Returns the window/local exact match metric.
"""
return metrics.exact_match(self.true_labels.get_queue(), self.predictions.get_queue())
def get_j_index(self):
""" get_j_index
Computes the window/local Jaccard index, also known as the intersection
over union metric. It is calculated by dividing the number of correctly
classified labels by the union of predicted and true labels.
Returns
-------
float
The window/local Jaccard index.
"""
return metrics.j_index(self.true_labels.get_queue(), self.predictions.get_queue())
def get_total_sum(self):
return self.confusion_matrix.get_total_sum()
@property
def _matrix(self):
return self.confusion_matrix._matrix
@property
def _sample_count(self):
return self.true_labels.get_current_size()
def get_info(self):
return 'WindowMultiOutputMeasurements: targets: ' + str(self.targets) + \
' - sample_count: ' + str(self._sample_count) + \
' - hamming_loss: ' + str(self.get_hamming_loss()) + \
' - hamming_score: ' + str(self.get_hamming_score()) + \
' - exact_match: ' + str(self.get_exact_match()) + \
' - j_index: ' + str(self.get_j_index())
def get_class_type(self):
return 'collection'
class RegressionMeasurements(BaseObject):
""" RegressionMeasurements
This class is used to keep updated statistics over a regression
learner in a regression problem context.
It will keep track of global metrics, that can be provided at
any moment. The relevant metrics kept by an instance of this class
are: MSE (mean square error) and MAE (mean absolute error).
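Examples
--------
Illustrative usage sketch (hypothetical values):
>>> rm = RegressionMeasurements()
>>> rm.add_result(3.0, 2.5)
>>> rm.add_result(-1.0, -0.5)
>>> mse = rm.get_mean_square_error()   # (0.25 + 0.25) / 2 = 0.25
>>> mae = rm.get_average_error()       # (0.5 + 0.5) / 2 = 0.5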
"""
def __init__(self):
super().__init__()
self.total_square_error = 0.0
self.average_error = 0.0
self.sample_count = 0
self.last_true_label = None
self.last_prediction = None
def reset(self):
self.total_square_error = 0.0
self.average_error = 0.0
self.sample_count = 0
self.last_true_label = None
self.last_prediction = None
def add_result(self, sample, prediction):
""" add_result
Use the true label and the prediction to update the statistics.
Parameters
----------
sample: int
The true label.
prediction: int
The classifier's prediction
"""
self.last_true_label = sample
self.last_prediction = prediction
self.total_square_error += (sample - prediction) * (sample - prediction)
self.average_error += np.absolute(sample-prediction)
self.sample_count += 1
def get_mean_square_error(self):
""" get_mean_square_error
Computes the mean square error.
Returns
-------
float
Returns the mean square error.
"""
if self.sample_count == 0:
return 0.0
else:
return self.total_square_error / self.sample_count
def get_average_error(self):
""" get_average_error
Computes the mean absolute error.
Returns
-------
float
Returns the mean absolute error.
"""
if self.sample_count == 0:
return 0.0
else:
return self.average_error / self.sample_count
def get_last(self):
return self.last_true_label, self.last_prediction
@property
def _sample_count(self):
return self.sample_count
def get_class_type(self):
return 'collection'
def get_info(self):
return 'RegressionMeasurements: sample_count: ' + str(self._sample_count) + \
' - mean_square_error: ' + str(self.get_mean_square_error()) + \
' - mean_absolute_error: ' + str(self.get_average_error())
class WindowRegressionMeasurements(BaseObject):
""" WindowRegressionMeasurements
This class is used to keep updated statistics over a regression
learner in a regression problem context inside a fixed sized window.
It uses FastBuffer objects to simulate the fixed sized windows.
It will keep track of partial metrics, that can be provided at
any moment. The relevant metrics kept by an instance of this class
are: MSE (mean square error) and MAE (mean absolute error).
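Examples
--------
Illustrative usage sketch (hypothetical values):
>>> wrm = WindowRegressionMeasurements(window_size=100)
>>> wrm.add_result(3.0, 2.5)
>>> wrm.add_result(-1.0, -0.5)
>>> mse = wrm.get_mean_square_error()   # 0.25 over the current window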
"""
def __init__(self, window_size=200):
super().__init__()
self.total_square_error = 0.0
self.average_error = 0.0
self.last_true_label = None
self.last_prediction = None
self.total_square_error_correction = FastBuffer(window_size)
self.average_error_correction = FastBuffer(window_size)
self.window_size = window_size
def reset(self):
self.total_square_error = 0.0
self.average_error = 0.0
self.last_true_label = None
self.last_prediction = None
self.total_square_error_correction = FastBuffer(self.window_size)
self.average_error_correction = FastBuffer(self.window_size)
def add_result(self, sample, prediction):
""" add_result
Use the true label and the prediction to update the statistics.
Parameters
----------
sample: int
The true label.
prediction: int
The classifier's prediction
"""
self.last_true_label = sample
self.last_prediction = prediction
self.total_square_error += (sample - prediction) * (sample - prediction)
self.average_error += np.absolute(sample-prediction)
old_square = self.total_square_error_correction.add_element(np.array([-1*((sample - prediction) * (sample - prediction))]))
old_average = self.average_error_correction.add_element(np.array([-1*(np.absolute(sample-prediction))]))
if (old_square is not None) and (old_average is not None):
self.total_square_error += old_square[0]
self.average_error += old_average[0]
def get_mean_square_error(self):
""" get_mean_square_error
Computes the window/local mean square error.
Returns
-------
float
Returns the window/local mean square error.
"""
if self._sample_count == 0:
return 0.0
else:
return self.total_square_error / self._sample_count
def get_average_error(self):
""" get_average_error
Computes the window/local mean absolute error.
Returns
-------
float
Returns the window/local mean absolute error.
"""
if self._sample_count == 0:
return 0.0
else:
return self.average_error / self._sample_count
def get_last(self):
return self.last_true_label, self.last_prediction
@property
def _sample_count(self):
return self.total_square_error_correction.get_current_size()
def get_class_type(self):
return 'collection'
def get_info(self):
return 'WindowRegressionMeasurements: sample_count: ' + str(self._sample_count) + \
' - mean_square_error: ' + str(self.get_mean_square_error()) + \
' - mean_absolute_error: ' + str(self.get_average_error())
|
StarcoderdataPython
|
4997416
|
<gh_stars>0
'''
Date: 2021-07-17 11:50:52
LastEditors: Liuliang
LastEditTime: 2021-07-17 14:30:11
Description:
'''
from collections.abc import Iterable, Iterator, Generator
# #1 An object that implements the __iter__ method is iterable
# # class IterObj:
# # def __iter__(self):
# # # simply return self here;
# # # in practice you usually would not write it this way,
# # # but would delegate to a built-in iterable instead,
# # # as the examples further below will show
# # return self
# class IterObj:
# def __init__(self):
# self.a = [3, 5, 7, 11, 13, 17, 19]
# def __iter__(self):
# return iter(self.a)
# it = IterObj()
# print(isinstance(it, Iterable)) # true
# print(isinstance(it, Iterator)) # false
# print(isinstance(it, Generator)) # false
# #2 Common iterable objects
# # Which objects in Python are commonly iterable?
# # 1 Collection or sequence types (e.g. list, tuple, set, dict, str)
# # 2 File objects
# # 3 Objects whose class defines an __iter__() method can be considered Iterable, but for a custom iterable to work correctly in a for loop, its __iter__() implementation must be correct (i.e. the built-in iter() function must be able to turn it into an Iterator; Iterator is explained further below -- for now just remember that iter() converts an iterable into an iterator, which is what the for loop actually uses)
# # 4 Objects that only implement __getitem__() can be turned into an iterator via iter(), but they are not Iterable themselves; so an object that works in a for loop is not necessarily an Iterable object.
# print(isinstance([], Iterable)) # true: a list is iterable
# print(isinstance({}, Iterable)) # true: a dict is iterable
# print(isinstance((), Iterable)) # true: a tuple is iterable
# print(isinstance(set(), Iterable)) # true: a set is iterable
# print(isinstance('', Iterable)) # true: a string is iterable
# import os
# currPath = os.path.dirname(os.path.abspath(__file__))
# with open(currPath+'/test.py') as file:
# print(isinstance(file, Iterable)) # true
# for i in it:
# print(i)
# class IterObj:
# # def __init__(self):
# # self.a = [3, 5, 7, 11, 13, 17, 19]
# # def __getitem__(self, i):
# # return self.a[i]
# # it = IterObj()
# # print(isinstance(it, Iterable)) # false
# # print(isinstance(it, Iterator)) # false
# # print(isinstance(it, Generator)) #false
# # print(hasattr(it, "__iter__")) # false
# # print(iter(it)) # <iterator object at 0x10b231278>
# # # print(isinstance(iter(it), Iterator))
# # for i in it:
# # print(i) # prints 3, 5, 7, 11, 13, 17, 19
# class IterObj:
# def __init__(self):
# self.a = [3, 5, 7, 11, 13, 17, 19]
# self.n = len(self.a)
# self.i = 0
# def __iter__(self):
# return iter(self.a)
# def __next__(self):
# while self.i < self.n:
# v = self.a[self.i]
# self.i += 1
# return v
# else:
# self.i = 0
# raise StopIteration()
# it = IterObj()
# print(isinstance(it, Iterable)) # true
# print(isinstance(it, Iterator)) # true
# print(isinstance(it, Generator)) # false
# print(hasattr(it, "__iter__")) # true
# print(hasattr(it, "__next__")) # true
# print(isinstance([], Iterator)) # false
# print(isinstance({}, Iterator)) # false
# print(isinstance((), Iterator)) # false
# print(isinstance(set(), Iterator)) # false
# print(isinstance('', Iterator)) # false
# print(next(it))
# print(next(it))
# # 3 Generator
# #.1 Generator expressions
# g = (x*2 for x in range(10))
# print(g)
# print(isinstance(g, Iterable)) # true
# print(isinstance(g, Iterator)) # true
# print(isinstance(g, Generator)) # true
# print(hasattr(g, "__iter__")) # true
# print(hasattr(g, "__next__")) # true
# print(next(g)) # 0
# print(next(g)) # 2
# print(next(g))
# #.2 Use yield to define a generator function
# def gen():
# for i in range(10):
# yield i
def producer(c):
n = 0
while n < 5:
n += 1
print('producer {}'.format(n))
r = c.send(n)  # resume the consumer with n; r is the value it yields back
print('consumer return {}'.format(r))
def consumer():
r = ''
while True:
n = yield r  # hand r back to the caller and wait for the next send()
if not n:
return
print('consumer {} '.format(n))
r = 'ok'
if __name__ == '__main__':
c = consumer()
next(c)  # prime the consumer (advance it to its first yield)
producer(c)
|
StarcoderdataPython
|
5003949
|
# -*- coding: utf-8 -*-
"""
Dropbox file system and targets.
"""
__all__ = ["DropboxFileSystem", "DropboxTarget", "DropboxFileTarget", "DropboxDirectoryTarget"]
import logging
import six
from law.config import Config
from law.target.remote import (
RemoteFileSystem, RemoteTarget, RemoteFileTarget, RemoteDirectoryTarget,
)
logger = logging.getLogger(__name__)
class DropboxFileSystem(RemoteFileSystem):
default_instance = None
def __init__(self, config=None, base=None, app_key=None, app_secret=None, access_token=None,
**kwargs):
# default configs
kwargs.setdefault("retries", 1)
kwargs.setdefault("retry_delay", 5)
kwargs.setdefault("transfer_config", {"checksum_check": False})
kwargs.setdefault("validate_copy", False)
kwargs.setdefault("cache_config", {})
kwargs.setdefault("permissions", False)
# prepare the gfal options
# resolution order: config, key+secret+token, default dropbox fs section
cfg = Config.instance()
if not config and not app_key and not app_secret and not access_token:
config = cfg.get("target", "default_dropbox_fs")
if config and cfg.has_section(config):
# load options from the config
opts = {attr: cfg.get_default(config, attr)
for attr in ("app_key", "app_secret", "access_token")}
if base is None:
base = cfg.get_default(config, "base")
# loop through items and load optional configs
cache_prefix = "cache_"
others = ("retries", "retry_delay", "validate_copy", "atomic_contexts", "permissions")
for key, value in cfg.items(config):
if not value:
continue
if key.startswith(cache_prefix):
kwargs["cache_config"][key[len(cache_prefix):]] = value
elif key in others:
kwargs[key] = value
elif app_key and app_secret and access_token:
opts = {"app_key": app_key, "app_secret": app_secret, "access_token": access_token}
else:
raise Exception("invalid arguments, set either config, app_key+app_secret+access_token "
"or the target.default_dropbox_fs option in your law config")
# base is mandatory
if base is None:
raise Exception("no base directory set")
# special dropbox options
gfal_options = {
"integer": [("DROPBOX", "OAUTH", 2)],
"string": [("DROPBOX", key.upper(), str(value)) for key, value in opts.items()],
}
base_url = "dropbox://dropbox.com/" + base.strip("/")
RemoteFileSystem.__init__(self, base_url, gfal_options=gfal_options, **kwargs)
# try to set the default fs instance
try:
DropboxFileSystem.default_instance = DropboxFileSystem()
logger.debug("created default DropboxFileSystem instance '{}'".format(
DropboxFileSystem.default_instance))
except Exception as e:
logger.debug("could not create default DropboxFileSystem instance: {}".format(e))
class DropboxTarget(RemoteTarget):
def __init__(self, path, fs=DropboxFileSystem.default_instance, **kwargs):
""" __init__(path, fs=DropboxFileSystem.default_instance, **kwargs)
"""
if isinstance(fs, six.string_types):
fs = DropboxFileSystem(fs)
RemoteTarget.__init__(self, path, fs, **kwargs)
class DropboxFileTarget(DropboxTarget, RemoteFileTarget):
pass
class DropboxDirectoryTarget(DropboxTarget, RemoteDirectoryTarget):
pass
DropboxTarget.file_class = DropboxFileTarget
DropboxTarget.directory_class = DropboxDirectoryTarget
|
StarcoderdataPython
|
5147874
|
<reponame>li-phone/DetectionCompetition
from tqdm import tqdm
import glob
import xml.etree.ElementTree as ET
import os
import json
import numpy as np
import random
import pandas as pd
try:
from pandas import json_normalize
except:
from pandas.io.json import json_normalize
def convert(size, box):
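"""Convert an absolute (xmin, ymin, xmax, ymax) box into a normalized
YOLO (x_center, y_center, width, height) tuple, given size=(img_w, img_h)."""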
dw = 1. / (size[0])
dh = 1. / (size[1])
x = (box[0] + box[2]) / 2.0 - 1
y = (box[1] + box[3]) / 2.0 - 1
w = box[2] - box[0]
h = box[3] - box[1]
x = x * dw
w = w * dw
y = y * dh
h = h * dh
return (x, y, w, h)
def coco2yolo(ann_file, img_dir, save_dir):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
from pycocotools.coco import COCO
coco = COCO(ann_file)
img_ids = coco.getImgIds()
targets = []
for img_id in tqdm(img_ids):
image_info = coco.loadImgs(img_id)[0]
ann_ids = coco.getAnnIds(imgIds=img_id)
annotations = coco.loadAnns(ann_ids)
img_path = os.path.join(img_dir, image_info['file_name'])
if not os.path.exists(img_path):
print(img_path, "not exists")
continue
file_name = os.path.join(save_dir, '{}.txt'.format(image_info['file_name'].split('.')[0]))
with open(file_name, 'w') as fp:
for ann in annotations:
bb = ann['bbox']
bb[2] += bb[0]
bb[3] += bb[1]
img_w = image_info['width']
img_h = image_info['height']
bb[2] = min(bb[2], img_w)
bb[3] = min(bb[3], img_h)
bb = convert((img_w, img_h), bb)
cls_id = int(ann['category_id']) - 1
fp.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
targets.append(dict(img=img_path, target=file_name))
return coco, targets
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='coco2yolo')
parser.add_argument('--coco',
default='/home/liphone/undone-work/data/detection/garbage_huawei/annotations/instance_train.json',
help='coco')
parser.add_argument('--img_dir', default='/home/liphone/undone-work/data/detection/garbage_huawei/images',
help='img_dir')
parser.add_argument('--save_dir', default='/home/liphone/undone-work/data/detection/garbage_huawei/yolo',
help='save_dir')
parser.add_argument('--frac', default=0.8, type=float, help='frac')
parser.add_argument('--random_state', default=666, help='random_state')
args = parser.parse_args()
return args
def main():
args = parse_args()
coco, targets = coco2yolo(args.coco, args.img_dir, os.path.join(args.save_dir, 'labels'))
categories = json_normalize(coco.dataset['categories'])
categories['name'].to_csv(os.path.join(args.save_dir, 'label_list.txt'), index=False, header=False)
targets = json_normalize(targets)
targets = targets.sample(frac=1., random_state=args.random_state)
train_samples = targets.sample(frac=args.frac, random_state=args.random_state)
val_samples = targets.drop(train_samples.index)
targets.to_csv(os.path.join(args.save_dir, 'trainval.txt'), index=False, header=False)
train_samples.to_csv(os.path.join(args.save_dir, 'train.txt'), index=False, header=False)
val_samples.to_csv(os.path.join(args.save_dir, 'val.txt'), index=False, header=False)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
9631589
|
import os
from flask import Flask,jsonify,request,render_template,redirect
from heart_sound import predict
from werkzeug.utils import secure_filename
app = Flask(__name__)  # __name__ is the module name ("__main__" when run directly)
@app.route("/", methods=["GET", "POST"])  # index page: shows the form and handles uploads
def index():
prediction = ""
if request.method == "POST":
print("FORM DATA RECEIVED")
if "file" not in request.files:
return redirect(request.url)
file = request.files["file"]
if file.filename == "":
return redirect(request.url)
if file:
app.config['Audio_UPLOADS'] = ""
file = request.files['file']
filename = secure_filename(file.filename)
file.save(os.path.join(app.config["Audio_UPLOADS"], filename))
actual_file = filename
prediction = predict(actual_file)
print(prediction)
return render_template('index.html', prediction=prediction)
app.run(debug=False, threaded = True)
|
StarcoderdataPython
|
1924537
|
<filename>Ayoubsprogramm1.py
print("Mooooooin Meister!")
topf = "Lego set"
print(topf)
print("Hallo ich programmiere gerade. Wer kann das auch?°,,,,°")
print("-------------------------------------------------------")
|
StarcoderdataPython
|
9798412
|
<reponame>brandongk-ubco/wrinkler
import torchvision
from .AugmentedDataset import AugmentedDataset
dataset_path = "/mnt/e/datasets/voc/"
train_data = torchvision.datasets.VOCSegmentation(dataset_path,
image_set='train')
val_data = torchvision.datasets.VOCSegmentation(dataset_path,
image_set='trainval')
class VOCAugmentedDataset(AugmentedDataset):
def __getitem__(self, idx):
image, mask = super().__getitem__(idx)
mask[mask > 0] = 1.
return image, mask
|
StarcoderdataPython
|
11307727
|
<gh_stars>1-10
import torch
import numpy as np
from collections import deque
import time
# if you have pre-loaded weights in place, set eps_start=0
def dqn(env, agent, WEIGHTS_PATH, brain_name, n_episodes=2000, eps_start=1, eps_end=0.01, eps_decay=0.993):
"""Deep Q-Learning.
Params
======
env: Unity environment used for training
agent: agent exposing act() and step() and a qnetwork_local model
WEIGHTS_PATH (str): path prefix where weight checkpoints are saved
brain_name (str): name of the Unity brain to control
n_episodes (int): maximum number of training episodes
eps_start (float): starting value of epsilon, for epsilon-greedy action selection
eps_end (float): minimum value of epsilon
eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
"""
scores = [] # track episode scores
yellow_bananas = [] # track episode yellow bananas
blue_bananas = [] # track episode blue bananas
steps = [] # track episode steps
epsilons = [] # track episode epsilons
scores_window = deque(maxlen=100) # last 100 scores
eps = eps_start # initialize epsilon
for i_episode in range(1, n_episodes + 1):
env_info = env.reset(train_mode=True)[brain_name] # reset the environment
state = env_info.vector_observations[0] # get the current state
score = 0 # initialize the score
n_steps = 0 # initialize steps
n_yellow_bananas = 0
n_blue_bananas = 0
while True:
action = agent.act(state, eps)
env_info = env.step(action)[brain_name] # send the action to the environment
next_state = env_info.vector_observations[0] # get the next state
reward = env_info.rewards[0] # get the reward
done = env_info.local_done[0] # see if episode has finished
score += reward # update the score
n_steps += 1
if reward == -1:
n_blue_bananas += 1
if reward == 1:
n_yellow_bananas += 1
agent.step(state, action, reward, next_state, done)
state = next_state # roll over the state to next time step
if done: # exit loop if episode finished
break
# append performance metrics to lists
scores_window.append(score)
scores.append(score)
steps.append(n_steps)
yellow_bananas.append(n_yellow_bananas)
blue_bananas.append(n_blue_bananas)
epsilons.append(eps)
eps = max(eps_end, eps_decay * eps) # decrease epsilon
# track training episodes and save weight file checkpoints
if i_episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}\tEpsilon: {:.4f}'.format(i_episode, np.mean(scores_window), eps))
weights_file_name = WEIGHTS_PATH + 'checkpoint_episode_' + str(i_episode) + '.pth'
torch.save(agent.qnetwork_local.state_dict(), weights_file_name)
if np.mean(scores_window) >= 13.0:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode,
np.mean(scores_window)))
# save trained model weights with a timestamp
weights_file_name = WEIGHTS_PATH + 'checkpoint_solved' + str(int(round(time.time(), 0))) + '.pth'
torch.save(agent.qnetwork_local.state_dict(), weights_file_name)
break
return scores, steps, yellow_bananas, blue_bananas, epsilons
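# Illustrative usage sketch (all names below are hypothetical placeholders for the
# Unity environment, agent and weights path supplied by the surrounding project):
#   env = UnityEnvironment(file_name="Banana.app")
#   brain_name = env.brain_names[0]
#   agent = Agent(state_size=37, action_size=4, seed=0)
#   scores, steps, yellow, blue, eps = dqn(env, agent, "./weights/", brain_name)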
|
StarcoderdataPython
|
3385211
|
<reponame>ThomasThoren/geographic-data
"""
topo2geojson.py
Convert topojson to geojson
Example Usage:
python topo2geojson.py data.topojson data.geojson
The topojson tested here was created using the mbostock topojson CLI
created with --spherical coords and --properties turned on
Author: <NAME> (http://github.com/perrygeo)
Thanks to @sgillies for the topojson geometry logic
requires:
https://github.com/sgillies/topojson/blob/master/topojson.py
Next steps: how can this be generalized to a robust CLI converter?
"""
import json
import sys
from topojson import geometry
from shapely.geometry import asShape
topojson_path = sys.argv[1]
geojson_path = sys.argv[2]
with open(topojson_path, 'r') as fh:
f = fh.read()
topology = json.loads(f)
# file can be renamed, the first 'object' is more reliable
layername = list(topology['objects'].keys())[0]
features = topology['objects'][layername]['geometries']
scale = topology['transform']['scale']
trans = topology['transform']['translate']
with open(geojson_path, 'w') as dest:
fc = {'type': "FeatureCollection", 'features': []}
for id, tf in enumerate(features):
f = {'id': id, 'type': "Feature"}
f['properties'] = tf['properties'].copy()
# print tf
geommap = geometry(tf, topology['arcs'], scale, trans)
geom = asShape(geommap).buffer(0)
assert geom.is_valid
f['geometry'] = geom.__geo_interface__
fc['features'].append(f)
dest.write(json.dumps(fc))
|
StarcoderdataPython
|
4894775
|
<gh_stars>100-1000
import numpy as np
import time
from collections import defaultdict, OrderedDict
from future.utils import viewitems
from .base import Base
from .to_float import to_float
__all__ = [
"Simple", "TNT", "Timer", "Maximum", "Minimum", "Average", "Sum"
]
class Simple(Base):
def __init__(self, time_indexing=None, plotter=None, plot_title=None, plot_legend=None):
super(Simple, self).__init__(time_indexing, plotter, plot_title, plot_legend)
def reset(self):
self._val = 0.
return self
def _update(self, val, n=None):
self._val = to_float(val)
def state_dict_extra(self, state):
state['val'] = self._val
def load_state_dict_extra(self, state):
self._val = to_float(state['val'])
@property
def value(self):
return self._val
def __repr__(self):
repr_ = "Simple({time_indexing}, {plotter}, '{plot_title}', '{plot_legend}')"
repr_ = repr_.format(time_indexing=self._time_indexing,
plotter=None,
plot_title=self._plot_title,
plot_legend=self._plot_legend)
return repr_
class TNT(Base):
def __init__(self, tnt_meter, time_indexing=None, plotter=None, plot_title=None, plot_legend=None):
super(TNT, self).__init__(time_indexing, plotter, plot_title, plot_legend)
self._tnt_meter = tnt_meter
def reset(self):
self._tnt_meter.reset()
return self
def _update(self, *args, **kwargs):
self._tnt_meter.add(*args, **kwargs)
@property
def value(self):
return self._tnt_meter.value()
def __repr__(self):
repr_ = "TNT({tnt_meter}, {time_indexing}, {plotter}, '{plot_title}', '{plot_legend}')"
repr_ = repr_.format(tnt_meter=repr(self._tnt_meter),
time_indexing=self._time_indexing,
plotter=None,
plot_title=self._plot_title,
plot_legend=self._plot_legend)
return repr_
class Timer(Base):
def __init__(self, plotter=None, plot_title=None, plot_legend=None):
super(Timer, self).__init__(False, plotter, plot_title, plot_legend)
def reset(self):
self.start = time.time()
self.current = self.start
return self
def _update(self, current_time=None):
if current_time is not None:
self.current = to_float(current_time)
else:
self.current = time.time()
@property
def value(self):
return self.current - self.start
def state_dict_extra(self, state):
state['start'] = self.start
state['current'] = self.current
def load_state_dict_extra(self, state):
self.start = state['start']
self.current = state['current']
def __repr__(self):
repr_ = "Timer({plotter}, '{plot_title}', '{plot_legend}')"
repr_ = repr_.format(plotter=None,
plot_title=self._plot_title,
plot_legend=self._plot_legend)
return repr_
class Maximum(Base):
def __init__(self, time_indexing=None, plotter=None, plot_title=None, plot_legend=None):
super(Maximum, self).__init__(time_indexing, plotter, plot_title, plot_legend)
def reset(self):
self._val = -np.inf
self.hooks_on_new_max = ()
return self
def _update(self, val, n=None):
val = to_float(val)
if val > self._val:
self._val = val
for hook in self.hooks_on_new_max:
hook()
def hook_on_new_max(self, hook):
self.hooks_on_new_max += (hook,)
def state_dict_extra(self, state):
state['val'] = self._val
def load_state_dict_extra(self, state):
self._val = to_float(state['val'])
@property
def value(self):
return self._val
def __repr__(self):
repr_ = "Maximum({time_indexing}, {plotter}, '{plot_title}', '{plot_legend}')"
repr_ = repr_.format(time_indexing=self._time_indexing,
plotter=None,
plot_title=self._plot_title,
plot_legend=self._plot_legend)
return repr_
class Minimum(Base):
def __init__(self, time_indexing=None, plotter=None, plot_title=None, plot_legend=None):
super(Minimum, self).__init__(time_indexing, plotter, plot_title, plot_legend)
def reset(self):
self._val = np.inf
self.hooks_on_new_min = ()
return self
def _update(self, val, n=None):
val = to_float(val)
if val < self._val:
self._val = val
for hook in self.hooks_on_new_min:
hook()
def hook_on_new_min(self, hook):
self.hooks_on_new_min += (hook,)
def state_dict_extra(self, state):
state['val'] = self._val
def load_state_dict_extra(self, state):
self._val = to_float(state['val'])
@property
def value(self):
return self._val
def __repr__(self):
repr_ = "Minimum({time_indexing}, {plotter}, '{plot_title}', '{plot_legend}')"
repr_ = repr_.format(time_indexing=self._time_indexing,
plotter=None,
plot_title=self._plot_title,
plot_legend=self._plot_legend)
return repr_
class Accumulator_(Base):
"""
Credits to the authors of pytorch/tnt for this.
"""
def __init__(self, time_indexing, plotter=None, plot_title=None, plot_legend=None):
super(Accumulator_, self).__init__(time_indexing, plotter, plot_title, plot_legend)
def reset(self):
self._avg = 0
self._total_weight = 0
return self
def _update(self, val, weighting=1):
val, weighting = to_float(val), to_float(weighting)
assert weighting > 0
r = self._total_weight / (weighting + self._total_weight)
self._avg = r * self._avg + (1 - r) * val
self._total_weight += weighting
def state_dict_extra(self, state):
state['avg'] = self._avg
state['total_weight'] = self._total_weight
def load_state_dict_extra(self, state):
self._avg = state['avg']
self._total_weight = state['total_weight']
@property
def value(self):
raise NotImplementedError("Accumulator should be subclassed")
class Average(Accumulator_):
def __init__(self, time_indexing=None, plotter=None, plot_title=None, plot_legend=None):
super(Average, self).__init__(time_indexing, plotter, plot_title, plot_legend)
@property
def value(self):
return self._avg
def __repr__(self):
repr_ = "Average({time_indexing}, {plotter}, '{plot_title}', '{plot_legend}')"
repr_ = repr_.format(time_indexing=self._time_indexing,
plotter=None,
plot_title=self._plot_title,
plot_legend=self._plot_legend)
return repr_
class Sum(Accumulator_):
def __init__(self, time_indexing=None, plotter=None, plot_title=None, plot_legend=None):
super(Sum, self).__init__(time_indexing, plotter, plot_title, plot_legend)
@property
def value(self):
return self._avg * self._total_weight
def __repr__(self):
repr_ = "Sum({time_indexing}, {plotter}, '{plot_title}', '{plot_legend}')"
repr_ = repr_.format(time_indexing=self._time_indexing,
plotter=None,
plot_title=self._plot_title,
plot_legend=self._plot_legend)
return repr_
|
StarcoderdataPython
|
40150
|
<filename>tests/test_utils.py
import argparse
import distutils.spawn
import os
import subprocess
import pytest
from pytest import mark
from pytest_benchmark.utils import clonefunc
from pytest_benchmark.utils import get_commit_info
from pytest_benchmark.utils import get_project_name
from pytest_benchmark.utils import parse_columns
from pytest_benchmark.utils import parse_elasticsearch_storage
from pytest_benchmark.utils import parse_warmup
pytest_plugins = 'pytester',
f1 = lambda a: a # noqa
def f2(a):
return a
@mark.parametrize('f', [f1, f2])
def test_clonefunc(f):
assert clonefunc(f)(1) == f(1)
assert clonefunc(f)(1) == f(1)
def test_clonefunc_not_function():
assert clonefunc(1) == 1
@pytest.yield_fixture(params=(True, False))
def crazytestdir(request, testdir):
if request.param:
testdir.tmpdir.join('foo', 'bar').ensure(dir=1).chdir()
yield testdir
@pytest.fixture(params=('git', 'hg'))
def scm(request, testdir):
scm = request.param
if not distutils.spawn.find_executable(scm):
pytest.skip("%r not availabe on $PATH")
subprocess.check_call([scm, 'init', '.'])
if scm == 'git':
subprocess.check_call('git config user.email <EMAIL>'.split())
subprocess.check_call('git config user.name you'.split())
else:
testdir.tmpdir.join('.hg', 'hgrc').write("""
[ui]
username = you <<EMAIL>>
""")
return scm
def test_get_commit_info(scm, crazytestdir):
with open('test_get_commit_info.py', 'w') as fh:
fh.write('asdf')
subprocess.check_call([scm, 'add', 'test_get_commit_info.py'])
subprocess.check_call([scm, 'commit', '-m', 'asdf'])
out = get_commit_info()
branch = 'master' if scm == 'git' else 'default'
assert out['branch'] == branch
assert out.get('dirty') is False
assert 'id' in out
with open('test_get_commit_info.py', 'w') as fh:
fh.write('sadf')
out = get_commit_info()
assert out.get('dirty') is True
assert 'id' in out
def test_missing_scm_bins(scm, crazytestdir, monkeypatch):
with open('test_get_commit_info.py', 'w') as fh:
fh.write('asdf')
subprocess.check_call([scm, 'add', 'test_get_commit_info.py'])
subprocess.check_call([scm, 'commit', '-m', 'asdf'])
monkeypatch.setenv('PATH', os.getcwd())
out = get_commit_info()
assert 'No such file or directory' in out['error']
def test_get_branch_info(scm, testdir):
# make an initial commit
testdir.tmpdir.join('foo.txt').ensure(file=True)
subprocess.check_call([scm, 'add', 'foo.txt'])
subprocess.check_call([scm, 'commit', '-m', 'added foo.txt'])
branch = get_commit_info()['branch']
expected = 'master' if scm == 'git' else 'default'
assert branch == expected
#
# switch to a branch
if scm == 'git':
subprocess.check_call(['git', 'checkout', '-b', 'mybranch'])
else:
subprocess.check_call(['hg', 'branch', 'mybranch'])
branch = get_commit_info()['branch']
assert branch == 'mybranch'
#
# git only: test detached head
if scm == 'git':
subprocess.check_call(['git', 'commit', '--allow-empty', '-m', '...'])
subprocess.check_call(['git', 'commit', '--allow-empty', '-m', '...'])
subprocess.check_call(['git', 'checkout', 'HEAD~1'])
assert get_commit_info()['branch'] == '(detached head)'
def test_no_branch_info(testdir):
assert get_commit_info()['branch'] == '(unknown)'
def test_commit_info_error(testdir):
testdir.mkdir('.git')
info = get_commit_info()
assert info['branch'].lower() == '(unknown)'.lower()
assert info['error'].lower() == 'CalledProcessError(128, ' \
'\'fatal: Not a git repository (or any of the parent directories): .git\\n\')'.lower()
def test_parse_warmup():
assert parse_warmup('yes') is True
assert parse_warmup('on') is True
assert parse_warmup('true') is True
assert parse_warmup('off') is False
assert parse_warmup('off') is False
assert parse_warmup('no') is False
assert parse_warmup('') is True
assert parse_warmup('auto') in [True, False]
def test_parse_columns():
assert parse_columns('min,max') == ['min', 'max']
assert parse_columns('MIN, max ') == ['min', 'max']
with pytest.raises(argparse.ArgumentTypeError):
parse_columns('min,max,x')
@mark.parametrize('scm', [None, 'git', 'hg'])
@mark.parametrize('set_remote', [
False,
'https://example.com/pytest_benchmark_repo',
'https://example.com/pytest_benchmark_repo.git',
'c:\\foo\\bar\\pytest_benchmark_repo.git',
'<EMAIL>:pytest_benchmark_repo.git'])
def test_get_project_name(scm, set_remote, testdir):
if scm is None:
assert get_project_name().startswith("test_get_project_name")
return
if not distutils.spawn.find_executable(scm):
pytest.skip("%r not availabe on $PATH")
subprocess.check_call([scm, 'init', '.'])
if scm == 'git' and set_remote:
subprocess.check_call(['git', 'config', 'remote.origin.url', set_remote])
elif scm == 'hg' and set_remote:
set_remote = set_remote.replace('.git', '')
set_remote = set_remote.replace('.com:', '/')
testdir.tmpdir.join('.hg', 'hgrc').write(
"[ui]\n"
"username = you <<EMAIL>>\n"
"[paths]\n"
"default = %s\n" % set_remote)
if set_remote:
assert get_project_name() == "pytest_benchmark_repo"
else:
# use directory name if remote branch is not set
assert get_project_name().startswith("test_get_project_name")
@mark.parametrize('scm', ['git', 'hg'])
def test_get_project_name_broken(scm, testdir):
testdir.tmpdir.join('.' + scm).ensure(dir=1)
assert get_project_name() in ['test_get_project_name_broken0', 'test_get_project_name_broken1']
def test_get_project_name_fallback(testdir, capfd):
testdir.tmpdir.ensure('.hg', dir=1)
project_name = get_project_name()
assert project_name.startswith("test_get_project_name_fallback")
assert capfd.readouterr() == ('', '')
def test_get_project_name_fallback_broken_hgrc(testdir, capfd):
testdir.tmpdir.ensure('.hg', 'hgrc').write('[paths]\ndefault = /')
project_name = get_project_name()
assert project_name.startswith("test_get_project_name_fallback")
assert capfd.readouterr() == ('', '')
def test_parse_elasticsearch_storage():
benchdir = os.path.basename(os.getcwd())
assert parse_elasticsearch_storage("http://localhost:9200") == (
["http://localhost:9200"], "benchmark", "benchmark", benchdir)
assert parse_elasticsearch_storage("http://localhost:9200/benchmark2") == (
["http://localhost:9200"], "benchmark2", "benchmark", benchdir)
assert parse_elasticsearch_storage("http://localhost:9200/benchmark2/benchmark2") == (
["http://localhost:9200"], "benchmark2", "benchmark2", benchdir)
assert parse_elasticsearch_storage("http://host1:9200,host2:9200") == (
["http://host1:9200", "http://host2:9200"], "benchmark", "benchmark", benchdir)
assert parse_elasticsearch_storage("http://host1:9200,host2:9200/benchmark2") == (
["http://host1:9200", "http://host2:9200"], "benchmark2", "benchmark", benchdir)
assert parse_elasticsearch_storage("http://localhost:9200/benchmark2/benchmark2?project_name=project_name") == (
["http://localhost:9200"], "benchmark2", "benchmark2", "project_name")
|
StarcoderdataPython
|
217955
|
<gh_stars>0
import os
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from libs.threading import *
import time
import pickle
from libs.ustr import ustr
from libs.utils import *
from libs.namedImage import *
class FolderImagesSource():
def __init__(self, foldername):
self._folder_name = foldername
self._names_list = []
def GetStorageName(self):
return self._folder_name
def GetNames(self):
self._names_list = []
if os.path.exists(self._folder_name) and os.path.isdir(self._folder_name):
extensions = ['.%s' % fmt.data().decode("ascii").lower() for fmt in
QImageReader.supportedImageFormats()]
for root, dirs, files in os.walk(self._folder_name):
for file in files:
if file.lower().endswith(tuple(extensions)):
self._names_list.append(file)
natural_sort(self._names_list, key=lambda x: x.lower())
return self._names_list
def GetImage(self, filename):
relative_path = os.path.join(self._folder_name, filename)
path = ustr(os.path.abspath(relative_path))
if os.path.exists(path) and os.path.isfile(path):
img = NamedImage(filename)
res = img.FromFile(path)
return res, img
return False, None
def GetIndex(self, filename):
basename = os.path.basename(filename)
res = 0
if basename in self._names_list:
res = self._names_list.index(basename)
return res
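# Illustrative usage sketch (not part of the original module): one plausible way
# to iterate a folder of images with FolderImagesSource. The folder path and the
# printed fields are assumptions made up for demonstration only.
if __name__ == '__main__':
    source = FolderImagesSource('./images')
    for name in source.GetNames():
        ok, image = source.GetImage(name)
        if ok:
            print('loaded %s (index %d)' % (name, source.GetIndex(name)))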
|
StarcoderdataPython
|
12862481
|
<reponame>parag-hub/arrayfire-python
#!/usr/bin/env python
#######################################################
# Copyright (c) 2018, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
from time import time
import arrayfire as af
import os
import sys
def draw_corners(img, x, y, draw_len):
    # Draw a horizontal line of (draw_len * 2 + 1) pixels centered on the corner
    # Set only the green channel to 1 (green lines)
xmin = max(0, x - draw_len)
xmax = min(img.dims()[1], x + draw_len)
img[y, xmin : xmax, 0] = 0.0
img[y, xmin : xmax, 1] = 1.0
img[y, xmin : xmax, 2] = 0.0
# Draw vertical line of (draw_len * 2 + 1) pixels centered on the corner
    # Set only the green channel to 1 (green lines)
ymin = max(0, y - draw_len)
ymax = min(img.dims()[0], y + draw_len)
img[ymin : ymax, x, 0] = 0.0
img[ymin : ymax, x, 1] = 1.0
img[ymin : ymax, x, 2] = 0.0
return img
def harris_demo(console):
root_path = os.path.dirname(os.path.abspath(__file__))
file_path = root_path
if console:
file_path += "/../../assets/examples/images/square.png"
else:
file_path += "/../../assets/examples/images/man.jpg"
    img_color = af.load_image(file_path, True)
img = af.color_space(img_color, af.CSPACE.GRAY, af.CSPACE.RGB)
img_color /= 255.0
ix, iy = af.gradient(img)
ixx = ix * ix
ixy = ix * iy
iyy = iy * iy
# Compute a Gaussian kernel with standard deviation of 1.0 and length of 5 pixels
# These values can be changed to use a smaller or larger window
gauss_filt = af.gaussian_kernel(5, 5, 1.0, 1.0)
# Filter second order derivatives
ixx = af.convolve(ixx, gauss_filt)
ixy = af.convolve(ixy, gauss_filt)
iyy = af.convolve(iyy, gauss_filt)
# Calculate trace
itr = ixx + iyy
# Calculate determinant
idet = ixx * iyy - ixy * ixy
# Calculate Harris response
response = idet - 0.04 * (itr * itr)
# Get maximum response for each 3x3 neighborhood
mask = af.constant(1, 3, 3)
max_resp = af.dilate(response, mask)
# Discard responses that are not greater than threshold
corners = response > 1e5
corners = corners * response
# Discard responses that are not equal to maximum neighborhood response,
# scale them to original value
corners = (corners == max_resp) * corners
# Copy device array to python list on host
corners_list = corners.to_list()
draw_len = 3
good_corners = 0
for x in range(img_color.dims()[1]):
for y in range(img_color.dims()[0]):
if corners_list[x][y] > 1e5:
img_color = draw_corners(img_color, x, y, draw_len)
good_corners += 1
print("Corners found: {}".format(good_corners))
if not console:
# Previews color image with green crosshairs
wnd = af.Window(512, 512, "Harris Feature Detector")
while not wnd.close():
wnd.image(img_color)
else:
idx = af.where(corners)
corners_x = idx / float(corners.dims()[0])
corners_y = idx % float(corners.dims()[0])
print(corners_x)
print(corners_y)
if __name__ == "__main__":
if (len(sys.argv) > 1):
af.set_device(int(sys.argv[1]))
console = (sys.argv[2] == '-') if len(sys.argv) > 2 else False
af.info()
print("** ArrayFire Harris Corner Detector Demo **\n")
harris_demo(console)
|
StarcoderdataPython
|
6554046
|
import command_system
import db
import commands.st3ch1 as st3ch1
import commands.st4_1ch0 as st4_1ch0
def next(vk_id, body):
candidates = db.get_candidates(vk_id)
cand_keys = []
for num, keys in enumerate(candidates):
if keys[8]:
fullname = keys[0].lower() + ' ' + keys[1].lower() + ' ' + keys[2].lower()
fullname = fullname.strip()
vars = [fullname]
vars.append(str(num + 1) + '. ' + fullname)
vars.append(str(num + 1))
vars.append(str(num + 1) + '.')
vars.append(keys[0].lower())
vars.append(num)
cand_keys.append(vars)
choice = None
for cand in cand_keys:
if body in cand:
choice = cand[5]
break
if choice is None:
return None, None
db.make_choice(vk_id, candidates[choice][7])
candidates = db.get_candidates(vk_id)
choosen = []
not_choosen = []
for num, cand in enumerate(candidates):
fullname = cand[0] + ' ' + cand[1] + ' ' + cand[2]
fullname = fullname.strip()
if cand[8]:
choosen.append(str(num + 1) + '. ' + fullname)
else:
not_choosen.append(str(num + 1) + '. ' + fullname)
if choosen == []:
return st3ch1.next(vk_id, '1')
if not_choosen == []:
return st4_1ch0.next(vk_id, '0')
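    # The user-facing strings below are Russian; rough English translations:
    #   "You have selected the candidates:" / "You can also vote for other candidates:"
    #   "To do so, pick one of the remaining candidates, or send their number."
    #   "To undo a selection, press the candidate's button again or send their number."
    #   "To finish voting, choose the corresponding option, or send 0."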
msg = 'Вы выбрали кандидатов:\n'
for cand in choosen:
msg += cand + '\n'
msg += '\nВы также можете проголосовать и за других кандидатов:\n'
for cand in not_choosen:
msg += cand + '\n'
msg += '\nДля этого выберите кого-то из оставшихся кандидатов, либо отправьте его номер.\n\n'
msg += 'Чтобы отменить выбор кандидата, нажмите на кнопку с ним повторно или отправьте его номер.\n\n'
msg += 'Для того, чтобы завершить голосование, выберите соответствующий вариант, либо отправьте 0'
return msg, '4.1'
command_0 = command_system.Command(['4.1.1'])
command_0.description = 'Голосование'
command_0.process = next
|
StarcoderdataPython
|
357759
|
<reponame>konnase/DI-engine<gh_stars>1-10
import pytest
import numpy as np
import gym
from easydict import EasyDict
from dizoo.atari.envs import AtariMultiDiscreteEnv
@pytest.mark.unittest
class TestAtariMultiDiscreteEnv:
def test_pong(self):
env_num = 3
cfg = {'env_id': 'PongNoFrameskip-v4', 'frame_stack': 4, 'is_train': True, 'multi_env_num': env_num}
cfg = EasyDict(cfg)
pong_env = AtariMultiDiscreteEnv(cfg)
pong_env.seed(0)
obs = pong_env.reset()
assert obs.shape == (cfg.frame_stack * env_num, 84, 84)
act_dim = pong_env.info().act_space.shape[0]
while True:
random_action = [np.random.choice(range(act_dim), size=(1, )) for _ in range(env_num)]
timestep = pong_env.step(random_action)
assert timestep.obs.shape == (cfg.frame_stack * env_num, 84, 84)
assert timestep.reward.shape == (1, )
assert isinstance(timestep, list)
if timestep.done:
assert 'final_eval_reward' in timestep.info, timestep.info
break
print(pong_env.info(), 'final_eval_reward: {}'.format(timestep.info['final_eval_reward']))
pong_env.close()
|
StarcoderdataPython
|
9793707
|
"""
This module is only responsible for the type of GUI.
"""
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'usbadc10gui/design.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from qwt import QwtPlot
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1224, 596)
self.centralwidget = QtWidgets.QWidget(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setMinimumSize(QtCore.QSize(0, 0))
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout_2.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)
self.verticalLayout.setContentsMargins(-1, -1, -1, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.b_1_blue = QtWidgets.QCheckBox(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(133, 133, 133))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.b_1_blue.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Noto Sans")
self.b_1_blue.setFont(font)
self.b_1_blue.setChecked(True)
self.b_1_blue.setObjectName("b_1_blue")
self.verticalLayout.addWidget(self.b_1_blue)
self.b_2_green = QtWidgets.QCheckBox(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(133, 133, 133))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.b_2_green.setPalette(palette)
self.b_2_green.setChecked(True)
self.b_2_green.setObjectName("b_2_green")
self.verticalLayout.addWidget(self.b_2_green)
self.b_3_red = QtWidgets.QCheckBox(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(133, 133, 133))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.b_3_red.setPalette(palette)
self.b_3_red.setChecked(True)
self.b_3_red.setObjectName("b_3_red")
self.verticalLayout.addWidget(self.b_3_red)
self.b_4_black = QtWidgets.QCheckBox(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(133, 133, 133))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.b_4_black.setPalette(palette)
self.b_4_black.setChecked(True)
self.b_4_black.setObjectName("b_4_black")
self.verticalLayout.addWidget(self.b_4_black)
self.b_5_orange = QtWidgets.QCheckBox(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(133, 133, 133))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.b_5_orange.setPalette(palette)
self.b_5_orange.setChecked(True)
self.b_5_orange.setTristate(False)
self.b_5_orange.setObjectName("b_5_orange")
self.verticalLayout.addWidget(self.b_5_orange)
self.b_6_blue_light = QtWidgets.QCheckBox(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(133, 133, 133))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.b_6_blue_light.setPalette(palette)
self.b_6_blue_light.setChecked(True)
self.b_6_blue_light.setObjectName("b_6_blue_light")
self.verticalLayout.addWidget(self.b_6_blue_light)
self.b_7_green_light = QtWidgets.QCheckBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.b_7_green_light.sizePolicy().hasHeightForWidth())
self.b_7_green_light.setSizePolicy(sizePolicy)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 255, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 255, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(133, 133, 133))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.b_7_green_light.setPalette(palette)
self.b_7_green_light.setChecked(True)
self.b_7_green_light.setObjectName("b_7_green_light")
self.verticalLayout.addWidget(self.b_7_green_light)
self.b_8_pig = QtWidgets.QCheckBox(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(133, 133, 133))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.b_8_pig.setPalette(palette)
self.b_8_pig.setChecked(True)
self.b_8_pig.setObjectName("b_8_pig")
self.verticalLayout.addWidget(self.b_8_pig)
self.b_9_gray = QtWidgets.QCheckBox(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(111, 111, 111))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(111, 111, 111))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(133, 133, 133))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.b_9_gray.setPalette(palette)
self.b_9_gray.setChecked(True)
self.b_9_gray.setObjectName("b_9_gray")
self.verticalLayout.addWidget(self.b_9_gray)
self.b_10_brown = QtWidgets.QCheckBox(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(170, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(133, 133, 133))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.b_10_brown.setPalette(palette)
self.b_10_brown.setChecked(True)
self.b_10_brown.setObjectName("b_10_brown")
self.verticalLayout.addWidget(self.b_10_brown)
self.horizontalLayout_4.addLayout(self.verticalLayout)
self.graphWidget = QwtPlot(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.graphWidget.sizePolicy().hasHeightForWidth())
self.graphWidget.setSizePolicy(sizePolicy)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
self.graphWidget.setCanvasBackground(brush)
# self.graphWidget.setAutoReplot(False)
self.graphWidget.setObjectName("graphWidget")
self.horizontalLayout_4.addWidget(self.graphWidget)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.start_stop = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.start_stop.sizePolicy().hasHeightForWidth())
self.start_stop.setSizePolicy(sizePolicy)
self.start_stop.setObjectName("start_stop")
self.verticalLayout_2.addWidget(self.start_stop)
self.start_stop_recording = QtWidgets.QPushButton(self.centralwidget)
self.start_stop_recording.setObjectName("start_stop_recording")
self.verticalLayout_2.addWidget(self.start_stop_recording)
self.size_of_data_txt = QtWidgets.QLabel(self.centralwidget)
self.size_of_data_txt.setObjectName("size_of_data_txt")
self.verticalLayout_2.addWidget(self.size_of_data_txt)
self.size_of_data_out = QtWidgets.QLabel(self.centralwidget)
self.size_of_data_out.setText("")
self.size_of_data_out.setObjectName("size_of_data_out")
self.verticalLayout_2.addWidget(self.size_of_data_out)
self.save_button = QtWidgets.QPushButton(self.centralwidget)
self.save_button.setObjectName("save_button")
self.verticalLayout_2.addWidget(self.save_button)
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout_2.addWidget(self.line)
self.device_label = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.device_label.sizePolicy().hasHeightForWidth())
self.device_label.setSizePolicy(sizePolicy)
self.device_label.setObjectName("device_label")
self.verticalLayout_2.addWidget(self.device_label)
self.comboBox_ports = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_ports.setEditable(True)
self.comboBox_ports.setObjectName("comboBox_ports")
self.verticalLayout_2.addWidget(self.comboBox_ports)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.connect_button = QtWidgets.QPushButton(self.centralwidget)
self.connect_button.setObjectName("connect_button")
self.horizontalLayout_3.addWidget(self.connect_button)
self.disconnect_button = QtWidgets.QPushButton(self.centralwidget)
self.disconnect_button.setObjectName("disconnect_button")
self.horizontalLayout_3.addWidget(self.disconnect_button)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.rescan_botton = QtWidgets.QPushButton(self.centralwidget)
self.rescan_botton.setObjectName("rescan_botton")
self.verticalLayout_2.addWidget(self.rescan_botton)
self.period = QtWidgets.QLabel(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.period.sizePolicy().hasHeightForWidth())
self.period.setSizePolicy(sizePolicy)
self.period.setObjectName("period")
self.verticalLayout_2.addWidget(self.period)
self.comboBox_period_val = QtWidgets.QComboBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_period_val.sizePolicy().hasHeightForWidth())
self.comboBox_period_val.setSizePolicy(sizePolicy)
self.comboBox_period_val.setInputMethodHints(QtCore.Qt.ImhLatinOnly)
self.comboBox_period_val.setEditable(False)
self.comboBox_period_val.setMaxVisibleItems(10)
self.comboBox_period_val.setObjectName("comboBox_period_val")
self.comboBox_period_val.addItem("")
self.comboBox_period_val.addItem("")
self.comboBox_period_val.addItem("")
self.comboBox_period_val.addItem("")
self.comboBox_period_val.addItem("")
self.comboBox_period_val.addItem("")
self.comboBox_period_val.addItem("")
self.comboBox_period_val.addItem("")
self.comboBox_period_val.addItem("")
self.comboBox_period_val.addItem("")
self.comboBox_period_val.addItem("")
self.comboBox_period_val.addItem("")
self.comboBox_period_val.addItem("")
self.comboBox_period_val.addItem("")
self.verticalLayout_2.addWidget(self.comboBox_period_val)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem)
self.horizontalLayout_4.addLayout(self.verticalLayout_2)
self.horizontalLayout_2.addLayout(self.horizontalLayout_4)
MainWindow.setCentralWidget(self.centralwidget)
self.menuBar = QtWidgets.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 1224, 29))
self.menuBar.setObjectName("menuBar")
self.menuFILE = QtWidgets.QMenu(self.menuBar)
self.menuFILE.setObjectName("menuFILE")
self.menuTOOLS = QtWidgets.QMenu(self.menuBar)
self.menuTOOLS.setObjectName("menuTOOLS")
self.menuABOUT = QtWidgets.QMenu(self.menuBar)
self.menuABOUT.setObjectName("menuABOUT")
self.menuVIEW = QtWidgets.QMenu(self.menuBar)
self.menuVIEW.setObjectName("menuVIEW")
self.menuChannels_2 = QtWidgets.QMenu(self.menuVIEW)
self.menuChannels_2.setObjectName("menuChannels_2")
MainWindow.setMenuBar(self.menuBar)
self.actionConnect = QtWidgets.QAction(MainWindow)
self.actionConnect.setObjectName("actionConnect")
self.actionDisconnect = QtWidgets.QAction(MainWindow)
self.actionDisconnect.setObjectName("actionDisconnect")
self.actionStart_Stop_getting_data = QtWidgets.QAction(MainWindow)
self.actionStart_Stop_getting_data.setObjectName("actionStart_Stop_getting_data")
self.actionStart_stop_recording = QtWidgets.QAction(MainWindow)
self.actionStart_stop_recording.setObjectName("actionStart_stop_recording")
self.actionSave = QtWidgets.QAction(MainWindow)
self.actionSave.setObjectName("actionSave")
self.actionOut = QtWidgets.QAction(MainWindow)
self.actionOut.setObjectName("actionOut")
self.actionThis_Application = QtWidgets.QAction(MainWindow)
self.actionThis_Application.setObjectName("actionThis_Application")
self.action1_Blue = QtWidgets.QAction(MainWindow)
self.action1_Blue.setCheckable(True)
self.action1_Blue.setChecked(True)
self.action1_Blue.setObjectName("action1_Blue")
self.action2_Green = QtWidgets.QAction(MainWindow)
self.action2_Green.setCheckable(True)
self.action2_Green.setChecked(True)
self.action2_Green.setObjectName("action2_Green")
self.action3_Red = QtWidgets.QAction(MainWindow)
self.action3_Red.setCheckable(True)
self.action3_Red.setChecked(True)
self.action3_Red.setObjectName("action3_Red")
self.action4_Black = QtWidgets.QAction(MainWindow)
self.action4_Black.setCheckable(True)
self.action4_Black.setChecked(True)
self.action4_Black.setObjectName("action4_Black")
self.action5_Orange = QtWidgets.QAction(MainWindow)
self.action5_Orange.setCheckable(True)
self.action5_Orange.setChecked(True)
self.action5_Orange.setObjectName("action5_Orange")
self.action6_Blue_Light = QtWidgets.QAction(MainWindow)
self.action6_Blue_Light.setCheckable(True)
self.action6_Blue_Light.setChecked(True)
self.action6_Blue_Light.setObjectName("action6_Blue_Light")
self.action7_Green_Light = QtWidgets.QAction(MainWindow)
self.action7_Green_Light.setCheckable(True)
self.action7_Green_Light.setChecked(True)
self.action7_Green_Light.setObjectName("action7_Green_Light")
self.action8_Pig = QtWidgets.QAction(MainWindow)
self.action8_Pig.setCheckable(True)
self.action8_Pig.setChecked(True)
self.action8_Pig.setObjectName("action8_Pig")
self.action9_Gray = QtWidgets.QAction(MainWindow)
self.action9_Gray.setCheckable(True)
self.action9_Gray.setChecked(True)
self.action9_Gray.setObjectName("action9_Gray")
self.action10_Brown = QtWidgets.QAction(MainWindow)
self.action10_Brown.setCheckable(True)
self.action10_Brown.setChecked(True)
self.action10_Brown.setObjectName("action10_Brown")
self.actionRescan = QtWidgets.QAction(MainWindow)
self.actionRescan.setObjectName("actionRescan")
self.menuFILE.addAction(self.actionSave)
self.menuFILE.addSeparator()
self.menuFILE.addAction(self.actionOut)
self.menuTOOLS.addAction(self.actionConnect)
self.menuTOOLS.addAction(self.actionDisconnect)
self.menuTOOLS.addAction(self.actionStart_Stop_getting_data)
self.menuTOOLS.addAction(self.actionStart_stop_recording)
self.menuTOOLS.addAction(self.actionRescan)
self.menuABOUT.addAction(self.actionThis_Application)
self.menuChannels_2.addAction(self.action1_Blue)
self.menuChannels_2.addAction(self.action2_Green)
self.menuChannels_2.addAction(self.action3_Red)
self.menuChannels_2.addAction(self.action4_Black)
self.menuChannels_2.addAction(self.action5_Orange)
self.menuChannels_2.addAction(self.action6_Blue_Light)
self.menuChannels_2.addAction(self.action7_Green_Light)
self.menuChannels_2.addAction(self.action8_Pig)
self.menuChannels_2.addAction(self.action9_Gray)
self.menuChannels_2.addAction(self.action10_Brown)
self.menuVIEW.addAction(self.menuChannels_2.menuAction())
self.menuBar.addAction(self.menuFILE.menuAction())
self.menuBar.addAction(self.menuVIEW.menuAction())
self.menuBar.addAction(self.menuTOOLS.menuAction())
self.menuBar.addAction(self.menuABOUT.menuAction())
self.retranslateUi(MainWindow)
self.comboBox_period_val.setCurrentIndex(5)
self.actionOut.triggered.connect(MainWindow.close)
self.action1_Blue.triggered.connect(self.b_1_blue.toggle)
self.action2_Green.triggered.connect(self.b_2_green.toggle)
self.action3_Red.triggered.connect(self.b_3_red.toggle)
self.action4_Black.triggered.connect(self.b_4_black.toggle)
self.action10_Brown.triggered.connect(self.b_10_brown.toggle)
self.action9_Gray.triggered.connect(self.b_9_gray.toggle)
self.action8_Pig.triggered.connect(self.b_8_pig.toggle)
self.action7_Green_Light.triggered.connect(self.b_7_green_light.toggle)
self.action6_Blue_Light.triggered.connect(self.b_6_blue_light.toggle)
self.action5_Orange.triggered.connect(self.b_5_orange.toggle)
self.actionSave.triggered.connect(self.save_button.click)
self.actionStart_Stop_getting_data.triggered.connect(self.start_stop.click)
self.actionStart_stop_recording.triggered.connect(self.start_stop_recording.click)
self.actionConnect.triggered.connect(self.connect_button.click)
self.actionDisconnect.triggered.connect(self.disconnect_button.click)
self.b_1_blue.clicked.connect(self.action1_Blue.toggle)
self.b_2_green.clicked.connect(self.action2_Green.toggle)
self.b_3_red.clicked.connect(self.action3_Red.toggle)
self.b_4_black.clicked.connect(self.action4_Black.toggle)
self.b_5_orange.clicked.connect(self.action5_Orange.toggle)
self.b_6_blue_light.clicked.connect(self.action6_Blue_Light.toggle)
self.b_7_green_light.clicked.connect(self.action7_Green_Light.toggle)
self.b_9_gray.clicked.connect(self.action9_Gray.toggle)
self.b_10_brown.clicked.connect(self.action10_Brown.toggle)
self.b_8_pig.clicked.connect(self.action8_Pig.toggle)
self.actionRescan.triggered.connect(self.rescan_botton.click)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "UALab"))
self.b_1_blue.setText(_translate("MainWindow", "ADC1"))
self.b_2_green.setText(_translate("MainWindow", "ADC2"))
self.b_3_red.setText(_translate("MainWindow", "ADC3"))
self.b_4_black.setText(_translate("MainWindow", "ADC4"))
self.b_5_orange.setText(_translate("MainWindow", "ADC5"))
self.b_6_blue_light.setText(_translate("MainWindow", "ADC6"))
self.b_7_green_light.setText(_translate("MainWindow", "ADC7"))
self.b_8_pig.setText(_translate("MainWindow", "ADC8"))
self.b_9_gray.setText(_translate("MainWindow", "ADC9"))
self.b_10_brown.setText(_translate("MainWindow", "ADC10"))
self.start_stop.setText(_translate("MainWindow", "Start/Stop getting data"))
self.start_stop_recording.setText(_translate("MainWindow", "Start/stop recording"))
self.size_of_data_txt.setText(_translate("MainWindow", "Size of data in RAM"))
self.save_button.setText(_translate("MainWindow", "Save"))
self.device_label.setText(_translate("MainWindow", "Enter device address"))
self.connect_button.setText(_translate("MainWindow", "Connect"))
self.disconnect_button.setText(_translate("MainWindow", "Disconnect"))
self.rescan_botton.setText(_translate("MainWindow", "Rescan"))
self.period.setText(_translate("MainWindow", "Period"))
self.comboBox_period_val.setCurrentText(_translate("MainWindow", "200 ms"))
self.comboBox_period_val.setItemText(0, _translate("MainWindow", "<1 ms"))
self.comboBox_period_val.setItemText(1, _translate("MainWindow", "10 ms"))
self.comboBox_period_val.setItemText(2, _translate("MainWindow", "20 ms"))
self.comboBox_period_val.setItemText(3, _translate("MainWindow", "50 ms"))
self.comboBox_period_val.setItemText(4, _translate("MainWindow", "100 ms"))
self.comboBox_period_val.setItemText(5, _translate("MainWindow", "200 ms"))
self.comboBox_period_val.setItemText(6, _translate("MainWindow", "500 ms"))
self.comboBox_period_val.setItemText(7, _translate("MainWindow", "1 s"))
self.comboBox_period_val.setItemText(8, _translate("MainWindow", "5 s"))
self.comboBox_period_val.setItemText(9, _translate("MainWindow", "10 s"))
self.comboBox_period_val.setItemText(10, _translate("MainWindow", "1 m"))
self.comboBox_period_val.setItemText(11, _translate("MainWindow", "5 m"))
self.comboBox_period_val.setItemText(12, _translate("MainWindow", "10 m"))
self.comboBox_period_val.setItemText(13, _translate("MainWindow", "30 m"))
self.menuFILE.setTitle(_translate("MainWindow", "File"))
self.menuTOOLS.setTitle(_translate("MainWindow", "Tools"))
self.menuABOUT.setTitle(_translate("MainWindow", "About"))
self.menuVIEW.setTitle(_translate("MainWindow", "View"))
self.menuChannels_2.setTitle(_translate("MainWindow", "Channels"))
self.actionConnect.setText(_translate("MainWindow", "Connect"))
self.actionConnect.setShortcut(_translate("MainWindow", "Ctrl+K"))
self.actionDisconnect.setText(_translate("MainWindow", "Disconnect"))
self.actionDisconnect.setShortcut(_translate("MainWindow", "Ctrl+D"))
self.actionStart_Stop_getting_data.setText(_translate("MainWindow", "Start/Stop getting data"))
self.actionStart_stop_recording.setText(_translate("MainWindow", "Start/stop recording"))
self.actionStart_stop_recording.setShortcut(_translate("MainWindow", "Space"))
self.actionSave.setText(_translate("MainWindow", "Save ..."))
self.actionSave.setShortcut(_translate("MainWindow", "Ctrl+S"))
self.actionOut.setText(_translate("MainWindow", "Exit"))
self.actionOut.setShortcut(_translate("MainWindow", "Ctrl+Q"))
self.actionThis_Application.setText(_translate("MainWindow", "This Application"))
self.action1_Blue.setText(_translate("MainWindow", "ADC1"))
self.action1_Blue.setShortcut(_translate("MainWindow", "Ctrl+1"))
self.action2_Green.setText(_translate("MainWindow", "ADC2"))
self.action2_Green.setShortcut(_translate("MainWindow", "Ctrl+2"))
self.action3_Red.setText(_translate("MainWindow", "ADC3"))
self.action3_Red.setShortcut(_translate("MainWindow", "Ctrl+3"))
self.action4_Black.setText(_translate("MainWindow", "ADC4"))
self.action4_Black.setShortcut(_translate("MainWindow", "Ctrl+4"))
self.action5_Orange.setText(_translate("MainWindow", "ADC5"))
self.action5_Orange.setShortcut(_translate("MainWindow", "Ctrl+5"))
self.action6_Blue_Light.setText(_translate("MainWindow", "ADC6"))
self.action6_Blue_Light.setShortcut(_translate("MainWindow", "Ctrl+6"))
self.action7_Green_Light.setText(_translate("MainWindow", "ADC7"))
self.action7_Green_Light.setShortcut(_translate("MainWindow", "Ctrl+7"))
self.action8_Pig.setText(_translate("MainWindow", "ADC8"))
self.action8_Pig.setShortcut(_translate("MainWindow", "Ctrl+8"))
self.action9_Gray.setText(_translate("MainWindow", "ADC9"))
self.action9_Gray.setShortcut(_translate("MainWindow", "Ctrl+9"))
self.action10_Brown.setText(_translate("MainWindow", "ADC10"))
self.action10_Brown.setShortcut(_translate("MainWindow", "Ctrl+0"))
self.actionRescan.setText(_translate("MainWindow", "Rescan"))
self.actionRescan.setShortcut(_translate("MainWindow", "Ctrl+R"))
|
StarcoderdataPython
|
3458196
|
<reponame>brunocvs7/dstools
import pandas as pd
import numpy as np
from sklearn.metrics import recall_score, precision_score, f1_score
import matplotlib.pyplot as plt
def eval_thresh(y_real, y_proba):
'''
Check the metrics varying the classification threshold
Parameters:
y_real (np.array): An array containing the ground truth labels
y_proba (np.array): An array containing the probabilities of label equal 1
Returns:
df_metrics (pd.DataFrame): A pandas dataframe containing the thresholds and metrics associated
'''
recall_score_thresh = []
precision_score_thresh = []
f1_score_thresh = []
q_rate = []
tx = len(y_real)
for thresh in np.arange(0,1,0.05):
y_thresh = [1 if x >= thresh else 0 for x in y_proba]
q_rate.append(np.sum(y_thresh)/tx)
recall_score_thresh.append(recall_score(y_real, y_thresh))
precision_score_thresh.append(precision_score(y_real, y_thresh))
f1_score_thresh.append(f1_score(y_real, y_thresh))
dict_metrics = {'threshold':np.arange(0,1,0.05),'recall_score':recall_score_thresh,\
'precision_score':precision_score_thresh,'f1_score':f1_score_thresh, 'queue_rate':q_rate}
df_metrics = pd.DataFrame(dict_metrics)
return df_metrics
def plot_metrics(df):
'''
plot metrics calculated by the function eval_thresh
Parameters:
df (pandas.dataframe): A data frame that is the result of eval_thresh function
Returns:
None
'''
plt.plot(df['threshold'],df['recall_score'], '-.')
plt.plot(df['threshold'],df['precision_score'], '-.')
plt.plot(df['threshold'],df['f1_score'],'-.')
plt.plot(df['threshold'],df['queue_rate'],'-.')
plt.legend(['recall','precision','f1_score', 'queue_rate'])
plt.xlabel("Threshold")
plt.ylabel("Metric")
    plt.show()
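# Illustrative usage sketch (not from the original module): score a made-up set of
# labels/probabilities over candidate thresholds, then plot the resulting curves.
# y_true and y_score below are synthetic stand-ins, not real data.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    y_true = rng.randint(0, 2, size=200)
    y_score = np.clip(0.6 * y_true + 0.5 * rng.rand(200), 0, 1)
    metrics_df = eval_thresh(y_true, y_score)
    plot_metrics(metrics_df)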
|
StarcoderdataPython
|
4819458
|
<gh_stars>0
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
"""
IDA Plugin SDK API wrapper: entry
"""
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_ida_entry', [dirname(__file__)])
except ImportError:
import _ida_entry
return _ida_entry
if fp is not None:
try:
_mod = imp.load_module('_ida_entry', fp, pathname, description)
finally:
fp.close()
return _mod
_ida_entry = swig_import_helper()
del swig_import_helper
else:
import _ida_entry
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
try:
import weakref
weakref_proxy = weakref.proxy
except:
weakref_proxy = lambda x: x
import ida_idaapi
import sys
_BC695 = sys.modules["__main__"].IDAPYTHON_COMPAT_695_API
if _BC695:
def bc695redef(func):
ida_idaapi._BC695.replace_fun(func)
return func
def get_entry_qty(*args):
"""
get_entry_qty() -> size_t
Get number of entry points.
"""
return _ida_entry.get_entry_qty(*args)
AEF_UTF8 = _ida_entry.AEF_UTF8
"""
the name is given in UTF-8 (default)
"""
AEF_IDBENC = _ida_entry.AEF_IDBENC
"""
the name is given in the IDB encoding; non-ASCII bytes will be decoded
accordingly. Specifying AEF_IDBENC also implies AEF_NODUMMY
"""
AEF_NODUMMY = _ida_entry.AEF_NODUMMY
"""
automatically prepend the name with '_' if
it begins with a dummy suffix. See also AEF_IDBENC
"""
def add_entry(*args):
"""
add_entry(ord, ea, name, makecode, flags=0x0) -> bool
Add an entry point to the list of entry points.
@param ord: ordinal number if ordinal number is equal to 'ea' then
ordinal is not used (C++: uval_t)
@param ea: linear address (C++: ea_t)
@param name: name of entry point. If the specified location already
has a name, the old name will be appended to the regular
comment. If name == NULL, then the old name will be
retained. (C++: const char *)
@param makecode: should the kernel convert bytes at the entry point to
instruction(s) (C++: bool)
@param flags: See AEF_* (C++: int)
@return: success (currently always true)
"""
return _ida_entry.add_entry(*args)
def get_entry_ordinal(*args):
"""
get_entry_ordinal(idx) -> uval_t
Get ordinal number of an entry point.
@param idx: internal number of entry point. Should be in the range 0..
get_entry_qty() -1 (C++: size_t)
@return: ordinal number or 0.
"""
return _ida_entry.get_entry_ordinal(*args)
def get_entry(*args):
"""
get_entry(ord) -> ea_t
Get entry point address by its ordinal
@param ord: ordinal number of entry point (C++: uval_t)
@return: address or BADADDR
"""
return _ida_entry.get_entry(*args)
def get_entry_name(*args):
"""
get_entry_name(ord) -> ssize_t
Get name of the entry point by its ordinal.
@param ord: ordinal number of entry point (C++: uval_t)
@return: size of entry name or -1
"""
return _ida_entry.get_entry_name(*args)
def rename_entry(*args):
"""
rename_entry(ord, name, flags=0x0) -> bool
Rename entry point.
@param ord: ordinal number of the entry point (C++: uval_t)
@param name: name of entry point. If the specified location already
has a name, the old name will be appended to a repeatable
comment. (C++: const char *)
@param flags: See AEF_* (C++: int)
@return: success
"""
return _ida_entry.rename_entry(*args)
def set_entry_forwarder(*args):
"""
set_entry_forwarder(ord, name, flags=0x0) -> bool
Set forwarder name for ordinal.
@param ord: ordinal number of the entry point (C++: uval_t)
@param name: forwarder name for entry point. (C++: const char *)
@param flags: See AEF_* (C++: int)
@return: success
"""
return _ida_entry.set_entry_forwarder(*args)
def get_entry_forwarder(*args):
"""
get_entry_forwarder(ord) -> ssize_t
Get forwarder name for the entry point by its ordinal.
@param ord: ordinal number of entry point (C++: uval_t)
@return: size of entry forwarder name or -1
"""
return _ida_entry.get_entry_forwarder(*args)
|
StarcoderdataPython
|
4808343
|
from tools.general import load_input_list
def resolve_bsp(bsp_code, low_char, high_char):
lower = 0
upper = 2 ** len(bsp_code) - 1
for c in bsp_code:
mid = (lower + upper) // 2
if c == high_char:
lower = mid + 1
elif c == low_char:
upper = mid
else:
raise ValueError(f"Invalid character '{c}'")
return lower
def seat_id(bp_code):
row = resolve_bsp(bp_code[:7], 'F', 'B')
col = resolve_bsp(bp_code[7:], 'L', 'R')
return (8 * row) + col
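# Illustrative sanity check (not in the original solution): the example boarding
# pass from the puzzle statement, FBFBBFFRLR, decodes to row 44, column 5, ID 357.
assert seat_id("FBFBBFFRLR") == 357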
taken_seats = sorted(seat_id(s) for s in load_input_list("day5.txt").split('\n'))
print(f"Part 1 => {taken_seats[-1]}")
prev = taken_seats[0]
for s in taken_seats[1:]:
if s - prev == 2:
print(f"Part 2 => {s - 1}")
break
prev = s
|
StarcoderdataPython
|
3243121
|
<filename>twitter/util.py
"""
Internal utility functions.
`htmlentitydecode` came from here:
http://wiki.python.org/moin/EscapingHtml
"""
import re
from htmlentitydefs import name2codepoint
def htmlentitydecode(s):
return re.sub(
'&(%s);' % '|'.join(name2codepoint),
lambda m: unichr(name2codepoint[m.group(1)]), s)
__all__ = ["htmlentitydecode"]
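# Illustrative example (not part of the original module). Note this file targets
# Python 2 (htmlentitydefs, unichr); under Python 2 the call below decodes named
# HTML entities back to characters, e.g. 'fish &amp; chips' -> u'fish & chips'.
if __name__ == "__main__":
    print htmlentitydecode('fish &amp; chips')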
|
StarcoderdataPython
|
9642459
|
<gh_stars>10-100
import math
from src.objs import *
# src -> https://stackoverflow.com/a/14822210
#: Convert bytes into human-readable size
def convertSize(byte):
if byte == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(byte, 1024)))
p = math.pow(1024, i)
s = round(byte / p, 2)
return "%s %s" % (s, size_name[i])
#: Convert seconds into human-readable time
def convertTime(seconds):
if seconds == 0:
return "0 Sec"
size_name = ("Sec", "Min", "Hrs")
i = int(math.floor(math.log(seconds, 60)))
p = math.pow(60, i)
s = round(seconds / p, 2)
return "%s %s" % (s, size_name[i])
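# Illustrative sanity checks (not part of the original module): a few hand-checked
# conversions showing the expected human-readable output.
if __name__ == "__main__":
    print(convertSize(0))        # 0B
    print(convertSize(1536))     # 1.5 KB
    print(convertTime(90))       # 1.5 Min
    print(convertTime(7200))     # 2.0 Hrs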
|
StarcoderdataPython
|
11311893
|
# problem 16
# Power digit sum
"""
2**15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
What is the sum of the digits of the number 2**1000?
"""
val = 2 ** 1000
summation = sum(int(i) for i in str(val))
print(summation)
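# Quick sanity check (added for illustration, not in the original): the worked
# example from the problem statement, 2**15 = 32768, has digit sum 26.
assert sum(int(d) for d in str(2 ** 15)) == 26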
|
StarcoderdataPython
|
4853580
|
<gh_stars>1-10
"""
Given two words word1 and word2, find the minimum number of steps required to make
word1 and word2 the same, where in each step you may delete one character from
either string.
Example 1:
Input: "sea", "eat"
Output: 2
Explanation: Step one turns "sea" into "ea"; step two turns "eat" into "ea".
Notes:
The length of each given word will not exceed 500.
The given words contain only lowercase letters.
Source: LeetCode
Link: https://leetcode-cn.com/problems/delete-operation-for-two-strings
Copyright belongs to LeetCode (LingKou Network). For commercial reuse please contact
them for authorization; for non-commercial reuse please credit the source.
"""
class Solution:
def minDistance(self, word1: str, word2: str) -> int:
if word2 == word1: return 0
_len1, _len2 = len(word1) + 1, len(word2) + 1
        # Longest common subsequence (LCS) DP table
dp = [[0 for _ in range(_len2)] for _ in range(_len1)]
for i in range(_len1):
for j in range(_len2):
if i == 0 or j == 0: continue
if word1[i - 1] == word2[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1] + 1  # same character: LCS length grows by 1
else:
dp[i][j] = max(dp[i - 1][j],
                                   dp[i][j - 1])  # different characters: keep the longer LCS, deleting from word1 (dp[i-1][j]) or word2 (dp[i][j-1])
return len(word1) + len(word2) - 2 * dp[-1][-1]
def minDistance2(self, word1: str, word2: str) -> int:
if word2 == word1: return 0
_len1, _len2 = len(word1) + 1, len(word2) + 1
dp = [[0 for _ in range(_len2)] for _ in range(_len1)]
for i in range(_len1):
for j in range(_len2):
if i == 0 or j == 0:
                    dp[i][j] = i + j  # boundary: delete all i chars of word1 or all j chars of word2
elif word1[i - 1] == word2[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1]  # same character: no deletion needed
else:
dp[i][j] = 1 + min(dp[i - 1][j],
                                       dp[i][j - 1])  # different characters: fewest deletions, from word1 (dp[i-1][j]) or word2 (dp[i][j-1])
return dp[-1][-1]
def minDistance3(self, word1: str, word2: str) -> int:
if word2 == word1: return 0
_len1, _len2 = len(word1) + 1, len(word2) + 1
dp, temp = [0 for _ in range(_len2)], [0 for _ in range(_len2)]
# dp = [[0 for _ in range(_len2)] for _ in range(_len1)]
        # rolling rows: dp holds the previous row dp2[i-1]; temp is the current row dp2[i] being built
for i in range(_len1):
for j in range(_len2):
temp[j] = 0
if i == 0 or j == 0:
                    temp[j] = i + j  # boundary: delete all i chars of word1 or all j chars of word2
elif word1[i - 1] == word2[j - 1]:
                    temp[j] = dp[j - 1]  # same character: no deletion needed
else:
                    temp[j] = 1 + min(dp[j], temp[j - 1])  # different characters: fewest deletions, from word1 (dp[j]) or word2 (temp[j-1])
dp, temp = temp, dp
return dp[-1]
print(Solution().minDistance3("", "a"))
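# Illustrative check (not part of the original file): all three variants should
# agree on the example from the problem statement ("sea", "eat" -> 2).
_s = Solution()
assert _s.minDistance("sea", "eat") == _s.minDistance2("sea", "eat") == _s.minDistance3("sea", "eat") == 2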
|
StarcoderdataPython
|
4989791
|
<gh_stars>0
"""Builder Design Pattern (Made in Python 3.4.3)
http://en.wikipedia.org/wiki/Builder_pattern
This is my interpretation of the builder pattern for Python 3.
Very simple. But also kinda cool! I use the lambda expression. This way
you can design a builder to only be able to change certain properties
later on. One could even create builders of builders this way. Neat!
Combine with factories as well I suppose!
"""
class Composition:
def __init__(self, c1, c2, name="default"):
self.name = name
self.c1 = c1
self.c2 = c2
def run(self):
print(self.name, "is running:")
for i in range(self.c2.n):
print(self.c1.names)
class Component1:
def __init__(self, *args):
self.names = args
class Component2:
def __init__(self, n):
self.n = n
if __name__ == "__main__": # Client code:
builder0 = lambda name: Composition(
Component1("Knatte", "Fnatte", "Tjatte"),
Component2(2),
name=name
)
builder1 = lambda: Composition(
Component1("Tripp", "Trapp", "Trull"),
Component2(3)
)
builder2 = lambda i: Composition(
Component1("Snipp", "Snapp", "Slut"),
Component2(i)
)
ob0 = builder0("Kalle")
ob1 = builder0("Kajsa")
ob2 = builder0("Pluto")
ob3 = builder1()
ob4 = builder1()
ob5 = builder2(7)
ob0.run()
ob1.run()
ob2.run()
ob3.run()
ob4.run()
ob5.run()
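    # Illustrative extension (not in the original file): a "builder of builders",
    # as hinted at in the module docstring. meta_builder fixes the name list and
    # returns a builder that only varies the composition name; the names used here
    # are made up for demonstration.
    meta_builder = lambda *names: (lambda name: Composition(
        Component1(*names),
        Component2(1),
        name=name
    ))
    duck_builder = meta_builder("Huey", "Dewey", "Louie")
    duck_builder("Donald").run()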
|
StarcoderdataPython
|
9622311
|
<reponame>vishalbelsare/GraphFlow-1
import enum
import numpy as np
def L1norm(r1, r2):
return np.sum(abs(r1 - r2))
class PageRankLanguage(enum.Enum):
PYTHON = 0
CYTHON = 1
FORTRAN = 2
from . import hits
from . import pagerank
from . import simvoltage
|
StarcoderdataPython
|
3539441
|
<reponame>mroll/manticore<gh_stars>0
''' Symbolic EVM implementation based on the yellow paper: http://gavwood.com/paper.pdf '''
import random
import copy
import inspect
from functools import wraps
from ..utils.helpers import issymbolic, memoized
from ..platforms.platform import *
from ..core.smtlib import solver, TooManySolutions, Expression, Bool, BitVec, Array, Operators, Constant, BitVecConstant, ConstraintSet, \
SolverException
from ..core.state import ForkState, TerminateState
from ..utils.event import Eventful
from ..core.smtlib.visitors import pretty_print, arithmetic_simplifier, translate_to_smtlib
from ..core.state import Concretize, TerminateState
import logging
import sys
from collections import namedtuple
if sys.version_info < (3, 6):
import sha3
logger = logging.getLogger(__name__)
# Auxiliary constants and functions
TT256 = 2 ** 256
TT256M1 = 2 ** 256 - 1
TT255 = 2 ** 255
TOOHIGHMEM = 0x1000
PendingTransaction = namedtuple("PendingTransaction", ['type', 'address', 'origin', 'price', 'data', 'caller', 'value', 'bytecode', 'header'])
def ceil32(x):
return Operators.ITEBV(256, (x % 32) == 0, x, x + 32 - (x % 32))
def to_signed(i):
return Operators.ITEBV(256, i < TT255, i, i - TT256)
class Transaction(object):
__slots__ = 'sort', 'address', 'origin', 'price', 'data', 'caller', 'value', 'return_data', 'result'
def __init__(self, sort, address, origin, price, data, caller, value, return_data, result):
self.sort = sort
self.address = address
self.origin = origin
self.price = price
self.data = data
self.caller = caller
self.value = value
self.return_data = return_data
self.result = result
def __reduce__(self):
''' Implements serialization/pickle '''
return (self.__class__, (self.sort, self.address, self.origin, self.price, self.data, self.caller, self.value, self.return_data, self.result))
def __str__(self):
return 'Transaction(%s, from=0x%x, to=0x%x, value=%r, data=%r..)' % (self.sort, self.caller, self.address, self.value, self.data)
class EVMLog():
def __init__(self, address, memlog, topics):
self.address = address
self.memlog = memlog
self.topics = topics
class EVMMemory(object):
def __init__(self, constraints, address_size=256, value_size=8, *args, **kwargs):
'''
A symbolic memory manager for EVM.
This is internally used to provide memory to an Ethereum Virtual Machine.
It maps address_size bits wide bitvectors to value_size wide bitvectors.
Normally BitVec(256) -> BitVec(8)
Example use::
cs = ConstraintSet()
mem = EVMMemory(cs)
mem[16] = 0x41
assert (mem.allocated == 1)
assert (mem[16] == 0x41)
:param constraints: a set of constraints
:type constraints: ConstraintSet
:param address_size: address bit width
        :param value_size: value bit width
'''
assert isinstance(constraints, (ConstraintSet, type(None)))
self._constraints = constraints
self._symbols = {}
self._memory = {}
self._address_size = address_size
self._value_size = value_size
self._allocated = 0
def __copy__(self):
''' Makes a copy of itself '''
new_mem = EVMMemory(self._constraints, self._address_size, self._value_size)
new_mem._memory = dict(self._memory)
new_mem._symbols = dict(self._symbols)
return new_mem
def __reduce__(self):
''' Implements serialization/pickle '''
return (self.__class__, (self._constraints, self._address_size, self._value_size), {
'_symbols': self._symbols, '_memory': self._memory, '_allocated': self._allocated})
@property
def constraints(self):
return self._constraints
@constraints.setter
def constraints(self, constraints):
self._constraints = constraints
def _get_size(self, index):
''' Calculates the size of a slice
:param index: a slice
:type index: slice
'''
size = index.stop - index.start
if isinstance(size, BitVec):
size = arithmetic_simplifier(size)
else:
size = BitVecConstant(self._address_size, size)
assert isinstance(size, BitVecConstant)
return size.value
def __getitem__(self, index):
if isinstance(index, slice):
size = self._get_size(index)
return self.read(index.start, size)
else:
return self.read(index, 1)[0]
def __setitem__(self, index, value):
if isinstance(index, slice):
size = self._get_size(index)
assert len(value) == size
for i in xrange(size):
self.write(index.start + i, [value[i]])
else:
self.write(index, [value])
    def __delitem__(self, index):
        def delete(offset):
            if offset in self._memory:
                del self._memory[offset]
            if offset in self._symbols:
                del self._symbols[offset]
        if isinstance(index, slice):
            for offset in xrange(index.start, index.stop):
                delete(offset)
        else:
            delete(index)
def __contains__(self, offset):
return offset in self._memory or \
offset in self._symbols
def items(self):
offsets = set(self._symbols.keys() + self._memory.keys())
return [(x, self[x]) for x in offsets]
def get(self, offset, default=0):
result = self.read(offset, 1)
if not result:
return default
return result[0]
def __repr__(self):
return self.__str__()
def __str__(self):
m = {}
for key in self._memory.keys():
c = self.read(key, 1)[0]
if issymbolic(c):
m[key] = '?'
else:
m[key] = hex(c)
return str(m)
def __len__(self):
return self._allocated
@property
def allocated(self):
return self._allocated
def _allocate(self, address):
'''
Allocate more memory
'''
new_max = ceil32(address) // 32
self._allocated = Operators.ITEBV(256, self._allocated < new_max, new_max, self._allocated)
def _concrete_read(self, address):
return self._memory.get(address, 0)
def _concrete_write(self, address, value):
assert not issymbolic(address)
assert not issymbolic(value)
assert value & ~(pow(2, self._value_size) - 1) == 0, "Not the correct size for a value"
self._memory[address] = value
def read(self, address, size):
'''
Read size items from address.
        Address can be a symbolic value.
        The result is a sequence of the requested size.
        Resultant items can be symbolic.
:param address: Where to read from
:param size: How many items
:rtype: list
'''
assert not issymbolic(size)
self._allocate(address + size)
if issymbolic(address):
address = arithmetic_simplifier(address)
assert solver.check(self.constraints)
logger.debug('Reading %d items from symbolic offset %s', size, address)
try:
                solutions = solver.get_all_values(self.constraints, address, maxcnt=0x1000)  # raises TooManySolutions if more than 0x1000
except TooManySolutions as e:
m, M = solver.minmax(self.constraints, address)
logger.debug('Got TooManySolutions on a symbolic read. Range [%x, %x]. Not crashing!', m, M)
logger.info('INCOMPLETE Result! Using the sampled solutions we have as result')
condition = False
for base in e.solutions:
condition = Operators.OR(address == base, condition)
raise ForkState('address too high', condition)
# So here we have all potential solutions of symbolic address (complete set)
assert len(solutions) > 0
condition = False
for base in solutions:
condition = Operators.OR(address == base, condition)
result = []
# consider size ==1 to read following code
for offset in range(size):
# Given ALL solutions for the symbolic address
for base in solutions:
addr_value = base + offset
item = self._concrete_read(addr_value)
if addr_value in self._symbols:
for condition, value in self._symbols[addr_value]:
item = Operators.ITEBV(self._value_size, condition, value, item)
if len(result) > offset:
result[offset] = Operators.ITEBV(self._value_size, address == base, item, result[offset])
else:
result.append(item)
assert len(result) == offset + 1
return result
else:
result = []
for i in range(size):
result.append(self._concrete_read(address + i))
for offset in range(size):
if address + offset in self._symbols:
for condition, value in self._symbols[address + offset]:
if condition is True:
result[offset] = value
else:
result[offset] = Operators.ITEBV(self._value_size, condition, value, result[offset])
return result
def write(self, address, value):
'''
Write a value at address.
:param address: The address at which to write
:type address: int or long or Expression
:param value: Bytes to write
:type value: tuple or list
'''
size = len(value)
self._allocate(address + size)
if issymbolic(address):
            solutions = solver.get_all_values(self.constraints, address, maxcnt=0x1000)  # raises TooManySolutions if more than 0x1000
for offset in xrange(size):
for base in solutions:
condition = base == address
self._symbols.setdefault(base + offset, []).append((condition, value[offset]))
else:
for offset in xrange(size):
if issymbolic(value[offset]):
self._symbols[address + offset] = [(True, value[offset])]
else:
# overwrite all previous items
if address + offset in self._symbols:
del self._symbols[address + offset]
self._concrete_write(address + offset, value[offset])
class EVMAsm(object):
'''
EVM Instruction factory
Example use::
>>> from manticore.platforms.evm import EVMAsm
>>> EVMAsm.disassemble_one('\\x60\\x10')
Instruction(0x60, 'PUSH', 1, 0, 1, 0, 'Place 1 byte item on stack.', 16, 0)
>>> EVMAsm.assemble_one('PUSH1 0x10')
Instruction(0x60, 'PUSH', 1, 0, 1, 0, 'Place 1 byte item on stack.', 16, 0)
>>> tuple(EVMAsm.disassemble_all('\\x30\\x31'))
(Instruction(0x30, 'ADDRESS', 0, 0, 1, 2, 'Get address of currently executing account.', None, 0),
Instruction(0x31, 'BALANCE', 0, 1, 1, 20, 'Get balance of the given account.', None, 1))
>>> tuple(EVMAsm.assemble_all('ADDRESS\\nBALANCE'))
(Instruction(0x30, 'ADDRESS', 0, 0, 1, 2, 'Get address of currently executing account.', None, 0),
Instruction(0x31, 'BALANCE', 0, 1, 1, 20, 'Get balance of the given account.', None, 1))
>>> EVMAsm.assemble_hex(
... """PUSH1 0x60
... BLOCKHASH
... MSTORE
... PUSH1 0x2
... PUSH2 0x100
... """
... )
'0x606040526002610100'
>>> EVMAsm.disassemble_hex('0x606040526002610100')
'PUSH1 0x60\\nBLOCKHASH\\nMSTORE\\nPUSH1 0x2\\nPUSH2 0x100'
'''
class Instruction(object):
def __init__(self, opcode, name, operand_size, pops, pushes, fee, description, operand=None, offset=0):
'''
This represents an EVM instruction.
EVMAsm will create this for you.
:param opcode: the opcode value
:param name: instruction name
:param operand_size: immediate operand size in bytes
:param pops: number of items popped from the stack
:param pushes: number of items pushed into the stack
:param fee: gas fee for the instruction
:param description: textual description of the instruction
:param operand: optional immediate operand
:param offset: optional offset of this instruction in the program
Example use::
instruction = EVMAsm.assemble_one('PUSH1 0x10')
print 'Instruction: %s'% instruction
print '\tdescription:', instruction.description
print '\tgroup:', instruction.group
print '\taddress:', instruction.offset
print '\tsize:', instruction.size
print '\thas_operand:', instruction.has_operand
print '\toperand_size:', instruction.operand_size
print '\toperand:', instruction.operand
print '\tsemantics:', instruction.semantics
print '\tpops:', instruction.pops
print '\tpushes:', instruction.pushes
print '\tbytes:', '0x'+instruction.bytes.encode('hex')
print '\twrites to stack:', instruction.writes_to_stack
print '\treads from stack:', instruction.reads_from_stack
print '\twrites to memory:', instruction.writes_to_memory
print '\treads from memory:', instruction.reads_from_memory
print '\twrites to storage:', instruction.writes_to_storage
print '\treads from storage:', instruction.reads_from_storage
print '\tis terminator', instruction.is_terminator
'''
self._opcode = opcode
self._name = name
self._operand_size = operand_size
self._pops = pops
self._pushes = pushes
self._fee = fee
self._description = description
self._operand = operand # Immediate operand if any
if operand_size != 0 and operand is not None:
mask = (1 << operand_size * 8) - 1
if ~mask & operand:
raise ValueError("operand should be %d bits long" % (operand_size * 8))
self._offset = offset
def __eq__(self, other):
''' Instructions are equal if all features match '''
return self._opcode == other._opcode and\
self._name == other._name and\
self._operand == other._operand and\
self._operand_size == other._operand_size and\
self._pops == other._pops and\
self._pushes == other._pushes and\
self._fee == other._fee and\
self._offset == other._offset and\
self._description == other._description
def __repr__(self):
output = 'Instruction(0x%x, %r, %d, %d, %d, %d, %r, %r, %r)' % (self._opcode, self._name, self._operand_size,
self._pops, self._pushes, self._fee, self._description, self._operand, self._offset)
return output
def __str__(self):
output = self.name + (' 0x%x' % self.operand if self.has_operand else '')
return output
@property
def opcode(self):
''' The opcode as an integer '''
return self._opcode
@property
def name(self):
''' The instruction name/mnemonic '''
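# For example (values from the opcode table below): 0x81 ('DUP', pops=2) renders
# as 'DUP2', 0x91 ('SWAP', pops=3) as 'SWAP2', and 0xa1 ('LOG', pops=3) as 'LOG1'.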
if self._name == 'PUSH':
return 'PUSH%d' % self.operand_size
elif self._name == 'DUP':
return 'DUP%d' % self.pops
elif self._name == 'SWAP':
return 'SWAP%d' % (self.pops - 1)
elif self._name == 'LOG':
return 'LOG%d' % (self.pops - 2)
return self._name
def parse_operand(self, buf):
''' Parses an operand from buf
:param buf: a buffer
:type buf: iterator/generator/string
'''
buf = iter(buf)
try:
operand = 0
for _ in range(self.operand_size):
operand <<= 8
operand |= ord(next(buf))
self._operand = operand
except StopIteration:
raise Exception("Not enough data for decoding")
@property
def operand_size(self):
''' The immediate operand size '''
return self._operand_size
@property
def has_operand(self):
''' True if the instruction uses an immediate operand'''
return self.operand_size > 0
@property
def operand(self):
''' The immediate operand '''
return self._operand
@property
def pops(self):
'''Number of words popped from the stack'''
return self._pops
@property
def pushes(self):
'''Number of words pushed to the stack'''
return self._pushes
@property
def size(self):
''' Size of the encoded instruction '''
return self._operand_size + 1
@property
def fee(self):
''' The basic gas fee of the instruction '''
return self._fee
@property
def semantics(self):
''' Canonical semantics '''
return self._name
@property
def description(self):
''' Colloquial description of the instruction '''
return self._description
@property
def bytes(self):
''' Encoded instruction '''
bytes = []
bytes.append(chr(self._opcode))
for offset in reversed(xrange(self.operand_size)):
c = (self.operand >> offset * 8) & 0xff
bytes.append(chr(c))
return ''.join(bytes)
@property
def offset(self):
'''Location in the program (optional)'''
return self._offset
@property
def group(self):
'''Instruction classification as per the yellow paper'''
classes = {
0: 'Stop and Arithmetic Operations',
1: 'Comparison & Bitwise Logic Operations',
2: 'SHA3',
3: 'Environmental Information',
4: 'Block Information',
5: 'Stack, Memory, Storage and Flow Operations',
6: 'Push Operations',
7: 'Push Operations',
8: 'Duplication Operations',
9: 'Exchange Operations',
0xa: 'Logging Operations',
0xf: 'System operations'
}
return classes.get(self.opcode >> 4, 'Invalid instruction')
@property
def uses_stack(self):
''' True if the instruction reads/writes from/to the stack '''
return self.reads_from_stack or self.writes_to_stack
@property
def reads_from_stack(self):
''' True if the instruction reads from stack '''
return self.pops > 0
@property
def writes_to_stack(self):
''' True if the instruction writes to the stack '''
return self.pushes > 0
@property
def reads_from_memory(self):
''' True if the instruction reads from memory '''
return self.semantics in ('MLOAD', 'CREATE', 'CALL', 'CALLCODE', 'RETURN', 'DELEGATECALL', 'REVERT')
@property
def writes_to_memory(self):
''' True if the instruction writes to memory '''
return self.semantics in ('MSTORE', 'MSTORE8', 'CALLDATACOPY', 'CODECOPY', 'EXTCODECOPY')
@property
def writes_to_storage(self):
''' True if the instruction writes to the storage '''
return self.semantics in ('SSTORE',)
@property
def reads_from_storage(self):
''' True if the instruction reads from the storage '''
return self.semantics in ('SLOAD',)
@property
def is_terminator(self):
''' True if the instruction is a basic block terminator '''
return self.semantics in ('RETURN', 'STOP', 'INVALID', 'JUMP', 'JUMPI', 'SELFDESTRUCT', 'REVERT')
@property
def is_branch(self):
''' True if the instruction is a jump'''
return self.semantics in ('JUMP', 'JUMPI')
@property
def is_environmental(self):
''' True if the instruction access enviromental data '''
return self.group == 'Environmental Information'
@property
def is_system(self):
''' True if the instruction is a system operation '''
return self.group == 'System operations'
@property
def uses_block_info(self):
''' True if the instruction access block information'''
return self.group == 'Block Information'
@property
def is_arithmetic(self):
''' True if the instruction is an arithmetic operation '''
return self.semantics in ('ADD', 'MUL', 'SUB', 'DIV', 'SDIV', 'MOD', 'SMOD', 'ADDMOD', 'MULMOD', 'EXP', 'SIGNEXTEND')
# from http://gavwood.com/paper.pdf
_table = { # opcode: (name, immediate_operand_size, pops, pushes, gas, description)
0x00: ('STOP', 0, 0, 0, 0, 'Halts execution.'),
0x01: ('ADD', 0, 2, 1, 3, 'Addition operation.'),
0x02: ('MUL', 0, 2, 1, 5, 'Multiplication operation.'),
0x03: ('SUB', 0, 2, 1, 3, 'Subtraction operation.'),
0x04: ('DIV', 0, 2, 1, 5, 'Integer division operation.'),
0x05: ('SDIV', 0, 2, 1, 5, 'Signed integer division operation (truncated).'),
0x06: ('MOD', 0, 2, 1, 5, 'Modulo remainder operation.'),
0x07: ('SMOD', 0, 2, 1, 5, 'Signed modulo remainder operation.'),
0x08: ('ADDMOD', 0, 3, 1, 8, 'Modulo addition operation.'),
0x09: ('MULMOD', 0, 3, 1, 8, 'Modulo multiplication operation.'),
0x0a: ('EXP', 0, 2, 1, 10, 'Exponential operation.'),
0x0b: ('SIGNEXTEND', 0, 2, 1, 5, "Extend length of two's complement signed integer."),
0x10: ('LT', 0, 2, 1, 3, 'Less-than comparison.'),
0x11: ('GT', 0, 2, 1, 3, 'Greater-than comparison.'),
0x12: ('SLT', 0, 2, 1, 3, 'Signed less-than comparison.'),
0x13: ('SGT', 0, 2, 1, 3, 'Signed greater-than comparison.'),
0x14: ('EQ', 0, 2, 1, 3, 'Equality comparison.'),
0x15: ('ISZERO', 0, 1, 1, 3, 'Simple not operator.'),
0x16: ('AND', 0, 2, 1, 3, 'Bitwise AND operation.'),
0x17: ('OR', 0, 2, 1, 3, 'Bitwise OR operation.'),
0x18: ('XOR', 0, 2, 1, 3, 'Bitwise XOR operation.'),
0x19: ('NOT', 0, 1, 1, 3, 'Bitwise NOT operation.'),
0x1a: ('BYTE', 0, 2, 1, 3, 'Retrieve single byte from word.'),
0x20: ('SHA3', 0, 2, 1, 30, 'Compute Keccak-256 hash.'),
0x30: ('ADDRESS', 0, 0, 1, 2, 'Get address of currently executing account.'),
0x31: ('BALANCE', 0, 1, 1, 20, 'Get balance of the given account.'),
0x32: ('ORIGIN', 0, 0, 1, 2, 'Get execution origination address.'),
0x33: ('CALLER', 0, 0, 1, 2, 'Get caller address.'),
0x34: ('CALLVALUE', 0, 0, 1, 2, 'Get deposited value by the instruction/transaction responsible for this execution.'),
0x35: ('CALLDATALOAD', 0, 1, 1, 3, 'Get input data of current environment.'),
0x36: ('CALLDATASIZE', 0, 0, 1, 2, 'Get size of input data in current environment.'),
0x37: ('CALLDATACOPY', 0, 3, 0, 3, 'Copy input data in current environment to memory.'),
0x38: ('CODESIZE', 0, 0, 1, 2, 'Get size of code running in current environment.'),
0x39: ('CODECOPY', 0, 3, 0, 3, 'Copy code running in current environment to memory.'),
0x3a: ('GASPRICE', 0, 0, 1, 2, 'Get price of gas in current environment.'),
0x3b: ('EXTCODESIZE', 0, 1, 1, 20, "Get size of an account's code."),
0x3c: ('EXTCODECOPY', 0, 4, 0, 20, "Copy an account's code to memory."),
0x40: ('BLOCKHASH', 0, 1, 1, 20, 'Get the hash of one of the 256 most recent complete blocks.'),
0x41: ('COINBASE', 0, 0, 1, 2, "Get the block's beneficiary address."),
0x42: ('TIMESTAMP', 0, 0, 1, 2, "Get the block's timestamp."),
0x43: ('NUMBER', 0, 0, 1, 2, "Get the block's number."),
0x44: ('DIFFICULTY', 0, 0, 1, 2, "Get the block's difficulty."),
0x45: ('GASLIMIT', 0, 0, 1, 2, "Get the block's gas limit."),
0x50: ('POP', 0, 1, 0, 2, 'Remove item from stack.'),
0x51: ('MLOAD', 0, 1, 1, 3, 'Load word from memory.'),
0x52: ('MSTORE', 0, 2, 0, 3, 'Save word to memory.'),
0x53: ('MSTORE8', 0, 2, 0, 3, 'Save byte to memory.'),
0x54: ('SLOAD', 0, 1, 1, 50, 'Load word from storage.'),
0x55: ('SSTORE', 0, 2, 0, 0, 'Save word to storage.'),
0x56: ('JUMP', 0, 1, 0, 8, 'Alter the program counter.'),
0x57: ('JUMPI', 0, 2, 0, 10, 'Conditionally alter the program counter.'),
0x58: ('GETPC', 0, 0, 1, 2, 'Get the value of the program counter prior to the increment.'),
0x59: ('MSIZE', 0, 0, 1, 2, 'Get the size of active memory in bytes.'),
0x5a: ('GAS', 0, 0, 1, 2, 'Get the amount of available gas, including the corresponding reduction for the cost of this instruction.'),
0x5b: ('JUMPDEST', 0, 0, 0, 1, 'Mark a valid destination for jumps.'),
0x60: ('PUSH', 1, 0, 1, 0, 'Place 1 byte item on stack.'),
0x61: ('PUSH', 2, 0, 1, 0, 'Place 2-byte item on stack.'),
0x62: ('PUSH', 3, 0, 1, 0, 'Place 3-byte item on stack.'),
0x63: ('PUSH', 4, 0, 1, 0, 'Place 4-byte item on stack.'),
0x64: ('PUSH', 5, 0, 1, 0, 'Place 5-byte item on stack.'),
0x65: ('PUSH', 6, 0, 1, 0, 'Place 6-byte item on stack.'),
0x66: ('PUSH', 7, 0, 1, 0, 'Place 7-byte item on stack.'),
0x67: ('PUSH', 8, 0, 1, 0, 'Place 8-byte item on stack.'),
0x68: ('PUSH', 9, 0, 1, 0, 'Place 9-byte item on stack.'),
0x69: ('PUSH', 10, 0, 1, 0, 'Place 10-byte item on stack.'),
0x6a: ('PUSH', 11, 0, 1, 0, 'Place 11-byte item on stack.'),
0x6b: ('PUSH', 12, 0, 1, 0, 'Place 12-byte item on stack.'),
0x6c: ('PUSH', 13, 0, 1, 0, 'Place 13-byte item on stack.'),
0x6d: ('PUSH', 14, 0, 1, 0, 'Place 14-byte item on stack.'),
0x6e: ('PUSH', 15, 0, 1, 0, 'Place 15-byte item on stack.'),
0x6f: ('PUSH', 16, 0, 1, 0, 'Place 16-byte item on stack.'),
0x70: ('PUSH', 17, 0, 1, 0, 'Place 17-byte item on stack.'),
0x71: ('PUSH', 18, 0, 1, 0, 'Place 18-byte item on stack.'),
0x72: ('PUSH', 19, 0, 1, 0, 'Place 19-byte item on stack.'),
0x73: ('PUSH', 20, 0, 1, 0, 'Place 20-byte item on stack.'),
0x74: ('PUSH', 21, 0, 1, 0, 'Place 21-byte item on stack.'),
0x75: ('PUSH', 22, 0, 1, 0, 'Place 22-byte item on stack.'),
0x76: ('PUSH', 23, 0, 1, 0, 'Place 23-byte item on stack.'),
0x77: ('PUSH', 24, 0, 1, 0, 'Place 24-byte item on stack.'),
0x78: ('PUSH', 25, 0, 1, 0, 'Place 25-byte item on stack.'),
0x79: ('PUSH', 26, 0, 1, 0, 'Place 26-byte item on stack.'),
0x7a: ('PUSH', 27, 0, 1, 0, 'Place 27-byte item on stack.'),
0x7b: ('PUSH', 28, 0, 1, 0, 'Place 28-byte item on stack.'),
0x7c: ('PUSH', 29, 0, 1, 0, 'Place 29-byte item on stack.'),
0x7d: ('PUSH', 30, 0, 1, 0, 'Place 30-byte item on stack.'),
0x7e: ('PUSH', 31, 0, 1, 0, 'Place 31-byte item on stack.'),
0x7f: ('PUSH', 32, 0, 1, 0, 'Place 32-byte (full word) item on stack.'),
0x80: ('DUP', 0, 1, 2, 3, 'Duplicate 1st stack item.'),
0x81: ('DUP', 0, 2, 3, 3, 'Duplicate 2nd stack item.'),
0x82: ('DUP', 0, 3, 4, 3, 'Duplicate 3rd stack item.'),
0x83: ('DUP', 0, 4, 5, 3, 'Duplicate 4th stack item.'),
0x84: ('DUP', 0, 5, 6, 3, 'Duplicate 5th stack item.'),
0x85: ('DUP', 0, 6, 7, 3, 'Duplicate 6th stack item.'),
0x86: ('DUP', 0, 7, 8, 3, 'Duplicate 7th stack item.'),
0x87: ('DUP', 0, 8, 9, 3, 'Duplicate 8th stack item.'),
0x88: ('DUP', 0, 9, 10, 3, 'Duplicate 9th stack item.'),
0x89: ('DUP', 0, 10, 11, 3, 'Duplicate 10th stack item.'),
0x8a: ('DUP', 0, 11, 12, 3, 'Duplicate 11th stack item.'),
0x8b: ('DUP', 0, 12, 13, 3, 'Duplicate 12th stack item.'),
0x8c: ('DUP', 0, 13, 14, 3, 'Duplicate 13th stack item.'),
0x8d: ('DUP', 0, 14, 15, 3, 'Duplicate 14th stack item.'),
0x8e: ('DUP', 0, 15, 16, 3, 'Duplicate 15th stack item.'),
0x8f: ('DUP', 0, 16, 17, 3, 'Duplicate 16th stack item.'),
0x90: ('SWAP', 0, 2, 2, 3, 'Exchange 1st and 2nd stack items.'),
0x91: ('SWAP', 0, 3, 3, 3, 'Exchange 1st and 3rd stack items.'),
0x92: ('SWAP', 0, 4, 4, 3, 'Exchange 1st and 4th stack items.'),
0x93: ('SWAP', 0, 5, 5, 3, 'Exchange 1st and 5th stack items.'),
0x94: ('SWAP', 0, 6, 6, 3, 'Exchange 1st and 6th stack items.'),
0x95: ('SWAP', 0, 7, 7, 3, 'Exchange 1st and 7th stack items.'),
0x96: ('SWAP', 0, 8, 8, 3, 'Exchange 1st and 8th stack items.'),
0x97: ('SWAP', 0, 9, 9, 3, 'Exchange 1st and 9th stack items.'),
0x98: ('SWAP', 0, 10, 10, 3, 'Exchange 1st and 10th stack items.'),
0x99: ('SWAP', 0, 11, 11, 3, 'Exchange 1st and 11th stack items.'),
0x9a: ('SWAP', 0, 12, 12, 3, 'Exchange 1st and 12th stack items.'),
0x9b: ('SWAP', 0, 13, 13, 3, 'Exchange 1st and 13th stack items.'),
0x9c: ('SWAP', 0, 14, 14, 3, 'Exchange 1st and 14th stack items.'),
0x9d: ('SWAP', 0, 15, 15, 3, 'Exchange 1st and 15th stack items.'),
0x9e: ('SWAP', 0, 16, 16, 3, 'Exchange 1st and 16th stack items.'),
0x9f: ('SWAP', 0, 17, 17, 3, 'Exchange 1st and 17th stack items.'),
0xa0: ('LOG', 0, 2, 0, 375, 'Append log record with no topics.'),
0xa1: ('LOG', 0, 3, 0, 750, 'Append log record with one topic.'),
0xa2: ('LOG', 0, 4, 0, 1125, 'Append log record with two topics.'),
0xa3: ('LOG', 0, 5, 0, 1500, 'Append log record with three topics.'),
0xa4: ('LOG', 0, 6, 0, 1875, 'Append log record with four topics.'),
0xf0: ('CREATE', 0, 3, 1, 32000, 'Create a new account with associated code.'),
0xf1: ('CALL', 0, 7, 1, 40, 'Message-call into an account.'),
0xf2: ('CALLCODE', 0, 7, 1, 40, "Message-call into this account with alternative account's code."),
0xf3: ('RETURN', 0, 2, 0, 0, 'Halt execution returning output data.'),
0xf4: ('DELEGATECALL', 0, 6, 1, 40, "Message-call into this account with an alternative account's code, but persisting the current values for sender and value."),
0xf5: ('BREAKPOINT', 0, 0, 0, 40, 'Not in yellow paper FIXME'),
0xf6: ('RNGSEED', 0, 1, 1, 0, 'Not in yellow paper FIXME'),
0xf7: ('SSIZEEXT', 0, 2, 1, 0, 'Not in yellow paper FIXME'),
0xf8: ('SLOADBYTES', 0, 3, 0, 0, 'Not in yellow paper FIXME'),
0xf9: ('SSTOREBYTES', 0, 3, 0, 0, 'Not in yellow paper FIXME'),
0xfa: ('SSIZE', 0, 1, 1, 40, 'Not in yellow paper FIXME'),
0xfb: ('STATEROOT', 0, 1, 1, 0, 'Not in yellow paper FIXME'),
0xfc: ('TXEXECGAS', 0, 0, 1, 0, 'Not in yellow paper FIXME'),
0xfd: ('REVERT', 0, 2, 0, 0, 'Stop execution and revert state changes, without consuming all provided gas and providing a reason.'),
0xfe: ('INVALID', 0, 0, 0, 0, 'Designated invalid instruction.'),
0xff: ('SELFDESTRUCT', 0, 1, 0, 5000, 'Halt execution and register account for later deletion.')
}
@staticmethod
@memoized
def _get_reverse_table():
''' Build an internal table used in the assembler '''
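# For example, opcode 0x61 ('PUSH', 2-byte operand) is keyed as 'PUSH2' and
# opcode 0x91 ('SWAP') as 'SWAP2', mirroring Instruction.name above.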
reverse_table = {}
for (opcode, (name, immediate_operand_size, pops, pushes, gas, description)) in EVMAsm._table.items():
mnemonic = name
if name == 'PUSH':
mnemonic = '%s%d' % (name, (opcode & 0x1f) + 1)
elif name in ('SWAP', 'LOG', 'DUP'):
mnemonic = '%s%d' % (name, (opcode & 0xf) + 1)
reverse_table[mnemonic] = opcode, name, immediate_operand_size, pops, pushes, gas, description
return reverse_table
@staticmethod
def assemble_one(assembler, offset=0):
''' Assemble one EVM instruction from its textual representation.
:param assembler: assembler code for one instruction
:param offset: offset of the instruction in the bytecode (optional)
:return: An Instruction object
Example use::
>>> print evm.EVMAsm.assemble_one('LT')
'''
try:
_reverse_table = EVMAsm._get_reverse_table()
assembler = assembler.strip().split(' ')
opcode, name, operand_size, pops, pushes, gas, description = _reverse_table[assembler[0].upper()]
if operand_size > 0:
assert len(assembler) == 2
operand = int(assembler[1], 0)
else:
assert len(assembler) == 1
operand = None
return EVMAsm.Instruction(opcode, name, operand_size, pops, pushes, gas, description, operand=operand, offset=offset)
except BaseException:
raise Exception("Something wrong at offset %d" % offset)
@staticmethod
def assemble_all(assembler, offset=0):
''' Assemble a sequence of textual representation of EVM instructions
:param assembler: assembler code for any number of instructions
:param offset: offset of the first instruction in the bytecode(optional)
:return: A generator of Instruction objects
Example use::
>>> evm.EVMAsm.assemble_all("""PUSH1 0x60
PUSH1 0x40
MSTORE
PUSH1 0x2
PUSH2 0x108
PUSH1 0x0
POP
SSTORE
PUSH1 0x40
MLOAD
""")
'''
if isinstance(assembler, str):
assembler = assembler.split('\n')
assembler = iter(assembler)
for line in assembler:
if not line.strip():
continue
instr = EVMAsm.assemble_one(line, offset=offset)
yield instr
offset += instr.size
@staticmethod
def disassemble_one(bytecode, offset=0):
''' Decode a single instruction from a bytecode
:param bytecode: the bytecode stream
:param offset: offset of the instruction in the bytecode(optional)
:type bytecode: iterator/sequence/str
:return: an Instruction object
Example use::
>>> print EVMAsm.disassemble_one('\\x60\\x10')
'''
bytecode = iter(bytecode)
opcode = ord(next(bytecode))
invalid = ('INVALID', 0, 0, 0, 0, 'Unknown opcode')
name, operand_size, pops, pushes, gas, description = EVMAsm._table.get(opcode, invalid)
instruction = EVMAsm.Instruction(opcode, name, operand_size, pops, pushes, gas, description, offset=offset)
if instruction.has_operand:
instruction.parse_operand(bytecode)
return instruction
@staticmethod
def disassemble_all(bytecode, offset=0):
''' Decode all instructions in bytecode
:param bytecode: an evm bytecode (binary)
:param offset: offset of the first instruction in the bytecode(optional)
:type bytecode: iterator/sequence/str
:return: A generator of Instruction objects
Example use::
>>> for inst in EVMAsm.disassemble_all(bytecode):
... print inst
...
PUSH1 0x60
PUSH1 0x40
MSTORE
PUSH1 0x2
PUSH2 0x108
PUSH1 0x0
POP
SSTORE
PUSH1 0x40
MLOAD
'''
bytecode = iter(bytecode)
while True:
instr = EVMAsm.disassemble_one(bytecode, offset=offset)
offset += instr.size
yield instr
@staticmethod
def disassemble(bytecode, offset=0):
''' Disassemble an EVM bytecode
:param bytecode: binary representation of an evm bytecode (hexadecimal)
:param offset: offset of the first instruction in the bytecode(optional)
:type bytecode: str
:return: the text representation of the assembler code
Example use::
>>> EVMAsm.disassemble("\x60\x60\x40\x52\x60\x02\x61\x01\x00")
...
PUSH1 0x60
BLOCKHASH
MSTORE
PUSH1 0x2
PUSH2 0x100
'''
return '\n'.join(map(str, EVMAsm.disassemble_all(bytecode, offset=offset)))
@staticmethod
def assemble(asmcode, offset=0):
''' Assemble an EVM program
:param asmcode: an evm assembler program
:param offset: offset of the first instruction in the bytecode(optional)
:type asmcode: str
:return: the hex representation of the bytecode
Example use::
>>> EVMAsm.assemble( """PUSH1 0x60
BLOCKHASH
MSTORE
PUSH1 0x2
PUSH2 0x100
"""
)
...
"\x60\x60\x60\x40\x52\x60\x02\x61\x01\x00"
'''
return ''.join(map(lambda x: x.bytes, EVMAsm.assemble_all(asmcode, offset=offset)))
@staticmethod
def disassemble_hex(bytecode, offset=0):
''' Disassemble an EVM bytecode
:param bytecode: canonical representation of an evm bytecode (hexadecimal)
:param int offset: offset of the first instruction in the bytecode(optional)
:type bytecode: str
:return: the text representation of the assembler code
Example use::
>>> EVMAsm.disassemble_hex("0x606040526002610100")
...
PUSH1 0x60
BLOCKHASH
MSTORE
PUSH1 0x2
PUSH2 0x100
'''
if bytecode.startswith('0x'):
bytecode = bytecode[2:]
bytecode = bytecode.decode('hex')
return EVMAsm.disassemble(bytecode, offset=offset)
@staticmethod
def assemble_hex(asmcode, offset=0):
''' Assemble an EVM program
:param asmcode: an evm assembler program
:param offset: offset of the first instruction in the bytecode(optional)
:type asmcode: str
:return: the hex representation of the bytecode
Example use::
>>> EVMAsm.assemble_hex( """PUSH1 0x60
BLOCKHASH
MSTORE
PUSH1 0x2
PUSH2 0x100
"""
)
...
"0x6060604052600261010"
'''
return '0x' + EVMAsm.assemble(asmcode, offset=offset).encode('hex')
# Exceptions...
class EVMException(Exception):
pass
class EVMInstructionException(EVMException):
pass
class ConcretizeStack(EVMException):
'''
Raised when a symbolic stack item needs to be concretized.
'''
def __init__(self, pos, expression=None, policy='MINMAX'):
self.message = "Concretizing evm stack item {}".format(pos)
self.pos = pos
self.expression = expression
self.policy = policy
class StackOverflow(EVMException):
''' Attempted to push more than 1024 items '''
class StackUnderflow(EVMException):
''' Attempted to pop from an empty stack '''
class InvalidOpcode(EVMException):
''' Trying to execute invalid opcode '''
class Call(EVMInstructionException):
def __init__(self, gas, to, value, data, out_offset=None, out_size=None):
self.gas = gas
self.to = to
self.value = value
self.data = data
self.out_offset = out_offset
self.out_size = out_size
def __reduce__(self):
return (self.__class__, (self.gas, self.to, self.value, self.data, self.out_offset, self.out_size))
class Create(Call):
def __init__(self, value, bytecode):
super(Create, self).__init__(gas=None, to=None, value=value, data=bytecode)
class DelegateCall(Call):
pass
class Stop(EVMInstructionException):
''' Program reached a STOP instruction '''
class Return(EVMInstructionException):
''' Program reached a RETURN instruction '''
def __init__(self, data):
self.data = data
def __reduce__(self):
return (self.__class__, (self.data,))
class Revert(EVMInstructionException):
''' Program reached a REVERT instruction '''
def __init__(self, data):
self.data = data
def __reduce__(self):
return (self.__class__, (self.data,))
class SelfDestruct(EVMInstructionException):
''' Program reached a SELFDESTRUCT instruction '''
def __init__(self, to):
self.to = to
class NotEnoughGas(EVMException):
''' Not enough gas for operation '''
class Sha3(EVMException):
def __init__(self, data):
self.data = data
def __reduce__(self):
return (self.__class__, (self.data, ))
def concretized_args(**policies):
"""
Make sure an EVM instruction has all of its arguments concretized according to
provided policies.
Example decoration:
@concretized_args(size='ONE', address='')
def LOG(self, address, size, *topics):
...
The above will make sure that the |size| parameter to LOG is concretized when
symbolic according to the 'ONE' policy, and that |address| is concretized with
the default policy.
:param policies: A kwargs list of argument names and their respective policies.
Provide None or '' as policy to use default.
:return: A function decorator
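A rough sketch of the observable behavior (the variable names below are
illustrative only, not part of this API): with the decoration above, calling
LOG with a symbolic |size| raises ConcretizeStack(2, policy='ONE') before the
body runs, so the executor can fork on concrete values for that stack slot
and retry the instruction::

    try:
        vm.LOG(address, symbolic_size)
    except ConcretizeStack as cc:
        # cc.pos == 2 (the |size| argument), cc.policy == 'ONE'
        fork_on_concrete_values(cc)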
"""
def concretizer(func):
@wraps(func)
def wrapper(*args, **kwargs):
spec = inspect.getargspec(func)
for arg, policy in policies.items():
assert arg in spec.args, "Concretizer argument not found in wrapped function."
# index is 0-indexed, but ConcretizeStack is 1-indexed. However, this is correct
# since implementation method is always a bound method (self is param 0)
index = spec.args.index(arg)
if issymbolic(args[index]):
if policy:
raise ConcretizeStack(index, policy=policy)
else:
raise ConcretizeStack(index)
return func(*args, **kwargs)
return wrapper
return concretizer
class EVM(Eventful):
'''Machine State. The machine state is defined as
the tuple (g, pc, m, i, s) which are the gas available, the
program counter pc , the memory contents, the active
number of words in memory (counting continuously
from position 0), and the stack contents. The memory
contents are a series of zeroes of bitsize 256
'''
_published_events = {'evm_execute_instruction',
'evm_read_storage', 'evm_write_storage',
'evm_read_memory',
'evm_write_memory',
'evm_read_code',
'decode_instruction', 'execute_instruction', 'concrete_sha3', 'symbolic_sha3'}
def __init__(self, constraints, address, origin, price, data, caller, value, code, header, global_storage=None, depth=0, gas=1000000000, **kwargs):
'''
Builds a Ethereum Virtual Machine instance
:param memory: the initial memory
:param address: the address of the account which owns the code that is executing.
:param origin: the sender address of the transaction that originated this execution. A 160-bit code used for identifying Accounts.
:param price: the price of gas in the transaction that originated this execution.
:param data: the byte array that is the input data to this execution
:param caller: the address of the account which caused the code to be executing. A 160-bit code used for identifying Accounts
:param value: the value, in Wei, passed to this account as part of the same procedure as execution. One Ether is defined as being 10**18 Wei.
:param bytecode: the byte array that is the machine code to be executed.
:param header: the block header of the present block.
:param depth: the depth of the present message-call or contract-creation (i.e. the number of CALLs or CREATEs being executed at present).
:param gas: gas budget for this transaction.
'''
super(EVM, self).__init__(**kwargs)
self._constraints = constraints
self.last_exception = None
self.memory = EVMMemory(constraints)
self.address = address
self.origin = origin # always an account with empty associated code
self.caller = caller # address of the account that is directly responsible for this execution
self.data = data
self.price = price # This is gas price specified by the originating transaction
self.value = value
self.depth = depth
self.bytecode = code
self.suicides = set()
self.logs = []
#FIXME parse decode and mark invalid instructions
#self.invalid = set()
assert 'coinbase' in header
assert 'gaslimit' in header
assert 'difficulty' in header
assert 'timestamp' in header
assert 'number' in header
self.header = header
# Machine state
self.pc = 0
self.stack = []
self._gas = gas
self.global_storage = global_storage
self.allocated = 0
@property
def constraints(self):
return self._constraints
@constraints.setter
def constraints(self, constraints):
self._constraints = constraints
self.memory.constraints = constraints
@property
def gas(self):
return self._gas
def __getstate__(self):
state = super(EVM, self).__getstate__()
state['memory'] = self.memory
state['global_storage'] = self.global_storage
state['constraints'] = self.constraints
state['last_exception'] = self.last_exception
state['address'] = self.address
state['origin'] = self.origin
state['caller'] = self.caller
state['data'] = self.data
state['price'] = self.price
state['value'] = self.value
state['depth'] = self.depth
state['bytecode'] = self.bytecode
state['header'] = self.header
state['pc'] = self.pc
state['stack'] = self.stack
state['gas'] = self._gas
state['allocated'] = self.allocated
state['suicides'] = self.suicides
state['logs'] = self.logs
return state
def __setstate__(self, state):
self._gas = state['gas']
self.memory = state['memory']
self.logs = state['logs']
self.global_storage = state['global_storage']
self.constraints = state['constraints']
self.last_exception = state['last_exception']
self.address = state['address']
self.origin = state['origin']
self.caller = state['caller']
self.data = state['data']
self.price = state['price']
self.value = state['value']
self.depth = state['depth']
self.bytecode = state['bytecode']
self.header = state['header']
self.pc = state['pc']
self.stack = state['stack']
self.allocated = state['allocated']
self.suicides = state['suicides']
super(EVM, self).__setstate__(state)
# Memory related
def _allocate(self, address):
if address > self.memory._allocated:
GMEMORY = 3
GQUADRATICMEMDENOM = 512 # 1 gas per 512 quadwords
old_size = ceil32(self.memory._allocated) // 32
old_totalfee = old_size * GMEMORY + old_size ** 2 // GQUADRATICMEMDENOM
new_size = ceil32(address) // 32
increased = new_size - old_size
fee = increased * GMEMORY + increased**2 // GQUADRATICMEMDENOM
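# e.g. growing the touched region from 0 to 32 bytes gives increased == 1, so the
# expansion fee is 1 * GMEMORY + 1**2 // GQUADRATICMEMDENOM == 3 gas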
self._consume(fee)
def _store(self, address, value):
# CHECK ADDRESS IS A 256 BIT INT OR BITVEC
# CHECK VALUE IS A 256 BIT INT OR BITVEC
self._allocate(address)
self.memory.write(address, [value])
self._publish('did_evm_write_memory', address, value)
def _load(self, address):
self._allocate(address)
value = self.memory.read(address, 1)[0]
value = arithmetic_simplifier(value)
if isinstance(value, Constant) and not value.taint:
value = value.value
self._publish('did_evm_read_memory', address, value)
return value
@staticmethod
def check256int(value):
assert True
def read_code(self, address, size=1):
'''
Read size byte from bytecode.
If less than size bytes are available result will be pad with \x00
'''
assert address < len(self.bytecode)
value = self.bytecode[address:address + size]
if len(value) < size:
value += '\x00' * (size - len(value)) # pad with null (spec)
return value
def disassemble(self):
return EVMAsm.disassemble(self.bytecode)
@property
def PC(self):
return self.pc
@property
def instruction(self):
'''
Current instruction pointed by self.pc
'''
# FIXME check if pc points to invalid instruction
# if self.pc >= len(self.bytecode):
# return InvalidOpcode('Code out of range')
# if self.pc in self.invalid:
# raise InvalidOpcode('Opcode inside a PUSH immediate')
def getcode():
for byte in self.bytecode[self.pc:]:
yield byte
while True:
yield '\x00'
return EVMAsm.disassemble_one(getcode())
# auxiliary funcs
# Stack related
def _push(self, value):
'''
ITEM0
ITEM1
ITEM2
sp-> {empty}
'''
assert isinstance(value, (int, long)) or isinstance(value, BitVec) and value.size == 256
if len(self.stack) >= 1024:
raise StackOverflow()
self.stack.append(value & TT256M1)
def _pop(self):
if len(self.stack) == 0:
raise StackUnderflow()
return self.stack.pop()
def _consume(self, fee):
assert fee >= 0
if self._gas < fee:
logger.debug("Not enough gas for instruction")
raise NotEnoughGas()
self._gas -= fee
# Execute an instruction from current pc
def execute(self):
if issymbolic(self.pc):
expression = self.pc
def setstate(state, value):
state.platform.current.pc = value
raise Concretize("Concretice PC",
expression=expression,
setstate=setstate,
policy='ALL')
self._publish('will_decode_instruction', self.pc)
last_pc = self.pc
current = self.instruction
# Consume some gas
self._consume(current.fee)
implementation = getattr(self, current.semantics, None)
if implementation is None:
raise TerminateState("Instruction not implemented %s" % current.semantics, testcase=True)
# Get arguments (imm, pop)
arguments = []
if self.instruction.has_operand:
arguments.append(current.operand)
for _ in range(current.pops):
arguments.append(self._pop())
# simplify stack arguments
for i in range(len(arguments)):
if isinstance(arguments[i], Expression):
arguments[i] = arithmetic_simplifier(arguments[i])
if isinstance(arguments[i], Constant):
arguments[i] = arguments[i].value
self._publish('will_execute_instruction', self.pc, current)
self._publish('will_evm_execute_instruction', current, arguments)
last_pc = self.pc
result = None
try:
result = implementation(*arguments)
self._emit_did_execute_signals(current, arguments, result, last_pc)
except ConcretizeStack as ex:
for arg in reversed(arguments):
self._push(arg)
def setstate(state, value):
self.stack[-ex.pos] = value
raise Concretize("Concretice Stack Variable",
expression=self.stack[-ex.pos],
setstate=setstate,
policy=ex.policy)
except EVMException as e:
self.last_exception = e
# Technically, this is not the right place to emit these events because the
# instruction hasn't executed yet; it executes in the EVM platform class (EVMWorld).
# However, when I tried that, in the event handlers, `state.platform.current`
# ends up being None, which caused issues. So, as a pragmatic solution, we emit
# the event before technically executing the instruction.
if isinstance(e, EVMInstructionException):
self._emit_did_execute_signals(current, arguments, result, last_pc)
raise
# Check result (push)
if current.pushes > 1:
assert len(result) == current.pushes
for value in reversed(result):
self._push(value)
elif current.pushes == 1:
self._push(result)
else:
assert current.pushes == 0
assert result is None
if current.semantics not in ('JUMP', 'JUMPI'):
# advance pc pointer
self.pc += self.instruction.size
def _emit_did_execute_signals(self, current, arguments, result, last_pc):
self._publish('did_evm_execute_instruction', current, arguments, result)
self._publish('did_execute_instruction', last_pc, self.pc, current)
# INSTRUCTIONS
def INVALID(self):
'''Halts execution'''
raise InvalidOpcode()
##########################################################################
# Stop and Arithmetic Operations
# All arithmetic is modulo 256 unless otherwise noted.
def STOP(self):
''' Halts execution '''
raise Stop()
def ADD(self, a, b):
''' Addition operation '''
return a + b
def MUL(self, a, b):
''' Multiplication operation '''
return a * b
def SUB(self, a, b):
''' Subtraction operation '''
return a - b
def DIV(self, a, b):
'''Integer division operation'''
try:
result = Operators.UDIV(a, b)
except ZeroDivisionError:
result = 0
return Operators.ITEBV(256, b == 0, 0, result)
def SDIV(self, a, b):
'''Signed integer division operation (truncated)'''
s0, s1 = to_signed(a), to_signed(b)
try:
result = (abs(s0) // abs(s1) * (-1 if s0 * s1 < 0 else 1))
except ZeroDivisionError:
result = 0
return Operators.ITEBV(256, b == 0, 0, result)
def MOD(self, a, b):
'''Modulo remainder operation'''
try:
result = Operators.ITEBV(256, b == 0, 0, a % b)
except ZeroDivisionError:
result = 0
return result
def SMOD(self, a, b):
'''Signed modulo remainder operation'''
s0, s1 = to_signed(a), to_signed(b)
sign = Operators.ITEBV(256, s0 < 0, -1, 1)
try:
result = abs(s0) % abs(s1) * sign
except ZeroDivisionError:
result = 0
return Operators.ITEBV(256, s1 == 0, 0, result)
def ADDMOD(self, a, b, c):
'''Modulo addition operation'''
try:
result = Operators.ITEBV(256, c == 0, 0, (a + b) % c)
except ZeroDivisionError:
result = 0
return result
def MULMOD(self, a, b, c):
'''Modulo multiplication operation'''
try:
result = Operators.ITEBV(256, c == 0, 0, (a * b) % c)
except ZeroDivisionError:
result = 0
return result
def EXP(self, base, exponent):
'''
Exponential operation
The zero-th power of zero 0^0 is defined to be one
'''
# fixme integer bitvec
EXP_SUPPLEMENTAL_GAS = 50 # cost of EXP exponent per byte
def nbytes(e):
for i in range(32):
if e >> (i * 8) == 0:
return i
return 32
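# e.g. nbytes(0xff) == 1 and nbytes(0x100) == 2, so a one-byte exponent costs
# 50 extra gas and a two-byte exponent costs 100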
self._consume(EXP_SUPPLEMENTAL_GAS * nbytes(exponent))
return pow(base, exponent, TT256)
def SIGNEXTEND(self, size, value):
'''Extend length of two's complement signed integer'''
# FIXME maybe use Operators.SEXTEND
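# e.g. SIGNEXTEND(0, 0xff) treats 0xff as a signed byte and yields TT256 - 1
# (i.e. -1), while SIGNEXTEND(0, 0x7f) leaves the value as 0x7f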
testbit = Operators.ITEBV(256, size <= 31, size * 8 + 7, 257)
result1 = (value | (TT256 - (1 << testbit)))
result2 = (value & ((1 << testbit) - 1))
result = Operators.ITEBV(256, (value & (1 << testbit)) != 0, result1, result2)
return Operators.ITEBV(256, size <= 31, result, value)
##########################################################################
# Comparison & Bitwise Logic Operations
def LT(self, a, b):
'''Less-than comparison'''
return Operators.ITEBV(256, Operators.ULT(a, b), 1, 0)
def GT(self, a, b):
'''Greater-than comparison'''
return Operators.ITEBV(256, Operators.UGT(a, b), 1, 0)
def SLT(self, a, b):
'''Signed less-than comparison'''
# http://gavwood.com/paper.pdf
s0, s1 = to_signed(a), to_signed(b)
return Operators.ITEBV(256, s0 < s1, 1, 0)
def SGT(self, a, b):
'''Signed greater-than comparison'''
# http://gavwood.com/paper.pdf
s0, s1 = to_signed(a), to_signed(b)
return Operators.ITEBV(256, s0 > s1, 1, 0)
def EQ(self, a, b):
'''Equality comparison'''
return Operators.ITEBV(256, a == b, 1, 0)
def ISZERO(self, a):
'''Simple not operator'''
return Operators.ITEBV(256, a == 0, 1, 0)
def AND(self, a, b):
'''Bitwise AND operation'''
return a & b
def OR(self, a, b):
'''Bitwise OR operation'''
return a | b
def XOR(self, a, b):
'''Bitwise XOR operation'''
return a ^ b
def NOT(self, a):
'''Bitwise NOT operation'''
return ~a
def BYTE(self, offset, value):
'''Retrieve single byte from word'''
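# e.g. BYTE(0, value) extracts the most significant byte of the 256-bit word
# and BYTE(31, value) the least significant one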
offset = Operators.ITEBV(256, offset < 32, (31 - offset) * 8, 256)
return Operators.ZEXTEND(Operators.EXTRACT(value, offset, 8), 256)
def SHA3(self, start, size):
'''Compute Keccak-256 hash'''
GSHA3WORD = 6 # Cost of SHA3 per word
# read memory from start to end
# calculate hash on it/ maybe remember in some structure where that hash came from
# http://gavwood.com/paper.pdf
if size:
self._consume(GSHA3WORD * (ceil32(size) // 32))
data = self.read_buffer(start, size)
if any(map(issymbolic, data)):
raise Sha3(data)
buf = ''.join(data)
value = sha3.keccak_256(buf).hexdigest()
value = int('0x' + value, 0)
self._publish('on_concrete_sha3', buf, value)
logger.info("Found a concrete SHA3 example %r -> %x", buf, value)
return value
##########################################################################
# Environmental Information
def ADDRESS(self):
'''Get address of currently executing account '''
return self.address
def BALANCE(self, account):
'''Get balance of the given account'''
BALANCE_SUPPLEMENTAL_GAS = 380
self._consume(BALANCE_SUPPLEMENTAL_GAS)
if account & TT256M1 not in self.global_storage:
return 0
value = self.global_storage[account & TT256M1]['balance']
if value is None:
return 0
return value
def ORIGIN(self):
'''Get execution origination address'''
return self.origin
def CALLER(self):
'''Get caller address'''
return Operators.ZEXTEND(self.caller, 256)
def CALLVALUE(self):
'''Get deposited value by the instruction/transaction responsible for this execution'''
return self.value
def CALLDATALOAD(self, offset):
'''Get input data of current environment'''
# FIXME concretize offset?
# if issymbolic(offset):
# self._constraints.add(Operators.ULE(offset, len(self.data)+32))
#self._constraints.add(0 == offset%32)
# raise ConcretizeStack(3, expression=offset, policy='ALL')
bytes = list(self.data[offset:offset + 32])
bytes += list('\x00' * (32 - len(bytes)))
bytes = map(Operators.ORD, bytes)
value = Operators.CONCAT(256, *bytes)
return value
def CALLDATASIZE(self):
'''Get size of input data in current environment'''
return len(self.data)
def CALLDATACOPY(self, mem_offset, data_offset, size):
'''Copy input data in current environment to memory'''
GCOPY = 3 # cost to copy one 32 byte word
self._consume(GCOPY * ceil32(size) // 32)
# FIXME put zero if not enough data
if issymbolic(size) or issymbolic(data_offset):
#self._constraints.add(Operators.ULE(data_offset, len(self.data)))
self._constraints.add(Operators.ULE(size + data_offset, len(self.data) + (32 - len(self.data) % 32)))
if issymbolic(size):
raise ConcretizeStack(3, policy='ALL')
for i in range(size):
c = Operators.ITEBV(8, data_offset + i < len(self.data), Operators.ORD(self.data[data_offset + i]), 0)
self._store(mem_offset + i, c)
def CODESIZE(self):
'''Get size of code running in current environment'''
return len(self.bytecode)
@concretized_args(size='')
def CODECOPY(self, mem_offset, code_offset, size):
'''Copy code running in current environment to memory'''
GCOPY = 3 # cost to copy one 32 byte word
self._consume(GCOPY * ceil32(size) // 32)
for i in range(size):
if code_offset + i >= len(self.bytecode):
self._store(mem_offset + i, 0)
else:
self._store(mem_offset + i, Operators.ORD(self.bytecode[code_offset + i]))
self._publish('did_evm_read_code', code_offset, size)
def GASPRICE(self):
'''Get price of gas in current environment'''
return self.price
def EXTCODESIZE(self, account):
'''Get size of an account's code'''
# FIXME
if not account & TT256M1 in self.global_storage:
return 0
return len(self.global_storage[account & TT256M1]['code'])
def EXTCODECOPY(self, account, address, offset, size):
'''Copy an account's code to memory'''
# FIXME STOP! if not enough data
if not account & TT256M1 in self.global_storage:
return
extbytecode = self.global_storage[account & TT256M1]['code']
GCOPY = 3 # cost to copy one 32 byte word
self._consume(GCOPY * ceil32(len(extbytecode)) // 32)
for i in range(size):
if offset + i < len(extbytecode):
self._store(address + i, extbytecode[offset + i])
else:
self._store(address + i, 0)
##########################################################################
# Block Information
def BLOCKHASH(self, a):
'''Get the hash of one of the 256 most recent complete blocks'''
# We are not maintaining an actual -block-chain- so we just generate
# some hashes for each virtual block
value = sha3.keccak_256(repr(a) + 'NONCE').hexdigest()
value = int('0x' + value, 0)
# 0 is left on the stack if the requested block number is greater than the current block number
# or more than 256 blocks behind the current block.
value = Operators.ITEBV(256, Operators.OR(a > self.header['number'], a < max(0, self.header['number'] - 256)), 0, value)
return value
def COINBASE(self):
'''Get the block's beneficiary address'''
return self.header['coinbase']
def TIMESTAMP(self):
'''Get the block's timestamp'''
return self.header['timestamp']
def NUMBER(self):
'''Get the block's number'''
return self.header['number']
def DIFFICULTY(self):
'''Get the block's difficulty'''
return self.header['difficulty']
def GASLIMIT(self):
'''Get the block's gas limit'''
return self.header['gaslimit']
##########################################################################
# Stack, Memory, Storage and Flow Operations
def POP(self, a):
'''Remove item from stack'''
# Items are automatically removed from stack
# by the instruction dispatcher
def MLOAD(self, address):
'''Load word from memory'''
bytes = []
for offset in xrange(32):
bytes.append(self._load(address + offset))
return Operators.CONCAT(256, *bytes)
def MSTORE(self, address, value):
'''Save word to memory'''
for offset in xrange(32):
self._store(address + offset, Operators.EXTRACT(value, (31 - offset) * 8, 8))
def MSTORE8(self, address, value):
'''Save byte to memory'''
self._store(address, Operators.EXTRACT(value, 0, 8))
def SLOAD(self, offset):
'''Load word from storage'''
self._publish('will_evm_read_storage', offset)
value = self.global_storage[self.address]['storage'].get(offset, 0)
self._publish('did_evm_read_storage', offset, value)
return value
def SSTORE(self, offset, value):
'''Save word to storage'''
self._publish('will_evm_write_storage', offset, value)
self.global_storage[self.address]['storage'][offset] = value
if value is 0:
del self.global_storage[self.address]['storage'][offset]
self._publish('did_evm_write_storage', offset, value)
def JUMP(self, dest):
'''Alter the program counter'''
self.pc = dest
# TODO check for JUMPDEST on next iter?
def JUMPI(self, dest, cond):
'''Conditionally alter the program counter'''
self.pc = Operators.ITEBV(256, cond != 0, dest, self.pc + self.instruction.size)
assert self.bytecode[dest] == '\x5b', "Must be jmpdest instruction" # fixme what if dest == self.pc + self.instruction.size?
def GETPC(self):
'''Get the value of the program counter prior to the increment'''
return self.pc
def MSIZE(self):
'''Get the size of active memory in bytes'''
return self.memory._allocated * 32
def GAS(self):
'''Get the amount of available gas, including the corresponding reduction for the cost of this instruction'''
#fixme calculate gas consumption
return self._gas
def JUMPDEST(self):
'''Mark a valid destination for jumps'''
##########################################################################
# Push Operations
def PUSH(self, value):
'''Place 1 to 32 bytes item on stack'''
return value
##########################################################################
# Duplication Operations
def DUP(self, *operands):
'''Duplicate stack item'''
return (operands[-1],) + operands
##########################################################################
# Exchange Operations
def SWAP(self, *operands):
'''Exchange 1st and 2nd stack items'''
a = operands[0]
b = operands[-1]
return (b,) + operands[1:-1] + (a,)
##########################################################################
# Logging Operations
@concretized_args(size='ONE')
def LOG(self, address, size, *topics):
memlog = self.read_buffer(address, size)
self.logs.append(EVMLog(self.address, memlog, topics))
logger.info('LOG %r %r', memlog, topics)
##########################################################################
# System operations
def read_buffer(self, offset, size):
if size:
self._allocate(offset + size)
data = []
for i in xrange(size):
data.append(self._load(offset+i))
data = map(Operators.CHR, data)
if any(map(issymbolic, data)):
data_symb = self._constraints.new_array(index_bits=256, index_max=len(data))
for i in range(len(data)):
data_symb[i] = Operators.ORD(data[i])
data = data_symb
else:
data = ''.join(data)
return data
def write_buffer(self, offset, buf):
for i, c in enumerate(buf):
self._store(offset+i, Operators.ORD(c))
def CREATE(self, value, offset, size):
'''Create a new account with associated code'''
code = self.read_buffer(offset, size)
raise Create(value, code)
@concretized_args(in_offset='SAMPLED', in_size='SAMPLED')
def CALL(self, gas, to, value, in_offset, in_size, out_offset, out_size):
'''Message-call into an account'''
data = self.read_buffer(in_offset, in_size)
raise Call(gas, to, value, data, out_offset, out_size)
def CALLCODE(self, gas, to, value, in_offset, in_size, out_offset, out_size):
'''Message-call into this account with alternative account's code'''
data = self.read_buffer(in_offset, in_size)
raise Call(gas, self.address, value, data, out_offset, out_size)
def RETURN(self, offset, size):
'''Halt execution returning output data'''
data = self.read_buffer(offset, size)
raise Return(data)
def DELEGATECALL(self, gas, to, in_offset, in_size, out_offset, out_size):
'''Message-call into this account with an alternative account's code, but persisting the current values for sender and value'''
value = 0
data = self.read_buffer(in_offset, in_size)
raise Call(gas, self.address, value, data, out_offset, out_size)
def REVERT(self, offset, size):
data = self.read_buffer(offset, size)
raise Revert(data)
def SELFDESTRUCT(self, to):
'''Halt execution and register account for later deletion'''
raise SelfDestruct(to)
def __str__(self):
def hexdump(src, length=16):
FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])
lines = []
for c in xrange(0, len(src), length):
chars = src[c:c + length]
def p(x):
if issymbolic(x):
return '??'
else:
return "%02x" % x
hex = ' '.join([p(x) for x in chars])
def p1(x):
if issymbolic(x):
return '.'
else:
return "%s" % ((x <= 127 and FILTER[x]) or '.')
printable = ''.join([p1(x) for x in chars])
lines.append("%04x %-*s %s" % (c, length * 3, hex, printable))
return lines
m = []
if len(self.memory._memory.keys()):
for i in range(max([0] + self.memory._memory.keys()) + 1):
c = self.memory.read(i, 1)[0]
m.append(c)
hd = hexdump(m)
result = ['-' * 147]
if issymbolic(self.pc):
result.append('<Symbolic PC>')
else:
result.append('0x%04x: %s %s %s\n' % (self.pc, self.instruction.name, self.instruction.has_operand and '0x%x' %
self.instruction.operand or '', self.instruction.description))
result.append('Stack Memory')
sp = 0
for i in list(reversed(self.stack))[:10]:
r = ''
if issymbolic(i):
r = '%s %r' % (sp == 0 and 'top> ' or ' ', i)
else:
r = '%s 0x%064x' % (sp == 0 and 'top> ' or ' ', i)
sp += 1
h = ''
try:
h = hd[sp - 1]
except BaseException:
pass
r += ' ' * (75 - len(r)) + h
result.append(r)
for i in range(sp, len(hd)):
r = ' ' * 75 + hd[i]
result.append(r)
result = [hex(self.address) + ": " + x for x in result]
return '\n'.join(result)
################################################################################
################################################################################
################################################################################
################################################################################
class EVMWorld(Platform):
_published_events = {'evm_read_storage', 'evm_write_storage', 'evm_read_code',
'decode_instruction', 'execute_instruction', 'concrete_sha3', 'symbolic_sha3'}
def __init__(self, constraints, storage=None, **kwargs):
super(EVMWorld, self).__init__(path="NOPATH", **kwargs)
self._global_storage = {} if storage is None else storage
self._constraints = constraints
self._callstack = []
self._deleted_address = set()
self._logs = list()
self._sha3 = {}
self._pending_transaction = None
self._transactions = list()
self._internal_transactions = list()
def __getstate__(self):
state = super(EVMWorld, self).__getstate__()
state['sha3'] = self._sha3
state['pending_transaction'] = self._pending_transaction
state['logs'] = self._logs
state['storage'] = self._global_storage
state['constraints'] = self._constraints
state['callstack'] = self._callstack
state['deleted_address'] = self._deleted_address
state['transactions'] = self._transactions
state['internal_transactions'] = self._internal_transactions
return state
def __setstate__(self, state):
super(EVMWorld, self).__setstate__(state)
self._sha3 = state['sha3']
self._pending_transaction = state['pending_transaction']
self._logs = state['logs']
self._global_storage = state['storage']
self._constraints = state['constraints']
self._callstack = state['callstack']
self._deleted_address = state['deleted_address']
self._transactions = state['transactions']
self._internal_transactions = state['internal_transactions']
self._do_events()
def _do_events(self):
if self.current is not None:
self.forward_events_from(self.current)
self.subscribe('on_concrete_sha3', self._concrete_sha3_callback)
def _concrete_sha3_callback(self, buf, value):
if buf in self._sha3:
assert self._sha3[buf] == value
self._sha3[buf] = value
def __getitem__(self, index):
assert isinstance(index, (int, long))
return self.storage[index]
def __str__(self):
return "WORLD:" + str(self._global_storage)
@property
def logs(self):
return self._logs
@property
def constraints(self):
return self._constraints
@property
def transactions(self):
return self._transactions
@property
def internal_transactions(self):
number_of_transactions = len(self._transactions)
for _ in range(len(self._internal_transactions), number_of_transactions):
self._internal_transactions.append([])
return self._internal_transactions
@property
def all_transactions(self):
txs = []
for tx in self._transactions:
txs.append(tx)
for txi in self.internal_transactions[self._transactions.index(tx)]:
txs.append(txi)
return txs
@property
def last_return_data(self):
return self.transactions[-1].return_data
@constraints.setter
def constraints(self, constraints):
self._constraints = constraints
for addr in self.storage:
if isinstance(self.storage[addr]['storage'], EVMMemory):
self.storage[addr]['storage'].constraints = constraints
if self.current:
self.current.constraints = constraints
@property
def current(self):
try:
return self._callstack[-1]
except IndexError:
return None
@property
def accounts(self):
return self.storage.keys()
@property
def normal_accounts(self):
accs = []
for address in self.accounts:
if len(self.get_code(address)) == 0:
accs.append(address)
return accs
@property
def contract_accounts(self):
accs = []
for address in self.accounts:
if len(self.get_code(address)) > 0:
accs.append(address)
return accs
@property
def deleted_addresses(self):
return self._deleted_address
@property
def storage(self):
if self.depth:
return self.current.global_storage
else:
return self._global_storage
def set_storage_data(self, address, offset, value):
self.storage[address]['storage'][offset] = value
def get_storage_data(self, address, offset):
return self.storage[address]['storage'].get(offset)
def get_storage_items(self, address):
return self.storage[address]['storage'].items()
def has_storage(self, address):
return len(self.storage[address]['storage'].items()) != 0
def set_balance(self, address, value):
self.storage[int(address)]['balance'] = value
def get_balance(self, address):
return self.storage[address]['balance']
def add_to_balance(self, address, value):
self.storage[address]['balance'] += value
def get_code(self, address):
return self.storage[address]['code']
def set_code(self, address, data):
self.storage[address]['code'] = data
def has_code(self, address):
return len(self.storage[address]['code']) > 0
def log(self, address, topic, data):
self.logs.append((address, data, topic))
logger.info('LOG %r %r', data, topic)
def log_storage(self, addr):
pass
def add_refund(self, value):
pass
def block_prevhash(self):
return 0
def block_coinbase(self):
return 0
def block_timestamp(self):
return 0
def block_number(self):
return 0
def block_difficulty(self):
return 0
def block_gas_limit(self):
return 0
def tx_origin(self):
return self.current_vm.origin
def tx_gasprice(self):
return 0
# CALLSTACK
def _push_vm(self, vm):
# Storage address -> account(value, local_storage)
vm.global_storage = self.storage
vm.global_storage[vm.address]['storage'] = copy.copy(self.storage[vm.address]['storage'])
if self.depth:
self.current.constraints = None
# MAKE A DEEP COPY OF THE SPECIFIC ACCOUNT
self._callstack.append(vm)
self.current.depth = self.depth
self.current.constraints = self.constraints
# self.forward_events_from(self.current)
self._do_events()
if self.depth > 1024:
while self.depth > 0:
self._pop_vm(rollback=True)
raise TerminateState("Maximum call depth limit is reached", testcase=True)
def _pop_vm(self, rollback=False):
vm = self._callstack.pop()
assert self.constraints == vm.constraints
if self.current:
self.current.constraints = vm.constraints
if not rollback:
if self.depth:
self.current.global_storage = vm.global_storage
self.current.logs += vm.logs
self.current.suicides = self.current.suicides.union(vm.suicides)
else:
self._global_storage = vm.global_storage
self._deleted_address = self._deleted_address.union(vm.suicides)
self._logs += vm.logs
for address in self._deleted_address:
del self.storage[address]
return vm
@property
def depth(self):
return len(self._callstack)
def new_address(self):
''' create a fresh 160bit address '''
new_address = random.randint(100, pow(2, 160))
if new_address in self._global_storage.keys():
return self.new_address()
return new_address
def execute(self):
self._process_pending_transaction()
try:
if self.current is None:
raise TerminateState("Trying to execute an empty transaction", testcase=False)
self.current.execute()
except Create as ex:
self.CREATE(ex.value, ex.data)
except Call as ex:
self.CALL(ex.gas, ex.to, ex.value, ex.data)
except Stop as ex:
self.STOP()
except Return as ex:
self.RETURN(ex.data)
except Revert as ex:
self.REVERT(ex.data)
except SelfDestruct as ex:
self.SELFDESTRUCT(ex.to)
except Sha3 as ex:
self.HASH(ex.data)
except EVMException as e:
self.THROW()
except Exception:
raise
def run(self):
try:
while True:
self.execute()
except TerminateState as e:
if self.depth == 0 and e.message == 'RETURN':
return self.last_return
raise e
def create_account(self, address=None, balance=0, code='', storage=None):
''' code is the runtime code '''
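# Accounts are stored as plain dicts:
#   storage[address] == {'nonce': 0, 'balance': <int or BitVec>,
#                        'storage': <dict or EVMMemory>, 'code': <str>}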
storage = {} if storage is None else storage
if address is None:
address = self.new_address()
assert address not in self.storage.keys(), 'The account already exists'
self.storage[address] = {}
self.storage[address]['nonce'] = 0
self.storage[address]['balance'] = balance
self.storage[address]['storage'] = storage
self.storage[address]['code'] = code
return address
def create_contract(self, origin=None, price=0, address=None, caller=None, balance=0, init='', run=False, header=None):
assert len(init) > 0
'''
The way that the Solidity compiler expects the constructor arguments to
be passed is by appending the arguments to the byte code produced by the
Solidity compiler. The arguments are formatted as defined in the Ethereum
ABI2. The arguments are then copied from the init byte array to the EVM
memory through the CODECOPY opcode with appropriate values on the stack.
This is done when the byte code in the init byte array is actually run
on the network.
'''
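        # Illustrative example of that convention (values here are made up, not from
        # this codebase): for `constructor(uint256 x)` deployed with x=3, the sender
        # submits
        #     init = creation_bytecode + b'\x00' * 31 + b'\x03'
        # i.e. the ABI-encoded argument (one 32-byte word) appended to the compiler's
        # creation bytecode; the init code then CODECOPYs that tail into memory.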
assert self._pending_transaction is None
if caller is None and origin is not None:
caller = origin
if origin is None and caller is not None:
origin = caller
assert caller == origin
if header is None:
header = {'timestamp': 0,
'number': 0,
'coinbase': 0,
'gaslimit': 0,
'difficulty': 0
}
assert not issymbolic(address)
assert not issymbolic(origin)
address = self.create_account(address, 0)
self.storage[address]['storage'] = EVMMemory(self.constraints, 256, 256)
self._pending_transaction = PendingTransaction('Create', address, origin, price, '', origin, balance, ''.join(init), header)
if run:
assert False
# run initialization code
# Assert everything is concrete?
assert not issymbolic(origin)
assert not issymbolic(address)
assert self.storage[origin]['balance'] >= balance
runtime = self.run()
self.storage[address]['code'] = ''.join(runtime)
return address
def CREATE(self, value, bytecode):
origin = self.current.origin
caller = self.current.address
price = self.current.price
self.create_contract(origin, price, address=None, balance=value, init=bytecode, run=False)
self._process_pending_transaction()
def transaction(self, address, origin=None, price=0, data='', caller=None, value=0, header=None, run=False):
assert self._pending_transaction is None
if caller is None and origin is not None:
caller = origin
if origin is None and caller is not None:
origin = caller
if address not in self.accounts or\
caller not in self.accounts or \
origin != caller and origin not in self.accounts:
raise TerminateState('Account does not exist %x' % address, testcase=True)
if header is None:
header = {'timestamp': 0,
'number': 0,
'coinbase': 0,
'gaslimit': 0,
'difficulty': 0
}
if any([isinstance(data[i], Expression) for i in range(len(data))]):
data_symb = self._constraints.new_array(index_bits=256, index_max=len(data))
for i in range(len(data)):
data_symb[i] = Operators.ORD(data[i])
data = data_symb
else:
data = ''.join(data)
bytecode = self.get_code(address)
self._pending_transaction = PendingTransaction('Call', address, origin, price, data, caller, value, bytecode, header)
if run:
assert self.depth == 0
assert not issymbolic(caller)
assert not issymbolic(address)
assert self.get_balance(caller) >= value
# run contract
# Assert everything is concrete?
try:
return self.run()
except TerminateState:
# FIXME better use of exceptions!
pass
def _process_pending_transaction(self):
if self._pending_transaction is None:
return
assert self.current is None or self.current.last_exception is not None
ty, address, origin, price, data, caller, value, bytecode, header = self._pending_transaction
src_balance = self.get_balance(caller) # from
dst_balance = self.get_balance(address) # to
# discarding absurd amount of ether (no ether overflow)
self.constraints.add(src_balance + value >= src_balance)
failed = False
if self.depth > 1024:
failed = True
if not failed:
enough_balance = src_balance >= value
if issymbolic(enough_balance):
enough_balance_solutions = solver.get_all_values(self._constraints, enough_balance)
if set(enough_balance_solutions) == set([True, False]):
raise Concretize('Forking on available funds',
expression=src_balance < value,
setstate=lambda a, b: None,
policy='ALL')
if set(enough_balance_solutions) == set([False]):
failed = True
else:
if not enough_balance:
failed = True
self._pending_transaction = None
if ty == 'Create':
data = bytecode
is_human_tx = (self.depth == 0)
if failed:
if is_human_tx: # human transaction
tx = Transaction(ty, address, origin, price, data, caller, value, 'TXERROR', None)
self._transactions.append(tx)
raise TerminateState('TXERROR')
else:
self.current._push(0)
return
        # Here we have enough funds and room in the callstack
self.storage[address]['balance'] += value
self.storage[caller]['balance'] -= value
new_vm = EVM(self._constraints, address, origin, price, data, caller, value, bytecode, header, global_storage=self.storage)
self._push_vm(new_vm)
tx = Transaction(ty, address, origin, price, data, caller, value, None, None)
if is_human_tx:
# handle human transactions
if ty == 'Create':
self.current.last_exception = Create(None, None)
elif ty == 'Call':
self.current.last_exception = Call(None, None, None, None)
self._transactions.append(tx)
else:
n = len(self._transactions)
if len(self._internal_transactions) <= n:
for _ in xrange(n-len(self._internal_transactions)+1):
self._internal_transactions.append([])
self._internal_transactions[n].append(tx)
def CALL(self, gas, to, value, data):
address = to
origin = self.current.origin
caller = self.current.address
price = self.current.price
depth = self.depth + 1
bytecode = self.get_code(to)
self.transaction(address, origin, price, data, caller, value)
self._process_pending_transaction()
def RETURN(self, data):
prev_vm = self._pop_vm() # current VM changed!
if self.depth == 0:
tx = self._transactions[-1]
tx.return_data = data
tx.result = 'RETURN'
raise TerminateState("RETURN", testcase=True)
last_ex = self.current.last_exception
self.current.last_exception = None
assert isinstance(last_ex, (Call, Create))
if isinstance(last_ex, Create):
self.current._push(prev_vm.address)
self.set_code(prev_vm.address, data)
else:
size = min(last_ex.out_size, len(data))
self.current.write_buffer(last_ex.out_offset, data[:size])
self.current._push(1)
# we are still on the CALL/CREATE
self.current.pc += self.current.instruction.size
def STOP(self):
prev_vm = self._pop_vm(rollback=False)
if self.depth == 0:
tx = self._transactions[-1]
tx.return_data = None
tx.result = 'STOP'
raise TerminateState("STOP", testcase=True)
self.current.last_exception = None
self.current._push(1)
# we are still on the CALL/CREATE
self.current.pc += self.current.instruction.size
def THROW(self):
prev_vm = self._pop_vm(rollback=True)
# revert balance on CALL fail
self.storage[prev_vm.caller]['balance'] += prev_vm.value
self.storage[prev_vm.address]['balance'] -= prev_vm.value
if self.depth == 0:
tx = self._transactions[-1]
tx.return_data = None
tx.result = 'THROW'
raise TerminateState("THROW", testcase=True)
self.current.last_exception = None
self.current._push(0)
# we are still on the CALL/CREATE
self.current.pc += self.current.instruction.size
def REVERT(self, data):
prev_vm = self._pop_vm(rollback=True)
# revert balance on CALL fail
self.storage[prev_vm.caller]['balance'] += prev_vm.value
self.storage[prev_vm.address]['balance'] -= prev_vm.value
if self.depth == 0:
tx = self._transactions[-1]
tx.return_data = data
tx.result = 'REVERT'
raise TerminateState("REVERT", testcase=True)
self.current.last_exception = None
# we are still on the CALL/CREATE
self.current.pc += self.current.instruction.size
def SELFDESTRUCT(self, recipient):
# This may create a user account
recipient = Operators.EXTRACT(recipient, 0, 160)
address = self.current.address
if recipient not in self.storage.keys():
self.create_account(address=recipient, balance=0, code='', storage=None)
self.storage[recipient]['balance'] += self.storage[address]['balance']
self.storage[address]['balance'] = 0
self.current.suicides.add(address)
prev_vm = self._pop_vm(rollback=False)
if self.depth == 0:
tx = self._transactions[-1]
tx.result = 'SELFDESTRUCT'
raise TerminateState("SELFDESTRUCT", testcase=True)
def HASH(self, data):
def compare_buffers(a, b):
if len(a) != len(b):
return False
cond = True
for i in range(len(a)):
cond = Operators.AND(a[i] == b[i], cond)
if cond is False:
return False
return cond
assert any(map(issymbolic, data))
logger.info("SHA3 Searching over %d known hashes", len(self._sha3))
logger.info("SHA3 TODO save this state for future explorations with more known hashes")
# Broadcast the signal
self._publish('on_symbolic_sha3', data, self._sha3.items())
results = []
        # If known_hashes is true then there is a _known_ solution for the hash
known_hashes = False
for key, value in self._sha3.items():
assert not any(map(issymbolic, key))
cond = compare_buffers(key, data)
if solver.can_be_true(self._constraints, cond):
results.append((cond, value))
known_hashes = Operators.OR(cond, known_hashes)
# results contains all the possible and known solutions
# If known_hashes can be False then data can take at least one concrete
# value of which we do not know a hash for.
# Calculate the sha3 of one extra example solution and add this as a
# potential result
# This is an incomplete result:
        # Instead of choosing one single extra concrete solution we should save
# the state and when a new sha3 example is found load it back and try
# the new concretization for sha3.
with self._constraints as temp_cs:
if solver.can_be_true(temp_cs, Operators.NOT(known_hashes)):
temp_cs.add(Operators.NOT(known_hashes))
# a_buffer is different from all strings we know a hash for
a_buffer = solver.get_value(temp_cs, data)
cond = compare_buffers(a_buffer, data)
# Get the sha3 for a_buffer
a_value = int(sha3.keccak_256(a_buffer).hexdigest(), 16)
# add the new sha3 pair to the known_hashes and result
self._publish('on_concrete_sha3', a_buffer, a_value)
results.append((cond, a_value))
known_hashes = Operators.OR(cond, known_hashes)
if solver.can_be_true(self._constraints, known_hashes):
self._constraints.add(known_hashes)
value = 0 # never used
for cond, sha in results:
value = Operators.ITEBV(256, cond, sha, value)
else:
raise TerminateState("Unknown hash")
self.current._push(value)
self.current.pc += self.current.instruction.size
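# Usage sketch (hedged): a typical way to drive the world methods above end to end.
# The enclosing class name and the constraint-set object are assumed from context and
# may differ in this codebase; the bytecode and calldata values are illustrative.
#
#     world = EVMWorld(constraints)
#     owner = world.create_account(balance=10 ** 18)
#     contract = world.create_contract(origin=owner, balance=0, init=init_bytecode)
#     world.transaction(contract, origin=owner, data=calldata, value=0)
#     world.run()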
|
StarcoderdataPython
|
8140694
|
<reponame>filwaitman/rest-api-lib-creator
import requests
from .datastructures import Meta, NoContent, UnhandledResponse
from .utils import add_querystring_to_url
class ListMixin(object):
list_expected_status_code = 200
list_url = None
@classmethod
def get_list_url(cls):
if cls.list_url:
return cls.list_url.format(base_api_url=cls.get_base_api_url())
return cls.get_base_api_url()
@classmethod
def list(cls, **kwargs):
url = add_querystring_to_url(cls.get_list_url(), **kwargs)
response = cls.request(requests.get, url)
if response.status_code != cls.list_expected_status_code:
return UnhandledResponse(meta=Meta(response))
return cls.prepare_response(response, cls, many=True)
class CreateMixin(object):
    create_payload_mode = 'data'  # 'data' or 'json'
create_expected_status_code = 201
create_url = None
@classmethod
def get_create_url(cls):
if cls.create_url:
return cls.create_url.format(base_api_url=cls.get_base_api_url())
return cls.get_base_api_url()
@classmethod
def create(cls, **kwargs):
outer_kwargs = {cls.create_payload_mode: kwargs}
response = cls.request(requests.post, cls.get_create_url(), **outer_kwargs)
if response.status_code != cls.create_expected_status_code:
return UnhandledResponse(meta=Meta(response))
return cls.prepare_response(response, cls)
def save(self):
if not(self._existing_instance):
return self.create(**self._changed_data)
return self.update(self.get_identifier(), **self._changed_data)
class RetrieveMixin(object):
retrieve_expected_status_code = 200
retrieve_url = None
@classmethod
def get_retrieve_url(cls, identifier):
if cls.retrieve_url:
return cls.retrieve_url.format(base_api_url=cls.get_base_api_url(), identifier=identifier)
return cls.get_instance_url(identifier)
@classmethod
def retrieve(cls, identifier):
response = cls.request(requests.get, cls.get_retrieve_url(identifier))
if response.status_code != cls.retrieve_expected_status_code:
return UnhandledResponse(meta=Meta(response))
return cls.prepare_response(response, cls)
class UpdateMixin(object):
    update_payload_mode = 'data'  # 'data' or 'json'
update_expected_status_code = 200
update_url = None
@classmethod
def get_update_url(cls, identifier):
if cls.update_url:
return cls.update_url.format(base_api_url=cls.get_base_api_url(), identifier=identifier)
return cls.get_instance_url(identifier)
@classmethod
def update(cls, identifier, **kwargs):
outer_kwargs = {cls.update_payload_mode: kwargs}
response = cls.request(requests.patch, cls.get_update_url(identifier), **outer_kwargs)
if response.status_code != cls.update_expected_status_code:
return UnhandledResponse(meta=Meta(response))
return cls.prepare_response(response, cls)
def save(self):
if not(self._existing_instance):
return self.create(**self._changed_data)
return self.update(self.get_identifier(), **self._changed_data)
class DeleteMixin(object):
delete_expected_status_code = 204
delete_url = None
@classmethod
def get_delete_url(cls, identifier):
if cls.delete_url:
return cls.delete_url.format(base_api_url=cls.get_base_api_url(), identifier=identifier)
return cls.get_instance_url(identifier)
@classmethod
def delete(cls, identifier):
response = cls.request(requests.delete, cls.get_delete_url(identifier))
        if response.status_code != cls.delete_expected_status_code:
return UnhandledResponse(meta=Meta(response))
return NoContent(meta=Meta(response))
def destroy(self):
return self.delete(self.get_identifier())
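# Usage sketch (hedged): the mixins above are meant to be combined into a single
# resource class. The base class supplying get_base_api_url(), request() and
# prepare_response() lives elsewhere in this library; 'BaseAPI' is a stand-in name and
# the endpoint/field values are illustrative.
#
#     class User(BaseAPI, ListMixin, CreateMixin, RetrieveMixin, UpdateMixin, DeleteMixin):
#         pass
#
#     users = User.list(page=2)            # GET    list url with ?page=2
#     user = User.create(name='Ana')       # POST   create url
#     user = User.retrieve('some-id')      # GET    retrieve url
#     User.update('some-id', name='Bea')   # PATCH  update url
#     User.delete('some-id')               # DELETE delete url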
|
StarcoderdataPython
|
187820
|
<filename>main.py
# -*- coding: utf-8 -*-
from flask import Flask
import yagmail
import logging
import sys
from flask import request
reload(sys)
sys.setdefaultencoding('utf-8')
# change here
mail_user = "yourmail"
mail_pass = "<PASSWORD>"
smtp_host = "smtp.163.com"
smtp_port = '994'
# end change
app = Flask(__name__)
@app.route("/send", methods=["POST"])
def send():
logging.info(request.form['content'])
lines = str(request.form['content']).split("||")
content = dict()
for line in lines:
content[line.split("=")[0]] = line.split("=")[1]
user = content["user"]
    subject = content.get("subject", "sent by my self-hosted mail service.")
content = content["content"]
if user is None or user == '':
logging.error("user is empty! %s" % request.form['content'])
        return "user is empty!"
    # create a new SMTP instance on every request, since the server may drop the connection
    yag = yagmail.SMTP(user=mail_user, password=mail_pass, host=smtp_host, port=smtp_port)
yag.send(to=user, subject=subject, contents=content)
return "send mail successfully!"
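# Example request (sketch; the address and text are illustrative). The handler above
# splits request.form['content'] on '||' and each piece on '=', so a call looks like:
#
#     curl -X POST http://localhost:5555/send \
#          --data-urlencode 'content=user=alice@example.com||subject=hello||content=hi there'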
if __name__ == '__main__':
app.run(port=5555)
|
StarcoderdataPython
|
3294158
|
<gh_stars>10-100
'''
Authors: <NAME>, <NAME>
'''
from ReLERNN.imports import *
class SequenceBatchGenerator(tf.keras.utils.Sequence):
'''
    This class, SequenceBatchGenerator, extends tf.keras.utils.Sequence
    so as to multithread the batch preparation in tandem with network training
    for maximum efficiency on the hardware provided.
    It generates batches of genotype matrices from a given .trees directory
    (which is generated most efficiently by the Simulator class)
which have been prepped according to the given parameters.
It also offers a range of data prepping heuristics as well as normalizing
the targets.
def __getitem__(self, idx):
def __data_generation(self, batchTreeIndices):
'''
#Initialize the member variables which largely determine the data prepping heuristics
#in addition to the .trees directory containing the data from which to generate the batches
def __init__(self,
treesDirectory,
targetNormalization = 'zscore',
batchSize=64,
maxLen=None,
frameWidth=0,
center=False,
shuffleInds=False,
sortInds=False,
ancVal = -1,
padVal = -1,
derVal = 1,
realLinePos = True,
posPadVal = 0,
shuffleExamples = True,
splitFLAG = False,
seqD = None,
maf = None,
hotspots = False,
seed = None
):
self.treesDirectory = treesDirectory
self.targetNormalization = targetNormalization
infoFilename = os.path.join(self.treesDirectory,"info.p")
self.infoDir = pickle.load(open(infoFilename,"rb"))
self.batch_size = batchSize
self.maxLen = maxLen
self.frameWidth = frameWidth
self.center = center
self.shuffleInds = shuffleInds
self.sortInds=sortInds
self.ancVal = ancVal
self.padVal = padVal
self.derVal = derVal
self.realLinePos = realLinePos
self.posPadVal = posPadVal
self.indices = np.arange(self.infoDir["numReps"])
self.shuffleExamples = shuffleExamples
self.splitFLAG = splitFLAG
self.seqD = seqD
self.maf = maf
self.hotspots = hotspots
self.seed = seed
if self.seed:
os.environ['PYTHONHASHSEED']=str(self.seed)
random.seed(self.seed)
np.random.seed(self.seed)
if(targetNormalization != None):
if self.hotspots:
self.normalizedTargets = self.normalizeTargetsBinaryClass()
else:
self.normalizedTargets = self.normalizeTargets()
if(shuffleExamples):
np.random.shuffle(self.indices)
def sort_min_diff(self,amat):
'''this function takes in a SNP matrix with indv on rows and returns the same matrix with indvs sorted by genetic similarity.
this problem is NP, so here we use a nearest neighbors approx. it's not perfect, but it's fast and generally performs ok.
assumes your input matrix is a numpy array'''
mb = NearestNeighbors(len(amat), metric='manhattan').fit(amat)
v = mb.kneighbors(amat)
smallest = np.argmin(v[0].sum(axis=1))
return amat[v[1][smallest]]
def pad_HapsPos(self,haplotypes,positions,maxSNPs=None,frameWidth=0,center=False):
'''
pads the haplotype and positions tensors
to be uniform with the largest tensor
'''
haps = haplotypes
pos = positions
#Normalize the shape of all haplotype vectors with padding
for i in range(len(haps)):
numSNPs = haps[i].shape[0]
paddingLen = maxSNPs - numSNPs
if(center):
prior = paddingLen // 2
post = paddingLen - prior
haps[i] = np.pad(haps[i],((prior,post),(0,0)),"constant",constant_values=2.0)
pos[i] = np.pad(pos[i],(prior,post),"constant",constant_values=-1.0)
else:
if(paddingLen < 0):
haps[i] = np.pad(haps[i],((0,0),(0,0)),"constant",constant_values=2.0)[:paddingLen]
pos[i] = np.pad(pos[i],(0,0),"constant",constant_values=-1.0)[:paddingLen]
else:
haps[i] = np.pad(haps[i],((0,paddingLen),(0,0)),"constant",constant_values=2.0)
pos[i] = np.pad(pos[i],(0,paddingLen),"constant",constant_values=-1.0)
haps = np.array(haps,dtype='float32')
pos = np.array(pos,dtype='float32')
if(frameWidth):
fw = frameWidth
haps = np.pad(haps,((0,0),(fw,fw),(fw,fw)),"constant",constant_values=2.0)
pos = np.pad(pos,((0,0),(fw,fw)),"constant",constant_values=-1.0)
return haps,pos
def padAlleleFqs(self,haplotypes,positions,maxSNPs=None,frameWidth=0,center=False):
'''
convert haps to allele frequencies, normalize, and
pad the haplotype and positions tensors
to be uniform with the largest tensor
'''
haps = haplotypes
positions = positions
fqs, pos = [], []
# Resample to sequencing depth and convert to allele frequencies
for i in range(len(haps)):
tmp_freqs = []
tmp_pos = []
fqs_list = haps[i].tolist()
for j in range(len(fqs_list)):
if self.seqD != -9:
## Resample
z = resample(fqs_list[j], n_samples=self.seqD, replace=True)
raw_freq = round(np.count_nonzero(z)/float(len(z)),3)
if self.maf <= raw_freq < 1.0:
tmp_freqs.append(raw_freq)
tmp_pos.append(positions[i][j])
else:
## Don't resample
raw_freq = round(np.count_nonzero(fqs_list[j])/float(len(fqs_list[j])),3)
tmp_freqs.append(raw_freq)
tmp_pos.append(positions[i][j])
fqs.append(np.array(tmp_freqs))
pos.append(np.array(tmp_pos))
# Normalize
fqs = self.normalizeAlleleFqs(fqs)
# Pad
for i in range(len(fqs)):
numSNPs = fqs[i].shape[0]
paddingLen = maxSNPs - numSNPs
if(center):
prior = paddingLen // 2
post = paddingLen - prior
fqs[i] = np.pad(fqs[i],(prior,post),"constant",constant_values=-1.0)
pos[i] = np.pad(pos[i],(prior,post),"constant",constant_values=-1.0)
else:
if(paddingLen < 0):
fqs[i] = np.pad(fqs[i],(0,0),"constant",constant_values=-1.0)[:paddingLen]
pos[i] = np.pad(pos[i],(0,0),"constant",constant_values=-1.0)[:paddingLen]
else:
fqs[i] = np.pad(fqs[i],(0,paddingLen),"constant",constant_values=-1.0)
pos[i] = np.pad(pos[i],(0,paddingLen),"constant",constant_values=-1.0)
fqs = np.array(fqs,dtype='float32')
pos = np.array(pos,dtype='float32')
if(frameWidth):
fw = frameWidth
fqs = np.pad(fqs,((0,0),(fw,fw)),"constant",constant_values=-1.0)
pos = np.pad(pos,((0,0),(fw,fw)),"constant",constant_values=-1.0)
return fqs,pos
def normalizeTargets(self):
'''
We want to normalize all targets.
'''
norm = self.targetNormalization
nTargets = copy.deepcopy(self.infoDir['rho'])
if(norm == 'zscore'):
tar_mean = np.mean(nTargets,axis=0)
tar_sd = np.std(nTargets,axis=0)
nTargets -= tar_mean
nTargets = np.divide(nTargets,tar_sd,out=np.zeros_like(nTargets),where=tar_sd!=0)
elif(norm == 'divstd'):
tar_sd = np.std(nTargets,axis=0)
nTargets = np.divide(nTargets,tar_sd,out=np.zeros_like(nTargets),where=tar_sd!=0)
return nTargets
def normalizeTargetsBinaryClass(self):
'''
We want to normalize all targets.
'''
norm = self.targetNormalization
nTargets = copy.deepcopy(self.infoDir['hotWin'])
nTargets[nTargets<5] = 0
nTargets[nTargets>=5] = 1
return nTargets.astype(np.uint8)
def normalizeAlleleFqs(self, fqs):
'''
normalize the allele frequencies for the batch
'''
norm = self.targetNormalization
if(norm == 'zscore'):
allVals = np.concatenate([a.flatten() for a in fqs])
fqs_mean = np.mean(allVals)
fqs_sd = np.std(allVals)
for i in range(len(fqs)):
fqs[i] = np.subtract(fqs[i],fqs_mean)
fqs[i] = np.divide(fqs[i],fqs_sd,out=np.zeros_like(fqs[i]),where=fqs_sd!=0)
elif(norm == 'divstd'):
allVals = np.concatenate([a.flatten() for a in fqs])
fqs_sd = np.std(allVals)
for i in range(len(fqs)):
fqs[i] = np.divide(fqs[i],fqs_sd,out=np.zeros_like(fqs[i]),where=fqs_sd!=0)
return fqs
def on_epoch_end(self):
if(self.shuffleExamples):
np.random.shuffle(self.indices)
def __len__(self):
return int(np.floor(self.infoDir["numReps"]/self.batch_size))
def __getitem__(self, idx):
indices = self.indices[idx*self.batch_size:(idx+1)*self.batch_size]
X, y = self.__data_generation(indices)
return X,y
def shuffleIndividuals(self,x):
t = np.arange(x.shape[1])
np.random.shuffle(t)
return x[:,t]
def __data_generation(self, batchTreeIndices):
haps = []
pos = []
for treeIndex in batchTreeIndices:
Hfilepath = os.path.join(self.treesDirectory,str(treeIndex) + "_haps.npy")
Pfilepath = os.path.join(self.treesDirectory,str(treeIndex) + "_pos.npy")
H = np.load(Hfilepath)
P = np.load(Pfilepath)
haps.append(H)
pos.append(P)
respectiveNormalizedTargets = [[t] for t in self.normalizedTargets[batchTreeIndices]]
targets = np.array(respectiveNormalizedTargets)
if(self.realLinePos):
for p in range(len(pos)):
pos[p] = pos[p] / self.infoDir["ChromosomeLength"]
if(self.sortInds):
for i in range(len(haps)):
haps[i] = np.transpose(self.sort_min_diff(np.transpose(haps[i])))
if(self.shuffleInds):
for i in range(len(haps)):
haps[i] = self.shuffleIndividuals(haps[i])
if self.seqD:
# simulate pool-sequencing
if(self.maxLen != None):
# convert the haps to allele frequecies and then pad
haps,pos = self.padAlleleFqs(haps,pos,
maxSNPs=self.maxLen,
frameWidth=self.frameWidth,
center=self.center)
haps=np.where(haps == -1.0, self.posPadVal,haps)
pos=np.where(pos == -1.0, self.posPadVal,pos)
z = np.stack((haps,pos), axis=-1)
return z, targets
else:
if(self.maxLen != None):
# pad
haps,pos = self.pad_HapsPos(haps,pos,
maxSNPs=self.maxLen,
frameWidth=self.frameWidth,
center=self.center)
pos=np.where(pos == -1.0, self.posPadVal,pos)
haps=np.where(haps < 1.0, self.ancVal, haps)
haps=np.where(haps > 1.0, self.padVal, haps)
haps=np.where(haps == 1.0, self.derVal, haps)
return [haps,pos], targets
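# Usage sketch (hedged): feeding the generator above to a tf.keras model. The .trees
# directory layout (info.p plus <index>_haps.npy / <index>_pos.npy files) matches the
# loading code in __data_generation; the keyword values and the model variable are
# illustrative.
#
#     gen = SequenceBatchGenerator("./sims.trees", batchSize=64, maxLen=2000,
#                                  frameWidth=5, shuffleExamples=True)
#     model.fit(gen, epochs=10)   # model expects [haps, pos] inputs and regression targets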
class VCFBatchGenerator(tf.keras.utils.Sequence):
"""Basically same as SequenceBatchGenerator Class except for VCF files"""
def __init__(self,
INFO,
CHROM,
winLen,
numWins,
IDs,
GT,
POS,
batchSize=64,
maxLen=None,
frameWidth=0,
center=False,
sortInds=False,
ancVal = -1,
padVal = -1,
derVal = 1,
realLinePos = True,
posPadVal = 0,
phase=None,
seed=None
):
self.INFO=INFO
self.CHROM=CHROM
self.winLen=winLen
self.numWins=numWins
self.indices=np.arange(self.numWins)
self.IDs=IDs
self.GT=GT
self.POS=POS
self.batch_size = batchSize
self.maxLen = maxLen
self.frameWidth = frameWidth
self.center = center
self.sortInds=sortInds
self.ancVal = ancVal
self.padVal = padVal
self.derVal = derVal
self.realLinePos = realLinePos
self.posPadVal = posPadVal
self.phase=phase
self.seed=seed
if self.seed:
os.environ['PYTHONHASHSEED']=str(self.seed)
random.seed(self.seed)
np.random.seed(self.seed)
def pad_HapsPosVCF(self,haplotypes,positions,maxSNPs=None,frameWidth=0,center=False):
'''
pads the haplotype and positions tensors
to be uniform with the largest tensor
'''
haps = haplotypes
pos = positions
nSNPs=[]
#Normalize the shape of all haplotype vectors with padding
for i in range(len(haps)):
numSNPs = haps[i].shape[0]
nSNPs.append(numSNPs)
paddingLen = maxSNPs - numSNPs
if(center):
prior = paddingLen // 2
post = paddingLen - prior
haps[i] = np.pad(haps[i],((prior,post),(0,0)),"constant",constant_values=2.0)
pos[i] = np.pad(pos[i],(prior,post),"constant",constant_values=-1.0)
else:
haps[i] = np.pad(haps[i],((0,paddingLen),(0,0)),"constant",constant_values=2.0)
pos[i] = np.pad(pos[i],(0,paddingLen),"constant",constant_values=-1.0)
haps = np.array(haps,dtype='float32')
pos = np.array(pos,dtype='float32')
if(frameWidth):
fw = frameWidth
haps = np.pad(haps,((0,0),(fw,fw),(fw,fw)),"constant",constant_values=2.0)
pos = np.pad(pos,((0,0),(fw,fw)),"constant",constant_values=-1.0)
return haps,pos,nSNPs
def __getitem__(self, idx):
indices = self.indices[idx*self.batch_size:(idx+1)*self.batch_size]
X, nSNPs = self.__data_generation(indices)
return X, self.CHROM, self.winLen, self.INFO, nSNPs
def __data_generation(self, indices):
if self.seed:
os.environ['PYTHONHASHSEED']=str(self.seed)
random.seed(self.seed)
np.random.seed(self.seed)
#def __getitem__(self, idx):
genos=self.GT
GT=self.GT.to_haplotypes()
diploid_check=[]
for n in range(1,len(genos[0]),2):
GTB=GT[:,n:n+1]
if np.unique(GTB).shape[0] == 1 and np.unique(GTB)[0] == -1:
diploid_check.append(0)
else:
diploid_check.append(1)
break
if 1 in diploid_check:
GT=np.array(GT)
else:
GT=GT[:,::2] #Select only the first of the genotypes
GT = np.where(GT == -1, 2, GT) # Code missing data as 2, these will ultimately end up being transformed to the pad value
if not self.phase:
np.random.shuffle(np.transpose(GT))
haps,pos=[],[]
for i in indices:
haps.append(GT[self.IDs[i][0]:self.IDs[i][1]])
pos.append(self.POS[self.IDs[i][0]:self.IDs[i][1]])
if(self.realLinePos):
for i in range(len(pos)):
pos[i] = (pos[i]-(self.winLen*indices[i])) / self.winLen
if(self.sortInds):
for i in range(len(haps)):
haps[i] = np.transpose(sort_min_diff(np.transpose(haps[i])))
if(self.maxLen != None):
haps,pos,nSNPs = self.pad_HapsPosVCF(haps,pos,
maxSNPs=self.maxLen,
frameWidth=self.frameWidth,
center=self.center)
pos=np.where(pos == -1.0, self.posPadVal,pos)
haps=np.where(haps < 1.0, self.ancVal, haps)
haps=np.where(haps > 1.0, self.padVal, haps)
haps=np.where(haps == 1.0, self.derVal, haps)
return [haps,pos], nSNPs
class POOLBatchGenerator(tf.keras.utils.Sequence):
"""Basically same as SequenceBatchGenerator Class except for POOL files"""
def __init__(self,
INFO,
CHROM,
winLen,
numWins,
IDs,
GT,
POS,
batchSize=64,
maxLen=None,
frameWidth=0,
center=False,
sortInds=False,
ancVal = -1,
padVal = -1,
derVal = 1,
realLinePos = True,
posPadVal = 0,
normType = 'zscore',
seed = None
):
self.INFO=INFO
self.normType = normType
self.CHROM=CHROM
self.winLen=winLen
self.numWins=numWins
self.indices=np.arange(self.numWins)
self.IDs=IDs
self.GT=GT
self.POS=POS
self.batch_size = batchSize
self.maxLen = maxLen
self.frameWidth = frameWidth
self.center = center
self.sortInds=sortInds
self.ancVal = ancVal
self.padVal = padVal
self.derVal = derVal
self.realLinePos = realLinePos
self.posPadVal = posPadVal
self.seed = seed
if self.seed:
os.environ['PYTHONHASHSEED']=str(self.seed)
random.seed(self.seed)
np.random.seed(self.seed)
def padFqs(self,haplotypes,positions,maxSNPs=None,frameWidth=0,center=False):
'''
normalize, and pad the haplotype and positions tensors
to be uniform with the largest tensor
'''
fqs = haplotypes
pos = positions
# Normalize
fqs = self.normalizeAlleleFqs(fqs)
nSNPs=[]
# Pad
for i in range(len(fqs)):
numSNPs = fqs[i].shape[0]
nSNPs.append(numSNPs)
paddingLen = maxSNPs - numSNPs
if(center):
prior = paddingLen // 2
post = paddingLen - prior
fqs[i] = np.pad(fqs[i],(prior,post),"constant",constant_values=-1.0)
pos[i] = np.pad(pos[i],(prior,post),"constant",constant_values=-1.0)
else:
if(paddingLen < 0):
fqs[i] = np.pad(fqs[i],(0,0),"constant",constant_values=-1.0)[:paddingLen]
pos[i] = np.pad(pos[i],(0,0),"constant",constant_values=-1.0)[:paddingLen]
else:
fqs[i] = np.pad(fqs[i],(0,paddingLen),"constant",constant_values=-1.0)
pos[i] = np.pad(pos[i],(0,paddingLen),"constant",constant_values=-1.0)
fqs = np.array(fqs,dtype='float32')
pos = np.array(pos,dtype='float32')
if(frameWidth):
fw = frameWidth
fqs = np.pad(fqs,((0,0),(fw,fw)),"constant",constant_values=-1.0)
pos = np.pad(pos,((0,0),(fw,fw)),"constant",constant_values=-1.0)
return fqs,pos,nSNPs
def normalizeAlleleFqs(self, fqs):
'''
normalize the allele frequencies for the batch
'''
norm = self.normType
if(norm == 'zscore'):
allVals = np.concatenate([a.flatten() for a in fqs])
fqs_mean = np.mean(allVals)
fqs_sd = np.std(allVals)
for i in range(len(fqs)):
fqs[i] = np.subtract(fqs[i],fqs_mean)
fqs[i] = np.divide(fqs[i],fqs_sd,out=np.zeros_like(fqs[i]),where=fqs_sd!=0)
elif(norm == 'divstd'):
allVals = np.concatenate([a.flatten() for a in fqs])
fqs_sd = np.std(allVals)
for i in range(len(fqs)):
fqs[i] = np.divide(fqs[i],fqs_sd,out=np.zeros_like(fqs[i]),where=fqs_sd!=0)
return fqs
def __getitem__(self, idx):
indices = self.indices[idx*self.batch_size:(idx+1)*self.batch_size]
X, nSNPs = self.__data_generation(indices)
return X, self.CHROM, self.winLen, self.INFO, nSNPs
def __data_generation(self, indices):
if self.seed:
os.environ['PYTHONHASHSEED']=str(self.seed)
random.seed(self.seed)
np.random.seed(self.seed)
GT=self.GT
haps,pos=[],[]
for i in indices:
haps.append(GT[self.IDs[i][0]:self.IDs[i][1]])
pos.append(self.POS[self.IDs[i][0]:self.IDs[i][1]])
if(self.realLinePos):
for i in range(len(pos)):
pos[i] = (pos[i]-(self.winLen*indices[i])) / self.winLen
if(self.sortInds):
for i in range(len(haps)):
haps[i] = np.transpose(sort_min_diff(np.transpose(haps[i])))
# pad the allele freqs and positions
if(self.maxLen != None):
haps,pos,nSNPs = self.padFqs(haps,pos,
maxSNPs=self.maxLen,
frameWidth=self.frameWidth,
center=self.center)
haps=np.where(haps == -1.0, self.posPadVal,haps)
pos=np.where(pos == -1.0, self.posPadVal,pos)
np.set_printoptions(threshold=sys.maxsize)
z = np.stack((haps,pos), axis=-1)
return z, nSNPs
|
StarcoderdataPython
|
5051840
|
# -*- coding: utf-8 -*-
"""Tests for Coastal Blue Carbon Functions."""
import unittest
import os
import shutil
import csv
import logging
import tempfile
import functools
import copy
import pprint
import numpy
from osgeo import gdal
import pygeoprocessing.testing as pygeotest
from natcap.invest import utils
REGRESSION_DATA = os.path.join(
os.path.dirname(__file__), '..', 'data', 'invest-test-data',
'coastal_blue_carbon')
LOGGER = logging.getLogger(__name__)
lulc_lookup_list = \
[['lulc-class', 'code', 'is_coastal_blue_carbon_habitat'],
['n', '0', 'False'],
['x', '1', 'True'],
['y', '2', 'True'],
['z', '3', 'True']]
lulc_lookup_list_unreadable = \
[['lulc-class', 'code', 'is_coastal_blue_carbon_habitat'],
['n', '0', ''],
['x', '1', 'True'],
['y', '2', 'True'],
['z', '3', 'True']]
lulc_lookup_list_no_ones = \
[['lulc-class', 'code', 'is_coastal_blue_carbon_habitat'],
['n', '0', 'False'],
['y', '2', 'True'],
['z', '3', 'True']]
lulc_transition_matrix_list = \
[['lulc-class', 'n', 'x', 'y', 'z'],
['n', 'NCC', 'accum', 'accum', 'accum'],
['x', 'med-impact-disturb', 'accum', 'accum', 'accum'],
['y', 'med-impact-disturb', 'accum', 'accum', 'accum'],
['z', 'med-impact-disturb', 'accum', 'accum', 'accum']]
carbon_pool_initial_list = \
[['code', 'lulc-class', 'biomass', 'soil', 'litter'],
['0', 'n', '0', '0', '0'],
['1', 'x', '5', '5', '0.5'],
['2', 'y', '10', '10', '0.5'],
['3', 'z', '20', '20', '0.5']]
carbon_pool_transient_list = \
[['code', 'lulc-class', 'biomass-half-life', 'biomass-med-impact-disturb',
'biomass-yearly-accumulation',
'soil-half-life',
'soil-med-impact-disturb',
'soil-yearly-accumulation'],
['0', 'n', '0', '0', '0', '0', '0', '0'],
['1', 'x', '1', '0.5', '1', '1', '0.5', '1.1'],
['2', 'y', '1', '0.5', '2', '1', '0.5', '2.1'],
['3', 'z', '1', '0.5', '1', '1', '0.5', '1.1']]
price_table_list = \
[['year', 'price'],
[2000, 20]]
NODATA_INT = -9999
def _read_array(raster_path):
    """Read raster as array."""
ds = gdal.Open(raster_path)
band = ds.GetRasterBand(1)
a = band.ReadAsArray()
ds = None
return a
def _create_table(uri, rows_list):
"""Create csv file from list of lists."""
with open(uri, 'wb') as f:
writer = csv.writer(f)
writer.writerows(rows_list)
return uri
def _create_workspace():
"""Create workspace directory."""
return tempfile.mkdtemp()
def _get_args(workspace, num_transitions=2, valuation=True):
"""Create and return arguments for CBC main model.
Parameters:
workspace(string): A path to a folder on disk. Generated inputs will
be saved to this directory.
num_transitions=2 (int): The number of transitions to synthesize.
valuation=True (bool): Whether to include parameters related to
valuation in the args dict.
Returns:
args (dict): main model arguments.
"""
band_matrices = [numpy.ones((2, 2))]
band_matrices_two = [numpy.ones((2, 2)) * 2]
band_matrices_with_nodata = [numpy.ones((2, 2))]
band_matrices_with_nodata[0][0][0] = NODATA_INT
srs = pygeotest.sampledata.SRS_WILLAMETTE
lulc_lookup_uri = _create_table(
os.path.join(workspace, 'lulc_lookup.csv'), lulc_lookup_list)
lulc_transition_matrix_uri = _create_table(
os.path.join(workspace, 'lulc_transition_matrix.csv'),
lulc_transition_matrix_list)
carbon_pool_initial_uri = _create_table(
os.path.join(workspace, 'carbon_pool_initial.csv'),
carbon_pool_initial_list)
carbon_pool_transient_uri = _create_table(
os.path.join(workspace, 'carbon_pool_transient.csv'),
carbon_pool_transient_list)
raster_0_uri = pygeotest.create_raster_on_disk(
band_matrices,
srs.origin,
srs.projection,
NODATA_INT,
srs.pixel_size(100),
datatype=gdal.GDT_Int32,
filename=os.path.join(workspace, 'raster_0.tif'))
raster_1_uri = pygeotest.create_raster_on_disk(
band_matrices_with_nodata,
srs.origin,
srs.projection,
NODATA_INT,
srs.pixel_size(100),
datatype=gdal.GDT_Int32,
filename=os.path.join(workspace, 'raster_1.tif'))
raster_2_uri = pygeotest.create_raster_on_disk(
band_matrices_two,
srs.origin,
srs.projection,
NODATA_INT,
srs.pixel_size(100),
datatype=gdal.GDT_Int32,
filename=os.path.join(workspace, 'raster_2.tif'))
possible_transitions = [raster_1_uri, raster_2_uri]
possible_transition_years = [2000, 2005]
args = {
'workspace_dir': os.path.join(workspace, 'workspace'),
'results_suffix': 'test',
'lulc_lookup_uri': lulc_lookup_uri,
'lulc_transition_matrix_uri': lulc_transition_matrix_uri,
'lulc_baseline_map_uri': raster_0_uri,
'lulc_baseline_year': 1995,
'lulc_transition_maps_list': possible_transitions[:num_transitions+1],
'lulc_transition_years_list': possible_transition_years[:num_transitions+1],
'analysis_year': 2010,
'carbon_pool_initial_uri': carbon_pool_initial_uri,
'carbon_pool_transient_uri': carbon_pool_transient_uri,
'do_economic_analysis': False,
}
utils.make_directories([args['workspace_dir']])
if valuation:
args.update({
'do_economic_analysis': True,
'do_price_table': False,
'price': 2.,
'inflation_rate': 5.,
'price_table_uri': None,
'discount_rate': 2.
})
return args
def _get_preprocessor_args(args_choice, workspace):
"""Create and return arguments for preprocessor model.
Args:
args_choice (int): which arguments to return
workspace (string): The path to a workspace directory.
Returns:
args (dict): preprocessor arguments
"""
band_matrices_zeros = [numpy.zeros((2, 2))]
band_matrices_ones = [numpy.ones((2, 3))] # tests alignment
band_matrices_nodata = [numpy.ones((2, 2)) * NODATA_INT]
srs = pygeotest.sampledata.SRS_WILLAMETTE
lulc_lookup_uri = _create_table(
os.path.join(workspace, 'lulc_lookup.csv'), lulc_lookup_list)
raster_0_uri = pygeotest.create_raster_on_disk(
band_matrices_ones, srs.origin, srs.projection, NODATA_INT,
srs.pixel_size(100), datatype=gdal.GDT_Int32,
filename=os.path.join(workspace, 'raster_0.tif'))
raster_1_uri = pygeotest.create_raster_on_disk(
band_matrices_ones, srs.origin, srs.projection, NODATA_INT,
srs.pixel_size(100), datatype=gdal.GDT_Int32,
filename=os.path.join(workspace, 'raster_1.tif'))
raster_2_uri = pygeotest.create_raster_on_disk(
band_matrices_ones, srs.origin, srs.projection, NODATA_INT,
srs.pixel_size(100), datatype=gdal.GDT_Int32,
filename=os.path.join(workspace, 'raster_2.tif'))
raster_3_uri = pygeotest.create_raster_on_disk(
band_matrices_zeros, srs.origin, srs.projection, NODATA_INT,
srs.pixel_size(100), datatype=gdal.GDT_Int32,
filename=os.path.join(workspace, 'raster_3.tif'))
raster_4_uri = pygeotest.create_raster_on_disk(
band_matrices_zeros, srs.origin, srs.projection, -1,
srs.pixel_size(100), datatype=gdal.GDT_Int32,
filename=os.path.join(workspace, 'raster_4.tif'))
raster_nodata_uri = pygeotest.create_raster_on_disk(
band_matrices_nodata, srs.origin, srs.projection, NODATA_INT,
srs.pixel_size(100), datatype=gdal.GDT_Int32,
filename=os.path.join(workspace, 'raster_4.tif'))
args = {
'workspace_dir': os.path.join(workspace, 'workspace'),
'results_suffix': 'test',
'lulc_lookup_uri': lulc_lookup_uri,
'lulc_snapshot_list': [raster_0_uri, raster_1_uri, raster_2_uri]
}
args2 = {
'workspace_dir': os.path.join(workspace, 'workspace'),
'results_suffix': 'test',
'lulc_lookup_uri': lulc_lookup_uri,
'lulc_snapshot_list': [raster_0_uri, raster_1_uri, raster_3_uri]
}
args3 = {
'workspace_dir': os.path.join(workspace, 'workspace'),
'results_suffix': 'test',
'lulc_lookup_uri': lulc_lookup_uri,
'lulc_snapshot_list': [raster_0_uri, raster_nodata_uri, raster_3_uri]
}
args4 = {
'workspace_dir': os.path.join(workspace, 'workspace'),
'results_suffix': 'test',
'lulc_lookup_uri': lulc_lookup_uri,
'lulc_snapshot_list': [raster_0_uri, raster_nodata_uri, raster_4_uri]
}
if args_choice == 1:
return args
elif args_choice == 2:
return args2
elif args_choice == 3:
return args3
else:
return args4
class TestPreprocessor(unittest.TestCase):
"""Test Coastal Blue Carbon preprocessor library functions."""
def setUp(self):
"""Create a temp directory for the workspace."""
self.workspace_dir = tempfile.mkdtemp()
def tearDown(self):
"""Remove workspace."""
shutil.rmtree(self.workspace_dir)
def test_create_carbon_pool_transient_table_template(self):
"""Coastal Blue Carbon: Test creation of transient table template."""
from natcap.invest.coastal_blue_carbon import preprocessor
args = _get_preprocessor_args(1, self.workspace_dir)
filepath = os.path.join(self.workspace_dir,
'transient_temp.csv')
code_to_lulc_dict = {1: 'one', 2: 'two', 3: 'three'}
preprocessor._create_carbon_pool_transient_table_template(
filepath, code_to_lulc_dict)
transient_dict = utils.build_lookup_from_csv(filepath, 'code')
# demonstrate that output table contains all input land cover classes
for i in [1, 2, 3]:
self.assertTrue(i in transient_dict.keys())
def test_preprocessor_ones(self):
"""Coastal Blue Carbon: Test entire run of preprocessor (ones).
All rasters contain ones.
"""
from natcap.invest.coastal_blue_carbon import preprocessor
args = _get_preprocessor_args(1, self.workspace_dir)
preprocessor.execute(args)
trans_csv = os.path.join(
args['workspace_dir'],
'outputs_preprocessor',
'transitions_test.csv')
with open(trans_csv, 'r') as f:
lines = f.readlines()
# just a regression test. this tests that an output file was
# successfully created, and demonstrates that one land class transition
# does not occur and the other is set in the right direction.
self.assertTrue(lines[2].startswith('x,,accum'))
def test_preprocessor_zeros(self):
"""Coastal Blue Carbon: Test entire run of preprocessor (zeroes).
First two rasters contain ones, last contains zeros.
"""
from natcap.invest.coastal_blue_carbon import preprocessor
args2 = _get_preprocessor_args(2, self.workspace_dir)
preprocessor.execute(args2)
trans_csv = os.path.join(
args2['workspace_dir'],
'outputs_preprocessor',
'transitions_test.csv')
with open(trans_csv, 'r') as f:
lines = f.readlines()
# just a regression test. this tests that an output file was
# successfully created, and that two particular land class transitions
# occur and are set in the right directions.
self.assertTrue(lines[2][:].startswith('x,disturb,accum'))
def test_preprocessor_nodata(self):
"""Coastal Blue Carbon: Test run of preprocessor (various values).
First raster contains ones, second nodata, third zeros.
"""
from natcap.invest.coastal_blue_carbon import preprocessor
args = _get_preprocessor_args(3, self.workspace_dir)
preprocessor.execute(args)
trans_csv = os.path.join(
args['workspace_dir'],
'outputs_preprocessor',
'transitions_test.csv')
with open(trans_csv, 'r') as f:
lines = f.readlines()
# just a regression test. this tests that an output file was
# successfully created, and that two particular land class transitions
# occur and are set in the right directions.
self.assertTrue(lines[2][:].startswith('x,,'))
def test_preprocessor_user_defined_nodata(self):
"""Coastal Blue Carbon: Test preprocessor with user-defined nodata.
First raster contains ones, second nodata, third zeros.
"""
from natcap.invest.coastal_blue_carbon import preprocessor
args = _get_preprocessor_args(4, self.workspace_dir)
preprocessor.execute(args)
trans_csv = os.path.join(
self.workspace_dir,
'workspace',
'outputs_preprocessor',
'transitions_test.csv')
with open(trans_csv, 'r') as f:
lines = f.readlines()
# just a regression test. this tests that an output file was
# successfully created, and that two particular land class transitions
# occur and are set in the right directions.
self.assertTrue(lines[2][:].startswith('x,,'))
def test_lookup_parsing_exception(self):
"""Coastal Blue Carbon: Test lookup table parsing exception."""
from natcap.invest.coastal_blue_carbon import preprocessor
args = _get_preprocessor_args(1, self.workspace_dir)
_create_table(args['lulc_lookup_uri'], lulc_lookup_list_unreadable)
with self.assertRaises(ValueError):
preprocessor.execute(args)
def test_raster_validation(self):
"""Coastal Blue Carbon: Test raster validation."""
from natcap.invest.coastal_blue_carbon import preprocessor
args = _get_preprocessor_args(1, self.workspace_dir)
OTHER_NODATA = -1
srs = pygeotest.sampledata.SRS_WILLAMETTE
band_matrices_with_nodata = [numpy.ones((2, 2)) * OTHER_NODATA]
raster_wrong_nodata = pygeotest.create_raster_on_disk(
band_matrices_with_nodata,
srs.origin,
srs.projection,
OTHER_NODATA,
srs.pixel_size(100),
datatype=gdal.GDT_Int32,
filename=os.path.join(
self.workspace_dir, 'raster_wrong_nodata.tif'))
args['lulc_snapshot_list'][0] = raster_wrong_nodata
with self.assertRaises(ValueError):
preprocessor.execute(args)
def test_raster_values_not_in_lookup_table(self):
"""Coastal Blue Carbon: Test raster values not in lookup table."""
from natcap.invest.coastal_blue_carbon import preprocessor
args = _get_preprocessor_args(1, self.workspace_dir)
_create_table(args['lulc_lookup_uri'], lulc_lookup_list_no_ones)
with self.assertRaises(ValueError):
preprocessor.execute(args)
def test_mark_transition_type(self):
"""Coastal Blue Carbon: Test mark_transition_type."""
from natcap.invest.coastal_blue_carbon import preprocessor
args = _get_preprocessor_args(1, self.workspace_dir)
band_matrices_zero = [numpy.zeros((2, 2))]
srs = pygeotest.sampledata.SRS_WILLAMETTE
raster_zeros = pygeotest.create_raster_on_disk(
band_matrices_zero,
srs.origin,
srs.projection,
NODATA_INT,
srs.pixel_size(100),
datatype=gdal.GDT_Int32,
filename=os.path.join(
self.workspace_dir, 'raster_1.tif'))
args['lulc_snapshot_list'][0] = raster_zeros
preprocessor.execute(args)
trans_csv = os.path.join(
self.workspace_dir,
'workspace',
'outputs_preprocessor',
'transitions_test.csv')
with open(trans_csv, 'r') as f:
lines = f.readlines()
self.assertTrue(lines[1][:].startswith('n,NCC,accum'))
def test_mark_transition_type_nodata_check(self):
"""Coastal Blue Carbon: Test mark_transition_type with nodata check."""
from natcap.invest.coastal_blue_carbon import preprocessor
args = _get_preprocessor_args(1, self.workspace_dir)
band_matrices_zero = [numpy.zeros((2, 2))]
srs = pygeotest.sampledata.SRS_WILLAMETTE
raster_zeros = pygeotest.create_raster_on_disk(
band_matrices_zero,
srs.origin,
srs.projection,
NODATA_INT,
srs.pixel_size(100),
datatype=gdal.GDT_Int32,
filename=os.path.join(
self.workspace_dir, 'raster_1.tif'))
args['lulc_snapshot_list'][0] = raster_zeros
preprocessor.execute(args)
def test_binary(self):
"""Coastal Blue Carbon: Test preprocessor run against InVEST-Data."""
from natcap.invest.coastal_blue_carbon import preprocessor
raster_0_uri = os.path.join(
REGRESSION_DATA, 'inputs/GBJC_2010_mean_Resample.tif')
raster_1_uri = os.path.join(
REGRESSION_DATA, 'inputs/GBJC_2030_mean_Resample.tif')
raster_2_uri = os.path.join(
REGRESSION_DATA, 'inputs/GBJC_2050_mean_Resample.tif')
args = {
'workspace_dir': _create_workspace(),
'results_suffix': '150225',
'lulc_lookup_uri': os.path.join(
REGRESSION_DATA, 'inputs', 'lulc_lookup.csv'),
'lulc_snapshot_list': [raster_0_uri, raster_1_uri, raster_2_uri]
}
preprocessor.execute(args)
# walk through all files in the workspace and assert that outputs have
# the file suffix.
non_suffixed_files = []
for root_dir, dirnames, filenames in os.walk(args['workspace_dir']):
for filename in filenames:
if not filename.lower().endswith('.txt'): # ignore logfile
basename, extension = os.path.splitext(filename)
if not basename.endswith('_150225'):
path_rel_to_workspace = os.path.relpath(
os.path.join(root_dir, filename),
                            args['workspace_dir'])
non_suffixed_files.append(path_rel_to_workspace)
if non_suffixed_files:
self.fail('%s files are missing suffixes: %s' %
(len(non_suffixed_files),
pprint.pformat(non_suffixed_files)))
class TestIO(unittest.TestCase):
"""Test Coastal Blue Carbon io library functions."""
def setUp(self):
"""Create arguments."""
self.workspace_dir = tempfile.mkdtemp()
self.args = _get_args(self.workspace_dir)
def tearDown(self):
shutil.rmtree(self.workspace_dir)
def test_get_inputs(self):
"""Coastal Blue Carbon: Test get_inputs function in IO module."""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
d = cbc.get_inputs(self.args)
# check several items in the data dictionary to check that the inputs
# are properly fetched.
self.assertEqual(d['lulc_to_Hb'][0], 0.0)
self.assertEqual(d['lulc_to_Hb'][1], 1.0)
self.assertEqual(len(d['price_t']), 16)
self.assertEqual(len(d['snapshot_years']), 4)
self.assertEqual(len(d['transition_years']), 2)
def test_get_price_table_exception(self):
"""Coastal Blue Carbon: Test price table exception."""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
self.args['price_table_uri'] = os.path.join(
self.args['workspace_dir'], 'price.csv')
self.args['do_price_table'] = True
self.args['price_table_uri'] = _create_table(
self.args['price_table_uri'], price_table_list)
with self.assertRaises(KeyError):
cbc.get_inputs(self.args)
def test_chronological_order_exception(self):
"""Coastal Blue Carbon: Test exception checking chronological order."""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
self.args['lulc_transition_years_list'] = [2005, 2000]
with self.assertRaises(ValueError):
cbc.get_inputs(self.args)
def test_chronological_order_exception_analysis_year(self):
"""Coastal Blue Carbon: Test exception checking analysis year order."""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
self.args['analysis_year'] = 2000
with self.assertRaises(ValueError):
cbc.get_inputs(self.args)
def test_create_transient_dict(self):
"""Coastal Blue Carbon: Read transient table."""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
biomass_transient_dict, soil_transient_dict = \
cbc._create_transient_dict(self.args['carbon_pool_transient_uri'])
# check that function can properly parse table of transient carbon pool
# values.
self.assertTrue(1 in biomass_transient_dict.keys())
self.assertTrue(1 in soil_transient_dict.keys())
def test_get_lulc_trans_to_D_dicts(self):
"""Coastal Blue Carbon: Read transient table (disturbed)."""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
biomass_transient_dict, soil_transient_dict = \
cbc._create_transient_dict(self.args['carbon_pool_transient_uri'])
lulc_transition_uri = self.args['lulc_transition_matrix_uri']
lulc_lookup_uri = self.args['lulc_lookup_uri']
lulc_trans_to_Db, lulc_trans_to_Ds = cbc._get_lulc_trans_to_D_dicts(
lulc_transition_uri,
lulc_lookup_uri,
biomass_transient_dict,
soil_transient_dict)
# check that function can properly parse table of transient carbon pool
# values.
self.assertTrue((3.0, 0.0) in lulc_trans_to_Db.keys())
self.assertTrue((3.0, 0.0) in lulc_trans_to_Ds.keys())
class TestModel(unittest.TestCase):
"""Test Coastal Blue Carbon main model functions."""
def setUp(self):
"""Create arguments."""
self.workspace_dir = tempfile.mkdtemp()
self.args = _get_args(workspace=self.workspace_dir)
def tearDown(self):
"""Remove workspace."""
shutil.rmtree(self.workspace_dir)
def test_model_run(self):
"""Coastal Blue Carbon: Test run function in main model."""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
self.args['suffix'] = 'xyz'
self.args['lulc_baseline_year'] = 2000
self.args['lulc_transition_years_list'] = [2005, 2010]
self.args['analysis_year'] = None
cbc.execute(self.args)
netseq_output_raster = os.path.join(
self.args['workspace_dir'],
'outputs_core/total_net_carbon_sequestration_test.tif')
npv_output_raster = os.path.join(
self.args['workspace_dir'],
'outputs_core/net_present_value_test.tif')
netseq_array = _read_array(netseq_output_raster)
npv_array = _read_array(npv_output_raster)
# (Explanation for why netseq is 31.)
# LULC Code: Baseline: 1 --> Year 2000: 1, Year 2005: 2, Year 2010: 2
# Initial Stock from Baseline: 5+5=10
# Sequest:
# 2000-->2005: (1+1.1)*5=10.5, 2005-->2010: (2+2.1)*5=20.5
# Total: 10.5 + 20.5 = 31.
netseq_test = numpy.array([[cbc.NODATA_FLOAT, 31.], [31., 31.]])
npv_test = numpy.array(
[[cbc.NODATA_FLOAT, 60.27801514], [60.27801514, 60.27801514]])
# just a simple regression test. this demonstrates that a NaN value
# will properly propagate across the model. the npv raster was chosen
# because the values are determined by multiple inputs, and any changes
# in those inputs would propagate to this raster.
numpy.testing.assert_array_almost_equal(
netseq_array, netseq_test, decimal=4)
numpy.testing.assert_array_almost_equal(
npv_array, npv_test, decimal=4)
def test_model_run_2(self):
"""Coastal Blue Carbon: Test CBC without analysis year."""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
self.args['analysis_year'] = None
self.args['lulc_baseline_year'] = 2000
self.args['lulc_transition_maps_list'] = [self.args['lulc_transition_maps_list'][0]]
self.args['lulc_transition_years_list'] = [2005]
cbc.execute(self.args)
netseq_output_raster = os.path.join(
self.args['workspace_dir'],
'outputs_core/total_net_carbon_sequestration_test.tif')
netseq_array = _read_array(netseq_output_raster)
# (Explanation for why netseq is 10.5.)
# LULC Code: Baseline: 1 --> Year 2000: 1, Year 2005: 2
# Initial Stock from Baseline: 5+5=10
# Sequest:
# 2000-->2005: (1+1.1)*5=10.5
netseq_test = numpy.array([[cbc.NODATA_FLOAT, 10.5], [10.5, 10.5]])
# just a simple regression test. this demonstrates that a NaN value
# will properly propagate across the model. the npv raster was chosen
# because the values are determined by multiple inputs, and any changes
# in those inputs would propagate to this raster.
numpy.testing.assert_array_almost_equal(
netseq_array, netseq_test, decimal=4)
def test_model_no_valuation(self):
"""Coastal Blue Carbon: Test main model without valuation."""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
self.args = _get_args(valuation=False, workspace=self.workspace_dir)
self.args['lulc_baseline_year'] = 2000
self.args['lulc_transition_years_list'] = [2005, 2010]
self.args['analysis_year'] = None
cbc.execute(self.args)
netseq_output_raster = os.path.join(
self.args['workspace_dir'],
'outputs_core/total_net_carbon_sequestration_test.tif')
netseq_array = _read_array(netseq_output_raster)
# (Explanation for why netseq is 31.)
# LULC Code: Baseline: 1 --> Year 2000: 1, Year 2005: 2, Year 2010: 2
# Initial Stock from Baseline: 5+5=10
# Sequest:
# 2000-->2005: (1+1.1)*5=10.5, 2005-->2010: (2+2.1)*5=20.5
# Total: 10.5 + 20.5 = 31.
netseq_test = numpy.array([[cbc.NODATA_FLOAT, 31.], [31., 31.]])
# just a simple regression test. this demonstrates that a NaN value
# will properly propagate across the model. the npv raster was chosen
# because the values are determined by multiple inputs, and any changes
# in those inputs would propagate to this raster.
numpy.testing.assert_array_almost_equal(
netseq_array, netseq_test, decimal=4)
def test_binary(self):
"""Coastal Blue Carbon: Test CBC model against InVEST-Data."""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
args = {
'workspace_dir': self.args['workspace_dir'],
'carbon_pool_initial_uri': os.path.join(
REGRESSION_DATA,
'outputs_preprocessor/carbon_pool_initial_sample.csv'),
'carbon_pool_transient_uri': os.path.join(
REGRESSION_DATA,
'outputs_preprocessor/carbon_pool_transient_sample.csv'),
'discount_rate': 6.0,
'do_economic_analysis': True,
'do_price_table': True,
'inflation_rate': 3.0,
'lulc_lookup_uri': os.path.join(
REGRESSION_DATA, 'inputs', 'lulc_lookup.csv'),
'lulc_baseline_map_uri': os.path.join(
REGRESSION_DATA, 'inputs/GBJC_2010_mean_Resample.tif'),
'lulc_baseline_year': 2010,
'lulc_transition_maps_list': [
os.path.join(
REGRESSION_DATA, 'inputs/GBJC_2030_mean_Resample.tif'),
os.path.join(
REGRESSION_DATA, 'inputs/GBJC_2050_mean_Resample.tif')],
'lulc_transition_years_list': [2030, 2050],
'price_table_uri': os.path.join(
REGRESSION_DATA, 'inputs/Price_table_SCC3.csv'),
'lulc_transition_matrix_uri': os.path.join(
REGRESSION_DATA, 'outputs_preprocessor/transitions_sample.csv'),
'price': 10.0,
'results_suffix': '150225'
}
cbc.execute(args)
npv_raster = os.path.join(
os.path.join(
args['workspace_dir'],
'outputs_core/net_present_value_150225.tif'))
npv_array = _read_array(npv_raster)
# this is just a regression test, but it will capture all values
# in the net present value raster. the npv raster was chosen because
# the values are determined by multiple inputs, and any changes in
# those inputs would propagate to this raster.
u = numpy.unique(npv_array)
u.sort()
a = numpy.array([-76992.05, -40101.57, -34930., -34821.32,
0., 108.68, 6975.94, 7201.22, 7384.99],
dtype=numpy.float32)
a.sort()
numpy.testing.assert_array_almost_equal(u, a, decimal=2)
# walk through all files in the workspace and assert that outputs have
# the file suffix.
non_suffixed_files = []
for root_dir, dirnames, filenames in os.walk(self.args['workspace_dir']):
for filename in filenames:
if not filename.lower().endswith('.txt'): # ignore logfile
basename, extension = os.path.splitext(filename)
if not basename.endswith('_150225'):
path_rel_to_workspace = os.path.relpath(
os.path.join(root_dir, filename),
self.args['workspace_dir'])
non_suffixed_files.append(path_rel_to_workspace)
if non_suffixed_files:
self.fail('%s files are missing suffixes: %s' %
(len(non_suffixed_files),
pprint.pformat(non_suffixed_files)))
def test_1_transition_passes(self):
"""Coastal Blue Carbon: Test model runs with only 1 transition.
This is a regression test addressing issue #3572
(see: https://bitbucket.org/natcap/invest/issues/3572)
"""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
self.args['lulc_transition_maps_list'] = \
[self.args['lulc_transition_maps_list'][0]]
self.args['lulc_transition_years_list'] = \
[self.args['lulc_transition_years_list'][0]]
self.args['analysis_year'] = None
try:
cbc.execute(self.args)
except AttributeError as error:
LOGGER.exception("Here's the traceback encountered: %s" % error)
self.fail('CBC should not crash when only 1 transition provided')
class CBCRefactorTest(unittest.TestCase):
def setUp(self):
self.workspace_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.workspace_dir)
@staticmethod
def create_args(workspace, transition_tuples=None, analysis_year=None):
"""Create a default args dict with the given transition matrices.
Arguments:
workspace (string): The path to the workspace directory on disk.
Files will be saved to this location.
transition_tuples (list or None): A list of tuples, where the first
element of the tuple is a numpy matrix of the transition values,
and the second element of the tuple is the year of the transition.
Provided years must be in chronological order.
If ``None``, the transition parameters will be ignored.
analysis_year (int or None): The year of the final analysis. If
provided, it must be greater than the last year within the
transition tuples (unless ``transition_tuples`` is None, in which
case ``analysis_year`` can be anything greater than 2000, the
baseline year).
Returns:
A dict of the model arguments.
"""
from pygeoprocessing.testing import sampledata
args = {
'workspace_dir': workspace,
'lulc_lookup_uri': os.path.join(workspace, 'lulc_lookup.csv'),
'lulc_transition_matrix_uri': os.path.join(workspace,
'transition_matrix.csv'),
'carbon_pool_initial_uri': os.path.join(workspace,
'carbon_pool_initial.csv'),
'carbon_pool_transient_uri': os.path.join(workspace,
'carbon_pool_transient.csv'),
'lulc_baseline_map_uri': os.path.join(workspace, 'lulc.tif'),
'lulc_baseline_year': 2000,
'do_economic_analysis': False,
}
_create_table(args['lulc_lookup_uri'], lulc_lookup_list)
_create_table(
args['lulc_transition_matrix_uri'],
lulc_transition_matrix_list)
_create_table(
args['carbon_pool_initial_uri'],
carbon_pool_initial_list)
_create_table(
args['carbon_pool_transient_uri'],
carbon_pool_transient_list)
# Only parameters needed are band_matrices and filename
make_raster = functools.partial(
sampledata.create_raster_on_disk,
origin=sampledata.SRS_WILLAMETTE.origin,
projection_wkt=sampledata.SRS_WILLAMETTE.projection,
nodata=-1, pixel_size=sampledata.SRS_WILLAMETTE.pixel_size(100))
known_matrix_size = None
if transition_tuples:
args['lulc_transition_maps_list'] = []
args['lulc_transition_years_list'] = []
for band_matrix, transition_year in transition_tuples:
known_matrix_size = band_matrix.shape
filename = os.path.join(workspace,
'transition_%s.tif' % transition_year)
make_raster(band_matrices=[band_matrix], filename=filename)
args['lulc_transition_maps_list'].append(filename)
args['lulc_transition_years_list'].append(transition_year)
# Make the lulc
lulc_shape = (10, 10) if not known_matrix_size else known_matrix_size
make_raster(band_matrices=[numpy.ones(lulc_shape)],
filename=args['lulc_baseline_map_uri'])
if analysis_year:
args['analysis_year'] = analysis_year
return args
def test_no_transitions(self):
"""Coastal Blue Carbon: Verify model can run without transitions."""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
args = CBCRefactorTest.create_args(
workspace=self.workspace_dir, transition_tuples=None,
analysis_year=None)
cbc.execute(args)
def test_no_transitions_with_analysis_year(self):
"""Coastal Blue Carbon: Model can run w/o trans., w/analysis yr."""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
args = CBCRefactorTest.create_args(
workspace=self.workspace_dir, transition_tuples=None,
analysis_year=2010)
cbc.execute(args)
def test_one_transition(self):
"""Coastal Blue Carbon: Verify model can run with 1 transition."""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
transition_tuples = [
(numpy.ones((10, 10)), 2010),
]
args = CBCRefactorTest.create_args(
workspace=self.workspace_dir,
transition_tuples=transition_tuples,
analysis_year=None)
cbc.execute(args)
def test_transient_dict_extraction(self):
"""Coastal Blue Carbon: Verify extraction of transient dictionary."""
from natcap.invest.coastal_blue_carbon \
import coastal_blue_carbon as cbc
transient_file = _create_table(
os.path.join(self.workspace_dir, 'transient.csv'),
carbon_pool_transient_list[:3])
biomass_dict, soil_dict = cbc._create_transient_dict(transient_file)
expected_biomass_dict = {
0: {
'lulc-class': 'n',
'half-life': 0.0,
'med-impact-disturb': 0.0,
'yearly-accumulation': 0.0,
},
1: {
'lulc-class': 'x',
'half-life': 1,
'med-impact-disturb': 0.5,
'yearly-accumulation': 1,
}
}
expected_soil_dict = copy.deepcopy(expected_biomass_dict)
expected_soil_dict[1]['yearly-accumulation'] = 1.1
self.assertEqual(biomass_dict, expected_biomass_dict)
self.assertEqual(soil_dict, expected_soil_dict)
|
StarcoderdataPython
|
9663202
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import logging
import os
import six
from toscaparser.elements.interfaces import InterfacesDef
from toscaparser.functions import GetInput
from toscaparser.nodetemplate import NodeTemplate
from toscaparser.utils.gettextutils import _
SECTIONS = (TYPE, PROPERTIES, MEDADATA, DEPENDS_ON, UPDATE_POLICY,
DELETION_POLICY) = \
('type', 'properties', 'metadata',
'depends_on', 'update_policy', 'deletion_policy')
policy_type = ['tosca.policies.Placement',
'tosca.policies.Scaling',
'tosca.policies.Scaling.Cluster']
log = logging.getLogger('heat-translator')
class HotResource(object):
'''Base class for TOSCA node type translation to Heat resource type.'''
def __init__(self, nodetemplate, name=None, type=None, properties=None,
metadata=None, depends_on=None,
update_policy=None, deletion_policy=None, csar_dir=None):
log.debug(_('Translating TOSCA node type to HOT resource type.'))
self.nodetemplate = nodetemplate
if name:
self.name = name
else:
self.name = nodetemplate.name
self.type = type
self.properties = properties or {}
self.csar_dir = csar_dir
# special case for HOT softwareconfig
cwd = os.getcwd()
if type == 'OS::Heat::SoftwareConfig':
config = self.properties.get('config')
if isinstance(config, dict):
if self.csar_dir:
os.chdir(self.csar_dir)
implementation_artifact = os.path.abspath(config.get(
'get_file'))
else:
implementation_artifact = config.get('get_file')
if implementation_artifact:
filename, file_extension = os.path.splitext(
implementation_artifact)
file_extension = file_extension.lower()
# artifact_types should be read to find the exact script
# type, unfortunately artifact_types doesn't seem to be
# supported by the parser
if file_extension == '.ansible' \
or file_extension == '.yaml' \
or file_extension == '.yml':
self.properties['group'] = 'ansible'
if file_extension == '.pp':
self.properties['group'] = 'puppet'
if self.properties.get('group') is None:
self.properties['group'] = 'script'
os.chdir(cwd)
self.metadata = metadata
# The difference between depends_on and depends_on_nodes is
# that depends_on defines dependency in the context of the
# HOT template and it is used during the template output.
# Depends_on_nodes defines the direct dependency between the
# tosca nodes and is not used during the output of the
# HOT template but for internal processing only. When a tosca
        # node depends on another node it will always be added to
# depends_on_nodes but not always to depends_on. For example
# if the source of dependency is a server, the dependency will
# be added as properties.get_resource and not depends_on
if depends_on:
self.depends_on = depends_on
self.depends_on_nodes = depends_on
else:
self.depends_on = []
self.depends_on_nodes = []
self.update_policy = update_policy
self.deletion_policy = deletion_policy
self.group_dependencies = {}
# if hide_resource is set to true, then this resource will not be
# generated in the output yaml.
self.hide_resource = False
def handle_properties(self):
# the property can hold a value or the intrinsic function get_input
# for value, copy it
# for get_input, convert to get_param
for prop in self.nodetemplate.get_properties_objects():
pass
def handle_life_cycle(self):
hot_resources = []
deploy_lookup = {}
# TODO(anyone): sequence for life cycle needs to cover different
# scenarios and cannot be fixed or hard coded here
operations_deploy_sequence = ['create', 'configure', 'start']
operations = HotResource.get_all_operations(self.nodetemplate)
# create HotResource for each operation used for deployment:
# create, start, configure
# ignore the other operations
# observe the order: create, start, configure
# use the current HotResource for the first operation in this order
# hold the original name since it will be changed during
# the transformation
node_name = self.name
reserve_current = 'NONE'
for operation in operations_deploy_sequence:
if operation in operations.keys():
reserve_current = operation
break
# create the set of SoftwareDeployment and SoftwareConfig for
# the interface operations
hosting_server = None
if self.nodetemplate.requirements is not None:
hosting_server = self._get_hosting_server()
sw_deployment_resouce = HOTSoftwareDeploymentResources(hosting_server)
server_key = sw_deployment_resouce.server_key
servers = sw_deployment_resouce.servers
sw_deploy_res = sw_deployment_resouce.software_deployment
# hosting_server is None if requirements is None
hosting_on_server = hosting_server if hosting_server else None
base_type = HotResource.get_base_type_str(
self.nodetemplate.type_definition)
# if we are on a compute node the host is self
if hosting_on_server is None and base_type == 'tosca.nodes.Compute':
hosting_on_server = self.name
servers = {'get_resource': self.name}
cwd = os.getcwd()
for operation in operations.values():
if operation.name in operations_deploy_sequence:
config_name = node_name + '_' + operation.name + '_config'
deploy_name = node_name + '_' + operation.name + '_deploy'
if self.csar_dir:
os.chdir(self.csar_dir)
get_file = os.path.abspath(operation.implementation)
else:
get_file = operation.implementation
hot_resources.append(
HotResource(self.nodetemplate,
config_name,
'OS::Heat::SoftwareConfig',
{'config':
{'get_file': get_file}},
csar_dir=self.csar_dir))
if operation.name == reserve_current and \
base_type != 'tosca.nodes.Compute':
deploy_resource = self
self.name = deploy_name
self.type = sw_deploy_res
self.properties = {'config': {'get_resource': config_name},
server_key: servers}
deploy_lookup[operation] = self
else:
sd_config = {'config': {'get_resource': config_name},
server_key: servers}
deploy_resource = \
HotResource(self.nodetemplate,
deploy_name,
sw_deploy_res,
sd_config, csar_dir=self.csar_dir)
hot_resources.append(deploy_resource)
deploy_lookup[operation] = deploy_resource
lifecycle_inputs = self._get_lifecycle_inputs(operation)
if lifecycle_inputs:
deploy_resource.properties['input_values'] = \
lifecycle_inputs
os.chdir(cwd)
# Add dependencies for the set of HOT resources in the sequence defined
# in operations_deploy_sequence
# TODO(anyone): find some better way to encode this implicit sequence
group = {}
op_index_min = None
op_index_max = -1
for op, hot in deploy_lookup.items():
# position to determine potential preceding nodes
op_index = operations_deploy_sequence.index(op.name)
if op_index_min is None or op_index < op_index_min:
op_index_min = op_index
if op_index > op_index_max:
op_index_max = op_index
for preceding_op_name in \
reversed(operations_deploy_sequence[:op_index]):
preceding_hot = deploy_lookup.get(
operations.get(preceding_op_name))
if preceding_hot:
hot.depends_on.append(preceding_hot)
hot.depends_on_nodes.append(preceding_hot)
group[preceding_hot] = hot
break
if op_index_max >= 0:
last_deploy = deploy_lookup.get(operations.get(
operations_deploy_sequence[op_index_max]))
else:
last_deploy = None
# save this dependency chain in the set of HOT resources
self.group_dependencies.update(group)
for hot in hot_resources:
hot.group_dependencies.update(group)
roles_deploy_resource = self._handle_ansiblegalaxy_roles(
hot_resources, node_name, servers)
# add a dependency to this ansible roles deploy to
# the first "classic" deploy generated for this node
if roles_deploy_resource and op_index_min:
first_deploy = deploy_lookup.get(operations.get(
operations_deploy_sequence[op_index_min]))
first_deploy.depends_on.append(roles_deploy_resource)
first_deploy.depends_on_nodes.append(roles_deploy_resource)
return hot_resources, deploy_lookup, last_deploy
def _handle_ansiblegalaxy_roles(self, hot_resources, initial_node_name,
hosting_on_server):
artifacts = self.get_all_artifacts(self.nodetemplate)
install_roles_script = ''
sw_deployment_resouce = \
HOTSoftwareDeploymentResources(hosting_on_server)
server_key = sw_deployment_resouce.server_key
sw_deploy_res = sw_deployment_resouce.software_deployment
for artifact_name, artifact in artifacts.items():
artifact_type = artifact.get('type', '').lower()
if artifact_type == 'tosca.artifacts.ansiblegalaxy.role':
role = artifact.get('file', None)
if role:
install_roles_script += 'ansible-galaxy install ' + role \
+ '\n'
if install_roles_script:
# remove trailing \n
install_roles_script = install_roles_script[:-1]
# add shebang and | to use literal scalar type (for multiline)
install_roles_script = '|\n#!/bin/bash\n' + install_roles_script
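            # For illustration (role name is hypothetical): a single role
            # 'geerlingguy.nginx' would produce a config value of
            #   |
            #   #!/bin/bash
            #   ansible-galaxy install geerlingguy.nginx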
config_name = initial_node_name + '_install_roles_config'
deploy_name = initial_node_name + '_install_roles_deploy'
hot_resources.append(
HotResource(self.nodetemplate, config_name,
'OS::Heat::SoftwareConfig',
{'config': install_roles_script},
csar_dir=self.csar_dir))
sd_config = {'config': {'get_resource': config_name},
server_key: hosting_on_server}
deploy_resource = \
HotResource(self.nodetemplate, deploy_name,
sw_deploy_res,
sd_config, csar_dir=self.csar_dir)
hot_resources.append(deploy_resource)
return deploy_resource
def handle_connectsto(self, tosca_source, tosca_target, hot_source,
hot_target, config_location, operation):
# The ConnectsTo relationship causes a configuration operation in
# the target.
# This hot resource is the software config portion in the HOT template
# This method adds the matching software deployment with the proper
# target server and dependency
if config_location == 'target':
hosting_server = hot_target._get_hosting_server()
hot_depends = hot_target
elif config_location == 'source':
hosting_server = self._get_hosting_server()
hot_depends = hot_source
sw_deployment_resouce = HOTSoftwareDeploymentResources(hosting_server)
server_key = sw_deployment_resouce.server_key
servers = sw_deployment_resouce.servers
sw_deploy_res = sw_deployment_resouce.software_deployment
deploy_name = tosca_source.name + '_' + tosca_target.name + \
'_connect_deploy'
sd_config = {'config': {'get_resource': self.name},
server_key: servers}
deploy_resource = \
HotResource(self.nodetemplate,
deploy_name,
sw_deploy_res,
sd_config,
depends_on=[hot_depends], csar_dir=self.csar_dir)
connect_inputs = self._get_connect_inputs(config_location, operation)
if connect_inputs:
deploy_resource.properties['input_values'] = connect_inputs
return deploy_resource
def handle_expansion(self):
pass
def handle_hosting(self):
        # handle hosting server for the OS::Heat::SoftwareDeployment
# from the TOSCA nodetemplate, traverse the relationship chain
# down to the server
sw_deploy_group = \
HOTSoftwareDeploymentResources.HOT_SW_DEPLOYMENT_GROUP_RESOURCE
sw_deploy = HOTSoftwareDeploymentResources.HOT_SW_DEPLOYMENT_RESOURCE
if self.properties.get('servers') and \
self.properties.get('server'):
del self.properties['server']
if self.type == sw_deploy_group or self.type == sw_deploy:
# skip if already have hosting
            # If type is NodeTemplate, look up the corresponding HotResource
host_server = self.properties.get('servers') \
or self.properties.get('server')
if host_server is None:
raise Exception(_("Internal Error: expecting host "
"in software deployment"))
elif isinstance(host_server.get('get_resource'), NodeTemplate):
self.properties['server']['get_resource'] = \
host_server['get_resource'].name
elif isinstance(host_server, dict) and \
not host_server.get('get_resource'):
self.properties['servers'] = \
host_server
def top_of_chain(self):
dependent = self.group_dependencies.get(self)
if dependent is None:
return self
else:
return dependent.top_of_chain()
    # this function allows providing substacks as external files;
    # those files will be dumped alongside the output file.
#
# return a dict of filename-content
def extract_substack_templates(self, base_filename, hot_template_version):
return {}
# this function asks the resource to embed substacks
# into the main template, if any.
# this is used when the final output is stdout
def embed_substack_templates(self, hot_template_version):
pass
def get_dict_output(self):
resource_sections = OrderedDict()
resource_sections[TYPE] = self.type
if self.properties:
resource_sections[PROPERTIES] = self.properties
if self.metadata:
resource_sections[MEDADATA] = self.metadata
if self.depends_on:
resource_sections[DEPENDS_ON] = []
for depend in self.depends_on:
resource_sections[DEPENDS_ON].append(depend.name)
if self.update_policy:
resource_sections[UPDATE_POLICY] = self.update_policy
if self.deletion_policy:
resource_sections[DELETION_POLICY] = self.deletion_policy
return {self.name: resource_sections}
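    # Illustrative shape of the mapping returned by get_dict_output() above
    # (resource name and property values are hypothetical):
    #   {'web_config_deploy': OrderedDict([
    #       ('type', 'OS::Heat::SoftwareDeployment'),
    #       ('properties', {'config': {'get_resource': 'web_config'},
    #                       'server': {'get_resource': 'web_server'}}),
    #       ('depends_on', ['web_config'])])}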
def _get_lifecycle_inputs(self, operation):
# check if this lifecycle operation has input values specified
# extract and convert to HOT format
if isinstance(operation.value, six.string_types):
# the operation has a static string
return {}
else:
            # the operation is a dict {'implementation': xxx, 'inputs': yyy}
inputs = operation.value.get('inputs')
deploy_inputs = {}
if inputs:
for name, value in inputs.items():
deploy_inputs[name] = value
return deploy_inputs
def _get_connect_inputs(self, config_location, operation):
if config_location == 'target':
inputs = operation.get('pre_configure_target').get('inputs')
elif config_location == 'source':
inputs = operation.get('pre_configure_source').get('inputs')
deploy_inputs = {}
if inputs:
for name, value in inputs.items():
deploy_inputs[name] = value
return deploy_inputs
def _get_hosting_server(self, node_template=None):
# find the server that hosts this software by checking the
# requirements and following the hosting chain
hosting_servers = []
host_exists = False
this_node_template = self.nodetemplate \
if node_template is None else node_template
for requirement in this_node_template.requirements:
for requirement_name, assignment in requirement.items():
for check_node in this_node_template.related_nodes:
# check if the capability is Container
if isinstance(assignment, dict):
node_name = assignment.get('node')
else:
node_name = assignment
if node_name and node_name == check_node.name:
if self._is_container_type(requirement_name,
check_node):
hosting_servers.append(check_node.name)
host_exists = True
elif check_node.related_nodes and not host_exists:
return self._get_hosting_server(check_node)
if hosting_servers:
return hosting_servers
return None
def _is_container_type(self, requirement_name, node):
# capability is a list of dict
# For now just check if it's type tosca.nodes.Compute
# TODO(anyone): match up requirement and capability
base_type = HotResource.get_base_type_str(node.type_definition)
if base_type == 'tosca.nodes.Compute':
return True
else:
return False
def get_hot_attribute(self, attribute, args):
# this is a place holder and should be implemented by the subclass
# if translation is needed for the particular attribute
raise Exception(_("No translation in TOSCA type {0} for attribute "
"{1}").format(self.nodetemplate.type, attribute))
def get_tosca_props(self):
tosca_props = {}
for prop in self.nodetemplate.get_properties_objects():
if isinstance(prop.value, GetInput):
tosca_props[prop.name] = {'get_param': prop.value.input_name}
else:
tosca_props[prop.name] = prop.value
return tosca_props
@staticmethod
def get_all_artifacts(nodetemplate):
# workaround bug in the parser
base_type = HotResource.get_base_type_str(nodetemplate.type_definition)
if base_type in policy_type:
artifacts = {}
else:
artifacts = nodetemplate.type_definition.get_value('artifacts',
parent=True)
if not artifacts:
artifacts = {}
tpl_artifacts = nodetemplate.entity_tpl.get('artifacts')
if tpl_artifacts:
artifacts.update(tpl_artifacts)
return artifacts
@staticmethod
def get_all_operations(node):
operations = {}
for operation in node.interfaces:
operations[operation.name] = operation
# workaround bug in the parser
base_type = HotResource.get_base_type_str(node.type_definition)
if base_type in policy_type:
return operations
node_type = node.type_definition
while True:
type_operations = HotResource._get_interface_operations_from_type(
node_type, node, 'Standard')
type_operations.update(operations)
operations = type_operations
if node_type.parent_type is not None:
node_type = node_type.parent_type
else:
return operations
@staticmethod
def _get_interface_operations_from_type(node_type, node, lifecycle_name):
operations = {}
base_type = HotResource.get_base_type_str(node_type)
if base_type in policy_type:
return operations
if node_type.interfaces and lifecycle_name in node_type.interfaces:
for name, elems in node_type.interfaces[lifecycle_name].items():
# ignore empty operations (only type)
# ignore global interface inputs,
# concrete inputs are on the operations themselves
if name != 'type' and name != 'inputs':
operations[name] = InterfacesDef(node_type,
lifecycle_name,
node, name, elems)
return operations
@staticmethod
def get_base_type_str(node_type):
if isinstance(node_type, six.string_types):
return node_type
if node_type.parent_type is not None:
parent_type_str = None
if isinstance(node_type.parent_type, six.string_types):
parent_type_str = node_type.parent_type
else:
parent_type_str = node_type.parent_type.type
if parent_type_str and parent_type_str.endswith('.Root'):
return node_type.type
else:
return HotResource.get_base_type_str(node_type.parent_type)
return node_type.type
class HOTSoftwareDeploymentResources(object):
"""Provides HOT Software Deployment resources
SoftwareDeployment or SoftwareDeploymentGroup Resource
"""
HOT_SW_DEPLOYMENT_RESOURCE = 'OS::Heat::SoftwareDeployment'
HOT_SW_DEPLOYMENT_GROUP_RESOURCE = 'OS::Heat::SoftwareDeploymentGroup'
def __init__(self, hosting_server=None):
self.software_deployment = self.HOT_SW_DEPLOYMENT_RESOURCE
self.software_deployment_group = self.HOT_SW_DEPLOYMENT_GROUP_RESOURCE
self.server_key = 'server'
self.hosting_server = hosting_server
self.servers = {}
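        # A single hosting server maps to an OS::Heat::SoftwareDeployment with
        # a 'server' property; multiple hosting servers map to an
        # OS::Heat::SoftwareDeploymentGroup keyed by 'servers'.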
if hosting_server is not None:
if len(self.hosting_server) == 1:
if isinstance(hosting_server, list):
self.servers['get_resource'] = self.hosting_server[0]
else:
for server in self.hosting_server:
self.servers[server] = {'get_resource': server}
self.software_deployment = self.software_deployment_group
self.server_key = 'servers'
|
StarcoderdataPython
|
1788243
|
<reponame>Dephilia/poaurk<filename>tests/api_test.py
# -*- coding: utf-8 -*-
import os
import json
import unittest
# compatible python3
import sys
from urllib.parse import parse_qsl
import mox  # required by the mox-based mock tests below
from poaurk import PlurkAPI, PlurkOAuth
class Test0ConsumerTokenSecret(unittest.TestCase):
def setUp(self):
pass
    def tearDown(self):
pass
def test_no_consumer_key(self):
with self.assertRaises(ValueError):
self.plurk = PlurkAPI()
self.plurk.callAPI('/APP/Profile/getPublicProfile',
{'user_id': 'dephillia'})
def test_invalid_consumer_key(self):
self.plurk = PlurkAPI("token", "secret")
r = self.plurk.callAPI('/APP/Profile/getPublicProfile',
{'user_id': 'dephillia'})
self.assertIsNone(r)
err = self.plurk.error()
self.assertEqual(err['code'], 400)
self.assertEqual(err['reason'], "BAD REQUEST")
self.assertEqual(err['content']['error_text'],
"40101:unknown application key")
class Test1AccessTokenSecret(unittest.TestCase):
def setUp(self):
pass
    def tearDown(self):
pass
def test_invalid_access_key(self):
self.plurk = PlurkAPI("key", "secret")
self.plurk.authorize("foor", "bar")
r = self.plurk.callAPI('/APP/Profile/getOwnProfile')
self.assertIsNone(r)
err = self.plurk.error()
self.assertEqual(err['code'], 400)
self.assertEqual(err['reason'], "BAD REQUEST")
self.assertEqual(err['content']['error_text'],
"40106:invalid access token")
@unittest.skipUnless(os.path.isfile("API.keys"), "requires API.keys")
class TestThreeLeggedAPI(unittest.TestCase):
def setUp(self):
self.plurk = PlurkAPI.fromfile('API.keys')
if not self.plurk.is_authorized():
            raise KeyError("You need to put consumer/access key/secret in API.keys")
    def tearDown(self):
pass
def test_get_ownprofile(self):
jdata = self.plurk.callAPI('/APP/Profile/getOwnProfile')
self.assertIsInstance(jdata, dict, "Object should be a dict")
self.assertGreater(jdata['user_info']['uid'], 0, "Self Uid > 0")
def test_upload_lenna(self):
jdata = self.plurk.callAPI('/APP/Timeline/uploadPicture',
files={"image":"tests/lenna.jpg"})
self.assertIsInstance(jdata, dict, "Object should be a dict")
self.assertTrue("full" in jdata, "have key 'full'")
self.assertTrue("thumbnail" in jdata, "have key 'thumbnail'")
@unittest.skipUnless(os.path.isfile("API.keys"), "requires API.keys")
class TestTwoLeggedAPI(unittest.TestCase):
def setUp(self):
try:
file = open('API.keys', 'r+')
except IOError:
print("You need to put key/secret in API.keys")
raise
except:
print("Unexpected error:", sys.exc_info()[0])
else:
data = json.load(file)
file.close()
self.plurk = PlurkAPI(data["CONSUMER_KEY"], data["CONSUMER_SECRET"])
    def tearDown(self):
pass
def test_get_public_profile(self):
jdata = self.plurk.callAPI('/APP/Profile/getPublicProfile',
{'user_id': 'clsung'})
self.assertIsInstance(jdata, dict, "Object should be a dict")
self.assertGreater(jdata['user_info']['uid'], 0, "Self Uid > 0")
self.assertEqual(jdata['user_info']['nick_name'],
"clsung", "Author's Name ;)")
class TestRequestToken(unittest.TestCase):
"""
Unit test for PlurkOAuth.get_request_token
"""
def setUp(self):
""" Create mock oauth object """
self.mox = mox.Mox()
self.oauth = PlurkOAuth("CONSUMER_KEY", "CONSUMER_SECRET")
self.oauth_response = \
'oauth_token_secret=O7WqqqWHA61f4ZE5izQdTQmK&oauth_token=ReqXBFOswcyR&oauth_callback_confirmed=true' # NOQA
self.golden_token = dict(parse_qsl(self.oauth_response))
self.mox.StubOutWithMock(PlurkOAuth, 'request')
def tearDown(self):
self.mox.UnsetStubs()
def _200_request(self):
return 200, self.oauth_response, ""
def test_get_request_token(self):
self.oauth.request(mox.IgnoreArg()).AndReturn(self._200_request())
self.mox.ReplayAll()
self.oauth.get_request_token()
self.assertEqual(self.golden_token, self.oauth.oauth_token)
self.mox.VerifyAll()
class TestAPIAuth(unittest.TestCase):
'''
Unit test for PlurkAPI auth part
'''
def setUp(self):
self.mox = mox.Mox()
self.api = PlurkAPI('CONSUMER_KEY', 'CONSUMER_SECRET')
self.oauth_response = \
'oauth_token_secret=O7WqqqWHA61f4ZE5izQdTQmK&oauth_token=ReqXBFOswcyR&oauth_callback_confirmed=true' # NOQA
self.verify_response = \
'oauth_token_secret=O7WqqqWHA61f4ZE5izQdTQmK&oauth_token=<KEY>'
self.golden_token = {
'key': '<KEY>',
'secret': '<KEY>',
}
self.golden_url = 'https://www.plurk.com/OAuth/authorize?oauth_token=ReqXBFOswcyR'
self.mox.StubOutWithMock(PlurkOAuth, 'request')
def tearDown(self):
self.mox.UnsetStubs()
def _200_request(self):
return 200, self.oauth_response, ""
def _200_verify(self):
return 200, self.verify_response, ''
def test_set_request_token(self):
self.api.set_request_token('<KEY>', '<KEY>')
token = self.api.get_request_token()
self.assertEqual(self.golden_token, token)
self.mox.VerifyAll()
def test_get_request_token(self):
self.api._oauth.request(mox.IgnoreArg()).AndReturn(self._200_request())
self.mox.ReplayAll()
token = self.api.get_request_token()
self.assertEqual(self.golden_token, token)
self.mox.VerifyAll()
def test_get_verifier_url(self):
self.api.set_request_token('<KEY>', '<KEY>')
url = self.api.get_verifier_url()
self.assertEqual(self.golden_url, url)
self.mox.VerifyAll()
def test_get_access_token(self):
self.api._oauth.request(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(self._200_verify())
self.mox.ReplayAll()
self.api.set_request_token('<KEY>', '<KEY>')
token = self.api.get_access_token('VERIFIER')
self.assertEqual(self.golden_token, token)
self.mox.VerifyAll()
|
StarcoderdataPython
|
8084655
|
import unittest
import json
from unittest.mock import Mock
from tests import SAMPLE_BASE_URL, SAMPLE_CLIENT_ID, SAMPLE_CLIENT_SECRET
def mock_response(headers,
status_code,
content='CONTENT',
mock_json=None):
# initialize mock response
response = Mock()
# define response content
if isinstance(content, dict):
response.content = json.dumps(content).encode('utf-8')
else:
response.content = bytes(content, 'utf-8')
# define response headers
response.headers = headers
# define response status code
response.status_code = status_code
# define response json
if mock_json:
response.body = mock_json
response.json = Mock(
return_value=mock_json
)
return response
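# Illustrative sketch (not part of the original suite): how mock_response might
# be used to fake a JSON HTTP response in a unit test. The header values and
# payload below are assumptions chosen only for demonstration.
def _example_mock_response_usage():
    resp = mock_response(
        headers={'Content-Type': 'application/json'},
        status_code=200,
        content={'ok': True},
        mock_json={'ok': True})
    assert resp.status_code == 200
    assert resp.json() == {'ok': True}
    assert json.loads(resp.content.decode('utf-8')) == {'ok': True}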
|
StarcoderdataPython
|
3299747
|
<gh_stars>0
from collections import defaultdict
from copy import copy
from onegov.core.crypto import random_password
from onegov.core.directives import query_form_class
from onegov.core.security import Secret
from onegov.core.templates import render_template
from onegov.form import merge_forms
from onegov.org import _, OrgApp
from onegov.org.forms import ManageUserForm, NewUserForm
from onegov.org.layout import DefaultMailLayout
from onegov.org.layout import UserLayout
from onegov.org.layout import UserManagementLayout
from onegov.core.elements import Link, LinkGroup
from onegov.ticket import TicketCollection, Ticket
from onegov.user import Auth, User, UserCollection
from onegov.user.errors import ExistingUserError
from onegov.user.forms import SignupLinkForm
from webob.exc import HTTPForbidden
from wtforms.validators import Optional
@OrgApp.html(model=UserCollection, template='usermanagement.pt',
permission=Secret)
def view_usermanagement(self, request, layout=None):
""" Allows the management of organisation users. """
layout = layout or UserManagementLayout(self, request)
users = defaultdict(list)
query = self.query().order_by(User.username)
for user in query:
users[user.role].append(user)
filters = {}
filters['role'] = [
Link(
text=request.translate(title),
active=value in self.filters.get('role', ()),
url=request.link(self.for_filter(role=value))
) for title, value in (
(_("Administrator"), 'admin'),
(_("Editor"), 'editor'),
(_("Member"), 'member'),
)
]
filters['active'] = [
Link(
text=request.translate(title),
active=value in self.filters.get('active', ()),
url=request.link(self.for_filter(active=value))
) for title, value in (
(_("Active"), True),
(_("Inactive"), False)
)
]
filters['tag'] = [
Link(
text=tag,
active=tag in self.filters.get('tag', ()),
url=request.link(self.for_filter(tag=tag))
) for tag in self.tags
]
filters['source'] = [
Link(
text={
'ldap_kerberos': 'LDAP Kerberos',
'ldap': 'LDAP',
'msal': 'AzureAD',
'': '-'
}.get(value, value),
active=value in self.filters.get('source', ()),
url=request.link(self.for_filter(source=value))
) for value in self.sources + ('', )
]
return {
'layout': layout,
'title': _("User Management"),
'users': users,
'filters': filters
}
@OrgApp.form(
model=UserCollection,
template='signup_link.pt',
permission=Secret,
form=SignupLinkForm,
name='signup-link')
def handle_create_signup_link(self, request, form, layout=None):
link = None
if form.submitted(request):
auth = Auth(request.app)
auth.signup_token = form.signup_token(auth)
link = request.link(auth, 'register')
layout = layout or UserManagementLayout(self, request)
layout.breadcrumbs.append(Link(_("New Signup Link"), '#'))
layout.editbar_links = None
return {
'layout': layout,
'title': _("New Signup Link"),
'link': link,
'form': form
}
@OrgApp.html(model=User, template='user.pt', permission=Secret)
def view_user(self, request, layout=None):
""" Shows all objects owned by the given user. """
layout = layout or UserLayout(self, request)
linkgroups = [
fn(request, self) for fn in request.app.config.linkgroup_registry
]
linkgroups.sort(key=lambda group: request.translate(group.title))
return {
'layout': layout,
'title': self.title,
'linkgroups': linkgroups
}
@OrgApp.userlinks()
def ticket_links(request, user):
tickets = TicketCollection(request.session).query()
tickets = tickets.filter_by(user_id=user.id)
tickets = tickets.order_by(Ticket.number)
tickets = tickets.with_entities(
Ticket.id, Ticket.number, Ticket.handler_code)
return LinkGroup(
title=_("Tickets"),
links=[
Link(
ticket.number,
request.class_link(Ticket, {
'handler_code': ticket.handler_code,
'id': ticket.id
}),
)
for ticket in tickets
]
)
def get_manage_user_form(self, request):
userprofile_form = query_form_class(request, self, name='userprofile')
assert userprofile_form
class OptionalUserprofile(userprofile_form):
hooked = False
def submitted(self, request):
# fields only present on the userprofile_form are made optional
# to make sure that we can always change the active/inactive state
# of the user and the role the user has
if not self.hooked:
for name, field in self._fields.items():
if not hasattr(userprofile_form, name):
continue
if not field.validators:
continue
# be careful not to change the class itself
field.validators = copy(field.validators)
field.validators.insert(0, Optional())
self.hooked = True
return super().submitted(request)
return merge_forms(ManageUserForm, OptionalUserprofile)
@OrgApp.form(model=User, template='form.pt', form=get_manage_user_form,
permission=Secret, name='edit')
def handle_manage_user(self, request, form, layout=None):
if self.source:
raise HTTPForbidden()
# XXX the manage user form doesn't have access to the username
# because it can't be edited, so we need to inject it here
# for validation purposes (check for a unique yubikey)
form.current_username = self.username
if not request.app.enable_yubikey:
form.delete_field('yubikey')
if form.submitted(request):
form.populate_obj(self)
request.success(_("Your changes were saved"))
return request.redirect(request.class_link(UserCollection))
elif not request.POST:
form.process(obj=self)
layout = layout or UserManagementLayout(self, request)
layout.breadcrumbs.append(Link(self.username, '#'))
return {
'layout': layout,
'title': self.username,
'form': form
}
@OrgApp.form(model=UserCollection, template='newuser.pt',
form=NewUserForm, name='new', permission=Secret)
def handle_new_user(self, request, form, layout=None):
if not request.app.enable_yubikey:
form.delete_field('yubikey')
layout = layout or UserManagementLayout(self, request)
layout.breadcrumbs.append(Link(_("New User"), '#'))
layout.editbar_links = None
if form.submitted(request):
password = <PASSWORD>()
if form.data.get('yubikey'):
second_factor = {
'type': 'yubikey',
'data': form.data['yubikey'][:12]
}
else:
second_factor = None
try:
user = self.add(
username=form.username.data,
password=password,
role=form.role.data,
active=form.active,
second_factor=second_factor,
)
except ExistingUserError:
form.username.errors.append(
_("A user with this e-mail address already exists"))
else:
if form.send_activation_email.data:
subject = request.translate(
_("An account was created for you")
)
content = render_template('mail_new_user.pt', request, {
'user': user,
'org': request.app.org,
'layout': DefaultMailLayout(user, request),
'title': subject
})
request.app.send_transactional_email(
subject=subject,
receivers=(user.username, ),
content=content,
)
request.info(_("The user was created successfully"))
return {
'layout': layout,
'title': _("New User"),
'username': form.username.data,
'password': password,
'sent_email': form.send_activation_email.data
}
return {
'layout': layout,
'title': _("New User"),
'form': form,
'password': <PASSWORD>,
'sent_email': False
}
|
StarcoderdataPython
|
11396897
|
#!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
"""
This is a script that trims (deletes expired) GCP persistent disk snapshots
in a given project, keeping the requested number of hourly, daily, weekly and
monthly snapshots.
Usage:
 ops-gcp-trim-pd-snapshots.py --keep-hourly 10 --keep-daily 7 --keep-weekly 4 \
     --keep-monthly 6 --gcp-creds-file /root/.gce/creds.json
"""
# Ignoring module name
# pylint: disable=invalid-name,import-error
import json
import argparse
from openshift_tools.cloud.gcp import gcp_snapshotter
# Reason: disable pylint import-error because our libs aren't loaded on jenkins.
# Status: temporary until we start testing in a container where our stuff is installed.
# pylint: disable=import-error
from openshift_tools.monitoring.metric_sender import MetricSender
EXPIRED_SNAPSHOTS_KEY = 'gcp.pd.snapshotter.expired_snapshots'
DELETED_SNAPSHOTS_KEY = 'gcp.pd.snapshotter.deleted_snapshots'
DELETION_ERRORS_KEY = 'gcp.pd.snapshotter.deletion_errors'
class TrimmerCli(object):
""" Responsible for parsing cli args and running the trimmer. """
def __init__(self):
""" initialize the class """
self.args = None
self.parse_args()
def parse_args(self):
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='PD Snapshot Trimmer')
parser.add_argument('--keep-hourly', required=True, type=int,
help='The number of hourly snapshots to keep. 0 is infinite.')
parser.add_argument('--keep-daily', required=True, type=int,
help='The number of daily snapshots to keep. 0 is infinite.')
parser.add_argument('--keep-weekly', required=True, type=int,
help='The number of weekly snapshots to keep. 0 is infinite.')
parser.add_argument('--keep-monthly', required=True, type=int,
help='The number of monthly snapshots to keep. 0 is infinite.')
parser.add_argument('--gcp-creds-file', required=False,
help='The gcp credentials file to use.')
parser.add_argument('--dry-run', action='store_true', default=False,
help='Say what would have been done, but don\'t actually do it.')
self.args = parser.parse_args()
def main(self):
""" main function """
total_expired_snapshots = 0
total_deleted_snapshots = 0
total_deletion_errors = 0
creds = json.loads(open(self.args.gcp_creds_file).read())
regions = gcp_snapshotter.PDSnapshotter.get_supported_regions(creds['project_id'], self.args.gcp_creds_file)
for region in regions:
print "Region: %s:" % region
ss = gcp_snapshotter.PDSnapshotter(creds['project_id'],
region['name'],
self.args.gcp_creds_file,
verbose=True)
expired_snapshots, deleted_snapshots, snapshot_deletion_errors = \
ss.trim_snapshots(hourly_backups=self.args.keep_hourly, \
daily_backups=self.args.keep_daily, \
weekly_backups=self.args.keep_weekly, \
monthly_backups=self.args.keep_monthly, \
dry_run=self.args.dry_run)
num_deletion_errors = len(snapshot_deletion_errors)
total_expired_snapshots += len(expired_snapshots)
total_deleted_snapshots += len(deleted_snapshots)
total_deletion_errors += num_deletion_errors
if num_deletion_errors > 0:
print " Snapshot Deletion errors (%d):" % num_deletion_errors
for cur_err in snapshot_deletion_errors:
print " %s" % cur_err
print
print " Total number of expired snapshots: %d" % total_expired_snapshots
print " Total number of deleted snapshots: %d" % total_deleted_snapshots
print "Total number of snapshot deletion errors: %d" % total_deletion_errors
print
print "Sending results to Zabbix:"
if self.args.dry_run:
print " *** DRY RUN, NO ACTION TAKEN ***"
else:
TrimmerCli.report_to_zabbix(total_expired_snapshots, total_deleted_snapshots, total_deletion_errors)
@staticmethod
def report_to_zabbix(total_expired_snapshots, total_deleted_snapshots, total_deletion_errors):
""" Sends the commands exit code to zabbix. """
mts = MetricSender(verbose=True)
mts.add_metric({
EXPIRED_SNAPSHOTS_KEY: total_expired_snapshots,
DELETED_SNAPSHOTS_KEY: total_deleted_snapshots,
DELETION_ERRORS_KEY: total_deletion_errors
})
mts.send_metrics()
if __name__ == "__main__":
TrimmerCli().main()
|
StarcoderdataPython
|
9788320
|
<filename>esmond/cassandra.py
#!/usr/bin/env python
# encoding: utf-8
"""
Cassandra DB interface calls and data encapsulation objects.
esmond schema in json-like notation:
// regular col family
"raw_data" : {
"snmp:router_a:FastPollHC:ifHCInOctets:xe-0_2_0:30000:2012" : {
"1343955624" : // long column name
"16150333739148" // UTF-8 containing JSON for values.
}
}
// supercolumn
"base_rates" : {
"snmp:router_a:FastPollHC:ifHCInOctets:xe-0_2_0:30000:2012" : {
"1343955600" : { // long column name.
"val": "123", // string key, counter type value.
"is_valid" : "2" // zero or positive non-zero.
}
}
}
// supercolumn
"rate_aggregations" : {
"snmp:router_a:FastPollHC:ifHCInOctets:xe-0_2_0:3600000:2012" : {
"1343955600" : { // long column name.
"val": "1234", // string key, counter type.
"30": "38" // key of the 'non-val' column is freq of the base rate.
} // the value of said is the count used in the average.
}
}
// supercolumn
"stat_aggregations" : {
"snmp:router_a:FastPollHC:ifHCInOctets:xe-0_2_0:86400000:2012" : {
"1343955600" : { // long column name.
"min": "0", // string keys, long types.
"max": "484140"
}
}
}
"""
# Standard
import calendar
import datetime
import json
import logging
import os
import pprint
import sys
import time
from collections import OrderedDict
from esmond.util import get_logger
# Third party
from pycassa import PycassaLogger
from pycassa.pool import ConnectionPool, AllServersUnavailable
from pycassa.columnfamily import ColumnFamily, NotFoundException
from pycassa.system_manager import *
from thrift.transport.TTransport import TTransportException
SEEK_BACK_THRESHOLD = 2592000000 # 30 days in ms
KEY_DELIMITER = ":"
AGG_TYPES = ['average', 'min', 'max', 'raw']
class CassandraException(Exception):
"""Common base"""
pass
class ConnectionException(CassandraException):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class CASSANDRA_DB(object):
keyspace = 'esmond'
raw_cf = 'raw_data'
rate_cf = 'base_rates'
agg_cf = 'rate_aggregations'
stat_cf = 'stat_aggregations'
_queue_size = 200
def __init__(self, config, qname=None):
"""
        Class contains all the relevant Cassandra logic. This includes:
* schema creation,
* connection information/pooling,
* generating the metadata cache of last val/ts information,
        * store data/update the rate/aggregation bins,
* and execute queries to return data to the REST interface.
"""
# Configure logging - if a qname has been passed in, hook
# into the persister logger, if not, toss together some fast
# console output for devel/testing.
if qname:
self.log = get_logger("espersistd.%s.cass_db" % qname)
else:
self.log = logging.getLogger('cassandra_db')
self.log.setLevel(logging.DEBUG)
format = logging.Formatter('%(name)s [%(levelname)s] %(message)s')
handle = logging.StreamHandler()
handle.setFormatter(format)
self.log.addHandler(handle)
# Add pycassa driver logging to existing logger.
plog = PycassaLogger()
plog.set_logger_name('%s.pycassa' % self.log.name)
# Debug level is far too noisy, so just hardcode the pycassa
# logger to info level.
plog.set_logger_level('info')
# Connect to cassandra with SystemManager, do a schema check
# and set up schema components if need be.
try:
sysman = SystemManager(config.cassandra_servers[0])
except TTransportException, e:
raise ConnectionException("System Manager can't connect to Cassandra "
"at %s - %s" % (config.cassandra_servers[0], e))
# Blow everything away if we're testing - be aware of this and use
        # with care. Currently just being explicitly set in test harness
# code but no longer set as a config file option since there could
# be unfortunate side effects.
if config.db_clear_on_testing:
self.log.info('Dropping keyspace %s' % self.keyspace)
if self.keyspace in sysman.list_keyspaces():
sysman.drop_keyspace(self.keyspace)
time.sleep(3)
# Create keyspace
_schema_modified = False # Track if schema components are created.
if not self.keyspace in sysman.list_keyspaces():
_schema_modified = True
self.log.info('Creating keyspace %s' % self.keyspace)
sysman.create_keyspace(self.keyspace, SIMPLE_STRATEGY,
{'replication_factor': '1'})
time.sleep(3)
# Create column families if they don't already exist.
# If a new column family is added, make sure to set
        # _schema_modified = True so it will be propagated.
self.log.info('Checking/creating column families')
# Raw Data CF
if not sysman.get_keyspace_column_families(self.keyspace).has_key(self.raw_cf):
_schema_modified = True
sysman.create_column_family(self.keyspace, self.raw_cf, super=False,
comparator_type=LONG_TYPE,
default_validation_class=UTF8_TYPE,
key_validation_class=UTF8_TYPE)
self.log.info('Created CF: %s' % self.raw_cf)
# Base Rate CF
if not sysman.get_keyspace_column_families(self.keyspace).has_key(self.rate_cf):
_schema_modified = True
sysman.create_column_family(self.keyspace, self.rate_cf, super=True,
comparator_type=LONG_TYPE,
default_validation_class=COUNTER_COLUMN_TYPE,
key_validation_class=UTF8_TYPE)
self.log.info('Created CF: %s' % self.rate_cf)
# Rate aggregation CF
if not sysman.get_keyspace_column_families(self.keyspace).has_key(self.agg_cf):
_schema_modified = True
sysman.create_column_family(self.keyspace, self.agg_cf, super=True,
comparator_type=LONG_TYPE,
default_validation_class=COUNTER_COLUMN_TYPE,
key_validation_class=UTF8_TYPE)
self.log.info('Created CF: %s' % self.agg_cf)
# Stat aggregation CF
if not sysman.get_keyspace_column_families(self.keyspace).has_key(self.stat_cf):
_schema_modified = True
sysman.create_column_family(self.keyspace, self.stat_cf, super=True,
comparator_type=LONG_TYPE,
default_validation_class=LONG_TYPE,
key_validation_class=UTF8_TYPE)
self.log.info('Created CF: %s' % self.stat_cf)
sysman.close()
self.log.info('Schema check done')
# If we just cleared the keyspace/data and there is more than
        # one server, pause to let schema propagate to the cluster machines.
if _schema_modified == True:
self.log.info("Waiting for schema to propagate...")
time.sleep(10)
self.log.info("Done")
# Now, set up the ConnectionPool
# Read auth information from config file and set up if need be.
_creds = {}
if config.cassandra_user and config.cassandra_pass:
_creds['username'] = config.cassandra_user
_creds['password'] = config.cassandra_pass
self.log.debug('Connecting with username: %s' % (config.cassandra_user,))
try:
self.log.debug('Opening ConnectionPool')
self.pool = ConnectionPool(self.keyspace,
server_list=config.cassandra_servers,
pool_size=10,
max_overflow=5,
max_retries=10,
timeout=30,
credentials=_creds)
except AllServersUnavailable, e:
raise ConnectionException("Couldn't connect to any Cassandra "
"at %s - %s" % (config.cassandra_servers, e))
self.log.info('Connected to %s' % config.cassandra_servers)
# Define column family connections for the code to use.
self.raw_data = ColumnFamily(self.pool, self.raw_cf).batch(self._queue_size)
self.rates = ColumnFamily(self.pool, self.rate_cf).batch(self._queue_size)
self.aggs = ColumnFamily(self.pool, self.agg_cf).batch(self._queue_size)
self.stat_agg = ColumnFamily(self.pool, self.stat_cf).batch(self._queue_size)
# Used when a cf needs to be selected on the fly.
self.cf_map = {
'raw': self.raw_data,
'rate': self.rates,
'aggs': self.aggs,
'stat': self.stat_agg
}
# Timing - this turns the database call profiling code on and off.
# This is not really meant to be used in production and generally
# just spits out statistics at the end of a run of test data. Mostly
# useful for timing specific database calls to aid in development.
self.profiling = False
if config.db_profile_on_testing and os.environ.get("ESMOND_TESTING", False):
self.profiling = True
self.stats = DatabaseMetrics(profiling=self.profiling)
# Class members
# Just the dict for the metadata cache.
self.metadata_cache = {}
def flush(self):
"""
        Calling this will explicitly flush all the batches to the
server. Generally only used in testing/dev scripts and not
in production when the batches will be self-flushing.
"""
self.log.debug('Flush called')
self.raw_data.send()
self.rates.send()
self.aggs.send()
self.stat_agg.send()
def close(self):
"""
Explicitly close the connection pool.
"""
self.log.debug('Close/dispose called')
self.pool.dispose()
def set_raw_data(self, raw_data, ttl=None):
"""
Called by the persister. Writes the raw incoming data to the appropriate
        column family. The optional TTL is passed in via the ttl argument.
        The raw_data arg passed in is an instance of the RawData class defined
in this module.
"""
_kw = {}
if ttl:
_kw['ttl'] = ttl
t = time.time()
# Standard column family update.
self.raw_data.insert(raw_data.get_key(),
{raw_data.ts_to_jstime(): json.dumps(raw_data.val)}, **_kw)
if self.profiling: self.stats.raw_insert(time.time() - t)
def set_metadata(self, k, meta_d):
"""
Just does a simple write to the dict being used as metadata.
"""
self.metadata_cache[k] = meta_d.get_document()
def get_metadata(self, raw_data):
"""
Called by the persister to get the metadata - last value and timestamp -
for a given measurement. If a given value is not found (as in when the
program is initially started for example) it will look in the raw data
as far back as SEEK_BACK_THRESHOLD to find the previous value. If found,
        it is seeded to the cache and returned. If not, this is presumed to be
new, and the cache is seeded with the value that is passed in.
        The raw_data arg passed in is an instance of the RawData class defined
in this module.
The return value is a Metadata object, also defined in this module.
"""
t = time.time()
meta_d = None
if not self.metadata_cache.has_key(raw_data.get_meta_key()):
# Didn't find a value in the metadata cache. First look
# back through the raw data for SEEK_BACK_THRESHOLD seconds
# to see if we can find the last processed value.
ts_max = raw_data.ts_to_jstime() - 1 # -1ms to look at older vals
ts_min = ts_max - SEEK_BACK_THRESHOLD
ret = self.raw_data._column_family.multiget(
self._get_row_keys(raw_data.path, raw_data.freq,
ts_min, ts_max),
# Note: ts_max and ts_min appear to be reversed here -
# that's because this is a reversed range query.
column_start=ts_max, column_finish=ts_min,
column_count=1, column_reversed=True)
if self.profiling: self.stats.meta_fetch((time.time() - t))
if ret:
# A previous value was found in the raw data, so we can
# seed/return that.
key = ret.keys()[-1]
ts = ret[key].keys()[0]
val = json.loads(ret[key][ts])
meta_d = Metadata(last_update=ts, last_val=val, min_ts=ts,
freq=raw_data.freq, path=raw_data.path)
self.log.debug('Metadata lookup from raw_data for: %s' %
(raw_data.get_meta_key()))
else:
# No previous value was found (or at least not one in the defined
# time range) so seed/return the current value.
meta_d = Metadata(last_update=raw_data.ts, last_val=raw_data.val,
min_ts=raw_data.ts, freq=raw_data.freq, path=raw_data.path)
self.log.debug('Initializing metadata for: %s using %s' %
(raw_data.get_meta_key(), raw_data))
self.set_metadata(raw_data.get_meta_key(), meta_d)
else:
meta_d = Metadata(**self.metadata_cache[raw_data.get_meta_key()])
return meta_d
def update_metadata(self, k, metadata):
"""
Update the metadata cache with a recently updated value. Called by the
persister.
The metadata arg is a Metadata object defined in this module.
"""
t = time.time()
for i in ['last_val', 'min_ts', 'last_update']:
self.metadata_cache[k][i] = getattr(metadata, i)
#self.stats.meta_update((time.time() - t))
def update_rate_bin(self, ratebin):
"""
Called by the persister. This updates a base rate bin in the base
rate column family.
The ratebin arg is a BaseRateBin object defined in this module.
"""
t = time.time()
# A super column insert. Both val and is_valid are counter types.
self.rates.insert(ratebin.get_key(),
{ratebin.ts_to_jstime(): {'val': ratebin.val, 'is_valid': ratebin.is_valid}})
if self.profiling: self.stats.baserate_update((time.time() - t))
def update_rate_aggregation(self, raw_data, agg_ts, freq):
"""
Called by the persister to update the rate aggregation rollups.
The args are a RawData object, the "compressed" aggregation timestamp
and the frequency of the rollups in seconds.
"""
t = time.time()
agg = AggregationBin(
ts=agg_ts, freq=freq, val=raw_data.val, base_freq=raw_data.freq, count=1,
min=raw_data.val, max=raw_data.val, path=raw_data.path
)
# Super column update. The base rate frequency is stored as the column
# name key that is not 'val' - this will be used by the query interface
# to generate the averages. Both values are counter types.
self.aggs.insert(agg.get_key(),
{agg.ts_to_jstime(): {'val': agg.val, str(agg.base_freq): 1}})
if self.profiling: self.stats.aggregation_update((time.time() - t))
def update_stat_aggregation(self, raw_data, agg_ts, freq):
"""
Called by the persister to update the stat aggregations (ie: min/max).
Unlike the other update code, this has to read from the appropriate bin
to see if the min or max needs to be updated. The update is done if
need be, and the updated boolean is set to true and returned to the
calling code to flush the batch if need be. Done that way to flush
more than one batch update rather than doing it each time.
The args are a RawData object, the "compressed" aggregation timestamp
and the frequency of the rollups in seconds.
"""
updated = False
# Create the AggBin object.
agg = AggregationBin(
ts=agg_ts, freq=freq, val=raw_data.val, base_freq=raw_data.freq, count=1,
min=raw_data.val, max=raw_data.val, path=raw_data.path
)
t = time.time()
ret = None
try:
# Retrieve the appropriate stat aggregation.
ret = self.stat_agg._column_family.get(agg.get_key(),
super_column=agg.ts_to_jstime())
except NotFoundException:
# Nothing will be found if the rollup bin does not yet exist.
pass
if self.profiling: self.stats.stat_fetch((time.time() - t))
t = time.time()
if not ret:
# Bin does not exist, so initialize min and max with the same val.
self.stat_agg.insert(agg.get_key(),
{agg.ts_to_jstime(): {'min': agg.val, 'max': agg.val}})
updated = True
elif agg.val > ret['max']:
# Update max.
self.stat_agg.insert(agg.get_key(),
{agg.ts_to_jstime(): {'max': agg.val}})
updated = True
elif agg.val < ret['min']:
# Update min.
self.stat_agg.insert(agg.get_key(),
{agg.ts_to_jstime(): {'min': agg.val}})
updated = True
else:
pass
if self.profiling: self.stats.stat_update((time.time() - t))
return updated
def _get_row_keys(self, path, freq, ts_min, ts_max):
"""
Utility function used by the query interface.
Given these values and the starting/stopping timestamp, return a
list of row keys (ie: more than one if the query spans years) to
be used as the first argument to a multiget cassandra query.
"""
year_start = datetime.datetime.utcfromtimestamp(float(ts_min)/1000.0).year
year_finish = datetime.datetime.utcfromtimestamp(float(ts_max)/1000.0).year
key_range = []
if year_start != year_finish:
for year in range(year_start, year_finish+1):
key_range.append(get_rowkey(path, freq=freq, year=year))
else:
key_range.append(get_rowkey(path, freq=freq, year=year_start))
return key_range
def check_for_valid_keys(self, path=None, freq=None,
ts_min=None, ts_max=None, col_fam='rate'):
"""
        Utility function used to check whether a discrete key/set of keys
        exists. Used by the api/etc. to see if an invalid key is the reason no
        data is returned.
"""
found = False
keys = self._get_row_keys(path,freq,ts_min,ts_max)
for key in keys:
try:
self.cf_map[col_fam]._column_family.get(key, column_count=1)
except NotFoundException:
# Key was not found.
pass
else:
# Key was found so mark boolean as good - revisit?
found = True
return found
def query_baserate_timerange(self, path=None, freq=None,
ts_min=None, ts_max=None, cf='average'):
"""
Query interface method to retrieve the base rates (generally average
but could be delta as well).
"""
ret_count = self.rates._column_family.multiget_count(
self._get_row_keys(path,freq,ts_min,ts_max),
column_start=ts_min, column_finish=ts_max)
cols = 0
for i in ret_count.keys():
cols += ret_count[i]
ret = self.rates._column_family.multiget(
self._get_row_keys(path,freq,ts_min,ts_max),
column_start=ts_min, column_finish=ts_max,
column_count=cols+5)
if cf not in ['average', 'delta']:
self.log.error('Not a valid option: %s - defaulting to average' % cf)
cf = 'average'
# Divisors to return either the average or a delta.
value_divisors = { 'average': int(freq/1000), 'delta': 1 }
# Just return the results and format elsewhere.
results = []
for k,v in ret.items():
for kk,vv in v.items():
results.append({'ts': kk, 'val': float(vv['val']) / value_divisors[cf],
'is_valid': vv['is_valid']})
return results
def query_aggregation_timerange(self, path=None, freq=None,
ts_min=None, ts_max=None, cf=None):
"""
Query interface method to retrieve the aggregation rollups - could
be average/min/max. Different column families will be queried
depending on what value "cf" is set to.
"""
if cf not in AGG_TYPES:
self.log.error('Not a valid option: %s - defaulting to average' % cf)
cf = 'average'
if cf == 'average' or cf == 'raw':
ret_count = self.aggs._column_family.multiget_count(
self._get_row_keys(path,freq,ts_min,ts_max),
column_start=ts_min, column_finish=ts_max)
cols = 0
for i in ret_count.keys():
cols += ret_count[i]
# print cols
ret = self.aggs._column_family.multiget(
self._get_row_keys(path,freq,ts_min,ts_max),
column_start=ts_min, column_finish=ts_max,
column_count=cols+5)
# Just return the results and format elsewhere.
results = []
for k,v in ret.items():
for kk,vv in v.items():
ts = kk
val = None
base_freq = None
count = None
for kkk in vv.keys():
if kkk == 'val':
val = vv[kkk]
else:
base_freq = kkk
count = vv[kkk]
ab = AggregationBin(**{'ts': ts, 'val': val,'base_freq': int(base_freq), 'count': count, 'cf': cf})
if cf == 'average':
datum = {'ts': ts, 'val': ab.average, 'cf': ab.cf}
else:
datum = {'ts': ts, 'val': ab.val, 'cf': ab.cf}
results.append(datum)
elif cf == 'min' or cf == 'max':
ret_count = self.stat_agg._column_family.multiget_count(
self._get_row_keys(path,freq,ts_min,ts_max),
column_start=ts_min, column_finish=ts_max)
cols = 0
for i in ret_count.keys():
cols += ret_count[i]
ret = self.stat_agg._column_family.multiget(
self._get_row_keys(path,freq,ts_min,ts_max),
column_start=ts_min, column_finish=ts_max,
column_count=cols+5)
results = []
for k,v in ret.items():
for kk,vv in v.items():
ts = kk
if cf == 'min':
results.append({'ts': ts, 'val': vv['min'], 'cf': cf})
else:
results.append({'ts': ts, 'val': vv['max'], 'cf': cf})
return results
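# Illustrative sketch (comments only, not executed): cf='average' (or 'raw')
# reads the aggs column family and returns AggregationBin averages, while
# cf='min'/'max' reads the stat_agg column family directly, e.g.:
#
#   db.query_aggregation_timerange(path=['snmp', 'router_a', 'in'],
#                                  freq=3600000, ts_min=start, ts_max=end,
#                                  cf='max')
#   => [{'ts': ..., 'val': ..., 'cf': 'max'}, ...]
#
# The path and frequency shown here are hypothetical.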
def query_raw_data(self, path=None, freq=None,
ts_min=None, ts_max=None):
"""
Query interface to query the raw data.
"""
ret_count = self.raw_data._column_family.multiget_count(
self._get_row_keys(path,freq,ts_min,ts_max),
column_start=ts_min, column_finish=ts_max)
cols = 0
for i in ret_count.keys():
cols += ret_count[i]
ret = self.raw_data._column_family.multiget(
self._get_row_keys(path,freq,ts_min,ts_max),
column_start=ts_min, column_finish=ts_max,
column_count=cols+5)
# Just return the results and format elsewhere.
results = []
for k,v in ret.items():
for kk,vv in v.items():
results.append({'ts': kk, 'val': json.loads(vv)})
return results
def __del__(self):
pass
# Stats/timing code for connection class
class DatabaseMetrics(object):
"""
Code to handle calculating timing statistics for discrete database
calls in the CASSANDRA_DB module. Generally only used in development
to produce statistics when pushing runs of test data through it.
"""
# List of attributes to generate/method names.
_individual_metrics = [
'raw_insert',
'baserate_update',
'aggregation_update',
'meta_fetch',
'stat_fetch',
'stat_update',
]
_all_metrics = _individual_metrics + ['total', 'all']
def __init__(self, profiling=False):
self.profiling = profiling
if not self.profiling:
return
# Populate attrs from list.
for im in self._individual_metrics:
setattr(self, '%s_time' % im, 0)
setattr(self, '%s_count' % im, 0)
def _increment(self, m, t):
"""
Actual logic called by named wrapper methods. Increments
the time sums and counts for the various db calls.
"""
setattr(self, '%s_time' % m, getattr(self, '%s_time' % m) + t)
setattr(self, '%s_count' % m, getattr(self, '%s_count' % m) + 1)
# These are all wrapper methods that call _increment()
def raw_insert(self, t):
self._increment('raw_insert', t)
def baserate_update(self, t):
self._increment('baserate_update', t)
def aggregation_update(self, t):
self._increment('aggregation_update', t)
def meta_fetch(self, t):
self._increment('meta_fetch', t)
def stat_fetch(self, t):
self._increment('stat_fetch', t)
def stat_update(self, t):
self._increment('stat_update', t)
def report(self, metric='all'):
"""
Called at the end of a test harness or other loading dev script.
Outputs the various data to the console.
"""
if not self.profiling:
print 'Not profiling'
return
if metric not in self._all_metrics:
print 'bad metric'
return
s = ''
time = count = 0
if metric in self._individual_metrics:
datatype, action = metric.split('_')
action = action.title()
time = getattr(self, '%s_time' % metric)
count = getattr(self, '%s_count' % metric)
if time: # stop /0 errors
s = '%s %s %s data in %.3f (%.3f per sec)' \
% (action, count, datatype, time, (count/time))
if metric.find('total') > -1:
s += ' (informational - not in total)'
elif metric == 'total':
for k,v in self.__dict__.items():
if k.find('total') > -1:
# don't double count the agg total numbers
continue
if k.endswith('_count'):
count += v
elif k.endswith('_time'):
time += v
else:
pass
if time:
s = 'Total: %s db transactions in %.3f (%.3f per sec)' \
% (count, time, (count/time))
elif metric == 'all':
for m in self._all_metrics:
if m == 'all':
continue
else:
self.report(m)
if len(s): print s
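# Illustrative usage sketch for DatabaseMetrics (hypothetical timings, not
# executed as part of this module; time is assumed to be imported above):
#
#   stats = DatabaseMetrics(profiling=True)
#   t = time.time()
#   # ... perform a raw insert against cassandra ...
#   stats.raw_insert(time.time() - t)
#   stats.report('raw_insert')  # e.g. "Insert 1 raw data in 0.002 (500.000 per sec)"
#   stats.report('total')       # combined count/time across all call types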
# Data encapsulation objects - these objects wrap the various data
# in an object and provide utility methods and properties to convert
# timestamps, calculate averages, etc.
class DataContainerBase(object):
"""
Base class for the other encapsulation objects. Mostly provides
utility methods for subclasses.
"""
_doc_properties = []
def __init__(self, path):
self.path = path
def _handle_date(self,d):
"""
Return a datetime object given a JavaScript timestamp.
"""
if type(d) == datetime.datetime:
return d
else:
return datetime.datetime.utcfromtimestamp(float(d)/1000.0)
def get_document(self):
"""
Return a dictionary of the attrs/props in the object.
"""
doc = {}
for k,v in self.__dict__.items():
if k.startswith('_'):
continue
doc[k] = v
for p in self._doc_properties:
doc[p] = getattr(self, '%s' % p)
return doc
def get_key(self):
"""
Return a cassandra row key based on the contents of the object.
"""
return get_rowkey(self.path)
def ts_to_jstime(self, t='ts'):
"""
Return an internally represented datetime value as a JavaScript
timestamp which is milliseconds since the epoch (Unix timestamp * 1000).
Defaults to returning 'ts' property, but can be given an arg to grab a
different property/attribute like Metadata.last_update.
"""
ts = getattr(self, t)
return calendar.timegm(ts.utctimetuple()) * 1000
def ts_to_unixtime(self, t='ts'):
"""
Return an internally represented datetime value as a Unix timestamp.
Defaults to returning 'ts' property, but can be given an arg to grab a
different property/attribute like Metadata.last_update.
"""
ts = getattr(self, t)
return calendar.timegm(ts.utctimetuple())
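# Illustrative sketch (comments only, not executed): for an object whose 'ts'
# is datetime.datetime(2013, 1, 1, 0, 0, 0) in UTC,
#   obj.ts_to_unixtime() => 1356998400
#   obj.ts_to_jstime()   => 1356998400000
# i.e. the JavaScript timestamp is simply the Unix timestamp * 1000.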
class RawData(DataContainerBase):
"""
Container for raw data rows.
Can be instantiated from args when reading from the persist queue, or via
**kw when reading data back out of Cassandra.
"""
_doc_properties = ['ts']
def __init__(self, path=None, ts=None, val=None):
DataContainerBase.__init__(self, path)
self._ts = None
self.ts = ts
self.val = val
def get_key(self):
"""
Return a cassandra row key based on the contents of the object.
We append the year to the row key to limit the size of each row to only
one year's worth of data. This is an implementation detail for using
Cassandra effectively.
"""
return get_rowkey(self.path, year=self.ts.year)
@property
def ts(self):
return self._ts
@ts.setter
def ts(self, value):
self._ts = self._handle_date(value)
class RawRateData(RawData):
"""
Container for raw data for rate based rows.
"""
_doc_properties = ['ts']
def __init__(self, path=None, ts=None, val=None, freq=None):
RawData.__init__(self, path, ts, val)
self.freq = freq
def __unicode__(self):
return "<RawRateData/%d: ts=%s, val=%s, path=%s>" % \
(id(self), self.ts, self.val, self.path)
def __repr__(self):
return "<RawRateData/%d: ts=%s, val=%s, path=%s>" % \
(id(self), self.ts, self.val, self.path)
def get_key(self):
"""
Return a cassandra row key based on the contents of the object.
For rate data we add the frequency to the row key before the year, see
the RawData.get_key() documentation for details about the year.
"""
return get_rowkey(self.path, freq=self.freq, year=self.ts.year)
def get_meta_key(self):
"""
Get a "metadata row key" - metadata don't have timestamps/years.
Other objects use this to look up entires in the metadata_cache.
"""
return get_rowkey(self.path, freq=self.freq)
@property
def min_last_update(self):
return self.ts_to_jstime() - self.freq * 40
@property
def slot(self):
return (self.ts_to_jstime() / self.freq) * self.freq
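# Illustrative sketch (comments only, not executed): with a hypothetical
# freq of 30000 ms and a ts equivalent to 1356998415000 ms,
#   self.slot            => (1356998415000 / 30000) * 30000 = 1356998400000
#   self.min_last_update => 1356998415000 - 30000 * 40    = 1356997215000
# i.e. slot floors the timestamp to the start of its base-rate bin.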
class Metadata(DataContainerBase):
"""
Container for metadata information.
"""
_doc_properties = ['min_ts', 'last_update']
def __init__(self, path=None, last_update=None, last_val=None, min_ts=None, freq=None):
DataContainerBase.__init__(self, path)
self._min_ts = self._last_update = None
self.last_update = last_update
self.last_val = last_val
self.min_ts = min_ts
self.freq = freq
def __unicode__(self):
return "<Metadata/%d: last_update=%s, last_val=%s, min_ts=%s, freq=%s>" % \
(id(self), self.last_update, self.last_val, self.min_ts, self.freq)
def __repr__(self):
return "<Metadata/%d: last_update=%s, last_val=%s, min_ts=%s, freq=%s>" % \
(id(self), self.last_update, self.last_val, self.min_ts, self.freq)
@property
def min_ts(self):
return self._min_ts
@min_ts.setter
def min_ts(self, value):
self._min_ts = self._handle_date(value)
@property
def last_update(self):
return self._last_update
@last_update.setter
def last_update(self, value):
self._last_update = self._handle_date(value)
def refresh_from_raw(self, data):
"""
Update the internal state of a metadata object from a raw data
object. This is called by the persister when calculating
base rate deltas to refresh the cache with current values after a
successful delta is generated.
"""
if self.min_ts > data.ts:
self.min_ts = data.ts
self.last_update = data.ts
self.last_val = data.val
class BaseRateBin(RawRateData):
"""
Container for base rates. Has 'average' property to return the averages.
"""
_doc_properties = ['ts']
def __init__(self, path=None, ts=None, val=None, freq=None, is_valid=1):
RawRateData.__init__(self, path, ts, val, freq)
self.is_valid = is_valid
@property
def average(self):
return self.val / self.freq
class AggregationBin(BaseRateBin):
"""
Container for aggregation rollups. Also has an 'average' property to generate averages.
"""
def __init__(self, path=None, ts=None, val=None, freq=None, base_freq=None, count=None,
min=None, max=None, cf=None):
BaseRateBin.__init__(self, path, ts, val, freq)
self.count = count
self.min = min
self.max = max
self.base_freq = base_freq
self.cf = cf
@property
def average(self):
return self.val / (self.count * (self.base_freq/1000.0))
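# Illustrative sketch (comments only, not executed): for a hypothetical
# aggregation bin with val=12000000 (sum of the base-rate deltas in the bin),
# count=120 base bins and base_freq=30000 ms,
#   ab.average => 12000000 / (120 * (30000/1000.0)) = 3333.33... per second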
def escape_path(path):
escaped = []
for step in path:
escaped.append(step.replace(KEY_DELIMITER,
"\\%s" % KEY_DELIMITER))
return escaped
def get_rowkey(path, freq=None, year=None):
"""
Given a path and some additional data, build the Cassandra row key.
The freq and year arguments are used for internal bookkeeping inside
Cassandra.
"""
appends = []
if freq:
appends.append(str(freq))
if year:
appends.append(str(year))
return KEY_DELIMITER.join(escape_path(path) + appends)
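# Illustrative sketch (comments only, not executed): assuming KEY_DELIMITER
# is ':',
#   get_rowkey(['snmp', 'router_a', 'xe-0/0/0', 'in'], freq=30000, year=2013)
#   => 'snmp:router_a:xe-0/0/0:in:30000:2013'
# Delimiter characters inside a path element are escaped by escape_path()
# before the join; the path shown here is hypothetical.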
def _split_rowkey(s, escape='\\'):
"""
Return the elements of the rowkey taking escaping into account.
FOR INTERNAL USE ONLY! This returns more than just the path in most
instances and needs to be used with specific knowledge of what kind of row
key is used.
"""
indices = []
for i in range(len(s)):
if s[i] == KEY_DELIMITER:
if i > 0 and s[i-1] != escape:
indices.append(i)
elif i == 0:
indices.append(i)
out = []
last = 0
for i in indices:
out.append(s[last:i].replace(escape, ""))
last = i+1
out.append(s[last:])
return out
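# Illustrative sketch (comments only, not executed): assuming KEY_DELIMITER
# is ':',
#   _split_rowkey('snmp:router_a:xe-0/0/0:in:30000:2013')
#   => ['snmp', 'router_a', 'xe-0/0/0', 'in', '30000', '2013']
# A delimiter escaped as '\:' inside an element is not treated as a split
# point.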