id (string, 2–8 chars) | text (string, 16–264k chars) | dataset_id (string, 1 value)
---|---|---
3289424
|
from constants import (
TEMPLATES_TABLE,
ASSETS_TABLE,
BLOCKS_TABLE,
BOOKMARKS_TABLE,
COMMENTS_TABLE,
USERS_TABLE,
USERS_TOKENS_TABLE,
AUTHORS_TABLE,
EMAILS_TABLE,
UID_KEY,
UUID_KEY,
TOKEN_KEY,
USER_KEY,
FULL_NAME_KEY,
AVATAR_URL_KEY,
EMAIL_KEY,
TEMPLATE_KEY,
)
from utils.db import connect_db
"""Functions for managing a dataset SQL database
# Schemas
#################### templates ######################
id
name
description
assets
template
thumbnail
pages
styles
updated_at
"""
@connect_db
def add_template(db, template):
table = db[TEMPLATES_TABLE]
template[UUID_KEY] = str(template[UUID_KEY])
table.upsert(
{
"uuid": template[UUID_KEY],
**{k: v for k, v in template.items() if v is not None},
},
[UUID_KEY],
)
@connect_db
def remove_template(db, uuid, author):
table = db[TEMPLATES_TABLE]
table.delete(uuid=str(uuid), author=author)
@connect_db
def get_template(db, uuid):
table = db[TEMPLATES_TABLE]
row = table.find_one(uuid=str(uuid))
if row is not None:
return row
return None
@connect_db
def get_all_templates(db, author, page, size):
table = db[TEMPLATES_TABLE]
return table.find(author=author, _limit=size, _offset=page * size)
@connect_db
def get_templates_by_category(db, category, page, size):
table = db[TEMPLATES_TABLE]
return table.find(category=category, _limit=size, _offset=page * size)
@connect_db
def get_all_public_templates(db, page, size):
table = db[TEMPLATES_TABLE]
return table.find(public=True, _limit=size, _offset=page * size)
@connect_db
def get_templates_length(db):
return len(db[TEMPLATES_TABLE])
@connect_db
def get_user_templates_count(db, author):
table = db[TEMPLATES_TABLE]
return table.count(author=author)
############### assets ########################
@connect_db
def add_asset(db, asset):
table = db[ASSETS_TABLE]
table.insert(asset)
@connect_db
def update_asset(db, asset):
table = db[ASSETS_TABLE]
table.update(
{"uid": asset[UID_KEY], **{k: v for k, v in asset.items() if v is not None}},
[UID_KEY],
)
@connect_db
def remove_asset(db, uid, author):
table = db[ASSETS_TABLE]
table.delete(uid=uid, author=author)
@connect_db
def get_asset(db, uid, author):
table = db[ASSETS_TABLE]
row = table.find_one(uid=uid, author=author)
if row is not None:
return row
return None
@connect_db
def get_all_assets(db, author, page, size):
table = db[ASSETS_TABLE]
return table.find(author=author, _limit=size, _offset=page * size)
@connect_db
def get_assets_length(db):
return len(db[ASSETS_TABLE])
@connect_db
def get_user_assets_count(db, author):
table = db[ASSETS_TABLE]
return table.count(author=author)
########################### bookmarks ##################################
@connect_db
def add_bookmark(db, bookmark):
table = db[BOOKMARKS_TABLE]
bookmark[UUID_KEY] = str(bookmark[UUID_KEY])
bookmark[TEMPLATE_KEY] = str(bookmark[TEMPLATE_KEY])
table.insert(bookmark)
@connect_db
def update_bookmark(db, bookmark):
table = db[BOOKMARKS_TABLE]
bookmark[UUID_KEY] = str(bookmark[UUID_KEY])
table.update(
{
"uuid": bookmark[UUID_KEY],
**{k: v for k, v in bookmark.items() if v is not None},
},
[UUID_KEY],
)
@connect_db
def remove_bookmark(db, uuid, author):
table = db[BOOKMARKS_TABLE]
table.delete(uuid=str(uuid), author=author)
@connect_db
def get_bookmark(db, uuid, author):
table = db[BOOKMARKS_TABLE]
row = table.find_one(uuid=str(uuid), author=author)
if row is not None:
return row
return None
@connect_db
def get_all_bookmarks(db, author, page, size):
table = db[BOOKMARKS_TABLE]
return table.find(author=author, _limit=size, _offset=page * size)
@connect_db
def get_bookmarks_length(db):
return len(db[BOOKMARKS_TABLE])
@connect_db
def get_user_bookmarks_count(db, author):
table = db[BOOKMARKS_TABLE]
return table.count(author=author)
@connect_db
def get_template_bookmarks_count(db, template):
table = db[BOOKMARKS_TABLE]
return table.count(template=str(template))
################################# blocks #################################
@connect_db
def add_block(db, block):
table = db[BLOCKS_TABLE]
block[UUID_KEY] = str(block[UUID_KEY])
block[TEMPLATE_KEY] = str(block[TEMPLATE_KEY])
table.insert(block)
@connect_db
def update_block(db, block):
table = db[BLOCKS_TABLE]
block[UUID_KEY] = str(block[UUID_KEY])
table.update(
{"uuid": block[UUID_KEY], **{k: v for k, v in block.items() if v is not None}},
[UUID_KEY],
)
@connect_db
def remove_block(db, uuid, author):
table = db[BLOCKS_TABLE]
table.delete(uuid=str(uuid), author=author)
@connect_db
def get_block(db, uuid, author):
table = db[BLOCKS_TABLE]
row = table.find_one(uuid=str(uuid), author=author)
if row is not None:
return row
return None
@connect_db
def get_all_blocks(db, author, page, size):
table = db[BLOCKS_TABLE]
return table.find(author=author, _limit=size, _offset=page * size)
@connect_db
def get_blocks_length(db):
return len(db[BLOCKS_TABLE])
@connect_db
def get_user_blocks_count(db, author):
table = db[BLOCKS_TABLE]
return table.count(author=author)
################################## comments ###################################
@connect_db
def add_comment(db, comment):
table = db[COMMENTS_TABLE]
comment[UUID_KEY] = str(comment[UUID_KEY])
comment[TEMPLATE_KEY] = str(comment[TEMPLATE_KEY])
table.insert(comment)
@connect_db
def update_comment(db, comment):
table = db[COMMENTS_TABLE]
comment[UUID_KEY] = str(comment[UUID_KEY])
table.update(
{
"uuid": comment[UUID_KEY],
**{k: v for k, v in comment.items() if v is not None},
},
[UUID_KEY],
)
@connect_db
def remove_comment(db, uuid, author):
table = db[COMMENTS_TABLE]
table.delete(uuid=str(uuid), author=author)
@connect_db
def get_comment(db, uuid, author):
table = db[COMMENTS_TABLE]
row = table.find_one(uuid=str(uuid), author=author)
if row is not None:
return row
return None
@connect_db
def get_all_template_comments(db, template, page, size):
table = db[COMMENTS_TABLE]
return table.find(template=str(template), _limit=size, _offset=page * size)
@connect_db
def get_all_comments(db, author, page, size):
table = db[COMMENTS_TABLE]
return table.find(author=author, _limit=size, _offset=page * size)
@connect_db
def get_comments_length(db):
return len(db[COMMENTS_TABLE])
@connect_db
def get_user_comments_count(db, author):
table = db[COMMENTS_TABLE]
return table.count(author=author)
################################## users ###################################
@connect_db
def add_user(db, user):
table = db[USERS_TABLE]
table.upsert(user, [UID_KEY])
@connect_db
def get_user(db, user_id):
table = db[USERS_TABLE]
row = table.find_one(uid=user_id)
if row is not None:
return row
return None
@connect_db
def get_users_length(db):
return len(db[USERS_TABLE])
@connect_db
def add_user_token(db, token, user_id):
table = db[USERS_TOKENS_TABLE]
table.upsert(
{TOKEN_KEY: token, USER_KEY: user_id},
[USER_KEY],
)
@connect_db
def get_user_id(db, token):
table = db[USERS_TOKENS_TABLE]
row = table.find_one(token=token)
if row is not None:
return row[USER_KEY]
return None
@connect_db
def get_user_tokens_length(db):
return len(db[USERS_TOKENS_TABLE])
@connect_db
def add_author(db, user_id, full_name, avatar_url):
table = db[AUTHORS_TABLE]
table.upsert(
{UID_KEY: user_id, FULL_NAME_KEY: full_name, AVATAR_URL_KEY: avatar_url},
[UID_KEY],
)
@connect_db
def get_author(db, user_id):
table = db[AUTHORS_TABLE]
row = table.find_one(uid=user_id)
if row is not None:
return row
return None
@connect_db
def get_authors_length(db):
return len(db[AUTHORS_TABLE])
################################## emails ###################################
@connect_db
def add_email(db, email):
table = db[EMAILS_TABLE]
email[UUID_KEY] = str(email[UUID_KEY])
table.upsert(email, [EMAIL_KEY])
@connect_db
def remove_email(db, email):
table = db[EMAILS_TABLE]
table.delete(email=email)
@connect_db
def get_all_emails(db):
table = db[EMAILS_TABLE]
return table.find()
|
StarcoderdataPython
|
1683064
|
import smart_imports
smart_imports.all()
class Road(django_models.Model):
point_1 = django_models.ForeignKey('places.Place', related_name='+', on_delete=django_models.CASCADE)
point_2 = django_models.ForeignKey('places.Place', related_name='+', on_delete=django_models.CASCADE)
length = django_models.FloatField(blank=True, default=0.0)
exists = django_models.BooleanField(default=True)
path = django_models.TextField(null=False, default='')
class Meta:
unique_together = (('point_1', 'point_2'), )
def __str__(self):
return '%s -> %s' % (self.point_1, self.point_2)
class Waymark(django_models.Model):
point_from = django_models.ForeignKey('places.Place', related_name='+', on_delete=django_models.CASCADE)
point_to = django_models.ForeignKey('places.Place', related_name='+', on_delete=django_models.CASCADE)
road = django_models.ForeignKey(Road, null=True, related_name='+', on_delete=django_models.SET_NULL)
length = django_models.FloatField(blank=True, default=0.0)
class Meta:
unique_together = (('point_from', 'point_to', 'road'), )
|
StarcoderdataPython
|
6628528
|
from flask import Flask, flash, redirect, render_template, request, session, abort
app = Flask(__name__)
@app.route("/")
def hello():
stat = []
with open('./stations.csv', 'r') as f:
temp = f.readlines()
for i in temp:
stat.append(i.strip().split(","))
return render_template('results.html',hours=list(range(24)),minutes=["{:02d}".format(i) for i in list(range(0,59,5))],stations=stat)
@app.route("/send",methods=['POST'])
def send():
form_vals = ["hour","minute","station"]
_,month,date = request.form["date_time"].split("-")
month=int(month)
date = int(date)
hour = int(request.form["hour"])
minute = int(request.form["minute"])
stat_id = int(request.form["station"])
stat = []
with open('./stations.csv', 'r') as f:
temp = f.readlines()
for i in temp:
stat.append(i.strip().split(","))
for i,j in enumerate(stat):
if(int(j[1])==stat_id):
cap = stat[i][2]
import tensorflow as tf
from tensorflow.python.framework import ops
import pickle
ops.reset_default_graph()
training_epochs = 1000
batch_size = 64
n_input = 6
n_classes = 1
n_hidden_1 = 6
n_hidden_2 = 2
weights = {
'h1': tf.Variable(pickle.load(open("./../weights/h1.p", "rb"))),
'h2': tf.Variable(pickle.load(open("./../weights/h2.p", "rb"))),
'out': tf.Variable(pickle.load(open("./../weights/hout.p", "rb")))
}
biases = {
'b1': tf.Variable(pickle.load(open("./../weights/b1.p", "rb"))),
'b2': tf.Variable(pickle.load(open("./../weights/b2.p", "rb"))),
'out': tf.Variable(pickle.load(open("./../weights/bout.p", "rb")))
}
#keep_prob = tf.placeholder("float")
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.sigmoid(layer_1)
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.sigmoid(layer_2)
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
cost = tf.reduce_mean(tf.squared_difference(y,out_layer))
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)
import numpy as np
X = np.array([month,date,hour,minute,stat_id,cap]).reshape(-1,6)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
pred = sess.run([out_layer], feed_dict={x: X})
pred_1 = pred[0][0][0]
from sklearn.ensemble import RandomForestRegressor
rf = pickle.load(open("./../weights/rf.p", "rb"))
pred_2 = rf.predict(X)[0]
pred = (pred_1+pred_2)/2
return "<br>".join(["Predicted Number of Bikes are: "+"{:.0f}".format(np.round((pred))),"Capacity at Station: "+str(cap)])
if __name__ == "__main__":
app.run(host= '0.0.0.0')
|
StarcoderdataPython
|
165662
|
<filename>cogspaces/datasets/derivative.py
import os
import re
import warnings
from math import ceil
from os.path import join
import pandas as pd
from joblib import load
from sklearn.utils import Bunch
from cogspaces.datasets.utils import get_data_dir
warnings.filterwarnings('ignore', category=FutureWarning, module='h5py')
from nilearn.datasets.utils import _fetch_files, _get_dataset_dir
def fetch_atlas_modl(data_dir=None,
url=None,
resume=True, verbose=1):
"""Download and load a multi-scale atlas computed using MODL over HCP900.
Parameters
----------
data_dir: string, optional
Path of the data directory. Used to force data storage in a non-
standard location. Default: None (meaning: default)
url: string, optional
Download URL of the dataset. Overwrite the default URL.
"""
if url is None:
url = 'http://cogspaces.github.io/assets/data/modl/'
data_dir = get_data_dir(data_dir)
dataset_name = 'modl'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
keys = ['components_64',
'components_128',
'components_453_gm',
'loadings_128_gm'
]
paths = [
'components_64.nii.gz',
'components_128.nii.gz',
'components_453_gm.nii.gz',
'loadings_128_gm.npy',
]
urls = [url + path for path in paths]
files = [(path, url, {}) for path, url in zip(paths, urls)]
files = _fetch_files(data_dir, files, resume=resume,
verbose=verbose)
params = {key: file for key, file in zip(keys, files)}
fdescr = 'Components computed using the MODL package, at various scale,' \
'from HCP900 data'
params['description'] = fdescr
params['data_dir'] = data_dir
return Bunch(**params)
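# Usage sketch (assumes network access to the cogspaces URL and the default
# data directory; not part of the original file): the returned Bunch maps the
# keys above to local file paths.
#
#     modl = fetch_atlas_modl()
#     components_img = modl['components_453_gm']   # path to a .nii.gz file
#     print(modl['description'])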
STUDY_LIST = ['knops2009recruitment', 'ds009', 'gauthier2010resonance',
'ds017B', 'ds110', 'vagharchakian2012temporal', 'ds001',
'devauchelle2009sentence', 'camcan', 'archi',
'henson2010faces', 'ds052', 'ds006A', 'ds109', 'ds108', 'la5c',
'gauthier2009resonance', 'ds011', 'ds107', 'ds116', 'ds101',
'ds002', 'ds003', 'ds051', 'ds008', 'pinel2009twins', 'ds017A',
'ds105', 'ds007', 'ds005', 'amalric2012mathematicians', 'ds114',
'brainomics', 'cauvet2009muslang', 'hcp']
def fetch_reduced_loadings(data_dir=None, url=None, verbose=False,
resume=True):
if url is None:
url = 'http://cogspaces.github.io/assets/data/loadings/'
data_dir = get_data_dir(data_dir)
dataset_name = 'loadings'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
keys = STUDY_LIST
paths = ['data_%s.pt' % key for key in keys]
urls = [url + path for path in paths]
files = [(path, url, {}) for path, url in zip(paths, urls)]
files = _fetch_files(data_dir, files, resume=resume,
verbose=verbose)
params = {key: file for key, file in zip(keys, files)}
fdescr = (
"Z-statistic loadings over a dictionary of 453 components covering "
"grey-matter `modl_atlas['components_512_gm']` "
"for 35 different task fMRI studies.")
params['description'] = fdescr
params['data_dir'] = data_dir
return params
def add_study_contrast(ys):
for study in ys:
ys[study]['study_contrast'] = ys[study]['study'] + '__' + ys[study]['task'] + '__' + \
ys[study]['contrast']
return ys
def load_reduced_loadings(data_dir=None, url=None, verbose=False, resume=True):
loadings = fetch_reduced_loadings(data_dir, url, verbose, resume)
del loadings['description']
del loadings['data_dir']
Xs, ys = {}, {}
for study, loading in loadings.items():
Xs[study], ys[study] = load(loading)
ys = add_study_contrast(ys)
return Xs, ys
def load_from_directory(dataset, data_dir=None):
data_dir = get_data_dir(data_dir)
dataset_dir = join(data_dir, dataset)
Xs, ys = {}, {}
regex = re.compile(r'data_(.*).pt')
for file in os.listdir(dataset_dir):
m = regex.match(file)
if m is not None:
study = m.group(1)
Xs[study], ys[study] = load(join(dataset_dir, file))
ys = add_study_contrast(ys)
return Xs, ys
def fetch_mask(data_dir=None, url=None, resume=True, verbose=1):
if url is None:
url = 'http://cogspaces.github.io/assets/data/hcp_mask.nii.gz'
files = [('hcp_mask.nii.gz', url, {})]
dataset_name = 'mask'
data_dir = get_data_dir(data_dir)
dataset_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
files = _fetch_files(dataset_dir, files, resume=resume,
verbose=verbose)
return files[0]
def get_chance_subjects(data_dir=None, split_by_task=False):
data, target = load_reduced_loadings(data_dir)
if split_by_task:
data, target = split_studies(data, target)
chance_level = {}
n_subjects = {}
for study, this_target in target.items():
chance_level[study] = 1. / len(this_target['contrast'].unique())
n_subjects[study] = int(ceil(len(this_target['subject'].unique()) / 2))
chance_level = pd.Series(chance_level)
n_subjects = pd.Series(n_subjects)
return chance_level, n_subjects
def _get_citations():
dirname, filename = os.path.split(os.path.abspath(__file__))
citation_keys = pd.read_csv(join(dirname, 'brainpedia.csv'), index_col=0,
header=0)
citation_keys = citation_keys.drop(columns='description')
citation_keys = citation_keys.reset_index()
r = re.compile(r'\\bibitem\{(.*)\}')
citations = {}
i = 1
dirname, filename = os.path.split(os.path.abspath(__file__))
with open(join(dirname, "article.bbl"), 'r') as f:
for line in f.readlines():
m = r.match(line)
if m:
citekey = m.group(1)
citations[citekey] = str(i)
i += 1
def apply(x):
try:
return ','.join([citations[citekey] for citekey in x.split(',')])
except KeyError:
return pd.NA
citation_keys['bibkey'] = citation_keys['citekey'].apply(apply)
return citation_keys
def get_study_info():
input_data, targets = load_reduced_loadings(data_dir=get_data_dir())
targets = pd.concat(targets.values(), axis=0)
targets['#subjects'] = targets.groupby(by=['study', 'task', 'contrast'])['subject'].transform('nunique')
targets['#contrasts_per_task'] = targets.groupby(by=['study', 'task'])['contrast'].transform('nunique')
targets['#contrasts_per_study'] = targets.groupby(by='study')['contrast'].transform('nunique')
targets['chance_study'] = 1 / targets['#contrasts_per_study']
targets['chance_task'] = 1 / targets['#contrasts_per_task']
citations = _get_citations()
targets = pd.merge(targets, citations, on='study', how='left')
targets = targets.groupby(by=['study', 'task', 'contrast']).first().sort_index().drop(columns='index').reset_index()
targets['study__task'] = targets.apply(lambda x: f'{x["study"]}__{x["task"]}', axis='columns')
targets['name_task'] = targets.apply(lambda x: f'[{x["bibkey"]}] {x["task"]}', axis='columns')
def apply(x):
comment = x['comment'].iloc[0]
if comment != '':
tasks = comment
tasks_lim = comment
else:
tasks_list = x['task'].unique()
tasks = ' & '.join(tasks_list)
if len(tasks) > 50:
tasks_lim = tasks_list[0] + ' & ...'
else:
tasks_lim = tasks
name = f'[{x["bibkey"].iloc[0]}] {tasks_lim}'
latex_name = rf'\cite{{{x["citekey"].iloc[0]}}} {tasks}'.replace('&', r'\&')
name = pd.DataFrame(data={'name': name,
'latex_name':latex_name}, index=x.index)
return name
name = targets.groupby(by='study').apply(apply)
targets = pd.concat([targets, name], axis=1)
return targets
def split_studies(input_data, target):
new_input_data = {}
new_target = {}
for study, this_target in target.items():
this_data = input_data[study]
this_data = pd.DataFrame(index=this_target.index, data=this_data)
cat_data = pd.concat([this_data, this_target], axis=1, keys=['data', 'target'], names=['type'])
for task, this_cat_data in cat_data.groupby(by=('target', 'task')):
key = study + '__' + task
new_input_data[key] = this_cat_data['data'].values
new_target[key] = this_cat_data['target']
return new_input_data, new_target
|
StarcoderdataPython
|
346529
|
"""
Copyright (c) 2020-2021 Moxin
[Software Name] is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
By: M0x1n  Time: 2020.12.23 (updated)
Ver: 1.2 (Third edition)
https://bbs.moxinwangluo.cn/
This script can be used for CMS recognition based on some website features, and of course, this script is also open source.
"""
import auxiliary
import files
import os
def discuzrobots(url):
if auxiliary.searchrobots('Discuz! X3', url) == "Pass":
print("从robots.txt检测中得到", url, "可能是Discuz X3站点!")
if auxiliary.searchrobots('/uc_server/', url) == "Pass":
print("从robots.txt检测中得到", url, "可能是Discuz X3站点!(存在uc_server)")
pass
def dederobots(url, uurl):
if auxiliary.searchrobots('/plus/feedback_js.php', url) == "Pass":
print("从robots.txt检测中得到", url, "可能是dedecms站点!")
files.dedever(uurl) # 下同,只有符合条件才会进行文件判断
if auxiliary.searchrobots('/plus/shops_buyaction.php', url) == "Pass":
print("从robots.txt检测中得到", url, "可能是dedecms站点!")
files.dedever(uurl)
pass
def allrobots(url, uurl):
discuzrobots(url)
dederobots(url, uurl)
os.remove("date\\robots.txt")
pass
|
StarcoderdataPython
|
6422650
|
from unittest import mock
import pytest
from jsonschema import RefResolver
from mypy.plugin import AnalyzeTypeContext
from jsonschema_typed import plugin
class MockInstance:
def __init__(self, name, *args, **kwargs):
self.name = name
self.args = args
self.kwargs = kwargs
def __eq__(self, other):
return self.name == other.name \
and self.args == other.args \
and self.kwargs == other.kwargs
class MockUnionType:
def __init__(self, items):
self.items = items
def __eq__(self, other):
return self.items == other.items
@pytest.fixture
def mock_analyze_context():
with mock.patch.object(plugin, 'UnionType', MockUnionType):
_mock_analyze_type_context = mock.Mock(spec=AnalyzeTypeContext)
_mock_analyze_type_context.api.named_type = MockInstance
yield _mock_analyze_type_context
def test_pattern_properties(mock_analyze_context):
schema = {
'type': 'object',
'patternProperties': {
'.*': {
'type': 'integer',
},
},
}
resolver = RefResolver.from_schema(schema)
result = plugin.APIv4(resolver, '').get_type(mock_analyze_context, schema, outer=True)
# We want to end up with a Dict[str, Union[int] type
assert result == MockInstance(
'builtins.dict',
[
MockInstance(
'builtins.str',
),
MockUnionType([
MockInstance(
'builtins.int',
),
]),
],
)
def test_pattern_properties_multiple(mock_analyze_context):
schema = {
'type': 'object',
'patternProperties': {
'foo': {
'type': 'boolean',
},
'.*': {
'type': 'integer',
},
},
}
resolver = RefResolver.from_schema(schema)
result = plugin.APIv4(resolver, '').get_type(mock_analyze_context, schema, outer=True)
# We want to end up with a Dict[str, Union[bool, int] type
assert result == MockInstance(
'builtins.dict',
[
MockInstance(
'builtins.str',
),
MockUnionType([
MockInstance(
'builtins.bool',
[],
),
MockInstance(
'builtins.int',
),
]),
],
)
def test_pattern_properties_error(mock_analyze_context):
schema = {
'type': 'object',
'properties': {
'foo': {
'type': 'boolean',
},
},
'patternProperties': {
'.*': {
'type': 'integer',
},
},
}
resolver = RefResolver.from_schema(schema)
with pytest.raises(NotImplementedError):
plugin.APIv4(resolver, '').get_type(mock_analyze_context, schema, outer=True)
|
StarcoderdataPython
|
6597994
|
<filename>hddm/tests/test_models.py<gh_stars>0
from __future__ import division
from copy import copy
import itertools
import glob
import os
import unittest
import pymc as pm
import numpy as np
import pandas as pd
import nose
pd.set_printoptions(precision=4)
from nose import SkipTest
import hddm
from hddm.diag import check_model
def diff_model(param, subj=True, num_subjs=10, change=.5, size=500):
params_cond_a = {'v':.5, 'a':2., 'z':.5, 't': .3, 'st':0., 'sv':0., 'sz':0.}
params_cond_b = copy(params_cond_a)
params_cond_b[param] += change
params = {'A': params_cond_a, 'B': params_cond_b}
data, subj_params = hddm.generate.gen_rand_data(params, subjs=num_subjs, size=size)
model = hddm.models.HDDMTruncated(data, depends_on={param:['condition']}, is_group_model=subj)
return model
class TestMulti(unittest.TestCase):
def runTest(self):
pass
def test_diff_v(self, size=100):
m = diff_model('v', subj=False, change=.5, size=size)
return m
def test_diff_a(self, size=100):
m = diff_model('a', subj=False, change=-.5, size=size)
return m
def test_diff_a_subj(self, size=100):
raise SkipTest("Disabled.")
m = diff_model('a', subj=True, change=-.5, size=size)
return m
class TestSingleBreakdown(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestSingleBreakdown, self).__init__(*args, **kwargs)
self.iter = 40
self.burn = 10
def runTest(self):
return
def test_HDDM(self, assert_=False):
includes = [[], ['z'],['z', 'sv'],['z', 'st'],['z', 'sz'], ['z', 'sz','st'], ['z', 'sz','st','sv']]
model_classes = [hddm.models.HDDMTruncated, hddm.models.HDDM]
for include, model_class in itertools.product(includes, model_classes):
params = hddm.generate.gen_rand_params(include=include)
data, params_true = hddm.generate.gen_rand_data(params, size=10, subjs=1)
model = model_class(data, include=include, bias='z' in include, is_group_model=False)
model.map(runs=1)
model.sample(self.iter, burn=self.burn)
return model.mc
def test_HDDM_group(self, assert_=False):
includes = [[], ['z'],['z', 'sv'],['z', 'st'],['z', 'sz'], ['z', 'sz','st'], ['z', 'sz','st','sv']]
model_classes = [hddm.models.HDDMTruncated, hddm.models.HDDM]
for include, model_class in itertools.product(includes, model_classes):
params = hddm.generate.gen_rand_params(include=include)
data, params_true = hddm.generate.gen_rand_data(params, size=10, subjs=4)
model = model_class(data, include=include, bias='z' in include, is_group_model=True)
model.approximate_map()
model.sample(self.iter, burn=self.burn)
return model.mc
def test_HDDM_group_only_group_nodes(self, assert_=False):
group_only_nodes = [[], ['z'], ['z', 'st'], ['v', 'a']]
model_classes = [hddm.models.HDDMTruncated, hddm.models.HDDM]
for nodes, model_class in itertools.product(group_only_nodes, model_classes):
params = hddm.generate.gen_rand_params(include=nodes)
data, params_true = hddm.generate.gen_rand_data(params, size=10, subjs=4)
model = model_class(data, include=nodes, group_only_nodes=nodes, is_group_model=True)
for node in nodes:
self.assertNotIn(node+'_subj', model.nodes_db.index)
self.assertIn(node, model.nodes_db.index)
def test_HDDM_load_save(self, assert_=False):
include = ['z', 'sz', 'st', 'sv']
dbs = ['pickle', 'sqlite']
model_classes = [hddm.models.HDDMTruncated, hddm.models.HDDM, hddm.models.HDDMRegressor]
params = hddm.generate.gen_rand_params(include=include)
data, params_true = hddm.generate.gen_rand_data(params, size=10, subjs=2)
data = pd.DataFrame(data)
data['cov'] = 1.
for db, model_class in itertools.product(dbs, model_classes):
if model_class is hddm.models.HDDMRegressor:
model = model_class(data, 'v ~ cov', include=include, is_group_model=True)
else:
model = model_class(data, include=include, is_group_model=True)
model.sample(20, dbname='test.db', db=db)
model.save('test.model')
m_load = hddm.load('test.model')
os.remove('test.db')
os.remove('test.model')
def test_HDDMTruncated_distributions(self):
params = hddm.generate.gen_rand_params()
data, params_subj = hddm.generate.gen_rand_data(subjs=4, params=params, size=10)
m = hddm.HDDMTruncated(data)
m.sample(self.iter, burn=self.burn)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'], pm.Normal)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['mu'], pm.Normal)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['tau'], pm.Deterministic)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['tau'].parents['x'], pm.Uniform)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['a'], pm.TruncatedNormal)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['a'].parents['mu'], pm.Uniform)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['a'].parents['tau'], pm.Deterministic)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['a'].parents['tau'].parents['x'], pm.Uniform)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['t'], pm.TruncatedNormal)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['t'].parents['tau'], pm.Deterministic)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['t'].parents['tau'].parents['x'], pm.Uniform)
def test_HDDM_distributions(self):
params = hddm.generate.gen_rand_params()
data, params_subj = hddm.generate.gen_rand_data(subjs=4, params=params, size=10)
m = hddm.HDDM(data)
m.sample(self.iter, burn=self.burn)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'], pm.Normal)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['mu'], pm.Normal)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['tau'], pm.Deterministic)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['tau'].parents['x'], pm.Uniform)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['a'], pm.Deterministic)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['a'].parents['x'], pm.Normal)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['a'].parents['x'].parents['mu'], pm.Normal)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['a'].parents['x'].parents['tau'], pm.Deterministic)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['a'].parents['x'].parents['tau'].parents['x'], pm.Uniform)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['t'], pm.Deterministic)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['t'].parents['x'], pm.Normal)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['t'].parents['x'].parents['mu'], pm.Normal)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['t'].parents['x'].parents['tau'], pm.Deterministic)
assert isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['t'].parents['x'].parents['tau'].parents['x'], pm.Uniform)
def test_HDDMStimCoding(self):
params_full, params = hddm.generate.gen_rand_params(cond_dict={'v': [-1, 1], 'z': [.8, .4]})
data, params_subj = hddm.generate.gen_rand_data(params=params_full, size=10)
m = hddm.HDDMStimCoding(data, stim_col='condition', split_param='v')
m.sample(self.iter, burn=self.burn)
assert isinstance(m.nodes_db.ix['wfpt(c0)']['node'].parents['v'], pm.Normal)
assert isinstance(m.nodes_db.ix['wfpt(c1)']['node'].parents['v'], pm.PyMCObjects.Deterministic)
assert isinstance(m.nodes_db.ix['wfpt(c1)']['node'].parents['v'].parents['self'], pm.Normal)
m = hddm.HDDMStimCoding(data, stim_col='condition', split_param='z')
m.sample(self.iter, burn=self.burn)
assert isinstance(m.nodes_db.ix['wfpt(c0)']['node'].parents['z'], pm.CommonDeterministics.InvLogit)
assert isinstance(m.nodes_db.ix['wfpt(c1)']['node'].parents['z'], pm.PyMCObjects.Deterministic)
assert isinstance(m.nodes_db.ix['wfpt(c1)']['node'].parents['z'].parents['a'], int)
assert isinstance(m.nodes_db.ix['wfpt(c1)']['node'].parents['z'].parents['b'], pm.CommonDeterministics.InvLogit)
class TestHDDMRegressor(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestHDDMRegressor, self).__init__(*args, **kwargs)
self.iter = 40
self.burn = 10
def runTest(self):
return
def test_simple(self):
params = hddm.generate.gen_rand_params()
data, params_true = hddm.generate.gen_rand_data(params, size=10, subjs=4)
data = pd.DataFrame(data)
data['cov'] = 1.
m = hddm.HDDMRegressor(data, 'v ~ cov')
m.sample(self.iter, burn=self.burn)
self.assertTrue(isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][0], pm.Normal))
self.assertEqual(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][0].__name__, 'v_Intercept_subj.0')
self.assertTrue(isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][1], pm.Normal))
self.assertEqual(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][1].__name__, 'v_cov_subj.0')
self.assertEqual(len(np.unique(m.nodes_db.ix['wfpt.0']['node'].parents['v'].value)), 1)
def test_link_func_on_z(self):
params = hddm.generate.gen_rand_params()
data, params_true = hddm.generate.gen_rand_data(params, size=10, subjs=4)
data = pd.DataFrame(data)
data['cov'] = 1.
link_func = lambda x: 1 / (1 + np.exp(-x))
m = hddm.HDDMRegressor(data, {'model': 'z ~ cov', 'link_func': link_func}, include='z')
m.sample(self.iter, burn=self.burn)
self.assertIn('z', m.include)
self.assertIn('z_Intercept', m.nodes_db.knode_name)
self.assertTrue(isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['z'].parents['args'][0], pm.Normal))
self.assertEqual(m.nodes_db.ix['wfpt.0']['node'].parents['z'].parents['args'][0].__name__, 'z_Intercept_subj.0')
self.assertTrue(isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['z'].parents['args'][1], pm.Normal))
self.assertEqual(m.nodes_db.ix['wfpt.0']['node'].parents['z'].parents['args'][1].__name__, 'z_cov_subj.0')
self.assertEqual(len(np.unique(m.nodes_db.ix['wfpt.0']['node'].parents['z'].value)), 1)
self.assertEqual(m.model_descrs[0]['link_func'](2), link_func(2))
def test_no_group(self):
params = hddm.generate.gen_rand_params()
data, params_true = hddm.generate.gen_rand_data(params, size=10, subjs=1)
data['cov'] = 1.
del data['subj_idx']
m = hddm.HDDMRegressor(data, 'v ~ cov', is_group_model=False, depends_on={})
m.sample(self.iter, burn=self.burn)
print m.nodes_db.index
self.assertTrue(isinstance(m.nodes_db.ix['wfpt']['node'].parents['v'].parents['args'][0], pm.Normal))
self.assertEqual(m.nodes_db.ix['wfpt']['node'].parents['v'].parents['args'][0].__name__, 'v_Intercept')
self.assertTrue(isinstance(m.nodes_db.ix['wfpt']['node'].parents['v'].parents['args'][1], pm.Normal))
self.assertEqual(m.nodes_db.ix['wfpt']['node'].parents['v'].parents['args'][1].__name__, 'v_cov')
self.assertEqual(len(np.unique(m.nodes_db.ix['wfpt']['node'].parents['v'].value)), 1)
def test_two_covariates(self):
params = hddm.generate.gen_rand_params()
data, params_true = hddm.generate.gen_rand_data(params, size=10, subjs=4)
data = pd.DataFrame(data)
data['cov1'] = 1.
data['cov2'] = -1
m = hddm.HDDMRegressor(data, 'v ~ cov1 + cov2')
m.sample(self.iter, burn=self.burn)
self.assertTrue(isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][0], pm.Normal))
self.assertEqual(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][0].__name__, 'v_Intercept_subj.0')
self.assertTrue(isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][1], pm.Normal))
self.assertEqual(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][1].__name__, 'v_cov1_subj.0')
self.assertEqual(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][2].__name__, 'v_cov2_subj.0')
self.assertEqual(len(np.unique(m.nodes_db.ix['wfpt.0']['node'].parents['v'].value)), 1)
def test_two_regressors(self):
params = hddm.generate.gen_rand_params()
data, params_true = hddm.generate.gen_rand_data(params, size=10, subjs=4)
data = pd.DataFrame(data)
data['cov1'] = 1.
data['cov2'] = -1
m = hddm.HDDMRegressor(data, ['v ~ cov1', 'a ~ cov2'])
m.sample(self.iter, burn=self.burn)
self.assertTrue(isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][0], pm.Normal))
self.assertTrue(isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['a'].parents['args'][0], pm.Normal))
self.assertEqual(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][0].__name__, 'v_Intercept_subj.0')
self.assertEqual(m.nodes_db.ix['wfpt.0']['node'].parents['a'].parents['args'][0].__name__, 'a_Intercept_subj.0')
self.assertTrue(isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][1], pm.Normal))
self.assertTrue(isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['a'].parents['args'][1], pm.Normal))
self.assertEqual(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][1].__name__, 'v_cov1_subj.0')
self.assertEqual(m.nodes_db.ix['wfpt.0']['node'].parents['a'].parents['args'][1].__name__, 'a_cov2_subj.0')
def test_group_only(self):
params = hddm.generate.gen_rand_params()
data, params_true = hddm.generate.gen_rand_data(params, size=10, subjs=4)
data = pd.DataFrame(data)
data['cov'] = 1.
m = hddm.HDDMRegressor(data, 'v ~ cov', group_only_regressors=True)
m.sample(self.iter, burn=self.burn)
self.assertTrue(isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][0], pm.Normal))
self.assertEqual(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][0].__name__, 'v_Intercept')
self.assertTrue(isinstance(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][1], pm.Normal))
self.assertEqual(m.nodes_db.ix['wfpt.0']['node'].parents['v'].parents['args'][1].__name__, 'v_cov')
self.assertEqual(len(np.unique(m.nodes_db.ix['wfpt.0']['node'].parents['v'].value)), 1)
def test_group_only_depends(self):
params = hddm.generate.gen_rand_params(cond_dict={'v': [1, 2, 3]})
data, params_true = hddm.generate.gen_rand_data(params[0], size=10, subjs=4)
data = pd.DataFrame(data)
data['cov'] = 1.
# Create one merged column
data['condition2'] = 'merged'
data[data.condition == 'c1']['condition2'] = 'single'
self.assertRaises(AssertionError, hddm.HDDMRegressor, data, 'v ~ cov', depends_on={'v_Intercept': 'condition2'}, group_only_regressors=True)
def test_contrast_coding(self):
params = hddm.generate.gen_rand_params(cond_dict={'v': [1, 2, 3]})
data, params_true = hddm.generate.gen_rand_data(params[0], size=10, subjs=4)
data = pd.DataFrame(data)
data['cov'] = 1.
m = hddm.HDDMRegressor(data, 'v ~ cov * C(condition)',
depends_on={'a': 'condition'})
m.sample(self.iter, burn=self.burn)
self.assertTrue(isinstance(m.nodes_db.ix['wfpt(c1).0']['node'].parents['v'].parents['args'][0], pm.Normal))
self.assertEqual(m.nodes_db.ix['wfpt(c1).0']['node'].parents['v'].parents['args'][0].__name__, 'v_Intercept_subj.0')
self.assertTrue(isinstance(m.nodes_db.ix['wfpt(c1).0']['node'].parents['v'].parents['args'][1], pm.Normal))
self.assertEqual(m.nodes_db.ix['wfpt(c1).0']['node'].parents['v'].parents['args'][1].__name__, 'v_C(condition)[T.c1]_subj.0')
self.assertEqual(len(np.unique(m.nodes_db.ix['wfpt(c1).0']['node'].parents['v'].value)), 1)
def test_posterior_plots_breakdown():
params = hddm.generate.gen_rand_params()
data, params_subj = hddm.generate.gen_rand_data(params=params, subjs=4)
m = hddm.HDDM(data)
m.sample(100, burn=10)
m.plot_posterior_predictive()
m.plot_posterior_quantiles()
m.plot_posteriors()
if __name__=='__main__':
print "Run nosetest.py"
|
StarcoderdataPython
|
9629576
|
<reponame>dw0rdptr/2019_IoT_GoToDouble
#Must be run after arduino starts sending
#delay-proof
import os
import sys
from pathlib import Path
import django
from bluetooth import *
currentPath = Path(os.getcwd())
sys.path.append(str(currentPath.parent.parent))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webview.settings')
django.setup()
client_socket=BluetoothSocket(RFCOMM)
client_socket.connect(("98:D3:71:FD:7C:19",1))
from monitor.models import Fposition, Sposition
def f_save(distance,angle):
position = Fposition.objects.create(angle=angle,distance=distance)
position.save()
dp, ap = 0, 0
flag = False
msg = ''
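# Frame format (inferred from the loop below, not part of the original file):
# the Arduino sends each reading as "!<distance> <angle>#"; characters between
# '!' and '#' are accumulated in `msg`, and a frame that fails to parse falls
# back to the previous reading (dp, ap).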
while True:
try:
chunk = client_socket.recv(32).decode("utf-8")
except:
continue
for elem in chunk:
if elem == '!':
flag = True
continue
if elem == '#':
msg = msg.strip()
try:
d,a = map(float,msg.split())
except:
d = dp
a = ap
#print(d,a)
f_save(d,a)
dp = d
ap = a
flag = False
msg = ''
if flag:
msg += elem
'''
if msg == b'[':
continue
else :
msg = msg.replace(b'[',b'') #all sorts of exceptions
msg = msg.replace(b'\r',b'')
dic = msg.split(b'\n')
try:
print(float(dic[0]),float(dic[1]))
f_save(float(dic[0]),float(dic[1]))
except:
continue
'''
client_socket.close()
|
StarcoderdataPython
|
5032009
|
<gh_stars>0
from machine import Pin, PWM
from neopixel import NeoPixel
from time import sleep, sleep_ms
class Lamp:
def __init__(self, n_pin, n_leds):
self.LEDS = n_leds
self.LED_MIDDLE = self.LEDS // 2
# NEOPIXEL output pin
pin = Pin(n_pin, Pin.OUT, Pin.PULL_UP)
self.np = NeoPixel(pin, self.LEDS)
for i in range(self.LEDS):
self.np[i] = (0,0,0)
self.np.write()
self.eyeCorrection = [ 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4,
4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7,
7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11,
11, 12, 12, 12, 13, 13, 13, 14, 14, 15, 15, 15, 16, 16, 17, 17,
17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25,
25, 26, 26, 27, 28, 28, 29, 29, 30, 31, 31, 32, 32, 33, 34, 34,
35, 36, 37, 37, 38, 39, 39, 40, 41, 42, 43, 43, 44, 45, 46, 47,
47, 48, 49, 50, 51, 52, 53, 54, 54, 55, 56, 57, 58, 59, 60, 61,
62, 63, 64, 65, 66, 67, 68, 70, 71, 72, 73, 74, 75, 76, 77, 79,
80, 81, 82, 83, 85, 86, 87, 88, 90, 91, 92, 94, 95, 96, 98, 99,
100, 102, 103, 105, 106, 108, 109, 110, 112, 113, 115, 116, 118, 120, 121, 123,
124, 126, 128, 129, 131, 132, 134, 136, 138, 139, 141, 143, 145, 146, 148, 150,
152, 154, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181,
183, 185, 187, 189, 191, 193, 196, 198, 200, 202, 204, 207, 209, 211, 214, 216,
218, 220, 223, 225, 228, 230, 232, 235, 237, 240, 242, 245, 247, 250, 252, 255]
self.callbacks = {
"setPixelColor": {"call": self.setPixelColor, "parameters": "nLed, #hexColor", "description": "Set one pixel color (hex format)"},
"setAllPixelsColor": {"call": self.setAllPixelsColor, "parameters": "#hexColor", "description": "Set all pixel colors (hex format)"},
"morphColors": {"call": self.morphColors, "parameters": "#hexColor, delay_ms", "description": "Morph each pixel to target color (hex format). Set delay between steps"},
"fadeinFromMiddle": {"call": self.fadeinFromMiddle, "parameters": "delay_ms, #hexColor=#FFFFFF", "description": "Fade in animation, Set delay between steps, target color is white by default, can be overriden"},
"fadeoutFromMiddle": {"call": self.fadeoutFromMiddle, "parameters": "delay_ms", "description": "Fade out animation"},
"gradientSimple": {"call": self.gradientSimple, "parameters": "#fromHexColor, #toHexColor", "description": "Set gradient from one color (hex format) to another"},
"gradient": {"call": self.gradient, "parameters": "#startHexColor, #targetHexColor, #endHexColor", "description": "Set complex gradient from one color (hex format) to target color to end color. Position of middle target color is expressed in percents"}
}
def setPixelColor(self, arg):
n = arg[0]
hexy = arg[1]
rgb = self.hex_to_rgb(hexy)
self.np[n] = self.eyeCorrectRgb(rgb)
def setAllPixelsColor(self, arg):
hexy = arg[0]
rgb = self.hex_to_rgb(hexy)
t_color = self.eyeCorrectRgb(rgb)
for a in range(self.LEDS):
self.np[a] = t_color
self.np.write()
def morphColors(self, arg):
tC = arg[0]
delay_ms = arg[1]
step = 1
if (len(arg)>2):
step = arg[2]
targetColor = self.hex_to_rgb(tC)
for a in range(0, 101, step):
t = a/100.0
for i in range(self.LEDS):
c = self.lerpColor(self.np[i], targetColor, t)
self.np[i] = self.eyeCorrectRgb(c)
self.np.write()
sleep_ms(delay_ms)
def fadeinFromMiddle(self, arg):
delay_ms = arg[0]
color = "#FFFFFF"
if (len(arg)>1):
color = arg[1]
c = self.hex_to_rgb(color)
cc = self.eyeCorrectRgb(c)
for a in range(self.LED_MIDDLE+1):  # fan out from the strip's middle (was hardcoded for a 42-LED strip)
self.np[self.LED_MIDDLE - a] = cc
self.np[self.LED_MIDDLE - 1 + a] = cc
self.np.write()
sleep_ms(delay_ms)
def fadeoutFromMiddle(self, arg):
delay_ms = arg[0]
for a in range(self.LED_MIDDLE+1):
self.np[a] = (0, 0, 0)
self.np[(self.LEDS-1)-a] = (0, 0, 0)
self.np.write()
sleep_ms(delay_ms)
def gradientSimple(self, arg):
c1hex = arg[0]
c2hex = arg[1]
c1 = self.hex_to_rgb(c1hex)
c2 = self.hex_to_rgb(c2hex)
for a in range(self.LEDS):
t = (1.0/self.LEDS)*a
color = self.lerpColor(c1,c2,t)
self.np[a] = self.eyeCorrectRgb(color)
self.np.write()
def gradient(self, arg):
startColor = arg[0]
targetColor = arg[1]
endColor = arg[2]
targetPosition = arg[3] # position in %
startC = self.hex_to_rgb(startColor)
targetC = self.hex_to_rgb(targetColor)
endC = self.hex_to_rgb(endColor)
# start to target
ledTargetPos = int((self.LEDS/100.0)*targetPosition)
for a in range(ledTargetPos):
t = (1.0/ledTargetPos)*a
color = self.lerpColor(startC,targetC,t)
self.np[a] = self.eyeCorrectRgb(color)
deltaLed = self.LEDS-ledTargetPos
for a in range(ledTargetPos, self.LEDS):
t = (1.0/deltaLed)*(a-ledTargetPos)
color = self.lerpColor(targetC,endC,t)
self.np[a] = self.eyeCorrectRgb(color)
self.np.write()
# PRIVATE FUNCTIONS
def hex_to_rgb(self, value):
"""Return (red, green, blue) for the color given as #rrggbb."""
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
def lerpColor(self, c1, c2, amt):
"""Return (red, green, blue) for two colors given as tuples and amt between 0.0 - 1.0"""
if (amt < 0.0):
amt = 0.0
if (amt > 1.0):
amt = 1.0
r1 = c1[0]
g1 = c1[1]
b1 = c1[2]
r2 = c2[0]
g2 = c2[1]
b2 = c2[2]
r = int(r1 + (r2-r1)*amt)
g = int(g1 + (g2-g1)*amt)
b = int(b1 + (b2-b1)*amt)
return (r,g,b)
def eyeCorrectRgb(self, rgb_tuple):
r = self.eyeCorrection[rgb_tuple[0]]
g = self.eyeCorrection[rgb_tuple[1]]
b = self.eyeCorrection[rgb_tuple[2]]
return (r,g,b)
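# Usage sketch (hypothetical pin and LED count, assuming a MicroPython board
# with the neopixel module available; not part of the original file):
#
#     lamp = Lamp(4, 42)                          # 42-LED strip on GPIO 4
#     lamp.setAllPixelsColor(["#FF8800"])         # callbacks take argument lists
#     lamp.gradientSimple(["#FF0000", "#0000FF"])
#     lamp.fadeoutFromMiddle([20])                # 20 ms between steps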
|
StarcoderdataPython
|
4841054
|
b, c = map(int, input().split())
if b == 0:
ans = c
elif b > 0:
if c < 3:
ans = c + 1
elif 3 <= c <= 2 * b:
ans = 2 * c - 1
else:
ans = 2 * b + c - 1
else:
b = abs(b)
if c < 3:
ans = c + 1
elif 3 <= c <= 2 * b + 1:
ans = 2 * c - 1
else:
ans = 2 * b + c
print(ans)
|
StarcoderdataPython
|
1934566
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Definition of a basic seq2seq model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pydoc import locate
import tensorflow as tf
from seq2seq.contrib.seq2seq import helper as tf_decode_helper
from seq2seq.models.seq2seq_model import Seq2SeqModel
from seq2seq.graph_utils import templatemethod
from seq2seq.models import bridges
class BasicSeq2Seq(Seq2SeqModel):
"""Basic Sequence2Sequence model with a unidirectional encoder and decoder.
The last encoder state is used to initialize the decoder and thus both
must share the same type of RNN cell.
Args:
source_vocab_info: An instance of `VocabInfo`
for the source vocabulary
target_vocab_info: An instance of `VocabInfo`
for the target vocabulary
params: A dictionary of hyperparameters
"""
def __init__(self, params, mode, name="basic_seq2seq"):
super(BasicSeq2Seq, self).__init__(params, mode, name)
self.encoder_class = locate(self.params["encoder.class"])
self.decoder_class = locate(self.params["decoder.class"])
@staticmethod
def default_params():
params = Seq2SeqModel.default_params().copy()
params.update({
"bridge.class": "seq2seq.models.bridges.InitialStateBridge",
"bridge.params": {},
"encoder.class": "seq2seq.encoders.UnidirectionalRNNEncoder",
"encoder.params": {}, # Arbitrary parameters for the encoder
"decoder.class": "seq2seq.decoders.BasicDecoder",
"decoder.params": {} # Arbitrary parameters for the decoder
})
return params
def _create_bridge(self, encoder_outputs, decoder_state_size):
"""Creates the bridge to be used between encoder and decoder"""
bridge_class = locate(self.params["bridge.class"]) or \
getattr(bridges, self.params["bridge.class"])
return bridge_class(
encoder_outputs=encoder_outputs,
decoder_state_size=decoder_state_size,
params=self.params["bridge.params"],
mode=self.mode)
def _create_decoder(self, _encoder_output, _features, _labels):
"""Creates a decoder instance based on the passed parameters."""
return self.decoder_class(
params=self.params["decoder.params"],
mode=self.mode,
vocab_size=self.target_vocab_info.total_size)
def _decode_train(self, decoder, bridge, _encoder_output, _features, labels):
"""Runs decoding in training mode"""
target_embedded = tf.nn.embedding_lookup(self.target_embedding,
labels["target_ids"])
helper_train = tf_decode_helper.TrainingHelper(
inputs=target_embedded[:, :-1],
sequence_length=labels["target_len"] - 1)
decoder_initial_state = bridge()
return decoder(decoder_initial_state, helper_train)
def _decode_infer(self, decoder, bridge, _encoder_output, features, labels):
"""Runs decoding in inference mode"""
batch_size = self.batch_size(features, labels)
if self.use_beam_search:
batch_size = self.params["inference.beam_search.beam_width"]
target_start_id = self.target_vocab_info.special_vocab.SEQUENCE_START
helper_infer = tf_decode_helper.GreedyEmbeddingHelper(
embedding=self.target_embedding,
start_tokens=tf.fill([batch_size], target_start_id),
end_token=self.target_vocab_info.special_vocab.SEQUENCE_END)
decoder_initial_state = bridge()
return decoder(decoder_initial_state, helper_infer)
@templatemethod("encode")
def encode(self, features, labels):
source_embedded = tf.nn.embedding_lookup(self.source_embedding,
features["source_ids"])
encoder_fn = self.encoder_class(self.params["encoder.params"], self.mode)
return encoder_fn(source_embedded, features["source_len"])
@templatemethod("decode")
def decode(self, encoder_output, features, labels):
decoder = self._create_decoder(encoder_output, features, labels)
if self.use_beam_search:
decoder = self._get_beam_search_decoder(decoder)
bridge = self._create_bridge(
encoder_outputs=encoder_output,
decoder_state_size=decoder.cell.state_size)
if self.mode == tf.contrib.learn.ModeKeys.INFER:
return self._decode_infer(decoder, bridge, encoder_output, features,
labels)
else:
return self._decode_train(decoder, bridge, encoder_output, features,
labels)
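# Configuration sketch (hypothetical overrides; not part of the original
# file): hyperparameters are plain dicts merged over default_params(), and the
# encoder/decoder classes are resolved from their dotted import paths.
#
#     params = BasicSeq2Seq.default_params()
#     params["encoder.params"] = {"rnn_cell": {"cell_class": "GRUCell"}}
#     params["decoder.params"] = {"rnn_cell": {"cell_class": "GRUCell"}}
#     model = BasicSeq2Seq(params, mode=tf.contrib.learn.ModeKeys.TRAIN)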
|
StarcoderdataPython
|
1600543
|
<filename>sprinkles/commands.py<gh_stars>0
import click
import tomlkit
from ._secrets import get_values
from ._templating import render
@click.command()
@click.option('--template', default=None, help='Template file to use')
@click.option('--target', default=None, help='Target config file')
@click.option('--secret-arn', default=None, help='ARN of the AWS Secrets Manager secret to bind')
@click.option('--config', default='.sprinklesrc', help='sprinkles TOML config file')
def generate_config(template, target, secret_arn, config):
if template is None:
with open(config, 'r') as config_file:
sprinkles_config = tomlkit.loads(config_file.read())
_secret_arn = secret_arn or sprinkles_config['secret']['arn']
values = get_values(_secret_arn)
for section, files in sprinkles_config['files'].items():
template_path = files['template']
target_path = files['target']
click.echo(
"Processing [{section}]: {template} -> {target}".format(
section=section,
template=template_path,
target=target_path
)
)
render(template_path, values, target_path)
elif secret_arn is not None and template is not None:
values = get_values(secret_arn)
render(template, values, target)
if __name__ == '__main__':
generate_config()
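# Example .sprinklesrc (illustrative values; the structure is inferred from
# the keys read above: [secret].arn plus one [files.<section>] table per
# rendered file):
#
#     [secret]
#     arn = "arn:aws:secretsmanager:us-east-1:123456789012:secret:app-config"
#
#     [files.env]
#     template = "templates/app.env.tmpl"
#     target = ".env"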
|
StarcoderdataPython
|
4950790
|
<filename>browsers/opera.py
import multiprocessing
import time
import os
from selenium.webdriver import DesiredCapabilities
from misc.browser import Driver, open_browser
from selenium import webdriver
from selenium.webdriver.chrome import service
from selenium.webdriver.chrome.options import Options
from browsers.screenshot import screenshot_website, kill_browser
from misc.setup_logger import logger
CURRENT_DIR = os.getcwd()
class OperaDriver(Driver):
"""
Class that represents Opera Driver
"""
def __init__(self, path, version, capabilities, old=False):
super().__init__(path, version, capabilities)
self.old = old
def set_driver_path(self):
"""
Setting Opera driver path.
:return: None
"""
logger.info("Preparing driver path.")
self.path = CURRENT_DIR + "\\drivers\\operadrivers\\operadriver-" + self.version + "\\operadriver.exe"
logger.info("Driver path set.")
def set_driver_version(self, browser_version):
"""
Returns the folder name for operadrivers of the given version.
:param browser_version: Browser version
:return: Driver version
"""
logger.info("Getting operadriver version.")
if browser_version >= 63:
self.version = "76"
if browser_version == 62:
self.version = "2.41"
if 58 < browser_version < 62:
self.version = "2.45"
if 56 < browser_version <= 58:
self.version = "2.36"
if browser_version == 56:
self.version = "2.40"
if browser_version == 55:
self.version = "2.38"
if browser_version == 54:
self.version = "2.37"
if browser_version == 53:
self.version = "2.36"
if 50 < browser_version <= 52:
self.version = "2.35"
if browser_version == 50:
self.version = "2.33"
if browser_version == 49:
self.version = "2.33"
if browser_version == 48:
self.version = "2.30"
if browser_version == 47:
self.version = "2.30"
if 42 < browser_version <= 46:
self.version = "2.29"
if 40 < browser_version <= 42:
self.version = "2.27"
if 26 < browser_version <= 40:
self.version = "0.2.2"
if browser_version == 26:
self.version = "0.2.0"
if browser_version <= 25:
self.version = "0.1.0"
logger.info("Operadriver version - {}".format(self.version))
def set_capabilities(self, browser):
"""
Setting capabilities for Opera.
:return: Capabilities
"""
logger.info("Setting capabilities.")
opts = Options()
if not self.old:
# In older version these switches do not work, but alerts are there by default.
opts.add_experimental_option("excludeSwitches", ["ignore-certificate-errors", "ignore-ssl-errors"])
capabilities = DesiredCapabilities.OPERA
capabilities.update(opts.to_capabilities())
capabilities["acceptInsecureCerts"] = False
capabilities["acceptSslCerts"] = False
capabilities["operaOptions"] = {"binary": "C:\\Program Files\\Opera\\" + browser.version + "\\opera.exe"}
logger.info("Capabilities are set.")
self.capabilities = capabilities
def set_opera_driver_oldness(self):
"""
Checking if Opera driver version is lower than 2.40. If yes, sets the old value to True, False otherwise
:return: None
"""
logger.info("Checking if the Opera version is lower than 2.40")
if self.version == "0.2.2":
self.version = "0.2"
if self.version == "0.2.0":
self.version = "0.2"
if self.version == "0.1.0":
self.version = "0.1"
if float(self.version) < 2.40:
logger.info("Opera version is using old driver. - True")
self.old = True
def create_opera_driver(self):
"""
Setting Opera driver to be able to open URL.
:return: WebDriver
"""
logger.info("Preparing driver.")
webdriver_service = service.Service(self.path)
webdriver_service.start()
driver = webdriver.Remote(webdriver_service.service_url, self.capabilities)
driver.maximize_window()
logger.info("Driver is set.")
return driver
def opera(browser):
"""
Opens Opera and makes a screenshot of the desired website.
:param browser: Browser
:return: None
"""
browser.set_short_browser_version()
opera_driver = prepare_driver(browser)
driver = opera_driver.create_opera_driver()
time.sleep(2)
try:
open_opera(driver, browser, old_driver=opera_driver.old)
except Exception as e:
logger.error("Exception in opera(): - {}".format(e))
finally:
driver.quit()
kill_browser()
def open_opera(driver, browser, old_driver=False):
"""
Run screenshot in different thread.
:param driver: Driver
:param browser: Browser
:param old_driver: True if opera runs old driver, False otherwise
:return: None
"""
try:
if old_driver:
logger.info("Starting timeout_and_screenshot.")
timeout_and_screenshot(driver, browser)
else:
open_browser(driver, browser.url)
screenshot_website(driver, browser)
except Exception as e:
logger.error("Error in open_opera: {}".format(e))
def timeout_and_screenshot(driver, browser):
"""
Opens the url in different thread so that it is not waiting until the page is loaded.
:param driver: Driver
:param browser: Browser
:return: None
"""
try:
p1 = multiprocessing.Process(name="p1", target=open_browser, args=(driver, browser.url))
logger.info("Starting process for open_browser.")
p1.start()
p1.join(3)
logger.info("Going to take screenshot from timeout_and_screenshot.")
if opera:
screenshot_website(driver, browser, True, False)
except Exception as e:
logger.error("Exception in multiprocessing: {}".format(e))
finally:
logger.info("Checking if thread is active")
terminate_thread(p1)
def terminate_thread(thread):
"""
Terminate the thread.
:param thread: Thread to be terminated
:return: None
"""
if thread.is_alive():
logger.info("Terminating the thread")
thread.terminate()
thread.join()
def prepare_driver(browser):
"""
Preparing Operadriver to run Opera via Selenium
:param browser: Browser object
:return: Driver object ready to be used
"""
driver = OperaDriver("", 0, None)
driver.set_driver_version(browser.short_version)
driver.set_driver_path()
driver.set_opera_driver_oldness()
driver.set_capabilities(browser)
return driver
|
StarcoderdataPython
|
9728167
|
<gh_stars>0
#!/usr/bin/env python3
import fileinput
import re
import typing
def parse_records() -> list[dict[str, str]]:
ret = []
ret_single = {}
for line in fileinput.input():
if len(line.strip()) == 0:
if len(ret_single) > 0:
ret.append(ret_single)
ret_single = {}
else:
for w in line.split():
k, v = w.split(':')
ret_single[k] = v
if len(ret_single) > 0:
ret.append(ret_single)
return ret
def valid_a(record: dict[str, str]) -> bool:
return record.keys() >= {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
def valid_b(record: dict[str, str]) -> bool:
return (bool(re.match('^(19[2-9][0-9]|200[0-2])$', record.get("byr", "")))
and bool(re.match('^20(20|1[0-9])$', record.get("iyr", "")))
and bool(re.match('^20(30|2[0-9])$', record.get("eyr", "")))
and bool(re.match('^(1[5-8][0-9]cm|19[0-3]cm|59in|6[0-9]in|7[0-6]in)$', record.get("hgt", "")))
and bool(re.match('^#[0-9a-f]{6}$', record.get("hcl", "")))
and bool(re.match('^(amb|blu|brn|gry|grn|hzl|oth)$', record.get("ecl", "")))
and bool(re.match('^[0-9]{9}$', record.get("pid", ""))))
if __name__ == '__main__':
records = parse_records()
print(f"Part a: {sum((valid_a(r) for r in records))}")
print(f"Part b: {sum((valid_b(r) for r in records))}")
|
StarcoderdataPython
|
11257076
|
<filename>src/Model/Config/freeconf_config.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
class Config(object):
"""Config model object. Store information of Freeconf at all.
Information about all packages and lang preferences.
"""
def __init__(self):
self._config_file = "freeconf.xml"
self._packages = {}
self._lang = None
@property
def packages(self):
""" packages getter
:return: packages dictionary
"""
return self._packages
    def package(self, name):
        """ package getter
        :param name: name of package
        :return: configuration of package with given name, or None if no package
        with the given name is found
        """
        return self._packages.get(name)
@property
def config_file(self):
""" config_file getter
:return: path of config file
"""
return self._config_file
@property
def lang(self):
""" lang getter
:return: lang code
"""
return self._lang
@lang.setter
def lang(self, lang):
"""lang setter
:param lang: lang code
"""
self._lang = lang
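# A minimal usage sketch of the Config model (the package data below is hypothetical):
if __name__ == "__main__":
    cfg = Config()
    cfg.packages["demo"] = {"config_dir": "/etc/demo"}
    cfg.lang = "en"
    print(cfg.config_file, cfg.lang, cfg.package("demo"))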
|
StarcoderdataPython
|
195056
|
import time
from org.white5moke.blockchain.Block import Block
from org.white5moke.blockchain.Blockchain import Blockchain
from org.white5moke.blockchain.Wallet import Wallet
def load_test_blocks(blockchain):
b = Block(blockchain.last_block().index + 1, 'value is soul', int(time.time() * 1000), blockchain.last_block().hash)
blockchain.add_block(b)
b1 = Block(
blockchain.last_block().index + 1,
'b00psi3',
int(time.time() * 1000),
blockchain.last_block().hash
)
blockchain.add_block(b1)
b2 = Block(
blockchain.last_block().index + 1,
'tremble like a FLOWAH!',
int(time.time() * 1000),
blockchain.last_block().hash
)
blockchain.add_block(b2)
def start_up():
wallet = Wallet()
blockchain = Blockchain()
# load_test_blocks(blockchain)
print(blockchain)
# node = Node()
if __name__ == '__main__':
start_up()
|
StarcoderdataPython
|
12807330
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from sftpcloudfs.constants import version, project_url
def readme():
try:
return open('README.md').read()
    except OSError:
return ""
setup(name='sftp-cloudfs',
version=version,
description='SFTP interface to OpenStack Object Storage (Swift)',
long_description=readme(),
author='<NAME>',
author_email='<EMAIL>',
url=project_url,
license='MIT',
include_package_data=True,
zip_safe=False,
install_requires=['paramiko>=1.7.6', 'python-swiftclient>=2.0.0', 'python-daemon>=1.5',
'python-memcached>=1.45', 'ftp-cloudfs>=0.30'],
scripts=['bin/sftpcloudfs'],
packages = find_packages(exclude=['tests']),
tests_require = ["nose"],
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Environment :: No Input/Output (Daemon)',
'License :: OSI Approved :: MIT License',
],
test_suite = "nose.collector",
)
|
StarcoderdataPython
|
9630597
|
<gh_stars>0
"""
Class - Object - Functions\n
1. Class\n
2. Functions\n
# Python Classes/Objects\n
`ds.chunk_5.python_class`\n
Python is an object oriented programming language.\n
Almost everything in Python is an object, with its properties and methods.\n
A Class is like an object constructor, or a "blueprint" for creating objects.\n
# Python Functions\n
`ds.chunk_5.function`\n
A function is a block of code which only runs when it is called.\n
You can pass data, known as parameters, into a function.\n
A function can return data as a result.\n
"""
|
StarcoderdataPython
|
5020793
|
import json
class Config:
"""
Reads a JSON file and stores its data. Specific values can be retreived via
value paths that are structured like absolute unix file system paths, e.g.:
/client/port
"""
def __init__(self):
self._values = {}
def _load_values(self, data: any, parent_path=""):
"""
Load config values from json recursively, make them
addressable like unix files
"""
if type(data) is dict:
for key in data.keys():
self._load_values(data[key], parent_path + "/" + str(key))
else:
self._values[parent_path] = data
def get(self, value_path:str, default: any=None) -> any:
"""
Returns the value for given key, or default value if it does not exist
"""
return self._values.get(value_path, default)
def _load_from_file(self, filename):
"""
Loads values from given config file
"""
try:
with open(filename, "r") as config_file:
data = json.loads(config_file.read())
self._load_values(data)
except IOError as err:
print(err)
# Global config file object
CONFIG = Config()
CONFIG._load_from_file("config.json")
# Get additional config files from the main config file and load their values.
# If these additional files contain the same keys as the main file, the main
# files values will be overwritten.
config_files = CONFIG.get("/configFiles")
if type(config_files) is list:
for config_file in config_files:
CONFIG._load_from_file(config_file)
del(config_files)
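# Usage sketch: values are addressed with unix-style paths built from nested JSON
# keys, so {"client": {"port": 8080}} is read back as shown in the class docstring:
#
#     CONFIG.get("/client/port", default=8080)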
|
StarcoderdataPython
|
5093725
|
<filename>app/streaming.py<gh_stars>1-10
import queue
import socket
import threading
from .broadcaster import Broadcaster
# try:
# from .SimpleWebSocketServer.SimpleWebSocketServer import WebSocket
# except ImportError as e:
# print(("Failed to import dependency: {}".format(e)))
# print("Please ensure the SimpleWebSocketServer submodule has been correctly installed: git submodule update --init")
# sys.exit(1)
from SimpleWebSocketServer import WebSocket
class StreamingClient(object):
def __init__(self):
self.streamBuffer: bytes = b""
self.streamQueue = queue.Queue()
self.streamThread = threading.Thread(target = self.stream)
self.streamThread.daemon = True
self.connected = True
self.kill = False
super(StreamingClient, self).__init__()
def start(self):
self.streamThread.start()
def transmit(self, data):
return len(data)
def stop(self):
pass
def bufferStreamData(self, data):
#use a thread-safe queue to ensure stream buffer is not modified while we're sending it
self.streamQueue.put(data)
def stream(self):
while self.connected:
#this call blocks if there's no data in the queue, avoiding the need for busy-waiting
self.streamBuffer += self.streamQueue.get()
#check if kill or connected state has changed after being blocked
if (self.kill or not self.connected):
self.stop()
return
while (len(self.streamBuffer) > 0):
streamedTo = self.transmit(self.streamBuffer)
if (streamedTo and streamedTo >= 0):
self.streamBuffer = self.streamBuffer[streamedTo:]
else:
self.streamBuffer = b""
class TCPStreamingClient(StreamingClient):
def __init__(self, sock):
super(TCPStreamingClient, self).__init__()
self.sock = sock
self.sock.settimeout(5)
def stop(self):
self.sock.close()
def transmit(self, data):
try:
return self.sock.send(data)
except socket.error as e:
self.connected = False
self.sock.close()
class WebSocketStreamingClient(WebSocket, StreamingClient):
def __init__(self, *args, **kwargs):
super(WebSocketStreamingClient, self).__init__(*args, **kwargs)
def stop(self):
pass
def transmit(self, data):
self.sendMessage("data:image/jpg;base64," + data)
return len(data)
def handleConnected(self):
self.start()
Broadcaster._instance.webSocketClients.append(self)
def handleClose(self):
self.connected = False
|
StarcoderdataPython
|
9679912
|
<filename>layer_loader/flatten.py
from typing import Type
from .types import Layer, LayerElement, Path
class TypeMismatchError(ValueError):
def __init__(
self,
path: Path,
upper_type: Type[object],
lower_type: Type[object],
) -> None:
super().__init__(
"Entry type mismatch at {!r}, got types {} and {}".format(
'.'.join(path),
upper_type.__name__,
lower_type.__name__,
),
)
self.path = path
self.upper_type = upper_type
self.lower_type = lower_type
def validate_types(
path: Path,
upper_value: LayerElement,
lower_value: LayerElement,
) -> None:
upper_type = type(upper_value)
lower_type = type(lower_value)
if upper_type is not lower_type:
raise TypeMismatchError(path, upper_type, lower_type)
def flatten_pair(upper: Layer, lower: Layer, current_path: Path) -> None:
for key, value in lower.items():
key_path = current_path + [key]
upper_value = upper.setdefault(key, value)
if upper_value is value:
# optimisation to prevent recursing into nested mappings where
# nothing will change
continue
if None in (upper_value, value):
# explicit nulls are allowed in order to clear lower values
continue
validate_types(key_path, upper_value, value)
if isinstance(value, dict):
assert isinstance(upper_value, dict)
flatten_pair(upper_value, value, key_path)
def flatten(top_layer: Layer, *layers: Layer) -> Layer:
layers = tuple(reversed([top_layer, *layers]))
for upper, lower in zip(layers[1:], layers):
flatten_pair(upper, lower, [])
return top_layer
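# A small sketch of the merge semantics (run as `python -m layer_loader.flatten`
# so the relative imports resolve): upper layers win, an explicit None clears a
# lower value, and nested dicts are merged recursively.
if __name__ == "__main__":
    top = {"db": {"host": "localhost"}, "debug": None}
    base = {"db": {"host": "prod.example.com", "port": 5432}, "debug": True}
    print(flatten(top, base))
    # -> {'db': {'host': 'localhost', 'port': 5432}, 'debug': None}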
|
StarcoderdataPython
|
348726
|
from typing import Dict
from typing import List
def prettify_news_links(links: List[Dict[str, str]]) -> str:
"""tries to convert the news links form the dict to a
single large text
"""
link_string = "" # the message to be send
for link_dict in links:
for title, link in link_dict.items():
link_string += f"{title} \n{link}"
if len(link_string) > 3995:
return link_string
link_string += "\n\n"
return link_string
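# A quick illustration with hypothetical links: each entry becomes a "title \n url"
# line, and the text is cut off once it grows past 3995 characters.
if __name__ == "__main__":
    sample = [{"Python 3.12 released": "https://example.com/py312"},
              {"New asyncio docs": "https://example.com/asyncio"}]
    print(prettify_news_links(sample))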
|
StarcoderdataPython
|
1860169
|
<gh_stars>0
from base64 import b64encode
import traceback
import sys
import boto3
import datetime
def write(*args, stream=sys.stdout):
for a in args:
if isinstance(a, Exception):
traceback.print_exception(type(a), a, a.__traceback__, file=stream)
stream.flush()
else:
print(a, file=stream, flush=True)
def info(*args):
write(*args, stream=sys.stdout)
def error(*args):
write(*args, stream=sys.stderr)
def fatal(*args):
error(*args)
sys.exit(1)
def metric(metric, namespace, dimensions, value):
client = boto3.client('cloudwatch', 'us-west-2')
client.put_metric_data(
Namespace=namespace,
MetricData=[{
'MetricName': metric,
'Dimensions': dimensions,
'Timestamp': datetime.datetime.utcnow(),
'Value': value
}]
)
def metadata_fill(metadata, status, rows=0, e=None):
exception = ''.join(traceback.format_exception(type(e), e, e.__traceback__)) if e else None
metadata['END_TIME'] = datetime.datetime.utcnow()
metadata['RUN_TIME'] = metadata['END_TIME'] - metadata['START_TIME']
metadata['ROWS'] = rows
metadata['STATUS'] = status
metadata['EXCEPTION'] = b64encode(exception.encode()).decode() if e else None
metadata['START_TIME'] = str(metadata['START_TIME']) # This is mildly gross, but we record them as
metadata['END_TIME'] = str(metadata['END_TIME']) # datetime objects so we can do math on them, then
metadata['RUN_TIME'] = str(metadata['RUN_TIME']) # convert to string so we can json serialize.
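# A minimal sketch of how these helpers might be wired around a job run
# (the row count and status values below are hypothetical):
if __name__ == '__main__':
    meta = {'START_TIME': datetime.datetime.utcnow()}
    try:
        rows_loaded = 42                      # pretend work happened here
        metadata_fill(meta, 'SUCCESS', rows=rows_loaded)
    except Exception as exc:
        metadata_fill(meta, 'FAILURE', e=exc)
    info(meta)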
|
StarcoderdataPython
|
1604845
|
<gh_stars>1-10
# pytest requires at least one test case to run
def test_placeholder():
pass
|
StarcoderdataPython
|
129710
|
import os
from pydantic import BaseSettings
class Settings(BaseSettings):
class Config:
env_file = ".env"
app_name: str = "FastAPI Demo"
admin_email: str = "<EMAIL>"
secret_key: str = os.getenv("SECRET_KEY")
hash_algo: str = os.getenv("HASH_ALGO", "HS256")
access_token_expiration: int = os.getenv("ACCESS_TOKEN_EXPIRATION", 86400)
settings = Settings()
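# Typical usage is to import the module-level instance and read attributes,
# with values overridable through the .env file (the module name is illustrative):
#
#     from config import settings
#     settings.app_name                # "FastAPI Demo" unless overridden
#     settings.access_token_expiration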
|
StarcoderdataPython
|
8163745
|
import copy
import os
from importlib.util import find_spec
def load_local_settings(settings, module_name):
"""
Load local settings from `module_name`.
Search for a `local_settings` module, load its code and execute it in the
`settings` dict. All of the settings declared in the sertings dict are thus available
to the local_settings module. The settings dict is updated.
"""
local_settings_spec = find_spec(module_name)
if local_settings_spec:
local_settings_code = local_settings_spec.loader.get_code(module_name)
exec(local_settings_code, settings)
def load_secret_key(settings):
"""
Load a secret key from `.django_secret` if one is not already set.
:param settings: Settings dict
:type settings: dict
"""
if settings.get("SECRET_KEY"):
return
secret_file = os.path.join(settings.get("BASE_DIR"), '.django_secret')
if os.path.isfile(secret_file):
with open(secret_file) as secret:
settings["SECRET_KEY"] = secret.read().strip()
return
from django.utils.crypto import get_random_string
try:
settings["SECRET_KEY"] = secret_key = get_random_string(64)
with open(secret_file, 'w') as secret:
os.chmod(secret_file, 0o0600)
secret.write(secret_key)
secret.close()
print("Secret key file %s generated." % secret_file)
except IOError:
raise Exception(
'Please create a %s file with random characters to generate your secret key!' % secret_file
)
def get_settings(settings_module):
"""
Get a copy of the settings (upper-cased variables) declared in the given settings module.
:param settings_module: A settings module
:type settings_module: module
:return: Dict of settings
:rtype: dict[str, object]
"""
return copy.deepcopy({k: v for (k, v) in vars(settings_module).items() if k.isupper()})
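# A sketch of how these helpers are commonly chained at the bottom of a Django
# settings.py (the names below are illustrative, not part of this module):
#
#     BASE_DIR = os.path.dirname(os.path.abspath(__file__))
#     SECRET_KEY = ""
#     load_secret_key(globals())                        # fills SECRET_KEY from .django_secret
#     load_local_settings(globals(), "local_settings")  # lets a local module override values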
|
StarcoderdataPython
|
6652583
|
<reponame>venkat-marina/git-branch-comparator<filename>compare-branches.py
#!/usr/bin/python3 -u
# This script checks, if 'development' branch has all changes from 'master' branch on Git
# 'master' can have some changes, which 'development' does not have in case of hot fixes on 'master'
# Usage: compare-branches.py <path_to_your_git_repository>
# This script can be executed in a jenkins job from command line
import os, sys
from subprocess import Popen, PIPE
from time import sleep
def git_log():
GIT_COMMIT_FIELDS = ['id', 'author_name', 'author_email', 'date', 'message']
GIT_LOG_FORMAT = ['%H', '%an', '%ae', '%ad', '%s']
GIT_LOG_FORMAT = '%x1f'.join(GIT_LOG_FORMAT) + '%x1e'
p = Popen('git log --format="%s"' % GIT_LOG_FORMAT, shell=True, stdout=PIPE)
    (log, _) = p.communicate()
    log = log.decode('utf-8', errors='replace').strip('\n\x1e').split("\x1e")
log = [row.strip().split("\x1f") for row in log]
log = [dict(zip(GIT_COMMIT_FIELDS, row)) for row in log]
return log
def git_checkout(branch_name):
Popen('git checkout "%s"' % branch_name, shell=True, stdout=PIPE)
def commits_are_same(commit_one, commit_two):
same_email = commit_one['author_email'] == commit_two['author_email']
same_date = commit_one['date'] == commit_two['date']
same_message = commit_one['message'] == commit_two['message']
return same_email and same_date and same_message
def development_has_changes_from_master():
# we need to add sleeps in this method
# to read data from git correctly
    print("Switching to 'master'...")
    git_checkout('master')
    print('Reading git log...')
    sleep(2)
    git_log_master = git_log()
    print("Switching to 'development'...")
    sleep(2)
    git_checkout('development')
    print('Reading git log...')
    sleep(2)
    git_log_development = git_log()
commit_found_array = []
for commit_master in git_log_master:
# if all elements in the array are False, commit wasn't found
        commit_not_found = True not in commit_found_array
        if commit_found_array and commit_not_found:
            print("ERROR: 'master' needs to be merged into 'development'")
return False
for commit_development in git_log_development:
commit_found = commits_are_same(commit_master, commit_development)
commit_found_array.append(commit_found)
return True
def fail_jenkins_job():
    print('Failing job...')
    sys.exit(-1)  # when the script exits with a non-zero status, the Jenkins job should fail
def compare_branches_in_repository():
if len(sys.argv) == 1:
        print('ERROR: path to repository is not defined')
return
path_to_repository = sys.argv[1]
os.chdir(path_to_repository)
if development_has_changes_from_master():
        print("SUCCESS: 'development' has all changes from 'master'")
else:
fail_jenkins_job()
compare_branches_in_repository()
|
StarcoderdataPython
|
1847830
|
import subprocess
from os.path import dirname, exists
import yaml
import logging
import argparse
from instances_connection import (logger,
ReplaceCertificatesConfig,
ReplaceCertificatesError)
def get_dict_from_yaml(yaml_path):
with open(yaml_path) as f:
yaml_dict = yaml.load(f, yaml.Loader)
return yaml_dict
def raise_errors_list(errors_list):
logger.info(_errors_list_str(errors_list))
raise ReplaceCertificatesError()
def _errors_list_str(errors_list):
err_str = 'Errors:\n'
err_lst = '\n'.join([' [{0}] {1}'.format(i + 1, err) for i, err
in enumerate(errors_list)])
return err_str + err_lst
def validate_config_dict(config_dict, all_in_one):
errors_list = []
_assert_username_and_key_file_path(errors_list, config_dict)
if all_in_one:
validate_all_in_one_config_dict(errors_list, config_dict)
else:
_validate_instances(errors_list, config_dict)
_check_path(errors_list, config_dict['manager']['new_ldap_ca_cert'])
if errors_list:
raise_errors_list(errors_list)
def validate_all_in_one_config_dict(errors_list, config_dict):
manager_section = config_dict['manager']
_validate_manager_node_cert_and_key(errors_list, manager_section)
err_msg = 'A {0} was specified for manager but a {1} was not specified'
if (manager_section.get('new_ca_cert') and
(not manager_section.get('new_internal_cert'))):
errors_list.append(err_msg.format('new_ca_cert', 'new_internal_cert'))
if (manager_section.get('new_external_ca_cert') and
(not manager_section.get('new_external_cert'))):
errors_list.append(err_msg.format('new_external_ca_cert',
'new_external_cert'))
if (manager_section.get('new_ca_cert') and
(not manager_section.get('new_postgresql_client_cert'))):
errors_list.append(err_msg.format('new_ca_cert',
'new_postgresql_client_cert'))
postgresql_section = config_dict['postgresql_server']
_validate_node_certs(errors_list, postgresql_section,
'new_cert', 'new_key')
if (postgresql_section.get('new_ca_cert') and
(not postgresql_section.get('new_cert'))):
        errors_list.append('A new_ca_cert was specified for postgresql_server '
                           'but a new_cert was not specified')
rabbitmq_section = config_dict['rabbitmq']
_validate_node_certs(errors_list, rabbitmq_section,
'new_cert', 'new_key')
if (manager_section.get('new_ca_cert') and
(not rabbitmq_section.get('new_cert'))):
        errors_list.append('A new_ca_cert was specified for manager '
                           'but a new_cert was not specified for rabbitmq')
def _assert_username_and_key_file_path(errors_list, config_dict):
    key_file_path = config_dict.get('key_file_path')
    if (not config_dict.get('username')) or (not key_file_path):
        errors_list.append('Please provide the username and key_file_path')
    elif not exists(key_file_path):
        errors_list.append('The key_file_path does not exist')
def _validate_instances(errors_list, config_dict):
for instance in 'postgresql_server', 'rabbitmq':
_validate_cert_and_key(errors_list,
config_dict[instance]['cluster_members'])
_validate_new_ca_cert(errors_list, config_dict, instance)
_validate_manager_cert_and_key(errors_list,
config_dict['manager']['cluster_members'])
_validate_new_manager_ca_certs(errors_list, config_dict)
def _validate_new_ca_cert(errors_list, config_dict, instance_name):
_validate_ca_cert(errors_list, config_dict[instance_name], instance_name,
'new_ca_cert', 'new_cert',
config_dict[instance_name]['cluster_members'])
def _validate_new_manager_ca_certs(errors_list, config_dict):
_validate_ca_cert(errors_list, config_dict['manager'], 'manager',
'new_ca_cert', 'new_internal_cert',
config_dict['manager']['cluster_members'])
_validate_ca_cert(errors_list, config_dict['manager'],
'manager', 'new_external_ca_cert',
'new_external_cert',
config_dict['manager']['cluster_members'])
_validate_ca_cert(errors_list, config_dict['postgresql_server'],
'postgresql_server', 'new_ca_cert',
'new_postgresql_client_cert',
config_dict['manager']['cluster_members'])
def _validate_ca_cert(errors_list, instance, instance_name, new_ca_cert_name,
cert_name, cluster_members):
"""Validates the CA cert.
Validates that the CA path is valid, and if it is, then a new cert was
specified for all cluster members.
"""
err_msg = '{0} was specified for instance {1}, but {2} was not specified' \
' for all cluster members.'.format(new_ca_cert_name,
instance_name,
cert_name)
new_ca_cert_path = instance.get(new_ca_cert_name)
if _check_path(errors_list, new_ca_cert_path):
if not all(member.get(cert_name) for member in cluster_members):
errors_list.append(err_msg)
def _validate_cert_and_key(errors_list, nodes):
for node in nodes:
_validate_node_certs(errors_list, node, 'new_cert', 'new_key')
def _validate_manager_cert_and_key(errors_list, nodes):
for node in nodes:
_validate_manager_node_cert_and_key(errors_list, node)
def _validate_manager_node_cert_and_key(errors_list, node):
_validate_node_certs(errors_list, node,
'new_internal_cert',
'new_internal_key')
_validate_node_certs(errors_list, node,
'new_external_cert',
'new_external_key')
_validate_node_certs(errors_list, node,
'new_postgresql_client_cert',
'new_postgresql_client_key')
def _validate_node_certs(errors_list, certs_dict, new_cert_name, new_key_name):
new_cert_path = certs_dict.get(new_cert_name)
new_key_path = certs_dict.get(new_key_name)
if bool(new_key_path) != bool(new_cert_path):
errors_list.append('Either both {0} and {1} must be '
'provided, or neither for host '
'{2}'.format(new_cert_name, new_key_name,
certs_dict['host_ip']))
_check_path(errors_list, new_cert_path)
_check_path(errors_list, new_key_path)
def _check_path(errors_list, path):
if path:
if exists(path):
return True
errors_list.append('The path {0} does not exist'.format(path))
return False
def is_all_in_one():
    cluster_status = subprocess.check_output(['cfy', 'cluster', 'status'])
    # check_output returns bytes, so decode before comparing with a string prefix
    if cluster_status.decode('utf-8')[:3] == 'You':
        return True
    return False
def parse_command():
parser = argparse.ArgumentParser(description='Replacing certificates on '
'a cluster')
parser.add_argument('--config-path', action='store', type=str,
help='The replace_certificates_config.yaml file path')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
dest='verbose')
return parser.parse_args()
def main():
parse_args = parse_command()
verbose = parse_args.verbose
if verbose:
logger.setLevel(logging.DEBUG)
all_in_one = is_all_in_one()
logger.info('Replacing certificates on %s',
'an AIO manager' if all_in_one else 'a cluster')
if parse_args.config_path:
config_path = parse_args.config_path
else:
if all_in_one:
config_path = '{0}/aio_replace_certificates_config.yaml'.format(
dirname(__file__))
else:
config_path = '{0}/cluster_replace_certificates_config' \
'.yaml'.format(dirname(__file__))
config_dict = get_dict_from_yaml(config_path)
validate_config_dict(config_dict, all_in_one)
main_config = ReplaceCertificatesConfig(config_dict, all_in_one, verbose)
main_config.validate_certificates()
main_config.replace_certificates()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
8016810
|
from datetimewidget.widgets import DateWidget
from django import forms
class TritonLinkLoginForm(forms.Form):
username = forms.CharField(required=True, widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'User ID / PID',
'id': 'user',
'name': 'Username'
}))
password = forms.CharField(required=True, widget=forms.PasswordInput(attrs={
'class': 'form-control',
'placeholder': 'Password / PAC',
'id': 'password',
'name': 'Password'
}))
start_date_of_quarter = forms.DateField(widget=DateWidget(usel10n=False, bootstrap_version=3, options={
'clearBtn': False,
'format': 'mm/dd/yyyy',
}))
|
StarcoderdataPython
|
4985617
|
<filename>backend/app.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Flask."""
from flask import Flask, jsonify, request
from flask_cors import CORS
from models.daikin import Daikin
from models.gpio import Gpio
app = Flask(__name__)
CORS(app)
daikin = Daikin()
gpio = Gpio()
@app.route('/')
def hello():
"""デフォルトページ."""
name = "Let's connect with RESTFUL API"
return name
@app.route('/v1/daikin')
def get_daikin_data():
"""ダイキンデータAPI.
query:dict
from: int
timestamp
to: int
timestamp
period: string
minute, hour, day, week
"""
params = {
'from': request.args.get('from'),
'to': request.args.get('to'),
'period': request.args.get('period'),
'limit': request.args.get('limit'),
}
res = daikin.get_data(params)
return jsonify(res)
@app.route('/v1/gpio')
def get_gpio_data():
"""GPIOデータAPI.
query:dict
from: int
timestamp
to: int
timestamp
period: string
minute, hour, day, week
"""
params = {
'from': request.args.get('from'),
'to': request.args.get('to'),
'period': request.args.get('period'),
'limit': request.args.get('limit'),
}
res = gpio.get_sensors_data(params)
return jsonify(res)
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
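# Example request against the running dev server (Flask's default port 5000;
# the query values below are hypothetical Unix timestamps):
#
#     curl "http://localhost:5000/v1/daikin?from=1609459200&to=1609545600&period=hour&limit=100"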
|
StarcoderdataPython
|
146300
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import enum
import re
__all__ = (
"NodeType",
"NodeToken",
"NODE_PATTERN",
"JOIN_TOKENS",
)
class NodeType(str, enum.Enum):
"""An enumeration of the different types of nodes in a script."""
ACT = "act"
SCENE = "scene"
PROL = "prologue"
EPIL = "epilogue"
INTER = "intermission"
PERS = "persona"
ENTER = "entrance"
EXIT = "exit"
ACTION = "action"
DIR = "direction"
DIAL = "dialogue"
SPCH = "speech"
TREE = "tree"
PLAY = "play"
META = "meta"
class NodeToken(str, enum.Enum):
"""An enumeration of special 'tokens', e.g., strings of characters."""
JOIN1 = "…"
JOIN2 = "..."
JOIN = "/"
META1 = "---"
META2 = "..."
JOIN_TOKENS = (NodeToken.JOIN, NodeToken.JOIN1, NodeToken.JOIN2)
NODE_PATTERN = re.compile(
r"""
(
# Locales: Act, Scene, Prologue, Epilogue, Intermission
(
^\#+\s
(
(?P<act>(ACT)\s([IVX]+|\d+)) |
(?P<scene>SCENE\s([IVX]+|\d+)) |
(?P<prologue>PROLOGUE) |
(?P<epilogue>EPILOGUE) |
(?P<intermission>INTERMISSION) |
(?P<title>(.*))
)
(?:\.)?\s?(?P<setting>.*)
$
) |
# Persona
[*_]{2}(?P<persona>(
([A-Z][a-zA-Z'’]*\W{0,2}([a-z]+)?\s?
)+([A-Z]|\d)*))[*_]{2} |
# Enter/Exit/Action/Direction
(
(?P<start>^[_*]\\?\[?)?
(
(
(?P<entrance>(enter)((?![_*\[\]]).)*)
|
(?P<exit>(exeunt|exit)((?![_*\[\]]).)*)
)
|
(?P<direction>((?![_*\[\]]).)+)
)
(?P<end>\]?[_*])?\s{0,2}
) |
# Dialogue (catch-all)
(?P<dialogue>(^.+))
)
""",
re.VERBOSE | re.IGNORECASE | re.MULTILINE,
)
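# A small self-check of NODE_PATTERN on one hypothetical script line; the named
# groups pull apart the locale heading and its setting text.
if __name__ == "__main__":
    line = "## ACT I. A desert place."
    m = NODE_PATTERN.match(line)
    assert m is not None
    print(m.group("act"), "|", m.group("setting"))   # ACT I | A desert place.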
|
StarcoderdataPython
|
217893
|
<reponame>rendinam/crds
"""uses.py defines functions which will list the files which use a given
reference or mapping file.
>> from pprint import pprint as pp
>> pp(_findall_mappings_using_reference("v2e20129l_flat.fits"))
['hst.pmap',
'hst_0001.pmap',
'hst_0002.pmap',
'hst_0003.pmap',
'hst_0004.pmap',
'hst_0005.pmap',
'hst_0006.pmap',
'hst_cos.imap',
'hst_cos_0001.imap',
'hst_cos_flatfile.rmap',
'hst_cos_flatfile_0002.rmap']
"""
import sys
import os.path
from crds.core import config, cmdline, utils, log, rmap
@utils.cached
def load_all_mappings(observatory, pattern="*map"):
"""Return a dictionary mapping the names of all CRDS Mappings matching `pattern`
onto the loaded Mapping object.
"""
all_mappings = rmap.list_mappings(pattern, observatory)
loaded = {}
for name in all_mappings:
with log.error_on_exception("Failed loading", repr(name)):
loaded[name] = rmap.get_cached_mapping(name)
return loaded
@utils.cached
def mapping_type_names(observatory, ending):
"""Return a mapping dictionary containing only mappings with names with `ending`."""
return { name : mapping.mapping_names() + mapping.reference_names()
for (name, mapping) in load_all_mappings(observatory).items()
if name.endswith(ending) }
def uses_files(files, observatory, ending):
"""Alternate approach to uses that works by loading all mappings instead of grepping
them on the file system.
"""
referrers = set()
for filename in files:
config.check_filename(filename)
loaded = mapping_type_names(observatory, ending)
for_filename = set(name for name in loaded if filename in loaded[name])
referrers |= for_filename
return sorted(list(referrers))
def _findall_rmaps_using_reference(filename, observatory="hst"):
"""Return the basename of all reference mappings which mention `filename`."""
return uses_files([filename], observatory, "rmap")
def _findall_imaps_using_rmap(filename, observatory="hst"):
"""Return the basenames of all instrument contexts which mention `filename`."""
return uses_files([filename], observatory, "imap")
def _findall_pmaps_using_imap(filename, observatory="hst"):
"""Return the basenames of all pipeline contexts which mention `filename`."""
return uses_files([filename], observatory, "pmap")
def _findall_mappings_using_reference(reference, observatory="hst"):
"""Return the basenames of all mapping files in the hierarchy which
mentions reference `reference`.
"""
mappings = []
for rmap in _findall_rmaps_using_reference(reference, observatory):
mappings.append(rmap)
for imap in _findall_imaps_using_rmap(rmap, observatory):
mappings.append(imap)
for pmap in _findall_pmaps_using_imap(imap, observatory):
mappings.append(pmap)
return sorted(list(set(mappings)))
def _findall_mappings_using_rmap(rmap, observatory="hst"):
"""Return the basenames of all mapping files in the hierarchy which
mentions reference mapping `rmap`.
"""
mappings = []
for imap in _findall_imaps_using_rmap(rmap, observatory):
mappings.append(imap)
for pmap in _findall_pmaps_using_imap(imap, observatory):
mappings.append(pmap)
return sorted(list(set(mappings)))
def uses(files, observatory="hst"):
"""Return the list of mappings which use any of `files`."""
mappings = []
for file_ in files:
if file_.endswith(".rmap"):
mappings.extend(_findall_mappings_using_rmap(file_, observatory))
elif file_.endswith(".imap"):
mappings.extend(_findall_pmaps_using_imap(file_, observatory))
elif file_.endswith(".pmap"):
pass # nothing refers to a .pmap
else:
mappings.extend(_findall_mappings_using_reference(file_, observatory))
return sorted(list(set(mappings)))
class UsesScript(cmdline.Script):
"""Command line script for printing rmaps using references, or datasets using references."""
description = """
Prints out the mappings which refer to the specified mappings or references.
Prints out the datasets which historically used a particular reference as defined by DADSOPS.
IMPORTANT:
1. You must specify references or rules on which to operate with --files.
2. You must set CRDS_PATH and CRDS_SERVER_URL to give crds.uses access to CRDS mappings and databases.
"""
epilog = """
crds.uses can be invoked like this:
% crds uses --files n3o1022ij_drk.fits --hst
hst.pmap
hst_0001.pmap
hst_0002.pmap
hst_0003.pmap
...
hst_0041.pmap
hst_acs.imap
hst_acs_0001.imap
hst_acs_0002.imap
hst_acs_0003.imap
...
hst_acs_0008.imap
hst_acs_darkfile.rmap
hst_acs_darkfile_0001.rmap
hst_acs_darkfile_0002.rmap
hst_acs_darkfile_0003.rmap
...
hst_acs_darkfile_0005.rmap
"""
def add_args(self):
"""Add command line parameters unique to this script."""
super(UsesScript, self).add_args()
self.add_argument("--files", nargs="+",
help="References for which to dump using mappings or datasets.")
self.add_argument("-i", "--include-used", action="store_true", dest="include_used",
help="Include the used file in the output as the first column.")
def main(self):
"""Process command line parameters in to a context and list of
reference files. Print out the match tuples within the context
which contain the reference files.
"""
if not self.args.files:
self.print_help()
sys.exit(-1)
self.print_mappings_using_files()
return log.errors()
def locate_file(self, file_):
"""Just use basenames for identifying file references."""
return os.path.basename(file_)
def print_mappings_using_files(self):
"""Print out the mappings which refer to the specified mappings or references."""
for file_ in self.files:
for use in uses([file_], self.observatory):
if self.args.include_used:
print(file_, use)
else:
print(use)
def test():
"""Run the module doctest."""
import doctest
from . import uses
return doctest.testmod(uses)
if __name__ == "__main__":
sys.exit(UsesScript()())
|
StarcoderdataPython
|
11289386
|
<filename>learning_DAN/RF/RF.py
import scipy.io as sio
import numpy as np
from sklearn.ensemble import RandomForestRegressor
data = sio.loadmat('RF_2.mat')
x_train = data['x_train']
y_train = data['y_train']
x_test = data['x_test']
y_test = data['y_test']
y_train = np.reshape(y_train, [np.shape(y_train)[0]])
y_test = np.reshape(y_test, [np.shape(y_test)[0]])
print('input done')
mdl = RandomForestRegressor(n_estimators=50, max_depth=2, max_features='sqrt')
print('parameter done')
mdl.fit(x_train,y_train)
y_pre = mdl.predict(x_test)
sio.savemat('result1.mat',{'pre': y_pre,'true': y_test})
|
StarcoderdataPython
|
5035081
|
import numpy as np
import pytest
import torch
from mmdet3d.core.evaluation.indoor_eval import average_precision, indoor_eval
def test_indoor_eval():
if not torch.cuda.is_available():
pytest.skip()
from mmdet3d.core.bbox.structures import Box3DMode, DepthInstance3DBoxes
det_infos = [{
'labels_3d':
torch.tensor([0, 1, 2, 2, 0, 3, 1, 2, 3, 2]),
'boxes_3d':
DepthInstance3DBoxes(
torch.tensor([[
-2.4089e-03, -3.3174e+00, 4.9438e-01, 2.1668e+00, 2.8431e-01,
1.6506e+00, 0.0000e+00
],
[
-3.4269e-01, -2.7565e+00, 2.8144e-02, 6.8554e-01,
9.6854e-01, 6.1755e-01, 0.0000e+00
],
[
-3.8320e+00, -1.0646e+00, 1.7074e-01, 2.4981e-01,
4.4708e-01, 6.2538e-01, 0.0000e+00
],
[
4.1073e-01, 3.3757e+00, 3.4311e-01, 8.0617e-01,
2.8679e-01, 1.6060e+00, 0.0000e+00
],
[
6.1199e-01, -3.1041e+00, 4.1873e-01, 1.2310e+00,
4.0162e-01, 1.7303e+00, 0.0000e+00
],
[
-5.9877e-01, -2.6011e+00, 1.1148e+00, 1.5704e-01,
7.5957e-01, 9.6930e-01, 0.0000e+00
],
[
2.7462e-01, -3.0088e+00, 6.5231e-02, 8.1208e-01,
4.1861e-01, 3.7339e-01, 0.0000e+00
],
[
-1.4704e+00, -2.0024e+00, 2.7479e-01, 1.7888e+00,
1.0566e+00, 1.3704e+00, 0.0000e+00
],
[
8.2727e-02, -3.1160e+00, 2.5690e-01, 1.4054e+00,
2.0772e-01, 9.6792e-01, 0.0000e+00
],
[
2.6896e+00, 1.9881e+00, 1.1566e+00, 9.9885e-02,
3.5713e-01, 4.5638e-01, 0.0000e+00
]]),
origin=(0.5, 0.5, 0)),
'scores_3d':
torch.tensor([
1.7516e-05, 1.0167e-06, 8.4486e-07, 7.1048e-02, 6.4274e-05,
1.5003e-07, 5.8102e-06, 1.9399e-08, 5.3126e-07, 1.8630e-09
])
}]
label2cat = {
0: 'cabinet',
1: 'bed',
2: 'chair',
3: 'sofa',
}
gt_annos = [{
'gt_num':
10,
'gt_boxes_upright_depth':
np.array([[
-2.4089e-03, -3.3174e+00, 4.9438e-01, 2.1668e+00, 2.8431e-01,
1.6506e+00, 0.0000e+00
],
[
-3.4269e-01, -2.7565e+00, 2.8144e-02, 6.8554e-01,
9.6854e-01, 6.1755e-01, 0.0000e+00
],
[
-3.8320e+00, -1.0646e+00, 1.7074e-01, 2.4981e-01,
4.4708e-01, 6.2538e-01, 0.0000e+00
],
[
4.1073e-01, 3.3757e+00, 3.4311e-01, 8.0617e-01,
2.8679e-01, 1.6060e+00, 0.0000e+00
],
[
6.1199e-01, -3.1041e+00, 4.1873e-01, 1.2310e+00,
4.0162e-01, 1.7303e+00, 0.0000e+00
],
[
-5.9877e-01, -2.6011e+00, 1.1148e+00, 1.5704e-01,
7.5957e-01, 9.6930e-01, 0.0000e+00
],
[
2.7462e-01, -3.0088e+00, 6.5231e-02, 8.1208e-01,
4.1861e-01, 3.7339e-01, 0.0000e+00
],
[
-1.4704e+00, -2.0024e+00, 2.7479e-01, 1.7888e+00,
1.0566e+00, 1.3704e+00, 0.0000e+00
],
[
8.2727e-02, -3.1160e+00, 2.5690e-01, 1.4054e+00,
2.0772e-01, 9.6792e-01, 0.0000e+00
],
[
2.6896e+00, 1.9881e+00, 1.1566e+00, 9.9885e-02,
3.5713e-01, 4.5638e-01, 0.0000e+00
]]),
'class':
np.array([0, 1, 2, 0, 0, 3, 1, 3, 3, 2])
}]
ret_value = indoor_eval(
gt_annos,
det_infos, [0.25, 0.5],
label2cat,
box_type_3d=DepthInstance3DBoxes,
box_mode_3d=Box3DMode.DEPTH)
assert abs(ret_value['cabinet_AP_0.25'] - 0.666667) < 1e-3
assert abs(ret_value['bed_AP_0.25'] - 1.0) < 1e-3
assert abs(ret_value['chair_AP_0.25'] - 0.5) < 1e-3
assert abs(ret_value['mAP_0.25'] - 0.708333) < 1e-3
assert abs(ret_value['mAR_0.25'] - 0.833333) < 1e-3
def test_average_precision():
ap = average_precision(
np.array([[0.25, 0.5, 0.75], [0.25, 0.5, 0.75]]),
np.array([[1., 1., 1.], [1., 1., 1.]]), '11points')
print(ap[0])
assert abs(ap[0] - 0.06611571) < 0.001
|
StarcoderdataPython
|
6546695
|
<gh_stars>1-10
from rest_framework import permissions
class IsOwnerOrAdmin(permissions.BasePermission):
'''
The purpose of this permission class is to limit
viewing or editing of resources to the owner OR to
someone with administrative privileges.
Assumes the instance `obj` has an `owner` attribute
'''
def has_object_permission(self, request, view, obj):
if request.user.is_staff:
return True
return obj.owner == request.user
class IsInfoAboutSelf(permissions.BasePermission):
'''
This permission class is used for the User serialization.
Admins can view anyone's details.
Regular users can only view information about themselves.
Unlike other database objects where the user can "own"
something (likely through a foreign key relationship), here we
are checking that the only user information they can obtain is
about themself
'''
def has_object_permission(self, request, view, obj):
if request.user.is_staff:
return True
return obj == request.user
class ReadOnly(permissions.BasePermission):
'''
Allows us to restrict certain ListCreate views so that
regular users can only list and NOT create objects.
'''
def has_permission(self, request, view):
return request.method in permissions.SAFE_METHODS
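# Typical wiring in a DRF view (the view, queryset and serializer names below
# are illustrative, not part of this module):
#
#     class WidgetDetail(generics.RetrieveUpdateAPIView):
#         permission_classes = [permissions.IsAuthenticated, IsOwnerOrAdmin]
#         queryset = Widget.objects.all()
#         serializer_class = WidgetSerializer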
|
StarcoderdataPython
|
6508671
|
import xml.etree.ElementTree as ET
import os
import shutil
import matplotlib.pyplot as plt
import cv2
import numpy as np
from tqdm import tqdm
def data_analysis(image_size=1024):
"""
    Analyze the distribution of bounding-box aspect ratios, heights, and widths in the dataset
"""
# xml_path
xml_path = '../dataset/train/valid_box'
xml_files = os.listdir(xml_path)
# image folder
img_folder = '../dataset/train/image'
    # record the aspect ratios
ratios = []
    # record the scaled widths and heights
scales = []
# parse xml file
for f in tqdm(xml_files, desc='Preprocessing....'):
target = ET.parse(os.path.join(xml_path, f)).getroot()
img_id = f[:-4]
img_path = os.path.join(img_folder, img_id+'.jpg')
# read image
img = cv2.imread(img_path)
H, W, _ = img.shape
        # scale the image to 1024 x 1024
factor_H = image_size/H
factor_W = image_size/W
for obj in target.iter('object'):
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
if i%2==0:
cur_pt *= factor_W
else:
cur_pt *= factor_H
bndbox.append(cur_pt)
            # compute the scaled ratio, H, W
new_H, new_W = bndbox[3]-bndbox[1], bndbox[2]-bndbox[0]
try:
ratio = new_W/new_H
ratios.append(ratio)
scales.append([new_H, new_W])
except Exception:
pass
ratios = np.array(ratios)
scales = np.array(scales)
print('max_ratio:', np.max(ratios), 'min_ratio:', np.min(ratios), 'mean_ratio:', np.mean(ratios))
print('max_height:', np.max(scales[:, 0]), 'min_height:', np.min(scales[:, 0]), 'mean_height:', np.mean(scales[:, 0]))
print('max_width:', np.max(scales[:, 1]), 'min_width:', np.min(scales[:, 1]), 'mean_width:', np.mean(scales[:, 1]))
    # plot the distributions
    fig = plt.figure()
    plt.subplot(1,3,1)
    plt.hist(ratios, bins=100)
    plt.title('Aspect ratio distribution')
    plt.xlabel('ratio')
    plt.subplot(1,3,2)
    plt.hist(np.round(scales[:, 0]), bins=100)
    plt.title('Height distribution')
    plt.xlabel('Height')
    plt.subplot(1,3,3)
    plt.hist(np.round(scales[:, 1]), bins=100)
    plt.title('Width distribution')
    plt.xlabel('Width')
    plt.savefig('./统计.jpg', dpi=300)
plt.show()
if __name__ == '__main__':
data_analysis()
|
StarcoderdataPython
|
6412670
|
<filename>tests/test_splitstack.py
import pglet
from pglet import SplitStack, Stack
from pglet.protocol import Command
def test_splitstack_add():
s = SplitStack(
horizontal=True,
gutter_size=10,
gutter_color="yellow",
gutter_hover_color="orange",
gutter_drag_color="blue",
controls=[Stack(id="left"), Stack(id="center")],
)
assert isinstance(s, pglet.Control)
assert isinstance(s, pglet.SplitStack)
assert s.get_cmd_str() == [
Command(
indent=0,
name=None,
values=["splitstack"],
attrs={
"guttercolor": "yellow",
"gutterdragcolor": "blue",
"gutterhovercolor": "orange",
"guttersize": "10",
"horizontal": "true",
},
lines=[],
commands=[],
),
Command(
indent=2,
name=None,
values=["stack"],
attrs={"id": ("left", True)},
lines=[],
commands=[],
),
Command(
indent=2,
name=None,
values=["stack"],
attrs={"id": ("center", True)},
lines=[],
commands=[],
),
], "Test failed"
|
StarcoderdataPython
|
9711431
|
<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from . import util
class Path:
    def __init__(self, path_nodes=None, path_cost=0.0, offset=None):
        # avoid a mutable default argument, which would be shared across instances
        self._nodes = path_nodes if path_nodes is not None else []
        self._cost = path_cost
        self._offset = offset
def offset_grid_coordinate(self, coords):
if self._offset is not None:
coords = [coords[i] + self._offset[i] for i in range(min(len(self._offset), len(coords)))]
return coords
@property
def nodes(self):
return self._nodes
@property
def start(self):
return self.offset_grid_coordinate(self._nodes[0]) if len(self._nodes) > 0 else None
@property
def goal(self):
return self.offset_grid_coordinate(self._nodes[-1]) if len(self._nodes) > 0 else None
@property
def cost(self):
return self._cost
@property
def num_nodes(self):
return len(self._nodes)
@property
def nodes_offset(self):
nodes = np.array(self._nodes)
if self._offset is not None:
nodes += self._offset
return nodes
@property
def details(self):
start = np.array(self.start)
goal = np.array(self.goal)
with np.printoptions(precision=2, suppress=True):
msg = "Path: {} nodes from {} to {} (cost: {:.2f})".format(
len(self._nodes), start, goal, self.cost)
return msg
def is_empty(self):
return len(self._nodes) < 1
def prune(self, collinearity_th=1e-6):
"""
:param collinearity_th: collinearity threshold
:return:
"""
i = 0
while i + 2 < len(self._nodes):
points = [(p[0], p[1], 1) for p in self._nodes[i: i + 3]]
if util.collinearity_check(points[0], points[1], points[2], collinearity_th):
del self._nodes[i + 1]
else:
i += 1
def draw(self, ax, color='green', markersize=9, show_endpoints=True):
if self.is_empty():
return
nodes = np.array(self._nodes)
if self._offset is not None:
nodes += self._offset
if show_endpoints:
ax.plot(self.start[1], self.start[0], marker='o', markersize=markersize, color=color)
ax.plot(self.goal[1], self.goal[0], marker='x', markersize=markersize, color=color)
ax.plot(nodes[:, 1], nodes[:, 0], color=color, linewidth=3)
def draw_path_3d(self, color='blue', show_endpoints=True):
if self.is_empty():
return
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
nodes = np.array(self._nodes)
if show_endpoints:
ax.plot([self.start[1]], [self.start[0]], [self.start[2]], marker='o', markersize=9, color=color)
ax.plot([self.goal[1]], [self.goal[0]], [self.goal[2]], marker='x', markersize=9, color=color)
ax.plot(nodes[:, 1], nodes[:, 0], nodes[:, 2], color=color, linewidth=3)
ax.set_ylabel('NORTH')
ax.set_xlabel('EAST')
ax.set_zlabel('ALTITUDE')
fig.show()
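# A short usage sketch (coordinates and offset are hypothetical; assumes
# util.collinearity_check flags collinear triples so prune() can drop them):
#
#     p = Path(path_nodes=[(0, 0), (1, 1), (2, 2), (3, 1)], path_cost=4.0, offset=(10, 10))
#     p.prune()          # drops (1, 1), which is collinear with its neighbours
#     print(p.details)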
|
StarcoderdataPython
|
197034
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import ctypes
from ctypes import Structure, Union, c_ubyte, c_long, c_ulong, c_ushort, \
c_wchar, c_void_p, c_uint
from ctypes import byref, POINTER, sizeof
from ctypes.wintypes import ULONG, BOOLEAN, BYTE, WORD, DWORD, HANDLE, BOOL, \
WCHAR, LPWSTR, LPCWSTR
#from core import HIDError
from . import helpers
import platform
UCHAR = c_ubyte
ENUM = c_uint
TCHAR = WCHAR
if platform.architecture()[0].startswith('64'):
WIN_PACK = 8
else:
WIN_PACK = 1
class WinApiException(Exception):
"Rough Windows API exception type"
pass
def winapi_result( result ):
"""Validate WINAPI BOOL result, raise exception if failed"""
if not result:
raise WinApiException("%d (%x): %s" % (ctypes.GetLastError(),
ctypes.GetLastError(), ctypes.FormatError()))
return result
#dll references
setup_api = ctypes.windll.setupapi
hid_dll = ctypes.windll.hid
kernel32 = ctypes.windll.kernel32
#os independent functions
ReadFile = kernel32.ReadFile
CancelIo = kernel32.CancelIo
WriteFile = kernel32.WriteFile
CloseHandle = kernel32.CloseHandle
CloseHandle.restype = BOOL
CloseHandle.argtypes = [HANDLE]
SetEvent = kernel32.SetEvent
WaitForSingleObject = kernel32.WaitForSingleObject
#os dependant functions and definitions
c_tchar = c_wchar
CreateFile = kernel32.CreateFileW
CreateEvent = kernel32.CreateEventW
CM_Get_Device_ID = setup_api.CM_Get_Device_IDW
b_verbose = True
usb_verbose = False
#**************
# SetupApi.dll, it likes pack'ed = 1 structures
class GUID(Structure):
"""GUID Windows OS structure"""
_pack_ = 1
_fields_ = [("data1", DWORD),
("data2", WORD),
("data3", WORD),
("data4", BYTE * 8)]
class OVERLAPPED(Structure):
class OFFSET_OR_HANDLE(Union):
class OFFSET(Structure):
_fields_ = [
("offset", DWORD),
("offset_high", DWORD) ]
_fields_ = [
("offset", OFFSET),
("pointer", c_void_p) ]
_fields_ = [
("internal", POINTER(ULONG)),
("internal_high", POINTER(ULONG)),
("u", OFFSET_OR_HANDLE),
("h_event", HANDLE)
]
class SP_DEVICE_INTERFACE_DATA(Structure):
"""
typedef struct _SP_DEVICE_INTERFACE_DATA {
DWORD cbSize;
GUID InterfaceClassGuid;
DWORD Flags;
ULONG_PTR Reserved;
} SP_DEVICE_INTERFACE_DATA, *PSP_DEVICE_INTERFACE_DATA;
"""
_pack_ = WIN_PACK
_fields_ = [ \
("cb_size", DWORD),
("interface_class_guid", GUID),
("flags", DWORD),
("reserved", POINTER(ULONG))
]
class SP_DEVICE_INTERFACE_DETAIL_DATA(Structure):
"""
typedef struct _SP_DEVICE_INTERFACE_DETAIL_DATA {
DWORD cbSize;
TCHAR DevicePath[ANYSIZE_ARRAY];
} SP_DEVICE_INTERFACE_DETAIL_DATA, *PSP_DEVICE_INTERFACE_DETAIL_DATA;
"""
_pack_ = WIN_PACK
_fields_ = [ \
("cb_size", DWORD),
("device_path", TCHAR * 1) # device_path[1]
]
def get_string(self):
"""Retreive stored string"""
return ctypes.wstring_at(byref(self, sizeof(DWORD)))
class SP_DEVINFO_DATA(Structure):
"""
typedef struct _SP_DEVINFO_DATA {
DWORD cbSize;
GUID ClassGuid;
DWORD DevInst;
ULONG_PTR Reserved;
} SP_DEVINFO_DATA, *PSP_DEVINFO_DATA;
"""
_pack_ = WIN_PACK
_fields_ = [ \
("cb_size", DWORD),
("class_guid", GUID),
("dev_inst", DWORD),
("reserved", POINTER(ULONG)),
]
SetupDiGetDeviceInterfaceDetail = setup_api.SetupDiGetDeviceInterfaceDetailW
SetupDiGetDeviceInterfaceDetail.restype = BOOL
SetupDiGetDeviceInterfaceDetail.argtypes = [
HANDLE, # __in HDEVINFO DeviceInfoSet,
POINTER(SP_DEVICE_INTERFACE_DATA), # __in PSP_DEVICE_INTERFACE_DATA DeviceIn
# __out_opt PSP_DEVICE_INTERFACE_DETAIL_DATA DeviceInterfaceDetailData,
POINTER(SP_DEVICE_INTERFACE_DETAIL_DATA),
DWORD, # __in DWORD DeviceInterfaceDetailDataSize,
POINTER(DWORD), # __out_opt PDWORD RequiredSize,
POINTER(SP_DEVINFO_DATA), # __out_opt PSP_DEVINFO_DATA DeviceInfoData
]
SetupDiGetDeviceInstanceId = setup_api.SetupDiGetDeviceInstanceIdW
SetupDiGetDeviceInstanceId.restype = BOOL
SetupDiGetDeviceInstanceId.argtypes = [
HANDLE, # __in HDEVINFO DeviceInfoSet,
POINTER(SP_DEVINFO_DATA), # __in PSP_DEVINFO_DATA DeviceInfoData,
LPWSTR, # __out_opt PTSTR DeviceInstanceId,
DWORD, # __in DWORD DeviceInstanceIdSize,
POINTER(DWORD), # __out_opt PDWORD RequiredSize
]
SetupDiGetClassDevs = setup_api.SetupDiGetClassDevsW
SetupDiGetClassDevs.restype = HANDLE
SetupDiGetClassDevs.argtypes = [
POINTER(GUID), # __in_opt const GUID *ClassGuid,
LPCWSTR, # __in_opt PCTSTR Enumerator,
HANDLE, # __in_opt HWND hwndParent,
DWORD, # __in DWORD Flags
]
SetupDiGetDeviceRegistryProperty = setup_api.SetupDiGetDeviceRegistryPropertyW
SetupDiGetDeviceRegistryProperty.restype = BOOL
SetupDiGetDeviceRegistryProperty.argtypes = [
HANDLE, # __in HDEVINFO DeviceInfoSet,
POINTER(SP_DEVINFO_DATA), # __in PSP_DEVINFO_DATA DeviceInfoData,
DWORD, # __in DWORD Property,
POINTER(DWORD), # __out_opt PDWORD PropertyRegDataType,
POINTER(BYTE), # __out_opt PBYTE PropertyBuffer,
DWORD, # __in DWORD PropertyBufferSize,
POINTER(DWORD), # __out_opt PDWORD RequiredSize
]
SetupDiDestroyDeviceInfoList = setup_api.SetupDiDestroyDeviceInfoList
SetupDiDestroyDeviceInfoList.restype = BOOL
SetupDiDestroyDeviceInfoList.argtypes = [
HANDLE, # __in HDEVINFO DeviceInfoSet,
]
SetupDiEnumDeviceInterfaces = setup_api.SetupDiEnumDeviceInterfaces
SetupDiEnumDeviceInterfaces.restype = BOOL
SetupDiEnumDeviceInterfaces.argtypes = [
HANDLE, # _In_ HDEVINFO DeviceInfoSet,
POINTER(SP_DEVINFO_DATA), # _In_opt_ PSP_DEVINFO_DATA DeviceInfoData,
POINTER(GUID), # _In_ const GUIDi *InterfaceClassGuid,
DWORD, # _In_ DWORD MemberIndex,
POINTER(SP_DEVICE_INTERFACE_DATA), # _Out_ PSP_DEVICE_INTERFACE_DATA DeviceInterfaceData
]
#structures for ctypes
class DIGCF:
"""
Flags controlling what is included in the device information set built
by SetupDiGetClassDevs
"""
DEFAULT = 0x00000001 # only valid with DIGCF.DEVICEINTERFACE
PRESENT = 0x00000002
ALLCLASSES = 0x00000004
PROFILE = 0x00000008
DEVICEINTERFACE = 0x00000010
#*******
# hid.dll
class HIDD_ATTRIBUTES(Structure):
_fields_ = [("cb_size", DWORD),
("vendor_id", c_ushort),
("product_id", c_ushort),
("version_number", c_ushort)
]
class HIDP_CAPS(Structure):
_fields_ = [
("usage", c_ushort), #usage id
("usage_page", c_ushort), #usage page
("input_report_byte_length", c_ushort),
("output_report_byte_length", c_ushort),
("feature_report_byte_length", c_ushort),
("reserved", c_ushort * 17),
("number_link_collection_nodes", c_ushort),
("number_input_button_caps", c_ushort),
("number_input_value_caps", c_ushort),
("number_input_data_indices", c_ushort),
("number_output_button_caps", c_ushort),
("number_output_value_caps", c_ushort),
("number_output_data_indices", c_ushort),
("number_feature_button_caps", c_ushort),
("number_feature_value_caps", c_ushort),
("number_feature_data_indices", c_ushort)
]
class HIDP_BUTTON_CAPS(Structure):
class RANGE_NOT_RANGE(Union):
class RANGE(Structure):
_fields_ = [
("usage_min", c_ushort), ("usage_max", c_ushort),
("string_min", c_ushort), ("string_max", c_ushort),
("designator_min", c_ushort),("designator_max", c_ushort),
("data_index_min", c_ushort), ("data_index_max", c_ushort)
]
class NOT_RANGE(Structure):
_fields_ = [
("usage", c_ushort), ("reserved1", c_ushort),
("string_index", c_ushort), ("reserved2", c_ushort),
("designator_index", c_ushort), ("reserved3", c_ushort),
("data_index", c_ushort), ("reserved4", c_ushort)
]
_fields_ = [
("range", RANGE),
("not_range", NOT_RANGE)
]
_fields_ = [
("usage_page", c_ushort),
("report_id", c_ubyte),
("is_alias", BOOLEAN),
("bit_field", c_ushort),
("link_collection", c_ushort),
("link_usage", c_ushort),
("link_usage_page", c_ushort),
("is_range", BOOLEAN),
("is_string_range", BOOLEAN),
("is_designator_range", BOOLEAN),
("is_absolute", BOOLEAN),
("reserved", c_ulong * 10),
("union", RANGE_NOT_RANGE)
]
class HIDP_VALUE_CAPS(Structure):
class RANGE_NOT_RANGE(Union):
class RANGE(Structure):
_fields_ = [
("usage_min", c_ushort), ("usage_max", c_ushort),
("string_min", c_ushort), ("string_max", c_ushort),
("designator_min", c_ushort),("designator_max", c_ushort),
("data_index_min", c_ushort), ("data_index_max", c_ushort)
]
class NOT_RANGE(Structure):
_fields_ = [
("usage", c_ushort), ("reserved1", c_ushort),
("string_index", c_ushort), ("reserved2", c_ushort),
("designator_index", c_ushort), ("reserved3", c_ushort),
("data_index", c_ushort), ("reserved4", c_ushort)
]
_fields_ = [
("range", RANGE),
("not_range", NOT_RANGE)
]
_fields_ = [
("usage_page", c_ushort),
("report_id", c_ubyte),
("is_alias", BOOLEAN),
("bit_field", c_ushort),
("link_collection", c_ushort),
("link_usage", c_ushort),
("link_usage_page", c_ushort),
("is_range", BOOLEAN),
("is_string_range", BOOLEAN),
("is_designator_range", BOOLEAN),
("is_absolute", BOOLEAN),
("has_null", BOOLEAN),
("reserved", c_ubyte),
("bit_size", c_ushort),
("report_count", c_ushort),
("reserved2", c_ushort * 5),
("units_exp", c_ulong),
("units", c_ulong),
("logical_min", c_long),
("logical_max", c_long),
("physical_min", c_long),
("physical_max", c_long),
("union", RANGE_NOT_RANGE)
]
class HIDP_DATA(Structure):
class HIDP_DATA_VALUE(Union):
_fields_ = [
("raw_value", c_ulong),
("on", BOOLEAN),
]
_fields_ = [
("data_index", c_ushort),
("reserved", c_ushort),
("value", HIDP_DATA_VALUE)
]
#get report
HidP_Input = 0x0000
HidP_Output = 0x0001
HidP_Feature = 0x0002
FACILITY_HID_ERROR_CODE = 0x11
def HIDP_ERROR_CODES(sev, code):
return (((sev) << 28) | (FACILITY_HID_ERROR_CODE << 16) | (code)) & 0xFFFFFFFF
class HidStatus(object):
HIDP_STATUS_SUCCESS = ( HIDP_ERROR_CODES(0x0, 0) )
HIDP_STATUS_NULL = ( HIDP_ERROR_CODES(0x8, 1) )
HIDP_STATUS_INVALID_PREPARSED_DATA = ( HIDP_ERROR_CODES(0xC, 1) )
HIDP_STATUS_INVALID_REPORT_TYPE = ( HIDP_ERROR_CODES(0xC, 2) )
HIDP_STATUS_INVALID_REPORT_LENGTH = ( HIDP_ERROR_CODES(0xC, 3) )
HIDP_STATUS_USAGE_NOT_FOUND = ( HIDP_ERROR_CODES(0xC, 4) )
HIDP_STATUS_VALUE_OUT_OF_RANGE = ( HIDP_ERROR_CODES(0xC, 5) )
HIDP_STATUS_BAD_LOG_PHY_VALUES = ( HIDP_ERROR_CODES(0xC, 6) )
HIDP_STATUS_BUFFER_TOO_SMALL = ( HIDP_ERROR_CODES(0xC, 7) )
HIDP_STATUS_INTERNAL_ERROR = ( HIDP_ERROR_CODES(0xC, 8) )
HIDP_STATUS_I8042_TRANS_UNKNOWN = ( HIDP_ERROR_CODES(0xC, 9) )
HIDP_STATUS_INCOMPATIBLE_REPORT_ID = ( HIDP_ERROR_CODES(0xC, 0xA) )
HIDP_STATUS_NOT_VALUE_ARRAY = ( HIDP_ERROR_CODES(0xC, 0xB) )
HIDP_STATUS_IS_VALUE_ARRAY = ( HIDP_ERROR_CODES(0xC, 0xC) )
HIDP_STATUS_DATA_INDEX_NOT_FOUND = ( HIDP_ERROR_CODES(0xC, 0xD) )
HIDP_STATUS_DATA_INDEX_OUT_OF_RANGE = ( HIDP_ERROR_CODES(0xC, 0xE) )
HIDP_STATUS_BUTTON_NOT_PRESSED = ( HIDP_ERROR_CODES(0xC, 0xF) )
HIDP_STATUS_REPORT_DOES_NOT_EXIST = ( HIDP_ERROR_CODES(0xC, 0x10) )
HIDP_STATUS_NOT_IMPLEMENTED = ( HIDP_ERROR_CODES(0xC, 0x20) )
error_message_dict = {
HIDP_STATUS_SUCCESS : "success",
HIDP_STATUS_NULL : "null",
HIDP_STATUS_INVALID_PREPARSED_DATA : "invalid preparsed data",
HIDP_STATUS_INVALID_REPORT_TYPE : "invalid report type",
HIDP_STATUS_INVALID_REPORT_LENGTH : "invalid report length",
HIDP_STATUS_USAGE_NOT_FOUND : "usage not found",
HIDP_STATUS_VALUE_OUT_OF_RANGE : "value out of range",
HIDP_STATUS_BAD_LOG_PHY_VALUES : "bad log phy values",
HIDP_STATUS_BUFFER_TOO_SMALL : "buffer too small",
HIDP_STATUS_INTERNAL_ERROR : "internal error",
HIDP_STATUS_I8042_TRANS_UNKNOWN : "i8042/I8242 trans unknown",
HIDP_STATUS_INCOMPATIBLE_REPORT_ID : "incompatible report ID",
HIDP_STATUS_NOT_VALUE_ARRAY : "not value array",
HIDP_STATUS_IS_VALUE_ARRAY : "is value array",
HIDP_STATUS_DATA_INDEX_NOT_FOUND : "data index not found",
HIDP_STATUS_DATA_INDEX_OUT_OF_RANGE : "data index out of range",
HIDP_STATUS_BUTTON_NOT_PRESSED : "button not pressed",
HIDP_STATUS_REPORT_DOES_NOT_EXIST : "report does not exist",
HIDP_STATUS_NOT_IMPLEMENTED : "not implemented"
}
def __init__(self, error_code):
error_code &= 0xFFFFFFFF
self.error_code = error_code
if error_code != self.HIDP_STATUS_SUCCESS:
if error_code in self.error_message_dict:
raise helpers.HIDError("hidP error: %s" % self.error_message_dict[error_code])
else:
raise helpers.HIDError("Unknown HidP error (%s)"%hex(error_code))
#*****************
# kernel32
#
#wait for single object
WAIT_ABANDONED = 0x00000080 # mutex used by another thread
WAIT_OBJECT_0 = 0x00000000 # signaled
WAIT_TIMEOUT = 0x00000102 # object signal timed out
WAIT_FAILED = 0xFFFFFFFF #failed
INFINITE = 0xFFFFFFFF
GENERIC_READ = (-2147483648)
GENERIC_WRITE = (1073741824)
FILE_SHARE_READ = 1
FILE_SHARE_WRITE = 2
#
OPEN_EXISTING = 3
OPEN_ALWAYS = 4
#
INVALID_HANDLE_VALUE = HANDLE(-1)
FILE_FLAG_OVERLAPPED = 1073741824
FILE_ATTRIBUTE_NORMAL = 128
#
NO_ERROR = 0
ERROR_IO_PENDING = 997
def GetHidGuid():
"Get system-defined GUID for HIDClass devices"
hid_guid = GUID()
hid_dll.HidD_GetHidGuid(byref(hid_guid))
return hid_guid
class DeviceInterfaceSetInfo(object):
"""Context manager for SetupDiGetClassDevs / SetupDiDestroyDeviceInfoList
resource allocation / cleanup
"""
def __init__(self, guid_target):
self.guid = guid_target
self.h_info = None
def __enter__(self):
"""Context manager initializer, calls self.open()"""
return self.open()
def open(self):
"""
Calls SetupDiGetClassDevs to obtain a handle to an opaque device
information set that describes the device interfaces supported by all
the USB collections currently installed in the system. The
application should specify DIGCF.PRESENT and DIGCF.INTERFACEDEVICE
in the Flags parameter passed to SetupDiGetClassDevs.
"""
self.h_info = SetupDiGetClassDevs(byref(self.guid), None, None,
(DIGCF.PRESENT | DIGCF.DEVICEINTERFACE) )
return self.h_info
def __exit__(self, exc_type, exc_value, traceback):
"""Context manager clean up, calls self.close()"""
self.close()
def close(self):
"""Destroy allocated storage"""
if self.h_info and self.h_info != INVALID_HANDLE_VALUE:
# clean up
SetupDiDestroyDeviceInfoList(self.h_info)
self.h_info = None
def enum_device_interfaces(h_info, guid):
"""Function generator that returns a device_interface_data enumerator
for the given device interface info and GUID parameters
"""
dev_interface_data = SP_DEVICE_INTERFACE_DATA()
dev_interface_data.cb_size = sizeof(dev_interface_data)
device_index = 0
while SetupDiEnumDeviceInterfaces(h_info,
None,
byref(guid),
device_index,
byref(dev_interface_data) ):
yield dev_interface_data
device_index += 1
del dev_interface_data
def get_device_path(h_info, interface_data, ptr_info_data = None):
""""Returns Hardware device path
Parameters:
h_info, interface set info handler
interface_data, device interface enumeration data
ptr_info_data, pointer to SP_DEVINFO_DATA() instance to receive details
"""
required_size = c_ulong(0)
dev_inter_detail_data = SP_DEVICE_INTERFACE_DETAIL_DATA()
dev_inter_detail_data.cb_size = sizeof(SP_DEVICE_INTERFACE_DETAIL_DATA)
# get actual storage requirement
SetupDiGetDeviceInterfaceDetail(h_info, byref(interface_data),
None, 0, byref(required_size),
None)
ctypes.resize(dev_inter_detail_data, required_size.value)
# read value
SetupDiGetDeviceInterfaceDetail(h_info, byref(interface_data),
byref(dev_inter_detail_data), required_size, None,
ptr_info_data)
# extract string only
return dev_inter_detail_data.get_string()
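# A sketch of enumerating HID device paths with the wrappers above (Windows-only,
# so shown as comments rather than executable code):
#
#     guid = GetHidGuid()
#     with DeviceInterfaceSetInfo(guid) as h_info:
#         for interface_data in enum_device_interfaces(h_info, guid):
#             print(get_device_path(h_info, interface_data))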
|
StarcoderdataPython
|
1731372
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from os import path as op
from uuid import uuid4
import numpy as np
import nibabel as nib
from matplotlib import pyplot as plt
from svgutils.transform import fromstring
from seaborn import color_palette
from nipype.interfaces.base import (
traits,
File,
isdefined,
)
from niworkflows.interfaces.report_base import ReportingInterface, _SVGReportCapableInputSpec
from niworkflows.viz.utils import compose_view, extract_svg, cuts_from_bbox
from nilearn.plotting import plot_epi, plot_anat
from ...utils import nvol
from ...resource import get as getresource
class PlotInputSpec(_SVGReportCapableInputSpec):
in_file = File(exists=True, mandatory=True, desc="volume")
mask_file = File(exists=True, mandatory=True, desc="mask")
label = traits.Str()
class PlotEpi(ReportingInterface):
input_spec = PlotInputSpec
def _generate_report(self):
in_img = nib.load(self.inputs.in_file)
assert nvol(in_img) == 1
mask_img = nib.load(self.inputs.mask_file)
assert nvol(mask_img) == 1
label = None
if isdefined(self.inputs.label):
label = self.inputs.label
compress = self.inputs.compress_report
n_cuts = 7
cuts = cuts_from_bbox(mask_img, cuts=n_cuts)
img_vals = in_img.get_fdata()[np.asanyarray(mask_img.dataobj).astype(bool)]
vmin = img_vals.min()
vmax = img_vals.max()
outfiles = []
for dimension in ["z", "y", "x"]:
display = plot_epi(
in_img,
draw_cross=False,
display_mode=dimension,
cut_coords=cuts[dimension],
title=label,
vmin=vmin,
vmax=vmax,
colorbar=(dimension == "z"),
cmap=plt.cm.gray,
)
display.add_contours(mask_img, levels=[0.5], colors="r")
label = None # only on first
svg = extract_svg(display, compress=compress)
svg = svg.replace("figure_1", str(uuid4()), 1)
outfiles.append(fromstring(svg))
self._out_report = op.abspath(self.inputs.out_report)
compose_view(bg_svgs=outfiles, fg_svgs=None, out_file=self._out_report)
class PlotRegistrationInputSpec(PlotInputSpec):
template = traits.Str(mandatory=True)
class PlotRegistration(ReportingInterface):
input_spec = PlotRegistrationInputSpec
def _generate_report(self):
in_img = nib.load(self.inputs.in_file)
assert nvol(in_img) == 1
mask_img = nib.load(self.inputs.mask_file)
assert nvol(mask_img) == 1
template = self.inputs.template
parc_file = getresource(f"tpl-{template}_RegistrationCheckOverlay.nii.gz")
assert parc_file is not None
parc_img = nib.load(parc_file)
levels = np.unique(np.asanyarray(parc_img.dataobj).astype(np.int32))
levels = (levels[levels > 0] - 0.5).tolist()
colors = color_palette("husl", len(levels))
label = None
if isdefined(self.inputs.label):
label = self.inputs.label
compress = self.inputs.compress_report
n_cuts = 7
cuts = cuts_from_bbox(mask_img, cuts=n_cuts)
outfiles = []
for dimension in ["z", "y", "x"]:
display = plot_anat(
in_img,
draw_cross=False,
display_mode=dimension,
cut_coords=cuts[dimension],
title=label,
)
display.add_contours(parc_img, levels=levels, colors=colors, linewidths=0.25)
display.add_contours(mask_img, levels=[0.5], colors="r", linewidths=0.5)
label = None # only on first
svg = extract_svg(display, compress=compress)
svg = svg.replace("figure_1", str(uuid4()), 1)
outfiles.append(fromstring(svg))
self._out_report = op.abspath(self.inputs.out_report)
compose_view(bg_svgs=outfiles, fg_svgs=None, out_file=self._out_report)
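# --- Usage sketch (added for illustration; the file paths below are hypothetical) ---
# Both interfaces follow the usual nipype pattern: assign inputs, call run(),
# then read the SVG written to out_report (a trait inherited from the
# SVG-report-capable input spec).
def _example_reports():
    PlotEpi(
        in_file="mean_bold.nii.gz",
        mask_file="brain_mask.nii.gz",
        out_report="epi_report.svg",
        label="mean EPI",
    ).run()
    PlotRegistration(
        in_file="t1w_space-MNI.nii.gz",
        mask_file="brain_mask_space-MNI.nii.gz",
        template="MNI152NLin2009cAsym",
        out_report="registration_report.svg",
    ).run()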
|
StarcoderdataPython
|
3562830
|
import logging
import textwrap
import traceback
import json
import os
import discode
with open("env.json", "r") as env_file:
env = json.load(env_file)
token = env.get("BOT_TOKEN", os.environ.get("BOT_TOKEN"))
owner_ids = (
859996173943177226,
739443421202087966,
551257232143024139,
685082846993317953,
611448893904781312,
)
bot = discode.Client(token=token)
logger = logging.getLogger("discode")
logger.setLevel(logging.NOTSET)
def get_info():
ret = f"Intents: {bot.intents.value}\nLatency: {round(bot.latency * 1000, 2)}ms\nGuilds: {len(bot.guilds)}\nUsers: {len(bot.users)}"
return ret
@discode.utils.async_function
def run_docs(make=False):
os.system('python -m pip install sphinx furo')
if make:
os.system("cd docs\nmake html\ncd\ncd discode")
return os.system("python -m http.server -d docs/_build/html")
@bot.on_event(discode.GatewayEvent.READY)
async def on_ready():
print(get_info())
print(bot.user, "is ready!")
await run_docs(make=True)
@bot.on_event(discode.GatewayEvent.SHARD_READY)
async def on_shard_ready(shard_id):
print(f"SHARD ID {shard_id} is ready!")
@bot.on_event(discode.GatewayEvent.MESSAGE_CREATE)
async def on_message(message: discode.Message):
if len(message.content) == 0:
return
print(message.author, ":", message.content)
msg = message.content
if msg.startswith("d!choose"):
channel = message.channel
await channel.send("Choose between foo and bar!")
async def hi_check(_message: discode.Message):
choice = str(_message).lower()
if (
(choice in ("foo", "bar"))
and _message.author_id == message.author_id
and _message.channel_id == channel.id
):
await channel.send(f"Your choice is {choice}!")
return True
try:
await bot.wait_for("message_create", check=hi_check)
except:
await channel.send("You did not reply in time!")
elif msg.startswith("d!invite"):
embed = discode.Embed(title="Invite Discode!", description=bot.invite_url)
await message.channel.send(embeds=(embed,))
elif msg.startswith("d!ratelimit"):
channel = message.channel
if message.author_id not in owner_ids:
return await channel.send("Only owners can do this sus")
for r in range(10):
await channel.send(r + 1)
elif msg.startswith("d!eval"):
if message.author_id not in owner_ids:
return await message.channel.send("Only owners can do this sus")
try:
data = msg[6:]
args = {
**globals(),
"message": message,
"author": message.author,
"channel": message.channel,
"guild": message.guild,
"client": bot,
"imp": __import__,
}
data = data.replace("return", "yield").replace("”", '"').replace("“", '"')
if data.startswith(" "):
data = data[1:]
split = data.splitlines()
if len(split) == 1:
if not data.startswith("yield"):
data = f"yield {data}"
data = textwrap.indent(data, " ")
exec(f"async def func():\n{data}", args)
async for resp in eval("func()", args):
resp = str(resp).replace(token, "[TOKEN]")
await message.channel.send(resp)
except:
error = traceback.format_exc()
await message.channel.send(
embed=discode.Embed(
title="Uh Oh!",
description=f"```py\n{error}```",
)
)
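# Illustrative note (added): for a message like "d!eval return 1 + 1" the
# handler above rewrites the body into roughly
#     async def func():
#         yield 1 + 1
# exec's it, and then iterates the resulting async generator so that every
# yielded value is sent back to the channel with the bot token redacted.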
@bot.on_event("message_update")
async def on_message_edit(before: discode.Message, after: discode.Message):
print(f"{before.author} edited their message from {before.content!r} to {after.content!r}")
bot.run()
|
StarcoderdataPython
|
11216315
|
import json
import os
import torch
from transformers import RobertaTokenizer, RobertaForQuestionAnswering
AZUREML_MODEL_DIR = "AZUREML_MODEL_DIR"
ROBERTA_BASE = 'roberta-base'
def init():
global model, tokenizer
model_path = os.path.join(os.getenv(AZUREML_MODEL_DIR), ROBERTA_BASE)
model = RobertaForQuestionAnswering.from_pretrained(model_path)
tokenizer = RobertaTokenizer.from_pretrained(model_path)
print("This is init")
def run(data):
json_data = json.loads(data)
text = json_data['text']
text = text[:500]
question = json_data['question']
inputs = tokenizer(question, text, return_tensors='pt')
input_ids = tokenizer.encode(question, text)
tokens = tokenizer.convert_ids_to_tokens(input_ids)
def fix_token(token):
if token.startswith('Ġ'):
token = ' ' + token[1:]
return token
tokens = [fix_token(token) for token in tokens]
outputs = model(**inputs) # this is where model runs
start_scores = outputs.start_logits
end_scores = outputs.end_logits
answer_starts = torch.argsort(-start_scores)
answer_ends = torch.argsort(-end_scores)
print(answer_starts)
print('-------------')
print(answer_ends)
final_answers = []
for answer_start, answer_end in zip(answer_starts[0], answer_ends[0][:10]):
print((answer_start, answer_end))
if answer_end >= answer_start:
answer = "".join(tokens[answer_start:answer_end + 1])
print("\nQuestion:\n{}".format(question.capitalize()))
print("\nAnswer:\n{}.".format(answer.capitalize()))
final_answers.append({'answer': answer}) # , 'start': answer_start, 'end': answer_end})
else:
print("I am unable to find the answer to this question. Can you please ask another question?")
return final_answers
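# --- Local test sketch (added for illustration; not part of the deployed entry script) ---
# Azure ML calls init() once and then run() per request. To exercise the same
# logic locally, point AZUREML_MODEL_DIR at a directory that contains a
# "roberta-base" checkpoint; the directory and payload below are assumptions,
# not real values.
def _local_smoke_test(model_dir="/path/to/models"):
    os.environ[AZUREML_MODEL_DIR] = model_dir
    init()
    payload = json.dumps({
        "text": "Transformers provide thousands of pretrained models for text tasks.",
        "question": "What do transformers provide?",
    })
    print(run(payload))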
|
StarcoderdataPython
|
8109395
|
from phue import Bridge
import tkinter as tk
from tkinter import *
class HueApp:
def __init__(self, parent):
self.bridge = Bridge('192.168.0.4')
self.lights = self.bridge.get_light_objects('name')
self.root = parent
self.labels = {}
self.vars = {}
self.scaleValues = {}
self.names = []
self.mousePressed = 0
self.top = tk.Frame(parent)
self.root.title("Light Control")
self.top.bind("<Button-1>", self.left_click)
self.top.bind("<ButtonRelease-1>", self.left_release)
self.create_controls()
self.check_light()
self.top.pack()
def check_light(self):
if not self.mousePressed:
value = 0
for name, label in self.labels.items():
if self.lights[name].on:
self.vars[name].set("On")
label.configure(bg='green')
else:
self.vars[name].set("Off")
label.configure(bg='red')
self.top.after(500, self.check_light)
def dim(self, percent, name):
if self.lights[name].on:
self.bridge.set_light(name, 'on', True)
value = 254/100 * int(percent)
self.lights[name].brightness = int(value)
def light_on(self, name):
self.lights[name].on = True
def light_off(self, name):
self.lights[name].on = False
def left_click(self, event):
self.mousePressed = 1
def left_release(self, event):
self.mousePressed = 0
self.check_light()
def show(self):
self.root.update()
self.root.deiconify()
def create_controls(self):
buttons = []
scales = []
padx = 5
pady = 5
ipadx = 5
ipady = 5
for light in self.bridge.lights:
light_name = light.name
self.vars[light_name] = StringVar()
self.scaleValues[light_name] = IntVar()
label = Label(self.top, text=light_name, height=2)
buttons.append(Button(self.top, text="On", command=lambda arg=light_name: self.light_on(arg)))
buttons.append(Button(self.top, text="Off", command=lambda arg=light_name: self.light_off(arg)))
self.labels[light_name] = (Label(self.top, textvariable=self.vars[light_name]))
scales.append(Scale(self.top, orient=HORIZONTAL, length=200, repeatdelay=300, command=lambda arg=0, arg2=light_name: self.dim(arg, arg2)))
self.names.append(light_name)
label.grid(row=light.light_id, column=1, padx=padx, pady=pady, ipadx=ipadx, ipady=ipady)
buttons[-2].grid(row=light.light_id, column=2, padx=padx, pady=pady, ipadx=ipadx, ipady=ipady)
buttons[-1].grid(row=light.light_id, column=3, padx=padx, pady=pady, ipadx=ipadx, ipady=ipady)
self.labels[light_name].grid(row=light.light_id, column=4, padx=padx, pady=pady, ipadx=ipadx, ipady=ipady)
scales[-1].grid(row=light.light_id, column=5, padx=padx, pady=pady)
# Set scale to correct value
value = self.lights[light_name].brightness / 254 * 100
scales[-1].set(value)
if __name__ == "__main__":
root = tk.Tk()
hue_app = HueApp(root)
root.geometry('600x500+-1200+300')
root.mainloop()
|
StarcoderdataPython
|
3211758
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2020/2/12 15:57
# @Author : Vodka0629
# @Email : <EMAIL>, <EMAIL>
# @FileName: player.py
# @Software: Mahjong II
# @Blog :
from random import randint, choice
from mahjong.error import *
from mahjong.expose import Expose
from mahjong.mj_math import MjMath
from mahjong.mj_set import MjSet
from mahjong.rule import Rule
from mahjong.suit import Suit
from mahjong.tile import Tile
from setting import Setting
from sprite import *
class Player(object):
__slots__ = ('_nick', '_concealed', '_exposed', '_discarded', '_desk', 'discarding', 'flowers',
'_state', '_position',
'_coin', '_is_viewer', '_viewer_position',
'_draw_count', '_discard_by_random_count',
# for pygame screen
"_screen", "_font", "clock",
'current_index', 'current_tiles',
"concealed_group", "discarded_group", "exposed_group", "flowers_group",
"info_group", 'avatar', "all_group",
# for sound
"_sound_discard", "_sound_draw_stack",
"_sound_kong", "_sound_chow", "_sound_pong",
"_sound_hu", "_sound_self_hu",
)
def __init__(self, nick="Eric", coin: int = 0, is_viewer: bool = False, viewer_position: str = '东',
screen=None, clock=None):
self._nick = nick
self._concealed: list = []
self._exposed: list = []
self._discarded: list = []
self._desk: list = []
self.flowers: list = []
self.discarding = None
self._state: str = ''
self._position: str = ''
self._coin = coin
self._is_viewer = is_viewer
self._viewer_position = viewer_position
self.info_group = pygame.sprite.Group()
self.concealed_group = pygame.sprite.Group()
self.exposed_group = pygame.sprite.Group()
self.discarded_group = pygame.sprite.Group()
self.flowers_group = pygame.sprite.Group()
avatar = Setting.avatar_base + nick + ".png"
img = pygame.image.load(avatar)
self.avatar = pygame.transform.scale(img, (64, 64))
self.all_group = pygame.sprite.Group()
self._screen = screen
self.clock = clock
self.current_index = -1
self.current_tiles = []
# self._font = pygame.font.SysFont(Setting.font, 16)
# self._font.set_bold(True)
self._discard_by_random_count = 0
self._draw_count = 0
self._sound_discard = pygame.mixer.Sound(Setting.sound_base + Setting.sound_discard)
self._sound_draw_stack = pygame.mixer.Sound(Setting.sound_base + Setting.sound_draw_stack)
self._sound_chow = pygame.mixer.Sound(Setting.sound_base + Setting.sound_chow)
self._sound_pong = pygame.mixer.Sound(Setting.sound_base + Setting.sound_pong)
self._sound_kong = pygame.mixer.Sound(Setting.sound_base + Setting.sound_kong)
self._sound_hu = pygame.mixer.Sound(Setting.sound_base + Setting.sound_hu)
self._sound_self_hu = pygame.mixer.Sound(Setting.sound_base + Setting.sound_self_hu)
def __str__(self):
return self._nick
@property
def nick(self):
return self._nick
@property
def position(self):
return self._position
@position.setter
def position(self, value):
self._position = value
@property
def concealed(self):
return self._concealed
@property
def concealed_str(self):
arr = [f'{x}' for x in self._concealed]
text = ",".join(arr)
return text
@property
def exposed(self):
return self._exposed
@property
def exposed_str(self):
arr = [f'={x}=' for x in self._exposed]
return ','.join(arr)
@property
def discarded(self):
return self._discarded
@property
def desk(self):
return self._desk
@property
def desk_str(self):
arr = [f'{x}' for x in self._desk]
text = ",".join(arr)
return text
@property
def discard_by_random_count(self):
return self._discard_by_random_count
@property
def draw_count(self):
return self._draw_count
@property
def is_viewer(self):
return self._is_viewer
@is_viewer.setter
def is_viewer(self, value):
self._is_viewer = value
@property
def viewer_position(self):
return self._viewer_position
@viewer_position.setter
def viewer_position(self, value):
self._viewer_position = value
@property
def screen(self):
return self._screen
@screen.setter
def screen(self, value):
self._screen = value
@property
def coin(self):
return self._coin
@coin.setter
def coin(self, value):
self._coin = value
def reset(self):
self._concealed = []
self._exposed = []
self._discarded = []
self.flowers = []
self._desk = []
self._discard_by_random_count = 0
self.discarding = None
self.current_index = -1
self.current_tiles = []
def draw(self, mj_set: MjSet):
tile = mj_set.draw()
if not tile:
print("mj_set.draw() None")
return None
if not Rule.is_flower(tile):
self.add(tile)
self._draw_count += 1
return tile
print(self, 'get a flower:', tile)
self.flowers.append(tile)
tile = self.draw_from_back(mj_set)
print("draw_from_back:", tile)
return tile
def draw_from_back(self, mj_set: MjSet):
tile = mj_set.draw_from_back()
if not tile:
print("mj_set.draw_from_back() None")
return None
while Rule.is_flower(tile) and mj_set.tiles:
# (self, 'get a flower from back:', tile)
self.flowers.append(tile)
self._draw_count += 1
tile = mj_set.draw_from_back()
if not tile:
print("mj_set.draw_from_back() is_flower None")
return None
self.add(tile)
return tile
def draw_stack(self, mj_set: MjSet):
for _ in range(4):
self.draw(mj_set)
def add(self, tile: Tile):
self._concealed.append(tile)
def put_on_desk(self, tile: Tile):
self._desk.append(tile)
def discard(self, tile: Tile):
if tile not in self._concealed:
raise LookupError(f"{self.nick} have not tile:{tile}")
self._concealed.remove(tile)
self._discarded.append(tile)
self.discarding = tile
return tile
def decide_discard(self) -> Tile:
return self.decide_discard_random()
def decide_discard_random(self):
if not self.concealed:
raise ValueError(f"{self.nick} have no concealed tiles!")
# finally, random discard one
tile = choice(self.concealed)
self._discard_by_random_count += 1
return tile
def decide_discard_by_random_orphan(self):
arr = Rule.convert_tiles_to_arr(self.concealed)
orphans_arr = MjMath.get_orphans(arr)
if not orphans_arr:
return None
one = choice(orphans_arr)
tile = Rule.convert_key_to_tile(one)
return tile
def sort_concealed(self):
arr = Rule.convert_tiles_to_arr(self._concealed)
arr.sort()
self._concealed = Rule.convert_arr_to_tiles(arr)
@staticmethod
def throw_dice():
return randint(1, 6)
@staticmethod
def throw_two_dice():
return tuple((randint(1, 6), randint(1, 6)))
def try_conceal_kong(self, mj_set: MjSet) -> bool:
if not self.concealed:
return False
if not mj_set.tiles:
return False
test_tiles = list(set(self.concealed))
for x in test_tiles:
count = self.concealed.count(x)
if count >= 4:
# concealed kong when possible
self.concealed_kong(tile=x, mj_set=mj_set)
return True
return False
def concealed_kong(self, tile: Tile, mj_set: MjSet):
self._sound_kong.play()
count = 0
inners = []
for x in self.concealed[::-1]:
if x.key == tile.key:
count += 1
inners.append(x)
self.concealed.remove(x)
if count >= 4:
break
expose = Expose(expose_type='concealed kong', inners=inners, outer=None, outer_owner=None)
self._exposed.append(expose)
tile = self.draw_from_back(mj_set)
if not tile:
raise OutOfTilesError()
# 杠上开花: win on the replacement tile drawn after the kong
if self.try_mahjong():
raise HaveWinnerError(winner=self)
def try_mahjong(self, tile=None) -> bool:
test = self.concealed[:]
if tile:
test.append(tile)
if Rule.is_mahjong(test):
return True
return False
def try_exposed_kong_from_exposed_pong(self, mj_set: MjSet) -> bool:
tile = self.concealed[-1]
if not mj_set.tiles:
return False
if not self.exposed:
return False
for expose in self.exposed:
if expose.expose_type == 'exposed pong' and expose.outer.key == tile.key:
# 有杠就杠: declare the kong whenever one is available
return self.exposed_kong_from_exposed_pong(tile=tile, expose=expose, mj_set=mj_set)
return False
def exposed_kong_from_exposed_pong(self, tile: Tile, expose: Expose, mj_set: MjSet) -> bool:
self._sound_kong.play()
expose.expose_type = 'exposed kong from exposed pong'
expose.inners.append(tile)
expose.all.append(tile)
self.concealed.pop()
tile = self.draw_from_back(mj_set)
if not tile:
raise OutOfTilesError()
# 杠上开花: win on the replacement tile drawn after the kong
if self.try_mahjong():
raise HaveWinnerError(winner=self)
return True
def try_exposed_kong(self, tile: Tile, owner, mj_set: MjSet) -> bool:
count = 0
for test in self._concealed:
if tile.key == test.key:
count += 1
if count == 3:
self.exposed_kong(tile=tile, owner=owner, mj_set=mj_set)
return True
return False
def exposed_kong(self, tile: Tile, owner, mj_set: MjSet):
count = 0
inner = []
for test in self._concealed[::-1]:
if tile.key == test.key:
inner.append(test)
self._concealed.remove(test)
count += 1
if count == 3:
break
if count < 3:
raise ValueError(f"{self.nick} don't have enough {tile}!")
expose = Expose('exposed kong', inners=inner, outer=tile, outer_owner=owner)
self._exposed.append(expose)
self._sound_kong.play()
tile = self.draw_from_back(mj_set)
if not tile:
raise OutOfTilesError()
# 杠上开花: win on the replacement tile drawn after the kong
if self.try_mahjong():
raise HaveWinnerError(winner=self)
def try_exposed_pong(self, tile: Tile, owner) -> bool:
count = 0
for test in self._concealed:
if tile.key == test.key:
count += 1
if count >= 2:
self.exposed_pong(tile=tile, owner=owner)
return True
return False
def exposed_pong(self, tile: Tile, owner):
count = 0
inner = []
for test in self._concealed[::-1]:
if tile.key == test.key:
inner.append(test)
self._concealed.remove(test)
count += 1
if count == 2:
break
if count < 2:
raise ValueError(f"{self.nick} don't have enough {tile}!")
expose = Expose('exposed pong', inners=inner, outer=tile, outer_owner=owner)
self._exposed.append(expose)
def try_exposed_chow(self, tile: Tile, owner) -> bool:
arr = Rule.convert_tiles_to_arr(self.concealed)
outer = tile.key
# 胡吃一气: chow whenever possible
combins = MjMath.get_chow_combins_from_arr(arr=arr, outer=outer)
if not combins:
return False
combin = choice(combins)
inners = Rule.convert_arr_to_tiles(combin)
self.exposed_chow(inners=inners, tile=tile, owner=owner)
return True
def exposed_chow(self, inners, tile, owner):
if len(inners) != 2:
raise ValueError(f"self_tiles length should be 2:{inners}")
for x in inners:
for test in self._concealed[::-1]:
if x.key == test.key:
self._concealed.remove(test)
break
expose = Expose('exposed chow', inners=inners, outer=tile, outer_owner=owner)
self._exposed.append(expose)
def draw_info(self):
self.info_group.empty()
centerx = Setting.win_w // 2
bottom = Setting.win_h // 2 - 100
# draw player's position
# file_name = Setting.sprite_base + Suit.Wind[self.position]['eng'] + ".png"
# image = pygame.image.load(file_name)
# sprite = pygame.sprite.Sprite()
# sprite.image = image
# rect = image.get_rect()
# rect.centerx = centerx
# rect.bottom = bottom
# sprite.rect = rect
# self.info_group.add(sprite)
# draw player's discarding tile
if self.discarding:
# centerx += sprite.rect.w
file_name = Setting.tile_img_path + self.discarding.img
image = pygame.image.load(file_name)
sprite = pygame.sprite.Sprite()
sprite.image = image
rect = image.get_rect()
rect.centerx = centerx
rect.bottom = bottom
sprite.rect = rect
self.info_group.add(sprite)
def draw_concealed(self, state: str = ''):
self.concealed_group.empty()
left = Setting.concealed_left
bottom = Setting.concealed_bottom
for index, tile in enumerate(self.concealed):
sprite = pygame.sprite.Sprite()
if self._is_viewer or state == 'scoring':
image = pygame.image.load(Setting.tile_img_path + tile.img)
else:
image = pygame.image.load(Setting.tile_img_path + Setting.face_down_img)
rect = image.get_rect()
if not self._is_viewer:
rect.w, rect.h = rect.w // 4 * 3, rect.h // 4 * 3
image = pygame.transform.scale(image, (rect.w, rect.h))
sprite.image = image
sprite.rect = rect
sprite.rect.left = left
sprite.rect.bottom = bottom
if self.is_viewer and index in self.current_tiles:
sprite.rect.bottom += Setting.current_jump
self.concealed_group.add(sprite)
left += sprite.rect.width
def draw_discard(self, state: str = ''):
self.discarded_group.empty()
left = Setting.discarded_left
bottom = Setting.discarded_bottom
for index, tile in enumerate(self.desk):
sprite = pygame.sprite.Sprite()
image = pygame.image.load(Setting.tile_img_path + tile.img)
rect = image.get_rect()
rect.w, rect.h = rect.w // 4 * 3, rect.h // 4 * 3
image = pygame.transform.scale(image, (rect.w, rect.h))
sprite.image = image
sprite.rect = rect
sprite.rect.left = left
sprite.rect.bottom = bottom
self.discarded_group.add(sprite)
left += rect.width
if index != 0 and (index + 1) % Setting.discarded_line_limit == 0:
left += min(rect.width, rect.height)
if index != 0 and (index + 1) % (Setting.discarded_line_limit * 2) == 0:
left = Setting.discarded_left
bottom += max(rect.w, rect.h) + 5
def draw_flowers(self, state: str = ''):
self.flowers_group.empty()
left = Setting.flowers_left
bottom = Setting.discarded_bottom
for index, tile in enumerate(self.flowers):
sprite = pygame.sprite.Sprite()
image = pygame.image.load(Setting.tile_img_path + tile.img)
rect = image.get_rect()
rect.w, rect.h = rect.w // 2, rect.h // 2
image = pygame.transform.scale(image, (rect.w, rect.h))
sprite.image = image
sprite.rect = rect
sprite.rect.right = left
sprite.rect.bottom = bottom
self.flowers_group.add(sprite)
left -= rect.width
if index != 0 and (index + 1) % (Setting.flower_line_limit) == 0:
left = Setting.flowers_left
bottom += max(rect.w, rect.h) + 5
def draw_exposed(self, state: str = ''):
self.exposed_group.empty()
left = Setting.exposed_left
bottom = Setting.exposed_bottom
for exposed in self.exposed:
space_width = 0
for i2, tile in enumerate(exposed.all):
sprite = pygame.sprite.Sprite()
adjust = 0 # adjust for exposed chow / pong / kong
image = pygame.image.load(Setting.tile_img_path + tile.img)
# lay down the tile for chow
if exposed.expose_type == "exposed chow":
if tile == exposed.outer:
adjust = 90
# lay down the tile for pong
if exposed.expose_type == "exposed pong":
if exposed.outer_owner.position == Suit.get_before_wind(self.position):
if i2 == 0:
adjust = 90
if exposed.outer_owner.position == Suit.get_next_wind(self.position):
if i2 == 2:
adjust = 90
if exposed.outer_owner.position != Suit.get_before_wind(self.position) and \
exposed.outer_owner.position != Suit.get_next_wind(self.position):
if i2 == 1:
adjust = 90
# lay down the tile for kong
if exposed.expose_type in ["exposed kong", "exposed kong from exposed pong"]:
if exposed.outer_owner.position == Suit.get_before_wind(self.position):
if i2 == 0:
adjust = 90
if exposed.outer_owner.position == Suit.get_next_wind(self.position):
if i2 == 2:
adjust = 90
if exposed.outer_owner.position != Suit.get_before_wind(self.position) and \
exposed.outer_owner.position != Suit.get_next_wind(self.position):
if i2 == 3:
adjust = 90
# cover the concealed kong
if exposed.expose_type == "concealed kong" and state != 'scoring':
image = pygame.image.load(Setting.tile_img_path + Setting.face_down_img)
pass
image = pygame.transform.rotate(image, adjust)
rect = image.get_rect()
rect.w, rect.h = rect.w // 4 * 3, rect.h // 4 * 3
image = pygame.transform.scale(image, (rect.w, rect.h))
sprite.image = image
sprite.rect = rect
sprite.rect.left = left
sprite.rect.bottom = bottom
self.exposed_group.add(sprite)
left += sprite.rect.width
space_width = min(sprite.rect.width, sprite.rect.height)
# end for an expose
left += space_width
# end for exposed
def adjust_screen_position_for_left_bottom(self):
self.all_group.empty()
for sprite in self.concealed_group:
self.all_group.add(sprite)
for sprite in self.exposed_group:
self.all_group.add(sprite)
for sprite in self.discarded_group:
self.all_group.add(sprite)
for sprite in self.flowers_group:
self.all_group.add(sprite)
for sprite in self.all_group:
img_original = sprite.image
rect_original = sprite.rect
rotated = None
rect = None
left, bottom = rect_original.left, rect_original.bottom
if self._is_viewer:
left += Setting.win_w_h_half
rotated = img_original
rect = rect_original
rect.left, rect.bottom = left, Setting.win_h - bottom
elif self.position == Suit.get_next_wind(self._viewer_position):
angel = 90
rotated = pygame.transform.rotate(img_original, angel)
rect = rotated.get_rect()
rect.bottom, rect.right = Setting.win_h - left, Setting.win_w - bottom
elif self.position == Suit.get_before_wind(self._viewer_position):
angel = 270
rotated = pygame.transform.rotate(img_original, angel)
rect = rotated.get_rect()
rect.top, rect.left = left, bottom
elif self.position == Suit.get_opposition_wind(self._viewer_position):
left += Setting.win_w_h_half
angel = 180
rotated = pygame.transform.rotate(img_original, angel)
rect = rotated.get_rect()
rect.right, rect.top = Setting.win_w - left, bottom
else:
sprite.kill()
sprite.image = rotated
sprite.rect = rect
def adjust_screen_position_for_centerx_bottom(self):
for sprite in self.info_group:
img_original = sprite.image
rect_original = sprite.rect
rotated = None
rect = None
centerx, bottom = rect_original.centerx, rect_original.bottom
if self._is_viewer:
rotated = img_original
rect = rect_original
rect.centerx, rect.bottom = centerx, Setting.win_h - bottom
elif self.position == Suit.get_next_wind(self._viewer_position):
angel = 90
rotated = pygame.transform.rotate(img_original, angel)
rect = rotated.get_rect()
rect.centery, rect.right = (Setting.win_h - Setting.win_w) // 2 + centerx, Setting.win_w - bottom
elif self.position == Suit.get_before_wind(self._viewer_position):
angel = 270
rotated = pygame.transform.rotate(img_original, angel)
rect = rotated.get_rect()
rect.centery, rect.left = (Setting.win_h - Setting.win_w) // 2 + centerx, bottom
elif self.position == Suit.get_opposition_wind(self._viewer_position):
angel = 180
rotated = pygame.transform.rotate(img_original, angel)
rect = rotated.get_rect()
rect.centerx, rect.top = centerx, bottom
else:
sprite.kill()
sprite.image = rotated
sprite.rect = rect
def draw_screen(self, state: str = ''):
if not self._screen:
return
self.draw_info()
self.draw_concealed(state)
self.draw_exposed(state)
self.draw_discard()
self.draw_flowers()
self.adjust_screen_position_for_centerx_bottom()
self.adjust_screen_position_for_left_bottom()
self.info_group.draw(self._screen)
self.all_group.draw(self._screen)
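# --- Usage sketch (added for illustration) ---
# Constructing a Player loads avatar images and sounds through pygame, so the
# asset paths in Setting must exist and pygame must be initialised before this
# sketch of a simple deal-and-discard round could actually run.
def _example_deal_and_discard():
    mj_set = MjSet()
    mj_set.shuffle()
    player = Player("Eric")
    for _ in range(3):
        player.draw_stack(mj_set)  # three stacks of four tiles
    player.draw(mj_set)            # plus one more tile
    player.sort_concealed()
    tile = player.decide_discard()
    player.discard(tile)
    print(player.concealed_str, "| discarding:", tile)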
def test_exposed_chow():
# mj_set = MjSet()
# player = Player("Eric")
# owner = Player("Nana")
# for _ in range(2):
# for x in range(3):
# mj_set.draw_from_back()
# player.draw_from_back(mj_set)
# for x in range(3):
# mj_set.draw_from_back()
# sample = mj_set.draw_from_back()
# for _ in range(3):
# for x in range(3):
# mj_set.draw_from_back()
# player.draw_from_back(mj_set)
#
# mj_set.shuffle()
# for _ in range(6):
# player.draw(mj_set)
# print(Rule.convert_tiles_to_str(player.concealed))
# print(sample)
# player.try_exposed_chow(sample, owner)
# print(','.join([f'{x.expose_type}:{x}' for x in player.exposed]))
print("test_exposed_chow")
pass
def main():
test_exposed_chow()
pass
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4815978
|
# This file should be located in the $HOME/.local/lib/python3.7/site-packages directory
import platform
def add1(n):
return n + 1
def os_version():
return platform.system() + " " + platform.release()
|
StarcoderdataPython
|
11336999
|
# -*- coding: utf-8 -*-
import unittest
import os
# prepare for test
os.environ['ANIMA_TEST_SETUP'] = ""
from anima.env import mayaEnv # to setup maya extensions
import pymel.core
from anima.edit import Sequence, Media, Video, Track, Clip, File
class SequenceManagerTestCase(unittest.TestCase):
"""tests the SequenceManagerExtension class
"""
def setUp(self):
"""set up the test
"""
# create a new scene and get the sequenceManager in the scene
pymel.core.newFile(force=True)
self.sm = pymel.core.PyNode('sequenceManager1')
def test_from_xml_path_argument_skipped(self):
"""testing if a TypeError will be raised when the path argument is
skipped
"""
sm = pymel.core.PyNode('sequenceManager1')
with self.assertRaises(TypeError) as cm:
sm.from_xml()
self.assertEqual(
cm.exception.message,
'from_xml() takes exactly 2 arguments (1 given)'
)
def test_from_xml_path_argument_is_not_a_string(self):
"""testing if a TypeError will be raised when the path argument is not
a string
"""
sm = pymel.core.PyNode('sequenceManager1')
with self.assertRaises(TypeError) as cm:
sm.from_xml(30)
self.assertEqual(
cm.exception.message,
'path argument in SequenceManager.from_xml should be a string, '
'not int'
)
def test_from_xml_path_argument_is_not_a_valid_path(self):
"""testing if a IOError will be raised when the path argument is not
a valid path
"""
sm = pymel.core.PyNode('sequenceManager1')
with self.assertRaises(IOError) as cm:
sm.from_xml('not a valid path')
self.assertEqual(
cm.exception.message,
'Please supply a valid path to an XML file!'
)
def test_from_xml_generates_correct_sequencer_hierarchy(self):
"""testing if from_xml method will generate Sequences and shots
correctly
"""
path = os.path.abspath('./test_data/test_v001.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.from_xml(path)
sequences = sm.sequences.get()
self.assertEqual(len(sequences), 1)
sequencer = sequences[0]
self.assertIsInstance(sequencer, pymel.core.nt.Sequencer)
self.assertEqual(sequencer.duration, 111)
self.assertEqual(sequencer.sequence_name.get(), 'SEQ001_HSNI_003')
# check scene fps
self.assertEqual(pymel.core.currentUnit(q=1, t=1), 'film')
# check timecode
time = pymel.core.PyNode('time1')
self.assertEqual(time.timecodeProductionStart.get(), 0.0)
shots = sequencer.shots.get()
self.assertEqual(len(shots), 3)
shot1 = shots[0]
shot2 = shots[1]
shot3 = shots[2]
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1024, shot1.wResolution.get())
self.assertEqual(778, shot1.hResolution.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(34.0, shot1.sequenceEndFrame.get())
self.assertEqual(34.0, shot1.duration)
self.assertEqual(10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
self.assertEqual(
'/tmp/SEQ001_HSNI_003_0010_v001.mov',
shot1.output.get()
)
# Clip2
self.assertEqual('0020', shot2.shotName.get())
self.assertEqual(1024, shot2.wResolution.get())
self.assertEqual(778, shot2.hResolution.get())
self.assertEqual(1, shot2.track.get())
self.assertEqual(35.0, shot2.sequenceStartFrame.get())
self.assertEqual(65.0, shot2.sequenceEndFrame.get())
self.assertEqual(31.0, shot2.duration)
self.assertEqual(10.0, shot2.startFrame.get())
self.assertEqual(40.0, shot2.endFrame.get())
self.assertEqual(
'/tmp/SEQ001_HSNI_003_0020_v001.mov',
shot2.output.get()
)
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1024, shot3.wResolution.get())
self.assertEqual(778, shot3.hResolution.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(66.0, shot3.sequenceStartFrame.get())
self.assertEqual(111.0, shot3.sequenceEndFrame.get())
self.assertEqual(46.0, shot3.duration)
self.assertEqual(10.0, shot3.startFrame.get())
self.assertEqual(55.0, shot3.endFrame.get())
self.assertEqual(
'/tmp/SEQ001_HSNI_003_0030_v001.mov',
shot3.output.get()
)
def test_from_xml_updates_sequencer_hierarchy_with_shots_expanded_and_contracted(self):
"""testing if from_xml method will update Sequences and shots
correctly with the xml file
"""
path = os.path.abspath('./test_data/test_v002.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_version('v001')
seq = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
# now update it with test_v002.xml
sm.from_xml(path)
# check shot data
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(54.0, shot1.sequenceEndFrame.get())
self.assertEqual(-10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
# Clip2
self.assertEqual('0020', shot2.shotName.get())
self.assertEqual(1, shot2.track.get())
self.assertEqual(55.0, shot2.sequenceStartFrame.get())
self.assertEqual(75.0, shot2.sequenceEndFrame.get())
self.assertEqual(44.0, shot2.startFrame.get())
self.assertEqual(64.0, shot2.endFrame.get())
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(76.0, shot3.sequenceStartFrame.get())
self.assertEqual(131.0, shot3.sequenceEndFrame.get())
self.assertEqual(65.0, shot3.startFrame.get())
self.assertEqual(120.0, shot3.endFrame.get())
def test_from_edl_updates_sequencer_hierarchy_with_shots_expanded_and_contracted(self):
"""testing if from_edl method will update Sequences and shots
correctly with the edl file
"""
path = os.path.abspath('./test_data/test_v002.edl')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_version('v001')
seq = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
# now update it with test_v002.edl
sm.from_edl(path)
# check shot data
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(54.0, shot1.sequenceEndFrame.get())
self.assertEqual(-10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
# Clip2
self.assertEqual('0020', shot2.shotName.get())
self.assertEqual(1, shot2.track.get())
self.assertEqual(55.0, shot2.sequenceStartFrame.get())
self.assertEqual(76.0, shot2.sequenceEndFrame.get())
self.assertEqual(44.0, shot2.startFrame.get())
self.assertEqual(65.0, shot2.endFrame.get())
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(77.0, shot3.sequenceStartFrame.get())
self.assertEqual(133.0, shot3.sequenceEndFrame.get())
self.assertEqual(65.0, shot3.startFrame.get())
self.assertEqual(121.0, shot3.endFrame.get())
def test_from_edl_updates_sequencer_hierarchy_with_shots_used_more_than_one_times(self):
"""testing if from_edl method will update Sequences and shots correctly
with shot are used more than once
"""
path = os.path.abspath('./test_data/test_v004.edl')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_version('v001')
seq = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
# set a camera for shot3; the extra shot4 created from the EDL should reuse it
shot3.set_camera('persp')
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
# now update it with test_v004.edl
sm.from_edl(path)
# check if there are 4 shots
self.assertEqual(4, len(seq.shots.get()))
# check shot data
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(54.0, shot1.sequenceEndFrame.get())
self.assertEqual(-10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
# Clip2
self.assertEqual('0020', shot2.shotName.get())
self.assertEqual(1, shot2.track.get())
self.assertEqual(55.0, shot2.sequenceStartFrame.get())
self.assertEqual(76.0, shot2.sequenceEndFrame.get())
self.assertEqual(44.0, shot2.startFrame.get())
self.assertEqual(65.0, shot2.endFrame.get())
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(77.0, shot3.sequenceStartFrame.get())
self.assertEqual(133.0, shot3.sequenceEndFrame.get())
self.assertEqual(65.0, shot3.startFrame.get())
self.assertEqual(121.0, shot3.endFrame.get())
# Clip4
# there should be an extra shot
shot4 = seq.shots.get()[-1]
self.assertEqual('0030', shot4.shotName.get())
self.assertEqual(1, shot4.track.get())
self.assertEqual(133.0, shot4.sequenceStartFrame.get())
self.assertEqual(189.0, shot4.sequenceEndFrame.get())
self.assertEqual(65.0, shot4.startFrame.get())
self.assertEqual(121.0, shot4.endFrame.get())
# check if their cameras also the same
self.assertEqual(
shot3.get_camera(),
shot4.get_camera()
)
def test_from_xml_updates_sequencer_hierarchy_with_shots_removed(self):
"""testing if from_xml method will update Sequences and shots
correctly with the xml file
"""
path = os.path.abspath('./test_data/test_v003.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_version('v001')
seq = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
# now update it with test_v003.xml
sm.from_xml(path)
# we should have 2 shots only
self.assertEqual(2, len(seq.shots.get()))
# check shot data
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(54.0, shot1.sequenceEndFrame.get())
self.assertEqual(-10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
# Clip2
# removed
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(55.0, shot3.sequenceStartFrame.get())
self.assertEqual(110.0, shot3.sequenceEndFrame.get())
self.assertEqual(65.0, shot3.startFrame.get())
self.assertEqual(120.0, shot3.endFrame.get())
def test_to_xml_will_generate_proper_xml_string(self):
"""testing if a proper xml compatible string will be generated with
to_xml() method
"""
path = os.path.abspath('./test_data/test_v001.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_shot_name_template('<Sequence>_<Shot>_<Version>')
sm.set_version('v001')
seq1 = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq1.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq1.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq1.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
result = sm.to_xml()
with open(path) as f:
expected = f.read()
self.maxDiff = None
self.assertEqual(expected, result)
def test_create_sequence_is_working_properly(self):
"""testing if create_sequence is working properly
"""
seq = self.sm.create_sequence()
self.assertEqual(seq.type(), 'sequencer')
self.maxDiff = None
self.assertEqual(self.sm, seq.message.connections()[0])
def test_create_sequence_is_properly_setting_the_sequence_name(self):
"""testing if create_sequence is working properly
"""
seq = self.sm.create_sequence('Test Sequence')
self.assertEqual(
'Test Sequence',
seq.sequence_name.get()
)
def test_to_edl_is_working_properly(self):
"""testing if to_edl method is working properly
"""
import edl
# create a sequence
seq1 = self.sm.create_sequence('sequence1')
seq1.create_shot('shot1')
seq1.create_shot('shot2')
seq1.create_shot('shot3')
l = self.sm.to_edl()
self.assertIsInstance(
l,
edl.List
)
def test_to_edl_will_generate_a_proper_edl_content(self):
"""testing if to_edl will generate a proper edl content
"""
edl_path = os.path.abspath('./test_data/test_v001.edl')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_shot_name_template('<Sequence>_<Shot>_<Version>')
sm.set_version('v001')
seq1 = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq1.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq1.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq1.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
l = sm.to_edl()
result = l.to_string()
with open(edl_path) as f:
expected_edl_content = f.read()
self.assertEqual(
expected_edl_content,
result
)
def test_generate_sequence_structure_returns_a_sequence_instance(self):
"""testing if generate_sequence_structure() method will return a
Sequence instance
"""
sm = pymel.core.PyNode('sequenceManager1')
seq1 = sm.create_sequence('sequence1')
shot1 = seq1.create_shot('shot1')
shot1.output.set('/tmp/shot1.mov')
shot2 = seq1.create_shot('shot2')
shot2.output.set('/tmp/shot2.mov')
result = sm.generate_sequence_structure()
self.assertIsInstance(
result,
Sequence
)
def test_generate_sequence_structure_will_generate_sequences_and_shots_with_correct_number_of_tracks(self):
"""testing if a proper sequence structure will be generated by using
the generate_sequence_structure() method with correct number of tracks
"""
path = os.path.abspath('./test_data/test_v001.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.from_xml(path)
seq1 = sm.sequences.get()[0]
shots = seq1.shots.get()
shot1 = shots[0]
shot2 = shots[1]
shot3 = shots[2]
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
seq = sm.generate_sequence_structure()
tracks = seq.media.video.tracks
self.assertEqual(len(tracks), 1)
track1 = tracks[0]
clips = track1.clips
self.assertEqual(len(clips), 3)
def test_set_shot_name_template_is_working_properly(self):
"""testing if set_shot_name_template() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('shot_name_template'))
test_template = '<Sequence>_<Shot>_<Version>'
sm.set_shot_name_template(test_template)
self.assertTrue(sm.hasAttr('shot_name_template'))
self.assertEqual(sm.shot_name_template.get(), test_template)
def test_get_shot_name_template_is_working_properly(self):
"""testing if set_shot_name_template() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('shot_name_template'))
test_template = '<Sequence>_<Shot>_<Version>'
sm.set_shot_name_template(test_template)
self.assertTrue(sm.hasAttr('shot_name_template'))
self.assertEqual(sm.get_shot_name_template(), test_template)
def test_get_shot_name_template_will_create_shot_name_template_attribute_if_missing(self):
"""testing if set_shot_name_template() will create the
shot_name_template attribute if missing
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('shot_name_template'))
result = sm.get_shot_name_template()
self.assertTrue(sm.hasAttr('shot_name_template'))
self.assertEqual(result, '<Sequence>_<Shot>_<Version>')
def test_set_version_is_working_properly(self):
"""testing if set_version() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('version'))
test_version = 'v001'
sm.set_version(test_version)
self.assertTrue(sm.hasAttr('version'))
self.assertEqual(sm.version.get(), test_version)
def test_get_version_is_working_properly(self):
"""testing if set_version() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('version'))
test_version = 'v001'
sm.set_version(test_version)
self.assertTrue(sm.hasAttr('version'))
self.assertEqual(sm.get_version(), test_version)
def test_get_version_will_create_attribute_if_missing(self):
"""testing if get_version() will create the missing version attribute
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('version'))
result = sm.get_version()
self.assertTrue(sm.hasAttr('version'))
self.assertEqual(result, '')
def test_set_task_name_is_working_properly(self):
"""testing if set_task_name() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('task_name'))
test_task_name = 'Animation'
sm.set_task_name(test_task_name)
self.assertTrue(sm.hasAttr('task_name'))
self.assertEqual(sm.task_name.get(), test_task_name)
def test_get_task_name_is_working_properly(self):
"""testing if set_task_name() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('task_name'))
test_task_name = 'Animation'
sm.set_task_name(test_task_name)
self.assertTrue(sm.hasAttr('task_name'))
self.assertEqual(sm.get_task_name(), test_task_name)
def test_get_task_name_will_create_attribute_if_missing(self):
"""testing if get_task_name() will create the missing task_name attribute
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('task_name'))
result = sm.get_task_name()
self.assertTrue(sm.hasAttr('task_name'))
self.assertEqual(result, '')
def test_set_take_name_is_working_properly(self):
"""testing if set_take_name() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('take_name'))
test_take_name = 'Main'
sm.set_take_name(test_take_name)
self.assertTrue(sm.hasAttr('take_name'))
self.assertEqual(sm.take_name.get(), test_take_name)
def test_get_take_name_is_working_properly(self):
"""testing if set_take_name() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('take_name'))
test_take_name = 'Main'
sm.set_take_name(test_take_name)
self.assertTrue(sm.hasAttr('take_name'))
self.assertEqual(sm.get_take_name(), test_take_name)
def test_get_take_name_will_create_attribute_if_missing(self):
"""testing if get_take_name() will create the missing take_name attribute
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('take_name'))
result = sm.get_take_name()
self.assertTrue(sm.hasAttr('take_name'))
self.assertEqual(result, '')
def test_generate_sequence_structure_is_working_properly(self):
"""testing if generate_sequence_structure() method is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
from anima.env import mayaEnv
mayaEnv.Maya.set_fps(fps=24)
sm.set_shot_name_template('<Sequence>_<Shot>_<Version>')
sm.set_version('v001')
seq1 = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq1.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(24)
shot1.sequenceStartFrame.set(0)
shot1.track.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot2 = seq1.create_shot('0020')
shot2.startFrame.set(10)
shot2.endFrame.set(35)
shot2.sequenceStartFrame.set(25)
shot2.track.set(1)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(15)
shot3 = seq1.create_shot('0030')
shot3.startFrame.set(25)
shot3.endFrame.set(50)
shot3.sequenceStartFrame.set(45)
shot3.track.set(2)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(20)
seq = sm.generate_sequence_structure()
self.assertIsInstance(seq, Sequence)
rate = seq.rate
self.assertEqual('24', rate.timebase)
self.assertEqual(False, rate.ntsc)
self.assertEqual('00:00:00:00', seq.timecode)
self.assertEqual(False, seq.ntsc)
media = seq.media
self.assertIsInstance(media, Media)
video = media.video
self.assertIsInstance(video, Video)
self.assertIsNone(media.audio)
self.assertEqual(2, len(video.tracks))
track1 = video.tracks[0]
self.assertIsInstance(track1, Track)
self.assertEqual(len(track1.clips), 2)
self.assertEqual(track1.enabled, True)
track2 = video.tracks[1]
self.assertIsInstance(track2, Track)
self.assertEqual(len(track2.clips), 1)
self.assertEqual(track2.enabled, True)
clip1 = track1.clips[0]
self.assertIsInstance(clip1, Clip)
self.assertEqual('Video', clip1.type)
self.assertEqual('SEQ001_HSNI_003_0010_v001', clip1.id)
self.assertEqual('SEQ001_HSNI_003_0010_v001', clip1.name)
self.assertEqual(10, clip1.in_) # handle
self.assertEqual(35, clip1.out) # handle + duration
self.assertEqual(0, clip1.start) # sequenceStartFrame
self.assertEqual(25, clip1.end) # sequenceEndFrame + 1
clip2 = track1.clips[1]
self.assertIsInstance(clip2, Clip)
self.assertEqual('Video', clip2.type)
self.assertEqual('SEQ001_HSNI_003_0020_v001', clip2.id)
self.assertEqual('SEQ001_HSNI_003_0020_v001', clip2.name)
self.assertEqual(15, clip2.in_) # handle
self.assertEqual(41, clip2.out) # handle + duration
self.assertEqual(25, clip2.start) # sequenceStartFrame
self.assertEqual(51, clip2.end) # sequenceEndFrame + 1
clip3 = track2.clips[0]
self.assertIsInstance(clip3, Clip)
self.assertEqual('Video', clip3.type)
self.assertEqual('SEQ001_HSNI_003_0030_v001', clip3.id)
self.assertEqual('SEQ001_HSNI_003_0030_v001', clip3.name)
self.assertEqual(20, clip3.in_) # startFrame
self.assertEqual(46, clip3.out) # endFrame + 1
self.assertEqual(45, clip3.start) # sequenceStartFrame
self.assertEqual(71, clip3.end) # sequenceEndFrame + 1
file1 = clip1.file
self.assertIsInstance(file1, File)
self.assertEqual('SEQ001_HSNI_003_0010_v001', file1.name)
self.assertEqual('file://localhost/tmp/SEQ001_HSNI_003_0010_v001.mov',
file1.pathurl)
self.assertEqual(45, file1.duration) # including handles
file2 = clip2.file
self.assertIsInstance(file2, File)
self.assertEqual('SEQ001_HSNI_003_0020_v001', file2.name)
self.assertEqual('file://localhost/tmp/SEQ001_HSNI_003_0020_v001.mov',
file2.pathurl)
self.assertEqual(56, file2.duration) # including handles
file3 = clip3.file
self.assertIsInstance(file3, File)
self.assertEqual('SEQ001_HSNI_003_0030_v001', file3.name)
self.assertEqual('file://localhost/tmp/SEQ001_HSNI_003_0030_v001.mov',
file3.pathurl)
self.assertEqual(66, file3.duration) # including handles
|
StarcoderdataPython
|
89428
|
# This file is a part of DeerLab. License is MIT (see LICENSE.md).
# Copyright(c) 2019-2021: <NAME>, <NAME> and other contributors.
import numpy as np
from deerlab.utils import Jacobian, nearest_psd
from scipy.stats import norm
from scipy.signal import fftconvolve
from scipy.linalg import block_diag
from scipy.optimize import brentq
from scipy.interpolate import interp1d
import copy
class FitResult(dict):
# ========================================================================
r""" Represents the results of a fit.
Attributes
----------
x : ndarray
The solution of the optimization.
success : bool
Whether or not the optimizer exited successfully.
cost : float
Value of the cost function at the solution.
residuals : ndarray
Vector of residuals at the solution.
stats : dict
Goodness of fit statistical estimators:
* ``stats['chi2red']`` - Reduced \chi^2 test
* ``stats['r2']`` - R^2 test
* ``stats['rmsd']`` - Root-mean squared deviation (RMSD)
* ``stats['aic']`` - Akaike information criterion
* ``stats['aicc']`` - Corrected Akaike information criterion
* ``stats['bic']`` - Bayesian information criterion
Methods
-------
plot()
Display the fit results on a Matplotlib window. The script returns a
`matplotlib.axes <https://matplotlib.org/api/axes_api.html>`_ object.
All graphical parameters can be adjusted from this object.
Notes
-----
There may be additional attributes not listed above depending of the
specific fit function. Since this class is essentially a subclass of dict
with attribute accessors, one can see which attributes are available
using the `keys()` method.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
# =========================================================================
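# --- Usage sketch (added for illustration; the numbers are made up) ---
# FitResult is a dict with attribute access, so fitted fields can be reached
# either way, and repr() prints an aligned "key: value" listing.
def _fitresult_example():
    fit = FitResult(x=np.array([0.5, 3.2]), success=True, cost=0.012,
                    residuals=np.zeros(4), stats={'chi2red': 1.03})
    print(fit.x)        # attribute-style access
    print(fit['cost'])  # plain dict access
    print(repr(fit))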
class UQResult:
# =========================================================================
r""" Represents the uncertainty quantification of fit results.
Attributes
----------
type : string
Uncertainty quantification approach:
* 'covariance' - Covariance-based uncertainty analysis
* 'bootstrap' - Bootstrapped uncertainty analysis
mean : ndarray
Mean values of the uncertainty distribution of the parameters.
median : ndarray
Median values of the uncertainty distribution of the parameters.
std : ndarray
Standard deviations of the uncertainty distribution of the parameters.
covmat : ndarray
Covariance matrix
nparam : int scalar
Number of parameters in the analysis.
Methods
-------
"""
def __init__(self,uqtype,data=None,covmat=None,lb=None,ub=None,threshold=None,profiles=None,noiselvl=None):
        # Parse input schemes
if uqtype=='covariance':
# Scheme 1: UQResult('covariance',parfit,covmat,lb,ub)
self.type = uqtype
parfit = data
nParam = len(parfit)
elif uqtype == 'profile':
# Scheme 2: UQResult('profile',profiles)
if not isinstance(profiles,list):
profiles = [profiles]
self.type = uqtype
self.__parfit = data
self.__noiselvl = noiselvl
self.profile = profiles
self.threshold = threshold
nParam = len(np.atleast_1d(data))
elif uqtype == 'bootstrap':
            # Scheme 3: UQResult('bootstrap',samples)
self.type = uqtype
samples = data
self.samples = samples
nParam = np.shape(samples)[1]
elif uqtype=='void':
            # Scheme 4: UQResult('void')
self.type = uqtype
self.mean, self.median, self.std, self.covmat, self.nparam = ([] for _ in range(5))
return
else:
            raise NameError("uqtype not found. Must be: 'covariance', 'profile', 'bootstrap' or 'void'.")
if lb is None:
lb = np.full(nParam, -np.inf)
if ub is None:
ub = np.full(nParam, np.inf)
# Set private variables
self.__lb = lb
self.__ub = ub
self.nparam = nParam
# Create confidence intervals structure
if uqtype=='covariance':
self.mean = parfit
self.median = parfit
self.std = np.sqrt(np.diag(covmat))
self.covmat = covmat
# Profile-based CI specific fields
elif uqtype == 'profile':
xs = [self.pardist(n)[0] for n in range(nParam)]
pardists = [self.pardist(n)[1] for n in range(nParam)]
means = [np.trapz(pardist*x,x) for x,pardist in zip(xs,pardists)]
std = [np.sqrt(np.trapz(pardist*(x-mean)**2,x)) for x,pardist,mean in zip(xs,pardists,means)]
self.mean = means
self.median = self.percentile(50)
self.std = std
self.covmat = np.diag(np.array(std)**2)
# Bootstrap-based CI specific fields
elif uqtype == 'bootstrap':
means = np.mean(samples,0)
            covmat = np.squeeze(samples).T@np.squeeze(samples)/np.shape(samples)[0] - means*means.T
self.mean = means
self.median = self.percentile(50)
self.std = np.squeeze(np.std(samples,0))
self.covmat = covmat
# Gets called when an attribute is accessed
#--------------------------------------------------------------------------------
def __getattribute__(self, attr):
try:
# Calling the super class to avoid recursion
if attr!='type' and super(UQResult, self).__getattribute__('type') == 'void':
# Check if the uncertainty quantification has been done, if not report that there is nothing in the object
                raise ValueError('The requested attribute/method is not available. Uncertainty quantification was not calculated during the fit (e.g. the fit was run with the `uq=None` keyword).')
except AttributeError:
# Catch cases where 'type' attribute has still not been defined (e.g. when using copy.deepcopy)
pass
# Otherwise return requested attribute
return super(UQResult, self).__getattribute__(attr)
#--------------------------------------------------------------------------------
# Combination of multiple uncertainties
#--------------------------------------------------------------------------------
def join(self,*args):
"""
Combine multiple uncertainty quantification instances.
Parameters
----------
uq : any number of :ref:`UQResult`
Uncertainty quantification objects with ``N1,N2,...,Nn`` parameters to be joined
to the object calling the method with ``M`` parameters.
Returns
-------
uq_joined : :ref:`UQResult`
Joined uncertainty quantification object with a total of ``M + N1 + N2 + ... + Nn`` parameters.
        The parameter vectors are concatenated in the order they are passed.
"""
# Original metadata
mean = self.mean
covmat = self.covmat
lbm = self.__lb
ubm = self.__ub
for uq in args:
if not isinstance(uq, UQResult):
raise TypeError('Only UQResult objects can be joined.')
if uq.type=='void':
raise TypeError('Void UQResults cannot be joined.')
# Concatenate metadata of external UQResult objects
mean = np.concatenate([mean, uq.mean])
covmat = block_diag(covmat, uq.covmat)
lbm = np.concatenate([lbm, uq.__lb])
ubm = np.concatenate([ubm, uq.__ub])
# Return new UQResult object with combined information
return UQResult('covariance',mean,covmat,lbm,ubm)
#--------------------------------------------------------------------------------
# Parameter distributions
#--------------------------------------------------------------------------------
def pardist(self,n=0):
"""
Generate the uncertainty distribution of the n-th parameter
Parameters
----------
n : int scalar
Index of the parameter
Returns
-------
ax : ndarray
Parameter values at which the distribution is evaluated
pdf : ndarray
Probability density function of the parameter uncertainty.
"""
        if n >= self.nparam or n < 0:
raise ValueError('The input must be a valid integer number.')
if self.type == 'covariance':
# Generate Gaussian distribution based on covariance matrix
sig = np.sqrt(self.covmat[n,n])
xmean = self.mean[n]
x = np.linspace(xmean-4*sig,xmean+4*sig,500)
pdf = 1/sig/np.sqrt(2*np.pi)*np.exp(-((x-xmean)/sig)**2/2)
if self.type == 'bootstrap':
# Get bw using silverman's rule (1D only)
samplen = self.samples[:, n].real
if np.all(samplen == samplen[0]):
# Dirac's delta distribution
x = np.array([0.9*samplen[0],samplen[0],1.1*samplen[0]])
pdf = np.array([0,1,0])
else:
sigma = np.std(samplen, ddof=1)
bw = sigma*(len(samplen)*3/4.0)**(-1/5)
# Make histogram
maxbin = np.maximum(np.max(samplen),np.mean(samplen)+3*sigma)
minbin = np.minimum(np.min(samplen),np.mean(samplen)-3*sigma)
bins = np.linspace(minbin,maxbin, 2**10 + 1)
count, edges = np.histogram(samplen, bins=bins)
# Generate kernel
delta = np.maximum(np.finfo(float).eps,(edges.max() - edges.min()) / (len(edges) - 1))
kernel_x = np.arange(-4*bw, 4*bw + delta, delta)
kernel = norm(0, bw).pdf(kernel_x)
# Convolve
pdf = fftconvolve(count, kernel, mode='same')
# Set x coordinate of pdf to midpoint of bin
x = edges[:-1] + delta
if self.type=='profile':
if not isinstance(self.profile,list) and n==0:
profile = self.profile
else:
profile = self.profile[n]
σ = self.__noiselvl
obj2likelihood = lambda f: 1/np.sqrt(σ*2*np.pi)*np.exp(-1/2*f/σ**2)
profileinterp = interp1d(profile['x'], profile['y'], kind='slinear', fill_value=1e6,bounds_error=False)
x = np.linspace(np.min(profile['x']), np.max(profile['x']), 2**10 + 1)
pdf = obj2likelihood(profileinterp(x))
# Generate kernel
sigma = np.sum(x*pdf/np.sum(pdf))
bw = sigma*(1e12*3/4.0)**(-1/5)
delta = np.maximum(np.finfo(float).eps,(x.max() - x.min()) / (len(x) - 1))
kernel_x = np.arange(-5*bw, 5*bw + delta, delta)
kernel = norm(0, bw).pdf(kernel_x)
# Convolve
pdf = fftconvolve(pdf, kernel, mode='same')
# Clip the distributions outside the boundaries
pdf[x < self.__lb[n]] = 0
pdf[x > self.__ub[n]] = 0
# Enforce non-negativity (takes care of negative round-off errors)
pdf = np.maximum(pdf,0)
# Ensure normalization of the probability density function
pdf = pdf/np.trapz(pdf, x)
return x, pdf
#--------------------------------------------------------------------------------
# Parameter percentiles
#--------------------------------------------------------------------------------
def percentile(self,p):
"""
Compute the p-th percentiles of the parameters uncertainty distributions
Parameters
----------
p : float scalar
Percentile (between 0-100)
Returns
-------
prctiles : ndarray
Percentile values of all parameters
"""
if p>100 or p<0:
raise ValueError('The input must be a number between 0 and 100')
x = np.zeros(self.nparam)
for n in range(self.nparam):
# Get parameter PDF
values,pdf = self.pardist(n)
# Compute corresponding CDF
cdf = np.cumsum(pdf)
cdf /= max(cdf)
# Eliminate duplicates
            cdf, index = np.unique(cdf, return_index=True)
# Interpolate requested percentile
x[n] = np.interp(p/100,cdf,values[index])
return x
#--------------------------------------------------------------------------------
# Covariance-based confidence intervals
#--------------------------------------------------------------------------------
def ci(self,coverage):
"""
Compute the confidence intervals for the parameters.
Parameters
----------
coverage : float scalar
Coverage (confidence level) of the confidence intervals (between 0-100)
Returns
-------
ci : 2D-ndarray
Confidence intervals for the parameters:
* ``ci[:,0]`` - Lower confidence intervals
* ``ci[:,1]`` - Upper confidence intervals
"""
if coverage>100 or coverage<0:
raise ValueError('The input must be a number between 0 and 100')
value = self.mean if hasattr(self,'mean') else self.__parfit
iscomplex = np.iscomplexobj(value)
alpha = 1 - coverage/100
p = 1 - alpha/2 # percentile
confint = np.zeros((self.nparam,2))
if iscomplex: confint = confint.astype(complex)
if self.type=='covariance':
# Compute covariance-based confidence intervals
# Clip at specified box boundaries
standardError = norm.ppf(p)*np.sqrt(np.diag(self.covmat))
confint[:,0] = np.maximum(self.__lb, self.mean.real - standardError)
confint[:,1] = np.minimum(self.__ub, self.mean.real + standardError)
if iscomplex:
confint[:,0] = confint[:,0] + 1j*np.maximum(self.__lb, self.mean.imag - standardError)
confint[:,1] = confint[:,1] + 1j*np.minimum(self.__ub, self.mean.imag + standardError)
elif self.type=='bootstrap':
# Compute bootstrap-based confidence intervals
# Clip possible artifacts from the percentile estimation
confint[:,0] = np.minimum(self.percentile((1-p)*100), np.amax(self.samples))
confint[:,1] = np.maximum(self.percentile(p*100), np.amin(self.samples))
elif self.type=='profile':
# Compute likelihood-profile-based confidence intervals
for n,profile in enumerate(self.profile):
# Construct interpolator for the profile
profileinterp = interp1d(profile['x'], profile['y'], kind='slinear', fill_value=1e6,bounds_error=False)
#-----------------------------------------------------------------
def getCIbound(boundary,optimum):
def getprofile_at(value):
return profileinterp(value) - self.threshold(coverage/100)
# Evaluate the profile function
fbound = getprofile_at(boundary)
f0 = getprofile_at(optimum)
# Check the signs of the shifted profile
if np.sign(fbound)==np.sign(f0):
# If both edges have the same sign return one of the edges
ci_bound = boundary
else:
searchrange = [boundary,optimum] if boundary<optimum else [optimum,boundary]
ci_bound = brentq(getprofile_at, *searchrange,maxiter=int(1e4))
return ci_bound
#-----------------------------------------------------------------
# Get the upper and lower bounds of the confidence interval
confint[n,0] = getCIbound(profile['x'].min(),self.__parfit[n])
confint[n,1] = getCIbound(profile['x'].max(),self.__parfit[n])
# Remove singleton dimensions
confint = np.squeeze(confint)
return confint
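    # A minimal sketch (hypothetical numbers) of a covariance-based analysis:
    #
    #     parfit = np.array([0.5, 3.2])
    #     covmat = np.diag([0.01, 0.04])
    #     uq = UQResult('covariance', parfit, covmat)
    #     uq.ci(95)          # 2x2 array of lower/upper 95% bounds per parameter
    #     uq.percentile(50)  # medians of the parameter uncertainty distributions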
# Error Propagation (covariance-based only)
#--------------------------------------------------------------------------------
def propagate(self,model,lb=None,ub=None,samples=None):
"""
        Uncertainty propagation. This function takes the uncertainty analysis of the
        parameters and propagates it to another function that depends on those parameters.
Parameters
----------
model : callable
Callable model function taking an array of ``nparam`` parameters.
        lb : ndarray
            Lower bounds of the values returned by ``model``, by default assumed unconstrained.
        ub : ndarray
            Upper bounds of the values returned by ``model``, by default assumed unconstrained.
Returns
-------
modeluq : :ref:`UQResult`
            New uncertainty quantification analysis for the outputs of ``model``.
"""
parfit = self.mean
# Evaluate model with fit parameters
modelfit = model(parfit)
iscomplex = np.iscomplexobj(modelfit)
# Validate input boundaries
if lb is None:
lb = np.full(np.size(modelfit), -np.inf)
if ub is None:
ub = np.full(np.size(modelfit), np.inf)
lb,ub = (np.atleast_1d(var) for var in [lb,ub])
if np.size(modelfit)!=np.size(lb) or np.size(modelfit)!=np.size(ub):
raise IndexError ('The 2nd and 3rd input arguments must have the same number of elements as the model output.')
if samples is None:
Nsamples = 1000
else:
Nsamples = samples
if self.type=='covariance':
if iscomplex:
model_ = model
model = lambda p: np.concatenate([model_(p).real,model_(p).imag])
# Get jacobian of model to be propagated with respect to parameters
J = Jacobian(model,parfit,self.__lb,self.__ub)
# Clip at boundaries
modelfit = np.maximum(modelfit,lb)
modelfit = np.minimum(modelfit,ub)
            # Error propagation (linearized): covariance_model = J @ covariance_param @ J.T
            modelcovmat = nearest_psd(J@self.covmat@J.T)
if iscomplex:
N = modelcovmat.shape[0]
Nreal = np.arange(0,N/2).astype(int)
Nimag = np.arange(N/2,N).astype(int)
modelcovmat = modelcovmat[np.ix_(Nreal,Nreal)] + 1j* modelcovmat[np.ix_(Nimag,Nimag)]
# Construct new uncertainty object
return UQResult('covariance',modelfit,modelcovmat,lb,ub)
elif self.type=='bootstrap':
sampled_parameters = [[]]*self.nparam
for n in range(self.nparam):
# Get the parameter uncertainty distribution
values,pdf = self.pardist(n)
# Random sampling form the uncertainty distribution
sampled_parameters[n] = [np.random.choice(values, p=pdf/sum(pdf)) for _ in range(Nsamples)]
# Convert to matrix
sampled_parameters = np.atleast_2d(sampled_parameters)
# Bootstrap sampling of the model response
sampled_model = [model(sampled_parameters[:,n]) for n in range(Nsamples)]
# Convert to matrix
sampled_model = np.atleast_2d(sampled_model)
# Construct new uncertainty object
return UQResult('bootstrap',data=sampled_model,lb=lb,ub=ub)
#--------------------------------------------------------------------------------
# =========================================================================
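# A minimal sketch (hypothetical model) of propagating parameter uncertainty to a
# derived quantity via UQResult.propagate(); the callable only needs to accept the
# full parameter vector:
#
#     model = lambda p: p[0]*np.linspace(0, 1, 100) + p[1]   # hypothetical linear model
#     modeluq = uq.propagate(model)   # uq: a covariance-based UQResult as above
#     band = modeluq.ci(95)           # pointwise 95% confidence band on the model output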
|
StarcoderdataPython
|
5011919
|
import logging
import numpy as np
from sklearn.ensemble import IsolationForest
from typing import Dict, Union
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
logger = logging.getLogger(__name__)
class IForest(BaseDetector, FitMixin, ThresholdMixin):
def __init__(self,
threshold: float = None,
n_estimators: int = 100,
max_samples: Union[str, int, float] = 'auto',
max_features: Union[int, float] = 1.,
bootstrap: bool = False,
n_jobs: int = 1,
data_type: str = 'tabular'
) -> None:
"""
Outlier detector for tabular data using isolation forests.
Parameters
----------
threshold
Threshold used for outlier score to determine outliers.
n_estimators
Number of base estimators in the ensemble.
max_samples
Number of samples to draw from the training data to train each base estimator.
If int, draw 'max_samples' samples.
            If float, draw 'max_samples * number of samples' samples.
If 'auto', max_samples = min(256, number of samples)
max_features
Number of features to draw from the training data to train each base estimator.
If int, draw 'max_features' features.
If float, draw 'max_features * number of features' features.
bootstrap
Whether to fit individual trees on random subsets of the training data, sampled with replacement.
n_jobs
Number of jobs to run in parallel for 'fit' and 'predict'.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if threshold is None:
logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')
self.threshold = threshold
self.isolationforest = IsolationForest(n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
bootstrap=bootstrap,
n_jobs=n_jobs)
# set metadata
self.meta['detector_type'] = 'offline'
self.meta['data_type'] = data_type
def fit(self,
X: np.ndarray,
sample_weight: np.ndarray = None
) -> None:
"""
Fit isolation forest.
Parameters
----------
X
Training batch.
sample_weight
Sample weights.
"""
self.isolationforest.fit(X, sample_weight=sample_weight)
def infer_threshold(self,
X: np.ndarray,
threshold_perc: float = 95.
) -> None:
"""
Update threshold by a value inferred from the percentage of instances considered to be
outliers in a sample of the dataset.
Parameters
----------
X
Batch of instances.
threshold_perc
Percentage of X considered to be normal based on the outlier score.
"""
# compute outlier scores
iscore = self.score(X)
# update threshold
self.threshold = np.percentile(iscore, threshold_perc)
def score(self, X: np.ndarray) -> np.ndarray:
"""
Compute outlier scores.
Parameters
----------
X
Batch of instances to analyze.
Returns
-------
Array with outlier scores for each instance in the batch.
"""
return - self.isolationforest.decision_function(X)
def predict(self,
X: np.ndarray,
return_instance_score: bool = True) \
-> Dict[Dict[str, str], Dict[np.ndarray, np.ndarray]]:
"""
Compute outlier scores and transform into outlier predictions.
Parameters
----------
X
Batch of instances.
return_instance_score
Whether to return instance level outlier scores.
Returns
-------
Dictionary containing 'meta' and 'data' dictionaries.
'meta' has the model's metadata.
'data' contains the outlier predictions and instance level outlier scores.
"""
# compute outlier scores
iscore = self.score(X)
# values above threshold are outliers
outlier_pred = (iscore > self.threshold).astype(int)
# populate output dict
od = outlier_prediction_dict()
od['meta'] = self.meta
od['data']['is_outlier'] = outlier_pred
if return_instance_score:
od['data']['instance_score'] = iscore
return od
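# A minimal usage sketch (hypothetical data); the typical flow is fit, then
# infer_threshold on (mostly) normal data, then predict on new instances:
#
#     X_train = np.random.randn(1000, 5)
#     od = IForest(n_estimators=100)
#     od.fit(X_train)
#     od.infer_threshold(X_train, threshold_perc=95)  # set threshold from scores
#     preds = od.predict(np.random.randn(10, 5))
#     preds['data']['is_outlier']                     # 0/1 flag per instance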
|
StarcoderdataPython
|
6639217
|
<gh_stars>100-1000
#
# Copyright (c) 2021 Facebook, Inc. and its affiliates.
#
# This file is part of NeuralDB.
# See https://github.com/facebookresearch/NeuralDB for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pymongo import UpdateOne
from tqdm import tqdm
from ndb_data.wikidata_common.wikidata import Wikidata
def write_updates(batch_update):
bulks = []
for k, v in batch_update:
bulks.append(UpdateOne(k, v))
collection.bulk_write(bulks)
if __name__ == "__main__":
client = Wikidata()
collection = client.collection
batch_update = []
num_ops = 0
tqdm_iter = tqdm(
collection.find({}, {"_id": 1, "sitelinks": 1}),
total=collection.estimated_document_count(),
)
for i in tqdm_iter:
if type(i["sitelinks"]) == dict:
batch_update.append(
(
{"_id": i["_id"]},
{"$set": {"sitelinks": list(i["sitelinks"].values())}},
)
)
        if len(batch_update) > 10000:
            write_updates(batch_update)
            batch_update = []
            num_ops += 1
            tqdm_iter.desc = f"Performed update {num_ops}"
    # Flush any remaining documents that did not fill a complete batch
    if batch_update:
        write_updates(batch_update)
|
StarcoderdataPython
|
3352993
|
#!/usr/bin/python3
input_file = 'input.in'
data_file = list(map(int, open(input_file).read().split()))
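# The input is a flat list of integers encoding a tree: each node starts with a
# two-entry header (number of children, number of metadata entries), followed by
# its child nodes and then its metadata entries. TNode.put_data() below consumes
# the list recursively and returns the leftover integers for the parent to parse.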
class TNode(object):
def __init__(self, no, children_count=0, metadata_count=0):
self.no = no
self.children_count = children_count
self.metadata_count = metadata_count
self.children = []
self.metadata = []
def put_data(self, data):
self.children_count = data[0]
self.metadata_count = data[1]
child_data = data[2:]
for i in range(self.children_count):
child = TNode(self.no + 1)
child_data = child.put_data(child_data)
self.children.append(child)
self.metadata = child_data[:self.metadata_count]
if len(child_data) > self.metadata_count:
return child_data[self.metadata_count:]
return []
def sum_metadata(self):
total = 0
for c in self.children:
total += c.sum_metadata()
total += sum(self.metadata)
return total
def __repr__(self):
return str(self.__dict__)
root = TNode(0)
root.put_data(data_file)
total_metadata = root.sum_metadata()
print(total_metadata)
|
StarcoderdataPython
|
6476764
|
import time
from unittest import TestCase
import sys
from src.algorithms.math.Fibonacci import Fibonacci
sys.setrecursionlimit(6000)
class TestFibonacciTime(TestCase):
index = 36
number = 14930352
    def test_fib_iterative_time(self):
        print("Time of iterative calculation of Fibonacci number (O(n)): ")
        start = time.perf_counter()
        result = Fibonacci.fib_iterative(self.index)
        end = time.perf_counter()
        print(str(end - start) + " s\n")
        self.assertEqual(self.number, result)
    def test_fib_recursive_time(self):
        print("Time of recursive calculation of Fibonacci number (O(2^n)): ")
        start = time.perf_counter()
        result = Fibonacci.fib_recursive(self.index)
        end = time.perf_counter()
        print(str(end - start) + " s\n")
        self.assertEqual(self.number, result)
|
StarcoderdataPython
|
1817137
|
# This file is auto-generated, please don't modify it directly.
# Modify source xls file and use model_gen to regenerate again.
#
# Last generate time: 2018-05-23 13:11:16
from enum import Enum
class EnumGameType(Enum):
    Drier = 0 # hair dryer
    Laser = 1 # laser pointer
    Feed = 2 # feeding
    PutUp = 3 # hold up
    Stroke = 4 # petting
    Click = 5 # click
class EnumFoodType(Enum):
    CatFood = 0 # cat food
    GreenFood = 1 # vegetables
    Banana = 2 # banana
    Fish = 3 # dried fish
    Cheese = 4 # cheese
    CatCan = 5 # canned cat food
    MedicineOne = 6 # medicine bottle No. 1
    MedicineTwo = 7 # medicine bottle No. 2
class EnumCatType(Enum):
    MiniCat = 0 # kitten stage
    YoungCat = 1 # growing stage
class AudioType(Enum):
    Auido2D = 0 # 2D sound effect
    Audio3D = 1 # 3D sound effect
class EnumShopItemType(Enum):
    Coin = 0 # gold coin
    Mint = 1 # mint
    Prop_Grass = 2 # prop: cat grass
    Prop_Toy = 3 # prop: toy
class EnumShopItemTagType(Enum):
    Hot = 0 # hot sale
    Promotion = 1 # promotion
class EnumUserGuideConditionType(Enum):
    OpenView = 0 # open a view
    Property = 1 # property check
    ViewBtnPress = 2 # button pressed
class EnumUserGuideEventType(Enum):
    MoveToButton = 0 # move-to-button event
    TextShow = 1 # text display
    AIAction = 2 # AI action
    SetGuideFinish = 3 # mark guide as finished
class EnumOwnItemType(Enum):
    Food = 0 # food
    Costume = 1 # costume
    Grass = 2 # cat grass
    Toy = 3 # toy
    Card = 4 # card
class EnumDialogType(Enum):
    Hat = 0 # hat
    Neckcloth = 1 # necktie
    Glass = 2 # glasses
    CostumeClearAllFashionDialog = 3 # clear-all-fashion dialog in the dressing room
class EnumPropType(Enum):
    Grass = 0 # cat grass
    Toy = 1 # toy
class EnumEventType(Enum):
    Stroke = 0 # petting
    Feed = 1 # feeding
    Enjoy = 2 # share
class EnumCardBagType(Enum):
    Common = 0 # common
    Rare = 1 # rare
    Toy = 2 # toy
class EnumCostumeType(Enum):
    Hat = 0 # hat
    Neckcloth = 1 # necktie
    Glass = 2 # glasses
    Hair = 3 # fur color
|
StarcoderdataPython
|
72156
|
<filename>lib/python/treadmill/cleanup.py
"""Listens to Treadmill cleanup events.
When a treadmill app needs to be cleaned up then there will exist a symlink
to the app in the cleanup directory. A cleanup app will be created to do
the cleanup work necessary:
<treadmillroot>/
cleanup/
<instance>
cleaning/
<instance> -> ../cleanup_apps/<instance>
cleanup_apps/
<instance>
Treadmill runs svscan process pointing to 'cleaning' scan directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
import logging
import os
import time
from treadmill import dirwatch
from treadmill import fs
from treadmill import logcontext as lc
from treadmill import runtime as app_runtime
from treadmill import subproc
from treadmill import supervisor
_LOGGER = logging.getLogger(__name__)
# FIXME: This extremely high timeout value comes from the fact that we
# have a very high watchdog value in runtime.
_WATCHDOG_HEARTBEAT_SEC = 5 * 60
# Maximum number of cleanup request to process per cycle. Be careful of
# watchdog timeouts when increasing this value.
_MAX_REQUEST_PER_CYCLE = 1
_SERVICE_NAME = 'Cleanup'
class Cleanup:
"""Orchestrate the cleanup of apps which are scheduled to be stopped and/or
removed.
"""
__slots__ = (
'tm_env',
)
def __init__(self, tm_env):
self.tm_env = tm_env
def _refresh_supervisor(self):
"""Notify the supervisor of new cleanup instances.
"""
_LOGGER.info('Refreshing svscan')
supervisor.control_svscan(self.tm_env.cleaning_dir, (
supervisor.SvscanControlAction.alarm,
supervisor.SvscanControlAction.nuke
))
def _add_cleanup_app(self, path):
"""Configure a new cleanup app.
"""
name = os.path.basename(path)
if name.startswith('.'):
_LOGGER.warning('Ignore %s', name)
return
cleaning_link = os.path.join(self.tm_env.cleaning_dir, name)
if os.path.islink(cleaning_link):
_LOGGER.warning('Cleaning app already configured %s', name)
return
cleanup_link = os.path.join(self.tm_env.cleanup_dir, name)
if not os.path.islink(cleanup_link):
_LOGGER.info('Ignore - not a link: %s', cleanup_link)
return
_LOGGER.info('Configure cleaning app: %s', name)
command = (
'{treadmill}/bin/treadmill sproc cleanup instance'
' --approot {tm_root}'
' {instance}'
).format(
treadmill=subproc.resolve('treadmill'),
tm_root=self.tm_env.root,
instance=name
)
if os.name == 'posix':
command = 'exec ' + command
supervisor.create_service(
self.tm_env.cleanup_apps_dir,
name=name,
app_run_script=command,
userid='root',
monitor_policy={
'limit': 5,
'interval': 60,
'tombstone': os.path.join(self.tm_env.cleanup_tombstone_dir,
name),
'skip_path': os.path.join(self.tm_env.cleanup_dir, name)
},
log_run_script=None,
)
fs.symlink_safe(
cleaning_link,
os.path.join(self.tm_env.cleanup_apps_dir, name)
)
_LOGGER.debug('Cleanup app %s ready', name)
self._refresh_supervisor()
def _remove_cleanup_app(self, path):
"""Stop and remove a cleanup app.
"""
name = os.path.basename(path)
if name.startswith('.'):
_LOGGER.warning('Ignore %s', name)
return
cleaning_link = os.path.join(self.tm_env.cleaning_dir, name)
app_path = os.path.join(self.tm_env.cleanup_apps_dir, name)
_LOGGER.info('Removing cleanup app %s -> %s', cleaning_link, app_path)
if os.path.exists(cleaning_link):
_LOGGER.debug('Removing cleanup link %s', cleaning_link)
fs.rm_safe(cleaning_link)
self._refresh_supervisor()
_LOGGER.debug('Waiting on %s not being supervised', app_path)
supervisor.ensure_not_supervised(app_path)
else:
_LOGGER.debug('Cleanup link %s does not exist', cleaning_link)
_LOGGER.debug('Removing app directory %s', app_path)
fs.rmtree_safe(app_path)
def invoke(self, runtime, instance, runtime_param=None):
"""Actually do the cleanup of the instance.
"""
cleanup_link = os.path.join(self.tm_env.cleanup_dir, instance)
container_dir = os.readlink(cleanup_link)
_LOGGER.info('Cleanup: %s => %s', instance, container_dir)
if os.path.exists(container_dir):
with lc.LogContext(_LOGGER, os.path.basename(container_dir),
lc.ContainerAdapter) as log:
try:
app_runtime.get_runtime(
runtime, self.tm_env, container_dir, runtime_param
).finish()
except Exception: # pylint: disable=W0703
if not os.path.exists(container_dir):
log.info('Container dir does not exist: %s',
container_dir)
else:
log.exception('Fatal error running finish %r.',
container_dir)
raise
else:
_LOGGER.info('Container dir does not exist: %r', container_dir)
fs.rm_safe(cleanup_link)
def _sync(self):
"""Synchronize cleanup to cleaning.
"""
cleanup_list = [
os.path.basename(filename)
for filename in glob.glob(os.path.join(self.tm_env.cleanup_dir,
'*'))
]
cleanup_apps = {
os.path.basename(filename)
for filename in glob.glob(
os.path.join(self.tm_env.cleanup_apps_dir, '*')
)
}
for instance in cleanup_list:
self._add_cleanup_app(instance)
cleanup_apps.discard(instance)
for instance in cleanup_apps:
self._remove_cleanup_app(instance)
def run(self):
"""Setup directories' watches and start the re-scan ticker.
"""
# Setup the watchdog
watchdog_lease = self.tm_env.watchdogs.create(
name='svc-{svc_name}'.format(svc_name=_SERVICE_NAME),
timeout='{hb:d}s'.format(hb=_WATCHDOG_HEARTBEAT_SEC),
content='Service {svc_name!r} failed'.format(
svc_name=_SERVICE_NAME),
)
# Wait on svscan starting up first to avoid race conditions with
# refreshing it later.
while True:
try:
self._refresh_supervisor()
_LOGGER.info('svscan is running.')
break
except subproc.CalledProcessError:
_LOGGER.info('Waiting on svscan running.')
time.sleep(0.2)
watcher = dirwatch.DirWatcher(self.tm_env.cleanup_dir)
watcher.on_created = self._add_cleanup_app
watcher.on_deleted = self._remove_cleanup_app
self._sync()
loop_timeout = _WATCHDOG_HEARTBEAT_SEC // 2
while True:
if watcher.wait_for_events(timeout=loop_timeout):
watcher.process_events(max_events=_MAX_REQUEST_PER_CYCLE)
# Heartbeat
watchdog_lease.heartbeat()
_LOGGER.info('Cleanup service shutdown.')
watchdog_lease.remove()
|
StarcoderdataPython
|
8117932
|
import subprocess
def test_CLI_coffee_call():
return_code = subprocess.call([
# path is or should be in a setting somewhere
'/home/pi/Programming/Automation/executables/rfoutlets_coffee.py',
'1000',
'-d',
'0',
'--test'
])
assert return_code == 0
|
StarcoderdataPython
|
8159579
|
<reponame>jiashunwang/Long-term-Motion-in-3D-Scenes<filename>train_subgoal.py
import torch
import torch.optim as optim
import numpy as np
from sub_data import SUBDATA
import time
import torch.nn.functional as F
from human_body_prior.tools.model_loader import load_vposer
from utils import BodyParamParser, ContinousRotReprDecoder, GeometryTransformer
import smplx
import chamfer_pytorch.dist_chamfer as ext
from sub_goal import Pointnet,SUBGOAL
start = time.time()
batch_size = 8
dataset = SUBDATA()
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
contact_id_folder = './data/body_segments'
contact_part = ['back','gluteus','L_Hand','R_Hand','L_Leg','R_Leg','thighs']
vposer, _ = load_vposer('./vposer_v1_0', vp_model='snapshot')
vposer = vposer.cuda()
body_mesh_model = smplx.create('./models',
model_type='smplx',
gender='neutral', ext='npz',
num_pca_comps=12,
create_global_orient=True,
create_body_pose=True,
create_betas=True,
create_left_hand_pose=True,
create_right_hand_pose=True,
create_expression=True,
create_jaw_pose=True,
create_leye_pose=True,
create_reye_pose=True,
create_transl=True,
batch_size=batch_size
)
body_mesh_model = body_mesh_model.cuda()
print('finish data loading time:',time.time()-start)
model = SUBGOAL(n_dim_body=65)
model = model.cuda()
lrate = 0.0005
optimizer = optim.Adam(model.parameters(), lr=lrate)
for epoch in range(50):
start_time = time.time()
total_loss = 0
total_collision_loss = 0
total_contact_loss = 0
total_kl_loss = 0
total_rec_loss = 0
total_rec_orient_loss = 0
total_rec_transl_loss = 0
for j,data in enumerate(dataloader,0):
optimizer.zero_grad()
middle_list,_,scene_name,sdf,scene_points,cam_extrinsic,s_grid_min_batch,s_grid_max_batch = data
body = middle_list[:,0,6:16].cuda()
middle_list = torch.cat([middle_list[:,:,:6],middle_list[:,:,16:]],dim=2)
middle = middle_list[:,0,:].cuda()
scene_points = scene_points.cuda()
sdf = sdf.cuda()
s_grid_max_batch = s_grid_max_batch.cuda()
s_grid_min_batch = s_grid_min_batch.cuda()
middle = GeometryTransformer.convert_to_6D_rot(middle)
rec, mu, logsigma2 = model(middle,scene_points.transpose(1,2),middle[:,:9],body)
loss_rec_transl = 0.5*(F.l1_loss(rec[:,:3], middle[:,:3]))
loss_rec_orient = (F.l1_loss(rec[:,3:9], middle[:,3:9]))
loss_rec = (F.l1_loss(rec[:,9:41], middle[:,9:41]))+loss_rec_transl+loss_rec_orient+0.1*F.l1_loss(rec[:,41:], middle[:,41:])
fca = 1.0
fca = min(1.0, max(float(epoch) / (10*0.75),0) )
loss_KL = (fca**2 * 0.1*torch.mean(torch.exp(logsigma2) +mu**2 -1.0 -logsigma2))
#body mesh
rec = GeometryTransformer.convert_to_3D_rot(rec)
body_param_rec = BodyParamParser.body_params_encapsulate_batch_nobody_hand(rec)
body_param_rec['body_pose'] = vposer.decode(body_param_rec['body_pose'],
output_type='aa').view(rec.shape[0], -1)
body_param_rec['betas'] = body
smplx_output = body_mesh_model(return_verts=True, **body_param_rec)
#body_verts_batch is with scene pointcloud
#body_verts_batch_ is with scene sdf
body_verts_batch = smplx_output.vertices #[b, 10475,3]
body_verts_batch_ = GeometryTransformer.verts_transform(body_verts_batch, torch.tensor(cam_extrinsic,dtype=torch.float32).cuda())
#contact loss
vid, fid = GeometryTransformer.get_contact_id(body_segments_folder=contact_id_folder,
contact_body_parts=contact_part)
body_verts_contact_batch = body_verts_batch[:, vid, :]
dist_chamfer_contact = ext.chamferDist()
contact_dist, _ = dist_chamfer_contact(
body_verts_contact_batch.contiguous(),
scene_points.contiguous()
)
loss_contact = (1 * torch.mean( torch.sqrt(contact_dist+1e-4)
/(torch.sqrt(contact_dist+1e-4)+1.0) ) )
#collision loss
norm_verts_batch = ((body_verts_batch_ - s_grid_min_batch.unsqueeze(1))
/ (s_grid_max_batch.unsqueeze(1) - s_grid_min_batch.unsqueeze(1)) *2 -1)
n_verts = norm_verts_batch.shape[1]
body_sdf_batch = F.grid_sample(sdf.unsqueeze(1),
norm_verts_batch[:,:,[2,1,0]].view(-1, n_verts,1,1,3),
padding_mode='border')
if body_sdf_batch.lt(0).sum().item() < 1:
loss_sdf_pene = torch.tensor(0.0, dtype=torch.float32).cuda()
else:
loss_sdf_pene = body_sdf_batch[body_sdf_batch < 0].abs().mean()
loss_KL = loss_KL
loss_rec = 0.1*loss_rec
loss_sdf_pene = 0.01*loss_sdf_pene
loss_contact = 0.01*loss_contact
loss = loss_KL+loss_rec+loss_sdf_pene+loss_contact
loss.backward()
optimizer.step()
total_collision_loss = total_collision_loss+loss_sdf_pene
total_loss = total_loss+loss
total_contact_loss = total_contact_loss+loss_contact
total_kl_loss = total_kl_loss+loss_KL
total_rec_loss = total_rec_loss+loss_rec
total_rec_orient_loss = total_rec_orient_loss+0.1*loss_rec_orient
total_rec_transl_loss = total_rec_transl_loss+0.1*loss_rec_transl
print('##################################')
print('##################################')
print('epoch:',epoch)
end_time=time.time()
print('time:',end_time-start_time)
print('total:',total_loss/((j+1)))
print('collison:',total_collision_loss/(j+1))
print('contact:',total_contact_loss/((j+1)))
print('kl:',total_kl_loss/(j+1))
print('rec_orient:',total_rec_orient_loss/(j+1))
print('rec_transl:',total_rec_transl_loss/(j+1))
print('rec:',total_rec_loss/(j+1))
print('##################################')
print('##################################')
if (epoch+1) % 5 == 0:
save_path = './saved_model/subgoal_'+str(epoch)+'.model'
print(save_path)
torch.save(model.state_dict(),save_path)
|
StarcoderdataPython
|
6632009
|
import os
from pathlib import Path
import pytest
import yaml
from plumbum import local
from plumbum.cmd import git
with open("copier.yml") as copier_fd:
COPIER_SETTINGS = yaml.safe_load(copier_fd)
# Different tests for different Odoo versions
OLDEST_SUPPORTED_ODOO_VERSION = 8.0
ALL_ODOO_VERSIONS = tuple(COPIER_SETTINGS["odoo_version"]["choices"])
SUPPORTED_ODOO_VERSIONS = tuple(
v for v in ALL_ODOO_VERSIONS if v >= OLDEST_SUPPORTED_ODOO_VERSION
)
LAST_ODOO_VERSION = max(SUPPORTED_ODOO_VERSIONS)
SELECTED_ODOO_VERSIONS = (
frozenset(map(float, os.environ.get("SELECTED_ODOO_VERSIONS", "").split()))
or ALL_ODOO_VERSIONS
)
@pytest.fixture(params=ALL_ODOO_VERSIONS)
def any_odoo_version(request) -> float:
"""Devuelve cualquier version odoo utilizable."""
if request.param not in SELECTED_ODOO_VERSIONS:
pytest.skip("La version odoo no esta en el rango seleccionado")
return request.param
@pytest.fixture(params=SUPPORTED_ODOO_VERSIONS)
def supported_odoo_version(request) -> float:
"""Devuelve cualquier version odoo soportada."""
if request.param not in SELECTED_ODOO_VERSIONS:
pytest.skip("La version de Odoo Soportada no esta en el rango seleccionado")
return request.param
@pytest.fixture()
def cloned_template(tmp_path_factory):
"""Este repositorio clonado a un destino temporal.
El clon incluirá cambios sucios y tendrá una etiqueta de 'prueba' en su HEAD.
Devuelve el `Path` local al clon.
"""
patches = [git("diff", "--cached"), git("diff")]
with tmp_path_factory.mktemp("cloned_template_") as dirty_template_clone:
git("clone", ".", dirty_template_clone)
with local.cwd(dirty_template_clone):
for patch in patches:
if patch:
(git["apply", "--reject"] << patch)()
git("add", ".")
git(
"commit",
"--author=Test<test@test>",
"--message=dirty changes",
"--no-verify",
)
git("tag", "--force", "test")
yield dirty_template_clone
@pytest.fixture()
def versionless_odoo_autoskip(request):
"""Accesorio para omitir automaticamente las pruebas en versiones anteriores de odoo."""
is_version_specific_test = (
"any_odoo_version" in request.fixturenames
or "supported_odoo_version" in request.fixturenames
)
if LAST_ODOO_VERSION not in SELECTED_ODOO_VERSIONS and not is_version_specific_test:
pytest.skip(
"test version-independent en la sesión de prueba de odoo versionada antigua"
)
def teardown_function(function):
pre_commit_log = (
Path("~") / ".cache" / "pre-commit" / "pre-commit.log"
).expanduser()
if pre_commit_log.is_file():
print(pre_commit_log.read_text())
pre_commit_log.unlink()
|
StarcoderdataPython
|
3321138
|
# Generated by Django 3.2.6 on 2021-09-07 07:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0009_auto_20210907_0530'),
]
operations = [
migrations.AddField(
model_name='isolatedfilecollection',
name='name',
field=models.CharField(blank=True, max_length=128, verbose_name='name'),
),
]
|
StarcoderdataPython
|
236109
|
from bokeh.charts import TimeSeries, show, output_file
import pandas as pd
def read_weather_data(url):
    data = pd.read_csv(url, parse_dates=['CST'])
    data.columns = data.columns.str.strip()
    return data
# vicksburg_url and austin_url must be defined elsewhere (CSV exports of daily weather data)
vicksburg_data = read_weather_data(vicksburg_url)
austin_data = read_weather_data(austin_url)
data = dict(
VICKSBURG = vicksburg_data['Mean TemperatureF'],
AUSTIN = austin_data['Mean TemperatureF'],
Date=vicksburg_data['CST'],
)
tsline = TimeSeries(data,
x='Date', y=['VICKSBURG', 'AUSTIN'],
color=['VICKSBURG', 'AUSTIN'], dash=['VICKSBURG', 'AUSTIN'],
title="2015", ylabel='Mean TemperatureF', legend=True)
output_file("timeseries.html")
show(tsline)
|
StarcoderdataPython
|
6549474
|
<reponame>harsh183/nerodia
from re import compile
import pytest
from nerodia.exception import UnknownObjectException
pytestmark = pytest.mark.page('non_control_elements.html')
class TestLiExist(object):
def test_returns_true_if_the_element_exists(self, browser):
assert browser.li(id='non_link_1').exists is True
assert browser.li(id=compile(r'non_link_1')).exists is True
assert browser.li(text='Non-link 3').exists is True
assert browser.li(text=compile(r'Non-link 3')).exists is True
assert browser.li(class_name='nonlink').exists is True
assert browser.li(class_name=compile(r'nonlink')).exists is True
assert browser.li(index=0).exists is True
assert browser.li(xpath="//li[@id='non_link_1']").exists is True
def test_returns_the_first_li_if_given_no_args(self, browser):
assert browser.li().exists
def test_returns_false_if_the_element_does_not_exist(self, browser):
assert browser.li(id='no_such_id').exists is False
assert browser.li(id=compile(r'no_such_id')).exists is False
assert browser.li(text='no_such_text').exists is False
assert browser.li(text=compile(r'no_such_text')).exists is False
assert browser.li(class_name='no_such_class').exists is False
assert browser.li(class_name=compile(r'no_such_class')).exists is False
assert browser.li(index=1337).exists is False
assert browser.li(xpath="//li[@id='no_such_id']").exists is False
def test_raises_correct_exception_when_what_argument_is_invalid(self, browser):
with pytest.raises(TypeError):
browser.li(id=3.14).exists
class TestLiAttributes(object):
# id
def test_returns_the_id_if_the_element_exists_and_has_id(self, browser):
assert browser.li(class_name='nonlink').id == 'non_link_1'
def test_returns_an_empty_string_if_the_li_exists_and_the_id_doesnt(self, browser):
assert browser.li(index=0).id == ''
@pytest.mark.usefixtures('quick_timeout')
def test_raises_correct_exception_for_id_if_the_element_doesnt_exist(self, browser):
with pytest.raises(UnknownObjectException):
browser.li(index=1337).id
# title
def test_returns_the_title_if_the_element_exists_and_has_title(self, browser):
assert browser.li(id='non_link_1').title == 'This is not a link!'
def test_returns_an_empty_string_if_the_li_exists_and_the_title_doesnt(self, browser):
assert browser.li(index=0).title == ''
@pytest.mark.usefixtures('quick_timeout')
def test_raises_correct_exception_for_title_if_the_element_doesnt_exist(self, browser):
with pytest.raises(UnknownObjectException):
browser.li(index=1337).title
# text
def test_returns_the_text_if_the_element_exists_and_has_text(self, browser):
assert browser.li(id='non_link_1').text == 'Non-link 1'
def test_returns_an_empty_string_if_the_li_exists_and_the_text_doesnt(self, browser):
assert browser.li(index=0).text == ''
@pytest.mark.usefixtures('quick_timeout')
def test_raises_correct_exception_for_text_if_the_element_doesnt_exist(self, browser):
with pytest.raises(UnknownObjectException):
browser.li(index=1337).text
def test_finds_all_attribute_methods(browser):
assert hasattr(browser.li(index=0), 'class_name')
assert hasattr(browser.li(index=0), 'id')
assert hasattr(browser.li(index=0), 'text')
assert hasattr(browser.li(index=0), 'title')
|
StarcoderdataPython
|
95860
|
<reponame>Mehdishishehbor/gpytorch<filename>test/kernels/test_rff_kernel.py
#!/usr/bin/env python3
import unittest
from unittest.mock import MagicMock, patch
import torch
import Lgpytorch
from Lgpytorch.kernels import RFFKernel
from Lgpytorch.test.base_kernel_test_case import BaseKernelTestCase
class TestModel(Lgpytorch.models.ExactGP):
def __init__(self, train_x, train_y):
likelihood = Lgpytorch.likelihoods.GaussianLikelihood()
super().__init__(train_x, train_y, likelihood)
self.mean_module = Lgpytorch.means.ZeroMean()
self.covar_module = Lgpytorch.kernels.ScaleKernel(RFFKernel(num_samples=50))
def forward(self, input):
mean = self.mean_module(input)
covar = self.covar_module(input)
return Lgpytorch.distributions.MultivariateNormal(mean, covar)
class TestRFFKernel(unittest.TestCase, BaseKernelTestCase):
def create_kernel_no_ard(self, **kwargs):
return RFFKernel(num_samples=5, **kwargs)
def create_kernel_ard(self, num_dims, **kwargs):
return RFFKernel(num_dims=num_dims, num_samples=7, ard_num_dims=num_dims, **kwargs)
def test_active_dims_list(self):
kernel = self.create_kernel_no_ard(active_dims=[0, 2, 4, 6])
x = self.create_data_no_batch()
covar_mat = kernel(x).evaluate_kernel().evaluate()
randn_weights = kernel.randn_weights
kernel_basic = self.create_kernel_no_ard()
kernel_basic._init_weights(randn_weights=randn_weights)
covar_mat_actual = kernel_basic(x[:, [0, 2, 4, 6]]).evaluate_kernel().evaluate()
self.assertLess(torch.norm(covar_mat - covar_mat_actual) / covar_mat_actual.norm(), 1e-4)
def test_active_dims_range(self):
active_dims = list(range(3, 9))
kernel = self.create_kernel_no_ard(active_dims=active_dims)
x = self.create_data_no_batch()
covar_mat = kernel(x).evaluate_kernel().evaluate()
randn_weights = kernel.randn_weights
kernel_basic = self.create_kernel_no_ard()
kernel_basic._init_weights(randn_weights=randn_weights)
covar_mat_actual = kernel_basic(x[:, active_dims]).evaluate_kernel().evaluate()
self.assertLess(torch.norm(covar_mat - covar_mat_actual) / covar_mat_actual.norm(), 1e-4)
def test_kernel_getitem_single_batch(self):
kernel = self.create_kernel_no_ard(batch_shape=torch.Size([2]))
x = self.create_data_single_batch()
res1 = kernel(x).evaluate()[0] # Result of first kernel on first batch of data
randn_weights = kernel.randn_weights
new_kernel = kernel[0]
new_kernel._init_weights(randn_weights=randn_weights[0])
res2 = new_kernel(x[0]).evaluate() # Should also be result of first kernel on first batch of data.
self.assertLess(torch.norm(res1 - res2) / res1.norm(), 1e-4)
def test_kernel_getitem_double_batch(self):
# TODO: Fix randomization
kernel = self.create_kernel_no_ard(batch_shape=torch.Size([3, 2]))
x = self.create_data_double_batch()
res1 = kernel(x).evaluate()[0, 1] # Result of first kernel on first batch of data
randn_weights = kernel.randn_weights
new_kernel = kernel[0, 1]
new_kernel._init_weights(randn_weights=randn_weights[0, 1])
res2 = new_kernel(x[0, 1]).evaluate() # Should also be result of first kernel on first batch of data.
self.assertLess(torch.norm(res1 - res2) / res1.norm(), 1e-4)
def test_kernel_output_fewer_features_than_data(self):
# not fixing the seed can result in occasional bad params that cause a flaky test
torch.manual_seed(1234)
train_x = torch.randn(1000, 3)
train_y = torch.randn(1000)
test_x = torch.randn(500, 3)
model = TestModel(train_x, train_y)
# Make sure that the prior kernel is the correct type
model.train()
output = model(train_x).lazy_covariance_matrix.evaluate_kernel()
self.assertIsInstance(output, Lgpytorch.lazy.LowRankRootLazyTensor)
# Make sure that the prior predictive kernel is the correct type
model.train()
output = model.likelihood(model(train_x)).lazy_covariance_matrix.evaluate_kernel()
self.assertIsInstance(output, Lgpytorch.lazy.LowRankRootAddedDiagLazyTensor)
# Make sure we're calling the correct prediction strategy
_wrapped_ps = MagicMock(wraps=Lgpytorch.models.exact_prediction_strategies.RFFPredictionStrategy)
with patch("gpytorch.models.exact_prediction_strategies.RFFPredictionStrategy", new=_wrapped_ps) as ps_mock:
model.eval()
output = model.likelihood(model(test_x))
_ = output.mean + output.variance # Compute something to break through any lazy evaluations
self.assertTrue(ps_mock.called)
def test_kernel_output_more_features_than_data(self):
train_x = torch.randn(50, 3)
train_y = torch.randn(50)
test_x = torch.randn(500, 3)
model = TestModel(train_x, train_y)
# Make sure that the prior kernel is the correct type
model.train()
output = model(train_x).lazy_covariance_matrix.evaluate_kernel()
self.assertIsInstance(output, Lgpytorch.lazy.RootLazyTensor)
self.assertNotIsInstance(output, Lgpytorch.lazy.LowRankRootLazyTensor)
# Make sure that the prior predictive kernel is the correct type
model.train()
output = model.likelihood(model(train_x)).lazy_covariance_matrix.evaluate_kernel()
self.assertIsInstance(output, Lgpytorch.lazy.AddedDiagLazyTensor)
self.assertNotIsInstance(output, Lgpytorch.lazy.LowRankRootAddedDiagLazyTensor)
# Make sure we're calling the correct prediction strategy
_wrapped_ps = MagicMock(wraps=Lgpytorch.models.exact_prediction_strategies.RFFPredictionStrategy)
with patch("gpytorch.models.exact_prediction_strategies.RFFPredictionStrategy", new=_wrapped_ps) as ps_mock:
model.eval()
output = model.likelihood(model(test_x))
_ = output.mean + output.variance # Compute something to break through any lazy evaluations
self.assertTrue(ps_mock.called)
|
StarcoderdataPython
|
5107884
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
# Create your models here.
###########################
# USER AND PROFILE
class Profile(models.Model):
#user types
UNKNOWN_TYPE = 'U'
DRIVER_TYPE = 'D'
STUDENT_TYPE = 'S'
ADMIN_TYPE = 'A'
USER_TYPE_OPTIONS = (
(UNKNOWN_TYPE, 'Unknown'),
(DRIVER_TYPE, 'Driver'),
(STUDENT_TYPE, 'Student'),
(ADMIN_TYPE, 'Admin'),
)
user = models.OneToOneField(User, on_delete=models.CASCADE)
user_type = models.CharField(max_length=1, choices=USER_TYPE_OPTIONS,
default=UNKNOWN_TYPE)
geo_long = models.FloatField(blank=True, null=True)
geo_lat = models.FloatField(blank=True, null=True)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
class Attendance(models.Model):
school_date = models.DateTimeField(auto_now_add=True)
going = models.BooleanField(default=True)
picked_time = models.DateTimeField(blank=True, null=True)
closeby_alerted = models.BooleanField(default=False)
student = models.ForeignKey(Profile, on_delete=models.CASCADE)
class Notification(models.Model):
#notification types
UNKNOWN_TYPE = 'U'
CLOSEBY_TYPE = 'C'
ACCIDENT_TYPE = 'A'
NOTIFICATION_TYPE_OPTIONS = (
(UNKNOWN_TYPE, 'Unknown'),
(CLOSEBY_TYPE, 'Close By'),
(ACCIDENT_TYPE, 'Accident'),
)
profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
notification_type = models.CharField(max_length=1, choices=NOTIFICATION_TYPE_OPTIONS,
default=UNKNOWN_TYPE)
alerted = models.BooleanField(default=False)
|
StarcoderdataPython
|
6577423
|
<reponame>netzwerkrecherche/auskunftsrecht
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('rulings', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='ruling',
name='previous',
field=models.ForeignKey(related_name='next', to='rulings.Ruling', null=True, on_delete=models.SET_NULL),
preserve_default=True,
),
]
|
StarcoderdataPython
|
11213641
|
<filename>tests/test_jax.py
import unittest
import time
import jax.numpy as np
from common import gpu_test
from jax import grad, jit
class TestJAX(unittest.TestCase):
def tanh(self, x):
y = np.exp(-2.0 * x)
return (1.0 - y) / (1.0 + y)
def test_grad(self):
grad_tanh = grad(self.tanh)
ag = grad_tanh(1.0)
self.assertEqual(0.4199743, ag)
|
StarcoderdataPython
|
3321390
|
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import pdb
torch.manual_seed(123)
class USCNNSep(nn.Module):
def __init__(self,h,nx,ny,nVarIn=1,nVarOut=1,initWay=None,k=5,s=1,p=2):
super(USCNNSep, self).__init__()
"""
Extract basic information
"""
self.initWay=initWay
self.nVarIn=nVarIn
self.nVarOut=nVarOut
self.k=k
self.s=1
self.p=2
self.deltaX=h
self.nx=nx
self.ny=ny
"""
Define net
"""
        self.source=torch.ones(1,47)*0.5
self.source =torch.nn.Parameter(self.source)
self.source.requires_grad = True
W1=16
W2=32
self.relu=nn.ReLU()
self.US=nn.Upsample(size=[self.ny-2,self.nx-2],mode='bicubic')
self.conv1=nn.Conv2d(self.nVarIn,W1,kernel_size=k, stride=s, padding=p)
self.conv2=nn.Conv2d(W1,W2,kernel_size=k, stride=s, padding=p)
self.conv3=nn.Conv2d(W2,W1,kernel_size=k, stride=s, padding=p)
self.conv4=nn.Conv2d(W1,self.nVarOut,kernel_size=k, stride=s, padding=p)
self.pixel_shuffle1 = nn.PixelShuffle(1)
self.conv11=nn.Conv2d(self.nVarIn,W1,kernel_size=k, stride=s, padding=p)
self.conv22=nn.Conv2d(W1,W2,kernel_size=k, stride=s, padding=p)
self.conv33=nn.Conv2d(W2,W1,kernel_size=k, stride=s, padding=p)
self.conv44=nn.Conv2d(W1,self.nVarOut,kernel_size=k, stride=s, padding=p)
self.pixel_shuffle11 = nn.PixelShuffle(1)
self.conv111=nn.Conv2d(self.nVarIn,W1,kernel_size=k, stride=s, padding=p)
self.conv222=nn.Conv2d(W1,W2,kernel_size=k, stride=s, padding=p)
self.conv333=nn.Conv2d(W2,W1,kernel_size=k, stride=s, padding=p)
self.conv444=nn.Conv2d(W1,self.nVarOut,kernel_size=k, stride=s, padding=p)
self.pixel_shuffle111 = nn.PixelShuffle(1)
if self.initWay is not None:
self._initialize_weights()
#Specify filter
dxiFilter=torch.Tensor([[[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[1., -8., 0., 8., -1.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]]]).to("cuda")/12./self.deltaX
self.convdxi=nn.Conv2d(1, 1, (5,5),stride=1, padding=0, bias=None)
self.convdxi.weight=nn.Parameter(dxiFilter, requires_grad=False)
detaFilter=torch.Tensor([[[[0., 0., 1., 0., 0.],
[0., 0., -8., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 8., 0., 0.],
[0., 0., -1., 0., 0.]]]]).to("cuda")/12./self.deltaX
self.convdeta=nn.Conv2d(1,1,(5,5),stride=1,padding=0,bias=None)
self.convdeta.weight=nn.Parameter(detaFilter,requires_grad=False)
lapFilter=torch.Tensor([[[[0., 0., -1., 0., 0.],
[0., 0., 16., 0., 0.],
[-1., 16., -60., 16., -1.],
[0., 0., 16., 0., 0.],
[0., 0., -1., 0., 0.]]]]).to("cuda")/12./self.deltaX/self.deltaX
self.convlap = nn.Conv2d(1, 1, (5,5),stride=1, padding=0, bias=None)
self.convlap.weight=nn.Parameter(lapFilter, requires_grad=False)
def forward(self, x):
x=self.US(x)
x1=self.relu(self.conv1(x))
x1=self.relu(self.conv2(x1))
x1=self.relu(self.conv3(x1))
x1=self.pixel_shuffle1(self.conv4(x1))
x2=self.relu(self.conv11(x))
x2=self.relu(self.conv22(x2))
x2=self.relu(self.conv33(x2))
x2=self.pixel_shuffle11(self.conv44(x2))
x3=self.relu(self.conv111(x))
x3=self.relu(self.conv222(x3))
x3=self.relu(self.conv333(x3))
x3=self.pixel_shuffle111(self.conv444(x3))
return torch.cat([x1,x2,x3],axis=1)
def _initialize_weights(self):
if self.initWay=='kaiming':
init.kaiming_normal_(self.conv1.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv2.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv3.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv4.weight)
init.kaiming_normal_(self.conv11.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv22.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv33.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv44.weight)
init.kaiming_normal_(self.conv111.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv222.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv333.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv444.weight)
elif self.initWay=='ortho':
init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv4.weight)
init.orthogonal_(self.conv11.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv22.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv33.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv44.weight)
init.orthogonal_(self.conv111.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv222.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv333.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv444.weight)
else:
print('Only Kaiming or Orthogonal initializer can be used!')
exit()
|
StarcoderdataPython
|
8123012
|
<reponame>OscarXiberta/bclearer<gh_stars>0
class AttributeToScopePatternConfigurationObjects:
def __init__(
self,
attributed_type_name: str,
attributed_type_ea_guid: str,
attribute_name: str,
attribute_ea_guid: str,
scoping_type: str,
scoping_type_name: str):
self.attributed_type_name = \
attributed_type_name
self.attributed_type_ea_guid = \
attributed_type_ea_guid
self.attribute_name = \
attribute_name
self.attribute_ea_guid = \
attribute_ea_guid
self.scoping_type = \
scoping_type
self.scoping_type_name = \
scoping_type_name
def __enter__(
self):
return \
self
def __exit__(
self,
exception_type,
exception_value,
traceback):
pass
|
StarcoderdataPython
|
240872
|
# -*- coding: utf-8 -*-
import os
import json
from collections import defaultdict
from nltk.tokenize import punkt
from .sentence_tokenizer import SentenceTokenizer
class Parser(object):
def __init__(self, ideal=20.0, stop_words=None, tokenizer=None):
        self.ideal = ideal
if not stop_words:
stop_words = self._get_stop_words()
self.stop_words = stop_words
if not tokenizer:
fname = os.path.dirname(os.path.abspath(__file__)) + '/trainer/english.json'
with open(fname, 'r') as fp:
data = json.load(fp)
self.training = self.load_training(
set(data['AbbrevTypes']),
set(data['Collocations']),
set(data['SentStarters']),
defaultdict(int, data['OrthoContext'])
)
tokenizer = SentenceTokenizer()
tokenizer._params = self.training
self.tokenizer = tokenizer
def load_training(self, abbrev_types, collocations, sent_starters, ortho_context):
""" Manually supply training data instead of using nltk's default pickle.
This will allow us to extend PunktSentenceTokenizer to fix its warts or
add data to our training data. """
training = punkt.PunktParameters()
training.abbrev_types = abbrev_types
training.collocations = collocations
training.sent_starters = sent_starters
training.ortho_context = ortho_context
return training
def _get_stop_words(self):
with open(os.path.dirname(os.path.abspath(__file__)) + '/trainer/stop_words.txt') as file:
words = file.readlines()
return [word.replace('\n', '') for word in words]
def get_keywords(self, text):
text = self.remove_punctations(text)
words = self.words(text)
words = self.remove_stop_words(words)
unique_words = list(set(words))
keywords = [{'word': word, 'count': words.count(word)} for word in unique_words]
keywords = sorted(keywords, key=lambda x: -x['count'])
return (keywords, len(words))
def get_sentence_length_score(self, sentence):
return (self.ideal - abs(self.ideal - len(sentence))) / self.ideal
# <NAME>., <NAME>., & <NAME>. (2005). Sentence Extraction Based Single Document Summarization. International Institute of Information Technology, Hyderabad, India, 5.
def get_sentence_position_score(self, i, sentence_count):
normalized = i / (sentence_count * 1.0)
if normalized > 0 and normalized <= 0.1:
return 0.17
elif normalized > 0.1 and normalized <= 0.2:
return 0.23
elif normalized > 0.2 and normalized <= 0.3:
return 0.14
elif normalized > 0.3 and normalized <= 0.4:
return 0.08
elif normalized > 0.4 and normalized <= 0.5:
return 0.05
elif normalized > 0.5 and normalized <= 0.6:
return 0.04
elif normalized > 0.6 and normalized <= 0.7:
return 0.06
elif normalized > 0.7 and normalized <= 0.8:
return 0.04
elif normalized > 0.8 and normalized <= 0.9:
return 0.04
elif normalized > 0.9 and normalized <= 1.0:
return 0.15
else:
return 0
def get_title_score(self, title, sentence):
title_words = self.remove_stop_words(title)
sentence_words = self.remove_stop_words(sentence)
matched_words = [word for word in sentence_words if word in title_words]
return len(matched_words) / (len(title) * 1.0)
def sentences(self, text):
return self.tokenizer.tokenize(text)
def tokens(self, text):
""" Get a list of annotated tokens instead of a list of sentences """
tokens = self.tokenizer._tokenize_words(text)
annotated_tokens = self.tokenizer._annotate_tokens(tokens)
return annotated_tokens
def words(self, sentence):
return sentence.lower().split()
def remove_punctations(self, text):
return ''.join(t for t in text if t.isalnum() or t == ' ')
def remove_stop_words(self, words):
return [word for word in words if word not in self.stop_words]
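# Minimal usage sketch (illustrative, not part of the original module). It assumes the
# bundled trainer files (trainer/english.json and trainer/stop_words.txt) are present so
# that Parser() can load its Punkt training data and stop words, and that common words
# such as 'the' and 'on' appear in the stop-word list.
if __name__ == '__main__':
    parser = Parser()
    keywords, word_count = parser.get_keywords('The cat sat on the mat. The cat slept.')
    print(word_count)    # number of remaining (non-stop) words
    print(keywords[0])   # most frequent keyword, e.g. {'word': 'cat', 'count': 2}
    print(parser.get_sentence_length_score('The cat sat on the mat.'))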
|
StarcoderdataPython
|
1645923
|
from os import path
import unittest
from prudentia.utils import io
class TestIO(unittest.TestCase):
def test_xstr(self):
self.assertEqual(io.xstr(None), '')
def test_yes(self):
self.assertTrue(io.input_yes_no('test topic', prompt_fn=lambda m: 'y'))
self.assertTrue(io.input_yes_no('test topic', prompt_fn=lambda m: 'yes'))
def test_no(self):
self.assertFalse(io.input_yes_no('test topic', prompt_fn=lambda m: 'whatever'))
self.assertFalse(io.input_yes_no('test topic', prompt_fn=lambda m: 'no'))
def test_yes_no_default(self):
self.assertFalse(io.input_yes_no('test topic', prompt_fn=lambda m: ''))
def test_mandatory_input(self):
self.assertRaises(ValueError, io.input_value, 'mandatory topic', prompt_fn=lambda m: '')
def test_int_input(self):
self.assertEqual(io.input_value('int topic', default_value=1, prompt_fn=lambda m: '123'), 123)
self.assertRaises(ValueError, io.input_value, 'int topic', default_value=1, prompt_fn=lambda m: 'aaa')
def test_value_hidden(self):
pwd = '<PASSWORD>'
self.assertEqual(io.input_value('pwd', hidden=True, hidden_prompt_fn=lambda m: pwd), pwd)
def test_path_file(self):
f = "./uname.yml"
self.assertNotEqual(io.input_path('cwd file', prompt_fn=lambda m: f), None)
def test_prudentia_dir(self):
expected_path = path.join(path.dirname(path.realpath('.')), 'prudentia')
self.assertEqual(io.prudentia_python_dir(), expected_path)
def test_invalid_path_file(self):
self.assertRaises(ValueError, io.input_path, 'cwd file', prompt_fn=lambda m: 'foo')
self.assertRaises(ValueError, io.input_path, 'cwd file', prompt_fn=lambda m: '.')
self.assertRaises(ValueError, io.input_path, 'cwd file', is_file=False, prompt_fn=lambda m: './uname.yml')
def test_sanity_choices(self):
self.assertRaises(ValueError, io.input_choice, 'choice topic', choices=None)
self.assertRaises(ValueError, io.input_choice, 'choice topic', choices=[])
self.assertRaises(ValueError, io.input_choice, 'choice topic', default='d', choices=['a', 'b', 'c'])
def test_choice(self):
c = ['well', 'Iam', 'gonna', 'be', 'chosen']
self.assertEqual(io.input_choice('choice topic', choices=c, prompt_fn=lambda m: 'be'), 'be')
self.assertEqual(io.input_choice('choice topic', default='Iam', choices=c, prompt_fn=lambda m: ''), 'Iam')
def test_invalid_retry_choice(self):
self.assertRaises(ValueError, io.input_choice, 'choice topic', choices=['choice'], prompt_fn=lambda m: 'bla')
|
StarcoderdataPython
|
3302561
|
import tornado.ioloop
import tornado.web
import socket
import bokeh.layouts
import bokeh.plotting
import bokeh.models
import bokeh.core.properties
import weakref
import uuid
import sys
from . import serverutils
class MainHandler(tornado.web.RequestHandler):
def get(self):
download_id = self.request.path.split("/")[-1]
if download_id in downloads:
downloads[download_id].get(self)
else:
self.set_status(404, "Unknown download ID %s (existing: %s)" % (download_id, ",".join(str(key) for key in downloads.keys())))
def downloadify(server):
if not hasattr(server, "bokeh_garden_download"):
server.bokeh_garden_download = True
server.add_handlers(r".*", [
tornado.web.URLSpec(r"/bokeh-garden/download/.*", MainHandler, name="bokeh-garden-download"),
])
downloads = weakref.WeakValueDictionary()
class Download(serverutils.HTTPModel, bokeh.models.Div):
__view_model__ = bokeh.models.Div.__view_model__
__view_module__ = bokeh.models.Div.__view_module__
__subtype__ = "Download"
content = bokeh.core.properties.Any(serialized=False)
filename = bokeh.core.properties.String(default="file.txt", serialized=False)
def __init__(self, **kw):
bokeh.models.Div.__init__(self, **kw)
self._download_id = None
def http_init(self):
downloadify(self.bokeh_tornado)
self._download_id = str(uuid.uuid4())
downloads[self._download_id] = self
self.text = "<a href='%s%s/%s' target='_new'>%s</a>" % (self.base_url, "/bokeh-garden/download", self._download_id, self.text)
def get(self, request_handler):
request_handler.add_header("Content-Disposition", 'attachment; filename="%s"' % self.filename)
request_handler.write(bytes(self.content))
|
StarcoderdataPython
|
30163
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_flcdrpf_ckl_mmp_cds
@file marine-integrations/mi/dataset/parser/test/test_flcdrpf_ckl_mmp_cds.py
@author <NAME>
@brief Test code for a flcdrpf_ckl_mmp_cds data parser
"""
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import SampleException
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.flntu_x.mmp_cds.resource import RESOURCE_PATH
from mi.dataset.parser.mmp_cds_base import MmpCdsParser
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
@attr('UNIT', group='mi')
class FlntuXMmpCdsParserUnitTestCase(ParserUnitTestCase):
"""
flntu_x_mmp_cds Parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self.config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.flntu_x_mmp_cds',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'FlntuXMmpCdsParserDataParticle'
}
def test_simple(self):
"""
This test reads in a small number of particles and verifies the result of one of the particles.
"""
with open(os.path.join(RESOURCE_PATH, 'flntu_1_20131124T005004_458.mpk'), 'rb') as stream_handle:
parser = MmpCdsParser(self.config, stream_handle, self.exception_callback)
particles = parser.get_records(6)
# this yml file only has particle 0 in it
self.assert_particles(particles[0:1], 'first.yml', RESOURCE_PATH)
# this yml file only has particle 1 in it
self.assert_particles(particles[1:2], 'second.yml', RESOURCE_PATH)
# this yml file only has particle 5 in it
self.assert_particles(particles[5:6], 'good.yml', RESOURCE_PATH)
def test_get_many(self):
"""
This test exercises retrieving 20 particles, verifying the 20th particle, then retrieves 30 particles
and verifies the 30th particle.
"""
with open(os.path.join(RESOURCE_PATH, 'flntu_1_20131124T005004_458.mpk'), 'rb') as stream_handle:
parser = MmpCdsParser(self.config, stream_handle, self.exception_callback)
particles = parser.get_records(20)
# Should end up with 20 particles
self.assertTrue(len(particles) == 20)
# this yml file only has particle 0 in it
self.assert_particles(particles[0:1], 'first.yml', RESOURCE_PATH)
# this yml file only has particle 19 in it
self.assert_particles(particles[19:20], 'get_many_one.yml', RESOURCE_PATH)
particles = parser.get_records(30)
# Should end up with 30 particles
self.assertTrue(len(particles) == 30)
# this yml file only has particle 29 in it
self.assert_particles(particles[29:30], 'get_many_two.yml', RESOURCE_PATH)
def test_long_stream(self):
"""
        This test exercises retrieving approximately 200 particles.
"""
# Using two concatenated msgpack files to simulate two chunks to get more particles.
with open(os.path.join(RESOURCE_PATH, 'flntu_concat.mpk'), 'rb') as stream_handle:
parser = MmpCdsParser(self.config, stream_handle, self.exception_callback)
            # Attempt to retrieve 200 particles, but fewer are available
            particles = parser.get_records(200)
            # Should end up with 184 particles
self.assertTrue(len(particles) == 184)
def test_bad_data_one(self):
"""
This test verifies that a SampleException is raised when msgpack data is malformed.
"""
with open(os.path.join(RESOURCE_PATH, 'flntu_1_20131124T005004_458-BAD.mpk'), 'rb') as stream_handle:
parser = MmpCdsParser(self.config, stream_handle, self.exception_callback)
parser.get_records(1)
self.assertEqual(len(self.exception_callback_value), 1)
self.assert_(isinstance(self.exception_callback_value[0], SampleException))
def test_bad_data_two(self):
"""
This test verifies that a SampleException is raised when an entire msgpack buffer is not msgpack.
"""
with open(os.path.join(RESOURCE_PATH, 'not-msg-pack.mpk'), 'rb') as stream_handle:
parser = MmpCdsParser(self.config, stream_handle, self.exception_callback)
parser.get_records(1)
self.assertTrue(len(self.exception_callback_value) >= 1)
self.assert_(isinstance(self.exception_callback_value[0], SampleException))
|
StarcoderdataPython
|
3281619
|
<reponame>wqu-bom/pybufrkit
"""
pybufrkit.script
~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import
from __future__ import print_function
import functools
import ast
from pybufrkit.dataquery import QueryResult
from pybufrkit.query import BufrMessageQuerent
__all__ = ['process_embedded_query_expr', 'ScriptRunner']
STATE_IDLE = ''
STATE_EMBEDDED_QUERY = '${'
STATE_SINGLE_QUOTE = "'"
STATE_DOUBLE_QUOTE = '"'
STATE_COMMENT = '#'
def process_embedded_query_expr(input_string):
"""
This function scans through the given script and identify any path/metadata
expressions. For each expression found, an unique python variable name will
be generated. The expression is then substituted by the variable name.
:param str input_string: The input script
:return: A 2-element tuple of the substituted string and a dict of substitutions
:rtype: (str, dict)
"""
keep = []
state = ''
idx_char = idx_var = 0
substitutions = {} # keyed by query expression
query_expr = []
while idx_char < len(input_string):
c = input_string[idx_char]
if state == STATE_EMBEDDED_QUERY:
if c == '}':
state = STATE_IDLE
s = ''.join(query_expr).strip()
query_expr = []
if s not in substitutions:
varname = 'PBK_{}'.format(idx_var)
idx_var += 1
substitutions[s] = varname
else:
varname = substitutions[s]
keep.append(varname)
else:
query_expr.append(c)
elif (c == "'" or c == '"') and state != STATE_EMBEDDED_QUERY:
if state == c: # quoting pair found, pop it
state = STATE_IDLE
elif state == '': # new quote begins
state = c
keep.append(c)
elif c == '$' and state == STATE_IDLE: # an unquoted $
if idx_char + 1 < len(input_string) and input_string[idx_char + 1] == '{':
state = STATE_EMBEDDED_QUERY
                    # Once it enters the embedded query state, any pound sign (#) or
                    # double/single quotes are ignored until the closing brace
idx_char += 1
else:
keep.append(c)
elif c == '#' and state == STATE_IDLE:
state = STATE_COMMENT
keep.append(c)
elif c == '\n' and state == STATE_COMMENT:
state = STATE_IDLE
keep.append(c)
else:
keep.append(c)
idx_char += 1
return ''.join(keep), substitutions
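# Illustrative example (not part of the original module) of what the scanner above
# produces: every ${...} expression is replaced by a generated variable name and
# recorded in the returned substitutions dict, e.g.
#
#   >>> process_embedded_query_expr('print(${%year}, ${001002})')
#   ('print(PBK_0, PBK_1)', {'%year': 'PBK_0', '001002': 'PBK_1'})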
# The following constants represent the nesting levels for values from BUFR data
# section. The nesting levels are decided by the level of parenthesis, which is
# represented by the numbers. A number Zero means no parenthesis at all, i.e.
# scalar. A number One means one level of parenthesis, i.e. a simple list with
# no nesting.
DATA_VALUES_NEST_LEVEL_0 = 0 # flatten to scalar by return only the first element
DATA_VALUES_NEST_LEVEL_1 = 1 # flatten to a list with no nesting, this is the default
DATA_VALUES_NEST_LEVEL_2 = 2 # flatten to a list nested with subsets
DATA_VALUES_NEST_LEVEL_4 = 4 # no flatten at all, fully nested by subsets, replications
class ScriptRunner(object):
"""
This class is responsible for running the given script against BufrMessage
object.
.. attribute:: code_string
The processed/substituted source code.
.. attribute:: code_object
The compiled code object from the code string.
.. attribute:: pragma
Extra processing directives
.. attribute:: metadata_only
Whether the script requires only metadata part of the BUFR message to work.
.. attribute:: querent
The BufrMessageQuerent object for performing the values query.
"""
def __init__(self, input_string,
data_values_nest_level=None):
self.code_string, self.substitutions = process_embedded_query_expr(input_string)
self.pragma = {
'data_values_nest_level': DATA_VALUES_NEST_LEVEL_1,
}
# Read pragma from inside the script
self.process_pragma()
# Pragma passed from function call has higher priority
if data_values_nest_level is not None:
self.pragma['data_values_nest_level'] = data_values_nest_level
self.code_object = compile(self.code_string, '', 'exec')
self.metadata_only = True
for query_str in self.substitutions.keys():
if not query_str.startswith('%'):
self.metadata_only = False
break
self.querent = BufrMessageQuerent()
def run(self, bufr_message):
variables = {
varname: self.get_query_result(bufr_message, query_string)
for query_string, varname in self.substitutions.items()
}
variables.update(
{
'PBK_BUFR_MESSAGE': bufr_message,
'PBK_FILENAME': bufr_message.filename,
}
)
exec (self.code_object, variables)
return variables
def get_query_result(self, bufr_message, query_expr):
qr = self.querent.query(bufr_message, query_expr)
if isinstance(qr, QueryResult):
return self.flatten_data_values(qr)
return qr
def flatten_data_values(self, qr):
data_values_nest_level = self.pragma['data_values_nest_level']
if data_values_nest_level == DATA_VALUES_NEST_LEVEL_0:
values = qr.all_values(flat=True)
values = functools.reduce(lambda x, y: x + y, values, [])
return values[0] if len(values) > 0 else None
elif data_values_nest_level == DATA_VALUES_NEST_LEVEL_1:
values = qr.all_values(flat=True)
return functools.reduce(lambda x, y: x + y, values, [])
elif data_values_nest_level == DATA_VALUES_NEST_LEVEL_2:
return qr.all_values(flat=True)
else: # No flatten, fully nested
return qr.all_values()
def process_pragma(self):
for line in self.code_string.splitlines():
if not line.startswith('#$'):
return
for assignment in line[3:].split(','):
k, v = assignment.split('=')
k = k.strip()
if k in self.pragma:
self.pragma[k] = ast.literal_eval(v.strip())
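# Illustrative sketch (not part of the original module): a script passed to ScriptRunner
# may set pragmas on its leading '#$' lines and reference BUFR values through ${...}
# expressions, for example:
#
#   #$ data_values_nest_level=0
#   print(PBK_FILENAME, ${%year}, ${001002})
#
# ScriptRunner(script_text).run(bufr_message) then compiles the substituted code and
# executes it with the queried values bound to the generated PBK_* variables
# (bufr_message being a decoded pybufrkit BufrMessage object).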
|
StarcoderdataPython
|
1792304
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import keras.layers.advanced_activations as activations
from ...common._apply_operation import apply_elu, apply_leaky_relu, apply_prelu
from ...common._registration import register_converter
def convert_keras_advanced_activation(scope, operator, container):
op = operator.raw_operator
if isinstance(op, activations.LeakyReLU):
alpha = op.get_config()['alpha']
apply_leaky_relu(scope, operator.input_full_names[0], operator.output_full_names[0], container,
operator_name=operator.full_name, alpha=alpha)
elif isinstance(op, activations.ELU):
alpha = op.get_config()['alpha']
apply_elu(scope, operator.input_full_names[0], operator.output_full_names[0], container,
operator_name=operator.full_name, alpha=alpha)
elif isinstance(op, activations.PReLU):
weights = op.get_weights()[0].flatten()
apply_prelu(scope, operator.input_full_names[0], operator.output_full_names[0], container,
operator_name=operator.full_name, slope=weights)
else:
attrs = {'name': operator.full_name}
input_tensor_names = [operator.input_full_names[0]]
if isinstance(op, activations.ThresholdedReLU):
op_type = 'ThresholdedRelu'
attrs['alpha'] = op.get_config()['theta']
# elif isinstance(op, activations.Softmax):
# attrs['axis'] = op.get_config()['axis']
else:
raise RuntimeError('Unsupported advanced layer found %s' % type(op))
container.add_node(op_type, input_tensor_names, operator.output_full_names, **attrs)
register_converter(activations.LeakyReLU, convert_keras_advanced_activation)
register_converter(activations.ThresholdedReLU, convert_keras_advanced_activation)
register_converter(activations.ELU, convert_keras_advanced_activation)
register_converter(activations.PReLU, convert_keras_advanced_activation)
# TODO:Following layer is not supported by the checked-in keras version and requires an upgrade of the checked-in keras
# register_converter(activations.Softmax, convert_keras_advanced_activation)
|
StarcoderdataPython
|
9709222
|
<reponame>t0mmyt/cloud_computing
#!/usr/bin/env python2
import re
from sys import stdin, stdout
import json
def mapper():
"""
Stripes mapper function
Reads stdin, (emits {w1: {w2_1: n},{w2_2: n}) on stdout
"""
word_1 = None
# Dict to store counts before emitting
n = dict()
line = stdin.readline()
while line:
        # Non-blank line: process its words (a blank line marks a paragraph break)
        if line.strip():
# Make everything lower case
line = line.lower().strip()
# Strip non word/whitespace characters
line = re.sub(r'[^\w\s]', '', line)
# Replace any whitespace with a single space
line = re.sub(r'\s+', ' ', line)
# Loop though words on line
for word_2 in line.split(' '):
# Is this a new pair? (following a paragraph)
if word_1:
if word_1 not in n.keys():
n[word_1] = dict()
if word_2 not in n[word_1].keys():
n[word_1][word_2] = 1
else:
n[word_1][word_2] += 1
# current word is previous word now
word_1 = word_2
else:
# There was a new paragraph
word_1 = None
line = stdin.readline()
for k, v in n.items():
stdout.write("%s: %s\n" % (k, json.dumps(v)))
if __name__ == '__main__':
mapper()
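# Illustrative example (not part of the original script): for the single input line
#   a b a b
# the mapper counts the adjacent pairs (a,b), (b,a), (a,b) and emits one stripe per
# first word:
#   a: {"b": 2}
#   b: {"a": 1}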
|
StarcoderdataPython
|
296821
|
<gh_stars>0
import discord
from discord.ext import commands
import datetime
#the present date
present = datetime.date.today()
#the first day of school
first_day = datetime.date(2022, 2, 3)
#assign the first day to a value
day = 1
#list of dates that are weekdays that we do not have school
no_school = [datetime.date(2022, 2, 18), datetime.date(2022, 2, 21), datetime.date(2022, 3, 14), datetime.date(2022, 3, 15), datetime.date(2022, 3, 16), datetime.date(2022, 3, 17), datetime.date(2022, 3, 18), datetime.date(2022, 4, 15), datetime.date(2022, 4, 18), datetime.date(2022, 5, 23), datetime.date(2022, 6, 3), datetime.date(2022, 6, 30)]
odd_day1 = [datetime.date(2022, 4, 18), datetime.date(2022, 4, 19), datetime.date(2022, 5, 10)]
odd_day2 = [datetime.date(2022, 4, 27), datetime.date(2022, 4, 28), datetime.date(2022, 5, 12), datetime.date(2022, 6, 27), datetime.date(2022, 6, 28)]
#time delta that will increase the day by 1
next_day = datetime.timedelta(days = 1)
#assign variables that will give the result in ctx.send command
weekend = "Today is a weekend"
give_day = "Today is a day "
no_school_today = "There is no school today"
class Schedule(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command()
async def schedule(self, ctx):
#function that checks all of the days that are weekdays where there is no school
def no_school_days():
#check through all of no school dates
            for i in range(len(no_school)):
#if today is a day during the week where there is no school
if no_school[i] == present:
return True
return False
        #function that checks whether the date being checked is a day where there is no school
def no_school_days2():
global first_day
#check through all of the no school days
            for i in range(len(no_school)):
                #verify whether the date being checked is a day where there is no school so the program can skip over it when counting "school days"
if no_school[i] == first_day:
return True
return False
#function that checks if the date being checked is a day where there is a day 1, 2 days in a row
def odd_day1_check():
global first_day
for i in range(3):
                #verify whether the date being checked is a second consecutive day 1 so the program can skip over it when counting "school days"
if odd_day1[i] == first_day:
return True
return False
#function that checks if the date being checked is a day where there is a day 2, 2 days in a row
def odd_day2_check():
global first_day
for i in range(5):
                #verify whether the date being checked is a second consecutive day 2 so the program can skip over it when counting "school days"
if odd_day2[i] == first_day:
return True
return False
#function that checks if school is today
def is_school_today():
global give_result
global first_day
global day
"""
the first day of school is a day 1 so this function will check if the first day of school is the present. if this is not true
1 day will be added to the first day and the day will switch from 1 to 2 (if it is not a weekend and there is school). This is so
that the function can assign each school day a 1 or a 2 until we get to the present and will print what day it is.
"""
#check through each day in the second semester
for i in range (149):
if present == first_day:
#if today is saturday
if present.isoweekday() == 6:
#the result will tell the user it is a weekend
give_result = weekend
return False
if present.isoweekday() == 7:
give_result = weekend
return False
#if today is a school day
else:
give_result = "Today is a day " + str(day)
return True
#if we have not reached the present
else:
#if the date that is being checked is a day where there is no school
if no_school_days2() == True:
#go to the next day
first_day += next_day
#if the date that is being checked is a saturday
elif first_day.isoweekday() == 6:
#go to the next day
first_day += next_day
#if the day that is being checked is a sunday
elif first_day.isoweekday() == 7:
#go to the next day
first_day += next_day
elif odd_day1_check() == True:
first_day += next_day
elif odd_day2_check() == True:
first_day += next_day
#if the date being checked is a school day
else:
#go to the next day
first_day = first_day + next_day
#change the school day
day += 1
if day == 3:
day = 1
return True
#if today is not a holiday
if no_school_days() == False:
#if there is no school
if is_school_today() == False:
give_result = weekend
else:
give_result = give_day + str(day)
else:
give_result = no_school_today
await ctx.send(give_result)
def setup(client):
client.add_cog(Schedule(client))
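# Worked example of the intended behaviour (illustrative, assuming the 2022 calendar
# above): the counter starts at day 1 on the first day of school, 2022-02-03 (a
# Thursday), and is meant to alternate between day 1 and day 2 on each school day
# while skipping weekends and the no_school dates. So 2022-02-04 should report
# "Today is a day 2" and 2022-02-07 (the following Monday) "Today is a day 1" again.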
|
StarcoderdataPython
|
5068126
|
import pandas as pd
import numpy as np
import warnings
import io
import itertools
import yaml
import math
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import os
# read csv data
#df = pd.read_excel('./assessment/output/fig2.9_data_table.xlsx')
df = pd.read_csv('./assessment/output/CombinedCsvsWithOutcome.csv')
# drop the negative negatives
#df = df[df.Variable != 'Emissions|CO2|Net-negative-negative']
yrs = np.linspace(2020,2100,17) # every 5 years included
print(df.Variable.unique())
print(df.shape)
x = df.groupby(['model', 'scenario']).size().reset_index().rename(columns={0:'count'})
print(type(x))
print(x.iloc[0])
print(x.iloc[0].model)
print(len(x))
#newRows = make a new dataframe/new rows for dac
for n in range(len(x)):
y = df[df.model == x.iloc[n].model]
y = y[y.scenario == x.iloc[n].scenario].drop(columns = ['model', 'scenario', 'Unnamed: 0', 'marker', 'category'])
t = y[y.Variable == 'Total CDR'].drop(columns=['Variable']).values
a = y[y.Variable == 'AFOLU CDR'].drop(columns=['Variable']).values
b = y[y.Variable == 'BECCS'].drop(columns=['Variable']).values
nn = y[y.Variable == 'Net negative CO2'].drop(columns=['Variable']).values
c = y[y.Variable == 'Compensate CDR'].drop(columns=['Variable']).values
if a.size == 0:
d = np.round(t-b,4)
elif b.size == 0:
d = np.round(t-a, 4)
else:
d = np.round((t-(a+b)),4)
print(d)
print(np.sum(d))
#if np.sum(d)>0:
#print(df.groupby(['model', 'scenario']).count())
# check to see how much DAC is included
dfcost = df # make a copy
afolu_cost = 50 #$/ton
ccs_biomass = 80 #$/ton
ccs_dac = 100 #$/ton
ccs_ew = 100 #$/ton I have no idea on this one
ccs_other = 100 # need to figure out if net negative is just unspecified get rid of it or what the deal is
# calculate costs for different types of ccs
for n in range(len(dfcost)):
if dfcost.Variable.iloc[n] == 'AFOLU CDR':
c = afolu_cost
elif dfcost.Variable.iloc[n] == 'BECCS':
c = ccs_biomass
elif dfcost.Variable.iloc[n] == 'Net negative CO2':
c = 0
elif dfcost.Variable.iloc[n] == 'Compensate CDR':
c = 0
else:
c = 0
for r in range(17):
if math.isnan(dfcost[str(int(yrs[r]))][n])==False:
dfcost[str(int(yrs[r]))].iloc[n] = dfcost[str(int(yrs[r]))].iloc[n]*c*1000*1000*1000 #convert from Gt to tons
dfcost.to_csv('costouttest.csv') # still has nans
# calculate one annual total per model & scenario combo
g = dfcost.groupby(['category', 'model', 'scenario']).agg({'2020': 'sum', '2025': 'sum', '2030': 'sum', '2035': 'sum', '2040': 'sum', '2045': 'sum', '2050': 'sum', '2055': 'sum', '2060': 'sum', '2065': 'sum', '2070': 'sum', '2075': 'sum', '2080': 'sum', '2085': 'sum', '2090': 'sum', '2095': 'sum', '2100':'sum'})
g = g.reset_index()
print(len(g))
print(g.columns)
# add a category column
g.to_csv('midpointout.csv')
# interpolate where necessary
for n in range(len(g)):
for r in range(15):
# check previous and next values
if g.iloc[n,r+4]==0:
if g.iloc[n, r+3] != 0:
if g.iloc[n,r+5] != 0:
g.iat[n, r+4] = (g.iloc[n, r+3] + g.iloc[n, r+5])/2
# reformat
for r in range(17-1):
lb = yrs[r]
ub = yrs[r+1]
for m in range(4):
g[str(int(lb+m+1))] = np.zeros(len(g))
for r in range(16):
lb = yrs[r]
ub = yrs[r+1]
for m in range(len(g)):
if math.isnan(df[str(int(lb))].iloc[m])==True:
lb = yrs[r-1]
if math.isnan(df[str(int(ub))].iloc[m])==True:
ub = yrs[r+2]
z = int(ub - lb - 1)
for q in range(z):
g[str(int(lb+q+1))].iloc[m] = (g[str(int(lb))].iloc[m]*(z-q)+g[str(int(ub))].iloc[m]*(q+1))/(z+1)
a = np.linspace(2000, 2100, 101).astype(int) #tolist()
a = a.tolist()
#print(a)
#print(df.columns)
#print(g.columns)
g = g.reindex(columns = ['model', 'scenario', 'category', '2020', '2021', '2022', '2023', '2024', '2025', '2026', '2027', '2028', '2029', '2030', '2031', '2032', '2033', '2034', '2035', '2036', '2037', '2038', '2039', '2040', '2041', '2042', '2043', '2044', '2045', '2046', '2047', '2048', '2049', '2050', '2051', '2052', '2053', '2054', '2055', '2056', '2057', '2058', '2059', '2060', '2061', '2062', '2063', '2064', '2065', '2066', '2067', '2068', '2069', '2070', '2071', '2072', '2073', '2074', '2075', '2076', '2077', '2078', '2079', '2080', '2081', '2082', '2083', '2084', '2085', '2086', '2087', '2088', '2089', '2090', '2091', '2092', '2093', '2094', '2095', '2096', '2097', '2098', '2099', '2100'])
print(df.Variable.unique())
g_discounted_stern = g.copy()
g_discounted_nordhaus = g.copy()
g_discounted_avg = g.copy()
# do some discounting
stern_delta = 0.001
nordhaus_delta = 0.015
avg_delta = (stern_delta+nordhaus_delta)/2
global_growth = 0.03 # this is a guesstimate, would need to review actual historical data
r = 0.03 # dummy discount rate
#n = 81
#g_discounted_stern['2100'] = g_discounted_stern['2100']*(1/((1+(r+stern_delta))**(n+1)))
for n in range(2100-2020+1):
g_discounted_stern[str(int(n+2020))] = g[str(int(n+2020))]*(1/((1+(r+stern_delta))**(n+1)))
g_discounted_nordhaus[str(int(n+2020))] = g[str(int(n+2020))]*(1/((1+(r+nordhaus_delta))**(n+1)))
g_discounted_avg[str(int(n+2020))] = g[str(int(n+2020))]*(1/((1+(r+avg_delta))**(n+1)))
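# Worked example of the discount factor applied above (illustrative numbers): with
# r = 0.03 and stern_delta = 0.001, the 2020 column (n = 0) is multiplied by
# 1 / (1.031 ** 1) ~= 0.970 and the 2100 column (n = 80) by 1 / (1.031 ** 81) ~= 0.084.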
# calculate NPVs
def calcNPV(df):
df['NPV'] = np.zeros(len(df))
for n in range(2100-2020+1):
df['NPV'] = df['NPV'] + df[str(int(n+2020))]
return df
g_discounted_stern = calcNPV(g_discounted_stern)
g_discounted_nordhaus = calcNPV(g_discounted_nordhaus)
g_discounted_avg = calcNPV(g_discounted_avg)
def makePlot(df, figName):
df1 = df.drop(columns = ['category', 'model', 'scenario', 'NPV'])
d = df1.values
plt.figure(figsize=(5,3.5))
ax1 = plt.subplot(position=[0.15, 0.13, 0.6, 0.7])
for m in range(len(d)):
plt.plot(np.linspace(2020, 2100, 81), d[m,:]/1000000000) #billions of dollars
plt.ylim(0,1100)
plt.xlabel('Year')
plt.ylabel('Billions of dollars')
plt.title('Annual Costs (discounted)')
npv = np.sum(d, axis = 1)
print(npv.shape)
ax2 = plt.subplot(position = [0.85, 0.13, 0.1, 0.7])
plt.boxplot(npv/1000000000000) #trillions of dollars
plt.ylim(0, 50)
plt.title('NPV')
plt.ylabel('Trillions of dollars')
plt.savefig(figName, dpi=300)
makePlot(g_discounted_stern, 'Figures/SternOut.png')
makePlot(g_discounted_nordhaus, 'Figures/NordhausOut.png')
makePlot(g_discounted_avg, 'Figures/AvgOut.png')
#print(g_discounted_avg[:5])
def makePlotbyCategory(df, figName):
d1 = df[df.category == 'Below 1.5C'].drop(columns = ['category', 'model', 'scenario', 'NPV'])
d2 = df[df.category == '1.5C low overshoot'].drop(columns = ['category', 'model', 'scenario', 'NPV'])
d3 = df[df.category == '1.5C high overshoot'].drop(columns = ['category', 'model', 'scenario', 'NPV'])
d4 = df[df.category == 'Lower 2C'].drop(columns = ['category', 'model', 'scenario', 'NPV'])
d5 = df[df.category == 'Higher 2C'].drop(columns = ['category', 'model', 'scenario', 'NPV'])
c = {'d1':d1.values, 'd2':d2.values, 'd3':d3.values, 'd4':d4.values, 'd5':d5.values}
labs = ('Below 1.5C', '1.5C low\novershoot', '1.5C high\novershoot', 'Lower 2C', 'Higher 2C')
plt.figure(figsize=(7, 4))
for n in range(5):
p = c['d'+str(n+1)]
plt.subplot(position = [0.08+0.135*n, 0.14, 0.12, 0.78])
#plt.subplot(position = [0.08+0.17*n, 0.14, 0.15, 0.78])
for m in range(len(p)):
plt.plot(np.linspace(2020, 2100, 81), p[m,:]/1000000000) #billions of dollars
plt.xlabel('Year', fontsize = 8)
plt.ylim(-100, 1000)
plt.title(labs[n], fontsize = 8)
plt.yticks([0, 200, 400, 600, 800, 1000], labels = (' ', ' ', ' ', ' ', ' ', ' '), fontsize = 8)
plt.xticks([2020, 2030, 2040, 2050, 2060, 2070, 2080, 2090, 2100], labels = ('2020', ' ', ' ', ' ', '2060', ' ', ' ', ' ', '2100'), fontsize =6, rotation = 90)
if n == 0:
plt.ylabel('Billions of dollars', fontsize = 8)
plt.yticks([0, 200, 400, 600, 800, 1000], labels = ('0', '200', '400', '600', '800', '1000'), fontsize = 6)
plt.subplot(position = [0.81, 0.14, 0.17, 0.78])
for n in range(5):
p = c['d'+str(n+1)]
npv = np.sum(p, axis = 1)/1000000000000 # trillions of dollars
plt.boxplot(npv, positions = [n]) #trillions of dollars
plt.ylim(-10, 50)
plt.xlim(-0.5, 4.5)
plt.ylabel('Trillions of dollars', fontsize = 8)
plt.yticks([0, 10, 20, 30, 40, 50], labels = ('0', '10', '20', '30', '40', '50'), fontsize = 6)
plt.xticks([0, 1,2,3,4], labels = labs, fontsize = 6, rotation = 90)
#plt.xticks([0, 1,2], labels = labs, fontsize = 6, rotation = 90)
plt.savefig(figName, dpi=300)
makePlotbyCategory(g_discounted_stern, 'Figures/SternByScenario.png')
makePlotbyCategory(g_discounted_nordhaus, 'Figures/NordhausByScenario.png')
makePlotbyCategory(g_discounted_avg, 'Figures/AvgByScenario.png')
|
StarcoderdataPython
|
3314397
|
<reponame>LowerSilesians/ursa-rest-sqlserver<gh_stars>0
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from __future__ import unicode_literals
from django.db import models
class Adventureworksdwbuildversion(models.Model):
dbversion = models.CharField(db_column='DBVersion', max_length=50, blank=True, null=True) # Field name made lowercase.
versiondate = models.DateTimeField(db_column='VersionDate', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'AdventureWorksDWBuildVersion'
class Databaselog(models.Model):
databaselogid = models.IntegerField(db_column='DatabaseLogID') # Field name made lowercase.
posttime = models.DateTimeField(db_column='PostTime') # Field name made lowercase.
databaseuser = models.CharField(db_column='DatabaseUser', max_length=128) # Field name made lowercase.
event = models.CharField(db_column='Event', max_length=128) # Field name made lowercase.
schema = models.CharField(db_column='Schema', max_length=128, blank=True, null=True) # Field name made lowercase.
object = models.CharField(db_column='Object', max_length=128, blank=True, null=True) # Field name made lowercase.
tsql = models.TextField(db_column='TSQL') # Field name made lowercase.
xmlevent = models.TextField(db_column='XmlEvent') # Field name made lowercase.
class Meta:
managed = False
db_table = '[dbo].[DatabaseLog]'
class Dimaccount(models.Model):
accountkey = models.IntegerField(db_column='AccountKey', primary_key=True) # Field name made lowercase.
parentaccountkey = models.ForeignKey('self', models.DO_NOTHING, db_column='ParentAccountKey', blank=True, null=True) # Field name made lowercase.
accountcodealternatekey = models.IntegerField(db_column='AccountCodeAlternateKey', blank=True, null=True) # Field name made lowercase.
parentaccountcodealternatekey = models.IntegerField(db_column='ParentAccountCodeAlternateKey', blank=True, null=True) # Field name made lowercase.
accountdescription = models.CharField(db_column='AccountDescription', max_length=50, blank=True, null=True) # Field name made lowercase.
accounttype = models.CharField(db_column='AccountType', max_length=50, blank=True, null=True) # Field name made lowercase.
operator = models.CharField(db_column='Operator', max_length=50, blank=True, null=True) # Field name made lowercase.
custommembers = models.CharField(db_column='CustomMembers', max_length=300, blank=True, null=True) # Field name made lowercase.
valuetype = models.CharField(db_column='ValueType', max_length=50, blank=True, null=True) # Field name made lowercase.
custommemberoptions = models.CharField(db_column='CustomMemberOptions', max_length=200, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimAccount'
class Dimcurrency(models.Model):
currencykey = models.IntegerField(db_column='CurrencyKey', primary_key=True) # Field name made lowercase.
currencyalternatekey = models.CharField(db_column='CurrencyAlternateKey', unique=True, max_length=3) # Field name made lowercase.
currencyname = models.CharField(db_column='CurrencyName', max_length=50) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimCurrency'
class Dimcustomer(models.Model):
customerkey = models.IntegerField(db_column='CustomerKey', primary_key=True) # Field name made lowercase.
geographykey = models.ForeignKey('Dimgeography', models.DO_NOTHING, db_column='GeographyKey', blank=True, null=True) # Field name made lowercase.
customeralternatekey = models.CharField(db_column='CustomerAlternateKey', unique=True, max_length=15) # Field name made lowercase.
title = models.CharField(db_column='Title', max_length=8, blank=True, null=True) # Field name made lowercase.
firstname = models.CharField(db_column='FirstName', max_length=50, blank=True, null=True) # Field name made lowercase.
middlename = models.CharField(db_column='MiddleName', max_length=50, blank=True, null=True) # Field name made lowercase.
lastname = models.CharField(db_column='LastName', max_length=50, blank=True, null=True) # Field name made lowercase.
namestyle = models.NullBooleanField(db_column='NameStyle') # Field name made lowercase.
birthdate = models.CharField(db_column='BirthDate', max_length=10, blank=True, null=True) # Field name made lowercase.
maritalstatus = models.CharField(db_column='MaritalStatus', max_length=1, blank=True, null=True) # Field name made lowercase.
suffix = models.CharField(db_column='Suffix', max_length=10, blank=True, null=True) # Field name made lowercase.
gender = models.CharField(db_column='Gender', max_length=1, blank=True, null=True) # Field name made lowercase.
emailaddress = models.CharField(db_column='EmailAddress', max_length=50, blank=True, null=True) # Field name made lowercase.
yearlyincome = models.DecimalField(db_column='YearlyIncome', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
totalchildren = models.SmallIntegerField(db_column='TotalChildren', blank=True, null=True) # Field name made lowercase.
numberchildrenathome = models.SmallIntegerField(db_column='NumberChildrenAtHome', blank=True, null=True) # Field name made lowercase.
englisheducation = models.CharField(db_column='EnglishEducation', max_length=40, blank=True, null=True) # Field name made lowercase.
spanisheducation = models.CharField(db_column='SpanishEducation', max_length=40, blank=True, null=True) # Field name made lowercase.
frencheducation = models.CharField(db_column='FrenchEducation', max_length=40, blank=True, null=True) # Field name made lowercase.
englishoccupation = models.CharField(db_column='EnglishOccupation', max_length=100, blank=True, null=True) # Field name made lowercase.
spanishoccupation = models.CharField(db_column='SpanishOccupation', max_length=100, blank=True, null=True) # Field name made lowercase.
frenchoccupation = models.CharField(db_column='FrenchOccupation', max_length=100, blank=True, null=True) # Field name made lowercase.
houseownerflag = models.CharField(db_column='HouseOwnerFlag', max_length=1, blank=True, null=True) # Field name made lowercase.
numbercarsowned = models.SmallIntegerField(db_column='NumberCarsOwned', blank=True, null=True) # Field name made lowercase.
addressline1 = models.CharField(db_column='AddressLine1', max_length=120, blank=True, null=True) # Field name made lowercase.
addressline2 = models.CharField(db_column='AddressLine2', max_length=120, blank=True, null=True) # Field name made lowercase.
phone = models.CharField(db_column='Phone', max_length=20, blank=True, null=True) # Field name made lowercase.
datefirstpurchase = models.CharField(db_column='DateFirstPurchase', max_length=10, blank=True, null=True) # Field name made lowercase.
commutedistance = models.CharField(db_column='CommuteDistance', max_length=15, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimCustomer'
class Dimdate(models.Model):
datekey = models.IntegerField(db_column='DateKey', primary_key=True) # Field name made lowercase.
fulldatealternatekey = models.CharField(db_column='FullDateAlternateKey', unique=True, max_length=10) # Field name made lowercase.
daynumberofweek = models.SmallIntegerField(db_column='DayNumberOfWeek') # Field name made lowercase.
englishdaynameofweek = models.CharField(db_column='EnglishDayNameOfWeek', max_length=10) # Field name made lowercase.
spanishdaynameofweek = models.CharField(db_column='SpanishDayNameOfWeek', max_length=10) # Field name made lowercase.
frenchdaynameofweek = models.CharField(db_column='FrenchDayNameOfWeek', max_length=10) # Field name made lowercase.
daynumberofmonth = models.SmallIntegerField(db_column='DayNumberOfMonth') # Field name made lowercase.
daynumberofyear = models.SmallIntegerField(db_column='DayNumberOfYear') # Field name made lowercase.
weeknumberofyear = models.SmallIntegerField(db_column='WeekNumberOfYear') # Field name made lowercase.
englishmonthname = models.CharField(db_column='EnglishMonthName', max_length=10) # Field name made lowercase.
spanishmonthname = models.CharField(db_column='SpanishMonthName', max_length=10) # Field name made lowercase.
frenchmonthname = models.CharField(db_column='FrenchMonthName', max_length=10) # Field name made lowercase.
monthnumberofyear = models.SmallIntegerField(db_column='MonthNumberOfYear') # Field name made lowercase.
calendarquarter = models.SmallIntegerField(db_column='CalendarQuarter') # Field name made lowercase.
calendaryear = models.SmallIntegerField(db_column='CalendarYear') # Field name made lowercase.
calendarsemester = models.SmallIntegerField(db_column='CalendarSemester') # Field name made lowercase.
fiscalquarter = models.SmallIntegerField(db_column='FiscalQuarter') # Field name made lowercase.
fiscalyear = models.SmallIntegerField(db_column='FiscalYear') # Field name made lowercase.
fiscalsemester = models.SmallIntegerField(db_column='FiscalSemester') # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimDate'
class Dimdepartmentgroup(models.Model):
departmentgroupkey = models.IntegerField(db_column='DepartmentGroupKey', primary_key=True) # Field name made lowercase.
parentdepartmentgroupkey = models.ForeignKey('self', models.DO_NOTHING, db_column='ParentDepartmentGroupKey', blank=True, null=True) # Field name made lowercase.
departmentgroupname = models.CharField(db_column='DepartmentGroupName', max_length=50, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimDepartmentGroup'
class Dimemployee(models.Model):
employeekey = models.IntegerField(db_column='EmployeeKey', primary_key=True) # Field name made lowercase.
parentemployeekey = models.ForeignKey('self', models.DO_NOTHING, db_column='ParentEmployeeKey', blank=True, null=True) # Field name made lowercase.
employeenationalidalternatekey = models.CharField(db_column='EmployeeNationalIDAlternateKey', max_length=15, blank=True, null=True) # Field name made lowercase.
parentemployeenationalidalternatekey = models.CharField(db_column='ParentEmployeeNationalIDAlternateKey', max_length=15, blank=True, null=True) # Field name made lowercase.
salesterritorykey = models.ForeignKey('Dimsalesterritory', models.DO_NOTHING, db_column='SalesTerritoryKey', blank=True, null=True) # Field name made lowercase.
firstname = models.CharField(db_column='FirstName', max_length=50) # Field name made lowercase.
lastname = models.CharField(db_column='LastName', max_length=50) # Field name made lowercase.
middlename = models.CharField(db_column='MiddleName', max_length=50, blank=True, null=True) # Field name made lowercase.
namestyle = models.BooleanField(db_column='NameStyle') # Field name made lowercase.
title = models.CharField(db_column='Title', max_length=50, blank=True, null=True) # Field name made lowercase.
hiredate = models.CharField(db_column='HireDate', max_length=10, blank=True, null=True) # Field name made lowercase.
birthdate = models.CharField(db_column='BirthDate', max_length=10, blank=True, null=True) # Field name made lowercase.
loginid = models.CharField(db_column='LoginID', max_length=256, blank=True, null=True) # Field name made lowercase.
emailaddress = models.CharField(db_column='EmailAddress', max_length=50, blank=True, null=True) # Field name made lowercase.
phone = models.CharField(db_column='Phone', max_length=25, blank=True, null=True) # Field name made lowercase.
maritalstatus = models.CharField(db_column='MaritalStatus', max_length=1, blank=True, null=True) # Field name made lowercase.
emergencycontactname = models.CharField(db_column='EmergencyContactName', max_length=50, blank=True, null=True) # Field name made lowercase.
emergencycontactphone = models.CharField(db_column='EmergencyContactPhone', max_length=25, blank=True, null=True) # Field name made lowercase.
salariedflag = models.NullBooleanField(db_column='SalariedFlag') # Field name made lowercase.
gender = models.CharField(db_column='Gender', max_length=1, blank=True, null=True) # Field name made lowercase.
payfrequency = models.SmallIntegerField(db_column='PayFrequency', blank=True, null=True) # Field name made lowercase.
baserate = models.DecimalField(db_column='BaseRate', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
vacationhours = models.SmallIntegerField(db_column='VacationHours', blank=True, null=True) # Field name made lowercase.
sickleavehours = models.SmallIntegerField(db_column='SickLeaveHours', blank=True, null=True) # Field name made lowercase.
currentflag = models.BooleanField(db_column='CurrentFlag') # Field name made lowercase.
salespersonflag = models.BooleanField(db_column='SalesPersonFlag') # Field name made lowercase.
departmentname = models.CharField(db_column='DepartmentName', max_length=50, blank=True, null=True) # Field name made lowercase.
startdate = models.CharField(db_column='StartDate', max_length=10, blank=True, null=True) # Field name made lowercase.
enddate = models.CharField(db_column='EndDate', max_length=10, blank=True, null=True) # Field name made lowercase.
status = models.CharField(db_column='Status', max_length=50, blank=True, null=True) # Field name made lowercase.
employeephoto = models.BinaryField(db_column='EmployeePhoto', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimEmployee'
class Dimgeography(models.Model):
geographykey = models.IntegerField(db_column='GeographyKey', primary_key=True) # Field name made lowercase.
city = models.CharField(db_column='City', max_length=30, blank=True, null=True) # Field name made lowercase.
stateprovincecode = models.CharField(db_column='StateProvinceCode', max_length=3, blank=True, null=True) # Field name made lowercase.
stateprovincename = models.CharField(db_column='StateProvinceName', max_length=50, blank=True, null=True) # Field name made lowercase.
countryregioncode = models.CharField(db_column='CountryRegionCode', max_length=3, blank=True, null=True) # Field name made lowercase.
englishcountryregionname = models.CharField(db_column='EnglishCountryRegionName', max_length=50, blank=True, null=True) # Field name made lowercase.
spanishcountryregionname = models.CharField(db_column='SpanishCountryRegionName', max_length=50, blank=True, null=True) # Field name made lowercase.
frenchcountryregionname = models.CharField(db_column='FrenchCountryRegionName', max_length=50, blank=True, null=True) # Field name made lowercase.
postalcode = models.CharField(db_column='PostalCode', max_length=15, blank=True, null=True) # Field name made lowercase.
salesterritorykey = models.ForeignKey('Dimsalesterritory', models.DO_NOTHING, db_column='SalesTerritoryKey', blank=True, null=True) # Field name made lowercase.
ipaddresslocator = models.CharField(db_column='IpAddressLocator', max_length=15, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimGeography'
class Dimorganization(models.Model):
organizationkey = models.IntegerField(db_column='OrganizationKey', primary_key=True) # Field name made lowercase.
parentorganizationkey = models.ForeignKey('self', models.DO_NOTHING, db_column='ParentOrganizationKey', blank=True, null=True) # Field name made lowercase.
percentageofownership = models.CharField(db_column='PercentageOfOwnership', max_length=16, blank=True, null=True) # Field name made lowercase.
organizationname = models.CharField(db_column='OrganizationName', max_length=50, blank=True, null=True) # Field name made lowercase.
currencykey = models.ForeignKey(Dimcurrency, models.DO_NOTHING, db_column='CurrencyKey', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimOrganization'
class Dimproduct(models.Model):
productkey = models.IntegerField(db_column='ProductKey', primary_key=True) # Field name made lowercase.
productalternatekey = models.CharField(db_column='ProductAlternateKey', max_length=25, blank=True, null=True) # Field name made lowercase.
productsubcategorykey = models.ForeignKey('Dimproductsubcategory', models.DO_NOTHING, db_column='ProductSubcategoryKey', blank=True, null=True) # Field name made lowercase.
weightunitmeasurecode = models.CharField(db_column='WeightUnitMeasureCode', max_length=3, blank=True, null=True) # Field name made lowercase.
sizeunitmeasurecode = models.CharField(db_column='SizeUnitMeasureCode', max_length=3, blank=True, null=True) # Field name made lowercase.
englishproductname = models.CharField(db_column='EnglishProductName', max_length=50) # Field name made lowercase.
spanishproductname = models.CharField(db_column='SpanishProductName', max_length=50) # Field name made lowercase.
frenchproductname = models.CharField(db_column='FrenchProductName', max_length=50) # Field name made lowercase.
standardcost = models.DecimalField(db_column='StandardCost', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
finishedgoodsflag = models.BooleanField(db_column='FinishedGoodsFlag') # Field name made lowercase.
color = models.CharField(db_column='Color', max_length=15) # Field name made lowercase.
safetystocklevel = models.SmallIntegerField(db_column='SafetyStockLevel', blank=True, null=True) # Field name made lowercase.
reorderpoint = models.SmallIntegerField(db_column='ReorderPoint', blank=True, null=True) # Field name made lowercase.
listprice = models.DecimalField(db_column='ListPrice', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
size = models.CharField(db_column='Size', max_length=50, blank=True, null=True) # Field name made lowercase.
sizerange = models.CharField(db_column='SizeRange', max_length=50, blank=True, null=True) # Field name made lowercase.
weight = models.FloatField(db_column='Weight', blank=True, null=True) # Field name made lowercase.
daystomanufacture = models.IntegerField(db_column='DaysToManufacture', blank=True, null=True) # Field name made lowercase.
productline = models.CharField(db_column='ProductLine', max_length=2, blank=True, null=True) # Field name made lowercase.
dealerprice = models.DecimalField(db_column='DealerPrice', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
class_field = models.CharField(db_column='Class', max_length=2, blank=True, null=True) # Field name made lowercase. Field renamed because it was a Python reserved word.
style = models.CharField(db_column='Style', max_length=2, blank=True, null=True) # Field name made lowercase.
modelname = models.CharField(db_column='ModelName', max_length=50, blank=True, null=True) # Field name made lowercase.
largephoto = models.BinaryField(db_column='LargePhoto', blank=True, null=True) # Field name made lowercase.
englishdescription = models.CharField(db_column='EnglishDescription', max_length=400, blank=True, null=True) # Field name made lowercase.
frenchdescription = models.CharField(db_column='FrenchDescription', max_length=400, blank=True, null=True) # Field name made lowercase.
chinesedescription = models.CharField(db_column='ChineseDescription', max_length=400, blank=True, null=True) # Field name made lowercase.
arabicdescription = models.CharField(db_column='ArabicDescription', max_length=400, blank=True, null=True) # Field name made lowercase.
hebrewdescription = models.CharField(db_column='HebrewDescription', max_length=400, blank=True, null=True) # Field name made lowercase.
thaidescription = models.CharField(db_column='ThaiDescription', max_length=400, blank=True, null=True) # Field name made lowercase.
germandescription = models.CharField(db_column='GermanDescription', max_length=400, blank=True, null=True) # Field name made lowercase.
japanesedescription = models.CharField(db_column='JapaneseDescription', max_length=400, blank=True, null=True) # Field name made lowercase.
turkishdescription = models.CharField(db_column='TurkishDescription', max_length=400, blank=True, null=True) # Field name made lowercase.
startdate = models.DateTimeField(db_column='StartDate', blank=True, null=True) # Field name made lowercase.
enddate = models.DateTimeField(db_column='EndDate', blank=True, null=True) # Field name made lowercase.
status = models.CharField(db_column='Status', max_length=7, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimProduct'
unique_together = (('productalternatekey', 'startdate'),)
class Dimproductcategory(models.Model):
productcategorykey = models.IntegerField(db_column='ProductCategoryKey', primary_key=True) # Field name made lowercase.
productcategoryalternatekey = models.IntegerField(db_column='ProductCategoryAlternateKey', unique=True, blank=True, null=True) # Field name made lowercase.
englishproductcategoryname = models.CharField(db_column='EnglishProductCategoryName', max_length=50) # Field name made lowercase.
spanishproductcategoryname = models.CharField(db_column='SpanishProductCategoryName', max_length=50) # Field name made lowercase.
frenchproductcategoryname = models.CharField(db_column='FrenchProductCategoryName', max_length=50) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimProductCategory'
class Dimproductsubcategory(models.Model):
productsubcategorykey = models.IntegerField(db_column='ProductSubcategoryKey', primary_key=True) # Field name made lowercase.
productsubcategoryalternatekey = models.IntegerField(db_column='ProductSubcategoryAlternateKey', unique=True, blank=True, null=True) # Field name made lowercase.
englishproductsubcategoryname = models.CharField(db_column='EnglishProductSubcategoryName', max_length=50) # Field name made lowercase.
spanishproductsubcategoryname = models.CharField(db_column='SpanishProductSubcategoryName', max_length=50) # Field name made lowercase.
frenchproductsubcategoryname = models.CharField(db_column='FrenchProductSubcategoryName', max_length=50) # Field name made lowercase.
productcategorykey = models.ForeignKey(Dimproductcategory, models.DO_NOTHING, db_column='ProductCategoryKey', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimProductSubcategory'
class Dimpromotion(models.Model):
promotionkey = models.IntegerField(db_column='PromotionKey', primary_key=True) # Field name made lowercase.
promotionalternatekey = models.IntegerField(db_column='PromotionAlternateKey', unique=True, blank=True, null=True) # Field name made lowercase.
englishpromotionname = models.CharField(db_column='EnglishPromotionName', max_length=255, blank=True, null=True) # Field name made lowercase.
spanishpromotionname = models.CharField(db_column='SpanishPromotionName', max_length=255, blank=True, null=True) # Field name made lowercase.
frenchpromotionname = models.CharField(db_column='FrenchPromotionName', max_length=255, blank=True, null=True) # Field name made lowercase.
discountpct = models.FloatField(db_column='DiscountPct', blank=True, null=True) # Field name made lowercase.
englishpromotiontype = models.CharField(db_column='EnglishPromotionType', max_length=50, blank=True, null=True) # Field name made lowercase.
spanishpromotiontype = models.CharField(db_column='SpanishPromotionType', max_length=50, blank=True, null=True) # Field name made lowercase.
frenchpromotiontype = models.CharField(db_column='FrenchPromotionType', max_length=50, blank=True, null=True) # Field name made lowercase.
englishpromotioncategory = models.CharField(db_column='EnglishPromotionCategory', max_length=50, blank=True, null=True) # Field name made lowercase.
spanishpromotioncategory = models.CharField(db_column='SpanishPromotionCategory', max_length=50, blank=True, null=True) # Field name made lowercase.
frenchpromotioncategory = models.CharField(db_column='FrenchPromotionCategory', max_length=50, blank=True, null=True) # Field name made lowercase.
startdate = models.DateTimeField(db_column='StartDate') # Field name made lowercase.
enddate = models.DateTimeField(db_column='EndDate', blank=True, null=True) # Field name made lowercase.
minqty = models.IntegerField(db_column='MinQty', blank=True, null=True) # Field name made lowercase.
maxqty = models.IntegerField(db_column='MaxQty', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimPromotion'
class Dimreseller(models.Model):
resellerkey = models.IntegerField(db_column='ResellerKey', primary_key=True) # Field name made lowercase.
geographykey = models.ForeignKey(Dimgeography, models.DO_NOTHING, db_column='GeographyKey', blank=True, null=True) # Field name made lowercase.
reselleralternatekey = models.CharField(db_column='ResellerAlternateKey', unique=True, max_length=15, blank=True, null=True) # Field name made lowercase.
phone = models.CharField(db_column='Phone', max_length=25, blank=True, null=True) # Field name made lowercase.
businesstype = models.CharField(db_column='BusinessType', max_length=20) # Field name made lowercase.
resellername = models.CharField(db_column='ResellerName', max_length=50) # Field name made lowercase.
numberemployees = models.IntegerField(db_column='NumberEmployees', blank=True, null=True) # Field name made lowercase.
orderfrequency = models.CharField(db_column='OrderFrequency', max_length=1, blank=True, null=True) # Field name made lowercase.
ordermonth = models.SmallIntegerField(db_column='OrderMonth', blank=True, null=True) # Field name made lowercase.
firstorderyear = models.IntegerField(db_column='FirstOrderYear', blank=True, null=True) # Field name made lowercase.
lastorderyear = models.IntegerField(db_column='LastOrderYear', blank=True, null=True) # Field name made lowercase.
productline = models.CharField(db_column='ProductLine', max_length=50, blank=True, null=True) # Field name made lowercase.
addressline1 = models.CharField(db_column='AddressLine1', max_length=60, blank=True, null=True) # Field name made lowercase.
addressline2 = models.CharField(db_column='AddressLine2', max_length=60, blank=True, null=True) # Field name made lowercase.
annualsales = models.DecimalField(db_column='AnnualSales', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
bankname = models.CharField(db_column='BankName', max_length=50, blank=True, null=True) # Field name made lowercase.
minpaymenttype = models.SmallIntegerField(db_column='MinPaymentType', blank=True, null=True) # Field name made lowercase.
minpaymentamount = models.DecimalField(db_column='MinPaymentAmount', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
annualrevenue = models.DecimalField(db_column='AnnualRevenue', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
yearopened = models.IntegerField(db_column='YearOpened', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimReseller'
class Dimsalesreason(models.Model):
salesreasonkey = models.IntegerField(db_column='SalesReasonKey', primary_key=True) # Field name made lowercase.
salesreasonalternatekey = models.IntegerField(db_column='SalesReasonAlternateKey') # Field name made lowercase.
salesreasonname = models.CharField(db_column='SalesReasonName', max_length=50) # Field name made lowercase.
salesreasonreasontype = models.CharField(db_column='SalesReasonReasonType', max_length=50) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimSalesReason'
class Dimsalesterritory(models.Model):
salesterritorykey = models.IntegerField(db_column='SalesTerritoryKey', primary_key=True) # Field name made lowercase.
salesterritoryalternatekey = models.IntegerField(db_column='SalesTerritoryAlternateKey', unique=True, blank=True, null=True) # Field name made lowercase.
salesterritoryregion = models.CharField(db_column='SalesTerritoryRegion', max_length=50) # Field name made lowercase.
salesterritorycountry = models.CharField(db_column='SalesTerritoryCountry', max_length=50) # Field name made lowercase.
salesterritorygroup = models.CharField(db_column='SalesTerritoryGroup', max_length=50, blank=True, null=True) # Field name made lowercase.
salesterritoryimage = models.BinaryField(db_column='SalesTerritoryImage', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimSalesTerritory'
class Dimscenario(models.Model):
scenariokey = models.IntegerField(db_column='ScenarioKey', primary_key=True) # Field name made lowercase.
scenarioname = models.CharField(db_column='ScenarioName', max_length=50, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'DimScenario'
class Factadditionalinternationalproductdescription(models.Model):
productkey = models.IntegerField(db_column='ProductKey') # Field name made lowercase.
culturename = models.CharField(db_column='CultureName', max_length=50) # Field name made lowercase.
productdescription = models.TextField(db_column='ProductDescription') # Field name made lowercase.
class Meta:
managed = False
db_table = 'FactAdditionalInternationalProductDescription'
unique_together = (('productkey', 'culturename'),)
class Factcallcenter(models.Model):
factcallcenterid = models.IntegerField(db_column='FactCallCenterID', primary_key=True) # Field name made lowercase.
datekey = models.ForeignKey(Dimdate, models.DO_NOTHING, db_column='DateKey') # Field name made lowercase.
wagetype = models.CharField(db_column='WageType', max_length=15) # Field name made lowercase.
shift = models.CharField(db_column='Shift', max_length=20) # Field name made lowercase.
leveloneoperators = models.SmallIntegerField(db_column='LevelOneOperators') # Field name made lowercase.
leveltwooperators = models.SmallIntegerField(db_column='LevelTwoOperators') # Field name made lowercase.
totaloperators = models.SmallIntegerField(db_column='TotalOperators') # Field name made lowercase.
calls = models.IntegerField(db_column='Calls') # Field name made lowercase.
automaticresponses = models.IntegerField(db_column='AutomaticResponses') # Field name made lowercase.
orders = models.IntegerField(db_column='Orders') # Field name made lowercase.
issuesraised = models.SmallIntegerField(db_column='IssuesRaised') # Field name made lowercase.
averagetimeperissue = models.SmallIntegerField(db_column='AverageTimePerIssue') # Field name made lowercase.
servicegrade = models.FloatField(db_column='ServiceGrade') # Field name made lowercase.
date = models.DateTimeField(db_column='Date', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'FactCallCenter'
unique_together = (('datekey', 'shift'),)
class Factcurrencyrate(models.Model):
currencykey = models.ForeignKey(Dimcurrency, models.DO_NOTHING, db_column='CurrencyKey') # Field name made lowercase.
datekey = models.ForeignKey(Dimdate, models.DO_NOTHING, db_column='DateKey') # Field name made lowercase.
averagerate = models.FloatField(db_column='AverageRate') # Field name made lowercase.
endofdayrate = models.FloatField(db_column='EndOfDayRate') # Field name made lowercase.
date = models.DateTimeField(db_column='Date', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'FactCurrencyRate'
unique_together = (('currencykey', 'datekey'),)
class Factfinance(models.Model):
financekey = models.IntegerField(db_column='FinanceKey') # Field name made lowercase.
datekey = models.ForeignKey(Dimdate, models.DO_NOTHING, db_column='DateKey') # Field name made lowercase.
organizationkey = models.ForeignKey(Dimorganization, models.DO_NOTHING, db_column='OrganizationKey') # Field name made lowercase.
departmentgroupkey = models.ForeignKey(Dimdepartmentgroup, models.DO_NOTHING, db_column='DepartmentGroupKey') # Field name made lowercase.
scenariokey = models.ForeignKey(Dimscenario, models.DO_NOTHING, db_column='ScenarioKey') # Field name made lowercase.
accountkey = models.ForeignKey(Dimaccount, models.DO_NOTHING, db_column='AccountKey') # Field name made lowercase.
amount = models.FloatField(db_column='Amount') # Field name made lowercase.
date = models.DateTimeField(db_column='Date', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'FactFinance'
class Factinternetsales(models.Model):
productkey = models.ForeignKey(Dimproduct, models.DO_NOTHING, db_column='ProductKey') # Field name made lowercase.
orderdatekey = models.ForeignKey(Dimdate, models.DO_NOTHING, db_column='OrderDateKey', related_name="fis_by_orderdatekey",) # Field name made lowercase.
duedatekey = models.ForeignKey(Dimdate, models.DO_NOTHING, db_column='DueDateKey', related_name="fis_by_duedatekey",) # Field name made lowercase.
shipdatekey = models.ForeignKey(Dimdate, models.DO_NOTHING, db_column='ShipDateKey', related_name="fis_by_shipdatekey",) # Field name made lowercase.
customerkey = models.ForeignKey(Dimcustomer, models.DO_NOTHING, db_column='CustomerKey') # Field name made lowercase.
promotionkey = models.ForeignKey(Dimpromotion, models.DO_NOTHING, db_column='PromotionKey') # Field name made lowercase.
currencykey = models.ForeignKey(Dimcurrency, models.DO_NOTHING, db_column='CurrencyKey') # Field name made lowercase.
salesterritorykey = models.ForeignKey(Dimsalesterritory, models.DO_NOTHING, db_column='SalesTerritoryKey') # Field name made lowercase.
salesordernumber = models.CharField(db_column='SalesOrderNumber', max_length=20) # Field name made lowercase.
salesorderlinenumber = models.SmallIntegerField(db_column='SalesOrderLineNumber') # Field name made lowercase.
revisionnumber = models.SmallIntegerField(db_column='RevisionNumber') # Field name made lowercase.
orderquantity = models.SmallIntegerField(db_column='OrderQuantity') # Field name made lowercase.
unitprice = models.DecimalField(db_column='UnitPrice', max_digits=19, decimal_places=4) # Field name made lowercase.
extendedamount = models.DecimalField(db_column='ExtendedAmount', max_digits=19, decimal_places=4) # Field name made lowercase.
unitpricediscountpct = models.FloatField(db_column='UnitPriceDiscountPct') # Field name made lowercase.
discountamount = models.FloatField(db_column='DiscountAmount') # Field name made lowercase.
productstandardcost = models.DecimalField(db_column='ProductStandardCost', max_digits=19, decimal_places=4) # Field name made lowercase.
totalproductcost = models.DecimalField(db_column='TotalProductCost', max_digits=19, decimal_places=4) # Field name made lowercase.
salesamount = models.DecimalField(db_column='SalesAmount', max_digits=19, decimal_places=4) # Field name made lowercase.
taxamt = models.DecimalField(db_column='TaxAmt', max_digits=19, decimal_places=4) # Field name made lowercase.
freight = models.DecimalField(db_column='Freight', max_digits=19, decimal_places=4) # Field name made lowercase.
carriertrackingnumber = models.CharField(db_column='CarrierTrackingNumber', max_length=25, blank=True, null=True) # Field name made lowercase.
customerponumber = models.CharField(db_column='CustomerPONumber', max_length=25, blank=True, null=True) # Field name made lowercase.
orderdate = models.DateTimeField(db_column='OrderDate', blank=True, null=True) # Field name made lowercase.
duedate = models.DateTimeField(db_column='DueDate', blank=True, null=True) # Field name made lowercase.
shipdate = models.DateTimeField(db_column='ShipDate', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'FactInternetSales'
unique_together = (('salesordernumber', 'salesorderlinenumber'),)
class Factinternetsalesreason(models.Model):
salesordernumber = models.ForeignKey(Factinternetsales, models.DO_NOTHING, db_column='SalesOrderNumber', related_name='fisr_by_salesordernumber') # Field name made lowercase.
salesorderlinenumber = models.ForeignKey(Factinternetsales, models.DO_NOTHING, db_column='SalesOrderLineNumber', related_name='fisr_by_salesorderlinenumber') # Field name made lowercase.
salesreasonkey = models.ForeignKey(Dimsalesreason, models.DO_NOTHING, db_column='SalesReasonKey') # Field name made lowercase.
class Meta:
managed = False
db_table = 'FactInternetSalesReason'
unique_together = (('salesordernumber', 'salesorderlinenumber', 'salesreasonkey'),)
class Factproductinventory(models.Model):
productkey = models.ForeignKey(Dimproduct, models.DO_NOTHING, db_column='ProductKey') # Field name made lowercase.
datekey = models.ForeignKey(Dimdate, models.DO_NOTHING, db_column='DateKey') # Field name made lowercase.
movementdate = models.CharField(db_column='MovementDate', max_length=10) # Field name made lowercase.
unitcost = models.DecimalField(db_column='UnitCost', max_digits=19, decimal_places=4) # Field name made lowercase.
unitsin = models.IntegerField(db_column='UnitsIn') # Field name made lowercase.
unitsout = models.IntegerField(db_column='UnitsOut') # Field name made lowercase.
unitsbalance = models.IntegerField(db_column='UnitsBalance') # Field name made lowercase.
class Meta:
managed = False
db_table = 'FactProductInventory'
unique_together = (('productkey', 'datekey'),)
class Factresellersales(models.Model):
productkey = models.ForeignKey(Dimproduct, models.DO_NOTHING, db_column='ProductKey') # Field name made lowercase.
orderdatekey = models.ForeignKey(Dimdate, models.DO_NOTHING, db_column='OrderDateKey', related_name='frs_by_orderdatekey') # Field name made lowercase.
duedatekey = models.ForeignKey(Dimdate, models.DO_NOTHING, db_column='DueDateKey', related_name='frs_by_duedatekey') # Field name made lowercase.
shipdatekey = models.ForeignKey(Dimdate, models.DO_NOTHING, db_column='ShipDateKey', related_name='frs_by_shipdatekey') # Field name made lowercase.
resellerkey = models.ForeignKey(Dimreseller, models.DO_NOTHING, db_column='ResellerKey') # Field name made lowercase.
employeekey = models.ForeignKey(Dimemployee, models.DO_NOTHING, db_column='EmployeeKey') # Field name made lowercase.
promotionkey = models.ForeignKey(Dimpromotion, models.DO_NOTHING, db_column='PromotionKey') # Field name made lowercase.
currencykey = models.ForeignKey(Dimcurrency, models.DO_NOTHING, db_column='CurrencyKey') # Field name made lowercase.
salesterritorykey = models.ForeignKey(Dimsalesterritory, models.DO_NOTHING, db_column='SalesTerritoryKey') # Field name made lowercase.
salesordernumber = models.CharField(db_column='SalesOrderNumber', max_length=20) # Field name made lowercase.
salesorderlinenumber = models.SmallIntegerField(db_column='SalesOrderLineNumber') # Field name made lowercase.
revisionnumber = models.SmallIntegerField(db_column='RevisionNumber', blank=True, null=True) # Field name made lowercase.
orderquantity = models.SmallIntegerField(db_column='OrderQuantity', blank=True, null=True) # Field name made lowercase.
unitprice = models.DecimalField(db_column='UnitPrice', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
extendedamount = models.DecimalField(db_column='ExtendedAmount', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
unitpricediscountpct = models.FloatField(db_column='UnitPriceDiscountPct', blank=True, null=True) # Field name made lowercase.
discountamount = models.FloatField(db_column='DiscountAmount', blank=True, null=True) # Field name made lowercase.
productstandardcost = models.DecimalField(db_column='ProductStandardCost', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
totalproductcost = models.DecimalField(db_column='TotalProductCost', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
salesamount = models.DecimalField(db_column='SalesAmount', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
taxamt = models.DecimalField(db_column='TaxAmt', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
freight = models.DecimalField(db_column='Freight', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
carriertrackingnumber = models.CharField(db_column='CarrierTrackingNumber', max_length=25, blank=True, null=True) # Field name made lowercase.
customerponumber = models.CharField(db_column='CustomerPONumber', max_length=25, blank=True, null=True) # Field name made lowercase.
orderdate = models.DateTimeField(db_column='OrderDate', blank=True, null=True) # Field name made lowercase.
duedate = models.DateTimeField(db_column='DueDate', blank=True, null=True) # Field name made lowercase.
shipdate = models.DateTimeField(db_column='ShipDate', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'FactResellerSales'
unique_together = (('salesordernumber', 'salesorderlinenumber'),)
class Factsalesquota(models.Model):
salesquotakey = models.IntegerField(db_column='SalesQuotaKey', primary_key=True) # Field name made lowercase.
employeekey = models.ForeignKey(Dimemployee, models.DO_NOTHING, db_column='EmployeeKey') # Field name made lowercase.
datekey = models.ForeignKey(Dimdate, models.DO_NOTHING, db_column='DateKey') # Field name made lowercase.
calendaryear = models.SmallIntegerField(db_column='CalendarYear') # Field name made lowercase.
calendarquarter = models.SmallIntegerField(db_column='CalendarQuarter') # Field name made lowercase.
salesamountquota = models.DecimalField(db_column='SalesAmountQuota', max_digits=19, decimal_places=4) # Field name made lowercase.
date = models.DateTimeField(db_column='Date', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'FactSalesQuota'
class Factsurveyresponse(models.Model):
surveyresponsekey = models.IntegerField(db_column='SurveyResponseKey', primary_key=True) # Field name made lowercase.
datekey = models.ForeignKey(Dimdate, models.DO_NOTHING, db_column='DateKey') # Field name made lowercase.
customerkey = models.ForeignKey(Dimcustomer, models.DO_NOTHING, db_column='CustomerKey') # Field name made lowercase.
productcategorykey = models.IntegerField(db_column='ProductCategoryKey') # Field name made lowercase.
englishproductcategoryname = models.CharField(db_column='EnglishProductCategoryName', max_length=50) # Field name made lowercase.
productsubcategorykey = models.IntegerField(db_column='ProductSubcategoryKey') # Field name made lowercase.
englishproductsubcategoryname = models.CharField(db_column='EnglishProductSubcategoryName', max_length=50) # Field name made lowercase.
date = models.DateTimeField(db_column='Date', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'FactSurveyResponse'
class Prospectivebuyer(models.Model):
prospectivebuyerkey = models.IntegerField(db_column='ProspectiveBuyerKey', primary_key=True) # Field name made lowercase.
prospectalternatekey = models.CharField(db_column='ProspectAlternateKey', max_length=15, blank=True, null=True) # Field name made lowercase.
firstname = models.CharField(db_column='FirstName', max_length=50, blank=True, null=True) # Field name made lowercase.
middlename = models.CharField(db_column='MiddleName', max_length=50, blank=True, null=True) # Field name made lowercase.
lastname = models.CharField(db_column='LastName', max_length=50, blank=True, null=True) # Field name made lowercase.
birthdate = models.DateTimeField(db_column='BirthDate', blank=True, null=True) # Field name made lowercase.
maritalstatus = models.CharField(db_column='MaritalStatus', max_length=1, blank=True, null=True) # Field name made lowercase.
gender = models.CharField(db_column='Gender', max_length=1, blank=True, null=True) # Field name made lowercase.
emailaddress = models.CharField(db_column='EmailAddress', max_length=50, blank=True, null=True) # Field name made lowercase.
yearlyincome = models.DecimalField(db_column='YearlyIncome', max_digits=19, decimal_places=4, blank=True, null=True) # Field name made lowercase.
totalchildren = models.SmallIntegerField(db_column='TotalChildren', blank=True, null=True) # Field name made lowercase.
numberchildrenathome = models.SmallIntegerField(db_column='NumberChildrenAtHome', blank=True, null=True) # Field name made lowercase.
education = models.CharField(db_column='Education', max_length=40, blank=True, null=True) # Field name made lowercase.
occupation = models.CharField(db_column='Occupation', max_length=100, blank=True, null=True) # Field name made lowercase.
houseownerflag = models.CharField(db_column='HouseOwnerFlag', max_length=1, blank=True, null=True) # Field name made lowercase.
numbercarsowned = models.SmallIntegerField(db_column='NumberCarsOwned', blank=True, null=True) # Field name made lowercase.
addressline1 = models.CharField(db_column='AddressLine1', max_length=120, blank=True, null=True) # Field name made lowercase.
addressline2 = models.CharField(db_column='AddressLine2', max_length=120, blank=True, null=True) # Field name made lowercase.
city = models.CharField(db_column='City', max_length=30, blank=True, null=True) # Field name made lowercase.
stateprovincecode = models.CharField(db_column='StateProvinceCode', max_length=3, blank=True, null=True) # Field name made lowercase.
postalcode = models.CharField(db_column='PostalCode', max_length=15, blank=True, null=True) # Field name made lowercase.
phone = models.CharField(db_column='Phone', max_length=20, blank=True, null=True) # Field name made lowercase.
salutation = models.CharField(db_column='Salutation', max_length=8, blank=True, null=True) # Field name made lowercase.
unknown = models.IntegerField(db_column='Unknown', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'ProspectiveBuyer'
class Sysdiagrams(models.Model):
name = models.CharField(max_length=128)
principal_id = models.IntegerField()
diagram_id = models.IntegerField(primary_key=True)
version = models.IntegerField(blank=True, null=True)
definition = models.BinaryField(blank=True, null=True)
class Meta:
managed = False
db_table = 'sysdiagrams'
unique_together = (('principal_id', 'name'),)
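# Usage sketch: a minimal example of querying these unmanaged models, assuming they
# are registered in a Django app whose database points at the AdventureWorksDW schema.
# Because the fact tables expose composite natural keys (see unique_together above),
# Django's implicit id column does not exist in the warehouse, so queries that avoid
# the implicit primary key, such as the aggregate below, are safest. The helper name
# is illustrative only.
def example_internet_sales_by_territory():
    """Sum internet sales per sales territory region (illustrative sketch)."""
    from django.db.models import Sum
    return (
        Factinternetsales.objects
        .values('salesterritorykey__salesterritoryregion')
        .annotate(total_sales=Sum('salesamount'))
        .order_by('-total_sales')
    )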
|
StarcoderdataPython
|
5007789
|
import numpy as np
import pandas as pd
import random
import math
import matplotlib.pyplot as plt
# Gaussian kernel function
def gaussian_kernal(u):
return (1/(math.sqrt(2*math.pi)))*math.e**(-0.5*(u[0,0]**2+u[0,1]**2+u[0,2]**2+u[0,3]**2))
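# Note on the kernel above: it is a product of four 1-D Gaussians evaluated on the
# first four entries of u (the iris features); the label carried in column 4 of the
# row matrices is ignored. With a single (2*pi)**-0.5 factor it is not normalized as
# a 4-D density (that would need (2*pi)**-2), but the missing constant is the same
# for every class, so the argmax decision below is unaffected.
# Quick sanity check at the origin (a hypothetical 1x5 row vector):
#   gaussian_kernal(np.mat([[0, 0, 0, 0, 0]]))  ->  1/sqrt(2*pi) ≈ 0.3989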
# Calculate the class-conditional probability P(x|wk) with a Parzen-window estimate
def cal_pcb(cv, train, cal_pcb_h):
    total = 0
    for i in range(len(train)):
        # per-sample kernel contribution, scaled by the window volume h**d (d = 4)
        total += (1 / cal_pcb_h**4) * gaussian_kernal((cv - train[i]) / cal_pcb_h)
    p_cp = (1 / len(train)) * total
    # print(p_cp)
    return p_cp
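# cal_pcb implements the Parzen-window estimate of the class-conditional density
#   p(x | wk) ≈ (1 / n) * sum_i (1 / h**d) * K((x - x_i) / h),   with d = 4 features,
# where n = len(train), h = cal_pcb_h and K is gaussian_kernal above. Smaller h gives
# a spikier estimate, larger h a smoother one; the main block below sweeps h and
# selects it by 5-fold cross-validated accuracy.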
def def_h(data1,data2,def_h_h):
#separate the training dataset into 3 groups by their labels
data_train_1 = []
data_train_2 = []
data_train_3 = []
for x in range(len(data1)):
if data1[x, 4] == 0:
data_train_1.append(data1[x])
elif data1[x, 4] == 1:
data_train_2.append(data1[x])
else:
data_train_3.append(data1[x])
#print(data_train_1,data_train_2,data_train_3)
# calculate prior class probability
pw1 = len(data_train_1) / len(data1)#P(wk)1
pw2 = len(data_train_2) / len(data1)#P(wk)2
pw3 = len(data_train_3) / len(data1)#P(wk)3
#print(pw1,pw2,pw3)
right = 0
for j in range(len(data2)):
p_list = []
pcp_label1 = cal_pcb(data2[j], data_train_1, def_h_h)#P(x|wk)1
pcp_label2 = cal_pcb(data2[j], data_train_2, def_h_h)#P(x|wk)2
pcp_label3 = cal_pcb(data2[j], data_train_3, def_h_h)#P(x|wk)3
p_list.append(pw1 * pcp_label1)#P(wk|x)1
p_list.append(pw2 * pcp_label2)#P(wk|x)2
p_list.append(pw3 * pcp_label3)#P(wk|x)3
#record the times of right prediction
if p_list.index(max(p_list)) == data2[j, 4]:
right = right + 1
#print(p_list)
#print(right)
return right
if __name__=='__main__':
#read data
iris_data = pd.read_csv("iris.data", header=None)
labels_codes = pd.Categorical(iris_data[4]).codes
#print(iris_data)
for i in range(150):
iris_data.loc[i, 4] = labels_codes[i]
datalist = iris_data.values.tolist()
#print(datalist)
#shuffle data
random.seed(17)
random.shuffle(datalist)
#print(datalist)
date_set = np.mat(datalist)
# print(dateset)
#split data
data_set = np.vsplit(date_set, 5)
#print("number of subset:",len(data_set))
#print(data_set)
sub_set1 = data_set[0].copy()
sub_set2 = data_set[1].copy()
sub_set3 = data_set[2].copy()
sub_set4 = data_set[3].copy()
sub_set5 = data_set[4].copy()
h = 1
step=0.05
average_acc=[]
h_list_cv=[]
while(h>0):
cv_acc = []
for i in range(5):
data_cv = data_set[4 - i].copy()
if i == 0:
data_train = np.vstack((sub_set1, sub_set2, sub_set3, sub_set4))
elif i == 1:
data_train = np.vstack((sub_set1, sub_set2, sub_set3, sub_set5))
elif i == 2:
data_train = np.vstack((sub_set1, sub_set2, sub_set4, sub_set5))
elif i == 3:
data_train = np.vstack((sub_set1, sub_set3, sub_set4, sub_set5))
else:
data_train = np.vstack((sub_set2, sub_set3, sub_set4, sub_set5))
cv_acc.append(def_h(data_train,data_cv,h) / len(data_cv))
cv_acc=np.mat(cv_acc)
average_acc.append(np.mean(cv_acc))
h_list_cv.append(h)
h=h-step
#print(average_acc)
#print(h_list_cv)
plt.plot(h_list_cv,average_acc)
plt.xlabel("hyperparameter h")
plt.ylabel("average accuracy")
plt.title("Parzen window")
plt.show()
h_list=[]
for x in range(len(average_acc)):
if average_acc[x]==max(average_acc):
h_list.append(h_list_cv[x])
print("highest average accuracy:",round(max(average_acc),3))
for z in range(len(h_list)):
h_list[z]=round(h_list[z],2)
print("corresponding h:",h_list)
|
StarcoderdataPython
|
1726054
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Purpose: This script is to add a specific number to image names in order to increment their number.
Functionality: To merge the Test set and Training set, each Test set image name is offset by the highest image number in the Training set.
Input: Train_DIR=Training set path, Test_DIR=Test set path
Output: Together_DIR=Merged set path
Usage: python addNumtoImageNames.py --Train_DIR <path> --Test_DIR <path> --Together_DIR <path>
Author: <NAME>
Date: 11th September 2017
"""
import sys
import os
import argparse
import progressbar
from glob import glob
from skimage import io
import numpy as np
from termcolor import colored
#import subprocess
sys.path.append("/home/azim_se")
np.random.seed(5) # for reproducibility
progress = progressbar.ProgressBar(widgets=[progressbar.Bar('*', '[', ']'), progressbar.Percentage(), ' '])
"""
try:
import cv2
except ImportError:
raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
'configure environemnt variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
"""
def parse_args():
"""Parse input arguments"""
parser = argparse.ArgumentParser(description='addNumtoImageNames')
parser.add_argument('--Train_DIR', dest='_trainDir', help='Path to Train set Directory',
default='./train', type=str)
parser.add_argument('--Test_DIR', dest='_testDir', help='Path to Test set Directory',
default='./test', type=str)
parser.add_argument('--Together_DIR', dest='_mergedDir', help='Path to Together set Directory',
default='./together', type=str)
args = parser.parse_args()
return args
class Incrimentation(object):
'''
Read each image and its name. Add a specific number to each file name and save it.
INPUT list 'inputpath': filepaths to all images of specific set
INPUT list 'outputpath': filepaths to output images of specific set
'''
def __init__(self, inputpath, outputpath, number=55680):
self.inputpath=inputpath
self.outputpath=outputpath
self.number=number
print(colored(("\nInput Path is: {}".format(self.inputpath)), 'yellow'))
self._ImagesNames = glob(self.inputpath+'/**') #/**/*more* '/**/**'
print(colored(self._ImagesNames, 'blue'))
self.read(self._ImagesNames)
def read(self, _ImagesNames):
progress.currval = 0
for image_idx in progress(range(len(self._ImagesNames))):
# read the image, increment the number in its file name, then save it
image = self.readImage(self._ImagesNames[image_idx])
_IncImageName = self.incrementName(self._ImagesNames[image_idx],self.number)
self.saveIncImage(image, _IncImageName, self.outputpath)
def readImage(self, imagepath):
'''
Reading each image
input: imagepath= path to image
output: img= image file
'''
try:
print(colored(("\nimage path being read is : {}".format(imagepath)), 'green'))
img = io.imread(imagepath)#plugin='simpleitk').astype(float)
        except Exception as e:
            raise IOError("Can not read image: {}".format(imagepath)) from e
return img
def incrementName(self, ImageName, number):
'''
Increment the file name by a number.
>>> f = 'C:\\X\\Data\\foo.txt'
>>> import os
>>> os.path.basename(f)
'foo.txt'
>>> os.path.dirname(f)
'C:\\X\\Data'
>>> os.path.splitext(f)
('C:\\X\\Data\\foo', '.txt')
>>> os.path.splitext(os.path.basename(f))
('foo', '.txt')
or
>>> filename = "example.jpeg"
>>> filename.split(".")[-1]
'jpeg'
No error when file doesn't have an extension:
>>> "filename".split(".")[-1]
'filename'
But you must be careful:
>>> "png".split(".")[-1]
'png' # But file doesn't have an extension
head, tail = os.path.split("a/b/c/00001.dat")
print(head,tail
'''
# split file base name from head of path file
head, basename = os.path.split(ImageName)
print("Head and Basename are: ", head, basename)
# find out RGB category or grayscale
category = os.path.split(head)[-1]
#split file name from its format
_fileName,_fileformat = os.path.splitext(basename)
print("_fileName and _fileformat are: ", _fileName, _fileformat)
#increment file name
_incfileName = str(int(_fileName)+self.number)+_fileformat
print("Incremented base Name is: ", _incfileName)
        # keep the RGB/grayscale category folder in the output name
        if category == 'RGB' or category == 'grayscale':
            _incfileName = os.path.join(category, _incfileName)
print("incremented full name is: ", _incfileName)
return _incfileName
def saveIncImage(self, image, _incfileName, _outpath):
# append output directory path to incremented file path
_fileName = os.path.join(_outpath,_incfileName)
print(colored("\nSaving path is: {}".format(_fileName), 'red'))
io.imsave(_fileName,image)
#cv2.imwrite(outputpath+'/'+'{}'.format(_incfileName))
if __name__ == '__main__':
args = parse_args()
#_trainImages = glob(args._trainDir+'/**.jpg')#/**/*more*
#_testImages = glob(args._testDir+'/**.jpg')#/**/*more*
data = Incrimentation(args._testDir,args._mergedDir)
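# Example of the renaming performed by Incrimentation.incrementName (illustrative
# paths; the default offset of 55680 comes from the constructor above):
#   input : ./test/RGB/00001.jpg   ->   RGB/55681.jpg, saved under Together_DIR
# A minimal standalone sketch of the same arithmetic:
#   name, ext = os.path.splitext('00001.jpg')
#   str(int(name) + 55680) + ext        # -> '55681.jpg'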
|
StarcoderdataPython
|
9684712
|
# filename: fewshot/data/iterators/semisupervised_episode_iterator.py
"""Iterator for semi-supervised episodes.
Author: <NAME> (<EMAIL>)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import tensorflow as tf
from fewshot.data.iterators.episode_iterator import EpisodeIterator
from fewshot.data.registry import RegisterIterator
@RegisterIterator('semisupervised-episode')
class SemiSupervisedEpisodeIterator(EpisodeIterator):
"""Generates semi-supervised episodes. Note that this class doesn't support a
fixed label/unlabel split on the image level."""
def __init__(self,
dataset,
sampler,
batch_size,
nclasses,
nquery,
expand=False,
preprocessor=None,
episode_processor=None,
fix_unknown=False,
maxlen=-1,
prefetch=True,
**kwargs):
super(SemiSupervisedEpisodeIterator, self).__init__(
dataset,
sampler,
batch_size,
nclasses,
nquery,
expand=expand,
preprocessor=preprocessor,
episode_processor=episode_processor,
fix_unknown=fix_unknown,
maxlen=maxlen,
prefetch=prefetch,
**kwargs)
assert 'label_ratio' in kwargs, 'Must specify label ratio'
self._label_ratio = kwargs['label_ratio']
assert fix_unknown, 'Must fix unknown token for semi-supervised task.'
def process_one(self, collection):
"""Process one episode.
    Args:
      collection: dictionary that contains the following keys:
support: np.ndarray. Image ID in the support set.
flag: np.ndarray. Binary flag indicating whether it is labeled (1) or
unlabeled (0).
query: np.ndarray. Image ID in the query set.
"""
s, flag, q = collection['support'], collection['flag'], collection['query']
del collection['support']
del collection['query']
del collection['flag']
dataset = self.dataset
nclasses = self.nclasses
img_s = dataset.get_images(s)
lbl_s = np.array(collection['support_label'])
del collection['support_label']
T = self.maxlen
# Mask off unlabeled set.
labeled = flag == 1
unlabeled = flag == 0
lbl_s_l = lbl_s[labeled]
lbl_s_u = lbl_s[unlabeled]
# Note numpy does not give the desired behavior here.
# lbl_map, lbl_s_l = np.unique(lbl_s_l, return_inverse=True)
lbl_map, lbl_s_l = tf.unique(lbl_s_l)
def query_tf(x):
x = tf.expand_dims(x, 1) # [T, 1]
x_eq = tf.cast(tf.equal(x, lbl_map), tf.float32) # [T, N]
x_valid = tf.reduce_sum(x_eq, [1]) # [T]
# Everything that has not been found -> fixed unknown.
# This means it's a distractor.
x = tf.cast(tf.argmax(x_eq, axis=1), tf.float32)
x = x_valid * x + (1 - x_valid) * nclasses
x = tf.cast(x, tf.int32)
return x
def query_np(x):
x = np.expand_dims(x, 1) # [T, 1]
x_eq = np.equal(x, lbl_map).astype(np.float32) # [T, N]
x_valid = np.sum(x_eq, axis=1) # [T]
# Everything that has not been found -> fixed unknown.
# This means it's a distractor.
x = np.argmax(x_eq, axis=1).astype(np.float32)
x = x_valid * x + (1 - x_valid) * nclasses
x = x.astype(np.int32)
return x
# Find distractors.
lbl_s_eq = tf.cast(tf.equal(tf.expand_dims(lbl_s, 1), lbl_map), tf.float32)
distractor_flag = tf.cast(1.0 - tf.reduce_sum(lbl_s_eq, [1]), tf.int32)
# Re-indexed labels.
lbl_s[labeled] = lbl_s_l
lbl_s[unlabeled] = query_np(lbl_s_u)
# Label fed into the network.
lbl_s_masked = np.copy(lbl_s)
lbl_s_masked[unlabeled] = nclasses
# We assumed fix unknown.
# Make the first appearing item to be unknown in groundtruth.
lbl_s_np = np.copy(lbl_s)
lbl_s_np2 = np.copy(lbl_s_np)
lbl_s_np2[unlabeled] = -1
lbl_s_gt = np.zeros([len(lbl_s_np)], dtype=np.int32)
cummax = np.maximum.accumulate(lbl_s_np2)
lbl_s_gt[0] = nclasses
# Labeled to be trained as target.
cond = lbl_s_np[1:] > cummax[:-1]
lbl_s_gt[1:] = np.where(cond, nclasses, lbl_s_np[1:])
if self.nquery > 0:
img_q = dataset.get_images(q)
lbl_q = collection['query_label']
del collection['query_label']
lbl_q = query_tf(lbl_q)
else:
img_q = None
lbl_q = None
epi = {
'x_s': self.pad_x(img_s, T),
'y_s': self.pad_y(lbl_s_masked, T),
'y_gt': self.pad_y(lbl_s_gt, T),
'y_dis': self.pad_y(distractor_flag, T),
'y_full': self.pad_y(lbl_s, T),
'flag_s': self.get_flag(lbl_s, T)
}
if self.nquery > 0:
assert False, 'Not supported'
# For remaining additional info.
for k in collection:
epi[k] = self.pad_y(collection[k], T)
if self.episode_processor is not None:
epi = self.episode_processor(epi)
return epi
def _next(self):
"""Next example."""
collection = self.sampler.sample_collection(self.nclasses, self.nquery,
**self.kwargs)
return self.process_one(collection)
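# A small worked example of the label remapping in process_one, assuming nclasses = 5
# and a hypothetical episode: labelled support ids [7, 3, 7] are re-indexed by
# tf.unique to lbl_map = [7, 3] and labels [0, 1, 0]. For an unlabelled or query
# label not present in lbl_map, query_np routes it to the fixed "unknown" index
# nclasses, and it is flagged as a distractor:
#   lbl_map = np.array([7, 3])
#   x = np.expand_dims(np.array([3, 9]), 1)
#   np.equal(x, lbl_map)   # [[False, True], [False, False]] -> indices [1, nclasses]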
|
StarcoderdataPython
|
1728254
|
# filename: tests/pyspark_testing/integration/__init__.py
from __future__ import print_function
from functools import partial
import atexit
import glob
import logging
import os
import sys
import subprocess
import time
import unittest
from pyspark_testing.version import __version__ as version
from ... import relative_file
log = logging.getLogger(__name__)
here = partial(relative_file, __file__)
def initialize_pyspark(spark_home, app_name, add_files=None):
py4j = glob.glob(os.path.join(spark_home, 'python', 'lib', 'py4j*.zip'))[0]
pyspark_path = os.path.join(spark_home, 'python')
add_files = add_files or []
sys.path.insert(0, py4j)
sys.path.insert(0, pyspark_path)
for file in add_files:
sys.path.insert(0, file)
from pyspark.context import SparkContext
logging.getLogger('py4j.java_gateway').setLevel(logging.WARN)
sc = SparkContext(appName=app_name, pyFiles=add_files)
log.debug('SparkContext initialized')
return sc
class PySparkIntegrationTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not hasattr(cls, 'sc'):
spark_home = os.environ['SPARK_HOME']
build_zip = here('../../../dist/pyspark_testing-{}-py2.7.egg'.format(version))
app_name = '{} Tests'.format(cls.__name__)
cls.sc = initialize_pyspark(spark_home, app_name, [build_zip])
log.debug('SparkContext initialized on %s', cls.__name__)
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'sc'):
cls.sc.stop()
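# Usage sketch: a hypothetical integration test that reuses the shared SparkContext
# created in setUpClass (requires SPARK_HOME to point at a Spark install and the
# project egg to have been built into dist/ as referenced above):
#   class WordCountTest(PySparkIntegrationTest):
#       def test_count(self):
#           rdd = self.sc.parallelize(['a', 'b', 'a'])
#           self.assertEqual(rdd.count(), 3)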
|
StarcoderdataPython
|
5674
|
#!/usr/bin/env python
#
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import absolute_import, division, print_function
import argparse
import os
import glog as log
import numpy as np
import cv2
from lxml import etree
from tqdm import tqdm
def parse_args():
"""Parse arguments of command line"""
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
description='Convert CVAT XML annotations to masks'
)
parser.add_argument(
'--cvat-xml', metavar='FILE', required=True,
help='input file with CVAT annotation in xml format'
)
parser.add_argument(
'--background-color', metavar='COLOR_BGR', default="0,0,0",
help='specify background color (by default: 0,0,0)'
)
parser.add_argument(
'--label-color', metavar='LABEL:COLOR_BGR', action='append',
default=[],
help="specify a label's color (e.g. 255 or 255,0,0). The color will " +
"be interpreted in accordance with the mask format."
)
parser.add_argument(
'--mask-bitness', type=int, choices=[8, 24], default=8,
help='choose bitness for masks'
)
parser.add_argument(
'--output-dir', metavar='DIRECTORY', required=True,
help='directory for output masks'
)
return parser.parse_args()
def parse_anno_file(cvat_xml):
root = etree.parse(cvat_xml).getroot()
anno = []
for image_tag in root.iter('image'):
image = {}
for key, value in image_tag.items():
image[key] = value
image['shapes'] = []
for poly_tag in image_tag.iter('polygon'):
polygon = {'type': 'polygon'}
for key, value in poly_tag.items():
polygon[key] = value
image['shapes'].append(polygon)
for box_tag in image_tag.iter('box'):
box = {'type': 'box'}
for key, value in box_tag.items():
box[key] = value
box['points'] = "{0},{1};{2},{1};{2},{3};{0},{3}".format(
box['xtl'], box['ytl'], box['xbr'], box['ybr'])
image['shapes'].append(box)
image['shapes'].sort(key=lambda x: int(x.get('z_order', 0)))
anno.append(image)
return anno
def create_mask_file(mask_path, width, height, bitness, color_map, background, shapes):
mask = np.zeros((height, width, bitness // 8), dtype=np.uint8)
for shape in shapes:
color = color_map.get(shape['label'], background)
points = [tuple(map(float, p.split(','))) for p in shape['points'].split(';')]
points = np.array([(int(p[0]), int(p[1])) for p in points])
mask = cv2.fillPoly(mask, [points], color=color)
cv2.imwrite(mask_path, mask)
def to_scalar(text, dim):
    scalar = list(map(int, text.split(',')))
if len(scalar) < dim:
scalar.extend([scalar[-1]] * dim)
return tuple(scalar[0:dim])
def main():
args = parse_args()
anno = parse_anno_file(args.cvat_xml)
color_map = {}
dim = args.mask_bitness // 8
for item in args.label_color:
label, color = item.split(':')
color_map[label] = to_scalar(color, dim)
background = to_scalar(args.background_color, dim)
for image in tqdm(anno, desc='Generate masks'):
mask_path = os.path.join(args.output_dir, os.path.splitext(image['name'])[0] + '.png')
mask_dir = os.path.dirname(mask_path)
if mask_dir:
os.makedirs(mask_dir, exist_ok=True)
create_mask_file(mask_path, int(image['width']), int(image['height']),
args.mask_bitness, color_map, background, image['shapes'])
if __name__ == "__main__":
main()
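# Worked examples for the two small conversions above (values are illustrative):
#   to_scalar("255", 3)      -> (255, 255, 255)   # short colors are right-padded
#   to_scalar("255,0,0", 1)  -> (255,)            # extra components are dropped
# A <box> with xtl=10, ytl=20, xbr=30, ybr=40 becomes the polygon points string
# "10,20;30,20;30,40;10,40", which create_mask_file parses back into integer points.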
|
StarcoderdataPython
|
5174579
|
# repository: manazhao/tf_recsys
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import tensorflow as tf
import ml.common.flags as my_flags
import ml.module.proto.feature_column_schema_pb2 as fcs_pb2
import ml.module.feature_column_factory as fcf
FLAGS = my_flags.FLAGS
class FeatureColumnFactoryTest(tf.test.TestCase):
def setUp(self):
self.schema_pbtxt = os.path.join(
FLAGS.test_srcdir, "ml/module/testing_data/feature_column_schema.pbtxt")
self.int_fixed = tf.contrib.layers.real_valued_column(
"int_fixed", dimension=10, dtype=tf.int64)
self.label = tf.contrib.layers.real_valued_column(
fcf.FEATURE_LABEL_NAME, dimension=1, dtype=tf.int64)
self.float_fixed = tf.contrib.layers.real_valued_column(
"float_fixed", dimension=20, dtype=tf.float32)
self.integerized_sparse = tf.contrib.layers.sparse_column_with_integerized_feature(
"integerized_sparse", bucket_size=10, combiner="sum", dtype=tf.int32)
self.hash_bucket_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(
"hash_bucket_sparse", hash_bucket_size=5000, combiner="mean", dtype=tf.int64)
self.embedding = tf.contrib.layers.embedding_column(
sparse_id_column=self.hash_bucket_sparse, dimension=50, combiner="sqrt")
def testNotIncludeTarget(self):
self.feature_columns = fcf.feature_columns_from_file(
self.schema_pbtxt, include_target=False)
self.assertItemsEqual(self.feature_columns, set(
[self.int_fixed, self.float_fixed, self.integerized_sparse, self.hash_bucket_sparse, self.embedding]))
def testIncludeTarget(self):
self.feature_columns = fcf.feature_columns_from_file(
self.schema_pbtxt, include_target=True)
self.maxDiff = None
self.assertItemsEqual(self.feature_columns, set(
[self.int_fixed, self.label, self.float_fixed, self.integerized_sparse, self.hash_bucket_sparse, self.embedding]))
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.test.main()
|
StarcoderdataPython
|
6530270
|
# Application settings such as the database connection string
import os
class DevelopmentConfig:
# SQLAlchemy
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://{user}:{password}@{host}/{database}?charset=utf8'.format(
**{
'user': os.getenv('DB_USER', 'root'),
'password': os.getenv('DB_PASSWORD', '<PASSWORD>'),
'host': os.getenv('DB_HOST', 'db'),
'database': os.getenv('DB_DATABASE', 'hoge'),
})
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = False
Config = DevelopmentConfig
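# Usage sketch: with the defaults above and no environment overrides, the URI
# resolves to 'mysql+pymysql://root:<PASSWORD>@db/hoge?charset=utf8' (the password
# placeholder kept verbatim). A typical application factory would then load it with:
#   app = Flask(__name__)
#   app.config.from_object(Config)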
|
StarcoderdataPython
|
4987351
|
import json
import os
from datetime import datetime
import cactus
import dotenv
import pandas
import pytz
import requests
import twitter
# Load the .env
dotenv.load_dotenv(dotenv.find_dotenv())
STATS_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__), '../vaccine-stats.json'))
def current_timestamp():
return (pytz.utc
.localize(datetime.utcnow())
.astimezone(pytz.timezone('America/Los_Angeles'))
.isoformat()
)
def load_stats():
if not os.path.exists(STATS_FILE):
return {}
with open(STATS_FILE, 'r') as f:
return json.loads(f.read())
def save_stats(stats):
stats['updated'] = current_timestamp()
with open(STATS_FILE, 'w') as f:
f.write(json.dumps(stats))
def fetch_json(url):
response = requests.get(url)
response.raise_for_status()
return response.json()
def get_population():
# url = 'https://cofgisonline.maps.arcgis.com/sharing/rest/content/items/664add6af8a9465b8842b24ec2c492e4/data?f=json'
# data = fetch_json(url)
# return int(data['widgets'][3]['datasets'][1]['data'])
# return 793501 # Population aged 16+
return 1042157 # Total population
def get_vaccinated_FCDPH():
url = 'https://services3.arcgis.com/ibgDyuD2DLBge82s/arcgis/rest/services/COVID19_Immunization_Unique_View/FeatureServer/0/query?f=json&where=(recip_county_desc%3D%27Fresno%27%20AND%20vax_series_complete%3D%27YES%27%20OR%20recip_county_desc%20IS%20NULL%20AND%20vax_series_complete%3D%27YES%27)&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&outStatistics=%5B%7B%22statisticType%22%3A%22count%22%2C%22onStatisticField%22%3A%22OBJECTID%22%2C%22outStatisticFieldName%22%3A%22value%22%7D%5D&resultType=standard&cacheHint=true'
data = fetch_json(url)
return int(data['features'][0]['attributes']['value'])
def get_vaccinated_CADPH():
url = 'https://data.chhs.ca.gov/dataset/e283ee5a-cf18-4f20-a92c-ee94a2866ccd/resource/130d7ba2-b6eb-438d-a412-741bde207e1c/download/covid19vaccinesbycounty.csv'
df = pandas.read_csv(url)
return int(df[df['county'] == 'Fresno'].iloc[-1]['cumulative_fully_vaccinated'])
def get_seven_day_average_increase(df):
df = df.iloc[len(df) - 7:]
vaccinated_average = df['fully_vaccinated'].mean()
vaccinated_percent = (100. / get_population()) * vaccinated_average
one_dose_average = df['partially_vaccinated'].mean()
one_dose_percent = (100. / get_population()) * one_dose_average
return {
'seven_day_vaccinated': int(round(vaccinated_average)),
'seven_day_vaccinated_percent': vaccinated_percent,
'seven_day_one_dose': int(round(one_dose_average)),
'seven_day_one_dose_percent': one_dose_percent,
}
def fetch_current_stats():
url = 'https://data.chhs.ca.gov/dataset/e283ee5a-cf18-4f20-a92c-ee94a2866ccd/resource/130d7ba2-b6eb-438d-a412-741bde207e1c/download/covid19vaccinesbycounty.csv'
df = pandas.read_csv(url)
df = df[df['county'] == 'Fresno']
df = df.iloc[len(df) - 60:]
stats = {
'population': get_population(),
'vaccinated': int(df.iloc[-1]['cumulative_fully_vaccinated']),
'one_dose': int(df.iloc[-1]['cumulative_at_least_one_dose']),
}
stats.update(
vaccinated_percent=(100. / stats['population']) * stats['vaccinated'],
one_dose_percent=(100. / stats['population']) * stats['one_dose'],
)
stats.update(get_seven_day_average_increase(df))
stats['historical'] = {
'dates': df['administered_date'].to_list(),
'fully_vaccinated': df['fully_vaccinated'].to_list(),
'cumulative_fully_vaccinated': df['cumulative_fully_vaccinated'].to_list(),
'percents': [round((100. / stats['population']) * x, 1) for x in df['cumulative_fully_vaccinated'].to_list()]
}
return stats
def progress_bar(percent):
bar_filled = '▓'
bar_empty = '░'
length = 15
progress_bar = bar_filled * int((percent / (100. / length)))
progress_bar += bar_empty * (length - len(progress_bar))
return f'{progress_bar} {percent:.1f}%'
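# Worked example: progress_bar(42.0) fills int(42.0 / (100 / 15)) = 6 of the 15
# cells and returns '▓▓▓▓▓▓░░░░░░░░░ 42.0%'.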
def send_tweet(population, vaccinated, vaccinated_percent, *args, **kwargs):
message = '\n'.join([
'‼️ Fresno Vaccination Progress:',
'',
progress_bar(vaccinated_percent),
'',
'Learn more at FresnoVax.com'
])
api = twitter.Api(
consumer_key=os.environ['TWITTER_CONSUMER_KEY'],
consumer_secret=os.environ['TWITTER_CONSUMER_SECRET'],
access_token_key=os.environ['TWITTER_ACCESS_KEY'],
access_token_secret=os.environ['TWITTER_ACCESS_SECRET']
)
if api.VerifyCredentials() is not None:
api.PostUpdate(message)
def main():
previous = load_stats()
previous.pop('updated', None)
current = fetch_current_stats()
if previous != current:
save_stats(current)
os.system('cactus build')
send_tweet(**current)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4905978
|
# filename: watertap/edb/tests/test_commands.py
import contextlib
from dataclasses import dataclass, field, asdict
from functools import singledispatch
import json
import logging
import os
from pathlib import Path
import shutil
from typing import List, Optional, Tuple, Union, Any
import pytest
from click import Command
from click.testing import CliRunner, Result
from _pytest.monkeypatch import MonkeyPatch
import pymongo
from watertap.edb import commands, ElectrolyteDB
@pytest.fixture(scope="module")
def runner():
return CliRunner()
# TODO move this to edb.commands
LogLevel = type(logging.INFO)
ExitCode = commands.ExitCode
@dataclass
class Expected:
exit_code: ExitCode = ExitCode.OK
file_name: Optional[str] = None
file_path: Optional[Path] = None
log_level: Optional[LogLevel] = None
@property
def success(self) -> bool:
return self.exit_code == ExitCode.OK
def __post_init__(self):
if self.file_name is not None and self.file_path is None:
self.file_path = Path(self.file_name)
def __repr__(self):
kv_repr = [f"{k}={v!r}" for k, v in asdict(self).items() if v is not None]
return f"{type(self).__name__}" f"({', '.join(kv_repr)})"
def __eq__(self, other: Union["Expected", Result]):
if isinstance(other, Result):
return self.exit_code == other.exit_code
return type(self) == type(other) and asdict(self) == asdict(other)
Outcome = Tuple[Result, Expected]
@dataclass
class Invocation:
args: List[str]
expected: Optional[Expected] = field(default_factory=Expected)
command: Optional[Command] = commands.command_base
    def run(self, runner: CliRunner) -> Result:
        runner = runner or CliRunner()
        result = runner.invoke(self.command, self.args)
        self.result = result
        return result
def _display_param(obj: Any) -> str:
if isinstance(obj, Invocation):
args = " ".join(obj.args)
return f"{args!r}, {obj.expected}"
if isinstance(obj, tuple):
return _display_param(Invocation(*obj))
return str(obj)
class _INVALID:
url: str = "__INVALID__"
file_name: str = "__INVALID__"
data_type: str = "__INVALID__"
EDBClient = pymongo.MongoClient
@dataclass
class EDBClientFactory:
instance: EDBClient = None
def __call__(self, url=None, **kwargs) -> EDBClient:
if url == _INVALID.url:
raise pymongo.errors.ConnectionFailure(f"Invalid url: {url}")
return self.instance
@pytest.fixture(scope="function")
def mock_edb(monkeypatch) -> EDBClientFactory:
import mongomock
# NOTE since mongomock clients store data in memory,
# the same MongoClient instance must be kept and used for the lifetime of the fixture
# to be able to access the EDB data for e.g. assertions
client_factory = EDBClientFactory(mongomock.MongoClient())
with MonkeyPatch.context() as mp:
mp.setattr("watertap.edb.db_api.MongoClient", client_factory)
yield client_factory
@pytest.fixture(scope="function")
def edb(mock_edb) -> ElectrolyteDB:
# NOTE to switch to a non-mock EDB instance, simply do not use "mock_edb" in this fixture
edb = ElectrolyteDB()
assert edb._client is mock_edb.instance
return edb
@pytest.fixture(scope="function")
def empty_edb(edb) -> ElectrolyteDB:
edb._client.drop_database(edb.database)
return edb
@pytest.fixture(scope="function")
def populated_edb(empty_edb) -> ElectrolyteDB:
edb = empty_edb
commands._load_bootstrap(empty_edb)
return edb
@contextlib.contextmanager
def _changing_cwd(dest: Path) -> Path:
origin = Path.cwd()
try:
os.chdir(dest)
yield dest
finally:
os.chdir(origin)
@pytest.fixture(scope="function")
def run_in_empty_dir(tmp_path: Path) -> Path:
with _changing_cwd(tmp_path) as dest:
yield dest
class TestBaseCommand:
@pytest.fixture(
scope="function",
params=[
(["-q"], Expected(log_level=logging.FATAL)),
([], Expected(log_level=logging.ERROR)),
(["-v"], Expected(log_level=logging.WARN)),
(["-vv"], Expected(log_level=logging.INFO)),
(["-vvv"], Expected(log_level=logging.DEBUG)),
(["-qv"], Expected(exit_code=ExitCode.INVALID_USAGE)),
],
ids=_display_param,
)
def run_command(
self,
request,
runner,
edb,
mandatory_subcommand_args=tuple(["info", "--type", "reaction"]),
) -> Outcome:
flags, expected = request.param
args = flags + list(mandatory_subcommand_args)
result = runner.invoke(commands.command_base, args)
return result, expected
@pytest.fixture(
scope="function",
)
def current_log_level(self, logger_to_check: str = "watertap") -> LogLevel:
logger = logging.getLogger(logger_to_check)
return logger.getEffectiveLevel()
@pytest.mark.unit
def test_verbosity_options_set_log_level(
self, run_command: Outcome, current_log_level: LogLevel
):
result, expected = run_command
assert result == expected
if expected.success:
assert current_log_level == expected.log_level
class TestLoad:
@pytest.fixture(scope="function")
def access_loadable_data(self, tmp_path: Path) -> Path:
src = commands.get_edb_data("filename").parent
dest = tmp_path / "data"
# TODO: dirs_exist_ok is only available for Python 3.8+, so we use a workaround
# until we can drop support for 3.7
# shutil.copytree(src, dest, dirs_exist_ok=False)
assert list(dest.rglob("*")) == [], f"Directory {dest} is not empty"
shutil.copytree(src, dest)
with _changing_cwd(dest):
yield dest
@pytest.fixture(
scope="function",
params=[
pytest.param(
Invocation(
["load", "--bootstrap", "--validate"], Expected(ExitCode.OK)
),
marks=pytest.mark.xfail(
reason="Validation for 'base' not yet available"
),
),
Invocation(["load", "--bootstrap", "--no-validate"]),
Invocation(["load"], Expected(ExitCode.INVALID_USAGE)),
Invocation(["load", "--file", "reaction.json", "--type", "reaction"]),
Invocation(["load", "--file", "component.json", "--type", "component"]),
Invocation(["load", "--file", "base.json", "--type", "base"]),
Invocation(
["load", "--file", "base.json"], Expected(ExitCode.INVALID_USAGE)
),
# to test the validation from the point of view of the command line, we use valid files and data types,
# but switched, e.g. base.json as component.
# the command should fail unless the validation is turned off (default is on)
Invocation(
["load", "--file", "base.json", "--type", "component", "--no-validate"],
Expected(ExitCode.OK),
),
Invocation(
["load", "--file", "base.json", "--type", "component", "--validate"],
Expected(ExitCode.DATABASE_ERROR),
),
# this tests that the default behavior is to validate if no flag is given
Invocation(
["load", "--file", "base.json", "--type", "component"],
Expected(ExitCode.DATABASE_ERROR),
),
Invocation(
["load", "--bootstrap", "--url", _INVALID.url],
Expected(ExitCode.DATABASE_ERROR),
),
],
ids=_display_param,
)
def run_from_empty_db(
self, request, runner, empty_edb, access_loadable_data
) -> Outcome:
invocation = request.param
result = runner.invoke(commands.command_base, invocation.args)
return result, invocation.expected
@pytest.mark.unit
def test_from_empty_db(self, run_from_empty_db: Outcome, edb: ElectrolyteDB):
result, expected = run_from_empty_db
assert result == expected, result.stdout
if expected.success:
assert not edb.is_empty()
@pytest.fixture(
scope="function",
params=[
(["load", "--bootstrap"], Expected(ExitCode.DATABASE_ERROR)),
],
ids=_display_param,
)
def run_from_populated_db(self, request, runner, populated_edb) -> Outcome:
args, expected = request.param
result = runner.invoke(commands.command_base, args)
return result, expected
@pytest.mark.unit
def test_from_populated_db(
self, run_from_populated_db: Outcome, edb: ElectrolyteDB
):
result, expected = run_from_populated_db
assert result == expected, result.stdout
assert not edb.is_empty()
class TestDump:
@pytest.fixture(
scope="function",
params=[
(
["dump", "--type", "reaction", "--file", "reaction.json"],
Expected(file_name="reaction.json"),
),
(
["dump", "--type", "base", "--file", "base.json"],
Expected(file_name="base.json"),
),
(
["dump", "--type", "component", "--file", "component.json"],
Expected(file_name="component.json"),
),
(
["dump", "--type", _INVALID.data_type, "--file", "invalid.json"],
Expected(ExitCode.INVALID_USAGE),
),
],
ids=_display_param,
)
def run_from_populated_db(
self, request, runner, populated_edb, tmp_path
) -> Outcome:
args, expected = request.param
with _changing_cwd(tmp_path) as dest_dir:
result = runner.invoke(commands.command_base, args)
if expected.file_name:
expected.file_path = dest_dir / expected.file_name
yield result, expected
@pytest.mark.unit
def test_from_populated_db(self, run_from_populated_db: Outcome):
result, expected = run_from_populated_db
assert result == expected, result.stdout
if expected.file_name is not None:
assert expected.file_path.exists()
class TestDrop:
@pytest.fixture(
scope="function",
params=[
(["drop", "--yes"], Expected(ExitCode.OK)),
# TODO handle case where expected behavior is to prompt the user,
# so that we can test this command without the "--yes" flag as well
],
ids=_display_param,
)
def run_from_populated_db(self, request, runner, populated_edb) -> Outcome:
args, expected = request.param
result = runner.invoke(commands.command_base, args)
return result, expected
@pytest.mark.unit
def test_from_populated_db(self, run_from_populated_db: Outcome, edb):
result, expected = run_from_populated_db
assert result == expected, result.stdout
if expected.success:
assert edb.is_empty()
class TestInfo:
@pytest.fixture(
scope="function",
params=[
(["info", "--type", "base"], Expected(ExitCode.OK)),
(["info", "--type", "component"], Expected(ExitCode.OK)),
(["info", "--type", "reaction"], Expected(ExitCode.OK)),
(["info", "--type", _INVALID.data_type], Expected(ExitCode.INVALID_USAGE)),
],
ids=_display_param,
)
def run_command(self, request, runner, edb) -> Outcome:
args, expected = request.param
result = runner.invoke(commands.command_base, args)
return result, expected
@pytest.mark.unit
def test_command(self, run_command: Outcome, min_text_length=10):
result, expected = run_command
assert result == expected, result.stdout
if expected.success:
assert len(result.stdout) >= min_text_length
class TestSchema:
format_available = {
"json": True,
"json-compact": False,
"markdown": False,
"html": False,
"html-js": False,
}
@pytest.fixture(
scope="function",
params=[
fmt
if is_available
else pytest.param(
fmt,
marks=pytest.mark.xfail(
reason=f"Schema output format '{fmt}' not yet supported"
),
)
for fmt, is_available in format_available.items()
],
ids=str,
)
def output_format(self, request):
return request.param
@pytest.fixture(
scope="function",
params=[
(["schema", "--type", "component"], Expected(ExitCode.OK)),
(["schema", "--type", "reaction"], Expected(ExitCode.OK)),
(["schema", "--type", "base"], Expected(ExitCode.INVALID_USAGE)),
(
["schema", "--type", "component", "--file", "component.json"],
Expected(ExitCode.OK, file_name="component.json"),
),
],
ids=_display_param,
)
def run_command(
self, request, output_format: str, runner, edb, run_in_empty_dir
) -> Outcome:
args, expected = request.param
args.extend(["--format", output_format])
result = runner.invoke(commands.command_base, args)
return result, expected
@pytest.mark.unit
def test_command(self, run_command: Outcome, min_text_length=20):
result, expected = run_command
assert result == expected, result.stdout
if expected.success:
text = (
expected.file_path.read_text() if expected.file_path else result.stdout
)
assert len(text) >= min_text_length
|
StarcoderdataPython
|
9754514
|
from jupydoc import DocPublisher
__docs__ = ['Simple']
class Simple(DocPublisher):
"""
title: Development
    abstract: This is a very basic class
sections:
section1 section2
subsections:
section1: sub1 sub2
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
pass
def section1(self):
"""First Section
"""
r = 1/3
self.publishme()
def sub1(self):
"""First Subsection
r={r:.2f}
"""
self.publishme()
def sub2(self):
"""Second subsection
"""
self.publishme()
def section2(self):
"""Second Section
"""
self.publishme()
|
StarcoderdataPython
|
11319546
|
from client.klaytn import Klaytn
from client.utils import *
import urllib.request
import json, requests, subprocess
import logging
logging.basicConfig(level=logging.DEBUG)
# check update
with open('./info.json', 'r') as f:
info = json.load(f)
res = check_update(info)
if not res: # not updated
exit(0)
# updated
klay = Klaytn(info['klaytn_node'])
# get file key
file_id = res['file_id']
txhash = res['txHash']
key = klay.getInputData(txhash)
# get public URL
file_url = get_realfirmwareurl(file_id, key, info)
print(file_url)
file_name = file_url[file_url.rfind("/")+1:]
print(file_name)
# download file
urllib.request.urlretrieve(file_url, './firms/src/' + file_name)
# get file checksum
file_hash = hash_file('./firms/src/' + file_name)
# check hash in blockchain
output = subprocess.Popen(['node', 'client/send.js', 'hash', str(file_id), file_hash], stdout=subprocess.PIPE ).communicate()[0]
result = json.loads(output.strip().decode())['result']
try:
if result:
print('[*] Success: File Equal! (local-blockchain)')
# check hash(reporting)
res = requests.post(info['firmware_server'] + '/api/check/hash/' + str(file_id), json={
'hash': file_hash,
'wallet': info['device']['wallet']
})
print(res.text)
data = json.loads(res.text)
if data['equal']:
print('[*] Success: File Equal! (local-server)')
# upload to arduino
if get_ext('./firms/src/' + file_name) == '.ino':
print('[*] Success: Firmware file is .ino file!')
upload_device()
else:
print('[*] Error: Firmware file is NOT .ino file!')
else:
print('[*] Error: File is Different! (local-server)')
else:
print('[*] Error: File is Different! (local-blockchain)')
except Exception as e:
    print('[*] Error while executing:', e)
print('[*] Finish')
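# For reference, a minimal sketch of what a checksum helper like hash_file (imported
# from client.utils above) might look like -- an assumption, not the project's code:
#
#   import hashlib
#   def sha256_file(path, chunk_size=8192):
#       digest = hashlib.sha256()
#       with open(path, 'rb') as fh:
#           for chunk in iter(lambda: fh.read(chunk_size), b''):
#               digest.update(chunk)
#       return digest.hexdigest()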
|
StarcoderdataPython
|
6670701
|
<gh_stars>1-10
import unittest
import hypothesis.strategies as st
from hypothesis import given
from {exercise_name} import Solution
class Test(unittest.TestCase):
def test_1(self):
solution = Solution()
self.assertEqual(solution.{method}(), True)
@given(st.lists(st.integers(), min_size=1), st.lists(st.integers()))
def test_random(self, x, y):
solution = Solution()
self.assertEqual(solution.{method}(), True)
if __name__ == "__main__":
unittest.main()
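# Filled-in example (hypothetical exercise name and method, substituted for
# {exercise_name} and {method} when the template is rendered):
#
#   from valid_parentheses import Solution
#
#   class Test(unittest.TestCase):
#       def test_1(self):
#           self.assertEqual(Solution().is_valid(), True)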
|
StarcoderdataPython
|
3244787
|
<filename>eviction_tracker/app.py
import flask
from flask import Flask, request, redirect
from flask_security import hash_password, auth_token_required
from eviction_tracker.extensions import cors, db, marshmallow, migrate, api, login_manager, security
from eviction_tracker.admin.models import User, user_datastore
import os
import time
import calendar
from sqlalchemy import and_, or_, func, desc
from sqlalchemy.sql import text
from eviction_tracker import commands, detainer_warrants, admin, direct_action
import json
from datetime import datetime, date, timedelta
from dateutil.rrule import rrule, MONTHLY
from collections import OrderedDict
import flask_wtf
from flask_security import current_user
from flask_apscheduler import APScheduler
from datadog import initialize, statsd
import logging.config
import eviction_tracker.config as config
logging.config.dictConfig(config.LOGGING)
logger = logging.getLogger(__name__)
options = {
'statsd_host': '127.0.0.1',
'statsd_port': 8125
}
Attorney = detainer_warrants.models.Attorney
DetainerWarrant = detainer_warrants.models.DetainerWarrant
Defendant = detainer_warrants.models.Defendant
Plaintiff = detainer_warrants.models.Plaintiff
Judgment = detainer_warrants.models.Judgment
security_config = dict(
SECURITY_PASSWORD_SALT=os.environ['SECURITY_PASSWORD_SALT'],
SECURITY_FLASH_MESSAGES=False,
    # Backend Flask API calls need to be routable; use 'accounts' as the
    # URL prefix for the Flask-Security endpoints.
SECURITY_URL_PREFIX='/api/v1/accounts',
# These need to be defined to handle redirects
# As defined in the API documentation - they will receive the relevant context
SECURITY_POST_CONFIRM_VIEW="/confirmed",
SECURITY_CONFIRM_ERROR_VIEW="/confirm-error",
SECURITY_RESET_VIEW="/reset-password",
SECURITY_RESET_ERROR_VIEW="/reset-password",
SECURITY_REDIRECT_BEHAVIOR="spa",
# CSRF protection is critical for all session-based browser UIs
# enforce CSRF protection for session / browser - but allow token-based
# API calls to go through
SECURITY_CSRF_PROTECT_MECHANISMS=["session", "basic"],
SECURITY_CSRF_IGNORE_UNAUTH_ENDPOINTS=True,
SECURITY_CSRF_COOKIE={"key": "XSRF-TOKEN"},
WTF_CSRF_CHECK_DEFAULT=False,
WTF_CSRF_TIME_LIMIT=None
)
def env_var_bool(key, default=None):
return os.getenv(key, default if default else 'False').lower() in ('true', '1', 't')
def create_app(testing=False):
app = Flask(__name__.split('.')[0])
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['SQLALCHEMY_DATABASE_URI']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = os.environ['SQLALCHEMY_TRACK_MODIFICATIONS']
app.config['SECRET_KEY'] = os.environ['SECRET_KEY']
app.config['TWILIO_ACCOUNT_SID'] = os.environ['TWILIO_ACCOUNT_SID']
app.config['TWILIO_AUTH_TOKEN'] = os.environ['TWILIO_AUTH_TOKEN']
app.config['GOOGLE_ACCOUNT_PATH'] = os.environ['GOOGLE_ACCOUNT_PATH']
app.config['ROLLBAR_CLIENT_TOKEN'] = os.environ['ROLLBAR_CLIENT_TOKEN']
app.config['VERSION'] = os.environ['VERSION']
app.config['SCHEDULER_API_ENABLED'] = env_var_bool('SCHEDULER_API_ENABLED')
app.config['SCHEDULER_TIMEZONE'] = os.environ.get(
'SCHEDULER_TIMEZONE', 'UTC')
app.config['CASELINK_USERNAME'] = os.environ['CASELINK_USERNAME']
app.config['CASELINK_PASSWORD'] = os.environ['CASELINK_PASSWORD']
app.config['TESTING'] = testing
app.config['LOGIN_WAIT'] = float(os.environ['LOGIN_WAIT'])
app.config['SEARCH_WAIT'] = float(os.environ['SEARCH_WAIT'])
app.config['SQLALCHEMY_ECHO'] = env_var_bool('SQLALCHEMY_ECHO')
app.config['CHROMEDRIVER_HEADLESS'] = env_var_bool(
'CHROMEDRIVER_HEADLESS', default='True')
app.config.update(**security_config)
if app.config['ENV'] == 'production':
initialize(**options)
register_extensions(app)
register_shellcontext(app)
register_commands(app)
return app
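# Query helpers: the functions below restrict detainer-warrant queries to a date
# window and build the monthly (start, end) pairs used by the rollup endpoints.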
def between_dates(start, end, query):
return query.filter(
and_(
func.date(DetainerWarrant.file_date) >= start,
func.date(DetainerWarrant.file_date) <= end
)
)
def months_since(start):
end = date.today()
dates = [(dt, next_month(dt))
for dt in rrule(MONTHLY, dtstart=start, until=end)]
return dates, end
def count_between_dates(start, end):
return between_dates(start, end, db.session.query(DetainerWarrant)).count()
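# Roll forward to the first day of the following month,
# e.g. next_month(date(2021, 12, 15)) -> datetime(2022, 1, 1, 0, 0).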
def next_month(dt):
return datetime(dt.year + (1 if dt.month == 12 else 0), max(1, (dt.month + 1) % 13), 1)
def top_evictions_between_dates(start, end):
return between_dates(start, end, db.session.query(Plaintiff, func.count(DetainerWarrant.plaintiff_id)))\
.join(Plaintiff)\
.group_by(DetainerWarrant.plaintiff_id, Plaintiff.id)\
.order_by(desc(func.count('*')))\
.limit(10)\
.all()
def evictions_by_month(plaintiff_id, months):
return db.session.execute(text("""
select
count(cases.docket_id) filter (where date(cases.file_date) >= :d_1_start AND date(cases.file_date) <= :d_1_end) as ":1",
count(cases.docket_id) filter (where date(cases.file_date) >= :d_2_start AND date(cases.file_date) <= :d_2_end) as ":2",
count(cases.docket_id) filter (where date(cases.file_date) >= :d_3_start AND date(cases.file_date) <= :d_3_end) as ":3",
count(cases.docket_id) filter (where date(cases.file_date) >= :d_4_start AND date(cases.file_date) <= :d_4_end) as ":4",
count(cases.docket_id) filter (where date(cases.file_date) >= :d_5_start AND date(cases.file_date) <= :d_5_end) as ":5",
count(cases.docket_id) filter (where date(cases.file_date) >= :d_6_start AND date(cases.file_date) <= :d_6_end) as ":6",
count(cases.docket_id) filter (where date(cases.file_date) >= :d_7_start AND date(cases.file_date) <= :d_7_end) as ":7",
count(cases.docket_id) filter (where date(cases.file_date) >= :d_8_start AND date(cases.file_date) <= :d_8_end) as ":8",
count(cases.docket_id) filter (where date(cases.file_date) >= :d_9_start AND date(cases.file_date) <= :d_9_end) as ":9",
count(cases.docket_id) filter (where date(cases.file_date) >= :d_10_start AND date(cases.file_date) <= :d_10_end) as ":10",
count(cases.docket_id) filter (where date(cases.file_date) >= :d_11_start AND date(cases.file_date) <= :d_11_end) as ":11",
count(cases.docket_id) filter (where date(cases.file_date) >= :d_12_start AND date(cases.file_date) <= :d_12_end) as ":12"
FROM plaintiffs p JOIN cases ON p.id = cases.plaintiff_id AND cases.type = 'detainer_warrant'
WHERE cases.plaintiff_id = :plaintiff_id
"""), {"plaintiff_id": plaintiff_id, **months})
def top_plaintiff_attorneys_bet(start, end):
# TODO: perhaps figure this out in python
return db.session.execute("""
with top as
(select a.name, count(dw.docket_id) as warrantCount
from attorneys a
inner join cases dw on dw.plaintiff_attorney_id = a.id
where a.id <> -1 and dw.type = 'detainer_warrant'
group by a.id, a.name
order by count(dw.docket_id) desc)
select *
from top
union
(select 'ALL OTHER' as name,
sum(top.warrantCount) as warrantCount
from top
where top.name not in
(select top.name
from top
limit 5))
order by warrantCount desc
limit 6;
""")
def top_judges_bet(start, end):
# TODO: perhaps figure this out in python
return db.session.execute("""
with top as
(select j.name, count(jm.detainer_warrant_id) as warrantCount
from judges j
inner join judgments jm on jm.judge_id = j.id
group by j.id, j.name
order by count(jm.detainer_warrant_id) desc)
select *
from top
union
(select 'ALL OTHER' as name,
sum(top.warrantCount) as warrantCount
from top
where top.name not in
(select top.name
from top
limit 5))
order by warrantCount desc
limit 6;
""")
def top_plaintiff_ranges_bet(start, end):
# TODO: perhaps figure this out in python
return db.session.execute("""
with top as
(select p.name,
count(dw.docket_id) as warrant_count,
sum(CASE WHEN dw.amount_claimed > 2000 THEN 1 ELSE 0 END) as high,
sum(case when dw.amount_claimed > 1500 and dw.amount_claimed <= 2000 then 1 else 0 end) as medium_high,
sum(case when dw.amount_claimed > 1000 and dw.amount_claimed <= 1500 then 1 else 0 end) as medium,
sum(case when dw.amount_claimed > 500 and dw.amount_claimed <= 1000 then 1 else 0 end) as medium_low,
sum(CASE WHEN dw.amount_claimed < 500 THEN 1 ELSE 0 END) as low
from plaintiffs p
inner join cases dw on dw.plaintiff_id = p.id
where dw.type = 'detainer_warrant'
group by p.id, p.name
order by warrant_count desc)
select *
from top
union
(select 'ALL OTHER' as name,
sum(top.warrant_count) as warrant_count,
sum(top.high),
sum(top.medium_high),
sum(top.medium),
sum(top.medium_low),
sum(top.low)
from top
where top.name not in
(select top.name
from top
limit 5))
order by warrant_count desc
limit 6;
""")
def pending_scheduled_case_count(start, end):
return db.session.query(DetainerWarrant)\
.filter(
and_(
func.date(DetainerWarrant.court_date) >= start,
func.date(DetainerWarrant.court_date) < end,
DetainerWarrant.status_id == DetainerWarrant.statuses['PENDING']
)
)\
.count()
def amount_awarded_between(start, end):
amount = db.session.query(func.sum(Judgment.awards_fees))\
.filter(
and_(
func.date(Judgment.court_date) >= start,
func.date(Judgment.court_date) < end
)
).scalar()
if amount is None:
return 0
else:
return amount
def round_dec(dec):
return int(round(dec))
def millis_timestamp(dt):
return round(dt.timestamp() * 1000)
def millis(d):
return millis_timestamp(datetime.combine(d, datetime.min.time()))
def register_extensions(app):
"""Register Flask extensions."""
db.init_app(app)
migrate.init_app(app, db)
marshmallow.init_app(app)
api.init_app(app)
login_manager.init_app(app)
login_manager.login_view = None
security.init_app(app, user_datastore)
cors.init_app(app)
flask_wtf.CSRFProtect(app)
api.add_resource('/attorneys/', detainer_warrants.views.AttorneyListResource,
detainer_warrants.views.AttorneyResource, app=app)
api.add_resource('/defendants/', detainer_warrants.views.DefendantListResource,
detainer_warrants.views.DefendantResource, app=app)
api.add_resource('/courtrooms/', detainer_warrants.views.CourtroomListResource,
detainer_warrants.views.CourtroomResource, app=app)
api.add_resource('/hearings/', detainer_warrants.views.HearingListResource,
detainer_warrants.views.HearingResource, app=app)
api.add_resource('/plaintiffs/', detainer_warrants.views.PlaintiffListResource,
detainer_warrants.views.PlaintiffResource, app=app)
api.add_resource('/judgments/', detainer_warrants.views.JudgmentListResource,
detainer_warrants.views.JudgmentResource, app=app)
api.add_resource('/judges/', detainer_warrants.views.JudgeListResource,
detainer_warrants.views.JudgeResource, app=app)
api.add_resource('/detainer-warrants/', detainer_warrants.views.DetainerWarrantListResource,
detainer_warrants.views.DetainerWarrantResource, app=app)
api.add_resource('/pleading-documents/', detainer_warrants.views.PleadingDocumentListResource,
detainer_warrants.views.PleadingDocumentResource, app=app)
api.add_resource('/phone-number-verifications/', detainer_warrants.views.PhoneNumberVerificationListResource,
detainer_warrants.views.PhoneNumberVerificationResource, app=app)
api.add_resource('/users/', admin.views.UserListResource,
admin.views.UserResource, app=app)
api.add_resource('/roles/', admin.views.RoleListResource,
admin.views.RoleResource, app=app)
api.add_resource('/campaigns/', direct_action.views.CampaignListResource,
direct_action.views.CampaignResource, app=app)
api.add_resource('/events/', direct_action.views.EventListResource,
direct_action.views.EventResource, app=app)
api.add_resource('/phone_bank_events/', direct_action.views.PhoneBankEventListResource,
direct_action.views.PhoneBankEventResource, app=app)
@app.route('/api/v1/rollup/detainer-warrants')
def detainer_warrant_rollup_by_month():
start_dt = (date.today() - timedelta(weeks=52)).replace(day=1)
end_dt = date.today()
dates = [(dt, next_month(dt))
for dt in rrule(MONTHLY, dtstart=start_dt, until=end_dt)]
counts = [{'time': millis_timestamp(start), 'total_warrants': count_between_dates(
start, end)} for start, end in dates]
return flask.jsonify(counts)
@app.route('/api/v1/rollup/plaintiffs')
def plaintiff_rollup_by_month():
start_dt = (date.today() - timedelta(weeks=52)).replace(day=1)
end_dt = date.today()
dates = [(dt, next_month(dt))
for dt in rrule(MONTHLY, dtstart=start_dt, until=end_dt)]
months = {}
for i, d_range in enumerate(dates):
start, end = d_range
months[str(i)] = str(i)
months[f'd_{i}_start'] = start.strftime('%Y-%m-%d')
months[f'd_{i}_end'] = end.strftime('%Y-%m-%d')
top_ten = top_evictions_between_dates(start_dt, end_dt)
plaintiffs = {}
top_evictors = []
for plaintiff, plaintiff_total_evictions in top_ten:
counts = evictions_by_month(plaintiff.id, months).fetchone()
history = []
for i in range(12):
history.append({'date': millis_timestamp(
dates[i][0]), 'eviction_count': counts[i]
})
top_evictors.append({'name': plaintiff.name, 'history': history})
return flask.jsonify(top_evictors)
@app.route('/api/v1/rollup/plaintiffs/amount_claimed_bands')
def plaintiffs_by_amount_claimed():
start_dt = (date.today() - timedelta(weeks=52)).replace(day=1)
dates, end_dt = months_since(start_dt)
top_six = top_plaintiff_ranges_bet(start_dt, end_dt)
top_plaintiffs = [{
'plaintiff_name': result[0],
'warrant_count': round_dec(result[1]),
'greater_than_2k': round_dec(result[2]),
'between_1.5k_and_2k': round_dec(result[3]),
'between_1k_and_1.5k': round_dec(result[4]),
'between_500_and_1k': round_dec(result[5]),
'less_than_500': round_dec(result[6]),
'start_date': millis(start_dt),
'end_date': millis(end_dt)
} for result in top_six]
return flask.jsonify(top_plaintiffs)
@app.route('/api/v1/rollup/plaintiff-attorney')
def plaintiff_attorney_warrant_share():
start_dt = date(2020, 1, 1)
dates, end_dt = months_since(start_dt)
top_six = top_plaintiff_attorneys_bet(start_dt, end_dt)
top_plaintiffs = [{
'warrant_count': int(round(warrant_count)),
'plaintiff_attorney_name': attorney_name,
'start_date': millis(start_dt),
'end_date': millis(end_dt)
} for attorney_name, warrant_count in top_six]
prs = {
'warrant_count': between_dates(start_dt, end_dt, Attorney.query.filter_by(id=-1).join(DetainerWarrant)).count(),
'plaintiff_attorney_name': Attorney.query.get(-1).name,
'start_date': millis(start_dt),
'end_date': millis(end_dt),
}
return flask.jsonify(top_plaintiffs + [prs])
@app.route('/api/v1/rollup/judges')
def judge_warrant_share():
start_dt = date(2020, 1, 1)
dates, end_dt = months_since(start_dt)
top_six = top_judges_bet(start_dt, end_dt)
top_judges = [{
'warrant_count': int(round(warrant_count)),
'presiding_judge_name': judge_name,
'start_date': millis(start_dt),
'end_date': millis(end_dt)
} for judge_name, warrant_count in top_six]
return flask.jsonify(top_judges)
@app.route('/api/v1/rollup/detainer-warrants/pending')
def pending_detainer_warrants():
start_of_month = date.today().replace(day=1)
end_of_month = (date.today().replace(day=1) +
timedelta(days=32)).replace(day=1)
return flask.jsonify({'pending_scheduled_case_count': pending_scheduled_case_count(start_of_month, end_of_month)})
@app.route('/api/v1/rollup/amount-awarded')
def amount_awarded():
start_of_month = date.today().replace(day=1)
end_of_month = (date.today().replace(day=1) +
timedelta(days=32)).replace(day=1)
return flask.jsonify({'data': round_dec(amount_awarded_between(start_of_month, end_of_month))})
@app.route('/api/v1/rollup/amount-awarded/history')
def amount_awarded_history():
start_dt = date(2021, 3, 1)
end_dt = date.today()
dates = [(dt, next_month(dt))
for dt in rrule(MONTHLY, dtstart=start_dt, until=end_dt)]
awards = [{'time': millis_timestamp(start), 'total_amount': round_dec(amount_awarded_between(
start, end))} for start, end in dates]
return flask.jsonify({'data': awards})
@app.route('/api/v1/rollup/meta')
def data_meta():
last_warrant = db.session.query(DetainerWarrant).order_by(
desc(DetainerWarrant.updated_at)).first()
return flask.jsonify({
'last_detainer_warrant_update': last_warrant.updated_at if last_warrant else None
})
@app.route('/api/v1/rollup/year/<int:year_number>/month/<int:month_number>')
def monthly_rollup(year_number, month_number):
start_date, end_date = calendar.monthrange(year_number, month_number)
start_of_month = date(year_number, month_number, start_date + 1)
end_of_month = date(year_number, month_number, end_date)
return flask.jsonify({
'detainer_warrants_filed': between_dates(start_of_month, end_of_month, DetainerWarrant.query).count(),
'eviction_judgments': Judgment.query.filter(Judgment._file_date > start_of_month, Judgment._file_date < end_of_month, Judgment.awards_possession == True).count(),
'plaintiff_awards': float(db.session.query(func.sum(Judgment.awards_fees)).filter(Judgment._file_date > start_of_month, Judgment._file_date < end_of_month, Judgment.awards_fees != None).scalar()),
'evictions_entered_by_default': float(Judgment.query.filter(Judgment._file_date > start_of_month, Judgment._file_date < end_of_month, Judgment.entered_by_id == 0).count())
})
@app.route('/api/v1/current-user')
@auth_token_required
def me():
return admin.serializers.user_schema.dump(current_user)
def register_shellcontext(app):
def shell_context():
return {
'user_datastore': user_datastore,
            'hash_password': hash_password
}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.test)
app.cli.add_command(commands.import_from_caselink)
app.cli.add_command(commands.sync)
app.cli.add_command(commands.sync_judgments)
app.cli.add_command(commands.parse_docket)
app.cli.add_command(commands.parse_mismatched_pleading_documents)
app.cli.add_command(commands.scrape_docket)
app.cli.add_command(commands.scrape_dockets)
app.cli.add_command(commands.export)
app.cli.add_command(commands.export_courtroom_dockets)
app.cli.add_command(commands.verify_phone)
app.cli.add_command(commands.verify_phones)
app.cli.add_command(commands.extract_all_pleading_document_details)
app.cli.add_command(commands.retry_detainer_warrant_extraction)
app.cli.add_command(commands.bulk_extract_pleading_document_details)
app.cli.add_command(commands.extract_pleading_document_text)
app.cli.add_command(commands.update_judgment_from_document)
app.cli.add_command(commands.update_judgments_from_documents)
app.cli.add_command(commands.gather_pleading_documents)
app.cli.add_command(commands.gather_pleading_documents_in_bulk)
app.cli.add_command(commands.gather_warrants_csv)
app.cli.add_command(commands.bootstrap)
|
StarcoderdataPython
|
3228454
|
<filename>script.py<gh_stars>0
import os
import sys
import json
import requests
from bs4 import BeautifulSoup
DOMAIN= "<domain>"
BASE_URL= "https://{domain}.atlassian.net/wiki/rest/api/".format(domain=DOMAIN)
USERNAME= "<username>"
PASSWORD= "<password>"
AUTH= (USERNAME, PASSWORD)
def get_page_info(auth, title):
url = '{base}content?title={title}'.format(base=BASE_URL, title=title)
r =requests.get(url, auth=auth)
r.raise_for_status()
return r.json()
def delete_page(auth, title):
info = get_page_info(auth, title)
ID = info['results'][0]['id']
url = '{base}content/{ID}'.format(base=BASE_URL, ID=ID)
r = requests.delete(url, auth=auth)
r.raise_for_status()
print("Deleted "+title)
def write_data(auth, html, title, parent=""):
if parent:
info = get_page_info(auth, parent)
parentId = info['results'][0]['id']
data = {
'space': {
'key': 'TTIHEROES'
},
'type': 'page',
'version': {
'number': 0
},
'title': title,
'ancestors':[
{
'id': str(parentId)
}
],
'body': {
'storage':
{
'representation': 'storage',
'value': str(html),
}
}
}
else:
data = {
'space': {
'key': 'TTIHEROES'
},
'type': 'page',
'title': title,
'version': {
'number': 0
},
'body': {
'storage':
{
'representation': 'storage',
'value': str(html),
}
}
}
data = json.dumps(data)
url = '{base}content'.format(base=BASE_URL)
r = requests.post(url, data=data, auth=AUTH, headers = { 'Content-Type' : 'application/json' })
r.raise_for_status()
print("Created "+title)
def iterateThroughDir():
for subdir, dirs, files in os.walk("."):
name = selectLastDir(subdir)
if(name.find(".")<0):
if(sys.argv[1]=="delete"):
delete_page(AUTH, name)
else:
parentpath = removeLastDirFromFilepath(subdir)
if(parentpath!="./"):
parentpath =replaceBetween(len(parentpath)-1, len(parentpath), parentpath, "")
parent = selectLastDir(parentpath)
write_data(AUTH, "<p>Section relatives to "+name+".</p>", name, parent)
else:
write_data(AUTH, "<p>Section relatives to "+name+".</p>", name, "Documentation")
for file in files:
sub_dir = selectLastDir(subdir)
filepath = subdir + os.sep + file
filename = selectLastDir(filepath)
if filepath.endswith(".md"):
filename = replaceBetween(len(filename)-3,len(filename), filename, "")
if(sys.argv[1]=="delete"):
delete_page(AUTH, filename+" - "+sub_dir)
else:
filePath = removeLastDirFromFilepath(filepath)
filePath = replaceBetween(0, 1, filePath, "")
htmlpath = replaceBetween(len(filepath)-3, len(filepath), filepath, ".html")
os.system("pandoc {fp} -f markdown -t html -o {htmlfile}".format(fp=filepath, htmlfile=htmlpath))
with open(htmlpath, 'r') as fd:
html = fd.read()
html = prepare_html(html, filePath)
write_data(AUTH, html, filename+" - "+sub_dir, sub_dir)
os.system("rm {htmlfile}".format(htmlfile=htmlpath))
def replaceBetween(startIndex, endIndex, oldStr, replacement):
string = oldStr[0:startIndex] + replacement + oldStr[endIndex+1:]
return string
def removeLastDirFromFilepath(filepath):
i=0
if(filepath.endswith("/") and len(filepath)>1):
filepath = replaceBetween(len(filepath)-1, len(filepath), filepath, "")
while(i<len(filepath)):
res = filepath.find("/", i)
if res < 0 :
break
else:
i = res + 1
if i == len(filepath):
return ""
else:
withoutLastDir = replaceBetween(i, len(filepath), filepath, "")
return withoutLastDir
def selectLastDir(filepath):
i=0
while i<len(filepath):
res = filepath.find("/", i)
if res < 0:
return filepath
else:
filepath = replaceBetween(0, res, filepath, "")
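# Rewrite relative image paths to absolute thethingsnetwork.org/docs URLs and replace
# <img> tags with Confluence <ac:image> storage-format macros.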
def prepare_html(html, filepath):
soup = BeautifulSoup(html, "html.parser")
img_tags = soup.findAll('img')
sources = []
i = 0
if(img_tags):
while len(sources) < len(img_tags):
src = soup.findAll('img')[i]['src']
sources.append(src)
i = i+1
for src in sources:
sources.remove(src)
relpath = src.find('../')
if (relpath >= 0):
doublerelpath = src.find('../../')
if(doublerelpath>=0):
fin = 5
src = replaceBetween(0, fin, src, "https://www.thethingsnetwork.org/docs/" +removeLastDirFromFilepath(removeLastDirFromFilepath(filepath)))
else:
fin = 2
src = replaceBetween(0, fin, src, "https://www.thethingsnetwork.org/docs/" + removeLastDirFromFilepath(filepath))
else:
src = "https://www.thethingsnetwork.org/docs/" + filepath + src
sources.insert(0, src)
count = len(sources)
while(i<len(html)):
debut_img = html.find("<img", i)
if(debut_img>=0):
i = debut_img
fin_img = html.find("/>", i)
html = replaceBetween(i, fin_img+1, html, '<ac:image ac:height="250"><ri:url ri:value="' + sources[count-1] + '"/></ac:image>')
count = count - 1
i = fin_img + 1
else:
break
return html
iterateThroughDir()
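# Usage (based on the sys.argv[1] checks above): run "python script.py delete" to
# remove the generated pages; any other argument value creates/updates them.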
|
StarcoderdataPython
|
8051067
|
import tests.periodicities.period_test as per
per.buildModel((12 , 'B' , 25));
|
StarcoderdataPython
|
225043
|
#!/usr/bin/env python
#pylint: skip-file
# This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this project.
class ScheduleInfoOutput(object):
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'description': 'str',
'startTime': 'date-time',
'endTime': 'date-time',
'origin': 'str',
'operation': 'str',
'taskId': 'str',
'groupName': 'str',
'scheduledWorkSpecId': 'str',
'prevTime': 'date-time',
'nextTime': 'date-time'
}
self.attributeMap = {
'description': 'description',
'startTime': 'startTime',
'endTime': 'endTime',
'origin': 'origin',
'operation': 'operation',
'taskId': 'taskId',
'groupName': 'groupName',
'scheduledWorkSpecId': 'scheduledWorkSpecId',
'prevTime': 'prevTime',
'nextTime': 'nextTime'
}
#Simple description to be shown to end-users
self.description = None # str
#The time at which the trigger should first fire. If the schedule has fired and will not fire again, this value will be null
self.startTime = None # date-time
#The time at which the trigger should quit repeating
self.endTime = None # date-time
        #Contextual field used to identify work specifications originating from the same source
self.origin = None # str
#Contextual field used by the service to identify an operation
self.operation = None # str
#UUID of the Task
self.taskId = None # str
#A grouping name that can be specified by the service to allow for filtered work spec retrieval
self.groupName = None # str
#UUID of the ScheduledWorkSpec associated with the scheduled task
self.scheduledWorkSpecId = None # str
#The previous time at which the trigger fired. If the trigger has not yet fired, null will be returned
self.prevTime = None # date-time
#The next time at which the trigger should fire
self.nextTime = None # date-time
|
StarcoderdataPython
|
3551073
|
<filename>tests/parsing/test_loops.py
# pylint: disable=missing-docstring,unused-import,no-member
from .util import create_scripttest_func
test_until_do = create_scripttest_func('until_do')
test_restart = create_scripttest_func('restart')
test_for_every = create_scripttest_func('for_every')
|
StarcoderdataPython
|
3536930
|
import pygame
import sys
import copy
import time
from settings import *
from player_class import *
from enemy_class import *
vec = pygame.math.Vector2
class App:
    # Constructor
def __init__(self):
pygame.init()
self.screen = pygame.display.set_mode((WIDTH, HEIGHT))
self.clock = pygame.time.Clock()
self.running = True
self.state = 'start'
self.cell_width = MAZE_WIDTH//28
self.cell_height = MAZE_HEIGHT//31
self.walls = []
self.barrier = []
self.dots = []
self.pellets = []
self.crossroads = []
self.crossroad_L = None
self.crossroad_R = None
self.total_dots = 0
self.enemies = []
self.enemies_names = ["Clyde", "Pinky", "Inky", "Blinky"]
self.load()
self.player = Player(self)
self.make_enemies()
    # run method: defines what the game should do based on its current state
def run(self):
while self.running:
self.clock.tick(FPS)
if self.state == "start":
self.start_events()
self.start_update()
self.start_draw()
elif self.state == "playing":
self.playing_events()
self.playing_update()
self.playing_draw()
elif self.state == "game over":
self.game_over_events()
self.game_over_update()
self.game_over_draw()
elif self.state == "victory":
self.victory_events()
self.victory_update()
self.victory_draw()
else:
self.running = False
pygame.quit()
sys.exit()
########## HELPER FUNCTIONS ##########
    # Draws the given text on the screen at the given position,
    # using the colour and font passed as parameters
def draw_text(self, words, screen, pos, size, colour, font_name, centered=False):
font = pygame.font.SysFont(font_name, size)
text = font.render(words, False, colour)
text_size = text.get_size()
if centered:
pos[0] = pos[0]-text_size[0]//2
pos[1] = pos[1]-text_size[1]//2
screen.blit(text, pos)
    # Loads the "maze.png" image and reads the "walls.txt" file
    # Translates "walls.txt" into information about the maze
def load(self):
self.background = pygame.image.load("maze.png")
self.background = pygame.transform.scale(self.background, (MAZE_WIDTH, MAZE_HEIGHT))
        # Identify the important points of the map
        # (walls, barrier, crossroads, dots, pellets)
with open("walls.txt", "r") as file:
for yidx, line in enumerate(file):
for xidx, char in enumerate(line):
if char in ["1", "B"]:
self.walls.append(vec(xidx, yidx))
if char == "B":
self.barrier.append(vec(xidx, yidx))
elif char in ["D", "X", "L", "R", "P"]:
self.total_dots += 1
if char == "P":
self.pellets.append(vec(xidx, yidx))
else:
self.dots.append(vec(xidx, yidx))
if char == "X":
self.crossroads.append(vec(xidx, yidx))
elif char == "L":
self.crossroad_L = vec(xidx, yidx)
elif char == "R":
self.crossroad_R = vec(xidx, yidx)
elif char == "Y":
self.crossroads.append(vec(xidx, yidx))
    # Creates the "enemies" objects (the ghosts)
def make_enemies(self):
for ind, name in enumerate(self.enemies_names):
            # If the ghost is named "Clyde" it starts outside the
            # spawn zone, otherwise it starts inside the spawn zone
if name == "Clyde":
self.enemies.append(Enemy(self, name, ind, True))
else:
self.enemies.append(Enemy(self, name, ind, False))
    # Resets the game
def reset(self):
self.player = None
self.enemies = []
self.player = Player(self)
self.make_enemies()
self.load()
self.state = "playing"
########## INTRO FUNCTIONS ##########
def start_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
self.state = "playing"
def start_update(self):
pass
def start_draw(self):
self.screen.fill(BLACK)
self.draw_text("PUSH SPACE BAR", self.screen, [
WIDTH//2, HEIGHT//2-50], START_TEXT_SIZE, (170, 132, 58), START_FONT, centered=True)
self.draw_text("1 PLAYER ONLY", self.screen, [
WIDTH//2, HEIGHT//2+50], START_TEXT_SIZE, (44, 167, 198), START_FONT, centered=True)
self.draw_text("HIGH SCORE", self.screen, [4, 0],
START_TEXT_SIZE, (255, 255, 255), START_FONT)
pygame.display.update()
########## PLAYING FUNCTIONS ##########
def playing_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
self.player.move(vec(-1, 0))
if event.key == pygame.K_RIGHT:
self.player.move(vec(1, 0))
if event.key == pygame.K_UP:
self.player.move(vec(0, -1))
if event.key == pygame.K_DOWN:
self.player.move(vec(0, 1))
def playing_update(self):
        # Update the player
self.player.update()
        # Update the ghosts
for enemy in self.enemies:
            # If the ghost is named Inky and fewer than 30 dots have been eaten,
            # the ghost cannot be updated yet (only its state is)
if enemy.name == "Inky" and self.player.eaten_dots < 30:
enemy.update_state_only()
            # If the ghost is named Blinky and fewer than 1/3 of the dots have been eaten,
            # the ghost cannot be updated yet (only its state is)
elif enemy.name == "Blinky" and self.player.eaten_dots < self.total_dots//3:
enemy.update_state_only()
else:
enemy.update()
            # If the ghost is in the same cell as the player
if self.player.grid_pos == enemy.grid_pos:
                # If the ghost is in the "Chase" state,
                # the player loses a life
if enemy.state == "Chase":
self.remove_life()
                # If the ghost is in the "Frightened" state,
                # the ghost gets eaten
elif enemy.state == "Frightened":
self.player.eat_enemy(enemy)
        # If the player has eaten all the dots, the player wins
if self.player.eaten_dots == self.total_dots:
self.state = "victory"
def playing_draw(self):
self.screen.fill(BLACK)
self.screen.blit(self.background, (TOP_BOTTOM_BUFFER//2, TOP_BOTTOM_BUFFER//2))
self.draw_dots()
self.draw_pellets()
self.draw_text("CURRENT SCORE: {}".format(self.player.current_score),
self.screen, [60, 0], 18, WHITE, START_FONT)
self.player.draw()
for enemy in self.enemies:
enemy.draw()
pygame.display.update()
    # Removes a life from the player and repositions player and ghosts
    # If the player's lives reach 0, the player loses
def remove_life(self):
self.player.lives -= 1
if self.player.lives == 0:
self.state = "game over"
else:
self.player.grid_pos = copy.deepcopy(self.player.starting_pos)
self.player.pix_pos = self.player.get_pix_pos()
self.player.direction = vec(1, 0)
for enemy in self.enemies:
enemy.grid_pos = vec(enemy.starting_pos)
enemy.pix_pos = enemy.get_pix_pos()
enemy.direction = vec(1, 0)
if enemy.name == "Clyde":
enemy.outside = True
else:
enemy.outside = False
def draw_dots(self):
for dot in self.dots:
pygame.draw.circle(self.screen, DOT_PELLET_COLOUR,
(int(dot.x*self.cell_width)+self.cell_width//2+TOP_BOTTOM_BUFFER//2,
int(dot.y*self.cell_height)+self.cell_height//2+TOP_BOTTOM_BUFFER//2), 2)
def draw_pellets(self):
for pellet in self.pellets:
pygame.draw.circle(self.screen, DOT_PELLET_COLOUR,
(int(pellet.x*self.cell_width)+self.cell_width//2+TOP_BOTTOM_BUFFER//2,
int(pellet.y*self.cell_height)+self.cell_height//2+TOP_BOTTOM_BUFFER//2), 4)
########## GAME OVER FUNCTIONS ##########
def game_over_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
self.reset()
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
self.running = False
def game_over_update(self):
pass
def game_over_draw(self):
self.screen.fill(BLACK)
quit_text = "Press the escape button to QUIT"
again_text = "Press SPACE bar to PLAY AGAIN"
self.draw_text("GAME OVER", self.screen, [WIDTH//2, 100], 52, RED, "arial", centered=True)
self.draw_text(again_text, self.screen, [
WIDTH//2, HEIGHT//2], 36, (190, 190, 190), "arial", centered=True)
self.draw_text(quit_text, self.screen, [
WIDTH//2, HEIGHT//1.5], 36, (190, 190, 190), "arial", centered=True)
pygame.display.update()
########## VICTORY FUNCTIONS ##########
def victory_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
self.reset()
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
self.running = False
def victory_update(self):
pass
def victory_draw(self):
self.screen.fill(BLACK)
quit_text = "Press the escape button to QUIT"
again_text = "Press SPACE bar to PLAY AGAIN"
self.draw_text("VICTORY!", self.screen, [WIDTH//2, 100], 52, VICTORY_COLOUR, "arial", centered=True)
self.draw_text(again_text, self.screen, [
WIDTH//2, HEIGHT//2], 36, (190, 190, 190), "arial", centered=True)
self.draw_text(quit_text, self.screen, [
WIDTH//2, HEIGHT//1.5], 36, (190, 190, 190), "arial", centered=True)
pygame.display.update()
|
StarcoderdataPython
|
5062037
|
import eHive
import os
from VCFIntegration.SNPTools import SNPTools
class SNPTools_prob2vcf(eHive.BaseRunnable):
"""Run SNPTools prob2vcf on a VCF containing biallelic SNPs"""
def run(self):
self.warning("Analysing file {0}".format(self.param_required('vcf_file')))
vcf_i = SNPTools(vcf=self.param_required('vcf_file'),
snptools_folder=self.param_required('snptools_folder'))
chro = self.param_required('chr').rstrip('\n')
work_dir = "{0}/{1}".format(self.param_required('work_dir'), chro)
if not os.path.isdir(work_dir):
os.makedirs(work_dir)
outprefix = os.path.split(self.param_required('outprefix'))[1]
vcf_f = ""
if self.param_is_defined('verbose'):
vcf_f = vcf_i.run_prob2vcf(probf=self.param_required('probf'),
outprefix=outprefix+".{0}".format(chro),
chro=self.param_required('chr'),
outdir=work_dir,
verbose=True)
else:
vcf_f = vcf_i.run_prob2vcf(probf=self.param_required('probf'),
outprefix=outprefix+".{0}".format(chro),
chro=self.param_required('chr'),
outdir=work_dir,
verbose=False)
self.param('vcf_f', vcf_f)
def write_output(self):
self.warning('Work is done!')
self.dataflow({'vcf_f': self.param('vcf_f')}, 1)
|
StarcoderdataPython
|
4928313
|
import plotly.graph_objects as go
from dataclasses import dataclass, field
from typing import List, Tuple
from configuration_parameters import *
@dataclass
class Parameter:
"""Parameter used for decision making
Args:
value (float): Value of the parameter
is_increasing_better (bool): True if the parameter is better if its value is increasing
unit(str): Unit of the parameter
name (str): Name of the parameter
range (Tuple[int]): Range of the parameter
weight (float): Weight of the parameter (0, 1)
normalized_value (float): Normalized value of the parameter
Raises:
ValueError: range is not valid
ValueError: value is not valid
ValueError: weight is not valid
"""
value: int
is_increasing_better: bool = True
unit: str = ""
name: str = ""
range: Tuple[int] = None
weight: float = 1.0 # From 0 to 1
normalized_value: int = 0
def __post_init__(self):
if self.range is not None and self.range[1] <= self.range[0]:
raise ValueError("Parameter {} range {} is not valid".format(self.name, self.range))
if self.range is not None and (self.value < self.range[0] or self.value > self.range[1]):
raise ValueError("Parameter {} value is not valid: value {} not in range {}".format(
self.name, self.value, self.range))
if self.weight > 1.0:
raise ValueError("Parameter {} weight is not valid: weight {} is not in range 0 to 1".format(
self.name, self.weight))
def normalize(self, min_value, max_value):
if self.range is not None:
min_value = self.range[0]
max_value = self.range[1]
denominator = max_value - min_value
if denominator == 0:
self.normalized_value = 0
return
self.normalized_value = (self.value - min_value) / (max_value - min_value)
if not self.is_increasing_better:
self.normalized_value = 1 - self.normalized_value
def calculate_weighted_value(self):
return round(self.normalized_value * self.weight, 2)
K = 1000
class Price(Parameter):
"""Parameter which reflects a price
Args:
value (float): Value of the parameter
weight (float): Weight of the parameter (0, 1)
"""
def __init__(self, value, weight=1.0):
super().__init__(value, is_increasing_better=False, unit="euro", name="price", range=price_range, weight=weight)
class Area(Parameter):
def __init__(self, value, weight=1.0):
super().__init__(value, is_increasing_better=True, unit="sqm", name="area", range=area_range, weight=weight)
class Year(Parameter):
def __init__(self, value, weight=1.0):
super().__init__(value, is_increasing_better=True, unit="year", name="year", range=year_range, weight=weight)
class Vastike(Parameter):
def __init__(self, value, weight=1.0):
super().__init__(value, is_increasing_better=False, unit="euro", name="vastike", range=vastike_range, weight=weight)
class Floor(Parameter):
def __init__(self, value, weight=1.0):
super().__init__(value, is_increasing_better=True, unit="", name="floor", range=floor_range, weight=weight)
class Rooms(Parameter):
def __init__(self, value, weight=1.0):
super().__init__(value, is_increasing_better=True, unit="", name="rooms", range=rooms_range, weight=weight)
class Zone(Parameter):
def __init__(self, value: str):
value = value.lower()
numerical_value = 0
if(zone_weights.get(value) is not None):
numerical_value = zone_weights[value]
super().__init__(numerical_value, is_increasing_better=True, unit="", name="zone", range=zone_range)
@dataclass
class Apartment:
"""Apartment class which stores all relevant parameters for an apartment
"""
categories = ["price", "area", "year", "vastike", "floor", "rooms", "zone"]
name: str
price: Price
area: Area
year: Year
vastike: Vastike
floor: Floor
rooms: Rooms
zone: Zone
url: str = ""
parameters: List[Parameter] = None
def __post_init__(self):
self.update_parameters()
def update_parameters(self):
self.parameters = [self.price, self.area, self.year, self.vastike, self.floor, self.rooms, self.zone]
def get_values(self):
return [p.normalized_value for p in self.parameters]
def calculate_weighted_value(self):
return sum([p.calculate_weighted_value() for p in self.parameters])
def __str__(self) -> str:
return "Apartment: {} Price: {} Area: {} Year: {} Vastike: {} Floor: {} Rooms: {} Zone: {}".format(self.name,
self.price.value,
self.area.value,
self.year.value,
self.vastike.value,
self.floor.value,
self.rooms.value,
self.zone.value)
@dataclass
class Apartments:
apartments: List[Apartment] = None
def __post_init__(self):
self.normalize()
def normalize(self):
n_parameters = len(self.apartments[0].parameters)
for i in range(n_parameters):
self.__normalizeParameters([apartment.parameters[i] for apartment in self.apartments])
def __normalizeParameters(self, parameters: List[Parameter]):
max_value = max(parameter.value for parameter in parameters)
min_value = min(parameter.value for parameter in parameters)
for parameter in parameters:
parameter.normalize(min_value, max_value)
def plot(self):
self.apartments.sort(key=lambda a: a.calculate_weighted_value(), reverse=True)
categories = Apartment.categories
fig = go.Figure()
for i, a in enumerate(self.apartments):
if i > number_of_apartments_to_plot - 1:
break
fig.add_trace(go.Scatterpolar(
r=a.get_values(),
theta=categories,
fill='toself',
hoveron='points+fills',
name=a.name
))
fig.update_layout(polar=dict(
radialaxis=dict(
visible=True,
range=[0, 1]
)),
showlegend=False
)
fig.show()
def rank(self):
self.apartments.sort(key=lambda a: a.calculate_weighted_value(), reverse=True)
for i in range(len(self.apartments)):
if i > number_of_apartments_to_rank - 1:
break
ranking = i + 1
score = self.apartments[i].calculate_weighted_value()
name = self.apartments[i].name
url = self.apartments[i].url
print("{}. Name: {}, Score: {:0.2f}, Url: {}".format(
ranking, name, score, url))
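# A minimal usage sketch with hypothetical values (assumes the ranges imported from
# configuration_parameters cover them):
#
#   flats = Apartments(apartments=[
#       Apartment("Flat A", Price(150_000), Area(55), Year(1995),
#                 Vastike(250), Floor(3), Rooms(2), Zone("keskusta")),
#       Apartment("Flat B", Price(185_000), Area(62), Year(2008),
#                 Vastike(310), Floor(1), Rooms(3), Zone("kallio")),
#   ])
#   flats.rank()   # prints the top apartments by weighted score
#   flats.plot()   # radar chart of the normalized parameter values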
|
StarcoderdataPython
|
1810707
|
<gh_stars>10-100
from numba import njit, int64, float64
from numba.typed import List as L
from numba.types import Tuple, List, ListType as LT
import numpy as np
#edges, vtx2vtx, vtx2edge, vtx2poly, edge2vtx, edge2edge, edge2poly, poly2vtx, poly2edge, poly2poly , LT(LT(int64)),LT(LT(int64)), LT(LT(int64)), int64[:,::1], LT(LT(int64)), LT(LT(int64)), int64[:,::1], int64[:,::1], int64[:,::1]
@njit(Tuple((int64[:,::1], LT(LT(int64)), LT(LT(int64)), LT(LT(int64)), int64[:,::1], LT(LT(int64)), LT(LT(int64)), int64[:,::1],int64[:,::1],int64[:,::1] ))(int64, int64[:,::1]), cache=True)
def get_connectivity_info_surface(num_vertices, polys):
vtx2vtx = L()
vtx2edge = L()
vtx2poly = L()
edge2edge = L()
edge2poly = L()
for i in range(num_vertices):
tmp1 = L()
tmp2 = L()
tmp3 = L()
tmp1.append(-1)
tmp2.append(-1)
tmp3.append(-1)
vtx2vtx.append(tmp1)
vtx2edge.append(tmp2)
vtx2poly.append(tmp3)
for i in range(polys.shape[0]*4):
tmp_1 = L()
tmp_2 = L()
tmp_1.append(-1)
tmp_2.append(-1)
edge2edge.append(tmp_1)
edge2poly.append(tmp_2)
poly2edge = np.zeros((polys.shape[0], polys.shape[1]), dtype=np.int64)-1
poly2poly = np.zeros((polys.shape[0], polys.shape[1]), dtype=np.int64)-1
edges_list = [(0,0)]
edges_map = dict()
edge_poly_map = dict()
for pid in range(polys.shape[0]):
edges_tmp = [[0,0]]
if(polys.shape[1] == 3):
edges_tmp = [[polys[pid][0], polys[pid][1]], [polys[pid][1], polys[pid][2]], [polys[pid][2], polys[pid][0]]]
else:
edges_tmp = [[polys[pid][0], polys[pid][1]], [polys[pid][1], polys[pid][2]], [polys[pid][2], polys[pid][3]], [polys[pid][3], polys[pid][0]]]
for e_idx in range(len(edges_tmp)):
edges_tmp[e_idx].sort()
e = (edges_tmp[e_idx][0], edges_tmp[e_idx][1])
eid = 0
not_checked = False
if e not in edges_map:
eid = len(edges_list)-1
edges_list.append(e)
edges_map[e] = eid
not_checked = True
else:
eid = edges_map[e]
adj_pid = -1
if eid in edge_poly_map:
adj_pid = edge_poly_map[eid]
else:
edge_poly_map[eid] = pid
if not_checked:
#vtx2vtx
if(vtx2vtx[e[0]][0] == -1):
vtx2vtx[e[0]][0] = e[1]
else:
vtx2vtx[e[0]].append(e[1])
if(vtx2vtx[e[1]][0] == -1):
vtx2vtx[e[1]][0] = e[0]
else:
vtx2vtx[e[1]].append(e[0])
#vtx2edge
if(vtx2edge[e[0]][0] == -1):
vtx2edge[e[0]][0] = eid
else:
vtx2edge[e[0]].append(eid)
if(vtx2edge[e[1]][0] == -1):
vtx2edge[e[1]][0] = eid
else:
vtx2edge[e[1]].append(eid)
#edge2poly
if(edge2poly[eid][0] == -1):
edge2poly[eid][0] = pid
else:
edge2poly[eid].append(pid)
#poly2edge
idx_to_append = np.where(poly2edge[pid] == -1)[0][0]
poly2edge[pid][idx_to_append] = eid
#poly2poly
if adj_pid != -1:
idx_to_append1 = np.where(poly2poly[pid] == -1)[0][0]
idx_to_append2 = np.where(poly2poly[adj_pid] == -1)[0][0]
poly2poly[pid][idx_to_append1] = adj_pid
poly2poly[adj_pid][idx_to_append2] = pid
for vid in polys[pid]:
#vtx2poly
if(vtx2poly[vid][0] == -1):
vtx2poly[vid][0] = pid
else:
vtx2poly[vid].append(pid)
for eid, e in enumerate(edges_list[1:]):
#edge2edge
a = vtx2edge[e[0]].copy()
b = vtx2edge[e[1]].copy()
a.remove(eid)
b.remove(eid)
for el in b:
a.append(el)
edge2edge[eid] = a
edges = np.array(edges_list[1:])
edge2poly = edge2poly[:edges.shape[0]]
edge2edge = edge2edge[:edges.shape[0]]
return edges, vtx2vtx, vtx2edge, vtx2poly, edges, edge2edge, edge2poly, polys, poly2edge, poly2poly
@njit(Tuple((int64[:,::1], LT(LT(int64)), LT(LT(int64)), LT(LT(int64)), int64[:,::1], LT(LT(int64)), LT(LT(int64)), int64[:,::1],int64[:,::1], LT(LT(int64))))(int64, int64[:,::1]), cache=True)
def get_connectivity_info_volume_faces(num_vertices, polys):
vtx2vtx = L()
vtx2edge = L()
vtx2poly = L()
edge2edge = L()
edge2poly = L()
poly2poly = L()
for i in range(num_vertices):
tmp1 = L()
tmp2 = L()
tmp3 = L()
tmp1.append(-1)
tmp2.append(-1)
tmp3.append(-1)
vtx2vtx.append(tmp1)
vtx2edge.append(tmp2)
vtx2poly.append(tmp3)
for i in range(polys.shape[0]*4):
tmp_1 = L()
tmp_2 = L()
tmp_3 = L()
tmp_1.append(-1)
tmp_2.append(-1)
tmp_3.append(-1)
edge2edge.append(tmp_1)
edge2poly.append(tmp_2)
if(i < polys.shape[0]):
            poly2poly.append(tmp_3)
poly2edge = np.zeros((polys.shape[0], polys.shape[1]), dtype=np.int64)-1
edges_list = [(0,0)]
edges_map = dict()
edge_poly_map = dict()
for pid in range(polys.shape[0]):
edges_tmp = [[0,0]]
if(polys.shape[1] == 3):
edges_tmp = [[polys[pid][0], polys[pid][1]], [polys[pid][1], polys[pid][2]], [polys[pid][2], polys[pid][0]]]
else:
edges_tmp = [[polys[pid][0], polys[pid][1]], [polys[pid][1], polys[pid][2]], [polys[pid][2], polys[pid][3]], [polys[pid][3], polys[pid][0]]]
for e_idx in range(len(edges_tmp)):
edges_tmp[e_idx].sort()
e = (edges_tmp[e_idx][0], edges_tmp[e_idx][1])
eid = 0
not_checked = False
if e not in edges_map:
eid = len(edges_list)-1
edges_list.append(e)
edges_map[e] = eid
not_checked = True
else:
eid = edges_map[e]
adj_pid = -1
if eid in edge_poly_map:
adj_pid = edge_poly_map[eid]
else:
edge_poly_map[eid] = pid
if not_checked:
#vtx2vtx
if(vtx2vtx[e[0]][0] == -1):
vtx2vtx[e[0]][0] = e[1]
else:
vtx2vtx[e[0]].append(e[1])
if(vtx2vtx[e[1]][0] == -1):
vtx2vtx[e[1]][0] = e[0]
else:
vtx2vtx[e[1]].append(e[0])
#vtx2edge
if(vtx2edge[e[0]][0] == -1):
vtx2edge[e[0]][0] = eid
else:
vtx2edge[e[0]].append(eid)
if(vtx2edge[e[1]][0] == -1):
vtx2edge[e[1]][0] = eid
else:
vtx2edge[e[1]].append(eid)
#edge2poly
if(edge2poly[eid][0] == -1):
edge2poly[eid][0] = pid
else:
edge2poly[eid].append(pid)
#poly2edge
idx_to_append = np.where(poly2edge[pid] == -1)[0][0]
poly2edge[pid][idx_to_append] = eid
for vid in polys[pid]:
#vtx2poly
if(vtx2poly[vid][0] == -1):
vtx2poly[vid][0] = pid
else:
vtx2poly[vid].append(pid)
for eid, e in enumerate(edges_list[1:]):
#edge2edge
a = vtx2edge[e[0]].copy()
b = vtx2edge[e[1]].copy()
a.remove(eid)
b.remove(eid)
for el in b:
a.append(el)
edge2edge[eid] = a
for pid in range(polys.shape[0]):
adjs = L()
for eid in poly2edge[pid]:
for adj_pid in edge2poly[eid]:
if adj_pid != pid:
adjs.append(adj_pid)
poly2poly[pid] = adjs[1:]
edges = np.array(edges_list[1:])
edge2poly = edge2poly[:edges.shape[0]]
edge2edge = edge2edge[:edges.shape[0]]
return edges, vtx2vtx, vtx2edge, vtx2poly, edges, edge2edge, edge2poly, polys, poly2edge, poly2poly
#faces, edges, vtx2vtx, vtx2edge, vtx2face, vtx2poly, edges, edge2edge, edge2face, edge2poly, faces, face2edge, face2face, face2poly, polys, poly2edge, poly2face, poly2poly
@njit(Tuple((int64[:,::1], int64[:,::1], LT(LT(int64)), LT(LT(int64)), LT(LT(int64)), LT(LT(int64)), int64[:,::1], LT(LT(int64)), LT(LT(int64)), LT(LT(int64)), int64[:,::1], int64[:,::1], LT(LT(int64)), int64[:,::1], int64[:,::1], int64[:,::1], int64[:,::1], int64[:,::1]))(int64, int64[:,::1]), cache=True)
def get_connectivity_info_volume_tet(num_vertices, polys):
vtx2poly = L()
edge2poly = L()
for i in range(num_vertices):
tmp1 = L()
tmp1.append(-1)
vtx2poly.append(tmp1)
face2poly = np.zeros((polys.shape[0]*4, 2), dtype=np.int64)-1
poly2face = np.zeros((polys.shape[0], 4), dtype=np.int64)-1
poly2poly = np.zeros((polys.shape[0], 4), dtype=np.int64)-1
poly2edge = np.zeros((polys.shape[0], 6), dtype=np.int64)-1
faces_list = [(0,0,0)]
faces_map = dict()
face_poly_map = dict()
for pid in range(polys.shape[0]):
faces_tmp = [[polys[pid][0], polys[pid][2], polys[pid][1]], [polys[pid][0], polys[pid][1], polys[pid][3]], [polys[pid][1], polys[pid][2], polys[pid][3]], [polys[pid][0], polys[pid][3], polys[pid][2]] ]
for f_idx in range(len(faces_tmp)):
faces_tmp[f_idx].sort()
f = (faces_tmp[f_idx][0], faces_tmp[f_idx][1], faces_tmp[f_idx][2])
fid = 0
if f not in faces_map:
fid = len(faces_list)-1
faces_list.append(f)
faces_map[f] = fid
else:
fid = faces_map[f]
adj_pid = -1
if fid in face_poly_map:
adj_pid = face_poly_map[fid]
else:
face_poly_map[fid] = pid
#face2poly
idx_to_append = np.where(face2poly[fid] == -1)[0][0]
face2poly[fid][idx_to_append] = pid
#poly2face
idx_to_append = np.where(poly2face[pid] == -1)[0][0]
poly2face[pid][idx_to_append] = fid
if adj_pid != -1:
idx_to_append1 = np.where(poly2poly[pid] == -1)[0][0]
idx_to_append2 = np.where(poly2poly[adj_pid] == -1)[0][0]
poly2poly[pid][idx_to_append1] = adj_pid
poly2poly[adj_pid][idx_to_append2] = pid
for vid in polys[pid]:
#vtx2poly
if(vtx2poly[vid][0] == -1):
vtx2poly[vid][0] = pid
else:
vtx2poly[vid].append(pid)
faces = np.array(faces_list[1:])
face2poly = face2poly[:faces.shape[0]]
edges, vtx2vtx, vtx2edge, vtx2face, edges, edge2edge, edge2face, faces, face2edge, face2face = get_connectivity_info_volume_faces(num_vertices, faces)
for i in range(edges.shape[0]):
tmp_2 = L()
tmp_2.append(-1)
edge2poly.append(tmp_2)
for pid in range(polys.shape[0]):
adj_edges = []
for fid in poly2face[pid]:
for eid in face2edge[fid]:
adj_edges.append(eid)
unique = np.unique(np.array(adj_edges))
poly2edge[pid] = unique
for eid in unique:
if(edge2poly[eid][0] == -1):
edge2poly[eid][0] = pid
else:
edge2poly[eid].append(pid)
return faces, edges, vtx2vtx, vtx2edge, vtx2face, vtx2poly, edges, edge2edge, edge2face, edge2poly, faces, face2edge, face2face, face2poly, polys, poly2edge, poly2face, poly2poly
@njit(Tuple((int64[:,::1], int64[:,::1], LT(LT(int64)), LT(LT(int64)), LT(LT(int64)), LT(LT(int64)), int64[:,::1], LT(LT(int64)), LT(LT(int64)), LT(LT(int64)), int64[:,::1], int64[:,::1], LT(LT(int64)), int64[:,::1], int64[:,::1], int64[:,::1], int64[:,::1], int64[:,::1]))(int64, int64[:,::1]), cache=True)
def get_connectivity_info_volume_hex(num_vertices, polys):
vtx2poly = L()
edge2poly = L()
for i in range(num_vertices):
tmp1 = L()
tmp1.append(-1)
vtx2poly.append(tmp1)
face2poly = np.zeros((polys.shape[0]*6, 2), dtype=np.int64)-1
poly2face = np.zeros((polys.shape[0], 6), dtype=np.int64)-1
poly2poly = np.zeros((polys.shape[0], 6), dtype=np.int64)-1
poly2edge = np.zeros((polys.shape[0], 12), dtype=np.int64)-1
faces_list = [(0,0,0,0)]
faces_map = dict()
face_poly_map = dict()
for pid in range(polys.shape[0]):
faces_tmp = [[polys[pid][0], polys[pid][3], polys[pid][2], polys[pid][1]],
[polys[pid][1], polys[pid][2], polys[pid][6], polys[pid][5]],
[polys[pid][4], polys[pid][5], polys[pid][6], polys[pid][7]],
[polys[pid][3], polys[pid][0], polys[pid][4], polys[pid][7]],
[polys[pid][0], polys[pid][1], polys[pid][5], polys[pid][4]],
[polys[pid][2], polys[pid][3], polys[pid][7], polys[pid][6]]]
for f_idx in range(len(faces_tmp)):
face_original = (faces_tmp[f_idx][0], faces_tmp[f_idx][1], faces_tmp[f_idx][2], faces_tmp[f_idx][3])
faces_tmp[f_idx].sort()
f = (faces_tmp[f_idx][0], faces_tmp[f_idx][1], faces_tmp[f_idx][2], faces_tmp[f_idx][3])
fid = 0
if f not in faces_map:
fid = len(faces_list)-1
faces_list.append(face_original)
faces_map[f] = fid
else:
fid = faces_map[f]
adj_pid = -1
if fid in face_poly_map:
adj_pid = face_poly_map[fid]
else:
face_poly_map[fid] = pid
#face2poly
idx_to_append = np.where(face2poly[fid] == -1)[0][0]
face2poly[fid][idx_to_append] = pid
#poly2face
idx_to_append = np.where(poly2face[pid] == -1)[0][0]
poly2face[pid][idx_to_append] = fid
if adj_pid != -1:
idx_to_append1 = np.where(poly2poly[pid] == -1)[0][0]
idx_to_append2 = np.where(poly2poly[adj_pid] == -1)[0][0]
poly2poly[pid][idx_to_append1] = adj_pid
poly2poly[adj_pid][idx_to_append2] = pid
for vid in polys[pid]:
#vtx2poly
if(vtx2poly[vid][0] == -1):
vtx2poly[vid][0] = pid
else:
vtx2poly[vid].append(pid)
faces = np.array(faces_list[1:])
face2poly = face2poly[:faces.shape[0]]
edges, vtx2vtx, vtx2edge, vtx2face, edges, edge2edge, edge2face, faces, face2edge, face2face = get_connectivity_info_volume_faces(num_vertices, faces)
for i in range(edges.shape[0]):
tmp_2 = L()
tmp_2.append(-1)
edge2poly.append(tmp_2)
for pid in range(polys.shape[0]):
adj_edges = []
for fid in poly2face[pid]:
for eid in face2edge[fid]:
adj_edges.append(eid)
unique = np.unique(np.array(adj_edges))
poly2edge[pid] = unique
for eid in unique:
if(edge2poly[eid][0] == -1):
edge2poly[eid][0] = pid
else:
edge2poly[eid].append(pid)
return faces, edges, vtx2vtx, vtx2edge, vtx2face, vtx2poly, edges, edge2edge, edge2face, edge2poly, faces, face2edge, face2face, face2poly, polys, poly2edge, poly2face, poly2poly
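# Per-face unit normals from the cross product of two edge vectors; for quads the
# second edge runs from vertex 0 to vertex 2 of the face.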
def compute_face_normals(vertices, faces, quad=False):
e1_v = vertices[faces][:,1] - vertices[faces][:,0]
e2_v = vertices[faces][:,2] - vertices[faces][:,1]
if quad:
e2_v = vertices[faces][:,2] - vertices[faces][:,0]
face_normals = np.cross(e1_v, e2_v)
norm = np.linalg.norm(face_normals, axis=1)
norm.shape = (-1,1)
return face_normals / norm
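# Vertex normals: average the normals of the faces incident to each vertex, then renormalize.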
@njit(float64[:,::1](float64[:,::1], LT(LT(int64))), cache=True)
def compute_vertex_normals(face_normals, vtx2face):
mean = np.zeros((1, 3), dtype=np.float64)
vtx_normals = np.zeros((len(vtx2face),3), dtype=np.float64)
idx = 0
for v2f in vtx2face:
for v in v2f:
mean = mean+face_normals[v]
mean/=len(v2f)
vtx_normals[idx] = mean
mean-=mean
idx+=1
norm = np.sqrt(np.sum(vtx_normals**2, axis=1))
norm=np.reshape(norm, (-1,1))
return vtx_normals/norm
def _compute_three_vertex_normals(tri_soup):
    with np.errstate(divide='ignore', invalid='ignore'):
        tmp = tri_soup[0::3]
        a = tri_soup[1::3] - tmp
        b = tri_soup[2::3] - tmp
        cross = np.cross(a, b)
        face_normals = cross / np.linalg.norm(cross, axis=1, keepdims=True)
        vtx_normals = np.repeat(face_normals, 3, axis=0)
    return vtx_normals
|
StarcoderdataPython
|
6409843
|
<reponame>konung-yaropolk/pyABF
"""
This file lists the size of every structure in the structures file.
sectionSizes = {'HeaderV1': 1678, 'HeaderV2': 76, 'SectionMap': 216,
'ProtocolSection': 208, 'ADCSection': 82, 'DACSection': 132,
'EpochPerDACSection': 30, 'EpochSection': 4, 'TagSection': 64,
'StringsSection': 0, 'StringsIndexed': 0}
"""
import struct
import os
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
PATH_SRC = os.path.abspath(PATH_HERE+"/../../src/")
with open(PATH_SRC+"/pyabf/structures.py") as f:
lines = f.readlines()
bytePositions = []
className = None
for line in lines:
line = line.strip()
if line.startswith("class") and line.endswith(":"):
className = line.replace("class ", "").replace(":", "").strip()
continue
if not className:
continue
if className == "StringsSection":
continue
if "readStruct(fb" in line:
if line.count(",")==2:
bytePos = int(line.split(",")[-1][:-1])
else:
bytePos = None
varName = line.split("=")[0].strip().replace("[i]","").replace("self.","")
structFormat = line.split(",")[1].strip().replace(")", "")
structFormat = eval(structFormat)
byteSize = struct.calcsize(structFormat)
print(className, varName, byteSize, line.count(","))
|
StarcoderdataPython
|
11385580
|
<gh_stars>0
# Generated by Django 2.2.24 on 2022-01-07 08:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hoodapp', '0023_auto_20220107_1111'),
]
operations = [
migrations.RenameField(
model_name='neighborhood',
old_name='admin',
new_name='user',
),
]
|
StarcoderdataPython
|
8155854
|
# script to make graph of connected components in a volume
import argparse
import logging
import pickle
from collections import defaultdict
from typing import Dict, Set, Tuple, Union
import cupy as cp
import cupyx.scipy.ndimage as cpnd
import h5py
import numpy as np
VolumeFile = Tuple[str, str]
ArrayTypes = Union[np.ndarray, cp.ndarray, h5py.Dataset, VolumeFile]
TILE_SIZE = np.array([500, 500, 500])
EROSION_STEPS = 10
FOREGROUND_THRESHOLD = 0.5
def read_h5(volume_file: str, data_label: str):
h5file = h5py.File(volume_file)
return h5file[data_label]
def get_unique(in_array: cp.ndarray):
"""
    Find unique values in an array. The array is assumed to have shape (x, y), and the goal is
    to find all unique values over y (i.e. reduce the y dimension)
:param cp.ndarray in_array: Input array of shape (x,y), which will be reduced over the y dimension
:returns: Array of unique values of in_array, shape (x, unique_y)
"""
sorted = cp.sort(in_array, axis=1)
new_values = (sorted[:, 1:] != sorted[:, :-1]).any(axis=0) # shape (y,)
# add first value as a new value
new_values_full = cp.concatenate([cp.array([1], dtype="bool"), new_values])
chosen_newvalues = sorted[:, new_values_full]
return chosen_newvalues
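# Illustrative example (assumes a CUDA device, since cupy arrays are used). For column
# pairs that are already grouped, e.g.
#   in_array = cp.array([[1, 1, 2, 2, 3],
#                        [4, 4, 5, 6, 6]])
# get_unique(in_array) keeps one column per distinct pair:
#   [[1, 2, 2, 3],
#    [4, 5, 6, 6]]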
def process_tile(
tile_idx: Tuple[int, int, int],
h5array: h5py.Dataset,
assoc_map: defaultdict,
tile_edges: Dict,
tile_components: Dict,
):
"""
    Find components for the given tile. The tile boundary is extended past the reference region
    in order to allow an accurate erosion result within the reference region. Erosion is assumed
    to use a 3x3x3 filter, which requires 1 pixel of surrounding region for each erosion step.
"""
tile_idx_arr = np.array(tile_idx)
reference_start = tile_idx_arr * TILE_SIZE
reference_max = reference_start + TILE_SIZE
extended_start = reference_start - EROSION_STEPS
extended_max = reference_max + EROSION_STEPS
# get extended region from data. to handle cases at the edges where the extended
# region is not populated by data, create a zero-filled array and then copy the
# defined region
extended_size = TILE_SIZE + 2 * EROSION_STEPS
extended_region = cp.zeros(extended_size, dtype=bool)
valid_start = np.maximum(extended_start, 0)
source_size = np.array(h5array.shape[1:])
valid_end = np.minimum(extended_max, source_size)
valid_data_raw = cp.array(
h5array[
0,
valid_start[0] : valid_end[0],
valid_start[1] : valid_end[1],
valid_start[2] : valid_end[2],
]
)
valid_data_bool = cp.greater_equal(valid_data_raw, FOREGROUND_THRESHOLD)
insert_start = np.maximum(-extended_start, 0)
insert_end = extended_size - np.maximum(extended_max - source_size, 0)
extended_region[
insert_start[0] : insert_end[0],
insert_start[1] : insert_end[1],
insert_start[2] : insert_end[2],
] = valid_data_bool
# produce eroded results
current_region = extended_region
erosion_results = [current_region]
for _ in range(EROSION_STEPS):
eroded_region = cpnd.binary_erosion(current_region)
erosion_results.append(eroded_region)
current_region = eroded_region
# find connected components for each erosion level
label_results = [
cpnd.label(erosion_result) for erosion_result in erosion_results
]
# find size and bounds of each component, and relationships between connected components in each level
tile_component_details = []
prev_label_array = None
for label_array, label_count in label_results:
level_component_details = {}
for label_num in range(1, label_count + 1):
value_mask = label_array == label_num
# find bounds
xvals, yvals, zvals = cp.where(value_mask)
bounds = cp.stack(
[
cp.min(xvals),
cp.max(xvals) + 1,
cp.min(yvals),
cp.max(yvals) + 1,
cp.min(zvals),
cp.max(zvals) + 1,
]
).get()
center = cp.array(
[
cp.mean(xvals),
cp.mean(yvals),
cp.mean(zvals),
]
).get()
size = int(cp.sum(value_mask))
# find parent as the component label in the previous erosion level. there should
# always be a unique parent component that covers all defined pixels for this component
# choose an arbitrary position within this region
if prev_label_array is None:
parent_component_num = None
else:
parent_component_num = prev_label_array[
xvals[0], yvals[0], zvals[0]
]
level_component_details[label_num] = (
bounds,
center,
size,
parent_component_num,
)
prev_label_array = label_array
tile_component_details.append(level_component_details)
tile_components[tile_idx] = tile_component_details
# find connections between tiles by comparing with preceding neighbours
    for assoc in ["x", "y", "z"]:
        if assoc == "x":
            if tile_idx[0] == 0:
                continue
            prev_tile = tuple(tile_idx_arr - [1, 0, 0])
        elif assoc == "y":
            if tile_idx[1] == 0:
                continue
            prev_tile = tuple(tile_idx_arr - [0, 1, 0])
        elif assoc == "z":
            if tile_idx[2] == 0:
                continue
            prev_tile = tuple(tile_idx_arr - [0, 0, 1])
# get surfaces for matching previous tile, and remove from dict as it will no longer
# be needed
tile_pair = (prev_tile, tile_idx)
prev_surfaces = tile_edges.pop(tile_pair)
# level_associations = []
for level_num, ((label_array, label_num), prev_surface) in enumerate(
zip(label_results, prev_surfaces)
):
if assoc == "x":
this_surface = label_array[0, :, :]
elif assoc == "y":
this_surface = label_array[:, 0, :]
elif assoc == "z":
this_surface = label_array[:, :, 0]
joined_surfaces = cp.stack(
[prev_surface, this_surface]
) # shape (2, y, z)
joined_surfaces_flat = cp.reshape(joined_surfaces, (2, -1))
unique_pairs = get_unique(joined_surfaces_flat)
zero_mask = (unique_pairs == 0).any(axis=0)
nonzero_pairs = unique_pairs[
:, ~zero_mask
].T.get() # shape (unique_nonzero_vals, 2)
# find association pairs and record in bi-directional association map
for assoc_pair in nonzero_pairs:
# if (assoc_pair == 0).any():
# continue
prev_key = (prev_tile, level_num, int(assoc_pair[0]))
this_key = (tile_idx, level_num, int(assoc_pair[1]))
assoc_map[this_key].add(prev_key)
assoc_map[prev_key].add(this_key)
# level_associations.append(unique_pairs)
# # record associations
# component_associations[tile_pair] = level_associations
# record surfaces for following neighbours
neighbour_surfaces_x, neighbour_surfaces_y, neighbour_surfaces_z = (
[],
[],
[],
)
    for label_array, label_num in label_results:
        neighbour_surfaces_x.append(label_array[-1, :, :])
        neighbour_surfaces_y.append(label_array[:, -1, :])
        neighbour_surfaces_z.append(label_array[:, :, -1])
tile_edges[
(tile_idx, tuple(tile_idx_arr + [1, 0, 0]))
] = neighbour_surfaces_x
tile_edges[
(tile_idx, tuple(tile_idx_arr + [0, 1, 0]))
] = neighbour_surfaces_y
tile_edges[
(tile_idx, tuple(tile_idx_arr + [0, 0, 1]))
] = neighbour_surfaces_z
def find_volume_components(
volume_file: str,
outfile: str,
data_label: str,
):
"""
Find connected components at various erosion levels in the given volume
"""
# open file as HDF5
logging.info(
"Opening volume file %s with data label %s" % (volume_file, data_label)
)
h5array = read_h5(volume_file, data_label)
# initialise tile association maps
# component_associations maps from a tuple (prev_tile_idx, next_tile_idx) to a list over
# erosion levels, each an array of shape (2, connection_pairs) representing components that
# are connected between tiles.
# assoc_map maps from a tuple (tile_idx, level, id) to a set of connected tiles
# (other_tile_idx, level, other_id), as a bi-directional map of connections
# tile_edges is a map from a tuple (prev_tile_idx, next_tile_idx) to a list over
# erosion levels, each an array of shape (tile_size, tile_size) representing the surface of
# tile prev_tile_idx that adjoins tile next_tile_idx
# tile_components is a map from tile_idx to a list over erosion levels, each a dict mapping
# from each label number to a tuple of (bounds, center, size, parent_num). bounds and center
# are defined within the tile, size is the number of covered voxels within the tile, and
# parent_num is the component number in the previous erosion level within the tile (or None if
# erosion level is zero).
# component_associations = {}
assoc_map = defaultdict(set)
tile_edges = {}
tile_components = {}
# step over individual tiles and collect properties
dims = np.array(h5array.shape[1:])
tile_steps = np.ceil(dims / TILE_SIZE).astype("int")
for tile_x in range(tile_steps[0]):
for tile_y in range(tile_steps[1]):
            for tile_z in range(tile_steps[2]):
tile_idx = (tile_x, tile_y, tile_z)
# process tile
process_tile(
tile_idx,
h5array,
assoc_map,
tile_edges,
tile_components,
)
# combine results
find_combined_components(tile_components, assoc_map, tile_steps, outfile)
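# Worked example of the tiling arithmetic above (hypothetical shape): for a volume of
# shape (1200, 800, 500) and TILE_SIZE (500, 500, 500), tile_steps is
# ceil([1200, 800, 500] / 500) = [3, 2, 1], i.e. 6 tiles are processed in total.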
def find_combined_components(
tile_components: Dict,
assoc_map: defaultdict,
tile_steps: np.ndarray,
outfile: str,
):
"""
Given a dictionary representing components within individual tiles, and associations between
components in different tiles, find global components by combining associated components from
different tiles and defining based on merged properties (eg size, center) in global coordinates.
Save results in output directory
:param Dict tile_components: Map from tile_idx to a list over erosion levels, each a dict mapping
from each label number to a tuple of (bounds, center, size, parent_num). bounds and center
are defined within the tile, size is the number of covered voxels within the tile, and
parent_num is the component number in the previous erosion level within the tile (or None if
erosion level is zero).
    :param defaultdict assoc_map: Bi-directional map from a tuple (tile_idx, erosion_level, local_id)
        to the set of keys (other_tile_idx, erosion_level, other_id) of components in neighbouring
        tiles that are connected to it.
:param np.ndarray tile_steps: Number of tiles, shape (x, y, z)
:param str outfile: Output file to write global component results (as pickle)
"""
# global_components is a list over erosion levels, each a dict mapping from global component id
# to a tuple of (bounds, center, size, global_parent_num)
    global_components = [{} for _ in range(EROSION_STEPS + 1)]  # independent dict per erosion level
# global_id_map is a map from a tuple of (tile_idx, erosion_level, local_id) to global_id
global_id_map = {}
# first make bi-directional graph of connected components between tiles, so that when one
# component is examined all connected components in neighbouring tiles can also be identified
# component_connections is a map from a tuple (tile_idx, erosion_level, id) to a set of tuples
# (tile_idx, erosion_level, id) of all connected neighbours. this is defined bi-directionally
# define next available global component ids for each level. start ids at 1 (0 is background)
next_global_ids = [1] * (EROSION_STEPS + 1)
# step over tiles and local components, and map each to global components, including merging
    for tile_x in range(tile_steps[0]):
        for tile_y in range(tile_steps[1]):
            for tile_z in range(tile_steps[2]):
tile_id = (tile_x, tile_y, tile_z)
tile_level_components = tile_components[tile_id]
for level_num, level_components in enumerate(
tile_level_components
):
level_global_components = global_components[level_num]
for (
label_number,
local_component_details,
) in level_components.items():
# check if this component is already associated with a global component
component_key = (tile_id, level_num, label_number)
if component_key in global_id_map:
# tile component is associated with an existing global component, merge into
# the global component
global_id = global_id_map[component_key]
merged_global_component = merge_components(
local_component_details,
level_global_components[global_id],
)
# replace with merged global component
level_global_components[
global_id
] = merged_global_component
else:
# this component is not associated with an existing global component.
# create a new global component and mark all connected components as
# associated
# find global component of parent of this component in the previous
# erosion level. assume this is defined.
global_parent_id = None
(
bounds,
center,
size,
local_parent,
) = local_component_details
if local_parent is not None:
parent_local_key = (
tile_id,
level_num - 1,
local_parent,
)
global_parent_id = global_id_map[
parent_local_key
]
global_component_details = (
bounds,
center,
size,
global_parent_id,
)
global_id = next_global_ids[level_num]
next_global_ids[level_num] += 1
level_global_components[
global_id
] = global_component_details
# find associated components and mark as associated with this global
# component
found_keys = set()
find_all_associations(
assoc_map, component_key, found_keys
)
for assoc_key in found_keys:
global_id_map[assoc_key] = global_id
# level components
# erosion level
# z
# y
# x
# write out results to file
components_per_level = [
len(level_components) for level_components in global_components
]
logging.info(
"Found components per erosion level: %s" % components_per_level
)
logging.info("Writing output to %s")
write_data = global_components
with open(outfile, "wb") as out_handle:
pickle.dump(write_data, out_handle)
def merge_components(local_component, target_component):
"""
Find resulting component from merging the first (local) component into the second. The resulting
component will maintain the parent identifier of the target component.
"""
local_bounds, local_center, local_size, local_parent = local_component
target_bounds, target_center, target_size, target_parent = target_component
merged_bounds = [
min(local_bounds[0], target_bounds[0]),
max(local_bounds[1], target_bounds[1]),
min(local_bounds[2], target_bounds[2]),
max(local_bounds[3], target_bounds[3]),
min(local_bounds[4], target_bounds[4]),
max(local_bounds[5], target_bounds[5]),
]
merged_size = local_size + target_size
# use weighted averaging to find center. the center point is not guaranteed to occur at a
# position containing the component (eg if it is "C" shape)
merged_center = (
local_center * local_size + target_center * target_size
) / merged_size
return merged_bounds, merged_center, merged_size, target_parent
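# Worked example (hypothetical numbers): merging a local component of size 10 centred
# at x=0 into a target of size 30 centred at x=4 gives merged_size = 40 and a weighted
# centre of (10*0 + 30*4) / 40 = 3.0 along x; merged_bounds is the element-wise union
# of the two bounding boxes.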
def find_all_associations(
assoc_map: defaultdict, component_key: Tuple, found_keys: Set
):
"""
Find all associated components from the given component key and populate found_keys. Recursive
function.
"""
out_assocs = assoc_map[component_key]
for assoc in out_assocs:
if assoc not in found_keys:
found_keys.add(assoc)
find_all_associations(assoc_map, assoc, found_keys)
def init_logging():
""" Initialise logging """
root = logging.getLogger()
root.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
root.addHandler(handler)
def main():
init_logging()
parser = argparse.ArgumentParser()
parser.add_argument(
"-v", "--volume_file", help="Volume file to read (HDF5)", required=True
)
parser.add_argument(
"-o",
"--output",
help="Output file",
default="graph_output.pkl",
required=False,
)
parser.add_argument(
"-l",
"--data_label",
help="Data array label in volume file",
default="data",
required=False,
)
args = parser.parse_args()
logging.info("Find graph of components in volume")
find_volume_components(
args.volume_file,
args.output,
args.data_label,
)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1819953
|
"""
Max-p regions algorithm
Source: <NAME>, <NAME>, and <NAME> (2020) "Efficient
regionalization for spatially explicit neighborhood delineation." International
Journal of Geographical Information Science. Accepted 2020-04-12.
"""
from ..BaseClass import BaseSpOptHeuristicSolver
from .base import (w_to_g, move_ok, ok_moves, region_neighbors, _centroid,
_closest, _seeds, is_neighbor)
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
import pandas as pd
import geopandas as gp
import time
import numpy as np
from copy import deepcopy
from scipy.sparse.csgraph import connected_components
ITERCONSTRUCT=999
ITERSA=10
def maxp(gdf, w, attrs_name, threshold_name, threshold, top_n, max_iterations_construction=ITERCONSTRUCT,
max_iterations_sa=ITERSA, verbose=True):
"""
Arguments
---------
gdf: geodataframe
w: pysal W
attrs_name: list of strings for attribute names (cols of gdf)
threshold_name: string (name of threshold variable)
threshold: numeric
value for threshold
top_n: int
Max number of candidate regions for enclave assignment
max_iterations_construction: int
max number of iterations for construction phase
    max_iterations_sa: int
               max number of iterations for customized simulated annealing
    verbose: boolean
               whether to print progress information (default True)
Returns
-------
max_p: int
number of regions
labels: array
region ids for observations
"""
attr = gdf[attrs_name].values
threshold_array = gdf[threshold_name].values
distance_matrix = squareform(pdist(attr, metric='cityblock'))
n,k = attr.shape
arr = np.arange(n)
max_p, rl_list = construction_phase(arr, attr, threshold_array,
distance_matrix, w, threshold, top_n,
max_iterations_construction)
if verbose:
print("max_p: ", max_p)
print('number of good partitions:', len(rl_list))
alpha = 0.998
tabuLength = 10
max_no_move = attr.size
best_obj_value = np.inf
best_label = None
best_fn = None
best_sa_time = np.inf
for irl, rl in enumerate(rl_list):
label, regionList, regionSpatialAttr = rl
if verbose:
print(irl)
for saiter in range(max_iterations_sa):
sa_start_time = time.time()
finalLabel, finalRegionList, finalRegionSpatialAttr = performSA(
label, regionList, regionSpatialAttr, threshold_array,
w, distance_matrix, threshold, alpha, tabuLength, max_no_move)
sa_end_time = time.time()
totalWithinRegionDistance = calculateWithinRegionDistance(
finalRegionList, distance_matrix)
if verbose:
print("totalWithinRegionDistance after SA: ")
print(totalWithinRegionDistance)
if totalWithinRegionDistance < best_obj_value:
best_obj_value = totalWithinRegionDistance
best_label = finalLabel
best_fn = irl
best_sa_time = sa_end_time - sa_start_time
if verbose:
print("best objective value:")
print(best_obj_value)
return max_p, best_label
def construction_phase(arr,
attr,
threshold_array,
distance_matrix,
weight,
spatialThre,
random_assign_choice,
max_it=999):
labels_list = []
pv_list = []
max_p = 0
maxp_labels = None
maxp_regionList = None
maxp_regionSpatialAttr = None
for _ in range(max_it):
labels = [0] * len(threshold_array)
C = 0
regionSpatialAttr = {}
enclave = []
regionList = {}
np.random.shuffle(arr)
labeledID = []
for arr_index in range(0, len(threshold_array)):
P = arr[arr_index]
if not (labels[P] == 0):
continue
NeighborPolys = deepcopy(weight.neighbors[P])
            if len(NeighborPolys) == 0:
labels[P] = -1
else:
C += 1
labeledID, spatialAttrTotal = growClusterForPoly(
labels, threshold_array, P, NeighborPolys, C,
weight, spatialThre)
print('spatialAttrTotal, LabelID ', (spatialAttrTotal, labeledID))
if spatialAttrTotal < spatialThre:
enclave.extend(labeledID)
else:
regionList[C] = labeledID
regionSpatialAttr[C] = spatialAttrTotal
num_regions = len(regionList)
for i, l in enumerate(labels):
if l == -1:
enclave.append(i)
if num_regions < max_p:
continue
else:
max_p = num_regions
maxp_labels, maxp_regionList, maxp_regionSpatialAttr = assignEnclave(
enclave,
labels,
regionList,
regionSpatialAttr,
threshold_array,
weight,
distance_matrix,
random_assign=random_assign_choice)
pv_list.append(max_p)
labels_list.append(
[maxp_labels, maxp_regionList, maxp_regionSpatialAttr])
realLabelsList = []
realmaxpv = max(pv_list)
for ipv, pv in enumerate(pv_list):
if pv == realmaxpv:
realLabelsList.append(labels_list[ipv])
return [realmaxpv, realLabelsList]
def growClusterForPoly(labels, threshold_array, P, NeighborPolys, C,
weight, spatialThre):
labels[P] = C
labeledID = [P]
spatialAttrTotal = threshold_array[P]
i = 0
while i < len(NeighborPolys):
if spatialAttrTotal >= spatialThre:
break
Pn = NeighborPolys[i]
if labels[Pn] == 0:
labels[Pn] = C
labeledID.append(Pn)
spatialAttrTotal += threshold_array[Pn]
if spatialAttrTotal < spatialThre:
PnNeighborPolys = weight.neighbors[Pn]
for pnn in PnNeighborPolys:
if not pnn in NeighborPolys:
NeighborPolys.append(pnn)
i += 1
return labeledID, spatialAttrTotal
def assignEnclave(enclave,
labels,
regionList,
regionSpatialAttr,
threshold_array,
weight,
distance_matrix,
random_assign=1):
enclave_index = 0
while len(enclave) > 0:
ec = enclave[enclave_index]
ecNeighbors = weight.neighbors[ec]
        minDistance = np.inf
assignedRegion = 0
ecNeighborsList = []
ecTopNeighborsList = []
for ecn in ecNeighbors:
if ecn in enclave:
continue
rm = np.array(regionList[labels[ecn]])
totalDistance = distance_matrix[ec, rm].sum()
ecNeighborsList.append((ecn, totalDistance))
ecNeighborsList = sorted(ecNeighborsList, key=lambda tup: tup[1])
top_num = min([len(ecNeighborsList), random_assign])
if top_num > 0:
ecn_index = np.random.randint(top_num)
assignedRegion = labels[ecNeighborsList[ecn_index][0]]
if assignedRegion == 0:
enclave_index += 1
else:
labels[ec] = assignedRegion
regionList[assignedRegion].append(ec)
regionSpatialAttr[assignedRegion] += threshold_array[ec]
del enclave[enclave_index]
enclave_index = 0
return [
deepcopy(labels),
deepcopy(regionList),
deepcopy(regionSpatialAttr)
]
def calculateWithinRegionDistance(regionList, distance_matrix):
totalWithinRegionDistance = 0
for k, v in regionList.items():
nv = np.array(v)
regionDistance = distance_matrix[nv, :][:, nv].sum() / 2
totalWithinRegionDistance += regionDistance
return totalWithinRegionDistance
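# Example of the pair counting above: for a region [i, j, k], the sub-matrix sum includes
# both d(i, j) and d(j, i), so dividing by 2 counts each unordered pair once (the distance
# matrix is symmetric with a zero diagonal).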
def pickMoveArea(labels, regionLists, regionSpatialAttrs,
threshold_array, weight, distance_matrix, threshold):
potentialAreas = []
labels_array = np.array(labels)
for k, v in regionSpatialAttrs.items():
rla = np.array(regionLists[k])
rasa = threshold_array[rla]
lostSA = v - rasa
pas_indices = np.where(lostSA > threshold)[0]
if pas_indices.size > 0:
for pasi in pas_indices:
leftAreas = np.delete(rla, pasi)
ws = weight.sparse
cc = connected_components(ws[leftAreas, :][:, leftAreas])
if cc[0] == 1:
potentialAreas.append(rla[pasi])
else:
continue
return potentialAreas
def checkMove(poa, labels, regionLists, threshold_array, weight,
distance_matrix, threshold):
poaNeighbor = weight.neighbors[poa]
donorRegion = labels[poa]
rm = np.array(regionLists[donorRegion])
lostDistance = distance_matrix[poa, rm].sum()
potentialMove = None
    minAddedDistance = np.inf
for poan in poaNeighbor:
recipientRegion = labels[poan]
if donorRegion != recipientRegion:
rm = np.array(regionLists[recipientRegion])
addedDistance = distance_matrix[poa, rm].sum()
if addedDistance < minAddedDistance:
minAddedDistance = addedDistance
potentialMove = (poa, donorRegion, recipientRegion)
return [lostDistance, minAddedDistance, potentialMove]
def performSA(initLabels, initRegionList, initRegionSpatialAttr,
threshold_array, weight, distance_matrix, threshold,
alpha, tabuLength, max_no_move):
t = 1
ni_move_ct = 0
make_move_flag = False
tabuList = []
potentialAreas = []
labels = deepcopy(initLabels)
regionLists = deepcopy(initRegionList)
regionSpatialAttrs = deepcopy(initRegionSpatialAttr)
while ni_move_ct <= max_no_move:
if len(potentialAreas) == 0:
potentialAreas = pickMoveArea(labels, regionLists,
regionSpatialAttrs,
threshold_array, weight,
distance_matrix, threshold)
if len(potentialAreas) == 0:
break
poa = potentialAreas[np.random.randint(len(potentialAreas))]
lostDistance, minAddedDistance, potentialMove = checkMove(
poa, labels, regionLists, threshold_array, weight,
distance_matrix, threshold)
        if potentialMove is None:
potentialAreas.remove(poa)
continue
diff = lostDistance - minAddedDistance
donorRegion = potentialMove[1]
recipientRegion = potentialMove[2]
if diff > 0:
make_move_flag = True
if (poa, recipientRegion, donorRegion) not in tabuList:
if len(tabuList) == tabuLength:
tabuList.pop(0)
tabuList.append((poa, recipientRegion, donorRegion))
ni_move_ct = 0
else:
ni_move_ct += 1
prob = np.exp(diff / t)
if prob > np.random.random() and potentialMove not in tabuList:
make_move_flag = True
else:
make_move_flag = False
potentialAreas.remove(poa)
if make_move_flag:
labels[poa] = recipientRegion
regionLists[donorRegion].remove(poa)
regionLists[recipientRegion].append(poa)
regionSpatialAttrs[donorRegion] -= threshold_array[poa]
regionSpatialAttrs[recipientRegion] += threshold_array[
poa]
impactedAreas = []
for pa in potentialAreas:
if labels[pa] == recipientRegion or labels[pa] == donorRegion:
impactedAreas.append(pa)
for pa in impactedAreas:
potentialAreas.remove(pa)
t = t * alpha
return [labels, regionLists, regionSpatialAttrs]
class MaxPHeuristic(BaseSpOptHeuristicSolver):
def __init__(self, gdf, w, attrs_name, threshold_name, threshold, top_n, max_iterations_construction=99, max_iterations_sa=ITERSA):
self.gdf = gdf
self.w = w
self.attrs_name = attrs_name
self.threshold_name = threshold_name
self.threshold = threshold
self.top_n = top_n
self.max_iterations_construction = max_iterations_construction
self.max_iterations_sa = max_iterations_sa
def solve(self):
max_p, label = maxp(self.gdf, self.w, self.attrs_name, self.threshold_name,
self.threshold, self.top_n, self.max_iterations_construction, self.max_iterations_sa)
self.labels_ = label
self.p = max_p
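# Minimal usage sketch (hypothetical data; assumes geopandas and libpysal are installed
# and that the shapefile has the named columns):
#
#   import geopandas as gpd
#   from libpysal.weights import Queen
#
#   gdf = gpd.read_file("tracts.shp")
#   w = Queen.from_dataframe(gdf)
#   model = MaxPHeuristic(gdf, w, attrs_name=["income", "density"],
#                         threshold_name="population", threshold=10000, top_n=2)
#   model.solve()
#   gdf["region"] = model.labels_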
|
StarcoderdataPython
|