id (string, 2–8 chars) | text (string, 16–264k chars) | dataset_id (string, 1 value)
---|---|---|
3314752
|
import math
import geopy.distance
class CategorySimilarityNaive(object):
""" Similarity measure for categorical attributes defined manually
"""
def compute_similarity(self, col, val1, val2, aggr_col):
# print(col, val1, val2)
if col not in self.cate_cols:
return 0
if col in self.vector_dict:
if val1 not in self.vector_dict[col] or val2 not in self.vector_dict[col]:
return 0
if col == 'community_area':
                # geopy 2.0 removed vincenty(); geodesic() is the drop-in replacement
                dist_km = geopy.distance.geodesic(self.vector_dict[col][val1], self.vector_dict[col][val2]).km
# print(val1, val2, 1.0 /
# math.exp(math.pow(dist_km,0.75)))
sim = 7.0 / math.exp(math.pow(dist_km,0.75))
if sim > 1:
sim = 1.0
return sim
            # Euclidean distance between the two embedding vectors
            dist = math.sqrt(sum(
                (a - b) * (a - b)
                for a, b in zip(self.vector_dict[col][val1], self.vector_dict[col][val2])
            ))
            return 1.0 / (1.0 + dist)
if val1 == val2:
return 1.0
else:
return 0.0
def __init__(self, cur, table_name, embedding_table_list=[]):
type_query = '''SELECT column_name, data_type
FROM information_schema.columns
WHERE table_name = '{}';'''.format(table_name)
cur.execute(type_query)
res = cur.fetchall()
print('Similarity Naive: ', res)
self.cate_cols = {}
self.vector_dict = {}
for (col, dt) in res:
if (dt == 'boolean' or dt.find('character') != -1) and (col != 'year' or len(embedding_table_list) == 0):
self.cate_cols[col] = True
if table_name.startswith('synthetic'):
self.cate_cols['beat'] = True
self.cate_cols['primary_type'] = True
self.cate_cols['community_area'] = True
self.cate_cols['district'] = True
self.cate_cols['description'] = True
self.cate_cols['location_description'] = True
self.cate_cols['ward'] = True
for (col, embedding_table_name) in embedding_table_list:
self.vector_dict[col] = {}
read_query = '''SELECT * FROM {} ;'''.format(embedding_table_name)
cur.execute(read_query)
res = cur.fetchall()
            for (x, lat, lon) in res:
                self.vector_dict[col][x] = (lat, lon)
def is_categorical(self, col):
return col in self.cate_cols
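# --- Illustrative usage (added; not part of the original snippet) ---
# A minimal sketch of how this class might be constructed, assuming a psycopg2
# cursor and hypothetical table/embedding-table names ('crime',
# 'community_area_centroids') that are placeholders, not from the original code.
# conn = psycopg2.connect("dbname=chicago")
# cur = conn.cursor()
# sim = CategorySimilarityNaive(cur, 'crime',
#                               embedding_table_list=[('community_area', 'community_area_centroids')])
# if sim.is_categorical('community_area'):
#     print(sim.compute_similarity('community_area', '8', '32', 'count'))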
|
StarcoderdataPython
|
11322399
|
<gh_stars>1-10
import pandas as pd
import numpy as np
import s3fs
def preprocess(s3_in_url,
s3_out_bucket,
s3_out_prefix,
delimiter=","):
"""Preprocesses data based on business logic
    - Reads the delimited file passed as s3_in_url and preprocesses data by filtering
      the long tail in the customer ratings data, i.e. keeps customers who have rated 5
      or more videos, and videos that have been rated by 10 or more customers
- Preprocessed data is then written to output
Args:
s3_in_url:
s3 url to the delimited file to be processed
e.g. s3://amazon-reviews-pds/tsv/reviews.tsv.gz
s3_out_bucket:
s3 bucket where preprocessed data will be staged
e.g. mybucket
s3_out_prefix:
s3 url prefix to stage preprocessed data to use later in the pipeline
e.g. amazon-reviews-pds/preprocess/
delimiter:
delimiter to be used for parsing the file. Defaults to "," if none
provided
Returns:
status of preprocessed data
Raises:
IOError: An error occurred accessing the s3 file
"""
try:
print("preprocessing data from {}".format(s3_in_url))
        # read s3 url into a pandas dataframe
        # pandas internally uses s3fs to read the s3 file directly
        # (note: `error_bad_lines` was replaced by `on_bad_lines="skip"` in pandas >= 1.3)
        df = pd.read_csv(s3_in_url, sep=delimiter, error_bad_lines=False)
# limit dataframe to customer_id, product_id, and star_rating
# `product_title` will be useful validating recommendations
df = df[['customer_id', 'product_id', 'star_rating', 'product_title']]
# clean out the long tail because most people haven't seen most videos,
# and people rate fewer videos than they actually watch
customers = df['customer_id'].value_counts()
products = df['product_id'].value_counts()
# based on data exploration only about 5% of customers have rated 5 or
# more videos, and only 25% of videos have been rated by 9+ customers
customers = customers[customers >= 5]
products = products[products >= 10]
print("# of rows before the long tail = {:10d}".format(df.shape[0]))
reduced_df = df \
.merge(pd.DataFrame({'customer_id': customers.index})) \
.merge(pd.DataFrame({'product_id': products.index}))
print("# of rows after the long tail = {:10d}".format(
reduced_df.shape[0]))
reduced_df = reduced_df.drop_duplicates(['customer_id', 'product_id'])
print("# of rows after removing duplicates = {:10d}".format(
reduced_df.shape[0]))
# recreate customer and product lists since there are customers with
# more than 5 reviews, but all of their reviews are on products with
# less than 5 reviews (and vice versa)
customers = reduced_df['customer_id'].value_counts()
products = reduced_df['product_id'].value_counts()
# sequentially index each user and item to hold the sparse format where
# the indices indicate the row and column in our ratings matrix
customer_index = pd.DataFrame({
'customer_id': customers.index,
'customer': np.arange(customers.shape[0])})
product_index = pd.DataFrame({
'product_id': products.index,
'product': np.arange(products.shape[0])})
reduced_df = reduced_df \
.merge(customer_index) \
.merge(product_index)
nb_customer = reduced_df['customer'].max() + 1
nb_products = reduced_df['product'].max() + 1
feature_dim = nb_customer + nb_products
print(nb_customer, nb_products, feature_dim)
product_df = reduced_df[['customer', 'product', 'star_rating']]
# split into train, validation and test data sets
train_df, validate_df, test_df = np.split(
product_df.sample(frac=1),
[int(.6*len(product_df)), int(.8*len(product_df))]
)
print("# of rows train data set = {:10d}".format(
train_df.shape[0]))
print("# of rows validation data set = {:10d}".format(
validate_df.shape[0]))
print("# of rows test data set = {:10d}".format(
test_df.shape[0]))
# select columns required for training the model
# excluding columns "customer_id", "product_id", "product_title" to
# keep files small
cols = ["customer", "product", "star_rating"]
train_df = train_df[cols]
validate_df = validate_df[cols]
test_df = test_df[cols]
# write output to s3 as delimited file
fs = s3fs.S3FileSystem(anon=False)
s3_out_prefix = s3_out_prefix[:-1] \
if s3_out_prefix[-1] == "/" else s3_out_prefix
s3_out_train = "s3://{}/{}/{}".format(
s3_out_bucket, s3_out_prefix, "train/train.csv")
print("writing training data to {}".format(s3_out_train))
with fs.open(s3_out_train, "wb") as f:
train_df.to_csv(f, sep=str(','), index=False)
s3_out_validate = "s3://{}/{}/{}".format(
s3_out_bucket, s3_out_prefix, "validate/validate.csv")
print("writing test data to {}".format(s3_out_validate))
with fs.open(s3_out_validate, "wb") as f:
validate_df.to_csv(f, sep=str(','), index=False)
s3_out_test = "s3://{}/{}/{}".format(
s3_out_bucket, s3_out_prefix, "test/test.csv")
print("writing test data to {}".format(s3_out_test))
with fs.open(s3_out_test, "wb") as f:
test_df.to_csv(f, sep=str(','), index=False)
print("preprocessing completed")
return "SUCCESS"
except Exception as e:
raise e
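# --- Illustrative usage (added; not part of the original snippet) ---
# A minimal sketch assuming the example paths from the docstring; bucket and
# prefix names are placeholders.
# if __name__ == "__main__":
#     status = preprocess(
#         "s3://amazon-reviews-pds/tsv/reviews.tsv.gz",
#         "mybucket",
#         "amazon-reviews-pds/preprocess/",
#         delimiter="\t")
#     print(status)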
|
StarcoderdataPython
|
25826
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
app = Flask(__name__)
# SQLAlchemy 1.4+ expects the "postgresql://" scheme rather than "postgres://"
app.config[
    'SQLALCHEMY_DATABASE_URI'] = 'postgresql://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
class UserData(db.Model):
__tablename__ = 'UserData'
Id = db.Column(db.Integer, primary_key=True)
Name = db.Column(db.String(64))
Description = db.Column(db.String(256))
CreateDate = db.Column(db.DateTime)
def __init__(self
, Name
, Description
, CreateDate
):
self.Name = Name
self.Description = Description
self.CreateDate = CreateDate
if __name__ == '__main__':
manager.run()
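# --- Illustrative usage (added; not part of the original snippet) ---
# With MigrateCommand registered on the Manager above, schema migrations are
# typically driven from the command line (the file name here is a placeholder):
#   python app.py db init
#   python app.py db migrate
#   python app.py db upgrade
# Note that flask_script is deprecated; newer Flask-Migrate versions expose the
# same commands through the built-in `flask db ...` CLI.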
|
StarcoderdataPython
|
5129644
|
<reponame>EUMSSI/EUMSSI-platform
#!/usr/bin/env python
import pymongo
import time
import datetime
from eumssi_converter import EumssiConverter
def transf_date(x):
if x.__class__==datetime.datetime:
return x
else:
try:
return datetime.datetime.strptime(x,'%Y-%m-%dT%H:%M:%S.000Z') #2010-09-16T19:05:54.000Z for example
except ValueError:
            # fall back to a bare date; "YYYY-MM-DD" is 10 characters, not 9
            return datetime.datetime.strptime(x[:10], "%Y-%m-%d")
def transf_lang(lang):
#by default, we only look for en videos, however,
return "en"
'''
mapping in the form [<original_fieldname>, <eumssi_fieldname>, <transform_function>, [<available_data>,..]}
'''
youtube_video_map = [
['id', 'youtubeVideoID', None, []],
['uploaded', 'datePublished', transf_date, []],
['language', 'inLanguage', transf_lang, []],
['content.5', 'mediaurl', None, ['video']],
['content.1', 'rtspLow', None, ['video']],
['content.6', 'rtspHigh', None, ['video']],
['description', 'text', None, ['text']],
['title', 'headline', None, ['text']],
['duration', 'duration', None, ['duration']],
['rating', 'rating', None, ['duration']],
['ratingCount', 'numberOfRating', None, ['rating']],
['uploader', 'author', None, []],
['commentCount', 'numberOfComments', None, []],
['likeCount', 'numberOfLikes', None, []],
['favoriteCount', 'numberOfFavorites', None, []],
['viewCount', 'numberOfViews', None, []],
['category', 'category', None, []]
]
def main():
conv = EumssiConverter('gdata-api-v102014',youtube_video_map)
conv.run()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
12827704
|
import datetime
import scrapy
from scrapy.loader import ItemLoader
from itemloaders.processors import MapCompose, TakeFirst
from exchanges.twse.items import BranchSettlementItem
from exchanges.twse.handlers import StockBranchHandler as Handler
class BranchSettlementSpider(scrapy.Spider):
name = 'twse_branch_settlement'
allowed_domains = ['bsr.twse.com.tw']
date = datetime.date.today().strftime("%Y%m%d")
def __init__(self, *args, **kwargs):
super(BranchSettlementSpider, self).__init__(*args, **kwargs)
        # use two separate lists; chained assignment would make them aliases of one list
        self.processed, self.total = [], []
def start_requests(self):
self.logger.info(f'Parsing date: {self.date}')
self.total = Handler.load_symbols()
if self.total:
for symbol in self.total:
req = Handler.new_request(symbol, self.parse, self.on_error)
yield scrapy.Request(**req)
else:
req = Handler.stocks_request(self.date, self.parse_stocks, None)
yield scrapy.Request(**req)
def parse_stocks(self, response):
self.total = Handler.get_symbols(response)
for symbol in self.total:
req = Handler.new_request(symbol, self.parse, self.on_error)
yield scrapy.Request(**req)
def parse(self, response):
if Handler.check_download_link(response):
yield scrapy.Request(url=Handler.content_url, meta=response.meta, encoding='cp950',
callback=self.parse_csv, errback=self.on_error, dont_filter=True)
else:
response.meta['form'] = Handler.new_form(response)
yield scrapy.Request(url=Handler.get_img_url(response), meta=response.meta,
callback=self.parse_img, errback=self.on_error, dont_filter=True)
def parse_img(self, response):
form = response.meta['form']
form = Handler.update_form(response, form)
yield scrapy.FormRequest(url=Handler.menu_url, meta=response.meta, formdata=form,
callback=self.parse, errback=self.on_error, dont_filter=True)
def parse_csv(self, response):
        # body_as_unicode() was removed in Scrapy 2.x; response.text is the equivalent
        rows = response.text.split('\n')
rows = [row for row in rows if row.count(',') == 10 and ('券商' not in row)]
for row in rows:
row = row.split(',')
yield self.parse_raw(response.meta['symbol'], row[1:5])
yield self.parse_raw(response.meta['symbol'], row[7:])
self.processed.append(response.meta['symbol'])
self.logger.info(f"({len(self.processed)}/{len(self.total)}) {response.meta['symbol']} [{len(rows)} rows]")
def parse_raw(self, symbol, raw):
terms = BranchSettlementItem.Meta.fields
loader = ItemLoader(item=BranchSettlementItem())
loader.default_input_processor = MapCompose(str, str.strip)
loader.default_output_processor = TakeFirst()
loader.add_value('date', self.date)
loader.add_value('code', symbol)
for idx, field in enumerate(terms):
loader.add_value(field, raw[idx])
return loader.load_item()
def on_error(self, failure):
symbol = failure.request.meta['symbol']
req = Handler.new_request(symbol, self.parse, self.on_error)
yield scrapy.Request(**req)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super().from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.spider_closed, signal=scrapy.signals.spider_closed)
return spider
def spider_closed(self, spider):
least = set(self.total) - set(self.processed)
self.logger.info(f"Write {len(least)} symbol cache")
Handler.write_symbols(least)
|
StarcoderdataPython
|
3222011
|
import os
from flask import Flask
from flask import request
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_bootstrap import Bootstrap
from flask_pagedown import PageDown
from flask_uploads import UploadSet, IMAGES, configure_uploads
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
lm = LoginManager(app)
bootstrap = Bootstrap(app)
pagedown = PageDown(app)
avatars = UploadSet('avatars', IMAGES)
configure_uploads(app, avatars)
from app.main import main, auth, user, book, comment, log
from app.api import api_bp
for blueprint in [main, auth, user, book, comment, log, api_bp]:
app.register_blueprint(blueprint)
from app import models
exists_db = os.path.isfile(app.config['DATABASE'])
if not exists_db:
from . import db_fill
@app.route('/security-test')
def token_test():
my_str = '<pre>{}</pre>'.format(request.headers)
return my_str
|
StarcoderdataPython
|
9747546
|
<reponame>karenang/ivle-bot
from . import api
class Announcement():
# Announcement.Announcements
def announcements(self, courseId, duration=0, titleOnly=False, auth=True):
params = {'CourseID': courseId, 'Duration': duration, 'TitleOnly': titleOnly}
return api.call('Announcements', params, auth)
# Announcement.Announcements_Unread
def announcements_unread(self, titleOnly=False, auth=True):
params = {'TitleOnly': titleOnly}
return api.call('Announcements_Unread', params, auth)
# Announcement.Announcements_AddLog_JSON
def announcements_add_log(self, annEventId, auth=True):
params = {'AnnEventID': annEventId}
return api.call('Announcement_AddLog_JSON', params, auth, 'post')
# Announcement.Announcements_Add_JSON
def announcements_add(self, courseId, annTitle, annMessage, sendEmail, auth=True):
params = {'CourseID': courseId, 'AddTitle': annTitle, 'AnnMessage': annMessage, 'SendEmail': sendEmail}
return api.call('Announcements_Add_JSON', params, auth, 'post')
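# --- Illustrative usage (added; not part of the original snippet) ---
# A hedged sketch; 'CS1010' is a placeholder course ID, and this assumes
# api.call is already configured with API credentials elsewhere in the package.
# ann = Announcement()
# recent = ann.announcements('CS1010', duration=14, titleOnly=True)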
|
StarcoderdataPython
|
288182
|
<filename>type4py/preprocess.py<gh_stars>10-100
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from type4py import logger, AVAILABLE_TYPES_NUMBER, MAX_PARAM_TYPE_DEPTH
from libsa4py.merge import merge_jsons_to_dict, create_dataframe_fns, create_dataframe_vars
from libsa4py.cst_transformers import ParametricTypeDepthReducer
from libsa4py.cst_lenient_parser import lenient_parse_module
from libsa4py.utils import list_files
from typing import Tuple
from ast import literal_eval
from collections import Counter
from os.path import exists, join
from tqdm import tqdm
import regex
import os
import pickle
import pandas as pd
import numpy as np
logger.name = __name__
tqdm.pandas()
# Precompile often used regex
first_cap_regex = regex.compile('(.)([A-Z][a-z]+)')
all_cap_regex = regex.compile('([a-z0-9])([A-Z])')
sub_regex = r'typing\.|typing_extensions\.|t\.|builtins\.|collections\.'
def make_types_consistent(df_all: pd.DataFrame, df_vars: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Removes typing module from type annotations
"""
def remove_quote_types(t: str):
s = regex.search(r'^\'(.+)\'$', t)
if bool(s):
return s.group(1)
else:
#print(t)
return t
df_all['return_type'] = df_all['return_type'].progress_apply(lambda x: regex.sub(sub_regex, "", str(x)) if x else x)
df_all['arg_types'] = df_all['arg_types'].progress_apply(lambda x: str([regex.sub(sub_regex, "", t) \
if t else t for t in literal_eval(x)]))
df_all['return_type'] = df_all['return_type'].progress_apply(remove_quote_types)
df_all['arg_types'] = df_all['arg_types'].progress_apply(lambda x: str([remove_quote_types(t) if t else t for t in literal_eval(x)]))
df_vars['var_type'] = df_vars['var_type'].progress_apply(lambda x: regex.sub(sub_regex, "", str(x)))
df_vars['var_type'] = df_vars['var_type'].progress_apply(remove_quote_types)
return df_all, df_vars
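# Illustrative examples of the normalisation above (added for clarity, not part
# of the original snippet): sub_regex strips module prefixes and
# remove_quote_types unwraps quoted annotations, e.g.
#   "typing.List[builtins.str]" -> "List[str]"
#   "'MyClass'"                 -> "MyClass"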
def resolve_type_aliasing(df_param: pd.DataFrame, df_ret: pd.DataFrame,
df_vars: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Resolves type aliasing and mappings. e.g. `[]` -> `list`
"""
import libcst as cst
# Problematic patterns: (?<=.*)Tuple\[Any, *?.*?\](?<=.*)
type_aliases = {'(?<=.*)any(?<=.*)|(?<=.*)unknown(?<=.*)': 'Any',
'^{}$|^Dict$|^Dict\[\]$|(?<=.*)Dict\[Any, *?Any\](?=.*)|^Dict\[unknown, *Any\]$': 'dict',
'^Set$|(?<=.*)Set\[\](?<=.*)|^Set\[Any\]$': 'set',
'^Tuple$|(?<=.*)Tuple\[\](?<=.*)|^Tuple\[Any\]$|(?<=.*)Tuple\[Any, *?\.\.\.\](?=.*)|^Tuple\[unknown, *?unknown\]$|^Tuple\[unknown, *?Any\]$|(?<=.*)tuple\[\](?<=.*)': 'tuple',
'^Tuple\[(.+), *?\.\.\.\]$': r'Tuple[\1]',
'\\bText\\b': 'str',
'^\[\]$|(?<=.*)List\[\](?<=.*)|^List\[Any\]$|^List$': 'list',
'^\[{}\]$': 'List[dict]',
'(?<=.*)Literal\[\'.*?\'\](?=.*)': 'Literal',
'(?<=.*)Literal\[\d+\](?=.*)': 'Literal', # Maybe int?!
                    '^Callable\[\.\.\., *?Any\]$|^Callable\[\[Any\], *?Any\]$|^Callable\[\[Named\(x, Any\)\], Any\]$': 'Callable',
                    '^Iterator\[Any\]$': 'Iterator',
                    '^OrderedDict\[Any, *?Any\]$': 'OrderedDict',
                    '^Counter\[Any\]$': 'Counter',
                    '(?<=.*)Match\[Any\](?<=.*)': 'Match'}
def resolve_type_alias(t: str):
for t_alias in type_aliases:
if regex.search(regex.compile(t_alias), t):
t = regex.sub(regex.compile(t_alias), type_aliases[t_alias], t)
return t
df_param['arg_type'] = df_param['arg_type'].progress_apply(resolve_type_alias)
df_ret['return_type'] = df_ret['return_type'].progress_apply(resolve_type_alias)
df_vars['var_type'] = df_vars['var_type'].progress_apply(resolve_type_alias)
return df_param, df_ret, df_vars
def preprocess_parametric_types(df_param: pd.DataFrame, df_ret: pd.DataFrame,
df_vars: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Reduces the depth of parametric types
"""
from libcst import parse_module, ParserSyntaxError
global s
s = 0
def reduce_depth_param_type(t: str) -> str:
global s
if regex.match(r'.+\[.+\]', t):
try:
t = parse_module(t)
t = t.visit(ParametricTypeDepthReducer(max_annot_depth=MAX_PARAM_TYPE_DEPTH))
return t.code
except ParserSyntaxError:
try:
t = lenient_parse_module(t)
t = t.visit(ParametricTypeDepthReducer(max_annot_depth=MAX_PARAM_TYPE_DEPTH))
s += 1
return t.code
except ParserSyntaxError:
return None
else:
return t
df_param['arg_type'] = df_param['arg_type'].progress_apply(reduce_depth_param_type)
df_ret['return_type'] = df_ret['return_type'].progress_apply(reduce_depth_param_type)
df_vars['var_type'] = df_vars['var_type'].progress_apply(reduce_depth_param_type)
logger.info(f"Sucssesfull lenient parsing {s}")
return df_param, df_ret, df_vars
def filter_functions(df: pd.DataFrame, funcs=['str', 'unicode', 'repr', 'len', 'doc', 'sizeof']) -> pd.DataFrame:
"""
Filters functions which are not useful.
:param df: dataframe to use
:return: filtered dataframe
"""
df_len = len(df)
logger.info(f"Functions before dropping on __*__ methods {len(df):,}")
df = df[~df['name'].isin(funcs)]
logger.info(f"Functions after dropping on __*__ methods {len(df):,}")
logger.info(f"Filtered out {df_len - len(df):,} functions.")
return df
def filter_variables(df_vars: pd.DataFrame, types=['Any', 'None', 'object', 'type', 'Type[Any]',
'Type[cls]', 'Type[type]', 'Type', 'TypeVar', 'Optional[Any]']):
"""
Filters out variables with specified types such as Any or None
"""
df_var_len = len(df_vars)
logger.info(f"Variables before dropping on {','.join(types)}: {len(df_vars):,}")
df_vars = df_vars[~df_vars['var_type'].isin(types)]
logger.info(f"Variables after dropping on {','.join(types)}: {len(df_vars):,}")
logger.info(f"Filtered out {df_var_len - len(df_vars):,} variables.")
return df_vars
def filter_var_wo_type(df_vars: pd.DataFrame) -> pd.DataFrame:
"""
Filters out variables without a type
"""
df_var_len = len(df_vars)
logger.info(f"Variables before dropping: {len(df_vars):,}")
df_vars = df_vars[df_vars['var_type'].notnull()]
logger.info(f"Variables after dropping dropping: {len(df_vars):,}")
logger.info(f"Filtered out {df_var_len - len(df_vars):,} variables w/o a type.")
return df_vars
def gen_argument_df(df: pd.DataFrame) -> pd.DataFrame:
"""
Generates a new dataframe containing all argument data.
:param df: dataframe for which to extract argument
:return: argument dataframe
"""
arguments = []
for i, row in tqdm(df.iterrows(), total=len(df.index), desc="Processing arguments"):
for p_i, arg_name in enumerate(literal_eval(row['arg_names'])):
# Ignore self arg
if arg_name == 'self':
continue
arg_type = literal_eval(row['arg_types'])[p_i].strip('\"')
# Ignore Any or None types
# TODO: Ignore also object type
# TODO: Ignore Optional[Any]
if arg_type == '' or arg_type in {'Any', 'None', 'object'}:
continue
arg_descr = literal_eval(row['arg_descrs'])[p_i]
arg_occur = [a.replace('self', '').strip() if 'self' in a.split() else a for a in literal_eval(row['args_occur'])[p_i]]
other_args = " ".join([a for a in literal_eval(row['arg_names']) if a != 'self'])
arguments.append([row['file'], row['name'], row['func_descr'], arg_name, arg_type, arg_descr, other_args, arg_occur])
return pd.DataFrame(arguments, columns=['file', 'func_name', 'func_descr', 'arg_name', 'arg_type', 'arg_comment', 'other_args',
'arg_occur'])
def filter_return_dp(df: pd.DataFrame) -> pd.DataFrame:
"""
Filters return datapoints based on a set of criteria.
"""
logger.info(f"Functions before dropping on return type {len(df):,}")
df = df.dropna(subset=['return_type'])
logger.info(f"Functions after dropping on return type {len(df):,}")
logger.info(f"Functions before dropping nan, None, Any return type {len(df):,}")
to_drop = np.invert((df['return_type'] == 'nan') | (df['return_type'] == 'None') | (df['return_type'] == 'Any'))
df = df[to_drop]
logger.info(f"Functions after dropping nan return type {len(df):,}")
logger.info(f"Functions before dropping on empty return expression {len(df):,}")
df = df[df['return_expr'].apply(lambda x: len(literal_eval(x))) > 0]
logger.info(f"Functions after dropping on empty return expression {len(df):,}")
return df
def format_df(df: pd.DataFrame) -> pd.DataFrame:
df['arg_names'] = df['arg_names'].apply(lambda x: literal_eval(x))
df['arg_types'] = df['arg_types'].apply(lambda x: literal_eval(x))
df['arg_descrs'] = df['arg_descrs'].apply(lambda x: literal_eval(x))
df['return_expr'] = df['return_expr'].apply(lambda x: literal_eval(x))
return df
def encode_all_types(df_ret: pd.DataFrame, df_params: pd.DataFrame, df_vars: pd.DataFrame,
output_dir: str):
all_types = np.concatenate((df_ret['return_type'].values, df_params['arg_type'].values,
df_vars['var_type'].values), axis=0)
le_all = LabelEncoder()
le_all.fit(all_types)
df_ret['return_type_enc_all'] = le_all.transform(df_ret['return_type'].values)
df_params['arg_type_enc_all'] = le_all.transform(df_params['arg_type'].values)
df_vars['var_type_enc_all'] = le_all.transform(df_vars['var_type'].values)
unq_types, count_unq_types = np.unique(all_types, return_counts=True)
pd.DataFrame(
list(zip(le_all.transform(unq_types), [unq_types[i] for i in np.argsort(count_unq_types)[::-1]],
[count_unq_types[i] for i in np.argsort(count_unq_types)[::-1]])),
columns=['enc', 'type', 'count']
).to_csv(os.path.join(output_dir, "_most_frequent_all_types.csv"), index=False)
logger.info(f"Total no. of extracted types: {len(all_types):,}")
logger.info(f"Total no. of unique types: {len(unq_types):,}")
return df_ret, df_params, le_all
def gen_most_frequent_avl_types(avl_types_dir, output_dir, top_n: int = 1024) -> pd.DataFrame:
"""
It generates top n most frequent available types
:param top_n:
:return:
"""
aval_types_files = [os.path.join(avl_types_dir, f) for f in os.listdir(avl_types_dir) if os.path.isfile(os.path.join(avl_types_dir, f))]
# All available types across all Python projects
all_aval_types = []
for f in aval_types_files:
with open(f, 'r') as f_aval_type:
all_aval_types = all_aval_types + f_aval_type.read().splitlines()
counter = Counter(all_aval_types)
df = pd.DataFrame.from_records(counter.most_common(top_n), columns=['Types', 'Count'])
df.to_csv(os.path.join(output_dir, "top_%d_types.csv" % top_n), index=False)
return df
def encode_aval_types(df_param: pd.DataFrame, df_ret: pd.DataFrame, df_var: pd.DataFrame,
df_aval_types: pd.DataFrame):
"""
It encodes the type of parameters and return according to visible type hints
"""
types = df_aval_types['Types'].tolist()
def trans_aval_type(x):
for i, t in enumerate(types):
if x in t:
return i
return len(types) - 1
    # If the arg type doesn't exist in the top_n available types, the last index
    # (len(types) - 1) is used to represent the "other" type.
df_param['param_aval_enc'] = df_param['arg_type'].progress_apply(trans_aval_type)
df_ret['ret_aval_enc'] = df_ret['return_type'].progress_apply(trans_aval_type)
df_var['var_aval_enc'] = df_var['var_type'].progress_apply(trans_aval_type)
return df_param, df_ret
def preprocess_ext_fns(output_dir: str, limit: int = None):
"""
Applies preprocessing steps to the extracted functions
"""
if not (os.path.exists(os.path.join(output_dir, "all_fns.csv")) and os.path.exists(os.path.join(output_dir, "all_vars.csv"))):
logger.info("Merging JSON projects")
merged_jsons = merge_jsons_to_dict(list_files(os.path.join(output_dir, 'processed_projects'), ".json"), limit)
logger.info("Creating functions' Dataframe")
create_dataframe_fns(output_dir, merged_jsons)
logger.info("Creating variables' Dataframe")
create_dataframe_vars(output_dir, merged_jsons)
logger.info("Loading vars & fns Dataframe")
processed_proj_fns = pd.read_csv(os.path.join(output_dir, "all_fns.csv"), low_memory=False)
processed_proj_vars = pd.read_csv(os.path.join(output_dir, "all_vars.csv"), low_memory=False)
# Split the processed files into train, validation and test sets
if all(processed_proj_fns['set'].isin(['train', 'valid', 'test'])) and \
all(processed_proj_vars['set'].isin(['train', 'valid', 'test'])):
logger.info("Found the sets split in the input dataset")
train_files = processed_proj_fns['file'][processed_proj_fns['set'] == 'train']
valid_files = processed_proj_fns['file'][processed_proj_fns['set'] == 'valid']
test_files = processed_proj_fns['file'][processed_proj_fns['set'] == 'test']
train_files_vars = processed_proj_vars['file'][processed_proj_vars['set'] == 'train']
valid_files_vars = processed_proj_vars['file'][processed_proj_vars['set'] == 'valid']
test_files_vars = processed_proj_vars['file'][processed_proj_vars['set'] == 'test']
else:
logger.info("Splitting sets randomly")
uniq_files = np.unique(np.concatenate((processed_proj_fns['file'].to_numpy(), processed_proj_vars['file'].to_numpy())))
train_files, test_files = train_test_split(pd.DataFrame(uniq_files, columns=['file']), test_size=0.2)
train_files, valid_files = train_test_split(pd.DataFrame(train_files, columns=['file']), test_size=0.1)
train_files_vars, valid_files_vars, test_files_vars = train_files, valid_files, test_files
df_train = processed_proj_fns[processed_proj_fns['file'].isin(train_files.to_numpy().flatten())]
logger.info(f"No. of functions in train set: {df_train.shape[0]:,}")
df_valid = processed_proj_fns[processed_proj_fns['file'].isin(valid_files.to_numpy().flatten())]
logger.info(f"No. of functions in validation set: {df_valid.shape[0]:,}")
df_test = processed_proj_fns[processed_proj_fns['file'].isin(test_files.to_numpy().flatten())]
logger.info(f"No. of functions in test set: {df_test.shape[0]:,}")
df_var_train = processed_proj_vars[processed_proj_vars['file'].isin(train_files_vars.to_numpy().flatten())]
logger.info(f"No. of variables in train set: {df_var_train.shape[0]:,}")
df_var_valid = processed_proj_vars[processed_proj_vars['file'].isin(valid_files_vars.to_numpy().flatten())]
logger.info(f"No. of variables in validation set: {df_var_valid.shape[0]:,}")
df_var_test = processed_proj_vars[processed_proj_vars['file'].isin(test_files_vars.to_numpy().flatten())]
logger.info(f"No. of variables in test set: {df_var_test.shape[0]:,}")
assert list(set(df_train['file'].tolist()).intersection(set(df_test['file'].tolist()))) == []
assert list(set(df_train['file'].tolist()).intersection(set(df_valid['file'].tolist()))) == []
assert list(set(df_test['file'].tolist()).intersection(set(df_valid['file'].tolist()))) == []
# Exclude variables without a type
processed_proj_vars = filter_var_wo_type(processed_proj_vars)
logger.info(f"Making type annotations consistent")
# Makes type annotations consistent by removing `typing.`, `t.`, and `builtins` from a type.
processed_proj_fns, processed_proj_vars = make_types_consistent(processed_proj_fns, processed_proj_vars)
assert any([bool(regex.match(sub_regex, str(t))) for t in processed_proj_fns['return_type']]) == False
assert any([bool(regex.match(sub_regex, t)) for t in processed_proj_fns['arg_types']]) == False
assert any([bool(regex.match(sub_regex, t)) for t in processed_proj_vars['var_type']]) == False
# Filters variables with type Any or None
processed_proj_vars = filter_variables(processed_proj_vars)
# Filters trivial functions such as `__str__` and `__len__`
processed_proj_fns = filter_functions(processed_proj_fns)
# Extracts type hints for functions' arguments
processed_proj_fns_params = gen_argument_df(processed_proj_fns)
# Filters out functions: (1) without a return type (2) with the return type of Any or None (3) without a return expression
processed_proj_fns = filter_return_dp(processed_proj_fns)
processed_proj_fns = format_df(processed_proj_fns)
logger.info(f"Resolving type aliases")
# Resolves type aliasing and mappings. e.g. `[]` -> `list`
processed_proj_fns_params, processed_proj_fns, processed_proj_vars = resolve_type_aliasing(processed_proj_fns_params,
processed_proj_fns,
processed_proj_vars)
assert any([bool(regex.match(r'^{}$|\bText\b|^\[{}\]$|^\[\]$', t)) for t in processed_proj_fns['return_type']]) == False
assert any([bool(regex.match(r'^{}$|\bText\b|^\[\]$', t)) for t in processed_proj_fns_params['arg_type']]) == False
logger.info(f"Preproceessing parametric types")
processed_proj_fns_params, processed_proj_fns, processed_proj_vars = preprocess_parametric_types(processed_proj_fns_params,
processed_proj_fns,
processed_proj_vars)
# Exclude variables without a type
processed_proj_vars = filter_var_wo_type(processed_proj_vars)
processed_proj_fns, processed_proj_fns_params, le_all = encode_all_types(processed_proj_fns, processed_proj_fns_params,
processed_proj_vars, output_dir)
# Exclude self from arg names and return expressions
processed_proj_fns['arg_names_str'] = processed_proj_fns['arg_names'].apply(lambda l: " ".join([v for v in l if v != 'self']))
processed_proj_fns['return_expr_str'] = processed_proj_fns['return_expr'].apply(lambda l: " ".join([regex.sub(r"self\.?", '', v) for v in l]))
# Drop all columns useless for the ML model
processed_proj_fns = processed_proj_fns.drop(columns=['author', 'repo', 'has_type', 'arg_names', 'arg_types', 'arg_descrs', 'args_occur',
'return_expr'])
# Visible type hints
if exists(join(output_dir, 'MT4Py_VTHs.csv')):
logger.info("Using visible type hints")
processed_proj_fns_params, processed_proj_fns = encode_aval_types(processed_proj_fns_params, processed_proj_fns,
processed_proj_vars,
pd.read_csv(join(output_dir, 'MT4Py_VTHs.csv')).head(AVAILABLE_TYPES_NUMBER))
else:
logger.info("Using naive available type hints")
df_types = gen_most_frequent_avl_types(os.path.join(output_dir, "extracted_visible_types"), output_dir, AVAILABLE_TYPES_NUMBER)
processed_proj_fns_params, processed_proj_fns = encode_aval_types(processed_proj_fns_params, processed_proj_fns,
processed_proj_vars, df_types)
# Split parameters and returns type dataset by file into a train and test sets
df_params_train = processed_proj_fns_params[processed_proj_fns_params['file'].isin(train_files.to_numpy().flatten())]
df_params_valid = processed_proj_fns_params[processed_proj_fns_params['file'].isin(valid_files.to_numpy().flatten())]
df_params_test = processed_proj_fns_params[processed_proj_fns_params['file'].isin(test_files.to_numpy().flatten())]
df_ret_train = processed_proj_fns[processed_proj_fns['file'].isin(train_files.to_numpy().flatten())]
df_ret_valid = processed_proj_fns[processed_proj_fns['file'].isin(valid_files.to_numpy().flatten())]
df_ret_test = processed_proj_fns[processed_proj_fns['file'].isin(test_files.to_numpy().flatten())]
df_var_train = processed_proj_vars[processed_proj_vars['file'].isin(train_files_vars.to_numpy().flatten())]
df_var_valid = processed_proj_vars[processed_proj_vars['file'].isin(valid_files_vars.to_numpy().flatten())]
df_var_test = processed_proj_vars[processed_proj_vars['file'].isin(test_files_vars.to_numpy().flatten())]
assert list(set(df_params_train['file'].tolist()).intersection(set(df_params_test['file'].tolist()))) == []
assert list(set(df_params_train['file'].tolist()).intersection(set(df_params_valid['file'].tolist()))) == []
assert list(set(df_params_test['file'].tolist()).intersection(set(df_params_valid['file'].tolist()))) == []
assert list(set(df_ret_train['file'].tolist()).intersection(set(df_ret_test['file'].tolist()))) == []
assert list(set(df_ret_train['file'].tolist()).intersection(set(df_ret_valid['file'].tolist()))) == []
assert list(set(df_ret_test['file'].tolist()).intersection(set(df_ret_valid['file'].tolist()))) == []
assert list(set(df_var_train['file'].tolist()).intersection(set(df_var_test['file'].tolist()))) == []
assert list(set(df_var_train['file'].tolist()).intersection(set(df_var_valid['file'].tolist()))) == []
assert list(set(df_var_test['file'].tolist()).intersection(set(df_var_valid['file'].tolist()))) == []
# Store the dataframes and the label encoders
logger.info("Saving preprocessed functions on the disk...")
with open(os.path.join(output_dir, "label_encoder_all.pkl"), 'wb') as file:
pickle.dump(le_all, file)
df_params_train.to_csv(os.path.join(output_dir, "_ml_param_train.csv"), index=False)
df_params_valid.to_csv(os.path.join(output_dir, "_ml_param_valid.csv"), index=False)
df_params_test.to_csv(os.path.join(output_dir, "_ml_param_test.csv"), index=False)
df_ret_train.to_csv(os.path.join(output_dir, "_ml_ret_train.csv"), index=False)
df_ret_valid.to_csv(os.path.join(output_dir, "_ml_ret_valid.csv"), index=False)
df_ret_test.to_csv(os.path.join(output_dir, "_ml_ret_test.csv"), index=False)
df_var_train.to_csv(os.path.join(output_dir, "_ml_var_train.csv"), index=False)
df_var_valid.to_csv(os.path.join(output_dir, "_ml_var_valid.csv"), index=False)
df_var_test.to_csv(os.path.join(output_dir, "_ml_var_test.csv"), index=False)
|
StarcoderdataPython
|
5044769
|
# Generated by Django 3.0.3 on 2021-09-15 13:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workspaces', '0014_workspacegeneralsettings_je_single_credit_line'),
]
operations = [
migrations.AddField(
model_name='workspacegeneralsettings',
name='change_accounting_period',
field=models.BooleanField(default=False, help_text='Export Expense when accounting period is closed'),
),
]
|
StarcoderdataPython
|
3576527
|
from astroid import MANAGER, register_module_extender
from astroid.builder import AstroidBuilder
CODE_FIX = """
class md5(object):
def __init__(self, value=None):
pass
def hexdigest(self):
return u''
def update(self, x):
return u''
def digest(self):
return u''
class sha1(object):
def __init__(self, value=None):
pass
def hexdigest(self):
return u''
def update(self, x):
return u''
def digest(self):
return u''
class sha512(object):
def __init__(self, value=None):
pass
def hexdigest(self):
return u''
def update(self, x):
return u''
def digest(self):
return u''
class sha256(object):
def __init__(self, value=None):
pass
def hexdigest(self):
return u''
def update(self, x):
return u''
def digest(self):
return u''
"""
def hashlib_transform():
return AstroidBuilder(MANAGER).string_build(CODE_FIX)
def register(linter):
register_module_extender(MANAGER, 'hashlib', hashlib_transform)
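# --- Illustrative usage (added; not part of the original snippet) ---
# As an astroid brain / pylint plugin this module would typically be loaded via
# pylint's --load-plugins option (the module name here is a placeholder):
#   pylint --load-plugins=hashlib_brain yourpackage
# after which calls such as hashlib.md5().hexdigest() are inferred to return a
# string, per the CODE_FIX stub above.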
|
StarcoderdataPython
|
9694560
|
import glob
from astro import bot
from sys import argv
from telethon import TelegramClient
from astro.config import Config
from astro.utils import load_module, start_assistant, load_pmbot
from pathlib import Path
import telethon.utils
from astro import CMD_HNDLR
GROUP = Config.PRIVATE_GROUP_ID
BOTNAME = Config.BOT_USERNAME
LOAD_MYBOT = Config.LOAD_MYBOT
OWNER_USERNAME = Config.OWNER_USERNAME
async def add_bot(bot_token):
await bot.start(bot_token)
bot.me = await bot.get_me()
bot.uid = telethon.utils.get_peer_id(bot.me)
async def startup_log_all_done():
try:
await bot.send_message(GROUP, f"Hey..{OWNER_USERNAME} ƛsτʀ๏ IS ON👮\nFor Your security😌\nNow you are 100% Safe By astro-Security😉\nUSE `ping` To check me😁\n~Enjoy~\n\n~ @Astro_HelpChat")
except BaseException:
print("Either PRIVATE_GROUP_ID is wrong or you have left the group.")
if len(argv) not in (1, 3, 4):
bot.disconnect()
else:
bot.tgbot = None
if Config.BOT_USERNAME is not None:
print("Initiating Inline Bot")
        # ForTheGreaterGood of beautification
bot.tgbot = TelegramClient(
"BOT_TOKEN",
api_id=Config.API_ID,
api_hash=Config.API_HASH
).start(bot_token=Config.BOT_TOKEN)
print("Initialisation finished, no errors")
print("Starting Userbot")
bot.loop.run_until_complete(add_bot(Config.BOT_USERNAME))
print("Startup Completed")
else:
bot.start()
path = 'astro/plugins/*.py'
files = glob.glob(path)
for name in files:
with open(name) as f:
path1 = Path(f.name)
shortname = path1.stem
load_module(shortname.replace(".py", ""))
print("astro has been deployed! ")
print("Setting up TGBot")
path = "astro/plugins/assistant/*.py"
files = glob.glob(path)
for name in files:
with open(name) as f:
path1 = Path(f.name)
shortname = path1.stem
start_assistant(shortname.replace(".py", ""))
if LOAD_MYBOT == "True":
path = "astro/plugins/assistant/pmbot/*.py"
files = glob.glob(path)
for name in files:
with open(name) as f:
path1 = Path(f.name)
shortname = path1.stem
load_pmbot(shortname.replace(".py", ""))
print("TGBot set up completely!")
print("TGBot set up - Level - Basic")
print("Astro has been fully deployed!")
print("||•||°••°AsτR๏ υsєяъ๏т°••°||•||")
print("~VISIT HELP CHAT FOR MORE~")
print("°•°•°•°•°•Do Visit @astro_Helpchat•°•°•°•°•°•°•")
bot.loop.run_until_complete(startup_log_all_done())
if len(argv) not in (1, 3, 4):
bot.disconnect()
else:
bot.run_until_disconnected()
|
StarcoderdataPython
|
3247752
|
<filename>src/tests/fidl/dangerous_identifiers/generate/uses.py
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__all__ = ["USES"]
from typing import List
from common import *
# Define places that identifiers may appear in a FIDL library:
USES: List[Use] = []
def use(func):
USES.append(Use(func.__name__.replace("_", "."), (func,)))
# TODO(fxbug.dev/77561): we probably want to add a "constraints" test, for cases
# like `vector<Foo>:true` etc.
@use
def constants(f, idents: List[ScopedIdentifier]):
for ident in idents:
f.write(ident.decl_attributes)
f.write(f"const {ident} uint32 = 1;\n")
@use
def using(f, idents: List[ScopedIdentifier]):
for ident in idents:
        # TODO(fxbug.dev/8042): Having a declaration with the same name as what is
        # aliased causes a cycle.
if ident.name == "string":
continue
f.write(ident.decl_attributes)
f.write(f"alias {ident} = string;\n")
# TODO(ianloic): Make this test work. It requires N libraries to import for N
# identifiers. That doesn't fit well into the model of this test.
# @use
# def using_as(f, idents):
# for ident in idents:
# f.write('using fuchsia.mem as %s;\n' % ident)
@use
def enums(f, idents: List[ScopedIdentifier]):
# enums with every dangerous name
for ident in idents:
f.write(ident.decl_attributes)
f.write(f"type {ident} = strict enum {{ MEMBER = 1; }};\n")
# enum with every dangerous field name
f.write("type DangerousMembers = strict enum {\n")
for i, ident in enumerate(idents):
f.write(ident.decl_attributes)
f.write(f" {ident} = {i};\n")
f.write("};\n")
@use
def struct_types(f, idents: List[ScopedIdentifier]):
# structs with every dangerous name
f.write("alias membertype = uint32;\n")
for ident in idents:
f.write(ident.decl_attributes)
f.write(f"type {ident} = struct {{ member membertype = 1; }};\n")
# a struct with every dangerous name as the field type
f.write("type DangerousMembers = struct {\n")
for i, ident in enumerate(idents):
# dangerous field type
f.write(ident.decl_attributes)
f.write(f" f{i} {ident};\n")
f.write("};\n")
@use
def struct_names(f, idents: List[ScopedIdentifier]):
# a struct with every dangerous name as the field name
f.write("type DangerousMembers = struct {\n")
for ident in idents:
f.write(ident.decl_attributes)
f.write(f" {ident} uint32;\n")
f.write("};\n")
@use
def union_names(f, idents):
# unions with every dangerous name
f.write("type membertype = struct {};\n")
for ident in idents:
f.write(f"type {ident} = strict union {{ 1: member membertype; }};\n")
# a union with every dangerous name as the field type
f.write("type DangerousMembers = strict union {\n")
for i, ident in enumerate(idents):
# dangerous field type
f.write(f" {i+1}: f{i} {ident};\n")
f.write("};\n")
@use
def union_types(f, idents):
# a union with every dangerous name as the field name
f.write("type DangerousMembers = strict union {\n")
for i, ident in enumerate(idents):
f.write(f" {i+1}: f{i} uint32;\n")
f.write("};\n")
@use
def table_names(f, idents: List[ScopedIdentifier]):
# tables with every dangerous name
f.write("alias membertype = uint32;\n")
for ident in idents:
f.write(ident.decl_attributes)
f.write(f"type {ident} = table {{ 1: member membertype; }};\n")
# a table with every dangerous name as the field type
f.write("type DangerousMembers = table {\n")
for i, ident in enumerate(idents):
# dangerous field type
f.write(ident.decl_attributes)
f.write(f" {i+1}: f{i} {ident};\n")
f.write("};\n")
@use
def table_fields(f, idents: List[ScopedIdentifier]):
# a table with every dangerous name as the field name
f.write("type DangerousMembers = table {\n")
for i, ident in enumerate(idents):
f.write(ident.decl_attributes)
f.write(f" {i+1}: {ident} uint32;\n")
f.write("};\n")
@use
def protocol_names(f, idents: List[ScopedIdentifier]):
    # protocols with every dangerous name
for ident in idents:
f.write(ident.decl_attributes)
f.write(f"protocol {ident} {{ JustOneMethod(); }};\n")
@use
def method_names(f, idents: List[ScopedIdentifier]):
# a protocol with every dangerous name as a method name
f.write("protocol DangerousMethods {\n")
for ident in idents:
f.write(ident.decl_attributes)
f.write(f" {ident}();\n")
f.write("};\n")
@use
def event_names(f, idents: List[ScopedIdentifier]):
# a protocol with every dangerous name as an event name
f.write("protocol DangerousEvents {\n")
for ident in idents:
f.write(ident.decl_attributes)
f.write(f" -> {ident}();\n")
f.write("};\n")
@use
def method_request_arguments(f, idents: List[ScopedIdentifier]):
# a protocol with every dangerous name as a request argument
f.write("alias argtype = uint32;\n")
f.write("protocol DangerousRequestArguments {\n")
for i, ident in enumerate(idents):
f.write(ident.decl_attributes)
f.write(f" Method{i}(struct {{ {ident} argtype; }});\n")
f.write("};\n")
@use
def method_response_arguments(f, idents: List[ScopedIdentifier]):
# a protocol with every dangerous name as a response argument
f.write("alias argtype = uint32;\n")
f.write("protocol DangerousResponseArguments {\n")
for i, ident in enumerate(idents):
f.write(ident.decl_attributes)
f.write(f" Method{i}() -> (struct {{ {ident} argtype; }});\n")
f.write("};\n")
@use
def method_event_arguments(f, idents: List[ScopedIdentifier]):
    # a protocol with every dangerous name as an event argument
f.write("alias argtype = uint32;\n")
f.write("protocol DangerousResponseArguments {\n")
for i, ident in enumerate(idents):
f.write(ident.decl_attributes)
f.write(f" -> Event{i}(struct {{ {ident} argtype; }});\n")
f.write("};\n")
@use
def service_names(f, idents: List[ScopedIdentifier]):
# a service with every dangerous name
f.write("protocol SampleProtocol { Method(); };\n")
for ident in idents:
f.write(ident.decl_attributes)
f.write(f"service {ident} {{ member client_end:SampleProtocol; }};\n")
@use
def service_member_types(f, idents: List[ScopedIdentifier]):
# protocols with every dangerous name
for ident in idents:
f.write(ident.decl_attributes)
f.write(f"protocol {ident} {{ JustOneMethod(); }};\n")
# a service with every dangerous name as the member type
f.write("service DangerousServiceMemberTypes {\n")
for i, ident in enumerate(idents):
# dangerous field type
f.write(ident.decl_attributes)
f.write(f" f{i} client_end:{ident};\n")
f.write("};\n")
@use
def service_member_names(f, idents: List[ScopedIdentifier]):
# a service with every dangerous name as the member name
f.write("protocol SampleProtocol { Method(); };\n")
f.write("service DangerousServiceMemberNames {\n")
for ident in idents:
f.write(ident.decl_attributes)
f.write(f" {ident} client_end:SampleProtocol;\n")
f.write("};\n")
|
StarcoderdataPython
|
8071763
|
import numpy as np
import hashlib
import random
# ##########################################################################
# Example of an encrypted system in operation. This works with a few
# assumptions that can be adjusted:
# * Getting within approximately 70' is close enough to note
# * "Infection" sticks around for 2 hours
#
# Questions can be directed to TripleBlind, Inc. This code and algorithm
# is donated to the Private Kit project.
# ##########################################################################
# InfectedUser
class InfectedUser:
def __init__(self):
self.salt = str(random.randint(0, 2 ** 100)).encode("utf-8")
def infected_helper_generation(self, location, thresholds):
distance_threshold = thresholds[0]
time_threshold = int(thresholds[1] / 2)
lat = int(location[0] * 10 ** 6)
long = int(location[1] * 10 ** 6)
time_ = int(
location[2] + time_threshold / 2
) # an origin for time is needed let's say the day the app is released
template = [lat, long, time_]
random_x = random.randint(
int((-90 * 10 ** 6) / (2 * distance_threshold)),
int((90 * 10 ** 6) / (2 * distance_threshold)),
)
random_y = random.randint(
int((-180 * 10 ** 6) / (2 * distance_threshold)),
int((180 * 10 ** 6) / (2 * distance_threshold)),
)
random_time = random.randint(0, 2 ** 50)
lattice_point_x = random_x * 2 * distance_threshold
lattice_point_y = random_y * 2 * distance_threshold
lattice_point_z = random_time * 2 * time_threshold
lattice_point = np.array([lattice_point_x, lattice_point_y, lattice_point_z])
translation_vector = lattice_point - template
hash_complexity = 1000000
dk = hashlib.pbkdf2_hmac(
"sha256", str(lattice_point).encode("utf-8"), self.salt, hash_complexity
)
return translation_vector, dk.hex()
# #########################################################################
def user_hash_generation(query, translation_vector, salt, thresholds):
lat = int(query[0] * 10 ** 6)
long = int(query[1] * 10 ** 6)
time_ = int(query[2])
distance_threshold = thresholds[0]
time_threshold = int(thresholds[1] / 2)
query = np.array([lat, long, time_])
translated_query = query + translation_vector
quantized_query = (
2
* distance_threshold
* np.ceil(
(translated_query[0:2] - distance_threshold) / (2 * distance_threshold)
).astype(np.int64)
)
quantized_time = (
2
* time_threshold
* np.ceil((translated_query[2] - time_threshold) / (2 * time_threshold)).astype(
np.int64
)
)
quantized_out = np.array([quantized_query[0], quantized_query[1], quantized_time])
encoded = str(quantized_out).encode("utf-8")
hash_complexity = 1000000
dk = hashlib.pbkdf2_hmac("sha256", encoded, salt, hash_complexity)
return dk.hex()
#
# The infected user does the following
# * Store a set of points in GPS lat/lon coordinate system
# * Generate the unique hash and helper data
user1_locations = np.array(
[[41.403380, 39.289342, 32], [2.192491, 145.293971, 55]]
) # [lat,long,time]
inf_user = InfectedUser()
thresholds = [300, 2] # .000300 is approximately 70 feet #TODO: More accurate threshold
# 2 hours threshold
user1_helper_data = []
for i in range(user1_locations.shape[0]):
user1_helper_data.append(
inf_user.infected_helper_generation(user1_locations[i], thresholds)
)
print(user1_helper_data[0][1], "infected point hash")
"""
The hash of the infected point is stored at the server, but the other helper data
(translation vector, salt) is sent to all users
"""
translation_vector = user1_helper_data[0][0]
salt = inf_user.salt
current_location1 = np.array([41.403380, 39.289342, 32]) # exact match
current_location2 = np.array([41.403280, 39.289142, 33])  # within threshold
current_location3 = np.array([41.403280, 39.289142, 31]) # before the infection
current_location4 = np.array([41.401380, 39.289342, 31]) # safe area
print(
user_hash_generation(current_location1, translation_vector, salt, thresholds),
"This point is close to an infected point within 2 hours",
)
print(
user_hash_generation(current_location2, translation_vector, salt, thresholds),
"This point is close to an infected point within 2 hours",
)
print(
user_hash_generation(current_location3, translation_vector, salt, thresholds),
"This point is safe",
)
print(
user_hash_generation(current_location4, translation_vector, salt, thresholds),
"This point is safe",
)
"""The Hash is sent to the server and server perform the matching """
|
StarcoderdataPython
|
3597567
|
<gh_stars>10-100
from bitmovin_api_sdk.encoding.filters.unsharp.customdata.customdata_api import CustomdataApi
|
StarcoderdataPython
|
5075128
|
import re
def parse_level(levels):
parsedLevels = {}
classLevels = levels.split(',')
for level in classLevels:
# make ' cleric 0' into ['cleric', 0]
classAndLevel = level.strip().split(' ')
try:
if classAndLevel[0].find('/') > 0:
# if it's sorcerer/wizard
classNames = classAndLevel[0].split('/')
parsedLevels[ classNames[0] ] = int(classAndLevel[1])
parsedLevels[ classNames[1] ] = int(classAndLevel[1])
else:
# otherwise
parsedLevels[ classAndLevel[0] ] = int(classAndLevel[1])
except ValueError:
print(levels)
return parsedLevels
def parse_range(distance):
ranges = [
'close', 'medium', 'long',
]
for r in ranges:
if distance.startswith(r):
return r
return distance
known_saves = [
"-?-",
"Fortitude half",
"Fortitude half see text",
"Fortitude negates",
"Fortitude negates (harmless)",
#"Fortitude negates (object) see text",
#"Fortitude negates see text",
"Fortitude partial",
"Fortitude partial (object)",
#"Fortitude partial or Reflex negates (object) see text",
#"Fortitude partial or Will negates see text",
#"Fortitude partial see text",
#"Fortitude partial see text for enervationThis spell functions like enervation,",
#"Fortitude partial, see text",
"Reflex half",
#"Reflex half or Reflex negates see text",
#"Reflex half see text",
"Reflex negates",
"Reflex negates (object)",
#"Reflex negates and Reflex half see text",
#"Reflex negates see text",
"Reflex partial",
#"Reflex partial see text",
"Will disbelief",
"Will disbelief (if interacted with)",
#"Will disbelief (if interacted with) varies see text",
"Will disbelief, then Fortitude partial see text",
"Will half",
#"Will half (harmless) see text",
#"Will half see text",
"Will negates",
"Will negates (harmless)",
#"Will negates (harmless) or Will half, see text",
"Will negates (harmless) or Will negates (harmless, object)",
"Will negates (harmless) or Will negates (object)",
#"Will negates (harmless) see text",
"Will negates (harmless, object)",
#"Will negates (harmless, object) see text",
"Will negates (object)",
#"Will negates (object) Will negates (object) or Fortitude half see text",
#"Will negates (object) or none see text",
#"Will negates (see text)",
#"Will negates or Fortitude negates see text",
#"Will negates or Will disbelief (if interacted with)",
#"Will negates see text",
"Will partial",
#"Will partial see text",
"no",
#"no and Will negates (harmless)",
"none",
#"none and Will negates (object)",
#"none or Reflex half see text",
#"none or Reflex half, see text",
#"none or Will disbelief (if interacted with) see text",
#"none or Will negates (harmless)",
#"none or Will negates (harmless, object)",
#"none or Will negates (object)",
#"none or Will negates see text",
#"none or Will negates, see text",
#"none see text",
#"none, see text",
"see text"
]
def parse_saves(spell):
saves = spell['saves'].strip()
if( saves not in known_saves ):
#print('{} has non-standard save: {}'.format(spell['name'], spell['saves']))
return known_saves[0]
if saves == 'none' or saves == 'no' or saves == 'see text':
return None
saveObject = {}
flags = {
'harmless': 'isHarmless',
'object': 'isObject',
'disbelief': 'isDisbelief',
}
types = {
'Will': 'Will',
'Fortitude': 'Fort',
'Reflex': 'Ref',
}
for flag, boolean in flags.items():
# if the '(harmless)' is in 'Will negates (harmless)'
if flag in saves:
saveObject[boolean] = True
saves = saves.replace(flag, '')
for save, saveType in types.items():
if save in saves:
            saves = saves.replace(save, '')
saveObject['type'] = saveType
return saveObject
def parse_school(spell):
schoolText = spell['school']
# Well that was easy
schoolName = schoolText.split(' ')[0]
school = schoolName[:1].upper() + schoolName[1:]
subschool = None
subschoolResults = re.search(r'\(([a-z-\s]+)\)', schoolText)
if subschoolResults:
# Strip the ()s
subschool = re.sub(r'[\(\)]+','', subschoolResults[0])
domains = []
domainResults = re.search(r'\[[a-z\s,-]+\]', schoolText)
if domainResults:
domainString = re.sub(r'\[|\]|\s','', domainResults[0])
domains = domainString.split(',')
return {
'school': school,
'subschool': subschool,
'domains': domains,
}
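# --- Illustrative usage (added; not part of the original snippet) ---
# A hedged example of the parsers above on a made-up spell entry:
# parse_level('sorcerer/wizard 1, cleric 0')
#   -> {'sorcerer': 1, 'wizard': 1, 'cleric': 0}
# parse_school({'school': 'evocation (creation) [fire]'})
#   -> {'school': 'Evocation', 'subschool': 'creation', 'domains': ['fire']}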
|
StarcoderdataPython
|
9652717
|
# kamikaze112213 by hephaestus
# http://robotgame.org/viewrobot/5830
import rg
import operator
class Robot:
def act(self, game):
adjacent_robots = self.get_adjacent_robots(game)
adjacent_friendlies = self.get_adjacent_robots(game, operator.__eq__)
adjacent_enemies = self.get_adjacent_robots(game, operator.__ne__)
all_enemies = self.get_all_robots(game, operator.__ne__)
# "The value of the key parameter should be a function that takes
# a single argument and returns a key to use for sorting purposes."
def query(bot_dict, sorting_function, offset=0):
organized = sorted(bot_dict.items(), key=sorting_function)
# returns a list of tuples, [(key, value),... ]
return organized
def get_weakest_enemy(offset=0):
return query(all_enemies, lambda t: t[1].hp)[offset][1]
def get_weakest_adjacent_enemy(offset=0):
return query(adjacent_enemies, lambda t: t[1].hp)[offset][1]
# first_enemy_location = get_first_enemy_location()
weakest_enemy = get_weakest_enemy()
target_enemy = weakest_enemy
if len(adjacent_enemies) > 0:
weakest_adjacent_enemy = get_weakest_adjacent_enemy()
target_enemy = weakest_adjacent_enemy
# move toward the center, if moving there would not put you in range of 2 robots
target_pos = rg.toward(self.location, weakest_enemy.location)
# figure out if any friendly robots would also want to move to our target
adjacent_to_target_friendlies = self.get_adjacent_robots_to(target_pos, game, operator.__eq__)
# if there are enemies around, attack them
# also consider suiciding when it will guarantee a kill, meaning enemy < 15 hp
suicide_threshold = 3 # 3 is better than 4 with 83% confidence, 7-42, 10-34 vs 3-43, 7-38
# 4 is [55, 30, 15] against 3
def has_suicide_priority():
adjacent_allies_to_target_enemy = self.get_adjacent_robots(game, operator.__eq__)
weakest_allies_next_to_adjacent_target_enemy = query(adjacent_allies_to_target_enemy, lambda t: t[1].hp)
return self.location == weakest_allies_next_to_adjacent_target_enemy[0][0]
if len(adjacent_enemies) > 0 and len(adjacent_enemies) < suicide_threshold:
# following line is better by 102-20-17 over just self.hp < 10
# inspired by peterm's stupid 2.6 bot
# assuming all adjacent enemies attacked me, if I would die
# i should instead suicide
if self.hp < (10*len(adjacent_enemies)):
return ['suicide']
# IDEA: if i could kill the enemy with 1 suicide instead of two attacks
# NOTE: if multiple allies are going for this target, i'll actually lose too many bots
# bad idea, 0-20 against self
# if weakest_adjacent_enemy.hp < 15 and weakest_adjacent_enemy.hp > 8 and has_suicide_priority():
# return ['suicide']
# if you could kill 2+ bots by suidiciding, do it
# should also avoid over-killing robots
return ['attack', weakest_adjacent_enemy.location]
elif len(adjacent_enemies) >= suicide_threshold:
return ['suicide']
#not using this priority method because it breaks on the server for some reason
        def by_robot_id_has_priority(): # if i'm a newer bot, I have priority
for loc,bot in adjacent_to_target_friendlies.items():
their_target_pos = rg.toward(loc, weakest_enemy.location)
# check if bots would collide
if their_target_pos == target_pos:
if self.robot_id > bot.robot_id:
return False
return True
def has_priority(): # if i'm more bottom or more to the right, i'll take priority
for loc,bot in adjacent_to_target_friendlies.items():
their_target_pos = rg.toward(loc, weakest_enemy.location)
# check if bots would collide
if their_target_pos == target_pos:
if self.location[0] < loc[0] or self.location[1] < loc[1]:
#don't move then, do something else
return False
return True
if self.location != target_pos and has_priority():
if 'obstacle' not in rg.loc_types(target_pos):
adjacent_to_target_enemies = self.get_adjacent_robots_to(target_pos, game, operator.__ne__)
# if len(adjacent_to_target_enemies) <= 1 or len(adjacent_to_target_enemies) >= 3:
return ['move', target_pos]
#if we couldn't decide to do anything else, just guard
return self.guard()
def toward(curr, dest):
if curr == dest:
return curr
x0, y0 = curr
x, y = dest
x_diff, y_diff = x - x0, y - y0
        if abs(x_diff) < abs(y_diff):
            return (x0, y0 + y_diff // abs(y_diff))
        elif abs(x_diff) == abs(y_diff):
            # tie-break for equal offsets: step along the x axis
            return (x0 + x_diff // abs(x_diff), y0)
        else:
            return (x0 + x_diff // abs(x_diff), y0)
def guard(self):
return ['guard']
def get_all_robots(self, game, player_comparator=None):
def generate():
for loc,bot in game.get('robots').items():
if player_comparator == None or player_comparator(self.player_id, bot.player_id):
yield (loc, bot)
return dict(generate())
def get_adjacent_robots_to(self, some_location, game, player_comparator=None):
def generate():
for loc,bot in game.get('robots').items():
if rg.wdist(loc, some_location) <= 1:
if player_comparator == None or player_comparator(self.player_id, bot.player_id):
yield (loc, bot)
return dict(generate())
def get_adjacent_robots(self, game, player_comparator=None):
return self.get_adjacent_robots_to(self.location, game, player_comparator)
|
StarcoderdataPython
|
61741
|
<reponame>john-james-sf/DataStudio<gh_stars>0
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : Data Studio #
# Version : 0.1.0 #
# File : lab.py #
# Python : 3.8.1 #
# --------------------------------------------------------------------------- #
# Author : <NAME> #
# Company : DecisionScients #
# Email : <EMAIL> #
# URL : https://github.com/decisionscients/datastudio #
# --------------------------------------------------------------------------- #
# Created : Wednesday, February 19th 2020, 5:49:14 am #
# Last Modified : Wednesday, February 19th 2020, 5:49:16 am #
# Modified By : <NAME> (<EMAIL>>) #
# --------------------------------------------------------------------------- #
# License : BSD #
# Copyright (c) 2020 DecisionScients #
# =========================================================================== #
#%%
from abc import ABC
class Foo(ABC):
def __init__(self, name, *args, **kwargs):
print(name, *args, **kwargs)
class Bar(Foo):
def __init__(self, name, *args, **kwargs):
        super(Bar, self).__init__(name, *args, **kwargs)
        f = Far(name, *args, **kwargs)
class Far:
def __init__(self, name, *args, **kwargs):
print(name, *args, **kwargs)
class Boo:
def __init__(self, name, path):
print(name, path)
b = Bar('some name', path = 'some_path')
path = "path boo"
c = Boo('boo name', path)
#%%
d = {'one': 1, 'two': 2, 'three': 3}
d2 = {'five': 5, 'six': 6}
print(next((k,v) for (k, v) in d.items() if 'on' in k))
res = dict(filter(lambda item: 'on' in item[0], d.items()))
print(res)
d2.update(res)
print(d2)
# %%
|
StarcoderdataPython
|
9697172
|
<reponame>dummas2008/AndroidChromium<filename>libraries_res/chrome_res/src/main/res/PRESUBMIT_test.py
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
import PRESUBMIT
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))))
from PRESUBMIT_test_mocks import MockFile, MockInputApi, MockOutputApi
class ColorFormatTest(unittest.TestCase):
def testColorFormatIgnoredFile(self):
lines = ['<color name="color1">#61000000</color>',
'<color name="color2">#FFFFFF</color>',
'<color name="color3">#CCC</color>']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/test.java', lines)]
errors = PRESUBMIT._CheckColorFormat(mock_input_api, MockOutputApi())
self.assertEqual(0, len(errors))
def testColorFormatTooShort(self):
lines = ['<color name="color1">#61000000</color>',
'<color name="color2">#FFFFFF</color>',
'<color name="color3">#CCC</color>']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/test.xml', lines)]
errors = PRESUBMIT._CheckColorFormat(mock_input_api, MockOutputApi())
self.assertEqual(1, len(errors))
self.assertEqual(1, len(errors[0].items))
self.assertEqual(' chrome/path/test.xml:3',
errors[0].items[0].splitlines()[0])
def testColorInvalidAlphaValue(self):
lines = ['<color name="color1">#61000000</color>',
'<color name="color2">#FEFFFFFF</color>',
'<color name="color3">#FFCCCCCC</color>']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/test.xml', lines)]
errors = PRESUBMIT._CheckColorFormat(mock_input_api, MockOutputApi())
self.assertEqual(1, len(errors))
self.assertEqual(1, len(errors[0].items))
self.assertEqual(' chrome/path/test.xml:3',
errors[0].items[0].splitlines()[0])
def testColorFormatLowerCase(self):
lines = ['<color name="color1">#61000000</color>',
'<color name="color2">#EFFFFFFF</color>',
'<color name="color3">#CcCcCC</color>']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/test.xml', lines)]
errors = PRESUBMIT._CheckColorFormat(mock_input_api, MockOutputApi())
self.assertEqual(1, len(errors))
self.assertEqual(1, len(errors[0].items))
self.assertEqual(' chrome/path/test.xml:3',
errors[0].items[0].splitlines()[0])
class ColorReferencesTest(unittest.TestCase):
def testVectorDrawbleIgnored(self):
lines = ['<vector',
'tools:targetApi="21"',
'android:fillColor="#CCCCCC">',
'</vector>']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/test.xml', lines)]
errors = PRESUBMIT._CheckColorReferences(mock_input_api, MockOutputApi())
self.assertEqual(0, len(errors))
def testInvalidReference(self):
lines = ['<TextView',
'android:textColor="#FFFFFF" />']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/test.xml', lines)]
errors = PRESUBMIT._CheckColorReferences(mock_input_api, MockOutputApi())
self.assertEqual(1, len(errors))
self.assertEqual(1, len(errors[0].items))
self.assertEqual(' chrome/path/test.xml:2',
errors[0].items[0].splitlines()[0])
def testValidReference(self):
lines = ['<TextView',
'android:textColor="@color/color1" />']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/test.xml', lines)]
errors = PRESUBMIT._CheckColorReferences(mock_input_api, MockOutputApi())
self.assertEqual(0, len(errors))
def testValidReferenceInColorResources(self):
lines = ['<color name="color1">#61000000</color>']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/colors.xml', lines)]
errors = PRESUBMIT._CheckColorReferences(mock_input_api, MockOutputApi())
self.assertEqual(0, len(errors))
class DuplicateColorsTest(unittest.TestCase):
def testFailure(self):
lines = ['<color name="color1">#61000000</color>',
'<color name="color2">#61000000</color>']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/colors.xml', lines)]
errors = PRESUBMIT._CheckDuplicateColors(mock_input_api, MockOutputApi())
self.assertEqual(1, len(errors))
self.assertEqual(2, len(errors[0].items))
self.assertEqual(' chrome/path/colors.xml:1',
errors[0].items[0].splitlines()[0])
self.assertEqual(' chrome/path/colors.xml:2',
errors[0].items[1].splitlines()[0])
def testSucess(self):
lines = ['<color name="color1">#61000000</color>',
'<color name="color1">#FFFFFF</color>']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/colors.xml', lines)]
errors = PRESUBMIT._CheckDuplicateColors(mock_input_api, MockOutputApi())
self.assertEqual(0, len(errors))
class XmlNamespacePrefixesTest(unittest.TestCase):
def testFailure(self):
lines = ['xmlns:chrome="http://schemas.android.com/apk/res-auto"']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/file.xml', lines)]
errors = PRESUBMIT._CheckXmlNamespacePrefixes(
mock_input_api, MockOutputApi())
self.assertEqual(1, len(errors))
self.assertEqual(1, len(errors[0].items))
self.assertEqual(' chrome/path/file.xml:1',
errors[0].items[0].splitlines()[0])
def testSucess(self):
lines = ['xmlns:app="http://schemas.android.com/apk/res-auto"']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/file.xml', lines)]
errors = PRESUBMIT._CheckXmlNamespacePrefixes(
mock_input_api, MockOutputApi())
self.assertEqual(0, len(errors))
class TextAppearanceTest(unittest.TestCase):
def testFailure_Style(self):
lines = [
'<resource>',
'<style name="TestTextAppearance">',
'<item name="android:textColor">@color/default_text_color_link</item>',
'<item name="android:textSize">14sp</item>',
'<item name="android:textStyle">bold</item>',
'<item name="android:fontFamily">some-font</item>',
'<item name="android:textAllCaps">true</item>',
'</style>',
'</resource>']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/test.xml', lines)]
errors = PRESUBMIT._CheckTextAppearance(mock_input_api, MockOutputApi())
self.assertEqual(1, len(errors))
self.assertEqual(5, len(errors[0].items))
self.assertEqual(
' chrome/path/test.xml:2 contains attribute android:textColor',
errors[0].items[0].splitlines()[0])
self.assertEqual(
' chrome/path/test.xml:2 contains attribute android:textSize',
errors[0].items[1].splitlines()[0])
self.assertEqual(
' chrome/path/test.xml:2 contains attribute android:textStyle',
errors[0].items[2].splitlines()[0])
self.assertEqual(
' chrome/path/test.xml:2 contains attribute android:fontFamily',
errors[0].items[3].splitlines()[0])
self.assertEqual(
' chrome/path/test.xml:2 contains attribute android:textAllCaps',
errors[0].items[4].splitlines()[0])
def testSuccess_Style(self):
lines = [
'<resource>',
'<style name="TextAppearance.Test">',
'<item name="android:textColor">@color/default_text_color_link</item>',
'<item name="android:textSize">14sp</item>',
'<item name="android:textStyle">bold</item>',
'<item name="android:fontFamily">some-font</item>',
'<item name="android:textAllCaps">true</item>',
'</style>',
'<style name="TestStyle">',
'<item name="android:background">some_background</item>',
'</style>',
'</resource>']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/test.xml', lines)]
errors = PRESUBMIT._CheckTextAppearance(mock_input_api, MockOutputApi())
self.assertEqual(0, len(errors))
def testFailure_Widget(self):
lines_top_level = [
'<TextView',
'xmlns:android="http://schemas.android.com/apk/res/android"',
'android:layout_width="match_parent"',
'android:layout_height="@dimen/snippets_article_header_height"',
'android:textColor="@color/snippets_list_header_text_color"',
'android:textSize="14sp" />']
lines_subcomponent_widget = [
'<RelativeLayout',
'xmlns:android="http://schemas.android.com/apk/res/android"',
'android:layout_width="match_parent"',
'android:layout_height="wrap_content">',
'<View',
'android:textColor="@color/error_text_color"',
'android:textSize="@dimen/text_size_medium"',
'android:textAllCaps="true"',
'android:background="@drawable/infobar_shadow_top"',
'android:visibility="gone" />',
'</RelativeLayout>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('chrome/path/test1.xml', lines_top_level),
MockFile('chrome/path/test2.xml', lines_subcomponent_widget)]
errors = PRESUBMIT._CheckTextAppearance(mock_input_api, MockOutputApi())
self.assertEqual(1, len(errors))
self.assertEqual(5, len(errors[0].items))
self.assertEqual(
' chrome/path/test1.xml:5 contains attribute android:textColor',
errors[0].items[0].splitlines()[0])
self.assertEqual(
' chrome/path/test1.xml:6 contains attribute android:textSize',
errors[0].items[1].splitlines()[0])
self.assertEqual(
' chrome/path/test2.xml:6 contains attribute android:textColor',
errors[0].items[2].splitlines()[0])
self.assertEqual(
' chrome/path/test2.xml:7 contains attribute android:textSize',
errors[0].items[3].splitlines()[0])
self.assertEqual(
' chrome/path/test2.xml:8 contains attribute android:textAllCaps',
errors[0].items[4].splitlines()[0])
def testSuccess_Widget(self):
lines = [
'<RelativeLayout',
'xmlns:android="http://schemas.android.com/apk/res/android"',
'android:layout_width="match_parent"',
'android:layout_height="wrap_content">',
'<View',
'android:background="@drawable/infobar_shadow_top"',
'android:visibility="gone" />',
'</RelativeLayout>']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/test.xml', lines)]
errors = PRESUBMIT._CheckTextAppearance(mock_input_api, MockOutputApi())
self.assertEqual(0, len(errors))
class NewTextAppearanceTest(unittest.TestCase):
def testFailure(self):
lines = [
'<resource>',
'<style name="TextAppearance.Test">',
'<item name="android:textColor">@color/default_text_color_link</item>',
'<item name="android:textSize">14sp</item>',
'</style>',
'</resource>']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/test.xml', lines)]
errors = PRESUBMIT._CheckNewTextAppearance(mock_input_api, MockOutputApi())
self.assertEqual(1, len(errors))
self.assertEqual(1, len(errors[0].items))
self.assertEqual(
' chrome/path/test.xml:2',
errors[0].items[0].splitlines()[0])
def testSuccess(self):
lines = [
'<resource>',
'<style name="TextAppearanceTest">',
'<item name="android:textColor">@color/default_text_color_link</item>',
'<item name="android:textSize">14sp</item>',
'</style>',
'</resource>']
mock_input_api = MockInputApi()
mock_input_api.files = [MockFile('chrome/path/test.xml', lines)]
errors = PRESUBMIT._CheckNewTextAppearance(mock_input_api, MockOutputApi())
self.assertEqual(0, len(errors))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
9792982
|
import os
from models.data.base_object_detector import BaseObjectDetector
from models.data.bounding_box import BoundingBox
from typing import List
IMAGE_FILES_EXTENSIONS = [
'.jpg',
'.jpeg',
'.png'
]
def write_image_predictions(
path_to_output_directory: str,
filename: str,
bounding_boxes: List[BoundingBox]
):
target_out_path = os.path.join(path_to_output_directory, filename + ".txt")
with open(target_out_path, "w") as out_file:
for box in bounding_boxes:
left, top, right, bottom = box.min_x, box.min_y, box.max_x, box.max_y
class_for_box = box.human_readable_class.lower().replace(" ", "")
score = box.score
# <class> <probability> <left> <top> <right> <bottom>
out_file.write(f'{class_for_box} {str(score)} {int(left)} {int(top)} {int(right)} {int(bottom)}{os.linesep}')
def write_detections(
path_to_input_directory: str,
path_to_output_directory: str,
object_detector: BaseObjectDetector
):
files_in_dir = os.listdir(path_to_input_directory)
image_files_in_dir = list(
filter(lambda filename: any(map(lambda img_ext: img_ext in filename, IMAGE_FILES_EXTENSIONS)), files_in_dir))
ignored_files = [file_in_dir for file_in_dir in files_in_dir if file_in_dir not in image_files_in_dir]
print(f'Ignored files({len(ignored_files)}): ')
print(ignored_files)
input_dir_name = os.path.split(path_to_input_directory)[1]
current_out_dir_path = os.path.join(path_to_output_directory, input_dir_name)
if not os.path.exists(current_out_dir_path):
os.mkdir(current_out_dir_path)
current_out_dir_path = os.path.join(current_out_dir_path, object_detector.name)
if not os.path.exists(current_out_dir_path):
os.mkdir(current_out_dir_path)
for image_file_in_dir in image_files_in_dir:
target_file_path = os.path.join(path_to_input_directory, image_file_in_dir)
bounding_boxes = object_detector.infer_bounding_boxes_on_target_path(target_file_path)
write_image_predictions(
current_out_dir_path,
image_file_in_dir,
bounding_boxes
)
|
StarcoderdataPython
|
8039432
|
import xml.etree.ElementTree as ET
import os
import glob
import regex as re
import platform
from pathlib import Path
class ClipItem():
def __init__(self, name, path, duration, in_frame, out_frame, out_width, out_height):
duration = int(duration)
in_frame = int(in_frame)
out_frame = int(out_frame)
self.name = name
self.pathurl = path
self.duration = duration
self.enable = True
self.in_frame = in_frame
self.out_frame = out_frame
self.width = out_width
self.height = out_height
self.compositemode = 'normal'
self.timecode = {
'string': '00:00:00:00',
'displayformat': 'NDF'
}
class FCPXML():
def __init__(self, name, frame, width, height):
if(isinstance(frame,str)):
frame = int(frame)
self.clipitems = []
self.name = name
self.timebase = frame
self.is_ntsc = False
self.in_time = str(-1)
self.out_time = str(-1)
self.timecode = {
'string': '01:00:00:00',
'frame': str(self.timebase*3600),
'displayformat': 'NDF'
}
self.width = width
self.height = height
def append(self, clipitem):
        # add a single clip item to the timeline
assert isinstance(clipitem, ClipItem)
self.clipitems.append(clipitem)
def duration(self):
duration = 0
for clipitem in self.clipitems:
duration = duration + (clipitem.out_frame - clipitem.in_frame)
return duration
def export(self, output):
        # internal helper functions
class Id():
def __init__(self):
self.cnt = 0
def inc(self):
cnt = self.cnt
self.cnt = cnt + 1
return cnt
id = Id()
def dict2subelement(element, dict_arg):
for k, v in dict_arg.items():
if isinstance(v, list):
for l in v:
if isinstance(l, list):
assert False
elif isinstance(l, dict):
dict2subelement(ET.SubElement(element, k), l)
else:
if not isinstance(l, str):
if isinstance(l, bool):
l = str(l).lower()
else:
l = str(l)
ET.SubElement(element, k).text = l
elif isinstance(v, dict):
dict2subelement(ET.SubElement(element, k), v)
else:
if not isinstance(v, str):
if isinstance(v, bool):
v = str(v).lower()
else:
v = str(v)
ET.SubElement(element, k).text = v
def create_rate_under(parent):
rate = ET.SubElement(parent, 'rate')
ET.SubElement(rate, 'timebase').text = str(self.timebase)
ET.SubElement(rate, 'ntsc').text = str(self.is_ntsc).lower()
def create_timecode(parent):
timecode = ET.SubElement(parent, 'timecode')
ET.SubElement(timecode, 'string').text = self.timecode['string']
ET.SubElement(timecode, 'frame').text = self.timecode['frame']
ET.SubElement(timecode, 'displayformat').text = self.timecode['displayformat']
create_rate_under(timecode)
def filter_opacity(duration):
return {
'enabled': True,
'start': str(0),
'end': duration,
'effect':{
'name': 'Opacity',
'effectid': 'opacity',
'effecttype': 'motion',
'mediatype': 'video',
'effectcategory': 'motion',
'parameter': {
'name': 'opacity',
'parameterid': 'opacity',
'value': 100,
'valuemin': 0,
'valuemax': 100
}
}
}
def filter_motion(duration):
return {
'enabled': True,
'start': str(0),
'end': duration,
'effect':{
'name': 'Basic Motion',
'effectid': 'basic',
'effecttype': 'motion',
'mediatype': 'video',
'effectcategory': 'motion',
'parameter': [{
'name': 'Scale',
'parameterid': 'scale',
'value': 100,
'valuemin': 0,
'valuemax': 10000
},{
'name': 'Center',
'parameterid': 'center',
'value': {
'horiz' : 0,
'vert': 0
}
},{
'name': 'Rotation',
'parameterid': 'rotation',
'value': 0,
'valuemin': -100000,
'valuemax': 100000
},{
'name': 'Anchor Point',
'parameterid': 'centerOffset',
'value': {
'horiz' : 0,
'vert': 0
}
}]
}
}
def filter_crop(duration):
return {
'enabled': True,
'start': str(0),
'end': duration,
'effect':{
'name': 'Crop',
'effectid': 'crop',
'effecttype': 'motion',
'mediatype': 'video',
'effectcategory': 'motion',
'parameter': [{
'name': 'left',
'parameterid': 'left',
'value': 0,
'valuemin': 0,
'valuemax': 100
},{
'name': 'right',
'parameterid': 'right',
'value': 0,
'valuemin': 0,
'valuemax': 100
},{
'name': 'top',
'parameterid': 'top',
'value': 0,
'valuemin': 0,
'valuemax': 100
},{
'name': 'bottom',
'parameterid': 'bottom',
'value': 0,
'valuemin': 0,
'valuemax': 100
}]
}
}
def create_clipitem_audio(parent, item, start):
end = start + item.out_frame - item.in_frame
if (item.out_frame - item.in_frame) == 0:
return end
clipitem = ET.SubElement(parent, 'clipitem',{'id': item.name + ' ' + str(id.inc())})
ET.SubElement(clipitem, 'name').text = item.name
ET.SubElement(clipitem, 'duration').text = str(item.duration)
create_rate_under(clipitem)
ET.SubElement(clipitem, 'start').text = str(start)
ET.SubElement(clipitem, 'end').text = str(end)
ET.SubElement(clipitem, 'enabled').text = str(item.enable).lower()
ET.SubElement(clipitem, 'in').text = str(item.in_frame)
ET.SubElement(clipitem, 'out').text = str(item.out_frame)
#into file element
elm_file = ET.SubElement(clipitem, 'file',{'id': item.name + ' ' + str(id.inc())})
ET.SubElement(elm_file, 'duration').text = str(item.duration)
create_rate_under(elm_file)
ET.SubElement(elm_file, 'name').text = item.name
ET.SubElement(elm_file, 'pathurl').text = item.pathurl
timecode = ET.SubElement(elm_file, 'timecode')
dict2subelement(timecode, item.timecode)
create_rate_under(timecode)
media = ET.SubElement(elm_file, 'media')
video = ET.SubElement(media, 'video')
ET.SubElement(video, 'duration').text = str(item.duration)
samplechar = ET.SubElement(video, 'samplecharacteristics')
ET.SubElement(samplechar, 'width').text = str(item.width)
ET.SubElement(samplechar, 'height').text = str(item.height)
elm_sourcetrack = ET.SubElement(clipitem, 'sourcetrack')
ET.SubElement(elm_sourcetrack, 'mediatype').text = "audio"
ET.SubElement(elm_sourcetrack, 'trackindex').text = str(1)
return end
def create_clipitem(parent, item, start):
end = start + item.out_frame - item.in_frame
if (item.out_frame - item.in_frame) == 0:
return end
clipitem = ET.SubElement(parent, 'clipitem',{'id': item.name + ' ' + str(id.inc())})
ET.SubElement(clipitem, 'name').text = item.name
ET.SubElement(clipitem, 'duration').text = str(item.duration)
create_rate_under(clipitem)
ET.SubElement(clipitem, 'start').text = str(start)
ET.SubElement(clipitem, 'end').text = str(end)
ET.SubElement(clipitem, 'enabled').text = str(item.enable).lower()
ET.SubElement(clipitem, 'in').text = str(item.in_frame)
ET.SubElement(clipitem, 'out').text = str(item.out_frame)
#into file element
elm_file = ET.SubElement(clipitem, 'file',{'id': item.name + ' ' + str(id.inc())})
ET.SubElement(elm_file, 'duration').text = str(item.duration)
create_rate_under(elm_file)
ET.SubElement(elm_file, 'name').text = item.name
ET.SubElement(elm_file, 'pathurl').text = item.pathurl
timecode = ET.SubElement(elm_file, 'timecode')
dict2subelement(timecode, item.timecode)
create_rate_under(timecode)
media = ET.SubElement(elm_file, 'media')
video = ET.SubElement(media, 'video')
ET.SubElement(video, 'duration').text = str(item.duration)
samplechar = ET.SubElement(video, 'samplecharacteristics')
ET.SubElement(samplechar, 'width').text = str(item.width)
ET.SubElement(samplechar, 'height').text = str(item.height)
#return to clipitem element
ET.SubElement(clipitem, 'compositemode').text = str(item.compositemode)
#into filter element
dict2subelement(ET.SubElement(clipitem, 'filter'), filter_opacity(item.duration))
dict2subelement(ET.SubElement(clipitem, 'filter'), filter_motion(item.duration))
dict2subelement(ET.SubElement(clipitem, 'filter'), filter_crop(item.duration))
return end
        # XML output
elm = ET.Element('xmeml',{'version':'5'})
sequence = ET.SubElement(elm, 'sequence')
ET.SubElement(sequence, 'name').text = self.name
ET.SubElement(sequence, 'duration').text = str(self.duration())
create_rate_under(sequence)
ET.SubElement(sequence, 'in').text = self.in_time
ET.SubElement(sequence, 'out').text = self.out_time
create_timecode(sequence)
media = ET.SubElement(sequence, 'media')
video = ET.SubElement(media, 'video')
track = ET.SubElement(video, 'track')
frame_cnt = 0
for clipitem in self.clipitems:
frame_cnt = create_clipitem(track, clipitem, frame_cnt)
ET.SubElement(track, 'enabled').text = str(True).lower()
ET.SubElement(track, 'locked').text = str(False).lower()
elm_format = ET.SubElement(video, 'format')
samplechar = ET.SubElement(elm_format, 'samplecharacteristics')
ET.SubElement(samplechar, 'width').text = str(self.width)
ET.SubElement(samplechar, 'height').text = str(self.height)
ET.SubElement(samplechar, 'pixelaspectratio').text = 'square'
create_rate_under(samplechar)
elm_codec = ET.SubElement(samplechar, 'codec')
elm_appspecificdata = ET.SubElement(elm_codec, 'appspecificdata')
ET.SubElement(elm_appspecificdata, 'appname').text = 'Final Cut Pro'
ET.SubElement(elm_appspecificdata, 'appmanufacturer').text = 'Apple Inc.'
elm_data = ET.SubElement(elm_appspecificdata, 'data')
ET.SubElement(elm_data, 'qtcodec')
audio = ET.SubElement(media, 'audio')
track = ET.SubElement(audio, 'track')
frame_cnt = 0
for clipitem in self.clipitems:
frame_cnt = create_clipitem_audio(track, clipitem, frame_cnt)
ET.SubElement(track, 'enabled').text = str(True).lower()
ET.SubElement(track, 'locked').text = str(False).lower()
# output
with open(str(output), 'wb') as f:
f.write('<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE xmeml>'.encode('utf8'))
f.write(ET.tostring(elm,'unicode').encode('utf8'))
if __name__ == "__main__":
xml = FCPXML('name', 24, 1920, 1080)
xml.append(ClipItem('aaa','path',68,8,68,1920,1080))
xml.export(Path('./data/test-timeline.xml'))
|
StarcoderdataPython
|
8069155
|
import argparse
import os
import sys
from subprocess import Popen, PIPE
from pathlib import Path
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('root')
return parser.parse_args()
def get_values():
import xarray as xr
while True:
path = (yield)
item = [_ for _ in path.glob('*')].pop()
with xr.open_dataset(item) as ds:
if not ds['sftlf'].max() > 1.0:
print(f"Not in percent {path}")
def find_sftlf(path):
cmd = f'find {path} -type d -name "sftlf" '
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
while proc.poll() is None:
lines = proc.stdout.readlines()
for line in lines:
yield line.decode('utf-8').strip()
def filter_latest_version():
    # coroutine: receives a variable directory via send() and yields the
    # newest version directory under <path>/gr for every value sent
    latest = None
    while True:
        path = (yield latest)
        latest = sorted([Path(x) for x in Path(path, 'gr').glob('*')])[-1]
def main():
parsed_args = parse_args()
values = get_values()
next(values)
val_filter = filter_latest_version()
next(val_filter)
for path in find_sftlf(parsed_args.root):
p = val_filter.send(path)
if p is not None:
values.send(p)
return 0
if __name__ == "__main__":
sys.exit(main())
|
StarcoderdataPython
|
1980728
|
"""ImageProxy support."""
import base64
import dataclasses as dc
import hashlib
import hmac
from typing import Union, Optional, Sequence
from functools import partial
# py37
try:
from typing import Literal # type: ignore
except ImportError:
from typing_extensions import Literal # type: ignore
__version__ = '1.0.0'
@dc.dataclass
class ImgProxy:
"""ImageProxy URL."""
image_url: str
proxy_host: Optional[str] = None
key: Optional[str] = dc.field(default=None, repr=False)
salt: Optional[str] = dc.field(default=None, repr=False)
resizing_type: Literal['fit', 'fill', 'auto'] = 'auto'
width: int = 0
height: int = 0
gravity: Union[
Literal['no', 'so', 'ea', 'we', 'noea', 'nowe', 'soea', 'sowe', 'ce', 'sm'], str] = 'ce'
enlarge: bool = False
extension: str = ''
    advanced: Sequence[str] = dc.field(default_factory=list)
@classmethod
def factory(cls, *f_advanced: str, **f_params):
"""Generate ImgProxy objects."""
def factory(image_url: str, *advanced: str, **params):
kwargs = dict(f_params, **params)
kwargs['advanced'] = [*kwargs.get('advanced', []), *f_advanced, *advanced]
return cls(image_url, **kwargs)
return factory
def __post_init__(self):
"""Initialize signature options."""
try:
self.key: Union[bytes, Literal[False]] = self.key and bytes.fromhex(self.key)
self.salt: Union[bytes, Literal[False]] = self.salt and bytes.fromhex(self.salt)
except ValueError:
raise ValueError(f"Invalid signature parameters: {self.key}, {self.salt}")
def __str__(self) -> str:
"""Generate default URL."""
return self.__call__()
def __call__(self, *advanced: str, **options) -> str:
"""Generate an URL."""
b64_url = base64.urlsafe_b64encode(self.image_url.encode()).rstrip(b"=").decode()
path = "/{advanced}/g:{gravity}/rs:{resizing_type}:{width}:{height}:{enlarge}/{b64_url}{extension}".format( # noqa
b64_url=b64_url, advanced='/'.join([*self.advanced, *advanced]), **dict({
'resizing_type': self.resizing_type,
'width': self.width,
'height': self.height,
'gravity': self.gravity,
'enlarge': self.enlarge and '1' or '0',
'extension': self.extension and f".{self.extension}" or '',
}, **options)
).replace('//', '/')
signature = 'insecure'
if self.key and self.salt:
digest = hmac.new(
self.key, msg=self.salt + path.encode('utf-8'), # type: ignore
digestmod=hashlib.sha256).digest()
signature = base64.urlsafe_b64encode(digest).rstrip(b"=").decode()
path = f"/{signature}{path}"
if self.proxy_host:
return f"{self.proxy_host}{path}"
return path
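if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): the image URL and
    # proxy host below are placeholders, and no key/salt is configured, so the
    # generated URL carries the 'insecure' signature.
    proxy = ImgProxy(
        'https://example.com/cat.jpg',
        proxy_host='https://imgproxy.example.com',
        width=300,
        height=200,
        resizing_type='fill',
    )
    print(proxy())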
|
StarcoderdataPython
|
3491733
|
import pytest
import basix
import basix.ufl_wrapper
@pytest.mark.parametrize("inputs", [
("Lagrange", "triangle", 2),
("Lagrange", basix.CellType.triangle, 2),
(basix.ElementFamily.P, basix.CellType.triangle, 2),
(basix.ElementFamily.P, "triangle", 2),
])
def test_create_element(inputs):
basix.ufl_wrapper.create_element(*inputs)
@pytest.mark.parametrize("inputs", [
("Lagrange", "triangle", 2),
("Lagrange", basix.CellType.triangle, 2),
(basix.ElementFamily.P, basix.CellType.triangle, 2),
(basix.ElementFamily.P, "triangle", 2),
])
def test_create_vector_element(inputs):
basix.ufl_wrapper.create_vector_element(*inputs)
@pytest.mark.parametrize("inputs", [
("Lagrange", "triangle", 2),
("Lagrange", basix.CellType.triangle, 2),
(basix.ElementFamily.P, basix.CellType.triangle, 2),
(basix.ElementFamily.P, "triangle", 2),
])
def test_create_tensor_element(inputs):
basix.ufl_wrapper.create_tensor_element(*inputs)
|
StarcoderdataPython
|
3215276
|
<reponame>jiawenanan/Database
#!/usr/bin/env python
# coding: utf-8
# In[66]:
import pandas as pd
import numpy as np
import zipfile
prison = pd.read_csv('~/Desktop/Prison_Admissions__Beginning_2008.csv')
house = pd.read_csv('~/Desktop/County_zhvi_uc_sfrcondo_tier_0.33_0.67_sm_sa_mon.csv')
vpf = pd.read_csv('~/Desktop/Index__Violent__Property__and_Firearm_Rates_By_County__Beginning_1990.csv')
food = pd.read_csv('~/Desktop/Retail_Food_Stores.csv')
##house = house.rename(columns={"RegionID": "county_id", "SizeRank": "size_rank","RegionName": "county_name","RegionType": "region_type","": "county_name"})
import re
## function for extract the first word
def first_word(text: str) -> str:
return re.search("([\w']+)", text).group(1)
#---------------------------<houseprice>-------------------------------
##extract data for {houseprice} table
##transfer the time series as the value of column ['month_year']
old = ' County' ## we don't want "County" to appear in our ['county_name'] column
house = house[house['State'] == 'NY']
house['RegionName'] = house['RegionName'].apply(lambda x : x.replace(old, ''))
house.sort_values(by = 'SizeRank',axis = 0, ascending= True)
house.reset_index(drop=True, inplace=True)
time_index = house.columns[9:,]
column_countyid = []
column_time = []
column_price = []
for x in range(len(house)):
    for y in range(len(time_index)):
        column_countyid.append(house.iloc[x,0])
        column_time.append(time_index[y])
        column_price.append(house.iloc[x,9 + y])
temp = pd.DataFrame()
temp['county_id'] = column_countyid
temp['month_year'] = column_time
temp['price'] = column_price
temp['price'] = temp['price'].astype(int)
## temp is the data for table {houseprice} (18476 * 3)
houseprice = temp
houseprice.to_csv('~/Desktop/housepricetable.csv', index = False)
#---------------------------<county>-------------------------------
##extract data for {county} table
temp1 = pd.DataFrame()
temp1['county_id'] = house['RegionID']
temp1['county_name'] = house['RegionName']
temp1['metro'] = house['Metro']
temp1['statecodefips'] = house['StateCodeFIPS']
temp1['size_rank'] = house.index + 1
temp1['municipalcodefips'] = house['MunicipalCodeFIPS']
## we change all NaN values to 'Missing' according to our plan; county is the FK and cannot be null
temp1['metro'] = temp1['metro'].fillna('Missing')
temp1 = temp1.append([{'county_id':0,'county_name':'Missing','metro':'Missing','statecodefips':0,'size_rank': 0,'municipalcodefips':0}], ignore_index=True)
county = temp1
county.to_csv('~/Desktop/countytable.csv', index = False)
## the preprocessed dataset includes 62 rows, however, in the final dataset there will be 63 rows,
## the 63rd row is ['Missing']
## for further expanding, we store the U.S-wide statecodefips in a mongodb database
## this application is focusing on New York State
## for further mapping use, make a county --> county_id dictionary
county_id = county.county_id.tolist()
county_name = county.county_name.tolist()
county_id_name = {}
for name in county_name:
for i_d in county_id:
county_id_name[name] = i_d
county_id.remove(i_d)
break
#---------------------------<vpf>-------------------------------
## extract data for {vpf} table
## map county to county_id; before doing that, we noticed that table {county} uses 'Saint Lawrence', whereas the original vpf table has 'St Lawrence' or 'St. Lawrence'
## so we need to change every 'St Lawrence' / 'St. Lawrence' in the vpf table to 'Saint Lawrence'
vpf['County'].loc[(vpf['County'] == 'St Lawrence')] = 'Saint Lawrence'
vpf['County'].loc[(vpf['County'] == 'St. Lawrence')] = 'Saint Lawrence'
## Map to county_id(the primary key in {county} table)
vpf['County'] = vpf['County'].map(county_id_name)
vpf = vpf.rename(columns={"County": "county_id", "Year":"year_id", "Population": "population", "Index Count" : "index_count", "Index Rate":"index_rate", "Violent Count" :"violent_count", "Violent Rate" :"violent_rate","Property Count":"property_count","Property Rate":"property_rate","Firearm Count":"firearm_count","Firearm Rate":"firearm_rate"})
vpf['population'] = vpf['population'].astype(int)
vpf['firearm_count'] = vpf['firearm_count'].astype(pd.Int32Dtype())
vpf.to_csv('~/Desktop/vpftable.csv', index = False)
#---------------------------<prison>-------------------------------
## extract data for {prison} table
## ['Admission Month'] and ['Month Code'] carry the same information, without any explanation to users
## As there will be no data loss, we plan to drop ['Admission Month']
## 1) values in ['County of Commitment'] in the prison dataset are all capitalized; we convert them to title case to be consistent with the {county} table
## we can be sure that no data is lost by the above-mentioned transformation
columns = prison.columns.tolist()
string_columns = [1,3,4,5,6,8]
for x in string_columns:
prison[columns[x]] = prison[columns[x]].str.title()
## we change all NaN values in the ['County of Commitment'] column to 'Missing' according to our plan; county is the FK and cannot be null
prison['County of Commitment'] = prison['County of Commitment'].fillna('Missing')
prison = prison.drop(columns = ['Admission Month'])
## Assign case_id to each case as the pk
#prison['case_id'] = prison.index + 1
## change all 'St Lawrence' in prison table to be Saint Lawrance
prison['County of Commitment'].loc[(prison['County of Commitment'] == 'St Lawrence')] = 'Saint Lawrence'
prison['County of Commitment'].loc[(prison['County of Commitment'] == 'Brooklyn')] = 'New York'
prison['County of Commitment'].loc[(prison['County of Commitment'] == 'Manhattan')] = 'New York'
prison['County of Commitment'].loc[(prison['County of Commitment'] == 'Staten Island')] = 'New York'
prison['Last Known Residence County'] = prison['Last Known Residence County'].fillna('Missing')
prison['Last Known Residence County'].loc[(prison['Last Known Residence County'] == 'Richmond (Staten Island)')] = 'New York'
prison['Last Known Residence County'].loc[(prison['Last Known Residence County'] == 'New York (Manhattan)')] = 'New York'
prison['Last Known Residence County'].loc[(prison['Last Known Residence County'] == 'Kings (Brooklyn)')] = 'New York'
prison['Last Known Residence County'].loc[(prison['Last Known Residence County'] == 'Unknown')] = 'Missing'
prison['Last Known Residence County'].loc[(prison['Last Known Residence County'] == 'St Lawrence')] = 'Saint Lawrence'
prison['Last Known Residence County'].loc[(prison['Last Known Residence County'] == 'Out Of State')] = 'Missing'
## the data in ['Last Known Residence County'] differs somewhat from the 62 county names in New York State
## for example: Kings (Brooklyn), New York (Manhattan), Rensselaer, Seneca, Westchester
## one county maps to multiple 'Last Known Residence County' values, so this column behaves more like a city column
## we decided to extract the words in brackets, store all unique values in a list, and compare them with the city column in table {food}
#prison['Last Known Residence County'].apply(lambda x : y = re.findall('\((.*?)\)', x), unique_last_known_resi.append(y))
before_extract = prison['Last Known Residence County'].unique()
## create a new dataframe and drop the duplication to check the relationship between ['county'] & ['Last Known Residence County']
## the result is that, staten island, manhattan, and brooklyn, three city in newyork are only three value different from the county
## since both two columns talk about county, we eventually set above-mentioned value to be 'New York'
## as the same, do mapping to only reserve the county_id as fk
prison['County of Commitment'] = prison['County of Commitment'].map(county_id_name)
prison['Last Known Residence County'] = prison['Last Known Residence County'].map(county_id_name)
prison = prison.rename(columns={"Admission Year": "admission_year", "Month Code":"admission_month", "Admission Type": "admission_type", "County of Commitment" : "county_id_commitment", "Last Known Residence County":"county_id_last_known_residence", "Gender" :"gender", "Age at Admission" :" age_at_admission","Most Serious Crime":"most_serious_crime"})
prison.insert(0,'case_id',prison.index + 1)
prison['gender'].loc[(prison['gender'].isnull())] = 'Missing'
prison['gender'].loc[(prison['gender'] == 'Not Coded')] = 'Missing'
#len(prison['County of Commitment'].unique())
#prison['county_id_last_known_residence']
prison.to_csv('~/Desktop/prisontable.csv', index = False)
#---------------------------<food>-------------------------------
food['City'] = food['City'].str.title()
food['County'].loc[(food['County'] == 'St. Lawrence')] = 'Saint Lawrence'
## ['Location'] = ['Street Number'] + ['Street Name'] + ['latitude'] + ['longitude'] +['city'] +['Zip Code']
## in order to eliminate data redundancy, we decided to extract only latitude and longitude; all other data can be found in other columns
## result of data manipulation: 1558 unique zip_code, 1452 unique city, 1605 unique zipcode + county_id, 1797 unique zipcode + city, 1499 unique city + county_id
## after data manipulation, we noticed that even ['zipcode'] + ['city'] cannot determine the ['county'] for our food dataset
## the explanation we found on Google: some cities cross into five different counties, and as many as 20% of ZIP Codes cross county lines
Location = []
for x in range(0, len(food)):
    if pd.notna(food.iloc[x]['Street Number']):  # covers both None and NaN
        y = str(food.iloc[x]['Street Number']).strip()
        z = food.iloc[x]['Street Name'].strip()
        Location.append(y + ' ' + z)
    else:
        z = food.iloc[x]['Street Name'].strip()
        Location.append(z)
temp2 = pd.DataFrame()
temp2['address'] = Location
temp2['zip_code'] = food['Zip Code']
temp2['city'] = food['City']
temp2['county_id'] = food['County'].map(county_id_name)
temp2 = temp2.drop_duplicates(['address'])
## Extract ['address'] for {address} table and {food} without data loss
#---------------------------<address>-------------------------------
temp2.to_csv('~/Desktop/addresstable.csv', index = False)
## data in address is not unique; duplications exist. For example, a Starbucks inside a Walmart shares the same address as the Walmart
## drop above-mentioned columns without any data loss
food = food.drop(columns = ['County','Street Number', 'Street Name','Address Line 2','Address Line 3','City','State','Zip Code'])
pair= []
def subString(location_column):
for x in range(0, len(location_column)):
if isinstance(location_column[x], str):
y = re.findall(r'[(](.*?)[)]',location_column[x])
if len(y) != 0:
pair.append(y[0])
else:
pair.append(None)
else:
pair.append(None)
## extract the latitude and longitude from food['Location']
subString(food['Location'])
food['latitude_longitude'] = pair
## drop ['Location'] and there is no data loss
food = food.drop(columns = ['Location'])
## add our processed location data to food
food['address'] = Location
food = food.rename(columns={"License Number": "license_number", "Operation Type":"operation_type", "Establishment Type": "establishment_type", "Entity Name" : "entity_name", "DBA Name":"dba_name", "Square Footage" :"square_footage"})
food.to_csv('~/Desktop/foodtable.csv', index = False)
## after the data preprocessing, you should have six .csv files on your desktop
# In[ ]:
|
StarcoderdataPython
|
1806126
|
""" Platform entity for the GMG project """
import json
class Platform:
""" This class represent a platform (support), for instance "Playstation" """
def __init__(self, platform_id, platform_name, game_count):
self.platform_id = platform_id
self.platform_name = platform_name
self.game_count = game_count
def get_platform_id(self):
"""Return the id of the platform, for instance "3"."""
return self.platform_id
def get_name(self):
"""Return the name of the platform, for instance "Playstation 2"."""
return self.platform_name
def get_game_count(self):
"""Return qty of games on this platform."""
return self.game_count
def to_json(self):
"""Jsonify the object"""
return json.dumps(self, default=lambda o: o.__dict__)
def serialize(self):
"""serialize the object"""
return {
'platform_id': self.platform_id,
'platform_name': self.platform_name,
'game_count': self.game_count,
}
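if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module); the id, name and
    # game count below are made-up example values.
    ps2 = Platform(3, 'Playstation 2', 1500)
    print(ps2.serialize())  # plain dict representation
    print(ps2.to_json())    # JSON string built from the object's __dict__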
|
StarcoderdataPython
|
3535598
|
<gh_stars>0
"""
Given a non-empty string like "Code" return a string like "CCoCodCode".
string_splosion('Code') → 'CCoCodCode'
string_splosion('abc') → 'aababc'
string_splosion('ab') → 'aab'
"""
def string_splosion(str):
string = ""
for x in range(len(str)):
string += str[:x+1]
return string
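if __name__ == '__main__':
    # Quick self-check of the examples given in the docstring (this block is
    # not part of the original snippet).
    assert string_splosion('Code') == 'CCoCodCode'
    assert string_splosion('abc') == 'aababc'
    assert string_splosion('ab') == 'aab'
    print('all docstring examples pass')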
|
StarcoderdataPython
|
4843281
|
<gh_stars>10-100
# Write your code here
str1 = input()
vowels = ["A","E","I","O","U","Y"]
a = int(str1[1]) + int(str1[0])
b = int(str1[3]) + int(str1[4])
c = int(str1[4]) + int(str1[5])
d = int(str1[7]) + int(str1[8])
if(a%2 == 0 and b%2 == 0 and c%2 == 0 and d%2 == 0 and str1[2] not in vowels) :
print("valid")
else :
print("invalid")
|
StarcoderdataPython
|
9672108
|
<reponame>CheerL/lancunar
import torch
import torch.nn as nn
import torch.nn.functional as F
def passthrough(x, **kwargs):
return x
def ELUCons(elu, nchan):
if elu:
return nn.ELU(inplace=True)
else:
return nn.PReLU(nchan)
class LUConv(nn.Module):
def __init__(self, inChans, outChans, elu):
super(LUConv, self).__init__()
self.relu1 = ELUCons(elu, outChans)
self.conv1 = nn.Conv3d(inChans, outChans, kernel_size=3, padding=1)
self.bn1 = nn.InstanceNorm3d(outChans)
def forward(self, x):
out = self.relu1(self.bn1(self.conv1(x)))
return out
def _make_nConv(inChans, outChans, depth, elu):
layers = []
for _ in range(depth):
layers.append(LUConv(inChans, outChans, elu))
return nn.Sequential(*layers)
class InputTransition(nn.Module):
def __init__(self, outChans, elu):
super(InputTransition, self).__init__()
self.conv1 = nn.Conv3d(1, 32, kernel_size=3, padding=1)
self.bn1 = nn.InstanceNorm3d(32)
self.relu1 = ELUCons(elu, 32)
self.conv2 = nn.Conv3d(32, 32, kernel_size=3, padding=1)
self.bn2 = nn.InstanceNorm3d(32)
self.relu2 = ELUCons(elu, 32)
def forward(self, x):
out = self.relu1(self.bn1(self.conv1(x)))
out = self.relu2(self.bn2(self.conv2(out)))
return out
class DownTransition(nn.Module):
def __init__(self, inChans, outChans, nConvs, elu, dropout=False):
super(DownTransition, self).__init__()
self.down_conv = nn.Conv3d(inChans, outChans, kernel_size=2, stride=2)
self.bn1 = nn.InstanceNorm3d(outChans)
self.relu1 = ELUCons(elu, outChans)
self.ops = _make_nConv(outChans, outChans, nConvs, elu)
def forward(self, x):
down = self.relu1(self.bn1(self.down_conv(x)))
out = self.ops(down)
return out
class UpTransition(nn.Module):
def __init__(self, inChans, outChans, nConvs, elu, dropout=False):
super(UpTransition, self).__init__()
self.up_conv = nn.ConvTranspose3d(inChans, outChans, kernel_size=2, stride=2)
self.bn1 = nn.InstanceNorm3d(outChans)
self.relu1 = ELUCons(elu, outChans)
self.ops1 = LUConv(outChans*2, outChans, elu)
self.ops2 = _make_nConv(outChans, outChans, nConvs-1, elu)
def forward(self, x, skipx):
out = self.relu1(self.bn1(self.up_conv(x)))
xcat = torch.cat((out, skipx), 1)
out = self.ops1(xcat)
out = self.ops2(out)
return out
class OutputTransition(nn.Module):
def __init__(self, inChans, elu, nll):
super(OutputTransition, self).__init__()
self.conv1 = nn.Conv3d(inChans, 2, kernel_size=1, padding=0)
self.softmax = F.softmax
def forward(self, x):
# convolve 32 down to 2 channels
out = self.conv1(x)
# make channels the last axis
out = out.permute(0, 2, 3, 4, 1).contiguous()
# flatten
out = out.view(out.size(0), -1, 2)
out = self.softmax(out, dim=2)
# treat channel 0 as the predicted output
return out
class VNet(nn.Module):
# the number of convolutions in each layer corresponds
# to what is in the actual prototxt, not the intent
def __init__(self, elu=False, nll=True):
super(VNet, self).__init__()
self.in_tr = InputTransition(32, elu)
self.down_tr64 = DownTransition(32, 64, 2, elu)
self.down_tr128 = DownTransition(64, 128, 2, elu)
self.up_tr64 = UpTransition(128, 64, 2, elu)
self.up_tr32 = UpTransition(64, 32, 2, elu)
self.out_tr = OutputTransition(32, elu, nll)
def forward(self, x):
out32 = self.in_tr(x)
out64 = self.down_tr64(out32)
out128 = self.down_tr128(out64)
out = self.up_tr64(out128, out64)
out = self.up_tr32(out, out32)
out = self.out_tr(out)
return out
@staticmethod
def dice_loss(pred, target):
smooth = 0.001
pred_flat = pred[:, :, 1].view(pred.size(0), -1)
target = target.float()
intersection = pred_flat * target
loss = (2 * intersection.sum(1) + smooth) / (pred_flat.pow(2).sum(1) + target.pow(2).sum(1) + smooth)
return (1 - loss).mean()
@staticmethod
def dice_similarity_coefficient(pred, target):
pass
@staticmethod
def sensitivity(pred, target):
pass
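if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): run a forward pass
    # on a random volume to illustrate the expected tensor shapes. The 32x32x32
    # patch size below is an assumption, not taken from the source.
    net = VNet(elu=True)
    dummy = torch.randn(1, 1, 32, 32, 32)  # (batch, channels, depth, height, width)
    with torch.no_grad():
        probs = net(dummy)  # (batch, depth*height*width, 2) softmax probabilities
    print(probs.shape)  # expected: torch.Size([1, 32768, 2])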
|
StarcoderdataPython
|
5196728
|
<filename>testsuite/Testlib/TestServer/TestPlugin.py
import os
import re
import sys
import copy
import logging
import lxml.etree
import Bcfg2.Server
from Bcfg2.Bcfg2Py3k import reduce
from mock import Mock, MagicMock, patch
from Bcfg2.Server.Plugin import *
# add all parent testsuite directories to sys.path to allow (most)
# relative imports in python 2.4
path = os.path.dirname(__file__)
while path != '/':
if os.path.basename(path).lower().startswith("test"):
sys.path.append(path)
if os.path.basename(path) == "testsuite":
break
path = os.path.dirname(path)
from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
patchIf, datastore
try:
re_type = re._pattern_type
except AttributeError:
re_type = type(re.compile(""))
def tostring(el):
return lxml.etree.tostring(el, xml_declaration=False).decode('UTF-8')
class FakeElementTree(lxml.etree._ElementTree):
xinclude = Mock()
class TestFunctions(Bcfg2TestCase):
def test_bind_info(self):
entry = lxml.etree.Element("Path", name="/test")
metadata = Mock()
default = dict(test1="test1", test2="test2")
# test without infoxml
bind_info(entry, metadata, default=default)
self.assertItemsEqual(entry.attrib,
dict(test1="test1",
test2="test2",
name="/test"))
# test with bogus infoxml
entry = lxml.etree.Element("Path", name="/test")
infoxml = Mock()
self.assertRaises(PluginExecutionError,
bind_info,
entry, metadata, infoxml=infoxml)
infoxml.pnode.Match.assert_called_with(metadata, dict(), entry=entry)
# test with valid infoxml
entry = lxml.etree.Element("Path", name="/test")
infoxml.reset_mock()
infodata = {None: {"test3": "test3", "test4": "test4"}}
def infoxml_rv(metadata, rv, entry=None):
rv['Info'] = infodata
infoxml.pnode.Match.side_effect = infoxml_rv
bind_info(entry, metadata, infoxml=infoxml, default=default)
# mock objects don't properly track the called-with value of
# arguments whose value is changed by the function, so it
# thinks Match() was called with the final value of the mdata
# arg, not the initial value. makes this test a little less
# worthwhile, TBH.
infoxml.pnode.Match.assert_called_with(metadata, dict(Info=infodata),
entry=entry)
self.assertItemsEqual(entry.attrib,
dict(test1="test1",
test2="test2",
test3="test3",
test4="test4",
name="/test"))
class TestPluginInitError(Bcfg2TestCase):
""" placeholder for future tests """
pass
class TestPluginExecutionError(Bcfg2TestCase):
""" placeholder for future tests """
pass
class TestDebuggable(Bcfg2TestCase):
test_obj = Debuggable
def get_obj(self):
return self.test_obj()
def test__init(self):
d = self.get_obj()
self.assertIsInstance(d.logger, logging.Logger)
self.assertFalse(d.debug_flag)
@patch("Bcfg2.Server.Plugin.%s.debug_log" % test_obj.__name__)
def test_toggle_debug(self, mock_debug):
d = self.get_obj()
orig = d.debug_flag
d.toggle_debug()
self.assertNotEqual(orig, d.debug_flag)
self.assertTrue(mock_debug.called)
mock_debug.reset_mock()
changed = d.debug_flag
d.toggle_debug()
self.assertNotEqual(changed, d.debug_flag)
self.assertEqual(orig, d.debug_flag)
self.assertTrue(mock_debug.called)
def test_debug_log(self):
d = self.get_obj()
d.logger = Mock()
d.debug_flag = False
d.debug_log("test")
self.assertFalse(d.logger.error.called)
d.logger.reset_mock()
d.debug_log("test", flag=True)
self.assertTrue(d.logger.error.called)
d.logger.reset_mock()
d.debug_flag = True
d.debug_log("test")
self.assertTrue(d.logger.error.called)
class TestPlugin(TestDebuggable):
test_obj = Plugin
def get_obj(self, core=None):
if core is None:
core = Mock()
return self.test_obj(core, datastore)
def test__init(self):
core = Mock()
p = self.get_obj(core=core)
self.assertEqual(p.data, os.path.join(datastore, p.name))
self.assertEqual(p.core, core)
self.assertIsInstance(p, Debuggable)
@patch("os.makedirs")
def test_init_repo(self, mock_makedirs):
self.test_obj.init_repo(datastore)
mock_makedirs.assert_called_with(os.path.join(datastore,
self.test_obj.name))
class TestDatabaseBacked(TestPlugin):
test_obj = DatabaseBacked
@skipUnless(has_django, "Django not found")
def test__use_db(self):
core = Mock()
core.setup.cfp.getboolean.return_value = True
db = self.get_obj(core)
self.assertTrue(db._use_db)
core = Mock()
core.setup.cfp.getboolean.return_value = False
db = self.get_obj(core)
self.assertFalse(db._use_db)
Bcfg2.Server.Plugin.has_django = False
core = Mock()
db = self.get_obj(core)
self.assertFalse(db._use_db)
core = Mock()
core.setup.cfp.getboolean.return_value = True
db = self.get_obj(core)
self.assertFalse(db._use_db)
Bcfg2.Server.Plugin.has_django = True
class TestPluginDatabaseModel(Bcfg2TestCase):
""" placeholder for future tests """
pass
class TestGenerator(Bcfg2TestCase):
test_obj = Generator
class TestStructure(Bcfg2TestCase):
test_obj = Structure
def get_obj(self):
return self.test_obj()
def test_BuildStructures(self):
s = self.get_obj()
self.assertRaises(NotImplementedError,
s.BuildStructures, None)
class TestMetadata(Bcfg2TestCase):
test_obj = Metadata
def get_obj(self):
return self.test_obj()
def test_get_initial_metadata(self):
m = self.get_obj()
self.assertRaises(NotImplementedError,
m.get_initial_metadata, None)
def test_merge_additional_data(self):
m = self.get_obj()
self.assertRaises(NotImplementedError,
m.merge_additional_data, None, None, None)
def test_merge_additional_groups(self):
m = self.get_obj()
self.assertRaises(NotImplementedError,
m.merge_additional_groups, None, None)
class TestConnector(Bcfg2TestCase):
""" placeholder """
pass
class TestProbing(Bcfg2TestCase):
""" placeholder """
pass
class TestStatistics(TestPlugin):
test_obj = Statistics
class TestThreadedStatistics(TestStatistics):
test_obj = ThreadedStatistics
data = [("foo.example.com", "<foo/>"),
("bar.example.com", "<bar/>")]
@patch("threading.Thread.start")
def test__init(self, mock_start):
core = Mock()
ts = self.get_obj(core)
mock_start.assert_any_call()
@patch("%s.open" % builtins)
@patch("%s.dump" % cPickle.__name__)
@patch("Bcfg2.Server.Plugin.ThreadedStatistics.run", Mock())
def test_save(self, mock_dump, mock_open):
core = Mock()
ts = self.get_obj(core)
queue = Mock()
queue.empty = Mock(side_effect=Empty)
ts.work_queue = queue
mock_open.side_effect = OSError
# test that save does _not_ raise an exception even when
# everything goes pear-shaped
ts.save()
queue.empty.assert_any_call()
mock_open.assert_called_with(ts.pending_file, 'w')
queue.reset_mock()
mock_open.reset_mock()
queue.data = []
for hostname, xml in self.data:
md = Mock()
md.hostname = hostname
queue.data.append((md, lxml.etree.XML(xml)))
queue.empty.side_effect = lambda: len(queue.data) == 0
queue.get_nowait = Mock(side_effect=lambda: queue.data.pop())
mock_open.side_effect = None
ts.save()
queue.empty.assert_any_call()
queue.get_nowait.assert_any_call()
mock_open.assert_called_with(ts.pending_file, 'w')
mock_open.return_value.close.assert_any_call()
# the order of the queue data gets changed, so we have to
# verify this call in an ugly way
self.assertItemsEqual(mock_dump.call_args[0][0], self.data)
self.assertEqual(mock_dump.call_args[0][1], mock_open.return_value)
@patch("os.unlink")
@patch("os.path.exists")
@patch("%s.open" % builtins)
@patch("lxml.etree.XML")
@patch("%s.load" % cPickle.__name__)
@patch("Bcfg2.Server.Plugin.ThreadedStatistics.run", Mock())
def test_load(self, mock_load, mock_XML, mock_open, mock_exists,
mock_unlink):
core = Mock()
core.terminate.isSet.return_value = False
ts = self.get_obj(core)
ts.work_queue = Mock()
ts.work_queue.data = []
def reset():
core.reset_mock()
mock_open.reset_mock()
mock_exists.reset_mock()
mock_unlink.reset_mock()
mock_load.reset_mock()
mock_XML.reset_mock()
ts.work_queue.reset_mock()
ts.work_queue.data = []
mock_exists.return_value = False
self.assertTrue(ts.load())
mock_exists.assert_called_with(ts.pending_file)
reset()
mock_exists.return_value = True
mock_open.side_effect = OSError
self.assertFalse(ts.load())
mock_exists.assert_called_with(ts.pending_file)
mock_open.assert_called_with(ts.pending_file, 'r')
reset()
mock_open.side_effect = None
mock_load.return_value = self.data
ts.work_queue.put_nowait.side_effect = Full
self.assertTrue(ts.load())
mock_exists.assert_called_with(ts.pending_file)
mock_open.assert_called_with(ts.pending_file, 'r')
mock_open.return_value.close.assert_any_call()
mock_load.assert_called_with(mock_open.return_value)
reset()
core.build_metadata.side_effect = lambda x: x
mock_XML.side_effect = lambda x, parser=None: x
ts.work_queue.put_nowait.side_effect = None
self.assertTrue(ts.load())
mock_exists.assert_called_with(ts.pending_file)
mock_open.assert_called_with(ts.pending_file, 'r')
mock_open.return_value.close.assert_any_call()
mock_load.assert_called_with(mock_open.return_value)
self.assertItemsEqual(mock_XML.call_args_list,
[call(x, parser=Bcfg2.Server.XMLParser)
for h, x in self.data])
self.assertItemsEqual(ts.work_queue.put_nowait.call_args_list,
[call((h, x)) for h, x in self.data])
mock_unlink.assert_called_with(ts.pending_file)
@patch("threading.Thread.start", Mock())
@patch("Bcfg2.Server.Plugin.ThreadedStatistics.load")
@patch("Bcfg2.Server.Plugin.ThreadedStatistics.save")
@patch("Bcfg2.Server.Plugin.ThreadedStatistics.handle_statistic")
def test_run(self, mock_handle, mock_save, mock_load):
core = Mock()
ts = self.get_obj(core)
mock_load.return_value = True
ts.work_queue = Mock()
def reset():
mock_handle.reset_mock()
mock_save.reset_mock()
mock_load.reset_mock()
core.reset_mock()
ts.work_queue.reset_mock()
ts.work_queue.data = self.data[:]
ts.work_queue.get_calls = 0
reset()
def get_rv(**kwargs):
ts.work_queue.get_calls += 1
try:
return ts.work_queue.data.pop()
except:
raise Empty
ts.work_queue.get.side_effect = get_rv
def terminate_isset():
# this lets the loop go on a few iterations with an empty
# queue to test that it doesn't error out
return ts.work_queue.get_calls > 3
core.terminate.isSet.side_effect = terminate_isset
ts.work_queue.empty.return_value = False
ts.run()
mock_load.assert_any_call()
self.assertGreaterEqual(ts.work_queue.get.call_count, len(self.data))
self.assertItemsEqual(mock_handle.call_args_list,
[call(h, x) for h, x in self.data])
mock_save.assert_any_call()
@patch("copy.copy", Mock(side_effect=lambda x: x))
@patch("Bcfg2.Server.Plugin.ThreadedStatistics.run", Mock())
def test_process_statistics(self):
core = Mock()
ts = self.get_obj(core)
ts.work_queue = Mock()
ts.process_statistics(*self.data[0])
ts.work_queue.put_nowait.assert_called_with(self.data[0])
ts.work_queue.reset_mock()
ts.work_queue.put_nowait.side_effect = Full
# test that no exception is thrown
ts.process_statistics(*self.data[0])
class TestPullSource(Bcfg2TestCase):
def test_GetCurrentEntry(self):
ps = PullSource()
self.assertRaises(NotImplementedError,
ps.GetCurrentEntry, None, None, None)
class TestPullTarget(Bcfg2TestCase):
def test_AcceptChoices(self):
pt = PullTarget()
self.assertRaises(NotImplementedError,
pt.AcceptChoices, None, None)
def test_AcceptPullData(self):
pt = PullTarget()
self.assertRaises(NotImplementedError,
pt.AcceptPullData, None, None, None)
class TestDecision(Bcfg2TestCase):
""" placeholder for future tests """
pass
class TestValidationError(Bcfg2TestCase):
""" placeholder for future tests """
pass
class TestStructureValidator(Bcfg2TestCase):
def test_validate_structures(self):
sv = StructureValidator()
self.assertRaises(NotImplementedError,
sv.validate_structures, None, None)
class TestGoalValidator(Bcfg2TestCase):
def test_validate_goals(self):
gv = GoalValidator()
self.assertRaises(NotImplementedError,
gv.validate_goals, None, None)
class TestVersion(Bcfg2TestCase):
""" placeholder for future tests """
pass
class TestClientRunHooks(Bcfg2TestCase):
""" placeholder for future tests """
pass
class TestFileBacked(Bcfg2TestCase):
test_obj = FileBacked
def get_obj(self, path=datastore, fam=None):
return self.test_obj(path, fam=fam)
@patch("%s.open" % builtins)
def test_HandleEvent(self, mock_open):
path = "/test"
fb = self.get_obj(path)
fb.Index = Mock()
def reset():
fb.Index.reset_mock()
mock_open.reset_mock()
for evt in ["exists", "changed", "created"]:
reset()
event = Mock()
event.code2str.return_value = evt
fb.HandleEvent(event)
mock_open.assert_called_with(path)
mock_open.return_value.read.assert_any_call()
fb.Index.assert_any_call()
reset()
event = Mock()
event.code2str.return_value = "endExist"
fb.HandleEvent(event)
self.assertFalse(mock_open.called)
self.assertFalse(fb.Index.called)
class TestDirectoryBacked(Bcfg2TestCase):
test_obj = DirectoryBacked
testpaths = {1: '',
2: '/foo',
3: '/foo/bar',
4: '/foo/bar/baz',
5: 'quux',
6: 'xyzzy/',
7: 'xyzzy/plugh/'}
def test_child_interface(self):
# ensure that the child object has the correct interface
self.assertTrue(hasattr(self.test_obj.__child__, "HandleEvent"))
@patch("Bcfg2.Server.Plugin.%s.add_directory_monitor" % test_obj.__name__,
Mock())
def get_obj(self, fam=None):
if fam is None:
fam = Mock()
return self.test_obj(os.path.join(datastore, self.test_obj.__name__),
fam)
@patch("Bcfg2.Server.Plugin.%s.add_directory_monitor" % test_obj.__name__)
def test__init(self, mock_add_monitor):
db = self.test_obj(datastore, Mock())
mock_add_monitor.assert_called_with('')
def test__getitem(self):
db = self.get_obj()
db.entries.update(dict(a=1, b=2, c=3))
self.assertEqual(db['a'], 1)
self.assertEqual(db['b'], 2)
expected = KeyError
try:
db['d']
except expected:
pass
except:
err = sys.exc_info()[1]
self.assertFalse(True, "%s raised instead of %s" %
(err.__class__.__name__,
                                      expected.__name__))
else:
self.assertFalse(True,
"%s not raised" % expected.__class__.__name__)
def test__iter(self):
db = self.get_obj()
db.entries.update(dict(a=1, b=2, c=3))
self.assertEqual([i for i in db],
[i for i in db.entries.items()])
@patch("os.path.isdir")
def test_add_directory_monitor(self, mock_isdir):
db = self.get_obj()
db.fam = Mock()
db.fam.rv = 0
def reset():
db.fam.rv += 1
db.fam.AddMonitor.return_value = db.fam.rv
db.fam.reset_mock()
mock_isdir.reset_mock()
mock_isdir.return_value = True
for path in self.testpaths.values():
reset()
db.add_directory_monitor(path)
db.fam.AddMonitor.assert_called_with(os.path.join(db.data, path),
db)
self.assertIn(db.fam.rv, db.handles)
self.assertEqual(db.handles[db.fam.rv], path)
reset()
# test duplicate adds
for path in self.testpaths.values():
reset()
db.add_directory_monitor(path)
self.assertFalse(db.fam.AddMonitor.called)
reset()
mock_isdir.return_value = False
db.add_directory_monitor('bogus')
self.assertFalse(db.fam.AddMonitor.called)
self.assertNotIn(db.fam.rv, db.handles)
def test_add_entry(self):
db = self.get_obj()
db.fam = Mock()
class MockChild(Mock):
def __init__(self, path, fam, **kwargs):
Mock.__init__(self, **kwargs)
self.path = path
self.fam = fam
self.HandleEvent = Mock()
db.__child__ = MockChild
for path in self.testpaths.values():
event = Mock()
db.add_entry(path, event)
self.assertIn(path, db.entries)
self.assertEqual(db.entries[path].path,
os.path.join(db.data, path))
self.assertEqual(db.entries[path].fam, db.fam)
db.entries[path].HandleEvent.assert_called_with(event)
@patch("os.path.isdir")
@patch("Bcfg2.Server.Plugin.%s.add_entry" % test_obj.__name__)
@patch("Bcfg2.Server.Plugin.%s.add_directory_monitor" % test_obj.__name__)
def test_HandleEvent(self, mock_add_monitor, mock_add_entry, mock_isdir):
db = self.get_obj()
# a path with a leading / should never get into
# DirectoryBacked.handles, so strip that test case
for rid, path in self.testpaths.items():
path = path.lstrip('/')
db.handles[rid] = path
def reset():
mock_isdir.reset_mock()
mock_add_entry.reset_mock()
mock_add_monitor.reset_mock()
def get_event(filename, action, requestID):
event = Mock()
event.code2str.return_value = action
event.filename = filename
event.requestID = requestID
return event
# test events on the data directory itself
reset()
mock_isdir.return_value = True
event = get_event(db.data, "exists", 1)
db.HandleEvent(event)
mock_add_monitor.assert_called_with("")
# test events on paths that aren't handled
reset()
mock_isdir.return_value = False
event = get_event('/foo', 'created', max(self.testpaths.keys()) + 1)
db.HandleEvent(event)
self.assertFalse(mock_add_monitor.called)
self.assertFalse(mock_add_entry.called)
for req_id, path in self.testpaths.items():
# a path with a leading / should never get into
# DirectoryBacked.handles, so strip that test case
path = path.lstrip('/')
basepath = os.path.join(datastore, path)
for fname in ['foo', 'bar/baz.txt', 'plugh.py']:
relpath = os.path.join(path, fname)
abspath = os.path.join(basepath, fname)
# test endExist does nothing
reset()
event = get_event(fname, 'endExist', req_id)
db.HandleEvent(event)
self.assertFalse(mock_add_monitor.called)
self.assertFalse(mock_add_entry.called)
mock_isdir.return_value = True
for evt in ["created", "exists", "changed"]:
# test that creating or changing a directory works
reset()
event = get_event(fname, evt, req_id)
db.HandleEvent(event)
mock_add_monitor.assert_called_with(relpath)
self.assertFalse(mock_add_entry.called)
mock_isdir.return_value = False
for evt in ["created", "exists"]:
# test that creating a file works
reset()
event = get_event(fname, evt, req_id)
db.HandleEvent(event)
mock_add_entry.assert_called_with(relpath, event)
self.assertFalse(mock_add_monitor.called)
db.entries[relpath] = MagicMock()
# test that changing a file that already exists works
reset()
event = get_event(fname, "changed", req_id)
db.HandleEvent(event)
db.entries[relpath].HandleEvent.assert_called_with(event)
self.assertFalse(mock_add_monitor.called)
self.assertFalse(mock_add_entry.called)
# test that deleting an entry works
reset()
event = get_event(fname, "deleted", req_id)
db.HandleEvent(event)
self.assertNotIn(relpath, db.entries)
# test that changing a file that doesn't exist works
reset()
event = get_event(fname, "changed", req_id)
db.HandleEvent(event)
mock_add_entry.assert_called_with(relpath, event)
self.assertFalse(mock_add_monitor.called)
db.entries[relpath] = MagicMock()
# test that deleting a directory works. this is a little
# strange because the _parent_ directory has to handle the
# deletion
reset()
event = get_event('quux', "deleted", 1)
db.HandleEvent(event)
for key in db.entries.keys():
self.assertFalse(key.startswith('quux'))
class TestXMLFileBacked(TestFileBacked):
test_obj = XMLFileBacked
def get_obj(self, path=datastore, fam=None, should_monitor=False):
return self.test_obj(path, fam=fam, should_monitor=should_monitor)
def test__init(self):
fam = Mock()
fname = "/test"
xfb = self.get_obj(fname)
self.assertIsNone(xfb.fam)
xfb = self.get_obj(fname, fam=fam)
self.assertFalse(fam.AddMonitor.called)
fam.reset_mock()
xfb = self.get_obj(fname, fam=fam, should_monitor=True)
fam.AddMonitor.assert_called_with(fname, xfb)
@patch("os.path.exists")
@patch("lxml.etree.parse")
def test_follow_xincludes(self, mock_parse, mock_exists):
fname = "/test/test1.xml"
xfb = self.get_obj(fname)
xfb.add_monitor = Mock()
def reset():
xfb.add_monitor.reset_mock()
mock_parse.reset_mock()
mock_exists.reset_mock()
xfb.extras = []
mock_exists.return_value = True
xdata = dict()
mock_parse.side_effect = lambda p: xdata[p]
# basic functionality
xdata['/test/test2.xml'] = lxml.etree.Element("Test").getroottree()
xfb._follow_xincludes(xdata=xdata['/test/test2.xml'])
self.assertFalse(xfb.add_monitor.called)
if (not hasattr(self.test_obj, "xdata") or
not isinstance(self.test_obj.xdata, property)):
# if xdata is settable, test that method of getting data
# to _follow_xincludes
reset()
xfb.xdata = xdata['/test/test2.xml'].getroot()
xfb._follow_xincludes()
self.assertFalse(xfb.add_monitor.called)
xfb.xdata = None
reset()
xfb._follow_xincludes(fname="/test/test2.xml")
self.assertFalse(xfb.add_monitor.called)
# test one level of xinclude
xdata[fname] = lxml.etree.Element("Test").getroottree()
lxml.etree.SubElement(xdata[fname].getroot(),
Bcfg2.Server.XI_NAMESPACE + "include",
href="/test/test2.xml")
reset()
xfb._follow_xincludes(fname=fname)
xfb.add_monitor.assert_called_with("/test/test2.xml")
self.assertItemsEqual(mock_parse.call_args_list,
[call(f) for f in xdata.keys()])
mock_exists.assert_called_with("/test/test2.xml")
reset()
xfb._follow_xincludes(xdata=xdata[fname])
xfb.add_monitor.assert_called_with("/test/test2.xml")
self.assertItemsEqual(mock_parse.call_args_list,
[call(f) for f in xdata.keys()
if f != fname])
mock_exists.assert_called_with("/test/test2.xml")
# test two-deep level of xinclude, with some files in another
# directory
xdata["/test/test3.xml"] = \
lxml.etree.Element("Test").getroottree()
lxml.etree.SubElement(xdata["/test/test3.xml"].getroot(),
Bcfg2.Server.XI_NAMESPACE + "include",
href="/test/test_dir/test4.xml")
xdata["/test/test_dir/test4.xml"] = \
lxml.etree.Element("Test").getroottree()
lxml.etree.SubElement(xdata["/test/test_dir/test4.xml"].getroot(),
Bcfg2.Server.XI_NAMESPACE + "include",
href="/test/test_dir/test5.xml")
xdata['/test/test_dir/test5.xml'] = \
lxml.etree.Element("Test").getroottree()
xdata['/test/test_dir/test6.xml'] = \
lxml.etree.Element("Test").getroottree()
# relative includes
lxml.etree.SubElement(xdata[fname].getroot(),
Bcfg2.Server.XI_NAMESPACE + "include",
href="test3.xml")
lxml.etree.SubElement(xdata["/test/test3.xml"].getroot(),
Bcfg2.Server.XI_NAMESPACE + "include",
href="test_dir/test6.xml")
reset()
xfb._follow_xincludes(fname=fname)
self.assertItemsEqual(xfb.add_monitor.call_args_list,
[call(f) for f in xdata.keys() if f != fname])
self.assertItemsEqual(mock_parse.call_args_list,
[call(f) for f in xdata.keys()])
self.assertItemsEqual(mock_exists.call_args_list,
[call(f) for f in xdata.keys() if f != fname])
reset()
xfb._follow_xincludes(xdata=xdata[fname])
self.assertItemsEqual(xfb.add_monitor.call_args_list,
[call(f) for f in xdata.keys() if f != fname])
self.assertItemsEqual(mock_parse.call_args_list,
[call(f) for f in xdata.keys() if f != fname])
self.assertItemsEqual(mock_exists.call_args_list,
[call(f) for f in xdata.keys() if f != fname])
@patch("lxml.etree._ElementTree", FakeElementTree)
@patch("Bcfg2.Server.Plugin.%s._follow_xincludes" % test_obj.__name__)
def test_Index(self, mock_follow):
fname = "/test/test1.xml"
xfb = self.get_obj(fname)
def reset():
mock_follow.reset_mock()
FakeElementTree.xinclude.reset_mock()
xfb.extras = []
xfb.xdata = None
# syntax error
xfb.data = "<"
self.assertRaises(PluginInitError, xfb.Index)
# no xinclude
reset()
xdata = lxml.etree.Element("Test", name="test")
children = [lxml.etree.SubElement(xdata, "Foo"),
lxml.etree.SubElement(xdata, "Bar", name="bar")]
xfb.data = tostring(xdata)
xfb.Index()
mock_follow.assert_any_call()
try:
self.assertEqual(xfb.xdata.base, fname)
except AttributeError:
# python 2.4 and/or lxml 2.0 don't store the base_url in
# .base -- no idea where it's stored.
pass
self.assertItemsEqual([tostring(e) for e in xfb.entries],
[tostring(e) for e in children])
# with xincludes
reset()
mock_follow.side_effect = \
lambda: xfb.extras.extend(["/test/test2.xml",
"/test/test_dir/test3.xml"])
children.extend([
lxml.etree.SubElement(xdata,
Bcfg2.Server.XI_NAMESPACE + "include",
href="/test/test2.xml"),
lxml.etree.SubElement(xdata,
Bcfg2.Server.XI_NAMESPACE + "include",
href="/test/test_dir/test3.xml")])
test2 = lxml.etree.Element("Test", name="test2")
lxml.etree.SubElement(test2, "Baz")
test3 = lxml.etree.Element("Test", name="test3")
replacements = {"/test/test2.xml": test2,
"/test/test_dir/test3.xml": test3}
def xinclude():
for el in xfb.xdata.findall('//%sinclude' %
Bcfg2.Server.XI_NAMESPACE):
xfb.xdata.replace(el, replacements[el.get("href")])
FakeElementTree.xinclude.side_effect = xinclude
xfb.data = tostring(xdata)
xfb.Index()
mock_follow.assert_any_call()
        FakeElementTree.xinclude.assert_any_call()
try:
self.assertEqual(xfb.xdata.base, fname)
except AttributeError:
pass
self.assertItemsEqual([tostring(e) for e in xfb.entries],
[tostring(e) for e in children])
def test_add_monitor(self):
fname = "/test/test1.xml"
xfb = self.get_obj(fname)
xfb.add_monitor("/test/test2.xml")
self.assertIn("/test/test2.xml", xfb.extras)
fam = Mock()
xfb = self.get_obj(fname, fam=fam)
fam.reset_mock()
xfb.add_monitor("/test/test3.xml")
self.assertFalse(fam.AddMonitor.called)
self.assertIn("/test/test3.xml", xfb.extras)
fam.reset_mock()
xfb = self.get_obj(fname, fam=fam, should_monitor=True)
xfb.add_monitor("/test/test4.xml")
fam.AddMonitor.assert_called_with("/test/test4.xml", xfb)
self.assertIn("/test/test4.xml", xfb.extras)
class TestStructFile(TestXMLFileBacked):
test_obj = StructFile
def _get_test_data(self):
""" build a very complex set of test data """
# top-level group and client elements
groups = dict()
# group and client elements that are descendents of other group or
# client elements
subgroups = dict()
# children of elements in `groups' that should be included in
# match results
children = dict()
# children of elements in `subgroups' that should be included in
# match results
subchildren = dict()
# top-level tags that are not group elements
standalone = []
xdata = lxml.etree.Element("Test", name="test")
groups[0] = lxml.etree.SubElement(xdata, "Group", name="group1",
include="true")
children[0] = [lxml.etree.SubElement(groups[0], "Child", name="c1"),
lxml.etree.SubElement(groups[0], "Child", name="c2")]
subgroups[0] = [lxml.etree.SubElement(groups[0], "Group",
name="subgroup1", include="true"),
lxml.etree.SubElement(groups[0],
"Client", name="client1",
include="false")]
subchildren[0] = \
[lxml.etree.SubElement(subgroups[0][0], "Child", name="sc1"),
lxml.etree.SubElement(subgroups[0][0], "Child", name="sc2",
attr="some attr"),
lxml.etree.SubElement(subgroups[0][0], "Child", name="sc3")]
lxml.etree.SubElement(subchildren[0][-1], "SubChild", name="subchild")
lxml.etree.SubElement(subgroups[0][1], "Child", name="sc4")
groups[1] = lxml.etree.SubElement(xdata, "Group", name="group2",
include="false")
children[1] = []
subgroups[1] = []
subchildren[1] = []
lxml.etree.SubElement(groups[1], "Child", name="c3")
lxml.etree.SubElement(groups[1], "Child", name="c4")
standalone.append(lxml.etree.SubElement(xdata, "Standalone", name="s1"))
groups[2] = lxml.etree.SubElement(xdata, "Client", name="client2",
include="false")
children[2] = []
subgroups[2] = []
subchildren[2] = []
lxml.etree.SubElement(groups[2], "Child", name="c5")
lxml.etree.SubElement(groups[2], "Child", name="c6")
standalone.append(lxml.etree.SubElement(xdata, "Standalone", name="s2",
attr="some attr"))
groups[3] = lxml.etree.SubElement(xdata, "Client", name="client3",
include="true")
children[3] = [lxml.etree.SubElement(groups[3], "Child", name="c7",
attr="some_attr"),
lxml.etree.SubElement(groups[3], "Child", name="c8")]
subgroups[3] = []
subchildren[3] = []
lxml.etree.SubElement(children[3][-1], "SubChild", name="subchild")
standalone.append(lxml.etree.SubElement(xdata, "Standalone", name="s3"))
lxml.etree.SubElement(standalone[-1], "SubStandalone", name="sub1")
children[4] = standalone
return (xdata, groups, subgroups, children, subchildren, standalone)
def test_include_element(self):
sf = self.get_obj("/test/test.xml")
metadata = Mock()
metadata.groups = ["group1", "group2"]
metadata.hostname = "foo.example.com"
inc = lambda tag, **attrs: \
sf._include_element(lxml.etree.Element(tag, **attrs), metadata)
self.assertFalse(sf._include_element(lxml.etree.Comment("test"),
metadata))
self.assertFalse(inc("Group", name="group3"))
self.assertFalse(inc("Group", name="group2", negate="true"))
self.assertFalse(inc("Group", name="group2", negate="tRuE"))
self.assertTrue(inc("Group", name="group2"))
self.assertTrue(inc("Group", name="group2", negate="false"))
self.assertTrue(inc("Group", name="group2", negate="faLSe"))
self.assertTrue(inc("Group", name="group3", negate="true"))
self.assertTrue(inc("Group", name="group3", negate="tRUe"))
self.assertFalse(inc("Client", name="bogus.example.com"))
self.assertFalse(inc("Client", name="foo.example.com", negate="true"))
self.assertFalse(inc("Client", name="foo.example.com", negate="tRuE"))
self.assertTrue(inc("Client", name="foo.example.com"))
self.assertTrue(inc("Client", name="foo.example.com", negate="false"))
self.assertTrue(inc("Client", name="foo.example.com", negate="faLSe"))
self.assertTrue(inc("Client", name="bogus.example.com", negate="true"))
self.assertTrue(inc("Client", name="bogus.example.com", negate="tRUe"))
self.assertTrue(inc("Other"))
@patch("Bcfg2.Server.Plugin.%s._include_element" % test_obj.__name__)
def test__match(self, mock_include):
sf = self.get_obj("/test/test.xml")
metadata = Mock()
(xdata, groups, subgroups, children, subchildren, standalone) = \
self._get_test_data()
mock_include.side_effect = \
lambda x, _: (x.tag not in ['Client', 'Group'] or
x.get("include") == "true")
for i, group in groups.items():
actual = sf._match(group, metadata)
expected = children[i] + subchildren[i]
self.assertEqual(len(actual), len(expected))
# easiest way to compare the values is actually to make
# them into an XML document and let assertXMLEqual compare
# them
xactual = lxml.etree.Element("Container")
xactual.extend(actual)
xexpected = lxml.etree.Element("Container")
xexpected.extend(expected)
self.assertXMLEqual(xactual, xexpected)
for el in standalone:
self.assertXMLEqual(el, sf._match(el, metadata)[0])
@patch("Bcfg2.Server.Plugin.%s._match" % test_obj.__name__)
def test_Match(self, mock_match):
sf = self.get_obj("/test/test.xml")
metadata = Mock()
(xdata, groups, subgroups, children, subchildren, standalone) = \
self._get_test_data()
sf.entries.extend(copy.deepcopy(xdata).getchildren())
def match_rv(el, _):
if el.tag not in ['Client', 'Group']:
return [el]
elif x.get("include") == "true":
return el.getchildren()
else:
return []
mock_match.side_effect = match_rv
actual = sf.Match(metadata)
expected = reduce(lambda x, y: x + y,
list(children.values()) + list(subgroups.values()))
self.assertEqual(len(actual), len(expected))
# easiest way to compare the values is actually to make
# them into an XML document and let assertXMLEqual compare
# them
xactual = lxml.etree.Element("Container")
xactual.extend(actual)
xexpected = lxml.etree.Element("Container")
xexpected.extend(expected)
self.assertXMLEqual(xactual, xexpected)
@patch("Bcfg2.Server.Plugin.%s._include_element" % test_obj.__name__)
def test__xml_match(self, mock_include):
sf = self.get_obj("/test/test.xml")
metadata = Mock()
(xdata, groups, subgroups, children, subchildren, standalone) = \
self._get_test_data()
mock_include.side_effect = \
lambda x, _: (x.tag not in ['Client', 'Group'] or
x.get("include") == "true")
actual = copy.deepcopy(xdata)
for el in actual.getchildren():
sf._xml_match(el, metadata)
expected = lxml.etree.Element(xdata.tag, **dict(xdata.attrib))
expected.text = xdata.text
expected.extend(reduce(lambda x, y: x + y,
list(children.values()) + list(subchildren.values())))
expected.extend(standalone)
self.assertXMLEqual(actual, expected)
@patch("Bcfg2.Server.Plugin.%s._xml_match" % test_obj.__name__)
    def test_XMLMatch(self, mock_xml_match):
sf = self.get_obj("/test/test.xml")
metadata = Mock()
(sf.xdata, groups, subgroups, children, subchildren, standalone) = \
self._get_test_data()
sf.XMLMatch(metadata)
actual = []
for call in mock_xml_match.call_args_list:
actual.append(call[0][0])
self.assertEqual(call[0][1], metadata)
expected = list(groups.values()) + standalone
# easiest way to compare the values is actually to make
# them into an XML document and let assertXMLEqual compare
# them
xactual = lxml.etree.Element("Container")
xactual.extend(actual)
xexpected = lxml.etree.Element("Container")
xexpected.extend(expected)
self.assertXMLEqual(xactual, xexpected)
class TestINode(Bcfg2TestCase):
test_obj = INode
# INode.__init__ and INode._load_children() call each other
# recursively, which makes this class kind of a nightmare to test.
# we have to first patch INode._load_children so that we can
# create an INode object with no children loaded, then we unpatch
# INode._load_children and patch INode.__init__ so that child
# objects aren't actually created. but in order to test things
# atomically, we do this umpteen times in order to test with
# different data. this convenience method makes this a little
# easier. fun fun fun.
@patch("Bcfg2.Server.Plugin.%s._load_children" % test_obj.__name__, Mock())
def _get_inode(self, data, idict):
return self.test_obj(data, idict)
def test_raw_predicates(self):
metadata = Mock()
metadata.groups = ["group1", "group2"]
metadata.hostname = "foo.example.com"
entry = None
parent_predicate = lambda m, e: True
pred = eval(self.test_obj.raw['Client'] % dict(name="foo.example.com"),
dict(predicate=parent_predicate))
self.assertTrue(pred(metadata, entry))
pred = eval(self.test_obj.raw['Client'] % dict(name="bar.example.com"),
dict(predicate=parent_predicate))
self.assertFalse(pred(metadata, entry))
pred = eval(self.test_obj.raw['Group'] % dict(name="group1"),
dict(predicate=parent_predicate))
self.assertTrue(pred(metadata, entry))
pred = eval(self.test_obj.raw['Group'] % dict(name="group3"),
dict(predicate=parent_predicate))
self.assertFalse(pred(metadata, entry))
pred = eval(self.test_obj.nraw['Client'] % dict(name="foo.example.com"),
dict(predicate=parent_predicate))
self.assertFalse(pred(metadata, entry))
pred = eval(self.test_obj.nraw['Client'] % dict(name="bar.example.com"),
dict(predicate=parent_predicate))
self.assertTrue(pred(metadata, entry))
pred = eval(self.test_obj.nraw['Group'] % dict(name="group1"),
dict(predicate=parent_predicate))
self.assertFalse(pred(metadata, entry))
pred = eval(self.test_obj.nraw['Group'] % dict(name="group3"),
dict(predicate=parent_predicate))
self.assertTrue(pred(metadata, entry))
parent_predicate = lambda m, e: False
pred = eval(self.test_obj.raw['Client'] % dict(name="foo.example.com"),
dict(predicate=parent_predicate))
self.assertFalse(pred(metadata, entry))
pred = eval(self.test_obj.raw['Group'] % dict(name="group1"),
dict(predicate=parent_predicate))
self.assertFalse(pred(metadata, entry))
pred = eval(self.test_obj.nraw['Client'] % dict(name="bar.example.com"),
dict(predicate=parent_predicate))
self.assertFalse(pred(metadata, entry))
pred = eval(self.test_obj.nraw['Group'] % dict(name="group3"),
dict(predicate=parent_predicate))
self.assertFalse(pred(metadata, entry))
self.assertItemsEqual(self.test_obj.containers,
self.test_obj.raw.keys())
self.assertItemsEqual(self.test_obj.containers,
self.test_obj.nraw.keys())
@patch("Bcfg2.Server.Plugin.INode._load_children")
def test__init(self, mock_load_children):
data = lxml.etree.Element("Bogus")
# called with no parent, should not raise an exception; it's a
# top-level tag in an XML file and so is not expected to be a
# proper predicate
INode(data, dict())
self.assertRaises(PluginExecutionError,
INode, data, dict(), Mock())
data = lxml.etree.Element("Client", name="foo.example.com")
idict = dict()
inode = INode(data, idict)
mock_load_children.assert_called_with(data, idict)
self.assertTrue(inode.predicate(Mock(), Mock()))
parent = Mock()
parent.predicate = lambda m, e: True
metadata = Mock()
metadata.groups = ["group1", "group2"]
metadata.hostname = "foo.example.com"
entry = None
# test setting predicate with parent object
mock_load_children.reset_mock()
inode = INode(data, idict, parent=parent)
mock_load_children.assert_called_with(data, idict)
self.assertTrue(inode.predicate(metadata, entry))
# test negation
data = lxml.etree.Element("Client", name="foo.example.com",
negate="true")
mock_load_children.reset_mock()
inode = INode(data, idict, parent=parent)
mock_load_children.assert_called_with(data, idict)
self.assertFalse(inode.predicate(metadata, entry))
# test failure of a matching predicate (client names do not match)
data = lxml.etree.Element("Client", name="foo.example.com")
metadata.hostname = "bar.example.com"
mock_load_children.reset_mock()
inode = INode(data, idict, parent=parent)
mock_load_children.assert_called_with(data, idict)
self.assertFalse(inode.predicate(metadata, entry))
# test that parent predicate is AND'ed in correctly
parent.predicate = lambda m, e: False
metadata.hostname = "foo.example.com"
mock_load_children.reset_mock()
inode = INode(data, idict, parent=parent)
mock_load_children.assert_called_with(data, idict)
self.assertFalse(inode.predicate(metadata, entry))
def test_load_children(self):
data = lxml.etree.Element("Parent")
child1 = lxml.etree.SubElement(data, "Client", name="foo.example.com")
child2 = lxml.etree.SubElement(data, "Group", name="bar", negate="true")
idict = dict()
inode = self._get_inode(data, idict)
@patch("Bcfg2.Server.Plugin.%s.__init__" % inode.__class__.__name__)
def inner(mock_init):
mock_init.return_value = None
inode._load_children(data, idict)
self.assertItemsEqual(mock_init.call_args_list,
[call(child1, idict, inode),
call(child2, idict, inode)])
self.assertEqual(idict, dict())
self.assertItemsEqual(inode.contents, dict())
inner()
data = lxml.etree.Element("Parent")
child1 = lxml.etree.SubElement(data, "Data", name="child1",
attr="some attr")
child1.text = "text"
subchild1 = lxml.etree.SubElement(child1, "SubChild", name="subchild")
child2 = lxml.etree.SubElement(data, "Group", name="bar", negate="true")
idict = dict()
inode = self._get_inode(data, idict)
@patch("Bcfg2.Server.Plugin.%s.__init__" % inode.__class__.__name__)
def inner2(mock_init):
mock_init.return_value = None
inode._load_children(data, idict)
mock_init.assert_called_with(child2, idict, inode)
tag = child1.tag
name = child1.get("name")
self.assertEqual(idict, dict(Data=[name]))
self.assertIn(tag, inode.contents)
self.assertIn(name, inode.contents[tag])
self.assertItemsEqual(inode.contents[tag][name],
dict(name=name,
attr=child1.get('attr'),
__text__=child1.text,
__children__=[subchild1]))
inner2()
# test ignore. no ignore is set on INode by default, so we
# have to set one
old_ignore = copy.copy(self.test_obj.ignore)
self.test_obj.ignore.append("Data")
idict = dict()
inode = self._get_inode(data, idict)
@patch("Bcfg2.Server.Plugin.%s.__init__" % inode.__class__.__name__)
def inner3(mock_init):
mock_init.return_value = None
inode._load_children(data, idict)
mock_init.assert_called_with(child2, idict, inode)
self.assertEqual(idict, dict())
self.assertItemsEqual(inode.contents, dict())
inner3()
self.test_obj.ignore = old_ignore
def test_Match(self):
idata = lxml.etree.Element("Parent")
contents = lxml.etree.SubElement(idata, "Data", name="contents",
attr="some attr")
child = lxml.etree.SubElement(idata, "Group", name="bar", negate="true")
inode = INode(idata, dict())
inode.predicate = Mock()
inode.predicate.return_value = False
metadata = Mock()
metadata.groups = ['foo']
data = dict()
entry = child
inode.Match(metadata, data, entry=child)
self.assertEqual(data, dict())
inode.predicate.assert_called_with(metadata, child)
inode.predicate.reset_mock()
inode.Match(metadata, data)
self.assertEqual(data, dict())
# can't easily compare XML args without the original
# object, and we're testing that Match() works without an
# XML object passed in, so...
self.assertEqual(inode.predicate.call_args[0][0],
metadata)
self.assertXMLEqual(inode.predicate.call_args[0][1],
lxml.etree.Element("None"))
inode.predicate.reset_mock()
inode.predicate.return_value = True
inode.Match(metadata, data, entry=child)
self.assertEqual(data, inode.contents)
inode.predicate.assert_called_with(metadata, child)
class TestInfoNode(TestINode):
__test__ = True
test_obj = InfoNode
def test_raw_predicates(self):
TestINode.test_raw_predicates(self)
metadata = Mock()
entry = lxml.etree.Element("Path", name="/tmp/foo",
realname="/tmp/bar")
parent_predicate = lambda m, d: True
pred = eval(self.test_obj.raw['Path'] % dict(name="/tmp/foo"),
dict(predicate=parent_predicate))
self.assertTrue(pred(metadata, entry))
pred = eval(InfoNode.raw['Path'] % dict(name="/tmp/bar"),
dict(predicate=parent_predicate))
self.assertTrue(pred(metadata, entry))
pred = eval(InfoNode.raw['Path'] % dict(name="/tmp/bogus"),
dict(predicate=parent_predicate))
self.assertFalse(pred(metadata, entry))
pred = eval(self.test_obj.nraw['Path'] % dict(name="/tmp/foo"),
dict(predicate=parent_predicate))
self.assertFalse(pred(metadata, entry))
pred = eval(InfoNode.nraw['Path'] % dict(name="/tmp/bar"),
dict(predicate=parent_predicate))
self.assertFalse(pred(metadata, entry))
pred = eval(InfoNode.nraw['Path'] % dict(name="/tmp/bogus"),
dict(predicate=parent_predicate))
self.assertTrue(pred(metadata, entry))
parent_predicate = lambda m, d: False
pred = eval(self.test_obj.raw['Path'] % dict(name="/tmp/foo"),
dict(predicate=parent_predicate))
self.assertFalse(pred(metadata, entry))
pred = eval(InfoNode.raw['Path'] % dict(name="/tmp/bar"),
dict(predicate=parent_predicate))
self.assertFalse(pred(metadata, entry))
pred = eval(InfoNode.nraw['Path'] % dict(name="/tmp/bogus"),
dict(predicate=parent_predicate))
self.assertFalse(pred(metadata, entry))
class TestXMLSrc(TestXMLFileBacked):
test_obj = XMLSrc
def test_node_interface(self):
# ensure that the node object has the necessary interface
self.assertTrue(hasattr(self.test_obj.__node__, "Match"))
@patch("%s.open" % builtins)
def test_HandleEvent(self, mock_open):
xdata = lxml.etree.Element("Test")
lxml.etree.SubElement(xdata, "Path", name="path", attr="whatever")
xsrc = self.get_obj("/test/foo.xml")
xsrc.__node__ = Mock()
mock_open.return_value.read.return_value = tostring(xdata)
if xsrc.__priority_required__:
# test with no priority at all
self.assertRaises(PluginExecutionError,
xsrc.HandleEvent, Mock())
# test with bogus priority
xdata.set("priority", "cow")
mock_open.return_value.read.return_value = tostring(xdata)
self.assertRaises(PluginExecutionError,
xsrc.HandleEvent, Mock())
# assign a priority to use in future tests
xdata.set("priority", "10")
mock_open.return_value.read.return_value = tostring(xdata)
mock_open.reset_mock()
xsrc = self.get_obj("/test/foo.xml")
xsrc.__node__ = Mock()
xsrc.HandleEvent(Mock())
mock_open.assert_called_with("/test/foo.xml")
mock_open.return_value.read.assert_any_call()
self.assertXMLEqual(xsrc.__node__.call_args[0][0], xdata)
self.assertEqual(xsrc.__node__.call_args[0][1], dict())
self.assertEqual(xsrc.pnode, xsrc.__node__.return_value)
self.assertEqual(xsrc.cache, None)
@patch("Bcfg2.Server.Plugin.XMLSrc.HandleEvent")
def test_Cache(self, mock_HandleEvent):
xsrc = self.get_obj("/test/foo.xml")
metadata = Mock()
xsrc.Cache(metadata)
mock_HandleEvent.assert_any_call()
xsrc.pnode = Mock()
xsrc.Cache(metadata)
xsrc.pnode.Match.assert_called_with(metadata, xsrc.__cacheobj__())
self.assertEqual(xsrc.cache[0], metadata)
xsrc.pnode.reset_mock()
xsrc.Cache(metadata)
        self.assertFalse(xsrc.pnode.Match.called)  # warm cache: Match must not run again
self.assertEqual(xsrc.cache[0], metadata)
        xsrc.cache = ("bogus",)  # stale cache whose metadata entry will not match
xsrc.Cache(metadata)
xsrc.pnode.Match.assert_called_with(metadata, xsrc.__cacheobj__())
self.assertEqual(xsrc.cache[0], metadata)
class TestInfoXML(TestXMLSrc):
test_obj = InfoXML
class TestXMLDirectoryBacked(TestDirectoryBacked):
test_obj = XMLDirectoryBacked
class TestPrioDir(TestPlugin, TestGenerator, TestXMLDirectoryBacked):
test_obj = PrioDir
@patch("Bcfg2.Server.Plugin.%s.add_directory_monitor" % test_obj.__name__,
Mock())
def get_obj(self, core=None):
if core is None:
core = Mock()
return self.test_obj(core, datastore)
def test_HandleEvent(self):
TestXMLDirectoryBacked.test_HandleEvent(self)
@patch("Bcfg2.Server.Plugin.XMLDirectoryBacked.HandleEvent", Mock())
def inner():
pd = self.get_obj()
test1 = Mock()
test1.items = dict(Path=["/etc/foo.conf", "/etc/bar.conf"])
test2 = Mock()
test2.items = dict(Path=["/etc/baz.conf"],
Package=["quux", "xyzzy"])
pd.entries = {"/test1.xml": test1,
"/test2.xml": test2}
pd.HandleEvent(Mock())
self.assertItemsEqual(pd.Entries,
dict(Path={"/etc/foo.conf": pd.BindEntry,
"/etc/bar.conf": pd.BindEntry,
"/etc/baz.conf": pd.BindEntry},
Package={"quux": pd.BindEntry,
"xyzzy": pd.BindEntry}))
inner()
def test__matches(self):
pd = self.get_obj()
self.assertTrue(pd._matches(lxml.etree.Element("Test",
name="/etc/foo.conf"),
Mock(),
{"/etc/foo.conf": pd.BindEntry,
"/etc/bar.conf": pd.BindEntry}))
self.assertFalse(pd._matches(lxml.etree.Element("Test",
name="/etc/baz.conf"),
Mock(),
{"/etc/foo.conf": pd.BindEntry,
"/etc/bar.conf": pd.BindEntry}))
def test_BindEntry(self):
pd = self.get_obj()
pd.get_attrs = Mock(return_value=dict(test1="test1", test2="test2"))
entry = lxml.etree.Element("Path", name="/etc/foo.conf", test1="bogus")
metadata = Mock()
pd.BindEntry(entry, metadata)
pd.get_attrs.assert_called_with(entry, metadata)
self.assertItemsEqual(entry.attrib,
dict(name="/etc/foo.conf",
test1="test1", test2="test2"))
def test_get_attrs(self):
pd = self.get_obj()
entry = lxml.etree.Element("Path", name="/etc/foo.conf")
children = [lxml.etree.Element("Child")]
metadata = Mock()
pd.entries = dict()
def reset():
metadata.reset_mock()
for src in pd.entries.values():
src.reset_mock()
src.cache = None
# test with no matches
self.assertRaises(PluginExecutionError,
pd.get_attrs, entry, metadata)
def add_entry(name, data, prio=10):
path = os.path.join(pd.data, name)
pd.entries[path] = Mock()
pd.entries[path].priority = prio
def do_Cache(metadata):
pd.entries[path].cache = (metadata, data)
pd.entries[path].Cache.side_effect = do_Cache
add_entry('test1.xml',
dict(Path={'/etc/foo.conf': dict(attr="attr1",
__children__=children),
'/etc/bar.conf': dict()}))
add_entry('test2.xml',
dict(Path={'/etc/bar.conf': dict(__text__="text",
attr="attr1")},
Package={'quux': dict(),
'xyzzy': dict()}),
prio=20)
add_entry('test3.xml',
dict(Path={'/etc/baz.conf': dict()},
Package={'xyzzy': dict()}),
prio=20)
# test with exactly one match, __children__
reset()
self.assertItemsEqual(pd.get_attrs(entry, metadata),
dict(attr="attr1"))
for src in pd.entries.values():
src.Cache.assert_called_with(metadata)
self.assertEqual(len(entry.getchildren()), 1)
self.assertXMLEqual(entry.getchildren()[0], children[0])
# test with multiple matches with different priorities, __text__
reset()
entry = lxml.etree.Element("Path", name="/etc/bar.conf")
self.assertItemsEqual(pd.get_attrs(entry, metadata),
dict(attr="attr1"))
for src in pd.entries.values():
src.Cache.assert_called_with(metadata)
self.assertEqual(entry.text, "text")
# test with multiple matches with identical priorities
reset()
entry = lxml.etree.Element("Package", name="xyzzy")
self.assertRaises(PluginExecutionError,
pd.get_attrs, entry, metadata)
class TestSpecificityError(Bcfg2TestCase):
""" placeholder for future tests """
pass
class TestSpecificity(Bcfg2TestCase):
test_obj = Specificity
def get_obj(self, **kwargs):
return self.test_obj(**kwargs)
def test_matches(self):
metadata = Mock()
metadata.hostname = "foo.example.com"
metadata.groups = ["group1", "group2"]
self.assertTrue(self.get_obj(all=True).matches(metadata))
self.assertTrue(self.get_obj(group="group1").matches(metadata))
self.assertTrue(self.get_obj(hostname="foo.example.com").matches(metadata))
self.assertFalse(self.get_obj().matches(metadata))
self.assertFalse(self.get_obj(group="group3").matches(metadata))
self.assertFalse(self.get_obj(hostname="bar.example.com").matches(metadata))
def test__cmp(self):
specs = [self.get_obj(all=True),
self.get_obj(group="group1", prio=10),
self.get_obj(group="group1", prio=20),
self.get_obj(hostname="foo.example.com")]
for i in range(len(specs)):
for j in range(len(specs)):
if i == j:
self.assertEqual(0, specs[i].__cmp__(specs[j]))
self.assertEqual(0, specs[j].__cmp__(specs[i]))
elif i > j:
self.assertEqual(-1, specs[i].__cmp__(specs[j]))
self.assertEqual(1, specs[j].__cmp__(specs[i]))
elif i < j:
self.assertEqual(1, specs[i].__cmp__(specs[j]))
self.assertEqual(-1, specs[j].__cmp__(specs[i]))
def test_cmp(self):
""" test __lt__/__gt__/__eq__ """
specs = [self.get_obj(all=True),
self.get_obj(group="group1", prio=10),
self.get_obj(group="group1", prio=20),
self.get_obj(hostname="foo.example.com")]
for i in range(len(specs)):
for j in range(len(specs)):
if i < j:
self.assertGreater(specs[i], specs[j])
self.assertLess(specs[j], specs[i])
self.assertGreaterEqual(specs[i], specs[j])
self.assertLessEqual(specs[j], specs[i])
elif i == j:
self.assertEqual(specs[i], specs[j])
self.assertEqual(specs[j], specs[i])
self.assertLessEqual(specs[i], specs[j])
self.assertGreaterEqual(specs[j], specs[i])
elif i > j:
self.assertLess(specs[i], specs[j])
self.assertGreater(specs[j], specs[i])
self.assertLessEqual(specs[i], specs[j])
self.assertGreaterEqual(specs[j], specs[i])
class TestSpecificData(Bcfg2TestCase):
test_obj = SpecificData
def get_obj(self, name="/test.txt", specific=None, encoding=None):
if specific is None:
specific = Mock()
return self.test_obj(name, specific, encoding)
@patch("%s.open" % builtins)
def test_handle_event(self, mock_open):
event = Mock()
event.code2str.return_value = 'deleted'
sd = self.get_obj()
sd.handle_event(event)
self.assertFalse(mock_open.called)
if hasattr(sd, 'data'):
self.assertIsNone(sd.data)
else:
self.assertFalse(hasattr(sd, 'data'))
event = Mock()
mock_open.return_value.read.return_value = "test"
sd.handle_event(event)
mock_open.assert_called_with("/test.txt")
mock_open.return_value.read.assert_any_call()
self.assertEqual(sd.data, "test")
class TestEntrySet(TestDebuggable):
test_obj = EntrySet
# filenames that should be matched successfully by the EntrySet
# 'specific' regex. these are filenames alone -- a specificity
# will be added to these
basenames = ["test", "test.py", "test with spaces.txt",
"test.multiple.dots.py", "test_underscores.and.dots",
"really_misleading.G10_test",
"name$with*regex(special){chars}",
"misleading.H_hostname.test.com"]
# filenames that do not match any of the basenames (or the
# basename regex, if applicable)
bogus_names = ["bogus"]
# filenames that should be ignored
ignore = ["foo~", ".#foo", ".foo.swp", ".foo.swx",
"test.txt.genshi_include", "test.G_foo.genshi_include"]
def get_obj(self, basename="test", path=datastore, entry_type=MagicMock(),
encoding=None):
return self.test_obj(basename, path, entry_type, encoding)
def test__init(self):
for basename in self.basenames:
eset = self.get_obj(basename=basename)
self.assertIsInstance(eset.specific, re_type)
self.assertTrue(eset.specific.match(os.path.join(datastore,
basename)))
ppath = os.path.join(datastore, "Plugin", basename)
self.assertTrue(eset.specific.match(ppath))
self.assertTrue(eset.specific.match(ppath + ".G20_foo"))
self.assertTrue(eset.specific.match(ppath + ".G1_foo"))
self.assertTrue(eset.specific.match(ppath + ".G32768_foo"))
# a group named '_'
self.assertTrue(eset.specific.match(ppath + ".G10__"))
self.assertTrue(eset.specific.match(ppath + ".H_hostname"))
self.assertTrue(eset.specific.match(ppath + ".H_fqdn.subdomain.example.com"))
self.assertTrue(eset.specific.match(ppath + ".G20_group_with_underscores"))
self.assertFalse(eset.specific.match(ppath + ".G20_group with spaces"))
self.assertFalse(eset.specific.match(ppath + ".G_foo"))
self.assertFalse(eset.specific.match(ppath + ".G_"))
self.assertFalse(eset.specific.match(ppath + ".G20_"))
self.assertFalse(eset.specific.match(ppath + ".H_"))
for bogus in self.bogus_names:
self.assertFalse(eset.specific.match(os.path.join(datastore,
"Plugin",
bogus)))
for ignore in self.ignore:
self.assertTrue(eset.ignore.match(ignore))
self.assertFalse(eset.ignore.match(basename))
self.assertFalse(eset.ignore.match(basename + ".G20_foo"))
self.assertFalse(eset.ignore.match(basename + ".G1_foo"))
self.assertFalse(eset.ignore.match(basename + ".G32768_foo"))
self.assertFalse(eset.ignore.match(basename + ".G10__"))
self.assertFalse(eset.ignore.match(basename + ".H_hostname"))
self.assertFalse(eset.ignore.match(basename + ".H_fqdn.subdomain.example.com"))
self.assertFalse(eset.ignore.match(basename + ".G20_group_with_underscores"))
def test_get_matching(self):
items = {0: Mock(), 1: Mock(), 2: Mock(), 3: Mock(), 4: Mock(),
5: Mock()}
items[0].specific.matches.return_value = False
items[1].specific.matches.return_value = True
items[2].specific.matches.return_value = False
items[3].specific.matches.return_value = False
items[4].specific.matches.return_value = True
items[5].specific.matches.return_value = True
metadata = Mock()
eset = self.get_obj()
eset.entries = items
self.assertItemsEqual(eset.get_matching(metadata),
[items[1], items[4], items[5]])
for i in items.values():
i.specific.matches.assert_called_with(metadata)
@patch("Bcfg2.Server.Plugin.%s.get_matching" % test_obj.__name__)
def test_best_matching(self, mock_get_matching):
eset = self.get_obj()
metadata = Mock()
matching = []
def reset():
mock_get_matching.reset_mock()
metadata.reset_mock()
for m in matching:
m.reset_mock()
def specific(all=False, group=False, prio=None, hostname=False):
spec = Mock()
spec.specific = Specificity(all=all, group=group, prio=prio,
hostname=hostname)
return spec
self.assertRaises(PluginExecutionError,
eset.best_matching, metadata, matching=[])
reset()
mock_get_matching.return_value = matching
self.assertRaises(PluginExecutionError,
eset.best_matching, metadata)
mock_get_matching.assert_called_with(metadata)
# test with a single file for all
reset()
expected = specific(all=True)
matching.append(expected)
mock_get_matching.return_value = matching
self.assertEqual(eset.best_matching(metadata), expected)
mock_get_matching.assert_called_with(metadata)
# test with a single group-specific file
reset()
expected = specific(group=True, prio=10)
matching.append(expected)
mock_get_matching.return_value = matching
self.assertEqual(eset.best_matching(metadata), expected)
mock_get_matching.assert_called_with(metadata)
# test with multiple group-specific files
reset()
expected = specific(group=True, prio=20)
matching.append(expected)
mock_get_matching.return_value = matching
self.assertEqual(eset.best_matching(metadata), expected)
mock_get_matching.assert_called_with(metadata)
# test with host-specific file
reset()
expected = specific(hostname=True)
matching.append(expected)
mock_get_matching.return_value = matching
self.assertEqual(eset.best_matching(metadata), expected)
mock_get_matching.assert_called_with(metadata)
@patch("Bcfg2.Server.Plugin.%s.entry_init" % test_obj.__name__)
@patch("Bcfg2.Server.Plugin.%s.reset_metadata" % test_obj.__name__)
@patch("Bcfg2.Server.Plugin.%s.update_metadata" % test_obj.__name__)
def test_handle_event(self, mock_update_md, mock_reset_md, mock_init):
def reset():
mock_update_md.reset_mock()
mock_reset_md.reset_mock()
mock_init.reset_mock()
eset = self.get_obj()
for fname in ["info", "info.xml", ":info"]:
for evt in ["exists", "created", "changed"]:
reset()
event = Mock()
event.code2str.return_value = evt
event.filename = fname
eset.handle_event(event)
mock_update_md.assert_called_with(event)
self.assertFalse(mock_init.called)
self.assertFalse(mock_reset_md.called)
reset()
event = Mock()
event.code2str.return_value = "deleted"
event.filename = fname
eset.handle_event(event)
mock_reset_md.assert_called_with(event)
self.assertFalse(mock_init.called)
self.assertFalse(mock_update_md.called)
for evt in ["exists", "created", "changed"]:
reset()
event = Mock()
event.code2str.return_value = evt
event.filename = "test.txt"
eset.handle_event(event)
mock_init.assert_called_with(event)
self.assertFalse(mock_reset_md.called)
self.assertFalse(mock_update_md.called)
reset()
entry = Mock()
eset.entries["test.txt"] = entry
event = Mock()
event.code2str.return_value = "changed"
event.filename = "test.txt"
eset.handle_event(event)
entry.handle_event.assert_called_with(event)
self.assertFalse(mock_init.called)
self.assertFalse(mock_reset_md.called)
self.assertFalse(mock_update_md.called)
reset()
entry = Mock()
eset.entries["test.txt"] = entry
event = Mock()
event.code2str.return_value = "deleted"
event.filename = "test.txt"
eset.handle_event(event)
self.assertNotIn("test.txt", eset.entries)
@patch("Bcfg2.Server.Plugin.%s.specificity_from_filename" %
test_obj.__name__)
def test_entry_init(self, mock_spec):
eset = self.get_obj()
def reset():
eset.entry_type.reset_mock()
mock_spec.reset_mock()
event = Mock()
event.code2str.return_value = "created"
event.filename = "test.txt"
eset.entry_init(event)
mock_spec.assert_called_with("test.txt", specific=None)
eset.entry_type.assert_called_with(os.path.join(eset.path, "test.txt"),
mock_spec.return_value, None)
eset.entry_type.return_value.handle_event.assert_called_with(event)
self.assertIn("test.txt", eset.entries)
# test duplicate add
reset()
eset.entry_init(event)
self.assertFalse(mock_spec.called)
self.assertFalse(eset.entry_type.called)
eset.entries["test.txt"].handle_event.assert_called_with(event)
# test keyword args
etype = Mock()
specific = Mock()
event = Mock()
event.code2str.return_value = "created"
event.filename = "test2.txt"
eset.entry_init(event, entry_type=etype, specific=specific)
mock_spec.assert_called_with("test2.txt", specific=specific)
etype.assert_called_with(os.path.join(eset.path, "test2.txt"),
mock_spec.return_value, None)
etype.return_value.handle_event.assert_called_with(event)
self.assertIn("test2.txt", eset.entries)
# test specificity error
event = Mock()
event.code2str.return_value = "created"
event.filename = "test3.txt"
mock_spec.side_effect = SpecificityError
eset.entry_init(event)
mock_spec.assert_called_with("test3.txt", specific=None)
self.assertFalse(eset.entry_type.called)
@patch("Bcfg2.Server.Plugin.Specificity")
def test_specificity_from_filename(self, mock_spec):
def test(eset, fname, **kwargs):
mock_spec.reset_mock()
if "specific" in kwargs:
specific = kwargs['specific']
del kwargs['specific']
else:
specific = None
self.assertEqual(eset.specificity_from_filename(fname,
specific=specific),
mock_spec.return_value)
mock_spec.assert_called_with(**kwargs)
def fails(eset, fname, specific=None):
mock_spec.reset_mock()
self.assertRaises(SpecificityError,
eset.specificity_from_filename, fname,
specific=specific)
for basename in self.basenames:
eset = self.get_obj(basename=basename)
ppath = os.path.join(datastore, "Plugin", basename)
test(eset, ppath, all=True)
test(eset, ppath + ".G20_foo", group="foo", prio=20)
test(eset, ppath + ".G1_foo", group="foo", prio=1)
test(eset, ppath + ".G32768_foo", group="foo", prio=32768)
test(eset, ppath + ".G10__", group="_", prio=10)
test(eset, ppath + ".H_hostname", hostname="hostname")
test(eset, ppath + ".H_fqdn.subdomain.example.com",
hostname="fqdn.subdomain.example.com")
test(eset, ppath + ".G20_group_with_underscores",
group="group_with_underscores", prio=20)
for bogus in self.bogus_names:
fails(eset, bogus)
fails(eset, ppath + ".G_group with spaces")
fails(eset, ppath + ".G_foo")
fails(eset, ppath + ".G_")
fails(eset, ppath + ".G20_")
fails(eset, ppath + ".H_")
@patch("%s.open" % builtins)
@patch("Bcfg2.Server.Plugin.InfoXML")
def test_update_metadata(self, mock_InfoXML, mock_open):
eset = self.get_obj()
# add info.xml
event = Mock()
event.filename = "info.xml"
eset.update_metadata(event)
mock_InfoXML.assert_called_with(os.path.join(eset.path, "info.xml"))
mock_InfoXML.return_value.HandleEvent.assert_called_with(event)
self.assertEqual(eset.infoxml, mock_InfoXML.return_value)
# modify info.xml
mock_InfoXML.reset_mock()
eset.update_metadata(event)
self.assertFalse(mock_InfoXML.called)
eset.infoxml.HandleEvent.assert_called_with(event)
for fname in [':info', 'info']:
event = Mock()
event.filename = fname
idata = ["owner:owner",
"group: GROUP",
"perms: 775",
"important: true",
"bogus: line"]
mock_open.return_value.readlines.return_value = idata
eset.update_metadata(event)
expected = default_file_metadata.copy()
expected['owner'] = 'owner'
expected['group'] = 'GROUP'
expected['perms'] = '0775'
expected['important'] = 'true'
self.assertItemsEqual(eset.metadata,
expected)
def test_reset_metadata(self):
eset = self.get_obj()
# test info.xml
event = Mock()
event.filename = "info.xml"
eset.infoxml = Mock()
eset.reset_metadata(event)
self.assertIsNone(eset.infoxml)
for fname in [':info', 'info']:
event = Mock()
event.filename = fname
eset.metadata = Mock()
eset.reset_metadata(event)
self.assertItemsEqual(eset.metadata, default_file_metadata)
@patch("Bcfg2.Server.Plugin.bind_info")
def test_bind_info_to_entry(self, mock_bind_info):
eset = self.get_obj()
entry = Mock()
metadata = Mock()
eset.bind_info_to_entry(entry, metadata)
mock_bind_info.assert_called_with(entry, metadata,
infoxml=eset.infoxml,
default=eset.metadata)
@patch("Bcfg2.Server.Plugin.%s.best_matching" % test_obj.__name__)
@patch("Bcfg2.Server.Plugin.%s.bind_info_to_entry" % test_obj.__name__)
def test_bind_entry(self, mock_bind_info, mock_best_matching):
eset = self.get_obj()
entry = Mock()
metadata = Mock()
eset.bind_entry(entry, metadata)
mock_bind_info.assert_called_with(entry, metadata)
mock_best_matching.assert_called_with(metadata)
mock_best_matching.return_value.bind_entry.assert_called_with(entry,
metadata)
class TestGroupSpool(TestPlugin, TestGenerator):
test_obj = GroupSpool
@patch("Bcfg2.Server.Plugin.%s.AddDirectoryMonitor" % test_obj.__name__)
def get_obj(self, core=None):
return TestPlugin.get_obj(self, core=core)
@patch("Bcfg2.Server.Plugin.%s.AddDirectoryMonitor" % test_obj.__name__)
def test__init(self, mock_Add):
core = Mock()
gs = self.test_obj(core, datastore)
mock_Add.assert_called_with('')
self.assertItemsEqual(gs.Entries, {gs.entry_type: {}})
@patch("os.path.isdir")
@patch("os.path.isfile")
@patch("Bcfg2.Server.Plugin.%s.event_id" % test_obj.__name__)
@patch("Bcfg2.Server.Plugin.%s.event_path" % test_obj.__name__)
@patch("Bcfg2.Server.Plugin.%s.AddDirectoryMonitor" % test_obj.__name__)
def test_add_entry(self, mock_Add, mock_event_path, mock_event_id,
mock_isfile, mock_isdir):
gs = self.get_obj()
gs.es_cls = Mock()
gs.es_child_cls = Mock()
def reset():
gs.es_cls.reset_mock()
gs.es_child_cls.reset_mock()
mock_Add.reset_mock()
mock_event_path.reset_mock()
mock_event_id.reset_mock()
mock_isfile.reset_mock()
mock_isdir.reset_mock()
# directory
event = Mock()
event.filename = "foo"
basedir = "test"
epath = os.path.join(gs.data, basedir, event.filename)
ident = os.path.join(basedir, event.filename)
mock_event_path.return_value = epath
mock_event_id.return_value = ident
mock_isdir.return_value = True
mock_isfile.return_value = False
gs.add_entry(event)
mock_Add.assert_called_with(os.path.join("/" + basedir, event.filename))
self.assertNotIn(ident, gs.entries)
mock_isdir.assert_called_with(epath)
# file that is not in self.entries
reset()
event = Mock()
event.filename = "foo"
basedir = "test/foo/"
epath = os.path.join(gs.data, basedir, event.filename)
ident = basedir[:-1]
mock_event_path.return_value = epath
mock_event_id.return_value = ident
mock_isdir.return_value = False
mock_isfile.return_value = True
gs.add_entry(event)
self.assertFalse(mock_Add.called)
gs.es_cls.assert_called_with(gs.filename_pattern,
gs.data + ident,
gs.es_child_cls,
gs.encoding)
self.assertIn(ident, gs.entries)
self.assertEqual(gs.entries[ident], gs.es_cls.return_value)
self.assertIn(ident, gs.Entries[gs.entry_type])
self.assertEqual(gs.Entries[gs.entry_type][ident],
gs.es_cls.return_value.bind_entry)
gs.entries[ident].handle_event.assert_called_with(event)
mock_isfile.assert_called_with(epath)
# file that is in self.entries
reset()
gs.add_entry(event)
self.assertFalse(mock_Add.called)
self.assertFalse(gs.es_cls.called)
gs.entries[ident].handle_event.assert_called_with(event)
def test_event_path(self):
gs = self.get_obj()
gs.handles[1] = "/var/lib/foo/"
gs.handles[2] = "/etc/foo/"
gs.handles[3] = "/usr/share/foo/"
event = Mock()
event.filename = "foo"
for i in range(1, 4):
event.requestID = i
self.assertEqual(gs.event_path(event),
os.path.join(datastore, gs.name,
gs.handles[event.requestID].lstrip('/'),
event.filename))
@patch("os.path.isdir")
@patch("Bcfg2.Server.Plugin.%s.event_path" % test_obj.__name__)
def test_event_id(self, mock_event_path, mock_isdir):
gs = self.get_obj()
def reset():
mock_event_path.reset_mock()
mock_isdir.reset_mock()
gs.handles[1] = "/var/lib/foo/"
gs.handles[2] = "/etc/foo/"
gs.handles[3] = "/usr/share/foo/"
event = Mock()
event.filename = "foo"
for i in range(1, 4):
event.requestID = i
reset()
mock_isdir.return_value = True
self.assertEqual(gs.event_id(event),
os.path.join(gs.handles[event.requestID].lstrip('/'),
event.filename))
mock_isdir.assert_called_with(mock_event_path.return_value)
reset()
mock_isdir.return_value = False
self.assertEqual(gs.event_id(event),
gs.handles[event.requestID].rstrip('/'))
mock_isdir.assert_called_with(mock_event_path.return_value)
def test_toggle_debug(self):
gs = self.get_obj()
gs.entries = {"/foo": Mock(),
"/bar": Mock(),
"/baz/quux": Mock()}
@patch("Bcfg2.Server.Plugin.Plugin.toggle_debug")
def inner(mock_debug):
gs.toggle_debug()
mock_debug.assert_called_with(gs)
for entry in gs.entries.values():
entry.toggle_debug.assert_any_call()
inner()
TestPlugin.test_toggle_debug(self)
@patch("Bcfg2.Server.Plugin.%s.event_id" % test_obj.__name__)
@patch("Bcfg2.Server.Plugin.%s.add_entry" % test_obj.__name__)
def test_HandleEvent(self, mock_add_entry, mock_event_id):
gs = self.get_obj()
gs.entries = {"/foo": Mock(),
"/bar": Mock(),
"/baz": Mock(),
"/baz/quux": Mock()}
for path in gs.entries.keys():
gs.Entries[gs.entry_type] = {path: Mock()}
gs.handles = {1: "/foo/",
2: "/bar/",
3: "/baz/",
4: "/baz/quux"}
def reset():
mock_add_entry.reset_mock()
mock_event_id.reset_mock()
for entry in gs.entries.values():
entry.reset_mock()
# test event creation, changing entry that doesn't exist
for evt in ["exists", "created", "changed"]:
reset()
event = Mock()
event.filename = "foo"
event.code2str.return_value = evt
gs.HandleEvent(event)
mock_event_id.assert_called_with(event)
mock_add_entry.assert_called_with(event)
# test deleting entry, changing entry that does exist
for evt in ["changed", "deleted"]:
reset()
event = Mock()
event.filename = "quux"
event.requestID = 4
event.code2str.return_value = evt
mock_event_id.return_value = "/baz/quux"
gs.HandleEvent(event)
mock_event_id.assert_called_with(event)
self.assertIn(mock_event_id.return_value, gs.entries)
gs.entries[mock_event_id.return_value].handle_event.assert_called_with(event)
self.assertFalse(mock_add_entry.called)
# test deleting directory
reset()
event = Mock()
event.filename = "quux"
event.requestID = 3
event.code2str.return_value = "deleted"
mock_event_id.return_value = "/baz/quux"
gs.HandleEvent(event)
mock_event_id.assert_called_with(event)
self.assertNotIn("/baz/quux", gs.entries)
self.assertNotIn("/baz/quux", gs.Entries[gs.entry_type])
# StarcoderdataPython sample 1999744 -- repo: tinycord/tinycord
import typing
import asyncio
from .utils import setup_arg, setup_callback
from .exceptions import CommandNotFound
class CommandBase:
"""
This is the base class of the CommandClient.
"""
commands: typing.Dict[str, typing.Dict[str, typing.Any]] = {}
def add_command(self, name: str, description: str, usage: str, callback: typing.Awaitable):
"""
This function is used to add a command to the client.
Parameters
----------
name: `str`
The name of the command.
description: `str`
The description of the command.
usage: `str`
The usage of the command.
callback: `typing.Callable`
The callback of the command.
"""
if not asyncio.iscoroutinefunction(callback):
raise TypeError('Commands callback must be a coroutine.')
callback = setup_arg(
setup_callback(callback)
)
self.commands[name] = {
'name': name,
'description': description,
'usage': usage,
'callback': callback,
}
""" The commands that are available. """
return self.commands[name]
def remove_command(self, name: str):
"""
This function is used to remove a command from the client.
Parameters
----------
name: `str`
The name of the command.
"""
        if name in self.commands:
            del self.commands[name]
            return
        raise CommandNotFound(name, 'The command was not found.')
def get_command(self, name: str):
"""
This function is used to get the command of the name.
Parameters
----------
name: `str`
The name of the command.
"""
if name in self.commands:
return self.commands[name]
def command(self, name: str, description: str = None, usage: str = None, aliases: typing.List[str] = []):
"""
Decorator for adding a command.
Parameters
----------
name: `str`
The name of the command.
description: `str`
The description of the command.
usage: `str`
The usage of the command.
"""
def decorator(callback: typing.Callable):
"""
This function is used to decorate a callback.
"""
self.add_command(name, description, usage, callback)
for alias in aliases:
self.add_command(alias, description, usage, callback)
return callback
return decorator
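# Illustrative usage (a hedged sketch, not part of the original module): assuming a
# client class elsewhere in this package mixes in CommandBase, decorator-based
# registration would look roughly like this; `client` and `ctx` are hypothetical names.
#
#     @client.command('ping', description='Replies with pong.', usage='ping')
#     async def ping(ctx):
#         await ctx.reply('pong')
#
#     client.get_command('ping')['usage']  # -> 'ping'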
|
StarcoderdataPython
|
3414945
|
<filename>tests/test_docs_complete.py<gh_stars>10-100
import os
import pytest
MODULES_PATH = './modules'
def get_submodules(module):
pkg_name = module.replace('scikit-surgery', 'sksurgery')
submodules_path = os.path.join(MODULES_PATH, module, pkg_name)
# Get all files recursively (https://stackoverflow.com/questions/19309667/recursive-os-listdir)
all_files = \
[os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(submodules_path)) for f in fn]
# We just want e.g. algorithms.averagequaternions.py rather than
# scikit-surgerycore/sksurgerycore/algorithms/averagequaternions.py
all_files = [f.replace(submodules_path, "") for f in all_files if f.endswith('.py')]
# Get rid of __init__.py, _version.py
submodules = [f for f in all_files if '__init__.py' not in f]
submodules = [f for f in submodules if '_version.py' not in f]
    # strip the .py ending (rstrip('.py') would also chop trailing 'p'/'y' characters)
    submodules = [f[:-len('.py')] for f in submodules]
    # Convert path separators to module notation
    submodules = [f.replace('\\', '.').replace('/', '.') for f in submodules]
return submodules
def check_documentation(module_name, submodule_list):
module_documentation_file = os.path.join(MODULES_PATH, module_name, 'doc', 'module_ref.rst')
with open(module_documentation_file, 'r') as f:
documentation = f.read()
for submodule in submodule_list:
print(f'Checking {submodule} is in {module_documentation_file}')
assert submodule in documentation
def test_all_submodules_documented():
modules = os.listdir(MODULES_PATH)
for module in modules:
submodules = get_submodules(module)
check_documentation(module, submodules)
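if __name__ == '__main__':
    # Hedged, self-contained illustration of the path-to-module conversion done in
    # get_submodules(); the path below is invented and does not need to exist on disk.
    example = '/algorithms/averagequaternions.py'
    as_module = example[:-len('.py')].replace('\\', '.').replace('/', '.')
    print(as_module)  # -> .algorithms.averagequaternions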
|
StarcoderdataPython
|
4947227
|
# -*- coding: utf-8 -*-
"""Tree Level Order.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1S75hNh7pHJXaZpvoiL7ztrFuuQRWzahr
"""
class Node(object):
def __init__(self, val=None):
self.left = None
self.right = None
self.val = val
def traverse(n):
for e in traverse_helper(n).values():
print(' '.join(e))
def traverse_helper(n, lv=0, output=None):
    # A fresh dict per call avoids the shared-state bug of a mutable default argument.
    if output is None:
        output = {}
    if n:
        if lv not in output:
            output[lv] = [str(n.val)]
        else:
            output[lv] += [str(n.val)]
        traverse_helper(n.left, lv + 1, output)
        traverse_helper(n.right, lv + 1, output)
    return output
r = Node(1)
r.left = Node(2)
r.right = Node(3)
r.left.left = Node(4)
r.right.left = Node(5)
r.right.right = Node(6)
traverse(r)
# Better solution
class Node(object):
def __init__(self, val=None):
self.left = None
self.right = None
self.val = val
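# The iterative traversal below prints the tree level by level using two counters:
# currentCount is how many nodes remain in the level being printed, nextCount is how
# many children have been queued for the next level; when currentCount reaches zero a
# newline is printed and the counters swap roles.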
def traverse(n):
next = n
nodes = [next]
currentCount = 1
nextCount = 0
while len(nodes) != 0:
currentNode = nodes.pop(0)
print(currentNode.val, end = ' ')
currentCount -= 1
if currentNode.left:
nodes.append(currentNode.left)
nextCount += 1
if currentNode.right:
nodes.append(currentNode.right)
nextCount += 1
if currentCount == 0:
currentCount = nextCount
nextCount = 0
print()
r = Node(1)
r.left = Node(2)
r.right = Node(3)
r.left.left = Node(4)
r.right.left = Node(5)
r.right.right = Node(6)
traverse(r)
|
StarcoderdataPython
|
1839375
|
# -*- coding: utf-8 -*-
import six
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.apps import AppConfig
from . import connections, logger, flexconfig
class ElasticsearchFlexConfig(AppConfig):
name = 'elasticsearch_flex'
def ready(self):
self.__init_connection()
self.__import_modules()
def __init_connection(self):
host = flexconfig.get('host')
if host is None:
connections.create_connection(hosts=['localhost'])
logger.info('No Elasticsearch host specified, assuming "localhost"')
return
if isinstance(host, dict):
connections.configure(**host)
elif isinstance(host, six.string_types):
connections.create_connection(hosts=[host])
else:
raise ImproperlyConfigured('<host = {0}> for ElasticsearchFlex is incorrect'.format(host))
logger.info('Elasticsearch connection configured using <%s>', host)
def __import_modules(self):
# Discover the modules
import elasticsearch_flex.signals
_loaded = []
for app_name in settings.INSTALLED_APPS:
module = '{}.search_indexes'.format(app_name)
try:
import_module(module)
_loaded.append(app_name)
except ImportError:
pass
if len(_loaded):
logger.info('Loaded search indices for %s apps: %s', len(_loaded), _loaded)
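# Illustrative configuration (an assumption for documentation purposes, not taken from
# the project): __init_connection() accepts either a plain host string or a dict that is
# forwarded to connections.configure(), so the value returned by flexconfig.get('host')
# could look like either of these:
#
#     'es.example.com:9200'
#     {'default': {'hosts': ['es1.example.com', 'es2.example.com']}}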
|
StarcoderdataPython
|
9700848
|
"""Tests for calibration measure functions."""
import pytest
from probnumeval.timeseries import (
average_normalised_estimation_error_squared,
chi2_confidence_intervals,
non_credibility_index,
non_credibility_index2,
non_credibility_index3,
)
def test_anees():
with pytest.raises(NotImplementedError):
average_normalised_estimation_error_squared(None, None, None)
def test_chi2_confidence():
lower, upper = chi2_confidence_intervals(dim=2)
assert lower == pytest.approx(0.01, rel=1e-1)
assert upper == pytest.approx(10, rel=1e-1)
def test_nci():
with pytest.raises(NotImplementedError):
non_credibility_index(None, None, None)
def test_nci2():
with pytest.raises(NotImplementedError):
non_credibility_index2(None, None, None)
def test_nci3():
with pytest.raises(NotImplementedError):
non_credibility_index3(None, None, None)
|
StarcoderdataPython
|
6478913
|
<reponame>slalom-ggp/dataops-tools
""" slalom.dataops.sparkutils module """
import datetime
import importlib.util
import time
import os
import sys
from pathlib import Path
import docker
import fire
import pyspark
from py4j.java_gateway import java_import
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.types import (
StructType,
StructField,
DateType,
TimestampType,
Row as SparkRow,
)
from pyspark.sql.functions import (
unix_timestamp,
from_unixtime,
to_date,
input_file_name,
lit,
)
import dock_r
from logless import (
get_logger,
logged,
logged_block,
)
import runnow
import uio
from slalom.dataops import pandasutils
logging = get_logger("slalom.dataops.sparkutils")
try:
import pandas as pd
except Exception as ex:
pd = None
logging.warning(f"Could not load pandas library. Try 'pip install pandas'. {ex}")
ENV_VAR_SPARK_UDF_MODULE = "SPARK_UDF_MODULE"
_SERVING_SPARK_REQUESTS = "serving spark requests"
ENABLE_SQL_JDBC = bool(os.environ.get("ENABLE_SQL_JDBC", False))
METASTORE_TYPE = os.environ.get("METASTORE_TYPE", "Derby")
METASTORE_SERVER = os.environ.get("METASTORE_SERVER", None) or "localhost"
METASTORE_DB_USER = os.environ.get("METASTORE_DB_USER", None)
METASTORE_DB_PASSWORD = os.environ.get("METASTORE_DB_PASSWORD", None)
SUPPORT_CLUSTER_BY = False
DOCKER_SPARK_IMAGE = os.environ.get("DOCKER_SPARK_IMAGE", "slalomggp/dataops:latest-dev")
CONTAINER_ENDPOINT = "spark://localhost:7077"
SPARK_DRIVER_MEMORY = "4g"
SPARK_EXECUTOR_MEMORY = "4g"
SPARK_WAREHOUSE_DIR = os.environ.get("SPARK_WAREHOUSE_DIR", "/spark_warehouse/data")
SPARK_S3_PREFIX = "s3a://"
SPARK_LOG_LEVEL = os.environ.get(
"SPARK_LOG_LEVEL", "ERROR"
) # ALL, DEBUG, ERROR, FATAL, INFO, WARN
HADOOP_HOME = os.environ.get("HADOOP_HOME", "/usr/local/hdp")
# SPARK_HOME = os.environ.get("SPARK_HOME", None)
# SPARK_CLASS_PATH = os.path.join(os.environ["SPARK_HOME"], "jars/*")
SPARK_EXTRA_AWS_JARS = [
# Hadoop 2.7.7:
os.path.join(HADOOP_HOME, "share/hadoop/tools/lib/aws-java-sdk-1.7.4.jar"),
os.path.join(HADOOP_HOME, "share/hadoop/tools/lib/hadoop-aws-2.7.7.jar")
# # Hadoop 3.1.2:
# os.path.join(HADOOP_HOME, "share/hadoop/tools/lib/aws-java-sdk-bundle-1.11.271.jar"),
# os.path.join(HADOOP_HOME, "share/hadoop/tools/lib/hadoop-aws-3.1.2.jar")
# os.path.join(HADOOP_HOME, "share/hadoop/tools/lib/aws-java-sdk-core-1.10.6.jar")
# os.path.join(HADOOP_HOME, "share/hadoop/tools/lib/aws-java-sdk-kms-1.10.6.jar")
# os.path.join(HADOOP_HOME, "share/hadoop/tools/lib/aws-java-sdk-s3-1.10.6"),
]
def _add_derby_metastore_config(hadoop_conf):
""" Returns a new hadoop_conf dict with added metastore params """
derby_log = "/home/data/derby.log"
derby_home = "/home/data/derby_home"
derby_hive_metastore_dir = "/home/data/hive_metastore_db"
    for folder in [SPARK_WAREHOUSE_DIR, derby_hive_metastore_dir, derby_home]:
        uio.create_folder(folder)
derby_options = (
f"-Dderby.stream.error.file={derby_log} -Dderby.system.home={derby_home}"
)
hadoop_conf.update(
{
"derby.system.home": derby_home,
"derby.stream.error.file": derby_log,
"driver-java-options": derby_options,
"spark.driver.extraJavaOptions": derby_options,
"spark.executor.extraJavaOptions": derby_options,
"hive.metastore.warehouse.dir": f"file://{derby_hive_metastore_dir}",
# "javax.jdo.option.ConnectionURL": "jdbc:derby:memory:databaseName=metastore_db;create=true",
"javax.jdo.option.ConnectionURL": "jdbc:derby:;databaseName=/home/data/metastore_db;create=true",
"javax.jdo.option.ConnectionDriverName": "org.apache.derby.jdbc.EmbeddedDriver",
}
)
return hadoop_conf
def _add_mysql_metastore_config(hadoop_conf):
""" Returns a new hadoop_conf dict with added metastore params """
hadoop_conf.update(
{
"javax.jdo.option.ConnectionURL": (
f"jdbc:mysql://{METASTORE_SERVER}/"
"metastore_db?createDatabaseIfNotExist=true&useSSL=false"
),
"javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
"javax.jdo.option.ConnectionUserName": "root",
"javax.jdo.option.ConnectionPassword": "<PASSWORD>",
}
)
if METASTORE_DB_USER:
hadoop_conf["javax.jdo.option.ConnectionUserName"] = METASTORE_DB_USER
if METASTORE_DB_PASSWORD:
hadoop_conf["javax.jdo.option.ConnectionPassword"] = <PASSWORD>ASTORE_DB_PASSWORD
return hadoop_conf
def _add_aws_creds_config(hadoop_conf):
""" Returns a new hadoop_conf dict with added metastore params """
hadoop_conf.update(
{
"fs.s3.impl": "org.apache.hadoop.fs.s3a.S3AFileSystem",
"fs.s3a.impl": "org.apache.hadoop.fs.s3a.S3AFileSystem",
"fs.s3a.endpoint": (
f"s3.{os.environ.get('AWS_DEFAULT_REGION', 'us-east-2')}.amazonaws.com"
),
"spark.jars": ",".join(SPARK_EXTRA_AWS_JARS),
"com.amazonaws.services.s3.enableV4": "true",
}
)
os.environ["HADOOP_OPTS"] = (
os.environ.get("HADOOP_OPTS", "")
+ " -Djava.net.preferIPv4Stack=true -Dcom.amazonaws.services.s3.enableV4=true"
)
try:
key, secret, token = uio.parse_aws_creds()
uio.set_aws_env_vars(key, secret, token)
logging.info(
f"Successfully loaded AWS creds for access key: ****************{key[-4:]}"
)
# TODO: Confirm that these settings are not needed (avoid leaks to logs)
# if key:
# hadoop_conf["fs.s3a.access.key"] = key
# if secret:
# hadoop_conf["fs.s3a.secret.key"] = secret
except Exception as ex:
logging.info(f"Could not load AWS creds ({ex})")
return hadoop_conf
def _get_hadoop_conf():
hadoop_conf = {
"spark.driver.memory": SPARK_DRIVER_MEMORY,
"spark.executor.memory": SPARK_EXECUTOR_MEMORY,
"spark.jars.packages": "io.delta:delta-core_2.11:0.4.0",
"spark.logConf": "true",
"spark.sql.warehouse.dir": SPARK_WAREHOUSE_DIR,
"spark.ui.showConsoleProgress": "false", # suppress updates e.g. 'Stage 2=====>'
"log4j.rootCategory": SPARK_LOG_LEVEL,
"log4j.logger.org.apache.hive.service.server": SPARK_LOG_LEVEL,
"log4j.logger.org.apache.spark.api.python.PythonGatewayServer": SPARK_LOG_LEVEL,
}
# Add Thrift JDBC Server settings
hadoop_conf.update(
{
"spark.sql.hive.thriftServer.singleSession": "true",
"hive.server2.thrift.port": 10000,
"hive.server2.http.endpoint": "cliservice",
"log4j.logger.org.apache.spark.sql.hive.thriftserver": SPARK_LOG_LEVEL,
}
)
hadoop_conf = _add_aws_creds_config(hadoop_conf)
if METASTORE_TYPE.upper() == "MYSQL":
hadoop_conf = _add_mysql_metastore_config(hadoop_conf)
else:
hadoop_conf = _add_derby_metastore_config(hadoop_conf)
return hadoop_conf
# GLOBALS
spark = None
sc = None
thrift = None
_spark_container = None
@logged("starting Spark container '{spark_image}' with args: with_jupyter={with_jupyter}")
def _init_spark_container(spark_image=DOCKER_SPARK_IMAGE, with_jupyter=False):
global _spark_container
if _spark_container:
return _spark_container
port_map = {
"4040": "4040", # App Web UI
"7077": "7077", # Standalone master driver
"8080": "8080", # Standalone-mode master Web UI
"8081": "8081", # Standalone-mode worker Web UI
"8888": "8888", # Jupyter Notebook Server
"10000": "10000", # Thrift JDBC port for SQL queries
"18080": "18080", # History Server Web UI
}
uio.set_aws_env_vars()
env = [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"BATCH_ID=SparkContainerTest",
"ENABLE_SQL_JDBC=True",
"METASTORE_TYPE=MySQL",
]
if "AWS_ACCESS_KEY_ID" in os.environ:
env.append(f"AWS_ACCESS_KEY_ID={os.environ['AWS_ACCESS_KEY_ID']}")
if "AWS_SECRET_ACCESS_KEY" in os.environ:
env.append(f"AWS_SECRET_ACCESS_KEY={os.environ['AWS_SECRET_ACCESS_KEY']}")
docker_client = docker.from_env() # WSL1
# docker_client = docker.DockerClient(base_url="npipe:////./pipe/docker_wsl") # WSL2
try:
dock_r.pull(spark_image)
except Exception as ex:
logging.warning(f"Could not pull latest Spark image '{spark_image}'. {ex}")
try:
old_container = docker_client.containers.get("spark_server")
if old_container:
with logged_block("terminating previous 'spark_server' docker container"):
old_container.stop()
logging.info("Waiting for cleanup of old Spark container...")
time.sleep(2)
except Exception as _:
pass
spark_image_cmd = "sparkutils start_server"
if with_jupyter:
spark_image_cmd = f"{spark_image_cmd} --with_jupyter"
_spark_container = docker_client.containers.run(
image=spark_image,
name="spark_server",
command=spark_image_cmd,
detach=True,
auto_remove=True,
ports=port_map,
environment=env,
# stream=True,
)
logging.info(
f"Attempting to initialize Spark docker container "
f"(status={_spark_container.status})..."
)
MAX_WAIT_TIME = int(60 * 5)
start = time.time()
for line in _spark_container.logs(stream=True, until=int(start + MAX_WAIT_TIME)):
logging.info(f"SPARK CONTAINER LOG: {line.decode('utf-8').rstrip()}")
# time.sleep(0.2)
if _SERVING_SPARK_REQUESTS in line.decode("utf-8"):
logging.info(
f"Spark container reported success after "
f"{int(time.time() - start)} seconds"
)
break
elif time.time() > start + MAX_WAIT_TIME:
logging.info(f"Max timeout wait exceeded ({MAX_WAIT_TIME} seconds)")
break
if _spark_container.status in ["running", "created"]:
return _spark_container
else:
raise RuntimeError(
"Spark docker container exited unexpectedly "
f"(status={_spark_container.status})."
)
def _destroy_spark_container():
global _spark_container
if _spark_container:
_spark_container.stop()
_spark_container = None
@logged(
"initializing Spark with args: dockerized={dockerized}, with_jupyter={with_jupyter}"
)
def _init_spark(dockerized=False, with_jupyter=False, daemon=False):
"""Return an initialized spark object"""
global spark, sc, thrift
if dockerized:
container = _init_spark_container(with_jupyter=with_jupyter)
# context = SparkContext(conf=conf)
os.environ["PYSPARK_PYTHON"] = sys.executable
with logged_block("connecting to spark container"):
spark = SparkSession.builder.master(CONTAINER_ENDPOINT).getOrCreate()
spark.sparkContext.setLogLevel(SPARK_LOG_LEVEL)
sc = spark.sparkContext
elif daemon:
cmd = f"{sys.executable} -m slalom.dataops.sparkutils start_server"
wait_test = lambda line: _SERVING_SPARK_REQUESTS in line
wait_max = 120 # Max wait in seconds
if with_jupyter:
cmd = f"{cmd} --with_jupyter"
runnow.run(cmd, daemon=True, wait_test=wait_test, wait_max=wait_max)
else:
_init_local_spark()
def _init_local_spark():
"""Return an initialized local spark object"""
global spark, sc, thrift
# context = SparkContext(conf=conf)
for folder in [SPARK_WAREHOUSE_DIR]:
uio.create_folder(folder)
conf = SparkConf()
hadoop_conf = _get_hadoop_conf()
for fn in [conf.set]:
# for fn in [conf.set, SparkContext.setSystemProperty, context.setSystemProperty]:
for k, v in hadoop_conf.items():
fn(k, v)
os.environ["PYSPARK_PYTHON"] = sys.executable
with logged_block("creating spark session"):
spark = (
SparkSession.builder.config(conf=conf)
.master("local")
.appName("Python Spark")
.enableHiveSupport()
.getOrCreate()
)
sc = spark.sparkContext
# Set the property for the driver. Doesn't work using the same syntax
# as the executor because the jvm has already been created.
sc.setSystemProperty("com.amazonaws.services.s3.enableV4", "true")
if not ENABLE_SQL_JDBC:
logging.info(f"Skipping Thrift server launch (ENABLE_SQL_JDBC={ENABLE_SQL_JDBC})")
thrift = None
else:
with logged_block("starting Thrift server"):
java_import(sc._gateway.jvm, "")
spark_hive = sc._gateway.jvm.org.apache.spark.sql.hive
thrift_class = spark_hive.thriftserver.HiveThriftServer2
thrift = thrift_class.startWithContext(spark._jwrapped)
logging.info("Sleeping while waiting for Thrift Server...")
time.sleep(1)
spark.sparkContext.setLogLevel(SPARK_LOG_LEVEL)
_print_conf_debug(sc)
if ENV_VAR_SPARK_UDF_MODULE in os.environ:
add_udf_module(os.environ.get(ENV_VAR_SPARK_UDF_MODULE))
else:
logging.info("Skipping loading UDFs (env variable not set)")
for jar_path in SPARK_EXTRA_AWS_JARS:
sc.addPyFile(jar_path)
@logged("importing from dynamic python file '{absolute_file_path}'")
def path_import(absolute_file_path):
"""implementation taken from https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly"""
module_name = os.path.basename(absolute_file_path)
module_name = ".".join(module_name.split(".")[:-1]) # removes .py suffix
spec = importlib.util.spec_from_file_location(module_name, absolute_file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
@logged("loading UDFs from module directory '{module_dir}'")
def add_udf_module(module_dir=None):
"""
Add a package from module_dir (or zip file) and register any udfs within the package
The module must contain a '__init__.py' file and functions to be imported should be
annotated using the @udf() decorator.
# https://stackoverflow.com/questions/47558704/python-dynamic-import-methods-from-file
"""
global sc
from inspect import getmembers, isfunction
# module_dir = module_dir or os.environ.get(ENV_VAR_SPARK_UDF_MODULE)
module_dir = os.path.realpath(module_dir)
# module_root = Path(module_dir).parent
# module_name = os.path.basename(module_dir)
# if module_root not in sys.path:
# sys.path.append(module_root)
if not os.path.isdir(module_dir):
raise ValueError(f"Folder '{module_dir}' does not exist.")
for file in uio.list_files(module_dir):
if file.endswith(".py"):
module = path_import(file)
for member in getmembers(module):
if isfunction(member[1]):
logging.info(f"Found module function: {member}")
func_name, func = member[0], member[1]
if func_name[:1] != "_" and func_name != "udf":
logging.info(f"Registering UDF '{func_name}':\n{func.__dict__}")
spark.udf.register(func_name, func)
# else:
# logging.info(f"Found module entity: {member}")
# sc.addPyFile(jar_path)
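# Example UDF module (a hedged sketch, not shipped with this package): a directory pointed
# to by SPARK_UDF_MODULE could contain a file such as text_udfs.py; every function in it
# that does not start with "_" and is not named "udf" gets registered by add_udf_module().
#
#     # text_udfs.py
#     def normalize_whitespace(value):
#         """Collapse runs of whitespace so values compare reliably."""
#         return " ".join(value.split()) if value else value
#
# After registration the function is usable from Spark SQL, e.g.
# spark.sql("SELECT normalize_whitespace(city) FROM some_table").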
@logged("starting Jupyter notebooks server")
def start_jupyter(nb_directory="/home/jovyan/work", nb_token="<PASSWORD>"):
jupyter_run_command = (
f"jupyter lab"
f" --NotebookApp.notebook_dir='{nb_directory}'"
f" --NotebookApp.token='{nb_token}'"
f" --allow-root"
)
log_file = "jupyter_log.txt"
runnow.run(jupyter_run_command, daemon=True, log_file_path=log_file)
time.sleep(5)
logging.info("\nJUPYTER_LOG:".join(uio.get_text_file_contents(log_file).splitlines()))
logging.info(
"Jupyter notebooks server started at: https://localhost:8888/?token=<PASSWORD>"
)
def get_spark(dockerized=False):
global spark
if not spark:
_init_spark(dockerized=dockerized)
return spark
def _get_printable_context(context: dict = None, as_str=True):
"""Return a string or dict, obfuscating names that look like keys."""
printable_dict = {
k: (
v
if not any(
[
"secret" in k.lower(),
"pwd" in k.lower(),
"pass" in k.lower(),
"access.key" in k.lower(),
]
)
else "****"
)
for k, v in context.items()
if k != "__builtins__"
}
if as_str:
return "\n".join([f"\t{k}:\t{v}" for k, v in printable_dict.items()])
return printable_dict
def _print_conf_debug(sc):
""" Print all spark and hadoop config settings """
logging.debug(
"SparkSession 'spark' and SparkContext 'sc' initialized with settings:\n"
f"{_get_printable_context(dict(sc._conf.getAll()))}"
)
# Spark Helper Function:
@logged("creating table '{table_name}'", success_detail="{result.count():,.0f} rows")
def create_spark_sql_table(
table_name,
sql,
print_row_count=True,
print_n_rows=None,
run_audit=True,
schema_only=False,
):
spark.sql(f"DROP TABLE IF EXISTS {table_name}")
distribution_clause = ""
for col in ["AccountId", "OpportunityId"]:
if SUPPORT_CLUSTER_BY and not distribution_clause and col in sql:
distribution_clause = "\n DISTRIBUTE BY {col}"
sql_command = f"""
CREATE TABLE {table_name}
USING PARQUET
AS
{sql}
{distribution_clause}
"""
spark.sql(sql_command)
df = spark.sql(f"SELECT * FROM {table_name}")
if print_n_rows:
sample_spark_table(table_name, n=print_n_rows)
if run_audit:
audit_spark_table_keys(table_name)
return df
def audit_spark_table_keys(table_name, key_col_suffix="Id", raise_error=False):
df = spark.sql(f"SELECT * FROM {table_name}")
key_cols = [c for c in df.columns if key_col_suffix in c]
if not key_cols:
key_cols.append(df.columns[0])
cols = ",".join(
[
f"COUNT(DISTINCT {c}) AS {c}__values,\nCOUNT(*) - COUNT({c}) as {c}__null"
for c in key_cols
]
)
sql = f"SELECT COUNT(*) AS __num_rows, {cols}\nFROM {table_name}"
logging.info(f"Running '{table_name}' table audit...")
result = spark.sql(sql).collect()[0]
num_rows = result["__num_rows"]
unique = []
empty = []
for col in key_cols:
if result[col + "__null"] >= num_rows:
empty.append(col)
elif result[col + "__values"] >= num_rows - 1:
unique.append(col)
result_text = (
f"Found unique column(s) [{','.join(unique) or '(none)'}] "
f"and empty columns [{','.join(empty) or '(none)'}]. "
f"Table profile: {result}"
)
if not unique:
failure_msg = f"Audit failed for table '{table_name}'. {result_text}"
if raise_error:
raise RuntimeError(failure_msg)
else:
logging.warning(f"Table audit warning for '{table_name}'. {result_text}")
elif len(empty):
logging.warning(f"Table audit warning for '{table_name}'. {result_text}")
else:
logging.info(f"Table audit successful for '{table_name}'. {result_text}")
def sample_spark_table(table_name, n=1):
df = spark.sql(f"SELECT * FROM {table_name} LIMIT {n}")
sample_spark_df(df, n=n, name=table_name)
def sample_spark_df(df, n=1, name=None, log_fn=logging.debug):
log_fn(
f"Spark Dataframe column list: "
f"{', '.join(['{dtype[0]} ({dtype[1]})' for dtype in df.dtypes])}"
f"'{name or 'Dataframe'}' row sample:\n{df.limit(n).toPandas().head(n)}\n"
)
def create_spark_table(
df, table_name, print_n_rows=None, run_audit=False, schema_only=False
):
start_time = time.time()
if isinstance(df, pyspark.sql.DataFrame):
logging.info(f"Creating spark table '{table_name}' from spark dataframe...")
spark_df = df
elif pd and isinstance(df, pd.DataFrame):
# Coerce all values in string columns to string
logging.debug("Coercing column types string prior to save...")
for col in df.select_dtypes(["object"]):
df[col] = df[col].astype("str")
logging.debug("Converting pandas dataframe to spark dataframe prior to save...")
spark_df = spark.createDataFrame(df)
logging.info(f"Creating spark table '{table_name}' from pandas dataframe...")
else:
logging.info(
f"Creating table '{table_name}' from unknown type '{type(df).__name__}"
)
spark_df = spark.createDataFrame(df, verifySchema=False)
spark_df.write.saveAsTable(table_name, mode="overwrite")
if print_n_rows:
sample_spark_table(table_name, n=print_n_rows)
if run_audit:
audit_spark_table_keys(table_name)
def _verify_path(file_path):
return file_path.replace(
"s3://", SPARK_S3_PREFIX
) # .replace("propensity-to-buy", "propensity-to-buy-2")
@logged("loading spark table '{table_name}'")
def load_to_spark_table(
table_name,
file_path,
entity_type=None,
infer_schema=True,
date_format=None,
timestamp_format=None,
filename_column="filename",
df_cleanup_function=None,
print_n_rows=None,
clean_col_names=False,
schema_only=False,
):
start_time = time.time()
file_path = _verify_path(file_path)
if ".xlsx" in file_path.lower():
if pd:
logging.debug(
f"Using pandas to load spark table '{table_name}' from '{file_path}'..."
)
df = pandasutils.get_pandas_df(file_path)
create_spark_table(df, table_name, print_n_rows=print_n_rows)
else:
pandasutils._raise_if_missing_pandas()
else:
logging.debug(f"Loading spark table '{table_name}' from file '{file_path}'...")
df = spark.read.csv(
file_path,
header=True,
escape='"',
quote='"',
multiLine=True,
inferSchema=True,
enforceSchema=False,
dateFormat=date_format,
timestampFormat=timestamp_format,
columnNameOfCorruptRecord="__READ_ERRORS",
)
if filename_column:
df = df.withColumn(filename_column, input_file_name())
if df_cleanup_function:
df = df_cleanup_function(df)
create_spark_table(
df,
table_name,
print_n_rows=print_n_rows,
run_audit=False,
schema_only=schema_only,
)
@logged("saving '{table_name}' to file")
def save_spark_table(
table_name,
file_path,
entity_type=None,
force_single_file=False,
compression="gzip",
schema_only=True,
overwrite=True,
):
start_time = time.time()
file_path = _verify_path(file_path)
df = spark.sql(f"SELECT * FROM {table_name}")
if uio.file_exists(os.path.join(file_path, "_SUCCESS")):
if overwrite:
logging.warning(
"Saved table already exists and overwrite=True. Deleting older files."
)
for oldfile in uio.list_files(file_path):
uio.delete_file(oldfile)
if force_single_file:
logging.debug(
f"Saving spark table '{table_name}' to single file: '{file_path}'..."
)
df = df.coalesce(1)
else:
logging.debug(f"Saving spark table '{table_name}' to folder: '{file_path}'...")
try:
df.write.csv( # SAFE
file_path,
mode="overwrite",
header=True,
compression=compression,
quote='"',
escape='"',
)
except Exception as ex: # intermittent failures can be caused by eventual consistency
logging.warning(
f"Retrying S3 table save operation because the first attempt failed ({ex})"
)
time.sleep(20) # Sleep to allow S3 to reach eventual consistency
df.write.csv( # SAFE
file_path,
mode="overwrite",
header=True,
compression=compression,
quote='"',
escape='"',
)
def get_spark_table_as_pandas(table_name):
if not pd:
raise RuntimeError(
"Could not execute get_pandas_from_spark_table(): Pandas library not loaded."
)
return spark.sql(f"SELECT * FROM {table_name}").toPandas()
# Create Dates table
def create_calendar_table(table_name, start_date, end_date):
num_days = (end_date - start_date).days
date_rows = [
SparkRow(start_date + datetime.timedelta(days=n)) for n in range(0, num_days)
]
df = spark.createDataFrame(date_rows)
df = df.selectExpr("_1 AS calendar_date", "date_format(_1, 'yMMdd') AS YYYYMMDD")
create_spark_table(df, table_name)
@logged(
"starting spark server with args:"
" dockerized={dockerized}, with_jupyter={with_jupyter}"
)
def start_server(dockerized: bool = None, with_jupyter: bool = True, daemon: bool = None):
if dockerized is None:
dockerized = (
False
if any(["SPARK_HOME" in os.environ, "HADOOP_CONF_DIR" in os.environ])
else True
)
if daemon is None:
daemon = dockerized
if dockerized:
container = _init_spark_container(with_jupyter=with_jupyter)
if daemon:
logging.info("Now serving spark requests via docker.")
else:
with logged_block("hosting spark container"):
while True:
time.sleep(30)
else:
_init_spark(dockerized=False, with_jupyter=with_jupyter, daemon=daemon)
logging.info(
"Spark server started. "
"Monitor via http://localhost:4040 or http://127.0.0.1:4040"
)
# if with_jupyter:
# start_jupyter()
# else:
# logging.info("Skipping Jupyter notebooks server launch...")
if not daemon:
with logged_block(_SERVING_SPARK_REQUESTS):
# NOTE: When run containerized, the above message triggers
# the host to stop echoing logs
while True:
time.sleep(30)
def main():
fire.Fire({"start_server": start_server, "start_jupyter": start_jupyter})
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
92225
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ABC is the AbstractBaseClass in python
from abc import ABC, abstractmethod
# Judge is an abstract class to be subclassed and implemented
# by Judge developers.
# Judge class kind of doubles up for login-logout as well as a Factory
# for the contest and problem classes for the particular judge
class Judge(ABC):
@abstractmethod
def __init__(self, session_data=None):
# Init should not have any network requests
# do them in login, logout, check_running_contest
self.session_data = session_data
@abstractmethod
def check_login(self):
pass
@abstractmethod
def login(self):
# login also controls all the messages being displayed to the user
pass
@abstractmethod
def logout(self):
# logout also controls all the messages displayed to the user
pass
@abstractmethod
def get_running_contests(self):
# return a string of running contest, do it in form of a table.
pass
# This method serves both as a problem getter as well as kind of factory
# for problem
@abstractmethod
def get_problem(self, problem_code, contest_code):
# Method should call the respective Problem.__init__ method to create a
# problem instance and return it
pass
@abstractmethod
def get_contest(self, contest_code):
# Method should call the respective Problem.__init__ method to create a
# contest instance with all its problems and return it
pass
@abstractmethod
def get_problem_url(self, problem_code, contest_code):
# Method should return the url used by judge for a particular problem
pass
@abstractmethod
def get_contest_url(self, contest_code):
# Method should return the url used by judge for a particular contest
pass
@abstractmethod
def get_contests_list_url(self):
# Method should return the url used by judge for listing contest
pass
@abstractmethod
def submit(self, problem, code_text, extension):
# problem is an instance of judge's problem class
# code test is the code to be submitted
# extension is the extension of the code file to determine
# language of submission
pass
@abstractmethod
def get_testcase(self, inp, ans, code):
# returns the testcase with inp, ans and code
# used by termicoder test to output diff
pass
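# Hedged demonstration (not part of the original interface): a do-nothing judge that
# satisfies every abstract method above. The judge name, URLs and return values are
# invented purely to show what a concrete subclass must provide; a real implementation
# would perform network requests and return proper problem/contest objects.
if __name__ == "__main__":
    class EchoJudge(Judge):
        def __init__(self, session_data=None):
            self.session_data = session_data
        def check_login(self):
            return self.session_data is not None
        def login(self):
            self.session_data = {"user": "demo"}
        def logout(self):
            self.session_data = None
        def get_running_contests(self):
            return "no contests (demo judge)"
        def get_problem(self, problem_code, contest_code):
            return {"problem": problem_code, "contest": contest_code}
        def get_contest(self, contest_code):
            return {"contest": contest_code, "problems": []}
        def get_problem_url(self, problem_code, contest_code):
            return "https://judge.example/%s/%s" % (contest_code, problem_code)
        def get_contest_url(self, contest_code):
            return "https://judge.example/%s" % contest_code
        def get_contests_list_url(self):
            return "https://judge.example/contests"
        def submit(self, problem, code_text, extension):
            return {"status": "queued", "problem": problem, "extension": extension}
        def get_testcase(self, inp, ans, code):
            return {"input": inp, "answer": ans, "code": code}
    judge = EchoJudge()
    judge.login()
    print(judge.get_problem_url("A", "demo2024"))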
|
StarcoderdataPython
|
1797570
|
# coding: utf-8
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
urlpatterns = [
url(r'^$', view=RedirectView.as_view(url=reverse_lazy('rls:latest')), name='index'),
url(r'^rlsget/', include('rlsget.urls', namespace='rlsget')),
url(r'^rls/', include('rls.urls', namespace='rls')),
url(r'^accounts/login/$', auth_views.login, name='login'),
# url(r'^admin/', include(admin.site.urls)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
StarcoderdataPython
|
9657425
|
<reponame>584807419/FreeProxyPool
import re
from proxypool.schemas.proxy import Proxy
from proxypool.crawlers.base import BaseCrawler
class xilaCrawler(BaseCrawler):
urls = ['http://www.xiladaili.com/http/', 'http://www.xiladaili.com/http/2/', 'http://www.xiladaili.com/http/3/',
'http://www.xiladaili.com/http/4/', 'http://www.xiladaili.com/http/5/', 'http://www.xiladaili.com/http/6/']
def parse(self, html_content):
ips = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}", html_content)
for i in ips:
ip_temp = i.split(':')
host = ip_temp[0]
port = ip_temp[1]
yield Proxy(host=host, port=port)
if __name__ == '__main__':
crawler = xilaCrawler()
for proxy in crawler.crawl():
print(proxy)
|
StarcoderdataPython
|
1721821
|
<gh_stars>0
#Sensitivity to sizing assumptions for New York airport shuttle service
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/../..'))
import numpy as np
from gpkit import Model, ureg
from matplotlib import pyplot as plt
from aircraft_models import OnDemandAircraft
from aircraft_models import OnDemandSizingMission, OnDemandRevenueMission
from aircraft_models import OnDemandDeadheadMission, OnDemandMissionCost
from study_input_data import generic_data, configuration_data
from copy import deepcopy
from collections import OrderedDict
from noise_models import vortex_noise
# Data specific to study
configs = OrderedDict()
case_array = ["Case 1","Case 2","Case 3"]
sizing_mission_range_array = [19, 30, 30]*ureg.nautical_mile
revenue_mission_range_array = [19, 19, 30]*ureg.nautical_mile
deadhead_mission_range_array = [19, 19, 30]*ureg.nautical_mile
for config in configuration_data:
configs[config] = OrderedDict()
for case in case_array:
configs[config][case] = configuration_data[config].copy()
#Delete unwanted configurations
del configs["Multirotor"]["Case 1"]
del configs["Multirotor"]["Case 2"]
del configs["Multirotor"]["Case 3"]
del configs["Autogyro"]["Case 1"]
del configs["Autogyro"]["Case 2"]
del configs["Autogyro"]["Case 3"]
del configs["Helicopter"]["Case 1"]
del configs["Helicopter"]["Case 2"]
del configs["Helicopter"]["Case 3"]
del configs["Tilt duct"]["Case 1"]
del configs["Tilt duct"]["Case 2"]
del configs["Tilt duct"]["Case 3"]
del configs["Coaxial heli"]["Case 1"]
del configs["Coaxial heli"]["Case 2"]
del configs["Coaxial heli"]["Case 3"]
#Delete configurations that will not be evaluated
pared_configs = deepcopy(configs)
for config in configs:
if configs[config] == {}:
del pared_configs[config]
configs = deepcopy(pared_configs)
#Optimize remaining configurations
for config in configs:
print "Solving configuration: " + config
for i, case in enumerate(configs[config]):
c = configs[config][case]
problem_subDict = {}
Aircraft = OnDemandAircraft(autonomousEnabled=generic_data["autonomousEnabled"])
problem_subDict.update({
Aircraft.L_D_cruise: c["L/D"], #estimated L/D in cruise
Aircraft.eta_cruise: generic_data["\eta_{cruise}"], #propulsive efficiency in cruise
Aircraft.tailRotor_power_fraction_hover: c["tailRotor_power_fraction_hover"],
Aircraft.tailRotor_power_fraction_levelFlight: c["tailRotor_power_fraction_levelFlight"],
Aircraft.cost_per_weight: generic_data["vehicle_cost_per_weight"], #vehicle cost per unit empty weight
Aircraft.battery.C_m: generic_data["C_m"], #battery energy density
Aircraft.battery.cost_per_C: generic_data["battery_cost_per_C"], #battery cost per unit energy capacity
Aircraft.rotors.N: c["N"], #number of propellers
Aircraft.rotors.Cl_mean_max: c["Cl_{mean_{max}}"], #maximum allowed mean lift coefficient
Aircraft.structure.weight_fraction: c["weight_fraction"], #empty weight fraction
Aircraft.electricalSystem.eta: generic_data["\eta_{electric}"], #electrical system efficiency
})
SizingMission = OnDemandSizingMission(Aircraft,mission_type=generic_data["sizing_mission"]["type"],
reserve_type=generic_data["reserve_type"])
problem_subDict.update({
SizingMission.mission_range: sizing_mission_range_array[i],#mission range
SizingMission.V_cruise: c["V_{cruise}"],#cruising speed
SizingMission.t_hover: generic_data["sizing_mission"]["t_{hover}"],#hover time
SizingMission.T_A: c["T/A"],#disk loading
SizingMission.passengers.N_passengers: generic_data["sizing_mission"]["N_{passengers}"],#Number of passengers
})
RevenueMission = OnDemandRevenueMission(Aircraft,mission_type=generic_data["revenue_mission"]["type"])
problem_subDict.update({
RevenueMission.mission_range: revenue_mission_range_array[i],#mission range
RevenueMission.V_cruise: c["V_{cruise}"],#cruising speed
RevenueMission.t_hover: generic_data["revenue_mission"]["t_{hover}"],#hover time
RevenueMission.passengers.N_passengers: generic_data["revenue_mission"]["N_{passengers}"],#Number of passengers
RevenueMission.time_on_ground.charger_power: generic_data["charger_power"], #Charger power
})
DeadheadMission = OnDemandDeadheadMission(Aircraft,mission_type=generic_data["deadhead_mission"]["type"])
problem_subDict.update({
DeadheadMission.mission_range: deadhead_mission_range_array[i],#mission range
DeadheadMission.V_cruise: c["V_{cruise}"],#cruising speed
DeadheadMission.t_hover: generic_data["deadhead_mission"]["t_{hover}"],#hover time
DeadheadMission.passengers.N_passengers: generic_data["deadhead_mission"]["N_{passengers}"],#Number of passengers
DeadheadMission.time_on_ground.charger_power: generic_data["charger_power"], #Charger power
})
MissionCost = OnDemandMissionCost(Aircraft,RevenueMission,DeadheadMission)
problem_subDict.update({
MissionCost.revenue_mission_costs.operating_expenses.pilot_cost.wrap_rate: generic_data["pilot_wrap_rate"],#pilot wrap rate
MissionCost.revenue_mission_costs.operating_expenses.maintenance_cost.wrap_rate: generic_data["mechanic_wrap_rate"], #mechanic wrap rate
MissionCost.revenue_mission_costs.operating_expenses.maintenance_cost.MMH_FH: generic_data["MMH_FH"], #maintenance man-hours per flight hour
MissionCost.deadhead_mission_costs.operating_expenses.pilot_cost.wrap_rate: generic_data["pilot_wrap_rate"],#pilot wrap rate
MissionCost.deadhead_mission_costs.operating_expenses.maintenance_cost.wrap_rate: generic_data["mechanic_wrap_rate"], #mechanic wrap rate
MissionCost.deadhead_mission_costs.operating_expenses.maintenance_cost.MMH_FH: generic_data["MMH_FH"], #maintenance man-hours per flight hour
MissionCost.deadhead_ratio: generic_data["deadhead_ratio"], #deadhead ratio
})
problem = Model(MissionCost["cost_per_trip"],
[Aircraft, SizingMission, RevenueMission, DeadheadMission, MissionCost])
problem.substitutions.update(problem_subDict)
solution = problem.solve(verbosity=0)
configs[config][case]["solution"] = solution
configs[config][case]["TOGW"] = solution("TOGW_OnDemandAircraft")
configs[config][case]["W_{battery}"] = solution("W_OnDemandAircraft/Battery")
configs[config][case]["cost_per_trip_per_passenger"] = solution("cost_per_trip_per_passenger_OnDemandMissionCost")
#Noise computations (sizing mission)
T_perRotor = solution("T_perRotor_OnDemandSizingMission")[0]
Q_perRotor = solution("Q_perRotor_OnDemandSizingMission")[0]
R = solution("R")
VT = solution("VT_OnDemandSizingMission")[0]
s = solution("s")
Cl_mean = solution("Cl_{mean_{max}}")
N = solution("N")
B = generic_data["B"]
delta_S = generic_data["delta_S"]
#A-weighted
f_peak, SPL, spectrum = vortex_noise(T_perRotor=T_perRotor,R=R,VT=VT,s=s,
Cl_mean=Cl_mean,N=N,B=B,delta_S=delta_S,h=0*ureg.ft,t_c=0.12,St=0.28,
weighting="A")
configs[config][case]["SPL_sizing_A"] = SPL
configs[config][case]["f_{peak}"] = f_peak
configs[config][case]["spectrum_sizing_A"] = spectrum
#Noise computations (revenue mission)
T_perRotor = solution("T_perRotor_OnDemandRevenueMission")[0]
Q_perRotor = solution("Q_perRotor_OnDemandRevenueMission")[0]
R = solution("R")
VT = solution("VT_OnDemandRevenueMission")[0]
s = solution("s")
Cl_mean = solution("Cl_{mean_{max}}")
N = solution("N")
B = generic_data["B"]
delta_S = generic_data["delta_S"]
#A-weighted
f_peak, SPL, spectrum = vortex_noise(T_perRotor=T_perRotor,R=R,VT=VT,s=s,
Cl_mean=Cl_mean,N=N,B=B,delta_S=delta_S,h=0*ureg.ft,t_c=0.12,St=0.28,
weighting="A")
configs[config][case]["SPL_revenue_A"] = SPL
configs[config][case]["f_{peak}"] = f_peak
configs[config][case]["spectrum_revenue_A"] = spectrum
# Plotting commands
plt.ion()
fig1 = plt.figure(figsize=(12,12), dpi=80)
plt.rc('axes', axisbelow=True)
plt.show()
y_pos = np.arange(len(configs))
labels = [""]*len(configs)
for i, config in enumerate(configs):
labels[i] = config
xmin = np.min(y_pos) - 0.7
xmax = np.max(y_pos) + 0.7
offset_array = [-0.3,0,0.3]
width = 0.2
colors = ["grey", "w", "k"]
legend_labels = [""]*np.size(sizing_mission_range_array)
for i,val in enumerate(legend_labels):
legend_labels[i] = "%0.0f nmi sizing mission; %0.0f nmi revenue mission" \
% (sizing_mission_range_array[i].to(ureg.nautical_mile).magnitude,
revenue_mission_range_array[i].to(ureg.nautical_mile).magnitude)
if revenue_mission_range_array[i] != deadhead_mission_range_array[i]:
raise ValueError("Revenue and deadhead missions must be the same length.")
#Takeoff gross weight
plt.subplot(2,2,1)
for i,config in enumerate(configs):
for j,case in enumerate(configs[config]):
c = configs[config][case]
offset = offset_array[j]
TOGW = c["TOGW"].to(ureg.lbf).magnitude
if (i == 0):
label = legend_labels[j]
plt.bar(i+offset,TOGW,align='center',alpha=1,width=width,color=colors[j],
edgecolor='k',label=label)
else:
plt.bar(i+offset,TOGW,align='center',alpha=1,width=width,color=colors[j],
edgecolor='k')
plt.grid()
plt.xticks(y_pos, labels, rotation=-45, fontsize=12)
plt.yticks(fontsize=12)
plt.ylabel('Weight (lbf)', fontsize = 16)
plt.xlim(xmin=xmin,xmax=xmax)
[ymin,ymax] = plt.gca().get_ylim()
plt.ylim(ymax = 1.3*ymax)
plt.title("Takeoff Gross Weight",fontsize = 18)
plt.legend(loc='upper left',framealpha=1, fontsize = 12)
#Trip cost per passenger
plt.subplot(2,2,2)
for i,config in enumerate(configs):
for j,case in enumerate(configs[config]):
c = configs[config][case]
offset = offset_array[j]
cptpp = c["cost_per_trip_per_passenger"]
if (i == 0):
label = legend_labels[j]
plt.bar(i+offset,cptpp,align='center',alpha=1,width=width,color=colors[j],
edgecolor='k',label=label)
else:
plt.bar(i+offset,cptpp,align='center',alpha=1,width=width,color=colors[j],
edgecolor='k')
plt.grid()
plt.xlim(xmin=xmin,xmax=xmax)
[ymin,ymax] = plt.gca().get_ylim()
plt.ylim(ymax = 1.3*ymax)
plt.xticks(y_pos, labels, rotation=-45, fontsize=12)
plt.yticks(fontsize=12)
plt.ylabel('Cost ($US)', fontsize = 16)
plt.title("Cost per Trip, per Passenger",fontsize = 18)
plt.legend(loc='upper left',framealpha=1, fontsize = 12)
#Sound pressure level (in hover, sizing mission)
plt.subplot(2,2,3)
for i,config in enumerate(configs):
for j,case in enumerate(configs[config]):
c = configs[config][case]
offset = offset_array[j]
SPL_sizing = c["SPL_sizing_A"]
if (i == 0):
label = legend_labels[j]
plt.bar(i+offset,SPL_sizing,align='center',alpha=1,width=width,color=colors[j],
edgecolor='k',label=label)
else:
plt.bar(i+offset,SPL_sizing,align='center',alpha=1,width=width,color=colors[j],
edgecolor='k')
SPL_req = 62
plt.plot([np.min(y_pos)-1,np.max(y_pos)+1],[SPL_req, SPL_req],
color="black", linewidth=3, linestyle="-")
plt.grid()
plt.xlim(xmin=xmin,xmax=xmax)
[ymin,ymax] = plt.gca().get_ylim()
plt.ylim(ymin = 57,ymax = ymax + 1)
plt.xticks(y_pos, labels, rotation=-45, fontsize=12)
plt.yticks(fontsize=12)
plt.ylabel('SPL (dBA)', fontsize = 16)
plt.title("Sound Pressure Level (sizing mission)",fontsize = 18)
plt.legend(loc='upper left',framealpha=1, fontsize = 12)
#Sound pressure level (in hover, revenue mission)
plt.subplot(2,2,4)
for i,config in enumerate(configs):
for j,case in enumerate(configs[config]):
c = configs[config][case]
offset = offset_array[j]
SPL_revenue = c["SPL_revenue_A"]
if (i == 0):
label = legend_labels[j]
plt.bar(i+offset,SPL_revenue,align='center',alpha=1,width=width,color=colors[j],
edgecolor='k',label=label)
else:
plt.bar(i+offset,SPL_revenue,align='center',alpha=1,width=width,color=colors[j],
edgecolor='k')
SPL_req = 62
plt.plot([np.min(y_pos)-1,np.max(y_pos)+1],[SPL_req, SPL_req],
color="black", linewidth=3, linestyle="-")
plt.grid()
plt.xlim(xmin=xmin,xmax=xmax)
[ymin,ymax] = plt.gca().get_ylim()
plt.ylim(ymin = 57,ymax = ymax + 1)
plt.xticks(y_pos, labels, rotation=-45, fontsize=12)
plt.yticks(fontsize=12)
plt.ylabel('SPL (dBA)', fontsize = 16)
plt.title("Sound Pressure Level (revenue mission)",fontsize = 18)
plt.legend(loc='upper left',framealpha=1, fontsize = 12)
if generic_data["reserve_type"] == "FAA_aircraft" or generic_data["reserve_type"] == "FAA_heli":
num = solution("t_{loiter}_OnDemandSizingMission").to(ureg.minute).magnitude
if generic_data["reserve_type"] == "FAA_aircraft":
reserve_type_string = "FAA aircraft VFR (%0.0f-minute loiter time)" % num
elif generic_data["reserve_type"] == "FAA_heli":
reserve_type_string = "FAA helicopter VFR (%0.0f-minute loiter time)" % num
elif generic_data["reserve_type"] == "Uber":
num = solution["constants"]["R_{divert}_OnDemandSizingMission"].to(ureg.nautical_mile).magnitude
reserve_type_string = " (%0.0f-nm diversion distance)" % num
if generic_data["autonomousEnabled"]:
autonomy_string = "autonomy enabled"
else:
autonomy_string = "pilot required"
title_str = "Aircraft parameters: battery energy density = %0.0f Wh/kg; %0.0f rotor blades; %s\n" \
% (generic_data["C_m"].to(ureg.Wh/ureg.kg).magnitude, B, autonomy_string) \
+ "Sizing mission (%s): %0.0f passengers; %0.0fs hover time; reserve type = " \
% (generic_data["sizing_mission"]["type"],\
generic_data["sizing_mission"]["N_{passengers}"], generic_data["sizing_mission"]["t_{hover}"].to(ureg.s).magnitude)\
+ reserve_type_string + "\n"\
+ "Revenue mission (%s): %0.1f passengers; %0.0fs hover time; no reserve; charger power = %0.0f kW\n" \
% (generic_data["revenue_mission"]["type"],\
generic_data["revenue_mission"]["N_{passengers}"], generic_data["revenue_mission"]["t_{hover}"].to(ureg.s).magnitude,\
generic_data["charger_power"].to(ureg.kW).magnitude) \
+ "Deadhead mission (%s): %0.1f passengers; %0.0fs hover time; no reserve; deadhead ratio = %0.1f" \
% (generic_data["deadhead_mission"]["type"], \
generic_data["deadhead_mission"]["N_{passengers}"], generic_data["deadhead_mission"]["t_{hover}"].to(ureg.s).magnitude,\
generic_data["deadhead_ratio"])
plt.suptitle(title_str,fontsize = 14.0)
plt.tight_layout()
plt.subplots_adjust(left=0.08,right=0.98,bottom=0.10,top=0.87)
plt.savefig('new_york_heli_plot_01.pdf')
|
StarcoderdataPython
|
3421393
|
<reponame>beryldb/python-beryl
# BerylDB - A modular database.
# http://www.beryldb.com
#
# Copyright (C) 2021 <NAME> <<EMAIL>>
#
# This file is part of BerylDB. BerylDB is free software: you can
# redistribute it and/or modify it under the terms of the BSD License
# version 3.
#
# More information about our licensing can be found at https://docs.beryl.dev
from .client import Client
|
StarcoderdataPython
|
12864618
|
<filename>scraper/engine.py
import sys
import csv
import requests
from parsel import Selector
from scraper.parser import get_features_from_item
start_url = 'http://www.world-art.ru/animation/rating_top.php'
SIGN_STDOUT = '-'
FORMAT_CSV = 'csv'
FORMAT_JL = 'jl'
def parse(url: str, out_path: str, out_format: str):
"""
gets link and returns the response
"""
response = requests.get(url)
assert response.status_code == 200, f'bad status code: {response.status_code}'
response_html = Selector(response.text)
links_to_films = response_html.xpath('//td[@class="review"]/a[@class="review"]/@href').getall()
out_file = sys.stdout if out_path == SIGN_STDOUT else open(out_path, 'w', buffering=1, newline='')
for link in links_to_films:
item_response = requests.get(link)
        assert item_response.status_code == 200, f'bad status code: {item_response.status_code}'
item = get_features_from_item(item_response)
if out_format == FORMAT_CSV:
item_writer = csv.writer(out_file, delimiter=' ', quotechar=',', quoting=csv.QUOTE_MINIMAL)
item_writer.writerow(item.values())
    if out_file is not sys.stdout:
        out_file.close()
    return
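if __name__ == '__main__':
    # Hedged example invocation: crawl the rating page and stream CSV rows to stdout.
    # Running it needs network access plus this package's parser module.
    parse(start_url, SIGN_STDOUT, FORMAT_CSV)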
|
StarcoderdataPython
|
8100385
|
<filename>src/commands/generators/lib.py
import os, json, sys
def load(file):
with open(file, 'r') as fi:
data = fi.read()
return json.loads(data)
def makeScript(name,content):
with open(name, 'w') as fi:
data = fi.write(content)
return data
def plural(word):
return word + "s"
|
StarcoderdataPython
|
6459864
|
"""
Test of Summary tables. This has many test cases, so to keep files smaller, it's split into two
files: test_summary.py and test_summary2.py.
"""
import actions
import logger
import objtypes
import test_engine
import test_summary
from test_engine import Table, Column, View, Section, Field
log = logger.Logger(__name__, logger.INFO)
class TestSummary2(test_engine.EngineTestCase):
sample = test_summary.TestSummary.sample
starting_table = test_summary.TestSummary.starting_table
starting_table_data = test_summary.TestSummary.starting_table_data
def test_add_summary_formula(self):
# Verify that we can add a summary formula; that new sections automatically get columns
# matching the source table, and not other columns. Check that group-by columns override
# formula columns (if there are any by the same name).
# Start as in test_change_summary_formula() test case; see there for what tables and columns
# we expect to have at this point.
self.load_sample(self.sample)
self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]])
self.apply_user_action(["CreateViewSection", 1, 0, "record", []])
# Check that we cannot add a non-formula column.
with self.assertRaisesRegex(ValueError, r'non-formula column'):
self.apply_user_action(["AddColumn", "GristSummary_7_Address", "average",
{"type": "Text", "isFormula": False}])
# Add two formula columns: one for 'state' (an existing column name, and a group-by column in
# some tables), and one for 'average' (a new column name).
self.apply_user_action(["AddColumn", "GristSummary_7_Address2", "state",
{"formula": "':'.join(sorted(set($group.state)))"}])
self.apply_user_action(["AddColumn", "GristSummary_7_Address", "average",
{"formula": "$amount / $count"}])
# Add two more summary tables: by 'city', and by 'state', and see what columns they get.
self.apply_user_action(["CreateViewSection", 1, 0, "record", [11]])
self.apply_user_action(["CreateViewSection", 1, 0, "record", [12]])
# And also a summary table for an existing breakdown.
self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]])
# Check the table and columns for all the summary tables.
self.assertTables([
self.starting_table,
Table(2, "GristSummary_7_Address", 0, 1, columns=[
Column(14, "city", "Text", False, "", 11),
Column(15, "state", "Text", False, "", 12),
Column(16, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(17, "count", "Int", True, "len($group)", 0),
Column(18, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(23, "average", "Any", True, "$amount / $count", 0),
]),
Table(3, "GristSummary_7_Address2", 0, 1, columns=[
Column(19, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(20, "count", "Int", True, "len($group)", 0),
Column(21, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(22, "state", "Any", True, "':'.join(sorted(set($group.state)))", 0),
]),
Table(4, "GristSummary_7_Address3", 0, 1, columns=[
Column(24, "city", "Text", False, "", 11),
Column(25, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(26, "count", "Int", True, "len($group)", 0),
Column(27, "state", "Any", True, "':'.join(sorted(set($group.state)))", 0),
Column(28, "amount", "Numeric", True, "SUM($group.amount)", 0),
]),
# Note that since 'state' is used as a group-by column here, we skip the 'state' formula.
Table(5, "GristSummary_7_Address4", 0, 1, columns=[
Column(29, "state", "Text", False, "", 12),
Column(30, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(31, "count", "Int", True, "len($group)", 0),
Column(32, "amount", "Numeric", True, "SUM($group.amount)", 0),
]),
])
# We should now have two sections for table 2 (the one with two group-by fields).
self.assertTableData('_grist_Views_section', cols="subset", data=[
["id", "parentId", "tableRef"],
[1, 1, 2],
[5, 5, 2],
], rows=lambda r: r.tableRef.id == 2)
self.assertTableData('_grist_Views_section_field', cols="subset", data=[
["id", "parentId", "colRef"],
[1, 1, 14],
[2, 1, 15],
[3, 1, 17],
[4, 1, 18],
[8, 1, 23],
[16, 5, 14],
[17, 5, 15],
[18, 5, 17],
[19, 5, 18], # new section doesn't automatically get 'average' column
], rows=lambda r: r.parentId.id in {1,5})
# Check that the data is as we expect.
self.assertTableData('GristSummary_7_Address', cols="all", data=[
[ "id", "city", "state", "group", "count", "amount", "average" ],
[ 1, "New York", "NY" , [21,26,31],3, 1.+6+11 , (1.+6+11)/3 ],
[ 2, "Albany", "NY" , [22], 1, 2. , 2. ],
[ 3, "Seattle", "WA" , [23], 1, 3. , 3. ],
[ 4, "Chicago", "IL" , [24], 1, 4. , 4. ],
[ 5, "Bedford", "MA" , [25], 1, 5. , 5. ],
[ 6, "Buffalo", "NY" , [27], 1, 7. , 7. ],
[ 7, "Bedford", "NY" , [28], 1, 8. , 8. ],
[ 8, "Boston", "MA" , [29], 1, 9. , 9. ],
[ 9, "Yonkers", "NY" , [30], 1, 10. , 10. ],
])
self.assertTableData('GristSummary_7_Address2', cols="all", data=[
[ "id", "count", "amount", "state" , "group" ],
[ 1, 11, 66.0 , "IL:MA:NY:WA" , [21,22,23,24,25,26,27,28,29,30,31]],
])
self.assertTableData('GristSummary_7_Address3', cols="subset", data=[
[ "id", "city", "count", "amount", "state" ],
[ 1, "<NAME>", 3, 1.+6+11 , "NY" ],
[ 2, "Albany", 1, 2. , "NY" ],
[ 3, "Seattle", 1, 3. , "WA" ],
[ 4, "Chicago", 1, 4. , "IL" ],
[ 5, "Bedford", 2, 5.+8 , "MA:NY"],
[ 6, "Buffalo", 1, 7. , "NY" ],
[ 7, "Boston", 1, 9. , "MA" ],
[ 8, "Yonkers", 1, 10. , "NY" ],
])
self.assertTableData('GristSummary_7_Address4', cols="subset", data=[
[ "id", "state", "count", "amount" ],
[ 1, "NY", 7, 1.+2+6+7+8+10+11 ],
[ 2, "WA", 1, 3. ],
[ 3, "IL", 1, 4. ],
[ 4, "MA", 2, 5.+9 ],
])
# Modify a value, and check that various tables got updated correctly.
out_actions = self.update_record("Address", 28, state="MA")
self.assertPartialOutActions(out_actions, {
"stored": [
actions.UpdateRecord("Address", 28, {'state': 'MA'}),
actions.BulkUpdateRecord("GristSummary_7_Address", [5,7], {'amount': [5.0 + 8.0, 0.0]}),
actions.BulkUpdateRecord("GristSummary_7_Address", [5,7],
{'average': [6.5, objtypes.RaisedException(ZeroDivisionError())]}),
actions.BulkUpdateRecord("GristSummary_7_Address", [5,7], {'count': [2, 0]}),
actions.BulkUpdateRecord("GristSummary_7_Address", [5,7], {'group': [[25, 28], []]}),
actions.UpdateRecord("GristSummary_7_Address3", 5, {'state': "MA"}),
actions.BulkUpdateRecord("GristSummary_7_Address4", [1,4],
{'amount': [1.+2+6+7+10+11, 5.+8+9]}),
actions.BulkUpdateRecord("GristSummary_7_Address4", [1,4], {'count': [6, 3]}),
actions.BulkUpdateRecord("GristSummary_7_Address4", [1,4],
{'group': [[21,22,26,27,30,31], [25,28,29]]}),
]
})
#----------------------------------------------------------------------
def test_summary_col_rename(self):
# Verify that renaming a column in a source table causes appropriate renames in the summary
# tables, and that renames of group-by columns in summary tables are disallowed.
# Start as in test_change_summary_formula() test case; see there for what tables and columns
# we expect to have at this point.
self.load_sample(self.sample)
self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]])
self.apply_user_action(["CreateViewSection", 1, 0, "record", []])
# Check that we cannot rename a summary group-by column. (Perhaps it's better to raise an
# exception, but currently we translate the invalid request to a no-op.)
with self.assertRaisesRegex(ValueError, r'Cannot modify .* group-by'):
self.apply_user_action(["RenameColumn", "GristSummary_7_Address", "state", "s"])
# Verify all data. We'll repeat this after renamings to make sure there are no errors.
self.assertTableData("Address", self.starting_table_data)
self.assertTableData('GristSummary_7_Address', cols="all", data=[
[ "id", "city", "state", "group", "count", "amount" ],
[ 1, "New York", "NY" , [21,26,31],3, 1.+6+11 ],
[ 2, "Albany", "NY" , [22], 1, 2. ],
[ 3, "Seattle", "WA" , [23], 1, 3. ],
[ 4, "Chicago", "IL" , [24], 1, 4. ],
[ 5, "Bedford", "MA" , [25], 1, 5. ],
[ 6, "Buffalo", "NY" , [27], 1, 7. ],
[ 7, "Bedford", "NY" , [28], 1, 8. ],
[ 8, "Boston", "MA" , [29], 1, 9. ],
[ 9, "Yonkers", "NY" , [30], 1, 10. ],
])
self.assertTableData('GristSummary_7_Address2', cols="all", data=[
[ "id", "count", "amount", "group" ],
[ 1, 11, 66.0 , [21,22,23,24,25,26,27,28,29,30,31]],
])
# This should work fine, and should affect sister tables.
self.apply_user_action(["RenameColumn", "GristSummary_7_Address", "count", "xcount"])
# These are the tables and columns we automatically get.
self.assertTables([
self.starting_table,
Table(2, "GristSummary_7_Address", 0, 1, columns=[
Column(14, "city", "Text", False, "", 11),
Column(15, "state", "Text", False, "", 12),
Column(16, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(17, "xcount", "Int", True, "len($group)", 0),
Column(18, "amount", "Numeric", True, "SUM($group.amount)", 0),
]),
Table(3, "GristSummary_7_Address2", 0, 1, columns=[
Column(19, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(20, "xcount", "Int", True, "len($group)", 0),
Column(21, "amount", "Numeric", True, "SUM($group.amount)", 0),
])
])
# Check that renames in the source table translate to renames in the summary table.
self.apply_user_action(["RenameColumn", "Address", "state", "xstate"])
self.apply_user_action(["RenameColumn", "Address", "amount", "xamount"])
self.assertTables([
Table(1, "Address", primaryViewId=0, summarySourceTable=0, columns=[
Column(11, "city", "Text", False, "", 0),
Column(12, "xstate", "Text", False, "", 0),
Column(13, "xamount", "Numeric", False, "", 0),
]),
Table(2, "GristSummary_7_Address", 0, 1, columns=[
Column(14, "city", "Text", False, "", 11),
Column(15, "xstate", "Text", False, "", 12),
Column(16, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(17, "xcount", "Int", True, "len($group)", 0),
Column(18, "xamount", "Numeric", True, "SUM($group.xamount)", 0),
]),
Table(3, "GristSummary_7_Address2", 0, 1, columns=[
Column(19, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(20, "xcount", "Int", True, "len($group)", 0),
Column(21, "xamount", "Numeric", True, "SUM($group.xamount)", 0),
])
])
def replace_col_names(data, **col_renames):
return [[col_renames.get(c, c) for c in data[0]]] + data[1:]
# Verify actual data to make sure we don't have formula errors.
address_table_data = replace_col_names(
self.starting_table_data, state='xstate', amount='xamount')
self.assertTableData("Address", address_table_data)
self.assertTableData('GristSummary_7_Address', cols="all", data=[
[ "id", "city", "xstate", "group", "xcount", "xamount" ],
[ 1, "<NAME>", "NY" , [21,26,31],3, 1.+6+11 ],
[ 2, "Albany", "NY" , [22], 1, 2. ],
[ 3, "Seattle", "WA" , [23], 1, 3. ],
[ 4, "Chicago", "IL" , [24], 1, 4. ],
[ 5, "Bedford", "MA" , [25], 1, 5. ],
[ 6, "Buffalo", "NY" , [27], 1, 7. ],
[ 7, "Bedford", "NY" , [28], 1, 8. ],
[ 8, "Boston", "MA" , [29], 1, 9. ],
[ 9, "Yonkers", "NY" , [30], 1, 10. ],
])
self.assertTableData('GristSummary_7_Address2', cols="all", data=[
[ "id", "xcount", "xamount", "group" ],
[ 1, 11, 66.0 , [21,22,23,24,25,26,27,28,29,30,31]],
])
# Add a conflicting name to a summary table and see how renames behave.
self.apply_user_action(["AddColumn", "GristSummary_7_Address", "foo",
{"formula": "$xamount * 100"}])
self.apply_user_action(["RenameColumn", "Address", "xstate", "foo"])
self.apply_user_action(["RenameColumn", "Address", "xamount", "foo"])
self.apply_user_action(["RenameColumn", "Address", "city", "city"])
self.assertTables([
Table(1, "Address", primaryViewId=0, summarySourceTable=0, columns=[
Column(11, "city", "Text", False, "", 0),
Column(12, "foo2", "Text", False, "", 0),
Column(13, "foo3", "Numeric", False, "", 0),
]),
Table(2, "GristSummary_7_Address", 0, 1, columns=[
Column(14, "city", "Text", False, "", 11),
Column(15, "foo2", "Text", False, "", 12),
Column(16, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(17, "xcount", "Int", True, "len($group)", 0),
Column(18, "foo3", "Numeric", True, "SUM($group.foo3)", 0),
Column(22, "foo", "Any", True, "$foo3 * 100", 0),
]),
Table(3, "GristSummary_7_Address2", 0, 1, columns=[
Column(19, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(20, "xcount", "Int", True, "len($group)", 0),
Column(21, "foo3", "Numeric", True, "SUM($group.foo3)", 0),
])
])
# Verify actual data again to make sure we don't have formula errors.
address_table_data = replace_col_names(
address_table_data, xstate='foo2', xamount='foo3')
self.assertTableData("Address", address_table_data)
self.assertTableData('GristSummary_7_Address', cols="all", data=[
[ "id", "city", "foo2" , "group", "xcount", "foo3", "foo" ],
[ 1, "New York", "NY" , [21,26,31],3, 1.+6+11, 100*(1.+6+11) ],
[ 2, "Albany", "NY" , [22], 1, 2. , 100*(2.) ],
[ 3, "Seattle", "WA" , [23], 1, 3. , 100*(3.) ],
[ 4, "Chicago", "IL" , [24], 1, 4. , 100*(4.) ],
[ 5, "Bedford", "MA" , [25], 1, 5. , 100*(5.) ],
[ 6, "Buffalo", "NY" , [27], 1, 7. , 100*(7.) ],
[ 7, "Bedford", "NY" , [28], 1, 8. , 100*(8.) ],
[ 8, "Boston", "MA" , [29], 1, 9. , 100*(9.) ],
[ 9, "Yonkers", "NY" , [30], 1, 10. , 100*(10.) ],
])
self.assertTableData('GristSummary_7_Address2', cols="all", data=[
[ "id", "xcount", "foo3" , "group" ],
[ 1, 11, 66.0 , [21,22,23,24,25,26,27,28,29,30,31]],
])
# Check that update to widgetOptions in source table affects group-by columns and not formula
# columns. (Same should be true for type, but not tested here.)
self.apply_user_action(["ModifyColumn", "Address", "foo2", {"widgetOptions": "hello"}])
self.apply_user_action(["ModifyColumn", "Address", "foo3", {"widgetOptions": "world"}])
self.assertTableData('_grist_Tables_column', cols="subset", data=[
['id', 'colId', 'isFormula', 'widgetOptions'],
[12, 'foo2', False, 'hello'],
[13, 'foo3', False, 'world'],
[15, 'foo2', False, 'hello'],
[18, 'foo3', True, 'WidgetOptions2'],
[21, 'foo3', True, 'WidgetOptions2'],
], rows=lambda r: r.colId in ('foo2', 'foo3'))
#----------------------------------------------------------------------
def test_restrictions(self):
# Verify various restrictions on summary tables
# (1) no adding/removing/renaming non-formula columns.
# (2) no converting between formula/non-formula
# (3) no editing values in non-formula columns
# (4) no removing rows (this is questionable b/c empty rows might be OK to remove)
# (5) no renaming summary tables.
self.load_sample(self.sample)
self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]])
self.apply_user_action(["CreateViewSection", 1, 0, "record", []])
self.assertTableData('GristSummary_7_Address', cols="all", data=[
[ "id", "city", "state", "group", "count", "amount" ],
[ 1, "<NAME>", "NY" , [21,26,31],3, 1.+6+11 ],
[ 2, "Albany", "NY" , [22], 1, 2. ],
[ 3, "Seattle", "WA" , [23], 1, 3. ],
[ 4, "Chicago", "IL" , [24], 1, 4. ],
[ 5, "Bedford", "MA" , [25], 1, 5. ],
[ 6, "Buffalo", "NY" , [27], 1, 7. ],
[ 7, "Bedford", "NY" , [28], 1, 8. ],
[ 8, "Boston", "MA" , [29], 1, 9. ],
[ 9, "Yonkers", "NY" , [30], 1, 10. ],
])
# (1) no adding/removing/renaming non-formula columns.
with self.assertRaisesRegex(ValueError, r'non-formula column'):
self.apply_user_action(["AddColumn", "GristSummary_7_Address", "foo",
{"type": "Numeric", "isFormula": False}])
with self.assertRaisesRegex(ValueError, r'group-by column'):
self.apply_user_action(["RemoveColumn", "GristSummary_7_Address", "state"])
with self.assertRaisesRegex(ValueError, r'Cannot modify .* group-by'):
self.apply_user_action(["RenameColumn", "GristSummary_7_Address", "state", "st"])
# (2) no converting between formula/non-formula
with self.assertRaisesRegex(ValueError, r'Cannot change .* formula and data'):
self.apply_user_action(["ModifyColumn", "GristSummary_7_Address", "amount",
{"isFormula": False}])
with self.assertRaisesRegex(ValueError, r'Cannot change .* formula and data'):
self.apply_user_action(["ModifyColumn", "GristSummary_7_Address", "state",
{"isFormula": True}])
# (3) no editing values in non-formula columns
with self.assertRaisesRegex(ValueError, r'Cannot enter data .* group-by'):
self.apply_user_action(["UpdateRecord", "GristSummary_7_Address", 6, {"state": "ny"}])
# (4) no removing rows (this is questionable b/c empty rows might be OK to remove)
with self.assertRaisesRegex(ValueError, r'Cannot remove record .* summary'):
self.apply_user_action(["RemoveRecord", "GristSummary_7_Address", 6])
# (5) no renaming summary tables.
with self.assertRaisesRegex(ValueError, r'cannot rename .* summary'):
self.apply_user_action(["RenameTable", "GristSummary_7_Address", "GristSummary_hello"])
# Check that we can add an empty column, then set a formula for it.
self.apply_user_action(["AddColumn", "GristSummary_7_Address", "foo", {}])
self.apply_user_action(["ModifyColumn", "GristSummary_7_Address", "foo", {"formula": "1+1"}])
with self.assertRaisesRegex(ValueError, "Can't save .* to formula"):
self.apply_user_action(["UpdateRecord", "GristSummary_7_Address", 1, {"foo": "hello"}])
# But we cannot add an empty column, then add a value to it.
self.apply_user_action(["AddColumn", "GristSummary_7_Address", "foo2", {}])
with self.assertRaisesRegex(ValueError, r'Cannot change .* between formula and data'):
self.apply_user_action(["UpdateRecord", "GristSummary_7_Address", 1, {"foo2": "hello"}])
self.assertTableData('GristSummary_7_Address', cols="all", data=[
[ "id", "city", "state", "group", "count", "amount", "foo", "foo2" ],
[ 1, "New York", "NY" , [21,26,31],3, 1.+6+11 , 2 , None ],
[ 2, "Albany", "NY" , [22], 1, 2. , 2 , None ],
[ 3, "Seattle", "WA" , [23], 1, 3. , 2 , None ],
[ 4, "Chicago", "IL" , [24], 1, 4. , 2 , None ],
[ 5, "Bedford", "MA" , [25], 1, 5. , 2 , None ],
[ 6, "Buffalo", "NY" , [27], 1, 7. , 2 , None ],
[ 7, "Bedford", "NY" , [28], 1, 8. , 2 , None ],
[ 8, "Boston", "MA" , [29], 1, 9. , 2 , None ],
[ 9, "Yonkers", "NY" , [30], 1, 10. , 2 , None ],
])
#----------------------------------------------------------------------
def test_update_summary_section(self):
# Verify that we can change the group-by for a view section, and that unused tables get
# removed.
def get_helper_cols(table_id):
return [c for c in self.engine.tables[table_id].all_columns if c.startswith('#summary#')]
self.load_sample(self.sample)
self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]])
# We should have a single summary table, and a single section referring to it.
self.assertTables([
self.starting_table,
Table(2, "GristSummary_7_Address", 0, 1, columns=[
Column(14, "city", "Text", False, "", 11),
Column(15, "state", "Text", False, "", 12),
Column(16, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(17, "count", "Int", True, "len($group)", 0),
Column(18, "amount", "Numeric", True, "SUM($group.amount)", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=2, fields=[
Field(1, colRef=14),
Field(2, colRef=15),
Field(3, colRef=17),
Field(4, colRef=18),
])
])])
self.assertEqual(get_helper_cols('Address'), ['#summary#GristSummary_7_Address'])
# Verify more fields of some of the new column objects.
self.assertTableData('_grist_Tables_column', rows="subset", cols="subset", data=[
['id', 'colId', 'type', 'formula', 'widgetOptions', 'label'],
[14, 'city', 'Text', '', '', 'City'],
[15, 'state', 'Text', '', 'WidgetOptions1', 'State'],
[18, 'amount', 'Numeric', 'SUM($group.amount)', 'WidgetOptions2', 'Amount'],
])
# Now change the group-by to just one of the columns ('state')
self.apply_user_action(["UpdateSummaryViewSection", 1, [12]])
self.assertTables([
self.starting_table,
# Note that Table #2 is gone at this point, since it's unused.
Table(3, "GristSummary_7_Address2", 0, 1, columns=[
Column(19, "state", "Text", False, "", 12),
Column(20, "count", "Int", True, "len($group)", 0),
Column(21, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(22, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=3, fields=[
Field(2, colRef=19),
Field(3, colRef=20),
Field(4, colRef=21),
])
])])
self.assertTableData('GristSummary_7_Address2', cols="subset", data=[
[ "id", "state", "count", "amount" ],
[ 1, "NY", 7, 1.+2+6+7+8+10+11 ],
[ 2, "WA", 1, 3. ],
[ 3, "IL", 1, 4. ],
[ 4, "MA", 2, 5.+9 ],
])
self.assertEqual(get_helper_cols('Address'), ['#summary#GristSummary_7_Address2'])
# Verify more fields of some of the new column objects.
self.assertTableData('_grist_Tables_column', rows="subset", cols="subset", data=[
['id', 'colId', 'type', 'formula', 'widgetOptions', 'label'],
[19, 'state', 'Text', '', 'WidgetOptions1', 'State'],
[21, 'amount', 'Numeric', 'SUM($group.amount)', 'WidgetOptions2', 'Amount'],
])
# Change group-by to a different single column ('city')
self.apply_user_action(["UpdateSummaryViewSection", 1, [11]])
self.assertTables([
self.starting_table,
# Note that Table #3 is gone at this point, since it's unused.
Table(4, "GristSummary_7_Address", 0, 1, columns=[
Column(23, "city", "Text", False, "", 11),
Column(24, "count", "Int", True, "len($group)", 0),
Column(25, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(26, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=4, fields=[
Field(5, colRef=23),
Field(3, colRef=24),
Field(4, colRef=25),
])
])])
self.assertTableData('GristSummary_7_Address', cols="subset", data=[
[ "id", "city", "count", "amount" ],
[ 1, "New York", 3, 1.+6+11 ],
[ 2, "Albany", 1, 2. ],
[ 3, "Seattle", 1, 3. ],
[ 4, "Chicago", 1, 4. ],
[ 5, "Bedford", 2, 5.+8 ],
[ 6, "Buffalo", 1, 7. ],
[ 7, "Boston", 1, 9. ],
[ 8, "Yonkers", 1, 10. ],
])
self.assertEqual(get_helper_cols('Address'), ['#summary#GristSummary_7_Address'])
# Verify more fields of some of the new column objects.
self.assertTableData('_grist_Tables_column', rows="subset", cols="subset", data=[
['id', 'colId', 'type', 'formula', 'widgetOptions', 'label'],
[23, 'city', 'Text', '', '', 'City'],
[25, 'amount', 'Numeric', 'SUM($group.amount)', 'WidgetOptions2', 'Amount'],
])
# Change group-by to no columns (totals)
self.apply_user_action(["UpdateSummaryViewSection", 1, []])
self.assertTables([
self.starting_table,
# Note that Table #4 is gone at this point, since it's unused.
Table(5, "GristSummary_7_Address2", 0, 1, columns=[
Column(27, "count", "Int", True, "len($group)", 0),
Column(28, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(29, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=5, fields=[
Field(3, colRef=27),
Field(4, colRef=28),
])
])])
self.assertTableData('GristSummary_7_Address2', cols="subset", data=[
[ "id", "count", "amount"],
[ 1, 11, 66.0 ],
])
self.assertEqual(get_helper_cols('Address'), ['#summary#GristSummary_7_Address2'])
# Back to full circle, but with group-by columns differently arranged.
self.apply_user_action(["UpdateSummaryViewSection", 1, [12,11]])
self.assertTables([
self.starting_table,
# Note that Table #5 is gone at this point, since it's unused.
Table(6, "GristSummary_7_Address", 0, 1, columns=[
Column(30, "state", "Text", False, "", 12),
Column(31, "city", "Text", False, "", 11),
Column(32, "count", "Int", True, "len($group)", 0),
Column(33, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(34, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=6, fields=[
Field(5, colRef=30),
Field(6, colRef=31),
Field(3, colRef=32),
Field(4, colRef=33),
])
])])
self.assertTableData('GristSummary_7_Address', cols="subset", data=[
[ "id", "city", "state", "count", "amount" ],
[ 1, "New York", "NY" , 3, 1.+6+11 ],
[ 2, "Albany", "NY" , 1, 2. ],
[ 3, "Seattle", "WA" , 1, 3. ],
[ 4, "Chicago", "IL" , 1, 4. ],
[ 5, "Bedford", "MA" , 1, 5. ],
[ 6, "Buffalo", "NY" , 1, 7. ],
[ 7, "Bedford", "NY" , 1, 8. ],
[ 8, "Boston", "MA" , 1, 9. ],
[ 9, "Yonkers", "NY" , 1, 10. ],
])
self.assertEqual(get_helper_cols('Address'), ['#summary#GristSummary_7_Address'])
# Now add a different view section with the same group-by columns.
self.apply_user_action(["CreateViewSection", 1, 1, "record", [11,12]])
self.assertTables([
self.starting_table,
Table(6, "GristSummary_7_Address", 0, 1, columns=[
Column(30, "state", "Text", False, "", 12),
Column(31, "city", "Text", False, "", 11),
Column(32, "count", "Int", True, "len($group)", 0),
Column(33, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(34, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=6, fields=[
Field(5, colRef=30),
Field(6, colRef=31),
Field(3, colRef=32),
Field(4, colRef=33),
]),
Section(2, parentKey="record", tableRef=6, fields=[
Field(7, colRef=31),
Field(8, colRef=30),
Field(9, colRef=32),
Field(10, colRef=33),
])
])])
self.assertEqual(get_helper_cols('Address'), ['#summary#GristSummary_7_Address'])
# Change one view section, and ensure there are now two summary tables.
self.apply_user_action(["UpdateSummaryViewSection", 2, []])
self.assertTables([
self.starting_table,
Table(6, "GristSummary_7_Address", 0, 1, columns=[
Column(30, "state", "Text", False, "", 12),
Column(31, "city", "Text", False, "", 11),
Column(32, "count", "Int", True, "len($group)", 0),
Column(33, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(34, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
]),
Table(7, "GristSummary_7_Address2", 0, 1, columns=[
Column(35, "count", "Int", True, "len($group)", 0),
Column(36, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(37, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=6, fields=[
Field(5, colRef=30),
Field(6, colRef=31),
Field(3, colRef=32),
Field(4, colRef=33),
]),
Section(2, parentKey="record", tableRef=7, fields=[
Field(9, colRef=35),
Field(10, colRef=36),
])
])])
self.assertEqual(get_helper_cols('Address'), ['#summary#GristSummary_7_Address',
'#summary#GristSummary_7_Address2'])
# Delete one view section, and see that the summary table is gone.
self.apply_user_action(["RemoveViewSection", 2])
self.assertTables([
self.starting_table,
# Note that Table #7 is gone at this point, since it's now unused.
Table(6, "GristSummary_7_Address", 0, 1, columns=[
Column(30, "state", "Text", False, "", 12),
Column(31, "city", "Text", False, "", 11),
Column(32, "count", "Int", True, "len($group)", 0),
Column(33, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(34, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
])
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=6, fields=[
Field(5, colRef=30),
Field(6, colRef=31),
Field(3, colRef=32),
Field(4, colRef=33),
])
])])
self.assertEqual(get_helper_cols('Address'), ['#summary#GristSummary_7_Address'])
# Delete source table, and ensure its summary table is also gone.
self.apply_user_action(["RemoveTable", "Address"])
self.assertTables([])
self.assertViews([])
#----------------------------------------------------------------------
def test_update_groupby_override(self):
# Verify that if we add a group-by column that conflicts with a formula, group-by column wins.
self.load_sample(self.sample)
self.apply_user_action(["CreateViewSection", 1, 0, "record", [12]])
self.apply_user_action(["AddColumn", "GristSummary_7_Address", "city",
{"formula": "$state.lower()"}])
# We should have a single summary table, and a single section referring to it.
self.assertTables([
self.starting_table,
Table(2, "GristSummary_7_Address", 0, 1, columns=[
Column(14, "state", "Text", False, "", 12),
Column(15, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(16, "count", "Int", True, "len($group)", 0),
Column(17, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(18, "city", "Any", True, "$state.lower()", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=2, fields=[
Field(1, colRef=14),
Field(2, colRef=16),
Field(3, colRef=17),
Field(4, colRef=18),
])
])])
self.assertTableData('GristSummary_7_Address', cols="subset", data=[
[ "id", "state", "count", "amount" , "city"],
[ 1, "NY", 7, 1.+2+6+7+8+10+11 , "ny" ],
[ 2, "WA", 1, 3. , "wa" ],
[ 3, "IL", 1, 4. , "il" ],
[ 4, "MA", 2, 5.+9 , "ma" ],
])
# Change the section to add "city" as a group-by column; check that the formula is gone.
self.apply_user_action(["UpdateSummaryViewSection", 1, [11,12]])
self.assertTables([
self.starting_table,
Table(3, "GristSummary_7_Address2", 0, 1, columns=[
Column(19, "city", "Text", False, "", 11),
Column(20, "state", "Text", False, "", 12),
Column(21, "count", "Int", True, "len($group)", 0),
Column(22, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(23, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=3, fields=[
# We requested 'city' to come before 'state', check that this is the case.
Field(4, colRef=19),
Field(1, colRef=20),
Field(2, colRef=21),
Field(3, colRef=22),
])
])])
# TODO We should have more tests on UpdateSummaryViewSection that rearranges columns in
# interesting ways (e.g. add new column to middle of existing group-by columns; put group-by
# columns in the middle of other fields then UpdateSummary to rearrange them).
#----------------------------------------------------------------------
def test_cleanup_on_view_remove(self):
# Verify that if we remove a view, that unused summary tables get cleaned up.
# Create one view with one summary section, and another view with three sections.
self.load_sample(self.sample)
self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]]) # Creates View #1
self.apply_user_action(["CreateViewSection", 1, 0, "record", []]) # Creates View #2
self.apply_user_action(["CreateViewSection", 1, 2, "record", [11,12]]) # Refers to View #2
self.apply_user_action(["CreateViewSection", 1, 2, "record", [12]]) # Refers to View #2
# We should have a single summary table, and a single section referring to it.
self.assertTables([
self.starting_table,
Table(2, "GristSummary_7_Address", 0, 1, columns=[
Column(14, "city", "Text", False, "", 11),
Column(15, "state", "Text", False, "", 12),
Column(16, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(17, "count", "Int", True, "len($group)", 0),
Column(18, "amount", "Numeric", True, "SUM($group.amount)", 0),
]),
Table(3, "GristSummary_7_Address2", 0, 1, columns=[
Column(19, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(20, "count", "Int", True, "len($group)", 0),
Column(21, "amount", "Numeric", True, "SUM($group.amount)", 0),
]),
Table(4, "GristSummary_7_Address3", 0, 1, columns=[
Column(22, "state", "Text", False, "", 12),
Column(23, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(24, "count", "Int", True, "len($group)", 0),
Column(25, "amount", "Numeric", True, "SUM($group.amount)", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=2, fields=[
Field(1, colRef=14),
Field(2, colRef=15),
Field(3, colRef=17),
Field(4, colRef=18),
])
]), View(2, sections=[
Section(2, parentKey="record", tableRef=3, fields=[
Field(5, colRef=20),
Field(6, colRef=21),
]),
Section(3, parentKey="record", tableRef=2, fields=[
Field(7, colRef=14),
Field(8, colRef=15),
Field(9, colRef=17),
Field(10, colRef=18),
]),
Section(4, parentKey="record", tableRef=4, fields=[
Field(11, colRef=22),
Field(12, colRef=24),
Field(13, colRef=25),
])
])])
# Now change the group-by to just one of the columns ('state')
self.apply_user_action(["RemoveView", 2])
# Verify that unused summary tables are also gone, but the one used remains.
self.assertTables([
self.starting_table,
Table(2, "GristSummary_7_Address", 0, 1, columns=[
Column(14, "city", "Text", False, "", 11),
Column(15, "state", "Text", False, "", 12),
Column(16, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(17, "count", "Int", True, "len($group)", 0),
Column(18, "amount", "Numeric", True, "SUM($group.amount)", 0),
]),
])
self.assertViews([View(1, sections=[
Section(1, parentKey="record", tableRef=2, fields=[
Field(1, colRef=14),
Field(2, colRef=15),
Field(3, colRef=17),
Field(4, colRef=18),
])
])])
#----------------------------------------------------------------------
@test_engine.test_undo
def test_update_sort_spec(self):
# Verify that we correctly update sort spec when we update a summary view section.
self.load_sample(self.sample)
self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]])
self.apply_user_action(["UpdateRecord", "_grist_Views_section", 1,
{"sortColRefs": "[15,14,-17]"}])
# We should have a single summary table, and a single section referring to it.
self.assertTables([
self.starting_table,
Table(2, "GristSummary_7_Address", 0, 1, columns=[
Column(14, "city", "Text", False, "", 11),
Column(15, "state", "Text", False, "", 12),
Column(16, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(17, "count", "Int", True, "len($group)", 0),
Column(18, "amount", "Numeric", True, "SUM($group.amount)", 0),
]),
])
self.assertTableData('_grist_Views_section', cols="subset", data=[
["id", "tableRef", "sortColRefs"],
[1, 2, "[15,14,-17]"],
])
# Now change the group-by to just one of the columns ('state')
self.apply_user_action(["UpdateSummaryViewSection", 1, [12]])
self.assertTables([
self.starting_table,
# Note that Table #2 is gone at this point, since it's unused.
Table(3, "GristSummary_7_Address2", 0, 1, columns=[
Column(19, "state", "Text", False, "", 12),
Column(20, "count", "Int", True, "len($group)", 0),
Column(21, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(22, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
]),
])
# Verify that sortColRefs refers to new columns.
self.assertTableData('_grist_Views_section', cols="subset", data=[
["id", "tableRef", "sortColRefs"],
[1, 3, "[19,-20]"],
])
#----------------------------------------------------------------------
def test_detach_summary_section(self):
# Verify that "DetachSummaryViewSection" useraction works correctly.
self.load_sample(self.sample)
# Add a couple of summary tables.
self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]])
self.apply_user_action(["CreateViewSection", 1, 0, "record", []])
# Add a formula column
self.apply_user_action(["AddColumn", "GristSummary_7_Address", "average",
{"formula": "$amount / $count"}])
# Check the table and columns for all the summary tables.
self.assertTables([
self.starting_table,
Table(2, "GristSummary_7_Address", 0, 1, columns=[
Column(14, "city", "Text", False, "", 11),
Column(15, "state", "Text", False, "", 12),
Column(16, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(17, "count", "Int", True, "len($group)", 0),
Column(18, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(22, "average", "Any", True, "$amount / $count", 0),
]),
Table(3, "GristSummary_7_Address2", 0, 1, columns=[
Column(19, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(20, "count", "Int", True, "len($group)", 0),
Column(21, "amount", "Numeric", True, "SUM($group.amount)", 0),
]),
])
self.assertTableData('_grist_Views_section', cols="subset", data=[
["id", "parentId", "tableRef"],
[1, 1, 2],
[2, 2, 3],
])
self.assertTableData('_grist_Views_section_field', cols="subset", data=[
["id", "parentId", "colRef"],
[1, 1, 14],
[2, 1, 15],
[3, 1, 17],
[4, 1, 18],
[7, 1, 22],
[5, 2, 20],
[6, 2, 21],
], sort=lambda r: (r.parentId, r.id))
# Now save one section as a separate table, i.e. "detach" it from its source.
self.apply_user_action(["DetachSummaryViewSection", 1])
# Check the table and columns for all the summary tables.
self.assertTables([
self.starting_table,
Table(3, "GristSummary_7_Address2", 0, 1, columns=[
Column(19, "group", "RefList:Address", True, "table.getSummarySourceGroup(rec)", 0),
Column(20, "count", "Int", True, "len($group)", 0),
Column(21, "amount", "Numeric", True, "SUM($group.amount)", 0),
]),
Table(4, "Table1", primaryViewId=3, summarySourceTable=0, columns=[
Column(23, "manualSort", "ManualSortPos", False, "", 0),
Column(24, "city", "Text", False, "", 0),
Column(25, "state", "Text", False, "", 0),
Column(26, "count", "Int", True, "len($group)", 0),
Column(27, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(28, "average", "Any", True, "$amount / $count", 0),
Column(29, "group", "RefList:Address", True,
"Address.lookupRecords(city=$city, state=$state)", 0),
]),
])
# We should now have two sections referring to the detached table (tableRef 4, the one with two group-by fields).
self.assertTableData('_grist_Views_section', cols="subset", data=[
["id", "parentId", "tableRef"],
[1, 1, 4],
[2, 2, 3],
[3, 3, 4],
])
self.assertTableData('_grist_Views_section_field', cols="subset", data=[
["id", "parentId", "colRef"],
[1, 1, 24],
[2, 1, 25],
[3, 1, 26],
[4, 1, 27],
[7, 1, 28],
[5, 2, 20],
[6, 2, 21],
[8, 3, 24],
[9, 3, 25],
[10, 3, 26],
[11, 3, 27],
[12, 3, 28],
], sort=lambda r: (r.parentId, r.id))
# Check that the data is as we expect.
self.assertTableData('Table1', cols="all", data=[
[ "id", "manualSort", "city", "state", "group", "count", "amount", "average" ],
[ 1, 1.0, "New York", "NY" , [21,26,31],3, 1.+6+11 , (1.+6+11)/3 ],
[ 2, 2.0, "Albany", "NY" , [22], 1, 2. , 2. ],
[ 3, 3.0, "Seattle", "WA" , [23], 1, 3. , 3. ],
[ 4, 4.0, "Chicago", "IL" , [24], 1, 4. , 4. ],
[ 5, 5.0, "Bedford", "MA" , [25], 1, 5. , 5. ],
[ 6, 6.0, "Buffalo", "NY" , [27], 1, 7. , 7. ],
[ 7, 7.0, "Bedford", "NY" , [28], 1, 8. , 8. ],
[ 8, 8.0, "Boston", "MA" , [29], 1, 9. , 9. ],
[ 9, 9.0, "Yonkers", "NY" , [30], 1, 10. , 10. ],
])
self.assertTableData('GristSummary_7_Address2', cols="all", data=[
[ "id", "count", "amount", "group" ],
[ 1, 11, 66.0 , [21,22,23,24,25,26,27,28,29,30,31]],
])
#----------------------------------------------------------------------
def test_summary_of_detached(self):
# Verify that we can make a summary table of a detached table. This is mainly to ensure that
# we handle well the presence of columns like 'group' and 'count' in the source table.
# Add a summary table and detach it. Then add a summary table of that table.
self.load_sample(self.sample)
self.apply_user_action(["CreateViewSection", 1, 0, "record", [11,12]])
self.apply_user_action(["DetachSummaryViewSection", 1])
# Create a summary of the detached table (tableRef 3) by state (colRef 21).
self.apply_user_action(["CreateViewSection", 3, 0, "record", [21]])
# Verify the resulting metadata.
self.assertTables([
self.starting_table,
Table(3, "Table1", primaryViewId=2, summarySourceTable=0, columns=[
Column(19, "manualSort", "ManualSortPos", False, "", 0),
Column(20, "city", "Text", False, "", 0),
Column(21, "state", "Text", False, "", 0),
Column(22, "count", "Int", True, "len($group)", 0),
Column(23, "amount", "Numeric", True, "SUM($group.amount)", 0),
Column(24, "group", "RefList:Address", True,
"Address.lookupRecords(city=$city, state=$state)", 0),
]),
Table(4, "GristSummary_6_Table1", primaryViewId=0, summarySourceTable=3, columns=[
Column(25, "state", "Text", False, "", 21),
Column(26, "group", "RefList:Table1", True, "table.getSummarySourceGroup(rec)", 0),
Column(27, "count", "Int", True, "SUM($group.count)", 0),
Column(28, "amount", "Numeric", True, "SUM($group.amount)", 0),
]),
])
# Check that the data is as we expect. Table1 is the same as in the previous test case.
self.assertTableData('Table1', cols="all", data=[
[ "id", "manualSort", "city", "state", "group", "count", "amount" ],
[ 1, 1.0, "New York", "NY" , [21,26,31],3, 1.+6+11 ],
[ 2, 2.0, "Albany", "NY" , [22], 1, 2. ],
[ 3, 3.0, "Seattle", "WA" , [23], 1, 3. ],
[ 4, 4.0, "Chicago", "IL" , [24], 1, 4. ],
[ 5, 5.0, "Bedford", "MA" , [25], 1, 5. ],
[ 6, 6.0, "Buffalo", "NY" , [27], 1, 7. ],
[ 7, 7.0, "Bedford", "NY" , [28], 1, 8. ],
[ 8, 8.0, "Boston", "MA" , [29], 1, 9. ],
[ 9, 9.0, "Yonkers", "NY" , [30], 1, 10. ],
])
self.assertTableData('GristSummary_6_Table1', cols="all", data=[
[ "id", "state", "group", "count", "amount" ],
[ 1, "NY", [1,2,6,7,9], 7, 1.+6+11+2+7+8+10 ],
[ 2, "WA", [3], 1, 3. ],
[ 3, "IL", [4], 1, 4. ],
[ 4, "MA", [5,8], 2, 5.+9 ],
])
|
StarcoderdataPython
|
6702381
|
# Repository: make-itrain/pyllhttp
from setuptools import setup, Extension
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'llhttp',
version = '6.0.2.0',
description = ("llhttp in python"),
url = "http://github.com/pallas/pyllhttp",
author = "<NAME>",
author_email = "<EMAIL>",
license = "MIT",
long_description = long_description,
long_description_content_type = "text/markdown",
keywords = "www http parser",
classifiers = [
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: JavaScript",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Networking",
"Topic :: Internet :: WWW/HTTP :: HTTP Servers",
"License :: OSI Approved :: MIT License",
],
packages = [ "llhttp" ],
headers = [ "lib/llhttp.h" ],
ext_modules = [ Extension('__llhttp',
sources = """
pyllhttp.c
lib/llhttp.c
lib/http.c
lib/api.c
""".split(),
language = "c",
) ],
)
#
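# Hedged build note (not part of the original setup.py): the Extension above
# compiles the vendored llhttp C sources, so a C compiler is required; a local
# install would typically be something like `python -m pip install .`.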
|
StarcoderdataPython
|
110321
|
# The MIT License (MIT)
# Copyright (c) 2014 <NAME> <<EMAIL>>
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Extensions.
Additional validators that are a little more complex than the defaults or
that otherwise don't fit into the base part of the module.
"""
from inspect import getargspec
def ArgSpec(*args, **kwargs):
"""
Validate a function based on the given argspec.
# Example:
validations = {
"foo": [ArgSpec("a", "b", c", bar="baz")]
}
def pass_func(a, b, c, bar="baz"):
pass
def fail_func(b, c, a, baz="bar"):
pass
passes = {"foo": pass_func}
fails = {"foo": fail_func}
"""
def argspec_lambda(value):
argspec = getargspec(value)
argspec_kw_vals = ()
if argspec.defaults is not None:
argspec_kw_vals = argspec.defaults
kw_vals = {}
arg_offset = 0
arg_len = len(argspec.args) - 1
for val in argspec_kw_vals[::-1]:
kw_vals[argspec.args[arg_len - arg_offset]] = val
arg_offset += 1
if kwargs == kw_vals:
if len(args) != arg_len - arg_offset + 1:
return False
index = 0
for arg in args:
if argspec.args[index] != arg:
return False
index += 1
return True
return False
argspec_lambda.err_message = "must match argspec ({0}) {{{1}}}".format(args, kwargs)
# as little sense as negating this makes, best to just be consistent.
argspec_lambda.not_message = "must not match argspec ({0}) {{{1}}}".format(args, kwargs)
return argspec_lambda
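
# --- Hedged usage sketch (not part of the original module) ---
# It only exercises the ArgSpec validator defined above and assumes a Python
# version on which inspect.getargspec is still available.
if __name__ == "__main__":
    check = ArgSpec("a", "b", "c", bar="baz")

    def pass_func(a, b, c, bar="baz"):
        pass

    def fail_func(b, c, a, baz="bar"):
        pass

    assert check(pass_func)       # names, order and keyword defaults all match
    assert not check(fail_func)   # different order and keyword default
    print("ArgSpec usage sketch passed")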
|
StarcoderdataPython
|
6688547
|
import subprocess
from setuptools import setup, find_packages
from setuptools.command.install import install
try:
from pip._internal.req import parse_requirements
except ImportError:
from pip.req import parse_requirements
def load_requirements(file_name):
requirements = parse_requirements(file_name, session="test")
try:
return [str(item.req) for item in requirements]
except:
return [str(item.requirement) for item in requirements]
setup(
name="flopco-pytorch",
version="v0.1.4",
description="FLOPs and other statistics COunter for Pytorch neural networks",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/juliagusak/flopco-pytorch",
download_url="https://github.com/juliagusak/flopco-pytorch/archive/v0.1.4.tar.gz",
keywords = ['pytorch', 'flops', 'macs', 'neural-networks', 'cnn'],
license="MIT",
packages=find_packages(),
install_requires=load_requirements("requirements.txt")
)
|
StarcoderdataPython
|
4817890
|
from pydantic import BaseModel
from prisma.generator import GenericGenerator, GenericData, Manifest
# custom options must be defined using a pydantic BaseModel
class Config(BaseModel):
my_option: int
# we don't technically need to define our own Data class
# but it makes typing easier
class Data(GenericData[Config]):
pass
# the GenericGenerator[Data] part is what tells Prisma Client Python to use our
# custom Data class with our custom Config class
class MyGenerator(GenericGenerator[Data]):
def get_manifest(self) -> Manifest:
return Manifest(
name='My Custom Generator Options',
default_output='schema.md',
)
def generate(self, data: Data) -> None:
# generate some assets here
pass
if __name__ == '__main__':
MyGenerator.invoke()
|
StarcoderdataPython
|
6402243
|
# Repository: YesmynameisPerry/pezLogger
# File: pezLogger/src/mockLogger.py
from pezLogger.src.base.baseLogger import BaseLogger
from typing import Any, Dict
__all__ = ["MockLogger"]
# Mock logger to be used in tests and all
class MockLogger(BaseLogger):
def __init__(self, *args, suppress: bool = True) -> None:
self.__suppress: bool = suppress
if not self.__suppress:
print(f"MOCK-LOGGER: {args}")
self.loggingEnabled: bool = True
def setSuppress(self, suppress: bool) -> None:
self.__suppress = suppress
def debug(self, message: Any, *additionalMessages: Any, additionalContext: Dict[str, Any] = None) -> None:
if self.__suppress:
return
if not self.loggingEnabled:
print(f"Logging Disabled")
print(f"MOCK-DEBUG: {message}")
def info(self, message: Any, *additionalMessages: Any, additionalContext: Dict[str, Any] = None) -> None:
if self.__suppress:
return
if not self.loggingEnabled:
print(f"Logging Disabled")
print(f"MOCK-INFO: {message}")
def warning(self, message: Any, *additionalMessages: Any, additionalContext: Dict[str, Any] = None) -> None:
if self.__suppress:
return
if not self.loggingEnabled:
print(f"Logging Disabled")
print(f"MOCK-WARNING: {message}")
def error(self, message: Any, *additionalMessages: Any, additionalContext: Dict[str, Any] = None) -> None:
if self.__suppress:
return
if not self.loggingEnabled:
print(f"Logging Disabled")
print(f"MOCK-ERROR: {message}")
def fatal(self, message: Any, *additionalMessages: Any, additionalContext: Dict[str, Any] = None) -> None:
if self.__suppress:
return
if not self.loggingEnabled:
print(f"Logging Disabled")
print(f"MOCK-FATAL: {message}")
def captureException(self, *, additionalContext: Dict[str, Any] = None) -> None:
if self.__suppress:
return
if not self.loggingEnabled:
print(f"Logging Disabled")
print(f"MOCK-CAPTURE-EXCEPTION")
def dangerouslySetting(self, className: str, parameterName: str, value: Any, *, additionalContext: Dict[str, Any] = None) -> None:
if self.__suppress:
return
if not self.loggingEnabled:
print(f"Logging Disabled")
print(f"MOCK-DANGEROUSLY-SETTING")
def stopLogging(self) -> None:
self.loggingEnabled = False
if self.__suppress:
return
print(f"Logging Stopped")
def restartLogging(self) -> None:
self.loggingEnabled = True
if self.__suppress:
return
print(f"Logging Restarted")
|
StarcoderdataPython
|
9756306
|
# Repository: okara83/Becoming-a-Data-Scientist
"""Distance to Nearest Vowel
Write a function that takes in a string and for each character, returns the distance to the nearest vowel in the string. If the character is a vowel itself, return 0.
Examples
distance_to_nearest_vowel("aaaaa") ➞ [0, 0, 0, 0, 0]
distance_to_nearest_vowel("babbb") ➞ [1, 0, 1, 2, 3]
distance_to_nearest_vowel("abcdabcd") ➞ [0, 1, 2, 1, 0, 1, 2, 3]
distance_to_nearest_vowel("shopper") ➞ [2, 1, 0, 1, 1, 0, 1]
Notes
All input strings will contain at least one vowel.
Strings will be lowercased.
Vowels are: a, e, i, o, u."""
def distance_to_nearest_vowel(txt):
vow = list("aeiou")
a, b, c = [], [], []
for i in range(len(txt)):
b, c = [], []
if txt[i] in vow:
a.append(0)
else:
for j in range(1,i+1):
if txt[i-j] in vow:
b.append(j)
for k in range(i,len(txt)):
if txt[k] in vow:
c.append(k-i)
if len(b)==0:a.append(min(c))
elif len(c)==0:a.append(min(b))
else:a.append(min(min(b),min(c)))
return a
#distance_to_nearest_vowel("aaaaa") #➞ [0, 0, 0, 0, 0]
#distance_to_nearest_vowel("babbb") #➞ [1, 0, 1, 2, 3]
distance_to_nearest_vowel("abcdabcd") #➞ [0, 1, 2, 1, 0, 1, 2, 3]
#distance_to_nearest_vowel("shopper") #➞ [2, 1, 0, 1, 1, 0, 1]
|
StarcoderdataPython
|
215582
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Datetime : 2019/11/13 2:45 PM
# @Author : Fangyang
# @Software : PyCharm
from PyQt5.QtWidgets import (
QPushButton, QWidget, QLineEdit, QApplication
)
import sys
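# The QLineEdit below is made draggable with setDragEnabled(True), while Button
# opts into drops via setAcceptDrops(True): dragEnterEvent accepts plain-text
# payloads and dropEvent copies the dropped text onto the button label.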
class Button(QPushButton):
def __init__(self, title, parent):
super().__init__(title, parent)
self.setAcceptDrops(True)
def dragEnterEvent(self, e):
if e.mimeData().hasFormat('text/plain'):
e.accept()
else:
e.ignore()
def dropEvent(self, e):
self.setText(e.mimeData().text())
class Example(QWidget):
def __init__(self):
super(Example, self).__init__()
self.initUI()
def initUI(self):
edit = QLineEdit('', self)
edit.setDragEnabled(True)
edit.move(30, 65)
button = Button('Button', self)
button.move(190, 65)
self.setWindowTitle('Simple drag and drop')
self.setGeometry(300, 300, 300, 150)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
ex.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
5051062
|
# File: rapiduino/boards/arduino.py
from typing import Dict, Optional, Tuple, Type
from rapiduino.boards.pins import Pin, get_mega_pins, get_nano_pins, get_uno_pins
from rapiduino.communication.command_spec import (
CMD_ANALOGREAD,
CMD_ANALOGWRITE,
CMD_DIGITALREAD,
CMD_DIGITALWRITE,
CMD_PARROT,
CMD_PINMODE,
CMD_POLL,
CMD_VERSION,
)
from rapiduino.communication.serial import SerialConnection
from rapiduino.exceptions import (
ArduinoSketchVersionIncompatibleError,
ComponentAlreadyRegisteredError,
NotAnalogPinError,
NotPwmPinError,
PinAlreadyRegisteredError,
PinDoesNotExistError,
PinIsReservedForSerialCommsError,
ProtectedPinError,
)
from rapiduino.globals.common import (
HIGH,
INPUT,
INPUT_PULLUP,
LOW,
OUTPUT,
PinMode,
PinState,
)
class Arduino:
min_version = (0, 1, 0)
def __init__(
self,
pins: Tuple[Pin, ...],
port: str,
rx_pin: int = 0,
tx_pin: int = 1,
conn_class: Type[SerialConnection] = SerialConnection,
) -> None:
self._pins = pins
self.connection = conn_class.build(port)
self.pin_register: Dict[int, str] = {}
self.reserved_pin_nums = (rx_pin, tx_pin)
self._assert_compatible_sketch_version()
@classmethod
def uno(
cls,
port: str,
conn_class: Type[SerialConnection] = SerialConnection,
) -> "Arduino":
return cls(get_uno_pins(), port, conn_class=conn_class)
@classmethod
def nano(
cls,
port: str,
conn_class: Type[SerialConnection] = SerialConnection,
) -> "Arduino":
return cls(get_nano_pins(), port, conn_class=conn_class)
@classmethod
def mega(
cls,
port: str,
conn_class: Type[SerialConnection] = SerialConnection,
) -> "Arduino":
return cls(get_mega_pins(), port, conn_class=conn_class)
@property
def pins(self) -> Tuple[Pin, ...]:
return self._pins
def poll(self) -> int:
return self.connection.process_command(CMD_POLL)[0]
def parrot(self, value: int) -> int:
return self.connection.process_command(CMD_PARROT, value)[0]
def version(self) -> Tuple[int, ...]:
return self.connection.process_command(CMD_VERSION)
def pin_mode(self, pin_no: int, mode: PinMode, token: Optional[str] = None) -> None:
self._assert_valid_pin_number(pin_no)
self._assert_pin_not_reserved(pin_no)
self._assert_valid_pin_mode(mode)
self._assert_pin_not_protected(pin_no, token)
self.connection.process_command(CMD_PINMODE, pin_no, mode.value)
def digital_read(self, pin_no: int, token: Optional[str] = None) -> PinState:
self._assert_valid_pin_number(pin_no)
self._assert_pin_not_reserved(pin_no)
self._assert_pin_not_protected(pin_no, token)
state = self.connection.process_command(CMD_DIGITALREAD, pin_no)
if state[0] == 1:
return HIGH
else:
return LOW
def digital_write(
self, pin_no: int, state: PinState, token: Optional[str] = None
) -> None:
self._assert_valid_pin_number(pin_no)
self._assert_pin_not_reserved(pin_no)
self._assert_valid_pin_state(state)
self._assert_pin_not_protected(pin_no, token)
self.connection.process_command(CMD_DIGITALWRITE, pin_no, state.value)
def analog_read(self, pin_no: int, token: Optional[str] = None) -> int:
self._assert_valid_pin_number(pin_no)
self._assert_pin_not_reserved(pin_no)
self._assert_analog_pin(pin_no)
self._assert_pin_not_protected(pin_no, token)
return self.connection.process_command(CMD_ANALOGREAD, pin_no)[0]
def analog_write(
self, pin_no: int, value: int, token: Optional[str] = None
) -> None:
self._assert_valid_pin_number(pin_no)
self._assert_pin_not_reserved(pin_no)
self._assert_valid_analog_write_range(value)
self._assert_pwm_pin(pin_no)
self._assert_pin_not_protected(pin_no, token)
self.connection.process_command(CMD_ANALOGWRITE, pin_no, value)
def register_component(self, component_token: str, pins: Tuple[Pin, ...]) -> None:
self._assert_requested_pins_are_valid(component_token, pins)
for pin in pins:
self.pin_register[pin.pin_id] = component_token
def deregister_component(self, component_token: str) -> None:
keys_to_delete = [
k for k, v in self.pin_register.items() if v == component_token
]
for key in keys_to_delete:
del self.pin_register[key]
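# Added note: the compatibility check below requires the reported sketch version
# to match min_version's major component exactly, and its minor and patch
# components to each be at least the corresponding minimum.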
def _assert_compatible_sketch_version(self) -> None:
version = self.version()
if any(
(
version[0] > self.min_version[0],
version[0] < self.min_version[0],
version[1] < self.min_version[1],
version[2] < self.min_version[2],
)
):
raise ArduinoSketchVersionIncompatibleError(version, self.min_version)
def _assert_requested_pins_are_valid(
self, component_token: str, pins: Tuple[Pin, ...]
) -> None:
for pin in pins:
if pin.pin_id in self.pin_register:
raise PinAlreadyRegisteredError(pin.pin_id)
if pin.pin_id >= len(self._pins):
self._assert_valid_pin_number(pin.pin_id)
if pin.is_analog and not self._pins[pin.pin_id].is_analog:
raise NotAnalogPinError(pin.pin_id)
if pin.is_pwm and not self._pins[pin.pin_id].is_pwm:
raise NotPwmPinError(pin.pin_id)
self._assert_pin_not_reserved(pin.pin_id)
if component_token in self.pin_register.values():
raise ComponentAlreadyRegisteredError
def _assert_valid_pin_number(self, pin_no: int) -> None:
if (pin_no >= len(self.pins)) or (pin_no < 0):
raise PinDoesNotExistError(pin_no)
def _assert_analog_pin(self, pin_no: int) -> None:
if not self.pins[pin_no].is_analog:
raise NotAnalogPinError(pin_no)
def _assert_pwm_pin(self, pin_no: int) -> None:
if not self.pins[pin_no].is_pwm:
raise NotPwmPinError(pin_no)
def _assert_pin_not_reserved(self, pin_no: int) -> None:
if pin_no in self.reserved_pin_nums:
raise PinIsReservedForSerialCommsError(pin_no)
def _assert_pin_not_protected(self, pin_no: int, token: Optional[str]) -> None:
if pin_no in self.pin_register and self.pin_register[pin_no] != token:
raise ProtectedPinError(token)
@staticmethod
def _assert_valid_analog_write_range(value: int) -> None:
if (value < 0) or (value > 255):
raise ValueError(
f"Specified analog value {value} should be an int in the range 0 to 255"
)
@staticmethod
def _assert_valid_pin_mode(mode: PinMode) -> None:
if mode not in [INPUT, OUTPUT, INPUT_PULLUP]:
raise ValueError(
f"pin_mode must be INPUT, OUTPUT or INPUT_PULLUP"
f"but {mode.name} was found"
)
@staticmethod
def _assert_valid_pin_state(state: PinState) -> None:
if state not in [HIGH, LOW]:
raise ValueError(
f"pin_state must be HIGH or LOW but {state.name} was found"
)
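
# Hedged usage sketch (not part of the original module). The serial port path is
# an assumption, and a board flashed with a compatible rapiduino sketch must be
# attached for the connection and version check to succeed.
if __name__ == "__main__":
    board = Arduino.uno("/dev/ttyACM0")   # hypothetical port
    print(board.version())                # sketch version reported by the board
    board.pin_mode(13, OUTPUT)            # built-in LED pin on an Uno
    board.digital_write(13, HIGH)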
|
StarcoderdataPython
|
20790
|
# Repository: will-bainbridge/ISITEK
#!/usr/bin/python
################################################################################
import numpy
import os
import cPickle as pickle
import scipy.misc
import scipy.sparse
import scipy.sparse.linalg
import scipy.special
import sys
import time
class Struct:
def __init__(self, **keywords):
self.__dict__.update(keywords)
class Timer(object):
def __init__(self, name=None, multiline=False):
self.name = name
self.multiline = multiline
def __enter__(self):
self.start = time.time()
if self.name:
print '%s ...' % self.name ,
if self.multiline:
print
sys.stdout.flush()
def __exit__(self, type, value, traceback):
if self.multiline:
print ' ...' ,
print 'done in %.3f s' % (time.time() - self.start)
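# Added note: Struct is a tiny attribute bag (Struct(a=1).a == 1) and Timer is a
# context manager meant to be used as `with Timer('step name'): ...`, printing
# the elapsed time when the block exits.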
################################################################################
def nodegrid(a,b):
return [ x.T for x in numpy.meshgrid(a,b) ]
def dot_sequence(*args):
if len(args) == 1: return args[0]
else: return numpy.dot( args[0] , dot_sequence(*args[1:]) )
def string_multiple_replace(string,dict):
for s,r in dict.iteritems():
string = string.replace(s,r)
return string
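# Hedged illustration of the helpers above (throwaway values, not from the source):
#   nodegrid([1, 2], [3, 4])                             -> numpy.meshgrid output with both arrays transposed
#   dot_sequence(A, B, C)                                -> numpy.dot(A, numpy.dot(B, C))
#   string_multiple_replace("ab", {"a": "x", "b": "y"})  -> "xy"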
################################################################################
def read_data_file(data_filename):
file = open(data_filename,'rb')
data = pickle.load(file)
file.close()
node = data['node']
face = data['face']
element = data['element']
boundary = data['boundary']
u = data['u']
order = data['order']
return node,face,element,boundary,u,order
#------------------------------------------------------------------------------#
def read_input_file(input_filename):
geometry_filename = []
order = []
boundary = []
initial = []
term = []
wind = []
iterations = []
mesh_size = []
constant = []
file = open(input_filename,'r')
for line in file.readlines():
lineparts = line.split()
if len(lineparts) >= 2 and lineparts[0] == 'geometry_filename':
geometry_filename = lineparts[1]
if len(lineparts) >= 2 and lineparts[0] == 'order':
order = numpy.array([ int(x) for x in lineparts[1:] ])
if len(lineparts) >= 4 and lineparts[0] == 'boundary':
boundary.append(Struct(
face = sum([ list(z) if len(z) == 1 else range(*z) for z in [ tuple( int(y) for y in x.split(':') ) for x in lineparts[1].split(',') ] ],[]) ,
variable = int(lineparts[2]) ,
condition = tuple(sum([ x == y for x in lineparts[3] ]) for y in 'nt') ,
value = float(lineparts[4]) if len(lineparts) >= 5 else 0.0 ))
if len(lineparts) >= 2 and lineparts[0] == 'initial':
initial = lineparts[1:]
if len(lineparts) >= 2 and lineparts[0] == 'constant':
constant = lineparts[1]
if len(lineparts) >= 6 and lineparts[0] == 'term':
term.append(Struct(
equation = int(lineparts[1]) ,
variable = [ int(x) for x in lineparts[2].split(',') ] ,
direction = lineparts[3] ,
differential = [ tuple( sum([ x == y for x in z ]) for y in 'xy' ) for z in lineparts[4].split(',') ] ,
power = [ int(x) for x in lineparts[5].split(',') ] ,
constant = lineparts[6] ,
method = lineparts[7] ))
if len(lineparts) >= 2 and lineparts[0] == 'wind':
wind = eval( 'lambda n,u,v:' + lineparts[1] , {'numpy':numpy} , {} )
if len(lineparts) >= 2 and lineparts[0] == 'iterations':
iterations = int(lineparts[1])
if len(lineparts) >= 2 and lineparts[0] == 'mesh_size':
mesh_size = int(lineparts[1])
file.close()
if len(constant):
constant = dict([ (y[0],float(y[1])) for y in [ x.split('=') for x in constant.split(';') ] ])
else:
constant = {}
if len(term):
for i in range(0,len(term)):
term[i].constant = eval(term[i].constant,{},constant)
if len(initial):
replace = {'pi':'numpy.pi','cos(':'numpy.cos(','sin(':'numpy.sin('}
for i in range(0,len(initial)):
initial[i] = eval( 'lambda x,y: numpy.ones(x.shape)*(' + string_multiple_replace(initial[i],replace) + ')' , {'numpy':numpy} , constant )
return geometry_filename,order,boundary,initial,term,wind,iterations,mesh_size
#------------------------------------------------------------------------------#
def element_sequential_indices(e,element,face):
n = len(element[e].face)
polyline = numpy.array([ list(face[f].node) for f in element[e].face ])
polynode = numpy.unique(polyline)
ones = numpy.ones((n,1))
connect = 1*(ones*polyline[:,0] == (ones*polynode).T) + 2*(ones*polyline[:,1] == (ones*polynode).T)
side = [0]*n
vertex = [0]*n
for i in range(1,n):
temp = connect[connect[:,side[i-1]] == (int(not vertex[i-1])+1),:].flatten() * (numpy.arange(0,n) != side[i-1])
side[i] = temp.nonzero()[0][0]
vertex[i] = temp[side[i]]-1
return [side,vertex]
#------------------------------------------------------------------------------#
def read_geometry(geometry_filename):
# read the geometry file
file = open(geometry_filename,'r')
data = file.readlines()
file.close()
# generate the mesh structures
i = 0
while i < len(data):
if data[i].strip().split()[0] == 'NODES':
nn = int(data[i].strip().split()[1])
node = [ Struct(x=(0.0,0.0)) for _ in range(0,nn) ]
for n in range(0,nn):
node[n].x = tuple( [ float(x) for x in data[i+1+n].strip().split() ] )
i += nn
elif data[i].strip().split()[0] == 'FACES':
nf = int(data[i].strip().split()[1])
face = [ Struct(node=(0,0),border=[],size=1.0,normal=(0.0,0.0),centre=(0.0,0.0),boundary=[],Q=[]) for temp in range(0,nf) ]
for f in range(0,nf):
face[f].node = tuple( [ int(x) for x in data[i+1+f].strip().split() ] )
i += nf
elif data[i].strip().split()[0] == 'CELLS' or data[i].strip().split()[0] == 'ELEMENTS':
ne = int(data[i].strip().split()[1])
element = [ Struct(face=[],orientation=[],size=1.0,area=0.0,centre=(0.0,0.0),unknown=[],V=[],P=[],Q=[],W=[],X=[]) for temp in range(0,ne) ]
for e in range(0,ne):
element[e].face = [ int(x) for x in data[i+1+e].strip().split() ]
i += ne
else:
i += 1
# generate borders
for e in range(0,ne):
for f in element[e].face:
face[f].border.append(e)
# additional element geometry
for e in range(0,ne):
s,t = element_sequential_indices(e,element,face)
index = [ face[element[e].face[i]].node[j] for i,j in zip(s,t) ]
cross = [ node[index[i-1]].x[0]*node[index[i]].x[1]-node[index[i]].x[0]*node[index[i-1]].x[1] for i in range(0,len(element[e].face)) ]
element[e].area = 0.5*sum(cross)
element[e].centre = tuple([ sum([ (node[index[i-1]].x[j]+node[index[i]].x[j])*cross[i] for i in range(0,len(element[e].face)) ])/(6*element[e].area) for j in range(0,2) ])
element[e].orientation = [ 2*t[i]-1 for i in s ]
if element[e].area < 0.0:
element[e].area = -element[e].area
element[e].orientation = [ -x for x in element[e].orientation ]
element[e].size = numpy.sqrt(element[e].area)
# additional face geometry
for f in range(0,nf):
face[f].normal = ( -node[face[f].node[1]].x[1]+node[face[f].node[0]].x[1] , +node[face[f].node[1]].x[0]-node[face[f].node[0]].x[0] )
face[f].size = 0.5*numpy.sqrt(numpy.dot(face[f].normal,face[f].normal))
face[f].centre = tuple([ 0.5*(node[face[f].node[1]].x[i]+node[face[f].node[0]].x[i]) for i in range(0,2) ])
# return
return node,face,element
#------------------------------------------------------------------------------#
def assign_boundaries():
nv = len(order)
for f in range(0,len(face)):
face[f].boundary = [ [] for v in range(0,nv) ]
for b in range(0,len(boundary)):
for f in boundary[b].face:
face[f].boundary[boundary[b].variable].append(b)
#------------------------------------------------------------------------------#
def generate_unknowns():
nv = len(order)
np = order*(order+1)/2
nu = 0
# number by element then variable
# > gives a more diagonally dominant system
for e in range(0,len(element)):
element[e].unknown = [ [] for v in range(0,nv) ]
for v in range(0,nv):
element[e].unknown[v] = range(nu,nu+np[v])
nu += np[v]
## number by variable then element
## > gives a system with visible blocks corresponding to equations
#for e in range(0,len(element)): element[e].unknown = [ [] for v in range(0,nv) ]
#for v in range(0,nv):
# for e in range(0,len(element)):
# element[e].unknown[v] = range(nu,nu+np[v])
# nu += np[v]
return numpy.zeros(nu)
#------------------------------------------------------------------------------#
def generate_constants(order):
max_order = max(order)
ng = 2*max_order-1
gauss_locations,gauss_weights = [ x.real for x in scipy.special.orthogonal.p_roots(ng) ]
#nh = 7
#hammer_locations = numpy.array([
# [0.101286507323456,0.101286507323456],[0.797426958353087,0.101286507323456],[0.101286507323456,0.797426958353087],
# [0.470142064105115,0.470142064105115],[0.059715871789770,0.470142064105115],[0.470142064105115,0.059715871789770],
# [0.333333333333333,0.333333333333333]])
#hammer_weights = 0.5 * numpy.array([
# 0.125939180544827,0.125939180544827,0.125939180544827,0.132394152788506,0.132394152788506,0.132394152788506,
# 0.225000000000000])
#nh = 9
#hammer_locations = numpy.array([
# [0.437525248383384,0.437525248383384],[0.124949503233232,0.437525248383384],[0.437525248383384,0.124949503233232],
# [0.165409927389841,0.037477420750088],[0.037477420750088,0.165409927389841],[0.797112651860071,0.165409927389841],
# [0.165409927389841,0.797112651860071],[0.037477420750088,0.797112651860071],[0.797112651860071,0.037477420750088]])
#hammer_weights = 0.5 * numpy.array([
# 0.205950504760887,0.205950504760887,0.205950504760887,0.063691414286223,0.063691414286223,0.063691414286223,
# 0.063691414286223,0.063691414286223,0.063691414286223])
nh = 12
hammer_locations = numpy.array([
[0.063089014491502,0.063089014491502],[0.873821971016996,0.063089014491502],[0.063089014491502,0.873821971016996],
[0.249286745170910,0.249286745170910],[0.501426509658179,0.249286745170910],[0.249286745170910,0.501426509658179],
[0.310352451033785,0.053145049844816],[0.053145049844816,0.310352451033785],[0.636502499121399,0.310352451033785],
[0.310352451033785,0.636502499121399],[0.053145049844816,0.636502499121399],[0.636502499121399,0.053145049844816]])
hammer_weights = 0.5 * numpy.array([
0.050844906370207,0.050844906370207,0.050844906370207,0.116786275726379,0.116786275726379,0.116786275726379,
0.082851075618374,0.082851075618374,0.082851075618374,0.082851075618374,0.082851075618374,0.082851075618374])
taylor_coefficients = numpy.array([])
taylor_powers = numpy.zeros((0,2),dtype=int)
for i in range(0,2*max_order):
taylor_coefficients = numpy.append(taylor_coefficients,scipy.misc.comb(i*numpy.ones(i+1),numpy.arange(0,i+1))/scipy.misc.factorial(i))
taylor_powers = numpy.append(taylor_powers,numpy.array([range(i,-1,-1),range(0,i+1)],dtype=int).T,axis=0)
powers_taylor = numpy.zeros((2*max_order,2*max_order),dtype=int)
for i in range(0,taylor_powers.shape[0]): powers_taylor[taylor_powers[i][0]][taylor_powers[i][1]] = i
factorial = scipy.misc.factorial(numpy.arange(0,2*max_order))
return gauss_locations,gauss_weights,hammer_locations,hammer_weights,taylor_coefficients,taylor_powers,powers_taylor,factorial
#------------------------------------------------------------------------------#
def basis(x,y,element,n,differential):
if taylor_powers[n,0] < differential[0] or taylor_powers[n,1] < differential[1]:
return numpy.zeros(x.shape)
p = taylor_powers[n]
q = taylor_powers[n]-differential
constant = taylor_coefficients[n] / numpy.power( element.size , sum(p) )
constant = constant * factorial[p[0]] * factorial[p[1]] / ( factorial[q[0]] * factorial[q[1]] )
return constant * numpy.power(x-element.centre[0],q[0]) * numpy.power(y-element.centre[1],q[1])
#------------------------------------------------------------------------------#
def derivative_transform_matrix(A,order):
n = order*(order+1)/2
D = numpy.zeros((n,n))
D[0,0] = 1.0
for i in range(0,order-1):
old = numpy.nonzero(numpy.sum(taylor_powers,axis=1) == i)[0]
temp = numpy.append( taylor_powers[old,:] + [1,0] , taylor_powers[old[taylor_powers[old,0] == 0],:] + [0,1] , axis=0 )
new = powers_taylor[temp[:,0],temp[:,1]]
index = nodegrid(old,old)
D[nodegrid(new,new)] = numpy.append(
A[0,0] * numpy.append( D[index] , numpy.zeros((i+1,1)) , axis=1 ) +
A[0,1] * numpy.append( numpy.zeros((i+1,1)) , D[index] , axis=1 ) ,
A[1,0] * numpy.append( D[old[-1],[old]] , [[0]] , axis=1 ) +
A[1,1] * numpy.append( [[0]] , D[old[-1],[old]] , axis=1 ) , axis=0 )
return D
#------------------------------------------------------------------------------#
def calculate_element_matrices():
nf = len(face)
ne = len(element)
nv = len(order)
max_order = max(order)
np = numpy.array([ len(x) for x in element[0].unknown ])
max_np = max(np)
ng = len(gauss_weights)
nh = len(hammer_weights)
# initialise
if do.pre:
for e in range(0,ne):
element[e].V = numpy.zeros((max_np,max_np))
element[e].P = numpy.zeros((max_np,(len(element[e].face)-2)*nh,max_np))
element[e].Q = [ numpy.zeros((ng,max_np)) for i in range(0,len(element[e].face)) ]
element[e].W = numpy.zeros((len(element[e].face)-2)*nh)
element[e].X = numpy.zeros(((len(element[e].face)-2)*nh,2))
for f in range(0,nf):
face[f].Q = [ [] for v in range(0,nv) ]
# element vandermonde matrices
if do.pre:
for e in range(0,ne):
for i in range(0,max_np):
for j in range(0,max_np):
element[e].V[i,j] = basis(numpy.array(element[e].centre[0]),numpy.array(element[e].centre[1]),element[e],i,taylor_powers[j])
# triangulation and element area quadrature
for e in range(0,ne):
# triangulate
nt = len(element[e].face)-2
v = numpy.zeros((nt,3),dtype=int)
v[:,0] = face[element[e].face[0]].node[0]
j = 0
for i in range(0,len(element[e].face)):
f = element[e].face[i]
o = int(element[e].orientation[i] < 0)
v[j][1:] = numpy.array(face[f].node)[[1-o,o]]
j += not any(v[j][1:] == v[j][0])
if j >= nt: break
# integration locations in and area of the triangles
element[e].X = numpy.zeros(((len(element[e].face)-2)*nh,2))
area = numpy.zeros(nt)
for i in range(0,nt):
d = numpy.array([ [ node[v[i][j]].x[k] - node[v[i][0]].x[k] for k in range(0,2) ] for j in range(1,3) ])
element[e].X[i*nh:(i+1)*nh] = ( numpy.ones((nh,1))*node[v[i][0]].x +
hammer_locations[:,0][numpy.newaxis].T*d[0] +
hammer_locations[:,1][numpy.newaxis].T*d[1] )
area[i] = numpy.cross(d[0,:],d[1,:])
# integration weights
element[e].W = (numpy.array([area]).T*hammer_weights).flatten()
# element FEM numerics matrices
if do.pre:
for e in range(0,ne):
# basis function values at the integration points
for i in range(0,max_np):
for j in range(0,max_np):
element[e].P[i][:,j] = basis(element[e].X[:,0],element[e].X[:,1],element[e],j,taylor_powers[i])
# element DG-FEM numerics matrices
if do.pre:
for e in range(0,ne):
for i in range(0,len(element[e].face)):
f = element[e].face[i]
# integration locations along the face
temp = gauss_locations[numpy.newaxis].T
x = 0.5*(1.0-temp)*node[face[f].node[0]].x + 0.5*(1.0+temp)*node[face[f].node[1]].x
# basis function values at the integration points
for j in range(0,max_np):
element[e].Q[i][:,j] = basis(x[:,0],x[:,1],element[e],j,[0,0])
# face IDG-FEM numerics matrices
for f in range(0,nf):
# adjacent element and boundaries
a = numpy.array(face[f].border)
na = len(a)
b = numpy.array(face[f].boundary,dtype=object)
nb = [ len(i) for i in b ]
if do.pre or (do.re and any(b)):
# rotation to face coordinates
R = numpy.array([[-face[f].normal[0],-face[f].normal[1]],[face[f].normal[1],-face[f].normal[0]]])
R /= numpy.sqrt(numpy.dot(face[f].normal,face[f].normal))
# face locations
x = 0.5*(1.0-gauss_locations[numpy.newaxis].T)*node[face[f].node[0]].x + 0.5*(1.0+gauss_locations[numpy.newaxis].T)*node[face[f].node[1]].x
y = face[f].centre + numpy.dot( x - face[f].centre , R.T )
w = gauss_weights
# adjacent integration locations
xa = [ element[a[i]].X for i in range(0,na) ]
ya = [ face[f].centre + numpy.dot( xa[i] - face[f].centre , R.T ) for i in range(0,na) ]
wa = numpy.append(element[a[0]].W,element[a[1]].W) if na == 2 else element[a[0]].W
for v in range(0,nv):
# face basis indices
temp = nodegrid(range(0,2*order[v]),range(0,2*order[v])) # NOTE # not sufficient for boundary faces with 2 bordering elements
face_taylor = powers_taylor[ numpy.logical_and( temp[0] + na*temp[1] < na*order[v] + nb[v] , temp[1] < order[v] ) ]
# number of interpolation unknowns
ni = len(face_taylor)
# matrices
P = numpy.zeros((na*nh,na*np[v]))
for j in range(0,np[v]):
for k in range(0,na):
P[k*nh:(1+k)*nh,j+k*np[v]] = basis(xa[k][:,0],xa[k][:,1],element[a[k]],j,[0,0])
Q = numpy.zeros((na*nh,ni))
for j in range(0,ni):
for k in range(0,na):
Q[k*nh:(k+1)*nh,j] = basis(ya[k][:,0],ya[k][:,1],face[f],face_taylor[j],[0,0])
A = dot_sequence( P.T , numpy.diag(wa) , Q )
B = dot_sequence( P.T , numpy.diag(wa) , P )
# boundary parts
if nb[v]:
dA = numpy.zeros((nb[v]*order[v],ni))
for i in range(0,nb[v]):
for j in range(0,ni):
for k in range(0,order[v]):
dA[k+i*order[v],j] = basis(
numpy.array(face[f].centre[0]),
numpy.array(face[f].centre[1]),
face[f],face_taylor[j],
[ sum(temp) for temp in zip([0,k],boundary[b[v][i]].condition) ])
dB = numpy.zeros((nb[v]*order[v],nb[v]))
for i in range(0,nb[v]): dB[i*order[v],i] = 1.0
A = numpy.append( A , dA , axis=0 )
B = numpy.append( numpy.append( B , numpy.zeros((B.shape[0],nb[v])) , axis=1 ) ,
numpy.append( numpy.zeros((nb[v]*order[v],B.shape[1])) , dB , axis=1 ) ,
axis=0 )
# solve interpolation problem
D = numpy.linalg.solve(A,B)
# interpolated values
F = numpy.zeros((ng,ni))
face[f].Q[v] = numpy.zeros((np[v],ng,D.shape[1]))
for j in range(0,np[v]):
for k in range(0,ni):
F[:,k] = basis(y[:,0],y[:,1],face[f],face_taylor[k],taylor_powers[j])
face[f].Q[v][j] = numpy.dot( F , D )
# transform differentials to x and y
T = derivative_transform_matrix(numpy.linalg.inv(R),order[v])
for j in range(0,ng): face[f].Q[v][:,j] = numpy.dot( T , face[f].Q[v][:,j] )
#------------------------------------------------------------------------------#
def initialise_unknowns():
ne = len(element)
np = [ len(x) for x in element[0].unknown ]
nv = len(order)
max_order = max(order)
max_order_sq = max_order*max_order
max_np = max(np)
for e in range(0,ne):
x = element[e].centre
delta = numpy.linspace(-0.1*element[e].size/2,0.1*element[e].size/2,max_order)
dx = [ temp.flatten() for temp in nodegrid(delta,delta) ]
p = [ taylor_powers[0:max_np,i] for i in range(0,2) ]
M = ((numpy.ones((max_np,1)) * dx[0]).T ** (numpy.ones((max_order_sq,1)) * p[0]) *
(numpy.ones((max_np,1)) * dx[1]).T ** (numpy.ones((max_order_sq,1)) * p[1]) *
(numpy.ones((max_order_sq,1)) * (scipy.misc.comb(p[0]+p[1],p[0])/scipy.misc.factorial(p[0]+p[1]))))
inv_M = numpy.linalg.pinv(M)
inv_V = numpy.linalg.inv(element[e].V)
for v in range(0,nv):
u[element[e].unknown[v]] = dot_sequence( inv_V , inv_M , initial[v](dx[0]+x[0],dx[1]+x[1]) )[0:np[v]]
#------------------------------------------------------------------------------#
def generate_system():
ne = len(element)
ng = len(gauss_weights)
nh = len(hammer_weights)
np = [ len(x) for x in element[0].unknown ]
nt = len(term)
nv = len(order)
max_np = max(np)
sum_np = sum(np)
sum_np_sq = sum_np*sum_np
# local dense jacobian
L = Struct(i=[],x=[])
# csr system jacobian
J = Struct(p=[],i=[],x=[])
J.p = numpy.zeros(u.shape[0]+1,dtype=int)
for e in range(0,ne):
temp = sum_np
for f in element[e].face: temp += sum_np*(len(face[f].border) == 2)
J.p[numpy.array(sum(element[e].unknown,[]))+1] = temp
J.p = numpy.cumsum(J.p)
J.i = numpy.zeros(J.p[-1],dtype=int)
J.x = numpy.zeros(J.p[-1])
# function vector
F = numpy.zeros(u.shape)
for e in range(0,ne):
# number of faces
nf = len(element[e].face)
# adjacent elements
adj = - numpy.ones(nf,dtype=int)
for i in range(0,nf):
temp = numpy.array(face[element[e].face[i]].border)
temp = temp[temp != e]
if len(temp): adj[i] = temp[0]
n_adj = sum(adj >= 0)
i_adj = numpy.arange(0,nf)[adj >= 0]
# local matrices to add to the system
L.i = numpy.zeros((sum_np,(1+n_adj)*sum_np),dtype=int)
L.i[:,0:sum_np] = numpy.tile( sum(element[e].unknown,[]) , (sum_np,1) )
for i in range(0,n_adj): L.i[:,(i+1)*sum_np:(i+2)*sum_np] = numpy.tile( sum(element[adj[i_adj[i]]].unknown,[]) , (sum_np,1) )
L.x = numpy.zeros(L.i.shape)
# indices into the local matrices
index_e = [ numpy.arange(sum(np[:v]),sum(np[:v+1]))[numpy.newaxis] for v in range(0,nv) ]
index_a = [ [] for i in range(0,nf) ]
for i in range(0,n_adj):
index_a[i_adj[i]] = [ numpy.array([
range(sum(np[:v]),sum(np[:v+1])) +
range((i+1)*sum_np+sum(np[:v]),(i+1)*sum_np+sum(np[:v+1])) ])
for v in range(0,nv) ]
# loop over terms
for t in range(0,nt):
# numbers of variables in the term product sequence
ns = len(term[t].variable)
# direction index
direction = powers_taylor[int(term[t].direction == 'x'),int(term[t].direction == 'y')]
# powers
P = numpy.array(term[t].power)[numpy.newaxis].T
# equation matrix
A = - term[t].constant * dot_sequence( element[e].P[direction][:,0:np[term[t].equation]].T , numpy.diag(element[e].W) )
# calculate the coefficients and values
B = [ [] for s in range(0,ns) ]
X = numpy.zeros((ns,nh))
for s,v in zip(range(0,ns),term[t].variable):
B[s] = element[e].P[powers_taylor[term[t].differential[s]]][:,0:np[v]]
X[s,:] = numpy.dot( B[s] , u[element[e].unknown[v]] )
# add to the local jacobian
Y = X ** P
for s,v in zip(range(0,ns),term[t].variable):
temp = numpy.copy(Y)
temp[s,:] = P[s] * X[s,:] ** (P[s]-1)
L.x[index_e[term[t].equation].T,index_e[v]] += dot_sequence( A , numpy.diag(numpy.prod(temp,axis=0)) , B[s] )
# add to the function vector
F[element[e].unknown[term[t].equation]] += numpy.dot( A , numpy.prod(Y,axis=0) )
# continue if not a flux term
if term[t].direction != 'x' and term[t].direction != 'y': continue
# face components
for i in range(0,nf):
f = element[e].face[i]
a = adj[i]
b = numpy.array(face[f].boundary,dtype=object)
# face normal
normal = element[e].orientation[i] * numpy.array(face[f].normal)
# corresponding face index
if a >= 0: j = numpy.arange(0,len(element[a].face))[numpy.array(element[a].face) == f]
# wind
if a >= 0 and ('u' in term[t].method):
ui = [ dot_sequence( gauss_weights , element[e].Q[i][:,0:np[v]] , u[element[e].unknown[v]] ) for v in range(0,nv) ]
uo = [ dot_sequence( gauss_weights , element[a].Q[j][:,0:np[v]] , u[element[a].unknown[v]] ) for v in range(0,nv) ]
w = wind( normal , ui , uo )
else:
w = True
# equation matrix
A = normal[term[t].direction == 'y'] * term[t].constant * dot_sequence(
element[e].Q[i][:,0:np[term[t].equation]].T , numpy.diag(0.5*gauss_weights) )
# calculate the coefficients and values
B = [ [] for s in range(0,ns) ]
X = numpy.zeros((ns,ng))
for s,v in zip(range(0,ns),term[t].variable):
# where there is an adjacent element
if a >= 0:
# interpolated flux
if term[t].method[s] == 'i' or len(b[v]):
if face[f].border[0] == e: temp = numpy.array(range(0,2*np[v]))
else: temp = numpy.array(range(np[v],2*np[v])+range(0,np[v]))
B[s] = face[f].Q[v][powers_taylor[term[t].differential[s]]][:,temp]
# averaged flux
elif term[t].method[s] == 'a':
B[s] = 0.5*numpy.append(element[e].Q[i][:,0:np[v]],element[a].Q[j][:,0:np[v]],axis=1)
# upwind flux
elif term[t].method[s] == 'u':
B[s] = numpy.zeros((ng,2*np[v]))
if w: B[s][:,0:np[v]] += element[e].Q[i][:,0:np[v]]
else: B[s][:,np[v]:2*np[v]] += element[a].Q[j][:,0:np[v]]
# values
X[s,:] = numpy.dot( B[s] , numpy.append(u[element[e].unknown[v]],u[element[a].unknown[v]]) )
# interpolated flux where there is no adjacent element
else:
B[s] = face[f].Q[v][powers_taylor[term[t].differential[s]]][:,0:np[v]]
X[s,:] = numpy.dot( B[s] , u[element[e].unknown[v]] )
# interpolated flux at boundaries
if len(b[v]):
for k in range(0,len(b[v])):
X[s,:] += boundary[b[v][k]].value * face[f].Q[v][powers_taylor[term[t].differential[s]]][:,(1+(a>=0))*np[v]+k]
# add to the local jacobian
Y = X ** P
for s,v in zip(range(0,ns),term[t].variable):
temp = numpy.copy(Y)
temp[s,:] = P[s] * X[s,:] ** (P[s]-1)
L.x[index_e[term[t].equation].T,index_a[i][v] if a >= 0 else index_e[v]] += dot_sequence(
A , numpy.diag(numpy.prod(temp,axis=0)) , B[s] )
# add to the function vector
F[element[e].unknown[term[t].equation]] += numpy.dot( A , numpy.prod(Y,axis=0) )
# add dense local jacobian to csr global jacobian
index = sum( nodegrid( J.p[sum(element[e].unknown,[])] , numpy.arange(0,L.i.shape[1]) ) ).flatten()
J.i[index] = L.i.flatten()
J.x[index] = L.x.flatten()
# return the global system
return [ scipy.sparse.csr_matrix((J.x,J.i,J.p)) , F ]
#------------------------------------------------------------------------------#
def write_display_file(display_filename,n):
nv = len(order)
np = numpy.array([ len(x) for x in element[0].unknown ])
Q = numpy.linalg.inv(numpy.array([[1,-1,-1,1],[1,1,-1,-1],[1,1,1,1],[1,-1,1,-1]]))
file = open(display_filename,'w')
for e in range(0,len(element)):
s,t = element_sequential_indices(e,element,face)
for i in range(0,len(element[e].face)):
quad = numpy.array( [ element[e].centre ,
face[element[e].face[s[i-1]]].centre ,
node[face[element[e].face[s[i]]].node[t[i]]].x ,
face[element[e].face[s[i]]].centre ] )
a = numpy.dot(Q,quad)
mesh = numpy.append( numpy.mgrid[0:n+1,0:n+1]*(2.0/n)-1.0 , numpy.zeros((nv,n+1,n+1)) , axis=0 )
mesh[0:2] = [ a[0,j] + a[1,j]*mesh[0] + a[2,j]*mesh[1] + a[3,j]*mesh[0]*mesh[1] for j in range(0,2) ]
for j in range(0,max(np)):
phi = basis(mesh[0],mesh[1],element[e],j,[0,0])
for v in numpy.arange(0,nv)[j < np]:
mesh[2+v] += u[element[e].unknown[v][j]]*phi
file.write( '\n\n'.join([ '\n'.join([ ' '.join(['%e']*(2+nv)) % tuple(mesh[:,i,j]) for j in range(0,n+1) ]) for i in range(0,n+1) ]) + '\n\n\n' )
file.close()
#------------------------------------------------------------------------------#
def write_data_file(data_filename):
file = open(data_filename,'wb')
pickle.dump({'node':node,'face':face,'element':element,'boundary':boundary,'order':order,'u':u},file,protocol=pickle.HIGHEST_PROTOCOL)
file.close()
################################################################################
path = sys.argv[1]
action = sys.argv[2].lower()
directory = os.path.dirname(path)
name = os.path.basename(path)
input_filename = directory + os.sep + name + '.input'
data_filename = directory + os.sep + name + '.data'
display_filename = directory + os.sep + name + '.display'
do = Struct(pre = 'p' in action , re = 'r' in action , init = 'i' in action , solve = 's' in action , display = 'd' in action )
#------------------------------------------------------------------------------#
if not do.pre:
with Timer('reading data from "%s"' % data_filename):
node,face,element,boundary,u,order = read_data_file(data_filename)
with Timer('reading input from "%s"' % input_filename):
input_data = read_input_file(input_filename)
if do.pre:
geometry_filename = directory + os.sep + input_data[0]
order = input_data[1]
if do.pre or do.re:
boundary = input_data[2]
if do.init:
initial = input_data[3]
if do.solve:
for i in range(0,len(boundary)): boundary[i].value = input_data[2][i].value
term = input_data[4]
wind = input_data[5]
iterations = input_data[6]
if do.display:
mesh_size = input_data[7]
with Timer('generating constants'):
(gauss_locations,gauss_weights,
hammer_locations,hammer_weights,
taylor_coefficients,taylor_powers,powers_taylor,
factorial) = generate_constants(order)
if do.pre:
with Timer('reading and processing geometry from "%s"' % geometry_filename):
node,face,element = read_geometry(geometry_filename)
with Timer('generating unknowns'):
u = generate_unknowns()
if do.pre or do.re:
with Timer('assigning boundaries to faces'):
assign_boundaries()
with Timer('calculating element matrices'):
calculate_element_matrices()
if do.init:
with Timer('initialising the unknowns'):
initialise_unknowns()
if do.solve:
with Timer('iterating',True):
index = [ numpy.zeros(u.shape,dtype=bool) for v in range(0,len(order)) ]
for e in range(0,len(element)):
for v in range(0,len(order)):
index[v][element[e].unknown[v]] = True
for i in range(0,iterations):
J,f = generate_system()
print ' ' + ' '.join([ '%.4e' % numpy.max(numpy.abs(f[i])) for i in index ])
u += scipy.sparse.linalg.spsolve(J,-f)
if do.display:
with Timer('saving display to "%s"' % display_filename):
write_display_file(display_filename,mesh_size)
if do.pre or do.re or do.init or do.solve:
with Timer('saving data to "%s"' % data_filename):
write_data_file(data_filename)
################################################################################
|
StarcoderdataPython
|
4954295
|
<reponame>max-farver/rl-bot-hack-kstate
from util.orientation import Orientation, relative_location
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.messages.flat.QuickChatSelection import QuickChatSelection
from rlbot.utils.structures.game_data_struct import GameTickPacket
from util.ball_prediction_analysis import find_slice_at_time
from util.boost_pad_tracker import BoostPadTracker
from util.drive import steer_toward_target
from util.sequence import Sequence, ControlStep
from util.vec import Vec3
from collections import deque
import math
spawn_locations = [
(-2047, -2559),
(2047, -2559),
(-2047, 2559),
(2047, 2559),
(-255, -3840),
(255, -3840),
(0, -4608),
(-255, 3840),
(255, 3840),
(0, 4608)
]
left_orange_post = Vec3(800, 5213, 228.5)
right_orange_post = Vec3(-800, 5123, 228.5)
right_blue_post = Vec3(800, -5213, 228.5)
left_blue_post = Vec3(-800, -5123, 228.5)
class MyBot(BaseAgent):
def __init__(self, name, team, index):
super().__init__(name, team, index)
self.active_sequence: Sequence = None
self.boost_pad_tracker = BoostPadTracker()
self.mechanic_queue: deque = deque()
self.kickoff_mechanic_loaded: bool = False
self.demoing: bool = False
def initialize_agent(self):
# Set up information about the boost pads now that the game is active and the info is available
self.boost_pad_tracker.initialize_boosts(self.get_field_info())
def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
"""
This function will be called by the framework many times per second. This is where you can
see the motion of the ball, etc. and return controls to drive your car.
"""
# Keep our boost pad info updated with which pads are currently active
self.boost_pad_tracker.update_boost_status(packet)
# This is good to keep at the beginning of get_output. It will allow you to continue
# any sequences that you may have started during a previous call to get_output.
if self.active_sequence and not self.active_sequence.done:
controls = self.active_sequence.tick(packet)
if controls is not None:
return controls
elif len(self.mechanic_queue) > 0:
return self.next_in_mechanic(packet)
# Gather some information about our car and the ball
my_car = packet.game_cars[self.index]
car_location = Vec3(my_car.physics.location)
car_velocity = Vec3(my_car.physics.velocity)
car_orientation = Orientation(my_car.physics.rotation)
car_path = car_velocity*.1 + car_location
ball_location = Vec3(packet.game_ball.physics.location)
enemy_car = packet.game_cars[1-self.index]
enemy_location = Vec3(enemy_car.physics.location)
enemy_path = Vec3(enemy_car.physics.velocity)*.1 + \
Vec3(enemy_car.physics.location)
enemy_velocity = Vec3(enemy_car.physics.velocity)
bot_to_ball = relative_location(
car_location, car_orientation, ball_location)
bot_to_enemy = relative_location(
car_location, car_orientation, enemy_location)
# Shooting info
car_to_ball_direction = (ball_location - car_location).normalized()
        ball_to_left_post_direction = ()  # placeholder; shot-direction calculation not implemented yet
# Kickoff mechanic
if packet.game_info.is_kickoff_pause and len(self.mechanic_queue) == 0 and not self.kickoff_mechanic_loaded:
return self.kickoff(packet)
if ball_location.x != 0 or ball_location.y != 0:
self.kickoff_mechanic_loaded = False
if bot_to_ball.length() < bot_to_enemy.length() - 1500:
controls = SimpleControllerState()
controls.steer = steer_toward_target(my_car, ball_location)
controls.throttle = 1.0
return controls
# get boost if low
if my_car.boost < 75 and not self.demoing:
nearest_boost_location = self.get_nearest_boost(
packet=packet, car_location=car_location)
controls = SimpleControllerState()
controls.steer = steer_toward_target(
my_car, Vec3(nearest_boost_location))
controls.throttle = 1.0
return controls
# start demoing
if my_car.boost > 75:
self.demoing = True
# demo logic
if self.demoing:
if not my_car.is_super_sonic and my_car.boost < 5:
self.demoing = False
elif not my_car.is_super_sonic:
self.renderer.draw_string_3d(
car_location, 1, 1, "Demoing", self.renderer.red())
controls = SimpleControllerState()
if bot_to_enemy.length() > 500:
controls.steer = steer_toward_target(
my_car, enemy_location+enemy_velocity*.2)
else:
controls.steer = steer_toward_target(
my_car, enemy_location)
controls.throttle = 1.0
controls.boost = True
# jump if enemy jumps
if (enemy_location - car_location).length() < 900 and Vec3(enemy_car.physics.location).z > car_location.z:
print('jump')
controls.jump = True
if (enemy_location - car_location).length() < 800:
car_path = car_velocity*.1 + car_location
# if relative_location(car_path, car_orientation, enemy_path).y > 50:
# return self.side_dodge(packet=packet, enemy_location=enemy_location, direction=False)
# if relative_location(car_path, car_orientation, enemy_path).y < -50:
# return self.side_dodge(packet=packet, enemy_location=enemy_location, direction=True)
return controls
else:
self.renderer.draw_string_3d(
car_location, 1, 1, "Demoing", self.renderer.red())
controls = SimpleControllerState()
if bot_to_enemy.length() > 800:
controls.steer = steer_toward_target(
my_car, enemy_location+enemy_velocity*.2)
else:
controls.steer = steer_toward_target(
my_car, enemy_location)
controls.throttle = 1.0
controls.boost = True
# jump if enemy jumps
if (enemy_location - car_location).length() < 900 and Vec3(enemy_car.physics.location).z > car_location.z:
print('jump')
controls.jump = True
if (enemy_location - car_location).length() < 800:
if relative_location(car_path, car_orientation, enemy_path).y > 50:
return self.side_dodge(packet=packet, enemy_location=enemy_location, direction=False)
if relative_location(car_path, car_orientation, enemy_path).y < -50:
return self.side_dodge(packet=packet, enemy_location=enemy_location, direction=True)
return controls
def get_nearest_boost(self, packet: GameTickPacket, car_location):
info = self.get_field_info()
nearest_boost_loc = None
# loop over all the boosts
for i, boost in enumerate(info.boost_pads):
# only want large boosts that haven't been taken
if boost.is_full_boost and packet.game_boosts[i].is_active:
# if we haven't found any boosts yet, use this one
if not nearest_boost_loc:
nearest_boost_loc = boost.location
else:
# if this boost is closer, save that
if car_location.dist(Vec3(boost.location)) < car_location.dist(Vec3(nearest_boost_loc)):
nearest_boost_loc = boost.location
# if no large boosts are found, find the nearest small boost
# CODE SMELL: very similar duplicate code, looping over boost list twice
if nearest_boost_loc is None:
for i, boost in enumerate(info.boost_pads):
                # any boost pad (large or small) that hasn't been taken
if packet.game_boosts[i].is_active:
# if we haven't found any boosts yet, use this one
if not nearest_boost_loc:
nearest_boost_loc = boost.location
else:
# if this boost is closer, save that
if car_location.dist(Vec3(boost.location)) < car_location.dist(Vec3(nearest_boost_loc)):
nearest_boost_loc = boost.location
# if no boosts are available, target the center of the field
if nearest_boost_loc is None:
nearest_boost_loc = Vec3(0, 0, 0)
# a different possible optimization we could make would be to look at the
# packet.game_boosts[i].timer to find boosts that will respawn before our car arrives there
return Vec3(nearest_boost_loc)
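    # Illustrative sketch of the respawn-timer idea mentioned above (not wired in;
    # assumes the car's velocity is also available here to estimate arrival time):
    #   eta = car_location.dist(Vec3(boost.location)) / max(car_velocity.length(), 1)
    #   usable = packet.game_boosts[i].is_active or packet.game_boosts[i].timer < eta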
def begin_front_flip(self, packet):
# Send some quickchat just for fun
self.send_quick_chat(
team_only=False, quick_chat=QuickChatSelection.Information_IGotIt)
# Do a front flip. We will be committed to this for a few seconds and the bot will ignore other
# logic during that time because we are setting the active_sequence.
self.active_sequence = Sequence([
ControlStep(duration=0.05,
controls=SimpleControllerState(jump=True)),
ControlStep(duration=0.05,
controls=SimpleControllerState(jump=False)),
ControlStep(duration=0.2, controls=SimpleControllerState(
jump=True, pitch=-1)),
ControlStep(duration=0.8, controls=SimpleControllerState()),
])
# Return the controls associated with the beginning of the sequence so we can start right away.
return self.active_sequence.tick(packet)
def kickoff(self, packet):
my_car = packet.game_cars[self.index]
packet.game_cars
car_location = Vec3(my_car.physics.location)
kickoff_location = Vec3(0, 0, 0)
self.kickoff_mechanic_loaded = True
controls = SimpleControllerState()
controls.steer = steer_toward_target(my_car, kickoff_location)
controls.throttle = 1.0
controls.boost = True
print('Kickoff has been called')
self.mechanic_queue = deque()
is_diagonal = False
for location in spawn_locations[:4]:
if int(location[0]) == int(car_location.x) and int(location[1]) == int(car_location.y):
is_diagonal = True
if is_diagonal:
print('This is a diagonal kickoff location')
self.active_sequence = Sequence([
ControlStep(duration=.1,
controls=controls),
])
self.mechanic_queue.append(Sequence([
ControlStep(duration=0.02,
controls=SimpleControllerState(jump=True, boost=False)),
ControlStep(duration=0.02,
controls=SimpleControllerState(jump=False)),
ControlStep(duration=0.2, controls=SimpleControllerState(
jump=True, pitch=-1)),
ControlStep(
duration=0.8, controls=SimpleControllerState(throttle=1)),
]))
self.mechanic_queue.append(Sequence([
ControlStep(duration=.01,
controls=controls),
]))
self.mechanic_queue.append(Sequence([
ControlStep(duration=1,
controls=SimpleControllerState(boost=True, throttle=1)),
]))
self.mechanic_queue.append(Sequence([
ControlStep(duration=0.02,
controls=SimpleControllerState(jump=True, boost=False)),
ControlStep(duration=0.02,
controls=SimpleControllerState(jump=False)),
ControlStep(duration=0.2, controls=SimpleControllerState(
jump=True, pitch=-1)),
ControlStep(
duration=0.8, controls=SimpleControllerState(throttle=1)),
]))
else:
self.active_sequence = Sequence([
ControlStep(duration=.1,
controls=controls),
])
self.mechanic_queue.append(Sequence([
ControlStep(duration=.1,
controls=SimpleControllerState(boost=True, throttle=1)),
]))
self.mechanic_queue.append(Sequence([
ControlStep(duration=0.01,
controls=SimpleControllerState(jump=True, boost=False)),
ControlStep(duration=0.02,
controls=SimpleControllerState(jump=False)),
ControlStep(duration=0.2, controls=SimpleControllerState(
jump=True, pitch=-1)),
ControlStep(
duration=0.8, controls=SimpleControllerState(throttle=1)),
]))
self.mechanic_queue.append(Sequence([
ControlStep(duration=.02,
controls=controls),
]))
self.mechanic_queue.append(Sequence([
ControlStep(duration=1.4,
controls=SimpleControllerState(boost=True, throttle=1)),
]))
self.mechanic_queue.append(Sequence([
ControlStep(duration=0.02,
controls=SimpleControllerState(jump=True, boost=False)),
ControlStep(duration=0.02,
controls=SimpleControllerState(jump=False)),
ControlStep(duration=0.2, controls=SimpleControllerState(
jump=True, pitch=-1)),
ControlStep(
duration=0.8, controls=SimpleControllerState(throttle=1)),
]))
# Return the controls associated with the beginning of the sequence so we can start right away.
return self.active_sequence.tick(packet)
def next_in_mechanic(self, packet):
self.active_sequence = self.mechanic_queue.popleft()
return self.active_sequence.tick(packet)
# returns the amount of radians to turn from source to target
def side_dodge(self, packet: GameTickPacket, enemy_location: Vec3, direction: bool):
my_car = packet.game_cars[self.index]
packet.game_cars
car_location = Vec3(my_car.physics.location)
controls = SimpleControllerState()
controls.steer = steer_toward_target(my_car, enemy_location)
controls.throttle = 1.0
controls.boost = True
# True means left
if direction:
self.active_sequence = Sequence([
ControlStep(duration=0.02,
controls=SimpleControllerState(jump=True, boost=True)),
ControlStep(duration=0.02,
controls=SimpleControllerState(jump=False, boost=True)),
ControlStep(duration=0.2, controls=SimpleControllerState(
jump=True, roll=-1, boost=True)),
ControlStep(
duration=0.8, controls=SimpleControllerState(throttle=1, boost=True)),
])
else:
self.active_sequence = Sequence([
ControlStep(duration=0.02,
controls=SimpleControllerState(jump=True, boost=True)),
ControlStep(duration=0.02,
controls=SimpleControllerState(jump=False, boost=True)),
ControlStep(duration=0.2, controls=SimpleControllerState(
jump=True, roll=1, boost=True)),
ControlStep(
duration=0.8, controls=SimpleControllerState(throttle=1, boost=True)),
])
return self.active_sequence.tick(packet)
|
StarcoderdataPython
|
11296038
|
#!/usr/bin/env python
import unittest
from chirp.common import timestamp
from chirp.library import constants
from chirp.library import ufid
class UFIDTest(unittest.TestCase):
def test_basic(self):
test_vol = 11
test_ts_human = "20090102-030405"
test_ts = timestamp.parse_human_readable(test_ts_human)
test_fp = "1234" * 10
# The UFID prefix should contain the volume and timestamp info.
self.assertEqual("vol0b/%s/" % test_ts_human, # 0b = 11
ufid.ufid_prefix(test_vol, test_ts))
# The UFID should equal the UFID prefix + the fingerprint.
test_ufid = ufid.ufid(test_vol, test_ts, test_fp)
self.assertEqual(ufid.ufid_prefix(test_vol, test_ts) + test_fp,
test_ufid)
# We should be able to make a tag too.
test_tag = ufid.ufid_tag(test_vol, test_ts, test_fp)
self.assertEqual("UFID", test_tag.FrameID)
self.assertEqual(constants.UFID_OWNER_IDENTIFIER, test_tag.owner)
self.assertEqual(test_ufid, test_tag.data)
# Make sure we can parse information back out of the test UFID.
vol, ts, fp = ufid.parse(test_ufid)
self.assertEqual(test_vol, vol)
self.assertEqual(test_ts, ts)
self.assertEqual(test_fp, fp)
# Raise ValueError if we try to parse a bad UFID.
self.assertRaises(ValueError, ufid.parse, "bad")
self.assertRaises(ValueError, ufid.parse,
"vol01/20091399-666666/" + "1"*40)
self.assertRaises(ValueError, ufid.parse,
"vol01/20991001-123456" + "1"*40)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
11259508
|
<reponame>jschmer/rxv<filename>rxv/rxv.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import copy
import logging
import re
import time
import warnings
import xml
from collections import namedtuple
from math import floor
import requests
from defusedxml import cElementTree
from .exceptions import (MenuUnavailable, Timeout, PlaybackUnavailable,
ResponseException, UnknownPort)
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
logger = logging.getLogger('rxv')
class PlaybackSupport:
"""Container for Playback support.
This stores a set of booleans so that they are easy to turn into
whatever format the support needs to be specified at a higher
level.
"""
def __init__(self, play=False, stop=False, pause=False,
skip_f=False, skip_r=False):
self.play = play
self.stop = stop
self.pause = pause
self.skip_f = skip_f
self.skip_r = skip_r
BasicStatus = namedtuple("BasicStatus", "on volume mute input")
PlayStatus = namedtuple("PlayStatus", "playing artist album song station")
CurrentList = namedtuple("CurrentList", "all containers items unplayables unselectables")
MenuStatus = namedtuple("MenuStatus", "ready layer name current_line max_line current_list")
GetParam = 'GetParam'
YamahaCommand = '<YAMAHA_AV cmd="{command}">{payload}</YAMAHA_AV>'
Zone = '<{zone}>{request_text}</{zone}>'
BasicStatusGet = '<Basic_Status>GetParam</Basic_Status>'
PowerControl = '<Power_Control><Power>{state}</Power></Power_Control>'
PowerControlSleep = '<Power_Control><Sleep>{sleep_value}</Sleep></Power_Control>'
Input = '<Input><Input_Sel>{input_name}</Input_Sel></Input>'
InputSelItem = '<Input><Input_Sel_Item>{input_name}</Input_Sel_Item></Input>'
ConfigGet = '<{src_name}><Config>GetParam</Config></{src_name}>'
PlayGet = '<{src_name}><Play_Info>GetParam</Play_Info></{src_name}>'
PlayControl = '<{src_name}><Play_Control><Playback>{action}</Playback></Play_Control></{src_name}>'
ListGet = '<{src_name}><List_Info>GetParam</List_Info></{src_name}>'
ListControlJumpLine = '<{src_name}><List_Control><Jump_Line>{lineno}</Jump_Line>' \
'</List_Control></{src_name}>'
ListControlCursor = '<{src_name}><List_Control><Cursor>{action}</Cursor>'\
'</List_Control></{src_name}>'
VolumeLevel = '<Volume><Lvl>{value}</Lvl></Volume>'
VolumeLevelValue = '<Val>{val}</Val><Exp>{exp}</Exp><Unit>{unit}</Unit>'
VolumeMute = '<Volume><Mute>{state}</Mute></Volume>'
SelectNetRadioLine = '<NET_RADIO><List_Control><Direct_Sel>Line_{lineno}'\
'</Direct_Sel></List_Control></NET_RADIO>'
HdmiOut = '<System><Sound_Video><HDMI><Output><OUT_{port}>{command}</OUT_{port}>'\
'</Output></HDMI></Sound_Video></System>'
AvailableScenes = '<Config>GetParam</Config>'
Scene = '<Scene><Scene_Sel>{parameter}</Scene_Sel></Scene>'
SurroundProgram = '<Surround><Program_Sel><Current>{parameter}</Current></Program_Sel></Surround>'
DirectMode = '<Sound_Video><Direct>{parameter}</Direct></Sound_Video>'
# String constants
STRAIGHT = "Straight"
DIRECT = "Direct"
# PlayStatus options
ARTIST_OPTIONS = ["Artist", "Program_Type"]
ALBUM_OPTIONS = ["Album", "Radio_Text_A"]
SONG_OPTIONS = ["Song", "Track", "Radio_Text_B"]
STATION_OPTIONS = ["Station", "Program_Service"]
class RXV(object):
def __init__(self, ctrl_url, model_name="Unknown",
zone="Main_Zone", friendly_name='Unknown',
unit_desc_url=None):
if re.match(r"\d{1,3}\.\d{1,3}\.\d{1,3}.\d{1,3}", ctrl_url):
# backward compatibility: accept ip address as a contorl url
warnings.warn("Using IP address as a Control URL is deprecated")
ctrl_url = 'http://%s/YamahaRemoteControl/ctrl' % ctrl_url
self.ctrl_url = ctrl_url
self.unit_desc_url = unit_desc_url or re.sub('ctrl$', 'desc.xml', ctrl_url)
self.model_name = model_name
self.friendly_name = friendly_name
self._inputs_cache = None
self._zones_cache = None
self._zone = zone
self._surround_programs_cache = None
self._scenes_cache = None
self._session = requests.Session()
self._discover_features()
def _discover_features(self):
"""Pull and parse the desc.xml so we can query it later."""
try:
logger.debug("REQ: GET | {}".format(self.unit_desc_url))
desc_xml = self._session.get(self.unit_desc_url).content
logger.debug("RES: GET | {} | {}".format(self.unit_desc_url, desc_xml))
if not desc_xml:
logger.error(
"Unsupported Yamaha device? Failed to fetch {}".format(
self.unit_desc_url
))
return
self._desc_xml = cElementTree.fromstring(desc_xml)
except xml.etree.ElementTree.ParseError:
logger.exception("Invalid XML returned for request %s: %s",
self.unit_desc_url, desc_xml)
raise
except Exception:
logger.exception("Failed to fetch %s" % self.unit_desc_url)
raise
def __unicode__(self):
return ('<{cls} model_name="{model}" zone="{zone}" '
'ctrl_url="{ctrl_url}" at {addr}>'.format(
cls=self.__class__.__name__,
zone=self._zone,
model=self.model_name,
ctrl_url=self.ctrl_url,
addr=hex(id(self))
))
def __str__(self):
return self.__unicode__()
def __repr__(self):
return self.__unicode__()
def _request(self, command, request_text, zone_cmd=True):
if zone_cmd:
payload = Zone.format(request_text=request_text, zone=self._zone)
else:
payload = request_text
request_text = YamahaCommand.format(command=command, payload=payload)
try:
logger.debug("REQ: POST | {} | {}".format(self.ctrl_url, request_text))
res = self._session.post(
self.ctrl_url,
data=request_text,
headers={"Content-Type": "text/xml"}
)
logger.debug("RES: POST | {} | {}".format(self.ctrl_url, res.content))
# releases connection to the pool
response = cElementTree.XML(res.content)
if response.get("RC") != "0":
logger.error("Request %s failed with %s",
request_text, res.content)
raise ResponseException(res.content)
return response
except xml.etree.ElementTree.ParseError:
logger.exception("Invalid XML returned for request %s: %s",
request_text, res.content)
raise
@property
def basic_status(self):
response = self._request('GET', BasicStatusGet)
on = response.find("%s/Basic_Status/Power_Control/Power" % self.zone).text
inp = response.find("%s/Basic_Status/Input/Input_Sel" % self.zone).text
mute = response.find("%s/Basic_Status/Volume/Mute" % self.zone).text
volume = response.find("%s/Basic_Status/Volume/Lvl/Val" % self.zone).text
volume = int(volume) / 10.0
status = BasicStatus(on, volume, mute, inp)
return status
@property
def on(self):
request_text = PowerControl.format(state=GetParam)
response = self._request('GET', request_text)
power = response.find("%s/Power_Control/Power" % self._zone).text
assert power in ["On", "Standby"]
return power == "On"
@on.setter
def on(self, state):
assert state in [True, False]
new_state = "On" if state else "Standby"
request_text = PowerControl.format(state=new_state)
response = self._request('PUT', request_text)
return response
def get_playback_support(self, input_source=None):
"""Get playback support as bit vector.
In order to expose features correctly in Home Assistant, we
need to make it possible to understand what play operations a
source supports. This builds us a Home Assistant compatible
bit vector from the desc.xml for the specified source.
"""
if input_source is None:
input_source = self.input
src_name = self._src_name(input_source)
return PlaybackSupport(
play=self.supports_play_method(src_name, 'Play'),
pause=self.supports_play_method(src_name, 'Pause'),
stop=self.supports_play_method(src_name, 'Stop'),
skip_f=self.supports_play_method(src_name, 'Skip Fwd'),
skip_r=self.supports_play_method(src_name, 'Skip Rev'))
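    # Illustrative usage (assumes an RXV instance named `rx`; whether an input
    # supports each action depends on the receiver's desc.xml):
    #   support = rx.get_playback_support("NET RADIO")
    #   if support.skip_f:
    #       rx.next()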
def is_playback_supported(self, input_source=None):
if input_source is None:
input_source = self.input
support = self.get_playback_support(input_source)
return support.play
def play(self):
self._playback_control('Play')
def pause(self):
self._playback_control('Pause')
def stop(self):
self._playback_control('Stop')
def next(self):
self._playback_control('Skip Fwd')
def previous(self):
self._playback_control('Skip Rev')
def _playback_control(self, action):
# Cache current input to "save" one HTTP-request
input_source = self.input
if not self.is_playback_supported(input_source):
raise PlaybackUnavailable(input_source, action)
src_name = self._src_name(input_source)
if not src_name:
return None
request_text = PlayControl.format(src_name=src_name, action=action)
response = self._request('PUT', request_text, zone_cmd=False)
return response
@property
def input(self):
request_text = Input.format(input_name=GetParam)
response = self._request('GET', request_text)
return response.find("%s/Input/Input_Sel" % self.zone).text
@input.setter
def input(self, input_name):
assert input_name in self.inputs()
request_text = Input.format(input_name=input_name)
self._request('PUT', request_text)
def inputs(self):
if not self._inputs_cache:
request_text = InputSelItem.format(input_name=GetParam)
res = self._request('GET', request_text)
self._inputs_cache = dict(zip((elt.text
for elt in res.iter('Param')),
(elt.text
for elt in res.iter("Src_Name"))))
return self._inputs_cache
@property
def outputs(self):
outputs = {}
for cmd in self._find_commands('System,Sound_Video,HDMI,Output'):
# An output typically looks like this:
# System,Sound_Video,HDMI,Output,OUT_1
# Extract the index number at the end as it is needed when
# requesting its current state.
m = re.match(r'.*_(\d+)$', cmd)
if m is None:
continue
port_number = m.group(1)
request = HdmiOut.format(port=port_number, command='GetParam')
response = self._request('GET', request, zone_cmd=False)
port_state = response.find(cmd.replace(',', '/')).text.lower()
outputs['hdmi' + str(port_number)] = port_state
return outputs
def enable_output(self, port, enabled):
m = re.match(r'hdmi(\d+)', port.lower())
if m is None:
raise UnknownPort(port)
request = HdmiOut.format(port=m.group(1),
command='On' if enabled else 'Off')
self._request('PUT', request, zone_cmd=False)
def _find_commands(self, cmd_name):
for cmd in self._desc_xml.findall('.//Cmd_List/Define'):
if cmd.text.startswith(cmd_name):
yield cmd.text
@property
def direct_mode(self):
assert DIRECT in self.surround_programs()
request_text = DirectMode.format(parameter="<Mode>GetParam</Mode>")
response = self._request('GET', request_text)
direct = response.find(
"%s/Sound_Video/Direct/Mode" % self.zone
).text == "On"
return direct
@direct_mode.setter
def direct_mode(self, mode):
assert DIRECT in self.surround_programs()
if mode:
request_text = DirectMode.format(parameter="<Mode>On</Mode>")
else:
request_text = DirectMode.format(parameter="<Mode>Off</Mode>")
self._request('PUT', request_text)
@property
def surround_program(self):
"""
Get current selected surround program.
If a STRAIGHT or DIRECT mode is supported and active, returns that mode.
Otherwise returns the currently active surround program.
"""
request_text = SurroundProgram.format(parameter=GetParam)
response = self._request('GET', request_text)
straight = response.find(
"%s/Surround/Program_Sel/Current/Straight" % self.zone
).text == "On"
program = response.find(
"%s/Surround/Program_Sel/Current/Sound_Program" % self.zone
).text
if self.direct_mode:
return DIRECT
elif straight:
return STRAIGHT
else:
return program
@surround_program.setter
def surround_program(self, surround_name):
assert surround_name in self.surround_programs()
if surround_name == DIRECT:
self.direct_mode = True
else:
if self.direct_mode:
# Disable direct mode before changing any other settings,
# otherwise they don't have an effect
self.direct_mode = False
if surround_name == STRAIGHT:
parameter = "<Straight>On</Straight>"
else:
parameter = "<Sound_Program>{parameter}</Sound_Program>".format(
parameter=surround_name
)
request_text = SurroundProgram.format(parameter=parameter)
self._request('PUT', request_text)
def surround_programs(self):
if not self._surround_programs_cache:
source_xml = self._desc_xml.find(
'.//*[@YNC_Tag="%s"]' % self._zone
)
if source_xml is None:
return False
setup = source_xml.find('.//Menu[@Title_1="Setup"]')
if setup is None:
return False
programs = setup.find('.//*[@Title_1="Program"]/Put_2/Param_1')
if programs is None:
return False
supports = programs.findall('.//Direct')
self._surround_programs_cache = list()
for s in supports:
self._surround_programs_cache.append(s.text)
straight = setup.find('.//*[@Title_1="Straight"]/Put_1')
if straight is not None:
self._surround_programs_cache.append(STRAIGHT)
direct = setup.find('.//*[@Title_1="Direct"]/Put_1')
if direct is not None:
self._surround_programs_cache.append(DIRECT)
return self._surround_programs_cache
@property
def scene(self):
request_text = Scene.format(parameter=GetParam)
response = self._request('GET', request_text)
return response.find("%s/Scene/Scene_Sel" % self.zone).text
@scene.setter
def scene(self, scene_name):
assert scene_name in self.scenes()
scene_number = self._scenes_cache.get(scene_name)
request_text = Scene.format(parameter=scene_number)
self._request('PUT', request_text)
def scenes(self):
if not self._scenes_cache:
res = self._request('GET', AvailableScenes)
scenes = res.find('.//Scene')
if scenes is None:
return False
self._scenes_cache = {}
for scene in scenes:
self._scenes_cache[scene.text] = scene.tag.replace("_", " ")
return self._scenes_cache
@property
def zone(self):
return self._zone
@zone.setter
def zone(self, zone_name):
assert zone_name in self.zones()
self._zone = zone_name
def zones(self):
if self._zones_cache is None:
xml = self._desc_xml
self._zones_cache = [
e.get("YNC_Tag") for e in xml.findall('.//*[@Func="Subunit"]')
]
return self._zones_cache
def zone_controllers(self):
"""Return separate RXV controller for each available zone."""
controllers = []
for zone in self.zones():
zone_ctrl = copy.copy(self)
zone_ctrl.zone = zone
controllers.append(zone_ctrl)
return controllers
def supports_method(self, source, *args):
# if there was a complete xpath implementation we could do
# this all with xpath, but without it it's lots of
# iteration. This is probably not worth optimizing, these
        # loops are cheap in the long run.
commands = self._desc_xml.findall('.//Cmd_List')
for c in commands:
for item in c:
parts = item.text.split(",")
if parts[0] == source and parts[1:] == list(args):
return True
return False
def supports_play_method(self, source, method):
# if there was a complete xpath implementation we could do
# this all with xpath, but without it it's lots of
# iteration. This is probably not worth optimizing, these
        # loops are cheap in the long run.
source_xml = self._desc_xml.find('.//*[@YNC_Tag="%s"]' % source)
if source_xml is None:
return False
play_control = source_xml.find('.//*[@Func="Play_Control"]')
if play_control is None:
return False
# built in Element Tree does not support search by text()
supports = play_control.findall('.//Put_1')
for s in supports:
if s.text == method:
return True
return False
def _src_name(self, cur_input):
if cur_input not in self.inputs():
return None
return self.inputs()[cur_input]
def is_ready(self):
src_name = self._src_name(self.input)
if not src_name:
return True # input is instantly ready
request_text = ConfigGet.format(src_name=src_name)
config = self._request('GET', request_text, zone_cmd=False)
avail = next(config.iter('Feature_Availability'))
return avail.text == 'Ready'
@staticmethod
def safe_get(doc, names):
try:
# python 3.x
import html
except ImportError:
# python 2.7
import HTMLParser
html = HTMLParser.HTMLParser()
for name in names:
tag = doc.find(".//%s" % name)
if tag is not None and tag.text is not None:
# Tuner and Net Radio sometimes respond
# with escaped entities
return html.unescape(tag.text).strip()
return ""
def play_status(self):
src_name = self._src_name(self.input)
if not src_name:
return None
if not self.supports_method(src_name, 'Play_Info'):
return
request_text = PlayGet.format(src_name=src_name)
res = self._request('GET', request_text, zone_cmd=False)
playing = RXV.safe_get(res, ["Playback_Info"]) == "Play" \
or src_name == "Tuner"
status = PlayStatus(
playing,
artist=RXV.safe_get(res, ARTIST_OPTIONS),
album=RXV.safe_get(res, ALBUM_OPTIONS),
song=RXV.safe_get(res, SONG_OPTIONS),
station=RXV.safe_get(res, STATION_OPTIONS)
)
return status
def menu_status(self):
cur_input = self.input
src_name = self._src_name(cur_input)
if not src_name:
raise MenuUnavailable(cur_input)
request_text = ListGet.format(src_name=src_name)
res = self._request('GET', request_text, zone_cmd=False)
ready = (next(res.iter("Menu_Status")).text == "Ready")
layer = int(next(res.iter("Menu_Layer")).text)
name = next(res.iter("Menu_Name")).text
current_line = int(next(res.iter("Current_Line")).text)
max_line = int(next(res.iter("Max_Line")).text)
current_list = next(res.iter('Current_List'))
def _gather_with_attribute(predicate):
return {
elt.tag: elt.find('Txt').text
for elt in current_list
if predicate(elt.find('Attribute').text)
}
def _gather_items(attribute):
return _gather_with_attribute(lambda x: x == attribute)
def _gather_any():
return _gather_with_attribute(lambda x: True)
all = _gather_any()
containers = _gather_items('Container')
items = _gather_items('Item')
unplayables = _gather_items('Unplayable Item')
unselectables = _gather_items('Unselectable')
cl = CurrentList(all, containers, items, unplayables, unselectables)
status = MenuStatus(ready, layer, name, current_line, max_line, cl)
return status
def menu_jump_line(self, lineno):
cur_input = self.input
src_name = self._src_name(cur_input)
if not src_name:
raise MenuUnavailable(cur_input)
request_text = ListControlJumpLine.format(
src_name=src_name,
lineno=lineno
)
return self._request('PUT', request_text, zone_cmd=False)
def _menu_cursor(self, action):
cur_input = self.input
src_name = self._src_name(cur_input)
if not src_name:
raise MenuUnavailable(cur_input)
request_text = ListControlCursor.format(
src_name=src_name,
action=action
)
return self._request('PUT', request_text, zone_cmd=False)
def menu_up(self):
return self._menu_cursor("Up")
def menu_down(self):
return self._menu_cursor("Down")
def menu_left(self):
return self._menu_cursor("Left")
def menu_right(self):
return self._menu_cursor("Right")
def menu_sel(self):
return self._menu_cursor("Sel")
def menu_return(self):
return self._menu_cursor("Return")
def menu_home(self):
return self._menu_cursor("Return to Home")
@property
def volume(self):
request_text = VolumeLevel.format(value=GetParam)
response = self._request('GET', request_text)
vol = response.find('%s/Volume/Lvl/Val' % self.zone).text
return float(vol) / 10.0
@volume.setter
def volume(self, value):
"""Convert volume for setting.
We're passing around volume in standard db units, like -52.0
        db. The API takes int values (in tenths of a dB). However, it only
        accepts int values that correspond to half-dB steps (so -52.0 and -51.5
        are valid, -51.8 is not).
        Multiplying by 2, truncating to int, then multiplying by 5 ensures
        we only ever send half-dB steps.
"""
value = str(int(value * 2) * 5)
exp = 1
unit = 'dB'
volume_val = VolumeLevelValue.format(val=value, exp=exp, unit=unit)
request_text = VolumeLevel.format(value=volume_val)
self._request('PUT', request_text)
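    # Worked example of the half-dB rounding above (illustrative):
    #   rx.volume = -51.8  ->  int(-51.8 * 2) * 5 = -103 * 5 = -515, i.e. -51.5 dB on the receiver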
def volume_fade(self, final_vol, sleep=0.5):
start_vol = int(floor(self.volume))
step = 1 if final_vol > start_vol else -1
final_vol += step # to make sure, we don't stop one dB before
for val in range(start_vol, final_vol, step):
self.volume = val
time.sleep(sleep)
@property
def mute(self):
request_text = VolumeMute.format(state=GetParam)
response = self._request('GET', request_text)
mute = response.find('%s/Volume/Mute' % self.zone).text
assert mute in ["On", "Off"]
return mute == "On"
@mute.setter
def mute(self, state):
assert state in [True, False]
new_state = "On" if state else "Off"
request_text = VolumeMute.format(state=new_state)
response = self._request('PUT', request_text)
return response
@staticmethod
def _wait_for(predicate):
"""Waits until the predicate returns True"""
if not predicate():
for attempt in range(10):
if predicate():
break
time.sleep(0.1)
else:
raise Timeout()
def _wait_for_menu_status(self, predicate):
"""Waits until the predicate returns True"""
self._wait_for(lambda: predicate(self.menu_status()))
def _wait_for_menu_ready(self):
"""Waits until the menu reports ready status"""
self._wait_for(lambda: self.menu_status().ready)
def _server_sel_line(self, lineno):
"""Selects the given line number in the menu"""
lineno = int(lineno)
self.menu_jump_line(lineno)
self._wait_for_menu_status(lambda status: status.ready and status.current_line == lineno)
self.menu_sel()
self._wait_for_menu_ready()
def server_paths(self):
"""
Collects all SERVER paths that can be used with server_select to play
specific content directly.
WARNING: This iterates through the menu to find all items and may be really slow!
:return: list(strings)
"""
return self._iter_menu([])
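    # Illustrative usage (slow; walks the entire SERVER menu tree):
    #   for name, index in rx.server_paths():
    #       print(name, index)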
def _browse_to_target_layer(self, path_to_layer):
"""
Browse to the layer specified by path_to_layer by selecting
the respective lines of the menu starting from the ROOT.
:param path_to_layer: list(pair(#, name))
"""
self._wait_for_menu_ready()
self.menu_home()
self._wait_for_menu_status(lambda status: status.ready and status.layer == 1)
for lineno in [x[0] for x in path_to_layer]:
self.menu_jump_line(lineno)
self._wait_for_menu_status(lambda status: status.ready and status.current_line == lineno)
self.menu_sel()
self._wait_for_menu_ready()
def _iter_menu(self, path_to_layer):
"""
Iterates through the menu items starting from the topmost
layer in the given path_to_layer. Returns a list of items.
        One item has a number, title and an optional list of subitems.
The list of subitems is only present for container items.
:param path_to_layer: list(pair(#, name))
:return: list(items)
"""
# go to target layer
self._browse_to_target_layer(path_to_layer)
# list of items for the current layer, one item is either
# - a pair of (number, title) if it is not a container
# - or a triplet of (number, title, list(items)) if it is a container
items = []
while True:
_, _, layer_name, current_line, max_line, current_list = self.menu_status()
assert len(path_to_layer) == 0 or layer_name == path_to_layer[-1][1]
def effective_line_number(display_lineno):
"""Converts the displayed line number into the total line number"""
if isinstance(display_lineno, str):
if display_lineno.startswith('Line'):
display_lineno = display_lineno[5:]
display_lineno = int(display_lineno)
return current_line + display_lineno - 1
# add subitems by recursing into container items
for lineno, container_name in current_list.containers.items():
lineno = effective_line_number(lineno)
children = self._iter_menu(path_to_layer + [(lineno, container_name)])
items.extend([("{}>{}".format(container_name, child), "{}>{}".format(lineno, index)) for child, index in children])
# then add normal items (like songs)
if current_list.items.items():
items.extend([(name, effective_line_number(lineno)) for lineno, name in current_list.items.items()])
# and unplayable items (like 'buttons' or other text)
if current_list.unplayables.items():
items.extend([(name, effective_line_number(lineno)) for lineno, name in current_list.unplayables.items()])
# update the current line number to figure out if we need to
# jump to the next page
next_line = current_line + len(current_list.all)
if next_line <= max_line:
# in this case, there are more pages with items available, so
# we have to jump to the next page
if self.menu_status().name != layer_name:
# in case there were other containers we recursed into previously,
# browse back to our original layer
self._browse_to_target_layer(path_to_layer)
# jump to the next line to trigger a switch to the next page
self.menu_jump_line(next_line)
self._wait_for_menu_status(lambda status: status.ready and status.current_line == next_line)
else:
# in this case, there are no more pages so we can stop
break
return items
def _server_select_num(self, indices):
"""Selects the menu entries as given by the indices list in the order they are given"""
for index in indices:
self._server_sel_line(index)
def _server_select_name(self, layers):
"""
Selects the menu entries as given by the layers list in the order they are given.
This method tries to find the corresponding list index by iterating through the menu
pages and matching the entry names to figure out the correct one. NOTE: this may be
a rather slow process! If you know the list index of the full patch already, use a
list of indices to select the content to be played instead.
NOTE: The layers list must start from the ROOT!
:param layers: list(str) List of menu entry names
"""
for layer in layers:
while True:
menu = self.menu_status()
self._wait_for_menu_ready()
def select_layer(layer):
"""
Find and select the current layer in the menu
:return: True if layer was found and selected, False otherwise
"""
for line, value in menu.current_list.all.items():
if value == layer:
lineno = menu.current_line + int(line[5:]) - 1
self._server_sel_line(lineno)
return True
return False
if select_layer(layer):
break
else:
# layer not found, jump to next page if available
nextline = menu.current_line + len(menu.current_list.all.items())
if nextline <= menu.max_line:
self.menu_jump_line(nextline)
self._wait_for_menu_status(lambda status: status.ready and status.current_line == nextline)
else:
                        raise FileNotFoundError("Layer %s not found" % layer)
def server_select(self, path):
"""Play the specified path in SERVER mode.
This lets you play a SERVER address in a single command. Supports name based
lookup as well as index based lookup. The index can be queried with server_select(),
which returns all available SERVER paths. NOTE: name based lookup may be slow, so
prefer the index based lookup if you can.
Examples:
server_select('AVM FRITZ!Mediaserver>Internetradio>AlternativeFM>AlternativeFM Stream 2')
server_select([1, 4, 18, 1])
NOTE: The path must be given starting from the ROOT!
This method raises a Timeout exception if the menu doesn't behave as expected.
        TODO: better error handling if we somehow time out
"""
self.input = "SERVER"
# go to the ROOT first
self._wait_for_menu_ready()
self.menu_home()
self._wait_for_menu_ready()
if isinstance(path, str):
layers = path.split(">")
self._server_select_name(layers)
elif isinstance(path, (list, set)):
layers = path
self._server_select_num(layers)
else:
raise NotImplementedError("Type {} is not supported".format(type(path)))
def _net_radio_direct_sel(self, lineno):
request_text = SelectNetRadioLine.format(lineno=lineno)
return self._request('PUT', request_text, zone_cmd=False)
def net_radio(self, path):
"""Play net radio at the specified path.
This lets you play a NET_RADIO address in a single command
        by encoding it with > as separators. For instance:
        Bookmarks>Internet>Radio Paradise
        It does this by pushing commands, then looping and making sure
        the menu is in a ready state before we try to push the next
        one. A sufficient number of iterations are allowed to
        ensure we give it time to get there.
        TODO: better error handling if we somehow time out
"""
layers = path.split(">")
self.input = "NET RADIO"
for attempt in range(20):
menu = self.menu_status()
if menu.ready:
for line, value in menu.current_list.all.items():
if value == layers[menu.layer - 1]:
lineno = line[5:]
self._net_radio_direct_sel(lineno)
if menu.layer == len(layers):
return
break
else:
# print("Sleeping because we are not ready yet")
time.sleep(1)
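    # Hypothetical usage sketch (assumes `rx` is an already constructed receiver/zone
    # object exposing this API; the station path is the one from the docstring above):
    #   rx.net_radio("Bookmarks>Internet>Radio Paradise")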
@property
def sleep(self):
request_text = PowerControlSleep.format(sleep_value=GetParam)
response = self._request('GET', request_text)
sleep = response.find("%s/Power_Control/Sleep" % self._zone).text
return sleep
@sleep.setter
def sleep(self, value):
request_text = PowerControlSleep.format(sleep_value=value)
self._request('PUT', request_text)
@property
def small_image_url(self):
host = urlparse(self.ctrl_url).hostname
return "http://{}:8080/BCO_device_sm_icon.png".format(host)
@property
def large_image_url(self):
host = urlparse(self.ctrl_url).hostname
return "http://{}:8080/BCO_device_lrg_icon.png".format(host)
|
StarcoderdataPython
|
4872225
|
try:
    import usocket as socket
except:
    import socket
from time import sleep
from machine import Pin
import onewire, ds18x20
import network
import esp
esp.osdebug(None)
import gc
gc.collect()
ds_pin = Pin(22)
ds_sensor = ds18x20.DS18X20(onewire.OneWire(ds_pin))
ssid = 'Du-kommst-hier-nicht-rein'
password = '<PASSWORD>'
station = network.WLAN(network.STA_IF)
station.active(True)
station.connect(ssid, password)
while station.isconnected() == False:
pass
print('Connection successful')
print(station.ifconfig())
|
StarcoderdataPython
|
4836317
|
<filename>tests/projections/test_projection_specifications.py
import psyneulink as pnl
import numpy as np
import pytest
import psyneulink.core.components.functions.distributionfunctions
import psyneulink.core.components.functions.statefulfunctions.integratorfunctions
import psyneulink.core.components.functions.transferfunctions
class TestProjectionSpecificationFormats:
def test_projection_specification_formats(self):
"""Test various matrix and Projection specifications
        Also tests assignment of Projections to pathway of Composition using add_linear_processing_pathway:
        - Projection explicitly specified in sequence (M1_M2_proj)
        - Projection pre-constructed and assigned to Mechanisms, but not specified in pathway (M2_M3_proj)
        - Projection specified in pathway that is a duplicate of one preconstructed and assigned to Mechanisms (M3_M4_proj)
(currently it should be ignored; in the future, if/when Projections between the same sender and receiver
in different Compositions are allowed, then it should be used)
"""
M1 = pnl.ProcessingMechanism(size=2)
M2 = pnl.ProcessingMechanism(size=5)
M3 = pnl.ProcessingMechanism(size=4)
M4 = pnl.ProcessingMechanism(size=3)
M1_M2_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5)
M2_M3_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4)
M3_M4_matrix_A = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 5)
M3_M4_matrix_B = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 3)
M1_M2_proj = pnl.MappingProjection(matrix=M1_M2_matrix)
M2_M3_proj = pnl.MappingProjection(sender=M2,
receiver=M3,
matrix={pnl.VALUE: M2_M3_matrix,
pnl.FUNCTION: pnl.AccumulatorIntegrator,
pnl.FUNCTION_PARAMS: {pnl.DEFAULT_VARIABLE: M2_M3_matrix,
pnl.INITIALIZER: M2_M3_matrix}})
M3_M4_proj_A = pnl.MappingProjection(sender=M3, receiver=M4, matrix=M3_M4_matrix_A)
c = pnl.Composition()
c.add_linear_processing_pathway(pathway=[M1,
M1_M2_proj,
M2,
M3,
M3_M4_matrix_B,
M4])
assert np.allclose(M2_M3_proj.matrix, M2_M3_matrix)
assert M2.efferents[0] is M2_M3_proj
assert np.allclose(M3.efferents[0].matrix, M3_M4_matrix_A)
# This is if different Projections are allowed between the same sender and receiver in different Compositions:
# assert np.allclose(M3.efferents[1].matrix, M3_M4_matrix_B)
c.run(inputs={M1:[2, -30]})
# assert np.allclose(c.results, [[-130.19166667, -152.53333333, -174.875]])
assert np.allclose(c.results, [[ -78.115, -91.52 , -104.925]])
def test_multiple_modulatory_projection_specs(self):
M = pnl.DDM(name='MY DDM')
C = pnl.ControlMechanism(control_signals=[{pnl.PROJECTIONS: [M.parameter_ports[
psyneulink.core.components.functions.distributionfunctions.DRIFT_RATE],
M.parameter_ports[
psyneulink.core.globals.keywords.THRESHOLD]]}])
G = pnl.GatingMechanism(gating_signals=[{pnl.PROJECTIONS: [M.output_ports[pnl.DECISION_VARIABLE],
M.output_ports[pnl.RESPONSE_TIME]]}])
assert len(C.control_signals)==1
assert len(C.control_signals[0].efferents)==2
assert M.parameter_ports[
psyneulink.core.components.functions.distributionfunctions.DRIFT_RATE].mod_afferents[0] == C.control_signals[0].efferents[0]
assert M.parameter_ports[
psyneulink.core.globals.keywords.THRESHOLD].mod_afferents[0] == C.control_signals[0].efferents[1]
assert len(G.gating_signals)==1
assert len(G.gating_signals[0].efferents)==2
assert M.output_ports[pnl.DECISION_VARIABLE].mod_afferents[0]==G.gating_signals[0].efferents[0]
assert M.output_ports[pnl.RESPONSE_TIME].mod_afferents[0]==G.gating_signals[0].efferents[1]
def test_multiple_modulatory_projections_with_port_Name(self):
M = pnl.DDM(name='MY DDM')
C = pnl.ControlMechanism(control_signals=[{'DECISION_CONTROL':[M.parameter_ports[
psyneulink.core.components.functions.distributionfunctions.DRIFT_RATE],
M.parameter_ports[
psyneulink.core.globals.keywords.THRESHOLD]]}])
G = pnl.GatingMechanism(gating_signals=[{'DDM_OUTPUT_GATE':[M.output_ports[pnl.DECISION_VARIABLE],
M.output_ports[pnl.RESPONSE_TIME]]}])
assert len(C.control_signals)==1
assert C.control_signals[0].name=='DECISION_CONTROL'
assert len(C.control_signals[0].efferents)==2
assert M.parameter_ports[
psyneulink.core.components.functions.distributionfunctions.DRIFT_RATE].mod_afferents[0] == C.control_signals[0].efferents[0]
assert M.parameter_ports[
psyneulink.core.globals.keywords.THRESHOLD].mod_afferents[0] == C.control_signals[0].efferents[1]
assert len(G.gating_signals)==1
assert G.gating_signals[0].name=='DDM_OUTPUT_GATE'
assert len(G.gating_signals[0].efferents)==2
assert M.output_ports[pnl.DECISION_VARIABLE].mod_afferents[0]==G.gating_signals[0].efferents[0]
assert M.output_ports[pnl.RESPONSE_TIME].mod_afferents[0]==G.gating_signals[0].efferents[1]
def test_multiple_modulatory_projections_with_mech_and_port_Name_specs(self):
M = pnl.DDM(name='MY DDM')
C = pnl.ControlMechanism(control_signals=[{pnl.MECHANISM: M,
pnl.PARAMETER_PORTS: [
psyneulink.core.components.functions.distributionfunctions.DRIFT_RATE,
psyneulink.core.globals.keywords.THRESHOLD]}])
G = pnl.GatingMechanism(gating_signals=[{pnl.MECHANISM: M,
pnl.OUTPUT_PORTS: [pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME]}])
assert len(C.control_signals)==1
assert len(C.control_signals[0].efferents)==2
assert M.parameter_ports[
psyneulink.core.components.functions.distributionfunctions.DRIFT_RATE].mod_afferents[0] == C.control_signals[0].efferents[0]
assert M.parameter_ports[
psyneulink.core.globals.keywords.THRESHOLD].mod_afferents[0] == C.control_signals[0].efferents[1]
assert len(G.gating_signals)==1
assert len(G.gating_signals[0].efferents)==2
assert M.output_ports[pnl.DECISION_VARIABLE].mod_afferents[0]==G.gating_signals[0].efferents[0]
assert M.output_ports[pnl.RESPONSE_TIME].mod_afferents[0]==G.gating_signals[0].efferents[1]
def test_mapping_projection_with_mech_and_port_Name_specs(self):
R1 = pnl.TransferMechanism(output_ports=['OUTPUT_1', 'OUTPUT_2'])
R2 = pnl.TransferMechanism(default_variable=[[0],[0]],
input_ports=['INPUT_1', 'INPUT_2'])
T = pnl.TransferMechanism(input_ports=[{pnl.MECHANISM: R1,
pnl.OUTPUT_PORTS: ['OUTPUT_1', 'OUTPUT_2']}],
output_ports=[{pnl.MECHANISM:R2,
pnl.INPUT_PORTS: ['INPUT_1', 'INPUT_2']}])
assert len(R1.output_ports)==2
assert len(R2.input_ports)==2
assert len(T.input_ports)==1
for input_port in T.input_ports:
for projection in input_port.path_afferents:
assert projection.sender.owner is R1
assert len(T.output_ports)==1
for output_port in T.output_ports:
for projection in output_port.efferents:
assert projection.receiver.owner is R2
def test_mapping_projection_using_2_item_tuple_with_list_of_port_Names(self):
T1 = pnl.TransferMechanism(name='T1', input_ports=[[0,0],[0,0,0]])
T2 = pnl.TransferMechanism(name='T2',
output_ports=[(['InputPort-0','InputPort-1'], T1)])
assert len(T2.output_ports)==1
assert T2.output_ports[0].efferents[0].receiver.name == 'InputPort-0'
assert T2.output_ports[0].efferents[0].matrix.shape == (1,2)
assert T2.output_ports[0].efferents[1].receiver.name == 'InputPort-1'
assert T2.output_ports[0].efferents[1].matrix.shape == (1,3)
def test_mapping_projection_using_2_item_tuple_and_3_item_tuples_with_index_specs(self):
T1 = pnl.TransferMechanism(name='T1', input_ports=[[0,0],[0,0,0]])
T2 = pnl.TransferMechanism(name='T2',
input_ports=['a','b','c'],
output_ports=[(['InputPort-0','InputPort-1'], T1),
('InputPort-0', (pnl.OWNER_VALUE, 2), T1),
(['InputPort-0','InputPort-1'], 1, T1)])
assert len(T2.output_ports)==3
assert T2.output_ports[0].efferents[0].receiver.name == 'InputPort-0'
assert T2.output_ports[0].efferents[0].matrix.shape == (1,2)
assert T2.output_ports[0].efferents[1].receiver.name == 'InputPort-1'
assert T2.output_ports[0].efferents[1].matrix.shape == (1,3)
assert T2.output_ports[1].owner_value_index == 2
assert T2.output_ports[2].owner_value_index == 1
def test_2_item_tuple_from_control_signal_to_parameter_port(self):
D = pnl.DDM(name='D')
# Single name
C = pnl.ControlMechanism(control_signals=[(
psyneulink.core.components.functions.distributionfunctions.DRIFT_RATE, D)])
assert C.control_signals[0].name == 'D[drift_rate] ControlSignal'
assert C.control_signals[0].efferents[0].receiver.name == 'drift_rate'
# List of names
C = pnl.ControlMechanism(control_signals=[([
psyneulink.core.components.functions.distributionfunctions.DRIFT_RATE,
psyneulink.core.globals.keywords.THRESHOLD], D)])
assert C.control_signals[0].name == 'D[drift_rate, threshold] ControlSignal'
assert C.control_signals[0].efferents[0].receiver.name == 'drift_rate'
assert C.control_signals[0].efferents[1].receiver.name == 'threshold'
def test_2_item_tuple_from_parameter_port_to_control_signals(self):
C = pnl.ControlMechanism(control_signals=['a','b'])
D = pnl.DDM(name='D3',
function=psyneulink.core.components.functions.distributionfunctions.DriftDiffusionAnalytical(drift_rate=(3, C),
threshold=(2,C.control_signals['b']))
)
assert D.parameter_ports[
psyneulink.core.components.functions.distributionfunctions.DRIFT_RATE].mod_afferents[0].sender == C.control_signals[0]
assert D.parameter_ports[
psyneulink.core.globals.keywords.THRESHOLD].mod_afferents[0].sender == C.control_signals[1]
def test_2_item_tuple_from_gating_signal_to_output_ports(self):
D4 = pnl.DDM(name='D4')
# Single name
G = pnl.GatingMechanism(gating_signals=[(pnl.DECISION_VARIABLE, D4)])
assert G.gating_signals[0].name == 'D4[DECISION_VARIABLE] GatingSignal'
assert G.gating_signals[0].efferents[0].receiver.name == 'DECISION_VARIABLE'
# List of names
G = pnl.GatingMechanism(gating_signals=[([pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME], D4)])
assert G.gating_signals[0].name == 'D4[DECISION_VARIABLE, RESPONSE_TIME] GatingSignal'
assert G.gating_signals[0].efferents[0].receiver.name == 'DECISION_VARIABLE'
assert G.gating_signals[0].efferents[1].receiver.name == 'RESPONSE_TIME'
def test_2_item_tuple_from_input_and_output_ports_to_gating_signals(self):
G = pnl.GatingMechanism(gating_signals=['a','b'])
T = pnl.TransferMechanism(name='T',
input_ports=[(3,G)],
output_ports=[(2,G.gating_signals['b'])]
)
assert T.input_ports[0].mod_afferents[0].sender==G.gating_signals[0]
assert T.output_ports[0].mod_afferents[0].sender==G.gating_signals[1]
def test_formats_for_control_specification_for_mechanism_and_function_params(self):
control_spec_list = [
pnl.CONTROL,
pnl.CONTROL_SIGNAL,
pnl.CONTROL_PROJECTION,
pnl.ControlSignal,
pnl.ControlSignal(),
pnl.ControlProjection,
"CP_OBJECT",
pnl.ControlMechanism,
pnl.ControlMechanism(),
pnl.ControlMechanism,
(0.3, pnl.CONTROL),
(0.3, pnl.CONTROL_SIGNAL),
(0.3, pnl.CONTROL_PROJECTION),
(0.3, pnl.ControlSignal),
(0.3, pnl.ControlSignal()),
(0.3, pnl.ControlProjection),
(0.3, "CP_OBJECT"),
(0.3, pnl.ControlMechanism),
(0.3, pnl.ControlMechanism()),
(0.3, pnl.ControlMechanism)
]
for i, ctl_tuple in enumerate([j for j in zip(control_spec_list, reversed(control_spec_list))]):
C1, C2 = ctl_tuple
# This shenanigans is to avoid assigning the same instantiated ControlProjection more than once
if C1 == 'CP_OBJECT':
C1 = pnl.ControlProjection()
elif isinstance(C1, tuple) and C1[1] == 'CP_OBJECT':
C1 = (C1[0], pnl.ControlProjection())
if C2 == 'CP_OBJECT':
C2 = pnl.ControlProjection()
elif isinstance(C2, tuple) and C2[1] == 'CP_OBJECT':
C2 = (C2[0], pnl.ControlProjection())
R = pnl.RecurrentTransferMechanism(noise=C1,
function=psyneulink.core.components.functions.transferfunctions.Logistic(gain=C2))
assert R.parameter_ports[pnl.NOISE].mod_afferents[0].name in \
'ControlProjection for RecurrentTransferMechanism-{}[noise]'.format(i)
assert R.parameter_ports[pnl.GAIN].mod_afferents[0].name in \
'ControlProjection for RecurrentTransferMechanism-{}[gain]'.format(i)
def test_formats_for_gating_specification_of_input_and_output_ports(self):
gating_spec_list = [
pnl.GATING,
pnl.CONTROL,
pnl.GATING_SIGNAL,
pnl.CONTROL_SIGNAL,
pnl.GATING_PROJECTION,
pnl.CONTROL_PROJECTION,
pnl.GatingSignal,
pnl.ControlSignal,
pnl.GatingSignal(),
pnl.ControlSignal(),
pnl.GatingProjection,
"GP_OBJECT",
pnl.GatingMechanism,
pnl.ControlMechanism,
pnl.GatingMechanism(),
pnl.ControlMechanism(),
(0.3, pnl.GATING),
(0.3, pnl.CONTROL),
(0.3, pnl.GATING_SIGNAL),
(0.3, pnl.CONTROL_SIGNAL),
(0.3, pnl.GATING_PROJECTION),
(0.3, pnl.CONTROL_PROJECTION),
(0.3, pnl.GatingSignal),
(0.3, pnl.ControlSignal),
(0.3, pnl.GatingSignal()),
(0.3, pnl.ControlSignal()),
(0.3, pnl.GatingProjection),
(0.3, pnl.ControlProjection),
(0.3, "GP_OBJECT"),
(0.3, pnl.GatingMechanism),
(0.3, pnl.ControlMechanism),
(0.3, pnl.GatingMechanism()),
(0.3, pnl.ControlMechanism())
]
for i, gating_tuple in enumerate([j for j in zip(gating_spec_list, reversed(gating_spec_list))]):
G_IN, G_OUT = gating_tuple
# This shenanigans is to avoid assigning the same instantiated ControlProjection more than once
if G_IN == 'GP_OBJECT':
G_IN = pnl.GatingProjection()
elif isinstance(G_IN, tuple) and G_IN[1] == 'GP_OBJECT':
G_IN = (G_IN[0], pnl.GatingProjection())
if G_OUT == 'GP_OBJECT':
G_OUT = pnl.GatingProjection()
elif isinstance(G_OUT, tuple) and G_OUT[1] == 'GP_OBJECT':
G_OUT = (G_OUT[0], pnl.GatingProjection())
if isinstance(G_IN, tuple):
IN_NAME = G_IN[1]
else:
IN_NAME = G_IN
IN_CONTROL = pnl.CONTROL in repr(IN_NAME).split(".")[-1].upper()
if isinstance(G_OUT, tuple):
OUT_NAME = G_OUT[1]
else:
OUT_NAME = G_OUT
OUT_CONTROL = pnl.CONTROL in repr(OUT_NAME).split(".")[-1].upper()
T = pnl.TransferMechanism(name='T-GATING-{}'.format(i),
input_ports=[G_IN],
output_ports=[G_OUT])
if IN_CONTROL:
assert T.input_ports[0].mod_afferents[0].name in \
'ControlProjection for T-GATING-{}[InputPort-0]'.format(i)
else:
assert T.input_ports[0].mod_afferents[0].name in \
'GatingProjection for T-GATING-{}[InputPort-0]'.format(i)
if OUT_CONTROL:
assert T.output_ports[0].mod_afferents[0].name in \
'ControlProjection for T-GATING-{}[OutputPort-0]'.format(i)
else:
assert T.output_ports[0].mod_afferents[0].name in \
'GatingProjection for T-GATING-{}[OutputPort-0]'.format(i)
# with pytest.raises(pnl.ProjectionError) as error_text:
# T1 = pnl.ProcessingMechanism(name='T1', input_ports=[pnl.ControlMechanism()])
# assert 'Primary OutputPort of ControlMechanism-0 (ControlSignal-0) ' \
# 'cannot be used as a sender of a Projection to InputPort of T1' in error_text.value.args[0]
#
# with pytest.raises(pnl.ProjectionError) as error_text:
# T2 = pnl.ProcessingMechanism(name='T2', output_ports=[pnl.ControlMechanism()])
# assert 'Primary OutputPort of ControlMechanism-1 (ControlSignal-0) ' \
# 'cannot be used as a sender of a Projection to OutputPort of T2' in error_text.value.args[0]
# KDM: this is a good candidate for pytest.parametrize
def test_masked_mapping_projection(self):
t1 = pnl.TransferMechanism(size=2)
t2 = pnl.TransferMechanism(size=2)
proj = pnl.MaskedMappingProjection(sender=t1,
receiver=t2,
matrix=[[1,2],[3,4]],
mask=[[1,0],[0,1]],
mask_operation=pnl.ADD
)
p = pnl.Process(pathway=[t1, proj, t2])
val = p.execute(input=[1,2])
assert np.allclose(val, [[8, 12]])
t1 = pnl.TransferMechanism(size=2)
t2 = pnl.TransferMechanism(size=2)
proj = pnl.MaskedMappingProjection(sender=t1,
receiver=t2,
matrix=[[1,2],[3,4]],
mask=[[1,0],[0,1]],
mask_operation=pnl.MULTIPLY
)
p = pnl.Process(pathway=[t1, proj, t2])
val = p.execute(input=[1,2])
assert np.allclose(val, [[1, 8]])
t1 = pnl.TransferMechanism(size=2)
t2 = pnl.TransferMechanism(size=2)
proj = pnl.MaskedMappingProjection(sender=t1,
receiver=t2,
mask=[[1,2],[3,4]],
mask_operation=pnl.MULTIPLY
)
p = pnl.Process(pathway=[t1, proj, t2])
val = p.execute(input=[1,2])
assert np.allclose(val, [[1, 8]])
    def test_masked_mapping_projection_mask_conflicts_with_matrix(self):
with pytest.raises(pnl.MaskedMappingProjectionError) as error_text:
t1 = pnl.TransferMechanism(size=2)
t2 = pnl.TransferMechanism(size=2)
pnl.MaskedMappingProjection(sender=t1,
receiver=t2,
mask=[[1,2,3],[4,5,6]],
mask_operation=pnl.MULTIPLY
)
assert "Shape of the 'mask'" in str(error_text.value)
assert "((2, 3)) must be the same as its 'matrix' ((2, 2))" in str(error_text.value)
# FIX 7/22/15 [JDC] - REPLACE WITH MORE ELABORATE TESTS OF DUPLICATE PROJECTIONS:
# SAME FROM OutputPort; SAME TO InputPort
# TEST ERROR MESSAGES GENERATED BY VARIOUS _check_for_duplicates METHODS
# def test_duplicate_projection_detection_and_warning(self):
#
# with pytest.warns(UserWarning) as record:
# T1 = pnl.TransferMechanism(name='T1')
# T2 = pnl.TransferMechanism(name='T2')
# T3 = pnl.TransferMechanism(name='T3')
# T4 = pnl.TransferMechanism(name='T4')
#
# MP1 = pnl.MappingProjection(sender=T1,receiver=T2,name='MP1')
# MP2 = pnl.MappingProjection(sender=T1,receiver=T2,name='MP2')
# pnl.proc(T1,MP1,T2,T3)
# pnl.proc(T1,MP2,T2,T4)
#
# # hack to find a specific warning (other warnings may be generated by the Process construction)
# correct_message_found = False
# for warning in record:
# if "that already has an identical Projection" in str(warning.message):
# correct_message_found = True
# break
#
# assert len(T2.afferents)==1
# assert correct_message_found
def test_duplicate_projection_creation_error(self):
from psyneulink.core.components.projections.projection import DuplicateProjectionError
with pytest.raises(DuplicateProjectionError) as record:
T1 = pnl.TransferMechanism(name='T1')
T2 = pnl.TransferMechanism(name='T2')
pnl.MappingProjection(sender=T1,receiver=T2,name='MP1')
pnl.MappingProjection(sender=T1,receiver=T2,name='MP2')
assert 'Attempt to assign Projection to InputPort-0 of T2 that already has an identical Projection.' \
in record.value.args[0]
|
StarcoderdataPython
|
379436
|
"""Read customised MetOcean Solutions WW3 spectra files."""
import numpy as np
import xarray as xr
from wavespectra.core.attributes import attrs, set_spec_attributes
from wavespectra.specdataset import SpecDataset
def read_ww3_msl(filename_or_fileglob, chunks={}):
"""Read Spectra from WAVEWATCHIII MetOcean Solutions netCDF format.
Args:
- filename_or_fileglob (str): filename or fileglob specifying multiple
files to read.
- chunks (dict): chunk sizes for dimensions in dataset. By default
dataset is loaded using single chunk for all dimensions (see
xr.open_mfdataset documentation).
Returns:
- dset (SpecDataset): spectra dataset object read from ww3 file.
Note:
- If file is large to fit in memory, consider specifying chunks for
'time' and/or 'site' dims
"""
dset = xr.open_mfdataset(filename_or_fileglob, chunks=chunks)
_units = dset.specden.attrs.get("units", "")
dset = dset.rename(
{"freq": attrs.FREQNAME, "dir": attrs.DIRNAME, "wsp": attrs.WSPDNAME}
)
dset[attrs.SPECNAME] = (dset["specden"].astype("float32") + 127.0) * dset["factor"]
dset = dset.drop(["specden", "factor", "df"])
# Assign site coordinate so they will look like those read from native ww3 files
dset[attrs.SITENAME] = np.arange(1.0, dset.site.size + 1)
set_spec_attributes(dset)
dset[attrs.SPECNAME].attrs.update({"_units": _units, "_variable_name": "specden"})
if attrs.DIRNAME not in dset or len(dset.dir) == 1:
dset[attrs.SPECNAME].attrs.update({"units": "m^{2}.s"})
return dset
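# Minimal usage sketch (the file glob and chunk sizes below are illustrative
# assumptions, not part of the original source):
# if __name__ == "__main__":
#     dset = read_ww3_msl("./ww3_spectra_*.nc", chunks={"time": 100})
#     print(dset)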
|
StarcoderdataPython
|
11273131
|
<reponame>kagemeka/atcoder-submissions<gh_stars>1-10
import sys
import typing
import numpy as np
def main() -> typing.NoReturn:
n = int(input())
a = np.array(
sys.stdin.read().split(),
dtype=np.int64,
).reshape(n, 3)
j = np.arange(3)
j = np.vstack((j + 1, j + 2))
j %= 3
dp = np.zeros(
3,
dtype=np.int64,
)
for x in a:
dp = dp[j].max(axis=0) + x
print(dp.max())
main()
|
StarcoderdataPython
|
11274765
|
__________________________________________________________________________________________________
sample 28 ms submission
from collections import defaultdict,Counter
class Solution:
def longestSubstring(self, s: str, k: int) -> int:
"""
:type s: str
:type k: int
:rtype: int
"""
if not s:
return 0
counter = Counter(s)
for key,val in counter.items():
if val < k:
s = s.replace(key,'-')
if not '-' in s:
return len(s)
substr_set = set(s.split('-'))
res = 0
for substr in substr_set:
res = max(res,self.longestSubstring(substr,k))
return res
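# Quick sanity check for the recursive divide-and-conquer solution above, using the
# standard LeetCode 395 examples (added here for illustration):
# assert Solution().longestSubstring("aaabb", 3) == 3   # "aaa"
# assert Solution().longestSubstring("ababbc", 2) == 5  # "ababb"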
__________________________________________________________________________________________________
sample 12948 kb submission
class Solution:
def longestSubstring(self, s: str, k: int) -> int:
"""
for each char, it is whether:
1. not in the strs
2. more than three times
"""
ans = 0
for n in range(1, 27):
l = 0
counter = {}
# todo = 0
for r in range(len(s)):
if s[r] not in counter:
counter[s[r]] = 0
counter[s[r]] += 1
while len(counter) > n:
counter[s[l]] -= 1
if counter[s[l]] == 0:
counter.pop(s[l])
l += 1
if all([v >= k for v in counter.values()]):
ans = max(ans, r - l + 1)
return ans
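# Note on the sliding-window variant above (explanatory comment, not in the original):
# it fixes the number of distinct characters n (1..26), then grows/shrinks a window so
# it never holds more than n distinct characters; whenever every character in the
# window occurs at least k times, the window length is a candidate answer.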
__________________________________________________________________________________________________
|
StarcoderdataPython
|
3240079
|
# Generated by Django 3.2.2 on 2021-05-14 08:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0002_auto_20210514_0746'),
]
operations = [
migrations.AddField(
model_name='portfolio',
name='location',
field=models.CharField(blank=True, max_length=30, null=True),
),
]
|
StarcoderdataPython
|
8162668
|
from flask_restful import Resource
from app.vendors.rest import response
class HealthCheck(Resource):
def get(self):
data = {
"status": "running",
}
return response(200, data=data, message="OK")
|
StarcoderdataPython
|
164825
|
<filename>ifaces/management/__init__.py
"""Management modules for ifaces"""
|
StarcoderdataPython
|
290611
|
<reponame>Crown-Commercial-Service/digitalmarketplace-developer-tools
import re
import ast
import setuptools
_version_re = re.compile(r"__version__\s+=\s+(.*)")
with open("dmdevtools/__init__.py", "rb") as f:
version = str(
ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1))
)
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
long_description_content_type = "text/markdown"
setuptools.setup(
name="digitalmarketplace-developer-tools",
version=version,
url="https://github.com/alphagov/digitalmarketplace-developer-tools",
license="MIT",
author="GDS Developers",
description="Common developer tools for Digital Marketplace repos",
long_description=long_description,
long_description_content_type=long_description_content_type,
packages=setuptools.find_packages(),
python_requires="~=3.6",
install_requires=[
"colored",
"invoke",
],
)
|
StarcoderdataPython
|
214930
|
<gh_stars>10-100
from arekit.common.context.terms_mapper import TextTermsMapper
from arekit.common.entities.base import Entity
from arekit.common.entities.str_fmt import StringEntitiesFormatter
from arekit.common.entities.types import EntityType
from arekit.common.frames.text_variant import TextFrameVariant
from arekit.processing.text.token import Token
class OpinionContainingTextTermsMapper(TextTermsMapper):
"""
Provides an ability to setup s_obj, t_obj
The latter might be utilized with synonyms collection
"""
def __init__(self, entity_formatter):
assert(isinstance(entity_formatter, StringEntitiesFormatter))
self.__entities_formatter = entity_formatter
self.__s_ind = None
self.__t_ind = None
self.__s_group = None
self.__t_group = None
@property
def StringEntitiesFormatter(self):
return self.__entities_formatter
def __syn_group(self, entity):
""" Note: here we guarantee that entity has GroupIndex.
"""
assert(isinstance(entity, Entity))
return entity.GroupIndex if entity is not None else None
def set_s_ind(self, s_ind):
assert(isinstance(s_ind, int))
self.__s_ind = s_ind
def set_t_ind(self, t_ind):
assert(isinstance(t_ind, int))
self.__t_ind = t_ind
def _after_mapping(self):
""" In order to prevent bugs.
Every index should be declared before mapping.
"""
self.__s_ind = None
self.__t_ind = None
def iter_mapped(self, terms):
terms_list = list(terms)
self.__s_group = self.__syn_group(terms_list[self.__s_ind] if self.__s_ind is not None else None)
self.__t_group = self.__syn_group(terms_list[self.__t_ind] if self.__t_ind is not None else None)
return super(OpinionContainingTextTermsMapper, self).iter_mapped(terms)
def map_entity(self, e_ind, entity):
if e_ind == self.__s_ind:
return self.__entities_formatter.to_string(original_value=entity,
entity_type=EntityType.Subject)
elif e_ind == self.__t_ind:
return self.__entities_formatter.to_string(original_value=entity,
entity_type=EntityType.Object)
elif self.__is_in_same_group(self.__syn_group(entity), self.__s_group):
return self.__entities_formatter.to_string(original_value=entity,
entity_type=EntityType.SynonymSubject)
elif self.__is_in_same_group(self.__syn_group(entity), self.__t_group):
return self.__entities_formatter.to_string(original_value=entity,
entity_type=EntityType.SynonymObject)
else:
return self.__entities_formatter.to_string(original_value=entity,
entity_type=EntityType.Other)
@staticmethod
def __is_in_same_group(g1, g2):
if g1 is None or g2 is None:
# In such scenario we cannot guarantee
# that g1 and g2 belong to the same group.
return False
return g1 == g2
def map_word(self, w_ind, word):
return word.strip()
def map_text_frame_variant(self, fv_ind, text_frame_variant):
assert(isinstance(text_frame_variant, TextFrameVariant))
return text_frame_variant.Variant.get_value().strip()
def map_token(self, t_ind, token):
assert(isinstance(token, Token))
return token.get_meta_value()
|
StarcoderdataPython
|
9774518
|
<reponame>JiahnChoi/opsdroid.kr
"""The version subcommand for opsdroid cli."""
import click
from opsdroid import __version__
@click.command()
@click.pass_context
def version(ctx):
"""Print out the version of opsdroid that is installed and exits.
Args:
ctx (:obj:`click.Context`): The current click cli context.
Returns:
int: the exit code. Always returns 0 in this case.
"""
click.echo("opsdroid {version}".format(version=__version__))
ctx.exit(0)
|
StarcoderdataPython
|
4851615
|
<reponame>gordonwatts/func-adl-types-atlas
import ast
import copy
import re
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Tuple, TypeVar
import jinja2
from func_adl import ObjectStream
from func_adl.ast.meta_data import lookup_query_metadata
@dataclass
class CalibrationEventConfig:
# Name of the jet collection to calibrate and use by default
jet_collection: str
# Name of the truth jets to be used for the jet calibration
jet_calib_truth_collection: str
########### Electrons
# Name of the electron collection to calibrate and use by default
electron_collection: str
# The working point (e.g. xxx)
electron_working_point: str
# The isolation (e.g. xxxx)
electron_isolation: str
########### Photons
# Name of the photon collection to calibrate and use by default.
photon_collection: str
# The working point (e.g. xxx)
photon_working_point: str
# The isolation (e.g. xxxx)
photon_isolation: str
########### Muons
# Name of the muon collection to calibration and use by default.
muon_collection: str
# The working point (e.g. xxx)
muon_working_point: str
# The isolation (e.g. xxxx)
muon_isolation: str
########### Taus
# Name of the tau collection to calibrate and use by default.
tau_collection: str
# The working point (e.g. xxxx)
tau_working_point: str
###### Other Config Options
perform_overlap_removal: bool
T = TypeVar('T')
class calib_tools:
'''Helper functions to work with a query's calibration configuration.'''
_default_calibration: Optional[CalibrationEventConfig] = None
_default_sys_error: Optional[str] = 'NOSYS'
@classmethod
def reset_config(cls):
'''Reset calibration config to the default.
* This is configured for working with R21 DAOD_PHYS samples.
'''
cls._default_calibration = CalibrationEventConfig(
jet_collection="AntiKt4EMPFlowJets",
jet_calib_truth_collection="AntiKt4TruthDressedWZJets",
electron_collection="Electrons",
electron_working_point="MediumLHElectron",
electron_isolation="NonIso",
photon_collection="Photons",
photon_working_point="Tight",
photon_isolation="FixedCutTight",
muon_collection="Muons",
muon_working_point="Medium",
muon_isolation="NonIso",
tau_collection="TauJets",
tau_working_point="Tight",
perform_overlap_removal=True,
)
@classmethod
def _setup(cls):
if cls._default_calibration is None:
cls.reset_config()
@classmethod
def set_default_config(cls, config: CalibrationEventConfig):
'Store a copy of a new default config for use in all future queries.'
cls._default_calibration = copy.copy(config)
@classmethod
@property
def default_config(cls) -> CalibrationEventConfig:
'Return a copy of the current default calibration configuration.'
cls._setup()
assert cls._default_calibration is not None
return copy.copy(cls._default_calibration)
@classmethod
def query_update(cls, query: ObjectStream[T], calib_config: Optional[CalibrationEventConfig] = None, **kwargs) -> ObjectStream[T]:
'''Add metadata to a query to indicate a change in the calibration configuration for the query.
Args:
query (ObjectStream[T]): The query to update.
calib_config (Optional[CalibrationEventConfig]): The new calibration configuration to use. If specified
will override all calibration configuration options in the query.
jet_collection, ...: Use any property name from the `CalibrationEventConfig` class to override that particular
                option for this query. You may specify as many of them as you like.
Returns:
ObjectStream[T]: The updated query.
Notes:
* This function can be chained - resolution works by looking at the most recent `query_update` in the query.
* This function works by storing a complete `CalibrationEventConfig` object, updated as requested, in the query. So
even if you just update `jet_collection`, changing the `default_config` after calling this will have no effect.
'''
# Get a base calibration config we can modify (e.g. a copy)
config = calib_config
if config is None:
config = calib_tools.query_get(query)
# Now, modify by any arguments we were given
for k, v in kwargs.items():
if hasattr(config, k):
setattr(config, k, v)
else:
raise ValueError(f'Unknown calibration config option: {k} in `query_update`')
# Place it in the query stream for later use
return query.QMetaData({
'calibration': config
})
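    # Hypothetical usage sketch (assumes `ds` is an existing func_adl ObjectStream;
    # the jet collection name below is illustrative only, not taken from this source):
    #   ds2 = calib_tools.query_update(ds, jet_collection="AntiKt4EMTopoJets")
    #   cfg = calib_tools.query_get(ds2)  # reflects the jet_collection override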
@classmethod
def query_get(cls, query:ObjectStream[T]) -> CalibrationEventConfig:
'''Return a copy of the calibration if the query were issued at this point.
Args:
query (ObjectStream[T]): The query to inspect.
Returns:
CalibrationEventConfig: The calibration configuration for the query.
'''
r = lookup_query_metadata(query, 'calibration')
if r is None:
return calib_tools.default_config
else:
return copy.copy(r)
@classmethod
@property
def default_sys_error(cls) -> str:
'''Return the default systematic error'''
if cls._default_sys_error is None:
return 'NOSYS'
return cls._default_sys_error
@classmethod
def set_default_sys_error(cls, value: str):
'''Set the default systematic error'''
cls._default_sys_error = value
@classmethod
def reset_sys_error(cls):
'''Reset to 'NOSYS' the default systematic error'''
cls._default_sys_error = 'NOSYS'
@classmethod
def query_sys_error(cls, query: ObjectStream[T], sys_error: str) -> ObjectStream[T]:
'''Add metadata to a query to indicate a change in the systematic error for the events.
Args:
query (ObjectStream[T]): The query to update.
sys_error (str): The systematic error to fetch. Only a single one is possible at any time. The sys error names
are the same as used by the common CP algorithms.
Returns:
ObjectStream[T]: The updated query.
Notes:
* This function can be chained - resolution works by looking at the most recent `query_sys_error` in the query.
'''
return query.QMetaData({
'calibration_sys_error': sys_error
})
_g_jinja2_env: Optional[jinja2.Environment] = None
def template_configure() -> jinja2.Environment:
'''Configure the jinja2 template
'''
global _g_jinja2_env
if _g_jinja2_env is None:
template_path = Path(__file__).parent / "templates"
loader = jinja2.FileSystemLoader(str(template_path))
_g_jinja2_env = jinja2.Environment(loader=loader)
return _g_jinja2_env
_g_metadata_names_no_overlap = {
'jet_collection': ["sys_error_tool", "pileup_tool", "corrections_jet", "add_calibration_to_job"],
'electron_collection': ["sys_error_tool", "pileup_tool", "corrections_electron", "add_calibration_to_job"],
'muon_collection': ["sys_error_tool", "pileup_tool", "corrections_muon", "add_calibration_to_job"],
'photon_collection': ["sys_error_tool", "pileup_tool", "corrections_photon", "add_calibration_to_job"],
'tau_collection': ["sys_error_tool", "pileup_tool", "corrections_tau", "add_calibration_to_job"],
'met_collection': ["sys_error_tool", "pileup_tool", "corrections_jet", "corrections_muon", "corrections_electron", "corrections_met", "add_calibration_to_job"],
}
_g_metadata_names_overlap = {
'jet_collection': ["sys_error_tool", "pileup_tool", "corrections_jet", "corrections_muon", "corrections_electron", "corrections_photon", "corrections_tau", "corrections_overlap", "add_calibration_to_job"],
'electron_collection': ["sys_error_tool", "pileup_tool", "corrections_jet", "corrections_muon", "corrections_electron", "corrections_photon", "corrections_tau", "corrections_overlap", "add_calibration_to_job"],
'muon_collection': ["sys_error_tool", "pileup_tool", "corrections_jet", "corrections_muon", "corrections_electron", "corrections_photon", "corrections_tau", "corrections_overlap", "add_calibration_to_job"],
'photon_collection': ["sys_error_tool", "pileup_tool", "corrections_jet", "corrections_muon", "corrections_electron", "corrections_photon", "corrections_tau", "corrections_overlap", "add_calibration_to_job"],
'tau_collection': ["sys_error_tool", "pileup_tool", "corrections_jet", "corrections_muon", "corrections_electron", "corrections_photon", "corrections_tau", "corrections_overlap", "add_calibration_to_job"],
'met_collection': ["sys_error_tool", "pileup_tool", "corrections_jet", "corrections_muon", "corrections_electron", "corrections_met", "add_calibration_to_job"],
}
def fixup_collection_call(s: ObjectStream[T], a: ast.Call, collection_attr_name: str) -> Tuple[ObjectStream[T], ast.Call]:
'Apply all the fixes to the collection call'
# Find the two arguments
uncalibrated_bank_name = None
calibrated_bank_name = None
if len(a.args) >= 1:
calibrated_bank_name = ast.literal_eval(a.args[0])
if len(a.args) >= 2:
uncalibrated_bank_name = ast.literal_eval(a.args[1])
for arg in a.keywords:
if arg.arg == 'calibrated_collection':
calibrated_bank_name = ast.literal_eval(arg.value)
if arg.arg == 'uncalibrated_collection':
uncalibrated_bank_name = ast.literal_eval(arg.value)
if uncalibrated_bank_name is not None and calibrated_bank_name is not None:
        raise ValueError(f"Illegal to specify both `calibrated_collection` and `uncalibrated_collection` when accessing `{collection_attr_name}`.")
new_s = s
if calibrated_bank_name is not None:
new_s = calib_tools.query_update(new_s, **{collection_attr_name: calibrated_bank_name})
# See if there is a systematic error we need to fetch
sys_error = lookup_query_metadata(new_s, 'calibration_sys_error')
if sys_error is None:
sys_error = calib_tools.default_sys_error
# Uncalibrated collection is pretty easy - nothing to do here!
if uncalibrated_bank_name is not None:
output_collection_name = uncalibrated_bank_name
else:
# Get the most up to date configuration for this run.
calibration_info = calib_tools.query_get(new_s)
# Next, load up all the meta-data for this collection.
j_env = template_configure()
dependent_md_name = None
output_collection_name = None
md_to_transmit = _g_metadata_names_overlap[collection_attr_name] if calibration_info.perform_overlap_removal else _g_metadata_names_no_overlap[collection_attr_name]
for md_name in md_to_transmit:
md_template = j_env.get_template(f"{md_name}.py")
text = md_template.render(calib=calibration_info, sys_error=sys_error)
md_text = {
"metadata_type": "add_job_script",
"name": md_name,
"script": text.splitlines()
}
if dependent_md_name is not None:
md_text["depends_on"] = [dependent_md_name]
new_s = new_s.MetaData(md_text)
dependent_md_name = md_name
# Have we found the output collection name?
found = re.search(f"# Output {collection_attr_name} = (.+)(\\s|$)", text)
if found is not None:
output_collection_name = found.group(1)
if output_collection_name is None:
raise RuntimeError(f"Could not find output collection name in templates for collection '{collection_attr_name} - xAOD job options templates are malformed.")
# Finally, rewrite the call to fetch the collection with the actual collection name we want
# to fetch.
new_call = copy.copy(a)
new_call.args = [ast.parse(f"'{output_collection_name}'").body[0].value] # type: ignore
return new_s, new_call
|
StarcoderdataPython
|
5192467
|
<reponame>cbertelegni/scrap_temmperatura
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests, re, os
from datetime import datetime
HEADERS = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36',
'Referer':'http://www.smn.gov.ar/?mod=prensa&id=200',
}
class ScrapTemp(object):
"""Scrap LN Home
    the National Weather Service page (http://www.smn.gov.ar/?mod=prensa&id=200) queries self.url_bsas_temp via ajax
    pending: scrape self.output_path
"""
BASE = os.path.dirname(os.path.abspath(__file__))
# template = "\nTemperature: {temp}\n\nFeels Like: {fell}\n\nHumidity: {hum}%\n"
template = """
Temperature: {temp}
Feels Like: {fell}
Humidity: {hum}%
"""
url_table_temp = "http://www.smn.gov.ar/?mod=dpd&id=21&e=total"
url_bsas_temp = "http://www.smn.gov.ar/layouts/temperatura_layout.php?d=0.7334670192534647"
# output_path = os.path.join(os.path.join(BASE, ".."), "data")
output_path = os.path.join(BASE, "data")
output_file = os.path.join(output_path, "bsas_temp.txt")
last_modified = os.path.join(output_path, "last_modified.txt")
def __init__(self):
super(ScrapTemp, self).__init__()
# print "scraping ..."
if not os.path.exists(self.output_path):
os.makedirs(self.output_path)
self.get_bsas_temp()
def get_bsas_temp(self):
r = requests.get(self.url_bsas_temp, headers=HEADERS)
r.encoding = 'utf-8'
temp = r.text.split("\n")[0]
regex = re.compile(r".C.+", re.IGNORECASE)
temp = regex.sub("", temp)
output = self.template.format(temp=temp, fell=0, hum=0)
text_file = open(self.output_file, "w")
text_file.write(output)
text_file.close()
# logging last_modified...
text_file = open(self.last_modified, "w")
text_file.write(str(datetime.now()))
text_file.close()
# return temp
ScrapTemp()
|
StarcoderdataPython
|
8136968
|
#!python3
import io
import sys
import datetime
import names
from gen_random_values import *
lista = []
repeat = 100
with io.open('fixtures.json', 'wt') as f:
for i in range(repeat):
date = datetime.datetime.now().isoformat(" ")
fname = names.get_first_name()
lname = names.get_last_name()
email = fname[0].lower() + '.' + lname.lower() + '@email.com'
b = random.choice(['true', 'false'])
# pk, first_name, last_name, cpf, birthday, email, phone, blocked,
# created_at, modified_at
lista.append(
(i + 1, fname, lname, gen_cpf(), gen_timestamp(), email, gen_phone(), b, date, date))
f.write('[\n')
for l in lista:
s = "{\n" + \
str(' "pk": ') + str(l[0]) + ",\n" + \
str(' "model": "core.person",\n') + \
str(' "fields": {\n') + \
str(' "first_name": "') + l[1] + str('",\n') + \
str(' "last_name": "') + l[2] + str('",\n') + \
str(' "cpf": "') + l[3] + str('",\n') + \
str(' "birthday": "') + l[4] + str('",\n') + \
str(' "email": "') + l[5] + str('",\n') + \
str(' "phone": "') + l[6] + str('",\n') + \
str(' "blocked": ') + l[7] + str(',\n') + \
str(' "created_at": "') + l[8] + str('",\n') + \
str(' "modified_at": "') + l[9] + str('"\n') + \
" }\n"
if l[0] == repeat:
s = s + "}\n"
else:
s = s + "},\n"
f.write(str(s))
f.write(']\n')
|
StarcoderdataPython
|
8039744
|
<gh_stars>0
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Name: state
# Purpose: Retrieve and recreate state of objects
#
# Author: <NAME> (<EMAIL>)
#
# Copyright: (c) 2014 <NAME>
# License: This program is part of a larger application. For license
# details please read the file LICENSE.TXT provided together
# with the application.
# ----------------------------------------------------------------------------
# $Source$
# $Revision$
"""Retrieve and recreate state of objects"""
from abc import abstractmethod
from typing import Any
from .. import Component, implementer
from ...gbbs.tools import all_slot_attrs
class State(Component):
"""An interface to retrieve and recreate the state of objects."""
@abstractmethod
def get_state(self) -> object:
"""Get the state of the object."""
@abstractmethod
def set_state(self, state: object) -> object:
"""Set the state of the object."""
@implementer(State)
class StateAdapter:
"""Adapter to get the state (i.e. an instance of `class:State`) of an
object.
Args:
context(Any): object to be adapted
Returns:
State: wrapper to get the state of `context`
"""
def __init__(self, context: object) -> None:
if isinstance(context, type):
raise TypeError("Can't adapt a class (i.e. instance of type).")
self._context = context
def get_state(self) -> Any:
"""Get the state of the context object."""
context = self._context
try:
# If context defines a __getstate__ method, return the result (if
# it's not None).
state = context.__getstate__()
if state is not None:
return state
except AttributeError:
pass
# get a dict of all attributes defined via __slots__ ...
state = dict(all_slot_attrs(context))
# ... and update it by __dict__
try:
state.update(context.__dict__)
except AttributeError:
pass
if state:
return state
raise TypeError("Unable to retrieve state of `context`.")
def set_state(self, state: Any) -> None:
"""Set the state of the context object."""
context = self._context
# If context defines a __setstate__ method, just call it.
try:
set_state = context.__setstate__
except AttributeError:
pass
else:
set_state(state)
return
# Otherwise, try to recreate object from dict
try:
it = state.items()
except (AttributeError, TypeError):
pass
else:
for attr, value in it:
setattr(context, attr, value)
return
raise TypeError(repr(context) + " has no '__setstate__' method "
"and given `state` is not a dict.")
# register the adapter
State.add_adapter(StateAdapter)
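# Minimal usage sketch (the `Point` class is a hypothetical example object, not part
# of this module):
# class Point:
#     def __init__(self, x, y):
#         self.x, self.y = x, y
# p = Point(1, 2)
# adapter = StateAdapter(p)
# state = adapter.get_state()          # -> {'x': 1, 'y': 2}, pulled from __dict__
# adapter.set_state({'x': 5, 'y': 7})  # recreated via setattr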
|
StarcoderdataPython
|
9638356
|
# <NAME> 0210315552
def solve():
d_list = ["Saturday", "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday"]
## Get input
(iy, im, id) = getInputs()
## Check if Jan or Feb
if (im == 1 or im == 2):
im += 12
iy += -1
## Insert into Zeller's Congruence
d = (id + 13*(im+1)//5 + iy + iy//4 - iy//100 + iy//400)%7
print("\nIt is a " + d_list[d])
return
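## Worked example of the congruence above (arithmetic check, not part of the original):
## 1 Jan 2000 is treated as month 13 of 1999, so
## d = (1 + 13*14//5 + 1999 + 1999//4 - 1999//100 + 1999//400) % 7
## = (1 + 36 + 1999 + 499 - 19 + 4) % 7 = 2520 % 7 = 0 -> d_list[0] = "Saturday"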
## Returns:
#
# (int) id: Day user input
# (int) im: Month user input
# (int) iy: Year user input
def getInputs():
## Set standard limits
y_limit_min = 1583
y_limit_max = 9999
m_limit_min = 1
m_limit_max = 12
d_limit_min = 1
d_limit_max = 31
iy = getInput("Year", y_limit_max, y_limit_min)
im = getInput("Month", m_limit_max, m_limit_min)
## Regulates day max
if (im == 2):
# Leap year check
if ((iy%400 == 0) or (iy%4 == 0 and iy%100 != 0)):
d_limit_max = 29
else:
d_limit_max = 28
elif (im in [4, 6, 9, 11]):
d_limit_max = 30
id = getInput("Day", d_limit_max, d_limit_min)
return (iy, im, id)
## Parameters:
#
# (str) input_name: Text showing user what input is needed
# (int) limit_max: Max input size limit
# (int) linit_min: Min input size limit
#
## Returns:
#
# (int) i: User input
def getInput(input_name, limit_max, limit_min):
## Loop for continuous asking on incorrect input
while True:
## Check for integer
try:
i = int(input(input_name+": "))
except:
## If not integer set to 0 leading to fail
i = 0
## Check limits
if (i >= limit_min and i <= limit_max):
## Break loop on correct interval
break
else:
print("Out of allowed range " + str(limit_min) + " to " + str(limit_max))
return i
solve()
|
StarcoderdataPython
|
12845010
|
<filename>exercicios/ex041.py
# Classifying Athletes
from datetime import date
from time import sleep
n = str(input('\033[1;30mDigite o seu nome completo: ')).strip().title()
a = int(input('Digite o seu ano de nascimento: '))
anoatual = date.today().year
i = anoatual - a
print('')
sleep(1.75)
print('ANALISANDO...')
sleep(2)
print('')
print('=-=' * 15)
print(f'Nome Completo: {n}')
print(f'Idade: {i} anos.\033[m')
if i <= 9:
print(f'\033[1;31mCategoria: MIRIM')
elif 9 < i <= 14:
print('\033[1;32mCategoria: INFANTIL')
elif 14 < i <= 19:
print('\033[1;33mCategoria: JUNIOR')
elif 19 < i <= 25:
print('\033[1;34mCategoria: SÊNIOR')
else:
print('\033[1;35mCategoria: MASTER')
print('\033[1;30m=-=' * 15)
|
StarcoderdataPython
|
9748423
|
#MenuTitle: Compare Font Spacings
# -*- coding: utf-8 -*-
__doc__="""
Compare spacing of open fonts, output in the Macro Window.
"""
abc = "abcdefghijklmnopqrstuvwxyz"
frequencies = { # Source: Wikipedia
"a": 0.08167,
"b": 0.01492,
"c": 0.02782,
"d": 0.04253,
"e": 0.12702,
"f": 0.02228,
"g": 0.02015,
"h": 0.06094,
"i": 0.06966,
"j": 0.00153,
"k": 0.00772,
"l": 0.04025,
"m": 0.02406,
"n": 0.06749,
"o": 0.07507,
"p": 0.01929,
"q": 0.00095,
"r": 0.05987,
"s": 0.06327,
"t": 0.09056,
"u": 0.02758,
"v": 0.00978,
"w": 0.02361,
"x": 0.00150,
"y": 0.01974,
"z": 0.00074
}
# brings macro window to front and clears its log:
Glyphs.clearLog()
Glyphs.showMacroWindow()
theFonts = Glyphs.fonts
for theFont in theFonts:
print "FONT: %s\n%s\n" % (theFont.familyName, theFont.filepath)
for thisMaster in theFont.masters:
print " Master: %s" % thisMaster.name
lowercaseWidths = 0
uppercaseWidths = 0
weightedLowercaseWidths = 0
weightedUppercaseWidths = 0
for thisLetter in abc:
lcWidth = theFont.glyphs[thisLetter].layers[thisMaster.id].width
ucWidth = theFont.glyphs[thisLetter.upper()].layers[thisMaster.id].width
factor = frequencies[thisLetter]
lowercaseWidths += lcWidth
uppercaseWidths += ucWidth
weightedLowercaseWidths += lcWidth * factor
weightedUppercaseWidths += ucWidth * factor
print " Lowercase: %.1f" % lowercaseWidths
print " Uppercase: %.1f" % uppercaseWidths
print " Weighted English Lowercase: %.1f" % (weightedLowercaseWidths * 26)
print " Weighted English Uppercase: %.1f" % (weightedUppercaseWidths * 26)
print
|
StarcoderdataPython
|
195436
|
import re
#import datetime
from random import randrange
import time
class testHelperSM:
def __init__(self, app):
self.app = app
# def find_region(self):
# wd = self.app.wd
# wd.find_element_by_xpath("//div[@id='mCSB_2_container']/ul/li[2]/label")
# wd.find_element_by_xpath("//form[@id='frmSearch']//button[.='Поиск']")
# def find_region2(self, reg_name):
# wd = self.app.wd
# self.app.wait_smBlock(600)
# wd.find_element_by_xpath("//div[@id='aggregatesPlaceholder']/table/tbody/tr/td[2]/div/div/div[1]/span[2]").click()
# wd.find_element_by_xpath("//div[@id='mCSB_6_container']/div/ul/li[20]/label").click()
# wd.find_element_by_id("aggSearchText").click()
# wd.find_element_by_id("aggSearchText").clear()
# wd.find_element_by_id("aggSearchText").send_keys("%s" % reg_name)
# wd.find_element_by_id("aggSearch").click()
# wd.find_element_by_xpath("//div[@id='mCSB_7_container']/div/ul/li[6]/label").click()
# wd.find_element_by_xpath("//div[@id='mCSB_7_container']/div/ul/li[6]/span[3]").click()
# wd.find_element_by_xpath("//div[@id='mCSB_7_container']/div/ul/li[6]/label").click()
# wd.find_element_by_xpath("//div[@id='mCSB_7_container']/div/ul/li[7]/label").click()
# wd.find_element_by_xpath("//div[@id='mainAggDlgContent']//button[.='Применить фильтр']").click()
# self.app.wait_smBlock(600)
# self.press_search_button()
# def find_region3(self):
# wd = self.app.wd
# self.app.wait_smBlock(600)
# i = randrange(24)
# wd.find_element_by_xpath("//div[@id='aggregatesPlaceholder']/table/tbody/tr[2]/td[1]/div/div/div[1]/span[2]").click()
# self.app.wait_sm_artefact_Block(10)
# if i > 0:
#element = wd.find_element_by_xpath("//div[@id='mCSB_11_container']/div/ul/li[%s]/label" % i)
#ActionChains(wd).move_to_element(element).perform()
# wd.find_element_by_xpath("//div[@id='mCSB_11_container']/div/ul/li[%s]/label" % i).click()
# else:
# i = 2
# wd.find_element_by_xpath("//div[@id='mCSB_11_container']/div/ul/li[%s]/label" % i).click()
# wd.find_element_by_xpath("//div[@id='mainAggDlgContent']//button[.='Применить фильтр']").click()
# self.app.wait_smBlock(20)
# self.press_search_button()
# def find_in_container_number(self, range_container_numbers, container_number):
# wd = self.app.wd
# self.app.wait_smBlock(600)
# spicok = []
# i = randrange(1, 4, 1)
# if container_number == 0:
# ct = randrange(1, range_container_numbers, 1)
# else:
# ct = container_number
# if not self.is_sm_advSearch_is_displayed():
# if len(wd.find_elements_by_xpath("//div[@class='block-label']//a[.='Показать/скрыть']")) < 2:
# wd.find_element_by_xpath("//div[@class='block-label']//a[.='Показать/скрыть']").click()
# else:
# wd.find_element_by_xpath("//div[@id='advSearch']/div[2]/a").click()
# if i > 0 and ct > 0:
# if ct == 1:
# if i < 3:
# wd.find_element_by_xpath("//div[@id='mCSB_1_container']/ul/li[%s]/label" % str(i)).click()
# if i == 3:
# i = 2
# wd.find_element_by_xpath("//div[@id='mCSB_1_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 2:
# try:
# wd.find_element_by_xpath("//div[@id='mCSB_2_container']/ul/li[%s]/label" % str(i)).click()
# except:
# wd.find_element_by_xpath("//div[@id='mCSB_1_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 3:
# wd.find_element_by_xpath("//div[@id='mCSB_3_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 4:
# wd.find_element_by_xpath("//div[@id='mCSB_4_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 5:
# wd.find_element_by_xpath("//div[@id='mCSB_5_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 6:
# wd.find_element_by_xpath("//div[@id='mCSB_6_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 7:
# wd.find_element_by_xpath("//div[@id='mCSB_7_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 8:
# wd.find_element_by_xpath("//div[@id='mCSB_8_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 9:
# wd.find_element_by_xpath("//div[@id='mCSB_9_container']/ul/li[%s]/label" % str(i)).click()
# elif ct == 10:
# wd.find_element_by_xpath("//div[@id='mCSB_10_container']/ul/li[%s]/label" % str(i)).click()
# else:
# i = 2
# wd.find_element_by_xpath("//div[@id='mCSB_2_container']/ul/li[%s]/label" % str(i)).click()
# self.press_search_button()
# return i, ct
def press_search_button(self):
wd = self.app.wd
wd.find_element_by_xpath("//form[@id='frmSearch']//button[.='Поиск']").click()
# def is_sm_advSearch_is_displayed(self):
# try:
# text = self.app.wd.find_element_by_id("advSearchContent").value_of_css_property("display")
# if text == 'block':
# return True
# except:
# return False
# def find_zakazchik_for_purchases_list(self):
# wd = self.app.wd
# self.app.wait_smBlock(600)
# i = randrange(24)
# wd.find_element_by_xpath(
# "//div[@id='aggregatesPlaceholder']/table/tbody/tr[1]/td[3]/div[2]/div/div[1]/span[2]").click()
# self.app.wait_sm_artefact_Block(10)
# wd.find_element_by_id("aggSearchText").click()
# wd.find_element_by_id("aggSearchText").clear()
# wd.find_element_by_id("aggSearchText").send_keys("администрация")
# wd.find_element_by_id("aggSearch").click()
# self.app.wait_sm_artefact_Block(10)
# if i > 0:
# wd.find_element_by_xpath("//div[@id='mCSB_12_container']/div/ul/li[%s]/label" % i).click()
# else:
# i = 2
# wd.find_element_by_xpath("//div[@id='mCSB_12_container']/div/ul/li[%s]/label" % i).click()
# wd.find_element_by_xpath("//div[@id='mainAggDlgContent']//button[.='Применить фильтр']").click()
# self.app.wait_smBlock(600)
# self.press_search_button()
# ! not work
# def search_in_opened_container(self):
# wd = self.app.wd
# self.app.wait_smBlock(600)
# if not self.is_sm_advSearch_is_displayed():
# if len(wd.find_elements_by_xpath("//div[@class='block-label']//a[.='Показать/скрыть']")) < 2:
# wd.find_element_by_xpath("//div[@class='block-label']//a[.='Показать/скрыть']").click()
# else:
# wd.find_element_by_xpath("//div[@id='advSearch']/div[2]/a").click()
# i = randrange(1, 24, 1)
# c = len(wd.find_elements_by_css_selector("span.agg-widget_btn"))
# ct = randrange(c)
# wd.find_elements_by_css_selector("span.agg-widget_btn")[ct].click()
# self.app.wait_sm_artefact_Block(10)
    # find out how to click on the element
# wd.find_element_by_xpath("//div[@id='mainAggDlgContent']//button[.='Применить фильтр']").click()
# self.app.wait_smBlock(600)
# self.press_search_button()
# def get_artef_parametrs(self, ct):
# wd = self.app.wd
# self.app.wait_smBlock(600)
# for row in wd.find_elements_by_xpath("//div[@id='mCSB_%s_container']/ul/li[1]" % ct):
# cells = row.find_elements_by_tag_name("span")
# results = cells[0].find_element_by_tag_name("em").text
# try:
# parametr = cells[3].text
# except:
# parametr = cells[2].text
# return parametr
# def get_artef_param(self, ct):
# wd = self.app.wd
# param = self.get_artef_parametrs(ct)
# return param
# def is_smresult_not_0(self):
# try:
# text = self.get_total_results()
# if text != '0':
# return True
# except:
# return False
# def check_results(self):
# self.app.wait_smBlock(900)
# if self.is_smresult_not_0():
# result = self.get_total_results()
# return result
# else:
# return '0'
# def get_total_results(self):
# wd = self.app.wd
# results = wd.find_element_by_xpath("//div[@class='panel_header']/h2").get_attribute("textContent")
# #clear_result = wd.find_element_by_xpath("//div[@class='panel_header']/h2").get_attribute("textContent")[13:len(results)]
# clear_result = results[13:len(results)]
# return self.clear_result(clear_result)
def create_contact_report_all_in_dif_row_tel_mail(self):
wd = self.app.wd
wd.maximize_window()
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Контакты']").click()
self.app.wait_sm_artefact_Block(10)
wd.find_element_by_xpath("//label[@for='cb-3']").click()
if not wd.find_element_by_id("cb-3").is_selected():
wd.find_element_by_id("cb-3").click()
wd.find_element_by_xpath("//label[@for='rb-0']").click()
if not wd.find_element_by_id("rb-0").is_selected():
wd.find_element_by_id("rb-0").click()
wd.find_element_by_xpath("//div[@id='divReportContactsSettings']//button[.='Сформировать']").click()
def create_contact_report_all_in_dif_row_tel_mail_zakazchiki(self):
wd = self.app.wd
wd.maximize_window()
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Контакты']").click()
self.app.wait_sm_artefact_Block(10)
wd.find_element_by_xpath("//label[@for='cb-3']").click()
if not wd.find_element_by_id("cb-3").is_selected():
wd.find_element_by_id("cb-3").click()
wd.find_element_by_xpath("//label[@for='cb-8']").click()
if not wd.find_element_by_id("cb-8").is_selected():
wd.find_element_by_id("cb-8").click()
wd.find_element_by_xpath("//label[@for='cb-9']").click()
if wd.find_element_by_id("cb-9").is_selected():
wd.find_element_by_id("cb-9").click()
wd.find_element_by_xpath("//label[@for='rb-0']").click()
if not wd.find_element_by_id("rb-0").is_selected():
wd.find_element_by_id("rb-0").click()
wd.find_element_by_xpath("//div[@id='divReportContactsSettings']//button[.='Сформировать']").click()
def create_contact_report_allinone_tel_mail(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Контакты']").click()
self.app.wait_sm_artefact_Block(10)
wd.find_element_by_xpath("//label[@for='cb-3']").click()
if not wd.find_element_by_id("cb-3").is_selected():
wd.find_element_by_id("cb-3").click()
wd.find_element_by_xpath("//label[@for='rb-1']").click()
if not wd.find_element_by_id("rb-1").is_selected():
wd.find_element_by_id("rb-1").click()
wd.find_element_by_xpath("//div[@id='divReportContactsSettings']//button[.='Сформировать']").click()
def create_contact_report_allinone_tel_mail_zakazchiki(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Контакты']").click()
self.app.wait_sm_artefact_Block(10)
wd.find_element_by_xpath("//label[@for='cb-3']").click()
if not wd.find_element_by_id("cb-3").is_selected():
wd.find_element_by_id("cb-3").click()
wd.find_element_by_xpath("//label[@for='cb-8']").click()
if not wd.find_element_by_id("cb-8").is_selected():
wd.find_element_by_id("cb-8").click()
wd.find_element_by_xpath("//label[@for='cb-9']").click()
if wd.find_element_by_id("cb-9").is_selected():
wd.find_element_by_id("cb-9").click()
wd.find_element_by_xpath("//label[@for='rb-1']").click()
if not wd.find_element_by_id("rb-1").is_selected():
wd.find_element_by_id("rb-1").click()
wd.find_element_by_xpath("//div[@id='divReportContactsSettings']//button[.='Сформировать']").click()
def create_contact_report_result(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Результаты']").click()
self.app.wait_sm_artefact_Block(10)
wd.find_element_by_xpath("//div[@id='divReportSearchResultsSettings']//button[.='Сформировать']").click()
def create_contact_report_statictic(self):
wd = self.app.wd
        # TODO: add selection of the checkboxes
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Статистика']").click()
self.app.wait_sm_artefact_Block(10)
wd.find_element_by_xpath("//div[@id='divReportStatisticsSettings']//button[.='Сформировать']").click()
def create_contact_list_10000(self, cd2, text):
wd = self.app.wd
self.app.wait_smBlock(900)
wd.find_element_by_xpath("//li[@id='UpdateList']//p[.='Добавить']").click()
wd.find_element_by_xpath("//label[@for='sallResults']").click()
if not wd.find_element_by_id("sallResults").is_selected():
wd.find_element_by_id("sallResults").click()
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").click()
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").clear()
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").send_keys(text % cd2)
time.sleep(2)
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").click()
wd.find_element_by_xpath("//div[@id='addOrUpdateEntitiesListSearchDlg']//button[.='Сохранить']").click()
def create_purchases_company_list_50(self, cd2, text):
wd = self.app.wd
self.app.wait_smBlock(900)
        # select the first 50 results
        self.select_all_50()
        # create the first list from the first 50 companies
wd.find_element_by_xpath("//li[@id='UpdateList']//p[.='Добавить']").click()
wd.find_element_by_xpath("//label[@for='scheckedResults']").click()
if not wd.find_element_by_id("scheckedResults").is_selected():
wd.find_element_by_id("scheckedResults").click()
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").click()
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").clear()
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").click()
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").send_keys(text % cd2)
time.sleep(2)
wd.find_element_by_xpath("//input[@class='ui-autocomplete-input']").click()
wd.find_element_by_xpath("//div[@id='addOrUpdateEntitiesListSearchDlg']//button[.='Сохранить']").click()
def select_all_50(self):
wd = self.app.wd
wd.find_element_by_xpath("//label[@for='allItemsCb']").click()
if not wd.find_element_by_id("allItemsCb").is_selected():
wd.find_element_by_id("allItemsCb").click()
# def clear_result(self, s):
# x = re.sub(" ", "", str(s))
# return x
# def clear_spase_result(self, s):
# x = re.sub(" ", "", str(s))
# return x
def report_is_present_short(self, reestr_ex, report_type_ex, state_ex):
wd = self.app.wd
self.app.wait_smBlock(600)
reestr = wd.find_element_by_xpath("//div[@id='reports']/div[3]/table/tbody/tr[1]/td[3]").text.rstrip()
report_type = wd.find_element_by_xpath("//div[@id='reports']/div[3]/table/tbody/tr[1]/td[4]").text.rstrip()
state = wd.find_element_by_xpath("//div[@id='reports']/div[3]/table/tbody/tr[1]/td[5]").text.rstrip()
if state == "Создан" or state == state_ex:
if report_type == report_type_ex:
if reestr == reestr_ex:
return True
return False
def report_is_present_date(self, cd2):
wd = self.app.wd
date = wd.find_element_by_xpath("//div[@id='reports']/div[3]/table/tbody/tr[1]/td[2]").text.rstrip()
exp_date = "Сегодня " + cd2
cd2_hour = cd2[0:2]
cd2_minute = cd2[3:5]
exp_date2 = "Сегодня " + cd2_hour + ":" + str(int(cd2_minute) + 1)
if date == exp_date or date == exp_date2:
return True
return False
def monitoring_is_present(self, cd2, cd3, text, reestr_ex):
wd = self.app.wd
wd.refresh()
self.app.wait_smBlock(600)
date = wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[2]").text.rstrip()
exp_date = "Сегодня " + cd3
cd2_hour = cd3[0:2]
cd2_minute = cd3[3:5]
exp_name = text[0:-3] + " " + cd2
exp_date2 = "Сегодня " + cd2_hour + ":" + str(int(cd2_minute) + 1)
exp_date3 = "Сегодня " + cd2_hour + ":" + "0" + str(int(cd2_minute) + 1)
reestr = wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[3]").text.rstrip()
name = wd.find_element_by_xpath("//div[@class='panel_layer']//a[.='%s']" % exp_name).text.rstrip()
#name = wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[4]").text.rstrip()
if date == exp_date or date == exp_date2 or date == exp_date3:
if reestr == reestr_ex:
if name == exp_name:
return True
return False
def click_on_monitoring_link(self, cd2, text):
wd = self.app.wd
self.app.wait_smBlock(600)
exp_name = text[0:-3] + " " + cd2
wd.find_element_by_xpath("//div[@class='panel_layer']//a[.='%s']" % exp_name).click()
def contact_or_purchases_list_is_present(self, cd2, text):
wd = self.app.wd
        # TODO: verify the timestamp
self.app.wait_smBlock(600)
cd_contact_list = wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[2]").text.rstrip()
current_name = wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[3]").text.rstrip()
created_name = text[0:-3] + " " + cd2
cd_contact_list_date = cd_contact_list[0:2]
cd2_date = cd2[0:2]
cd_contact_list_month = cd_contact_list[3:5]
cd2_month = cd2[3:5]
cd_contact_list_year = cd_contact_list[6:10]
cd2_year = cd2[6:10]
if len(cd_contact_list) == 18:
cd_contact_list_hour = cd_contact_list[11:12]
cd_contact_list_minute = cd_contact_list[13:15]
else:
cd_contact_list_hour = cd_contact_list[11:13]
cd_contact_list_minute = cd_contact_list[14:16]
cd2_hour = cd2[11:13]
cd2_minute = cd2[14:16]
if cd_contact_list_date == cd2_date:
if cd_contact_list_month == cd2_month:
if cd_contact_list_year == cd2_year:
if cd_contact_list_hour == cd2_hour or cd_contact_list_hour == cd2_hour[1:2]:
if cd_contact_list_minute == cd2_minute or cd_contact_list_minute == str(int(cd2_minute) + 1):
if current_name.startswith(created_name):
return True
else:
return False
def ensure_link_work(self):
wd = self.app.wd
header = wd.find_element_by_css_selector("h1.clip").text
return header.rstrip()
def ensure_link_type2_work(self):
wd = self.app.wd
header = wd.find_element_by_css_selector("h2").text
return header[0:8]
def open_first_contact_list(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[3]/div/div[1]/a").click()
def create_report_covladeltsy(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Совладельцы']").click()
wd.find_element_by_xpath("//div[@id='divReportCoownersSettings']//button[.='Сформировать']").click()
wd.find_element_by_css_selector("div.toast-title").click()
def create_report_affelir(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Аффилированность']").click()
wd.find_element_by_xpath("//div[@id='divReportAffilationSettings']//button[.='Сформировать']").click()
def create_report_prices_zakazchik(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Цены']").click()
wd.find_element_by_xpath("//label[@for='rb-0']").click()
if not wd.find_element_by_id("rb-0").is_selected():
wd.find_element_by_id("rb-0").click()
#wd.find_element_by_xpath("//label[@for='cb-2']").click()
#if not wd.find_element_by_id("cb-2").is_selected():
# wd.find_element_by_id("cb-2").click()
#wd.find_element_by_xpath("//label[@for='cb-3']").click()
#if not wd.find_element_by_id("cb-3").is_selected():
# wd.find_element_by_id("cb-3").click()
#wd.find_element_by_xpath("//label[@for='cb-4']").click()
#if not wd.find_element_by_id("cb-4").is_selected():
# wd.find_element_by_id("cb-4").click()
wd.find_element_by_xpath("//div[@id='divReportPricesSettings']//button[.='Сформировать']").click()
def create_report_prices_postavschik(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Цены']").click()
wd.find_element_by_xpath("//label[@for='rb-1']").click()
if not wd.find_element_by_id("rb-1").is_selected():
wd.find_element_by_id("rb-1").click()
#wd.find_element_by_xpath("//label[@for='cb-5']").click()
#if not wd.find_element_by_xpath("//label[@for='cb-5']").is_selected():
# wd.find_element_by_xpath("//label[@for='cb-5']").click()
#wd.find_element_by_xpath("//label[@for='cb-6']").click()
#if not wd.find_element_by_xpath("//label[@for='cb-6']").is_selected():
# wd.find_element_by_xpath("//label[@for='cb-6']").click()
wd.find_element_by_xpath("//div[@id='divReportPricesSettings']//button[.='Сформировать']").click()
def create_report_rnpSuppliers(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Поставщик в РНП']").click()
wd.find_element_by_xpath("//div[@id='divRnpSuppliersSettings']//button[.='Сформировать']").click()
def create_report_RnpParticipantsSettings(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='Участник в РНП']").click()
wd.find_element_by_xpath("//div[@id='divRnpParticipantsSettings']//button[.='Сформировать']").click()
def create_report_FasComplaintsSettings(self):
wd = self.app.wd
self.app.wait_smBlock(600)
wd.find_element_by_xpath("//div[@class='panel_header']//p[.='ФАС']").click()
wd.find_element_by_xpath("//div[@id='divFasComplaintsSettings']//button[.='Сформировать']").click()
def save_requesr(self, cd2, text):
wd = self.app.wd
self.app.wait_smBlock(600)
try:
wd.find_element_by_link_text("Сохранить запрос").click()
except:
try:
wd.find_element_by_link_text("Сохранить запрос/Мониторинг").click()
except:
try:
wd.find_element_by_link_text("Сохранить запрос ").click()
except:
wd.find_element_by_link_text("Сохранить запрос/Мониторинг ").click()
wd.find_element_by_id("requestName").click()
wd.find_element_by_id("requestName").clear()
wd.find_element_by_id("requestName").send_keys(text % cd2)
time.sleep(2)
wd.find_element_by_id("requestName").click()
wd.find_element_by_xpath("//div[@id='divSaveRequest']//button[.='Сохранить']").click()
def refresh_page(self):
wd = self.app.wd
wd.refresh()
self.app.wait_smBlock(600)
def contact_from_contact_rep_is_present(self):
wd = self.app.wd
pass
def get_old_contact_list(self):
pass
def delete_report(self):
pass
def delete_first_contact_list(self):
wd = self.app.wd
self.app.wait_smBlock(600)
        # TODO: figure out how to locate the row checkbox reliably; the code below is not quite right
list = []
#for row in wd.find_element_by_xpath("//input[@class='row-cb']"):
# cells = row.find_elements_by_tag_name("td")
# id = cells[0].find_element_by_tag_name("input").get_attribute("data-id")
wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[1]").click()
        if not wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[1]").is_selected():
wd.find_element_by_xpath("//div[@class='panel_layer']/div[2]/table/tbody/tr[1]/td[1]").click()
wd.find_element_by_id("btnDel").click()
wd.find_element_by_xpath("//div[@id='dlgYesNo']//button[.='Да']").click()
|
StarcoderdataPython
|
9649348
|
from communication.tsm.utils import convert_pos_to_embdeding
from communication.dcel.dcel import Dcel
import networkx as nx
class Planarization:
'''Determine the topology of the drawing which is described by a planar embedding.
'''
def __init__(self, G, pos=None):
        if pos is None:
pos = self.get_positions_from_graph(G)
embedding = convert_pos_to_embdeding(G, pos)
self.G = G.copy()
self.dcel = Dcel(G, embedding)
self.dcel.ext_face = self.get_external_face(pos)
self.dcel.ext_face.is_external = True
    # use the actual node positions stored on the input graph
def get_positions_from_graph(self, G):
pos = dict()
for node in G:
pos[node] = (G.nodes[node]['graphics']['x'], G.nodes[node]['graphics']['y'])
return pos
def get_external_face(self, pos):
corner_node = min(pos, key=lambda k: (pos[k][0], pos[k][1]))
sine_vals = {}
for node in self.G.adj[corner_node]:
dx = pos[node][0] - pos[corner_node][0]
dy = pos[node][1] - pos[corner_node][1]
sine_vals[node] = dy / (dx**2 + dy**2)**0.5
other_node = min(sine_vals, key=lambda node:sine_vals[node])
return self.dcel.half_edges[corner_node, other_node].inc
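# Editorial usage sketch (not part of the original module): it assumes a planar
# NetworkX graph whose nodes carry 'graphics' x/y coordinates, which is what
# get_positions_from_graph expects; Dcel comes from communication.dcel.dcel.
#
# G = nx.cycle_graph(4)
# coords = {0: (0, 0), 1: (1, 0), 2: (1, 1), 3: (0, 1)}
# for node, (x, y) in coords.items():
#     G.nodes[node]['graphics'] = {'x': x, 'y': y}
# planar = Planarization(G)
# print(planar.dcel.ext_face)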
|
StarcoderdataPython
|
4900821
|
<filename>company/InfyTQ/Fundamentals/02SET/09numberGame.py<gh_stars>0
def getSum(n):
sum = 0
for digit in str(n):
sum += int(digit)
return sum
def twoDigit(n):
if(1 < n < 100):
return True
else:
return False
def find_max(num1, num2):
    max_num = -1
    matches = []
    if num1 < num2:
        for i in range(num1, num2 + 1):
            sum_of_digits = getSum(i)
            if sum_of_digits % 3 == 0 and twoDigit(i) and i % 5 == 0:
                matches.append(i)
        if matches:
            max_num = max(matches)
    return max_num
# Provide different values for num1 and num2 and test your program.
max_num = find_max(10, 15)
print(max_num)
|
StarcoderdataPython
|
3460400
|
<filename>datacode/panel/did/reg.py
from typing import List, Tuple, Optional
import pandas as pd
from regtools.interact import _interaction_tuple_to_var_name
from regtools import reg_for_each_yvar_and_produce_summary
def diff_reg_for_each_yvar_and_produce_summary(diff_df: pd.DataFrame, yvars: List[str], treated_var: str,
treated_time_var: str = 'After', xvars: Optional[List[str]] = None):
interaction_tuple = (treated_var, treated_time_var)
interaction_varname = _interaction_tuple_to_var_name(interaction_tuple)
all_xvars = [treated_var, treated_time_var]
if xvars is not None:
all_xvars.extend(xvars)
reg_list, summ = reg_for_each_yvar_and_produce_summary(
diff_df,
yvars,
all_xvars,
[treated_var, treated_time_var, interaction_varname],
interaction_tuples=[interaction_tuple]
)
t_name = '$t$-stat'
_add_t_of_last_interaction_to_horizontal_summary(summ.tables[0], interaction_tuple, reg_list, t_name=t_name)
column_order = [treated_var, treated_time_var, interaction_varname, t_name, 'Controls', 'Adj-R2', 'N']
summ.tables[0] = summ.tables[0][column_order]
return reg_list, summ
def _add_t_of_last_interaction_to_horizontal_summary(sdf: pd.DataFrame, interaction_tuple: Tuple[str, str],
reg_list, t_name: str = '$t$-stat') -> None:
"""
    Note: modifies ``sdf`` in place.
    Args:
        sdf: horizontal summary DataFrame to which the t-stat column is added.
        interaction_tuple: (treated_var, treated_time_var) pair whose interaction term is summarized.
        reg_list: fitted regression results, one per row of ``sdf``.
        t_name: name of the added t-stat column.
    Returns:
        None
"""
interaction_varname = _interaction_tuple_to_var_name(interaction_tuple)
interaction_stderr = pd.Series([r.bse[interaction_varname] for r in reg_list], index=sdf.index)
sdf[t_name] = sdf[interaction_varname].apply(lambda x: float(x.strip('*'))) / interaction_stderr
sdf[t_name] = sdf[t_name].apply(lambda x: f'{x:.2f}')
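# Editorial usage sketch (hypothetical column names; requires regtools and a
# long-format DataFrame of first differences with a treatment dummy and an
# 'After' dummy):
#
# diff_df = pd.DataFrame({
#     'ret_diff': [...], 'vol_diff': [...],
#     'Treated': [...], 'After': [...], 'size': [...],
# })
# reg_list, summ = diff_reg_for_each_yvar_and_produce_summary(
#     diff_df, yvars=['ret_diff', 'vol_diff'], treated_var='Treated', xvars=['size'])
# print(summ.tables[0])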
|
StarcoderdataPython
|
4892899
|
from functools import wraps
from importlib import resources as il_resources
import logging
import os
from zygoat.components import Component
from zygoat.constants import Phases
from zygoat.components import resources
log = logging.getLogger()
class FileComponent(Component):
"""
Use this when you want to create a file component that
tracks the contents of the file that ``filename`` points to.
    Note that this file must exist in the supplied resource package.
Several class properties are available to configure this component:
:param filename: Name of the file inside of the resource package to copy
:type filename: str
:param resource_pkg: The python package that contains the static file to read.
:param base_path: A path to prepend to the output filename, i.e. ``frontend/static/``
:type base_path: str, optional
:param overwrite: If the update phase should recreate the file, defaults to True
:type overwrite: bool, optional
"""
resource_pkg = resources
base_path = "./"
overwrite = True
executable = False
def check_setup(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
if not self.filename:
raise NotImplementedError("You must specify cls.filename!")
return f(self, *args, **kwargs)
return wrapper
@check_setup
def create(self):
log.info(f"Creating {self.path}")
os.makedirs(self.base_path, exist_ok=True)
with open(self.path, "w") as f:
f.write(il_resources.read_text(self.resource_pkg, self.filename))
if self.executable:
os.chmod(self.path, 0o755)
@check_setup
def update(self):
self.call_phase(Phases.CREATE, force_create=self.overwrite)
@check_setup
def delete(self):
log.warning(f"Deleting {self.path}")
os.remove(self.path)
try:
os.rmdir(self.base_path)
log.warning(f"Deleting {self.base_path}")
except OSError:
log.warning(f"Skipping {self.base_path}")
@property
@check_setup
def installed(self):
return os.path.exists(self.path)
@property
def path(self):
return os.path.join(self.base_path, self.filename)
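# Illustrative sketch (not part of zygoat itself): a subclass that copies a
# "Dockerfile" shipped in the resource package into a "backend/" directory.
# The filename and base_path values are invented for the example; any file used
# this way must actually exist in the supplied resource package.
#
# class DockerfileExample(FileComponent):
#     filename = "Dockerfile"
#     base_path = "backend/"
#     overwrite = False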
|
StarcoderdataPython
|
6677528
|
from baseline.tf.lm.train import *
from baseline.tf.lm.model import *
|
StarcoderdataPython
|
6686733
|
import os
from PIL import Image
def file_expand_pic(file,font_height):
img = Image.open(file)
w, h = img.size
line_count = int(h/font_height)
to_img = Image.new(mode='RGBA',size=(w,line_count*256))
for line in range(line_count):
to_img.paste(img.crop(box=(0, font_height*line, w, font_height*(line+1))),
(0, 256*line))
to_img.save(file)
for root, dirs, files in os.walk(r'textures\font_plus'):
for name in files:
file_path = os.path.join(root, name)
if 'unicode_page_' in name:
file_expand_pic(file_path,16)
elif name == 'accented.png':
file_expand_pic(file_path, 12)
else:
file_expand_pic(file_path, 8)
|
StarcoderdataPython
|
3211166
|
<gh_stars>1-10
import time
import rospy
import rospkg
import os
import sys
import numpy as np
import tensorflow as tf
from styx_msgs.msg import TrafficLight
from io import StringIO
MINIMUM_CONFIDENCE = 0.4
class TLClassifier(object):
def __init__(self, simulator):
# current_path = os.path.dirname(os.path.realpath(__file__))
self.simulator_used = simulator
# We support two different frozen graphes which are trained with
# real car camera data and with data from the simulator. Depending
# where the application is executed (car or simulator) different
# models are loaded.
if (self.simulator_used == 1):
model_path = 'light_classification/classifiers/inference_graph_sim.pb'
else:
model_path = 'light_classification/classifiers/inference_graph_real.pb'
rospy.logwarn('model path {0}'.format(model_path))
detection_graph = self.load_graph(model_path)
# The input placeholder for the image.
# `get_tensor_by_name` returns the Tensor with the associated name in the Graph.
self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
self.detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
self.detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
# The classification of the object (integer id).
self.detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
self.sess = tf.Session(graph=detection_graph)
def load_graph(self, graph_file):
# Loads a frozen TF inference graph
graph = tf.Graph()
with graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(graph_file, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return graph
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# Load a sample image
image_expanded = np.expand_dims(image, axis=0)
result = TrafficLight.UNKNOWN
# Perform detection
(boxes, scores, classes) = self.sess.run([self.detection_boxes, self.detection_scores,
self.detection_classes],
feed_dict={self.image_tensor: image_expanded})
# Remove unnecessary dimensions
scores = np.squeeze(scores)
classes = np.squeeze(classes)
# Debug classifications
# rospy.logwarn('TF classes {0} and scores {1}'.format(classes, scores))
# Find traffic light with highest confidence level
conv_level = MINIMUM_CONFIDENCE
score = 0
for i in range(boxes.shape[0]):
if scores[i] > conv_level:
conv_level = scores[i]
if classes[i] == 2: #'Green':
result = TrafficLight.GREEN
elif classes[i] == 4: #'Red':
result = TrafficLight.RED
elif classes[i] == 3: #'Yellow':
result = TrafficLight.YELLOW
score = scores[i]
# Debug traffic light output - Red: 0, 1: Yellow, 2: Green, 4: Unknown
# rospy.logwarn('Traffic light {0} ({1})'.format(result, score))
return result
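# Rough usage sketch (editorial; the frozen graphs listed above must exist on disk
# and the image path below is hypothetical):
#
# import cv2
# classifier = TLClassifier(simulator=1)
# image = cv2.imread('sample_camera_frame.jpg')   # numpy array from the camera
# state = classifier.get_classification(image)
# rospy.logwarn('detected state: {0}'.format(state))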
|
StarcoderdataPython
|
8067328
|
<gh_stars>0
#!/usr/bin/env python
import time
class ProfilerStopwatch(object):
"""Time counter class to help us optimize performance
A quick walltime performance counter. Can run multiple clocks at the
same time for different classes of things.
"""
_default_tag = 'DEFAULT'
def __init__(self):
"""Initialize internal state"""
self.reset_all()
def start(self, tag=_default_tag):
"""Start the stopwatch for a given chunk"""
now = time.time()
if tag not in self._start:
self._start[tag] = now
def stop(self, tag=_default_tag):
"""Stop the stopwatch for a given chunk"""
now = time.time()
if tag in self._start:
if tag not in self._chunks:
self._chunks[tag] = [ ]
self._chunks[tag].append((self._start[tag], now))
del self._start[tag]
def reset_all(self):
"""Resets all stopwatches"""
self._start, self._chunks = { }, { }
def profile_call(self, func, *args, **kwargs):
"""Wraps and times a function call"""
tag = "call(s) to {}".format(func.__name__)
self.start(tag)
results = func(*args, **kwargs)
self.stop(tag)
return results
def report(self):
"""Report on whatever's still running"""
total = dict([(tag, sum([c[1] - c[0] for c in self._chunks[tag]]))
for tag in self._chunks])
maxlen = max([len(tag) for tag in self._chunks])
print "Time used so far:"
for key in sorted(total.keys(), key=lambda k: -total[k]):
print " {:.3f} sec ".format(total[key]),
if key[:10] == "call(s) to":
print "{:d}".format(len(self._chunks[key])),
print key
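# Small usage sketch (editorial addition; the tag name below is arbitrary):
if __name__ == "__main__":
    watch = ProfilerStopwatch()
    watch.start("io")
    time.sleep(0.05)
    watch.stop("io")
    watch.profile_call(sorted, range(1000))
    watch.report()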
|
StarcoderdataPython
|
1992494
|
from werkzeug.security import check_password_hash, generate_password_hash
import mysql.connector, random
# Connect to database
db = mysql.connector.connect(
host="localhost",
user="root",
password="password",
database="testing",
auth_plugin="mysql_native_password",
charset="utf8mb4"
)
cursor = db.cursor(buffered=True)
# Load initial data
# Note: If creating or overriding tables, uncomment the first few lines of schema.sql
with open("schema.sql") as f:
sql = f.read()
cursor.execute(sql, multi=True)
db.commit()
# Check if username - password combo is valid
def check_username_password(user, password):
cursor.execute("SELECT * FROM users WHERE username = '" + user + "';")
user = cursor.fetchone()
if user:
if check_password_hash(user[2], password):
return user
# Check if email - password combo is valid
def check_username_email(email, password):
cursor.execute("SELECT * FROM users WHERE email = '" + email + "';")
user = cursor.fetchone()
if user:
if check_password_hash(user[2], password):
return user
# Detect if email already exists
def check_email(email):
cursor.execute("SELECT * FROM users WHERE email = '" + email + "';")
if cursor.fetchone():
return True
return False
# Check if username already exists
def check_username(name):
cursor.execute("SELECT * FROM users WHERE username = '" + name + "';")
if cursor.fetchone():
return True
return False
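# Editorial note (not part of the original module): mysql.connector also accepts
# parameterized queries, which the INSERT statements further down already use.
# The lookup above could equivalently be written as, for example:
#
#     cursor.execute("SELECT * FROM users WHERE username = %s;", (name,))
#     return cursor.fetchone() is not None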
# Get username from id
def get_name(id):
if not id == False:
cursor.execute("SELECT username FROM users WHERE id = '" + id + "'")
return cursor.fetchone()[0]
return False
# Create an account from register data
def create_account(username, password, email, id):
cursor.execute("INSERT INTO users VALUES (%s, %s, %s, %s)", (username, email, generate_password_hash(password), id))
db.commit()
def change_password(id, password):
cursor.execute("UPDATE users SET password = '" + generate_password_hash(password) + "' WHERE id = '" + id + "';")
db.commit()
def change_email(id, email):
cursor.execute("UPDATE users SET email = '" + email + "' WHERE id = '" + id + "';")
db.commit()
# Deleting an account
def delete_account(id):
# Remove all user data from tables
cursor.execute("DELETE FROM users WHERE id = '" + id + "';")
cursor.execute("DELETE FROM user_rooms WHERE userID = '" + id + "';")
# Remove all rooms where the deleted account is the owner
cursor.execute("SELECT code FROM rooms WHERE owner = '" + id + "';")
rooms = cursor.fetchall()
for room in rooms:
delete_room(room[0])
db.commit()
# Creating a room
def create_room(room_name, code, owner, color):
# Create room in main room table
cursor.execute("SELECT * FROM rooms WHERE owner = '" + owner + "';")
# Ensure that user doesn't already own their max of five rooms
if len(cursor.fetchall()) >= 5:
return False
cursor.execute("INSERT INTO rooms VALUES (%s, %s, %s)", (room_name, code, owner))
# Join the room as owner
cursor.execute("INSERT INTO user_rooms VALUES (%s, %s, %s, %s)", (get_name(owner), owner, code, room_name))
# Holds all of the users in a room
cursor.execute("CREATE TABLE " + code + "_users (user TEXT NOT NULL, id TEXT NOT NULL, color TEXT NOT NULL)")
# Create chatlog
cursor.execute("CREATE TABLE " + code + "_log (user TEXT NOT NULL, time TEXT NOT NULL, message TEXT NOT NULL, recipient TEXT NOT NULL, id TEXT NOT NULL)")
# Add creator to room members
cursor.execute("INSERT INTO " + code + "_users VALUES (%s, %s, %s)", (get_name(owner), owner, color))
# Add starting messages
cursor.execute("INSERT INTO " + code + "_log VALUES (%s, %s, %s, %s, %s)", ("server", " ", "This is the beginning of " + room_name, "all", " "))
db.commit()
return True
# Join a room if applicable
def join_room(name, id, code, color):
# Check if room code is valid
cursor.execute("SELECT * FROM rooms WHERE code = '" + code + "';")
if not cursor.fetchone():
return "Room code " + code + " doesn't exist"
# Check if user is already in the selected room
cursor.execute("SELECT roomID FROM user_rooms WHERE userID = '" + id + "';")
if code in [i[0] for i in cursor.fetchall()]:
return "You are already in this room"
    # Retrieve name of the new room
cursor.execute("SELECT name FROM rooms WHERE code = '" + code + "';")
room_name = cursor.fetchone()[0]
# Double check that room actually exists
if room_name:
# Join room
cursor.execute("INSERT INTO user_rooms VALUES (%s, %s, %s, %s)", (name, id, code, room_name))
cursor.execute("INSERT INTO " + code + "_users VALUES (%s, %s, %s)", (name, id, color))
db.commit()
return "Room " + room_name + " successfully joined"
else:
return "Room " + code + " does not exist"
# Delete room from room code
def delete_room(code):
# Invalidate room code and delete all room data
cursor.execute("DELETE FROM rooms WHERE code = '" + code + "';")
# Disable all users from joining the room
cursor.execute("DELETE FROM user_rooms WHERE roomID = '" + code + "';")
# Delete chat log and user data table
cursor.execute("DROP TABLE " + code + "_users;")
cursor.execute("DROP TABLE " + code + "_log;")
db.commit()
# Delete user from a room
def leave_room(id, code):
cursor.execute("DELETE FROM user_rooms WHERE userID = '" + id + "' AND roomID = '" + code + "';")
try:
cursor.execute("DELETE FROM " + code + "_users WHERE id = '" + id + "';")
except:
pass
db.commit()
# Retrieve the name and code of all rooms a user id is part of
def get_rooms(id):
cursor.execute("SELECT * FROM user_rooms WHERE userID = '" + id + "';")
data = []
for i in [[i[3], i[2]] for i in cursor.fetchall()]:
cursor.execute("SELECT owner FROM rooms WHERE code = '" + i[1] + "';")
data.append([i[0], i[1]])
return data
# Retrieve every room code
def get_codes():
cursor.execute("SELECT code FROM rooms;")
return cursor.fetchall()
# Retrieves all room codes associated with a certain user id
def get_my_codes(id):
cursor.execute("SELECT roomID FROM user_rooms WHERE userID = '" + id + "';")
return cursor.fetchall()
# Check if user id is owner of the given room code
def is_owner(id, code):
cursor.execute("SELECT * FROM rooms WHERE owner = '" + id + "' AND code = '" + code + "';")
if cursor.fetchone():
return True
return False
# Get the name of all users in a room + their color
# If id is given, retrive all users except for the one associated with id
def get_users(code, id=False):
if id == False:
cursor.execute("SELECT * FROM " + code + "_users;")
return [[i[0], i[2]] for i in cursor.fetchall()]
cursor.execute("SELECT * FROM " + code + "_users WHERE NOT id = '" + id + "';")
return [[i[0], i[2]] for i in cursor.fetchall()]
# Remove user from a room
def remove_user(name, code):
cursor.execute("DELETE FROM " + code + "_users WHERE user = '" + name + "';")
cursor.execute("DELETE FROM user_rooms WHERE username = '" + name + "' AND roomID = '" + code + "';")
db.commit()
# Get user id from username and room code
def get_id(name, code):
cursor.execute("SELECT userID FROM user_rooms WHERE username = '" + name + "' AND roomID = '" + code + "';")
return cursor.fetchone()
# Retrieve user id associated with an email
def get_id_from_email(email):
cursor.execute("SELECT id FROM users WHERE email = '" + email + "';")
try:
return cursor.fetchone()[0]
except TypeError:
return False
# Get room name from join code
def get_room_name(code):
cursor.execute("SELECT name FROM rooms WHERE code = '" + code + "';")
return cursor.fetchone()[0]
# Insert message into chatlog table
def log_message(code, user, time, message, id, recipient="all"):
cursor.execute("INSERT INTO " + code + "_log VALUES (%s, %s, %s, %s, %s)", (user, time, message, recipient, id))
db.commit()
# Ensure that database isn't too large
check_rows(code + "_log")
# Retrieve all messages from the chatlog which were sent to a certain user id
def get_messages(code, id):
cursor.execute("SELECT * FROM " + code + "_log WHERE recipient = 'all' OR recipient = '" + id + "';")
return cursor.fetchall()
# Retrieve color associated with id in a room
def get_color(code, id):
cursor.execute("SELECT color FROM " + code + "_users WHERE id = '" + id +"';")
try:
return cursor.fetchone()[0]
# If person is no longer in room, use default color of darkslategray
except TypeError:
return "darkslategray"
# Ensure that no chatlog table has more than 250 messages stored
def check_rows(table):
# Retrieve row count
cursor.execute("SELECT COUNT(*) FROM " + table + ";")
rows = cursor.fetchone()[0]
if rows > 250:
# Delete 50 rows if rowcount is above 250
rows_to_delete = 50
cursor.execute("DELETE FROM " + table + " LIMIT " + str(rows_to_delete))
db.commit()
# Allow the user to change their color in a room
def change_color(code, id, color):
cursor.execute("UPDATE " + code + "_users SET color = '" + color + "' WHERE id = '" + id + "';")
db.commit()
# Remove user from room if their account is deleted
def delete_if_deleted(room, id):
cursor.execute("DELETE FROM " + room + "_users WHERE user = '" + id + "';")
db.commit()
|
StarcoderdataPython
|
5008832
|
import csv
import pandas as pd
'''
E-CUBE review comments (Marwane):
    - General comments:
        - Organise the folder into sub-folders holding files of a distinct nature. Typically:
            - data: holds all the data, including the scraped data
            - lib: holds all the Python scripts
        - Rename the scripts following an easy-to-understand naming scheme, with an alphanumeric numbering that shows
        in which order they are/should be run
            - e.g. all the Futures scripts could sit in one block A1 and be named A11_FuturesEur, A12_FuturesAsie, for instance
        - Put the functions in dedicated scripts and call them from main scripts (do not mix function definitions and
        function calls in the same script)
    - Comments specific to this script:
        - What does this script do?
        - Lines 14 to 26 can easily be written in a stable way in 3 lines (see the added code)
        - Why define truncated names in dict_month if you only take the first 3 letters of the string (l.41)?
        You might as well define the full names directly in dict_month :)
        - Instead of using the open and csv_reader/csv_writer functions, use read_csv and to_csv from the pandas library
        - Do the names of the CSV files you open/close change each time you run the script? If so, it is worth
        parameterising the file name
'''
### Addition by Marwane ###
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
month_numbers = ["%.2d" % i for i in range(1, len(months)+1)]
dict_month = {months[i]: month_numbers[i] for i in range(len(months))}
###
dict_month = {}
dict_month['Jan']= '01'
dict_month['Feb']= '02'
dict_month['Mar']= '03'
dict_month['Apr']= '04'
dict_month['May']= '05'
dict_month['Jun']= '06'
dict_month['Jul']= '07'
dict_month['Aug']= '08'
dict_month['Sep']= '09'
dict_month['Oct']= '10'
dict_month['Nov']= '11'
dict_month['Dec']= '12'
def convertdate(string):
# print(string)
if string[0] == '-':
return '-'
convmonth = dict_month[string[:3]]
i = 0
ind = 0
for car in string :
if car == ',':
ind = i
i+=1
convday = string[4:ind]
if len(convday)==1:
convday = '0'+convday
convhour = string[ind+2:ind+7]
return f'2021-{convmonth}-{convday}-{convhour}'
fichier = open('../Data/donnees-navires/list-vessels-2021-04-30-old.csv','r')
fichiercsv = csv.reader(fichier, delimiter=',')
out = open("../Data/donnees-navires/list-vessels-2021-04-30.csv", "w", newline='')
outw = csv.writer(out)
listecsv = []
i = 0
for ligne in fichiercsv:
if i > 0 :
ligne[9] = convertdate(ligne[9])
ligne[12] = convertdate(ligne[12])
listecsv.append(ligne)
    i += 1
outw.writerows(listecsv)
out.close()
fichier.close()
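# Editorial sketch (not part of the original script): the pandas-based variant the
# review comments above suggest. Column positions 9 and 12 and the file names are
# taken from the code above; convertdate is reused unchanged.
#
# df = pd.read_csv('../Data/donnees-navires/list-vessels-2021-04-30-old.csv')
# for col_idx in (9, 12):
#     df.iloc[:, col_idx] = df.iloc[:, col_idx].apply(convertdate)
# df.to_csv('../Data/donnees-navires/list-vessels-2021-04-30.csv', index=False)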
|
StarcoderdataPython
|
1716135
|
<reponame>jkrueger/phosphorus_mk2
def init():
import bpy
from . import (_phosphoros)
import os.path
path = os.path.dirname(__file__)
user_path = os.path.dirname(os.path.abspath(bpy.utils.user_resource('CONFIG', '')))
resource_path = os.path.dirname(os.path.abspath(bpy.utils.resource_path('LOCAL')))
_phosphoros.init(path, resource_path, user_path, bpy.app.background)
def exit():
from . import (_phosphoros)
_phosphoros.exit()
def create(engine, data, region=None, v3d=None, rv3d=None, preview_osl=False):
from . import (_phosphoros)
import bpy
data = data.as_pointer()
prefs = bpy.context.preferences.as_pointer()
if region:
region = region.as_pointer()
if v3d:
v3d = v3d.as_pointer()
if rv3d:
rv3d = rv3d.as_pointer()
engine.session = _phosphoros.create(
engine.as_pointer(), prefs, data, region, v3d, rv3d, preview_osl)
def free(engine):
if hasattr(engine, "session"):
if engine.session:
from . import (_phosphoros)
_phosphoros.free(engine.session)
del engine.session
def reset(engine, depsgraph, data):
from . import (_phosphoros)
if hasattr(engine, "session"):
depsgraph = depsgraph.as_pointer()
data = data.as_pointer()
_phosphoros.reset(engine.session, depsgraph, data)
def render(engine, depsgraph):
from . import (_phosphoros)
if hasattr(engine, "session"):
_phosphoros.render(engine.session, depsgraph.as_pointer())
|
StarcoderdataPython
|
11263187
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 7 16:56:44 2018
@author: lijun
"""
"""
This model is based on Tensorflow-1.14.
How to use? out1,out2=CRMSS(img1,img2,reuse=False)
img1 and img2 are inputs that are normalized between 0 and 1.
out1 and out2 are the corresponding cloud removal results for img1 and img2.
If you use this code for your research, please cite us accordingly:
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., 2021. Deep learning based thin cloud removal fusing vegetation red edge and short wave infrared spectral information for Sentinel-2A imagery. Remote Sens. 13(1), 157.
"""
import tensorflow as tf
def make_var(name, shape, trainable = True):
return tf.get_variable(name, shape, trainable = trainable)
def conv2d(input_, output_dim, kernel_size=3, stride=2, padding = "SAME", name = "conv2d", biased = False):
input_dim = input_.get_shape()[-1]
with tf.variable_scope(name):
kernel = make_var(name = 'weights', shape=[kernel_size, kernel_size, input_dim, output_dim])
output = tf.nn.conv2d(input_, kernel, [1, stride, stride, 1], padding = padding)
if biased:
biases = make_var(name = 'biases', shape = [output_dim])
output = tf.nn.bias_add(output, biases)
return output
def deconv2d(input_, output_dim, kernel_size=4, stride=2, padding = "SAME", name = "deconv2d"):
input_dim = input_.get_shape()[-1]
batchsize=int(input_.get_shape()[0])
input_height = int(input_.get_shape()[1])
input_width = int(input_.get_shape()[2])
with tf.variable_scope(name):
kernel = make_var(name = 'weights', shape = [kernel_size, kernel_size, output_dim, input_dim])
output = tf.nn.conv2d_transpose(input_, kernel, [batchsize, input_height * stride, input_width * stride, output_dim], [1, 2, 2, 1], padding = "SAME")
return output
def instance_norm(input_, name="instance_norm"):
    return tf.contrib.layers.instance_norm(input_, scope=name)
def lrelu(x, leak=0.2, name = "lrelu"):
    return tf.maximum(x, leak*x)
def relu(x, name="relu"):
    # plain ReLU wrapper: the decoder below calls relu(), which was otherwise undefined in this file
    return tf.nn.relu(x)
def CRMSS(image,image1, gf_dim=64, reuse=False, name="generator"):
output_dim=image.get_shape()[-1]
output_dim1 = image1.get_shape()[-1]
with tf.variable_scope(name):
# image is 256 x 256 x input_c_dim
if reuse:
tf.get_variable_scope().reuse_variables()
else:
assert tf.get_variable_scope().reuse is False
e1 = lrelu(conv2d(image, gf_dim,stride=1, name='g_e1_conv'))
e0 = lrelu(conv2d(image1, gf_dim,stride=1, name='g_e0_conv'))
e2 = lrelu(instance_norm(conv2d(e1, gf_dim*2, name='g_e2_conv'), 'g_bn_e2'))
e20= tf.concat([e2,e0],axis=3)
e3 = lrelu(instance_norm(conv2d(e20, gf_dim*4, name='g_e3_conv'), 'g_bn_e3'))
e4 = lrelu(instance_norm(conv2d(e3, gf_dim*8, name='g_e4_conv'), 'g_bn_e4'))
e5 = lrelu(instance_norm(conv2d(e4, gf_dim*8, name='g_e5_conv'), 'g_bn_e5'))
e6 = lrelu(instance_norm(conv2d(e5, gf_dim*8, name='g_e6_conv'), 'g_bn_e6'))
d1 = relu(instance_norm(deconv2d(e6, gf_dim*8, name='g_d1'),'d_bn_d0'))
d1 = tf.concat([d1, e5],3)
d2 = relu(instance_norm(deconv2d(d1, gf_dim*8, name='g_d2'),'g_bn_d1'))
d2 = tf.concat([d2,e4], 3)
d3 = relu(instance_norm(deconv2d(d2, gf_dim*4, name='g_d3'),'g_bn_d2'))
d3 = tf.concat([d3, e3], 3)
d4 = relu(instance_norm(deconv2d(d3, gf_dim*2, name='g_d4'),'g_bn_d3'))
d4 = tf.concat([d4,e20], 3)
d5 = relu(instance_norm(deconv2d(d4, gf_dim, name='g_d5'),'g_bn_d4'))
d5 = tf.concat([d5,e1], 3)
out1 = conv2d(d4,output_dim1,stride=1, name='out1_conv')
out = conv2d(d5,output_dim,stride=1, name='out_conv')
return tf.nn.sigmoid(out),tf.nn.sigmoid(out1)
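# Editorial usage sketch (shapes are illustrative; deconv2d above needs a fixed
# batch size, and the channel counts are assumptions, not values from the paper):
#
# img1 = tf.placeholder(tf.float32, [1, 256, 256, 4])    # full-resolution cloudy input
# img2 = tf.placeholder(tf.float32, [1, 128, 128, 6])    # half-resolution auxiliary bands
# out1, out2 = CRMSS(img1, img2, reuse=False)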
|
StarcoderdataPython
|
3202587
|
<filename>exdir/core/validation.py
from enum import Enum
import os
try:
import pathlib
except ImportError as e:
try:
import pathlib2 as pathlib
except ImportError:
raise e
from . import constants as exob
VALID_CHARACTERS = ("abcdefghijklmnopqrstuvwxyz1234567890_-.")
class NamingRule(Enum):
SIMPLE = 1
STRICT = 2
THOROUGH = 3
NONE = 4
def _assert_unique(parent_path, name):
try:
name_str = str(name)
except UnicodeEncodeError:
name = name.encode('utf8')
if (parent_path / name).exists():
raise RuntimeError(
"'{}' already exists in '{}'".format(name, parent_path)
)
def _assert_nonempty(parent_path, name):
try:
name_str = str(name)
except UnicodeEncodeError:
name_str = name.encode('utf8')
if len(name_str) < 1:
raise NameError("Name cannot be empty.")
def _assert_nonreserved(name):
# NOTE ignore unicode errors, they are not reserved
try:
name_str = str(name)
except UnicodeEncodeError:
name_str = name.encode('utf8')
reserved_names = [
exob.META_FILENAME,
exob.ATTRIBUTES_FILENAME,
exob.RAW_FOLDER_NAME
]
if name_str in reserved_names:
raise NameError(
"Name cannot be '{}' because it is a reserved filename in Exdir.".format(name_str)
)
if pathlib.PureWindowsPath(name_str).is_reserved():
raise NameError(
"Name cannot be '{}' because it is a reserved filename in Windows.".format(name_str)
)
def _assert_valid_characters(name):
try:
name_str = str(name)
except UnicodeEncodeError:
name_str = name.encode('utf8')
for char in name_str:
if char not in VALID_CHARACTERS:
raise NameError(
"Name '{}' contains invalid character '{}'.\n"
"Valid characters are:\n{}".format(name_str, char, VALID_CHARACTERS)
)
def unique(parent_path, name):
_assert_nonempty(parent_path, name)
_assert_unique(parent_path, name)
def minimal(parent_path, name):
_assert_nonempty(parent_path, name)
_assert_nonreserved(name)
_assert_unique(parent_path, name)
def strict(parent_path, name):
_assert_nonreserved(name)
_assert_unique(parent_path, name)
_assert_valid_characters(name)
def thorough(parent_path, name):
_assert_nonempty(parent_path, name)
_assert_nonreserved(name)
try:
name_str = str(name)
except UnicodeEncodeError:
name_str = name.encode('utf8')
name_lower = name_str.lower()
_assert_valid_characters(name_lower)
if isinstance(pathlib.Path(parent_path), pathlib.WindowsPath):
# use _assert_unique if we're already on Windows, because it is much faster
# than the test below
_assert_unique(parent_path, name)
return
# os.listdir is much faster here than os.walk or parent_path.iterdir
for item in os.listdir(str(parent_path)):
if name_lower == item.lower():
raise RuntimeError(
"A directory with name (case independent) '{}' already exists "
" and cannot be made according to the naming rule 'thorough'.".format(name)
)
def none(parent_path, name):
pass
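# Editorial usage sketch: these validators are normally invoked by exdir itself when
# objects are created, but they can be called directly; the directory and name below
# are arbitrary examples.
#
# parent = pathlib.Path(".")
# thorough(parent, "my_dataset")   # raises NameError/RuntimeError for invalid or clashing names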
|
StarcoderdataPython
|
6436786
|
<reponame>tkoyama010/pyvista-doc-translations
from pyvista import examples
dataset = examples.download_topo_global() # doctest:+SKIP
#
# This dataset is used in the following examples:
#
# * :ref:`surface_normal_example`
|
StarcoderdataPython
|
3594866
|
"""Main package to interface with Zotero API."""
import datetime
import logging
import os
import requests
NEW_VER = datetime.datetime.today().strftime("%Y%m%d")
logger = logging.getLogger(__name__)
class Zoter:
"""Class for interacting with Zotero API."""
def __init__(self, user_id: str = os.environ.get("ZOTERO_USER_ID", ""),
api_key: str = os.environ.get("ZOTERO_API_KEY", "")):
"""
In order for you to use this class, you need to generate a Zotero API key.
Login to Zotero web interface -> Settings -> Feeds/API -> Create new private key
Then add your zotero userid (a string of numbers!) and api key as environment
variables ZOTERO_USER_ID or ZOTERO_API_KEY, respectively. Or you can just pass in
your user id and api_key to this class.
:param user_id: Zotero user id. Defaults to ZOTERO_USER_ID environment variable.
:param api_key: Zotero API key. Defaults to ZOTERO_API_KEY environment variable.
"""
if (not user_id) or (not api_key):
raise RuntimeError(
"In order for you to use this class, you need to generate a Zotero API key."
"Login to Zotero web interface -> Settings -> Feeds/API -> Create new private key"
"Then add your zotero userid (a string of numbers!) and api key as environment"
"variables ZOTERO_USER_ID or ZOTERO_API_KEY, respectively. Or you can just pass in"
"your user id and api_key to this class.")
self.session = requests.Session()
self.session.headers = {"Zotero-API-Key": api_key} # type: ignore
self.user_id = user_id
def __enter__(self):
"""Support for "with" context."""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Support for "with" context."""
self.session.close()
def get_my_publications(self) -> list:
"""
Return a list of publications.
:return: List of publications for user_id.
"""
url = "https://api.zotero.org/users/%s/publications/items" % self.user_id
total = float("inf")
start = 0
items = []
while start < total:
response = self.session.get(url, params={"start": start, "limit": 100})
d = response.json()
items.extend(d)
total = int(response.headers["Total-Results"])
start += len(d)
logger.debug("start = %d, total = %d", start, total)
return items
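# Usage sketch (editorial; requires ZOTERO_USER_ID and ZOTERO_API_KEY to be set as
# described in the class docstring):
if __name__ == "__main__":
    with Zoter() as client:
        publications = client.get_my_publications()
        print("fetched {} publications".format(len(publications)))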
|
StarcoderdataPython
|
3271732
|
<reponame>das08/kuRakutanBot
import module.func as fn
command = {
"help": fn.helps,
"Help": fn.helps,
"ヘルプ": fn.helps,
"テーマ変更": fn.selectTheme,
"きせかえ": fn.selectTheme,
"着せ替え": fn.selectTheme,
"テーマ": fn.selectTheme,
"色テーマ": fn.selectTheme,
"色テーマ変更": fn.selectTheme,
"theme": fn.selectTheme,
"色": fn.selectTheme,
"色変更": fn.selectTheme,
"はんてい詳細": fn.rakutanHantei,
"詳細": fn.rakutanHantei,
"判定": fn.rakutanHantei,
"判定詳細": fn.rakutanHantei,
"楽単詳細": fn.rakutanHantei,
"お気に入り": fn.getFavList,
"お気に入り一覧": fn.getFavList,
"おきにいり": fn.getFavList,
"fav": fn.getFavList,
"favs": fn.getFavList,
"楽単おみくじ": fn.normalOmikuji,
"おみくじ 楽単": fn.normalOmikuji,
"おみくじ": fn.normalOmikuji,
"楽単": fn.normalOmikuji,
"人社おみくじ": fn.sorry,
"おみくじ 人社": fn.sorry,
"人社": fn.sorry,
"鬼単おみくじ": fn.oniOmikuji,
"おみくじ 鬼単": fn.oniOmikuji,
"鬼単": fn.oniOmikuji,
"鬼": fn.oniOmikuji,
"お問い合わせ": fn.inquiry,
"ユーザ認証": fn.verification,
"ユーザー認証": fn.verification,
"PandA": fn.cpanda,
"panda": fn.cpanda,
"@info": fn.cpanda,
"CB": fn.sorry,
"京大楽単bot": fn.showVersion,
"d@s08": fn.checkKakomon,
"myuid": fn.myUID,
"@set:gold": fn.setRichMenu,
"@set:silver": fn.setRichMenu,
"@set:default": fn.setRichMenu,
"@theme:default": fn.changeTheme,
"@theme:yellow": fn.sorry,
"@theme:blue": fn.sorry,
"@theme:gold": fn.unavailable
}
|
StarcoderdataPython
|
8141767
|
<gh_stars>0
"""Nox sessions.
Things I might want to consider:
********************************
* safety
* typeguard
* codecov
"""
import os
import nox
from nox.sessions import Session
PACKAGE = 'peregrinus'
nox.options.sessions = 'lint', 'mypy', 'unit_tests', 'doc_tests', 'wheel'
locations = 'src', 'tests', 'docs/source/conf.py', 'noxfile.py'
@nox.session(python='3.9')
def wheel(this_session: Session) -> None:
"""Build the wheel package."""
this_session.run('pip', 'wheel', '-w', 'dist', '.')
@nox.session(python='3.9')
def mypy(this_session: Session) -> None:
"""Check types using mypy."""
args = this_session.posargs or locations
this_session.install('mypy>=0.910', '.')
this_session.run('mypy', *args)
@nox.session(python='3.9')
def black(this_session: Session) -> None:
"""Format code with black."""
args = this_session.posargs or locations
this_session.install('black')
this_session.run('black', *args)
@nox.session(python='3.9')
def unit_tests(this_session: Session) -> None:
"""Run unit tests with pytest."""
args = this_session.posargs
this_session.install('-r', 'dev-requirements/pytest.txt', '.')
this_session.run('pytest', *args)
@nox.session(python='3.9')
def doc_tests(this_session: Session) -> None:
"""Run doc-tests inside docstrings."""
args = this_session.posargs or [PACKAGE]
this_session.install('-r', 'dev-requirements/xdoctest.txt', '.')
this_session.run('python', '-m', 'xdoctest', *args)
@nox.session(python='3.9')
def lint(this_session: Session) -> None:
"""Run static code analyzer."""
args = this_session.posargs or locations
this_session.install('-r', 'dev-requirements/flake8.txt', '.')
this_session.run('pflake8', *args)
@nox.session(python='3.9')
def docs(this_session: Session) -> None:
"""Create project documentation with sphinx."""
args = this_session.posargs or ['docs/source', 'docs/build']
this_session.install('-r', 'dev-requirements/sphinx.txt', '.')
this_session.run('sphinx-build', *args)
@nox.session(python='3.9')
def coverage(this_session: Session) -> None:
"""Send coverage report to codecov."""
args = this_session.posargs
this_session.install('-r', 'dev-requirements/coverage.txt')
this_session.run('coverage', 'xml', '--fail-under=0')
this_session.run('codecov', *args)
@nox.session(python='3.9')
def wheel_upload_test(this_session: Session) -> None:
"""Upload the wheel package to test.pypi.org."""
password = os.environ['TEST_<PASSWORD>']
args = this_session.posargs or [
'--repository-url',
'https://test.pypi.org/legacy/',
'--username',
'__token__',
'--password',
password,
'--non-interactive',
'--skip-existing',
'--verbose',
'dist/*',
]
this_session.install('twine')
this_session.run('twine', 'check', 'dist/*')
this_session.run('twine', 'upload', *args)
|
StarcoderdataPython
|
6575292
|
<filename>rllib/models/tf/layers/noisy_layer.py
import numpy as np
from ray.rllib.utils.framework import get_activation_fn, get_variable, \
try_import_tf
tf1, tf, tfv = try_import_tf()
class NoisyLayer(tf.keras.layers.Layer if tf else object):
"""A Layer that adds learnable Noise to some previous layer's outputs.
Consists of:
- a common dense layer: y = w^{T}x + b
- a noisy layer: y = (w + \\epsilon_w*\\sigma_w)^{T}x +
(b+\\epsilon_b*\\sigma_b)
, where \epsilon are random variables sampled from factorized normal
distributions and \\sigma are trainable variables which are expected to
vanish along the training procedure.
"""
def __init__(self, prefix, out_size, sigma0, activation="relu"):
"""Initializes a NoisyLayer object.
Args:
            prefix: Name prefix used for the created TF variables.
            out_size: Number of output units of the layer.
            sigma0: Initial scale of the bias noise.
            activation: Activation function name applied to the noisy output.
"""
super().__init__()
self.prefix = prefix
self.out_size = out_size
# TF noise generation can be unreliable on GPU
# If generating the noise on the CPU,
# lowering sigma0 to 0.1 may be helpful
self.sigma0 = sigma0 # 0.5~GPU, 0.1~CPU
self.activation = activation
# Variables.
self.w = None # Weight matrix.
self.b = None # Biases.
self.sigma_w = None # Noise for weight matrix
self.sigma_b = None # Noise for biases.
def build(self, input_shape):
in_size = int(input_shape[1])
self.sigma_w = get_variable(
value=tf.keras.initializers.RandomUniform(
minval=-1.0 / np.sqrt(float(in_size)),
maxval=1.0 / np.sqrt(float(in_size))),
trainable=True,
tf_name=self.prefix + "_sigma_w",
shape=[in_size, self.out_size],
dtype=tf.float32)
self.sigma_b = get_variable(
value=tf.keras.initializers.Constant(
self.sigma0 / np.sqrt(float(in_size))),
trainable=True,
tf_name=self.prefix + "_sigma_b",
shape=[self.out_size],
dtype=tf.float32,
)
self.w = get_variable(
value=tf.keras.initializers.GlorotUniform(),
tf_name=self.prefix + "_fc_w",
trainable=True,
shape=[in_size, self.out_size],
dtype=tf.float32,
)
self.b = get_variable(
value=tf.keras.initializers.Zeros(),
tf_name=self.prefix + "_fc_b",
trainable=True,
shape=[self.out_size],
dtype=tf.float32,
)
def call(self, inputs):
in_size = int(inputs.shape[1])
epsilon_in = tf.random.normal(shape=[in_size])
epsilon_out = tf.random.normal(shape=[self.out_size])
epsilon_in = self._f_epsilon(epsilon_in)
epsilon_out = self._f_epsilon(epsilon_out)
epsilon_w = tf.matmul(
a=tf.expand_dims(epsilon_in, -1), b=tf.expand_dims(epsilon_out, 0))
epsilon_b = epsilon_out
action_activation = tf.matmul(
inputs,
self.w + self.sigma_w * epsilon_w) + \
self.b + self.sigma_b * epsilon_b
fn = get_activation_fn(self.activation, framework="tf")
if fn is not None:
action_activation = fn(action_activation)
return action_activation
def _f_epsilon(self, x):
return tf.math.sign(x) * tf.math.sqrt(tf.math.abs(x))
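# Editorial usage sketch (assumes eager execution with a Keras-compatible TF build;
# the sizes below are arbitrary):
#
# layer = NoisyLayer(prefix="noisy_fc", out_size=64, sigma0=0.5, activation="relu")
# out = layer(tf.ones([32, 128]))   # -> shape [32, 64], with fresh factorized noise each call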
|
StarcoderdataPython
|
4989383
|
import scramble
import movesticker
import tkinter as tk
import csv
import os
from datetime import datetime
import time
# basic window setup
win = tk.Tk()
win.title('Random Scramble Generator')
win.geometry('1440x900')
win.config(background = '#323232')
time_list = []
sc_list = []
# draw the scramble diagram
labelList = []
def draw_scramble():
stikers = movesticker.move_as_scramble(sc)
for k in range(6):
for i in range(3):
for j in range(3):
xstart = 900
ystart = 625
num = 9 * k + 3 * i + j + 1
var = 'L' + str(num)
var = tk.Label(text=' ',bg=stikers[k][i*3+j])
if k == 0:
var.place(x=(j+3)*26+4+xstart,y=i*25+ystart)
elif k <= 4:
var.place(x=(j+3*(k-1))*26+4*(k-1)+xstart,y=(i+3)*25+4+ystart)
elif k == 5:
var.place(x=(j+3)*26+4+xstart,y=(i+6)*25+4*2+ystart)
labelList.append(var)
# hide the scramble diagram
def hide_draw_scramble():
print('111')
print(len(labelList))
for i in range(0,len(labelList)):
labelList[i].place_forget()
sc = scramble.scramble3() # generate the first scramble on startup
draw_scramble() # draw the first scramble diagram on startup
# generate a new scramble
def gen_sc():
global sc
    sc = scramble.scramble3() # generate sc from the scramble module; sc is a list such as ['R',"U'",'L',...]
    sc_btn.config(text = sc) # set the button text to sc
    draw_scramble() # redraw the scramble diagram
# create the widgets
sc_btn = tk.Button(text = sc , fg = 'white', bg = '#323232',font = 'Arial 30' ) # shows the scramble; clicking it generates a new one
sc_btn.pack(anchor='n')
solve_show = tk.Label(text = 'solve: 0', fg = 'white', bg = '#323232',font = 'Arial 30') # shows which solve number this is
solve_show.place(x=30, y=200)
mo3_show = tk.Label(text = 'mo3 ----', fg = 'white', bg = '#323232',font = 'Arial 30') # shows mo3
mo3_show.place(x=30, y=240)
ao5_show = tk.Label(text = 'ao5 ----', fg = 'white', bg = '#323232',font = 'Arial 30') # shows ao5
ao5_show.place(x=30, y=280)
ao12_show = tk.Label(text = 'ao12 ----', fg = 'white', bg = '#323232',font = 'Arial 30') # shows ao12
ao12_show.place(x=30, y=320)
mean_show = tk.Label(text = 'mean ----', fg = 'white', bg = '#323232',font = 'Arial 30') # shows the mean
mean_show.place(x=30, y=360)
time_1_show = tk.Button(fg = 'white', bg = '#323232',font = 'Arial 30') # shows the most recent time; clicking prints that solve's scramble
time_1_show.place(x=30, y=410)
time_2_show = tk.Button(fg = 'white', bg = '#323232',font = 'Arial 30') # shows the time before that; clicking prints that solve's scramble
time_2_show.place(x=30, y=490)
time_3_show = tk.Button(fg = 'white', bg = '#323232',font = 'Arial 30')
time_3_show.place(x=30, y=570)
time_4_show = tk.Button(fg = 'white', bg = '#323232',font = 'Arial 30')
time_4_show.place(x=30, y=650)
time_5_show = tk.Button(fg = 'white', bg = '#323232',font = 'Arial 30')
time_5_show.place(x=30, y=730)
plus_two_btn = tk.Button(text = '+2', fg = 'white', bg = '#323232',font = 'Arial 20') # after a solve, click to add +2 to that time; click again to undo
plus_two_btn.place(x=110, y=810)
dnf_btn = tk.Button(text = 'DNF', fg = 'white', bg = '#323232',font = 'Arial 20') # after a solve, click to mark it DNF; click again to undo
dnf_btn.place(x=30, y=810)
def hide_object():
    sc_btn.pack_forget() # hide the scramble
    solve_show.place_forget() # hide the solve counter
    mo3_show.place_forget() # hide mo3
    ao5_show.place_forget() # hide ao5
    ao12_show.place_forget() # hide ao12
    mean_show.place_forget() # hide the mean
    time_1_show.place_forget() # hide the last five results
    time_2_show.place_forget()
    time_3_show.place_forget()
    time_4_show.place_forget()
    time_5_show.place_forget()
    plus_two_btn.place_forget() # hide the +2 button
    dnf_btn.place_forget() # hide the DNF button
    export.place_forget() # hide the export button
    hide_draw_scramble() # hide the scramble diagram
def show_object():
    sc_btn.pack(anchor='n') # show the scramble
    solve_show.place(x=30, y=200) # show the solve counter
    mo3_show.place(x=30, y=240) # show mo3
    ao5_show.place(x=30, y=280) # show ao5
    ao12_show.place(x=30, y=320) # show ao12
    mean_show.place(x=30, y=360) # show the mean
    time_1_show.place(x=30, y=410) # show the last five results
    time_2_show.place(x=30, y=490)
    time_3_show.place(x=30, y=570)
    time_4_show.place(x=30, y=650)
    time_5_show.place(x=30, y=730)
    plus_two_btn.place(x=110, y=810) # show the +2 button
    dnf_btn.place(x=30, y=810) # show the DNF button
    export.place(x=165,y=810) # show the export button
# stopwatch
t_ms = 0 # elapsed time
t_s = 0
t_m = 0
time_show = tk.Label(text = '%02d.%02d' % (t_s, t_ms),fg='white',bg = '#323232',font = 'Arial 120') # timer display shown from startup
time_show.place(x=480,y=350)
# start timing
run = 2 # avoid division by zero when computing the mean
after_cancel = None
press_plus2 = 0
press_dnf = 0
time_start = None
def space_trigger(x):
global run, t_m,t_ms,t_s ,press_plus2 ,press_dnf,time_start
press_plus2 = press_dnf = 0
    if run %2 == 0: # start timing & reset
t_ms = 0
t_s = 0
t_m = 0
run += 1
        time_start = time.time() # record the start time
        timer_start() # kick off the stopwatch
        hide_object() # hide the widgets while timing
    else: # timing finished
time_list.append(t_ms)
sc_list.append(sc)
calculate_ao5(int(run/2))
calculate_mo3(int(run/2))
calculate_ao12(int(run/2))
calculate_mean(int(run/2))
time_list_show(int(run/2))
solve_show.config(text = 'solve: ' + str(int(run/2)))
gen_sc()
run += 1
timer_stop()
        show_object() # show the widgets again
def timer_start(): #碼表開始運行
global t_ms , after_cancel
time_now = time.time() #當下時間
t_ms = (time_now - time_start)*1000 #開始時間 - 當下時間 = 經過秒數
time_show.config(text = time_translation(t_ms, t_s, t_m)) #改成用time_translate
after_cancel = win.after(1,timer_start) #每個1ms呼叫自身
def timer_stop(): #碼表停止
global after_cancel
win.after_cancel(after_cancel)
after_cancel = None
def time_translation(ms, s, m): #將ms轉換成m,s,ms
while ms >= 1000:
s = s + 1
ms -= 1000
while s >= 60:
m = m + 1
s -= 60
if m > 0 :
return str(m) + ':' + "%02d" % s + '.' + "%03d" % ms
else:
return str(s) + '.' + "%03d" % ms
def calculate_ao5(n):
if n >= 5:
ao5 = int((sum(time_list[n-5:n]) - max(time_list[n-5:n]) - min(time_list[n-5:n])) / 3)
if ao5 < 0 :
ao5_show.config(text = 'ao5: DNF' )
else:
ao5_show.config(text = 'ao5: ' + str(time_translation(ao5,0,0)) )
def calculate_mo3(n):
if n >= 3:
mo3 = int(sum(time_list[n-3:n]) / 3)
if mo3 < 0 :
mo3_show.config(text = 'mo3: DNF' )
else:
mo3_show.config(text = 'mo3: ' + str(time_translation(mo3,0,0)) )
def calculate_ao12(n):
if n >= 12:
ao12 = int((sum(time_list[n-12:n]) - max(time_list[n-12:n]) - min(time_list[n-12:n])) / 10)
if ao12 < 0 :
ao12_show.config(text = 'ao12: DNF' )
else:
ao12_show.config(text = 'ao12: ' + str(time_translation(ao12,0,0)) )
def calculate_mean(n):
mean_list = []
for i in range(0 , n):
if time_list[i] >= 0 :
mean_list.append(time_list[i])
if len(mean_list) == 0 :
mean_show.config(text = 'mean: DNF')
else:
        mean = int(sum(mean_list) / len(mean_list))
mean_show.config(text = 'mean: ' + str(time_translation(mean,0,0)) )
def time_list_show(n):
if n >= 5:
if time_list[n-5] < 0 :
time_5_show.config(text = 'DNF')
else: time_5_show.config(text = time_translation(time_list[n-5],0,0))
if n >= 4:
if time_list[n-4] < 0 :
time_4_show.config(text = 'DNF')
else: time_4_show.config(text = time_translation(time_list[n-4],0,0))
if n >= 3:
if time_list[n-3] < 0 :
time_3_show.config(text = 'DNF')
else: time_3_show.config(text = time_translation(time_list[n-3],0,0))
if n >= 2:
if time_list[n-2] < 0 :
time_2_show.config(text = 'DNF')
else: time_2_show.config(text = time_translation(time_list[n-2],0,0))
if n >= 1:
if time_list[n-1] < 0 :
time_1_show.config(text = 'DNF')
else: time_1_show.config(text = time_translation(time_list[n-1],0,0))
def show_sc(x): # print the scramble of a past solve when its button is pressed; x = how many solves back
n = int(run/2)-1
print(time_translation(time_list[n-x],0,0),end=' ')
for i in range(len(sc_list[n-x])):
print(sc_list[n-x][i] ,end=' ')
print()
def plus_two():
global press_plus2
press_plus2 += 1
n = int(run/2)-1
if press_plus2 % 2 == 1 :
time_list[n-1] += 2000
else:
time_list[n-1] -= 2000
calculate_ao5(n)
calculate_ao12(n)
calculate_mean(n)
calculate_mo3(n)
time_list_show(n)
def dnf():
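    # A DNF is encoded by pushing the stored time far below zero (subtracting 10,000,000 ms);
    # the display and average functions treat any negative entry as a DNF.
    # Pressing the button a second time adds the offset back to undo the penalty.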
global press_dnf
press_dnf += 1
n = int(run/2)-1
if press_dnf % 2 == 1 :
time_list[n-1] -= 10000000
else:
time_list[n-1] += 10000000
calculate_ao5(n)
calculate_ao12(n)
calculate_mean(n)
calculate_mo3(n)
time_list_show(n)
# detect the start/stop of timing (any key release triggers it)
win.bind('<KeyRelease>', space_trigger)
# wire the buttons to their functions
sc_btn.config(command = gen_sc)
time_1_show.config(command = lambda: show_sc(1)) # a tkinter Button command cannot take arguments by default
time_2_show.config(command = lambda: show_sc(2)) # so wrap the call in a lambda when arguments are needed
time_3_show.config(command = lambda: show_sc(3))
time_4_show.config(command = lambda: show_sc(4))
time_5_show.config(command = lambda: show_sc(5))
plus_two_btn.config(command = plus_two)
dnf_btn.config(command = dnf)
# export the data
def export_csv():
    # open the output CSV file
    datetime_dt = datetime.today()
    file_name = str(os.getcwd())+'//Downloads//TimeOutput_' + str(datetime_dt.strftime("%Y_%m_%d_%H_%M_%S")) + '.csv'
    with open(file_name, 'w', newline='') as csvFile:
        # create the CSV writer
        writer = csv.writer(csvFile)
        # header row
writer.writerow(['No.','Time','Scramble'])
for i in range(0,len(time_list)):
t = time_translation(time_list[i],0,0)
writer.writerow([str(i+1),t,sc_list[i]])
export = tk.Button(text = 'Export CSV' , fg = 'white', bg = '#323232',font = 'Arial 20')
export.config(command = export_csv)
export.place(x=165,y=810)
# keep the main window always on top and run the event loop
win.attributes('-topmost', True)
win.mainloop()
|
StarcoderdataPython
|
109960
|
<reponame>nathandarnell/sal
"""General functional tests for the text_utils module."""
from django.test import TestCase
from utils import text_utils
class TextUtilsTest(TestCase):
"""Test the Utilities module."""
def test_safe_text_null(self):
"""Ensure that null characters are dropped."""
original = '\x00'
self.assertTrue(text_utils.safe_text(original) == '')
        self.assertTrue(text_utils.safe_text(original.encode()) == '')
def test_listify_basic(self):
"""Ensure non-collection data is only str converted."""
catalogs = 'testing'
result = text_utils.stringify(catalogs)
self.assertEqual(result, catalogs)
self.assertTrue(isinstance(result, str))
# Bool, int, float, dict
tests = (False, 5, 5.0, {'a': 'test'})
for test in tests:
self.assertEqual(text_utils.stringify(test), str(test))
def test_listify_list(self):
"""Ensure list data can be converted to strings."""
catalogs = ['testing', 'phase', 'production']
result = text_utils.stringify(catalogs)
self.assertEqual(result, ', '.join(catalogs))
def test_listify_dict(self):
"""Ensure dict data can be converted to strings."""
catalogs = ['testing', 'phase', {'key': 'value'}]
result = text_utils.stringify(catalogs)
self.assertEqual(result, "testing, phase, {'key': 'value'}")
def test_listify_non_str_types(self):
"""Ensure nested non-str types are converted."""
catalogs = [5, 5.0, {'a': 'test'}]
result = text_utils.stringify(catalogs)
self.assertEqual(result, "5, 5.0, {'a': 'test'}")
|
StarcoderdataPython
|
1956846
|
<reponame>EnjoyLifeFund/py36pkgs
#!/usr/bin/python
#
# Copyright (c) 2016 <NAME>, <<EMAIL>>
# <NAME>, <<EMAIL>>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: azure_rm_networkinterface
version_added: "2.1"
short_description: Manage Azure network interfaces.
description:
- Create, update or delete a network interface. When creating a network interface you must provide the name of an
existing virtual network, the name of an existing subnet within the virtual network. A default security group
and public IP address will be created automatically, or you can provide the name of an existing security group
and public IP address. See the examples below for more details.
options:
resource_group:
description:
- Name of a resource group where the network interface exists or will be created.
required: true
name:
description:
- Name of the network interface.
required: true
state:
description:
- Assert the state of the network interface. Use 'present' to create or update an interface and
'absent' to delete an interface.
default: present
choices:
- absent
- present
required: false
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
required: false
virtual_network_name:
description:
- Name of an existing virtual network with which the network interface will be associated. Required
when creating a network interface.
aliases:
- virtual_network
required: false
default: null
subnet_name:
description:
- Name of an existing subnet within the specified virtual network. Required when creating a network
interface
aliases:
- subnet
required: false
default: null
os_type:
description:
- Determines any rules to be added to a default security group. When creating a network interface, if no
security group name is provided, a default security group will be created. If the os_type is 'Windows',
a rule will be added allowing RDP access. If the os_type is 'Linux', a rule allowing SSH access will be
added.
choices:
- Windows
- Linux
default: Linux
required: false
private_ip_address:
description:
- Valid IPv4 address that falls within the specified subnet.
required: false
private_ip_allocation_method:
description:
- "Specify whether or not the assigned IP address is permanent. NOTE: when creating a network interface
specifying a value of 'Static' requires that a private_ip_address value be provided. You can update
the allocation method to 'Static' after a dynamic private ip address has been assigned."
default: Dynamic
choices:
- Dynamic
- Static
required: false
public_ip:
description:
- When creating a network interface, if no public IP address name is provided a default public IP
address will be created. Set to false, if you do not want a public IP address automatically created.
default: true
required: false
public_ip_address_name:
description:
- Name of an existing public IP address object to associate with the security group.
aliases:
- public_ip_address
- public_ip_name
required: false
default: null
public_ip_allocation_method:
description:
- If a public_ip_address_name is not provided, a default public IP address will be created. The allocation
method determines whether or not the public IP address assigned to the network interface is permanent.
choices:
- Dynamic
- Static
default: Dynamic
required: false
security_group_name:
description:
- Name of an existing security group with which to associate the network interface. If not provided, a
default security group will be created.
aliases:
- security_group
required: false
default: null
open_ports:
description:
- When a default security group is created for a Linux host a rule will be added allowing inbound TCP
connections to the default SSH port 22, and for a Windows host rules will be added allowing inbound
access to RDP ports 3389 and 5986. Override the default ports by providing a list of open ports.
required: false
default: null
extends_documentation_fragment:
- azure
- azure_tags
author:
- "<NAME> (@chouseknecht)"
- "<NAME> (@nitzmahone)"
'''
EXAMPLES = '''
- name: Create a network interface with minimal parameters
azure_rm_networkinterface:
name: nic001
resource_group: Testing
virtual_network_name: vnet001
subnet_name: subnet001
- name: Create a network interface with private IP address only (no Public IP)
azure_rm_networkinterface:
name: nic001
resource_group: Testing
virtual_network_name: vnet001
subnet_name: subnet001
public_ip: no
- name: Create a network interface for use in a Windows host (opens RDP port) with custom RDP port
azure_rm_networkinterface:
name: nic002
resource_group: Testing
virtual_network_name: vnet001
subnet_name: subnet001
os_type: Windows
rdp_port: 3399
- name: Create a network interface using existing security group and public IP
azure_rm_networkinterface:
name: nic003
resource_group: Testing
virtual_network_name: vnet001
subnet_name: subnet001
security_group_name: secgroup001
public_ip_address_name: publicip001
- name: Delete network interface
azure_rm_networkinterface:
resource_group: Testing
name: nic003
state: absent
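# Illustrative extra example (an assumption, not part of the original module docs):
# the open_ports option documented above can override the ports opened in the
# auto-created security group. The interface name and port values here are made up.
- name: Create a network interface with custom open ports
  azure_rm_networkinterface:
    name: nic004
    resource_group: Testing
    virtual_network_name: vnet001
    subnet_name: subnet001
    os_type: Linux
    open_ports:
      - 22
      - 8443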
'''
RETURN = '''
state:
description: The current state of the network interface.
returned: always
type: dict
sample: {
"dns_settings": {
"applied_dns_servers": [],
"dns_servers": [],
"internal_dns_name_label": null,
"internal_fqdn": null
},
"enable_ip_forwarding": false,
"etag": 'W/"be115a43-2148-4545-a324-f33ad444c926"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/nic003",
"ip_configuration": {
"name": "default",
"private_ip_address": "10.1.0.10",
"private_ip_allocation_method": "Static",
"public_ip_address": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/publicIPAddresses/publicip001",
"name": "publicip001"
},
"subnet": {}
},
"location": "eastus2",
"mac_address": null,
"name": "nic003",
"network_security_group": {},
"primary": null,
"provisioning_state": "Succeeded",
"tags": null,
"type": "Microsoft.Network/networkInterfaces"
}
'''
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.network.models import NetworkInterface, NetworkInterfaceIPConfiguration, Subnet, \
PublicIPAddress, NetworkSecurityGroup
except ImportError:
# This is handled in azure_rm_common
pass
def nic_to_dict(nic):
result = dict(
id=nic.id,
name=nic.name,
type=nic.type,
location=nic.location,
tags=nic.tags,
network_security_group=dict(),
ip_configuration=dict(
name=nic.ip_configurations[0].name,
private_ip_address=nic.ip_configurations[0].private_ip_address,
private_ip_allocation_method=nic.ip_configurations[0].private_ip_allocation_method,
subnet=dict(),
public_ip_address=dict(),
),
dns_settings=dict(
dns_servers=nic.dns_settings.dns_servers,
applied_dns_servers=nic.dns_settings.applied_dns_servers,
internal_dns_name_label=nic.dns_settings.internal_dns_name_label,
internal_fqdn=nic.dns_settings.internal_fqdn
),
mac_address=nic.mac_address,
primary=nic.primary,
enable_ip_forwarding=nic.enable_ip_forwarding,
provisioning_state=nic.provisioning_state,
etag=nic.etag,
)
if nic.network_security_group:
result['network_security_group']['id'] = nic.network_security_group.id
id_keys = azure_id_to_dict(nic.network_security_group.id)
result['network_security_group']['name'] = id_keys['networkSecurityGroups']
if nic.ip_configurations[0].subnet:
result['ip_configuration']['subnet']['id'] = \
nic.ip_configurations[0].subnet.id
id_keys = azure_id_to_dict(nic.ip_configurations[0].subnet.id)
result['ip_configuration']['subnet']['virtual_network_name'] = id_keys['virtualNetworks']
result['ip_configuration']['subnet']['name'] = id_keys['subnets']
if nic.ip_configurations[0].public_ip_address:
result['ip_configuration']['public_ip_address']['id'] = \
nic.ip_configurations[0].public_ip_address.id
id_keys = azure_id_to_dict(nic.ip_configurations[0].public_ip_address.id)
result['ip_configuration']['public_ip_address']['name'] = id_keys['publicIPAddresses']
return result
class AzureRMNetworkInterface(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
location=dict(type='str'),
security_group_name=dict(type='str', aliases=['security_group']),
state=dict(default='present', choices=['present', 'absent']),
private_ip_address=dict(type='str'),
private_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'),
public_ip_address_name=dict(type='str', aliases=['public_ip_address', 'public_ip_name']),
public_ip=dict(type='bool', default=True),
subnet_name=dict(type='str', aliases=['subnet']),
virtual_network_name=dict(type='str', aliases=['virtual_network']),
os_type=dict(type='str', choices=['Windows', 'Linux'], default='Linux'),
open_ports=dict(type='list'),
public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'),
)
self.resource_group = None
self.name = None
self.location = None
self.security_group_name = None
self.private_ip_address = None
self.private_ip_allocation_method = None
self.public_ip_address_name = None
self.state = None
self.subnet_name = None
self.tags = None
self.virtual_network_name = None
self.security_group_name = None
self.os_type = None
self.open_ports = None
self.public_ip_allocation_method = None
self.public_ip = None
self.results = dict(
changed=False,
state=dict(),
)
super(AzureRMNetworkInterface, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
        for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
results = dict()
changed = False
nic = None
subnet = None
nsg = None
pip = None
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
if self.state == 'present':
if self.virtual_network_name and not self.subnet_name:
self.fail("Parameter error: a subnet is required when passing a virtual_network_name.")
if self.subnet_name and not self.virtual_network_name:
self.fail("Parameter error: virtual_network_name is required when passing a subnet value.")
if self.virtual_network_name and self.subnet_name:
subnet = self.get_subnet(self.virtual_network_name, self.subnet_name)
if self.public_ip_address_name:
pip = self.get_public_ip_address(self.public_ip_address_name)
if self.security_group_name:
nsg = self.get_security_group(self.security_group_name)
try:
self.log('Fetching network interface {0}'.format(self.name))
nic = self.network_client.network_interfaces.get(self.resource_group, self.name)
self.log('Network interface {0} exists'.format(self.name))
self.check_provisioning_state(nic, self.state)
results = nic_to_dict(nic)
self.log(results, pretty_print=True)
if self.state == 'present':
update_tags, results['tags'] = self.update_tags(results['tags'])
if update_tags:
changed = True
if self.private_ip_address:
if results['ip_configuration']['private_ip_address'] != self.private_ip_address:
self.log("CHANGED: network interface {0} private ip".format(self.name))
changed = True
results['ip_configuration']['private_ip_address'] = self.private_ip_address
if self.public_ip_address_name:
if results['ip_configuration']['public_ip_address'].get('id') != pip.id:
self.log("CHANGED: network interface {0} public ip".format(self.name))
changed = True
results['ip_configuration']['public_ip_address']['id'] = pip.id
results['ip_configuration']['public_ip_address']['name'] = pip.name
if self.security_group_name:
if results['network_security_group'].get('id') != nsg.id:
self.log("CHANGED: network interface {0} network security group".format(self.name))
changed = True
results['network_security_group']['id'] = nsg.id
results['network_security_group']['name'] = nsg.name
if self.private_ip_allocation_method:
if results['ip_configuration']['private_ip_allocation_method'] != self.private_ip_allocation_method:
self.log("CHANGED: network interface {0} private ip allocation".format(self.name))
changed = True
results['ip_configuration']['private_ip_allocation_method'] = self.private_ip_allocation_method
if self.private_ip_allocation_method == 'Dynamic':
results['ip_configuration']['private_ip_address'] = None
if self.subnet_name:
if results['ip_configuration']['subnet'].get('id') != subnet.id:
changed = True
self.log("CHANGED: network interface {0} subnet".format(self.name))
results['ip_configuration']['subnet']['id'] = subnet.id
results['ip_configuration']['subnet']['name'] = subnet.name
results['ip_configuration']['subnet']['virtual_network_name'] = self.virtual_network_name
elif self.state == 'absent':
self.log("CHANGED: network interface {0} exists but requested state is 'absent'".format(self.name))
changed = True
except CloudError:
self.log('Network interface {0} does not exist'.format(self.name))
if self.state == 'present':
self.log("CHANGED: network interface {0} does not exist but requested state is "
"'present'".format(self.name))
changed = True
self.results['changed'] = changed
self.results['state'] = results
if self.check_mode:
return self.results
if changed:
if self.state == 'present':
if not nic:
# create network interface
self.log("Creating network interface {0}.".format(self.name))
# check required parameters
if not self.subnet_name:
self.fail("parameter error: subnet_name required when creating a network interface.")
if not self.virtual_network_name:
self.fail("parameter error: virtual_network_name required when creating a network interface.")
if not self.security_group_name:
# create default security group
nsg = self.create_default_securitygroup(self.resource_group, self.location, self.name,
self.os_type, self.open_ports)
if not pip and self.public_ip:
# create a default public_ip
pip = self.create_default_pip(self.resource_group, self.location, self.name,
self.public_ip_allocation_method)
nic = NetworkInterface(
location=self.location,
tags=self.tags,
ip_configurations=[
NetworkInterfaceIPConfiguration(
private_ip_allocation_method=self.private_ip_allocation_method,
)
]
)
#nic.name = self.name
nic.ip_configurations[0].subnet = Subnet(id=subnet.id)
nic.ip_configurations[0].name = 'default'
nic.network_security_group = NetworkSecurityGroup(id=nsg.id,
location=nsg.location,
resource_guid=nsg.resource_guid)
if self.private_ip_address:
nic.ip_configurations[0].private_ip_address = self.private_ip_address
if pip:
nic.ip_configurations[0].public_ip_address = PublicIPAddress(
id=pip.id,
location=pip.location,
resource_guid=pip.resource_guid)
else:
self.log("Updating network interface {0}.".format(self.name))
nic = NetworkInterface(
id=results['id'],
location=results['location'],
tags=results['tags'],
ip_configurations=[
NetworkInterfaceIPConfiguration(
private_ip_allocation_method=
results['ip_configuration']['private_ip_allocation_method']
)
]
)
subnet = self.get_subnet(results['ip_configuration']['subnet']['virtual_network_name'],
results['ip_configuration']['subnet']['name'])
nic.ip_configurations[0].subnet = Subnet(id=subnet.id)
nic.ip_configurations[0].name = results['ip_configuration']['name']
#nic.name = name=results['name'],
if results['ip_configuration'].get('private_ip_address'):
nic.ip_configurations[0].private_ip_address = results['ip_configuration']['private_ip_address']
if results['ip_configuration']['public_ip_address'].get('id'):
pip = \
self.get_public_ip_address(results['ip_configuration']['public_ip_address']['name'])
nic.ip_configurations[0].public_ip_address = PublicIPAddress(
id=pip.id,
location=pip.location,
resource_guid=pip.resource_guid)
#name=pip.name,
if results['network_security_group'].get('id'):
nsg = self.get_security_group(results['network_security_group']['name'])
nic.network_security_group = NetworkSecurityGroup(id=nsg.id,
location=nsg.location,
resource_guid=nsg.resource_guid)
# See what actually gets sent to the API
request = self.serialize_obj(nic, 'NetworkInterface')
self.log(request, pretty_print=True)
self.results['state'] = self.create_or_update_nic(nic)
elif self.state == 'absent':
self.log('Deleting network interface {0}'.format(self.name))
self.delete_nic()
return self.results
def create_or_update_nic(self, nic):
try:
poller = self.network_client.network_interfaces.create_or_update(self.resource_group, self.name, nic)
new_nic = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error creating or updating network interface {0} - {1}".format(self.name, str(exc)))
return nic_to_dict(new_nic)
def delete_nic(self):
try:
poller = self.network_client.network_interfaces.delete(self.resource_group, self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting network interface {0} - {1}".format(self.name, str(exc)))
# Delete doesn't return anything. If we get this far, assume success
self.results['state']['status'] = 'Deleted'
return True
def get_public_ip_address(self, name):
self.log("Fetching public ip address {0}".format(name))
try:
public_ip = self.network_client.public_ip_addresses.get(self.resource_group, name)
except Exception as exc:
self.fail("Error: fetching public ip address {0} - {1}".format(self.name, str(exc)))
return public_ip
def get_subnet(self, vnet_name, subnet_name):
self.log("Fetching subnet {0} in virtual network {1}".format(subnet_name, vnet_name))
try:
subnet = self.network_client.subnets.get(self.resource_group, vnet_name, subnet_name)
except Exception as exc:
self.fail("Error: fetching subnet {0} in virtual network {1} - {2}".format(subnet_name,
vnet_name,
str(exc)))
return subnet
def get_security_group(self, name):
self.log("Fetching security group {0}".format(name))
try:
nsg = self.network_client.network_security_groups.get(self.resource_group, name)
except Exception as exc:
self.fail("Error: fetching network security group {0} - {1}.".format(name, str(exc)))
return nsg
def main():
AzureRMNetworkInterface()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
9783611
|
<reponame>rrajaravi/pydfs<filename>setup.py
from setuptools import setup
def readme():
    with open('README.md', 'r') as f:
        return f.read()
def requirements():
with open('requirements.txt', 'r') as f:
return f.readlines()
setup(
name='pydfu',
packages=['pydfu'],
version='0.1',
long_description=readme(),
description="Disk and File system query",
author='rrajaravi',
author_email='<EMAIL>',
url='https://github.com/rrajaravi/pydfu.git',
license="MIT",
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Science/Research',
],
install_requires=requirements(),
test_suite="tests",
scripts=['bin/pydfu']
)
|
StarcoderdataPython
|
197655
|
<filename>src/pylisp.py
import sys
import re
from object import *
from compiler import compile
from vm import VM
class Reader:
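    # Minimal character reader over a stream with a one-character pushback buffer (unread_char).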
def __init__(self, stream):
self.stream = stream
self.c = None
def read_char(self):
ret_c = self.c
if ret_c == "":
return ret_c
self.c = None
if ret_c is None:
ret_c = self.stream.read(1)
return ret_c
def unread_char(self, c):
self.c = c
class Lexer:
def __init__(self, reader):
self.reader = reader
self.token = None
@staticmethod
def is_space(c):
return c in " \t\r\n"
def skip_spaces(self):
c = self.reader.read_char()
while c != "" and Lexer.is_space(c):
c = self.reader.read_char()
self.reader.unread_char(c)
def read_string(self):
cs = []
c = self.reader.read_char()
while c != "\"":
cs.append(c)
c = self.reader.read_char()
return "".join(cs)
def read_ident(self):
cs = []
c = self.reader.read_char()
while not (Lexer.is_space(c) or c in "()"):
cs.append(c)
c = self.reader.read_char()
self.reader.unread_char(c)
return "".join(cs)
@staticmethod
def is_number(s):
return re.fullmatch(r"\d+", s)
def get_token(self):
if self.token:
ret_token = self.token
self.token = None
return ret_token
self.skip_spaces()
token = None
c = self.reader.read_char()
if c == "":
pass
elif c == "(":
token = ("LPAR", c)
elif c == ")":
token = ("RPAR", c)
elif c == ".":
token = ("DOT", c)
elif c == "\"":
s = self.read_string()
token = ("STRING", s)
else:
self.reader.unread_char(c)
ident = self.read_ident()
if Lexer.is_number(ident):
token = ("NUMBER", ident)
else:
token = ("IDENT", ident)
return token
def unget_token(self, token):
self.token = token
class TSyntaxError(Exception):
def __init__(self, message):
super().__init__(message)
class SexpReader:
def __init__(self, stream=sys.stdin):
self.lexer = Lexer(Reader(stream))
# (a b c . d)
# -> (pair a (pair b (pair c . d)))
# (a b c d)
# -> (pair a (pair b (pair c (pair d nil))))
def read_pair(self):
token = self.lexer.get_token()
token_type, token_value = token
if token_type == "RPAR":
return TNull()
if token_type == "DOT":
raise TSyntaxError("bad dot syntax")
self.lexer.unget_token(token)
car = self.read()
token = self.lexer.get_token()
token_type, _ = token
if token_type == "DOT":
cdr = self.read()
token_type, _ = self.lexer.get_token()
if token_type != "RPAR":
raise TSyntaxError("bad dot syntax")
else:
self.lexer.unget_token(token)
cdr = self.read_pair()
return TPair(car, cdr)
def read(self):
token_type, token_value = self.lexer.get_token()
obj = None
if token_type == "STRING":
obj = TString(token_value)
elif token_type == "NUMBER":
obj = TNumber(int(token_value))
elif token_type == "IDENT":
obj = TSymbol(token_value)
elif token_type == "LPAR":
obj = self.read_pair()
return obj
class Eval:
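    # Wraps the VM: compile() turns an expression into VM code, the VM executes it,
    # and the result is read back from register a.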
def __init__(self):
self.vm = VM()
def eval(self, expr):
compiled_expr = compile(expr)
print("compiled_expr:", compiled_expr)
self.vm.reg.x = compiled_expr
self.vm.run()
return self.vm.reg.a
def repl():
reader = SexpReader(sys.stdin)
evaluator = Eval()
while True:
print(">> ", end="")
sys.stdout.flush()
expr = reader.read()
print("read:", expr)
expr = evaluator.eval(expr)
print("eval:", expr)
sys.stdout.flush()
def main():
repl()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6504907
|
<gh_stars>100-1000
# Information: https://clover.coex.tech/programming
import rospy
from clover import srv
from std_srvs.srv import Trigger
rospy.init_node('flight')
get_telemetry = rospy.ServiceProxy('get_telemetry', srv.GetTelemetry)
navigate = rospy.ServiceProxy('navigate', srv.Navigate)
navigate_global = rospy.ServiceProxy('navigate_global', srv.NavigateGlobal)
set_position = rospy.ServiceProxy('set_position', srv.SetPosition)
set_velocity = rospy.ServiceProxy('set_velocity', srv.SetVelocity)
set_attitude = rospy.ServiceProxy('set_attitude', srv.SetAttitude)
set_rates = rospy.ServiceProxy('set_rates', srv.SetRates)
land = rospy.ServiceProxy('land', Trigger)
print('Take off and hover 1 m above the ground')
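# Note: frame_id='body' gives coordinates relative to the drone itself, and auto_arm=True arms the drone before the first navigate call (see the Clover docs linked above)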
navigate(x=0, y=0, z=1, frame_id='body', auto_arm=True)
# Wait for 5 seconds
rospy.sleep(5)
print('Fly forward 1 m')
navigate(x=1, y=0, z=0, frame_id='body')
# Wait for 5 seconds
rospy.sleep(5)
print('Perform landing')
land()
|
StarcoderdataPython
|
1953956
|
<reponame>mgorny/python-zeep<gh_stars>1000+
import pytest
from zeep import AsyncClient
@pytest.mark.requests
@pytest.mark.asyncio
async def test_context_manager():
async with AsyncClient("tests/wsdl_files/soap.wsdl") as async_client:
assert async_client
|
StarcoderdataPython
|
363433
|
""" Copy image file (e.g. ABC.JPG) to 20180605-ABC.JPG using EXIF timestamp
Very simple script, expects to be executed from the dir where the images are
and the filenames from stdin.
Example: ls *.JPG | python ~/mywork/img-batch-renaming/foto-rename.py
"""
import sys
import os
from itertools import chain
from shutil import copyfile, copystat
from time import strptime, strftime, mktime
from PIL import Image
from PIL.ExifTags import TAGS, GPSTAGS
EXIF_DATETIME=306
curr_dir = os.getcwd()
print(f'curr_dir={curr_dir}')
SUFFIX = os.getenv('SUFFIX', '')
INCLUDE_ORIGINAL = os.getenv('INCLUDE_ORIGINAL', False)
def print_exif_data(pil_image):
    print(pil_image)
    info = pil_image._getexif()
for tag, value in info.items():
key = TAGS.get(tag, tag)
print(key + "/" + str(tag) + "> " + str(value))
def modify_mtime(fname):
""" Change mtime and creation time, fname is a file in the current dir."""
original_name = os.path.join(curr_dir, fname)
ts = strptime(fname.split('-')[0], '%Y%m%d%H%M%S')
image_time = mktime(ts)
os.utime(original_name, (image_time, image_time))
def copy_using_exif_datetime(fname, keep_stat=True):
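    # Reads the EXIF DateTime tag (306) from the image and copies the file to
    # '<YYYYmmddHHMMSS>[-SUFFIX]-<original name>' in the current directory.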
original_name = os.path.join(curr_dir, fname)
image = Image.open(original_name)
info = image._getexif()
dt = info.get(EXIF_DATETIME)
ts = strptime(dt, '%Y:%m:%d %H:%M:%S')
pref = strftime('%Y%m%d%H%M%S', ts)
target_fname = f'{pref}-{SUFFIX}-{fname}' if SUFFIX else f'{pref}-{fname}'
target_fullname = os.path.join(curr_dir, target_fname)
print(f'{original_name} => {target_fname} => {target_fullname}')
    copyfile(original_name, target_fullname)
    if keep_stat:
        # additionally carry over the original file's timestamps and permission bits
        copystat(original_name, target_fullname)
if __name__ == '__main__':
for fname in chain(*(l.split() for l in sys.stdin.readlines())):
print(f'fname={fname}')
copy_using_exif_datetime(fname)
|
StarcoderdataPython
|