id (string, 1–265 chars) | text (string, 6–5.19M chars) | dataset_id (string, 7 classes) |
---|---|---|
58920 | load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def grpc_rules_repository():
http_archive(
name = "rules_proto_grpc",
urls = ["https://github.com/rules-proto-grpc/rules_proto_grpc/archive/2.0.0.tar.gz"],
sha256 = "d771584bbff98698e7cb3cb31c132ee206a972569f4dc8b65acbdd934d156b33",
strip_prefix = "rules_proto_grpc-2.0.0",
)
| StarcoderdataPython |
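The rows below follow the three-column schema above (`id`, `text`, `dataset_id`). A minimal sketch of iterating a dump with this schema, assuming the Hugging Face `datasets` library and a hypothetical dataset path (the actual repository is not named here):

```python
from datasets import load_dataset

# Hypothetical dataset path; substitute the real dataset repository.
ds = load_dataset("example-org/starcoder-python-subset", split="train", streaming=True)

for row in ds:
    # Each row exposes the columns shown in the header: id, text, dataset_id.
    print(row["id"], row["dataset_id"], len(row["text"]))
    break
```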
1671111 | <gh_stars>1-10
from flask import Flask
from flask import render_template
from flask import request
from flask import url_for
from flask import abort
from flask import has_request_context, request
from flask.logging import default_handler
import logging
class RequestFormatter(logging.Formatter):
def format(self, record):
if has_request_context():
record.url = request.url
record.remote_addr = request.remote_addr
else:
record.url = None
record.remote_addr = None
msg = super().format(record)
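# Redact any token query parameter so credentials never reach the log output.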
msg = re.sub(r'token=[^&]+', 'token=<private>', msg)
return msg
formatter = RequestFormatter(
'%(message)s'
#'[%(asctime)s] %(remote_addr)s requested %(url)s\n'
#'%(levelname)s in %(module)s: %(message)s'
)
default_handler.setFormatter(formatter)
root = logging.getLogger()
root.addHandler(default_handler)
import json
import os
import re
import datetime
import html
import nightscout_to_json
DEBUG = (os.getenv('FLASK_ENV', 'development') == 'development')
CACHE = {}
app = Flask(__name__)
app.logger.addHandler(default_handler)
class InvalidAPIUsage(Exception):
def __init__(self, message, status_code=500, payload=None):
super().__init__()
self.message = message
self.status_code = status_code
self.payload = payload
@app.errorhandler(500)
def invalid_api_usage(e):
return '<p>' + str(e) + '</p>', 500
@app.errorhandler(InvalidAPIUsage)
def invalid_api_usage_exception(e):
return render_template('error.html',
message=e.message,
status_code=e.status_code,
request_url=html.escape(request.url)
), e.status_code
@app.route("/")
def index():
p = {
'stats_url': url_for('stats', url='URL'),
'all_url': url_for('all_data', url='URL')
}
return render_template('index.html', **p)
def get_data(url, request):
start = request.args.get('start', None)
end = request.args.get('end', None)
token = request.args.get('token', None)
api_secret = request.headers.get('api-secret', None)
try:
days = int(request.args.get('days', 7))
except ValueError:
raise InvalidAPIUsage('days needs to be a positive integer.')
raw = bool(request.args.get('raw', False))
if not days or days < 1 or days > 90:
raise InvalidAPIUsage('days must be a positive integer no greater than 90.')
url = url.lower()
if not re.match(r'^[0-9a-z\-.]+$', url):
raise InvalidAPIUsage('URL malformed: omit the scheme, https:// is prepended automatically.')
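# Responses are cached per (url, start, end, days, raw) and reused for up to one hour.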
cache_key = (url, start, end, days, raw)
cache_contents = CACHE.get(cache_key, None)
data = None
if cache_contents:
data = cache_contents['data']
new = cache_contents['raw']
delta = datetime.datetime.now() - cache_contents['date']
if delta > datetime.timedelta(hours=1):
logging.info('Cache too old: %s', delta)
data = None
else:
logging.info('Using cached content from %s', cache_contents['date'])
data['cached'] = True
if not data:
url = 'https://' + url
resp = ""
try:
ret, new, log = nightscout_to_json.run(url, start=start, end=end, days=days, cache=False,
token=token, hashed_secret=api_secret)
except nightscout_to_json.DownloadError as e:
logging.warning('Failed to contact upstream %s: %s' % (url, str(e)))
raise InvalidAPIUsage('failed to get data from Nightscout instance: ' + e.args[1], 504)
except Exception as e:
logging.warning('Error of type %s: %s' % (type(e), e))
logging.exception(e)
if DEBUG or request.args.get('debug', 0):
raise e
else:
raise InvalidAPIUsage('failed to process data from Nightscout instance.', 504)
for l in log:
logging.info(' Debug: %s', l)
data = nightscout_to_json.stats(new)
data['url'] = url
data['generated'] = datetime.datetime.now().isoformat()
CACHE[cache_key] = {'date': datetime.datetime.now(), 'data': data, 'raw': new}
return data, new
@app.route("/<url>/stats.json")
def stats(url):
ret = get_data(url, request)
if type(ret) == str:
return ret
data, new = ret
return app.response_class(
response=json.dumps(data, indent=4),
status=200,
mimetype='application/json'
)
@app.route("/<url>/marc.json")
def marc(url):
ret = get_data(url, request)
if type(ret) == str:
return ret
data, new = ret
daily = data['overall']['daily_average']
data = {
'tdd': daily['insulin'],
'basal': daily['prog_basal'],
'carbs': daily['carbs'],
'url': data['url'],
'generated': data['generated']
}
return app.response_class(
response=json.dumps(data, indent=4),
status=200,
mimetype='application/json'
)
@app.route("/<url>/<part>.csv")
def daily_csv(url, part):
ret = get_data(url, request)
if type(ret) == str:
return ret
data, new = ret
s = []
if part == 'daily_average':
for k, v in data['overall']['daily_average'].items():
s.append('"%s",%.1f' % (k, v))
else:
abort(404)
return app.response_class(
response='\n'.join(s),
status=200,
mimetype='text/plain'
)
@app.route("/<url>/all.json")
def all_data(url):
ret = get_data(url, request)
if type(ret) == str:
return ret
data, new = ret
data['all'] = new
return app.response_class(
response=json.dumps(data, indent=4),
status=200,
mimetype='application/json'
)
if __name__ == '__main__':
app.run(debug=DEBUG, host='0.0.0.0')
| StarcoderdataPython |
148056 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: app/views/miniprogram_api.py
import os
import sys
basedir = os.path.join(os.path.dirname(__file__),"..") # the root directory is the app package
cachedir = os.path.join(basedir,"cache")
textdir = os.path.join(basedir,"text")
import re
from collections import OrderedDict
import simplejson as json
from lxml import etree
from pypinyin import lazy_pinyin
from ..lib.utilfuncs import dictToESC, get_secret
from ..lib.utilclass import Logger, Encipher
from ..lib.wxapi import jscode2session
from ..lib.tfidf import TFIDF
from ..lib.minipgm_api.db import UserDB, NewsDB, ReporterDB
from ..lib.minipgm_api.error import *
from ..lib.minipgm_api.util import *
from flask import Blueprint, request, session, abort, safe_join #, redirect, url_for,
logger = Logger("api")
userDB = UserDB()
rptDB = ReporterDB()
encipher = Encipher(get_secret("flask_secret_key.pkl"))
tfidf = TFIDF().init_for_match()
path_prefix = "/pkuyouth/miniprogram/api"
miniprogram_api = Blueprint('miniprogram_api', __name__, url_prefix=path_prefix, \
root_path=os.path.abspath(basedir), static_folder='static', static_url_path='/static')
Aliyun_Image_Prefix = "https://rabbitzxh.top" + path_prefix + "/static/image"
Qiniu_Image_Prefix = 'https://qiniu.rabbitzxh.top/pkuyouth'
config = {
"prefix": {
"avatar": Aliyun_Image_Prefix + "/reporter_avatar/",
"column": Qiniu_Image_Prefix + "/column_cover/",
"sm_cover": Qiniu_Image_Prefix + "/sm_cover/",
"bg_cover": Qiniu_Image_Prefix + "/bg_cover/"
},
"app_info": {
"name": "北大青年",
"version": "1.0.0",
},
"qr_code": {
"recruit": Aliyun_Image_Prefix + "/qr_code/qrcode_recruit.png"
}
}
index_col_desc = [
{
"id": 0,
"cover": Qiniu_Image_Prefix + '/bg_cover/26508266021.jpeg',
"title": '随便看看',
"desc": '随意翻翻北青的文章',
"path": '/pages/collection-random/collection-random',
}, {
"id": 1,
"cover": Qiniu_Image_Prefix + '/bg_cover/26508283011.jpeg',
"title": '热文排行',
"desc": '看看那些阅读量最高的文章',
"path": '/pages/collection-hot/collection-hot',
}, {
"id": 2,
"cover": Qiniu_Image_Prefix + '/bg_cover/26508251861.jpeg',
"title": '还有更多',
"desc": '主编们正在努力整理 ...',
"path": '',
},
]
columns = OrderedDict({
"调查": "只做好一件事——刨根问底",
"人物": "今天载了位了不得的人物",
"特稿": "不停留在表面",
"视界": "一览众山小",
"光阴": "不忘初心,继续前进",
"姿势": "干货、湿货、杂货,老司机带你涨姿势",
"言己": "说出你的故事",
"又见": "如果在异乡,一个旅人",
"雕龙": "操千曲而后晓声,观千剑而后识器",
"评论": "条条大路,众生喧哗",
"摄影": "我为了把你拍得更漂亮嘛~",
"图说": "边走边看",
"机动": "说走就走,想停就停;可以跑高速,亦可钻胡同",
"现场": "一车载你直达热点",
"对话": "听见你的声音",
"纪念": "为了未来,收藏过去",
"节日": "今天应该很高兴",
"新年献词": "新时代,新青年",
# "翻译": "null",
})
@miniprogram_api.route('/', methods=["GET","POST"])
def root():
return "api root !"
@miniprogram_api.route('/static/image/<path:image_path>', methods=['GET'])
def image(image_path):
return miniprogram_api.send_static_file(safe_join('image/miniprogram_api', image_path))
@miniprogram_api.route("/login", methods=["POST"])
@verify_timestamp
@verify_signature
def login():
try:
js_code = request.json.get("js_code",None)
session_key, openid = jscode2session(js_code)
session["openid"] = openid
session["session_key"] = session_key
userDB.register(openid)
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
else:
jsonPack = {
"errcode": 0,
"b": encipher.get_token(openid), # token
"config": config,
"setting": userDB.get_setting(openid),
}
finally:
return json.dumps(jsonPack)
@miniprogram_api.route("/get_col_desc", methods=["GET"])
@verify_timestamp
@verify_signature
@verify_login
def get_col_desc():
return json.dumps({"errcode": 0, "col_desc": index_col_desc})
@miniprogram_api.route("/get_col_random", methods=["POST"])
@verify_timestamp
@verify_signature
@verify_login
def get_col_random():
try:
count = int_param('count', request.json.get("count"), maxi=10)
newsDB = NewsDB()
newsInfo = newsDB.get_random_news(count)
newsCol = userDB.get_newsCol(session["openid"])
for news in newsInfo:
news.update({"star": news["newsID"] in newsCol})
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, "news": newsInfo}
finally:
newsDB.close()
return json.dumps(jsonPack)
@miniprogram_api.route("/get_latest_news", methods=["POST"])
@verify_timestamp
@verify_signature
@verify_login
def get_latest_news():
try:
count = int_param('count', request.json.get("count"), maxi=10)
newsDB = NewsDB()
newsInfo = newsDB.get_latest_news(count)
newsCol = userDB.get_newsCol(session["openid"])
for news in newsInfo:
news.update({"star": news["newsID"] in newsCol})
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, "news": newsInfo}
finally:
newsDB.close()
return json.dumps(jsonPack)
@miniprogram_api.route("/get_col_hot", methods=["POST"])
@verify_timestamp
@verify_signature
@verify_login
def get_col_hot():
try:
reqData = request.json
limit = int_param('limit', reqData.get("limit"), maxi=10)
page = int_param('page', reqData.get("page"))
newsDB = NewsDB()
newsInfo = newsDB.get_hot_news()
newsInfo = newsInfo[(page-1)*limit: page*limit]
newsCol = userDB.get_newsCol(session["openid"])
for news in newsInfo:
news.update({"star": news["newsID"] in newsCol})
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, "news": newsInfo}
finally:
newsDB.close()
return json.dumps(jsonPack)
@miniprogram_api.route("/get_column_list", methods=["GET"])
@verify_timestamp
@verify_signature
@verify_login
def get_column_list():
try:
newsDB = NewsDB()
newsCount = [item for item in newsDB.group_count("newsDetail","column") if item["column"] in columns]
newsCountDict = {item["column"]:item["count"] for item in newsCount}
columnsInfo = [{
"id": idx,
"title": title,
"desc": desc,
"cover": "%s.jpg" % "".join(lazy_pinyin(title)),
"newsCount": newsCountDict[title]
} for idx, (title, desc) in enumerate(columns.items())]
# columnsInfo.sort(key=lambda column: lazy_pinyin(column["title"]))
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, "columns": columnsInfo}
finally:
newsDB.close()
return json.dumps(jsonPack)
@miniprogram_api.route("/get_column_news", methods=["POST"])
@verify_timestamp
@verify_signature
@verify_login
def get_column_news():
try:
reqData = request.json
column = limited_param("column", reqData.get("column"), columns)
limit = int_param('limit', reqData.get("limit"), maxi=10)
page = int_param('page', reqData.get("page"), mini=0)
newsDB = NewsDB()
newsInfo = newsDB.get_column_news(column)
if page > 0:
newsInfo = newsInfo[(page-1)*limit: page*limit]
newsCol = userDB.get_newsCol(session["openid"])
for news in newsInfo:
news.update({"star": news["newsID"] in newsCol})
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, "news": newsInfo}
finally:
newsDB.close()
return json.dumps(jsonPack)
@miniprogram_api.route("/search_reporter", methods=["POST"])
@verify_timestamp
@verify_signature
@verify_login
def search_reporter():
try:
newsDB = NewsDB()
reqData = request.json
name = str_param('name', reqData.get("name")).strip() # strip surrounding whitespace
for char in r'.+*?^$|-#><=(){}[]\\': # filter out regex metacharacters
if char in name:
raise re.error('include illegal char')
if name == '':
raise re.error('no name')
regex = re.compile("|".join(name.split())) # join multiple name fragments with |
rpts = [rpt for rpt in rptDB.get_names() if regex.search(rpt) is not None]
rptsInfo = [rptDB.get_rpt(rpt,keys=("name","avatar","news")) for rpt in rpts]
for rpt in rptsInfo:
rpt["newsCount"] = len(rpt.pop("news"))
rptsInfo.sort(key=lambda rpt: rpt["newsCount"], reverse=True)
except re.error as err:
jsonPack = {"errcode": 0, "reporters": [], "error": repr(err)}
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, "reporters": rptsInfo}
finally:
newsDB.close()
return json.dumps(jsonPack)
@miniprogram_api.route("/get_reporter_news", methods=["POST"])
@verify_timestamp
@verify_signature
@verify_login
def get_reporter_news():
try:
newsDB = NewsDB()
reqData = request.json
name = limited_param("name", reqData.get("name"), rptDB.get_names())
limit = int_param('limit', reqData.get("limit"), maxi=10)
page = int_param('page', reqData.get("page"), mini=0)
newsInfo = newsDB.get_news_by_ID(rptDB.get_rpt(name)['news'])
newsCol = userDB.get_newsCol(session["openid"])
for news in newsInfo:
news.update({
"star": news["newsID"] in newsCol,
})
if page > 0:
newsInfo = newsInfo[(page-1)*limit: page*limit]
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, "news": newsInfo}
finally:
newsDB.close()
return json.dumps(jsonPack)
@miniprogram_api.route("/get_favorite", methods=["POST"])
@verify_timestamp
@verify_signature
@verify_login
def get_favorite():
try:
reqData = request.json
limit = int_param('limit', reqData.get("limit"), maxi=10)
page = int_param('page', reqData.get("page"), mini=0) # zero is allowed
newsDB = NewsDB()
newsCol = userDB.get_newsCol(session["openid"],withTime=True)
newsInfo = newsDB.get_news_by_ID(list(newsCol.keys()))
for news in newsInfo:
news.update({
"star": True,
"starTime": newsCol[news["newsID"]]
})
newsInfo.sort(key=lambda news: news["starTime"], reverse=True)
if page > 0: # page = 0 returns everything
newsInfo = newsInfo[(page-1)*limit: page*limit]
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, "news": newsInfo}
finally:
newsDB.close()
return json.dumps(jsonPack)
@miniprogram_api.route("/star_news", methods=["POST"])
@verify_timestamp
@verify_signature
@verify_login
def star_news():
try:
newsDB = NewsDB()
reqData = request.json
action = limited_param('action', reqData.get("action"), ["star","unstar"])
newsID = limited_param('newsID', reqData.get("newsID"), newsDB.get_newsIDs())
actionTime = int_param('actionTime', reqData.get("actionTime"), mini=None)
userDB.update_newsCol(session["openid"], newsID, action, actionTime)
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, }
finally:
newsDB.close()
return json.dumps(jsonPack)
@miniprogram_api.route("/search_by_keyword", methods=["POST"])
@verify_timestamp
@verify_signature
@verify_login
def search_by_keyword():
try:
newsDB = NewsDB()
reqData = request.json
keyword = str_param('keyword', reqData.get("keyword"))
limit = int_param('limit', reqData.get("limit"), maxi=10)
page = int_param('page', reqData.get("page"))
newsRange = reqData.get("range")
if newsRange is None:
raise KeyError("param 'range' is missing !")
elif newsRange == 'all':
newsIDs = []
elif newsRange == 'favorite':
newsIDs = userDB.get_newsCol(session["openid"])
elif newsRange in columns:
newsIDs = newsDB.get_column_newsIDs(newsRange)
elif newsRange in rptDB.get_names():
newsIDs = [news["newsID"] for news in rptDB.get_rpt(newsRange)["news"]]
else:
raise KeyError("unexpected value of 'range' -- %s !" % newsRange)
newsInfo = newsDB.search_by_keyword(keyword, limit=limit*page, newsIDs=newsIDs)
newsInfo = newsInfo[(page-1)*limit: page*limit]
newsCol = userDB.get_newsCol(session["openid"])
for news in newsInfo:
news.update({"star": news["newsID"] in newsCol})
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, "news": newsInfo}
finally:
newsDB.close()
return json.dumps(jsonPack)
@miniprogram_api.route("/get_date_range",methods=["GET"])
@verify_timestamp
@verify_signature
@verify_login
def get_date_range():
try:
newsDB = NewsDB()
dateRange = newsDB.get_date_range()
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, "range": dateRange}
finally:
newsDB.close()
return json.dumps(jsonPack)
@miniprogram_api.route("/search_by_time",methods=["POST"])
@verify_timestamp
@verify_signature
@verify_login
def search_by_time():
try:
newsDB = NewsDB()
reqData = request.json
method = limited_param("method", reqData.get("method"), ["date","month"])
date = str_param("date", reqData.get("date"))
newsInfo = newsDB.search_by_time(date, method)
newsCol = userDB.get_newsCol(session["openid"])
for news in newsInfo:
news.update({"star": news["newsID"] in newsCol})
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, "news": newsInfo}
finally:
newsDB.close()
return json.dumps(jsonPack)
@miniprogram_api.route("/recommend", methods=["POST"])
@verify_timestamp
@verify_signature
@verify_login
def recommend():
try:
newsDB = NewsDB()
reqData = request.json
limit = int_param('limit', reqData.get("limit"), maxi=10)
newsID = limited_param('newsID', reqData.get("newsID"), newsDB.get_newsIDs())
Tcs = tfidf.match(newsID, limit)
newsInfo = newsDB.get_news_by_ID(list(Tcs.keys()))
newsCol = userDB.get_newsCol(session["openid"])
for news in newsInfo:
newsID = news["newsID"]
news.update({"star": newsID in newsCol, "rank": Tcs[newsID]})
newsInfo.sort(key=lambda news: news["rank"], reverse=True)
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, "news": newsInfo}
finally:
newsDB.close()
return json.dumps(jsonPack)
@miniprogram_api.route("/get_update_log", methods=["GET"])
@verify_timestamp
@verify_signature
@verify_login
def get_update_log():
try:
tree = etree.parse(os.path.join(textdir,'miniprogram_update_log.xml'))
logJson = [{
"number": version.get('number'),
"time": version.get('time'),
"content": [{
"row": idx + 1,
"text": p.text,
"strong": "strong" in p.attrib,
} for idx, p in enumerate(version.findall('p'))],
} for version in tree.getroot().findall('version')]
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, "log": logJson}
finally:
return json.dumps(jsonPack)
@miniprogram_api.route("/change_setting", methods=["POST"])
@verify_timestamp
@verify_signature
@verify_login
def change_setting():
try:
reqData = request.json
key = limited_param('key', reqData.get('key'), ['auto_change_card','use_small_card'])
value = limited_param('value', reqData.get('value'), [True, False])
userDB.update_setting(session['openid'],key,value)
except Exception as err:
jsonPack = {"errcode": -1, "error": repr(err)}
raise err
else:
jsonPack = {"errcode": 0, }
finally:
return json.dumps(jsonPack)
| StarcoderdataPython |
97466 | from . import config
import networkx as nx
import torch
import torch.nn
import torch_geometric as tg
import torch_geometric.data
import torch_geometric.utils
from tqdm.auto import tqdm
def batch_of_dataset(dataset):
loader = tg.data.DataLoader(dataset, batch_size=len(dataset))
for g, h, lb, ub in loader:
g.batch = g.batch.to(config.device)
h.batch = h.batch.to(config.device)
return g, h, lb, ub
def to_pyged(g):
return (torch.argmax(g.x, dim=1).tolist(), list(zip(*g.edge_index.tolist())))
def label_graphs(graphs, num_classes=None):
tqdm.write('move labels to node attrs')
ret = []
if num_classes is None:
num_classes = 1
if(graphs[0].y is not None):
num_classes = max([max(g['y']) for g in graphs]) + 1
for g in tqdm(graphs, desc='graphs'):
if g['y'] is None:
g['y'] = torch.zeros(g.num_nodes, dtype=torch.long)
g.x = torch.nn.functional.one_hot(g.y, num_classes).float()
del g.y
ret.append(g)
return ret
def remove_extra_attrs(graphs):
tqdm.write('remove extra attrs')
ret = []
for g in tqdm(graphs, desc='graphs'):
for k in g.keys:
if k not in ['x', 'edge_index']:
g[k] = None
ret.append(g)
return ret
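# The two helpers below map (sub)graph edit distances to similarity scores in (0, 1] via an
# exponential kernel; gs and hs are assumed to be the corresponding graph sizes.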
def similarity_of_sed(sed, gs):
sed, gs = (x.to(config.device) for x in (sed, gs))
return torch.exp(-sed/gs)
def similarity_of_ged(ged, gs, hs):
ged, gs, hs = (x.to(config.device) for x in (ged, gs, hs))
return torch.exp(-2*ged/(gs+hs))
def is_connected(g):
return nx.is_connected(tg.utils.to_networkx(g, to_undirected=True))
def confusion_matrix(pred, gt, n_class):
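# Builds one n_class x n_class confusion matrix per sample: rows index ground-truth classes, columns index predictions.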
confusion_matrix = torch.zeros(pred.shape[0], n_class, n_class)
for i in range(pred.shape[0]):
for t, p in zip(gt[i].view(-1), pred[i].view(-1)):
confusion_matrix[i, t.long(), p.long()] += 1
return confusion_matrix
def is_notebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
def nanmean(v, *args, inplace=False, **kwargs):
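# Mean that ignores NaN entries: NaNs are zeroed and excluded from the denominator.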
if not inplace:
v = v.clone()
is_nan = torch.isnan(v)
v[is_nan] = 0
return v.sum(*args, **kwargs) / (~is_nan).float().sum(*args, **kwargs)
def norm_sed_func(gx, hx):
gx, hx = (x.to(config.device) for x in (gx, hx))
return torch.norm(torch.nn.functional.relu(gx-hx), dim=-1)
def norm_ged_func(gx, hx):
gx, hx = (x.to(config.device) for x in (gx, hx))
return torch.norm(gx-hx, dim=-1)
def load_nbrs(inpath, total, num_classes=None):
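# Each neighborhood is stored as three whitespace-separated lines: node labels, then the source
# row and target row of edge_index (format inferred from the reads below).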
ret = []
with open(inpath, 'r') as infile:
for _ in tqdm(range(total), desc='neighborhoods', unit_scale=True, unit=''):
y = torch.LongTensor([int(x) for x in infile.readline().split()])
e0 = [int(x) for x in infile.readline().split()]
e1 = [int(x) for x in infile.readline().split()]
edge_index = torch.LongTensor([e0,e1])
ret.append(tg.data.Data(edge_index=edge_index, y=y))
return label_graphs(ret, num_classes)
| StarcoderdataPython |
1668529 | <reponame>krokce/rapo
"""Contains application logger."""
import pepperoni
from .config import config
logger = pepperoni.logger(file=True)
logger.configure(format='{isodate}\t{thread}\t{rectype}\t{message}\n')
if config.has_section('LOGGING'):
LOGGING = config['LOGGING']
logger.configure(console=LOGGING.getboolean('console'),
file=LOGGING.getboolean('file'),
info=LOGGING.getboolean('info'),
debug=LOGGING.getboolean('debug'),
error=LOGGING.getboolean('error'),
warning=LOGGING.getboolean('warning'),
critical=LOGGING.getboolean('critical'),
maxsize=LOGGING.getint('maxsize'),
maxdays=LOGGING.getint('maxdays'))
| StarcoderdataPython |
1635574 | # Copyright 2016 - Wipro Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pecan App definition for eventlet based wsgi server"""
from galaxia.gapi import config as gapi_config
from oslo_config import cfg
import pecan
from eventlet.corolocal import local
import sys
# Register options for the api service
API_SERVICE_OPTS = [
cfg.IntOpt('port',
default=7777,
help='The port for the API server'),
cfg.StrOpt('host',
default='127.0.0.1',
help='The listen IP for the API server')
]
CONF = cfg.CONF
opt_group = cfg.OptGroup(name='gapi', title='Options for the api service')
CONF.register_group(opt_group)
CONF.register_opts(API_SERVICE_OPTS, opt_group)
def get_pecan_config():
# Set up the pecan configuration
filename = gapi_config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def setup_app(config=None):
if not config:
config = get_pecan_config()
app_conf = dict(config.app)
app = pecan.make_app(
app_conf.pop('root'),
force_canonical=False,
logging=getattr(config, 'logging', {}),
context_local_factory=local,
**app_conf
)
return app
| StarcoderdataPython |
20456 | <reponame>frc1678/server-2021-public
#!/usr/bin/env python3
# Copyright (c) 2019 FRC Team 1678: Citrus Circuits
import pytest
import numpy as np
import os, sys
current_directory = os.path.dirname(os.path.realpath(__file__))
parent_directory = os.path.dirname(current_directory)
grandparent_directory = os.path.dirname(parent_directory)
sys.path.append(grandparent_directory)
from calculations import inner_goals_regression
def test_dimension_mismatch_error():
b = np.array([[1, 6, 7, 8]]).T
A = np.array([[2, 5, 4], [6, 9, 4], [9, 7, 3]])
with pytest.raises(ValueError):
inner_goals_regression.least_squares(A, b)
def test_singular_matrix_error():
b = np.array([[1, 6, 7, 8]]).T
A = np.array([[2, 5, 4], [6, 9, 4], [0, 0, 0], [0, 0, 0]])
with pytest.raises(ValueError):
inner_goals_regression.least_squares(A, b)
def test_no_cap():
b = np.array([[0, 1, 2]]).T
A = np.array([[0, 0], [1, 0], [1, -1]])
expected_result = np.array([[1], [-1]])
assert (expected_result == inner_goals_regression.least_squares(A, b)).all()
def test_capped():
b = np.array([[0, 1, 2]]).T
A = np.array([[0, 0], [1, 0], [1, -1]])
actual_result = inner_goals_regression.least_squares(A, b, cap_0_to_1=True)
expected_result = np.array([[1], [0]])
assert (abs(actual_result - expected_result) < 0.01).all()
def test_monte_carlo_accuracy():
b = np.array([[16, 78, 10]]).T
A = np.array([[5, 1], [25, 3], [3, 1.001]])
actual_result = inner_goals_regression.least_squares(A, b, cap_0_to_1=True)
expected_result = np.array([[1], [1]])
assert (abs(actual_result - expected_result) < 0.01).all()
| StarcoderdataPython |
1704012 | <filename>mon-put-rq-stats.py
#!/usr/bin/env python
"""
Usage: mon-put-rq-stats.py [--url REDIS_URL] [--env ENV] [--region REGION]
[--pid PIDFILE] [--interval INTERVAL]
[--debug] [--no-cloudwatch]
mon-put-rq-stats.py -h | --help
mon-put-rq-stats.py -v | --version
Report RQ stats to AWS CloudWatch
Options:
-h --help show help and exit
-v --version show version and exit
--no-cloudwatch do not report stats to AWS CloudWatch, mostly for
debugging. Use "rq info" utility that comes with RQ
if you just want to see RQ info at a glance.
--debug log debug messages, including AWS API calls
Arguments:
--url=REDIS_URL redis URL [default: redis://localhost:6379]
--env=ENV environment name to report on [default: dev]
--region=REGION AWS CloudWatch region to use [default: us-east-1]
--pid=PIDFILE file to write PID to, default is to not write PID
--interval=INTERVAL If supplied report data every INTERVAL seconds. If
not supplied report data once and exit.
"""
import docopt
import os
import sys
from time import sleep
import logging
from boto.ec2 import cloudwatch
from redis import StrictRedis
from rq import Queue, Worker
def put_data(args, log):
"Get RQ data and send to CloudWatch"
log.info('put_data()')
cw = cloudwatch.connect_to_region(args['--region'])
def put_metrics(metrics, dimensions):
dimensions['env'] = args['--env']
log.info('{} --> {}'.format(dimensions, metrics))
if not args['--no-cloudwatch']:
cw.put_metric_data('RQ',
list(metrics.keys()),
list(metrics.values()),
unit='Count', dimensions=dimensions)
try:
redis = StrictRedis.from_url(args['--url'])
redis.ping()
except Exception as e:
log.error('Unable to connect to redis: {}'.format(e))
return
# group workers by queue
workers_by_queue = {}
for w in Worker.all(connection=redis):
for q in w.queues:
ws = workers_by_queue.get(q, [])
ws.append(w)
workers_by_queue[q] = ws
for q in workers_by_queue:
# report queue level rollup
put_metrics({'jobs': len(q), 'workers': len(workers_by_queue[q])},
{'queue': q.name})
# report workers for each queue in each worker state
states = {}
for w in workers_by_queue[q]:
count = states.get(w.state, 0) + 1
states[w.state] = count
for state in states:
put_metrics({'workers': states[state]},
{
'queue': q.name,
'state': state.decode(),
})
if __name__ == '__main__':
args = docopt.docopt(__doc__, version='0.0.2')
log_level = logging.DEBUG if args['--debug'] else logging.INFO
log_format = '%(asctime)-15s %(levelname)s %(message)s'
logging.basicConfig(level=log_level, format=log_format)
log = logging.getLogger('rq-cloudwatch')
if args.get('--pid') is not None:
with open(os.path.expanduser(args['--pid']), "w") as f:
f.write(str(os.getpid()))
log.info('starting {}'.format(args))
interval = args.get('--interval')
if interval is None:
put_data(args, log)
else:
interval = float(interval)
while True:
put_data(args, log)
sleep(interval)
| StarcoderdataPython |
3376493 | import os
import importlib_metadata
def get_source_version():
d = dict(MAJOR='6', MINOR='0', MICRO='0', EXTRA='none')
here = os.path.abspath(os.path.dirname(__file__))
try:
f = open(os.path.join(here, '..', '..', 'CMakeLists.txt'))
except FileNotFoundError:
return None
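# CMakeLists.txt is expected to contain lines such as set(MAPD_VERSION_MAJOR "6");
# this format is inferred from the parsing below.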
for line in f.readlines():
if line.lstrip().startswith('set(MAPD_VERSION_'):
k = line.split()[0].rsplit('_', 1)[-1]
n = line.split('"')[1]
d[k] = n
return '{MAJOR}.{MINOR}.{MICRO}{EXTRA}'.format(**d)
def get_package_version():
try:
return importlib_metadata.version(__name__)
except importlib_metadata.PackageNotFoundError:
# package is not installed
return get_source_version()
| StarcoderdataPython |
194372 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._phone_numbers_client_enums import *
class AcquiredPhoneNumber(msrest.serialization.Model):
"""Represents an acquired phone number.
All required parameters must be populated in order to send to Azure.
:param id: Required. The id of the phone number, e.g. 11234567890.
:type id: str
:param phone_number: Required. String of the E.164 format of the phone number, e.g.
+11234567890.
:type phone_number: str
:param country_code: Required. The ISO 3166-2 code of the phone number's country, e.g. US.
:type country_code: str
:param phone_number_type: Required. The phone number's type, e.g. Geographic, TollFree.
Possible values include: "geographic", "tollFree".
:type phone_number_type: str or ~azure.communication.phonenumbers.models.PhoneNumberType
:param capabilities: Required. Capabilities of a phone number.
:type capabilities: ~azure.communication.phonenumbers.models.PhoneNumberCapabilities
:param assignment_type: Required. The assignment type of the phone number. A phone number can
be assigned to a person, or to an application. Possible values include: "person",
"application".
:type assignment_type: str or
~azure.communication.phonenumbers.models.PhoneNumberAssignmentType
:param purchase_date: Required. The date and time that the phone number was purchased.
:type purchase_date: ~datetime.datetime
:param cost: Required. The incurred cost for a single phone number.
:type cost: ~azure.communication.phonenumbers.models.PhoneNumberCost
"""
_validation = {
'id': {'required': True},
'phone_number': {'required': True},
'country_code': {'required': True},
'phone_number_type': {'required': True},
'capabilities': {'required': True},
'assignment_type': {'required': True},
'purchase_date': {'required': True},
'cost': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'phone_number': {'key': 'phoneNumber', 'type': 'str'},
'country_code': {'key': 'countryCode', 'type': 'str'},
'phone_number_type': {'key': 'phoneNumberType', 'type': 'str'},
'capabilities': {'key': 'capabilities', 'type': 'PhoneNumberCapabilities'},
'assignment_type': {'key': 'assignmentType', 'type': 'str'},
'purchase_date': {'key': 'purchaseDate', 'type': 'iso-8601'},
'cost': {'key': 'cost', 'type': 'PhoneNumberCost'},
}
def __init__(
self,
*,
id: str,
phone_number: str,
country_code: str,
phone_number_type: Union[str, "PhoneNumberType"],
capabilities: "PhoneNumberCapabilities",
assignment_type: Union[str, "PhoneNumberAssignmentType"],
purchase_date: datetime.datetime,
cost: "PhoneNumberCost",
**kwargs
):
super(AcquiredPhoneNumber, self).__init__(**kwargs)
self.id = id
self.phone_number = phone_number
self.country_code = country_code
self.phone_number_type = phone_number_type
self.capabilities = capabilities
self.assignment_type = assignment_type
self.purchase_date = purchase_date
self.cost = cost
class AcquiredPhoneNumbers(msrest.serialization.Model):
"""The list of acquired phone numbers.
All required parameters must be populated in order to send to Azure.
:param phone_numbers: Required. Represents a list of phone numbers.
:type phone_numbers: list[~azure.communication.phonenumbers.models.AcquiredPhoneNumber]
:param next_link: Represents the URL link to the next page of phone number results.
:type next_link: str
"""
_validation = {
'phone_numbers': {'required': True},
}
_attribute_map = {
'phone_numbers': {'key': 'phoneNumbers', 'type': '[AcquiredPhoneNumber]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
phone_numbers: List["AcquiredPhoneNumber"],
next_link: Optional[str] = None,
**kwargs
):
super(AcquiredPhoneNumbers, self).__init__(**kwargs)
self.phone_numbers = phone_numbers
self.next_link = next_link
class CommunicationError(msrest.serialization.Model):
"""The Communication Services error.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param code: Required. The error code.
:type code: str
:param message: Required. The error message.
:type message: str
:ivar target: The error target.
:vartype target: str
:ivar details: Further details about specific errors that led to this error.
:vartype details: list[~azure.communication.phonenumbers.models.CommunicationError]
:ivar inner_error: The inner error if any.
:vartype inner_error: ~azure.communication.phonenumbers.models.CommunicationError
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
'target': {'readonly': True},
'details': {'readonly': True},
'inner_error': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CommunicationError]'},
'inner_error': {'key': 'innererror', 'type': 'CommunicationError'},
}
def __init__(
self,
*,
code: str,
message: str,
**kwargs
):
super(CommunicationError, self).__init__(**kwargs)
self.code = code
self.message = message
self.target = None
self.details = None
self.inner_error = None
class CommunicationErrorResponse(msrest.serialization.Model):
"""The Communication Services error.
All required parameters must be populated in order to send to Azure.
:param error: Required. The Communication Services error.
:type error: ~azure.communication.phonenumbers.models.CommunicationError
"""
_validation = {
'error': {'required': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'CommunicationError'},
}
def __init__(
self,
*,
error: "CommunicationError",
**kwargs
):
super(CommunicationErrorResponse, self).__init__(**kwargs)
self.error = error
class PhoneNumberCapabilities(msrest.serialization.Model):
"""Capabilities of a phone number.
All required parameters must be populated in order to send to Azure.
:param calling: Required. Capability value for calling. Possible values include: "none",
"inbound", "outbound", "inbound+outbound".
:type calling: str or ~azure.communication.phonenumbers.models.PhoneNumberCapabilityType
:param sms: Required. Capability value for SMS. Possible values include: "none", "inbound",
"outbound", "inbound+outbound".
:type sms: str or ~azure.communication.phonenumbers.models.PhoneNumberCapabilityType
"""
_validation = {
'calling': {'required': True},
'sms': {'required': True},
}
_attribute_map = {
'calling': {'key': 'calling', 'type': 'str'},
'sms': {'key': 'sms', 'type': 'str'},
}
def __init__(
self,
*,
calling: Union[str, "PhoneNumberCapabilityType"],
sms: Union[str, "PhoneNumberCapabilityType"],
**kwargs
):
super(PhoneNumberCapabilities, self).__init__(**kwargs)
self.calling = calling
self.sms = sms
class PhoneNumberCapabilitiesRequest(msrest.serialization.Model):
"""Capabilities of a phone number.
:param calling: Capability value for calling. Possible values include: "none", "inbound",
"outbound", "inbound+outbound".
:type calling: str or ~azure.communication.phonenumbers.models.PhoneNumberCapabilityType
:param sms: Capability value for SMS. Possible values include: "none", "inbound", "outbound",
"inbound+outbound".
:type sms: str or ~azure.communication.phonenumbers.models.PhoneNumberCapabilityType
"""
_attribute_map = {
'calling': {'key': 'calling', 'type': 'str'},
'sms': {'key': 'sms', 'type': 'str'},
}
def __init__(
self,
*,
calling: Optional[Union[str, "PhoneNumberCapabilityType"]] = None,
sms: Optional[Union[str, "PhoneNumberCapabilityType"]] = None,
**kwargs
):
super(PhoneNumberCapabilitiesRequest, self).__init__(**kwargs)
self.calling = calling
self.sms = sms
class PhoneNumberCost(msrest.serialization.Model):
"""The incurred cost for a single phone number.
All required parameters must be populated in order to send to Azure.
:param amount: Required. The cost amount.
:type amount: float
:param currency_code: Required. The ISO 4217 currency code for the cost amount, e.g. USD.
:type currency_code: str
:param billing_frequency: Required. The frequency with which the cost gets billed. Possible
values include: "monthly".
:type billing_frequency: str or ~azure.communication.phonenumbers.models.BillingFrequency
"""
_validation = {
'amount': {'required': True},
'currency_code': {'required': True},
'billing_frequency': {'required': True},
}
_attribute_map = {
'amount': {'key': 'amount', 'type': 'float'},
'currency_code': {'key': 'currencyCode', 'type': 'str'},
'billing_frequency': {'key': 'billingFrequency', 'type': 'str'},
}
def __init__(
self,
*,
amount: float,
currency_code: str,
billing_frequency: Union[str, "BillingFrequency"],
**kwargs
):
super(PhoneNumberCost, self).__init__(**kwargs)
self.amount = amount
self.currency_code = currency_code
self.billing_frequency = billing_frequency
class PhoneNumberOperation(msrest.serialization.Model):
"""Long running operation.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param status: Required. Status of operation. Possible values include: "notStarted", "running",
"succeeded", "failed".
:type status: str or ~azure.communication.phonenumbers.models.PhoneNumberOperationStatus
:param resource_location: URL for retrieving the result of the operation, if any.
:type resource_location: str
:param created_date_time: Required. The date that the operation was created.
:type created_date_time: ~datetime.datetime
:param error: The Communication Services error.
:type error: ~azure.communication.phonenumbers.models.CommunicationError
:param id: Required. Id of operation.
:type id: str
:param operation_type: Required. The type of operation, e.g. Search. Possible values include:
"purchase", "releasePhoneNumber", "search", "updatePhoneNumberCapabilities".
:type operation_type: str or ~azure.communication.phonenumbers.models.PhoneNumberOperationType
:ivar last_action_date_time: The most recent date that the operation was changed.
:vartype last_action_date_time: ~datetime.datetime
"""
_validation = {
'status': {'required': True},
'created_date_time': {'required': True},
'id': {'required': True},
'operation_type': {'required': True},
'last_action_date_time': {'readonly': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'resource_location': {'key': 'resourceLocation', 'type': 'str'},
'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
'error': {'key': 'error', 'type': 'CommunicationError'},
'id': {'key': 'id', 'type': 'str'},
'operation_type': {'key': 'operationType', 'type': 'str'},
'last_action_date_time': {'key': 'lastActionDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
*,
status: Union[str, "PhoneNumberOperationStatus"],
created_date_time: datetime.datetime,
id: str,
operation_type: Union[str, "PhoneNumberOperationType"],
resource_location: Optional[str] = None,
error: Optional["CommunicationError"] = None,
**kwargs
):
super(PhoneNumberOperation, self).__init__(**kwargs)
self.status = status
self.resource_location = resource_location
self.created_date_time = created_date_time
self.error = error
self.id = id
self.operation_type = operation_type
self.last_action_date_time = None
class PhoneNumberPurchaseRequest(msrest.serialization.Model):
"""The phone number search purchase request.
:param search_id: The search id.
:type search_id: str
"""
_attribute_map = {
'search_id': {'key': 'searchId', 'type': 'str'},
}
def __init__(
self,
*,
search_id: Optional[str] = None,
**kwargs
):
super(PhoneNumberPurchaseRequest, self).__init__(**kwargs)
self.search_id = search_id
class PhoneNumberSearchRequest(msrest.serialization.Model):
"""Represents a phone number search request to find phone numbers. Found phone numbers are temporarily held for a following purchase.
All required parameters must be populated in order to send to Azure.
:param phone_number_type: Required. The type of phone numbers to search for, e.g. geographic,
or tollFree. Possible values include: "geographic", "tollFree".
:type phone_number_type: str or ~azure.communication.phonenumbers.models.PhoneNumberType
:param assignment_type: Required. The assignment type of the phone numbers to search for. A
phone number can be assigned to a person, or to an application. Possible values include:
"person", "application".
:type assignment_type: str or
~azure.communication.phonenumbers.models.PhoneNumberAssignmentType
:param capabilities: Required. Capabilities of a phone number.
:type capabilities: ~azure.communication.phonenumbers.models.PhoneNumberCapabilities
:param area_code: The area code of the desired phone number, e.g. 425.
:type area_code: str
:param quantity: The quantity of desired phone numbers. The default value is 1.
:type quantity: int
"""
_validation = {
'phone_number_type': {'required': True},
'assignment_type': {'required': True},
'capabilities': {'required': True},
'quantity': {'maximum': 2147483647, 'minimum': 1},
}
_attribute_map = {
'phone_number_type': {'key': 'phoneNumberType', 'type': 'str'},
'assignment_type': {'key': 'assignmentType', 'type': 'str'},
'capabilities': {'key': 'capabilities', 'type': 'PhoneNumberCapabilities'},
'area_code': {'key': 'areaCode', 'type': 'str'},
'quantity': {'key': 'quantity', 'type': 'int'},
}
def __init__(
self,
*,
phone_number_type: Union[str, "PhoneNumberType"],
assignment_type: Union[str, "PhoneNumberAssignmentType"],
capabilities: "PhoneNumberCapabilities",
area_code: Optional[str] = None,
quantity: Optional[int] = 1,
**kwargs
):
super(PhoneNumberSearchRequest, self).__init__(**kwargs)
self.phone_number_type = phone_number_type
self.assignment_type = assignment_type
self.capabilities = capabilities
self.area_code = area_code
self.quantity = quantity
class PhoneNumberSearchResult(msrest.serialization.Model):
"""The result of a phone number search operation.
All required parameters must be populated in order to send to Azure.
:param search_id: Required. The search id.
:type search_id: str
:param phone_numbers: Required. The phone numbers that are available. Can be fewer than the
desired search quantity.
:type phone_numbers: list[str]
:param phone_number_type: Required. The phone number's type, e.g. geographic, or tollFree.
Possible values include: "geographic", "tollFree".
:type phone_number_type: str or ~azure.communication.phonenumbers.models.PhoneNumberType
:param assignment_type: Required. Phone number's assignment type. Possible values include:
"person", "application".
:type assignment_type: str or
~azure.communication.phonenumbers.models.PhoneNumberAssignmentType
:param capabilities: Required. Capabilities of a phone number.
:type capabilities: ~azure.communication.phonenumbers.models.PhoneNumberCapabilities
:param cost: Required. The incurred cost for a single phone number.
:type cost: ~azure.communication.phonenumbers.models.PhoneNumberCost
:param search_expires_by: Required. The date that this search result expires and phone numbers
are no longer on hold. A search result expires in less than 15min, e.g.
2020-11-19T16:31:49.048Z.
:type search_expires_by: ~datetime.datetime
"""
_validation = {
'search_id': {'required': True},
'phone_numbers': {'required': True},
'phone_number_type': {'required': True},
'assignment_type': {'required': True},
'capabilities': {'required': True},
'cost': {'required': True},
'search_expires_by': {'required': True},
}
_attribute_map = {
'search_id': {'key': 'searchId', 'type': 'str'},
'phone_numbers': {'key': 'phoneNumbers', 'type': '[str]'},
'phone_number_type': {'key': 'phoneNumberType', 'type': 'str'},
'assignment_type': {'key': 'assignmentType', 'type': 'str'},
'capabilities': {'key': 'capabilities', 'type': 'PhoneNumberCapabilities'},
'cost': {'key': 'cost', 'type': 'PhoneNumberCost'},
'search_expires_by': {'key': 'searchExpiresBy', 'type': 'iso-8601'},
}
def __init__(
self,
*,
search_id: str,
phone_numbers: List[str],
phone_number_type: Union[str, "PhoneNumberType"],
assignment_type: Union[str, "PhoneNumberAssignmentType"],
capabilities: "PhoneNumberCapabilities",
cost: "PhoneNumberCost",
search_expires_by: datetime.datetime,
**kwargs
):
super(PhoneNumberSearchResult, self).__init__(**kwargs)
self.search_id = search_id
self.phone_numbers = phone_numbers
self.phone_number_type = phone_number_type
self.assignment_type = assignment_type
self.capabilities = capabilities
self.cost = cost
self.search_expires_by = search_expires_by
| StarcoderdataPython |
30365 | <gh_stars>1-10
# Copyright (C) 2016-2018 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from hamcrest import assert_that, calling, equal_to, raises
from mock import patch
from ycmd.server_utils import GetStandardLibraryIndexInSysPath
from ycmd.tests import PathToTestFile
@patch( 'sys.path', [
PathToTestFile( 'python-future', 'some', 'path' ),
PathToTestFile( 'python-future', 'another', 'path' ) ] )
def GetStandardLibraryIndexInSysPath_ErrorIfNoStandardLibrary_test( *args ):
assert_that(
calling( GetStandardLibraryIndexInSysPath ),
raises( RuntimeError,
'Could not find standard library path in Python path.' ) )
@patch( 'sys.path', [
PathToTestFile( 'python-future', 'some', 'path' ),
PathToTestFile( 'python-future', 'standard_library' ),
PathToTestFile( 'python-future', 'another', 'path' ) ] )
def GetStandardLibraryIndexInSysPath_FindFullStandardLibrary_test( *args ):
assert_that( GetStandardLibraryIndexInSysPath(), equal_to( 1 ) )
@patch( 'sys.path', [
PathToTestFile( 'python-future', 'some', 'path' ),
PathToTestFile( 'python-future', 'embedded_standard_library',
'python35.zip' ),
PathToTestFile( 'python-future', 'another', 'path' ) ] )
def GetStandardLibraryIndexInSysPath_FindEmbeddedStandardLibrary_test( *args ):
assert_that( GetStandardLibraryIndexInSysPath(), equal_to( 1 ) )
| StarcoderdataPython |
3338277 | from graph import *
class PathFinder:
"""
Class container for pathfinding solution.
"""
def __init__(self):
self.canPassHomeNodes = False
self.useCyclicBFS = False
# convert the complex Graph object to a simple dictionary to
# faciliate pathfinding without cycles
def _GraphToDictionary(self, graphToConvert):
result = dict()
graphNodes = graphToConvert.GetNodes()
for n in graphNodes:
dictKey = n.id
nodeNeighbours = []
for i in n.GetNeighbourNodes():
nodeNeighbours.append(i.id)
result[dictKey] = nodeNeighbours
return result
# convert the complex Graph object to a node -> GraphEdge object dictionary to
# facilitate pathfinding with cycles
def _GraphToEdgeDictionary(self, graphToConvert):
result = dict()
graphNodes = graphToConvert.GetNodes()
for n in graphNodes:
dictKey = n.id
nodeNeighbourEdges = []
for e in n.GetNeighbourEdges():
nodeNeighbourEdges.append(e)
result[dictKey] = nodeNeighbourEdges
return result
# core BFS pathfinding function
def FindAllPaths(self, graph, start, end):
if self.useCyclicBFS:
# find all paths - allow passing through the same node more than once
convertedGraph = self._GraphToEdgeDictionary(graph)
return self._CycledBFS(graph, convertedGraph, start, end)
else:
# find all paths - don't visit the same node more than once
convertedGraph = self._GraphToDictionary(graph)
return self._ClassicBFS(graph, convertedGraph, start, end)
# perform BFS between node 'start' and node 'end'
def _ClassicBFS(self, fullGraph, graph, start, end, path=[]):
path = path + [start]
if start == end:
return [path]
if not self.canPassHomeNodes:
# we're passing thorugh a home node that is not the end node. We don't want that
if len(path) > 1 and fullGraph.GetNode(start).isHomeNode:
return []
if start not in graph:
return []
paths = []
for node in graph[start]:
if node not in path:
newpaths = self._ClassicBFS(fullGraph, graph, node, end, path)
for newpath in newpaths:
paths.append(newpath)
return paths
# perform a modified BFS which allows cyclic paths
def _CycledBFS(self, fullGraph, graph, start, end, prevEdgeId=-1, path=[], edgePath=[]):
path = path + [start]
# we keep track of the passed edges to allow passing through the same node multiple times
# if this is the start of the algorithm, prevEdgeId is set to -1 (no previous edge)
if prevEdgeId != -1:
edgePath = edgePath + [prevEdgeId]
if start == end:
return [path]
if not self.canPassHomeNodes:
# we're passing thorugh a home node that is not the end node. We don't want that
if len(path) > 1 and fullGraph.GetNode(start).isHomeNode:
return []
if start not in graph:
return []
paths = []
for edge in graph[start]:
# get the node id on the second end of the edge
nodeId = edge.GetSecondNodeId(start)
nextNodeIsNotPrevious = True
# skip if the algorithm tries to backtrack in the next step
if len(path) > 1:
nextNodeIsNotPrevious = nodeId is not path[len(path)-2]
if edge.id not in edgePath and nextNodeIsNotPrevious:
newpaths = self._CycledBFS(fullGraph, graph, nodeId, end, edge.id, path, edgePath)
for newpath in newpaths:
paths.append(newpath)
return paths
| StarcoderdataPython |
1622754 | <reponame>materialsproject/maggpy<gh_stars>0
# coding: utf-8
"""
Module defining a FileStore that enables accessing files in a local directory
using typical maggma access patterns.
"""
import hashlib
import warnings
from pathlib import Path
from datetime import datetime, timezone
from typing import Dict, List, Optional, Union, Iterator, Callable
from pymongo import UpdateOne
from monty.io import zopen
from maggma.core import StoreError, Sort
from maggma.stores.mongolike import MemoryStore, JSONStore
# These keys are automatically populated by the FileStore.read() method and
# hence are not allowed to be manually overwritten
PROTECTED_KEYS = {
"_id",
"name",
"last_updated",
"hash",
"size",
"parent",
"orphan",
"contents",
}
class FileStore(MemoryStore):
"""
A Store for files on disk. Provides a common access method consistent with
other stores. Each Item in the Store represents one file. Files can be organized
into any type of directory structure.
A hash of the full path to each file is used to define a file_id that uniquely
identifies each item.
Any metadata added to the items is written to a .json file in the root directory
of the FileStore.
"""
def __init__(
self,
path: Union[str, Path],
file_filters: Optional[List] = None,
max_depth: Optional[int] = None,
read_only: bool = True,
include_orphans: bool = False,
json_name: str = "FileStore.json",
**kwargs,
):
"""
Initializes a FileStore
Args:
path: parent directory containing all files and subdirectories to process
file_filters: List of fnmatch patterns defining the files to be tracked by
the FileStore. Only files that match one of the patterns provided will
be included in the Store If None (default), all files are included.
Examples: ["*.txt", "test-[abcd].txt"], etc.
See https://docs.python.org/3/library/fnmatch.html for full syntax
max_depth: The maximum depth to look into subdirectories. 0 = no recursion,
1 = include files 1 directory below the FileStore, etc.
None (default) will scan all files below
the FileStore root directory, regardless of depth.
read_only: If True (default), the .update() and .remove_docs()
methods are disabled, preventing any changes to the files on
disk. In addition, metadata cannot be written to disk.
include_orphans: Whether to include orphaned metadata records in query results.
Orphaned metadata records are records found in the local JSON file that can
no longer be associated to a file on disk. This can happen if a file is renamed
or deleted, or if the FileStore is re-initialized with a more restrictive
file_filters or max_depth argument. By default (False), these records
do not appear in query results. Nevertheless, the metadata records are
retained in the JSON file and the FileStore to prevent accidental data loss.
json_name: Name of the .json file to which metadata is saved. If read_only
is False, this file will be created in the root directory of the
FileStore.
kwargs: kwargs passed to MemoryStore.__init__()
"""
# this conditional block is needed in order to guarantee that the 'name'
# property, which is passed to `MemoryStore`, works correctly
# collection names passed to MemoryStore cannot end with '.'
if path == ".":
path = Path.cwd()
self.path = Path(path) if isinstance(path, str) else path
self.json_name = json_name
self.file_filters = file_filters if file_filters else ["*"]
self.collection_name = "file_store"
self.key = "file_id"
self.include_orphans = include_orphans
self.read_only = read_only
self.max_depth = max_depth
self.metadata_store = JSONStore(
paths=[str(self.path / self.json_name)],
read_only=self.read_only,
collection_name=self.collection_name,
key=self.key,
)
self.kwargs = kwargs
super().__init__(
collection_name=self.collection_name,
key=self.key,
**self.kwargs,
)
@property
def name(self) -> str:
"""
Return a string representing this data source
"""
return f"file://{self.path}"
def add_metadata(
self,
metadata: Dict = {},
query: Optional[Dict] = None,
auto_data: Callable[[Dict], Dict] = None,
**kwargs,
):
"""
Add metadata to a record in the FileStore, either manually or by computing it automatically
from another field, such as name or path (see auto_data).
Args:
metadata: dict of additional data to add to the records returned by query.
Note that any protected keys (such as 'name', 'path', etc.)
will be ignored.
query: Query passed to FileStore.query()
auto_data: A function that automatically computes metadata based on a field in
the record itself. The function must take in the item as a dict and
return a dict containing the desired metadata. A typical use case is
to assign metadata based on the name of a file. For example, for
data files named like `2022-04-01_april_fool_experiment.txt`, the
auto_data function could be:
def get_metadata_from_filename(d):
return {"date": d["name"].split("_")[0],
"test_name": d["name"].split("_")[1]
}
Note that in the case of conflict between manual and automatically
computed metadata (for example, if metadata={"name": "another_name"} was
supplied alongside the auto_data function above), the manually-supplied
metadata is used.
kwargs: kwargs passed to FileStore.query()
"""
# sanitize the metadata
filtered_metadata = self._filter_data(metadata)
updated_docs = []
for doc in self.query(query, **kwargs):
if auto_data:
extra_data = self._filter_data(auto_data(doc))
doc.update(extra_data)
doc.update(filtered_metadata)
updated_docs.append(doc)
self.update(updated_docs, key=self.key)
def read(self) -> List[Dict]:
"""
Iterate through all files in the Store folder and populate
the Store with dictionaries containing basic information about each file.
The keys of the documents added to the Store are
name: str = File name
path: Path = Absolute path of this file
parent: str = Name of the parent directory (if any)
file_id: str = Unique identifier for this file, computed from the hash
of its path relative to the base FileStore directory and
the file creation time. The key of this field is 'file_id'
by default but can be changed via the 'key' kwarg to
FileStore.__init__().
size: int = Size of this file in bytes
last_updated: datetime = Time this file was last modified
hash: str = Hash of the file contents
orphan: bool = Whether this record is an orphan
"""
file_list = []
# generate a list of files in subdirectories
for pattern in self.file_filters:
# list every file that matches the pattern
for f in self.path.rglob(pattern):
if f.is_file():
# ignore the .json file created by the Store
if f.name == self.json_name:
continue
# filter based on depth
depth = len(f.relative_to(self.path).parts) - 1
if self.max_depth is None or depth <= self.max_depth:
file_list.append(self._create_record_from_file(f))
return file_list
def _create_record_from_file(self, f: Union[str, Path]) -> Dict:
"""
        Given the path to a file, return a Dict that constitutes a record of
basic information about that file. The keys in the returned dict
are:
name: str = File name
path: Path = Absolute path of this file
parent: str = Name of the parent directory (if any)
file_id: str = Unique identifier for this file, computed from the hash
of its path relative to the base FileStore directory and
the file creation time. The key of this field is 'file_id'
by default but can be changed via the 'key' kwarg to
FileStore.__init__().
size: int = Size of this file in bytes
last_updated: datetime = Time this file was last modified
hash: str = Hash of the file contents
orphan: bool = Whether this record is an orphan
"""
# make sure f is a Path object
if not isinstance(f, Path):
f = Path(f)
# compute the file_id from the relative path
relative_path = f.relative_to(self.path)
digest = hashlib.md5()
digest.update(str(relative_path).encode())
file_id = str(digest.hexdigest())
# hash the file contents
digest2 = hashlib.md5()
block_size = 128 * digest2.block_size
digest2.update(self.name.encode())
with open(f.as_posix(), "rb") as file:
buf = file.read(block_size)
digest2.update(buf)
content_hash = str(digest2.hexdigest())
d = {
"name": f.name,
"path": f,
"parent": f.parent.name,
"size": f.stat().st_size,
"last_updated": datetime.fromtimestamp(f.stat().st_mtime, tz=timezone.utc),
"orphan": False,
"hash": content_hash,
self.key: file_id,
}
return d
def connect(self, force_reset: bool = False):
"""
Connect to the source data
Read all the files in the directory, create corresponding File
items in the internal MemoryStore.
If there is a metadata .json file in the directory, read its
contents into the MemoryStore
Args:
force_reset: whether to reset the connection or not
"""
# read all files and place them in the MemoryStore
# use super.update to bypass the read_only guard statement
# because we want the file data to be populated in memory
super().connect()
super().update(self.read())
# now read any metadata from the .json file
try:
self.metadata_store.connect()
metadata = [d for d in self.metadata_store.query()]
except FileNotFoundError:
metadata = []
warnings.warn(
f"""
JSON file '{self.json_name}' not found. To create this file automatically, re-initialize
the FileStore with read_only=False.
"""
)
# merge metadata with file data and check for orphaned metadata
requests = []
found_orphans = False
key = self.key
file_ids = self.distinct(self.key)
for d in metadata:
if isinstance(key, list):
search_doc = {k: d[k] for k in key}
else:
search_doc = {key: d[key]}
if d[key] not in file_ids:
found_orphans = True
d.update({"orphan": True})
del d["_id"]
requests.append(UpdateOne(search_doc, {"$set": d}, upsert=True))
if found_orphans:
warnings.warn(
f"Orphaned metadata was found in {self.json_name}. This metadata"
"will be added to the store with {'orphan': True}"
)
if len(requests) > 0:
self._collection.bulk_write(requests, ordered=False)
def update(self, docs: Union[List[Dict], Dict], key: Union[List, str, None] = None):
"""
Update items in the Store. Only possible if the store is not read only. Any new
fields that are added will be written to the JSON file in the root directory
of the FileStore.
Note that certain fields that come from file metadata on disk are protected and
cannot be updated with this method. This prevents the contents of the FileStore
from becoming out of sync with the files on which it is based. The protected fields
are keys in the dict returned by _create_record_from_file, e.g. 'name', 'parent',
'last_updated', 'hash', 'size', 'contents', and 'orphan'. The 'path' and key fields
are retained to make each document in the JSON file identifiable by manual inspection.
Args:
docs: the document or list of documents to update
key: field name(s) to determine uniqueness for a
document, can be a list of multiple fields,
a single field, or None if the Store's key
field is to be used
"""
if self.read_only:
raise StoreError(
"This Store is read-only. To enable file I/O, re-initialize the store with read_only=False."
)
super().update(docs, key)
data = [d for d in self.query()]
filtered_data = []
# remove fields that are populated by .read()
for d in data:
filtered_d = self._filter_data(d)
# don't write records that contain only file_id and path
if len(set(filtered_d.keys()).difference(set(["path", self.key]))) != 0:
filtered_data.append(filtered_d)
self.metadata_store.update(filtered_data, self.key)
def _filter_data(self, d):
"""
Remove any protected keys from a dictionary
Args:
d: Dictionary whose keys are to be filtered
"""
filtered_d = {k: v for k, v in d.items() if k not in PROTECTED_KEYS}
return filtered_d
def query( # type: ignore
self,
criteria: Optional[Dict] = None,
properties: Union[Dict, List, None] = None,
sort: Optional[Dict[str, Union[Sort, int]]] = None,
hint: Optional[Dict[str, Union[Sort, int]]] = None,
skip: int = 0,
limit: int = 0,
contents_size_limit: Optional[int] = None,
) -> Iterator[Dict]:
"""
Queries the Store for a set of documents
Args:
criteria: PyMongo filter for documents to search in
            properties: properties to return in the matched documents
sort: Dictionary of sort order for fields. Keys are field names and
values are 1 for ascending or -1 for descending.
hint: Dictionary of indexes to use as hints for query optimizer.
Keys are field names and values are 1 for ascending or -1 for descending.
            skip: number of documents to skip
limit: limit on total number of documents returned
contents_size_limit: Maximum file size in bytes for which to return contents.
The FileStore will attempt to read the file and populate the 'contents' key
with its content at query time, unless the file size is larger than this value.
"""
return_contents = False
criteria = criteria if criteria else {}
if criteria.get("orphan", None) is None:
if not self.include_orphans:
criteria.update({"orphan": False})
if criteria.get("contents"):
warnings.warn("'contents' is not a queryable field! Ignoring.")
if isinstance(properties, list):
properties = {p: 1 for p in properties}
orig_properties = properties.copy() if properties else None
if properties is None or properties.get("contents"):
return_contents = True
if properties is not None and return_contents:
# remove contents b/c it isn't stored in the MemoryStore
properties.pop("contents")
# add size and path to query so that file can be read
properties.update({"size": 1})
properties.update({"path": 1})
for d in super().query(
criteria=criteria,
properties=properties,
sort=sort,
hint=hint,
skip=skip,
limit=limit,
):
# add file contents to the returned documents, if appropriate
if return_contents:
if contents_size_limit is None or d["size"] <= contents_size_limit:
# attempt to read the file contents and inject into the document
# TODO - could add more logic for detecting different file types
# and more nuanced exception handling
try:
with zopen(d["path"], "r") as f:
data = f.read()
except Exception as e:
data = f"Unable to read: {e}"
elif d["size"] > contents_size_limit:
data = "Unable to read: file too large"
else:
data = "Unable to read: Unknown error"
d.update({"contents": data})
# remove size and path if not explicitly requested
if orig_properties is not None and "size" not in orig_properties:
d.pop("size")
if orig_properties is not None and "path" not in orig_properties:
d.pop("path")
yield d
def query_one(
self,
criteria: Optional[Dict] = None,
properties: Union[Dict, List, None] = None,
sort: Optional[Dict[str, Union[Sort, int]]] = None,
contents_size_limit: Optional[int] = None,
):
"""
Queries the Store for a single document
Args:
criteria: PyMongo filter for documents to search
properties: properties to return in the document
sort: Dictionary of sort order for fields. Keys are field names and
values are 1 for ascending or -1 for descending.
contents_size_limit: Maximum file size in bytes for which to return contents.
The FileStore will attempt to read the file and populate the 'contents' key
with its content at query time, unless the file size is larger than this value.
"""
return next(
self.query(
criteria=criteria,
properties=properties,
sort=sort,
contents_size_limit=contents_size_limit,
),
None,
)
def remove_docs(self, criteria: Dict, confirm: bool = False):
"""
Remove items matching the query dictionary.
Args:
criteria: query dictionary to match
confirm: Boolean flag to confirm that remove_docs should delete
files on disk. Default: False.
"""
if self.read_only:
raise StoreError(
"This Store is read-only. To enable file I/O, re-initialize the "
"store with read_only=False."
)
docs = [d for d in self.query(criteria)]
# this ensures that any modifications to criteria made by self.query
# (e.g., related to orphans or contents) are propagated through to the superclass
new_criteria = {"file_id": {"$in": [d["file_id"] for d in docs]}}
if len(docs) > 0 and not confirm:
raise StoreError(
f"Warning! This command is about to delete {len(docs)} items from disk! "
"If this is what you want, reissue this command with confirm=True."
)
for d in docs:
Path(d["path"]).unlink()
super().remove_docs(criteria=new_criteria)
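# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal example of how the FileStore above might be used: scan a directory
# of .txt files, attach a metadata field parsed from the file names, and query
# the records back. The directory path, the filename convention, and the "date"
# key are assumptions made only for this sketch.
if __name__ == "__main__":
    fs = FileStore("/path/to/experiment_data", file_filters=["*.txt"], read_only=False)
    fs.connect()
    # assumes names like 2022-04-01_april_fool_experiment.txt, as in the docstring example
    fs.add_metadata(auto_data=lambda d: {"date": d["name"].split("_")[0]})
    for doc in fs.query({"date": "2022-04-01"}, contents_size_limit=10_000):
        print(doc["name"], doc.get("contents", "")[:80])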
| StarcoderdataPython |
16996 | # repository: alixedi/data-hub-api-cd-poc
from elasticsearch_dsl import Boolean, Date, Double, Integer, Keyword, Long, Object, Text
from datahub.search import dict_utils
from datahub.search import fields
from datahub.search.models import BaseESModel
DOC_TYPE = 'investment_project'
def _related_investment_project_field():
"""Field for a related investment project."""
return Object(properties={
'id': Keyword(),
'name': fields.NormalizedKeyword(),
'project_code': fields.NormalizedKeyword(),
})
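# Added note: a sub-document indexed with the field above is expected to look
# roughly like {"id": "<uuid>", "name": "Example project", "project_code": "DHP-..."},
# where the example values are placeholders rather than real data.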
class InvestmentProject(BaseESModel):
"""Elasticsearch representation of InvestmentProject."""
id = Keyword()
actual_land_date = Date()
actual_uk_regions = fields.id_name_field()
address_1 = Text()
address_2 = Text()
address_town = fields.NormalizedKeyword()
address_postcode = Text()
approved_commitment_to_invest = Boolean()
approved_fdi = Boolean()
approved_good_value = Boolean()
approved_high_value = Boolean()
approved_landed = Boolean()
approved_non_fdi = Boolean()
allow_blank_estimated_land_date = Boolean(index=False)
allow_blank_possible_uk_regions = Boolean(index=False)
anonymous_description = fields.EnglishText()
archived = Boolean()
archived_by = fields.contact_or_adviser_field()
archived_on = Date()
archived_reason = Text()
associated_non_fdi_r_and_d_project = _related_investment_project_field()
average_salary = fields.id_name_field()
business_activities = fields.id_name_field()
client_cannot_provide_foreign_investment = Boolean()
client_cannot_provide_total_investment = Boolean()
client_contacts = fields.contact_or_adviser_field()
client_relationship_manager = fields.contact_or_adviser_field(include_dit_team=True)
client_requirements = Text(index=False)
comments = fields.EnglishText()
country_investment_originates_from = fields.id_name_field()
country_lost_to = Object(
properties={
'id': Keyword(index=False),
'name': Text(index=False),
},
)
created_on = Date()
created_by = fields.contact_or_adviser_field(include_dit_team=True)
date_abandoned = Date()
date_lost = Date()
delivery_partners = fields.id_name_field()
description = fields.EnglishText()
estimated_land_date = Date()
export_revenue = Boolean()
fdi_type = fields.id_name_field()
fdi_value = fields.id_name_field()
foreign_equity_investment = Double()
government_assistance = Boolean()
intermediate_company = fields.id_name_field()
investor_company = fields.id_name_partial_field()
investor_company_country = fields.id_name_field()
investment_type = fields.id_name_field()
investor_type = fields.id_name_field()
level_of_involvement = fields.id_name_field()
likelihood_to_land = fields.id_name_field()
project_assurance_adviser = fields.contact_or_adviser_field(include_dit_team=True)
project_manager = fields.contact_or_adviser_field(include_dit_team=True)
name = Text(
fields={
'keyword': fields.NormalizedKeyword(),
'trigram': fields.TrigramText(),
},
)
new_tech_to_uk = Boolean()
non_fdi_r_and_d_budget = Boolean()
number_new_jobs = Integer()
number_safeguarded_jobs = Long()
modified_on = Date()
project_arrived_in_triage_on = Date()
project_code = fields.NormalizedKeyword(
fields={
'trigram': fields.TrigramText(),
},
)
proposal_deadline = Date()
other_business_activity = Text(index=False)
quotable_as_public_case_study = Boolean()
r_and_d_budget = Boolean()
reason_abandoned = Text(index=False)
reason_delayed = Text(index=False)
reason_lost = Text(index=False)
referral_source_activity = fields.id_name_field()
referral_source_activity_event = fields.NormalizedKeyword()
referral_source_activity_marketing = fields.id_name_field()
referral_source_activity_website = fields.id_name_field()
referral_source_adviser = Object(
properties={
'id': Keyword(index=False),
'first_name': Text(index=False),
'last_name': Text(index=False),
'name': Text(index=False),
},
)
sector = fields.sector_field()
site_decided = Boolean()
some_new_jobs = Boolean()
specific_programme = fields.id_name_field()
stage = fields.id_name_field()
status = fields.NormalizedKeyword()
team_members = fields.contact_or_adviser_field(include_dit_team=True)
total_investment = Double()
uk_company = fields.id_name_partial_field()
uk_company_decided = Boolean()
uk_region_locations = fields.id_name_field()
will_new_jobs_last_two_years = Boolean()
level_of_involvement_simplified = Keyword()
gross_value_added = Double()
MAPPINGS = {
'actual_uk_regions': lambda col: [
dict_utils.id_name_dict(c) for c in col.all()
],
'archived_by': dict_utils.contact_or_adviser_dict,
'associated_non_fdi_r_and_d_project': dict_utils.investment_project_dict,
'average_salary': dict_utils.id_name_dict,
'business_activities': lambda col: [dict_utils.id_name_dict(c) for c in col.all()],
'client_contacts': lambda col: [dict_utils.contact_or_adviser_dict(c) for c in col.all()],
'client_relationship_manager': dict_utils.adviser_dict_with_team,
'country_lost_to': dict_utils.id_name_dict,
'country_investment_originates_from': dict_utils.id_name_dict,
'created_by': dict_utils.adviser_dict_with_team,
'delivery_partners': lambda col: [
dict_utils.id_name_dict(c) for c in col.all()
],
'fdi_type': dict_utils.id_name_dict,
'fdi_value': dict_utils.id_name_dict,
'intermediate_company': dict_utils.id_name_dict,
'investment_type': dict_utils.id_name_dict,
'investor_company': dict_utils.id_name_dict,
'investor_company_country': dict_utils.id_name_dict,
'investor_type': dict_utils.id_name_dict,
'level_of_involvement': dict_utils.id_name_dict,
'likelihood_to_land': dict_utils.id_name_dict,
'project_assurance_adviser': dict_utils.adviser_dict_with_team,
'project_code': str,
'project_manager': dict_utils.adviser_dict_with_team,
'referral_source_activity': dict_utils.id_name_dict,
'referral_source_activity_marketing': dict_utils.id_name_dict,
'referral_source_activity_website': dict_utils.id_name_dict,
'referral_source_adviser': dict_utils.contact_or_adviser_dict,
'sector': dict_utils.sector_dict,
'specific_programme': dict_utils.id_name_dict,
'stage': dict_utils.id_name_dict,
'team_members': lambda col: [
dict_utils.contact_or_adviser_dict(c.adviser, include_dit_team=True) for c in col.all()
],
'uk_company': dict_utils.id_name_dict,
'uk_region_locations': lambda col: [
dict_utils.id_name_dict(c) for c in col.all()
],
}
SEARCH_FIELDS = (
'id',
'name',
'name.trigram',
'uk_company.name',
'uk_company.name.trigram',
'investor_company.name',
'investor_company.name.trigram',
'project_code.trigram',
)
class Meta:
"""Default document meta data."""
doc_type = DOC_TYPE
class Index:
doc_type = DOC_TYPE
| StarcoderdataPython |
3358805 | from torch import nn
from .base_models import BaseEncoderMaskerDecoder
from asteroid_filterbanks import make_enc_dec
from asteroid_filterbanks.transforms import mag, magreim
from ..masknn import norms, activations
from ..utils.torch_utils import pad_x_to_y
import warnings
class DeMask(BaseEncoderMaskerDecoder):
"""
    Simple MLP model for surgical mask speech enhancement. A transformed-domain masking approach is used.
Args:
        input_type (str, optional): whether the magnitude spectrogram "mag" or both the real and
            imaginary parts "reim" are passed as features to the masker network.
            Concatenation of "mag" and "reim" can also be used with "cat".
        output_type (str, optional): whether the masker outputs a mask
            for the magnitude spectrogram "mag" or for both the real and imaginary parts "reim".
hidden_dims (list, optional): list of MLP hidden layer sizes.
dropout (float, optional): dropout probability.
activation (str, optional): type of activation used in hidden MLP layers.
mask_act (str, optional): Which non-linear function to generate mask.
norm_type (str, optional): To choose from ``'BN'``, ``'gLN'``,
``'cLN'``.
fb_name (str): type of analysis and synthesis filterbanks used,
choose between ["stft", "free", "analytic_free"].
n_filters (int): number of filters in the analysis and synthesis filterbanks.
stride (int): filterbank filters stride.
kernel_size (int): length of filters in the filterbank.
encoder_activation (str)
sample_rate (float): Sampling rate of the model.
        **fb_kwargs (dict): Additional kwargs to pass to the filterbank
creation.
"""
def __init__(
self,
input_type="mag",
output_type="mag",
hidden_dims=(1024,),
dropout=0.0,
activation="relu",
mask_act="relu",
norm_type="gLN",
fb_name="stft",
n_filters=512,
stride=256,
kernel_size=512,
sample_rate=16000,
**fb_kwargs,
):
encoder, decoder = make_enc_dec(
fb_name,
kernel_size=kernel_size,
n_filters=n_filters,
stride=stride,
sample_rate=sample_rate,
**fb_kwargs,
)
n_masker_in = self._get_n_feats_input(input_type, encoder.n_feats_out)
n_masker_out = self._get_n_feats_output(output_type, encoder.n_feats_out)
masker = build_demask_masker(
n_masker_in,
n_masker_out,
norm_type=norm_type,
activation=activation,
hidden_dims=hidden_dims,
dropout=dropout,
mask_act=mask_act,
)
super().__init__(encoder, masker, decoder)
self.input_type = input_type
self.output_type = output_type
self.hidden_dims = hidden_dims
self.dropout = dropout
self.activation = activation
self.mask_act = mask_act
self.norm_type = norm_type
def _get_n_feats_input(self, input_type, encoder_n_out):
if input_type == "reim":
return encoder_n_out
if input_type not in {"mag", "cat"}:
raise NotImplementedError("Input type should be either mag, reim or cat")
n_feats_input = encoder_n_out // 2
if input_type == "cat":
n_feats_input += encoder_n_out
return n_feats_input
def _get_n_feats_output(self, output_type, encoder_n_out):
if output_type == "mag":
return encoder_n_out // 2
if output_type == "reim":
return encoder_n_out
raise NotImplementedError("Output type should be either mag or reim")
def forward_masker(self, tf_rep):
"""Estimates masks based on time-frequency representations.
Args:
tf_rep (torch.Tensor): Time-frequency representation in
(batch, freq, seq).
Returns:
torch.Tensor: Estimated masks in (batch, freq, seq).
"""
masker_input = tf_rep
if self.input_type == "mag":
masker_input = mag(masker_input)
elif self.input_type == "cat":
masker_input = magreim(masker_input)
est_masks = self.masker(masker_input)
if self.output_type == "mag":
est_masks = est_masks.repeat(1, 2, 1)
return est_masks
def apply_masks(self, tf_rep, est_masks):
"""Applies masks to time-frequency representations.
Args:
tf_rep (torch.Tensor): Time-frequency representations in
(batch, freq, seq).
est_masks (torch.Tensor): Estimated masks in (batch, freq, seq).
Returns:
torch.Tensor: Masked time-frequency representations.
"""
if self.output_type == "reim":
tf_rep = tf_rep.unsqueeze(1)
return est_masks * tf_rep
def get_model_args(self):
""" Arguments needed to re-instantiate the model. """
model_args = {
"input_type": self.input_type,
"output_type": self.output_type,
"hidden_dims": self.hidden_dims,
"dropout": self.dropout,
"activation": self.activation,
"mask_act": self.mask_act,
"norm_type": self.norm_type,
}
model_args.update(self.encoder.filterbank.get_config())
return model_args
def build_demask_masker(
n_in,
n_out,
activation="relu",
dropout=0.0,
hidden_dims=(1024,),
mask_act="relu",
norm_type="gLN",
):
make_layer_norm = norms.get(norm_type)
net = [make_layer_norm(n_in)]
layer_activation = activations.get(activation)()
in_chan = n_in
for hidden_dim in hidden_dims:
net.extend(
[
nn.Conv1d(in_chan, hidden_dim, 1),
make_layer_norm(hidden_dim),
layer_activation,
nn.Dropout(dropout),
]
)
in_chan = hidden_dim
net.extend([nn.Conv1d(in_chan, n_out, 1), activations.get(mask_act)()])
return nn.Sequential(*net)
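# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal example of running the model on a batch of raw waveforms. The batch
# size, clip length and keyword values are arbitrary assumptions for the sketch.
if __name__ == "__main__":
    import torch

    model = DeMask(input_type="mag", output_type="mag", sample_rate=16000)
    noisy = torch.randn(2, 16000)  # (batch, time): two one-second clips at 16 kHz
    with torch.no_grad():
        enhanced = model(noisy)
    print(enhanced.shape)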
| StarcoderdataPython |
4829916 | # coding:utf8
from datetime import datetime
from app import db
class User(db.Model):
"""
    Information about users.
"""
__tablename__ = "user"
# __table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True) # user number
name = db.Column(db.String(100), unique=True) # username
password = db.Column(db.String(100)) # user's password
email = db.Column(db.String(100), unique=True) # user's email address
phone = db.Column(db.String(11), unique=True) # user's mobile phone number
info = db.Column(db.Text) # user's signature
avatar = db.Column(db.String(255), unique=True) # user's profile picture
add_time = db.Column(db.DateTime, index=True, default=datetime.now) # registration time
uuid = db.Column(db.String(255), unique=True) # user's unique identifier
user_logs = db.relationship('Userlog', backref='user') # key used to associate with user log table
comments = db.relationship('Comment', backref='user') # key used to associate with comment table
collections = db.relationship('Collection', backref='user') # key used to associate with collection table
def __repr__(self):
return "<User %r>" % self.name
class Userlog(db.Model):
"""
Log when users sign in.
"""
__tablename__ = "userlog"
# __table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True) # log number
user_id = db.Column(db.Integer, db.ForeignKey('user.id')) # corresponding user connected with the user table
ip = db.Column(db.String(100)) # the corresponding users' IP address
add_time = db.Column(db.DateTime, index=True, default=datetime.now) # log generation time
def __repr__(self):
return "<Userlog %r>" % self.id
class Tag(db.Model):
"""
Tag of the movies.
"""
__tablename__ = "tag"
# __table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True) # tag number
name = db.Column(db.String(100), unique=True) # name of tag
add_time = db.Column(db.DateTime, index=True, default=datetime.now) # tag generation time
movies = db.relationship("Movie", backref='tag') # key used to associate with tag table
def __repr__(self):
return "<Tag %r>" % self.name
class Movie(db.Model):
"""
Definition of movie information.
"""
__tablename__ = "movie"
# __table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True) # movie number
title = db.Column(db.String(255), unique=True) # title of the movie
url = db.Column(db.String(255), unique=True) # movie playback link
info = db.Column(db.Text) # introduction of the movie
cover = db.Column(db.String(255), unique=True) # cover of the movie
rating = db.Column(db.SmallInteger) # rating of the movie
views = db.Column(db.BigInteger) # number of times played
review_num = db.Column(db.BigInteger) # number of reviews
tag_id = db.Column(db.Integer, db.ForeignKey('tag.id')) # corresponding tag connected with the tag table
area = db.Column(db.String(255)) # movie release area
release_time = db.Column(db.Date) # movie release time
length = db.Column(db.String(100)) # the length of the movie
add_time = db.Column(db.DateTime, index=True, default=datetime.now) # time when the movie added to the website
comments = db.relationship('Comment', backref='movie') # key used to associate with comment table
collections = db.relationship('Collection', backref='movie') # key used to associate with collection table
def __repr__(self):
return "<Movie %r>" % self.title
class Preview(db.Model):
"""
Movie preview.
"""
__tablename__ = "preview"
# __table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True) # preview number
title = db.Column(db.String(255), unique=True) # title of the movie preview
cover = db.Column(db.String(255), unique=True) # cover of the movie preview
add_time = db.Column(db.DateTime, index=True, default=datetime.now) # time when the preview added to the website
def __repr__(self):
return "<Preview %r>" % self.title
class Comment(db.Model):
"""
Movie comment.
"""
__tablename__ = "comment"
# __table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True) # comment number
content = db.Column(db.Text) # content of the comment
movie_id = db.Column(db.Integer, db.ForeignKey('movie.id')) # commented movie
user_id = db.Column(db.Integer, db.ForeignKey('user.id')) # user who submitted the comment
add_time = db.Column(db.DateTime, index=True, default=datetime.now) # time when the comment submitted
def __repr__(self):
return "<Comment %r>" % self.id
class Collection(db.Model):
"""
Movie collection.
"""
__tablename__ = "collection"
# __table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True) # collection number
movie_id = db.Column(db.Integer, db.ForeignKey('movie.id')) # movie which is collected
user_id = db.Column(db.Integer, db.ForeignKey('user.id')) # user who collect this movie
add_time = db.Column(db.DateTime, index=True, default=datetime.now) # time when the movie is bookmarked
def __repr__(self):
return "<Collection %r>" % self.id
class Authority(db.Model):
"""
The definition of authorities.
"""
__tablename__ = "authority"
# __table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True) # authority number
name = db.Column(db.String(100), unique=True) # authority name
url = db.Column(db.String(255), unique=True) # the routing address corresponded to the authority
add_time = db.Column(db.DateTime, index=True, default=datetime.now) # time when the authority is added
def __repr__(self):
return "<Authority %r>" % self.name
class Role(db.Model):
"""
The definition of roles.
"""
__tablename__ = "role"
# __table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True) # role number
name = db.Column(db.String(100), unique=True) # role name
authorities = db.Column(db.String(600)) # The authorities granted to the role
add_time = db.Column(db.DateTime, index=True, default=datetime.now) # time when the role is added
def __repr__(self):
return "<Role %r>" % self.name
class Admin(db.Model):
"""
The definition of admins.
"""
__tablename__ = "admin"
# __table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True) # admin number
name = db.Column(db.String(100), unique=True) # admin name
password = db.Column(db.String(100)) # admin password
is_super = db.Column(db.SmallInteger) # whether it is a super administrator, 0 is super administrator
role_id = db.Column(db.Integer, db.ForeignKey('role.id')) # the role to which the administrator belongs
add_time = db.Column(db.DateTime, index=True, default=datetime.now) # time when the admin is added
adminlogs = db.relationship('Adminlog', backref='admin') # key used to associate with administrator log table
operationlogs = db.relationship('Operationlog',
backref='admin') # key used to associate with administrator operations log table
def __repr__(self):
return "<Admin %r>" % self.name
def check_password(self, password):
from werkzeug.security import check_password_hash
return check_password_hash(self.password, password)
class Adminlog(db.Model):
"""
Log when administrators sign in.
"""
__tablename__ = "adminlog"
# __table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True) # log number
admin_id = db.Column(db.Integer, db.ForeignKey('admin.id')) # corresponding user connected with the admin table
ip = db.Column(db.String(100)) # the corresponding administrators' IP address
add_time = db.Column(db.DateTime, index=True, default=datetime.now) # log generation time
def __repr__(self):
return "<Adminlog %r>" % self.id
class Operationlog(db.Model):
"""
    Log of operations performed by administrators.
"""
__tablename__ = "operationlog"
# __table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True) # log number
admin_id = db.Column(db.Integer, db.ForeignKey('admin.id')) # corresponding user connected with the admin table
ip = db.Column(db.String(100)) # the corresponding administrators' IP address
reason = db.Column(db.String(100)) # the reason why administrator do this operation
add_time = db.Column(db.DateTime, index=True, default=datetime.now) # log generation time
def __repr__(self):
return "<Operationlog %r>" % self.id
# if __name__ == "__main__":
#     # Create all tables; this can only be run once, after the database is created
# db.create_all()
#     # Create the super administrator role; likewise, run only once
# role = Role(
# name="超级管理员",
# authorities=""
# )
# db.session.add(role)
# db.session.commit()
#     # Create the administrator account; likewise, run only once
# from werkzeug.security import generate_password_hash
#
# admin = Admin(
# name="Peter",
# password=generate_password_hash("<PASSWORD>"),
# is_super=0,
# role_id=1
# )
# db.session.add(admin)
# db.session.commit()
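# --- Usage sketch (illustrative addition, not part of the original module) ---
# Example of verifying an administrator's password with the helper defined on
# Admin above; the account values mirror the commented-out seeding code and the
# password string is a placeholder.
# from werkzeug.security import generate_password_hash
# admin = Admin(name="Peter", password=generate_password_hash("example-password"), is_super=0, role_id=1)
# assert admin.check_password("example-password") is True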
| StarcoderdataPython |
162004 |
#!/usr/bin/env python3
"""\
Usage:
zip_demos.py [<directory>]
"""
import docopt
from pathlib import Path
from zipfile import ZipFile
def demo_uses_asset(asset, content):
asset = Path(asset)
if f"'{asset.name}'" in content:
return True
if asset.suffix == '.py':
return f'import {asset.stem}' in content
args = docopt.docopt(__doc__)
directory = Path(args['<directory>'] or '.')
for demo in directory.glob('*.py'):
demo = Path(demo)
with demo.open() as file:
content = file.read()
# Skip python scripts that don't seem to have main functions.
if not content.strip().endswith('pyglet.app.run()'):
continue
with ZipFile(f'{directory / demo.stem}_assets.zip', 'w') as zip:
print(f"Making archive: '{zip.filename}'")
for asset in directory.glob('*'):
if asset == demo or demo_uses_asset(asset, content):
zip.write(str(asset))
print(f" + {asset}")
| StarcoderdataPython |
4818521 | # filename: orchestration/hca_manage/validation.py
import logging
from typing import Any, Optional
from dagster_utils.contrib.google import parse_gs_path
from google.cloud.storage import Client
from hca_manage.common import DefaultHelpParser
from hca.staging_area_validator import StagingAreaValidator
class HcaValidator:
def validate_staging_area(self, path: str, ignore_inputs: bool, client: Client) -> Any:
"""
Run the validation pre-checks on the staging area
:param path: Google Cloud Storage path for staging area
"""
exit_code = self.validate_structure(path, client)
adapter = StagingAreaValidator(
staging_area=path,
ignore_dangling_inputs=ignore_inputs,
validate_json=True
)
exit_code |= adapter.main()
if not exit_code:
logging.info(f'Staging area {path} is valid')
else:
logging.error(f'Staging area {path} is invalid')
return exit_code
def validate_structure(self, path: str, gs_client: Client) -> int:
well_known_dirs = {'/data', '/descriptors', '/links', '/metadata'}
bucket_with_prefix = parse_gs_path(path)
bucket = gs_client.bucket(bucket_with_prefix.bucket)
exit_code = 0
for well_known_dir in well_known_dirs:
expected_blob = f"{bucket_with_prefix.prefix}{well_known_dir}"
blobs = list(gs_client.list_blobs(bucket, prefix=expected_blob))
if not blobs:
logging.error(f"No {well_known_dir} dir found at path {path}")
exit_code = 1
return exit_code
# CLI
def run(arguments: Optional[list[str]] = None) -> None:
parser = DefaultHelpParser(description="CLI to manage validate GS path and json files.")
parser.add_argument("-p", "--path", help="GS path to validate", required=True)
parser.add_argument("-i", "--ignore_inputs", help="Ignore input metadata files", default=False)
args = parser.parse_args(arguments)
client = Client()
HcaValidator().validate_staging_area(args.path, args.ignore_inputs, client)
if __name__ == "__main__":
run()
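# Example invocation (illustrative addition, hypothetical bucket and prefix):
#   python validation.py --path gs://example-bucket/staging_area/ --ignore_inputs True
# -p/--path is required; -i/--ignore_inputs defaults to False as defined above.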
| StarcoderdataPython |
34211 | from typing import Optional, List
from aiogram import types, Dispatcher, filters
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters.state import StatesGroup, State
from aiogram.types import ReplyKeyboardMarkup
from handlers.common_actions_handlers import process_manual_enter, process_option_selection, \
process_complete_part_editing, claim_tmp_option_chosen, show_claim_tmp_example
from keyboards import emojis, get_common_start_kb, get_next_actions_kb, get_claim_parts_kb
from repository import Repository
from statistics import collect_statistic
CLAIM_PART: str = "essence"
class EssencePart(StatesGroup):
waiting_for_user_action = State()
waiting_for_option_chosen = State()
@collect_statistic(event_name="essence:start")
async def essence_start(message: types.Message, state: FSMContext):
repository: Repository = Repository()
claim_data: dict = repository.get_claim_data(message.from_user.id)
required_parts: List[str] = ["story"]
if claim_data.get("claim_data") is None or \
not any([part_name in claim_data["claim_data"].keys() for part_name in required_parts]):
claim_parts_kb: ReplyKeyboardMarkup = get_claim_parts_kb(message.from_user.id)
await message.reply("Пожалуйста, сперва заполните раздел 'фабула'.",
reply_markup=claim_parts_kb)
return
await EssencePart.waiting_for_user_action.set()
start_kb: ReplyKeyboardMarkup = get_common_start_kb()
await message.reply("Опишите суть нарушения. "
"Введите, почему вы считаете, что ваши права нарушают. "
"Или выберите одну из следующий опций.",
reply_markup=start_kb)
@collect_statistic(event_name="essence:show_example")
async def show_example(message: types.Message, state: FSMContext):
await show_claim_tmp_example(message, CLAIM_PART)
async def action_selected(message: types.Message, state: FSMContext):
option: Optional[str] = message.text
if option.endswith("выбрать из списка") or option.endswith("добавить еще из списка"):
await process_option_selection(message, CLAIM_PART, EssencePart)
return
if option.endswith("закончить заполнение"):
await process_complete_part_editing(message, state, CLAIM_PART)
return
await process_manual_enter(message, state, EssencePart)
async def option_chosen(callback_query: types.CallbackQuery, state: FSMContext):
await claim_tmp_option_chosen(callback_query, state, CLAIM_PART)
async def finish_option_choosing(callback_query: types.CallbackQuery):
await callback_query.answer()
await EssencePart.waiting_for_user_action.set()
next_actions_kb: ReplyKeyboardMarkup = get_next_actions_kb()
await callback_query.message.answer("Введите свой вариант самостоятельно. "
"Или выберите дальнейшее действие с помощью клавиатуры",
reply_markup=next_actions_kb)
def register_handlers(dp: Dispatcher):
dp.register_message_handler(essence_start, filters.Regexp(f"^{emojis.key} суть нарушения"))
dp.register_message_handler(show_example,
filters.Regexp(f"^{emojis.red_question_mark} показать пример"),
state=EssencePart.states)
dp.register_message_handler(action_selected, state=EssencePart.waiting_for_user_action)
dp.register_callback_query_handler(
option_chosen,
filters.Text(startswith="option"),
state=EssencePart.waiting_for_option_chosen
)
dp.register_callback_query_handler(finish_option_choosing,
filters.Text(equals="complete options"),
state=EssencePart.waiting_for_option_chosen)
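# --- Wiring sketch (illustrative addition, not part of the original module) ---
# How register_handlers() might be hooked up to an aiogram v2 Dispatcher; the
# bot token placeholder and the MemoryStorage choice are assumptions.
# from aiogram import Bot, Dispatcher
# from aiogram.contrib.fsm_storage.memory import MemoryStorage
#
# bot = Bot(token="<BOT_TOKEN>")
# dp = Dispatcher(bot, storage=MemoryStorage())
# register_handlers(dp)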
| StarcoderdataPython |
3201989 | # filename: goss-testing/scripts/python/check_ncn_uan_ip_dns.py
#!/usr/bin/env python3
#
# MIT License
#
# (C) Copyright 2014-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
import base64
import subprocess
import json
import sys
import logging
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from urllib.parse import urljoin
from kubernetes import client, config
class APIRequest(object):
"""
Example use:
api_request = APIRequest('http://api.com')
response = api_request('GET', '/get/stuff')
print (f"response.status_code")
print (f"{response.status_code}")
print()
print (f"response.reason")
print (f"{response.reason}")
print()
print (f"response.text")
print (f"{response.text}")
print()
print (f"response.json")
print (f"{response.json()}")
"""
def __init__(self, base_url, headers=None):
if not base_url.endswith('/'):
base_url += '/'
self._base_url = base_url
if headers is not None:
self._headers = headers
else:
self._headers = {}
def __call__(self, method, route, **kwargs):
if route.startswith('/'):
route = route[1:]
url = urljoin(self._base_url, route, allow_fragments=False)
headers = kwargs.pop('headers', {})
headers.update(self._headers)
retry_strategy = Retry(
total=10,
backoff_factor=0.1,
status_forcelist=[429, 500, 502, 503, 504],
method_whitelist=["PATCH", "DELETE", "POST", "HEAD", "GET", "OPTIONS"]
)
adapter = HTTPAdapter(max_retries=retry_strategy)
http = requests.Session()
http.mount("https://", adapter)
http.mount("http://", adapter)
response = http.request(method=method, url=url, headers=headers, **kwargs)
if 'data' in kwargs:
log.debug(f"{method} {url} with headers:"
f"{json.dumps(headers, indent=4)}"
f"and data:"
f"{json.dumps(kwargs['data'], indent=4)}")
elif 'json' in kwargs:
log.debug(f"{method} {url} with headers:"
f"{json.dumps(headers, indent=4)}"
f"and JSON:"
f"{json.dumps(kwargs['json'], indent=4)}")
else:
log.debug(f"{method} {url} with headers:"
f"{json.dumps(headers, indent=4)}")
log.debug(f"Response to {method} {url} => {response.status_code} {response.reason}"
f"{response.text}")
return response
# globals
gw_api = APIRequest('https://api-gw-service-nmn.local')
log = logging.getLogger(__name__)
log.setLevel(logging.WARN)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
def token():
# setup kubernetes client
config.load_kube_config()
v1 = client.CoreV1Api()
# get kubernetes admin secret
secret = v1.read_namespaced_secret("admin-client-auth", "default").data
# decode the base64 secret
token = base64.b64decode(secret['client-secret']).decode('utf-8')
# create post data to keycloak istio ingress
token_data = {'grant_type': 'client_credentials', 'client_id': 'admin-client', 'client_secret': token}
# query keycloack
token_url = '/keycloak/realms/shasta/protocol/openid-connect/token'
token_resp = gw_api('POST', token_url, data=token_data)
access_token = token_resp.json()['access_token']
# print (f'access_token')
return access_token
def main():
error_found = False
bearer_token = token()
# request header passing token
headers = {'Authorization': 'Bearer ' + bearer_token}
# query SMD EthernetInterfaces
smd_url = '/apis/smd/hsm/v2/Inventory/EthernetInterfaces'
smd_resp = gw_api('GET', smd_url, headers=headers)
smd_ethernet_interfaces = smd_resp.json()
# query SLS hardware
sls_url = '/apis/sls/v1/hardware'
sls_resp = gw_api('GET', sls_url, headers=headers)
sls_hardware = sls_resp.json()
ip_set = set()
for smd_entry in smd_ethernet_interfaces:
# print (smd_entry)
if smd_entry['IPAddresses'] != '[]':
ip_addresses = smd_entry['IPAddresses']
for ips in ip_addresses:
ip = ips['IPAddress']
# print (ip)
if ip != '':
if ip in ip_set:
log.error(f'Error: found duplicate IP: {ip}')
error_found = True
nslookup_cmd = subprocess.Popen(('nslookup', ip), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, errors = nslookup_cmd.communicate()
print("output.decode('ascii')")
else:
ip_set.add(ip)
hostname_list = []
for i in range(len(sls_hardware)):
if 'ExtraProperties' in sls_hardware[i]:
if 'Role' in sls_hardware[i]['ExtraProperties'] and (
sls_hardware[i]['ExtraProperties']['Role'] == 'Application' or sls_hardware[i]['ExtraProperties'][
'Role'] == 'Management'):
hostname_list.append(sls_hardware[i]['ExtraProperties']['Aliases'][0] + '.nmn')
hostname_list.append(sls_hardware[i]['ExtraProperties']['Aliases'][0] + '.can')
hostname_list.append(sls_hardware[i]['ExtraProperties']['Aliases'][0] + '.hmn')
hostname_list.append(sls_hardware[i]['ExtraProperties']['Aliases'][0] + '-mgmt')
hostname_list.append(sls_hardware[i]['ExtraProperties']['Aliases'][0] + '.cmn')
hostname_list.append(sls_hardware[i]['ExtraProperties']['Aliases'][0] + '.chn')
for hostname in hostname_list:
dig_cmd = subprocess.Popen(('dig', hostname, '+short'), stdout=subprocess.PIPE)
wc_cmd = subprocess.check_output(('wc', '-l'), stdin=dig_cmd.stdout)
result = int(wc_cmd.decode('ascii').strip())
if result > 1:
error_found = True
log.error(f'ERROR: {hostname} has more than 1 DNS entry')
nslookup_cmd = subprocess.Popen(('nslookup', hostname), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = nslookup_cmd.communicate()
print(f"{output.decode('ascii')}")
if error_found:
log.error('ERRORS: see above output.')
sys.exit(1)
else:
log.debug('No errors found.')
sys.exit(0)
if __name__ == "__main__":
main() | StarcoderdataPython |
179460 | # repository: kagemeka/atcoder-submissions
import sys
from bisect import bisect_left as bi_l
INF = float("inf")
n, *c = map(int, sys.stdin.read().split())
def main():
res = [INF] * n
for x in c:
i = bi_l(res, x)
res[i] = x
ans = n - bi_l(res, INF)
print(ans)
if __name__ == "__main__":
main()
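# Added note: main() is the patience-sorting trick for longest increasing
# subsequences. res[i] always holds the smallest possible tail of a strictly
# increasing subsequence of length i + 1, so bi_l(res, INF) is the LIS length
# and the printed answer is n minus that length.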
| StarcoderdataPython |
118753 | # The MIT License
#
# Copyright (c) 2008 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
class Error( Exception ):
"""Base exception class.
Contains a string with an optional error message."""
def __init__( self, message ):
self._message = message
def __str__( self ):
return self._message
def __repr__( self ):
return self._message
def __unicode__( self ):
return self._message
def msg( self ):
return self._message
class UnitializedError( Error ):
"""Thrown when an unitialized variable is accessed."""
def __init__( self, message ):
Error.__init__( self, message )
class BadArgumentError( Error ):
"""Thrown when an invalid argument is provided."""
def __init__( self, message ):
Error.__init__( self, message )
class OutOfBoundsError( Error ):
"""Thrown when the value of an argument is outside the allow range."""
def __init__( self, message ):
Error.__init__( self, message )
class UnsupportedError( Error ):
"""Thrown when an implemented feature is invoked."""
def __init__( self, message ):
Error.__init__( self, message )
class ThirdPartyError( Error ):
"""Thrown when a third party library has an error."""
def __init__( self, message ):
Error.__init__( self, message )
class SilentError( Error ):
"""Thrown when an error has occurred but no message should be printed.
Either there's none to print or something else has already printed it."""
def __init__( self, message ):
Error.__init__( self, message )
class AbortError( Error ):
"""Thrown when an operation has been aborted either by the user or
otherwise."""
def __init__( self, message ):
Error.__init__( self, message )
| StarcoderdataPython |
2028 | import os
import sys
from . import HendrixTestCase, TEST_SETTINGS
from hendrix.contrib import SettingsError
from hendrix.options import options as hx_options
from hendrix import ux
from mock import patch
class TestMain(HendrixTestCase):
def setUp(self):
super(TestMain, self).setUp()
self.DEFAULTS = hx_options()
os.environ['DJANGO_SETTINGS_MODULE'] = ''
self.devnull = open(os.devnull, 'w')
self.args_list = ['hx', 'start']
self.patcher = patch('hendrix.ux.findSettingsModule')
self.patcher.start()
def tearDown(self):
super(TestMain, self).tearDown()
self.devnull.close()
self.patcher.stop()
def test_settings_from_system_variable(self):
django_settings = 'django.inanity'
with patch('hendrix.ux.findSettingsModule') as findSettingsMod:
findSettingsMod.return_value = django_settings
options = self.DEFAULTS
self.assertEqual(options['settings'], '')
options = ux.djangoVsWsgi(options)
self.assertEqual(options['settings'], django_settings)
def test_settings_wsgi_absense(self):
with patch('hendrix.ux.findSettingsModule') as findSettingsMod:
findSettingsMod.return_value = ""
self.assertRaises(SettingsError, ux.djangoVsWsgi, self.DEFAULTS)
def test_user_settings_overrides_system_variable(self):
django_settings = 'django.inanity'
with patch('hendrix.ux.findSettingsModule') as findSettingsMod:
findSettingsMod.return_value = django_settings
options = self.DEFAULTS
user_settings = 'myproject.settings'
options['settings'] = user_settings
self.assertEqual(options['settings'], user_settings)
options = ux.djangoVsWsgi(options)
self.assertEqual(options['settings'], user_settings)
def test_wsgi_correct_wsgi_path_works(self):
wsgi_dot_path = 'hendrix.test.wsgi'
options = self.DEFAULTS
options.update({'wsgi': wsgi_dot_path})
options = ux.djangoVsWsgi(options)
self.assertEqual(options['wsgi'], wsgi_dot_path)
def test_wsgi_wrong_path_raises(self):
wsgi_dot_path = '_this.leads.nowhere.man'
options = self.DEFAULTS
options.update({'wsgi': wsgi_dot_path})
self.assertRaises(ImportError, ux.djangoVsWsgi, options)
def test_cwd_exposure(self):
cwd = os.getcwd()
_path = sys.path
sys.path = [p for p in _path if p != cwd]
self.assertTrue(cwd not in sys.path)
ux.exposeProject(self.DEFAULTS)
self.assertTrue(cwd in sys.path)
def test_pythonpath(self):
options = self.DEFAULTS
test_path = os.path.join(
os.path.dirname(os.getcwd()),
'hendrix/test/testproject'
)
options['pythonpath'] = test_path
ux.exposeProject(options)
self.assertTrue(test_path in sys.path)
sys.path = [p for p in sys.path if p != test_path]
def test_shitty_pythonpath(self):
options = self.DEFAULTS
test_path = '/if/u/have/this/path/you/suck'
options['pythonpath'] = test_path
self.assertRaises(IOError, ux.exposeProject, options)
def test_dev_friendly_options(self):
options = self.DEFAULTS
options['dev'] = True
self.assertFalse(options['reload'])
self.assertFalse(options['loud'])
options = ux.devFriendly(options)
self.assertTrue(options['reload'])
self.assertTrue(options['loud'])
def test_noise_control_daemonize(self):
options = self.DEFAULTS
options['quiet'] = True
options['daemonize'] = True
stdout = sys.stdout
stderr = sys.stderr
redirect = ux.noiseControl(options)
self.assertEqual(sys.stdout.name, stdout.name)
self.assertEqual(sys.stderr.name, stderr.name)
self.assertEqual(redirect, None)
def test_noise_control_traceback(self):
options = self.DEFAULTS
options['quiet'] = True
options['daemonize'] = True
options['traceback'] = True
stdout = sys.stdout
stderr = sys.stderr
redirect = ux.noiseControl(options)
self.assertEqual(sys.stdout.name, stdout.name)
self.assertEqual(sys.stderr.name, stderr.name)
self.assertEqual(redirect, None)
def test_main_with_daemonize(self):
sys.argv = self.args_list + ['-d', '--settings', TEST_SETTINGS]
class Process(object):
def poll(self):
return 0
with patch('time.sleep'):
with patch('subprocess.Popen') as popen:
popen.return_value = Process()
ux.main()
self.assertTrue(popen.called)
self.assertTrue('--settings' in popen.call_args[0][0])
sys.argv = []
def test_options_structure(self):
"""
A test to ensure that HendrixDeploy.options also has the complete set
of options available
"""
deploy = self.wsgiDeploy()
expected_keys = self.DEFAULTS.keys()
actual_keys = deploy.options.keys()
self.assertListEqual(expected_keys, actual_keys)
| StarcoderdataPython |
3392230 | # repository: cubrink/doltpy
from doltcli import Dolt, Commit, DoltException, DoltHubContext # type: ignore
| StarcoderdataPython |
3384710 | # Generated by Django 3.1.2 on 2020-10-08 18:43
import cropperjs.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('recipe', '0002_auto_20201008_1340'),
]
operations = [
migrations.AlterField(
model_name='recipe',
name='image',
field=cropperjs.models.CropperImageField(null=True, upload_to=''),
),
]
| StarcoderdataPython |
1618078 | # filename: tests/test_round_index.py
# -*- coding: utf-8 -*-
import shlex
import subprocess
from unittest import TestCase
import pandas
from pandas.testing import assert_frame_equal
from tstoolbox import tstoolbox, tsutils
class Testround_index(TestCase):
def setUp(self):
dr = pandas.date_range("2000-01-01", periods=2, freq="D")
ts = pandas.Series([4.5, 4.6], index=dr)
self.round_index_direct = pandas.DataFrame(ts, columns=["Value"])
self.round_index_direct.index.name = "Datetime"
self.round_index_direct = tsutils.memory_optimize(self.round_index_direct)
self.round_index_multiple_direct = pandas.DataFrame(ts, columns=["Value"])
self.round_index_multiple_direct = pandas.concat(
[self.round_index_multiple_direct, pandas.Series(ts, name="Value_r")],
axis="columns",
)
self.round_index_multiple_direct.index.name = "Datetime"
self.round_index_multiple_direct = tsutils.memory_optimize(
self.round_index_multiple_direct
)
self.round_index_cli = b"""Datetime,Value
2000-01-01,4.5
2000-01-02,4.6
"""
self.round_index_multiple_cli = b"""Datetime,Value,Value_r
2000-01-01,4.5,4.5
2000-01-02,4.6,4.6
"""
self.round_index_tsstep_2_daily_cli = b"""Datetime,Value,Value1
2000-01-01,4.5,45.6
2000-01-03,4.7,34.2
2000-01-05,4.5,7.2
"""
self.round_index_tsstep_2_daily = pandas.DataFrame(
[[4.5, 45.6], [4.7, 34.2], [4.5, 7.2]],
columns=["Value", "Value1"],
index=pandas.DatetimeIndex(["2000-01-01", "2000-01-03", "2000-01-05"]),
)
self.round_index_tsstep_2_daily = tsutils.memory_optimize(
self.round_index_tsstep_2_daily
)
self.round_index_tsstep_2_daily.index.name = "Datetime"
self.round_index_blanks = b"""Datetime,Value_mean,Unnamed: 2_mean,Unnamed: 3_mean,Unnamed: 4_mean,Unnamed: 5_mean,Unnamed: 6_mean,Unnamed: 7_mean,Unnamed: 8_mean,Unnamed: 9_mean
2000-01-01,2.46667,,,,,,,,
2000-01-02,3.4,,,,,,,,
"""
def test_round_index_direct(self):
"""Test round_index API for single column - daily."""
out = tstoolbox.read("tests/data_simple.csv", round_index="D")
assert_frame_equal(out, self.round_index_direct)
def test_round_index_mulitple_direct(self):
"""Test round_index API for multiple columns - daily."""
out = tstoolbox.read(
"tests/data_simple.csv tests/data_simple.csv",
append="columns",
round_index="D",
)
assert_frame_equal(out, self.round_index_multiple_direct)
def test_round_index_bi_monthly(self):
"""Test round_index API for bi monthly time series."""
out = tstoolbox.read("tests/data_bi_daily.csv", round_index="D")
assert_frame_equal(out, self.round_index_tsstep_2_daily)
def test_round_index_cli(self):
"""Test round_index CLI for single column - daily."""
args = 'tstoolbox read --round_index="D" tests/data_simple.csv'
args = shlex.split(args)
out = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()
self.assertEqual(out[0], self.round_index_cli)
def test_round_index_multiple_cli(self):
"""Test round_index CLI for multiple columns - daily."""
args = 'tstoolbox read --round_index="D" tests/data_simple.csv tests/data_simple.csv'
args = shlex.split(args)
out = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()
self.assertEqual(out[0], self.round_index_multiple_cli)
def test_round_index_bi_monthly_cli(self):
"""Test round_index CLI for bi monthly time series."""
args = 'tstoolbox read --round_index="D" tests/data_bi_daily.csv'
args = shlex.split(args)
out = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()
self.assertEqual(out[0], self.round_index_tsstep_2_daily_cli)
| StarcoderdataPython |
1796950 | """
Orange Canvas Graphics Items
"""
from .nodeitem import NodeItem, NodeAnchorItem, NodeBodyItem, SHADOW_COLOR
from .nodeitem import SourceAnchorItem, SinkAnchorItem, AnchorPoint
from .linkitem import LinkItem, LinkCurveItem
from .annotationitem import TextAnnotation, ArrowAnnotation
| StarcoderdataPython |
1631138 | #!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import argparse
import os
import os.path as osp
from chainer import cuda
import chainer.serializers as S
from chainer import Variable
import numpy as np
from scipy.misc import imread
from scipy.misc import imsave
import fcn
from fcn.models import FCN16s
from fcn.models import FCN32s
from fcn.models import FCN8s
class Forwarding(object):
def __init__(self, gpu, chainermodel=None):
self.gpu = gpu
self.target_names = fcn.pascal.SegmentationClassDataset.target_names
self.n_class = len(self.target_names)
if chainermodel is None:
chainermodel = osp.join(fcn.data_dir,
'fcn8s_from_caffe.chainermodel')
self.model_name = 'fcn8s'
self.model = FCN8s(n_class=self.n_class)
elif osp.basename(chainermodel).startswith('fcn8s'):
self.model_name = 'fcn8s'
self.model = FCN8s(n_class=self.n_class)
elif osp.basename(chainermodel).startswith('fcn16s'):
self.model_name = 'fcn16s'
self.model = FCN16s(n_class=self.n_class)
elif osp.basename(chainermodel).startswith('fcn32s'):
self.model_name = 'fcn32s'
self.model = FCN32s(n_class=self.n_class)
else:
raise ValueError(
'Chainer model filename must start with fcn8s, '
'fcn16s or fcn32s: {0}'.format(osp.basename(chainermodel)))
S.load_hdf5(chainermodel, self.model)
if self.gpu != -1:
self.model.to_gpu(self.gpu)
def forward_img_file(self, img_file):
print('{0}:'.format(osp.realpath(img_file)))
# setup image
img = imread(img_file, mode='RGB')
img, resizing_scale = fcn.util.resize_img_with_max_size(img)
print(' - resizing_scale: {0}'.format(resizing_scale))
# setup input datum
datum = fcn.pascal.SegmentationClassDataset.img_to_datum(img.copy())
x_data = np.array([datum], dtype=np.float32)
if self.gpu != -1:
x_data = cuda.to_gpu(x_data, device=self.gpu)
x = Variable(x_data, volatile=False)
# forward
self.model.train = False
self.model(x)
pred = self.model.score
# generate computational_graph
psfile = osp.join(
fcn.data_dir, '{0}_forward.ps'.format(self.model_name))
if not osp.exists(psfile):
fcn.util.draw_computational_graph([pred], output=psfile)
print('- computational_graph: {0}'.format(psfile))
pred_datum = cuda.to_cpu(pred.data)[0]
label = np.argmax(pred_datum, axis=0)
return img, label
def visualize_label(self, img, label):
# visualize result
unique_labels, label_counts = np.unique(label, return_counts=True)
print('- labels:')
label_titles = {}
for label_value, label_count in zip(unique_labels, label_counts):
label_region = label_count / label.size
if label_region < 0.001:
continue
title = '{0}:{1} = {2:.1%}'.format(
label_value, self.target_names[label_value], label_region)
label_titles[label_value] = title
print(' - {0}'.format(title))
result_img = fcn.util.draw_label(
label, img, n_class=self.n_class, label_titles=label_titles)
# save result
height, width = img.shape[:2]
if height > width:
vline = np.ones((height, 3, 3), dtype=np.uint8) * 255
out_img = np.hstack((img, vline, result_img))
else:
hline = np.ones((3, width, 3), dtype=np.uint8) * 255
out_img = np.vstack((img, hline, result_img))
return out_img
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default=0, type=int,
help='if -1, use cpu only')
parser.add_argument('-c', '--chainermodel')
parser.add_argument('-i', '--img-files', nargs='+', required=True)
args = parser.parse_args()
img_files = args.img_files
gpu = args.gpu
chainermodel = args.chainermodel
save_dir = osp.join(fcn.data_dir, 'forward_out')
if not osp.exists(save_dir):
os.makedirs(save_dir)
forwarding = Forwarding(gpu, chainermodel)
for img_file in img_files:
img, label = forwarding.forward_img_file(img_file)
out_img = forwarding.visualize_label(img, label)
out_file = osp.join(save_dir, osp.basename(img_file))
imsave(out_file, out_img)
print('- out_file: {0}'.format(out_file))
if __name__ == '__main__':
main()
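# Example invocation (illustrative; the script name and file paths below are
# placeholders):
#   python forward.py --gpu -1 -i some_image.jpg
#   python forward.py --gpu 0 -c fcn16s_trained.chainermodel -i a.jpg b.jpg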
| StarcoderdataPython |
3223295 | # (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import pytest
from datadog_checks.nginx import Nginx
from . import common
@pytest.mark.e2e
@pytest.mark.skipif(common.USING_VTS, reason="Non-VTS test")
def test_e2e(dd_agent_check, instance):
aggregator = dd_agent_check(instance, rate=True)
aggregator.assert_metric('nginx.net.writing', count=2, tags=common.TAGS)
aggregator.assert_metric('nginx.net.waiting', count=2, tags=common.TAGS)
aggregator.assert_metric('nginx.net.reading', count=2, tags=common.TAGS)
aggregator.assert_metric('nginx.net.conn_dropped_per_s', count=1, tags=common.TAGS)
aggregator.assert_metric('nginx.net.conn_opened_per_s', count=1, tags=common.TAGS)
aggregator.assert_metric('nginx.net.request_per_s', count=1, tags=common.TAGS)
aggregator.assert_metric('nginx.net.connections', count=2, tags=common.TAGS)
aggregator.assert_all_metrics_covered()
tags = common.TAGS + [
'nginx_host:{}'.format(common.HOST),
'port:{}'.format(common.PORT),
]
aggregator.assert_service_check('nginx.can_connect', status=Nginx.OK, tags=tags)
@pytest.mark.e2e
@pytest.mark.skipif(not common.USING_VTS, reason="VTS test")
def test_e2e_vts(dd_agent_check, instance_vts):
aggregator = dd_agent_check(instance_vts, rate=True)
aggregator.assert_metric('nginx.net.writing', count=2, tags=common.TAGS)
aggregator.assert_metric('nginx.net.waiting', count=2, tags=common.TAGS)
aggregator.assert_metric('nginx.net.reading', count=2, tags=common.TAGS)
aggregator.assert_metric('nginx.net.conn_dropped_per_s', count=1, tags=common.TAGS)
aggregator.assert_metric('nginx.net.conn_opened_per_s', count=1, tags=common.TAGS)
aggregator.assert_metric('nginx.net.request_per_s', count=1, tags=common.TAGS)
tags_server_zone = common.TAGS + ['server_zone:*']
aggregator.assert_metric('nginx.connections.active', count=2, tags=common.TAGS)
aggregator.assert_metric('nginx.server_zone.sent', count=2, tags=tags_server_zone)
aggregator.assert_metric('nginx.server_zone.sent_count', count=1, tags=tags_server_zone)
aggregator.assert_metric('nginx.server_zone.received', count=2, tags=tags_server_zone)
aggregator.assert_metric('nginx.server_zone.received_count', count=1, tags=tags_server_zone)
aggregator.assert_metric('nginx.requests.total_count', count=1, tags=common.TAGS)
aggregator.assert_metric('nginx.requests.total', count=2, tags=common.TAGS)
aggregator.assert_metric('nginx.timestamp', count=2, tags=common.TAGS)
aggregator.assert_metric('nginx.server_zone.requests_count', count=1, tags=tags_server_zone)
aggregator.assert_metric('nginx.load_timestamp', count=2, tags=common.TAGS)
aggregator.assert_metric('nginx.server_zone.requests', count=2, tags=tags_server_zone)
aggregator.assert_metric('nginx.connections.accepted', count=2, tags=common.TAGS)
aggregator.assert_metric('nginx.connections.accepted_count', count=1, tags=common.TAGS)
aggregator.assert_metric('nginx.server_zone.responses.1xx_count', count=1, tags=tags_server_zone)
aggregator.assert_metric('nginx.server_zone.responses.2xx_count', count=1, tags=tags_server_zone)
aggregator.assert_metric('nginx.server_zone.responses.3xx_count', count=1, tags=tags_server_zone)
aggregator.assert_metric('nginx.server_zone.responses.4xx_count', count=1, tags=tags_server_zone)
aggregator.assert_metric('nginx.server_zone.responses.5xx_count', count=1, tags=tags_server_zone)
aggregator.assert_metric('nginx.server_zone.responses.1xx', count=2, tags=tags_server_zone)
aggregator.assert_metric('nginx.server_zone.responses.2xx', count=2, tags=tags_server_zone)
aggregator.assert_metric('nginx.server_zone.responses.3xx', count=2, tags=tags_server_zone)
aggregator.assert_metric('nginx.server_zone.responses.4xx', count=2, tags=tags_server_zone)
aggregator.assert_metric('nginx.server_zone.responses.5xx', count=2, tags=tags_server_zone)
aggregator.assert_all_metrics_covered()
tags = common.TAGS + [
'nginx_host:{}'.format(common.HOST),
'port:{}'.format(common.PORT),
]
aggregator.assert_service_check('nginx.can_connect', status=Nginx.OK, tags=tags)
| StarcoderdataPython |
1625822 | <filename>aiocouch/couchdb.py
# Copyright (c) 2019, ZIH,
# Technische Universitaet Dresden,
# Federal Republic of Germany
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of metricq nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from types import TracebackType
from typing import Any, Dict, List, Optional, Type
from .database import Database
from .exception import NotFoundError, PreconditionFailedError
from .remote import RemoteServer
JsonDict = Dict[str, Any]
class CouchDB:
"""CouchDB Server Connection Session
    :param str server: URL of the CouchDB server
    :param str user: user used for authentication
    :param str password: password used for authentication
:param str cookie: The session cookie used for authentication
:param Any kwargs: Any other kwargs are passed to :class:`aiohttp.ClientSession`
"""
def __init__(self, *args: Any, **kwargs: Any):
self._server = RemoteServer(*args, **kwargs)
async def __aenter__(self) -> "CouchDB":
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
await self.close()
async def check_credentials(self) -> None:
"""Check the provided credentials.
:raises ~aiocouch.UnauthorizedError: if provided credentials aren't valid
"""
await self._server._check_session()
async def close(self) -> None:
"""Closes the connection to the CouchDB server"""
await self._server.close()
async def create(
self, id: str, exists_ok: bool = False, **kwargs: Any
) -> "Database":
"""Creates a new database on the server
:raises ~aiocouch.PreconditionFailedError: if the database already
exists and ``exists_ok`` is ``False``
:param id: the identifier of the database
:param exists_ok: If ``True``, don't raise if the database exists
:return: Returns a representation for the created database
"""
db = Database(self, id)
try:
await db._put(**kwargs)
except PreconditionFailedError as e:
if not exists_ok:
raise e
return db
async def __getitem__(self, id: str) -> "Database":
"""Returns a representation for the given database identifier
:raises ~aiocouch.NotFoundError: if the database does not exist
:param id: The identifier of the database
:return: The representation of the database
"""
db = Database(self, id)
if not await db._exists():
raise NotFoundError(f"The database '{id}' does not exist.")
return db
async def keys(self, **params: Any) -> List[str]:
"""Returns all database names
:return: A list containing the names of all databases on the server
"""
return await self._server._all_dbs(**params)
async def info(self) -> JsonDict:
"""Returns the meta information about the connected CouchDB server.
See also :ref:`GET /<couchdb:api/server/root>`
:return: A dict containing the response json.
"""
return await self._server._info()
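# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the URL and credentials below are
# placeholders, not part of this module):
#
#     import asyncio
#
#     async def example() -> None:
#         async with CouchDB("http://localhost:5984",
#                            user="admin", password="secret") as couch:
#             db = await couch.create("example-db", exists_ok=True)
#             print(await couch.keys())
#
#     asyncio.run(example())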
| StarcoderdataPython |
1745785 | <filename>huygens/interf/__init__.py
'''
===================================
Interfacing with Compiled Libraries
===================================
Functions
=========
c_vector -- Produce a ctype's object to pass as a C pointer (a vector)
c_matrix -- Produce a ctype's object to pass as a C pointer to a pointer (a matrix)
'''
from .interf import *
__all__=[s for s in dir() if not s.startswith('_')]
| StarcoderdataPython |
1724202 | from nose.tools import assert_equal
from networkx import asyn_lpa, Graph
def test_empty_graph():
# empty graph
test = Graph()
# ground truth
ground_truth = set()
communities = asyn_lpa.asyn_lpa_communities(test)
result = {frozenset(c) for c in communities}
assert_equal(result, ground_truth)
def test_single_node():
test = Graph()
test.add_node('a')
# ground truth
ground_truth = set([frozenset(['a'])])
communities = asyn_lpa.asyn_lpa_communities(test)
result = {frozenset(c) for c in communities}
assert_equal(result, ground_truth)
def test_simple_communities():
test = Graph()
# c1
test.add_edge('a', 'b')
test.add_edge('a', 'c')
test.add_edge('b', 'c')
# c2
test.add_edge('d', 'e')
test.add_edge('d', 'f')
test.add_edge('f', 'e')
# ground truth
ground_truth = set([frozenset(['a', 'c', 'b']),
frozenset(['e', 'd', 'f'])])
communities = asyn_lpa.asyn_lpa_communities(test)
result = {frozenset(c) for c in communities}
assert_equal(result, ground_truth)
def test_several_communities():
test = Graph()
# c1
test.add_edge('1a', '1b')
test.add_edge('1a', '1c')
test.add_edge('1b', '1c')
# c2
test.add_edge('2a', '2b')
test.add_edge('2a', '2c')
test.add_edge('2b', '2c')
# c3
test.add_edge('3a', '3b')
test.add_edge('3a', '3c')
test.add_edge('3b', '3c')
# c4
test.add_edge('4a', '4b')
test.add_edge('4a', '4c')
test.add_edge('4b', '4c')
# c5
test.add_edge('5a', '5b')
test.add_edge('5a', '5c')
test.add_edge('5b', '5c')
# ground truth
ground_truth = set([frozenset(['1a', '1c', '1b']),
frozenset(['2a', '2c', '2b']),
frozenset(['3a', '3c', '3b']),
frozenset(['4a', '4c', '4b']),
frozenset(['5a', '5c', '5b'])])
communities = asyn_lpa.asyn_lpa_communities(test)
result = {frozenset(c) for c in communities}
assert_equal(result, ground_truth)
| StarcoderdataPython |
1792435 |
import os
import sys
import fnmatch
from setuptools import setup, find_packages
# For version info
import metrilyx
def fileListBuilder(dirPath, regexp='*'):
matches = []
for root, dirnames, filenames in os.walk(dirPath):
for filename in fnmatch.filter(filenames, regexp):
matches.append(os.path.join(root, filename))
return matches
DESCRIPTION = "Metrilyx data delivery server"
LONG_DESCRIPTION = '''
Metrilyx dataserver is the core to metrilyx and is responsible for analyzing and
delivering data to the client.
'''
INSTALL_REQUIRES = [ p.strip() for p in open('requirements.txt').read().split('\n')
if p != '' and not p.startswith('#') ]
DATA_FILES = [
('/opt/metrilyx/etc/init.d', fileListBuilder('etc/init.d')),
('/opt/metrilyx/bin/', ['metrilyx-dataserver.py']),
]
AUTHOR = "euforia"
AUTHOR_EMAIL = "<EMAIL>"
setup(
name='metrilyx-dataserver',
version=metrilyx.version,
url='https://github.com/metrilyx/metrilyx-dataserver',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license='Apache',
install_requires=INSTALL_REQUIRES,
data_files=DATA_FILES,
dependency_links=['git+https://github.com/metrilyx/opentsdb-pandas.git#egg=opentsdb_pandas-0.0.1'],
packages=find_packages()
)
| StarcoderdataPython |
1714775 | <filename>Course I/Алгоритмы Python/Part2/семинары/pract5/защита/main.py
import time
import numpy as np
from computer_module import ComputerGameClass
from elements_module import BoardClass
from user_module import UserAnalyserClass
from util_module import UtilClass
# White pieces are the blue ones
# Black pieces are the red ones
class JournalWriterClass:
"""Класс для записи данных о ходах каждого участника и формирования отчетов"""
def __init__(self, user_color="white"):
self.journal_arr = np.array([])
self.bufstr = ""
def add_result(self, d):
"""Метод для добавления данных хода"""
journal = self.journal_arr
if self.bufstr == "" and d["user_color"] == "black":
raise ValueError("Белые всегда начинают ходить первые")
movementtype_dict = {"war": ":", "peace": "-"}
locale_str = (
d["from"]["y"].lower()
+ str(d["from"]["x"] + 1)
+ movementtype_dict[d["mode"]]
+ d["to"]["y"].lower()
+ str(d["to"]["x"] + 1)
)
if self.bufstr == "":
self.bufstr = locale_str
else:
self.journal_arr = np.append(journal, self.bufstr + " " + locale_str)
self.bufstr = ""
def winlose_add(self, win_color):
"""Добавление результатов игры"""
        # If black has no more moves and the game is over (i.e. someone has no options left to move)
journal = self.journal_arr
if self.bufstr != "":
self.journal_arr = np.append(journal, self.bufstr + "X")
        # Otherwise work with the last element of the array
else:
e = journal[-1]
white, black = e.split(" ")
if win_color == "white":
white += "X"
else:
black += "X"
self.journal_arr[-1] = white + " " + black
def __str__(self):
"""Информация о записанном"""
return "\033[93mЗапись ходов:\033[0m\n" + "\n".join(
[
str(i + 1) + ". " + self.journal_arr[i]
for i in np.arange(self.journal_arr.shape[0])
]
)
class GameOverClass:
"""Класс определения окончания игры"""
def __init__(self, board_obj, user_color):
self.result = False
self.won_color = ""
self.user_color = user_color
self.board_obj = board_obj
        # Only one of these methods can fire in a single iteration (not to be confused with the UserAnalyserClass logic)
self.queen_detector()
self.nofigures_detector()
self.deadlock_detector()
def queen_detector(self):
"""Определение прохода шашки одного из игроков в дамки"""
board = self.board_obj.board
uc = self.user_color
reverse_uc = "black" if uc == "white" else "white"
for i in np.arange(board.shape[1]):
if not board[0][i].isfree() and board[0][i].figure_obj.color == reverse_uc:
self.result = True
self.won_color = reverse_uc
print("\033[93m[ИНФО]\033[0m Шашка прошла в дамки")
break
for i in np.arange(board.shape[1]):
if not board[7][i].isfree() and board[7][i].figure_obj.color == uc:
self.result = True
self.won_color = uc
print("\033[93m[ИНФО]\033[0m Шашка прошла в дамки")
break
def nofigures_detector(self):
"""Определение того, что у одного из игроков больше нет фигур"""
board = self.board_obj.board
black_count, white_count = 0, 0
for i in np.arange(board.shape[0]):
for j in np.arange(board.shape[1]):
if not board[i][j].isfree() and board[i][j].figure_obj.color == "black":
black_count += 1
elif (
not board[i][j].isfree() and board[i][j].figure_obj.color == "white"
):
white_count += 1
if white_count == 0:
self.result = True
self.won_color = "black"
print("\033[93m[ИНФО]\033[0m Белых шашек больше нет на поле")
if black_count == 0:
self.result = True
self.won_color = "white"
print("\033[93m[ИНФО]\033[0m Черных шашек больше нет на поле")
def deadlock_detector(self):
"""
        Detect a deadlock (no legal moves) situation for the user
        Uses logic similar to the computer's random move
"""
board_obj = self.board_obj
board = board_obj.board
uc = self.user_color
reverse_uc = "black" if uc == "white" else "white"
all_d = np.array([])
myfields_arr = np.array([])
        # Find all of the user's pieces
for i in np.arange(board.shape[0]):
for j in np.arange(board.shape[1]):
if not board[i][j].isfree() and board[i][j].figure_obj.color == uc:
myfields_arr = np.append(myfields_arr, board[i][j])
        # For every piece, build the possible new coordinates:
for field in myfields_arr:
x, y = field.figure_obj.coord_x, field.figure_obj.coord_y
y_char = UtilClass.xint2char(y)
            # Possible short (quiet) moves
# [x+1,y-1]
if board_obj.detect_element(y - 1, x + 1):
new_y, new_x = UtilClass.xint2char(y - 1), x + 1
all_d = np.append(
all_d,
{
"from": {"x": x, "y": y_char},
"to": {"x": new_x, "y": new_y},
"mode": "peace",
"user_color": uc,
},
)
# [x+1,y+1]
if board_obj.detect_element(y + 1, x + 1):
new_y, new_x = UtilClass.xint2char(y + 1), x + 1
all_d = np.append(
all_d,
{
"from": {"x": x, "y": y_char},
"to": {"x": new_x, "y": new_y},
"mode": "peace",
"user_color": uc,
},
)
            # Long (capturing) moves
# [x+2,y+2]
if board_obj.detect_element(y + 2, x + 2):
new_y, new_x = UtilClass.xint2char(y + 2), x + 2
all_d = np.append(
all_d,
{
"from": {"x": x, "y": y_char},
"to": {"x": new_x, "y": new_y},
"mode": "war",
"user_color": uc,
},
)
# [x+2,y-2]
if board_obj.detect_element(y - 2, x + 2):
new_y, new_x = UtilClass.xint2char(y - 2), x + 2
all_d = np.append(
all_d,
{
"from": {"x": x, "y": y_char},
"to": {"x": new_x, "y": new_y},
"mode": "war",
"user_color": uc,
},
)
        # Iterate over all of the user's possible moves
for d in all_d:
obj = UserAnalyserClass(d, self.board_obj)
if obj.boolean_result:
break
else:
self.result = True
self.won_color = reverse_uc
print("\033[91m[ИНФО]\033[0m У пользователя тупиковая ситуация")
class MainClass:
"""Управляющий класс с логикой игры"""
def __init__(self):
        # Create the board
user_color = input(
"Выберите цвет шашек:\n1. Белый (по умолчанию)\n2. Черный\n-> "
)
self.user_color = "black" if user_color == "2" else "white"
generator_mode = input(
"Введите способ генерации шашек на доске:\n1. Ручная расстановка, 6 фигур (по умолчанию)\n2. Стандартная авторасстановка, 12 фигур\n-> "
)
board_obj = (
BoardClass(2, self.user_color)
if generator_mode == "2"
else BoardClass(1, self.user_color)
)
print(board_obj)
# board_obj.board[3][3].figure_obj = FigureClass("TEST", 3, 3)
self.board_obj = board_obj
self.gameprocess()
def command_parser(self, cmd):
"""
        Parse and filter the command entered by the user
        If everything is fine, the higher-level check is invoked
"""
movement_type_dict = {":": "war", "-": "peace"}
        # Separator that splits the string into 2 parts
spliter = ""
detect_flag = False
for key in movement_type_dict.keys():
if key in cmd:
detect_flag = True
spliter = key
break
if not detect_flag:
return {}
command_dict = {
"from": {},
"to": {},
"mode": movement_type_dict[spliter],
"user_color": self.user_color,
}
        # Split the entered command into 2 parts
part1, part2 = cmd.split(spliter)
if UtilClass.checkxy_value(part1) and UtilClass.checkxy_value(part2):
command_dict["from"]["x"] = int(part1[1]) - 1
command_dict["from"]["y"] = part1[0]
command_dict["to"]["x"] = int(part2[1]) - 1
command_dict["to"]["y"] = part2[0]
return command_dict
return {}
def gameprocess(self):
"""Управляющая логика работы игры"""
userstepcolor_dict = {"black": 1, "white": 0}
usercolor_dict = {"black": "Черный", "white": "Белый"}
user_color = self.user_color
userstep = userstepcolor_dict[user_color]
        # Iteration number
i = 0
won_color = ""
print("\033[93m*Игра началась*\033[0m")
journal = JournalWriterClass()
while True:
            # Check whether the game is over
obj = GameOverClass(self.board_obj, user_color)
if obj.result:
won_color = obj.won_color
print("Выиграл цвет: {}".format(usercolor_dict[obj.won_color]))
break
            # The user's turn
if i % 2 == userstep:
print("Ход №{}. Ходит пользователь..".format(i + 1))
cmd = input("Введите команду -> ")
result_dict = self.command_parser(cmd)
                # If the command passed validation
if result_dict != {}:
self.result_dict = result_dict
                    # Check against all criteria
obj = UserAnalyserClass(result_dict, self.board_obj, True)
                    # If everything is fine, make the move
if obj.boolean_result:
self.result_dict = obj.command_dict
                        # The user makes the move
self.user_mode()
journal.add_result(obj.command_dict)
i += 1
else:
print("\033[91m[Ошибка]\033[0m Некорректный ход")
else:
print(
"\033[91m[Ошибка]\033[0m Некорректный ввод данных. Пример: 'c3:e5' - перемещение с боем, 'c3-b4' - тихое перемещение"
)
            # The computer's turn
else:
print("Ход №{}. Ходит компьютер..".format(i + 1))
time.sleep(3)
computergame_obj = ComputerGameClass(self.board_obj, user_color)
d = computergame_obj.result_dict
print(
"{} -> {}".format(
UtilClass.getfail_coords(d["from"]),
UtilClass.getfail_coords(d["to"]),
)
)
                # If the computer has no legal move (deadlock)
if not computergame_obj.result:
won_color = user_color
print("\033[91m[ИНФО]\033[0m У компьютера тупиковая ситуация")
print("Выиграл цвет: {}".format(usercolor_dict[user_color]))
break
journal.add_result(d)
i += 1
            # Print the board
print(self.board_obj)
        # Record the game ending
journal.winlose_add(won_color)
print(journal)
def user_mode(self):
"""
        Perform the user's move
"""
d = self.result_dict
board = self.board_obj.board
mode = d["mode"]
f1 = [d["from"]["x"], UtilClass.char2xint(d["from"]["y"])]
f2 = [d["to"]["x"], UtilClass.char2xint(d["to"]["y"])]
x1, y1 = f1
x2, y2 = f2
field_from = board[x1][y1]
field_to = board[x2][y2]
        # Get the figure object from the source cell and set its updated coordinates
figure_obj = field_from.figure_obj
figure_obj.coord_x, figure_obj.coord_y = f2
        # Assign the figure to the destination cell
field_to.field_reserve(figure_obj)
        # Free the old cell
field_from.field_free()
        # If we capture a piece, remove it from that cell
if mode == "war":
attack_x, attack_y = d["enemy"]["x"], d["enemy"]["y"]
board[attack_x][attack_y].field_free()
self.board_obj.board = board
if __name__ == "__main__":
MainClass()
| StarcoderdataPython |
56991 | class Solution(object):
def XXX(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
def backtrace(nums,track):
if len(nums) == len(track):
res.append(track[:])
return
for i in range(len(nums)):
if nums[i] in track:
continue
track.append(nums[i])
backtrace(nums,track)
track.remove(track[-1])
track = []
res = []
backtrace(nums,track)
return res
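# Example (illustrative): Solution().XXX([1, 2, 3]) backtracks over the unused
# values and returns all 6 permutations, in the order
# [1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1].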
| StarcoderdataPython |
164949 | <reponame>Sheidaas/gamee<gh_stars>0
class GuiAbstractObject:
def is_clicked(self, mouse):
area = (self.position[0] * self.screen.engine.settings.graphic['screen']['resolution_scale'][0],
self.position[1] * self.screen.engine.settings.graphic['screen']['resolution_scale'][1],
(self.position[0] + self.position[2]) * self.screen.engine.settings.graphic['screen']['resolution_scale'][0],
(self.position[1] + self.position[3]) * self.screen.engine.settings.graphic['screen']['resolution_scale'][1] )
if mouse[0][0] >= area[0] and mouse[0][0] <= area[2] \
and mouse[0][1] >= area[1] and mouse[0][1] <= area[3]:
return True
return False
def clicked(self, mouse):
print('clicked')
| StarcoderdataPython |
3397975 | from dater.dateset import DataSet
import numpy as np
class DynMemNetDataSet(DataSet):
def __init__(self):
pass
def load_train(self):
pass
def load_dev(self):
pass
def load_train_dev(self):
pass
def load_test(self):
pass
def set_predict_params(self):
self.batch_size = 8
self.story_length = 3
self.sequence_length = 10
pass
def load_predict(self):
pass
def get_train_batch(self):
pass
def get_dev(self):
pass
def get_test(self):
pass
def get_predict(self):
self.story = np.random.randn(self.batch_size, self.story_length, self.sequence_length)
self.story[self.story > 0] = 1
self.story[self.story <= 0] = 0
self.query = np.random.randn(self.batch_size, self.sequence_length)
self.query[self.query > 0] = 1
self.query[self.query <= 0] = 0
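        # Synthetic target (descriptive note): number of ones in the query
        # plus round(0.1 * number of ones in the story), computed per sample.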
self.answer_single = np.sum(self.query, axis=1) + np.round(0.1 * np.sum(np.sum(self.story, axis=1), axis=1))
pass
| StarcoderdataPython |
1787587 | # The following code has been modified from that provided at github.com/piborg/diablo
# by <NAME>
#!/usr/bin/env python
###
#
# diabloSequence.py: A script for controlling motors with the Diablo in a sequence.
#
# 2019-04-26
#
###
# Import library functions we need
from __future__ import print_function
from diablo import *
from time import sleep
from sys import exit
import RPi.GPIO as GPIO # Fetch the GPIO library and initialize the object.
GPIO.setmode(GPIO.BCM)
# Set up GPIO pins 23 and 24 (BCM numbering) for input.
GPIO.setup(24, GPIO.IN)
GPIO.setup(23, GPIO.IN)
# Set up the Diablo
DIABLO = Diablo() # Create a new Diablo object
DIABLO.Init() # Set the board up (checks the board is connected)
if not DIABLO.foundChip:
boards = ScanForDiablo()
if len(boards) == 0:
print('No Diablo found, check you are attached :)')
else:
print('No Diablo at address %02X, but we did find boards:' % (DIABLO.i2cAddress))
for board in boards:
print(' %02X (%d)' % (board, board))
print('If you need to change the I2C address change the set-up line so it is correct, e.g.')
print('DIABLO.i2cAddress = 0x%02X' % (boards[0]))
exit()
#DIABLO.SetEpoIgnore(True) # Uncomment to disable EPO latch, needed if you do not have a switch / jumper
DIABLO.ResetEpo() # Reset the stop switch (EPO) state
# if you do not have a switch across the two pin header then fit the jumper
# Loop over the sequence until the user presses CTRL+C
print ('Press CTRL+C to cancel manual motor control.')
try:
while True:
if (not(GPIO.input(23))):
DIABLO.SetMotor1(0.0) # If the motor on/off switch is set to off, do not operate the motor.
else:
if (GPIO.input(24)):
DIABLO.SetMotor1(+1.0) # Set the motor to go "forward" if switch is on.
else:
DIABLO.SetMotor1(-1.0) # Set the motor to go "backward" if switch is off.
except KeyboardInterrupt:
# User has pressed CTRL+C
DIABLO.MotorsOff() # Turn both motors off
print ('Done')
| StarcoderdataPython |
4828506 | # Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for EventHandler class."""
from typing import Callable
from typing import List
from typing import Optional
from typing import Text
from typing import Tuple
from typing import TYPE_CHECKING
from .event import Event
from .some_actions_type import SomeActionsType
if TYPE_CHECKING:
from .launch_context import LaunchContext # noqa: F401
class BaseEventHandler:
"""
Base class for event handlers, which handle events in the launch system.
Entities yielded by the event handler can access the event being handled
via the context's locals, e.g. `context.locals.event`
As another example, getting the name of the event as a Substitution:
`launch.substitutions.LocalSubstitution('event.name')`.
"""
def __init__(self, *, matcher: Callable[[Event], bool], handle_once: bool = False):
"""
Create a BaseEventHandler.
:param: matcher is a callable that takes an event and returns True if
the event should be handled by this event handler, False otherwise.
:param: handle_once is a flag that, if True, unregisters this EventHandler
after being handled once.
"""
self.__matcher = matcher
self.__handle_once = handle_once
@property
def handle_once(self):
"""Getter for handle_once flag."""
return self.__handle_once
@property
def handler_description(self):
"""
Return the string description of the handler.
This should be overridden.
"""
return None
@property
def matcher_description(self):
"""
Return the string description of the matcher.
This should be overridden.
"""
return None
def matches(self, event: Event) -> bool:
"""Return True if the given event should be handled by this event handler."""
return self.__matcher(event)
def describe(self) -> Tuple[Text, List[SomeActionsType]]:
"""Return the description list with 0 as a string, and then LaunchDescriptionEntity's."""
return (
"{}(matcher='{}', handler='{}', handle_once={})".format(
type(self).__name__,
self.matcher_description,
self.handler_description,
self.handle_once
),
[]
)
def handle(self, event: Event, context: 'LaunchContext') -> Optional[SomeActionsType]:
"""
Handle the given event.
This implementation should always be called by child classes in order to properly
support common event handler functionality.
"""
context.extend_locals({'event': event})
if self.handle_once:
context.unregister_event_handler(self)
class EventHandler(BaseEventHandler):
def __init__(
self,
*,
matcher: Callable[[Event], bool],
entities: Optional[SomeActionsType] = None,
handle_once: bool = False
) -> None:
"""
Create an EventHandler.
:param: matcher is a callable that takes an event and returns True if
the event should be handled by this event handler, False otherwise.
:param: entities is an LaunchDescriptionEntity or list of them, and is
returned by handle() unconditionally if matcher returns True.
:param: handle_once is a flag that, if True, unregisters this EventHandler
after being handled once.
"""
super().__init__(matcher=matcher, handle_once=handle_once)
self.__entities = entities
@property
def entities(self):
"""Getter for entities."""
return self.__entities
def describe(self) -> Tuple[Text, List[SomeActionsType]]:
"""Return the description list with 0 as a string, and then LaunchDescriptionEntity's."""
text, actions = super().describe()
if self.entities:
actions.extend(self.entities)
return (text, actions)
def handle(self, event: Event, context: 'LaunchContext') -> Optional[SomeActionsType]:
"""Handle the given event."""
super().handle(event, context)
return self.entities
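# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): an event handler that
# matches every event and unregisters itself after handling once; the launch
# context that registers it is assumed to exist elsewhere.
#
#     handler = EventHandler(
#         matcher=lambda event: isinstance(event, Event),
#         entities=None,
#         handle_once=True,
#     )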
| StarcoderdataPython |
122997 | <filename>enter.py<gh_stars>0
from IBI_raspisan import get_updates, handle_updates
import time
updates = get_updates()
while True:
new_updates = get_updates()
if len(new_updates['result']) > len(updates['result']):
handle_updates(new_updates, len(new_updates['result']) - len(updates['result']))
updates = new_updates
elif len(new_updates['result']) == 0:
updates = new_updates
time.sleep(10)
| StarcoderdataPython |
15118 | <reponame>NumanIbnMazid/numanibnmazid.com<filename>backend/utils/management/commands/generate_dummy_skills.py
from portfolios.factories.skill_factory import create_skills_with_factory
from django.db import transaction
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Generates dummy data"
def _generate_dummy_data(self):
# Create dummy data
create_skills_with_factory(
num_of_data=7,
delete_old_data=False
)
@transaction.atomic
def handle(self, *args, **kwargs):
# generate data
self._generate_dummy_data()
| StarcoderdataPython |
3358057 | import home
from ws.handler.event.enum.appliance.light import forced
class Handler(forced.Handler):
KLASS = home.appliance.thermostat.presence.event.forced.event.Event
TEMPLATE = "event/forced_enum.html"
KEEPING = "keeping"
FORCED_KEEPING = "Forced keeping"
ICON_KEEPING = "fas fa-sort"
def _get_str(self, e):
if e == home.appliance.thermostat.presence.event.forced.event.Event.On:
return self.ON
elif e == home.appliance.thermostat.presence.event.forced.event.Event.Off:
return self.OFF
elif e == home.appliance.thermostat.presence.event.forced.event.Event.Keep:
return self.KEEPING
elif e == home.appliance.thermostat.presence.event.forced.event.Event.Not:
return self.NO
return e
def get_description(self, e):
if e == home.appliance.thermostat.presence.event.forced.event.Event.On:
return self.FORCED_ON
elif e == home.appliance.thermostat.presence.event.forced.event.Event.Off:
return self.FORCED_OFF
elif e == home.appliance.thermostat.presence.event.forced.event.Event.Keep:
return self.FORCED_KEEPING
elif e == home.appliance.thermostat.presence.event.forced.event.Event.Not:
return self.FORCED_NOT
return e
def get_icon(self, e):
if e == home.appliance.thermostat.presence.event.forced.event.Event.On:
return self.ICON_UP
elif e == home.appliance.thermostat.presence.event.forced.event.Event.Off:
return self.ICON_DOWN
elif e == home.appliance.thermostat.presence.event.forced.event.Event.Keep:
return self.ICON_KEEPING
elif e == home.appliance.thermostat.presence.event.forced.event.Event.Not:
return self.ICON_OK
return e
| StarcoderdataPython |
3204228 | """Upload script result to Cyberwatch for air gapped scans"""
import os
import argparse
from configparser import ConfigParser
from cbw_api_toolbox.cbw_api import CBWApi
def connect_api():
'''Connect to the API and test connection'''
conf = ConfigParser()
conf.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..', 'api.conf'))
client = CBWApi(conf.get('cyberwatch', 'url'), conf.get(
'cyberwatch', 'api_key'), conf.get('cyberwatch', 'secret_key'))
client.ping()
return client
def upload(client):
"""Upload results from the folder 'Uploads' to Cyberwatch"""
print("INFO: Searching for available results...")
files = ( file for file in sorted(os.listdir(os.path.join(os.path.dirname(__file__), 'Uploads'))) )
for file in files:
file_path = os.path.join(os.path.dirname(__file__), 'Uploads', file)
if os.path.isfile(file_path):
with open(file_path, 'r') as filehandle:
filecontent = filehandle.read()
content = {'output': filecontent , 'groups': 'my_group_1, my_group_2'}
print('INFO: Sending {} content to the API...'.format(file))
client.upload_airgapped_results(content)
def launch_script():
'''Launch script'''
client = connect_api()
upload(client)
print("INFO: Done.")
def main(args=None):
'''Main function'''
parser = argparse.ArgumentParser(
description="Script using Cyberwatch API to upload results from 'Uploads' folder to Cyberwatch")
parser.parse_args(args)
launch_script()
if __name__ == '__main__':
main()
| StarcoderdataPython |
4838879 | # Generated by Django 4.0 on 2022-02-05 13:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('catalog', '0002_bookinstance_borrower_alter_author_date_of_birth_and_more'),
]
operations = [
migrations.AlterModelOptions(
name='bookinstance',
options={'permissions': [('can_mark_returned', 'Set book as returned'), ('can_mark_on_loan', 'Set book as on loan')]},
),
]
| StarcoderdataPython |
3204902 | <filename>tests/test_util.py
import hashlib
from os import makedirs, symlink
from shutil import rmtree
from os.path import join, basename
from unittest.mock import patch
from egcg_core import util
from tests import TestEGCG
fastq_dir = join(TestEGCG.assets_path, 'fastqs')
def test_find_files():
expected = [join(TestEGCG.assets_path, f) for f in ('ftest.txt', 'ftest_2.txt')]
assert util.find_files(TestEGCG.assets_path, 'ftest*.txt') == expected
@patch('logging.Logger.warning')
def test_find_file(mocked_log):
assert util.find_file(TestEGCG.assets_path, 'ftest.txt') == join(TestEGCG.assets_path, 'ftest.txt')
assert util.find_file(TestEGCG.assets_path, 'ftest_.txt') is None
assert util.find_file(TestEGCG.assets_path, 'ftest*.txt') is None
mocked_log.assert_called_with(
'Searched pattern %s for one file, but got %s', (TestEGCG.assets_path, 'ftest*.txt'), 2
)
def test_str_join():
assert util.str_join('this', 'that', 'other', separator='/') == 'this/that/other'
def test_find_fastqs():
fastqs = util.find_fastqs(fastq_dir, '10015AT', '10015AT0001')
for file_name in ('10015AT0001_S6_L004_R1_001.fastq.gz', '10015AT0001_S6_L004_R2_001.fastq.gz',
'10015AT0001_S6_L005_R1_001.fastq.gz', '10015AT0001_S6_L005_R2_001.fastq.gz'):
assert join(fastq_dir, '10015AT', '10015AT0001', file_name) in fastqs
def test_find_fastqs_with_lane():
fastqs = util.find_fastqs(fastq_dir, '10015AT', '10015AT0001', lane=4)
for file_name in ('10015AT0001_S6_L004_R1_001.fastq.gz', '10015AT0001_S6_L004_R2_001.fastq.gz'):
assert join(fastq_dir, '10015AT', '10015AT0001', file_name) in fastqs
def test_find_all_fastqs():
fastqs = util.find_all_fastqs(fastq_dir)
for file_name in ('10015AT0001_S6_L004_R1_001.fastq.gz', '10015AT0001_S6_L004_R2_001.fastq.gz',
'10015AT0002_merged_R1.fastq.gz', '10015AT0002_merged_R2.fastq.gz'):
assert file_name in [basename(f) for f in fastqs]
def test_find_all_fastq_pairs():
observed = util.find_all_fastq_pairs(join(fastq_dir, '10015AT', '10015AT0001'))
expected = [('10015AT0001_S6_L004_R1_001.fastq.gz', '10015AT0001_S6_L004_R2_001.fastq.gz'),
('10015AT0001_S6_L005_R1_001.fastq.gz', '10015AT0001_S6_L005_R2_001.fastq.gz')]
assert [(basename(f), basename(g)) for f, g in observed] == expected
def test_same_fs():
test = join(TestEGCG.assets_path, 'ftest.txt')
test_2 = join(TestEGCG.assets_path, 'ftest_2.txt')
test_nonexistent = join(TestEGCG.assets_path, 'ftest_nonexistent.txt')
assert util.same_fs(test, None) is False
assert util.same_fs(test, test_2)
assert util.same_fs(test, test_nonexistent)
class TestMoveDir(TestEGCG):
@staticmethod
def _create_test_file(f, content='This is a test file'):
with open(f, 'w') as of:
of.write(content)
@staticmethod
def _md5(f):
hash_md5 = hashlib.md5()
with open(f, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def setUp(self):
self.test_dir = join(self.assets_path, 'move_dir')
makedirs(join(self.test_dir, 'from', 'subdir'), exist_ok=True)
self._create_test_file(join(self.test_dir, 'from', 'ftest.txt'))
self._create_test_file(join(self.test_dir, 'from', 'subdir', 'ftest.txt'))
makedirs(join(self.test_dir, 'external'), exist_ok=True)
self._create_test_file(join(self.test_dir, 'external', 'external.txt'), 'External file')
symlink(join(self.test_dir, 'external', 'external.txt'), join(self.test_dir, 'from', 'external_renamed.txt'))
makedirs(join(self.test_dir, 'exists', 'subdir'), exist_ok=True)
self._create_test_file(join(self.test_dir, 'exists', 'subdir', 'ftest.txt'), 'another file')
self._create_test_file(join(self.test_dir, 'exists', 'ftest.txt'), 'another file')
def tearDown(self):
for base in ('to', 'from', 'exists', 'external'):
f = util.find_file(self.test_dir, base)
if f:
rmtree(f)
def test_move_dir(self):
frm = join(self.test_dir, 'from')
to = join(self.test_dir, 'to')
md5_from = self._md5(join(frm, 'ftest.txt'))
assert util.find_file(frm, 'ftest.txt')
assert not util.find_file(to)
assert util.move_dir(frm, to) == 0
assert not util.find_file(frm, 'ftest.txt')
assert util.find_file(to, 'ftest.txt')
assert util.find_file(to, 'subdir', 'ftest.txt')
assert md5_from == self._md5(join(to, 'ftest.txt'))
assert util.find_file(to, 'external_renamed.txt')
def test_move_dir_exists(self):
frm = join(self.test_dir, 'from')
to = join(self.test_dir, 'exists')
md5_from1 = self._md5(join(frm, 'ftest.txt'))
md5_from2 = self._md5(join(frm, 'subdir', 'ftest.txt'))
assert util.find_file(frm, 'ftest.txt')
assert util.find_file(to, 'ftest.txt')
assert not md5_from1 == self._md5(join(to, 'ftest.txt'))
assert not md5_from2 == self._md5(join(to, 'subdir', 'ftest.txt'))
util.move_dir(frm, to)
assert not util.find_file(frm, 'ftest.txt')
assert util.find_file(to, 'ftest.txt')
assert md5_from1 == self._md5(join(to, 'ftest.txt'))
assert md5_from2 == self._md5(join(to, 'subdir', 'ftest.txt'))
def test_query_dict():
data = {'this': {'that': 'other'}}
assert util.query_dict(data, 'this') == {'that': 'other'}
assert util.query_dict(data, 'this.that') == 'other'
assert util.query_dict(data, 'nonexistent') is None
assert util.query_dict(data, 'nonexistent', ret_default='things') == 'things'
| StarcoderdataPython |
1673832 | from __future__ import print_function
import argparse
import shutil
import torch
import torchvision
import random
import os
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import matplotlib.pyplot as plt
writer = SummaryWriter()
from resnet import ResNet_small
from torchsummary import summary
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
def train(model, dataloader, optimizer, scheduler, loss_fn, epoch):
# Set the model into train mode
model.train()
train_loss = 0
correct = 0
total = 0
datacount = len(dataloader)
for batch_idx, (train_batch, labels_batch) in enumerate(dataloader):
# move the data onto the device
train_batch, labels_batch = train_batch.to(device), labels_batch.to(device)
optimizer.zero_grad()
# compute model outputs and loss
outputs = model(train_batch)
loss = loss_fn(outputs, labels_batch.squeeze())
loss.backward()
# after computing gradients based on current batch loss,
# apply them to parameters
optimizer.step()
scheduler.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += labels_batch.size(0)
correct += predicted.eq(labels_batch.squeeze()).sum().item()
# write to tensorboard
writer.add_scalar(
"train/loss",
train_loss / (batch_idx + 1),
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"train/accuracy",
100.0 * correct / total,
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"train/lr",
scheduler._last_lr[0],
(datacount * (epoch + 1)) + (batch_idx + 1),
)
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(train_batch),
len(dataloader.dataset),
100.0 * batch_idx / len(dataloader),
(train_loss / (batch_idx + 1)),
# loss,
),
end="\r",
flush=True,
)
print()
return train_loss / datacount, 100.0 * correct / total
def test(model, dataloader, loss_fn, epoch):
model.eval()
test_loss = 0
correct = 0
total = 0
datacount = len(dataloader)
with torch.no_grad():
for batch_idx, (test_batch, labels_batch) in enumerate(dataloader):
# move the data onto device
test_batch, labels_batch = test_batch.to(device), labels_batch.to(device)
# compute the model output
outputs = model(test_batch)
loss = loss_fn(outputs, labels_batch.squeeze())
test_loss += loss.item()
_, predicted = outputs.max(1)
total += labels_batch.size(0)
correct += predicted.eq(labels_batch.squeeze()).sum().item()
# log the test_loss
writer.add_scalar(
"test/loss",
test_loss / (batch_idx + 1),
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"test/accuracy",
100.0 * correct / total,
(datacount * (epoch + 1)) + (batch_idx + 1),
)
test_loss = test_loss / datacount
acc = 100 * correct / total
print("Test accuracy:", acc)
return test_loss, acc
def save_ckp(state, checkpoint_dir):
f_path = "gender-best-checkpoint.pt"
torch.save(state, f_path)
def main():
# Training settings
parser = argparse.ArgumentParser(description="PyTorch GENDER CV LAB")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 128)",
)
parser.add_argument(
"--epochs",
type=int,
default=200,
metavar="N",
help="number of epochs to train (default: 200)",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--save_model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--load_checkpoint",
type=str,
default=False,
help="Path of checkpoint to restore, if none will start training from 0",
)
args = parser.parse_args()
random.seed(args.seed)
os.environ["PYTHONHASHSEED"] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 8, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
# Load
x_train = np.load("data/x_train.npy")
x_test = np.load("data/x_test.npy")
x_train = x_train / 255
x_test = x_test / 255
x_train = torch.from_numpy(x_train).squeeze().permute(0, 3, 1, 2).float()
x_test = torch.from_numpy(x_test).squeeze().permute(0, 3, 1, 2).float()
y_train = np.load("data/y_train.npy")
y_test = np.load("data/y_test.npy")
y_train = torch.from_numpy(y_train).squeeze().long()
y_test = torch.from_numpy(y_test).squeeze().long()
dataset1 = torch.utils.data.TensorDataset(x_train, y_train.unsqueeze(1))
dataset2 = torch.utils.data.TensorDataset(x_test, y_test.unsqueeze(1))
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = ResNet_small().to(device)
print(summary(model, (3, 100, 100)))
print(
"Trainable parameters",
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer, max_lr=0.1, steps_per_epoch=len(train_loader), epochs=200
) # epoch 187
epoch = 1
loss = nn.CrossEntropyLoss()
if args.load_checkpoint:
print("Loading checkpoint args.load_checkpoint")
checkpoint = torch.load(args.load_checkpoint)
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
epoch = checkpoint["epoch"]
best_acc = 0
l_train_loss = []
l_test_loss = []
l_train_acc = []
l_test_acc = []
l_lr = []
for epoch in range(epoch, args.epochs + 1):
train_loss, train_acc = train(
model, train_loader, optimizer, scheduler, loss, epoch
)
test_loss, test_acc = test(model, test_loader, loss, epoch)
if test_acc > best_acc:
best_acc = test_acc
if test_acc > 97.0:
print("Error < 3.0 achieved, stopped training")
break
if args.save_model and test_acc >= best_acc:
checkpoint = {
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
}
print("Saving checkpoint as best model to gender-best-checkpoint.pt")
save_ckp(checkpoint, "")
l_train_loss.append(train_loss)
l_test_loss.append(test_loss)
l_train_acc.append(train_acc)
l_test_acc.append(test_acc)
l_lr.append(scheduler._last_lr[0])
# PLOTS
fig = plt.figure()
plt.plot(l_train_loss, color="red", label="Train")
plt.plot(l_test_loss, color="blue", label="Test")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Loss", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/gender_loss.png")
plt.close()
fig = plt.figure()
plt.plot(l_train_acc, color="red", label="Train")
plt.plot(l_test_acc, color="blue", label="Test")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Accuracy", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/gender_acc.png")
plt.close()
fig = plt.figure()
plt.plot(l_lr, color="orange", label="Learning rate")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Learning rate", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/gender_lr.png")
plt.close()
if __name__ == "__main__":
main()
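# Example invocation (illustrative; the script name is a placeholder):
#   python train_gender.py --batch-size 64 --epochs 200 --save_model
#   python train_gender.py --load_checkpoint gender-best-checkpoint.pt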
| StarcoderdataPython |
105703 | <gh_stars>0
import pythagore
problem_type = input("Type 1 for right triangle checker. Type 2 for hypotenuse calc. for right triangle. Type 3 for missing arm value calc. for right triangle. Type 4 for volume of a cylinder calc. Type 5 for cone volume calc. Type 6 for volume of a sphere calc.")
if problem_type == "1":
pythagore.check()
elif problem_type == "2":
a = input("a=")
b = input("b=")
a = float(a)
b = float(b)
a = a ** 2
b = b ** 2
print("the hypotenuse is" , (a + b) ** 0.5 , "and a and b squared equal" , a , "and" , b)
elif problem_type == "3":
a = input("a=")
c = input("c=")
a = float(a)
c = float(c)
a = a ** 2
c = c ** 2
b = c - a
b = b ** 0.5
print("the missing arm value is" , b , "and a and c squared are" , a , "and" , c)
elif problem_type == "4":
a = input("radius=")
b = input("height=")
a = float(a)
b = float(b)
circle_area = a ** 2 * 3.14
print("the volume is" , circle_area * b)
elif problem_type == "5":
a = input("radius=")
b = input("height=")
a = float(a)
b = float(b)
circle_area = a ** 2 * 3.14
volume = circle_area * b / 3
print("the volume is" , volume , "and the base area is" , circle_area)
elif problem_type == "6":
a = input("radius=")
a = float(a)
a = a ** 3
a = a * 3.14
a = a * 4
a = a / 3
print("The volume is about" , a)
else:
    print("That is not a valid function")
| StarcoderdataPython |
1662843 | <filename>experiments/analyze/VisualizeDeepConvRFMap.py
# general imports
import warnings
import matplotlib.pyplot as plt
import seaborn as sns
import torchvision.datasets as datasets
import numpy as np
from sklearn.ensemble import RandomForestClassifier
sns.set()
warnings.filterwarnings("ignore")
cifar_data_path = "data"
class1 = 3 # cat
class2 = 5 # dog
fraction_of_train_samples = 0.35
class ConvRF(object):
def __init__(self, kernel_size=5, stride=2):
self.kernel_size = kernel_size
self.stride = stride
self.kernel_forest = None
def _convolve_chop(self, images, labels=None, flatten=False):
batch_size, in_dim, _, num_channels = images.shape
out_dim = int((in_dim - self.kernel_size) / self.stride) + 1 # calculate output dimensions
# create matrix to hold the chopped images
out_images = np.zeros((batch_size, out_dim, out_dim,
self.kernel_size, self.kernel_size, num_channels))
out_labels = None
curr_y = out_y = 0
# move kernel vertically across the image
while curr_y + self.kernel_size <= in_dim:
curr_x = out_x = 0
# move kernel horizontally across the image
while curr_x + self.kernel_size <= in_dim:
# chop images
out_images[:, out_x, out_y] = images[:, curr_x:curr_x +
self.kernel_size, curr_y:curr_y+self.kernel_size, :]
curr_x += self.stride
out_x += 1
curr_y += self.stride
out_y += 1
if flatten:
out_images = out_images.reshape(batch_size, out_dim, out_dim, -1)
if labels is not None:
out_labels = np.zeros((batch_size, out_dim, out_dim))
out_labels[:, ] = labels.reshape(-1, 1, 1)
return out_images, out_labels
def convolve_fit(self, images, labels):
sub_images, sub_labels = self._convolve_chop(images, labels=labels, flatten=True)
batch_size, out_dim, _, _ = sub_images.shape
all_sub_images = sub_images.reshape(batch_size*out_dim*out_dim, -1)
all_sub_labels = sub_labels.reshape(batch_size*out_dim*out_dim, -1)
self.kernel_forest = RandomForestClassifier(n_estimators=1000, n_jobs=-1)
self.kernel_forest.fit(all_sub_images, all_sub_labels)
convolved_image = np.zeros((images.shape[0], out_dim, out_dim, 1))
for i in range(out_dim):
for j in range(out_dim):
convolved_image[:, i, j] = self.kernel_forest.predict_proba(
sub_images[:, i, j])[..., 1][..., np.newaxis]
return convolved_image
def convolve_predict(self, images):
if not self.kernel_forest:
raise Exception("Should fit training data before predicting")
sub_images, _ = self._convolve_chop(images, flatten=True)
batch_size, out_dim, _, _ = sub_images.shape
kernel_predictions = np.zeros((images.shape[0], out_dim, out_dim, 1))
for i in range(out_dim):
for j in range(out_dim):
kernel_predictions[:, i, j] = self.kernel_forest.predict_proba(
sub_images[:, i, j])[..., 1][..., np.newaxis]
return kernel_predictions
def normalize(x):
scale = np.mean(np.arange(0, 256))
return (x - scale) / float(scale)
def denormalize(y):
scale = np.mean(np.arange(0, 256))
return ((y * scale) + scale).astype(np.int)
def viz_map(index, images, conv_map):
image = denormalize(images[index])
conv_map = np.squeeze(conv_map[index])
print("Min Activation Value: ", np.min(conv_map))
print("Max Activation Value: ", np.max(conv_map))
fig = plt.figure(figsize=(10, 8))
cmap_choice = 'Reds'
fig.add_subplot(1, 2, 1)
plt.imshow(image)
fig.add_subplot(1, 2, 2)
heatmap = plt.imshow(conv_map, cmap=cmap_choice)
plt.clim(0, 1)
cbar = plt.colorbar(heatmap, fraction=0.046, pad=0.04)
ticks = [round(t, 2) for t in cbar.get_ticks()]
top_tick = " (cat)" if ticks[-1] <= 0.5 else " (dog)"
bottom_tick = " (cat)" if ticks[0] <= 0.5 else " (dog)"
ticks = [str(t) for t in ticks]
cbar.ax.set_yticklabels([ticks[0]+bottom_tick] + ticks[1:-1] + [ticks[-1]+top_tick])
plt.show()
# train data
cifar_trainset = datasets.CIFAR10(root=cifar_data_path, train=True, download=True, transform=None)
cifar_train_images = normalize(cifar_trainset.data)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(root=cifar_data_path, train=False, download=True, transform=None)
cifar_test_images = normalize(cifar_testset.data)
cifar_test_labels = np.array(cifar_testset.targets)
num_train_samples_class_1 = int(np.sum(cifar_train_labels == class1) * fraction_of_train_samples)
num_train_samples_class_2 = int(np.sum(cifar_train_labels == class2) * fraction_of_train_samples)
# get only train images and labels for class 1 and class 2
train_images = np.concatenate([cifar_train_images[cifar_train_labels == class1][:num_train_samples_class_1],
cifar_train_images[cifar_train_labels == class2][:num_train_samples_class_2]])
train_labels = np.concatenate(
[np.repeat(0, num_train_samples_class_1), np.repeat(1, num_train_samples_class_2)])
test_images = np.concatenate([cifar_test_images[cifar_test_labels == class1],
cifar_test_images[cifar_test_labels == class2]])
test_labels = np.concatenate(
[np.repeat(0, np.sum(cifar_test_labels == class1)), np.repeat(1, np.sum(cifar_test_labels == class2))])
# Train
# ConvRF (layer 1)
conv1 = ConvRF(kernel_size=10, stride=2)
conv1_map = conv1.convolve_fit(train_images, train_labels)
conv1_map_test = conv1.convolve_predict(test_images)
np.save("deepconvrf_viz_layer1_train_" + str(fraction_of_train_samples) + ".npy", conv1_map)
np.save("deepconvrf_viz_layer1_test_" + str(fraction_of_train_samples) + ".npy", conv1_map_test)
# ConvRF (layer 2)
conv2 = ConvRF(kernel_size=7, stride=1)
conv2_map = conv2.convolve_fit(conv1_map, train_labels)
conv2_map_test = conv2.convolve_predict(conv1_map_test)
np.save("deepconvrf_viz_layer2_train_" + str(fraction_of_train_samples) + ".npy", conv2_map)
np.save("deepconvrf_viz_layer2_test_" + str(fraction_of_train_samples) + ".npy", conv2_map_test)
| StarcoderdataPython |
400 | <filename>beansdbadmin/core/client.py
#!/usr/bin/python
# encoding: utf-8
'''A rich client:
1. for one server (instead of multiple servers, as in libmc.Client)
2. encapsulates @, ?, gc ...
Use it instead of libmc.Client.
'''
import telnetlib
import logging
import libmc
import string
import urllib
import itertools
import warnings
from collections import defaultdict
from beansdbadmin.core.hint import parse_new_hint_body
from beansdbadmin.core.data import parse_records
from beansdbadmin.core.hash import get_khash64
def get_url_content(url):
return urllib.urlopen(url).read()
def check_bucket(bucket):
assert 0 <= bucket < 16
def dir_to_dict(dir_str):
d = dict()
if dir_str:
for line in [x for x in dir_str.split('\n') if x]:
key_or_bucket, _hash, ver_or_count = line.split(' ')
d[key_or_bucket] = int(_hash) & 0xffff, int(ver_or_count)
return d
def get_bucket_keys_count(store, bucket, depth=1):
cmd = "@"
sub = bucket
if depth == 2:
cmd = "@%x" % (bucket/16)
sub = bucket % 16
result = store.get(cmd)
if result:
lines = result.split('\n')
for line in lines:
if len(line) == 0:
continue
d, _, c = line.split()
if d.endswith('/'):
bucket_ = int(d[0], 16)
if bucket_ == sub:
return int(c)
raise Exception('get %s from %s, reply = [%s], bucket %x not found' % (cmd, store, result, bucket))
def get_buckets_keys_count(store):
""" return dict: buckets -> count """
st = {}
try:
for line in (store.get('@') or '').split('\n'):
if line:
d, _, c = line.split(' ')
if not d.endswith('/'):
continue
st[int(d[0], 16)] = int(c)
return st
except IOError:
raise Exception("cannot get @ from %s" % (store))
def get_primary_buckets(store):
""" return possible primary buckets, might be wrong on temporary nodes,
result is list of buckets in integer
"""
ss = get_buckets_keys_count(store)
bucket_list = ss.items()
bucket_list = [x for x in bucket_list if x[1] > 0]
if not bucket_list:
return None
bucket_list.sort(lambda a, b: cmp(a[1], b[1]), reverse=True)
result = [bucket_list[0]]
for i in bucket_list[1:]:
if result[-1][1] / i[1] >= 2:
break
result.append(i)
return [x[0] for x in result]
def get_key_info_disk(store, key):
'''return ver, vhash, flag, vsz, ts, fid, pos'''
info = store.get('??' + key)
if info:
return [int(x) for x in info.split()]
def is_gc_running(ip, port):
s = get_gc_status(ip, port)
if s and s.find('running') >= 0:
return True
return False
def get_gc_status(ip, port):
t = telnetlib.Telnet(ip, port)
t.write('optimize_stat\r\n')
out = t.read_until('\n')
t.write('quit\r\n')
t.close()
return out.strip("\r\n")
def connect(server, **kwargs):
comp_threshold = kwargs.pop('comp_threshold', 0)
prefix = kwargs.pop('prefix', None)
if prefix is not None:
warnings.warn('"prefix" is deprecated. '
'use douban.wrapper.Prefix instead.')
c = libmc.Client([server],
do_split=0,
comp_threshold=comp_threshold,
prefix=prefix)
c.config(libmc.MC_CONNECT_TIMEOUT, 300) # 0.3s
c.config(libmc.MC_POLL_TIMEOUT, 3000) # 3s
c.config(libmc.MC_RETRY_TIMEOUT, 5) # 5s
return c
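# Example (address borrowed from test_new below): connect("rosa3a:7900").
# MCStore/DBClient use this helper to build their underlying libmc client.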
class MCStore(object):
IGNORED_LIBMC_RET = frozenset([
libmc.MC_RETURN_OK,
libmc.MC_RETURN_INVALID_KEY_ERR
])
def __init__(self, addr):
self.addr = addr
self.host, port = addr.split(":")
self.port = int(port)
self.mc = connect(addr)
def __repr__(self):
return '<MCStore(addr=%s)>' % repr(self.addr)
def __str__(self):
return self.addr
def set(self, key, data, rev=0):
return bool(self.mc.set(key, data, rev))
def set_raw(self, key, data, rev=0, flag=0):
if rev < 0:
raise Exception(str(rev))
return self.mc.set_raw(key, data, rev, flag)
def set_multi(self, values, return_failure=False):
return self.mc.set_multi(values, return_failure=return_failure)
def _check_last_error(self):
last_err = self.mc.get_last_error()
if last_err not in self.IGNORED_LIBMC_RET:
raise IOError(last_err, self.mc.get_last_strerror())
def get(self, key):
try:
r = self.mc.get(key)
if r is None:
self._check_last_error()
return r
except ValueError:
self.mc.delete(key)
def get_raw(self, key):
r, flag = self.mc.get_raw(key)
if r is None:
self._check_last_error()
return r, flag
def get_multi(self, keys):
r = self.mc.get_multi(keys)
self._check_last_error()
return r
def delete(self, key):
return bool(self.mc.delete(key))
def delete_multi(self, keys, return_failure=False):
return self.mc.delete_multi(keys, return_failure=return_failure)
def exists(self, key):
return bool(self.mc.get('?' + key))
def incr(self, key, value):
return self.mc.incr(key, int(value))
class DBClient(MCStore):
def __init__(self, addr):
MCStore.__init__(self, addr)
self._is_old = None
def stats(self):
stats = self.mc.stats()
return stats.values()[0] if stats else None
def is_old(self):
if self._is_old is None:
ver = self.get_server_version()
self._is_old = (ver.strip().split(".")[0] == "0")
return self._is_old
def get_collision_summary(self, bucket):
check_bucket(bucket)
raw = self.get("@collision_%x" % bucket)
if raw is None:
return None
count, hcount, khash, data_size = raw.split()
return (int(count), int(hcount), int(khash, 16), int(data_size))
def get_collision(self, bucket):
check_bucket(bucket)
collisions = defaultdict(dict)
hint_data = self.get("@collision_all_%x" % bucket)
if hint_data is None:
return dict()
for key, meta, _ in parse_new_hint_body(hint_data):
khash_str, _, ver, vhash = meta
collisions[khash_str][key] = (vhash, ver)
return dict(collisions)
def get_records_by_khash_raw(self, khash):
if self.is_old():
return []
if not isinstance(khash, str):
khash = "%016x" % khash
return self.get("@@" + khash)
def get_records_by_khash(self, khash_str):
raw = self.get_records_by_khash_raw(khash_str)
if raw:
return parse_records(raw, False)
else:
return []
def start_gc(self, bucket='', start_fid=0, end_fid=None):
""" bucket must be in 0 or 00 string """
if bucket:
assert isinstance(bucket, basestring) and len(bucket) <= 2
t = telnetlib.Telnet(self.host, self.port)
tree = '@%s' % bucket
if end_fid is None:
gc_cmd = 'gc {} {}\n'.format(tree, start_fid)
else:
gc_cmd = 'gc {} {} {}\n'.format(tree, start_fid, end_fid)
t.write(gc_cmd)
out = t.read_until('\n').strip('\r\n')
assert out == 'OK'
t.write('quit\n')
t.close()
def start_gc_all_buckets(self, db_depth):
hex_digits = string.digits + 'abcdef'
buckets_iter = itertools.product(*[hex_digits for _ in range(db_depth)])
buckets = [''.join(i) for i in buckets_iter]
self.start_gc_buckets(buckets)
def start_gc_buckets(self, buckets):
for b in buckets:
self.start_gc(bucket=b)
while True:
status = self.get_gc_status()
if status.find('running') >= 0:
continue
elif status == 'success':
print "bucket %s gc done" % b
break
elif status == 'fail':
                    raise Exception("optimize_stat = fail")
                else:
                    raise Exception(status)
def get_gc_status(self):
return get_gc_status(self.host, self.port)
def get_version(self, key):
meta = self.get("?" + key)
if meta:
return int(meta.split()[0])
def item_count(self):
s = self.stats()
if s is None:
return None
return int(s['total_items'])
def get_key_info_mem(self, key, khash64=None):
''' return (vhash, ver) or None'''
if khash64 is None:
khash64 = get_khash64(key)
khash32_str = "@%08x" % (khash64 >> 32)
_dir = self.get_dir(khash32_str)
if self.is_old():
return _dir.get(key, None)
else:
return _dir.get("%016x" % khash64, None)
def get_khash_info_mem(self, khash):
''' return [(key, (vhash, ver))], key is "" for v2.'''
khash32 = "@%08x" % (khash >> 32)
_dir = self.get_dir(khash32)
ret = []
if self.is_old():
for k, (vhash, ver) in _dir.iteritems():
if get_khash64(k) == khash:
ret.append((k, (vhash, ver)))
else:
for k, (vhash, ver) in _dir.iteritems():
if int(k, 16) == khash:
return [("", (int(vhash), ver))]
return ret
def get_server_version(self):
try:
st = self.stats()
if st:
return st["version"]
except IOError:
logging.error("fail to get version %s", self)
except KeyError:
logging.error("fail to get version %s %s", self, st)
def get_dir(self, path):
''' return dict
case1: map dir(0-f) to (hash, count),
like {'0/': (1471, 27784005), ... },
case2: map key(or khash) to (vhash, version),
like {'3000000377e9c2ad': (22212, 1), ... }'''
try:
content = self.get(path)
except IOError:
content = ''
return dir_to_dict(content)
def list_dir(self, d): # FIXME: d should not need prefix @?
        '''List all keys in the dir, recursing into sub-dirs.
        Do not use it if the dir is large!'''
for path, (vhash, ver) in sorted(self.get_dir(d).items()):
if path.endswith('/') and len(path) == 2:
for v in self.list_dir(d + path[:-1]):
yield v
else:
yield path, int(vhash), int(ver)
def get_bucket_keys_count(self, bucket, depth=1):
return get_bucket_keys_count(self, bucket, depth)
def get_key_info_disk(self, key):
'''return ver, vhash, flag, vsz, ts, fid, pos'''
return get_key_info_disk(self, key)
def prepare(self, data):
return libmc.encode_value(data, self.mc.comp_threshold)
def close(self):
pass
def test_new(addr, bucket):
b = bucket
c = DBClient(addr)
print "stats:", c.stats()
print 'version:', c.get_server_version()
print "isold:", c.is_old()
print "dir root:", c.get_dir("@")
print "bucket key count:", c.get_bucket_keys_count(int(b))
print "item_count:", c.item_count()
print "primary_buckets", get_primary_buckets(c)
leaf = c.get_dir("@" + b + "000000")
print "a dir leaf:", leaf
khash_str = list(leaf)[0]
print "a khash_str", khash_str
r = c.get_records_by_khash(khash_str)[0]
k = r[0]
print "key, len(value), (flag, tstamp, ver):", k, r[1], r[3:]
print "key info mem:", c.get_key_info_mem(k)
print "key info disk(ver, vhash, flag, vsz, ts, fid, pos):", \
c.get_key_info_disk(k)
print "key version:", c.get_version(k)
print "collision_summary", c.get_collision_summary(int(b))
print "gc status:", c.get_gc_status()
if __name__ == '__main__':
test_new("rosa3a:7900", '3')
| StarcoderdataPython |
1624072 | <reponame>sadikkuzu-mba/pyJiraAHE<filename>JiraAHE.py
from jira import JIRA
from getpass import getpass
username = "<EMAIL>"
password = "<PASSWORD>" # Number1 as usual;)
urlJ = "https://aheinsis.atlassian.net/"
def getText(txt):
try:
ret = getpass(txt + " >> ")
except:
ret = None
return ret
def cls(nL=50): # number of lines
print "\n"*int(nL)
def getJiraAHEobj():
try:
global username
global password
username = getText("username")
password = getText("password")
cls()
jahe = JIRA(urlJ, basic_auth=(username, password))
return jahe
except:
return None
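# Usage sketch: jahe = getJiraAHEobj() prompts for credentials and returns a
# jira.JIRA client on success, or None if authentication fails.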
| StarcoderdataPython |
132723 | <filename>automated_analysis.py
import argparse
import csv
from collections import OrderedDict
import sys
from core_data_modules.cleaners import Codes
from core_data_modules.logging import Logger
from core_data_modules.traced_data.io import TracedDataJsonIO
from core_data_modules.util import IOUtils
from core_data_modules.analysis import AnalysisConfiguration, engagement_counts, theme_distributions, \
repeat_participations, sample_messages, traffic_analysis, analysis_utils
from src.lib import PipelineConfiguration
log = Logger(__name__)
IMG_SCALE_FACTOR = 10 # Increase this to increase the resolution of the outputted PNGs
CONSENT_WITHDRAWN_KEY = "consent_withdrawn"
SENT_ON_KEY = "sent_on"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Runs automated analysis over the outputs produced by "
"`generate_outputs.py`, and optionally uploads the outputs to Drive.")
parser.add_argument("user", help="User launching this program")
parser.add_argument("pipeline_configuration_file_path", metavar="pipeline-configuration-file",
help="Path to the pipeline configuration json file")
parser.add_argument("messages_json_input_path", metavar="messages-json-input-path",
help="Path to a JSONL file to read the TracedData of the messages data from")
parser.add_argument("individuals_json_input_path", metavar="individuals-json-input-path",
help="Path to a JSONL file to read the TracedData of the messages data from")
parser.add_argument("automated_analysis_output_dir", metavar="automated-analysis-output-dir",
help="Directory to write the automated analysis outputs to")
args = parser.parse_args()
user = args.user
pipeline_configuration_file_path = args.pipeline_configuration_file_path
messages_json_input_path = args.messages_json_input_path
individuals_json_input_path = args.individuals_json_input_path
automated_analysis_output_dir = args.automated_analysis_output_dir
IOUtils.ensure_dirs_exist(automated_analysis_output_dir)
IOUtils.ensure_dirs_exist(f"{automated_analysis_output_dir}/graphs")
log.info("Loading Pipeline Configuration File...")
with open(pipeline_configuration_file_path) as f:
pipeline_configuration = PipelineConfiguration.from_configuration_file(f)
Logger.set_project_name(pipeline_configuration.pipeline_name)
log.debug(f"Pipeline name is {pipeline_configuration.pipeline_name}")
sys.setrecursionlimit(30000)
# Read the messages dataset
log.info(f"Loading the messages dataset from {messages_json_input_path}...")
with open(messages_json_input_path) as f:
messages = TracedDataJsonIO.import_jsonl_to_traced_data_iterable(f)
    for i in range(len(messages)):
messages[i] = dict(messages[i].items())
log.info(f"Loaded {len(messages)} messages")
# Read the individuals dataset
log.info(f"Loading the individuals dataset from {individuals_json_input_path}...")
with open(individuals_json_input_path) as f:
individuals = TracedDataJsonIO.import_jsonl_to_traced_data_iterable(f)
    for i in range(len(individuals)):
individuals[i] = dict(individuals[i].items())
log.info(f"Loaded {len(individuals)} individuals")
def coding_plans_to_analysis_configurations(coding_plans):
analysis_configurations = []
for plan in coding_plans:
ccs = plan.coding_configurations
for cc in ccs:
if not cc.include_in_theme_distribution:
continue
analysis_configurations.append(
AnalysisConfiguration(cc.analysis_file_key, plan.raw_field, cc.coded_field, cc.code_scheme)
)
return analysis_configurations
log.info("Computing engagement counts...")
with open(f"{automated_analysis_output_dir}/engagement_counts.csv", "w") as f:
engagement_counts.export_engagement_counts_csv(
messages, individuals, CONSENT_WITHDRAWN_KEY,
coding_plans_to_analysis_configurations(PipelineConfiguration.RQA_CODING_PLANS),
f
)
log.info(f'Computing repeat and new participation per show ...')
# Computes the number of new and repeat consented individuals who participated in each radio show.
# Repeat participants are consented individuals who participated in previous shows prior to the target show.
    # New participants are consented individuals who participated in the target show but didn't participate in previous shows.
repeat_new_participation_map = OrderedDict() # of rqa_raw_field to participation metrics.
rqa_raw_fields = [plan.raw_field for plan in PipelineConfiguration.RQA_CODING_PLANS]
#TODO: update to use responded() once moved to core
for rqa_raw_field in rqa_raw_fields:
target_radio_show = rqa_raw_field # radio show in which we are calculating repeat and new participation metrics for.
target_radio_show_participants = set() # contains uids of individuals who participated in target radio show.
for ind in individuals:
if ind["consent_withdrawn"] == Codes.TRUE:
continue
if target_radio_show in ind:
target_radio_show_participants.add(ind['uid'])
previous_radio_shows = [] # rqa_raw_fields of shows that aired before the target radio show.
for rqa_raw_field in rqa_raw_fields:
if rqa_raw_field == target_radio_show:
break
previous_radio_shows.append(rqa_raw_field)
previous_radio_shows_participants = set() # uids of individuals who participated in previous radio shows.
for rqa_raw_field in previous_radio_shows:
for ind in individuals:
if ind["consent_withdrawn"] == Codes.TRUE:
continue
if rqa_raw_field in ind:
previous_radio_shows_participants.add(ind['uid'])
# Check for uids of individuals who participated in target and previous shows.
repeat_participants = target_radio_show_participants.intersection(previous_radio_shows_participants)
        # Check for uids of individuals who participated in the target show but didn't participate in previous shows.
new_participants = target_radio_show_participants.difference(previous_radio_shows_participants)
repeat_new_participation_map[target_radio_show] = {
"Radio Show": target_radio_show, # Todo switch to dataset name
"No. of opt-in participants": len(target_radio_show_participants),
"No. of opt-in participants that are new": len(new_participants),
"No. of opt-in participants that are repeats": len(repeat_participants),
"% of opt-in participants that are new": None,
"% of opt-in participants that are repeats": None
}
# Compute:
# -% of opt-in participants that are new, by computing No. of opt-in participants that are new / No. of opt-in participants
# * 100, to 1 decimal place.
# - % of opt-in participants that are repeats, by computing No. of opt-in participants that are repeats / No. of opt-in participants
# * 100, to 1 decimal place.
        # Guard against division by zero when a show had no opt-in participants.
        if len(target_radio_show_participants) > 0:
repeat_new_participation_map[target_radio_show]["% of opt-in participants that are new"] = \
round(len(new_participants) / len(target_radio_show_participants) * 100, 1)
repeat_new_participation_map[target_radio_show]["% of opt-in participants that are repeats"] = \
round(len(repeat_participants) / len(target_radio_show_participants) * 100, 1)
with open(f"{automated_analysis_output_dir}/per_show_repeat_and_new_participation.csv", "w") as f:
headers = ["Radio Show", "No. of opt-in participants", "No. of opt-in participants that are new",
"No. of opt-in participants that are repeats", "% of opt-in participants that are new",
"% of opt-in participants that are repeats"]
writer = csv.DictWriter(f, fieldnames=headers, lineterminator="\n")
writer.writeheader()
for row in repeat_new_participation_map.values():
writer.writerow(row)
log.info("Computing demographic distributions...")
with open(f"{automated_analysis_output_dir}/demographic_distributions.csv", "w") as f:
theme_distributions.export_theme_distributions_csv(
individuals, CONSENT_WITHDRAWN_KEY,
coding_plans_to_analysis_configurations(PipelineConfiguration.DEMOG_CODING_PLANS),
[],
f
)
log.info("Computing theme distributions...")
with open(f"{automated_analysis_output_dir}/theme_distributions.csv", "w") as f:
theme_distributions.export_theme_distributions_csv(
individuals, CONSENT_WITHDRAWN_KEY,
coding_plans_to_analysis_configurations(PipelineConfiguration.RQA_CODING_PLANS),
coding_plans_to_analysis_configurations(PipelineConfiguration.SURVEY_CODING_PLANS),
f
)
    # Export raw messages labelled with the Meta codes 'impact', 'gratitude' and 'about_conversation', which are treated programmatically as impact/success stories
log.info("Exporting success story raw messages for each episode...")
success_story_string_values = ["gratitude", "about_conversation", "impact"]
with open(f"{automated_analysis_output_dir}/impact_messages.csv", "w") as f:
sample_messages.export_sample_messages_csv(
messages, CONSENT_WITHDRAWN_KEY,
coding_plans_to_analysis_configurations(PipelineConfiguration.RQA_CODING_PLANS),
f, filter_code_ids=success_story_string_values, limit_per_code=sys.maxsize
)
if pipeline_configuration.automated_analysis.traffic_labels is not None:
log.info("Exporting traffic analysis...")
with open(f"{automated_analysis_output_dir}/traffic_analysis.csv", "w") as f:
traffic_analysis.export_traffic_analysis_csv(
messages, CONSENT_WITHDRAWN_KEY,
coding_plans_to_analysis_configurations(PipelineConfiguration.RQA_CODING_PLANS),
SENT_ON_KEY,
pipeline_configuration.automated_analysis.traffic_labels,
f
)
log.info("Automated analysis python script complete")
| StarcoderdataPython |
4842472 | <reponame>edmundsj/xsugar<gh_stars>0
"""
Tests data reading and writing operation, along with condition generation
"""
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import os
from shutil import rmtree
from numpy.testing import assert_equal, assert_allclose
from xsugar import Experiment
from sciparse import assertDataDictEqual
from ast import literal_eval
from itertools import zip_longest
from pathlib import Path
def testLoadData(exp, exp_data, convert_name):
data_desired = pd.DataFrame({'wavelengths': [1, 2, 3],
'Mean': [1,2,4]})
metadata_desired = {'non': 'sense'}
filename = convert_name('TEST1~wavelengths-1~temperatures-25')
file_extension = '.csv'
full_filename = exp_data['data_full_path'] + filename + file_extension
with open(full_filename, 'w+') as fh:
fh.write(str(metadata_desired) + '\n')
data_desired.to_csv(fh, mode='a', index=False)
exp.loadData()
data_actual = exp.data[filename]
assert_frame_equal(data_actual, data_desired)
def testLoadXRDData(convert_name):
file_location = os.path.dirname(os.path.abspath(__file__))
base_path = file_location + '/data'
exp = Experiment(name='TEST1', kind='test', base_path=base_path)
exp.loadXRDData()
data_desired = pd.DataFrame({
'Angle (deg)': [69.05, 69.055, 69.06, 69.065, 69.07,69.075,69.08,
69.085, 69.09, 69.095, 69.1, 69.105, 69.11, 69.115],
'Counts': [24, 30, 28, 40, 132, 272, 3472, 16368,21970,10562,
1210,264,130,64]})
data_actual = exp.data[convert_name('TEST1~wafer=1~type-locked_coupled~peak-Si')]
assert_frame_equal(data_actual, data_desired)
def testLoadXRDMetadata(convert_name):
file_location = os.path.dirname(os.path.abspath(__file__))
base_path = file_location + '/data'
exp = Experiment(name='TEST1', kind='test', base_path=base_path)
exp.loadXRDData()
metadata_desired = {
'date': '02/10/2021',
'increment': 0.005, 'scantype': 'locked coupled',
'start': 69.05, 'steps': 14, 'time': 1,
'theta': 34.0, '2theta': 68.0, 'phi': 180.13, 'chi': -0.972}
metadata_actual = exp.metadata[convert_name('TEST1~wafer=1~type-locked_coupled~peak-Si')]
assert_equal(metadata_actual, metadata_desired)
def testLoadConstants(exp, exp_data, convert_name):
"""
    Tests that we can load constants from a file successfully
"""
wavelengths = np.array([1, 2, 3])
temperatures = np.array([25, 50])
frequency = 8500
with open(exp_data['data_full_path'] + \
convert_name('TEST1~wavelengths-1~temperatures-25.csv'), 'w+') as fh:
fh.write('{"frequency": 8500}\n')
fh.write(f'Time, Data\n')
fh.write(f'1, 2\n')
exp = Experiment(name='TEST1', kind='test')
exp.loadData()
constants_actual = exp.constants
constants_desired = {'frequency': frequency}
assert_equal(constants_actual, constants_desired)
def testLoadMetadata(exp, exp_data, convert_name):
"""
Tests that we can load metadata from a file successfully
"""
wavelengths = np.array([1, 2, 3])
temperatures = np.array([25, 50])
frequency = 8500
with open(exp_data['data_full_path'] + \
convert_name('TEST1~wavelengths-1~temperatures-25.csv'), 'w+') as fh:
fh.write('{"frequency": 8500}\n')
fh.write(f'Time, Data\n')
fh.write(f'1, 2\n')
exp = Experiment(name='TEST1', kind='test')
exp.loadData()
metadata_actual = exp.metadata
metadata_desired = {
convert_name('TEST1~wavelengths-1~temperatures-25'): \
{'frequency': frequency}}
assert_equal(metadata_actual, metadata_desired)
    # TODO: ADD TEST CASE TO ENSURE WE DON'T LOAD IN TOO MUCH DATA, OR DATA
    # THAT DOES NOT PRECISELY MATCH *BOTH* THE NAME *AND* THE ID.
def testLookup(exp, convert_name):
fudge_data = pd.DataFrame(
{'Time (ms)': [1, 2, 3],
'Voltage (V)': [0,0.1, 0.2]})
exp.data = {
convert_name('TEST1~wavelengths-1~temperatures-25'):fudge_data,
convert_name('TEST1~wavelengths-2~temperatures-25'):fudge_data,
convert_name('TEST1~wavelengths-2~temperatures-35'):fudge_data,
convert_name('TEST1~wavelengths-2~temperatures-35'):fudge_data,
}
data_actual = exp.lookup(temperatures=25)
data_desired = {
convert_name('TEST1~wavelengths-1~temperatures-25'):fudge_data,
convert_name('TEST1~wavelengths-2~temperatures-25'):fudge_data,}
assertDataDictEqual(data_actual, data_desired)
def test_load_conditions(exp, exp_data, convert_name):
"""
    Tests that we can load experiment conditions from a file successfully
"""
wavelengths = np.array([1, 2, 3])
temperatures = np.array([25, 50])
frequency = 8500
with open(exp_data['data_full_path'] + \
convert_name('TEST1~wavelengths-1~temperatures-25.csv'), 'w+') as fh:
fh.write('{"frequency": 8500}\n')
fh.write(f'Time, Data\n')
fh.write(f'1, 2\n')
exp = Experiment(name='TEST1', kind='test')
exp.loadData()
metadata_actual = exp.metadata
conditions_desired = [{
'wavelengths': 1,
'temperatures': 25,
'frequency': frequency}]
conditions_actual = exp.conditions
assert_equal(conditions_actual, conditions_desired)
| StarcoderdataPython |
3394694 | #!/usr/bin/env python3
import asyncio
from caproto.server import ioc_arg_parser, run
from collections import defaultdict
from caproto import (ChannelString, ChannelEnum, ChannelDouble,
ChannelChar, ChannelData, ChannelInteger,
ChannelType)
from route_channel import (StringRoute, EnumRoute, DoubleRoute,
                           CharRoute, IntegerRoute, BoolRoute,
                           ByteRoute, ShortRoute)
import re
from arch import get_mean_and_std
import bpm_sim.bpm as bpm
route_type_map = {
str: CharRoute,
bytes: ByteRoute,
int: IntegerRoute,
float: DoubleRoute,
bool: BoolRoute,
ChannelType.STRING: StringRoute,
ChannelType.INT: ShortRoute,
ChannelType.LONG: IntegerRoute,
ChannelType.DOUBLE: DoubleRoute,
ChannelType.ENUM: EnumRoute,
ChannelType.CHAR: CharRoute,
}
default_values = {
    str: '',
    bytes: b'',
int: 0,
float: 0.0,
bool: False,
ChannelType.STRING: '',
ChannelType.INT: 0,
ChannelType.LONG: 0,
ChannelType.DOUBLE: 0.0,
ChannelType.ENUM: 0,
ChannelType.CHAR: '',
}
class Router(defaultdict):
def __init__(self, factory=None):
super().__init__(factory)
self.routes = []
def add_route(self, pattern, data_type, get, put=None, new_subscription=None, remove_subscription=None):
self.routes.append((re.compile(pattern), data_type, get, put, new_subscription, remove_subscription))
def __contains__(self, key):
return True
def __missing__(self, pvname):
chan = None
for (pattern, data_type, get_route, put_route, new_subscription_route, remove_subscription_route) in self.routes:
print("Testing {} against {}".format(pvname, pattern.pattern))
            if pattern.match(pvname) is not None:
chan = self.make_route_channel(pvname, data_type, get_route, put_route, new_subscription_route, remove_subscription_route)
if chan is None:
# No routes matched, so revert to making static data.
chan = self.default_factory(pvname)
ret = self[pvname] = chan
return ret
def make_route_channel(self, pvname, data_type, getter, setter=None, new_subscription=None, remove_subscription=None):
if data_type in route_type_map:
route_class = route_type_map[data_type]
return route_class(pvname, getter, setter, new_subscription, remove_subscription, value=default_values[data_type])
else:
raise ValueError("Router doesn't know what EPICS type to use for Python type {}".format(data_type))
def fabricate_channel(pvname):
print("Making a static channel for key: {}".format(pvname))
return ChannelDouble(value=0)
def main():
_, run_options = ioc_arg_parser(
default_prefix='',
desc="PV black hole")
router = Router(fabricate_channel)
router.add_route("BPMS:.+:[0-9]+:(X|Y|TMIT)", data_type=float, get=bpm.get, new_subscription=bpm.subscribe, remove_subscription=bpm.unsubscribe)
run(router, **run_options)
if __name__ == '__main__':
main() | StarcoderdataPython |
20510 | <gh_stars>1-10
import httpx
import pathlib
import re
import datetime
from bs4 import BeautifulSoup
root = pathlib.Path(__file__).parent.resolve()
def formatGMTime(timestamp):
UTC_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
timeStr = datetime.datetime.strptime(timestamp, UTC_FORMAT) + datetime.timedelta(hours=2, minutes=30)
dateStr = timeStr.strftime("%Y-%m-%d")
return dateStr
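# Worked example: formatGMTime("2021-01-01T22:00:00Z") shifts the timestamp
# by +2h30m and returns the date string "2021-01-02".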
def get_events():
events = httpx.get("https://api.github.com/users/Love-YY/events").json()[:5]
results = []
for event in events:
tempEvent = {}
if (event["type"] == "WatchEvent"):
tempEvent["action"] = "*starred*"
tempEvent["target"] = event["repo"]["name"]
tempEvent["time"] = formatGMTime(event["created_at"])
tempEvent["url"] = event["repo"]["url"].replace("api.", "").replace("repos/", "")
elif (event["type"] == "ReleaseEvent"):
tempEvent["action"] = "*released*"
tempEvent["target"] = event["payload"]["release"]["name"]
tempEvent["time"] = formatGMTime(event["payload"]["release"]["published_at"])
tempEvent["url"] = event["payload"]["release"]["html_url"]
elif (event["type"] == "PushEvent"):
tempEvent["action"] = "*pushed*"
tempEvent["target"] = event["repo"]["name"]
tempEvent["time"] = formatGMTime(event["created_at"])
tempEvent["url"] = event["payload"]["commits"][0]["url"].replace("api.", "").replace("repos/", "")
elif (event["type"] == "IssuesEvent"):
tempEvent["action"] = "*" + event["payload"]["action"] + " issue*"
tempEvent["target"] = event["repo"]["name"]
tempEvent["time"] = formatGMTime(event["created_at"])
tempEvent["url"] = event["payload"]["issue"]["url"].replace("api.", "").replace("repos/", "")
else:
tempEvent["action"] = "*" + event["type"].replace("Event", "").lower() + "d*"
tempEvent["target"] = event["repo"]["name"]
tempEvent["time"] = formatGMTime(event["created_at"])
tempEvent["url"] = event["repo"]["url"].replace("api.", "").replace("repos/", "")
results.append(tempEvent)
return results
def get_blogs():
html = httpx.get("https://www.flynoodle.xyz/blog/").text
soup = BeautifulSoup(html, "html.parser")
soup_all = soup.find_all("div", class_="abstract-item")[:5]
results = []
for item in soup_all:
temp = {}
temp["title"] = item.find("div", class_="title").get_text()
temp["url"] = "https://www.flynoodle.xyz" + item.find("a").get("href")
temp["date"] = item.find("i", class_="reco-date").find("span").get_text()
results.append(temp)
return results
def replace_chunk(content, marker, chunk, inline=False):
r = re.compile(
r"<!\-\- {} starts \-\->.*<!\-\- {} ends \-\->".format(marker, marker),
re.DOTALL,
)
if not inline:
chunk = "\n{}\n".format(chunk)
chunk = "<!-- {} starts -->{}<!-- {} ends -->".format(marker, chunk, marker)
return r.sub(chunk, content)
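# replace_chunk expects the README to contain paired markers such as
# "<!-- event starts --> ... <!-- event ends -->"; the "event", "blog" and
# "time" markers are filled in by the __main__ block below.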
if __name__ == "__main__":
readme = root / "README.md"
readme_contents = readme.open().read()
events = get_events()
events_md = "\n".join(
["* {action} <a href={url} target='_blank'>{target}</a> - {time}".format(**item) for item in events]
)
rewritten = replace_chunk(readme_contents, "event", events_md)
entries = get_blogs()
blogs_md = "\n".join(
["* <a href={url} target='_blank'>{title}</a> - {date}".format(**entry) for entry in entries]
)
rewritten = replace_chunk(rewritten, "blog", blogs_md)
time = (datetime.datetime.now() + datetime.timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S')
time_md = "Automatically updated on " + time
rewritten = replace_chunk(rewritten, "time", time_md)
readme.open("w").write(rewritten) | StarcoderdataPython |
4807866 | from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['fbprophet','dask[complete]']
setup(
name='prophet_gcp',
version='0.1',
author = '<NAME>',
author_email = '<EMAIL>',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='Running prophet for products') | StarcoderdataPython |
36903 | import pytest
import pandas as pd
from src.preprocess import check
class Test_check_column_names:
def test_check_column_names(self):
records = pd.DataFrame({'a': [1]})
config = pd.DataFrame({'column': ['a'], 'dataset': ['ACAPS']})
res = check.check_column_names(records, config, log=False)
assert res is None
| StarcoderdataPython |
171808 | import sys, os
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(CURRENT_DIR, "..", ".."))
import constants
import requests
HTTP_ERROR_CODE_START = 400
HTTP_ERROR_MESSAGE_FORMAT= "Site '%s' returned error '%d'"
REQUEST_ERROR_FORMAT = "Requesting connection to '%s' errored!"
HYPERTEXT_FORMAT_CODES = [ "http://", "https://" ]
OPEN_WEB_BROWSER = "sensible-browser \"%s\" 2>&1 /dev/null &"
if sys.platform.startswith(constants.MAC_OS_X_IDENTIFIER):
OPEN_WEB_BROWSER = "open \"%s\" 2>&1 /dev/null &"
SYS_CMD = lambda x : os.system(OPEN_WEB_BROWSER % (x,))
def open_web_browser(query, **kwargs):
sites = []
parsed = kwargs["nlp"](unicode(query.replace(" dot ", ".")))
for token in parsed:
if token.like_url:
sites.append(str(token.text))
kwargs["log_func"]( sites, tolerance=2 )
for site in sites:
for hypertext_code in HYPERTEXT_FORMAT_CODES:
try:
url = "%s%s" % (hypertext_code, site)
response = requests.get(url)
except:
kwargs["log_func"](REQUEST_ERROR_FORMAT % (site,), tolerance=1)
continue
if response.status_code < HTTP_ERROR_CODE_START:
SYS_CMD(site)
break
else:
error_msg = HTTP_ERROR_MESSAGE_FORMAT % (site, response.status_code)
kwargs["log_func"](error_msg, tolerance=1)
TRIGGER_MODEL = "OPEN_WEB_BROWSER.model"
FUNC = open_web_browser
| StarcoderdataPython |
168840 | <filename>story/models.py
from django.db import models
from .urlgenerator import create_urlcode
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from datetime import datetime
from story.resetkey import secure_key
from django.db.models import F
class Story(models.Model):
class Meta:
# Model for story posts
verbose_name = 'Story'
verbose_name_plural = 'Stories'
ordering = ['created']
author = models.ForeignKey(User, default=0, verbose_name='Author')
title = models.CharField(max_length=160, verbose_name='Title')
text = models.TextField(max_length=3000, verbose_name='Story')
categories = (
("Funny", "Funny"),
("Mysterious", "Mysterious")
)
category = models.CharField(choices=categories, max_length=15, verbose_name='Category')
languages = (
('English', 'English'),
)
language = models.CharField(choices=languages, max_length=10, verbose_name='Language')
popularity = models.IntegerField(default=0, verbose_name='Popularity')
urlcode = models.CharField(max_length=7, blank=True, unique=True, verbose_name='Link Code')
active = models.BooleanField(default=True, verbose_name='Active')
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
if self.urlcode is None or self.urlcode == '':
self.urlcode = create_urlcode(self)
super(Story, self).save(*args, **kwargs)
def __str__(self):
return "{title} - {urlcode}".format(title=self.title, urlcode=self.urlcode)
def get_absolute_url(self):
return reverse('storyPage', kwargs={'shortcode': self.urlcode})
class Notification(models.Model):
class Meta:
# Model for users' notifications
verbose_name = "Notification"
owner = models.ForeignKey(User, on_delete=models.CASCADE) # will get the notification.
notifier = models.CharField(max_length=15) # user who sends notification.
choices = (
('Story', 'Story'),
('CommentLike', 'CommentLike'),
('Comment', 'Comment'),
)
kind = models.CharField(choices=choices, max_length=15) # which model is sending it
conn = models.CharField(max_length=10) # story urlcode or comment id
read = models.BooleanField(default=False) # notitification read status
notify_time = models.DateTimeField(auto_now_add=True, null=True)
def __str__(self):
return "Owner: {owner} - Notifier: {notifier}".format(owner=self.owner, notifier=self.notifier)
class Vote(models.Model):
class Meta:
# Model for story Votes
verbose_name = "Vote"
unique_together = ('user', 'story')
user = models.ForeignKey(User, on_delete=models.CASCADE)
story = models.ForeignKey(Story, on_delete=models.CASCADE)
choices = (
('Upvote', 'Upvote'),
('Downvote', 'Downvote'),
)
vote = models.CharField(choices=choices, max_length=10, verbose_name='Vote')
def __str__(self):
return "{story} was voted as {vote}".format(story=self.story, vote=self.vote)
def save(self, *args, **kwargs):
# Change popularity of related post while saving
if not __class__.objects.filter(user=self.user, story=self.story):
if self.vote == "Upvote":
Story.objects.filter(pk=self.story.pk).update(popularity=F('popularity')+3)
elif self.vote == "Downvote":
Story.objects.filter(pk=self.story.pk).update(popularity=F('popularity')-2)
else:
if self.vote == "Upvote":
Story.objects.filter(pk=self.story.pk).update(popularity=F('popularity')+5)
elif self.vote == "Downvote":
Story.objects.filter(pk=self.story.pk).update(popularity=F('popularity')-5)
super(Vote, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
        # Change popularity of related post when deleting the vote
if self.vote == "Upvote":
Story.objects.filter(pk=self.story.pk).update(popularity=F('popularity')-3)
elif self.vote == "Downvote":
Story.objects.filter(pk=self.story.pk).update(popularity=F('popularity')+2)
super(Vote, self).delete(*args, **kwargs)
class StoryComment(models.Model):
class Meta:
# Model for comments of story
verbose_name = 'Comment'
ordering = ['-comment_date']
post_itself = models.ForeignKey(Story, on_delete=models.CASCADE, verbose_name='Post')
commentator = models.ForeignKey(User, verbose_name='Commentator')
comment = models.TextField(max_length=250, verbose_name='Comment')
comment_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "{storycode} - {comment_id}".format(storycode=self.post_itself.title, comment_id=self.pk)
def save(self, *args, **kwargs):
# If user comments on the story for the first time, change popularity by 2
if not __class__.objects.filter(commentator=self.commentator, post_itself=self.post_itself):
Story.objects.filter(pk=self.post_itself.pk).update(popularity=F('popularity')+2)
super(StoryComment, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
# If user deletes his/her last comment on the story, change popularity by -2
if __class__.objects.filter(commentator=self.commentator, post_itself=self.post_itself).count() == 1:
Story.objects.filter(pk=self.post_itself.pk).update(popularity=F('popularity')-2)
super(StoryComment, self).delete(*args, **kwargs)
class CommentLike(models.Model):
class Meta:
# Model for comment likes
verbose_name = 'Comment Like'
ordering = ['user']
user = models.ForeignKey(User, on_delete=models.CASCADE)
comment = models.ForeignKey(StoryComment, on_delete=models.CASCADE)
def __str__(self):
return "{story} - {comment} - {user}".format(story=self.comment.post_itself.urlcode, comment=self.comment.pk, user=self.user.username)
def save(self, *args, **kwargs):
# If any user likes any comment of the post, change popularity of the post by +1
Story.objects.filter(pk=self.comment.post_itself.pk).update(popularity=F('popularity')+1)
super(CommentLike, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
print("dfsdfdfsdfsdf")
# If any user dislikes any comment of the post, change popularity of the post by -1
Story.objects.filter(pk=self.comment.post_itself.pk).update(popularity=F('popularity')-1)
super(CommentLike, self).delete(*args, **kwargs)
class Profile(models.Model):
class Meta:
# Model for Users' profile details
verbose_name = 'User Profile Detail'
ordering = ['user']
user = models.OneToOneField(User, on_delete=models.CASCADE, verbose_name='User')
genders = (
('Male', 'Male'),
('Female', 'Female'),
)
gender = models.CharField(max_length=10, choices=genders, blank=True, null=True, verbose_name='Gender')
birthday = models.DateField(null=True, blank=True, verbose_name='Birthday')
confirmed = models.BooleanField(default=False, verbose_name="Email Confirmed")
    # TODO: Add country info for every user
def __str__(self):
return "User Details for: {user}".format(user=self.user.username)
class PostReport(models.Model):
class Meta:
# Model for post reports
verbose_name = 'Post Report'
ordering = ['story']
story = models.ForeignKey(Story, on_delete=models.CASCADE)
report_text = models.TextField(max_length=300, verbose_name='Report')
created = models.DateTimeField(auto_now_add=True, blank=True, null=True)
def __str__(self):
return "{urlcode} - {story_title}".format(urlcode=self.story.urlcode, story_title=self.story.title)
class Confirmation(models.Model):
class Meta:
        # Model for storing the e-mail and password confirmation key
verbose_name = "Confirmation Key"
ordering = ["user"]
user = models.OneToOneField(User, on_delete=models.CASCADE)
key = models.CharField(max_length = 69, unique=True, verbose_name="Key")
def __str__(self):
return str(self.user)
def save(self, *args, **kwargs):
self.key = secure_key(self)
super(Confirmation, self).save(*args, **kwargs)
class PasswordReset(models.Model):
class Meta:
# Model for resetting account Password
verbose_name = "Password Reset Key"
ordering = ["user"]
user = models.OneToOneField(User, on_delete=models.CASCADE)
key = models.CharField(max_length=69, unique=True, verbose_name="Key")
def __str__(self):
return str(self.user)
def save(self, *args, **kwargs):
self.key = secure_key(self)
super(PasswordReset, self).save(*args, **kwargs)
| StarcoderdataPython |
3324296 | <reponame>0xflotus/CUP
#!/usr/bin/env python
# -*- coding: utf-8 -*
# Copyright: [CUP] - See LICENSE for details.
# Authors: <NAME> (@mythmgn),
"""
misc classes for internal use
"""
import os
import sys
class CAck(object):
"""
ack class
"""
def __init__(self, binit=False):
self._rev = binit
def getack_infobool(self):
"""
get bool info
"""
return self._rev
def setack_infobool(self, binit=False):
"""
set bool info
"""
self._rev = binit
def check_type(param, expect):
"""
check type of the param is as the same as expect's
:raise:
raise TypeError if it's not the same
"""
if type(param) != expect:
raise TypeError('TypeError. Expect:%s, got %s' % (expect, type(param)))
def check_not_none(param):
"""
check param is not None
:raise:
NameError if param is None
"""
if param is None:
raise NameError('The param has not been set before access')
def get_funcname(backstep=0):
"""
get funcname of the current code line
:param backstep:
will go backward (one layer) from the current function call stack
"""
# pylint: disable=W0212
return sys._getframe(
backstep + 1).f_code.co_name
def get_filename(backstep=0):
"""
Get the file name of the current code line.
:param backstep:
will go backward (one layer) from the current function call stack
"""
return os.path.basename(
sys._getframe(backstep + 1).f_code.co_filename) # pylint:disable=W0212
def get_lineno(backstep=0):
"""
Get the line number of the current code line
:param backstep:
will go backward (one layer) from the current function call stack
"""
return sys._getframe(backstep + 1).f_lineno # pylint:disable=W0212
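# Example: called inside foo(), get_funcname() returns "foo" and
# get_funcname(1) returns the name of foo()'s caller; get_filename and
# get_lineno follow the same backstep convention.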
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
| StarcoderdataPython |
20430 | <reponame>lambdamusic/wittgensteiniana
"""
Using
http://thejit.org/static/v20/Docs/files/Options/Options-Canvas-js.html#Options.Canvas
"""
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.urls import reverse
from django.shortcuts import render, redirect, get_object_or_404
import json
import os
import json
from libs.myutils.myutils import printDebug
from tractatusapp.models import *
def spacetree(request):
"""
Visualizes a space tree - ORIGINAL VIEW (USED TO GENERATE HTML VERSION)
"""
# DEFAULT JSON FOR TESTING THE APP
to_json = {
'id': "190_0",
'name': "<NAME>",
'children': [
{
'id': "306208_1",
'name': "<NAME> & <NAME>",
'data': {
'relation': "<h4>Pearl Jam & Cypress Hill</h4><b>Connections:</b><ul><h3>Pearl Jam <div>(relation: collaboration)</div></h3><h3>Cypress Hill <div>(relation: collaboration)</div></h3></ul>"
},},
{ 'id': "191_0",
'name': "<NAME>",
'children': [{
'id': "306209_1",
'name': "<NAME>",
'data': {
'relation': "<h4>Pearl Jam & Cypress Hill</h4><b>Connections:</b><ul><h3>Pearl Jam <div>(relation: collaboration)</div></h3><h3>Cypress Hill <div>(relation: collaboration)</div></h3></ul>"
},
}],
}]}
# reconstruct the tree as a nested dictionary
TESTING = False
def nav_tree(el):
d = {}
d['id'] = el.name
d['name'] = el.name
full_ogden = generate_text(el)
preview_ogden = "%s .." % ' '.join(el.textOgden().split()[:10]).replace("div", "span")
d['data'] = {'preview_ogden' : preview_ogden, 'full_ogden' : full_ogden}
if el.get_children() and not TESTING:
d['children'] = [nav_tree(x) for x in el.get_children()]
else:
d['children'] = []
return d
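    # nav_tree() mirrors the node shape of the to_json sample above
    # ({'id': ..., 'name': ..., 'data': {...}, 'children': [...]}), which is
    # the format the JavaScript InfoVis SpaceTree consumes.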
treeroot = {'id': "root", 'name': "TLP", 'children': [],
'data': {'preview_ogden' : "root node", 'full_ogden' : generate_text("root")}}
# level0 = TextUnit.tree.root_nodes()
# TODO - make this a mptt tree function
level0 = TextUnit.tree_top()
for x in level0:
treeroot['children'] += [nav_tree(x)]
context = {
'json': json.dumps(treeroot),
'experiment_description': """
The Space Tree Tractatus is an experimental visualization of the <br />
<a target='_blank' href="http://en.wikipedia.org/wiki/Tractatus_Logico-Philosophicus">Tractatus Logico-Philosophicus</a>, a philosophical text by <NAME>.
<br /><br />
<b>Click</b> on a node to move the tree and center that node. The text contents of the node are displayed at the bottom of the page. <b>Use the mouse wheel</b> to zoom and <b>drag and drop the canvas</b> to pan.
<br /><br />
<small>Made with <a target='_blank' href="http://www.python.org/">Python</a> and the <a target='_blank' href="http://thejit.org/">JavaScript InfoVis Toolkit</a>. More info on this <a href="http://www.michelepasin.org/blog/2012/07/08/wittgenstein-and-the-javascript-infovis-toolkit/">blog post</a></small>
"""
}
return render(request,
'tractatusapp/spacetree/spacetree.html',
context)
def generate_text(instance, expression="ogden"):
""" creates the html needed for the full text representation of the tractatus
includes the number-title, and small links to next and prev satz
# TODO: add cases for different expressions
"""
if instance == "root":
return """<div class='tnum'>Tractatus Logico-Philosophicus<span class='smalllinks'></small></div>
<div><NAME>, 1921.<br />
Translated from the German by <NAME> in 1922<br />
Original title: Logisch-Philosophische Abhandlung, <NAME> (ed.), Annalen der Naturphilosophie, 14 (1921)</div>
"""
else:
next, prev = "", ""
next_satz = instance.tractatus_next()
prev_satz = instance.tractatus_prev()
if next_satz:
next = "<a title='Next Sentence' href='javascript:focus_node(%s);'>→ %s</a>" % (next_satz.name, next_satz.name)
if prev_satz:
prev = "<a title='Previous Sentence' href='javascript:focus_node(%s);'>%s ←</a>" % (prev_satz.name, prev_satz.name)
# HACK src images rendered via JS in the template cause WGET errors
# hence they are hidden away in this visualization
# TODO find a more elegant solution
text_js_ready = instance.textOgden().replace('src="', '-src=\"src image omitted ')
t = "<div class='tnum'><span class='smalllinks'>%s</span>%s<span class='smalllinks'>%s</span></div>%s" % (prev, instance.name, next, text_js_ready)
return t
| StarcoderdataPython |
1672910 | from marshmallow import EXCLUDE, Schema, fields
from application.models import Cards, Meta, Sets
class SetsSchema(Schema):
cs_id = fields.Int()
cs_name = fields.Str()
mtgjson_code = fields.Str()
related_mtgjson_codes = fields.List(fields.Str())
class Meta:
ordered = True
set_schema = SetsSchema()
sets_schema = SetsSchema(many=True)
class PricesSchema(Schema):
price = fields.Float()
updated = fields.DateTime("%Y-%m-%d %H:%M")
class Meta:
ordered = True
class CardsSchema(Schema):
cs_id = fields.Int()
url = fields.Str()
name = fields.Str()
edition = fields.Str()
is_foil = fields.Boolean()
mtgjson_id = fields.Str()
scryfall_id = fields.Str()
collector_number = fields.Str()
mtgjson_code = fields.Str()
prices = fields.Nested(PricesSchema())
class Meta:
ordered = True
class CardsSearchSchema(Schema):
name = fields.Str()
edition = fields.Str()
is_foil = fields.Boolean()
collector_number = fields.Str()
mtgjson_code = fields.Str()
class Meta:
unknown = EXCLUDE
cardssearchschema = CardsSearchSchema()
price_schema = PricesSchema()
prices_schema = PricesSchema(many=True)
card_schema = CardsSchema()
cards_schema = CardsSchema(many=True)
class SetsWithCardsSchema(SetsSchema):
cards = fields.List(
fields.Nested(
CardsSchema(
only=(
"name",
"is_foil",
"mtgjson_id",
"scryfall_id",
"collector_number",
"prices",
)
)
)
)
class Meta:
ordered = True
set_with_cards_schema = SetsWithCardsSchema()
sets_with_cards_schema = SetsWithCardsSchema(many=True)
class CardsWithRelatedPrintingsSchema(CardsSchema):
related_printings = fields.List(fields.Nested(CardsSchema()))
class Meta:
ordered = True
card_with_related_printings_schema = CardsWithRelatedPrintingsSchema()
cards_with_related_printings_schema = CardsWithRelatedPrintingsSchema(many=True)
class MetaSchema(Schema):
last_updated = fields.DateTime("%Y-%m-%d %H:%M")
cards = fields.List(fields.Nested(CardsSchema()))
meta_schema = MetaSchema()
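# Minimal usage sketch (hypothetical objects; standard marshmallow API):
#   card_schema.dump(card)       -> serialized, ordered dict
#   cardssearchschema.load(args) -> validated search filters; unknown keys are
#                                   dropped because Meta.unknown = EXCLUDE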
| StarcoderdataPython |
4843113 | import argparse
import calendar
import dotenv
import json
import libraries.api
import libraries.handle_file
import libraries.record
import logging
import logging.config
import os
import pandas as pd
import requests
import time
from csv import writer
from oauthlib.oauth2 import BackendApplicationClient, TokenExpiredError
from requests.auth import HTTPBasicAuth
from requests_oauthlib import OAuth2Session
from typing import Callable, Dict, Set, TextIO
dotenv_file = dotenv.find_dotenv()
dotenv.load_dotenv(dotenv_file)
logging.config.fileConfig('logging.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class RecordsBuffer:
"""
A buffer of records. DO NOT INSTANTIATE THIS CLASS DIRECTLY.
Instead, instantiate one of its subclasses:
- AlmaRecordsBuffer: A buffer of records with MMS ID and OCLC number
- WorldCatRecordsBuffer: A buffer of records with OCLC number only
Attributes
----------
auth: HTTPBasicAuth
The HTTP Basic Auth object used when requesting an access token
oauth_session: OAuth2Session
The OAuth 2 Session object used to request an access token and make HTTP
requests to the WorldCat Metadata API (note that the OAuth2Session class
is a subclass of requests.Session)
Methods
-------
get_transaction_id()
Builds transaction_id to include with WorldCat Metadata API request
make_api_request(api_request, api_url)
Makes the specified API request to the WorldCat Metadata API
"""
def __init__(self) -> None:
"""Initializes a RecordsBuffer object by creating its OAuth2Session."""
logger.debug('Started RecordsBuffer constructor...')
self.contents = None
logger.debug(f'{type(self.contents)=}')
# Create OAuth2Session for WorldCat Metadata API
logger.debug('Creating OAuth2Session...')
self.auth = HTTPBasicAuth(os.environ['WORLDCAT_METADATA_API_KEY'],
os.environ['WORLDCAT_METADATA_API_SECRET'])
logger.debug(f'{type(self.auth)=}')
client = BackendApplicationClient(
client_id=os.environ['WORLDCAT_METADATA_API_KEY'],
scope=['WorldCatMetadataAPI refresh_token'])
token = {
'access_token': os.environ['WORLDCAT_METADATA_API_ACCESS_TOKEN'],
'expires_at': float(
os.environ['WORLDCAT_METADATA_API_ACCESS_TOKEN_EXPIRES_AT']),
'token_type': os.environ['WORLDCAT_METADATA_API_ACCESS_TOKEN_TYPE']
}
self.oauth_session = OAuth2Session(client=client, token=token)
logger.debug(f'{type(self.oauth_session)=}')
logger.debug('OAuth2Session created.')
logger.debug('Completed RecordsBuffer constructor.')
def __len__(self) -> int:
"""Returns the number of records in this records buffer.
Returns
-------
int
The number of records in this records buffer
Raises
------
TypeError
If the contents attribute is not defined (i.e. is None)
"""
return len(self.contents)
def get_transaction_id(self) -> str:
"""Builds transaction_id to include with WorldCat Metadata API request.
Returns
-------
str
The transaction_id
"""
transaction_id = ''
if ('OCLC_INSTITUTION_SYMBOL' in os.environ
or 'WORLDCAT_PRINCIPAL_ID' in os.environ):
# Add OCLC Institution Symbol, if present
transaction_id = os.getenv('OCLC_INSTITUTION_SYMBOL', '')
if transaction_id != '':
transaction_id += '_'
# Add timestamp and, if present, your WorldCat Principal ID
transaction_id += time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
if 'WORLDCAT_PRINCIPAL_ID' in os.environ:
transaction_id += f"_{os.getenv('WORLDCAT_PRINCIPAL_ID')}"
logger.debug(f'{transaction_id=}')
return transaction_id
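    # The transaction_id built above concatenates the optional institution
    # symbol, a UTC timestamp and the optional principal ID, e.g. (with
    # hypothetical values) "ZZZ_2021-09-30T22:43:07Z_some-principal-id".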
def make_api_request(
self,
api_request: Callable[..., requests.models.Response],
api_url: str) -> requests.models.Response:
"""Makes the specified API request to the WorldCat Metadata API.
Parameters
----------
api_request: Callable[..., requests.models.Response]
The specific WorldCat Metadata API request to make
api_url: str
The specific WorldCat Metadata API URL to use
Returns
-------
requests.models.Response
The API response returned by the api_request function
"""
transaction_id = self.get_transaction_id()
if transaction_id != '':
api_url += f"&transactionID={transaction_id}"
headers = {"Accept": "application/json"}
response = None
# Make API request
try:
response = api_request(api_url, headers=headers)
except TokenExpiredError as e:
logger.debug(f'Access token {self.oauth_session.access_token} '
f'expired. Requesting new access token...')
datetime_format = '%Y-%m-%d %H:%M:%SZ'
# Confirm the epoch is January 1, 1970, 00:00:00 (UTC).
# See https://docs.python.org/3.8/library/time.html for an
# explanation of the term 'epoch'.
system_epoch = time.strftime(datetime_format, time.gmtime(0))
expected_epoch = '1970-01-01 00:00:00Z'
if system_epoch != expected_epoch:
logger.warning(f"The system's epoch ({system_epoch}) is not "
f"equal to the expected epoch ({expected_epoch}). There "
f"may therefore be issues in determining whether the "
f"WorldCat Metadata API's refresh token has expired.")
# Convert the WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT value
# to a float representing seconds since the epoch.
# Note that the WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT value
# is a string in ISO 8601 format, except that it substitutes the 'T'
# delimiter (which separates the date from the time) for a space, as
# in '2021-09-30 22:43:07Z'.
refresh_token_expires_at = 0.0
if 'WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT' in os.environ:
logger.debug(f'WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT '
f'variable exists in .env file, so using this value: '
f'{os.getenv("WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT")}'
f' (UTC), which will be converted to seconds since the '
f'epoch')
refresh_token_expires_at = calendar.timegm(
time.strptime(
os.getenv(
'WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT'),
datetime_format))
refresh_token_expires_in = refresh_token_expires_at - time.time()
logger.debug(f'{refresh_token_expires_at=} seconds since the epoch')
logger.debug(f'Current time: {time.time()} seconds since the epoch,'
f' which is {time.strftime(datetime_format, time.gmtime())} '
f'(UTC). So the Refresh Token (if one exists) expires in '
f'{refresh_token_expires_in} seconds.')
# Obtain a new Access Token
token = None
if ('WORLDCAT_METADATA_API_REFRESH_TOKEN' in os.environ
and refresh_token_expires_in > 25):
# Use Refresh Token to request new Access Token
token = self.oauth_session.refresh_token(
os.environ['OCLC_AUTHORIZATION_SERVER_TOKEN_URL'],
refresh_token=os.getenv(
'WORLDCAT_METADATA_API_REFRESH_TOKEN'),
auth=self.auth)
else:
# Request Refresh Token and Access Token
token = self.oauth_session.fetch_token(
os.environ['OCLC_AUTHORIZATION_SERVER_TOKEN_URL'],
auth=self.auth)
logger.debug(f"Refresh token granted ({token['refresh_token']})"
f", which expires at {token['refresh_token_expires_at']}")
# Set Refresh Token environment variables and update .env file
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_REFRESH_TOKEN',
token['refresh_token'])
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_REFRESH_TOKEN_EXPIRES_AT',
token['refresh_token_expires_at'])
logger.debug(f'{token=}')
logger.debug(f'New access token granted: '
f'{self.oauth_session.access_token}')
# Set environment variables based on new Access Token info and
# update .env file accordingly
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_ACCESS_TOKEN',
token['access_token'])
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_ACCESS_TOKEN_TYPE',
token['token_type'])
logger.debug(f"{token['expires_at']=}")
libraries.handle_file.set_env_var(
'WORLDCAT_METADATA_API_ACCESS_TOKEN_EXPIRES_AT',
str(token['expires_at']))
response = api_request(api_url, headers=headers)
libraries.api.log_response_and_raise_for_status(response)
return response
class AlmaRecordsBuffer(RecordsBuffer):
"""
A buffer of Alma records, each with an MMS ID and OCLC number.
Attributes
----------
oclc_num_dict: Dict[str, str]
A dictionary containing each record's original OCLC number (key) and its
MMS ID (value)
records_with_current_oclc_num: TextIO
The CSV file object where records with a current OCLC number are added
records_with_current_oclc_num_writer: writer
The CSV writer object for the records_with_current_oclc_num file object
records_with_old_oclc_num: TextIO
The CSV file object where records with an old OCLC number are added
records_with_old_oclc_num_writer: writer
The CSV writer object for the records_with_old_oclc_num file object
records_with_errors: TextIO
The CSV file object where records are added if an error is encountered
records_with_errors_writer: writer
The CSV writer object for the records_with_errors file object
Methods
-------
add(orig_oclc_num, mms_id)
Adds the given record to this buffer (i.e. to oclc_num_dict)
process_records(results)
Checks each record in oclc_num_dict for the current OCLC number
remove_all_records()
Removes all records from this buffer (i.e. clears oclc_num_dict)
"""
def __init__(self,
records_with_current_oclc_num: TextIO,
records_with_old_oclc_num: TextIO,
records_with_errors: TextIO) -> None:
"""Instantiates an AlmaRecordsBuffer object.
Parameters
----------
records_with_current_oclc_num: TextIO
The CSV file object where records with a current OCLC number are
added
records_with_old_oclc_num: TextIO
The CSV file object where records with an old OCLC number are added
records_with_errors: TextIO
The CSV file object where records are added if an error is
encountered
"""
logger.debug('Started AlmaRecordsBuffer constructor...')
self.oclc_num_dict = {}
logger.debug(f'{type(self.oclc_num_dict)=}')
self.records_with_current_oclc_num = records_with_current_oclc_num
self.records_with_current_oclc_num_writer = \
writer(records_with_current_oclc_num)
self.records_with_old_oclc_num = records_with_old_oclc_num
self.records_with_old_oclc_num_writer = \
writer(records_with_old_oclc_num)
self.records_with_errors = records_with_errors
self.records_with_errors_writer = writer(records_with_errors)
# Create OAuth2Session for WorldCat Metadata API
super().__init__()
self.contents = self.oclc_num_dict
logger.debug(f'{type(self.contents)=}')
logger.debug('Completed AlmaRecordsBuffer constructor.\n')
def __str__(self) -> str:
"""Returns a string listing the contents of this records buffer.
In specific, this method lists the contents of the OCLC Number
dictionary.
Returns
-------
str
The contents of the OCLC Number dictionary
"""
return (f'Records buffer contents ({{OCLC Number: MMS ID}}): '
f'{self.oclc_num_dict}')
def add(self, orig_oclc_num: str, mms_id: str) -> None:
"""Adds the given record to this buffer (i.e. to oclc_num_dict).
Parameters
----------
orig_oclc_num: str
The record's original OCLC number
mms_id: str
The record's MMS ID
Raises
------
AssertionError
If the original OCLC number is already in the OCLC Number dictionary
"""
assert orig_oclc_num not in self.oclc_num_dict, (f'OCLC number '
f'{orig_oclc_num} already exists in records buffer with MMS ID '
f'{self.oclc_num_dict[orig_oclc_num]}')
self.oclc_num_dict[orig_oclc_num] = mms_id
logger.debug(f'Added {orig_oclc_num} to records buffer.')
def process_records(self, results: Dict[str, int]) -> None:
"""Checks each record in oclc_num_dict for the current OCLC number.
This is done by making a GET request to the WorldCat Metadata API:
https://worldcat.org/bib/checkcontrolnumbers?oclcNumbers={oclcNumbers}
Parameters
----------
results: Dict[str, int]
A dictionary containing the total number of records in the following
categories: records with the current OCLC number, records with an
old OCLC number, records with errors
Raises
------
json.decoder.JSONDecodeError
If there is an error decoding the API response
"""
logger.debug('Started processing records buffer...')
api_response_error_msg = ('Problem with Get Current OCLC Number API '
'response')
# Build URL for API request
url = (f"{os.environ['WORLDCAT_METADATA_SERVICE_URL']}"
f"/bib/checkcontrolnumbers"
f"?oclcNumbers={','.join(self.oclc_num_dict.keys())}")
try:
api_response = super().make_api_request(
self.oauth_session.get,
url
)
json_response = api_response.json()
logger.debug(f'Get Current OCLC Number API response:\n'
f'{json.dumps(json_response, indent=2)}')
for record_index, record in enumerate(json_response['entry'],
start=1):
found_requested_oclc_num = record['found']
is_current_oclc_num = not record['merged']
# Look up MMS ID based on OCLC number
mms_id = self.oclc_num_dict[record['requestedOclcNumber']]
logger.debug(f'Started processing record #{record_index} (OCLC '
f'number {record["requestedOclcNumber"]})...')
logger.debug(f'{is_current_oclc_num=}')
if not found_requested_oclc_num:
logger.exception(f'{api_response_error_msg}: OCLC number '
f'{record["requestedOclcNumber"]} not found')
results['num_records_with_errors'] += 1
# Add record to
# records_with_errors_when_getting_current_oclc_number.csv
if self.records_with_errors.tell() == 0:
# Write header row
self.records_with_errors_writer.writerow([
'MMS ID',
'OCLC Number',
'Error'
])
self.records_with_errors_writer.writerow([
mms_id,
record['requestedOclcNumber'],
f'{api_response_error_msg}: OCLC number not found'
])
elif is_current_oclc_num:
results['num_records_with_current_oclc_num'] += 1
# Add record to already_has_current_oclc_number.csv
if self.records_with_current_oclc_num.tell() == 0:
# Write header row
self.records_with_current_oclc_num_writer.writerow([
'MMS ID',
'Current OCLC Number'
])
self.records_with_current_oclc_num_writer.writerow([
mms_id,
record['currentOclcNumber']
])
else:
results['num_records_with_old_oclc_num'] += 1
# Add record to needs_current_oclc_number.csv
if self.records_with_old_oclc_num.tell() == 0:
# Write header row
self.records_with_old_oclc_num_writer.writerow([
'MMS ID',
'Current OCLC Number',
'Original OCLC Number'
])
self.records_with_old_oclc_num_writer.writerow([
mms_id,
record['currentOclcNumber'],
record['requestedOclcNumber']
])
logger.debug(f'Finished processing record #{record_index}.\n')
except json.decoder.JSONDecodeError:
# except (requests.exceptions.JSONDecodeError,
# json.decoder.JSONDecodeError):
logger.exception(f'{api_response_error_msg}: Error decoding JSON')
logger.exception(f'{api_response.text=}')
# Re-raise exception so that the script is halted (since future API
# requests may result in the same error)
raise
logger.debug('Finished processing records buffer.')
def remove_all_records(self) -> None:
"""Removes all records from this buffer (i.e. clears oclc_num_dict)."""
self.oclc_num_dict.clear()
logger.debug(f'Cleared records buffer.')
logger.debug(self.__str__() + '\n')
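# Illustrative usage sketch of AlmaRecordsBuffer (comments only; the CSV paths match the
# ones used in main() below, but the OCLC number and MMS ID are hypothetical, and a
# working WorldCat OAuth session plus the WORLDCAT_METADATA_* environment variables are
# assumed):
#
#     with open('csv/already_has_current_oclc_number.csv', mode='a', newline='') as current, \
#             open('csv/needs_current_oclc_number.csv', mode='a', newline='') as old, \
#             open('csv/records_with_errors_when_getting_current_oclc_number.csv',
#                  mode='a', newline='') as errors:
#         buffer = AlmaRecordsBuffer(current, old, errors)
#         buffer.add('1234567', '991234567890123')
#         buffer.process_records({'num_records_with_current_oclc_num': 0,
#                                 'num_records_with_old_oclc_num': 0,
#                                 'num_records_with_errors': 0})
#         buffer.remove_all_records()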
class WorldCatRecordsBuffer(RecordsBuffer):
"""
A buffer of WorldCat records, each with an OCLC number.
Attributes
----------
oclc_num_set: Set[str]
A set containing each record's OCLC number
records_with_holding_already_set: TextIO
The CSV file object where records whose holding is already set are added
(i.e. records that were not updated)
records_with_holding_already_set_writer: writer
The CSV writer object for the records_with_holding_already_set file
object
records_with_holding_successfully_set: TextIO
The CSV file object where records whose holding was successfully set are
added (i.e. records that were successfully updated)
records_with_holding_successfully_set_writer: writer
The CSV writer object for the records_with_holding_successfully_set file
object
records_with_errors: TextIO
The CSV file object where records are added if an error is encountered
records_with_errors_writer: writer
The CSV writer object for the records_with_errors file object
Methods
-------
add(oclc_num)
Adds the given record to this buffer (i.e. to oclc_num_set)
process_records(results)
Attempts to set the institution holding for each record in oclc_num_set
remove_all_records()
Removes all records from this buffer (i.e. clears oclc_num_set)
"""
def __init__(self,
records_with_holding_already_set: TextIO,
records_with_holding_successfully_set: TextIO,
records_with_errors: TextIO) -> None:
"""Instantiates a WorldCatRecordsBuffer object.
Parameters
----------
records_with_holding_already_set: TextIO
The CSV file object where records whose holding is already set are
added (i.e. records that were not updated)
records_with_holding_successfully_set: TextIO
The CSV file object where records whose holding was successfully set
are added (i.e. records that were successfully updated)
records_with_errors: TextIO
The CSV file object where records are added if an error is
encountered
"""
logger.debug('Started WorldCatRecordsBuffer constructor...')
self.oclc_num_set = set()
logger.debug(f'{type(self.oclc_num_set)=}')
self.records_with_holding_already_set = records_with_holding_already_set
self.records_with_holding_already_set_writer = \
writer(records_with_holding_already_set)
self.records_with_holding_successfully_set = \
records_with_holding_successfully_set
self.records_with_holding_successfully_set_writer = \
writer(records_with_holding_successfully_set)
self.records_with_errors = records_with_errors
self.records_with_errors_writer = writer(records_with_errors)
# Create OAuth2Session for WorldCat Metadata API
super().__init__()
self.contents = self.oclc_num_set
logger.debug(f'{type(self.contents)=}')
logger.debug('Completed WorldCatRecordsBuffer constructor.\n')
def __str__(self) -> str:
"""Returns a string listing the contents of this records buffer.
        Specifically, this method lists the contents of the OCLC Number set.
Returns
-------
str
The contents of the OCLC Number set
"""
return (f'Records buffer contents (OCLC Numbers): {self.oclc_num_set}')
def add(self, oclc_num: str) -> None:
"""Adds the given record to this buffer (i.e. to oclc_num_set).
Parameters
----------
oclc_num: str
The record's OCLC number
Raises
------
AssertionError
If the OCLC number is already in the OCLC Number set
"""
assert oclc_num not in self.oclc_num_set, (f'OCLC number {oclc_num} '
f'already exists in records buffer')
self.oclc_num_set.add(oclc_num)
logger.debug(f'Added {oclc_num} to records buffer.')
def process_records(self, results: Dict[str, int]) -> None:
"""Attempts to set the holding for each record in oclc_num_set.
This is done by making a POST request to the WorldCat Metadata API:
https://worldcat.org/ih/datalist?oclcNumbers={oclcNumbers}
Parameters
----------
results: Dict[str, int]
A dictionary containing the total number of records in the following
categories: records successfully set, records already set, records
with errors
Raises
------
json.decoder.JSONDecodeError
If there is an error decoding the API response
"""
logger.debug('Started processing records buffer...')
api_response_error_msg = ('Problem with Set Holding API response')
# Build URL for API request
url = (f"{os.environ['WORLDCAT_METADATA_SERVICE_URL']}"
f"/ih/datalist?oclcNumbers={','.join(self.oclc_num_set)}")
try:
api_response = super().make_api_request(
self.oauth_session.post,
url
)
json_response = api_response.json()
logger.debug(f'Set Holding API response:\n'
f'{json.dumps(json_response, indent=2)}')
for record_index, record in enumerate(json_response['entry'],
start=1):
is_current_oclc_num = (record['requestedOclcNumber']
== record['currentOclcNumber'])
new_oclc_num = ''
oclc_num_msg = ''
if not is_current_oclc_num:
new_oclc_num = record['currentOclcNumber']
oclc_num_msg = (f'OCLC number '
f'{record["requestedOclcNumber"]} has been updated to '
f'{new_oclc_num}. Consider updating Alma record.')
logger.warning(oclc_num_msg)
oclc_num_msg = f'Warning: {oclc_num_msg}'
logger.debug(f'Started processing record #{record_index} (OCLC '
f'number {record["requestedOclcNumber"]})...')
logger.debug(f'{is_current_oclc_num=}')
logger.debug(f'{record["httpStatusCode"]=}')
logger.debug(f'{record["errorDetail"]=}')
if record['httpStatusCode'] == 'HTTP 200 OK':
results['num_records_successfully_set'] += 1
# Add record to records_with_holding_successfully_set.csv
if self.records_with_holding_successfully_set.tell() == 0:
# Write header row
self.records_with_holding_successfully_set_writer.writerow([
'Requested OCLC Number',
'New OCLC Number (if applicable)',
'Warning'
])
self.records_with_holding_successfully_set_writer.writerow([
record['requestedOclcNumber'],
new_oclc_num,
oclc_num_msg
])
elif record['httpStatusCode'] == 'HTTP 409 Conflict':
results['num_records_already_set'] += 1
# Add record to records_with_holding_already_set.csv
if self.records_with_holding_already_set.tell() == 0:
# Write header row
self.records_with_holding_already_set_writer.writerow([
'Requested OCLC Number',
'New OCLC Number (if applicable)',
'Error'
])
self.records_with_holding_already_set_writer.writerow([
record['requestedOclcNumber'],
new_oclc_num,
(f"{api_response_error_msg}: {record['errorDetail']}. "
f"{oclc_num_msg}")
])
else:
logger.exception(f"{api_response_error_msg} for OCLC "
f"Number {record['requestedOclcNumber']}: "
f"{record['errorDetail']} ({record['httpStatusCode']})."
)
results['num_records_with_errors'] += 1
# Add record to records_with_errors_when_setting_holding.csv
if self.records_with_errors.tell() == 0:
# Write header row
self.records_with_errors_writer.writerow([
'Requested OCLC Number',
'New OCLC Number (if applicable)',
'Error'
])
self.records_with_errors_writer.writerow([
record['requestedOclcNumber'],
new_oclc_num,
(f"{api_response_error_msg}: {record['httpStatusCode']}"
f": {record['errorDetail']}. {oclc_num_msg}")
])
logger.debug(f'Finished processing record #{record_index}.\n')
except json.decoder.JSONDecodeError:
# except (requests.exceptions.JSONDecodeError,
# json.decoder.JSONDecodeError):
logger.exception(f'{api_response_error_msg}: Error decoding JSON')
logger.exception(f'{api_response.text=}')
# Re-raise exception so that the script is halted (since future API
# requests may result in the same error)
raise
logger.debug('Finished processing records buffer.')
def remove_all_records(self) -> None:
"""Removes all records from this buffer (i.e. clears oclc_num_set)."""
self.oclc_num_set.clear()
logger.debug(f'Cleared records buffer.')
logger.debug(self.__str__() + '\n')
def init_argparse() -> argparse.ArgumentParser:
"""Initializes and returns ArgumentParser object."""
parser = argparse.ArgumentParser(
usage=('%(prog)s [-h] [-v] --input_file INPUT_FILE --operation '
'{get_current_oclc_number, set_holding}'),
description=('For each row in the input file, perform the specified '
'operation.')
)
parser.add_argument(
'-v', '--version', action='version',
version=f'{parser.prog} version 1.0.0'
)
parser.add_argument(
'--input_file',
required=True,
type=str,
help=('the name and path of the file to be processed, which must be in '
'CSV format (e.g. '
'csv/master_list_records_with_potentially_old_oclc_num.csv)')
)
parser.add_argument(
'--operation',
required=True,
choices=['get_current_oclc_number', 'set_holding'],
help=('the operation to be performed on each row of the input file '
'(either get_current_oclc_number or set_holding)')
)
return parser
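# Illustrative command-line invocation (the script file name is hypothetical; the CSV path
# is the example given in the --input_file help text above):
#
#     python process_worldcat_records.py \
#         --input_file csv/master_list_records_with_potentially_old_oclc_num.csv \
#         --operation get_current_oclc_number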
def main() -> None:
"""Performs the specified operation on every record in the input file.
Gathers the maximum OCLC numbers possible before sending the appropriate
request to the WorldCat Metadata API.
Operations:
- get_current_oclc_number
For each row, check whether the given OCLC number is the current one:
-- If so, then add the record to csv/already_has_current_oclc_number.csv
-- If not, then add the record to csv/needs_current_oclc_number.csv
-- If an error is encountered, then add the record to
csv/records_with_errors_when_getting_current_oclc_number.csv
- set_holding
For each row, set holding for the given OCLC number
-- If holding is set successfully, then add the record to
csv/records_with_holding_successfully_set.csv
-- If holding was already set, then add the record to
csv/records_with_holding_already_set.csv
-- If an error is encountered, then add the record to
csv/records_with_errors_when_setting_holding.csv
"""
# Initialize parser and parse command-line args
parser = init_argparse()
args = parser.parse_args()
# Convert input file into pandas DataFrame
data = None
if args.input_file.endswith('.csv'):
data = pd.read_csv(args.input_file, dtype='str', keep_default_na=False)
else:
logger.exception(f'Invalid format for input file ({args.input_file}). '
f'Must be a CSV file (.csv)')
return
records_already_processed = set()
logger.debug(f'{records_already_processed=}\n')
logger.debug(f'{args.operation=}')
results = None
filename_for_records_to_update = None
filename_for_records_with_no_update_needed = None
filename_for_records_with_errors = None
if args.operation == 'get_current_oclc_number':
results = {
'num_records_with_current_oclc_num': 0,
'num_records_with_old_oclc_num': 0,
'num_records_with_errors': 0
}
filename_for_records_to_update = 'csv/needs_current_oclc_number.csv'
filename_for_records_with_no_update_needed = \
'csv/already_has_current_oclc_number.csv'
filename_for_records_with_errors = \
'csv/records_with_errors_when_getting_current_oclc_number.csv'
else:
results = {
'num_records_successfully_set': 0,
'num_records_already_set': 0,
'num_records_with_errors': 0
}
filename_for_records_to_update = \
'csv/records_with_holding_successfully_set.csv'
filename_for_records_with_no_update_needed = \
'csv/records_with_holding_already_set.csv'
filename_for_records_with_errors = \
'csv/records_with_errors_when_setting_holding.csv'
with open(filename_for_records_to_update, mode='a',
newline='') as records_to_update, \
open(filename_for_records_with_no_update_needed, mode='a',
newline='') as records_with_no_update_needed, \
open(filename_for_records_with_errors, mode='a',
newline='') as records_with_errors:
records_with_errors_writer = writer(records_with_errors)
records_buffer = None
if args.operation == 'get_current_oclc_number':
records_buffer = AlmaRecordsBuffer(
records_with_no_update_needed,
records_to_update,
records_with_errors
)
else:
records_buffer = WorldCatRecordsBuffer(
records_with_no_update_needed,
records_to_update,
records_with_errors
)
logger.debug(f'{type(records_buffer)=}')
logger.debug(records_buffer)
logger.debug(f'{type(records_buffer.contents)=}')
logger.debug(f'{len(records_buffer)=}\n')
# Loop over each row in DataFrame and check whether OCLC number is the
# current one
for index, row in data.iterrows():
logger.debug(f'Started processing row {index + 2} of input file...')
error_occurred = False
error_msg = None
try:
mms_id = None
orig_oclc_num = None
if args.operation == 'get_current_oclc_number':
mms_id = row['MMS ID']
orig_oclc_num = \
row["Unique OCLC Number from Alma Record's 035 $a"]
mms_id = libraries.record.get_valid_record_identifier(
mms_id,
'MMS ID'
)
else:
orig_oclc_num = row['OCLC Number']
# Make sure OCLC Number is valid
orig_oclc_num = libraries.record.get_valid_record_identifier(
orig_oclc_num, 'OCLC number')
orig_oclc_num = \
libraries.record.remove_leading_zeros(orig_oclc_num)
if args.operation == 'get_current_oclc_number':
assert mms_id not in records_already_processed, (f'Record '
f'with MMS ID {mms_id} has already been processed.')
records_already_processed.add(mms_id)
else:
assert orig_oclc_num not in records_already_processed, (
f'Record with OCLC Number {orig_oclc_num} has already '
f'been processed.')
records_already_processed.add(orig_oclc_num)
if len(records_buffer) < int(os.environ[
'WORLDCAT_METADATA_API_MAX_RECORDS_PER_REQUEST']):
if args.operation == 'get_current_oclc_number':
records_buffer.add(orig_oclc_num, mms_id)
else:
records_buffer.add(orig_oclc_num)
else:
# records_buffer has the maximum records possible per API
# request, so process these records
logger.debug('Records buffer is full.\n')
records_buffer.process_records(results)
# Now that its records have been processed, clear buffer
records_buffer.remove_all_records()
# Add current row's data to the empty buffer
if args.operation == 'get_current_oclc_number':
records_buffer.add(orig_oclc_num, mms_id)
else:
records_buffer.add(orig_oclc_num)
except AssertionError as assert_err:
if args.operation == 'get_current_oclc_number':
logger.exception(f"An assertion error occurred when "
f"processing MMS ID '{row['MMS ID']}' (at row "
f"{index + 2} of input file): {assert_err}")
else:
logger.exception(f"An assertion error occurred when "
f"processing OCLC Number '{row['OCLC Number']}' (at "
f"row {index + 2} of input file): {assert_err}")
error_msg = f"Assertion Error: {assert_err}"
error_occurred = True
finally:
if error_occurred:
results['num_records_with_errors'] += 1
# Add record to records_with_errors spreadsheet
if args.operation == 'get_current_oclc_number':
if records_with_errors.tell() == 0:
# Write header row
records_with_errors_writer.writerow([
'MMS ID',
'OCLC Number',
'Error'
])
records_with_errors_writer.writerow([
mms_id,
orig_oclc_num,
error_msg
])
else:
if records_with_errors.tell() == 0:
# Write header row
records_with_errors_writer.writerow([
'Requested OCLC Number',
'New OCLC Number (if applicable)',
'Error'
])
records_with_errors_writer.writerow([
orig_oclc_num,
'',
error_msg
])
logger.debug(f'Finished processing row {index + 2} of input '
f'file.\n')
# If records_buffer is not empty, process remaining records
if len(records_buffer) > 0:
records_buffer.process_records(results)
# logger.debug(f'{records_already_processed=}\n')
logger.debug(f'{len(records_already_processed)=}\n')
print(f'\nEnd of script. Processed {len(data.index)} rows from input file:')
if args.operation == 'get_current_oclc_number':
print(f'- {results["num_records_with_current_oclc_num"]} record(s) '
f'with current OCLC number\n'
f'- {results["num_records_with_old_oclc_num"]} record(s) with '
f'old OCLC number\n'
f'- {results["num_records_with_errors"]} record(s) with errors')
else:
print(f'- {results["num_records_successfully_set"]} record(s) updated, '
f'i.e. holding was successfully set\n'
f'- {results["num_records_already_set"]} record(s) not updated '
f'because holding was already set\n'
f'- {results["num_records_with_errors"]} record(s) with errors')
if __name__ == "__main__":
main()
| StarcoderdataPython |
4824999 | <reponame>akashkj/commcare-hq
import json
from datetime import date
from io import BytesIO
from django.http import (
Http404,
HttpResponse,
HttpResponseBadRequest,
HttpResponseServerError,
JsonResponse,
)
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy, ugettext_noop
from django.views.generic import View
from django.views.decorators.http import require_GET, require_POST
from memoized import memoized
from corehq.apps.export.dbaccessors import get_properly_wrapped_export_instance
from corehq.apps.export.det.exceptions import DETConfigError
from corehq.apps.export.det.schema_generator import generate_from_export_instance
from dimagi.utils.logging import notify_exception
from dimagi.utils.web import json_response
from soil import DownloadBase
from soil.exceptions import TaskFailedError
from soil.util import get_download_context, process_email_request
from corehq.apps.analytics.tasks import (
HUBSPOT_DOWNLOADED_EXPORT_FORM_ID,
send_hubspot_form,
track_workflow,
)
from corehq.apps.domain.decorators import login_and_domain_required
from corehq.apps.domain.models import Domain
from corehq.apps.export.const import MAX_EXPORTABLE_ROWS
from corehq.apps.export.exceptions import (
ExportAsyncException,
ExportFormValidationException,
)
from corehq.apps.export.export import (
get_export_download,
get_export_query,
get_export_size,
)
from corehq.apps.export.forms import (
EmwfFilterFormExport,
FilterCaseESExportDownloadForm,
FilterSmsESExportDownloadForm,
)
from corehq.apps.export.models import FormExportInstance
from corehq.apps.export.models.new import EmailExportWhenDoneRequest
from corehq.apps.export.utils import get_export
from corehq.apps.export.views.utils import (
ExportsPermissionsManager,
get_timezone,
)
from corehq.apps.hqwebapp.decorators import use_daterangepicker
from corehq.apps.hqwebapp.widgets import DateRangePickerWidget
from corehq.apps.locations.permissions import location_safe
from corehq.apps.reports.analytics.esaccessors import media_export_is_too_big
from corehq.apps.reports.filters.case_list import CaseListFilter
from corehq.apps.reports.filters.users import ExpandedMobileWorkerFilter
from corehq.apps.reports.models import HQUserType
from corehq.apps.reports.tasks import build_form_multimedia_zipfile
from corehq.apps.reports.util import datespan_from_beginning
from corehq.apps.settings.views import BaseProjectDataView
from corehq.apps.users.models import CouchUser
from corehq.toggles import PAGINATED_EXPORTS
class DownloadExportViewHelper(object):
'''
Encapsulates behavior that varies based on model (form, case, or sms)
and is needed by the function-based views in this module.
'''
@classmethod
def get(self, request, domain, form_or_case, is_sms):
model = form_or_case if form_or_case else 'sms'
if model == 'form':
return FormDownloadExportViewHelper(request, domain)
elif model == 'case':
return CaseDownloadExportViewHelper(request, domain)
elif model == 'sms':
return SMSDownloadExportViewHelper(request, domain)
else:
raise ValueError("Unrecognized model type")
def __init__(self, request, domain):
super(DownloadExportViewHelper, self).__init__()
self.request = request
self.domain = domain
def get_export(self, id):
raise NotImplementedError()
def send_preparation_analytics(self, export_instances, export_filters):
send_hubspot_form(HUBSPOT_DOWNLOADED_EXPORT_FORM_ID, self.request)
track_workflow(self.request.couch_user.username, 'Downloaded {} Exports With {}Data'.format(
self.model[0].upper() + self.model[1:],
'' if any(get_export_size(instance, export_filters) > 0 for instance in export_instances) else 'No ',
))
def get_filter_form(self, filter_form_data):
domain_object = Domain.get_by_name(self.domain)
timezone = get_timezone(self.domain, self.request.couch_user)
filter_form = self.filter_form_class(domain_object, timezone, filter_form_data)
if not filter_form.is_valid():
raise ExportFormValidationException
return filter_form
class FormDownloadExportViewHelper(DownloadExportViewHelper):
model = 'form'
filter_form_class = EmwfFilterFormExport
def get_export(self, export_id=None):
return get_export(self.model, self.domain, export_id)
class CaseDownloadExportViewHelper(DownloadExportViewHelper):
model = 'case'
filter_form_class = FilterCaseESExportDownloadForm
def get_export(self, export_id=None):
return get_export(self.model, self.domain, export_id)
class SMSDownloadExportViewHelper(DownloadExportViewHelper):
model = 'sms'
filter_form_class = FilterSmsESExportDownloadForm
def get_export(self, export_id=None):
return get_export(self.model, self.domain, export_id, self.request.couch_user.username)
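# Illustrative sketch (comments only) of the dispatch pattern above, mirroring how
# prepare_custom_export and prepare_form_multimedia below use it; the variable values are
# hypothetical:
#
#     helper = DownloadExportViewHelper.get(request, domain, form_or_case='form', is_sms=False)
#     export = helper.get_export(export_id)            # export instance for the 'form' model
#     filter_form = helper.get_filter_form(form_data)  # raises ExportFormValidationException on bad input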
class BaseDownloadExportView(BaseProjectDataView):
template_name = 'export/download_export.html'
http_method_names = ['get', 'post']
show_date_range = False
check_for_multimedia = False
sms_export = False
# To serve filters for export from mobile_user_and_group_slugs
export_filter_class = None
@use_daterangepicker
@method_decorator(login_and_domain_required)
def dispatch(self, request, *args, **kwargs):
self.permissions = ExportsPermissionsManager(self.form_or_case, request.domain, request.couch_user)
self.permissions.access_download_export_or_404()
return super(BaseDownloadExportView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
if not request.is_ajax():
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
return super(BaseDownloadExportView, self).post(request, *args, **kwargs)
@property
@memoized
def view_helper(self):
return DownloadExportViewHelper.get(self.request, self.domain, self.form_or_case, self.sms_export)
@property
@memoized
def timezone(self):
return get_timezone(self.domain, self.request.couch_user)
@property
@memoized
def default_datespan(self):
return datespan_from_beginning(self.domain_object, self.timezone)
@property
def page_context(self):
context = {
'download_export_form': self.download_export_form,
'export_list': self.export_list,
'form_or_case': self.form_or_case,
'max_column_size': self.max_column_size,
'show_date_range': self.show_date_range,
'check_for_multimedia': self.check_for_multimedia,
'sms_export': self.sms_export,
'user_types': HQUserType.human_readable
}
if (
self.default_datespan.startdate is not None
and self.default_datespan.enddate is not None
):
context.update({
'default_date_range': '{startdate}{separator}{enddate}'.format(
startdate=self.default_datespan.startdate.strftime('%Y-%m-%d'),
enddate=self.default_datespan.enddate.strftime('%Y-%m-%d'),
separator=DateRangePickerWidget.separator,
),
})
else:
context.update({
'default_date_range': _(
"You have no submissions in this project."
),
'show_no_submissions_warning': True,
})
if self.export_filter_class:
context['dynamic_filters'] = self.export_filter_class(
self.request, self.request.domain
).render()
return context
@property
@memoized
def download_export_form(self):
return self.view_helper.filter_form_class(self.domain_object, timezone=self.timezone)
@property
def export_id(self):
return self.kwargs.get('export_id')
@property
def page_url(self):
if self.export_id:
return reverse(self.urlname, args=(self.domain, self.export_id))
return reverse(self.urlname, args=(self.domain,))
@property
def export_list(self):
exports = []
if (
self.request.method == 'POST'
and 'export_list' in self.request.POST
and not self.request.is_ajax()
):
raw_export_list = json.loads(self.request.POST['export_list'])
exports = [self.view_helper.get_export(e['id']) for e in raw_export_list]
elif self.export_id or self.sms_export:
exports = [self.view_helper.get_export(self.export_id)]
if not self.permissions.has_view_permissions:
if self.permissions.has_deid_view_permissions:
exports = [x for x in exports if x.is_safe]
else:
raise Http404()
# if there are no exports, this page doesn't exist
if not exports:
raise Http404()
exports = [self.download_export_form.format_export_data(e) for e in exports]
return exports
@property
def max_column_size(self):
try:
return int(self.request.GET.get('max_column_size', 2000))
        except (TypeError, ValueError):
return 2000
def _check_export_size(domain, export_instances, export_filters):
count = 0
for instance in export_instances:
count += get_export_size(instance, export_filters)
if count > MAX_EXPORTABLE_ROWS and not PAGINATED_EXPORTS.enabled(domain):
raise ExportAsyncException(
_("This export contains %(row_count)s rows. Please change the "
"filters to be less than %(max_rows)s rows.") % {
'row_count': count,
'max_rows': MAX_EXPORTABLE_ROWS
}
)
def _check_deid_permissions(permissions, export_instances):
if not permissions.has_deid_view_permissions:
for instance in export_instances:
if instance.is_deidentified:
raise ExportAsyncException(
_("You do not have permission to export de-identified exports.")
)
@require_POST
@login_and_domain_required
@location_safe
def prepare_custom_export(request, domain):
"""Uses the current exports download framework (with some nasty filters)
to return the current download id to POLL for the download status.
:return: {
'success': True,
'download_id': '<some uuid>',
}
"""
form_or_case = request.POST.get('form_or_case')
sms_export = json.loads(request.POST.get('sms_export'))
permissions = ExportsPermissionsManager(form_or_case, domain, request.couch_user)
permissions.access_download_export_or_404()
view_helper = DownloadExportViewHelper.get(request, domain, form_or_case, sms_export)
filter_form_data = json.loads(request.POST.get('form_data'))
try:
filter_form = view_helper.get_filter_form(filter_form_data)
except ExportFormValidationException:
return json_response({
'error': _("Form did not validate."),
})
export_filters = filter_form.get_export_filters(request, filter_form_data)
export_es_filters = [f.to_es_filter() for f in export_filters]
export_specs = json.loads(request.POST.get('exports'))
export_ids = [spec['export_id'] for spec in export_specs]
export_instances = [view_helper.get_export(export_id) for export_id in export_ids]
try:
_check_deid_permissions(permissions, export_instances)
_check_export_size(domain, export_instances, export_filters)
except ExportAsyncException as e:
return json_response({
'error': str(e),
})
# Generate filename
if len(export_instances) > 1:
filename = "{}_custom_bulk_export_{}".format(domain, date.today().isoformat())
else:
filename = "{} {}".format(export_instances[0].name, date.today().isoformat())
download = get_export_download(
domain,
export_ids,
view_helper.model,
request.couch_user.username,
es_filters=export_es_filters,
owner_id=request.couch_user.get_id,
filename=filename,
)
view_helper.send_preparation_analytics(export_instances, export_filters)
return json_response({
'success': True,
'download_id': download.download_id,
})
@require_GET
@login_and_domain_required
@location_safe
def poll_custom_export_download(request, domain):
"""Polls celery to see how the export download task is going.
:return: final response: {
'success': True,
'dropbox_url': '<url>',
'download_url: '<url>',
<task info>
}
"""
form_or_case = request.GET.get('form_or_case')
permissions = ExportsPermissionsManager(form_or_case, domain, request.couch_user)
permissions.access_download_export_or_404()
download_id = request.GET.get('download_id')
try:
context = get_download_context(download_id)
except TaskFailedError as e:
if e.exception_name == 'XlsLengthException':
return JsonResponse({
'error': _(
'This file has more than 256 columns, which is not supported by xls. '
'Please change the output type to csv or xlsx to export this file.')
})
else:
notify_exception(
request, "Export download failed",
details={'download_id': download_id, 'errors': e.errors,
'exception_name': e.exception_name})
return JsonResponse({
'error': _("Download task failed to start."),
})
if context.get('is_ready', False):
context.update({
'dropbox_url': reverse('dropbox_upload', args=(download_id,)),
'download_url': "{}?get_file".format(
reverse('retrieve_download', args=(download_id,))
),
})
context['is_poll_successful'] = True
return json_response(context)
@location_safe
class DownloadNewFormExportView(BaseDownloadExportView):
urlname = 'new_export_download_forms'
export_filter_class = ExpandedMobileWorkerFilter
show_date_range = True
page_title = ugettext_noop("Download Form Data Export")
check_for_multimedia = True
form_or_case = 'form'
@property
def parent_pages(self):
from corehq.apps.export.views.list import FormExportListView, DeIdFormExportListView
if not (self.permissions.has_edit_permissions and self.permissions.has_view_permissions):
return [{
'title': DeIdFormExportListView.page_title,
'url': reverse(DeIdFormExportListView.urlname, args=(self.domain,)),
}]
return [{
'title': FormExportListView.page_title,
'url': reverse(FormExportListView.urlname, args=(self.domain,)),
}]
@require_POST
@login_and_domain_required
def prepare_form_multimedia(request, domain):
"""Gets the download_id for the multimedia zip and sends it to the
exportDownloadService in download_export.ng.js to begin polling for the
zip file download.
"""
form_or_case = request.POST.get('form_or_case')
sms_export = json.loads(request.POST.get('sms_export'))
permissions = ExportsPermissionsManager(form_or_case, domain, request.couch_user)
permissions.access_download_export_or_404()
view_helper = DownloadExportViewHelper.get(request, domain, form_or_case, sms_export)
filter_form_data = json.loads(request.POST.get('form_data'))
export_specs = json.loads(request.POST.get('exports'))
try:
filter_form = view_helper.get_filter_form(filter_form_data)
except ExportFormValidationException:
return json_response({
'error': _("Please check that you've submitted all required filters."),
})
export = view_helper.get_export(export_specs[0]['export_id'])
filters = filter_form.get_export_filters(request, filter_form_data)
export_es_query = get_export_query(export, filters)
if media_export_is_too_big(export_es_query):
return json_response({
'success': False,
'error': _("This is too many files to export at once. "
"Please modify your filters to select fewer forms."),
})
download = DownloadBase()
download.set_task(build_form_multimedia_zipfile.delay(
domain=domain,
export_id=export.get_id,
es_filters=filters,
download_id=download.download_id,
owner_id=request.couch_user.get_id,
))
return json_response({
'success': True,
'download_id': download.download_id,
})
@require_GET
@location_safe
@login_and_domain_required
def has_multimedia(request, domain):
"""Checks to see if this form export has multimedia available to export
"""
form_or_case = request.GET.get('form_or_case')
if form_or_case != 'form':
raise ValueError("has_multimedia is only available for form exports")
permissions = ExportsPermissionsManager(form_or_case, domain, request.couch_user)
permissions.access_download_export_or_404()
export_object = FormExportInstance.get(request.GET.get('export_id'))
return json_response({
'success': True,
'hasMultimedia': export_object.has_multimedia,
})
@location_safe
class DownloadNewCaseExportView(BaseDownloadExportView):
urlname = 'new_export_download_cases'
export_filter_class = CaseListFilter
page_title = ugettext_noop("Download Case Data Export")
form_or_case = 'case'
@property
def parent_pages(self):
from corehq.apps.export.views.list import CaseExportListView
return [{
'title': CaseExportListView.page_title,
'url': reverse(CaseExportListView.urlname, args=(self.domain,)),
}]
class DownloadNewSmsExportView(BaseDownloadExportView):
urlname = 'new_export_download_sms'
page_title = ugettext_noop("Export SMS Messages")
form_or_case = None
export_id = None
sms_export = True
@property
def parent_pages(self):
return []
class BulkDownloadNewFormExportView(DownloadNewFormExportView):
urlname = 'new_bulk_download_forms'
page_title = ugettext_noop("Download Form Data Exports")
export_filter_class = ExpandedMobileWorkerFilter
check_for_multimedia = False
@login_and_domain_required
@require_POST
def add_export_email_request(request, domain):
download_id = request.POST.get('download_id')
user_id = request.couch_user.user_id
if download_id is None or user_id is None:
return HttpResponseBadRequest(ugettext_lazy('Download ID or User ID blank/not provided'))
try:
download_context = get_download_context(download_id)
except TaskFailedError:
return HttpResponseServerError(ugettext_lazy('Export failed'))
if download_context.get('is_ready', False):
try:
couch_user = CouchUser.get_by_user_id(user_id, domain=domain)
except CouchUser.AccountTypeError:
return HttpResponseBadRequest(ugettext_lazy('Invalid user'))
if couch_user is not None:
process_email_request(domain, download_id, couch_user.get_email())
else:
EmailExportWhenDoneRequest.objects.create(domain=domain, download_id=download_id, user_id=user_id)
return HttpResponse(ugettext_lazy('Export e-mail request sent.'))
@method_decorator(login_and_domain_required, name='dispatch')
class DownloadDETSchemaView(View):
urlname = 'download-det-schema'
def get(self, request, domain, export_instance_id):
export_instance = get_properly_wrapped_export_instance(export_instance_id)
assert domain == export_instance.domain
output_file = BytesIO()
try:
generate_from_export_instance(export_instance, output_file)
except DETConfigError as e:
return HttpResponse(_('Sorry, something went wrong creating that file: {error}').format(error=e))
output_file.seek(0)
response = HttpResponse(
output_file,
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
)
response['Content-Disposition'] = f'attachment; filename="{export_instance.name}-DET.xlsx"'
return response
| StarcoderdataPython |
3374124 | <gh_stars>0
#!/usr/bin/env python3
''' EC² TTLDM-35 - TTL Military Logic Delay Module '''
from Chipdesc.chip import Chip
class TTLDM35(Chip):
''' EC² TTLDM-35 - TTL Military Logic Delay Module '''
symbol_name = "DLY_35"
checked = "MEM32 0029"
symbol = '''
+--------+
| |
| xnn |12
| 7NS+-->
| |4
| 14NS+-->
1| |10
-->+IN 21NS+-->
| |6
| 28NS+-->
| |8
| 35NS+-->
| |
| _ |
+--------+
'''
if __name__ == "__main__":
TTLDM35(__file__).main()
| StarcoderdataPython |
4821572 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import uuid
from decimal import Decimal
from django.core.exceptions import ValidationError
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from djmoney.models.fields import MoneyField
from djmoney.forms.widgets import CURRENCY_CHOICES
from ecommerce.utils import get_country_code, get_country_province_code
def validate_tax_rate(value):
if value >= 1 or value < -1:
raise ValidationError(
            _('The value `%(value)s` is not a valid tax rate. Please pick a value between 0 and 0.99'),
params={'value': value},
)
class StoreManager(models.Manager):
@cached_property
def default_store(self):
return Store.objects.all().first()
def delete_all(self):
items = Store.objects.all()
for item in items.all():
item.delete()
class Store(models.Model):
"""
Model to represent a single store and the business details (ex: tax rate)
associate with it.
"""
class Meta:
app_label = 'foundation'
db_table = 'mika_stores'
verbose_name = _('Store')
verbose_name_plural = _('Stores')
default_permissions = ()
permissions = (
# ("can_get_opening_hours_specifications", "Can get opening hours specifications"),
# ("can_get_opening_hours_specification", "Can get opening hours specifications"),
# ("can_post_opening_hours_specification", "Can create opening hours specifications"),
# ("can_put_opening_hours_specification", "Can update opening hours specifications"),
# ("can_delete_opening_hours_specification", "Can delete opening hours specifications"),
)
objects = StoreManager()
name = models.CharField(
_("Name"),
max_length=31,
help_text=_('The official name of this store.'),
blank=False,
null=False,
)
currency = models.CharField(
_("Currency"),
max_length=3,
help_text=_('The currency used by this store formatted in <a href="https://en.wikipedia.org/wiki/ISO_4217">ISO 4217</a> formatting.'),
default="CAD",
blank=True,
null=False,
choices=CURRENCY_CHOICES
)
timezone_name = models.CharField(
_("Timezone Name"),
max_length=63,
help_text=_('The timezone for this store.'),
default="America/Toronto",
blank=True,
null=False
)
tax_rates = JSONField(
_("Tax"),
help_text=_('The dictionary of tax rates to be looked up and applied on our e-commerce purchases.'),
blank=True,
null=False,
)
referrer_credit = MoneyField(
_("Referrer Credit"),
        help_text=_('The credit amount that will be granted to device purchases for users who referred new users to our store.'),
max_digits=14,
decimal_places=2,
default_currency='CAD',
blank=True,
null=True,
)
referee_credit = MoneyField(
_("Referee Credit"),
        help_text=_('The credit amount that will be granted to device purchases for users who were referred to our store by existing users.'),
max_digits=14,
decimal_places=2,
default_currency='CAD',
blank=True,
null=True,
)
def __str__(self):
return str(self.name)
def get_tax_rate(self, country_name, province_name):
country_code = get_country_code(country_name)
province_code = get_country_province_code(country_name, province_name)
try:
return Decimal(self.tax_rates[country_code][province_code])
except KeyError:
return None
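# Illustrative sketch of the tax_rates structure that get_tax_rate expects: an outer
# country-code key and an inner province-code key. It assumes get_country_code('Canada')
# returns 'CA' and get_country_province_code('Canada', 'Ontario') returns 'ON'; the rates
# shown are hypothetical examples:
#
#     store.tax_rates = {"CA": {"ON": "0.13", "QC": "0.14975"}}
#     store.get_tax_rate("Canada", "Ontario") # -> Decimal("0.13")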
| StarcoderdataPython |
1682340 | # This example requires the micropython_dotstar library
# https://github.com/mattytrentini/micropython-dotstar
from machine import SPI, Pin
import tinypico as TinyPICO
from micropython_dotstar import DotStar
import time, random, micropython
# Configure SPI for controlling the DotStar
# Internally we are using software SPI for this as the pins being used are not hardware SPI pins
spi = SPI(sck=Pin( TinyPICO.DOTSTAR_CLK ), mosi=Pin( TinyPICO.DOTSTAR_DATA ), miso=Pin( TinyPICO.SPI_MISO) )
# Create a DotStar instance
dotstar = DotStar(spi, 1, brightness = 0.5 ) # Just one DotStar, half brightness
# Turn on the power to the DotStar
TinyPICO.set_dotstar_power( True )
# Say hello
print("\nHello from TinyPICO!")
print("--------------------\n")
# Show some info on boot
print("Battery Voltage is {}V".format( TinyPICO.get_battery_voltage() ) )
print("Battery Charge State is {}\n".format( TinyPICO.get_battery_charging() ) )
# Show available memory
print("Memory Info - micropython.mem_info()")
print("------------------------------------")
micropython.mem_info()
# Read the data every 5 seconds
update_interval = 5
# Make sure it fires immediately by starting it in the past
update_temp_time = time.time() - 10
def print_temp():
global update_interval
global update_temp_time
# We only run the contents of this function every 5 seconds
if update_temp_time < time.time():
update_temp_time = time.time() + update_interval
# Grab the temperates and print them
print("\nInternal PICO-D4 Temp: {}°F {:.2f}°C".format( TinyPICO.get_internal_temp_F(), TinyPICO.get_internal_temp_C() ) )
# Create a colour wheel index int
color_index = 0
# Rainbow colours on the Dotstar
while True:
# Get the R,G,B values of the next colour
r,g,b = TinyPICO.dotstar_color_wheel( color_index )
# Set the colour on the dotstar
dotstar[0] = ( r, g, b, 0.5)
# Increase the wheel index
color_index += 1
# Sleep for 20ms so the colour cycle isn't too fast
time.sleep_ms(20)
# Print the internal PICO-D4 temperature in F and C
print_temp() | StarcoderdataPython |
107437 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_general_log_parser
----------------------------------
Tests for `general_log_parser` module.
"""
import sys, os, re
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import unittest
from general_log_parser import parser
class TestGeneralLogParser(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_is_valid_date(self):
log = "a.{}.log"
        file_regex = re.compile(log.replace("{}", r"(\d*)"))
assert parser.is_valid_date(file_regex, "20150505", "20150510", "a.20150505.log") == True
assert parser.is_valid_date(file_regex, "20150505", "20150510", "a.20150504.log") == False
assert parser.is_valid_date(file_regex, "20150505", "20150510", "a.20140505.log") == False
def test_has_piped_input(self):
assert parser.has_piped_input() == True
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3386428 | <reponame>juergenhoetzel/craft<filename>bin/Utils/CraftManifest.py<gh_stars>0
import collections
import datetime
import json
import os
from CraftCore import CraftCore
import utils
class CraftManifestEntryFile(object):
def __init__(self, fileName : str, checksum : str, version : str="") -> None:
self.fileName = fileName
self.checksum = checksum
self.date = datetime.datetime.utcnow()
self.version = version
self.buildPrefix = CraftCore.standardDirs.craftRoot()
@staticmethod
def fromJson(data : dict):
out = CraftManifestEntryFile(data["fileName"], data["checksum"])
out.date = CraftManifest._parseTimeStamp(data["date"])
out.version = data.get("version", "")
out.buildPrefix = data.get("buildPrefix", None)
return out
def toJson(self) -> dict:
return {"fileName" : self.fileName,
"checksum" : self.checksum,
"date" : self.date.strftime(CraftManifest._TIME_FORMAT),
"version" : self.version,
"buildPrefix" : self.buildPrefix}
class CraftManifestEntry(object):
def __init__(self, name : str) -> None:
self.name = name
self.files = []
@staticmethod
def fromJson(data : dict):
entry = CraftManifestEntry(data["name"])
entry.files = sorted([CraftManifestEntryFile.fromJson(fileData) for fileData in data["files"]], key=lambda x:x.date, reverse=True)
return entry
def toJson(self) -> dict:
return {"name":self.name, "files":[x.toJson() for x in self.files]}
def addFile(self, fileName : str, checksum : str, version : str="") -> CraftManifestEntryFile:
f = CraftManifestEntryFile(fileName, checksum, version)
self.files.insert(0, f)
return f
@property
def latest(self) -> CraftManifestEntryFile:
return self.files[0] if self.files else None
class CraftManifest(object):
_TIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
def __init__(self):
self.date = datetime.datetime.utcnow()
self.packages = {str(CraftCore.compiler) : {}}
self.origin = None
@staticmethod
def version() -> int:
return 1
@staticmethod
def _migrate0(data : dict):
manifest = CraftManifest()
packages = manifest.packages[str(CraftCore.compiler)]
for name, package in data.items():
if not name in packages:
packages[name] = CraftManifestEntry(name)
p = packages[name]
for fileName, pData in data[name].items():
f = p.addFile(fileName, pData["checksum"])
f.date = datetime.datetime(1, 1, 1)
return manifest
@staticmethod
def fromJson(data : dict):
version = data.get("version", 0)
if version == 0:
return CraftManifest._migrate0(data)
elif version != CraftManifest.version():
raise Exception("Invalid manifest version detected")
manifest = CraftManifest()
manifest.date = CraftManifest._parseTimeStamp(data["date"])
manifest.origin = data.get("origin", None)
for compiler in data["packages"]:
manifest.packages[compiler] = {}
for package in data["packages"][compiler]:
p = CraftManifestEntry.fromJson(package)
manifest.packages[compiler][p.name] = p
return manifest
def update(self, other):
for compiler in other.packages.keys():
if not compiler in self.packages:
self.packages[compiler] = {}
self.packages[compiler].update(other.packages[compiler])
def toJson(self) -> dict:
out = {"date": str(self.date), "origin": self.origin, "packages":{}, "version": CraftManifest.version()}
for compiler, packages in self.packages.items():
out["packages"][compiler] = [x.toJson() for x in self.packages[compiler].values()]
return out
def get(self, package : str) -> CraftManifestEntry:
compiler = str(CraftCore.compiler)
if not compiler in self.packages:
self.packages[compiler] = {}
if not package in self.packages[compiler]:
self.packages[compiler][package] = CraftManifestEntry(package)
return self.packages[compiler][package]
def dump(self, cacheFilePath, includeTime=False):
if includeTime:
name, ext = os.path.splitext(cacheFilePath)
cacheFilePath = f"{name}-{self.date.strftime('%Y%m%dT%H%M%S')}{ext}"
self.date = datetime.datetime.utcnow()
with open(cacheFilePath, "wt+") as cacheFile:
json.dump(self, cacheFile, sort_keys=True, indent=2, default=lambda x:x.toJson())
@staticmethod
def load(manifestFileName : str, urls : [str]=None):
"""
Load a manifest.
        If URLs are provided, a manifest is fetched from each URL and merged with the local manifest.
TODO: in that case we are merging all repositories so we should also merge the cache files
"""
old = None
if not urls and ("ContinuousIntegration", "RepositoryUrl") in CraftCore.settings:
urls = [CraftCore.settings.get("ContinuousIntegration", "RepositoryUrl").rstrip("/")]
if urls:
old = CraftManifest()
for url in urls:
new = CraftManifest.fromJson(CraftCore.cache.cacheJsonFromUrl(f"{url}/manifest.json"))
if new:
new.origin = url
new.dump(manifestFileName, includeTime=True)
old.update(new)
cache = None
if os.path.isfile(manifestFileName):
try:
with open(manifestFileName, "rt+") as cacheFile:
cache = CraftManifest.fromJson(json.load(cacheFile))
cache.dump(manifestFileName, includeTime=True)
except Exception as e:
                CraftCore.log.warning(f"Failed to load {manifestFileName}, {e}")
pass
if old:
if cache:
old.update(cache)
return old
if not cache:
return CraftManifest()
return cache
@staticmethod
def _parseTimeStamp(time : str) -> datetime.datetime:
return datetime.datetime.strptime(time, CraftManifest._TIME_FORMAT)
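# Illustrative usage sketch (comments only; the file name, URL and package name are
# hypothetical):
#
#     manifest = CraftManifest.load("manifest.json", urls=["https://files.example.org/cache"])
#     entry = manifest.get("libs/zlib") # CraftManifestEntry for the current compiler
#     entry.addFile("zlib-1.2.11.tar.7z", "<sha256 checksum>", version="1.2.11")
#     manifest.dump("manifest.json", includeTime=False)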
| StarcoderdataPython |
3322571 | # -*- coding:utf-8 -*-
# @Script: utils.py
# @Author: <NAME>
# @Email: <EMAIL>
# @Create At: 2020-04-10 03:21:53
# @Last Modified By: <NAME>
# @Last Modified At: 2020-09-17 15:25:00
# @Description: Utils to search and replace file content and get ip address information.
import glob
import mmap
import subprocess
import re
import ipaddress
import os, fnmatch
from pathlib import Path
MTU_REX = 'mtu [0-9]+'
IP_REX = 'inet [0-9]+.[0-9]+.[0-9]+.[0-9]+/[0-9]+'
IPV6_REX = 'inet6 [0-9a-fA-F:]*/[1-9]+'
def find_file_content(search_string):
try:
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
path = str(path) + '/static/*.js'
print(path)
for js_file in glob.glob(path):
with open(js_file, 'rb', 0) as file, mmap.mmap(
file.fileno(),
0,
access=mmap.ACCESS_READ) as s:
if s.find(search_string.encode()) != -1:
return js_file
except Exception:
return None
def replace_in_file(filename, to_replace, replace_with):
try:
with open(filename, 'r') as file:
contend = file.read()
contend = contend.replace(to_replace, replace_with)
with open(filename, 'w') as file:
file.write(contend)
return True
except Exception:
return False
def replace_in_files(to_replace, replace_with):
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
path = str(path) + '/static/'
listOfFiles = os.listdir(path)
pattern = "*.js"
for filename in listOfFiles:
if fnmatch.fnmatch(filename, pattern):
print(filename)
with open(str(path)+str(filename), 'r') as file:
contend = file.read()
contend = contend.replace(to_replace, replace_with)
with open(str(path)+str(filename), 'w') as file:
file.write(contend)
def get_net_information(dev):
if not dev:
return None
try:
addr = subprocess.run(
['ip', 'addr', 'show', dev],
check=True,
capture_output=True
)
if addr.returncode != 0:
return None
addr = addr.stdout.decode()
mtu = re.search(MTU_REX, addr).group()
mtu = mtu.split(' ')[-1]
ipv4 = re.search(IP_REX, addr).group()
ipv4 = ipv4.split(' ')[-1]
ipv6 = re.search(IPV6_REX, addr)
if ipv6:
ipv6 = ipv6.group().split(' ')[-1]
nic = ipaddress.IPv4Interface(ipv4)
netmask = nic.netmask.compressed
netmask_prefix = nic.with_prefixlen.split('/')[-1]
ip = nic.ip.compressed
hostname = os.uname()[1]
return {
'hostname': hostname,
'ipAddress': ip,
'subnetMask': netmask,
'subnetMaskPrefix': netmask_prefix,
'mtu': mtu,
'ipv6Address': ipv6
}
except Exception:
return None
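# Illustrative sketch of what get_net_information extracts, assuming `ip addr show eth0`
# prints lines such as "mtu 1500" and "inet 192.168.1.10/24" (the device name and the
# addresses are hypothetical):
#
#     get_net_information('eth0')
#     # -> {'hostname': '<hostname>', 'ipAddress': '192.168.1.10',
#     #     'subnetMask': '255.255.255.0', 'subnetMaskPrefix': '24',
#     #     'mtu': '1500', 'ipv6Address': 'fe80::1/64' or None}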
def update_env_file(ip, port=3000):
content = f'HOST_ADDRESS={ip}\nHOST_PORT={port}'
path = os.path.dirname(__file__)
path = path.split('gw-backend')[0] + '.env'
with open(path, 'w') as env:
env.write(content)
| StarcoderdataPython |
1780877 | <gh_stars>0
# Colors based on the Material design scheme
colors = {
'red': '#B71C1C',
'green': '#2E7D32',
'gray': '#263238'
}
def escape_slack_characters(raw: str) -> str:
"""Escape the special characters that are used by Slack
in their messaging API.
See `Slack API docs <https://api.slack.com/reference/surfaces/formatting#escaping>`_.
Args:
raw (str): String to be escaped
Returns:
(str) String with problematic escape strings
"""
# Escape &
    out = raw.replace("&", "&amp;")
    # Escape < and >
    return out.replace("<", "&lt;").replace(">", "&gt;")
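# Illustrative example of the escaping above:
#
#     escape_slack_characters("if a < b & b > c")
#     # -> "if a &lt; b &amp; b &gt; c"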
| StarcoderdataPython |
3307506 | from itertools import repeat
def zeros_imul(n):
l = [0]
l *= n
return l
def zeros_mul(n):
return n * [0]
def zeros_repeat(n):
return list(repeat(0, n))
def zeros_slow(n):
return [0 for _ in range(n)]
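# Illustrative benchmark sketch for comparing the variants above (comments only; timings
# are machine-dependent and not part of the original file):
#
#     import timeit
#     for fn in (zeros_imul, zeros_mul, zeros_repeat, zeros_slow):
#         print(fn.__name__, timeit.timeit(lambda: fn(10_000), number=1_000))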
| StarcoderdataPython |
164264 | import sys
import util
from node import Node
from state import State
def applicable(state, actions):
''' Return a list of applicable actions in a given `state`. '''
app = list()
for act in actions:
if State(state).intersect(act.precond) == act.precond:
app.append(act)
return app
def successorRelaxed(state, action):
''' Return the sucessor state generated by executing `action` in `state`. '''
return State(action.pos_effect).union(state)
def layerGoals(state, predicates):
return State(state).union(predicates)
def goal_test(state, goal):
''' Return true if `state` is a goal state. '''
return State(state).intersect(goal) == State(goal)
def h_naive(state, planning):
return 0
def h_ff(state, planning, goal):
graphplan = dict() #graphplan relaxed
actions = planning.actions
X = state
isGoal = False
    if X.intersect(goal) == goal: # we are already at the goal, so the number of actions needed is zero
return 0
level = 0
graphplan[(level,'state')] = X
#PHASE 1 - expand graph
while not isGoal:
actionsApplicable = applicable(X,actions)
level += 1
for a in actionsApplicable:
X = successorRelaxed(X,a) #added positive effects of a
if X.intersect(goal) == goal:
isGoal = True
break
graphplan[(level,'state')] = X
graphplan[(level,'action')] = actionsApplicable
    #PHASE 2 - regression search - starting from the goal atoms and working back to the state atoms
thisLevelGoals = set()
thisLevelGoals = thisLevelGoals.union(goal)
relaxedActions = set()
while (level > 0):
prevLevelGoals = set()
for tg in thisLevelGoals:
if tg in graphplan[level-1,'state']:
prevLevelGoals.add(tg)
else:
for a in graphplan[level,'action']:
if tg in a.pos_effect:
prevLevelGoals = prevLevelGoals.union(a.precond)
relaxedActions.add(a)
break
level -= 1
thisLevelGoals = prevLevelGoals.copy()
return len(relaxedActions)
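# Illustrative call sketch for the relaxed-plan (FF) heuristic above (comments only; the
# `planning` object and its problem.init / problem.goal attributes are assumed to come
# from the surrounding planner code):
#
#     h = h_ff(State(planning.problem.init), planning, State(planning.problem.goal))
#     # h is the number of distinct actions in the relaxed plan extracted in PHASE 2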
def h_add_planner(state, planning, goal):
h = dict()
actions = planning.actions
X = state
for x in X:
h[x] = 0
change = True
while change:
change = False
actionsApplicable = applicable(X,actions)
for a in actionsApplicable:
X = successorRelaxed(X,a) #added positive effects of a
for p in a.pos_effect:
prev = h.get(p,sys.maxsize)
h[p] = min(prev,(1+sum(h.get(pre, sys.maxsize) for pre in a.precond)))
if prev != h[p]:
change = True
return sum(h.get(i,sys.maxsize) for i in goal)
# heuristic used in the brucutu and local tests... it was giving good results
# def h_add(planning, state):
# h = dict()
# actions = planning.actions
# init = planning.problem.init
# X = init
# for x in X:
# h[x] = 0
# change = True
# while change:
# change = False
# actionsApplicable = applicable(X,actions)
# for a in actionsApplicable:
# X = successorRelaxed(X,a) #added positive effects of a
# for p in a.pos_effect:
# prev = h.get(p,sys.maxsize)
# h[p] = min(prev,(1+sum(h.get(pre, sys.maxsize) for pre in a.precond)))
# if prev != h[p]:
# change = True
# '''
# selecting only atoms that belongs to state
# node_dict = {p:h.get(p,sys.maxsize) for p in state}
# return node_dict
# '''
# return h
# #return sum(h.get(i,sys.maxsize) for i in goal)
def h_add(planning, state):
h = dict()
actions = planning.actions
#init = planning.problem.init
X = state
for x in X:
h[x] = 0
change = True
while change:
change = False
actionsApplicable = applicable(X,actions)
for a in actionsApplicable:
X = successorRelaxed(X,a) #added positive effects of a
for p in a.pos_effect:
prev = h.get(p,sys.maxsize)
h[p] = min(prev,(1+sum(h.get(pre, sys.maxsize) for pre in a.precond)))
if prev != h[p]:
change = True
'''
selecting only atoms that belongs to state
node_dict = {p:h.get(p,sys.maxsize) for p in state}
return node_dict
'''
return h
#return sum(h.get(i,sys.maxsize) for i in goal)
| StarcoderdataPython |
1760872 | from PIL import Image
from os import path
import sys
if len(sys.argv) == 2:
try:
im = Image.open(path.abspath(sys.argv[1]))
px = im.load()
setColor = set({})
for x in range(im.width):
for y in range(im.height):
setColor.add(px[x, y])
print(len(setColor))
except FileNotFoundError:
print("Unexpected error: file not found")
sys.exit(1)
except:
print("Unexpected error:", sys.exc_info()[1])
sys.exit(1)
else:
print("usage: python3 pixel.py [FILE]\ne.g.: python3 pixel.py image.jpg")
| StarcoderdataPython |
1660274 |
AUTO_SPLIT_LINES = True
_has_readline = False
try:
import readline
_has_readline = True
except ModuleNotFoundError:
pass
_has_prompt_toolkit = False
try:
import prompt_toolkit
import prompt_toolkit.completion
_has_prompt_toolkit = True
except ModuleNotFoundError:
pass
if _has_readline:
class ReadlineCompleter:
def __init__(self, send_request, server_address):
self.send_request = send_request
self.server_address = server_address
def complete(self, text, state):
if not text.strip():
if state == 0:
readline.insert_text('\t')
readline.redisplay()
return ''
else:
return None
if state == 0:
self.matches = request_completion(self.send_request, self.server_address, text)
try:
return self.matches[state]
except IndexError:
return None
if _has_prompt_toolkit:
class PromptToolkitCompleter(prompt_toolkit.completion.Completer):
def __init__(self, send_request, server_address):
super().__init__()
self.send_request = send_request
self.server_address = server_address
def get_completions(self, document, complete_event):
text = document.text_before_cursor
matches = request_completion(self.send_request, self.server_address, text)
for match in matches:
yield prompt_toolkit.completion.Completion(match, -len(text))
def request_completion(send_request, server_address, text):
request_data = {
'type': 'complete',
'line': text,
}
response_data = send_request(server_address, request_data)
return response_data.get('matches', [])
def setup(send_request, server_address):
if _has_readline:
readline.parse_and_bind('tab: complete')
readline.set_completer(ReadlineCompleter(send_request, server_address).complete)
input_func = input
elif _has_prompt_toolkit:
session = prompt_toolkit.PromptSession(
complete_while_typing=False,
complete_style=prompt_toolkit.shortcuts.CompleteStyle.READLINE_LIKE,
completer=PromptToolkitCompleter(send_request, server_address)
)
input_func = session.prompt
else:
input_func = input
if AUTO_SPLIT_LINES:
return SplitLinesInputWrapper(input_func).input
else:
return input_func
class SplitLinesInputWrapper:
"""Split lines from input. Treat one-time multiple-line input as multiple one-line input."""
def __init__(self, raw_input):
self.raw_input = raw_input
self.cached_lines = []
def input(self, prompt=None):
if not self.cached_lines:
raw_str = self.raw_input(prompt)
if '\n' not in raw_str:
return raw_str
self.cached_lines = raw_str.split('\n')
return self.cached_lines.pop(0)
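# Minimal usage sketch (not part of the original module): wire `setup` to a dummy
# transport that answers completion requests locally; `fake_send_request` and the
# address tuple below are illustrative assumptions, not a real protocol.
if __name__ == '__main__':
    def fake_send_request(server_address, request_data):
        # pretend the server completes any line by appending a suffix
        if request_data.get('type') == 'complete':
            return {'matches': [request_data['line'] + '_done']}
        return {}
    read_line = setup(fake_send_request, ('127.0.0.1', 0))
    print('you typed:', read_line('demo> '))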
| StarcoderdataPython |
3226882 | # ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: <NAME>
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
import xml.etree.ElementTree as ET
from typing import Callable, Dict, List, Mapping, Sequence, TYPE_CHECKING
from chb.app.BasicBlock import BasicBlock
import chb.util.fileutil as UF
from chb.arm.ARMDictionary import ARMDictionary
from chb.arm.ARMInstruction import ARMInstruction
from chb.invariants.XXpr import XXpr
if TYPE_CHECKING:
from chb.arm.ARMFunction import ARMFunction
class ARMBlock(BasicBlock):
def __init__(
self,
armf: "ARMFunction",
xnode: ET.Element) -> None:
BasicBlock.__init__(self, xnode)
self._armf = armf
self._instructions: Dict[str, ARMInstruction] = {}
@property
def armfunction(self) -> "ARMFunction":
return self._armf
@property
def armdictionary(self) -> ARMDictionary:
return self.armfunction.armdictionary
@property
def instructions(self) -> Mapping[str, ARMInstruction]:
if len(self._instructions) == 0:
for n in self.xnode.findall("i"):
iaddr = n.get("ia")
if iaddr is None:
raise UF.CHBError("ARM Instruction without address in xml")
self._instructions[iaddr] = ARMInstruction(self, n)
return self._instructions
@property
def call_instructions(self) -> Sequence[ARMInstruction]:
result: List[ARMInstruction] = []
for (ia, instr) in sorted(self.instructions.items()):
if instr.is_call_instruction:
result.append(instr)
return result
@property
def store_instructions(self) -> Sequence[ARMInstruction]:
result: List[ARMInstruction] = []
for (ia, instr) in sorted(self.instructions.items()):
if instr.is_store_instruction:
result.append(instr)
return result
def to_string(
self,
bytes: bool = False,
opcodetxt: bool = True,
opcodewidth: int = 40,
sp: bool = True) -> str:
lines: List[str] = []
for (ia, instr) in sorted(self.instructions.items()):
pinstr = instr.to_string(
bytes=bytes,
opcodetxt=opcodetxt,
opcodewidth=opcodewidth,
sp=sp)
lines.append(str(ia).rjust(10) + " " + pinstr)
return "\n".join(lines)
| StarcoderdataPython |
3394206 | from .models import Subject, SchoolType, SchoolSubType, SchoolLevel, SubjectGroup
import os
import csv
def init_subjects():
"""
    Source: https://docs.google.com/spreadsheets/d/1msJu1AX_-wYLbhvz8rqsZxFMBwX7-xzghCAFHeeBQEI/edit#gid=2044127896
    Sheet: "Číselník předmětů" (subject code list)
"""
subjects_list = []
filepath = os.path.join(os.getcwd(), "data_init", "subjects.csv")
with open(filepath, "r", encoding="utf8") as data:
subjects = csv.DictReader(data)
for subject in subjects:
subjects_list.append(subject)
for subject in subjects_list:
c, created = Subject.objects.get_or_create(code=subject["Zkratka"], defaults={"name": subject["Název"]})
def init_school_level_2021_02():
"""
    Source: https://miro.com/app/board/o9J_lby_RpM=/?moveToWidget=3074457353595062463&cot=14
    and, within it, a sticky note referencing https://www.msmt.cz/file/54480/
    2021-09-21 - School levels - Adjusted according to the Jira task ZU-267
"""
data = """
Mateřská škola
1. stupeň ZŠ
2. stupeň ZŠ / nižší stupně gymnázií
SŠ / SOŠ / SOU / vyšší stupně gymnázií
2. stupeň ZŠ a SŠ
"""
def _get_qs_from_subjects_names(subjects_names: list) -> []:
qs = []
for subject_name in subjects_names:
subject, _ = Subject.objects.get_or_create(name=subject_name)
qs.append(subject)
return qs
first_level_elementary_school_subjects = [
"Čeština",
"Angličtina",
"Němčina",
"Francouzština",
"Španělština",
"Ruština",
"Matematika",
"Informační a komunikační technologie",
"Výtvarná výchova",
"Hudební výchova",
"Výchova ke zdraví",
"Tělesná výchova",
"Člověk a svět práce",
"Výchova demokratického občana",
"Výchova k myšlení v evropských a globálních souvislostech",
"Multikulturní výchova",
"Environmentální výchova",
"Mediální výchova",
]
first_level_elementary_school_subjects_qs = _get_qs_from_subjects_names(first_level_elementary_school_subjects)
second_level_elementary_school_subjects = [
"Čeština",
"Angličtina",
"Němčina",
"Francouzština",
"Španělština",
"Ruština",
"Matematika",
"Informační a komunikační technologie",
"Dějepis",
"Občanská výchova",
"Fyzika",
"Chemie",
"Přírodopis / Biologie",
"Zeměpis",
"Hudební výchova",
"Výtvarná výchova",
"Výchova ke zdraví",
"Tělesná výchova",
"Člověk a svět práce",
"Dramatická výchova",
"Etická výchova",
"Filmová / audiovizuální výchova",
"Taneční a pohybová výchova",
"Osobnostní a sociální výchova",
"Výchova demokratického občana",
"Výchova k myšlení v evropských a globálních souvislostech",
"Multikulturní výchova",
"Environmentální výchova",
"Mediální výchova",
"Konverzace",
]
second_level_elementary_school_subjects_qs = _get_qs_from_subjects_names(second_level_elementary_school_subjects)
high_school_subjects = [
"Čeština",
"Angličtina",
"Němčina",
"Francouzština",
"Španělština",
"Ruština",
"Matematika",
"Informační a komunikační technologie",
"Dějepis",
"Fyzika",
"Chemie",
"Přírodopis / Biologie",
"Zeměpis",
"Hudební výchova",
"Výtvarná výchova",
"Tělesná výchova",
"Dramatická výchova",
"Etická výchova",
"Filmová / audiovizuální výchova",
"Taneční a pohybová výchova",
"Výchova demokratického občana",
"Výchova k myšlení v evropských a globálních souvislostech",
"Multikulturní výchova",
"Environmentální výchova",
"Mediální výchova",
"Odborné předměty",
"Praktické vyučování",
"Odborný výcvik",
"Základy společenských věd",
"Konverzace",
]
high_school_subjects_qs = _get_qs_from_subjects_names(high_school_subjects)
for name in [i.strip() for i in data.strip().split("\n")]:
school_level, _ = SchoolLevel.objects.get_or_create(name=name)
if name == "<NAME>":
school_level.subjects.add(*first_level_elementary_school_subjects_qs)
elif name == "2. stupeň ZŠ / nižší stupně gymnázií":
school_level.subjects.add(*second_level_elementary_school_subjects_qs)
elif name == "SŠ / SOŠ / SOU / vyšší stupně gymnázií":
school_level.subjects.add(*high_school_subjects_qs)
elif name == "Mateřská škola":
school_level.target_school_level = False
school_level.save()
elif name == "2. stupeň ZŠ a SŠ":
second_level_and_high_school_subjects_qs = Subject.objects.all()
school_level.subjects.add(*second_level_and_high_school_subjects_qs)
school_level.target_school_level = False
school_level.save()
def init_school_type_2021_02():
"""
    Source: https://miro.com/app/board/o9J_lby_RpM=/?moveToWidget=3074457353595062463&cot=14
    and the sticky notes listed there, referencing:
* https://www.atlasskolstvi.cz/stredni-skoly
* https://www.zakonyprolidi.cz/cs/2005-13#cast1
"""
types = """
ZŠ
ZŠ speciální
SŠ
Konzervatoř
ZUŠ - základní umělecká škola
"""
subtypes = """
gymnázium
střední odborná škola
střední odborné učiliště
střední průmyslová škola
střední zemědělská škola
střední zahradnická škola
střední vinařská škola
střední lesnická škola
střední rybářská škola
střední zdravotnická škola
hotelová škola
střední pedagogická škola
střední umělecká škola
střední uměleckoprůmyslová škola
obchodní akademie
odborná škola
odborné učiliště
praktická škola
"""
for name in [i.strip() for i in types.strip().split("\n")]:
SchoolType.objects.get_or_create(name=name)
ss_type = SchoolType.objects.get(name="SŠ")
for name in [i.strip() for i in subtypes.strip().split("\n")]:
SchoolSubType.objects.get_or_create(name=name, type=ss_type)
def init_subject_group():
"""
Init for subject group
Data source: https://docs.google.com/spreadsheets/d/1msJu1AX_-wYLbhvz8rqsZxFMBwX7-xzghCAFHeeBQEI/edit#gid=443235263
    Sheet: "Skupiny předmětů" (subject groups)
First row removed
"""
subject_group_list = []
filepath = os.path.join(os.getcwd(), "data_init", "subject_groups.csv")
with open(filepath, "r", encoding="utf8") as data:
subject_groups = csv.DictReader(data)
for subject_group in subject_groups:
subject_group_list.append(subject_group)
for subject_group in subject_group_list:
c, created = SubjectGroup.objects.get_or_create(name=subject_group["Skupina předmětů"])
subject = Subject.objects.get(code=subject_group["Předmět"])
subject.subject_group = c
subject.save()
| StarcoderdataPython |
3314455 | <gh_stars>0
'''
function to test pretrained models on the test set and show the graph
of actual values and predictions
'''
import pandas as pd
from furiosanet import test, get_layers_from_file
def main():
'''
test models saved in the csv
'''
models = pd.read_csv("model-evaluation.csv", index_col=0).index.tolist()
for model in models:
weights_file = "weights/automated/" + model
layers = get_layers_from_file(weights_file)
test(weights_file, layers, show_fig=False)
if __name__ == "__main__":
main()
| StarcoderdataPython |
4828988 | <filename>web/teenage_jinja_turtles/challenge/views/errors.py
from flask import Blueprint, abort
import logging as log
logging = log.getLogger('gunicorn.error')
error = Blueprint('error', __name__)
@error.app_errorhandler(400)
def handle_400(error):
logging.warning(error)
return error.description or 'Bad request mate'
@error.app_errorhandler(500)
def handle_500(error):
logging.error(error)
return \
"""
<p class="alert alert-warning text-center " id="res">
What did you do? I'm now dying...
</p>
""" | StarcoderdataPython |
86402 | <filename>HandGesture.py
import cv2
import numpy as np
import hogsvm2
import pickle
import time
from skimage import feature
def HOG(img, imgResize=(64, 128), bin=9, cell=(8, 8), block=(2, 2), norm="L2", sqrt=True, visualize=False):
if visualize == False:
hog = feature.hog( cv2.resize( img, imgResize),
orientations=bin,
pixels_per_cell=cell,
cells_per_block=block,
block_norm=norm,
transform_sqrt=sqrt,
visualise=visualize)
return hog
else:
hog, hogImg = feature.hog( cv2.resize( img, imgResize),
orientations=bin,
pixels_per_cell=cell,
cells_per_block=block,
block_norm=norm,
transform_sqrt=sqrt,
visualise=visualize)
return hog, hogImg
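# Minimal usage sketch (illustrative only; 'gesture.png' is an assumed file name):
#   sample = cv2.imread('gesture.png', cv2.IMREAD_GRAYSCALE)
#   descriptor = HOG(sample)  # 64x128 window, 9 bins, 8x8 cells, 2x2 blocks -> 3780 values
#   print(descriptor.shape)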
def trackHand(img, loadModel):
YTest = [["G1"],["G2"],["G3"],["G4"],["G5"],["G6"],["G7"],["G8"],["G9"],["G10"]]
startTime = time.time()
grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cropWindow = grayImg[0:64*4, 0:128*4]
mList = []
for i in range(len(YTest)):
result = loadModel.score([HOG(cropWindow)], YTest[i])
if result != 0:
return YTest[i]
def text2Num(text):
num = 0
if text == 'G1':
num = 1
if text == 'G2':
num = 2
if text == 'G3':
num = 3
if text == 'G4':
num = 4
if text == 'G5':
num = 5
if text == 'G6':
num = 6
if text == 'G7':
num = 7
if text == 'G8':
num = 8
if text == 'G9':
num = 9
if text == 'G10':
num = 10
return num
def show_webcam(mirror=True):
try:
open("svc.sav", 'rb')
except IOError:
tranModel = handGestureModelTraining("acquisitions")
loadModel = pickle.load(open("svc.sav", 'rb'))
cam = cv2.VideoCapture(0)
startTime = time.time()
while True:
endTime = time.time()
if endTime - startTime >= 0.25:
startTime = time.time()
else:
continue
ret_val, img = cam.read()
img = np.float32(img) / 255.0
if mirror:
img = cv2.flip(img, 1)
(x, y, u, v) = (0, 0, 64*4+200, 128*4)
cv2.rectangle(img, (x, y), (x + u, y + v), (255, 0, 0), 2)
text = str(trackHand(img, loadModel)[0])
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, str(text2Num(text)), (64*4+900, 100), font, 4, (255,255,255), 2, cv2.LINE_AA)
cv2.imshow('NormalCam', img)
cv2.moveWindow('NormalCam', 0, 0)
if cv2.waitKey(1) == 27:
break
cv2.destroyAllWindows()
def main():
show_webcam()
main()
| StarcoderdataPython |
60500 | #!/usr/bin/env python3
import hashlib
import re
import requests
import sys
from pprint import pprint as pp
from .exceptions import PCloudException
from .connection import AbstractPCloudConnection
from .pcloudbin import PCloudBinaryConnection
PCLOUD_SERVER_SUFFIX = '.pcloud.com' # only allow downloads from pcloud servers
class PCloudAPIMetaclass(type):
@classmethod
def __prepare__(cls, name, bases):
methods = """
getaudiolink getpubziplink deletefolder getvideolink
file_checksum cancelsharerequest getziplink currentserver
sendverificationemail file_lock file_pwrite getpublinkdownload
file_truncate getpubthumblink getthumb listpublinks listshares
getpubaudiolink savepubthumb deletefile lostpassword
revertrevision resetpassword acceptshare userinfo diff
feedback uploadprogress listrevisions copypubfile copytolink
verifyemail getdigest file_write renamefile getthumbslinks
file_close createuploadlink notifyuploadlink getfilelink
changepassword savezip getpubthumb getthumblink file_pread
renamefolder copyfile file_seek gettreepublink deletepublink
checksumfile verifyitunespurchase supportedlanguages
gethlslink uploadfile file_open savepubzip showpublink
listplshort getfolderpublink uploadtolink createfolder
savethumb file_pread_ifmod setlanguage getpubzip
deleteuploadlink showuploadlink getzip listitunesproducts
sharefolder register declineshare sharerequestinfo
listfolder file_read file_size downloadfile invite
getcertificate changeuploadlink changeshare changepublink
listuploadlinks normalizehash getpubthumbslinks
uploadlinkprogress removeshare getfilepublink
deletefolderrecursive
""".strip().split()
return {method :
(lambda method :
(lambda self, **kwargs:
self.make_request(method, **kwargs))
)(method)
for method in methods}
class PCloudAPI(metaclass=PCloudAPIMetaclass):
"""A stripped down of the PCloudAPI.
All pcloud api methods are available as .method shortcut for
make_request(method, ...).
Exceptions that can be raised during correct operation:
(PCloudException, requests.RequestException, IOError)
"""
def __init__(self, connection=PCloudBinaryConnection, debug=False):
"""Initializes the API.
connection can be either a concrete class of AbstractPCloudConnection
or an AbstractPCloudConnection-derived object.
If debug is true dumps the parameters
"""
if issubclass(connection, AbstractPCloudConnection):
connection = connection().connect()
assert isinstance(connection, AbstractPCloudConnection), \
("PCloud instance expected, got %s" % connection.__class__)
self.connection = connection
self.debug = debug
def make_request(self, method, check_result=True, **params):
"""Performs send_command through the connection.
:param method: the method to call
:param **params: the parameters for the connection
:param _data: file data in the form of bytes or stream of bytes
:param check_result: check that the ['result'] == 0 and raise if not
:returns response in the form of a dictionary
:raises PCloudException
"""
if self.debug:
pp((method, params), stream=sys.stderr)
response = self.connection.send_command(method, **params)
if self.debug:
pp(response, stream=sys.stderr)
if check_result:
result = response.get('result', None)
if result != 0:
raise PCloudException(result_code=result)
return response
def login(self, username, password):
"""Perform login though the connection.
:param username: username
:param password: password
:returns authentication token
Also sets .auth and in turn .connection.auth to the returned token.
"""
digest = self.make_request('getdigest')['digest']
passworddigest = hashlib.sha1(
(password +
hashlib.sha1(username.lower().encode('utf-8')
).hexdigest().lower() +
digest).encode('utf-8')
).hexdigest()
auth = self.make_request('userinfo',
getauth=1,
username=username,
digest=digest,
passworddigest=passworddigest)['auth']
self.auth = auth
return auth
def get_folderid(self, path):
return self.make_request('listfolder',
path=path,
nofiles=1,
)['metadata']['folderid']
def create_directory(self, path):
"""Creates directory recursively.
Does not raise any errors if the file exists.
"""
if path == '/':
return # HACK: pcloud fails otherwise
if path == "":
return # nothing to do
# really ugly, sadly there is no mkdir -p
try:
self.make_request('createfolder', path=path)
except PCloudException as e:
if e.result_code == 2002:
# parent does not exist
# stack danger
self.create_directory(re.sub('(^/?|/+)[^/]+/?$', '', path))
self.make_request('createfolder', path=path)
elif e.result_code == 2004:
# file/folder exists, assume everything is OK
pass
else:
raise
def download(self, remote_path, local_path, progress_callback=None,
enforced_server_suffix=PCLOUD_SERVER_SUFFIX):
"""Downloads file from remote_path to local_path.
:param progress_callback: called each time with the number of bytes
written in the iteration
:param enforced_server_suffix: only allow downloads from servers having
            the expected suffix (this, together with SSL, prevents downloading
            from a non-pcloud-controlled resource)
:returns pcloud api response
"""
response = self.make_request('getfilelink',
path=remote_path,
forcedownload=1)
server = response['hosts'][0] # should be the closest server
if enforced_server_suffix:
if '/' in server or not server.lower().endswith(enforced_server_suffix):
raise ValueError(
"Received download server {!r} which does not match expected suffix {!r}".format(
server, enforced_server_suffix
)
)
url = "{protocol}://{server}:{port}{path}".format(
protocol=self.connection.use_ssl and 'https' or 'http',
server=server,
port=self.connection.use_ssl and 443 or 80,
path=response['path']
)
r = requests.get(url, stream=True, allow_redirects=False, timeout=self.connection.timeout)
r.raise_for_status()
with open(local_path, 'wb') as fd:
for chunk in r.iter_content(8192):
written = fd.write(chunk)
if progress_callback:
progress_callback(written)
return response
def upload(self, local_path, remote_path,
create_parent=True, progress_callback=None):
"""Uploads file from local_path to remote_path.
:param create_parent: whether to create the parent
:param progress_callback: called each time with the number of bytes
written in the iteration
:returns pcloud api response
"""
remote_dir, filename = remote_path.rsplit('/', 1)
if create_parent:
self.create_directory(remote_dir)
with open(local_path, 'rb') as fd:
response = self.make_request('uploadfile',
_data=fd,
path=remote_dir or '/',
filename=filename,
nopartial=1,
_data_progress_callback=progress_callback)
if not response['fileids']:
raise PCloudException("Upload failed, no files reported back")
return response
def exists_file(self, remote_path):
"""Checks if file exists. Does not work for folders."""
try:
self.make_request('checksumfile',
path=remote_path)
return True
except PCloudException as e:
if e.result_code in [2002, 2009]:
return False
else:
raise
def delete_file(self, remote_path):
"""Delete file at remote_path."""
try:
self.make_request('deletefile',
path=remote_path)
except PCloudException as e:
if e.result_code in [2002, 2009]:
return False
else:
raise
@property
def auth(self):
return self.connection.auth
@auth.setter
def auth(self, auth):
self.connection.auth = auth
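# Minimal usage sketch (placeholder credentials and paths, not part of the original module):
# log in over the binary connection, upload a local file, then fetch it back.
if __name__ == '__main__':
    api = PCloudAPI(debug=True)
    api.login('user@example.com', 'secret')                      # placeholder account
    api.upload('/tmp/report.txt', '/backups/report.txt')         # creates /backups if missing
    api.download('/backups/report.txt', '/tmp/report_copy.txt')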
| StarcoderdataPython |
1709881 | from django.apps import AppConfig
class IncidentsConfig(AppConfig):
name = 'incidents'
| StarcoderdataPython |
3236200 | <filename>Python/BEGINNER/1005.py
A = float(input())
A = round(A,1)
B = float(input())
B = round(B,1)
MEDIA = ((A * 3.5)+(B * 7.5))/(3.5+7.5)
print(MEDIA) | StarcoderdataPython |
1612773 | '''
>>> student_function('example')
Traceback (most recent call last):
...
ValueError
'''
from test_util import doctester
test_passed = doctester("student_module", total_points=3)
| StarcoderdataPython |
168326 | import datetime
import json
from source.util.util_base.db import (get_multi_data, get_single_value,
update_data)
from source.util.util_data.basic_info import BasicInfo
class NoteData:
def __init__(self, db_conn):
self.db_conn = db_conn
async def note_insert(self, main_ts_code, ts_code, freq_code, trade_date, note):
sql = """
insert into future_note_data(main_ts_code, ts_code, freq_code, trade_date, note, update_date) values ($1, $2, $3, $4, $5, $6)
"""
args = [main_ts_code, ts_code, freq_code.value, trade_date, note, datetime.datetime.now()]
await update_data(self.db_conn, sql, args)
async def get_note(self, main_ts_code, start_date, end_date):
sql = """
select main_ts_code, ts_code, freq_code, trade_date, note from
(select main_ts_code, ts_code, freq_code, trade_date, note from future_note_data
where main_ts_code = $1 and trade_date between $2 and $3
union
select main_ts_code, ts_code, freq_code, trade_date, note from future_note_data
where main_ts_code = 'common' and trade_date between $2 and $3) a
order by trade_date, ts_code, freq_code
"""
args = [main_ts_code, start_date, end_date]
result_ori = await get_multi_data(self.db_conn, sql, args)
result = []
for main_ts_code, ts_code, freq_code, trade_date, note in result_ori:
result.append({
"main_ts_code": main_ts_code,
"ts_code": ts_code,
"freq_code": freq_code,
"trade_date": trade_date,
"note": note
})
return result
async def bs_note_insert(self, main_ts_code, ts_code, freq_code, trade_date, trade_type, number, point, note):
sql = """
insert into future_bs_note_data(main_ts_code, ts_code, freq_code, trade_date, trade_type, number, point, note, update_date) values ($1, $2, $3, $4, $5, $6, $7, $8, $9)
"""
args = [main_ts_code, ts_code, freq_code.value, trade_date, trade_type, number, point, note, datetime.datetime.now()]
await update_data(self.db_conn, sql, args)
async def get_bs_note(self, main_ts_code, start_date, end_date):
sql = """
select main_ts_code, ts_code, freq_code, trade_date, trade_type, number, point, note from future_bs_note_data
where main_ts_code = $1 and trade_date between $2 and $3 order by trade_date, ts_code, freq_code
"""
args = [main_ts_code, start_date, end_date]
result_ori = await get_multi_data(self.db_conn, sql, args)
result = []
for main_ts_code, ts_code, freq_code, trade_date, trade_type, number, point, note in result_ori:
result.append({
"main_ts_code": main_ts_code,
"ts_code": ts_code,
"freq_code": freq_code,
"trade_date": trade_date,
"trade_type": trade_type,
"number": number,
"point": point,
"note": note
})
return result
async def get_json_data(self, key_name):
sql = """
select data::json from json_data
where name = $1
"""
args = [key_name]
result = await get_single_value(self.db_conn, sql, args)
result = json.loads(result)
return result
async def get_strategy_result_data(self, trade_date):
sql = """
select a.ts_code, a.main_ts_code, b.name, a.strategy_code, a.freq_code, a.bs_flag from strategy_result a left join
(select ts_code, name from s_info
union
select ts_code, name from future_basic_info_data) b
on a.ts_code = b.ts_code
where a.date=$1 order by a.strategy_code, a.ts_code, a.main_ts_code, a.freq_code, a.bs_flag
"""
args = [trade_date]
result_ori = await get_multi_data(self.db_conn, sql, args)
result = {}
for ts_code, main_ts_code, name, strategy_code, freq_code, bs_flag in result_ori:
result.setdefault(strategy_code, []).append({
"ts_code": ts_code,
"main_ts_code": main_ts_code,
"name": name,
"freq_code": freq_code,
"bs_flag": bs_flag
})
return result
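# Minimal usage sketch (assumed asyncpg-style connection and a hypothetical FreqCode enum):
#   async def demo(db_conn):
#       notes = NoteData(db_conn)
#       await notes.note_insert('RB.CFX', 'RB2205.SHF', FreqCode.DAY, date(2022, 1, 4), 'breakout watch')
#       recent = await notes.get_note('RB.CFX', date(2022, 1, 1), date(2022, 1, 31))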
| StarcoderdataPython |
3331593 | <gh_stars>1-10
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
from swarms.commons.utils import EzPickle
from swarms import base
# from ma_envs.envs.environment import MultiAgentEnv
from swarms.agents.point_agents.pursuer_agent import PointAgent
from swarms.agents.point_agents.evader_agent import Evader
from swarms.commons import utils as U
import networkx as nwx
import itertools
try:
import matplotlib
# matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import matplotlib.animation as mpla
from matplotlib.patches import Wedge
from matplotlib.patches import RegularPolygon
import matplotlib.patches as patches
except:
pass
class PursuitEvasionEnv(gym.Env, EzPickle):
metadata = {'render.modes': ['human', 'animate']}
def __init__(self,
nr_pursuers=5,
nr_evaders=1,
obs_mode='2D_rbf',
comm_radius=40,
world_size=100,
distance_bins=8,
bearing_bins=8,
torus=True,
dynamics='direct'):
EzPickle.__init__(self, nr_pursuers, nr_evaders, obs_mode, comm_radius, world_size, distance_bins,
bearing_bins, torus, dynamics)
self.nr_agents = nr_pursuers
self.nr_evaders = 1
self.obs_mode = obs_mode
self.distance_bins = distance_bins
self.bearing_bins = bearing_bins
self.comm_radius = comm_radius
self.obs_radius = comm_radius / 2
self.torus = torus
self.dynamics = dynamics
self.world_size = world_size
self.world = base.World(world_size, torus, dynamics)
self.world.agents = [
PointAgent(self) for _ in
range(self.nr_agents)
]
[self.world.agents.append(Evader(self)) for _ in range(self.nr_evaders)]
self._reward_mech = 'global'
self.timestep = None
self.hist = None
self.ax = None
self.obs_comm_matrix = None
if self.obs_mode == 'sum_obs_learn_comm':
self.world.dim_c = 1
# self.seed()
@property
def state_space(self):
return spaces.Box(low=-10., high=10., shape=(self.nr_agents * 3,), dtype=np.float32)
@property
def observation_space(self):
return self.agents[0].observation_space
@property
def action_space(self):
return self.agents[0].action_space
@property
def reward_mech(self):
        return self._reward_mech  # returning self.reward_mech here would recurse forever
@property
def agents(self):
return self.world.policy_agents
def get_param_values(self):
return self.__dict__
def seed(self, seed=None):
self.np_random, seed_ = seeding.np_random(seed)
return [seed_]
@property
def timestep_limit(self):
return 1024
@property
def is_terminal(self):
if self.timestep >= self.timestep_limit:
if self.ax:
plt.close()
return True
return False
def reset(self):
self.timestep = 0
# self.ax = None
# self.nr_agents = 5 # np.random.randint(2, 10)
self.world.agents = [
PointAgent(self)
for _ in
range(self.nr_agents)
]
self.world.agents.append(Evader(self))
self.obs_comm_matrix = self.obs_radius * np.ones([self.nr_agents + 1, self.nr_agents + 1])
self.obs_comm_matrix[0:-self.nr_evaders, 0:-self.nr_evaders] = self.comm_radius
pursuers = np.random.rand(self.nr_agents, 3)
pursuers[:, 0:2] = self.world_size * ((0.95 - 0.05) * pursuers[:, 0:2] + 0.05)
pursuers[:, 2:3] = 2 * np.pi * pursuers[:, 2:3]
evader = (0.95 - 0.05) * np.random.rand(self.nr_evaders, 2) + 0.05
evader = self.world_size * evader
self.world.agent_states = pursuers
self.world.landmark_states = evader
self.world.reset()
if self.obs_radius < self.world_size * np.sqrt(2):
sets = self.graph_feature()
feats = [p.graph_feature for p in self.agents]
if self.world.dim_c > 0:
messages = np.zeros([self.nr_agents, 1])
else:
messages = []
obs = []
for i, bot in enumerate(self.world.policy_agents):
# bot_in_subset = [list(s) for s in sets if i in s]
# [bis.remove(i) for bis in bot_in_subset]
ob = bot.get_observation(self.world.distance_matrix[i, :],
self.world.angle_matrix[i, :],
self.world.angle_matrix[:, i],
feats,
np.zeros([self.nr_agents, 2])
)
obs.append(ob)
return obs
def step(self, actions):
self.timestep += 1
assert len(actions) == self.nr_agents
# print(actions)
clipped_actions = np.clip(actions, self.agents[0].action_space.low, self.agents[0].action_space.high)
for agent, action in zip(self.agents, clipped_actions):
agent.action.u = action[0:2]
if self.world.dim_c > 0:
agent.action.c = action[2:]
self.world.step()
if self.obs_radius < self.world_size * np.sqrt(2):
sets = self.graph_feature()
feats = [p.graph_feature for p in self.agents]
if self.world.dim_c > 0:
messages = clipped_actions[:, 2:]
else:
messages = []
velocities = np.vstack([agent.state.w_vel for agent in self.agents])
next_obs = []
for i, bot in enumerate(self.world.policy_agents):
# print(hop_counts)
# bot_in_subset = [list(s) for s in sets if i in s]
# [bis.remove(i) for bis in bot_in_subset]
ob = bot.get_observation(self.world.distance_matrix[i, :],
self.world.angle_matrix[i, :],
self.world.angle_matrix[:, i],
feats,
velocities
)
next_obs.append(ob)
rewards = self.get_reward(actions)
done = self.is_terminal
if rewards[0] > -1 / self.obs_radius: # distance of 1 in world coordinates, scaled by the reward scaling factor
done = True
# if done and self.timestep < self.timestep_limit:
# rewards = 100 * np.ones((self.nr_agents,))
# info = dict()
info = {'pursuer_states': self.world.agent_states,
'evader_states': self.world.landmark_states,
'state': np.vstack([self.world.agent_states[:, 0:2], self.world.landmark_states]),
'actions': actions}
return next_obs, rewards, done, info
def get_reward(self, actions):
r = -np.minimum(np.min(self.world.distance_matrix[-1, :-self.nr_evaders]), self.obs_radius) / self.obs_radius # - 0.05 * np.sum(np.mean(actions**2, axis=1))
# r = -np.minimum(np.partition(self.world.distance_matrix[-1, :-self.nr_evaders], 2)[2], self.obs_radius) / self.world_size
# r = - 1
# print(np.min(self.world.distance_matrix[-1, :-self.nr_evaders]))
r = np.ones((self.nr_agents,)) * r
return r
def graph_feature(self):
adj_matrix = np.array(self.world.distance_matrix < self.obs_comm_matrix, dtype=float)
# visibles = np.sum(adj_matrix, axis=0) - 1
# print("mean neighbors seen: ", np.mean(visibles[:-1]))
# print("evader seen by: ", visibles[-1])
sets = U.dfs(adj_matrix, 2)
g = nwx.Graph()
for set_ in sets:
l_ = list(set_)
if self.nr_agents in set_:
# points = self.nodes[set_, 0:2]
# dist_matrix = self.get_euclid_distances(points, matrix=True)
# determine distance and adjacency matrix of subset
dist_matrix = np.array([self.world.distance_matrix[x] for x in list(itertools.product(l_, l_))]).reshape(
[len(l_), len(l_)])
obs_comm_matrix = np.array(
[self.obs_comm_matrix[x] for x in list(itertools.product(l_, l_))]).reshape(
[len(l_), len(l_)])
adj_matrix_sub = np.array((0 <= dist_matrix) & (dist_matrix < obs_comm_matrix), dtype=float)
connection = np.where(adj_matrix_sub == 1)
edges = [[x[0], x[1]] for x in zip([l_[c] for c in connection[0]], [l_[c] for c in connection[1]])]
g.add_nodes_from(l_)
g.add_edges_from(edges)
for ind, e in enumerate(edges):
g[e[0]][e[1]]['weight'] = dist_matrix[connection[0][ind], connection[1][ind]]
for i in range(self.nr_agents):
try:
self.agents[i].graph_feature = \
nwx.shortest_path_length(g, source=i, target=self.nr_agents, weight='weight')
except:
self.agents[i].graph_feature = np.inf
return sets
def render(self, mode='human'):
if mode == 'animate':
output_dir = "/tmp/video/"
if self.timestep == 0:
import shutil
import os
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
os.makedirs(output_dir, exist_ok=True)
if not self.ax:
fig, ax = plt.subplots()
ax.set_aspect('equal')
ax.set_xlim((0, self.world_size))
ax.set_ylim((0, self.world_size))
self.ax = ax
else:
self.ax.clear()
self.ax.set_aspect('equal')
self.ax.set_xlim((0, self.world_size))
self.ax.set_ylim((0, self.world_size))
comm_circles = []
obs_circles = []
self.ax.scatter(self.world.landmark_states[:, 0], self.world.landmark_states[:, 1], c='r', s=20)
self.ax.scatter(self.world.agent_states[:, 0], self.world.agent_states[:, 1], c='b', s=20)
for i in range(self.nr_agents):
comm_circles.append(plt.Circle((self.world.agent_states[i, 0],
self.world.agent_states[i, 1]),
self.comm_radius, color='g', fill=False))
self.ax.add_artist(comm_circles[i])
obs_circles.append(plt.Circle((self.world.agent_states[i, 0],
self.world.agent_states[i, 1]),
self.obs_radius, color='g', fill=False))
self.ax.add_artist(obs_circles[i])
# self.ax.text(self.world.agent_states[i, 0], self.world.agent_states[i, 1],
# "{}".format(i), ha='center',
# va='center', size=20)
# circles.append(plt.Circle((self.evader[0],
# self.evader[1]),
# self.evader_radius, color='r', fill=False))
# self.ax.add_artist(circles[-1])
if mode == 'human':
plt.pause(0.01)
elif mode == 'animate':
if self.timestep % 1 == 0:
plt.savefig(output_dir + format(self.timestep//1, '04d'))
if self.is_terminal:
import os
os.system("ffmpeg -r 10 -i " + output_dir + "%04d.png -c:v libx264 -pix_fmt yuv420p -y /tmp/out.mp4")
if __name__ == '__main__':
nr_pur = 10
env = PursuitEvasionEnv(nr_pursuers=nr_pur,
nr_evaders=1,
obs_mode='sum_obs_no_ori',
comm_radius=200 * np.sqrt(2),
world_size=100,
distance_bins=8,
bearing_bins=8,
dynamics='unicycle',
torus=True)
for ep in range(1):
o = env.reset()
dd = False
for t in range(1024):
a = 1 * np.random.randn(nr_pur, env.world.agents[0].dim_a)
a[:, 0] = 1
# a[:, 1] = 0
o, rew, dd, _ = env.step(a)
# if rew.sum() < 0:
# print(rew[0])
if t % 1 == 0:
env.render()
if dd:
break
| StarcoderdataPython |
1610945 | """Parser for BlueMaestro BLE advertisements."""
import logging
from struct import unpack
_LOGGER = logging.getLogger(__name__)
def parse_bluemaestro(self, data, source_mac, rssi):
"""Parse BlueMaestro advertisement."""
msg_length = len(data)
firmware = "BlueMaestro"
device_id = data[4]
bluemaestro_mac = source_mac
msg = data[5:]
if msg_length == 18 and device_id == 0x17:
# BlueMaestro Tempo Disc THD
device_type = "Tempo Disc THD"
# pylint: disable=unused-variable
(batt, time_interval, log_cnt, temp, humi, dew_point, mode) = unpack("!BhhhHhH", msg)
result = {
"temperature": temp / 10,
"humidity": humi / 10,
"battery": batt,
"dewpoint": dew_point / 10
}
elif msg_length == 18 and device_id == 0x1b:
# BlueMaestro Tempo Disc THPD (sends P instead of D, no D is send)
device_type = "Tempo Disc THPD"
# pylint: disable=unused-variable
(batt, time_interval, log_cnt, temp, humi, press, mode) = unpack("!BhhhHhH", msg)
result = {
"temperature": temp / 10,
"humidity": humi / 10,
"battery": batt,
"pressure": press / 10
}
else:
if self.report_unknown == "BlueMaestro":
_LOGGER.info(
"BLE ADV from UNKNOWN BlueMaestro DEVICE: RSSI: %s, MAC: %s, ADV: %s",
rssi,
to_mac(source_mac),
data.hex()
)
return None
# check for MAC presence in whitelist, if needed
if self.discovery is False and bluemaestro_mac not in self.sensor_whitelist:
_LOGGER.debug("Discovery is disabled. MAC: %s is not whitelisted!", to_mac(bluemaestro_mac))
return None
result.update({
"rssi": rssi,
"mac": ''.join('{:02X}'.format(x) for x in bluemaestro_mac[:]),
"type": device_type,
"packet": log_cnt,
"firmware": firmware,
"data": True
})
return result
def to_mac(addr: int):
"""Return formatted MAC address"""
return ':'.join(f'{i:02X}' for i in addr)
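# Minimal usage sketch (synthetic advertisement, not captured from a real sensor): build an
# 18-byte Tempo Disc THD frame and parse it with a dummy `self`; header bytes and MAC are made up.
if __name__ == '__main__':
    import struct
    from types import SimpleNamespace
    header = b'\x00\x00\x00\x00\x17'  # data[4] == 0x17 selects the Tempo Disc THD branch
    payload = struct.pack("!BhhhHhH", 100, 15, 1, 215, 450, 120, 0)  # batt, interval, log_cnt, temp, humi, dew point, mode
    dummy = SimpleNamespace(report_unknown=False, discovery=True, sensor_whitelist=[])
    print(parse_bluemaestro(dummy, header + payload, b'\x11\x22\x33\x44\x55\x66', -60))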
| StarcoderdataPython |
3343011 | from django.contrib.auth import views as auth_view
from django.urls import path
from .views import login_view, logout_view, signup_view
# ****** Url Patterns ******
urlpatterns = [
path('signup/', signup_view, name='signup'),
path('login/', login_view, name='login'),
path('logout/', logout_view, name='logout'),
# *** Password Reset Urls ***
path(
'password-reset/',
auth_view.PasswordResetView.as_view(
template_name='accounts/password-reset.html'
),
name='password_reset',
),
path(
'password-reset/done/',
auth_view.PasswordResetDoneView.as_view(
template_name='accounts/password-reset-done.html'
),
name='password_reset_done',
),
path(
'password-reset-confirm/<uidb64>/<token>/',
auth_view.PasswordResetConfirmView.as_view(
template_name='accounts/password-reset-confirm.html'
),
name='password_reset_confirm',
),
path(
'password-reset-complete/',
auth_view.PasswordResetCompleteView.as_view(
template_name='accounts/password-reset-complete.html'
),
name='password_reset_complete',
),
]
| StarcoderdataPython |
156023 | <reponame>ckamtsikis/cmssw<filename>JetMETCorrections/MCJet/python/RelValQCD_cfi.py
import FWCore.ParameterSet.Config as cms
readFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles)
readFiles.extend( (
" /store/relval/CMSSW_3_4_0_pre2/RelValQCD_FlatPt_15_3000/GEN-SIM-RECO/MC_3XY_V10-v1/0003/D085615A-A5BD-DE11-8897-0026189437E8.root",
"/store/relval/CMSSW_3_4_0_pre2/RelValQCD_FlatPt_15_3000/GEN-SIM-RECO/MC_3XY_V10-v1/0003/9A67BC35-AFBD-DE11-9FE5-001731AF67B5.root",
"/store/relval/CMSSW_3_4_0_pre2/RelValQCD_FlatPt_15_3000/GEN-SIM-RECO/MC_3XY_V10-v1/0003/5C70929C-C0BD-DE11-B1C6-002618943829.root",
"/store/relval/CMSSW_3_4_0_pre2/RelValQCD_FlatPt_15_3000/GEN-SIM-RECO/MC_3XY_V10-v1/0003/54E06430-B1BD-DE11-BC8C-003048679168.root",
"/store/relval/CMSSW_3_4_0_pre2/RelValQCD_FlatPt_15_3000/GEN-SIM-RECO/MC_3XY_V10-v1/0003/32097BFD-A3BD-DE11-894F-001A92810ADE.root"
)
)
| StarcoderdataPython |
1665268 | #!/bin/python
# Copyright (c) 2015-2017, Open Communications Security
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of ocsbincodec-c nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#-------------------------------------------------------------------------------
# This script generates C++ code that contains examples of Base64 encoded values
# that can be used to test the Base64 implementation.
#
import base64;
import random;
COUNT=16
def gen_random_string(size):
s=''
for i in range(size):
s = s + chr(random.randint(0, 255))
return s
def str_to_c_byte_array(s):
out=''
for i in range(len(s)):
out = out + '0x{0:02X}, '.format(ord(s[i]))
if ((i % 8) == 7):
out = out + '\n\t'
out = out.strip()
if (len(out) > 0):
out = out[0:len(out) - 1]
return '\t' + out
samples=[]
for size in range(1, COUNT + 1):
samples.append(gen_random_string(size))
for s in samples:
print('static const unsigned char Base64CodecTest_SAMPLE_BIN{0}[{0}] = {{'.format(len(s)) )
print(str_to_c_byte_array(s))
print('};')
print('')
print('typedef struct {')
print('\tconst char * encoded;')
print('\tint decodedSize;')
print('\tconst unsigned char * decoded;')
print('} Base64CodecTest_sample_entry_t;')
print('')
print('#define B64_SAMPLES_SIZE {0}'.format(len(samples)))
print('static const Base64CodecTest_sample_entry_t Base64CodecTest_SAMPLES[B64_SAMPLES_SIZE] = {')
for s in samples:
print('\t{{"{0}", sizeof(Base64CodecTest_SAMPLE_BIN{1}), Base64CodecTest_SAMPLE_BIN{1}}},'.format(
base64.b64encode(s),
len(s)))
print('};')
| StarcoderdataPython |
139374 | <reponame>nghiattran/mentality
from lib.neuron import Neuron
class Layer(object):
def __init__(self, setting):
self.neurons = []
if type(setting) is int:
self.name = ''
for i in range(setting):
self.neurons.append(Neuron(self))
elif type(setting) is dict:
try:
self.name = setting['name']
for neuron in setting['neurons']:
self.neurons.append(Neuron(self, neuron))
except:
raise ValueError('Input file is corrupted.')
else:
            raise ValueError('Layer constructor only takes either an integer argument or a dictionary.')
def to_json(self):
return {
'name': self.name,
'neurons': [neuron.to_json() for neuron in self.neurons]
}
def set_name(self, name):
self.name = name
@staticmethod
def from_json(setting):
return Layer(setting)
def activate(self, inputs = None):
if inputs is None:
return [self.neurons[i].activate() for i in range(len(self.neurons))]
if len(inputs) != len(self.neurons):
raise ValueError('Input size does not match number of neurons.')
return [self.neurons[i].activate(inputs[i]) for i in range(len(self.neurons))]
def propagate(self, learning_rate, outputs = None, momentum=0):
if outputs is None:
return [
self.neurons[i].propagate(learning_rate=learning_rate, output=None, momentum=momentum)
for i in range(len(self.neurons))
]
if len(outputs) != len(self.neurons):
raise ValueError('Output size does not match number of neurons.')
return [
self.neurons[i].propagate(learning_rate=learning_rate, output=outputs[i], momentum=momentum)
for i in range(len(self.neurons))
]
def project(self, layer):
if type(layer) is not Layer:
raise ValueError('Projected object is not a Layer instance')
for neuron in self.neurons:
for projected_neuron in layer.neurons:
neuron.connect(projected_neuron)
def get_connections(self):
connections = []
for neuron in self.neurons:
connections += neuron.next
return connections | StarcoderdataPython |
1748248 | #
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging # noqa
import mock
from rally.common import logging as rally_logging
from tests.unit import test
class SetUpLogTestCase(test.TestCase):
@mock.patch("rally.common.logging.CONF")
@mock.patch("rally.common.logging.handlers")
@mock.patch("rally.common.logging.oslogging")
def test_setup(self, mock_oslogging, mock_handlers, mock_conf):
proj = "fakep"
version = "fakev"
mock_handlers.ColorHandler.LEVEL_COLORS = {
logging.DEBUG: "debug_color"}
mock_conf.rally_debug = True
rally_logging.setup(proj, version)
self.assertIn(logging.RDEBUG, mock_handlers.ColorHandler.LEVEL_COLORS)
self.assertEqual(
mock_handlers.ColorHandler.LEVEL_COLORS[logging.DEBUG],
mock_handlers.ColorHandler.LEVEL_COLORS[logging.RDEBUG])
mock_oslogging.setup.assert_called_once_with(mock_conf, proj, version)
mock_oslogging.getLogger(None).logger.setLevel.assert_called_once_with(
logging.RDEBUG)
@mock.patch("rally.common.logging.log")
@mock.patch("rally.common.logging.RallyContextAdapter")
@mock.patch("rally.common.logging.oslogging")
def test_getLogger(self, mock_oslogging, mock_rally_context_adapter,
mock_log):
name = "fake"
vers = "fake"
mock_oslogging._loggers = {}
returned_logger = rally_logging.getLogger(name, vers)
self.assertIn(name, mock_oslogging._loggers)
mock_rally_context_adapter.assert_called_once_with(
mock_log.getLogger(name),
{"project": "rally", "version": vers})
self.assertEqual(mock_oslogging._loggers[name], returned_logger)
class RallyContaxtAdapterTestCase(test.TestCase):
@mock.patch("rally.common.logging.log")
@mock.patch("rally.common.logging.oslogging.KeywordArgumentAdapter")
def test_debug(self, mock_keyword_argument_adapter, mock_log):
mock_log.RDEBUG = 123
fake_msg = "fake message"
radapter = rally_logging.RallyContextAdapter(mock.MagicMock(), "fakep")
radapter.log = mock.MagicMock()
radapter.debug(fake_msg)
radapter.log.assert_called_once_with(mock_log.RDEBUG,
fake_msg)
def test__find_caller(self):
radapter = rally_logging.RallyContextAdapter(mock.MagicMock(), "fakep")
self.caller = None
def logging_method():
self.caller = radapter._find_the_caller()
def foo():
logging_method()
foo()
# the number of the line which calls logging_method
lineno = 92
self.assertEqual((__file__, lineno, "logging_method()"), self.caller)
@mock.patch("rally.common.logging.getLogger")
def test__check_args(self, mock_get_logger):
radapter = rally_logging.RallyContextAdapter(mock.MagicMock(), "fakep")
def foo(*args):
radapter._check_args("", *args)
foo()
self.assertFalse(mock_get_logger.called)
foo(1)
# the number of the line which calls foo
lineno = 110
mock_get_logger.assert_called_once_with("%s:%s" % (__file__, lineno))
logger = mock_get_logger.return_value
self.assertEqual(1, logger.warning.call_count)
args = logger.warning.call_args_list[0]
self.assertTrue(args[0][0].startswith("[foo(1)] Do not use"))
@mock.patch("rally.common.logging.getLogger")
def test_exception(self, mock_get_logger):
radapter = rally_logging.RallyContextAdapter(mock.MagicMock(), {})
radapter.log = mock.MagicMock()
radapter.exception("foo")
self.assertFalse(mock_get_logger.called)
radapter.exception(Exception("!2!"))
# the number of the line which calls foo
lineno = 129
mock_get_logger.assert_called_once_with("%s:%s" % (__file__, lineno))
logger = mock_get_logger.return_value
self.assertEqual(1, logger.warning.call_count)
args = logger.warning.call_args_list[0]
self.assertTrue(args[0][0].startswith("[radapter.exception(Exception("
"\"!2!\"))] Do not transmit"))
@mock.patch("rally.common.logging.getLogger")
def test_error(self, mock_get_logger):
radapter = rally_logging.RallyContextAdapter(mock.MagicMock(), {})
radapter.log = mock.MagicMock()
radapter.error("foo", "bar")
# the number of the line which calls foo
lineno = 146
mock_get_logger.assert_called_once_with("%s:%s" % (__file__, lineno))
logger = mock_get_logger.return_value
self.assertEqual(1, logger.warning.call_count)
args = logger.warning.call_args_list[0]
self.assertTrue(args[0][0].startswith("[radapter.error(\"foo\", "
"\"bar\")] Do not use *args "))
class ExceptionLoggerTestCase(test.TestCase):
@mock.patch("rally.common.logging.is_debug")
def test_context(self, mock_is_debug):
# Prepare
mock_is_debug.return_value = True
logger = mock.MagicMock()
exception = Exception()
# Run
with rally_logging.ExceptionLogger(logger, "foo") as e:
raise exception
# Assertions
logger.warning.assert_called_once_with("foo")
logger.exception.assert_called_once_with(exception)
logger.debug.assert_called_once_with(exception)
self.assertEqual(e.exception, exception)
class LogCatcherTestCase(test.TestCase):
# FIXME(pboldin): These are really functional tests and should be moved
# there when the infrastructure is ready
def test_logcatcher(self):
LOG = rally_logging.getLogger("testlogger")
LOG.logger.setLevel(rally_logging.INFO)
with rally_logging.LogCatcher(LOG) as catcher:
LOG.warning("Warning")
LOG.info("Info")
LOG.debug("Debug")
catcher.assertInLogs("Warning")
self.assertRaises(AssertionError, catcher.assertInLogs, "Error")
self.assertEqual(["Warning", "Info"], catcher.fetchLogs())
self.assertEqual(2, len(catcher.fetchLogRecords()))
class CatcherHandlerTestCase(test.TestCase):
@mock.patch("logging.handlers.BufferingHandler.__init__")
def test_init(self, mock_buffering_handler___init__):
catcher_handler = rally_logging.CatcherHandler()
mock_buffering_handler___init__.assert_called_once_with(
catcher_handler, 0)
def test_shouldFlush(self):
catcher_handler = rally_logging.CatcherHandler()
self.assertFalse(catcher_handler.shouldFlush())
def test_emit(self):
catcher_handler = rally_logging.CatcherHandler()
catcher_handler.buffer = mock.Mock()
catcher_handler.emit("foobar")
catcher_handler.buffer.append.assert_called_once_with("foobar")
class LogCatcherUnitTestCase(test.TestCase):
def setUp(self):
super(LogCatcherUnitTestCase, self).setUp()
patcher = mock.patch("rally.common.logging.CatcherHandler")
self.catcher_handler = patcher.start()
self.catcher_handler.return_value.buffer = [
mock.Mock(msg="foo"), mock.Mock(msg="bar")]
self.addCleanup(patcher.stop)
self.logger = mock.Mock()
def test_init(self):
catcher = rally_logging.LogCatcher(self.logger)
self.assertEqual(self.logger.logger, catcher.logger)
self.assertEqual(self.catcher_handler.return_value, catcher.handler)
self.catcher_handler.assert_called_once_with()
def test_enter(self):
catcher = rally_logging.LogCatcher(self.logger)
self.assertEqual(catcher, catcher.__enter__())
self.logger.logger.addHandler.assert_called_once_with(
self.catcher_handler.return_value)
def test_exit(self):
catcher = rally_logging.LogCatcher(self.logger)
catcher.__exit__(None, None, None)
self.logger.logger.removeHandler.assert_called_once_with(
self.catcher_handler.return_value)
def test_assertInLogs(self):
catcher = rally_logging.LogCatcher(self.logger)
self.assertEqual(["foo"], catcher.assertInLogs("foo"))
self.assertEqual(["bar"], catcher.assertInLogs("bar"))
self.assertRaises(AssertionError, catcher.assertInLogs, "foobar")
def test_assertInLogs_contains(self):
catcher = rally_logging.LogCatcher(self.logger)
record_mock = mock.MagicMock()
self.catcher_handler.return_value.buffer = [record_mock]
record_mock.msg.__contains__.return_value = True
self.assertEqual([record_mock.msg], catcher.assertInLogs("foo"))
record_mock.msg.__contains__.assert_called_once_with("foo")
def test_fetchLogRecords(self):
catcher = rally_logging.LogCatcher(self.logger)
self.assertEqual(self.catcher_handler.return_value.buffer,
catcher.fetchLogRecords())
def test_fetchLogs(self):
catcher = rally_logging.LogCatcher(self.logger)
self.assertEqual(
[r.msg for r in self.catcher_handler.return_value.buffer],
catcher.fetchLogs())
class LogTestCase(test.TestCase):
def test_log_task_wrapper(self):
mock_log = mock.MagicMock()
msg = "test %(a)s %(b)s"
class TaskLog(object):
def __init__(self):
self.task = {"uuid": "some_uuid"}
@rally_logging.log_task_wrapper(mock_log, msg, a=10, b=20)
def some_method(self, x, y):
return x + y
t = TaskLog()
self.assertEqual("some_method", t.some_method.__name__)
self.assertEqual(4, t.some_method(2, 2))
params = {"msg": msg % {"a": 10, "b": 20}, "uuid": t.task["uuid"]}
expected = [
mock.call("Task %(uuid)s | Starting: %(msg)s" % params),
mock.call("Task %(uuid)s | Completed: %(msg)s" % params)
]
self.assertEqual(expected, mock_log.mock_calls)
def test_log_deprecated(self):
mock_log = mock.MagicMock()
@rally_logging.log_deprecated("depr42", "1.1.1", mock_log)
def some_method(x, y):
return x + y
self.assertEqual(4, some_method(2, 2))
self.assertIn("some_method()", mock_log.call_args[0][0])
self.assertIn("depr42", mock_log.call_args[0][0])
self.assertIn("1.1.1", mock_log.call_args[0][0])
def test_log_deprecated_args(self):
mock_log = mock.MagicMock()
@rally_logging.log_deprecated_args("ABC42", "0.0.1", ("z",),
mock_log, once=True)
def some_method(x, y, z):
return x + y + z
self.assertEqual(7, some_method(2, 2, z=3))
self.assertIn("ABC42", mock_log.call_args[0][0])
self.assertIn("`z' of `some_method()'", mock_log.call_args[0][0])
self.assertIn("0.0.1", mock_log.call_args[0][0])
mock_log.reset_mock()
self.assertEqual(7, some_method(2, 2, z=3))
self.assertFalse(mock_log.called)
@rally_logging.log_deprecated_args("CBA42", "0.0.1", ("z",),
mock_log, once=False)
def some_method(x, y, z):
return x + y + z
self.assertEqual(7, some_method(2, 2, z=3))
self.assertIn("CBA42", mock_log.call_args[0][0])
mock_log.reset_mock()
self.assertEqual(7, some_method(2, 2, z=3))
self.assertIn("CBA42", mock_log.call_args[0][0])
| StarcoderdataPython |
1688387 | <reponame>foxytrixy-com/foxylib<filename>foxylib/tools/function/loop/loop_tool.py
import time
class LoopTool:
class ExitException(Exception):
pass
@classmethod
def failcount2secs_default(cls, failcount):
secs = min(2 ** failcount, 60)
return secs
@classmethod
def func2loop(cls, func, failcount2secs=None):
if failcount2secs is None:
failcount2secs = cls.failcount2secs_default
failcount = 0
while True:
try:
succeeded = func()
except cls.ExitException:
return
if succeeded:
failcount = 0
continue
time.sleep(failcount2secs(failcount))
failcount += 1
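# Minimal usage sketch (hypothetical polling job): the callable fails twice, so the loop
# backs off (1s, then 2s), then raises ExitException to stop cleanly.
if __name__ == '__main__':
    attempts = {'n': 0}
    def poll_once():
        attempts['n'] += 1
        if attempts['n'] >= 3:
            raise LoopTool.ExitException()
        return False
    LoopTool.func2loop(poll_once)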
| StarcoderdataPython |
188702 | <gh_stars>1-10
from os import path, makedirs
from datetime import datetime
from io import BytesIO
from base64 import b64encode
from urllib.parse import quote
from time import sleep
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
class HtmlLogger:
timestampColumnName = 'Timestamp'
def __init__(self, save_path, filename, overwrite=False):
self.save_path = save_path
self.filename = filename
self.fullPath = '{}/{}.html'.format(save_path, filename)
self.maxTableCellLength = 50
if not path.exists(save_path):
makedirs(save_path)
if (not overwrite) and path.exists(self.fullPath):
with open(self.fullPath, 'r') as f:
content = f.read()
# remove close tags in order to allow writing to data table
for v in ['</body>', '</html>', '</table>']:
idx = content.rfind(v)
# remove tag from string
if idx >= 0:
content = content[:idx] + content[idx + len(v):]
self.head = content
# script already in self.head now, therefore no need it again
self.script = ''
else:
self.head = '<!DOCTYPE html><html><head><style>' \
'table { font-family: gisha; border-collapse: collapse; display: block;}' \
'td, th { border: 1px solid #dddddd; text-align: center; padding: 8px; white-space:pre;}' \
'.collapsible { background-color: #777; color: white; cursor: pointer; padding: 18px; border: none; text-align: left; outline: none; font-size: 15px; }' \
'.active, .collapsible:hover { background-color: #555; }' \
'.content { max-height: 0; overflow: hidden; transition: max-height 0.2s ease-out;}' \
'</style></head>' \
'<body>'
# init collapse script
self.script = '<script> var coll = document.getElementsByClassName("collapsible"); var i; for (i = 0; i < coll.length; i++) { coll[i].addEventListener("click", function() { this.classList.toggle("active"); var content = this.nextElementSibling; if (content.style.maxHeight){ content.style.maxHeight = null; } else { content.style.maxHeight = content.scrollHeight + "px"; } }); } </script>'
self.end = '</body></html>'
self.infoTables = {}
self.dataTable = ''
self.dataTableCols = None
self.nColsDataTable = None
self.dataTableRowsNum = 0
self.nRowsPerColumnsRow = 10
# converts dictionary to rows with nElementPerRow (k,v) elements at most in each row
@staticmethod
def dictToRows(dict, nElementPerRow):
rows = []
row = []
counter = 0
# sort elements by keys name
for k in sorted(dict.keys()):
v = dict[k]
row.append(k)
row.append(v)
counter += 1
if counter == nElementPerRow:
rows.append(row)
row = []
counter = 0
# add last elements
if len(row) > 0:
rows.append(row)
return rows
def setMaxTableCellLength(self, length):
if length > 0:
self.maxTableCellLength = length
def __writeToFile(self):
# concat info tables to single string
infoTablesStr = ''
for title, table in self.infoTables.items():
infoTablesStr += table
# init elements write order to file
writeOrder = [self.head, infoTablesStr, self.dataTable, '</table>', self.script, self.end]
# write elements
with open(self.fullPath, 'w') as f:
for elem in writeOrder:
            if elem != '':
writeSuccess = False
while writeSuccess is False:
try:
# try to write to file
f.write(elem)
writeSuccess = True
except Exception as e:
                            # if the write failed (e.g. no space left on device), wait a while and retry
print('HtmlLogger write failed, error:[{}]'.format(e))
sleep(10 * 60)
def __addRow(self, row):
res = '<tr>'
for v in row:
isTable = False
# check maybe we have a sub-table
if (type(v) is list) and (len(v) > 0) and isinstance(v[0], list):
v = self.__createTableFromRows(v)
isTable = True
# add element or sub-table to current table
content = '{}'.format(v)
# add scroll to cell if content is long
if (isTable is False) and (len(content) > self.maxTableCellLength):
content = '<div style="width: 300px; overflow: auto"> {} </div>'.format(content)
# add content as cell
res += '<td> {} </td>'.format(content)
res += '</tr>'
return res
# recursive function that supports sub-tables
def __createTableFromRows(self, rows):
res = '<table>'
# create rows
for row in rows:
res += self.__addRow(row)
# close table
res += '</table>'
return res
def createInfoTable(self, title, rows):
# open a new table
res = '<button class="collapsible"> {} </button>'.format(title)
res += '<div class="content" style="overflow: auto">'
# add rows
res += self.__createTableFromRows(rows)
# close table
res += '</div><h2></h2>'
return res
# title - a string for table title
# rows - array of rows. each row is array of values.
def addInfoTable(self, title, rows):
# create new table
self.infoTables[title] = self.createInfoTable(title, rows)
# write to file
self.__writeToFile()
# add row to existing info table by its title
def addRowToInfoTableByTitle(self, title, row):
if title in self.infoTables:
table = self.infoTables[title]
valuesToFind = ['</table>']
idx = 0
# walk through the string to the desired position
for v in valuesToFind:
if idx >= 0:
idx = table.find(v, idx)
if idx >= 0:
# insert new row in desired position
table = table[:idx] + self.__addRow(row) + table[idx:]
# update table in infoTables
self.infoTables[title] = table
# write to file
self.__writeToFile()
@staticmethod
def __addColumnsRowToTable(cols):
res = '<tr bgcolor="gray">'
for c in cols:
res += '<td> {} </td>'.format(c)
res += '</tr>'
# returns columns row
return res
def addColumnsRowToDataTable(self, writeToFile=False):
self.dataTable += self.__addColumnsRowToTable(self.dataTableCols)
# write to file
if writeToFile:
self.__writeToFile()
def updateDataTableCols(self, dataTableCols):
        # save a copy of the column names
self.dataTableCols = dataTableCols.copy()
# add timestamp to columns
self.dataTableCols.insert(0, self.timestampColumnName)
# save table number of columns
self.nColsDataTable = len(self.dataTableCols)
@staticmethod
def __addTitleRow(title, nCols):
return '<tr><th colspan={} bgcolor="gray"> {} </th></tr>'.format(nCols, title)
def createDataTable(self, title, columns):
self.dataTableRowsNum = 0
res = ''
# check if we need to close last data table in page, before starting a new one
if len(self.dataTable) > 0:
res += '</table><h2></h2>'
res += '<table>'
# update data table columns
self.updateDataTableCols(columns)
# create title row
res += self.__addTitleRow(title, self.nColsDataTable)
# add table to body
self.dataTable += res
# add columns row
self.addColumnsRowToDataTable()
# write to file
self.__writeToFile()
def getTimeStr(self):
return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# values is a dictionary
def addDataRow(self, values, trType='<tr>', writeFile=True):
res = trType
# add timestamp to values
values[self.timestampColumnName] = self.getTimeStr()
# build table row, iterate over dictionary
for c in self.dataTableCols:
res += '<td>'
if c in values:
if isinstance(values[c], list):
res += self.__createTableFromRows(values[c])
else:
content = '{}'.format(values[c])
                    if (len(content) > self.maxTableCellLength) and ('</button>' not in content):
content = '<div style="width: 300px; overflow: auto"> {} </div>'.format(content)
res += content
res += '</td>'
# close row
res += '</tr>'
# add data to dataTable
self.dataTable += res
# update number of data table rows
self.dataTableRowsNum += 1
# add columns row if needed
if self.dataTableRowsNum % self.nRowsPerColumnsRow == 0:
self.addColumnsRowToDataTable()
if writeFile:
# write to file
self.__writeToFile()
# add data summary to data table
# values is a dictionary
def addSummaryDataRow(self, values):
self.addDataRow(values, trType='<tr bgcolor="#27AE60">')
def addInfoToDataTable(self, line, color='lightblue'):
res = '<tr>'
res += '<td> {} </td>'.format(self.getTimeStr())
res += '<td colspan={} bgcolor="{}"> {} </td>'.format(self.nColsDataTable - 1, color, line)
res += '</tr>'
# add table to body
self.dataTable += res
# write to file
self.__writeToFile()
def replaceValueInDataTable(self, oldVal, newVal):
self.dataTable = self.dataTable.replace(oldVal, newVal)
# write to file
self.__writeToFile()
def plot(self, **kwargs):
# data is a list, where each element is [x , y , 'bo' (i.e. pts style)]
data = kwargs.get('data')
if not data:
return
fig, ax = plt.subplots(nrows=1, ncols=1)
for x, y, style in data:
ax.plot(x, y, style)
# init properties we might want to handle
properties = [('xticks', ax.set_xticks), ('yticks', ax.set_yticks), ('size', fig.set_size_inches),
('xlabel', ax.set_xlabel), ('ylabel', ax.set_ylabel), ('title', ax.set_title)]
for key, func in properties:
if key in kwargs:
func(kwargs[key])
# set title
infoTableTitle = kwargs.get('title', 'Plot')
# convert fig to base64
canvas = FigureCanvas(fig)
png_output = BytesIO()
canvas.print_png(png_output)
img = b64encode(png_output.getvalue())
img = '<img src="data:image/png;base64,{}">'.format(quote(img))
self.addInfoTable(infoTableTitle, [[img]])
class SimpleLogger(HtmlLogger):
def __init__(self, save_path, filename, overwrite=False):
super(SimpleLogger, self).__init__(save_path, filename, overwrite)
self.tableColumn = 'Description'
self.createDataTable('Activity', [self.tableColumn])
def addRow(self, values):
super(SimpleLogger, self).addDataRow({self.tableColumn: values})
def addSummaryRow(self, values):
super(SimpleLogger, self).addSummaryDataRow({self.tableColumn: values})
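
# --- Usage sketch (not part of the original module): a minimal, self-contained
# example of driving HtmlLogger; it assumes the current directory is writable
# and that matplotlib is installed (it is already imported above). Column names
# and values are made up for illustration.
if __name__ == '__main__':
    logger = HtmlLogger('./logs', 'example', overwrite=True)
    # key/value settings rendered as a collapsible info table
    logger.addInfoTable('Run settings', HtmlLogger.dictToRows({'lr': 0.1, 'epochs': 3}, nElementPerRow=2))
    # a data table with two user columns (a Timestamp column is added automatically)
    logger.createDataTable('Training', ['Epoch', 'Loss'])
    logger.addDataRow({'Epoch': 1, 'Loss': 0.52})
    logger.addSummaryDataRow({'Epoch': 'best', 'Loss': 0.52})
    # embed a plot as a base64 image inside its own info table
    logger.plot(data=[[[1, 2, 3], [0.52, 0.41, 0.33], 'bo-']], title='Loss curve',
                xlabel='Epoch', ylabel='Loss')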
| StarcoderdataPython |
1739168 | <gh_stars>0
from selenium import webdriver
from seleniumbase.__version__ import __version__
from seleniumbase.core.browser_launcher import get_driver # noqa
from seleniumbase.fixtures import js_utils # noqa
from seleniumbase.fixtures import page_actions # noqa
from seleniumbase.fixtures.base_case import BaseCase # noqa
from seleniumbase.masterqa.master_qa import MasterQA # noqa
from seleniumbase.common import decorators # noqa
from seleniumbase.common import encryption # noqa
import collections
import sys
if sys.version_info[0] >= 3:
from seleniumbase import translate # noqa
if sys.version_info >= (3, 7):
webdriver.TouchActions = None # Lifeline for past selenium-wire versions
if sys.version_info >= (3, 10):
collections.Callable = collections.abc.Callable # Lifeline for nosetests
del collections # Undo "import collections" / Simplify "dir(seleniumbase)"
del sys # Undo "import sys" / Simplify "dir(seleniumbase)"
del webdriver # Undo "import webdriver" / Simplify "dir(seleniumbase)"
version_info = [int(i) for i in __version__.split(".") if i.isdigit()] # noqa
version_tuple = tuple(version_info) # noqa
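
# --- Usage sketch (not part of this package __init__): a typical SeleniumBase
# test built on the re-exported BaseCase, normally run with pytest; the URL and
# expected title are illustrative only.
#
#     from seleniumbase import BaseCase
#
#     class MyTestClass(BaseCase):
#         def test_open_page(self):
#             self.open("https://example.com")
#             self.assert_title("Example Domain")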
| StarcoderdataPython |
4834334 | import logging
import re
from sqlalchemy import create_engine
from sqlalchemy.engine.reflection import Inspector
import sqlalchemy_utils
logger = logging.getLogger(__name__)
def hint_match(record, hints):
for hint in hints:
if re.match(hint, record):
return True
return False
def try_drop_test_data(
database='indexd_test', root_user='postgres', host='localhost'):
    # Build an engine that points at the target database; sqlalchemy_utils can
    # then check whether it exists and drop it.
engine = create_engine("postgres://{user}@{host}/{name}".format(
user=root_user, host=host, name=database))
if sqlalchemy_utils.database_exists(engine.url):
sqlalchemy_utils.drop_database(engine.url)
engine.dispose()
def setup_database(
user='test', password='<PASSWORD>', database='indexd_test',
root_user='postgres', host='localhost', no_drop=False, no_user=False):
"""Setup the user and database"""
if not no_drop:
try_drop_test_data(database)
    # Build an engine that points at the target database; sqlalchemy_utils
    # creates it on the server if it does not exist yet.
engine = create_engine("postgres://{user}@{host}/{name}".format(
user=root_user, host=host, name=database))
if not sqlalchemy_utils.database_exists(engine.url):
sqlalchemy_utils.create_database(engine.url)
conn = engine.connect()
if not no_user:
try:
user_stmt = "CREATE USER {user} WITH PASSWORD '{password}'".format(
user=user, password=password)
conn.execute(user_stmt)
            perm_stmt = 'GRANT ALL PRIVILEGES ON DATABASE {database} TO {user}'\
                ''.format(database=database, user=user)
conn.execute(perm_stmt)
conn.execute("commit")
except Exception as e:
logger.warning("Unable to add user: %s", e)
conn.close()
engine.dispose()
def check_engine_for_migrate(engine):
"""
    check if a db engine supports database migration
Args:
engine (sqlalchemy.engine.base.Engine): a sqlalchemy engine
Return:
bool: whether the engine support migration
"""
return engine.dialect.supports_alter
def init_schema_version(driver, model, current_version):
"""
initialize schema table with a initialized singleton of version
Args:
driver (object): an alias or index driver instance
model (sqlalchemy.ext.declarative.api.Base): the version table model
current_version (int): current schema version
Return:
version (int): current version number in database
"""
with driver.session as s:
schema_version = s.query(model).first()
if not schema_version:
schema_version = model(version=current_version)
s.add(schema_version)
current_version = schema_version.version
return current_version
def migrate_database(driver, migrate_functions, current_schema_version, model):
"""
migrate current database to match the schema version provided in
current schema
Args:
driver (object): an alias or index driver instance
migrate_functions (list): a list of migration functions
current_schema_version (int): version of current schema in code
model (sqlalchemy.ext.declarative.api.Base): the version table model
Return:
None
"""
db_schema_version = init_schema_version(driver, model, 0)
need_migrate = (current_schema_version - db_schema_version) > 0
if not check_engine_for_migrate(driver.engine) and need_migrate:
logger.error(
'Engine {} does not support alter, skip migration'.format(
driver.engine.dialect.name))
return
for f in migrate_functions[
db_schema_version:current_schema_version]:
with driver.session as s:
schema_version = s.query(model).first()
schema_version.version += 1
logger.debug('migrating {} schema to {}'.format(
driver.__class__.__name__,
schema_version.version))
f(engine=driver.engine, session=s)
s.merge(schema_version)
logger.debug('finished migration for version {}'.format(
schema_version.version))
def is_empty_database(driver):
"""
check if the database is empty or not
Args:
driver (object): an alias or index driver instance
Returns:
Boolean
"""
table_list = Inspector.from_engine(driver.engine).get_table_names()
return len(table_list) == 0
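
# --- Usage sketch (illustrative only): the driver, version-table model and the
# migration function below are hypothetical stand-ins. migrate_database()
# replays every function between the version stored in the database and the
# version compiled into the code, bumping the stored version once per step.
#
#     def add_description_column(engine, session):
#         engine.execute("ALTER TABLE record ADD COLUMN description VARCHAR")
#
#     CURRENT_SCHEMA_VERSION = 1
#     SCHEMA_MIGRATION_FUNCTIONS = [add_description_column]
#
#     migrate_database(
#         driver=index_driver,
#         migrate_functions=SCHEMA_MIGRATION_FUNCTIONS,
#         current_schema_version=CURRENT_SCHEMA_VERSION,
#         model=IndexSchemaVersion,
#     )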
| StarcoderdataPython |
4829352 | <filename>apps/payments/api/serializers.py<gh_stars>1-10
"""
Payments API
"""
from rest_framework import serializers
from apps.users.api.serializers import UserSerializer
from apps.purchases.api.serializers import PurchaseOrderSerializer
from apps.inventory.api.serializers import InventoryItemSerializer
from ..models import OwnershipTransfer
class OwnershipTransferSerializer(serializers.ModelSerializer):
"""Serializer for ownership transfers"""
order = PurchaseOrderSerializer()
previous_owner = UserSerializer()
current_owner = UserSerializer()
created_by = UserSerializer()
modified_by = UserSerializer()
items = InventoryItemSerializer(many=True)
class Meta:
model = OwnershipTransfer
fields = ('id', 'order', 'previous_owner', 'current_owner', 'items',
'date_transferred', 'date_created', 'date_modified',
'created_by', 'modified_by')
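
# --- Usage sketch (illustrative, names are assumptions): the serializer is
# read-oriented (all nested serializers are rendered in full), so a minimal
# way to expose it is a read-only viewset.
#
#     from rest_framework import viewsets
#
#     class OwnershipTransferViewSet(viewsets.ReadOnlyModelViewSet):
#         queryset = OwnershipTransfer.objects.all()
#         serializer_class = OwnershipTransferSerializer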
| StarcoderdataPython |
1663837 | <filename>custom_dynamics/enums.py
from enum import Enum
class MillerDynamics(Enum):
"""
    Selection of the dynamics formulation used for the Miller OCP
"""
EXPLICIT = "explicit"
ROOT_EXPLICIT = "root_explicit"
IMPLICIT = "implicit"
ROOT_IMPLICIT = "root_implicit"
IMPLICIT_TAU_DRIVEN_QDDDOT = "implicit_qdddot"
ROOT_IMPLICIT_QDDDOT = "root_implicit_qdddot"
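
# --- Usage sketch (illustrative): code that builds the OCP typically branches
# on the selected member, e.g.
#
#     dynamics = MillerDynamics.ROOT_EXPLICIT
#     if dynamics in (MillerDynamics.EXPLICIT, MillerDynamics.ROOT_EXPLICIT):
#         ...  # build the explicit variants
#     else:
#         ...  # build one of the implicit variants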
| StarcoderdataPython |
98803 | <reponame>markliuyuxiang/web-avatarify
import os
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from .api import style_gan
service = os.getenv("SERVICE")
if service == "avatarify":
from .api import avatarify
else:
from .api import avatarify_relay as avatarify
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(
avatarify.router,
prefix="/api/v1/avatarify",
tags=["api"],
dependencies=[],
responses={404: {"description": "Not found"}},
)
app.include_router(
style_gan.router,
prefix="/api/v1/getAvatar",
tags=["api"],
dependencies=[],
responses={404: {"description": "Not found"}},
)
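
# --- Usage sketch (assumptions flagged): the module path below is a guess based
# on the relative imports, so adjust it to the real package name. SERVICE picks
# between the local avatarify backend and the relay variant.
#
#     SERVICE=avatarify uvicorn app.main:app --host 0.0.0.0 --port 8000
#
# FastAPI then serves the interactive API docs at http://localhost:8000/docs,
# with the routers mounted under /api/v1/avatarify and /api/v1/getAvatar.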
| StarcoderdataPython |
1763808 | import pathlib
import typing
from typing import Type
import numpy as np
from flytekit.core.context_manager import FlyteContext
from flytekit.core.type_engine import TypeEngine, TypeTransformer, TypeTransformerFailedError
from flytekit.models.core import types as _core_types
from flytekit.models.literals import Blob, BlobMetadata, Literal, Scalar
from flytekit.models.types import LiteralType
class NumpyArrayTransformer(TypeTransformer[np.ndarray]):
"""
TypeTransformer that supports np.ndarray as a native type.
"""
NUMPY_ARRAY_FORMAT = "NumpyArray"
def __init__(self):
super().__init__(name="Numpy Array", t=np.ndarray)
def get_literal_type(self, t: Type[np.ndarray]) -> LiteralType:
return LiteralType(
blob=_core_types.BlobType(
format=self.NUMPY_ARRAY_FORMAT, dimensionality=_core_types.BlobType.BlobDimensionality.SINGLE
)
)
def to_literal(
self, ctx: FlyteContext, python_val: np.ndarray, python_type: Type[np.ndarray], expected: LiteralType
) -> Literal:
meta = BlobMetadata(
type=_core_types.BlobType(
format=self.NUMPY_ARRAY_FORMAT, dimensionality=_core_types.BlobType.BlobDimensionality.SINGLE
)
)
local_path = ctx.file_access.get_random_local_path() + ".npy"
pathlib.Path(local_path).parent.mkdir(parents=True, exist_ok=True)
# save numpy array to a file
# allow_pickle=False prevents numpy from trying to save object arrays (dtype=object) using pickle
np.save(file=local_path, arr=python_val, allow_pickle=False)
remote_path = ctx.file_access.get_random_remote_path(local_path)
ctx.file_access.put_data(local_path, remote_path, is_multipart=False)
return Literal(scalar=Scalar(blob=Blob(metadata=meta, uri=remote_path)))
def to_python_value(self, ctx: FlyteContext, lv: Literal, expected_python_type: Type[np.ndarray]) -> np.ndarray:
try:
uri = lv.scalar.blob.uri
except AttributeError:
            raise TypeTransformerFailedError(f"Cannot convert from {lv} to {expected_python_type}")
local_path = ctx.file_access.get_random_local_path()
ctx.file_access.get_data(uri, local_path, is_multipart=False)
# load numpy array from a file
return np.load(file=local_path)
def guess_python_type(self, literal_type: LiteralType) -> typing.Type[np.ndarray]:
if (
literal_type.blob is not None
and literal_type.blob.dimensionality == _core_types.BlobType.BlobDimensionality.SINGLE
and literal_type.blob.format == self.NUMPY_ARRAY_FORMAT
):
return np.ndarray
raise ValueError(f"Transformer {self} cannot reverse {literal_type}")
TypeEngine.register(NumpyArrayTransformer())
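
# --- Usage sketch (illustrative, not part of the plugin): once the transformer
# is registered, np.ndarray can be used directly as a task input/output type and
# Flyte moves the data through the single-blob logic above.
#
#     from flytekit import task, workflow
#
#     @task
#     def make_array() -> np.ndarray:
#         return np.arange(10)
#
#     @task
#     def total(arr: np.ndarray) -> float:
#         return float(arr.sum())
#
#     @workflow
#     def wf() -> float:
#         return total(arr=make_array())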
| StarcoderdataPython |
1656339 | <reponame>miaucl/iot-pi-cam
#!/usr/bin/python
"""
Raspberry Pi Power Button Script.
Author: miaucl
Description: This script listens to a power button connected on PIN XXX and provides shutdown and reboot functionality for a Raspbian distribution, using Python 2/3.
Setup: PIN XXX is configured with a pull-up resistor and should be connected to the GND PIN through a simple momentary switch.
(PIN: XXX) (PIN: GND)
___ ___
| ___ |
|_______/ _______|
BTN
Standard:
PIN=29/PORT=5
GND=30
"""
import RPi.GPIO as GPIO
import subprocess, time, sys, syslog, os
#####################
### Configuration ###
#####################
# BCM GPIO number of the button pin
# GPIO 5 = physical PIN 29 (GND is right next to it on PIN 30)
PORT = 5
# Maximum press duration for a shutdown (in secs); a longer press triggers a reboot instead
T_REBOOT = 3
# Timeout (in secs) after which a pending button press is discarded
T_TIMEOUT = 6
# Debounce time for the button (in secs)
T_PRELL = 0.05
###############
### Globals ###
###############
# Timestamp of the button press
timestamp = None
######################
### Initialization ###
######################
# Get the uid to check the permissions
uid = os.getuid()
# Needs root permission to shutdown/reboot
if uid > 0:
    print("Script needs root permission!")
sys.exit(0)
# GPIO initializing, BMC-Pin number, Pullup-Resistor
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(PORT, GPIO.IN, pull_up_down=GPIO.PUD_UP)
##########################
### Interrupt Routines ###
##########################
# Interrupt routine for the button
def buttonISR(pin):
    """Interrupt routine listening to the button."""
global timestamp
# Button is pressed down
if not (GPIO.input(pin)):
syslog.syslog("Button down")
# Start the press
if timestamp is None:
syslog.syslog("Start press")
timestamp = time.time()
# Restart the press
elif time.time() - timestamp > T_PRELL:
syslog.syslog("Restart press")
timestamp = time.time()
# Skip as it is probably a rebound
else:
syslog.syslog("\t--> Skip: Already a press in process and probably a rebound")
# Button is released up
else:
syslog.syslog("Button up")
# If a press is active
if timestamp:
# A press is completed
if time.time() - timestamp > T_PRELL:
syslog.syslog("Stop press after: {:.3f}s".format(time.time() - timestamp))
# Reboot for long press
if time.time() - timestamp >= T_REBOOT:
                    syslog.syslog('==> System reboot')
time.sleep(1)
subprocess.call(['shutdown', '-r', 'now'], shell=False)
# Shutdown for short press
else:
                    syslog.syslog('==> System shutdown')
time.sleep(1)
subprocess.call(['shutdown', '-h', 'now'], shell=False)
# Reset the timestamp
timestamp = None
# Skip as it is probably a rebound
else:
syslog.syslog("\t--> Skip: Probably a rebound after: {:.3f}s".format(time.time() - timestamp))
# Interrupt for the button PIN
GPIO.add_event_detect(PORT, GPIO.BOTH, callback=buttonISR)
############
### Main ###
############
syslog.syslog('Shutdown.py started')
while True:
try:
# Sleep
time.sleep(1)
# Reset the timestamp after timeout when active
if timestamp and time.time() - timestamp > T_TIMEOUT:
syslog.syslog("Timeout press...")
timestamp = None
except KeyboardInterrupt:
        syslog.syslog('Shutdown terminated (Keyboard CTRL+C)')
print("Bye")
sys.exit(0)
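
# --- Deployment note (illustrative): to have the listener start at boot, one
# common approach is a root crontab entry; the script path is a placeholder.
#
#     @reboot /usr/bin/python3 /home/pi/shutdown_button.py &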
| StarcoderdataPython |