hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
248
| max_stars_repo_name
stringlengths 5
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
248
| max_issues_repo_name
stringlengths 5
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
248
| max_forks_repo_name
stringlengths 5
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
2.06M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.03M
| alphanum_fraction
float64 0
1
| count_classes
int64 0
1.6M
| score_classes
float64 0
1
| count_generators
int64 0
651k
| score_generators
float64 0
1
| count_decorators
int64 0
990k
| score_decorators
float64 0
1
| count_async_functions
int64 0
235k
| score_async_functions
float64 0
1
| count_documentation
int64 0
1.04M
| score_documentation
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b5dde242388a3c0b90abd4420143d4c4d72acbeb
| 914 |
py
|
Python
|
docker_retag/utils/auth_helper.py
|
aiopsclub/docker_retag
|
0019917b0cdd7860c7ff79afdb78101878f5c1b1
|
[
"MIT"
] | null | null | null |
docker_retag/utils/auth_helper.py
|
aiopsclub/docker_retag
|
0019917b0cdd7860c7ff79afdb78101878f5c1b1
|
[
"MIT"
] | null | null | null |
docker_retag/utils/auth_helper.py
|
aiopsclub/docker_retag
|
0019917b0cdd7860c7ff79afdb78101878f5c1b1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import requests
def kv2dict(kvinfo):
kv = {}
for item in kvinfo.split(","):
item_list = item.split("=")
kv[item_list[0]] = item_list[1].strip('"')
return kv
def get_service_realm(registry_url):
registry_api_url = (
registry_url if registry_url.endswith("/v2/") else registry_url + "/v2/"
)
registry_res = requests.get(registry_api_url)
www_authenticate_header = registry_res.headers.get("Www-Authenticate")
if www_authenticate_header:
return kv2dict(www_authenticate_header.split()[-1])
return None
def required_auth(registry_url):
registry_api_url = (
registry_url if registry_url.endswith("/v2/") else registry_url + "/v2/"
)
registry_res = requests.get(registry_api_url)
return registry_res.status_code == 401
def scope_generate(image):
return "repository:{}:pull,push".format(image)
| 26.114286 | 80 | 0.682713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.106127 |
b5de2f232c7693a7a9e178d8efeaacaaaf172cb4
| 1,081 |
py
|
Python
|
app/__init__.py
|
SomeoneLixin/api-dock
|
3958a3a3286ae7f8802df9aba5ece2908ca4361e
|
[
"MIT"
] | 4 |
2018-05-07T15:39:17.000Z
|
2019-07-03T21:28:10.000Z
|
app/__init__.py
|
SomeoneLixin/api-dock
|
3958a3a3286ae7f8802df9aba5ece2908ca4361e
|
[
"MIT"
] | 4 |
2020-09-05T10:57:19.000Z
|
2021-05-09T16:01:22.000Z
|
app/__init__.py
|
SomeoneLixin/api-dock
|
3958a3a3286ae7f8802df9aba5ece2908ca4361e
|
[
"MIT"
] | 1 |
2018-05-09T07:57:03.000Z
|
2018-05-09T07:57:03.000Z
|
from flask import Flask, g
from flask_cors import CORS
from flask_jwt_extended import JWTManager
from config import config
from app.models import db, ma
from app.models.RevokedToken import RevokedToken
def create_app(config_name):
app = Flask(__name__)
CORS(app, resources={r"/api/*": {"origins": "*"}})
app.config.from_object(config[config_name])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
config[config_name].init_app(app)
app.secret_key = app.config['SECRET_KEY']
db.init_app(app)
ma.init_app(app)
jwt = JWTManager(app)
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return RevokedToken.is_jti_blacklisted(jti)
from .api import api_blueprint
app.register_blueprint(api_blueprint)
@app.route('/')
def index():
return 'API Dock, a web application for managing and testing your APIs.'
return app
| 29.216216 | 80 | 0.719704 | 0 | 0 | 0 | 0 | 286 | 0.26457 | 0 | 0 | 206 | 0.190564 |
b5df02ad3bc4934c674cd77a38e8acef0d4d0b9f
| 730 |
py
|
Python
|
Snippets/auto_scroll.py
|
ColinShark/Pyrogram-Snippets
|
50ede9ca9206bd6d66c6877217b4a80b4f845294
|
[
"WTFPL"
] | 59 |
2021-01-07T16:19:48.000Z
|
2022-02-22T06:56:36.000Z
|
Snippets/auto_scroll.py
|
Mrvishal2k2/Pyrogram-Snippets
|
d4e66876f6aff1252dfb88423fedd66e18057446
|
[
"WTFPL"
] | 4 |
2019-10-14T14:02:38.000Z
|
2020-11-06T11:47:03.000Z
|
Snippets/auto_scroll.py
|
ColinShark/Pyrogram-Snippets
|
50ede9ca9206bd6d66c6877217b4a80b4f845294
|
[
"WTFPL"
] | 26 |
2021-03-02T14:31:51.000Z
|
2022-03-23T21:19:14.000Z
|
# Send .autoscroll in any chat to automatically read all sent messages until you call
# .autoscroll again. This is useful if you have Telegram open on another screen.
from pyrogram import Client, filters
from pyrogram.types import Message
app = Client("my_account")
f = filters.chat([])
@app.on_message(f)
def auto_read(_, message: Message):
app.read_history(message.chat.id)
message.continue_propagation()
@app.on_message(filters.command("autoscroll", ".") & filters.me)
def add_keep(_, message: Message):
if message.chat.id in f:
f.remove(message.chat.id)
message.edit("Autoscroll deactivated")
else:
f.add(message.chat.id)
message.edit("Autoscroll activated")
app.run()
| 25.172414 | 85 | 0.710959 | 0 | 0 | 0 | 0 | 422 | 0.578082 | 0 | 0 | 238 | 0.326027 |
b5e13346685449cfbebc7876faf4f41723fbe5c9
| 2,977 |
py
|
Python
|
_demos/paint.py
|
imdaveho/intermezzo
|
3fe4824a747face996e301ca5190caec0cb0a6fd
|
[
"MIT"
] | 8 |
2018-02-26T16:24:07.000Z
|
2021-06-30T07:40:52.000Z
|
_demos/paint.py
|
imdaveho/intermezzo
|
3fe4824a747face996e301ca5190caec0cb0a6fd
|
[
"MIT"
] | null | null | null |
_demos/paint.py
|
imdaveho/intermezzo
|
3fe4824a747face996e301ca5190caec0cb0a6fd
|
[
"MIT"
] | null | null | null |
from intermezzo import Intermezzo as mzo
curCol = [0]
curRune = [0]
backbuf = []
bbw, bbh = 0, 0
runes = [' ', '░', '▒', '▓', '█']
colors = [
mzo.color("Black"),
mzo.color("Red"),
mzo.color("Green"),
mzo.color("Yellow"),
mzo.color("Blue"),
mzo.color("Magenta"),
mzo.color("Cyan"),
mzo.color("White"),
]
def updateAndDrawButtons(current, x, y, mx, my, n, attrf):
lx, ly = x, y
for i in range(0, n):
if lx <= mx and mx <= lx+3 and ly <= my and my <= ly+1:
current[0] = i
r, fg, bg = attrf(i)
mzo.set_cell(lx+0, ly+0, r, fg, bg)
mzo.set_cell(lx+1, ly+0, r, fg, bg)
mzo.set_cell(lx+2, ly+0, r, fg, bg)
mzo.set_cell(lx+3, ly+0, r, fg, bg)
mzo.set_cell(lx+0, ly+1, r, fg, bg)
mzo.set_cell(lx+1, ly+1, r, fg, bg)
mzo.set_cell(lx+2, ly+1, r, fg, bg)
mzo.set_cell(lx+3, ly+1, r, fg, bg)
lx += 4
lx, ly = x, y
for i in range(0, n):
if current[0] == i:
fg = mzo.color("Red") | mzo.attr("Bold")
bg = mzo.color("Default")
mzo.set_cell(lx+0, ly+2, '^', fg, bg)
mzo.set_cell(lx+1, ly+2, '^', fg, bg)
mzo.set_cell(lx+2, ly+2, '^', fg, bg)
mzo.set_cell(lx+3, ly+2, '^', fg, bg)
lx += 4
def update_and_redraw_all(mx, my):
global backbuf, runes, curRune, colors, curCol
mzo.clear(mzo.color("Default"), mzo.color("Default"))
if mx != -1 and my != -1:
backbuf[bbw*my+mx] = {"Ch": runes[curRune[0]], "Fg": colors[curCol[0]], "Bg": 0}
err = mzo.copy_into_cell_buffer(backbuf)
if err:
raise(Exception(err))
_, h = mzo.size()
def rune_cb(i):
global runes
return runes[i], mzo.color("Default"), mzo.color("Default")
def color_cb(i):
global colors
return ' ', mzo.color("Default"), colors[i]
updateAndDrawButtons(curRune, 0, 0, mx, my, len(runes), rune_cb)
updateAndDrawButtons(curCol, 0, h-3, mx, my, len(colors), color_cb)
mzo.flush()
def reallocBackBuffer(w, h):
global backbuf, bbw, bbh
bbw, bbh = w, h
backbuf = [{"Ch": "", "Fg": 0, "Bg": 0} for _ in range(w*h)]
def main():
err = mzo.init()
if err:
raise(Exception(err))
mzo.set_input_mode(mzo.input("Esc") | mzo.input("Mouse"))
w, h = mzo.size()
reallocBackBuffer(w, h)
update_and_redraw_all(-1, -1)
while True:
mx, my = -1, -1
evt = mzo.poll_event()
if evt["Type"] == mzo.event("Key"):
if evt["Key"] == mzo.key("Esc"):
break
elif evt["Type"] == mzo.event("Mouse"):
if evt["Key"] == mzo.mouse("Left"):
mx, my = evt["MouseX"], evt["MouseY"]
elif evt["Type"] == mzo.event("Resize"):
reallocBackBuffer(evt["Width"], evt["Height"])
update_and_redraw_all(mx, my)
if __name__ == "__main__":
try:
main()
finally:
mzo.close()
| 29.186275 | 88 | 0.518979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 296 | 0.099162 |
b5e16df4333ead8fee7050f33874cfa2a8d52eb0
| 1,896 |
py
|
Python
|
amt/media_reader_cli.py
|
lsxta/amt
|
7dcff9b1ce570abe103d0d8c50fd334f2c93af7d
|
[
"MIT"
] | 5 |
2021-12-22T08:49:23.000Z
|
2022-02-22T12:38:40.000Z
|
amt/media_reader_cli.py
|
lsxta/amt
|
7dcff9b1ce570abe103d0d8c50fd334f2c93af7d
|
[
"MIT"
] | 1 |
2022-01-30T00:51:05.000Z
|
2022-02-03T04:59:42.000Z
|
amt/media_reader_cli.py
|
lsxta/amt
|
7dcff9b1ce570abe103d0d8c50fd334f2c93af7d
|
[
"MIT"
] | 1 |
2022-01-29T09:38:16.000Z
|
2022-01-29T09:38:16.000Z
|
import logging
from .media_reader import MediaReader
from .util.media_type import MediaType
class MediaReaderCLI(MediaReader):
auto_select = False
def print_results(self, results):
for i, media_data in enumerate(results):
print("{:4}| {}\t{} {} ({})".format(i, media_data.global_id, media_data["name"], media_data.get("label", media_data["season_title"]), MediaType(media_data["media_type"]).name))
def select_media(self, term, results, prompt, no_print=False, auto_select_if_single=False):
index = 0
print("Looking for", term)
if not self.auto_select and not (len(results) == 1 and auto_select_if_single):
if not no_print:
self.print_results(results)
index = input(prompt)
try:
return results[int(index)]
except (ValueError, IndexError):
logging.warning("Invalid input; skipping")
return None
def list_some_media_from_server(self, server_id, limit=None):
self.print_results(self.get_server(server_id).get_media_list(limit=limit)[:limit])
def list_servers(self):
for id in sorted(self.state.get_server_ids()):
print(id)
def test_login(self, server_ids=None, force=False):
failures = False
for server in self.get_servers():
if server.has_login() and (not server_ids or server.id in server_ids):
if (force or server.needs_to_login()) and not server.relogin():
logging.error("Failed to login into %s", server.id)
failures = True
return not failures
def auth(self, tracker_id, just_print=False):
tracker = self.get_tracker_by_id(tracker_id)
print("Get token form", tracker.get_auth_url())
if not just_print:
self.settings.store_secret(tracker.id, input("Enter token:"))
| 38.693878 | 188 | 0.642405 | 1,800 | 0.949367 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.081224 |
b5e250ffeccc9fb9e0d710d9d521ebecc7097405
| 1,272 |
py
|
Python
|
src/webapi/libs/deps/__init__.py
|
VisionTale/StreamHelper
|
29a5e5d5c68401f2c1d1b9cf54a7c68fb41d623a
|
[
"MIT"
] | null | null | null |
src/webapi/libs/deps/__init__.py
|
VisionTale/StreamHelper
|
29a5e5d5c68401f2c1d1b9cf54a7c68fb41d623a
|
[
"MIT"
] | 37 |
2020-12-16T06:30:22.000Z
|
2022-03-28T03:04:28.000Z
|
src/webapi/libs/deps/__init__.py
|
VisionTale/StreamHelper
|
29a5e5d5c68401f2c1d1b9cf54a7c68fb41d623a
|
[
"MIT"
] | null | null | null |
"""
Dependency management package.
"""
def debug_print(message: str, verbose: bool):
"""
Print if verbose is set to true.
:param message: message to print
:param verbose: whether to print
:return:
"""
if verbose:
print(message)
def download_and_unzip_archive(url: str, zip_file_fp: str, static_folder: str, remove: bool = True, verbose: bool = True):
"""
Downloads and unzips an archive.
:param url: url to request
:param zip_file_fp: filepath for zip
:param static_folder: folder for flasks static files
:param remove: whether to remove the zip after unpacking, defaults to true.
:param verbose: whether to print information, defaults to true.
:exception OSError: os.remove, requests.get, open, TextIOWrapper.write, ZipFile, ZipFile.extractall
"""
from requests import get
r = get(url)
debug_print("Saving archive..", verbose)
with open(zip_file_fp, 'wb') as f:
f.write(r.content)
debug_print("Extracting..", verbose)
from zipfile import ZipFile
with ZipFile(zip_file_fp, 'r') as zip_file:
zip_file.extractall(static_folder)
if remove:
debug_print("Removing archive..", verbose)
from os import remove
remove(zip_file_fp)
| 30.285714 | 122 | 0.677673 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 663 | 0.521226 |
b5e3ba2877ce6a63efd56ee6ed3e28f80e3fe47d
| 1,096 |
py
|
Python
|
fixture/soap.py
|
DiastroniX/python_training_mantis
|
86f145285bea716246788d7967e1de7c23661bae
|
[
"Apache-2.0"
] | null | null | null |
fixture/soap.py
|
DiastroniX/python_training_mantis
|
86f145285bea716246788d7967e1de7c23661bae
|
[
"Apache-2.0"
] | null | null | null |
fixture/soap.py
|
DiastroniX/python_training_mantis
|
86f145285bea716246788d7967e1de7c23661bae
|
[
"Apache-2.0"
] | null | null | null |
from suds.client import Client
from suds import WebFault
from model.project import Project
class SoapHelper:
def __init__(self, app):
self.app = app
def can_login(self, username, password):
client = Client("http://localhost/mantisbt-1.2.20/api/soap/mantisconnect.php?wsdl")
try:
client.service.mc_login(username, password)
return True
except WebFault:
return False
project_cache = None
def project_list(self):
client = Client("http://localhost/mantisbt-1.2.20/api/soap/mantisconnect.php?wsdl")
self.project_cache = []
try:
for element in client.service.mc_projects_get_user_accessible(self.app.config['webadmin']['username'],
self.app.config['webadmin']['password']):
name = element.name
id = element.id
self.project_cache.append(Project(name=name, id=id))
except WebFault:
return False
return list(self.project_cache)
| 34.25 | 117 | 0.588504 | 1,003 | 0.915146 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.156934 |
b5e50a13752cec91e8412a4602fb057eaceaa6b0
| 1,113 |
py
|
Python
|
demos/runner/validate.py
|
Tanbobobo/DL-starter
|
be4678171bd51ae9e4f61079fa6422e3378d7ce4
|
[
"Apache-2.0"
] | null | null | null |
demos/runner/validate.py
|
Tanbobobo/DL-starter
|
be4678171bd51ae9e4f61079fa6422e3378d7ce4
|
[
"Apache-2.0"
] | null | null | null |
demos/runner/validate.py
|
Tanbobobo/DL-starter
|
be4678171bd51ae9e4f61079fa6422e3378d7ce4
|
[
"Apache-2.0"
] | null | null | null |
import torch
import wandb
def val(
criterion=None,
metric=None,
loader=None,
model=None,
device=None
):
r'''
Args:
criterion: a differentiable function to provide gratitude for backward
metric: a score to save best model
loader: a data iterator
model: model
device: calculation device, cpu or cuda.
Returns:
a metric socre on behalf of the accuracy on unseen dataset of the prediction of the model
'''
model.eval()
model.to(device)
loss_value_mean = 0
with torch.no_grad():
for idx, data in enumerate(loader):
img = data['img'].to(device)
gt = data['gt'].to(device)
pred = model(img)
loss_value = criterion(pred, gt)
loss_value_mean += loss_value
metric.accumulate(pred, gt)
wandb.log({'val_loss': loss_value})
metric_value = metric.value
loss_value_mean = loss_value_mean / len(loader)
return model, metric_value, loss_value_mean
| 27.146341 | 98 | 0.574124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 391 | 0.351303 |
b5e65e7ea71fdd5c4688f420edd49d985bd3eb75
| 89 |
py
|
Python
|
coding/calculate-5-6/code.py
|
mowshon/python-quiz
|
215fb23dbb0fa42b438f988e49172b87b48bade3
|
[
"MIT"
] | 2 |
2020-07-17T21:08:26.000Z
|
2020-08-16T03:12:07.000Z
|
coding/calculate-5-6/code.py
|
mowshon/python-quiz
|
215fb23dbb0fa42b438f988e49172b87b48bade3
|
[
"MIT"
] | 2 |
2021-06-08T22:04:35.000Z
|
2022-01-13T03:03:32.000Z
|
coding/calculate-5-6/code.py
|
mowshon/python-quiz
|
215fb23dbb0fa42b438f988e49172b87b48bade3
|
[
"MIT"
] | null | null | null |
def calculate(num1, num2=4):
res = num1 * num2
print(res)
print(calculate(5, 6))
| 17.8 | 28 | 0.629213 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b5e76e091ee3230443db9902e3df57b4dbeb04c4
| 4,428 |
py
|
Python
|
plot_fig07e_varying.py
|
victorcroisfelt/cf-ra-spatial-separability
|
60611c85079dd13848c70e3192331ea2a9f55138
|
[
"MIT"
] | null | null | null |
plot_fig07e_varying.py
|
victorcroisfelt/cf-ra-spatial-separability
|
60611c85079dd13848c70e3192331ea2a9f55138
|
[
"MIT"
] | null | null | null |
plot_fig07e_varying.py
|
victorcroisfelt/cf-ra-spatial-separability
|
60611c85079dd13848c70e3192331ea2a9f55138
|
[
"MIT"
] | 2 |
2022-01-08T12:18:43.000Z
|
2022-02-23T07:59:18.000Z
|
########################################
# plot_fig07d_anaa_practical.py
#
# Description. Script used to actually plot Fig. 07 (d) of the paper.
#
# Author. @victorcroisfelt
#
# Date. December 29, 2021
#
# This code is part of the code package used to generate the numeric results
# of the paper:
#
# Croisfelt, V., Abrão, T., and Marinello, J. C., “User-Centric Perspective in
# Random Access Cell-Free Aided by Spatial Separability”, arXiv e-prints, 2021.
#
# Available on:
#
# https://arxiv.org/abs/2107.10294
#
# Comment. Please, make sure that you have the required data files. They are
# obtained by running the scripts:
#
# - data_fig07_08_bcf.py
# - data_fig07_08_cellular.py
# - data_fig07_08_cellfree.py
#
########################################
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import warnings
########################################
# Preamble
########################################
# Comment the line below to see possible warnings related to python version
# issues
warnings.filterwarnings("ignore")
axis_font = {'size':'12'}
plt.rcParams.update({'font.size': 12})
matplotlib.rc('xtick', labelsize=12)
matplotlib.rc('ytick', labelsize=12)
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
########################################
# Loading data
########################################
data_bcf = np.load('data/fig07e_bcf.npz')
data_cellfree_est1 = np.load('data/fig07e_cellfree_est1.npz')
data_cellfree_est2 = np.load('data/fig07e_cellfree_est2.npz')
data_cellfree_est3 = np.load('data/fig07e_cellfree_est3.npz')
# Extract x-axis
L_range = data_cellfree_est1["L_range"]
N_range = data_cellfree_est1["N_range"]
# Extract ANAA
anaa_bcf = data_bcf["anaa"]
anaa_cellfree_est1 = data_cellfree_est1["anaa"]
anaa_cellfree_est2 = data_cellfree_est2["anaa"]
anaa_cellfree_est3 = data_cellfree_est3["anaa"]
########################################
# Plot
########################################
# Fig. 07e
fig, ax = plt.subplots(figsize=(4/3 * 3.15, 2))
#fig, ax = plt.subplots(figsize=(1/3 * (6.30), 3))
# Go through all values of N
for nn, N in enumerate(N_range):
plt.gca().set_prop_cycle(None)
if N == 1:
# BCF
ax.plot(L_range[:-2], anaa_bcf[:-2], linewidth=1.5, linestyle=(0, (3, 1, 1, 1, 1, 1)), color='black', label='BCF')
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=1.5, linestyle='--', color='black', label='CF-SUCRe: Est. 1')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=1.5, linestyle='-.', color='black', label='CF-SUCRe: Est. 2')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=1.5, linestyle=':', color='black', label='CF-SUCRe: Est. 3')
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=1.5, linestyle='--')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=1.5, linestyle='-.')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=1.5, linestyle=':')
elif N == 8:
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=1.5, linestyle='--')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=1.5, linestyle='-.')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=1.5, linestyle=':')
plt.gca().set_prop_cycle(None)
if N == 1:
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=0.0, marker='^', color='black', label='$N=1$')
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=0.0, marker='^')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=0.0, marker='^')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=0.0, marker='^')
elif N == 8:
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=0.0, marker='v', color='black', label='$N=8$')
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=0.0, marker='v')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=0.0, marker='v')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=0.0, marker='v')
def forward(x):
return x**(1/2)
def inverse(x):
return x**2
ax.set_xscale('function', functions=(forward, inverse))
ax.set_xticks(L_range[:-2])
ax.set_yticks(np.array([1, 3, 5, 7, 9, 10]))
ax.grid(visible=True, alpha=0.25, linestyle='--')
ax.set_xlabel(r'number of APs $L$')
ax.set_ylabel('ANAA')
ax.legend(fontsize='xx-small', markerscale=.5)
plt.show()
| 30.537931 | 124 | 0.630759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,722 | 0.38845 |
b5e97f4578877e1fcf5bd928b8d18930e062681c
| 6,697 |
py
|
Python
|
Meters/IEC/Datasets/get_time.py
|
Runamook/PyCharmProjects
|
1b1a063345e052451f00e3fdea82e31bdd2a0cae
|
[
"MIT"
] | null | null | null |
Meters/IEC/Datasets/get_time.py
|
Runamook/PyCharmProjects
|
1b1a063345e052451f00e3fdea82e31bdd2a0cae
|
[
"MIT"
] | null | null | null |
Meters/IEC/Datasets/get_time.py
|
Runamook/PyCharmProjects
|
1b1a063345e052451f00e3fdea82e31bdd2a0cae
|
[
"MIT"
] | null | null | null |
import datetime
from time import sleep
import re
import pytz
# try:
# from .emhmeter import MeterBase, create_input_vars, logger
# except ModuleNotFoundError:
# from emhmeter import MeterBase, create_input_vars, logger
# TODO: Not working
class GetTime:
def __init__(self, input_vars):
self.input_vars = input_vars
self.meter_number = input_vars["meter"]["meterNumber"]
self.results = dict()
def _get(self, what):
# results = dict()
# Get time
if what == "time":
obis = "0.9.1"
name = "time"
elif what == "date":
obis = "0.9.2"
name = "date"
else:
logger.error(f"Incorrect input {what}, use \"time\" or \"date\"")
raise KeyError
delta = datetime.timedelta(seconds=14)
ref_time = datetime.datetime.utcnow() + delta
self.results[name] = [ref_time, None]
logger.debug(f"===================== Getting {name} ===================== ")
value = self.query("R5", f"{obis}()")
if obis not in value:
logger.error(f"Unable to receive {name}. Received: \"{value}\"")
value = f"{obis}(error)"
self.results[name][1] = value
logger.debug(f"{self.results}")
if what == "time":
self.make_pause()
return
def query(self, cmd, data):
with MeterBase(self.input_vars) as m:
m.sendcmd_and_decode_response(b"/" + b"?" + self.meter_number.encode() + b"!\r\n")
m.sendcmd_and_decode_response(MeterBase.ACK + b'051\r\n')
result = m.sendcmd_and_decode_response(cmd.encode(), data.encode())
cmd = MeterBase.SOH + b'B0' + MeterBase.ETX
m.sendcmd_and_decode_response(cmd + MeterBase.bcc(cmd))
return result
@staticmethod
def make_pause():
pause = 25
logger.debug(f"Pausing for {pause} seconds")
sleep(pause)
return
def check(self, what, value):
# 0.9.2(1190724), 0.9.1(1221856)
re_in_parenthesis = re.compile('^0.9..[(](.+?)[)]')
logger.debug(f"Checking meter {what} \"{value}\"")
reference_value = value[0]
checked_value = value[1]
found_value = re_in_parenthesis.search(checked_value).groups()[0]
if what == "date":
return self.check_date(reference_value, found_value)
elif what == "time":
return self.check_time(reference_value, found_value)
@staticmethod
def check_date(ref_value, checked_value):
# datetime object, string
if checked_value != "error":
ref_value = ref_value.strftime("%y%m%d")
else:
ref_value = "010000"
checked_value = checked_value[1:]
logger.debug(f"Checking {ref_value} == {checked_value}")
return ref_value == checked_value
@staticmethod
def check_time(ref_value, checked_value):
# datetime object, string
# Select meter TZ based on response
if checked_value[0] == "1":
local_tz = pytz.timezone("Europe/Berlin") # UTC +2
elif checked_value[0] == "2":
local_tz = pytz.timezone("UTC")
else:
local_tz = pytz.timezone("Europe/Moscow") # UTC +3
# Generate "now" time in UTC
utc_now = pytz.utc.localize(datetime.datetime.utcnow())
# ref_value is UTC already, insert TZ info into object
ref_value = pytz.utc.localize(ref_value)
# Adjust it to actual meter TZ
ref_value = ref_value.astimezone(local_tz)
if checked_value != "error":
now_date = utc_now.strftime("%y%m%d")
else:
now_date = "010000"
# Take meter TZ now date (generated by script), add to meter TZ now time (received from meter)
checked_value = datetime.datetime.strptime(now_date + checked_value[1:], "%y%m%d%H%M%S")
# Insert local_tz into datetime object
checked_value = local_tz.localize(checked_value)
# Now both objects are in local TZ
logger.debug(f"Checking {ref_value} == {checked_value}")
# Compare
delta = (checked_value - ref_value).total_seconds()
logger.debug(f"Delta = {delta}")
allowable_delta = 6 # Seconds
return abs(delta) <= allowable_delta
def get(self):
self.input_vars["get_id"] = False
self._get("time")
self._get("date")
for key in self.results.keys():
if self.check(key, self.results[key]):
logger.debug(f"{key} is correct")
self.results[key].append("0")
else:
logger.debug(f"{key} is incorrect")
self.results[key].append("1")
return self.results
def parse(self, data):
# Input
# {'time': [datetime.datetime(2019, 7, 27, 13, 43, 28, 274370), '0.9.1(1154336)', '0'],
# 'date': [datetime.datetime(2019, 7, 27, 13, 44, 20, 519825), '0.9.2(1190727)', '1']},
logger.debug(f"{self.meter_number} Parsing time output")
logger.debug(f"{self.meter_number} {data}")
results = dict()
for key in data.keys():
if key == "time":
obis = "0.9.1"
elif key == "date":
obis = "0.9.2"
epoch = data[key][0].strftime("%s")
item_value = data[key][1]
trigger_value = data[key][2]
results[epoch] = [(f"{obis}-value", item_value), (f"{obis}-trigger", trigger_value)]
# {epoch: [(obis_code, val), (), (), ...]}
final_result = {"time": results}
logger.debug(f"{final_result}")
return final_result
if __name__ == "__main__":
meter = {
"meterNumber": "04180616",
"Manufacturer": "",
"ip": "10.124.2.48",
"InstallationDate": "2018-10-10T10:00:00",
"IsActive": True,
"voltageRatio": 200,
"currentRatio": 10,
"totalFactor": 210
}
meter = {
"meterNumber": "05296170",
"Manufacturer": "EMH",
"ip": "10.124.2.120",
"InstallationDate": "2019-02-20T09:00:00",
"IsActive": True,
"voltageRatio": 200,
"currentRatio": 15,
"totalFactor": 215
}
variables = {"port": MeterBase.get_port(meter["ip"]),
"timestamp": MeterBase.get_dt(),
"data_handler": "P.01",
"exporter": "Zabbix",
"server": "192.168.33.33",
"meter": meter
}
logger.setLevel("DEBUG")
m = GetTime(variables)
data = m.get()
print(m.parse(data))
| 31.441315 | 102 | 0.551441 | 5,479 | 0.818128 | 0 | 0 | 1,978 | 0.295356 | 0 | 0 | 2,091 | 0.312229 |
b5ea159a84e98d9a3984e6fe5b31678efa676891
| 143 |
py
|
Python
|
References/Geovana Neves/TCC_Geovana_Neves_GitHub/SUAVE_modifications/SUAVE-feature-constant_throttle_EAS/trunk/SUAVE/Analyses/Results.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
References/Geovana Neves/TCC_Geovana_Neves_GitHub/SUAVE_modifications/SUAVE-feature-constant_throttle_EAS/trunk/SUAVE/Analyses/Results.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
References/Geovana Neves/TCC_Geovana_Neves_GitHub/SUAVE_modifications/SUAVE-feature-constant_throttle_EAS/trunk/SUAVE/Analyses/Results.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
# Results.py
#
# Created: Jan 2015, T. Lukacyzk
# Modified: Feb 2016, T. MacDonald
from SUAVE.Core import Data
class Results(Data):
pass
| 15.888889 | 34 | 0.699301 | 29 | 0.202797 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.559441 |
b5ea1cb63e2208d12c4791c91ece989cd820bf44
| 3,889 |
py
|
Python
|
instagrapi/direct.py
|
chaulaode1257/instagrapi
|
cfb8cb53d3a63092c0146f3a0b7a086c760908c9
|
[
"MIT"
] | 11 |
2021-01-09T22:52:30.000Z
|
2022-03-22T18:33:38.000Z
|
instagrapi/direct.py
|
chaulaode1257/instagrapi
|
cfb8cb53d3a63092c0146f3a0b7a086c760908c9
|
[
"MIT"
] | null | null | null |
instagrapi/direct.py
|
chaulaode1257/instagrapi
|
cfb8cb53d3a63092c0146f3a0b7a086c760908c9
|
[
"MIT"
] | 4 |
2020-12-26T06:14:53.000Z
|
2022-01-05T05:00:16.000Z
|
import re
from typing import List
from .utils import dumps
from .types import DirectThread, DirectMessage
from .exceptions import ClientNotFoundError, DirectThreadNotFound
from .extractors import extract_direct_thread, extract_direct_message
class Direct:
def direct_threads(self, amount: int = 20) -> List[DirectThread]:
"""Return last threads
"""
assert self.user_id, "Login required"
params = {
"visual_message_return_type": "unseen",
"thread_message_limit": "10",
"persistentBadging": "true",
"limit": "20",
}
cursor = None
threads = []
self.private_request("direct_v2/get_presence/")
while True:
if cursor:
params['cursor'] = cursor
result = self.private_request("direct_v2/inbox/", params=params)
inbox = result.get("inbox", {})
for thread in inbox.get("threads", []):
threads.append(extract_direct_thread(thread))
cursor = inbox.get("oldest_cursor")
if not cursor or (amount and len(threads) >= amount):
break
if amount:
threads = threads[:amount]
return threads
def direct_thread(self, thread_id: int, amount: int = 20) -> DirectThread:
"""Return full information by thread
"""
assert self.user_id, "Login required"
params = {
"visual_message_return_type": "unseen",
"direction": "older",
"seq_id": "40065", # 59663
"limit": "20",
}
cursor = None
items = []
while True:
if cursor:
params['cursor'] = cursor
try:
result = self.private_request(f"direct_v2/threads/{thread_id}/", params=params)
except ClientNotFoundError as e:
raise DirectThreadNotFound(e, thread_id=thread_id, **self.last_json)
thread = result['thread']
for item in thread['items']:
items.append(item)
cursor = thread.get("oldest_cursor")
if not cursor or (amount and len(items) >= amount):
break
if amount:
items = items[:amount]
thread['items'] = items
return extract_direct_thread(thread)
def direct_messages(self, thread_id: int, amount: int = 20) -> List[DirectMessage]:
"""Fetch list of messages by thread (helper)
"""
assert self.user_id, "Login required"
return self.direct_thread(thread_id, amount).messages
def direct_answer(self, thread_id: int, text: str) -> DirectMessage:
"""Send message
"""
assert self.user_id, "Login required"
return self.direct_send(text, [], [int(thread_id)])
def direct_send(self, text: str, user_ids: List[int] = [], thread_ids: List[int] = []) -> DirectMessage:
"""Send message
"""
assert self.user_id, "Login required"
method = "text"
kwargs = {}
if 'http' in text:
method = "link"
kwargs["link_text"] = text
kwargs["link_urls"] = dumps(
re.findall(r"(https?://[^\s]+)", text))
else:
kwargs["text"] = text
if thread_ids:
kwargs["thread_ids"] = dumps([int(tid) for tid in thread_ids])
if user_ids:
kwargs["recipient_users"] = dumps([[int(uid) for uid in user_ids]])
data = {
"client_context": self.generate_uuid(),
"action": "send_item",
**kwargs
}
result = self.private_request(
"direct_v2/threads/broadcast/%s/" % method,
data=self.with_default_data(data),
with_signature=False
)
return extract_direct_message(result["payload"])
| 35.678899 | 108 | 0.558498 | 3,643 | 0.936745 | 0 | 0 | 0 | 0 | 0 | 0 | 789 | 0.20288 |
b5eee5ae8e8ac24bba961d0d4420546bd6f06e1d
| 26,090 |
py
|
Python
|
src/main/python/cybercaptain/visualization/bar.py
|
FHNW-CyberCaptain/CyberCaptain
|
07c989190e997353fbf57eb7a386947d6ab8ffd5
|
[
"MIT"
] | 1 |
2018-10-01T10:59:55.000Z
|
2018-10-01T10:59:55.000Z
|
src/main/python/cybercaptain/visualization/bar.py
|
FHNW-CyberCaptain/CyberCaptain
|
07c989190e997353fbf57eb7a386947d6ab8ffd5
|
[
"MIT"
] | null | null | null |
src/main/python/cybercaptain/visualization/bar.py
|
FHNW-CyberCaptain/CyberCaptain
|
07c989190e997353fbf57eb7a386947d6ab8ffd5
|
[
"MIT"
] | 1 |
2021-11-01T00:09:00.000Z
|
2021-11-01T00:09:00.000Z
|
"""
This module contains the visualization bar class.
"""
import glob
import os
import re
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import FuncFormatter
from cybercaptain.utils.exceptions import ValidationError
from cybercaptain.visualization.base import visualization_base
from cybercaptain.utils.jsonFileHandler import json_file_reader
from cybercaptain.utils.helpers import str2bool
class visualization_bar(visualization_base):
"""
This class handles the bar graph plotting.
**Parameters**:
kwargs:
contains a dictionary of all attributes.
**Attributes**:
type:
the defined bar plot type (Currently supported: histogram, comparedbarplot, groupedbarplot, barplot3d, barplotgroupedstacked, barplotcomparedstacked)
dataAttribute:
in which attribute the values can be found in the dataset (E.g. 'example1.test.val')
Recommended to use the group-module and reuse the there set value attribute here.
groupNameAttribute:
in which attribute the grouped name can be found (E.g. 'example1.test.group')
Recommended to use the group-module and reuse the there set group attribute here.
threshold:
possibility to set a value threshold to hide smaller groups for example.
figureSize:
define a tuple to set the figure size proportion (E.g. '20, 10').
rotateXTicks:
int to rotate the x-ticks names if needed (E.g. 90 or -90).
rotateYTicks:
int to rotate the y-ticks names if needed (E.g. 90 or -90).
filenamesRegexExtract:
enables to extract stuff from the filenames to for example use on the x/y axis of file/run grouped plots (E.g. '([-+]\\d+)').
colormapAscending:
normalizes given values and set a color depending on their value (Ascending heat - possible to combine with 'colormap')
(Supported for: comparedbarplot, groupedbarplot, barplot3d - Defaults to False)
(Important: Ascending heat colors do not make sense for every plot although it is supported!)
colormap:
set the string for the colormap to be used on the graphs (Reference: https://matplotlib.org/users/colormaps.html)
horizontal:
the bool to display the barchart horizontal to the default vertical (Supported for: comparedbarplot, groupedbarplot, barplotcomparedstacked, barplotgroupedstacked)
scaledTo100:
the bool to scale a stacked bar plot to 100 (Supported for: barplotcomparedstacked, barplotgroupedstacked)
xlabel:
the string for the x-axis.
ylabel:
the string for the y-axis.
title:
the string for the title.
zlabel:
the string for the z-axis.
showYAxisFileNames:
the bool if on the BarPlot3D plot the filenames should be shown on the y-axis.
Can be combined with filenamesRegexExtract to just get certain things.
showGrid:
show the grid behind the plot (Defaults to False).
showLegend:
show the data legend for the chart (Defaults to True).
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.validate(kwargs)
# If subclass needs special variables define here
self.type = kwargs.get("type") # Histogram, ComparedBarPlot, GroupedBarPlot, BarPlot3D, StackedBarPlot, GroupedBarPlot
# General
self.data_attribute = kwargs.get("dataAttribute") # In which attribute to find the group value in the dataset
self.group_name_attribute = kwargs.get("groupNameAttribute") # In which attribute to find the group name in the dataset
self.x_label = kwargs.get("xlabel", "")
self.y_label = kwargs.get("ylabel", "")
self.title = kwargs.get("title", "")
self.threshold = kwargs.get("threshold")
self.figure_size = kwargs.get("figureSize", [20, 10])
self.filenames_regex_extract = kwargs.get("filenamesRegexExtract")
self.color_map_ascending = str2bool(kwargs.get("colormapAscending"))
self.color_map = kwargs.get("colormap")
self.rotate_xticks = kwargs.get("rotateXTicks", 0)
self.rotate_yticks = kwargs.get("rotateYTicks", 0)
self.horizontal = str2bool(kwargs.get("horizontal"))
self.show_grid = str2bool(kwargs.get("showGrid"))
self.show_legend = str2bool(kwargs.get("showLegend", True))
# Stacked Plots
self.scaled_to_100 = str2bool(kwargs.get("scaledTo100"))
# BarPlot3D
self.z_label = kwargs.get("zlabel", "")
self.show_y_axis_file_names = str2bool(kwargs.get("showYAxisFileNames"))
def run(self):
"""
The bar run method collects and bundles the data for the plotting method.
**Returns**:
``True`` if the run was successful.
``False``if the run did not end successful.
"""
self.cc_log("INFO", "Data Visualization Bar: Started")
success = False
self.cc_log("INFO", "Bar visualization type: %s" % self.type)
plt.rcParams['figure.figsize'] = (self.figure_size[0], self.figure_size[1])
files = glob.glob(self.src)
if len(files) < 1:
self.cc_log("ERROR", "No files to plot were found - maybe recheck wildcard if defined!")
return False
if self.type == "histogram":
success = self.plot_histogram(files)
elif self.type == "comparedbarplot":
success = self.plot_comparedbarplot(files)
elif self.type == "groupedbarplot":
success = self.plot_groupedbarplot(files)
elif self.type == "barplot3d":
success = self.plot_barplot3d(files)
elif self.type == "barplotcomparedstacked":
success = self.plot_barplotcomparedstacked(files)
elif self.type == "barplotgroupedstacked":
success = self.plot_barplotgroupedstacked(files)
else:
self.cc_log("ERROR", "Data Visualization Bar: An unknown bar plot type (%s) was defined!" % (self.type))
return False
if success:
self.cc_log("DEBUG", "Data Visualization Bar: The plot can be found at: %s" % self.target)
self.cc_log("INFO", "Data Visualization Bar: Finished")
return True
return False
def plot_comparedbarplot(self, files):
"""
Plots a simple compared barplot according to the groups and their values.
Multiple files/runs will show on the X-Axis and different groups beside eachother.
(colormapAscending supported - ascending heat for every group)
**Parameters**:
files : list
list of file paths.
**Returns**:
``True`` if the plot was successfully saved.
``False`` in case something failed.
"""
_, ax = plt.subplots()
file_count, names_list, data_dict = self.get_data_from_files(files)
if len(data_dict) == 0:
self.cc_log("WARNING", "Data length to plot is equal to zero - recheck dataAttribute, groupNameAttribute or threshold!")
return False
data_vals = list(data_dict.values())
data_keys = list(data_dict.keys())
self.set_color_cycle(len(data_keys), ax)
barWidth = 1/len(data_keys)
x_pos = np.arange(file_count)
for i in range(0, len(data_keys)):
custom_colormap = self.get_heat_colormap(data_vals[i]) # Ascending Heat If Activated
if self.horizontal:
plt.barh(x_pos, data_vals[i], height=barWidth, color=custom_colormap, edgecolor='white', label=data_keys[i])
x_pos = [p + barWidth for p in x_pos]
else:
plt.bar(x_pos, data_vals[i], width=barWidth, color=custom_colormap, edgecolor='white', label=data_keys[i])
x_pos = [p + barWidth for p in x_pos]
#data_keys_expanded = data_keys*file_count
#data_keys_expanded[0] = data_keys_expanded[0] + "\n"+names_list[0]
#for i in range(1, file_count):
# data_keys_expanded[i*len(data_keys)] = data_keys_expanded[i*len(data_keys)] + "\n"+names_list[i]
plt.xticks([0], names_list, rotation=self.rotate_xticks)
if self.horizontal:
plt.yticks(np.arange(file_count), names_list, rotation=self.rotate_yticks)
else:
plt.xticks(np.arange(file_count), names_list, rotation=self.rotate_xticks)
ax.set_ylabel(self.y_label, fontweight='bold')
ax.set_xlabel(self.x_label, fontweight='bold')
ax.set_title(self.title, fontweight='bold')
if self.show_legend: ax.legend(data_keys, loc = 'best')
if self.show_grid: plt.grid(linestyle='dotted')
plt.savefig(self.target, bbox_inches='tight')
plt.close('all')
return True
def plot_groupedbarplot(self, files):
"""
Plots a simple grouped barplot according to the groups and their values.
Multiple files/runs will show beside each other. X-Axis resembles the groups.
(colormapAscending supported - ascending heat colors for every run/file)
**Parameters**:
files : list
list of file paths.
**Returns**:
``True`` if the plot was successfully saved.
``False`` in case something failed.
"""
_, ax = plt.subplots()
file_count, names_list, data_dict = self.get_data_from_files(files)
if len(data_dict) == 0:
self.cc_log("WARNING", "Data length to plot is equal to zero - recheck dataAttribute, groupNameAttribute or threshold!")
return False
data_vals = list(data_dict.values())
data_keys = list(data_dict.keys())
self.set_color_cycle(len(data_keys), ax)
barWidth = 1/file_count
x_pos = np.arange(len(data_vals))
for i in range(0, file_count):
plot_values = [ x[i] for x in data_vals ]
custom_colormap = self.get_heat_colormap(plot_values) # Ascending Heat If Activated
if self.horizontal:
plt.barh(x_pos, plot_values, height=barWidth, color=custom_colormap, edgecolor='white', label=names_list[i])
x_pos = [x + barWidth for x in x_pos]
else:
plt.bar(x_pos, plot_values, width=barWidth, color=custom_colormap, edgecolor='white', label=names_list[i])
x_pos = [x + barWidth for x in x_pos]
if self.horizontal:
plt.yticks(np.arange(len(data_vals)), data_keys, rotation=self.rotate_yticks)
else:
plt.xticks(np.arange(len(data_vals)), data_keys, rotation=self.rotate_xticks)
ax.set_ylabel(self.y_label, fontweight='bold')
ax.set_xlabel(self.x_label, fontweight='bold')
ax.set_title(self.title, fontweight='bold')
if self.show_legend: ax.legend(loc = 'best')
if self.show_grid: plt.grid(linestyle='dotted')
plt.savefig(self.target, bbox_inches='tight')
plt.close('all')
return True
def plot_barplot3d(self, files):
"""
Plots a barplot plot in 3D x axis according to the groups.
Multiple files/runs will show on the z axis.
(colormapAscending supported - ascending heat colors for every run/file)
**Parameters**:
files : list
list of file paths.
**Returns**:
``True`` if the plot was successfully saved.
``False`` in case something failed.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
file_count, names_list, data_dict = self.get_data_from_files(files)
if len(data_dict) == 0:
self.cc_log("WARNING", "Data length to plot is equal to zero - recheck dataAttribute, groupNameAttribute or threshold!")
return False
data_vals = list(data_dict.values())
data_keys = list(data_dict.keys())
self.set_color_cycle(len(data_keys), ax)
for i in range(0, file_count):
plot_values = [ x[i] for x in data_vals ]
custom_colormap = self.get_heat_colormap(plot_values) # Ascending Heat If Activated
ax.bar(np.arange(len(data_keys)), plot_values, color=custom_colormap, zs=i, zdir='y', alpha=0.8)
ax.set_xticks(np.arange(len(data_keys)))
ax.set_xticklabels(data_keys, rotation=self.rotate_xticks)
ax.set_yticks(np.arange(file_count))
if self.show_y_axis_file_names:
ax.set_yticklabels(names_list, ha="left")
else:
ax.set_yticklabels([])
ax.set_xlabel(self.x_label, y=1.10, labelpad=20, fontweight='bold')
ax.set_ylabel(self.y_label, fontweight='bold')
ax.set_zlabel(self.z_label, fontweight='bold')
ax.set_title(self.title, y=1.02, fontweight='bold')
if self.show_grid: plt.grid(linestyle='dotted')
plt.savefig(self.target, bbox_inches='tight')
plt.close('all')
return True
def plot_barplotgroupedstacked(self, files):
"""
Plots a simple barplot according to the groups and their values.
Multiple files/runs will be shown stacked on each of the groups.
**Parameters**:
files : list
list of file paths.
**Returns**:
``True`` if the plot was successfully saved.
``False`` in case something failed.
"""
_, ax = plt.subplots()
file_count, names_list, data_dict = self.get_data_from_files(files)
if len(data_dict) == 0:
self.cc_log("WARNING", "Data length to plot is equal to zero - recheck dataAttribute, groupNameAttribute or threshold!")
return False
data_vals = list(data_dict.values())
data_keys = list(data_dict.keys())
self.set_color_cycle(len(data_keys), ax)
barWidth = 0.9
bottom = np.array([0] * len(data_keys))
if self.scaled_to_100: totals = [sum(x) for x in data_vals]
for i in range(0, file_count):
plot_values = [ x[i] for x in data_vals ]
if self.scaled_to_100: plot_values = [l / j * 100 for l,j in zip(plot_values, totals)]
if self.horizontal:
ax.barh(np.arange(len(data_keys)), plot_values, linewidth=0, height=barWidth, left=bottom, label=names_list[i])
bottom = bottom + plot_values
else:
ax.bar(np.arange(len(data_keys)), plot_values, linewidth=0, width=barWidth, bottom=bottom, label=names_list[i])
bottom = bottom + plot_values
if self.horizontal:
ax.set_yticks(np.arange(len(data_keys)))
ax.set_yticklabels(data_keys, rotation=self.rotate_yticks)
if self.scaled_to_100: ax.xaxis.set_major_formatter(FuncFormatter(lambda y, pos: "%d%%" % (y)))
else:
ax.set_xticks(np.arange(len(data_keys)))
ax.set_xticklabels(data_keys, rotation=self.rotate_xticks)
if self.scaled_to_100: ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: "%d%%" % (y)))
ax.set_ylabel(self.y_label, fontweight='bold')
ax.set_xlabel(self.x_label, fontweight='bold')
ax.set_title(self.title, fontweight='bold')
if self.show_legend: ax.legend(loc = 'best')
if self.show_grid: plt.grid(linestyle='dotted')
plt.savefig(self.target, bbox_inches='tight')
plt.close('all')
return True
def plot_barplotcomparedstacked(self, files):
"""
Plots a simple barplot but with the different files/runs shown on the x_axis.
Different groups/values are stacked.
**Parameters**:
files : list
list of file paths.
**Returns**:
``True`` if the plot was successfully saved.
``False`` in case something failed.
"""
_, ax = plt.subplots()
file_count, names_list, data_dict = self.get_data_from_files(files)
if len(data_dict) == 0:
self.cc_log("WARNING", "Data length to plot is equal to zero - recheck dataAttribute, groupNameAttribute or threshold!")
return False
data_vals = list(data_dict.values())
data_keys = list(data_dict.keys())
data_to_plot_length = len(data_vals[0])
self.set_color_cycle(len(data_keys), ax)
ind = np.arange(data_to_plot_length) # the x locations for the groups
width = 0.9 # the width of the bars: can also be len(x) sequence
bottom = [0] * data_to_plot_length # init a list with zeros for bottom
plts = []
if self.scaled_to_100: totals = [sum(x) for x in zip(*data_vals)]
for single_data_set in data_vals:
if self.scaled_to_100: single_data_set = [i / j * 100 for i,j in zip(single_data_set, totals)]
if self.horizontal:
plts.append(plt.barh(ind, single_data_set, linewidth=0, height=width, left=bottom))
else:
plts.append(plt.bar(ind, single_data_set, linewidth=0, width=width, bottom=bottom))
for i in range(len(single_data_set)):
bottom[i] = bottom[i] + single_data_set[i]
if self.horizontal:
plt.yticks(ind, names_list, rotation=self.rotate_yticks)
if self.scaled_to_100: ax.xaxis.set_major_formatter(FuncFormatter(lambda y, pos: "%d%%" % (y)))
else:
plt.xticks(ind, names_list, rotation=self.rotate_xticks)
if self.scaled_to_100: ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: "%d%%" % (y)))
plt.ylabel(self.y_label, fontweight='bold')
plt.xlabel(self.x_label, fontweight='bold')
plt.title(self.title, fontweight='bold')
if self.show_legend: plt.legend(plts, data_keys, loc='best', bbox_to_anchor=(1, 0.5))
plt.subplots_adjust(right=0.7)
if self.show_grid: plt.grid(linestyle='dotted')
plt.savefig(self.target, bbox_inches='tight')
plt.close('all')
return True
def plot_histogram(self, files):
"""
Plots a histogram.
**Parameters**:
files : list
list of file paths.
**Returns**:
``True`` if the plot was successfully saved.
``False`` in case something failed.
"""
_, ax = plt.subplots()
values_list = []
names_list = []
for file in files:
json_fr = json_file_reader(file)
values = []
while not json_fr.isEOF():
data = json_fr.readRecord()
value = data
for a in self.data_attribute.split('.'):
value = value[a]
# Threshold
if self.threshold and int(value) < int(self.threshold):
continue # Skip this line as its < threshold
values.append(value)
json_fr.close()
values_list.append(values)
names_list.append(os.path.basename(file))
self.set_color_cycle(len(names_list), ax)
ax.hist(values_list, label = names_list, bins=10, edgecolor='white')
ax.set_ylabel(self.y_label, fontweight='bold')
ax.set_xlabel(self.x_label, fontweight='bold')
ax.set_title(self.title, fontweight='bold')
if self.show_legend: ax.legend(loc = 'best')
if self.show_grid: plt.grid(linestyle='dotted')
plt.savefig(self.target, bbox_inches='tight')
plt.close('all')
return True
def get_data_from_files(self, files):
"""
Gets and extracts the data from the given fileslist.
**Parameters**:
files : list
list of filepaths to process.
**Returns**:
``file_count, names_list, data_dict`` amount of files, names list of the files, grouped data dict with the values scaled in case of missing data
"""
data_dict = {}
names_list = []
file_count = 0
for file in files:
json_fr = json_file_reader(file)
while not json_fr.isEOF():
json_data = json_fr.readRecord()
value = json_data
for a in self.data_attribute.split('.'):
value = value[a]
# Threshold
if self.threshold and int(value) < int(self.threshold):
continue # Skip this line as its < threshold
group_name = json_data
for a in self.group_name_attribute.split('.'):
group_name = group_name[a]
if group_name in data_dict:
data_dict[group_name].append(value)
else:
data_dict[group_name] = [0] * file_count
data_dict[group_name].append(value)
for gn in data_dict:
if len(data_dict[gn]) < file_count+1: # Will not be appended as filecount isnt incremented yet, +1 added
data_dict[gn].append(0)
json_fr.close()
# Add filenames list to names list or extract regex if defined
name = None
if self.filenames_regex_extract:
name = re.search(self.filenames_regex_extract, os.path.basename(file))
if name: name = name.group(0)
if not name: name = os.path.basename(file)
names_list.append(name)
file_count += 1
return file_count, names_list, data_dict
def set_color_cycle(self, amount, ax, colormap_name="tab20"):
"""
Sets the color cycle for the plot according to the amount needed.
**Parameters**:
amount : int
amount of colors needed.
ax : MatplotLib Axes Object
the axes subplot object to set the colors on.
colormap_name : MatplotLib ColorMap
the wanted colormap to set (More infos on: https://matplotlib.org/users/colormaps.html)
Default 'tab20'
"""
if self.color_map: colormap_name = self.color_map
cmap = plt.get_cmap(colormap_name)
ax.set_prop_cycle(plt.cycler('color', cmap(np.linspace(0, 1, amount))))
def get_heat_colormap(self, values, colormap="Reds"):
"""
Returns a custom heat asscending colormap according to the given values.
**Parameters**:
values : list
list of the values to normalize and get the heat ascending matplotlib colormap back.
colormap : str
possibility to use a custom heat ascending colormap (Default: Reds)
**Returns**:
``list`` containing a matplotlib color depending on the previous given value.
``None`` if 'colormapAscending' is not configured or False
"""
if self.color_map: colormap = self.color_map
if not self.color_map_ascending: return None
cNorm = colors.Normalize(vmin=min(values), vmax=max(values))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=plt.get_cmap(colormap))
return [scalarMap.to_rgba(v) for v in values]
def validate(self, kwargs):
"""
The validate method checks if all the input arguments are corret.
**Parameters**:
kwargs : dict
Contains a dict of all the arguments for the line chart visualisation.
"""
super().validate(kwargs)
self.cc_log("INFO", "Data Visualization Bar: started validation")
if not kwargs.get("type"):
raise ValidationError(self, ["type"], "Parameter cannot be empty!")
if not kwargs.get("dataAttribute"):
raise ValidationError(self, ["dataAttribute"], "Parameter cannot be empty!")
if not kwargs.get("groupNameAttribute") and kwargs.get("type") != "histogram":
raise ValidationError(self, ["groupNameAttribute"], "Parameter cannot be empty!")
if kwargs.get("threshold"):
try:
int(kwargs.get("threshold"))
except:
raise ValidationError(self, ["threshold"], "Parameter has to be an int!")
if kwargs.get("figureSize"):
if not isinstance(kwargs.get("figureSize"), list) or len(kwargs.get("figureSize")) != 2:
raise ValidationError(self, ["figureSize"], "Parameter has to be a list of two (E.g. 20, 10)!")
if kwargs.get("rotateXTicks"):
try:
int(kwargs.get("rotateXTicks"))
except:
raise ValidationError(self, ["rotateXTicks"], "Parameter has to be an int!")
if kwargs.get("rotateYTicks"):
try:
int(kwargs.get("rotateYTicks"))
except:
raise ValidationError(self, ["rotateYTicks"], "Parameter has to be an int!")
if kwargs.get("colormap"):
if kwargs.get("colormap") not in plt.colormaps(): raise ValidationError(self, ["colormap"], "Colormap has to be existing, check the matplotlibb docu!")
# Optional
#if not kwargs.get("title"):
# raise ValidationError(self, ["title"], "Parameter cannot be empty!")
#if not kwargs.get("ylabel"):
# raise ValidationError(self, ["ylabel"], "Parameter cannot be empty!")
#if not kwargs.get("xlabel"):
# raise ValidationError(self, ["xlabel"], "Parameter cannot be empty!")
#if not kwargs.get("zlabel"):
# raise ValidationError(self, ["zlabel"], "Parameter cannot be empty!")
#if not kwargs.get("horizontal"):
# raise ValidationError(self, ["horizontal"], "Parameter cannot be empty!")
#if not kwargs.get("scaledTo100"):
# raise ValidationError(self, ["scaledTo100"], "Parameter cannot be empty!")
self.cc_log("INFO", "Data Visualization Bar: finished validation")
| 41.086614 | 175 | 0.617555 | 25,568 | 0.979992 | 0 | 0 | 0 | 0 | 0 | 0 | 10,475 | 0.401495 |
b5f0389774cedeaa041026bfccf255de23607efa
| 3,560 |
py
|
Python
|
app/profiles/schemas/update.py
|
MrPeker/acikkaynak-service
|
21c3f2faaa84342d2fa95709293bc84d1e2a23ae
|
[
"Apache-2.0"
] | 5 |
2021-02-28T22:29:13.000Z
|
2021-11-29T00:24:28.000Z
|
app/profiles/schemas/update.py
|
MrPeker/acikkaynak-service
|
21c3f2faaa84342d2fa95709293bc84d1e2a23ae
|
[
"Apache-2.0"
] | null | null | null |
app/profiles/schemas/update.py
|
MrPeker/acikkaynak-service
|
21c3f2faaa84342d2fa95709293bc84d1e2a23ae
|
[
"Apache-2.0"
] | 3 |
2021-03-03T19:56:30.000Z
|
2021-03-06T22:10:35.000Z
|
import graphene
from app.common.library import graphql
from app.common.models import City
from ..models import Profile
from .queries import ProfileNode
# queries
class Query(graphene.ObjectType):
pass
# mutations
class ProfileUpdateMutation(graphene.Mutation):
Output = ProfileNode
class Arguments:
profile = graphene.ID(required=True)
slug = graphene.String()
first_name = graphene.String()
last_name = graphene.String()
gender = graphene.String()
birthdate = graphene.String()
email = graphene.String()
phone = graphene.String()
profile_picture_uri = graphene.String()
locale = graphene.String()
bio = graphene.String()
location_city = graphene.ID()
languages = graphene.List(graphene.ID)
timezone = graphene.String()
@classmethod
# pylint:disable=unused-argument
def mutate(cls, root, info, **kwargs):
# TODO ensure that that profile belongs to this user
profile_id = graphql.global_id_to_model_id(kwargs["profile"])
if profile_id is None:
raise ValueError("Profile id is invalid")
profile = Profile.objects.get(pk=profile_id)
cognito_needs_update = False
user = None
if profile.users.count() == 1:
user = profile.users.first()
# if profile.users.filter(uuid=info.context.user.uuid).count() == 0:
# raise ValueError("you don't own this profile")
# for standard fields
# (keyword, update_profile, update_user, update_cognito)
fields = [
("slug", True, False, False),
("first_name", True, True, True),
("last_name", True, True, True),
("gender", True, True, True),
("birthdate", True, True, True),
("email", True, True, True),
("phone", True, True, True),
("profile_picture_uri", True, True, True),
("bio", True, False, False),
("timezone", True, False, False),
("locale", False, True, True),
]
for keyword, update_profile, update_user, update_cognito in fields:
if kwargs.get(keyword):
if update_profile:
setattr(profile, keyword, kwargs[keyword])
if update_user and user is not None:
setattr(user, keyword, kwargs[keyword])
if update_cognito:
cognito_needs_update = True
# for *-to-many fields
if (kwargs.get("languages")):
profile.languages.clear()
for language_global_id in kwargs["languages"]:
language_id = graphql.global_id_to_model_id(language_global_id)
if language_id is not None:
profile.languages.add(language_id)
if (kwargs.get("location_city")):
location_city_id = graphql.global_id_to_model_id(kwargs["location_city"])
if location_city_id is None:
raise ValueError("City id is invalid")
location_city = City.objects.get(pk=location_city_id)
location_country = location_city.country
profile.location_city = location_city
profile.location_country = location_country
if cognito_needs_update:
pass # TODO: update cognito
profile.full_clean()
profile.save()
return profile
class Mutation(graphene.ObjectType):
profile_update = ProfileUpdateMutation.Field()
| 31.504425 | 85 | 0.601404 | 3,373 | 0.947472 | 0 | 0 | 2,611 | 0.733427 | 0 | 0 | 555 | 0.155899 |
b5f1bcd8c2a8c9268b813650480c225371c73233
| 7,401 |
py
|
Python
|
kubevirt/models/v1_generation_status.py
|
ansijain/client-python
|
444ab92a68371c1ccd89314753fa7ab5c4ac9bbe
|
[
"Apache-2.0"
] | 21 |
2018-02-21T23:59:28.000Z
|
2021-12-08T05:47:37.000Z
|
kubevirt/models/v1_generation_status.py
|
ansijain/client-python
|
444ab92a68371c1ccd89314753fa7ab5c4ac9bbe
|
[
"Apache-2.0"
] | 47 |
2018-02-01T15:35:01.000Z
|
2022-02-11T07:45:54.000Z
|
kubevirt/models/v1_generation_status.py
|
ansijain/client-python
|
444ab92a68371c1ccd89314753fa7ab5c4ac9bbe
|
[
"Apache-2.0"
] | 19 |
2018-04-03T09:20:52.000Z
|
2021-06-01T06:07:28.000Z
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1GenerationStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'group': 'str',
'hash': 'str',
'last_generation': 'int',
'name': 'str',
'namespace': 'str',
'resource': 'str'
}
attribute_map = {
'group': 'group',
'hash': 'hash',
'last_generation': 'lastGeneration',
'name': 'name',
'namespace': 'namespace',
'resource': 'resource'
}
def __init__(self, group=None, hash=None, last_generation=None, name=None, namespace=None, resource=None):
"""
V1GenerationStatus - a model defined in Swagger
"""
self._group = None
self._hash = None
self._last_generation = None
self._name = None
self._namespace = None
self._resource = None
self.group = group
if hash is not None:
self.hash = hash
self.last_generation = last_generation
self.name = name
if namespace is not None:
self.namespace = namespace
self.resource = resource
@property
def group(self):
"""
Gets the group of this V1GenerationStatus.
group is the group of the thing you're tracking
:return: The group of this V1GenerationStatus.
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""
Sets the group of this V1GenerationStatus.
group is the group of the thing you're tracking
:param group: The group of this V1GenerationStatus.
:type: str
"""
if group is None:
raise ValueError("Invalid value for `group`, must not be `None`")
self._group = group
@property
def hash(self):
"""
Gets the hash of this V1GenerationStatus.
hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps
:return: The hash of this V1GenerationStatus.
:rtype: str
"""
return self._hash
@hash.setter
def hash(self, hash):
"""
Sets the hash of this V1GenerationStatus.
hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps
:param hash: The hash of this V1GenerationStatus.
:type: str
"""
self._hash = hash
@property
def last_generation(self):
"""
Gets the last_generation of this V1GenerationStatus.
lastGeneration is the last generation of the workload controller involved
:return: The last_generation of this V1GenerationStatus.
:rtype: int
"""
return self._last_generation
@last_generation.setter
def last_generation(self, last_generation):
"""
Sets the last_generation of this V1GenerationStatus.
lastGeneration is the last generation of the workload controller involved
:param last_generation: The last_generation of this V1GenerationStatus.
:type: int
"""
if last_generation is None:
raise ValueError("Invalid value for `last_generation`, must not be `None`")
self._last_generation = last_generation
@property
def name(self):
"""
Gets the name of this V1GenerationStatus.
name is the name of the thing you're tracking
:return: The name of this V1GenerationStatus.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1GenerationStatus.
name is the name of the thing you're tracking
:param name: The name of this V1GenerationStatus.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def namespace(self):
"""
Gets the namespace of this V1GenerationStatus.
namespace is where the thing you're tracking is
:return: The namespace of this V1GenerationStatus.
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""
Sets the namespace of this V1GenerationStatus.
namespace is where the thing you're tracking is
:param namespace: The namespace of this V1GenerationStatus.
:type: str
"""
self._namespace = namespace
@property
def resource(self):
"""
Gets the resource of this V1GenerationStatus.
resource is the resource type of the thing you're tracking
:return: The resource of this V1GenerationStatus.
:rtype: str
"""
return self._resource
@resource.setter
def resource(self, resource):
"""
Sets the resource of this V1GenerationStatus.
resource is the resource type of the thing you're tracking
:param resource: The resource of this V1GenerationStatus.
:type: str
"""
if resource is None:
raise ValueError("Invalid value for `resource`, must not be `None`")
self._resource = resource
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1GenerationStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 27.411111 | 125 | 0.580327 | 7,094 | 0.958519 | 0 | 0 | 4,100 | 0.553979 | 0 | 0 | 4,117 | 0.556276 |
b5f230d3037e9e1528cdc347b55ec3805c78a481
| 3,352 |
py
|
Python
|
scripts/plot_fits.py
|
trichter/robust_earthquake_spectra
|
ef816e30944293e27c0d5da4d31ec2184e6d187b
|
[
"MIT"
] | 8 |
2021-07-23T13:01:29.000Z
|
2022-03-27T17:57:36.000Z
|
scripts/plot_fits.py
|
trichter/robust_earthquake_spectra
|
ef816e30944293e27c0d5da4d31ec2184e6d187b
|
[
"MIT"
] | null | null | null |
scripts/plot_fits.py
|
trichter/robust_earthquake_spectra
|
ef816e30944293e27c0d5da4d31ec2184e6d187b
|
[
"MIT"
] | null | null | null |
# Copyright 2021 Tom Eulenfeld, MIT license
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pickle
from qopen.core import get_pair, Gsmooth
from qopen.rt import G as G_func
def set_gridlabels(ax, i, n, N, xlabel='frequency (Hz)', ylabel=None):
if i % n != 0 and ylabel:
plt.setp(ax.get_yticklabels(), visible=False)
elif i // n == (n - 1) // 2 and ylabel:
ax.set_ylabel(ylabel)
if i < N - n and xlabel:
plt.setp(ax.get_xticklabels(), visible=False)
elif i % n == (n - 1) // 2 and i >= N - n - 1 and xlabel:
ax.set_xlabel(xlabel)
def _get_times(tr):
t0 = tr.stats.starttime - tr.stats.origintime
return np.arange(len(tr)) * tr.stats.delta + t0
def plot_fits(energies, g0, b, W, R, v0, info, smooth=None,
smooth_window='bartlett'):
fs = 250 / 25.4
plt.figure(figsize=(fs, 0.6*fs))
tcoda, tbulk, Ecoda, Ebulk, Gcoda, Gbulk = info
N = len(energies)
nx, ny = 3, 3
gs = gridspec.GridSpec(ny, nx, wspace=0.06, hspace=0.08)
share = None
if b is None:
b = 0
c1 = 'mediumblue'
c2 = 'darkred'
c1l = '#8181CD'
c2l = '#8B6969'
for i, energy in enumerate(energies):
evid, station = get_pair(energy)
ax = plt.subplot(gs[i // nx, i % nx], sharex=share, sharey=share)
plot = ax.semilogy
def get_Emod(G, t):
return R[station] * W[evid] * G * np.exp(-b * t)
st = energy.stats
r = st.distance
t = _get_times(energy) + r / v0 - (st.sonset - st.origintime)
if smooth:
plot(t, energy.data_unsmoothed, color='0.7')
plot(t, energy.data, color=c1l)
G_ = Gsmooth(G_func, r, t, v0, g0, smooth=smooth,
smooth_window=smooth_window)
Emod = get_Emod(G_, t)
index = np.argwhere(Emod < 1e-30)[-1]
Emod[index] = 1e-30
plot(t, Emod, color=c2l)
plot(tcoda[i], Ecoda[i], color=c1)
Emodcoda = get_Emod(Gcoda[i], tcoda[i])
plot(tcoda[i], Emodcoda, color=c2)
if tbulk and len(tbulk) > 0:
plot(tbulk[i], Ebulk[i], 'o', color=c1, mec=c1, ms=4)
Emodbulk = get_Emod(Gbulk[i], tbulk[i])
plot(tbulk[i], Emodbulk, 'o', ms=3,
color=c2, mec=c2)
l = '%s\n%dkm' % (station, r / 1000)
ax.annotate(l, (1, 1), (-5, -5), 'axes fraction',
'offset points', ha='right', va='top', size='x-small')
ylabel = 'spectral energy density $E$ (Jm$^{-3}$Hz$^{-1}$)'
set_gridlabels(ax, i, nx, N, xlabel='time (s)', ylabel=ylabel)
kw = dict(color='darkgreen', alpha=0.5, lw=0, zorder=10000)
ax.axvspan(tcoda[i][0]-4, tcoda[i][0]-0.3, 0.05, 0.08, **kw)
ax.axvspan(tcoda[i][0]+0.3, tcoda[i][-1], 0.05, 0.08, **kw)
if share is None:
share = ax
ax.yaxis.set_minor_locator(mpl.ticker.NullLocator())
ax.set_yticks(10. ** np.arange(-11, -5, 2))
ax.set_xlim((-2, 62))
ax.set_ylim((1e-13 / 1.5, 1e-6 * 1.5))
if __name__ == '__main__':
fname = '../qopen/01_go/fits_20186784_04.00Hz-08.00Hz.pkl'
with open(fname, 'rb') as f:
tup = pickle.load(f)
plot_fits(*tup)
plt.savefig('../figs/qopen_fits_20186784_4-8Hz.pdf', bbox_inches='tight')
| 34.204082 | 77 | 0.568019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.107697 |
b5f35bed476c5278cc37b5eb93da2b3545e9bfe8
| 957 |
py
|
Python
|
magmango/tests/test_potcar.py
|
nimalec/Magno
|
016bed1c2fb8275ac76ece3d0b7f39c4ebc45551
|
[
"MIT"
] | 1 |
2021-01-08T18:22:13.000Z
|
2021-01-08T18:22:13.000Z
|
magmango/tests/test_potcar.py
|
nimalec/Magno
|
016bed1c2fb8275ac76ece3d0b7f39c4ebc45551
|
[
"MIT"
] | null | null | null |
magmango/tests/test_potcar.py
|
nimalec/Magno
|
016bed1c2fb8275ac76ece3d0b7f39c4ebc45551
|
[
"MIT"
] | null | null | null |
import unittest
import os
import numpy as np
from pymatgen import Structure
from magmango.calculation.potcar import PotcarSettings
#
# class PotcarSettingsTest(unittest.TestCase):
# def setUp(self):
# self.potcar_file_path = "data/potcar_pto"
# #self.structure = Structure.from_file(self.poscar_file_path)
#
# def test_from_input(self):
# #poscar_sett = PoscarSettings(self.structure, self.poscar_file_path)
# #self.assertEqual(poscar_sett._structure, self.structure)
#
# # def test_from_file(self):
# # poscar_infile_sett = PoscarSettings()
# # poscar_infile_sett.poscar_from_file(self.poscar_file_path)
# # struct = poscar_infile_sett._structure
# # self.assertEqual(struct, self.structure)
#
# def test_update_settings(self):
# poscar_infile_sett = PoscarSettings()
# poscar_infile_sett.poscar_from_file(self.poscar_file_path)
# poscar_sett = poscar_infile_sett._structure
| 35.444444 | 76 | 0.736677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 805 | 0.84117 |
b5f407423805cba0b85dc8b97c1c27b8ba3da9b6
| 225 |
py
|
Python
|
answers/Aryan Goyal/Day 10/Que 1.py
|
arc03/30-DaysOfCode-March-2021
|
6d6e11bf70280a578113f163352fa4fa8408baf6
|
[
"MIT"
] | 22 |
2021-03-16T14:07:47.000Z
|
2021-08-13T08:52:50.000Z
|
answers/Aryan Goyal/Day 10/Que 1.py
|
arc03/30-DaysOfCode-March-2021
|
6d6e11bf70280a578113f163352fa4fa8408baf6
|
[
"MIT"
] | 174 |
2021-03-16T21:16:40.000Z
|
2021-06-12T05:19:51.000Z
|
answers/Aryan Goyal/Day 10/Que 1.py
|
arc03/30-DaysOfCode-March-2021
|
6d6e11bf70280a578113f163352fa4fa8408baf6
|
[
"MIT"
] | 135 |
2021-03-16T16:47:12.000Z
|
2021-06-27T14:22:38.000Z
|
def pangram(s):
a = "abcdefghijklmnopqrstuvwxyz"
for i in a:
if i not in s.lower():
return False
return True
# main
string1 = input()
if(pangram(string1) == True):
print("Yes")
else:
print("No")
| 17.307692 | 35 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.191111 |
b5f4eae105a3ccda0bbf32f61e4d9bc409056d85
| 773 |
py
|
Python
|
website/addons/dropbox/tests/test_serializer.py
|
DanielSBrown/osf.io
|
98dda2ac237377197acacce78274bc0a4ce8f303
|
[
"Apache-2.0"
] | null | null | null |
website/addons/dropbox/tests/test_serializer.py
|
DanielSBrown/osf.io
|
98dda2ac237377197acacce78274bc0a4ce8f303
|
[
"Apache-2.0"
] | 13 |
2020-03-24T15:29:41.000Z
|
2022-03-11T23:15:28.000Z
|
website/addons/dropbox/tests/test_serializer.py
|
DanielSBrown/osf.io
|
98dda2ac237377197acacce78274bc0a4ce8f303
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Serializer tests for the Dropbox addon."""
from nose.tools import * # noqa (PEP8 asserts)
from website.addons.base.testing.serializers import StorageAddonSerializerTestSuiteMixin
from website.addons.dropbox.tests.utils import MockDropbox
from website.addons.dropbox.tests.factories import DropboxAccountFactory
from website.addons.dropbox.serializer import DropboxSerializer
from tests.base import OsfTestCase
mock_client = MockDropbox()
class TestDropboxSerializer(StorageAddonSerializerTestSuiteMixin, OsfTestCase):
addon_short_name = 'dropbox'
Serializer = DropboxSerializer
ExternalAccountFactory = DropboxAccountFactory
client = mock_client
def set_provider_id(self, pid):
self.node_settings.folder = pid
| 32.208333 | 88 | 0.798189 | 302 | 0.390686 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.126779 |
b5f7ed8a0664870db210f6051f62a7c08134ae57
| 9,357 |
py
|
Python
|
tumblrlikes.py
|
cesarmiquel/Tumblr-Likes
|
3a96e979dbb420553535dd73320f3e7206bcbbfc
|
[
"MIT"
] | 1 |
2017-03-09T23:47:19.000Z
|
2017-03-09T23:47:19.000Z
|
tumblrlikes.py
|
cesarmiquel/Tumblr-Likes
|
3a96e979dbb420553535dd73320f3e7206bcbbfc
|
[
"MIT"
] | null | null | null |
tumblrlikes.py
|
cesarmiquel/Tumblr-Likes
|
3a96e979dbb420553535dd73320f3e7206bcbbfc
|
[
"MIT"
] | null | null | null |
import os
import urllib
import json
import pprint
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.api import urlfetch
import jinja2
import webapp2
JINJA_ENVIRONMENT = jinja2.Environment(
loader = jinja2.FileSystemLoader(os.path.dirname(__file__) + '/templates'),
extensions = ['jinja2.ext.autoescape'])
# Blogs
class Blog(ndb.Model):
name = ndb.StringProperty() # ori
url = ndb.StringProperty()
title = ndb.StringProperty()
posts = ndb.IntegerProperty()
# Blog Post Image
class BlogPostImage(ndb.Model):
url = ndb.StringProperty() # ori
small_url = ndb.StringProperty() # 250
medium_url = ndb.StringProperty() # 500
caption = ndb.TextProperty() # 500
# Blog post entity
class BlogPost(ndb.Model):
link_url = ndb.StringProperty()
post_url = ndb.StringProperty()
blog_name = ndb.StringProperty(indexed=True)
caption = ndb.TextProperty()
photos = ndb.StructuredProperty(BlogPostImage, repeated=True)
# Get blog likes and add them to queue
class ProcessBlogLikes(webapp2.RequestHandler):
def get(self):
updated_blogs = {}
posts_saved = 0
self.api_key = self.request.get("api_key")
blog_name = self.request.get("blog_name")
offset = int(self.request.get("offset"))
url = 'http://api.tumblr.com/v2/blog/%s/likes?api_key=%s&limit=20&offset=%d' % (blog_name, self.api_key, offset)
result = urlfetch.fetch(url)
if result.status_code == 200:
response = json.loads(result.content)
liked_posts = response['response']['liked_posts']
for post in liked_posts:
# only process photos
if post['type'] != 'photo':
continue
posts_saved = posts_saved + self.save_post(post)
if post['blog_name'] not in updated_blogs:
updated_blogs[post['blog_name']] = 1
# Update number of posts
for blog_name in updated_blogs:
num_posts = BlogPost.query(BlogPost.blog_name == blog_name).count()
blog_key = ndb.Key('Blog', blog_name)
blog = blog_key.get()
blog.posts = num_posts
blog.put()
print "Found %d posts in %s" % (num_posts, blog_name)
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps({'posts_saved': posts_saved}))
def save_post(self, post):
# save blog
blog_key = ndb.Key('Blog', post['blog_name'])
blog = blog_key.get()
if not blog:
blog = Blog()
blog.name = post['blog_name']
blog.key = blog_key
blog.posts = 0
blog.url = blog.name + '.tumblr.com'
url = 'http://api.tumblr.com/v2/blog/%s.tumblr.com/info?api_key=%s' % (blog.name, self.api_key)
result = urlfetch.fetch(url)
if result.status_code == 200:
blog_info = json.loads(result.content)
# save blog
blog.title = blog_info['response']['blog']['title']
blog.put()
# find post in DB. If not found save
post_key = ndb.Key('BlogPost', post['id'])
blog_post = post_key.get()
if not blog_post:
blog_post = BlogPost()
blog_post.blog_name = post['blog_name']
blog_post.key = post_key
blog_post.link_url = post.get('link_url', '')
blog_post.post_url = post.get('post_url', '')
blog_post.caption = post.get('caption', '')
# save photos
photos = post.get('photos', [])
post_photos = []
for photo in photos:
blog_photo = BlogPostImage()
blog_photo.url = photo['original_size']['url']
blog_photo.caption = photo.get('caption', '')
for size in photo['alt_sizes']:
if size['width'] == 250:
blog_photo.small_url = size['url']
if size['width'] == 500:
blog_photo.medium_url = size['url']
post_photos.append(blog_photo)
blog_post.photos = post_photos
blog_post.put()
return 1
return 0
# Update blog stats and information
class UpdateBlogInfo(webapp2.RequestHandler):
def get(self):
posts_processed = 0
api_key = self.request.get("api_key")
blog_name = self.request.get("blog_name")
offset = int(self.request.get("offset"))
options = ndb.QueryOptions(offset=offset, limit=20)
blog_names = Blog.query(default_options=options)
response = []
for blog in blog_names:
if blog.title == None:
url = 'http://api.tumblr.com/v2/blog/%s.tumblr.com/info?api_key=%s' % (blog.name, api_key)
result = urlfetch.fetch(url)
if result.status_code == 200:
blog_info = json.loads(result.content)
# count how many posts we have
num_posts = BlogPost.query(BlogPost.blog_name == blog.name).count()
# save blog
blog.title = blog_info['response']['blog']['title']
blog.url = blog.name + '.tumblr.com'
blog.posts = num_posts
blog.put()
response.append({'name': blog.name, 'title': blog_info['response']['blog']['title'], 'url': blog.name + '.tumblr.com', 'posts': num_posts})
posts_processed += 1
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps({'posts_processed': posts_processed, 'blogs':response}))
# Retrieve the list of available blogs
class GetBlogList(webapp2.RequestHandler):
def get(self):
blog_names = Blog.query()
response = []
for blog in blog_names:
response.append({'name': blog.name, 'title': blog.title, 'posts': blog.posts, 'url': blog.url})
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps({'blogs': response}))
# Retrieve posts for a blog
class GetBlogPosts(webapp2.RequestHandler):
def get(self):
blog_name = self.request.get('blog_name')
cursor = self.request.get('cursor')
page = self.request.get('page')
if page == '':
page = '0'
response = self.get_posts(blog_name, cursor, int(page))
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps({'posts': response['posts'], 'name': blog_name, 'cursor': response['cursor'], 'more': response['more']}))
def get_posts(self, blog_name, cursor = '', page = 0):
page_size = 30
curs = ndb.Cursor(urlsafe = cursor)
qo = ndb.QueryOptions(offset = page * page_size)
if blog_name != 'likes':
blog_posts, next_cursor, more = BlogPost.query(BlogPost.blog_name == blog_name).order(-BlogPost.key).fetch_page(page_size, start_cursor = curs, options=qo)
else:
blog_posts, next_cursor, more = BlogPost.query().order(-BlogPost.key).fetch_page(page_size, start_cursor = curs, options=qo)
# DEBUG blog_posts, next_cursor, more = BlogPost.query().order(-BlogPost.key).fetch_page(page_size, start_cursor = curs, options=qo)
response = {'posts': [], 'cursor': '', 'more': more}
for post in blog_posts:
new_post = {
'blog_name': post.blog_name,
'link_url': post.link_url,
'post_url': post.post_url,
'caption': post.caption,
'photos': []
}
for photo in post.photos:
new_post['photos'].append(
{
'url': photo.url,
'small_url': photo.small_url,
'medium_url': photo.medium_url,
'caption': photo.caption,
}
)
response['posts'].append(new_post)
if next_cursor != None:
response['cursor'] = next_cursor.urlsafe()
else:
response['cursor'] = ''
return response
# Retrieve posts for a blog
class GetBlogPostsHtml(GetBlogPosts):
def get(self):
blog_name = self.request.get('blog_name')
response = {'name': blog_name, 'posts': self.get_posts(blog_name)}
self.response.headers['Content-Type'] = 'text/html'
template = JINJA_ENVIRONMENT.get_template('post-images.html')
self.response.write(template.render(response))
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/html'
template_values = []
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_values))
application = webapp2.WSGIApplication([
('/', MainPage),
('/blogs', GetBlogList),
('/posts', GetBlogPosts),
('/postshtml', GetBlogPostsHtml),
('/process', ProcessBlogLikes),
('/updatestats', UpdateBlogInfo),
], debug=True)
| 36.267442 | 167 | 0.578284 | 8,507 | 0.909159 | 0 | 0 | 0 | 0 | 0 | 0 | 1,769 | 0.189056 |
b5f8afd3209dc9c313d59f605ef9e611cf525951
| 9,348 |
py
|
Python
|
tests/test_reliable_redis_backend.py
|
thread/django-lightweight-queue
|
2c67eb13a454fa1a02f8445c26915b6e9261fdad
|
[
"BSD-3-Clause"
] | 23 |
2015-04-29T04:47:02.000Z
|
2022-03-11T12:43:01.000Z
|
tests/test_reliable_redis_backend.py
|
thread/django-lightweight-queue
|
2c67eb13a454fa1a02f8445c26915b6e9261fdad
|
[
"BSD-3-Clause"
] | 23 |
2015-02-27T14:30:47.000Z
|
2021-12-02T14:18:34.000Z
|
tests/test_reliable_redis_backend.py
|
thread/django-lightweight-queue
|
2c67eb13a454fa1a02f8445c26915b6e9261fdad
|
[
"BSD-3-Clause"
] | 1 |
2015-08-18T12:27:08.000Z
|
2015-08-18T12:27:08.000Z
|
import datetime
import unittest
import contextlib
import unittest.mock
from typing import Any, Dict, Tuple, Mapping, Iterator, Optional
import fakeredis
from django_lightweight_queue.job import Job
from django_lightweight_queue.types import QueueName
from django_lightweight_queue.backends.reliable_redis import (
ReliableRedisBackend,
)
from . import settings
from .mixins import RedisCleanupMixin
class ReliableRedisDeduplicationTests(RedisCleanupMixin, unittest.TestCase):
longMessage = True
prefix = settings.LIGHTWEIGHT_QUEUE_REDIS_PREFIX
def create_job(
self,
path: str = 'path',
args: Tuple[Any, ...] = ('args',),
kwargs: Optional[Dict[str, Any]] = None,
timeout: Optional[int] = None,
sigkill_on_stop: bool = False,
created_time: Optional[datetime.datetime] = None,
) -> Job:
if created_time is None:
created_time = self.start_time
job = Job(path, args, kwargs or {}, timeout, sigkill_on_stop)
job.created_time = created_time
return job
def enqueue_job(self, queue: QueueName, *args: Any, **kwargs: Any) -> Job:
job = self.create_job(*args, **kwargs)
self.backend.enqueue(job, queue)
return job
@contextlib.contextmanager
def mock_workers(self, workers: Mapping[str, int]) -> Iterator[None]:
with unittest.mock.patch(
'django_lightweight_queue.utils._accepting_implied_queues',
new=False,
), unittest.mock.patch.dict(
'django_lightweight_queue.app_settings.WORKERS',
workers,
):
yield
def setUp(self) -> None:
with unittest.mock.patch('redis.StrictRedis', fakeredis.FakeStrictRedis):
self.backend = ReliableRedisBackend()
self.client = self.backend.client
super(ReliableRedisDeduplicationTests, self).setUp()
self.start_time = datetime.datetime.utcnow()
def test_empty_queue(self):
result = self.backend.deduplicate('empty-queue')
self.assertEqual(
(0, 0),
result,
"Should do nothing when queue empty",
)
def test_single_entry_in_queue(self):
QUEUE = 'single-job-queue'
self.enqueue_job(QUEUE)
# sanity check
self.assertEqual(
1,
self.backend.length(QUEUE),
)
result = self.backend.deduplicate(QUEUE)
self.assertEqual(
(1, 1),
result,
"Should do nothing when queue has only unique jobs",
)
self.assertEqual(
1,
self.backend.length(QUEUE),
"Should still be a single entry in the queue",
)
def test_unique_entries_in_queue(self):
QUEUE = 'unique-jobs-queue'
self.enqueue_job(QUEUE, args=('args1',))
self.enqueue_job(QUEUE, args=('args2',))
# sanity check
self.assertEqual(
2,
self.backend.length(QUEUE),
)
result = self.backend.deduplicate(QUEUE)
self.assertEqual(
(2, 2),
result,
"Should do nothing when queue has only unique jobs",
)
self.assertEqual(
2,
self.backend.length(QUEUE),
"Should still be a single entry in the queue",
)
def test_duplicate_entries_in_queue(self):
QUEUE = 'duplicate-jobs-queue'
self.enqueue_job(QUEUE)
self.enqueue_job(QUEUE)
# sanity check
self.assertEqual(
2,
self.backend.length(QUEUE),
)
result = self.backend.deduplicate(QUEUE)
self.assertEqual(
(2, 1),
result,
"Should remove duplicate entries from queue",
)
self.assertEqual(
1,
self.backend.length(QUEUE),
"Should still be a single entry in the queue",
)
def test_preserves_order_with_fixed_timestamps(self):
QUEUE = 'job-queue'
WORKER_NUMBER = 0
self.enqueue_job(QUEUE, args=['args1'])
self.enqueue_job(QUEUE, args=['args2'])
self.enqueue_job(QUEUE, args=['args1'])
self.enqueue_job(QUEUE, args=['args3'])
self.enqueue_job(QUEUE, args=['args2'])
self.enqueue_job(QUEUE, args=['args1'])
# sanity check
self.assertEqual(
6,
self.backend.length(QUEUE),
)
result = self.backend.deduplicate(QUEUE)
self.assertEqual(
(6, 3),
result,
"Should remove duplicate entries from queue",
)
self.assertEqual(
3,
self.backend.length(QUEUE),
"Wrong number of jobs remaining in queue",
)
job = self.backend.dequeue(QUEUE, WORKER_NUMBER, timeout=1)
self.assertEqual(
['args1'],
job.args,
"First job dequeued should be the first job enqueued",
)
self.backend.processed_job(QUEUE, WORKER_NUMBER, job)
job = self.backend.dequeue(QUEUE, WORKER_NUMBER, timeout=1)
self.assertEqual(
['args2'],
job.args,
"Second job dequeued should be the second job enqueued",
)
self.backend.processed_job(QUEUE, WORKER_NUMBER, job)
job = self.backend.dequeue(QUEUE, WORKER_NUMBER, timeout=1)
self.assertEqual(
['args3'],
job.args,
"Third job dequeued should be the third job enqueued",
)
def test_preserves_order_with_unique_timestamps(self):
QUEUE = 'job-queue'
WORKER_NUMBER = 0
time = self.start_time
self.enqueue_job(QUEUE, args=['args1'], created_time=time)
time += datetime.timedelta(seconds=1)
self.enqueue_job(QUEUE, args=['args2'], created_time=time)
time += datetime.timedelta(seconds=1)
self.enqueue_job(QUEUE, args=['args1'], created_time=time)
time += datetime.timedelta(seconds=1)
self.enqueue_job(QUEUE, args=['args3'], created_time=time)
time += datetime.timedelta(seconds=1)
self.enqueue_job(QUEUE, args=['args2'], created_time=time)
time += datetime.timedelta(seconds=1)
self.enqueue_job(QUEUE, args=['args1'], created_time=time)
# sanity check
self.assertEqual(
6,
self.backend.length(QUEUE),
)
result = self.backend.deduplicate(QUEUE)
self.assertEqual(
(6, 3),
result,
"Should remove duplicate entries from queue",
)
self.assertEqual(
3,
self.backend.length(QUEUE),
"Wrong number of jobs remaining in queue",
)
job = self.backend.dequeue(QUEUE, WORKER_NUMBER, timeout=1)
self.assertEqual(
['args1'],
job.args,
"First job dequeued should be the first job enqueued",
)
self.backend.processed_job(QUEUE, WORKER_NUMBER, job)
job = self.backend.dequeue(QUEUE, WORKER_NUMBER, timeout=1)
self.assertEqual(
['args2'],
job.args,
"Second job dequeued should be the second job enqueued",
)
self.backend.processed_job(QUEUE, WORKER_NUMBER, job)
job = self.backend.dequeue(QUEUE, WORKER_NUMBER, timeout=1)
self.assertEqual(
['args3'],
job.args,
"Third job dequeued should be the third job enqueued",
)
def test_startup_recovers_orphaned_job(self):
QUEUE = 'the-queue'
self.enqueue_job(QUEUE)
orig_job = self.backend.dequeue(QUEUE, worker_number=3, timeout=1)
self.assertEqual(
0,
self.backend.length(QUEUE),
"Queue should appear empty after dequeuing job",
)
with self.mock_workers({QUEUE: 1}):
self.backend.startup(QUEUE)
self.assertEqual(
1,
self.backend.length(QUEUE),
"Queue should have recovered entry after running startup",
)
actual_job = self.backend.dequeue(QUEUE, worker_number=1, timeout=1)
self.assertEqual(
orig_job.as_dict(),
actual_job.as_dict(),
"The queue job should be the original one",
)
def test_startup_doesnt_move_job_on_known_queue(self):
QUEUE = 'the-queue'
self.enqueue_job(QUEUE)
orig_job = self.backend.dequeue(QUEUE, worker_number=3, timeout=1)
self.assertEqual(
0,
self.backend.length(QUEUE),
"Queue should appear empty after dequeuing job",
)
with self.mock_workers({QUEUE: 3}):
self.backend.startup(QUEUE)
self.assertEqual(
0,
self.backend.length(QUEUE),
"Queue should still appear empty after startup",
)
actual_job = Job.from_json(
self.client.lpop(
self.backend._processing_key(QUEUE, 3),
).decode(),
)
self.assertEqual(
orig_job.as_dict(),
actual_job.as_dict(),
"The queue job should be the original one",
)
| 28.5 | 81 | 0.578947 | 8,940 | 0.956354 | 346 | 0.037013 | 377 | 0.040329 | 0 | 0 | 1,553 | 0.166132 |
b5f91ae2a0e4966e6263d4fa5ec3616c068ac79a
| 653 |
py
|
Python
|
src/waldur_slurm/migrations/0019_fill_allocation_user_usage.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 26 |
2017-10-18T13:49:58.000Z
|
2021-09-19T04:44:09.000Z
|
src/waldur_slurm/migrations/0019_fill_allocation_user_usage.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 14 |
2018-12-10T14:14:51.000Z
|
2021-06-07T10:33:39.000Z
|
src/waldur_slurm/migrations/0019_fill_allocation_user_usage.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 32 |
2017-09-24T03:10:45.000Z
|
2021-10-16T16:41:09.000Z
|
from django.db import migrations
def fill_allocation_user_usage(apps, schema_editor):
AllocationUserUsage = apps.get_model('waldur_slurm', 'AllocationUserUsage')
for item in AllocationUserUsage.objects.all():
item.allocation = item.allocation_usage.allocation
item.year = item.allocation_usage.year
item.month = item.allocation_usage.month
item.save(update_fields=['allocation', 'year', 'month'])
class Migration(migrations.Migration):
dependencies = [
('waldur_slurm', '0018_add_allocation_month_year'),
]
operations = [
migrations.RunPython(fill_allocation_user_usage),
]
| 28.391304 | 79 | 0.715161 | 210 | 0.321593 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.162328 |
b5fd2934ba1f4d9447596711eac5fb882a9d016a
| 2,430 |
py
|
Python
|
SBGCobraTools.py
|
dsanleo/SBGCobraTools
|
2cc3a012e1d398ec9185de6ed0d6fa94526afc85
|
[
"MIT"
] | null | null | null |
SBGCobraTools.py
|
dsanleo/SBGCobraTools
|
2cc3a012e1d398ec9185de6ed0d6fa94526afc85
|
[
"MIT"
] | null | null | null |
SBGCobraTools.py
|
dsanleo/SBGCobraTools
|
2cc3a012e1d398ec9185de6ed0d6fa94526afc85
|
[
"MIT"
] | null | null | null |
# Get all carbon sources and return the objective flux. It can be normalized by the carbon input
def get_carbon_sources(model,carbon_uptake=-10,normalize=True,original_source='EX_glc__D_e',carbon_source_list=[]):
cobra.io.write_sbml_model(model,"tmp.xml")
if len(carbon_source_list)==0:
for reaction in model.reactions:
my_id=reaction.id
if reaction.boundary and not reaction.id.startswith("DM_"):
for exchange_metabolite in reaction.metabolites: # Deberia haber solo un metabolito
if 'C' in exchange_metabolite.elements.keys():
n_carbons=exchange_metabolite.elements['C']
model_tmp=cobra.io.read_sbml_model("tmp.xml")
model_tmp.reactions.get_by_id(original_source).lower_bound=0 # Se modifica el uptake de la fuente original
if normalize:
model_tmp.reactions.get_by_id(my_id).lower_bound=carbon_uptake/n_carbons
else:
model_tmp.reactions.get_by_id(my_id).lower_bound=carbon_uptake
solution=model_tmp.optimize()
print("Reaction: "+reaction.id+" N carbons: "+str(n_carbons)+" Metabolite: "+exchange_metabolite.id+" Solution: "+str(solution.objective_value))
else:
for reaction_name in carbon_source_list:
reaction=model.reactions.get_by_id(reaction_name)
my_id=reaction.id
if reaction.boundary and not reaction.id.startswith("DM_"):
for exchange_metabolite in reaction.metabolites: # Deberia haber solo un metabolito
if 'C' in exchange_metabolite.elements.keys():
n_carbons=exchange_metabolite.elements['C']
model_tmp=cobra.io.read_sbml_model("tmp.xml")
model_tmp.reactions.get_by_id(original_source).lower_bound=0 # Se modifica el uptake de la fuente original
if normalize:
model_tmp.reactions.get_by_id(my_id).lower_bound=carbon_uptake/n_carbons
else:
model_tmp.reactions.get_by_id(my_id).lower_bound=carbon_uptake
solution=model_tmp.optimize()
print("Reaction: "+reaction.id+" N carbons: "+str(n_carbons)+" Metabolite: "+exchange_metabolite.id+" Solution: "+str(solution.objective_value))
| 69.428571 | 168 | 0.637037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.174486 |
b5fe08cd114c3ed382e1d1703c6401c43f46dc9b
| 17,970 |
py
|
Python
|
Testing/test_StableMotifs.py
|
jcrozum/StableMotifs
|
8a9d640d3e8b074e0f05e9b45b8ef8bef8d8b5c7
|
[
"MIT"
] | 9 |
2020-04-03T14:18:06.000Z
|
2021-05-18T12:08:20.000Z
|
Testing/test_StableMotifs.py
|
jcrozum/StableMotifs
|
8a9d640d3e8b074e0f05e9b45b8ef8bef8d8b5c7
|
[
"MIT"
] | 30 |
2020-04-06T16:08:45.000Z
|
2021-06-14T15:15:41.000Z
|
Testing/test_StableMotifs.py
|
jcrozum/StableMotifs
|
8a9d640d3e8b074e0f05e9b45b8ef8bef8d8b5c7
|
[
"MIT"
] | 2 |
2021-01-14T15:21:51.000Z
|
2021-05-18T12:04:17.000Z
|
import sys
sys.path.append('../')
import unittest
import sys
sys.path.insert(0,"C:/Users/jcroz/github/StableMotifs")
import pystablemotifs as sm
import pyboolnet.file_exchange
class test_StableMotifs(unittest.TestCase):
rules='''A*=B
B*=A
C*=A or not D
D*=C
E*=B and F
F*=E
'''
rules_pbn = sm.format.booleannet2bnet(rules)
primes = pyboolnet.file_exchange.bnet2primes(rules_pbn)
ar = sm.AttractorRepertoire.from_primes(primes)
diag = ar.succession_diagram
def test_nr_of_attractors(self):
self.assertEqual(len(self.ar.attractors),3)
def test_booleannet2bnet(self):
self.assertEqual(self.rules_pbn, 'A,\tB\nB,\tA\nC,\tA | !D\nD,\tC\nE,\tB & F\nF,\tE\n')
def test_attractor_fixed_nodes_list(self):
self.assertDictEqual(self.ar.attractors[0].logically_fixed_nodes, {'A': 0, 'B': 0, 'E': 0, 'F': 0})
self.assertDictEqual(self.ar.attractors[1].logically_fixed_nodes, {'A': 1, 'B': 1, 'C': 1, 'D': 1, 'E': 1, 'F': 1})
self.assertDictEqual(self.ar.attractors[2].logically_fixed_nodes, {'A': 1, 'B': 1, 'C': 1, 'D': 1, 'E': 0, 'F': 0})
def test_motif_reduction_dict(self):
motif_history_list=[i.motif_history for i in self.diag.motif_reduction_dict.values()]
validation_motif_history_list=[[],[{'A': 0, 'B': 0}],
[{'A': 1, 'B': 1}],
[{'A': 1, 'B': 1}, {'E': 1, 'F': 1}],
[{'A': 1, 'B': 1}, {'E': 0, 'F': 0}],
[{'E': 0, 'F': 0}],
[{'E': 0, 'F': 0}, {'A': 0, 'B': 0}]]
self.assertListEqual(motif_history_list,validation_motif_history_list)
def test_AttractorRepertoire_attractor_states(self):
max_simulate_size=20
ar = sm.AttractorRepertoire.from_primes(self.primes, max_simulate_size=max_simulate_size)
attractors_dict_list=[]
for a in ar.attractors:
attractors_dict_list.append(a.attractor_dict)
self.assertListEqual(attractors_dict_list, [{'A': 0, 'B': 0, 'C': 'X', 'D': 'X', 'E': 0, 'F': 0},
{'A': 1, 'B': 1, 'C': 1, 'D': 1, 'E': 1, 'F': 1},
{'A': 1, 'B': 1, 'C': 1, 'D': 1, 'E': 0, 'F': 0}])
#"pathological" example with a complex attractor on a "ghost" branch
rules_pathological='''xA*= not xA and not xB or xC
xB*= not xA and not xB or xC
xC*= xA and xB'''
rules_pbn_pathological = sm.format.booleannet2bnet(rules_pathological)
primes_pathological = pyboolnet.file_exchange.bnet2primes(rules_pbn_pathological)
ar_pathological = sm.AttractorRepertoire.from_primes(primes_pathological)
diag_pathological = ar_pathological.succession_diagram
def test_ghost_branch(self):
'''
High level test of the correct identification of attractors that are not preceded by stable motif lockins.
(ghost_branch is not a function in the module)
'''
self.assertSetEqual(set(self.ar_pathological.attractors[0].stg.nodes()),set(['000', '010', '100']))
self.assertDictEqual(self.ar_pathological.attractors[1].logically_fixed_nodes,{'xA': 1, 'xB': 1, 'xC': 1})
# self.assertListEqual(self.diag_pathological.reduced_complex_attractor_list,[[{'000', '010', '100'}], None])
# self.assertListEqual(self.diag_pathological.attractor_fixed_nodes_list,[{}, {'xA': 1, 'xB': 1, 'xC': 1}])
def test_ghost_branch_with_max_simulate_size_0(self):
'''
If we don't simulate the STG with brute force at all (max_simulate_size=0) the output should give a warning
on the form of a "!" for nodes that potentially oscillate.
'''
max_simulate_size=0
ar = sm.AttractorRepertoire.from_primes(self.primes_pathological, max_simulate_size=max_simulate_size)
attractors_dict_list=[]
for a in ar.attractors:
attractors_dict_list.append(a.attractor_dict)
self.assertListEqual(attractors_dict_list, [{'xA': '!', 'xB': '!', 'xC': 0}, {'xA': 1, 'xB': 1, 'xC': 1}])
def test_two_complex_attractors_on_the_same_branch(self):
rules='''a* = !b | Z
b* = c | Z
c* = a & ~Z
A* = ~B | Y
B* = C | Y
C* = A & ~Y
Z* = ~(A & B & ~C)
Y* = ~(a & b & ~c)
'''
rules_pbn = sm.format.booleannet2bnet(rules)
primes = pyboolnet.file_exchange.bnet2primes(rules_pbn)
max_simulate_size=20
ar = sm.AttractorRepertoire.from_primes(primes, max_simulate_size=max_simulate_size)
attractors_dict_list=[]
for a in ar.attractors:
attractors_dict_list.append(a.attractor_dict)
self.assertListEqual(attractors_dict_list, [{'A': '1', 'B': '1', 'C': '0', 'Y': '1', 'Z': '0', 'a': 'X', 'b': 'X', 'c': 'X'},
{'A': 'X', 'B': 'X', 'C': 'X', 'Y': '0', 'Z': '1', 'a': '1', 'b': '1', 'c': '0'}])
max_simulate_size=0
ar = sm.AttractorRepertoire.from_primes(primes, max_simulate_size=max_simulate_size)
attractors_dict_list=[]
for a in ar.attractors:
attractors_dict_list.append(a.attractor_dict)
self.assertListEqual(attractors_dict_list, [{'A': '?', 'B': '?', 'C': '?', 'Y': '?', 'Z': '?', 'a': '?', 'b': '?', 'c': '?'}])
#Testing functions of export.py
def test_networkx_succession_diagram_reduced_network_based(self):
import pystablemotifs.export as ex
rules='''A*=B
B*=A
C*=A or not D
D*=C
E*=B and F
F*=E'''
rules_pbn = sm.format.booleannet2bnet(rules)
primes = pyboolnet.file_exchange.bnet2primes(rules_pbn)
max_simulate_size=20
include_attractors_in_diagram=False
ar = sm.AttractorRepertoire.from_primes(primes, max_simulate_size=max_simulate_size)
GR=ex.networkx_succession_diagram_reduced_network_based(ar,include_attractors_in_diagram=include_attractors_in_diagram)
self.assertDictEqual(dict(GR.nodes(data=True)),{0: {'label': '', 'virtual_nodes': []},
1: {'label': '{A: 0, B: 0}', 'virtual_nodes': [{'A': 0, 'B': 0}]},
2: {'label': '{A: 1, B: 1}', 'virtual_nodes': [{'A': 1, 'B': 1}]},
3: {'label': '{A: 1, B: 1}, {E: 1, F: 1}',
'virtual_nodes': [{'A': 1, 'B': 1}, {'E': 1, 'F': 1}]},
4: {'label': '{A: 1, B: 1}, {E: 0, F: 0}',
'virtual_nodes': [{'A': 1, 'B': 1}, {'E': 0, 'F': 0}]},
5: {'label': '{E: 0, F: 0}', 'virtual_nodes': [{'E': 0, 'F': 0}]},
6: {'label': '{E: 0, F: 0}, {A: 0, B: 0}',
'virtual_nodes': [{'E': 0, 'F': 0}, {'A': 0, 'B': 0}]}})
self.assertListEqual(list(GR.edges()), [(0, 1), (0, 2), (0, 5), (2, 3), (2, 4), (5, 6)])
def test_networkx_succession_diagram_reduced_network_based(self):
import pystablemotifs.export as ex
rules='''A*=B
B*=A
C*=A or not D
D*=C
E*=B and F
F*=E'''
rules_pbn = sm.format.booleannet2bnet(rules)
primes = pyboolnet.file_exchange.bnet2primes(rules_pbn)
max_simulate_size=20
include_attractors_in_diagram=False
ar = sm.AttractorRepertoire.from_primes(primes, max_simulate_size=max_simulate_size)
GR=ex.networkx_succession_diagram_reduced_network_based(ar,include_attractors_in_diagram=include_attractors_in_diagram)
self.assertDictEqual(dict(GR.nodes(data=True)),{0: {'label': '', 'virtual_nodes': []},
1: {'label': '{A: 0, B: 0}', 'virtual_nodes': [{'A': 0, 'B': 0}]},
2: {'label': '{A: 1, B: 1}', 'virtual_nodes': [{'A': 1, 'B': 1}]},
3: {'label': '{A: 1, B: 1}, {E: 1, F: 1}',
'virtual_nodes': [{'A': 1, 'B': 1}, {'E': 1, 'F': 1}]},
4: {'label': '{A: 1, B: 1}, {E: 0, F: 0}',
'virtual_nodes': [{'A': 1, 'B': 1}, {'E': 0, 'F': 0}]},
5: {'label': '{E: 0, F: 0}', 'virtual_nodes': [{'E': 0, 'F': 0}]},
6: {'label': '{E: 0, F: 0}, {A: 0, B: 0}',
'virtual_nodes': [{'E': 0, 'F': 0}, {'A': 0, 'B': 0}]}})
self.assertListEqual(list(GR.edges()), [(0, 1), (0, 2), (0, 5), (2, 3), (2, 4), (5, 4), (5, 6)
include_attractors_in_diagram=True
GR=ex.networkx_succession_diagram_reduced_network_based(ar,include_attractors_in_diagram=include_attractors_in_diagram)
self.assertDictEqual(dict(GR.nodes(data=True)),{0: {'label': '', 'virtual_nodes': []},
1: {'label': '{A: 0, B: 0}', 'virtual_nodes': [{'A': 0, 'B': 0}]},
2: {'label': '{A: 1, B: 1}', 'virtual_nodes': [{'A': 1, 'B': 1}]},
3: {'label': '{A: 1, B: 1}, {E: 1, F: 1}',
'virtual_nodes': [{'A': 1, 'B': 1}, {'E': 1, 'F': 1}]},
4: {'label': '{A: 1, B: 1}, {E: 0, F: 0}',
'virtual_nodes': [{'A': 1, 'B': 1}, {'E': 0, 'F': 0}]},
5: {'label': '{E: 0, F: 0}', 'virtual_nodes': [{'E': 0, 'F': 0}]},
6: {'label': '{E: 0, F: 0}, {A: 0, B: 0}',
'virtual_nodes': [{'E': 0, 'F': 0}, {'A': 0, 'B': 0}]},
'A0': {'label': '{A: 0, B: 0, C: X, D: X, E: 0, F: 0}',
'virtual_nodes': {'A': 0, 'B': 0, 'C': 'X', 'D': 'X', 'E': 0, 'F': 0}},
'A1': {'label': '{A: 1, B: 1, C: 1, D: 1, E: 1, F: 1}',
'virtual_nodes': {'A': 1, 'B': 1, 'C': 1, 'D': 1, 'E': 1, 'F': 1}},
'A2': {'label': '{A: 1, B: 1, C: 1, D: 1, E: 0, F: 0}',
'virtual_nodes': {'A': 1, 'B': 1, 'C': 1, 'D': 1, 'E': 0, 'F': 0}}})
self.assertListEqual(list(GR.edges()), [(0, 1),
(0, 2),
(0, 5),
(1, 'A0'),
(2, 3),
(2, 4),
(3, 'A1'),
(4, 'A2'),
(5, 4),
(5, 6),
(6, 'A0')])
def test_networkx_succession_diagram_motif_based(self):
import pystablemotifs.export as ex
rules='''A*=B
B*=A
C*=A or not D
D*=C
E*=B and F
F*=E'''
rules_pbn = sm.format.booleannet2bnet(rules)
primes = pyboolnet.file_exchange.bnet2primes(rules_pbn)
max_simulate_size=20
include_attractors_in_diagram=False
ar = sm.AttractorRepertoire.from_primes(primes, max_simulate_size=max_simulate_size)
GM=ex.networkx_succession_diagram_motif_based(ar,include_attractors_in_diagram=include_attractors_in_diagram)
self.assertDictEqual(dict(GM.nodes(data=True)),{(0, 1): {'label': '{A: 0, B: 0}', 'virtual_nodes': {'A': 0, 'B': 0}},
(0, 2): {'label': '{B: 1, A: 1}', 'virtual_nodes': {'B': 1, 'A': 1}},
(2, 3): {'label': '{E: 1, F: 1}', 'virtual_nodes': {'E': 1, 'F': 1}},
(2, 4): {'label': '{E: 0, F: 0}', 'virtual_nodes': {'E': 0, 'F': 0}},
(0, 5): {'label': '{E: 0, F: 0}', 'virtual_nodes': {'E': 0, 'F': 0}},
(5, 4): {'label': '{B: 1, A: 1}', 'virtual_nodes': {'B': 1, 'A': 1}},
(5, 6): {'label': '{A: 0, B: 0}', 'virtual_nodes': {'A': 0, 'B': 0}}})
self.assertListEqual(list(GM.edges()), [((0, 2), (2, 3)), ((0, 2), (2, 4)), ((0, 5), (5, 4)), ((0, 5), (5, 6))])
include_attractors_in_diagram=True
ar = sm.AttractorRepertoire.from_primes(primes, max_simulate_size=max_simulate_size)
GM=ex.networkx_succession_diagram_motif_based(ar,include_attractors_in_diagram=include_attractors_in_diagram)
self.assertDictEqual(dict(GM.nodes(data=True)),{(0, 1): {'label': '{A: 0, B: 0}', 'virtual_nodes': {'A': 0, 'B': 0}},
(0, 2): {'label': '{B: 1, A: 1}', 'virtual_nodes': {'B': 1, 'A': 1}},
(2, 3): {'label': '{E: 1, F: 1}', 'virtual_nodes': {'E': 1, 'F': 1}},
(2, 4): {'label': '{E: 0, F: 0}', 'virtual_nodes': {'E': 0, 'F': 0}},
(0, 5): {'label': '{E: 0, F: 0}', 'virtual_nodes': {'E': 0, 'F': 0}},
(5, 4): {'label': '{B: 1, A: 1}', 'virtual_nodes': {'B': 1, 'A': 1}},
(5, 6): {'label': '{A: 0, B: 0}', 'virtual_nodes': {'A': 0, 'B': 0}},
'A0': {'label': '{A: 0, B: 0, C: X, D: X, E: 0, F: 0}',
'virtual_nodes': {'A': 0, 'B': 0, 'C': 'X', 'D': 'X', 'E': 0, 'F': 0}},
'A1': {'label': '{A: 1, B: 1, C: 1, D: 1, E: 1, F: 1}',
'virtual_nodes': {'A': 1, 'B': 1, 'C': 1, 'D': 1, 'E': 1, 'F': 1}},
'A2': {'label': '{A: 1, B: 1, C: 1, D: 1, E: 0, F: 0}',
'virtual_nodes': {'A': 1, 'B': 1, 'C': 1, 'D': 1, 'E': 0, 'F': 0}}})
self.assertListEqual(list(GM.edges()), [((0, 1), 'A0'),
((0, 2), (2, 3)),
((0, 2), (2, 4)),
((2, 3), 'A1'),
((2, 4), 'A2'),
((0, 5), (5, 4)),
((0, 5), (5, 6)),
((5, 4), 'A2'),
((5, 6), 'A0')])
def test_networkx_succession_diagram_reduced_network_based_pathological_example(self):
import pystablemotifs.export as ex
rules='''
xA*= not xA and not xB or xC
xB*= not xA and not xB or xC
xC*= xA and xB
'''
rules_pbn = sm.format.booleannet2bnet(rules)
primes = pyboolnet.file_exchange.bnet2primes(rules_pbn)
max_simulate_size=20
include_attractors_in_diagram=False
ar = sm.AttractorRepertoire.from_primes(primes, max_simulate_size=max_simulate_size)
GR=ex.networkx_succession_diagram_reduced_network_based(ar,include_attractors_in_diagram=include_attractors_in_diagram)
self.assertDictEqual(dict(GR.nodes(data=True)),{0: {'label': '', 'virtual_nodes': []},
1: {'label': '{xA: 1, xB: 1, xC: 1}',
'virtual_nodes': [{'xA': 1, 'xB': 1, 'xC': 1}]}})
self.assertListEqual(list(GR.edges()), [(0, 1)])
include_attractors_in_diagram=True
GR=ex.networkx_succession_diagram_reduced_network_based(ar,include_attractors_in_diagram=include_attractors_in_diagram)
self.assertDictEqual(dict(GR.nodes(data=True)),{0: {'label': '', 'virtual_nodes': []},
1: {'label': '{xA: 1, xB: 1, xC: 1}',
'virtual_nodes': [{'xA': 1, 'xB': 1, 'xC': 1}]},
'A0': {'label': '{xA: X, xB: X, xC: 0}',
'virtual_nodes': {'xA': 'X', 'xB': 'X', 'xC': 0}},
'A1': {'label': '{xA: 1, xB: 1, xC: 1}',
'virtual_nodes': {'xA': 1, 'xB': 1, 'xC': 1}}})
self.assertListEqual(list(GR.edges()), [(0, 1), (0, 'A0'), (1, 'A1')])
if __name__ == '__main__':
unittest.main()
| 63.723404 | 134 | 0.420701 | 17,743 | 0.987368 | 0 | 0 | 0 | 0 | 0 | 0 | 4,496 | 0.250195 |
b5ffeb36473c0df68ff9596c309080a9ed5b0766
| 4,584 |
py
|
Python
|
environments/env_locust.py
|
jwallnoefer/projectivesimulation
|
b8f7b3d7d492b5d5f6df7f9f0802bead33c946ca
|
[
"Apache-2.0"
] | 14 |
2018-02-13T17:39:58.000Z
|
2021-07-06T18:09:28.000Z
|
environments/env_locust.py
|
jwallnoefer/projectivesimulation
|
b8f7b3d7d492b5d5f6df7f9f0802bead33c946ca
|
[
"Apache-2.0"
] | null | null | null |
environments/env_locust.py
|
jwallnoefer/projectivesimulation
|
b8f7b3d7d492b5d5f6df7f9f0802bead33c946ca
|
[
"Apache-2.0"
] | 8 |
2018-03-22T04:12:31.000Z
|
2021-01-31T19:14:28.000Z
|
# -*- coding: utf-8 -*-
"""
Copyright 2018 Alexey Melnikov and Katja Ried.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
Please acknowledge the authors when re-using this code and maintain this notice intact.
Code written by Katja Ried, implementing ideas from
'Modelling collective motion based on the principle of agency'
Katja Ried, Thomas Muller & Hans J. Briegel
arXiv:1712.01334 (2017)
"""
import numpy as np
class TaskEnvironment(object):
"""This is a one-dimensional, circular world in which multiple agents move around.
Percepts show agents the net movement of their close neighbours relative to themselves.
Actions are turning or keeping going. Agents are rewarded for aligning themselves with their neighbours.
This environment is used to study the collective motion of marching locusts.
Reference: 'Modelling collective motion based on the principle of agency',
Katja Ried, Thomas Muller and Hans J. Briegel, arXiv:1712.01334."""
def __init__(self, num_agents, world_size, sensory_range):
"""Initializes a world. Arguments:
num_agents (int>0) - number of agents
world_size (int>0) - length of world; ends are identified (ie world is circular)
sensory range (int>0) - how many steps away an agent can see others.
Simple example: env = TaskEnvironment(5,40,4) (for 5 agents)
max_num_trials, max_steps_per_trial = 20, 30 """
self.num_agents = num_agents;
self.world_size = world_size;
self.sensory_range = sensory_range;
self.num_actions = 2 #turn or keep going
self.num_percepts_list = [5]
self.num_max_steps_per_trial = 10**9
self.positions = np.random.randint(world_size,size=num_agents) #where each agent is
#Note that multiple agents can occupy the same position - they do not collide.
self.speeds = np.ndarray.tolist(np.random.choice([-1,1],num_agents)) #which way they are going
#note that positions is an array whereas speeds is a list
def get_neighbours(self,agent_index):
"""Determine indices of all agents within visual range including self."""
focal_pos = self.positions[agent_index];
neighbours = np.ndarray.tolist(np.where(dist_mod(self.positions,focal_pos,self.world_size)<self.sensory_range+1)[0]);
return(neighbours)
def net_rel_mvmt(self,agent_index):
"""Returns the net flow of all neighbours (excluding self),
with sign indicating movement relative to orientation of focal agent."""
neighbours = self.get_neighbours(agent_index)
neighbours.remove(agent_index)
return(self.speeds[agent_index]*sum([self.speeds[index] for index in neighbours]))
def get_percept(self,agent_index):
"""Given an agent index, returns an integer [0,4] encoding the net flow relative to self (truncated at abs<=2)."""
#compute percept
net_rel_move = self.net_rel_mvmt(agent_index)
#map to limited range of percepts
if net_rel_move<-2:
net_rel_move=-2
if net_rel_move>+2:
net_rel_move=2
return(net_rel_move+2)
def move(self,agent_index, action):
"""Given an agent_index and that agent's action (0 for turn, 1 for keep going),
this function updates their speed and position and computes their reward,
along with the percept for the next agent in the list."""
self.speeds[agent_index] = self.speeds[agent_index]*(action*2-1)
self.positions[agent_index] = np.remainder(self.positions[agent_index]+self.speeds[agent_index],self.world_size)
reward = (np.sign(self.net_rel_mvmt(agent_index))+1)/2
next_percept = self.get_percept((agent_index+1)%self.num_agents)
return ([next_percept], reward, False)
def reset(self):
"""Sets positions and speeds back to random values and returns the percept for the 0th agent."""
self.positions = np.random.randint(self.world_size,size=self.num_agents)
self.speeds = np.ndarray.tolist(np.random.choice([-1,1],self.num_agents))
return([self.get_percept(0)])
def dist_mod(num1,num2,mod):
"""Distance between num1 and num2 (absolute value)
if they are given modulo an integer mod, ie between zero and mod.
Also works if num1 is an array (not a list) and num2 a number or vice versa."""
diff=np.remainder(num1-num2,mod)
diff=np.minimum(diff, mod-diff)
return(diff)
| 49.290323 | 128 | 0.695681 | 3,742 | 0.816318 | 0 | 0 | 0 | 0 | 0 | 0 | 2,469 | 0.538613 |
bd0162bf0a28c31d37370edf04366759674e96cb
| 1,174 |
py
|
Python
|
masktools/superskims/slit.py
|
adwasser/masktools
|
c96c8f375f0e94ee2791466d0ce6d31007f58022
|
[
"MIT"
] | null | null | null |
masktools/superskims/slit.py
|
adwasser/masktools
|
c96c8f375f0e94ee2791466d0ce6d31007f58022
|
[
"MIT"
] | null | null | null |
masktools/superskims/slit.py
|
adwasser/masktools
|
c96c8f375f0e94ee2791466d0ce6d31007f58022
|
[
"MIT"
] | null | null | null |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
class Slit:
def __init__(self, x, y, length, width, pa, name):
'''
Representation of a slit in a mask. Coordinates are relative to the mask, so that
the x-axis is along the long end and the y-axis is along the short end.
Parameters
----------
x: float, arcsec along long end of mask
y: float, arcsec along short end of mask
length: float, arcsec, slit length (along spatial axis), should be a minimum of 3
width: float, arcsec, width of slit (along dispersion axis)
pa: float, degrees, position angle of slit, relative to sky (i.e., 0 is north, 90 is east)
name: string, unique (within mask) identifier
'''
self.x = x
self.y = y
self.length = length
self.width = width
self.pa = pa
self.name = name
def __repr__(self):
info_str = ': length of {0:.2f}, PA of {1:.2f} at ({2:.2f}, {3:.2f})'
return '<Slit: ' + self.name + info_str.format(self.length, self.pa, self.x, self.y) + '>'
| 39.133333 | 98 | 0.581772 | 1,063 | 0.905451 | 0 | 0 | 0 | 0 | 0 | 0 | 711 | 0.605622 |
bd0183d07de9ad7a1f13f37bb28f41e2ff5b5a7b
| 1,940 |
py
|
Python
|
gemmforge/instructions/builders/alloctor_builder.py
|
ravil-mobile/gemmforge
|
6381584c2d1ce77eaa938de02bc4f130f19cb2e4
|
[
"MIT"
] | null | null | null |
gemmforge/instructions/builders/alloctor_builder.py
|
ravil-mobile/gemmforge
|
6381584c2d1ce77eaa938de02bc4f130f19cb2e4
|
[
"MIT"
] | 2 |
2021-02-01T16:31:22.000Z
|
2021-05-05T13:44:43.000Z
|
gemmforge/instructions/builders/alloctor_builder.py
|
ravil-mobile/gemmforge
|
6381584c2d1ce77eaa938de02bc4f130f19cb2e4
|
[
"MIT"
] | null | null | null |
from .abstract_builder import AbstractBuilder
from gemmforge.symbol_table import SymbolType, Symbol
from gemmforge.basic_types import RegMemObject, ShrMemObject
from gemmforge.instructions import RegisterAlloc, ShrMemAlloc
from gemmforge.basic_types import GeneralLexicon
from abc import abstractmethod
class AbstractAllocBuilder(AbstractBuilder):
def __init__(self, vm, symbol_table):
super(AbstractAllocBuilder, self).__init__(vm, symbol_table)
self._obj = None
@abstractmethod
def _name_new_symbol(self):
pass
def get_resultant_obj(self):
if not self._obj:
raise NotImplementedError
return self._obj
class ShrMemAllocBuilder(AbstractAllocBuilder):
def __init__(self, vm, symbol_table):
super(ShrMemAllocBuilder, self).__init__(vm, symbol_table)
self._counter = 0
def build(self, size=None):
self._reset()
name = self._name_new_symbol()
self._obj = ShrMemObject(name, size)
dest = Symbol(name=name,
stype=SymbolType.SharedMem,
obj=self._obj)
self._symbol_table.add_symbol(dest)
self._instructions.append(ShrMemAlloc(self._vm, dest, size))
def _name_new_symbol(self):
name = f'{GeneralLexicon.LOCAL_SHR_MEM}{self._counter}'
self._counter += 1
return name
class RegistersAllocBuilder(AbstractAllocBuilder):
def __init__(self, vm, symbol_table):
super(RegistersAllocBuilder, self).__init__(vm, symbol_table)
self._counter = 0
def build(self, size: int, init_value=None):
self._reset()
name = self._name_new_symbol()
self._obj = RegMemObject(name, size)
dest = Symbol(name,
SymbolType.Register,
self._obj)
self._symbol_table.add_symbol(dest)
self._instructions.append(RegisterAlloc(self._vm, dest, init_value))
def _name_new_symbol(self):
name = f'{GeneralLexicon.REG_NAME}{self._counter}'
self._counter += 1
return name
| 28.955224 | 72 | 0.723196 | 1,628 | 0.839175 | 0 | 0 | 54 | 0.027835 | 0 | 0 | 91 | 0.046907 |
bd04f09ba2aeaba23212f09a5a18c36cfe707aa2
| 1,104 |
py
|
Python
|
solutions/LeetCode/Python3/1049.py
|
timxor/leetcode-journal
|
5f1cb6bcc44a5bc33d88fb5cdb4126dfc6f4232a
|
[
"MIT"
] | 854 |
2018-11-09T08:06:16.000Z
|
2022-03-31T06:05:53.000Z
|
solutions/LeetCode/Python3/1049.py
|
timxor/leetcode-journal
|
5f1cb6bcc44a5bc33d88fb5cdb4126dfc6f4232a
|
[
"MIT"
] | 29 |
2019-06-02T05:02:25.000Z
|
2021-11-15T04:09:37.000Z
|
solutions/LeetCode/Python3/1049.py
|
timxor/leetcode-journal
|
5f1cb6bcc44a5bc33d88fb5cdb4126dfc6f4232a
|
[
"MIT"
] | 347 |
2018-12-23T01:57:37.000Z
|
2022-03-12T14:51:21.000Z
|
__________________________________________________________________________________________________
sample 32 ms submission
class Solution:
def lastStoneWeightII(self, stones: List[int]) -> int:
total = sum(stones)
sums = {0}
for s in stones:
sums |= {s + i for i in sums}
return min(2 * pos_sum - total if 2 * pos_sum - total >= 0 else sys.maxsize for pos_sum in sums)
__________________________________________________________________________________________________
sample 36 ms submission
class Solution:
def lastStoneWeightII(self, stones: List[int]) -> int:
memo = {0}
for s in stones:
memo |= {i+s for i in memo}
sumA = sum(stones)
return min(abs(sumA-2*i) for i in memo)
__________________________________________________________________________________________________
sample 40 ms submission
class Solution:
def lastStoneWeightII(self, A):
dp = {0}
sumA = sum(A)
for a in A:
dp |= {a + i for i in dp}
return min(abs(sumA - i - i) for i in dp)
| 39.428571 | 104 | 0.673007 | 733 | 0.663949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
bd0555b1790f397fc8d762146f856a6acab0847d
| 3,043 |
py
|
Python
|
Python3/809.expressive-words.py
|
610yilingliu/leetcode
|
30d071b3685c2131bd3462ba77c6c05114f3f227
|
[
"MIT"
] | null | null | null |
Python3/809.expressive-words.py
|
610yilingliu/leetcode
|
30d071b3685c2131bd3462ba77c6c05114f3f227
|
[
"MIT"
] | null | null | null |
Python3/809.expressive-words.py
|
610yilingliu/leetcode
|
30d071b3685c2131bd3462ba77c6c05114f3f227
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=809 lang=python3
#
# [809] Expressive Words
#
# https://leetcode.com/problems/expressive-words/description/
#
# algorithms
# Medium (46.84%)
# Likes: 320
# Dislikes: 823
# Total Accepted: 45.2K
# Total Submissions: 96.2K
# Testcase Example: '"heeellooo"\n["hello", "hi", "helo"]'
#
# Sometimes people repeat letters to represent extra feeling, such as "hello"
# -> "heeellooo", "hi" -> "hiiii". In these strings like "heeellooo", we have
# groups of adjacent letters that are all the same: "h", "eee", "ll", "ooo".
#
# For some given string S, a query word is stretchy if it can be made to be
# equal to S by any number of applications of the following extension
# operation: choose a group consisting of characters c, and add some number of
# characters c to the group so that the size of the group is 3 or more.
#
# For example, starting with "hello", we could do an extension on the group "o"
# to get "hellooo", but we cannot get "helloo" since the group "oo" has size
# less than 3. Also, we could do another extension like "ll" -> "lllll" to get
# "helllllooo". If S = "helllllooo", then the query word "hello" would be
# stretchy because of these two extension operations: query = "hello" ->
# "hellooo" -> "helllllooo" = S.
#
# Given a list of query words, return the number of words that are
# stretchy.
#
#
#
#
# Example:
# Input:
# S = "heeellooo"
# words = ["hello", "hi", "helo"]
# Output: 1
# Explanation:
# We can extend "e" and "o" in the word "hello" to get "heeellooo".
# We can't extend "helo" to get "heeellooo" because the group "ll" is not size
# 3 or more.
#
#
#
# Constraints:
#
#
# 0 <= len(S) <= 100.
# 0 <= len(words) <= 100.
# 0 <= len(words[i]) <= 100.
# S and all words in words consist only of lowercase letters
#
#
#
# @lc code=start
class Solution(object):
def expressiveWords(self, S, words):
"""
:type S: str
:type words: List[str]
:rtype: int
"""
if not S:
return 0
ans = 0
set_S = set(S)
S_list = []
pre_s, pre_index = S[0], 0
for i, s in enumerate(S):
if pre_s != s:
S_list.append(S[pre_index:i])
pre_s, pre_index = s, i
if i == len(S) - 1:
S_list.append(S[pre_index:])
for word in words:
if set(word) != set_S:
continue
word_list = []
pre_w, pre_index = word[0], 0
for i, w in enumerate(word):
if pre_w != w:
word_list.append(word[pre_index:i])
pre_w, pre_index = w, i
if i == len(word) - 1:
word_list.append(word[pre_index:])
if len(S_list) == len(word_list):
if all(S_list[i] == word_list[i] if len(S_list[i]) < 3 else len(S_list[i]) >= len(word_list[i]) for i in range(len(S_list))):
ans += 1
return ans
# @lc code=end
| 29.833333 | 141 | 0.57049 | 1,175 | 0.384615 | 0 | 0 | 0 | 0 | 0 | 0 | 1,871 | 0.612439 |
bd068843b439a58814f27d16075e43744d08bd52
| 1,601 |
py
|
Python
|
settings/Microscope_settings.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | null | null | null |
settings/Microscope_settings.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | 1 |
2019-10-22T21:28:31.000Z
|
2019-10-22T21:39:12.000Z
|
settings/Microscope_settings.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | 2 |
2019-06-06T15:06:46.000Z
|
2020-07-20T02:03:22.000Z
|
Size = (1255, 1160)
Position = (39, 26)
ScaleFactor = 1.0
ZoomLevel = 32.0
Orientation = 0
Mirror = False
NominalPixelSize = 0.125
filename = 'Z:\\All Projects\\Crystallization\\2018.08.27.caplilary with crystals inspection\\2018.08.27 CypA 2.jpg'
ImageWindow.Center = (649, 559)
ImageWindow.ViewportCenter = (2.41796875, 2.0)
ImageWindow.crosshair_color = (255, 0, 255)
ImageWindow.boxsize = (0.04, 0.04)
ImageWindow.box_color = (255, 0, 0)
ImageWindow.show_box = False
ImageWindow.Scale = [[0.21944444444444444, -0.0763888888888889], [0.46944444444444444, -0.075]]
ImageWindow.show_scale = True
ImageWindow.scale_color = (255, 0, 0)
ImageWindow.crosshair_size = (0.05, 0.05)
ImageWindow.show_crosshair = False
ImageWindow.show_profile = False
ImageWindow.show_FWHM = False
ImageWindow.show_center = False
ImageWindow.calculate_section = False
ImageWindow.profile_color = (255, 0, 255)
ImageWindow.FWHM_color = (0, 0, 255)
ImageWindow.center_color = (0, 0, 255)
ImageWindow.ROI = [[-0.5194444444444445, -0.3458333333333333], [0.225, 0.19305555555555556]]
ImageWindow.ROI_color = (255, 255, 0)
ImageWindow.show_saturated_pixels = False
ImageWindow.mask_bad_pixels = False
ImageWindow.saturation_threshold = 233
ImageWindow.saturated_color = (255, 0, 0)
ImageWindow.linearity_correction = False
ImageWindow.bad_pixel_threshold = 233
ImageWindow.bad_pixel_color = (30, 30, 30)
ImageWindow.show_grid = False
ImageWindow.grid_type = 'xy'
ImageWindow.grid_color = (0, 0, 255)
ImageWindow.grid_x_spacing = 0.3
ImageWindow.grid_x_offset = 0.0
ImageWindow.grid_y_spacing = 0.5
ImageWindow.grid_y_offset = 0.0
| 37.232558 | 116 | 0.775141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.068082 |
bd07434502bfcaa7d1b29853452ba88cedddad3e
| 3,259 |
py
|
Python
|
model_rocke3d.py
|
projectcuisines/gcm_ana
|
cd9f7d47dd4a9088bcd7556b4955d9b8e09b9741
|
[
"MIT"
] | 1 |
2021-09-29T18:03:56.000Z
|
2021-09-29T18:03:56.000Z
|
model_rocke3d.py
|
projectcuisines/thai_trilogy_code
|
cd9f7d47dd4a9088bcd7556b4955d9b8e09b9741
|
[
"MIT"
] | null | null | null |
model_rocke3d.py
|
projectcuisines/thai_trilogy_code
|
cd9f7d47dd4a9088bcd7556b4955d9b8e09b9741
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Utilities for the ROCKE3D output."""
import dask.array as da
import xarray as xr
from grid import reverse_along_dim, roll_da_to_pm180
from model_um import calc_um_rel
from names import rocke3d
__all__ = ("adjust_rocke3d_grid", "calc_rocke3d_rei", "calc_rocke3d_rel")
calc_rocke3d_rel = calc_um_rel
def adjust_rocke3d_grid(darr, lon_name="lon", lat_name="lat"):
"""
Adjust the grid of a ROCKE3D data array.
Reverse the latitude dimension and shift the substellar coordinate
from -180 degrees to 0 degree in longitude.
"""
out = darr
if lat_name in out.dims:
out = reverse_along_dim(out, lat_name)
if lon_name in out.dims:
# Shift data along the longitude to center the substellar at (0,0)
out = roll_da_to_pm180(
out.assign_coords(**{lon_name: out[lon_name] + 180}), lon_name=lon_name
)
return out
def calc_rocke3d_rei(ds):
"""
Aggregate parametrization based on effective dimension.
In the initial form, the same approach is used for stratiform
and convective cloud.
The fit provided here is based on Stephan Havemann's fit of
Dge with temperature, consistent with David Mitchell's treatment
of the variation of the size distribution with temperature. The
parametrization of the optical properties is based on De
(=(3/2)volume/projected area), whereas Stephan's fit gives Dge
(=(2*SQRT(3)/3)*volume/projected area), which explains the
conversion factor. The fit to Dge is in two sections, because
Mitchell's relationship predicts a cusp at 216.208 K. Limits
of 8 and 124 microns are imposed on Dge: these are based on this
relationship and should be reviewed if it is changed. Note also
that the relationship given here is for polycrystals only.
Parameters
----------
ds: xarray.Dataset
ROCKE-3D data set
These are the parameters used in the temperature dependent
parameterizations for ice cloud particle sizes below.
Parameters for the aggregate parametrization
a0_agg_cold = 7.5094588E-04,
b0_agg_cold = 5.0830326E-07,
a0_agg_warm = 1.3505403E-04,
b0_agg_warm = 2.6517429E-05,
t_switch = 216.208,
t0_agg = 279.5,
s0_agg = 0.05,
Returns
-------
rei: xarray.DataArray
Ice effective radius [um].
"""
a0_agg_cold = 7.5094588e-04
b0_agg_cold = 5.0830326e-07
a0_agg_warm = 1.3505403e-04
b0_agg_warm = 2.6517429e-05
t_switch = 216.208
t0_agg = 279.5
s0_agg = 0.05
# Air temperature in ROCKE-3D
air_temp = ds[rocke3d.temp]
# Calculate the R_eff
rei = xr.where(
air_temp < t_switch,
a0_agg_cold * da.exp(s0_agg * (air_temp - t0_agg)) + b0_agg_cold,
a0_agg_warm * da.exp(s0_agg * (air_temp - t0_agg)) + b0_agg_warm,
)
# Limit of the parameterization
rei = (
(3 / 2)
* (3 / (2 * da.sqrt(3)))
* xr.ufuncs.minimum(1.24e-04, xr.ufuncs.maximum(8.0e-06, rei))
)
rei = rei.rename("ice_cloud_condensate_effective_radius")
rei.attrs.update(
{
"long_name": "ice_cloud_condensate_effective_radius",
"units": "micron",
}
)
return rei
| 30.745283 | 83 | 0.666769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,989 | 0.61031 |
bd080979389c4fa7ca1e77a7f150acdec97764c3
| 4,090 |
py
|
Python
|
models/wordcloud.py
|
mcxwx123/RecGFI
|
6e872c3b8c5398959b119e5ba14e665bbb45c56b
|
[
"MIT"
] | 9 |
2022-01-28T14:24:35.000Z
|
2022-01-30T05:05:03.000Z
|
models/wordcloud.py
|
mcxwx123/RecGFI
|
6e872c3b8c5398959b119e5ba14e665bbb45c56b
|
[
"MIT"
] | null | null | null |
models/wordcloud.py
|
mcxwx123/RecGFI
|
6e872c3b8c5398959b119e5ba14e665bbb45c56b
|
[
"MIT"
] | 1 |
2022-01-28T14:24:41.000Z
|
2022-01-28T14:24:41.000Z
|
from wordcloud import WordCloud,STOPWORDS
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import multidict as multidict
from collections import Counter
import json
import datetime
import os
plt.switch_backend('agg')
def removePunctuation(text):
text = re.sub(r'[{}]+'.format('!,;:?`"\'、,;'),' ',text)
return text.strip()
def getFrequencyDictForText0(sentence,pro):
global tmpDict0
# making dict for counting frequencies
sentence=removePunctuation(sentence)
for text in sentence.split(" "):
if len(text)<3 or re.match("a|the|an|the|to|in|for|of|or|by|with|is|on|that|be", text) or (re.match("^[A-Za-z]+$", text) is None):
continue
val = tmpDict0.get(text, [0,[]])
pros=val[1]
if pro not in pros:
pros.append(pro)
tmpDict0[text.lower()] = [val[0] + 1,pros]
def getFrequencyDictForText1(sentence,pro):
global tmpDict1
# making dict for counting frequencies
sentence=removePunctuation(sentence)
for text in sentence.split(" "):
if len(text)<3 or re.match("a|the|an|the|to|in|for|of|or|by|with|is|on|that|be", text) or (re.match("^[A-Za-z]+$", text) is None):
continue
val = tmpDict1.get(text, [0,[]])
pros=val[1]
if pro not in pros:
pros.append(pro)
tmpDict1[text.lower()] = [val[0] + 1,pros]
class DateEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj,datetime.datetime):
return obj.strftime("%Y-%m-%d %H:%M:%S")
elif isinstance(obj,datetime.timedelta):
return obj.seconds
else:
return json.JSONEncoder.default(self,obj)
def drawwordcloud():
global tmpDict0,tmpDict1
finalbody0=''
finalbody1=''
current_work_dir = os.path.dirname(__file__)
with open(current_work_dir+'/../data/issuedata.json') as f:
issuestr = json.load(f)
issuedic = json.loads(issuestr)
issuedata = issuedic['issuedata']
lst=[]
for i in range(len(issuedata)):
lst.append(issuedata[i][0])
finaldata=pd.DataFrame(lst)
finaldata=finaldata.values.tolist()
finalbody0=[]
finalbody1=[]
for d in finaldata:
pro=d[1]
body=d[39]
p=re.compile(r"```.+?```",flags=re.S)
s=p.sub("",body)
body=" ".join(s.split())
p=re.compile(r"http[:/\w\.]+")
s=p.sub("",body)
body=" ".join(s.split())
body.lower()
if d[37]==0:#clscmt
finalbody0.append([body,pro])
else:
finalbody1.append([body,pro])
tmpDict0 = {}
tmpDict1 = {}
for i in finalbody0:
getFrequencyDictForText0(i[0],i[1])
for i in finalbody1:
getFrequencyDictForText1(i[0],i[1])
for key in list(tmpDict0.keys()):
val0 = tmpDict0.get(key, [0,[]])
val1 = tmpDict1.get(key, [0,[]])
if len(list(set(val0[1]+val1[1])))<5:
del tmpDict0[key]
for key in list(tmpDict1.keys()):
val0 = tmpDict0.get(key, [0,[]])
val1 = tmpDict1.get(key, [0,[]])
if len(list(set(val0[1]+val1[1])))<5:
del tmpDict1[key]
fullTermsDict0 = multidict.MultiDict()
for key in tmpDict0:
val0 = tmpDict0.get(key, [0,[]])
val1 = tmpDict1.get(key, [0,[]])
fullTermsDict0.add(key, pow(val0[0], 2)/(val0[0]+val1[0]))
fullTermsDict1 = multidict.MultiDict()
for key in tmpDict1:
val0 = tmpDict0.get(key, [0,[]])
val1 = tmpDict1.get(key, [0,[]])
fullTermsDict1.add(key, pow(val1[0], 2)/(val0[0]+val1[0]))
wc = WordCloud(
background_color='white',
width=500,
height=350,
max_font_size=100,
min_font_size=3,
max_words=50,
relative_scaling=0.5,
collocations=False,
min_word_length=3,
#stopwords=stopwords,
mode='RGBA'
#colormap='pink'
)
wc.generate_from_frequencies(fullTermsDict0)
wc.to_file(r"wordcloud0.png")
wc.generate_from_frequencies(fullTermsDict1)
wc.to_file(r"wordcloud1.png")
| 29.854015 | 138 | 0.596822 | 311 | 0.075928 | 0 | 0 | 0 | 0 | 0 | 0 | 436 | 0.106445 |
bd08ddc4c6e6b83523aa9e949593219788ab5e5c
| 2,996 |
py
|
Python
|
favorites_updater.py
|
techonerd/moepoi
|
6440f39653bc3560e39429570bd25b7c564b7f54
|
[
"MIT"
] | 36 |
2020-07-21T16:19:48.000Z
|
2022-03-21T15:31:02.000Z
|
favorites_updater.py
|
gaesant/moepoi
|
cd478ca00afa5140bb8057c7d37b1ccb2fcbe3b6
|
[
"MIT"
] | 1 |
2022-02-18T07:41:14.000Z
|
2022-02-18T07:41:14.000Z
|
favorites_updater.py
|
gaesant/moepoi
|
cd478ca00afa5140bb8057c7d37b1ccb2fcbe3b6
|
[
"MIT"
] | 176 |
2020-07-22T19:24:14.000Z
|
2022-03-30T23:42:58.000Z
|
from python_graphql_client import GraphqlClient
import pathlib
import re
import os
root = pathlib.Path(__file__).parent.resolve()
client = GraphqlClient(endpoint="https://graphql.anilist.co")
TOKEN = os.environ.get("ANILIST_TOKEN", "")
def replace_chunk(content, marker, chunk, inline=False):
r = re.compile(
r"<!\-\- {} starts \-\->.*<!\-\- {} ends \-\->".format(marker, marker),
re.DOTALL,
)
if not inline:
chunk = "\n{}\n".format(chunk)
chunk = "<!-- {} starts -->{}<!-- {} ends -->".format(marker, chunk, marker)
return r.sub(chunk, content)
def make_query():
return """
query($favPage: Int) {
Viewer {
favourites {
anime(page: $favPage) {
nodes {
title {
romaji
}
siteUrl
}
pageInfo {
total
currentPage
lastPage
perPage
hasNextPage
}
}
manga(page: $favPage) {
nodes {
title {
romaji
}
siteUrl
}
pageInfo {
total
currentPage
lastPage
perPage
hasNextPage
}
}
characters(page: $favPage) {
nodes {
name {
full
}
siteUrl
}
pageInfo {
total
currentPage
lastPage
perPage
hasNextPage
}
}
}
}
}
"""
def fetch_favorites(oauth_token, types='anime'):
results = []
variables = {"favPage": 1}
data = client.execute(
query=make_query(),
variables=variables,
headers={"Authorization": "Bearer {}".format(oauth_token)},
)
for x in data['data']['Viewer']['favourites'][types]['nodes']:
results.append(
{
'title': x['title']['romaji'] if types != 'characters' else x['name']['full'],
'url': x['siteUrl']
}
)
return results
if __name__ == "__main__":
readme = root / "README.md"
readme_contents = readme.open().read()
# Favorites Anime
data = fetch_favorites(TOKEN, types='anime')
res = "\n".join(
[
"* [{title}]({url})".format(**x)
for x in data
]
)
print (res)
rewritten = replace_chunk(readme_contents, "favorites_anime", res)
# Favorites Manga
data = fetch_favorites(TOKEN, types='manga')
res = "\n".join(
[
"* [{title}]({url})".format(**x)
for x in data
]
)
print (res)
rewritten = replace_chunk(readme_contents, "favorites_manga", res)
# Favorites Characters
data = fetch_favorites(TOKEN, types='characters')
res = "\n".join(
[
"* [{title}]({url})".format(**x)
for x in data
]
)
print (res)
rewritten = replace_chunk(readme_contents, "favorites_characters", res)
readme.open("w").write(rewritten)
| 23.046154 | 94 | 0.502003 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,350 | 0.450601 |
bd0a67b7badc84d9a3a79ed71754a0226bee9e55
| 844 |
py
|
Python
|
moistmaster/analytics/migrations/0001_initial.py
|
benjohnsonnlp/robosquirt
|
f96c58421532f9b956cec2277b7978022c7c1d80
|
[
"BSD-3-Clause"
] | null | null | null |
moistmaster/analytics/migrations/0001_initial.py
|
benjohnsonnlp/robosquirt
|
f96c58421532f9b956cec2277b7978022c7c1d80
|
[
"BSD-3-Clause"
] | 7 |
2020-02-12T00:56:32.000Z
|
2022-02-10T09:57:40.000Z
|
moistmaster/analytics/migrations/0001_initial.py
|
benjohnsonnlp/robosquirt
|
f96c58421532f9b956cec2277b7978022c7c1d80
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.0.6 on 2019-07-10 03:56
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='WateringSession',
fields=[
('identifier', models.CharField(max_length=36, primary_key=True, serialize=False)),
('created_time', models.DateTimeField()),
('session_start', models.DateTimeField()),
('session_end', models.DateTimeField()),
('device_identifier', models.IntegerField()),
('originator', models.TextField()),
('reason', models.TextField()),
],
options={
'get_latest_by': ['created_time'],
},
),
]
| 28.133333 | 99 | 0.535545 | 751 | 0.88981 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.220379 |
bd0c339764aca9d1b1dc4bb3784afbd33f7e553d
| 30,324 |
py
|
Python
|
stcloud/api/apps_api.py
|
sematext/sematext-api-client-python
|
16e025cd3d32aa58deb70fc5930ae4165afebe97
|
[
"Apache-2.0"
] | 1 |
2020-05-01T12:15:52.000Z
|
2020-05-01T12:15:52.000Z
|
stcloud/api/apps_api.py
|
sematext/sematext-api-client-python
|
16e025cd3d32aa58deb70fc5930ae4165afebe97
|
[
"Apache-2.0"
] | null | null | null |
stcloud/api/apps_api.py
|
sematext/sematext-api-client-python
|
16e025cd3d32aa58deb70fc5930ae4165afebe97
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Sematext Cloud API
API Explorer provides access and documentation for Sematext REST API. The REST API requires the API Key to be sent as part of `Authorization` header. E.g.: `Authorization : apiKey e5f18450-205a-48eb-8589-7d49edaea813`. # noqa: E501
OpenAPI spec version: v3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from stcloud.api_client import ApiClient
class AppsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_using_delete(self, any_state_app_id, **kwargs): # noqa: E501
"""delete # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_using_delete(any_state_app_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int any_state_app_id: anyStateAppId (required)
:return: GenericMapBasedApiResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_using_delete_with_http_info(any_state_app_id, **kwargs) # noqa: E501
else:
(data) = self.delete_using_delete_with_http_info(any_state_app_id, **kwargs) # noqa: E501
return data
def delete_using_delete_with_http_info(self, any_state_app_id, **kwargs): # noqa: E501
"""delete # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_using_delete_with_http_info(any_state_app_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int any_state_app_id: anyStateAppId (required)
:return: GenericMapBasedApiResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['any_state_app_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'any_state_app_id' is set
if ('any_state_app_id' not in params or
params['any_state_app_id'] is None):
raise ValueError("Missing the required parameter `any_state_app_id` when calling `delete_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'any_state_app_id' in params:
path_params['anyStateAppId'] = params['any_state_app_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/users-web/api/v3/apps/{anyStateAppId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GenericMapBasedApiResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_app_types_using_get(self, **kwargs): # noqa: E501
"""Get all App types supported for the account identified with apiKey # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_app_types_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: AppTypesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_app_types_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_app_types_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_app_types_using_get_with_http_info(self, **kwargs): # noqa: E501
"""Get all App types supported for the account identified with apiKey # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_app_types_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: AppTypesResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_app_types_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/users-web/api/v3/apps/types', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppTypesResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_using_get(self, any_state_app_id, **kwargs): # noqa: E501
"""Gets defails for one particular App # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_using_get(any_state_app_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int any_state_app_id: anyStateAppId (required)
:return: AppResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_using_get_with_http_info(any_state_app_id, **kwargs) # noqa: E501
else:
(data) = self.get_using_get_with_http_info(any_state_app_id, **kwargs) # noqa: E501
return data
def get_using_get_with_http_info(self, any_state_app_id, **kwargs): # noqa: E501
"""Gets defails for one particular App # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_using_get_with_http_info(any_state_app_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int any_state_app_id: anyStateAppId (required)
:return: AppResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['any_state_app_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'any_state_app_id' is set
if ('any_state_app_id' not in params or
params['any_state_app_id'] is None):
raise ValueError("Missing the required parameter `any_state_app_id` when calling `get_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'any_state_app_id' in params:
path_params['anyStateAppId'] = params['any_state_app_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/users-web/api/v3/apps/{anyStateAppId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def invite_app_guests_using_post(self, body, **kwargs): # noqa: E501
"""Invite guests to an app # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.invite_app_guests_using_post(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Invitation body: For `app` and `apps` fields only `id` needs to be populated.Other fields can be left empty or with default values (required)
:return: GenericMapBasedApiResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.invite_app_guests_using_post_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.invite_app_guests_using_post_with_http_info(body, **kwargs) # noqa: E501
return data
def invite_app_guests_using_post_with_http_info(self, body, **kwargs): # noqa: E501
"""Invite guests to an app # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.invite_app_guests_using_post_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Invitation body: For `app` and `apps` fields only `id` needs to be populated.Other fields can be left empty or with default values (required)
:return: GenericMapBasedApiResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method invite_app_guests_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `invite_app_guests_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/users-web/api/v3/apps/guests', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GenericMapBasedApiResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_apps_users_using_get(self, **kwargs): # noqa: E501
"""Get all users of apps accessible to this account # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_apps_users_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: AppsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_apps_users_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_apps_users_using_get_with_http_info(**kwargs) # noqa: E501
return data
def list_apps_users_using_get_with_http_info(self, **kwargs): # noqa: E501
"""Get all users of apps accessible to this account # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_apps_users_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: AppsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_apps_users_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/users-web/api/v3/apps/users', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_using_get(self, **kwargs): # noqa: E501
"""Get all apps accessible by account identified with apiKey # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: AppsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_using_get_with_http_info(**kwargs) # noqa: E501
return data
def list_using_get_with_http_info(self, **kwargs): # noqa: E501
"""Get all apps accessible by account identified with apiKey # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: AppsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/users-web/api/v3/apps', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_description_using_put1(self, any_state_app_id, **kwargs): # noqa: E501
"""Update description of the app # noqa: E501
App can be in any state # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_description_using_put1(any_state_app_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int any_state_app_id: App Id (required)
:param AppDescription body: Update Details
:return: AppResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_description_using_put1_with_http_info(any_state_app_id, **kwargs) # noqa: E501
else:
(data) = self.update_description_using_put1_with_http_info(any_state_app_id, **kwargs) # noqa: E501
return data
def update_description_using_put1_with_http_info(self, any_state_app_id, **kwargs): # noqa: E501
"""Update description of the app # noqa: E501
App can be in any state # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_description_using_put1_with_http_info(any_state_app_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int any_state_app_id: App Id (required)
:param AppDescription body: Update Details
:return: AppResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['any_state_app_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_description_using_put1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'any_state_app_id' is set
if ('any_state_app_id' not in params or
params['any_state_app_id'] is None):
raise ValueError("Missing the required parameter `any_state_app_id` when calling `update_description_using_put1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'any_state_app_id' in params:
path_params['anyStateAppId'] = params['any_state_app_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/users-web/api/v3/apps/{anyStateAppId}/description', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_using_put2(self, body, any_state_app_id, **kwargs): # noqa: E501
"""Update app # noqa: E501
App can be in any state # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_using_put2(body, any_state_app_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param UpdateAppInfo body: dto (required)
:param int any_state_app_id: App Id (required)
:return: AppResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_using_put2_with_http_info(body, any_state_app_id, **kwargs) # noqa: E501
else:
(data) = self.update_using_put2_with_http_info(body, any_state_app_id, **kwargs) # noqa: E501
return data
def update_using_put2_with_http_info(self, body, any_state_app_id, **kwargs): # noqa: E501
"""Update app # noqa: E501
App can be in any state # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_using_put2_with_http_info(body, any_state_app_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param UpdateAppInfo body: dto (required)
:param int any_state_app_id: App Id (required)
:return: AppResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'any_state_app_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_using_put2" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_using_put2`") # noqa: E501
# verify the required parameter 'any_state_app_id' is set
if ('any_state_app_id' not in params or
params['any_state_app_id'] is None):
raise ValueError("Missing the required parameter `any_state_app_id` when calling `update_using_put2`") # noqa: E501
collection_formats = {}
path_params = {}
if 'any_state_app_id' in params:
path_params['anyStateAppId'] = params['any_state_app_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/users-web/api/v3/apps/{anyStateAppId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 38.777494 | 236 | 0.614991 | 29,766 | 0.981599 | 0 | 0 | 0 | 0 | 0 | 0 | 15,252 | 0.502968 |
bd0df2e5d3c7a33e0c8bc13d1922de1d4da4a323
| 3,706 |
py
|
Python
|
app_startup4/views.py
|
konjing/django_sme_award
|
840ed3685299c77be8516acf1e8a0123930dd63d
|
[
"MIT"
] | null | null | null |
app_startup4/views.py
|
konjing/django_sme_award
|
840ed3685299c77be8516acf1e8a0123930dd63d
|
[
"MIT"
] | 5 |
2021-03-19T02:32:48.000Z
|
2021-06-10T19:01:30.000Z
|
app_startup4/views.py
|
konjing/django_sme_award
|
840ed3685299c77be8516acf1e8a0123930dd63d
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib import messages
from app_startup4.models import StartupCompetition
def registerListView(request):
""" จัดการข้อมูลและพิจารณา ผู้สมัครที่ส่งข้อมูลเข้ามา """
queryset = StartupCompetition.objects.filter(state=1, competition=2)
if request.method == 'POST': #<- Checking for method type
id_list = request.POST.getlist('regis_id')
total_select = len(id_list)
for regis_id in id_list:
StartupCompetition.objects.filter(id=regis_id).update(state=2)
messages.success(request, 'ผู้สมัคร {} ราย ผ่านการตรวจสอบคุุณสมบัติ'.format(total_select))
return redirect('st-register-list')
context = {'queryset':queryset}
return render(request, 'app_startup4/register_list.html', context)
def screenListView(request):
""" จัดการข้อมูลและพิจารณา ผู้สมัครที่ผ่านการตรวจสอบคุณสมบัติ """
queryset = StartupCompetition.objects.filter(state=2, competition=2)
if request.method == 'POST': #<- Checking for method type
id_list = request.POST.getlist('regis_id')
total_select = len(id_list)
for regis_id in id_list:
StartupCompetition.objects.filter(id=regis_id).update(state=4)
messages.success(request, 'ผู้สมัคร {} ราย ผ่านการตรวจประเมินเบื้องต้น'.format(total_select))
return redirect('st-screen-list')
context = {'queryset':queryset}
return render(request, 'app_startup4/screen_list.html', context)
def interviewListView(request):
""" จัดการข้อมูลและพิจารณา ผู้สมัครที่ผ่านการตรวจประเมินเบื้องต้น """
queryset = StartupCompetition.objects.filter(state=4, competition=2)
if request.method == 'POST': #<- Checking for method type
id_list = request.POST.getlist('regis_id')
total_select = len(id_list)
for regis_id in id_list:
StartupCompetition.objects.filter(id=regis_id).update(state=6)
messages.success(request, 'ผู้สมัคร {} ราย ผ่านการสัมภาษณ์เพื่อเลือกไป Site visit'.format(total_select))
return redirect('st-interview-list')
context = {'queryset':queryset}
return render(request, 'app_startup4/interview_list.html', context)
def evaluateListView(request):
""" จัดการข้อมูลและพิจารณา ผู้สมัครที่ผ่านการสัมภาษณ์เพื่อเลือกไป Site visit """
queryset = StartupCompetition.objects.filter(state=6, competition=2)
if request.method == 'POST': #<- Checking for method type
id_list = request.POST.getlist('regis_id')
total_select = len(id_list)
for regis_id in id_list:
StartupCompetition.objects.filter(id=regis_id).update(state=8)
messages.success(request, 'ผู้สมัคร {} ราย ผ่านเกณฑ์พิจารณาในแต่ละกลุ่ม'.format(total_select))
return redirect('st-evaluate-list')
context = {'queryset':queryset}
return render(request, 'app_startup4/evaluate_list.html', context)
def candidateListView(request):
""" จัดการข้อมูลและพิจารณา ผู้สมัครที่ผ่านเข้าชิงรางวัลในแต่ละกลุ่ม """
queryset = StartupCompetition.objects.filter(state=8, competition=2)
# if request.method == 'POST': #<- Checking for method type
# id_list = request.POST.getlist('regis_id')
# total_select = len(id_list)
# for regis_id in id_list:
# StartupCompetition.objects.filter(id=regis_id).update(state=10)
# messages.success(request, 'ผู้สมัคร {} ราย ได้รับรา่งวัล'.format(total_select))
# return redirect('st-candidate-list')
context = {'queryset':queryset}
return render(request, 'app_startup4/candidate_list.html', context)
| 40.282609 | 120 | 0.674582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,291 | 0.495244 |
bd101452c6ae5bad47e4c2d957dbf69805a1b869
| 3,462 |
py
|
Python
|
SRC/common/IO/GUI/DIR.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 31 |
2015-04-01T15:59:36.000Z
|
2022-03-18T20:21:47.000Z
|
SRC/common/IO/GUI/DIR.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 3 |
2015-02-06T19:30:24.000Z
|
2017-05-25T14:14:31.000Z
|
SRC/common/IO/GUI/DIR.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 7 |
2015-01-23T15:19:22.000Z
|
2021-06-09T09:03:59.000Z
|
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modifed
# versions of this software, you first contact the authors at
# [email protected].
dirname = 'GUI'
if not DIM_3:
clib = 'oof2commonGUI'
else:
clib = 'oof3dcommonGUI'
clib_order = 100
pyfiles = [
'activeareaPage.py',
'activityViewer.py',
'chooser.py',
'colorparamwidgets.py',
'console.py',
'displaymethodwidget.py',
'fileselector.py',
'fixedwidthtext.py',
'fontselector.py',
'genericselectGUI.py',
'gfxLabelTree.py',
'gfxmenu.py',
'gfxwindow.py',
'gtklogger.py',
'gtkutils.py',
'guilogger.py',
'historian.py',
'initialize.py',
'introPage.py',
'labelledslider.py',
'mainmenuGUI.py',
'mainthreadGUI.py',
'matrixparamwidgets.py',
'microstructurePage.py',
'mousehandler.py',
'oofGUI.py',
'oof_mainiteration.py',
'parameterwidgets.py',
'pixelPage.py',
'pixelgroupwidget.py',
'pixelinfoGUI.py',
'pixelselectparamwidgets.py'
'pixelselecttoolboxGUI.py',
'progressbarGUI2.py',
'questioner.py',
'quit.py',
'regclassfactory.py',
'reporter_GUI.py',
'reporterrorGUI.py',
'subWindow.py',
'toolboxGUI.py',
'tutorialsGUI.py',
'viewertoolboxGUI.py',
'whowidget.py',
'widgetscope.py',
'workerGUI.py'
]
if not DIM_3:
cfiles = [
'oofcanvas.C',
'rubberband.C',
'canvasdot.c',
'canvastriangle.c',
'gfxbrushstyle.C'
]
swigfiles =[
'oofcanvas.swg',
'rubberband.swg',
'gfxbrushstyle.swg'
]
swigpyfiles = [
'gfxbrushstyle.spy'
]
hfiles = [
'canvasdot.h',
'canvastriangle.h',
'oofcanvas.h',
'rubberband.h',
'rbstipple.xbm',
'rbstubble.xbm',
'gfxbrushstyle.h'
]
else:
cfiles = ['progressGUI.C']
if USE_COCOA:
cfiles.append('oofcanvas3d.mm')
else:
cfiles.append('oofcanvas3d.C')
swigfiles = ['oofcanvas3d.swg', 'progressGUI.swg']
hfiles = ['oofcanvas3d.h', 'progressGUI.h']
swigpyfiles = ['progressGUI.spy']
def set_clib_flags(clib):
import oof2setuputils
# This is a hack that is needed by pkg-config on Macs using
# fink. After merging its pangocairo branch, fink isn't putting
# pango.pc and freetype2.pc in the default locations because they
# can cause conflicts. Once fink completes upgrading to modern
# versions of these libraries, this hack can be removed.
oof2setuputils.extend_path("PKG_CONFIG_PATH",
"/sw/lib/pango-ft219/lib/pkgconfig",
"/sw/lib/freetype219/lib/pkgconfig/")
oof2setuputils.pkg_check("gtk+-2.0", GTK_VERSION, clib)
oof2setuputils.pkg_check("pygtk-2.0", PYGTK_VERSION, clib)
oof2setuputils.pkg_check("pygobject-2.0", PYGOBJECT_VERSION)
if not DIM_3:
oof2setuputils.pkg_check("libgnomecanvas-2.0", GNOMECANVAS_VERSION,
clib)
clib.externalLibs.append('oof2common')
else:
clib.externalLibs.append('oof3dcommon')
| 25.455882 | 75 | 0.624783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,117 | 0.611496 |
bd12daa2d90f5e59ee73aa4f239e4f3eb0699f08
| 4,366 |
py
|
Python
|
chapter_01/main_chapter01_00.py
|
couldbebetter/simulations_radar_systems_design
|
fcb23964e10c7ebb9cb1beabadc257e970a2c1de
|
[
"MIT"
] | 20 |
2018-02-02T06:46:14.000Z
|
2022-01-05T21:25:50.000Z
|
chapter_01/main_chapter01_00.py
|
couldbebetter/simulations_radar_systems_design
|
fcb23964e10c7ebb9cb1beabadc257e970a2c1de
|
[
"MIT"
] | null | null | null |
chapter_01/main_chapter01_00.py
|
couldbebetter/simulations_radar_systems_design
|
fcb23964e10c7ebb9cb1beabadc257e970a2c1de
|
[
"MIT"
] | 5 |
2018-05-31T16:42:07.000Z
|
2020-07-30T22:29:43.000Z
|
# -*- coding: utf-8 -*-
"""
Created on 21 October 2017
implements Listing 1.2. MATLAB Program “fig1_12.m”
in Mahafza radar book
@author: Ashiv Dhondea
"""
import numpy as np
import RadarBasics as RB
import RadarConstants as RC
import RadarEquations as RE
# Importing what's needed for nice plots.
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['Helvetica']})
rc('text', usetex=True)
params = {'text.latex.preamble' : [r'\usepackage{amsmath}', r'\usepackage{amssymb}']}
plt.rcParams.update(params)
# ------------------------------------------------------------------------- #
speed_light = RC.c; # [m/s]
# ------------------------------------------------------------------------- #
# Radar parameters
P_Tx = 1.5e6; # [W]
centre_freq = 5.6e9; #[Hz]
G_Tx_dB = 45.; # [dB]
G_Tx = RB.fn_dB_to_Power(G_Tx_dB)
G_Rx = G_Tx;
RCS = 0.1 #[m^2]
bandwidth = 5e6; # [Hz]
te = 290.; # [K]
nf = 3; #[dB]
T0 = RB.fn_dB_to_Power(nf)*te
radar_loss = RB.fn_dB_to_Power(6.0);
wavelength = RB.fnCalculate_Wavelength_or_Frequency(speed_light,centre_freq);
rho_Tx = np.linspace(25e3,165e3,1000); # target range 25 -165 km, 1000 points
P_Rx1 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64);
P_Rx2 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64);
P_Rx3 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64);
snr_Rx_1 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64);
snr_Rx_2 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64);
snr_Rx_3 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64);
snr_Rx_2_04 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64);
snr_Rx_3_18 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64);
for index in range(len(rho_Tx)):
P_Rx1[index] = RE.fnCalculate_ReceivedPower(P_Tx,G_Tx,G_Rx,rho_Tx[index],rho_Tx[index],wavelength,RCS);
P_Rx2[index] = RE.fnCalculate_ReceivedPower(P_Tx,G_Tx,G_Rx,rho_Tx[index],rho_Tx[index],wavelength,RCS/10.);
P_Rx3[index] = RE.fnCalculate_ReceivedPower(P_Tx,G_Tx,G_Rx,rho_Tx[index],rho_Tx[index],wavelength,RCS*10.);
snr_Rx_1[index] = RE.fnCalculate_ReceivedSNR(P_Rx1[index],T0,bandwidth,radar_loss);
snr_Rx_2[index] = RE.fnCalculate_ReceivedSNR(P_Rx2[index],T0,bandwidth,radar_loss)
snr_Rx_3[index] = RE.fnCalculate_ReceivedSNR(P_Rx3[index],T0,bandwidth,radar_loss)
snr_Rx_2_04[index] = RE.fnCalculate_ReceivedSNR(P_Rx1[index]*0.4,T0,bandwidth,radar_loss)
snr_Rx_3_18[index] = RE.fnCalculate_ReceivedSNR(P_Rx1[index]*1.8,T0,bandwidth,radar_loss)
snr_Rx_1_dB = RB.fn_Power_to_dB(snr_Rx_1);
snr_Rx_2_dB = RB.fn_Power_to_dB(snr_Rx_2);
snr_Rx_3_dB = RB.fn_Power_to_dB(snr_Rx_3);
rcs1 = RB.fn_Power_to_dB(RCS);
rcs2 = RB.fn_Power_to_dB(RCS/10.)
rcs3 = RB.fn_Power_to_dB(RCS*10.)
snr_Rx_2_04_dB = RB.fn_Power_to_dB(snr_Rx_2_04);
snr_Rx_3_18_dB = RB.fn_Power_to_dB(snr_Rx_3_18);
# ------------------------------------------------------------------------- #
fig = plt.figure(1);
ax = fig.gca()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fig.suptitle(r"\textbf{SNR versus detection range for three different values of RCS}" ,fontsize=12);
plt.plot(rho_Tx/1000.,snr_Rx_3_dB,label=r"$\sigma = %f~\mathrm{dBsm}$" %rcs3)
plt.plot(rho_Tx/1000.,snr_Rx_1_dB,linestyle='-.',label=r"$\sigma = %f~\mathrm{dBsm}$" %rcs1)
plt.plot(rho_Tx/1000.,snr_Rx_2_dB,linestyle='--',label=r"$\sigma = %f~\mathrm{dBsm}$" %rcs2)
ax.set_ylabel(r"SNR $[\mathrm{dB}]$")
ax.set_xlabel(r'Detection range $[\mathrm{km}]$');
plt.legend(loc='best')
plt.grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black')
fig.savefig('main_chapter01_00_12a.pdf',bbox_inches='tight',pad_inches=0.11,dpi=10)
fig = plt.figure(2);
ax = fig.gca()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fig.suptitle(r"\textbf{SNR versus detection range for three different values of radar peak power}" ,fontsize=12);
plt.plot(rho_Tx/1000.,snr_Rx_3_18_dB,label=r"$P_\text{Tx} = 2.16~\mathrm{MW}$")
plt.plot(rho_Tx/1000.,snr_Rx_1_dB,linestyle='-.',label=r"$P_\text{Tx} = 1.5~\mathrm{MW}$")
plt.plot(rho_Tx/1000.,snr_Rx_2_04_dB,linestyle='--',label=r"$P_\text{Tx} = 0.6~\mathrm{MW}$" )
ax.set_ylabel(r"SNR $[\mathrm{dB}]$")
ax.set_xlabel(r'Detection range $[\mathrm{km}]$');
plt.legend(loc='best')
plt.grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black')
fig.savefig('main_chapter01_00_12b.pdf',bbox_inches='tight',pad_inches=0.11,dpi=10)
| 40.803738 | 113 | 0.682776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,267 | 0.289931 |
bd12efd43b17ceb7fdeb18d7ab015b7b17528841
| 4,770 |
py
|
Python
|
core/views.py
|
edwildson/djangosecommerce
|
22541d24af52cd6f2d51196116a101583389b945
|
[
"CC0-1.0"
] | 1 |
2021-11-05T20:35:00.000Z
|
2021-11-05T20:35:00.000Z
|
core/views.py
|
edwildson/djangosecommerce
|
22541d24af52cd6f2d51196116a101583389b945
|
[
"CC0-1.0"
] | null | null | null |
core/views.py
|
edwildson/djangosecommerce
|
22541d24af52cd6f2d51196116a101583389b945
|
[
"CC0-1.0"
] | null | null | null |
# coding=utf-8
import functools
import warnings
from django.shortcuts import render
from .forms import ContactForm
from catalog.views import Product
from django.views.generic import View, TemplateView, CreateView
from django.contrib.auth import get_user_model
from django.contrib import messages
# Sobrecarga da classe para reiniciar senha
from django.contrib.auth.forms import PasswordResetForm
from django.contrib.auth.tokens import default_token_generator
from django.shortcuts import resolve_url
from django.urls import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.deprecation import RemovedInDjango20Warning
from django.template.response import TemplateResponse
from django.views.decorators.csrf import csrf_protect
from django.utils.translation import ugettext as _
from django.http import JsonResponse
User = get_user_model()
class IndexView(TemplateView):
template_name = 'index.html'
def get_context_data(self, *args, **kwargs):
texts = ['Lorem ipsum', 'dolor sit amet', 'consectetur']
context = {
'title': 'LolJa Onlaini',
'texts': texts,
}
return context
index = IndexView.as_view()
def contact(request):
success = False
form = ContactForm(request.POST or None)
if form.is_valid():
form.send_mail()
success = True
elif request.method == 'POST':
messages.error(request, 'Formulário inválido')
context = {
'form': form,
'success': success
}
return render(request, 'contact.html', context)
def deprecate_current_app(func):
"""
Handle deprecation of the current_app parameter of the views.
"""
@functools.wraps(func)
def inner(*args, **kwargs):
if 'current_app' in kwargs:
warnings.warn(
"Passing `current_app` as a keyword argument is deprecated. "
"Instead the caller of `{0}` should set "
"`request.current_app`.".format(func.__name__),
RemovedInDjango20Warning
)
current_app = kwargs.pop('current_app')
request = kwargs.get('request', None)
if request and current_app is not None:
request.current_app = current_app
return func(*args, **kwargs)
return inner
@deprecate_current_app
@csrf_protect
def password_reset(request,
template_name='registration/password_reset_form.html',
email_template_name='registration/password_reset_email.html',
subject_template_name='registration/password_reset_subject.txt',
password_reset_form=PasswordResetForm,
token_generator=default_token_generator,
post_reset_redirect=None,
from_email=None,
extra_context=None,
html_email_template_name=None,
extra_email_context=None):
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_done')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': token_generator,
'from_email': from_email,
'email_template_name': email_template_name,
'subject_template_name': subject_template_name,
'request': request,
'html_email_template_name': html_email_template_name,
'extra_email_context': extra_email_context,
}
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
context = {
'form': form,
'title': _('Password reset'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
def apiGetProducts(request):
if request.method == "GET":
products = Product.objects.all()
products_resp = []
for product in products:
products_resp.append({
'name': product.name,
'score': product.score,
'description': product.description,
'price': product.price
})
return HttpResponse(JsonResponse({
'store': "Lolja Online",
'data': products_resp
}), content_type='application/json')
return HttpResponse(JsonResponse({'message': "São aceitas apenas requisições GET."}),
content_type='application/json')
| 32.896552 | 89 | 0.637945 | 297 | 0.062199 | 0 | 0 | 2,353 | 0.492775 | 0 | 0 | 902 | 0.188901 |
bd133f9bc78502bb6dd771b9750d4b772d62e105
| 96 |
py
|
Python
|
venv/lib/python3.8/site-packages/jedi/inference/compiled/mixed.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2 |
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/jedi/inference/compiled/mixed.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19 |
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/jedi/inference/compiled/mixed.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/d9/2c/a4/7718a956dd946c833114214fec833728fef3062ae858a03a9d82cf9dc7
| 96 | 96 | 0.895833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
bd14232c1edf5c76909d75642903193968483bbc
| 1,087 |
py
|
Python
|
tests/jekpost_tests.py
|
arjunkrishnababu96/jekpost
|
2ddcb337e98c534426d83f1bd6fbde1f45f59225
|
[
"MIT"
] | 1 |
2018-10-05T16:53:02.000Z
|
2018-10-05T16:53:02.000Z
|
tests/jekpost_tests.py
|
arjunkrishnababu96/jekpost
|
2ddcb337e98c534426d83f1bd6fbde1f45f59225
|
[
"MIT"
] | null | null | null |
tests/jekpost_tests.py
|
arjunkrishnababu96/jekpost
|
2ddcb337e98c534426d83f1bd6fbde1f45f59225
|
[
"MIT"
] | null | null | null |
import unittest
import jekpost.jekpost_create as jek
from datetime import date
class JekpostTests(unittest.TestCase):
def test_date_gets_formatted(self):
"""
Check
31-DEC-2016 (2016-12-31)
1-NOV-2015 (2015-11-01)
11-JAN-2015 (2015-01-11)
"""
sample_dates = [ (date(2014, 12, 31), '2014-12-31'),
(date(2015, 11, 1), '2015-11-01'),
(date(2015, 1, 11), '2015-01-11')
]
for date_object, expected_date in sample_dates:
with self.subTest(i=date_object):
formatted_date = jek.get_date_formatted(date_object)
self.assertEqual(formatted_date, expected_date)
def test_make_filename(self):
date_formatted = '2014-12-31'
title = 'Post 01'
expected_filename = '2014-12-31-Post-01.md'
result_filename = jek.make_filename(title, date_formatted)
self.assertEqual(result_filename, expected_filename)
if __name__ == '__main__':
unittest.main()
| 31.970588 | 68 | 0.580497 | 957 | 0.880405 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.211592 |
bd14aa72ec4ce2f35a4d3b6757b556fa6efbf5d2
| 2,636 |
py
|
Python
|
ESMF/src/addon/ESMPy/examples/mesh_locstream_regrid.py
|
joeylamcy/gchp
|
0e1676300fc91000ecb43539cabf1f342d718fb3
|
[
"NCSA",
"Apache-2.0",
"MIT"
] | 1 |
2018-07-05T16:48:58.000Z
|
2018-07-05T16:48:58.000Z
|
ESMF/src/addon/ESMPy/examples/mesh_locstream_regrid.py
|
joeylamcy/gchp
|
0e1676300fc91000ecb43539cabf1f342d718fb3
|
[
"NCSA",
"Apache-2.0",
"MIT"
] | 1 |
2022-03-04T16:12:02.000Z
|
2022-03-04T16:12:02.000Z
|
ESMF/src/addon/ESMPy/examples/mesh_locstream_regrid.py
|
joeylamcy/gchp
|
0e1676300fc91000ecb43539cabf1f342d718fb3
|
[
"NCSA",
"Apache-2.0",
"MIT"
] | null | null | null |
# This example demonstrates how to regrid between a mesh and a locstream.
import ESMF
import numpy
import ESMF.util.helpers as helpers
import ESMF.api.constants as constants
# This call enables debug logging
# ESMF.Manager(debug=True)
from ESMF.util.mesh_utilities import mesh_create_5, mesh_create_5_parallel
from ESMF.util.locstream_utilities import create_locstream_16, create_locstream_16_parallel
if ESMF.pet_count() == 1:
mesh, _, _, _, _, _ = mesh_create_5()
locstream = create_locstream_16()
else:
if ESMF.pet_count() is not 4:
raise ValueError("processor count must be 4 or 1 for this example")
else:
mesh, _, _, _, _ = mesh_create_5_parallel()
locstream = create_locstream_16_parallel()
# create a field
srcfield = ESMF.Field(mesh, name='srcfield')#, meshloc=ESMF.MeshLoc.ELEMENT)
# create a field on the locstream
dstfield = ESMF.Field(locstream, name='dstfield')
xctfield = ESMF.Field(locstream, name='xctfield')
# initialize the fields
[x, y] = [0, 1]
deg2rad = 3.14159/180
gridXCoord = srcfield.grid.get_coords(x)
gridYCoord = srcfield.grid.get_coords(y)
srcfield.data[...] = 10.0 + (gridXCoord * deg2rad) ** 2 + (gridYCoord * deg2rad) ** 2
gridXCoord = locstream["ESMF:X"]
gridYCoord = locstream["ESMF:Y"]
xctfield.data[...] = 10.0 + (gridXCoord * deg2rad) ** 2 + (gridYCoord * deg2rad) ** 2
dstfield.data[...] = 1e20
# create an object to regrid data from the source to the destination field
# TODO: this example seems to fail occasionally with UnmappedAction.ERROR, probably due to a tolerance issue - ask Bob
regrid = ESMF.Regrid(srcfield=srcfield, dstfield=dstfield, regrid_method=ESMF.RegridMethod.BILINEAR,
unmapped_action=ESMF.UnmappedAction.IGNORE)
# do the regridding from source to destination field
dstfield = regrid(srcfield, dstfield)
# compute the mean relative error
num_nodes = numpy.prod(xctfield.data.shape[:])
relerr = 0
meanrelerr = 0
if num_nodes is not 0:
ind = numpy.where((dstfield.data != 1e20) & (xctfield.data != 0))[0]
relerr = numpy.sum(numpy.abs(dstfield.data[ind] - xctfield.data[ind]) / numpy.abs(xctfield.data[ind]))
meanrelerr = relerr / num_nodes
# handle the parallel case
if ESMF.pet_count() > 1:
relerr = helpers.reduce_val(relerr, op=constants.Reduce.SUM)
num_nodes = helpers.reduce_val(num_nodes, op=constants.Reduce.SUM)
# output the results from one processor only
if ESMF.local_pet() is 0:
meanrelerr = relerr / num_nodes
print ("ESMPy Grid Mesh Regridding Example")
print (" interpolation mean relative error = {0}".format(meanrelerr))
assert (meanrelerr < 3e-5)
| 35.621622 | 118 | 0.727238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 757 | 0.287178 |
bd14fe29205fb7db4613505f3b8655a44fae871a
| 36 |
py
|
Python
|
workPEMSBAY/Param_HistoricalAverage.py
|
deepkashiwa20/DeepTraffic
|
6ac66258ef9fa9bbe7dcd7c4750cb24946eba58c
|
[
"MIT"
] | 35 |
2021-06-18T01:03:16.000Z
|
2022-03-31T02:16:37.000Z
|
workPEMSBAY/Param_HistoricalAverage.py
|
yuanxw5/DL-Traff-Graph
|
63f3d81ce3a750e43645c61089c5ca219bfcbfd3
|
[
"MIT"
] | null | null | null |
workPEMSBAY/Param_HistoricalAverage.py
|
yuanxw5/DL-Traff-Graph
|
63f3d81ce3a750e43645c61089c5ca219bfcbfd3
|
[
"MIT"
] | 12 |
2021-06-17T02:54:48.000Z
|
2022-03-25T05:21:43.000Z
|
HISTORYDAY = 7
DAYTIMESTEP = 24 * 12
| 18 | 21 | 0.722222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
bd15c472f906d140ef00546b94634b33eb43240f
| 714 |
py
|
Python
|
topy/__init__.py
|
TarcisioLOliveira/topy
|
060da675e6494fee63fa5547befcb1f8ecc39fdc
|
[
"MIT"
] | 1 |
2021-01-25T00:13:34.000Z
|
2021-01-25T00:13:34.000Z
|
topy/__init__.py
|
TarcisioLOliveira/topy
|
060da675e6494fee63fa5547befcb1f8ecc39fdc
|
[
"MIT"
] | null | null | null |
topy/__init__.py
|
TarcisioLOliveira/topy
|
060da675e6494fee63fa5547befcb1f8ecc39fdc
|
[
"MIT"
] | null | null | null |
"""
# ==============================================================================
# ToPy -- Topology optimization with Python.
# Copyright (C) 2012, 2015, 2016, 2017 William Hunter.
# Copyright (C) 2020, 2021, Tarcísio L. de Oliveira
# ==============================================================================
"""
from .topology_trad import *
from .topology_gen import *
from .visualisation import *
from .elements import *
from .optimisation import *
from .pathfinding import *
__version__ = "1.0.0"
__author__ = "Tarcisio L. de Oliveira"
__all__ = (
topology_trad.__all__ +
topology_gen.__all__ +
visualisation.__all__ +
elements.__all__ +
optimisation.__all__ +
pathfinding.__all__
)
| 26.444444 | 80 | 0.560224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 354 | 0.493724 |
bd1639f542971f0b9d004e950fd65037d1434c94
| 4,788 |
py
|
Python
|
data/fidt_generate.py
|
PPGod95/FIDTM
|
b5582c5cc485496d85af2043ffd6e4266f354f3b
|
[
"MIT"
] | null | null | null |
data/fidt_generate.py
|
PPGod95/FIDTM
|
b5582c5cc485496d85af2043ffd6e4266f354f3b
|
[
"MIT"
] | null | null | null |
data/fidt_generate.py
|
PPGod95/FIDTM
|
b5582c5cc485496d85af2043ffd6e4266f354f3b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@Project :
@FileName:
@Author :penghr
@Time :2021/11/xx xx:xx
@Desc : FIDTM-train/dataset/FIDTM/
├── test
│ ├── gt_fidt_map
│ │ └── IMG_8.h5
│ ├── gt_show
│ │ └── IMG_8.jpg
│ ├── images
│ │ └── IMG_8.jpg
│ └── labels
│ └── IMG_8.txt
└── train
├── gt_fidt_map
│ └── IMG_1.h5
├── gt_show
│ └── IMG_1.jpg
├── images
│ └── IMG_1.jpg
└── labels
└── IMG_1.txt
原始数据集分为train&test,各目录下有images和labels文件夹,运行脚本生成gt_show以及gt_fidt_map文件夹,其中gt_show为可视化标注不参与训练,gt_fidt_map为生成的fidtmap和kpoint字典,参与下一步训练。
"""
import math
import os
import cv2
import h5py
import torch
import numpy as np
from tqdm import tqdm
# 生成路径
dataset_path = '../dataset/FIDTM'
label_type = 'txt'
train_path = os.path.join(dataset_path, 'train')
test_path = os.path.join(dataset_path, 'test')
train_img_path = os.path.join(train_path, 'images')
test_img_path = os.path.join(test_path, 'images')
train_label_path = os.path.join(train_path, 'labels')
test_label_path = os.path.join(test_path, 'labels')
train_gt_map = train_img_path.replace('images', 'gt_fidt_map')
test_gt_map = test_img_path.replace('images', 'gt_fidt_map')
train_gt_show = train_img_path.replace('images', 'gt_show')
test_gt_show = test_img_path.replace('images', 'gt_show')
path_list = [train_gt_map, test_gt_map, train_gt_show, test_gt_show]
for i in path_list:
os.makedirs(i, exist_ok=True)
train_list = []
for fs in os.listdir(train_img_path):
train_list.append(os.path.join(train_img_path, fs))
test_list = []
for fs in os.listdir(test_img_path):
test_list.append(os.path.join(test_img_path, fs))
img_paths = train_list + test_list
img_paths.sort()
def fidt_generate(im_data, gt_data, lamda):
size = im_data.shape
new_im_data = cv2.resize(im_data, (lamda * size[1], lamda * size[0]), 0)
new_size = new_im_data.shape
d_map = (np.zeros([new_size[0], new_size[1]]) + 255).astype(np.uint8)
gt_data = lamda * gt_data
for o in range(0, len(gt_data)):
x = np.max([1, math.floor(gt_data[o][1])])
y = np.max([1, math.floor(gt_data[o][0])])
if x >= new_size[0] or y >= new_size[1]:
continue
d_map[x][y] = d_map[x][y] - 255
distance_map = cv2.distanceTransform(d_map, cv2.DIST_L2, 0)
distance_map = torch.from_numpy(distance_map)
distance_map = 1 / (1 + torch.pow(distance_map, 0.02 * distance_map + 0.75))
distance_map = distance_map.numpy()
distance_map[distance_map < 1e-2] = 0
return distance_map
print('开始生成训练数据')
with tqdm(total=len(img_paths)) as pbar:
for img_path in img_paths:
img = cv2.imread(img_path)
if label_type == 'txt':
gt = np.loadtxt(img_path.replace('images', 'labels').replace('.jpg', '.txt'))[:, 0:2].round(8)
elif label_type == 'npy':
gt = np.load(img_path.replace('images', 'labels').replace('.jpg', '.npy')).round(8)
elif label_type == 'mat':
gt = np.loadtxt(img_path.replace('images', 'labels').replace('.jpg', '.mat'))[:, 0:2].round(8)
'''最关键,根据标签生成fidt图'''
fidt_map = fidt_generate(img, gt, 1)
# cv2.imshow('1', fidt_map)
# cv2.waitKey(0)
'''标签对应像素为1其余为0'''
kpoint = np.zeros((img.shape[0], img.shape[1]))
for i in range(0, len(gt)):
if int(gt[i][1]) < img.shape[0] and int(gt[i][0]) < img.shape[1]:
kpoint[int(gt[i][1]), int(gt[i][0])] = 1
# cv2.imshow('1', kpoint)
# cv2.waitKey(0)
'''保存成h5文件,其实就是字典,后期优化'''
with h5py.File(img_path.replace('.jpg', '.h5').replace('images', 'gt_fidt_map'), 'w') as hf:
hf['fidt_map'] = fidt_map
hf['kpoint'] = kpoint
pbar.update()
'''可视化,可以不要'''
try:
fidt_map1 = fidt_map
fidt_map1 = fidt_map1 / np.max(fidt_map1) * 255
fidt_map1 = fidt_map1.astype(np.uint8)
fidt_map1 = cv2.applyColorMap(fidt_map1, 2)
cv2.imwrite(img_path.replace('images', 'gt_show'), fidt_map1)
except Exception as e:
print(img_path,e)
# cv2.imshow('1', fidt_map1)
# cv2.waitKey(0)
print('完成')
| 33.71831 | 131 | 0.539474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,083 | 0.403369 |
bd166a19a710f2d8a3cb312cb57d84d5ce6d3bb6
| 356 |
py
|
Python
|
tests/urls.py
|
maykinmedia/djadyen
|
8bde7172c72d68975d4a77c7ef6bed73412619dc
|
[
"BSD-3-Clause"
] | 3 |
2018-10-19T06:57:50.000Z
|
2020-11-12T11:20:37.000Z
|
tests/urls.py
|
maykinmedia/djadyen
|
8bde7172c72d68975d4a77c7ef6bed73412619dc
|
[
"BSD-3-Clause"
] | 16 |
2017-02-14T12:37:58.000Z
|
2019-04-25T07:55:42.000Z
|
tests/urls.py
|
maykinmedia/djadyen
|
8bde7172c72d68975d4a77c7ef6bed73412619dc
|
[
"BSD-3-Clause"
] | 2 |
2018-05-16T10:08:34.000Z
|
2019-09-29T23:31:04.000Z
|
try:
from django.urls import path, include
except:
from django.conf.urls import url as path, include
from django.contrib import admin
urlpatterns = [
path(r'^admin/', admin.site.urls),
path(r'^app/', include('tests.app.urls')),
path(r'^adyen/notifications/', include('djadyen.notifications.urls', namespace='adyen-notifications')),
]
| 27.384615 | 107 | 0.702247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.300562 |
bd167da504ac1a51b81416178b209b951582f2de
| 583 |
py
|
Python
|
cpc/type/__init__.py
|
U-Ar/Cpresto
|
f723458fb237c9e3e8bc8a6afdf7c81858a65363
|
[
"BSD-3-Clause"
] | 1 |
2021-05-09T07:10:19.000Z
|
2021-05-09T07:10:19.000Z
|
cpc/type/__init__.py
|
U-Ar/Cpresto
|
f723458fb237c9e3e8bc8a6afdf7c81858a65363
|
[
"BSD-3-Clause"
] | null | null | null |
cpc/type/__init__.py
|
U-Ar/Cpresto
|
f723458fb237c9e3e8bc8a6afdf7c81858a65363
|
[
"BSD-3-Clause"
] | null | null | null |
from .ArrayType import *
from .ArrayTypeRef import *
from .CompositeType import *
from .FunctionType import *
from .FunctionTypeRef import *
from .IntegerType import *
from .IntegerTypeRef import *
from .NamedType import *
from .ParamTypes import *
from .PointerType import *
from .PointerTypeRef import *
from .StructType import *
from .StructTypeRef import *
from .Type import *
from .TypeRef import *
from .TypeTable import *
from .UnionType import *
from .UnionTypeRef import *
from .UserType import *
from .UserTypeRef import *
from .VoidType import *
from .VoidTypeRef import *
| 26.5 | 30 | 0.7753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
bd16c8b39601a36ca38158d3199f58ce464da6a0
| 41 |
py
|
Python
|
packages/speex.py
|
mhutch/bockbuild
|
0d989e2d0259d17d41a195f8d28b3844a4652e7b
|
[
"MIT"
] | null | null | null |
packages/speex.py
|
mhutch/bockbuild
|
0d989e2d0259d17d41a195f8d28b3844a4652e7b
|
[
"MIT"
] | null | null | null |
packages/speex.py
|
mhutch/bockbuild
|
0d989e2d0259d17d41a195f8d28b3844a4652e7b
|
[
"MIT"
] | null | null | null |
XiphPackage ('speex', 'speex', '1.2rc1')
| 20.5 | 40 | 0.634146 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.536585 |
bd1785dbb7815b9bfadbb18331b534590567b760
| 2,105 |
py
|
Python
|
Python Tutorial Django/pac/pac/main/views.py
|
PaulPan00/donkey_wrapper
|
a03cf0f42f65625fbce792b06c98acd153c5d6c8
|
[
"MIT"
] | 6 |
2021-03-26T01:42:31.000Z
|
2021-04-11T16:17:42.000Z
|
Python Tutorial Django/pac/pac/main/views.py
|
packetsss/Python
|
a03cf0f42f65625fbce792b06c98acd153c5d6c8
|
[
"MIT"
] | null | null | null |
Python Tutorial Django/pac/pac/main/views.py
|
packetsss/Python
|
a03cf0f42f65625fbce792b06c98acd153c5d6c8
|
[
"MIT"
] | 7 |
2021-04-06T06:55:22.000Z
|
2021-05-03T11:26:38.000Z
|
# Create by Packetsss
# Personal use is allowed
# Commercial use is prohibited
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse, HttpResponseRedirect
from .models import ToDoList, Item
from .forms import Create_list
def index(response, id):
lst = ToDoList.objects.get(id=id)
dic = {"list": lst}
if lst in response.user.todolist.all():
if response.method == "POST":
# Our dic be like: {"save": ["save"], "c1": ["clicked"]}
print(response.POST)
if response.POST.get("save"):
for item in lst.item_set.all():
if response.POST.get("c" + str(item.id)) == "clicked":
item.complete = True
else:
item.complete = False
item.save()
elif response.POST.get("newItem"):
txt = response.POST.get("new")
if len(txt) > 0:
lst.item_set.create(text=txt, complete=False)
else:
print("Invalid input")
return render(response, "main/list.html", dic)
return render(response, "main/view.html", dic)
def home(response):
return render(response, "main/home.html", {})
def create(response):
if response.method == "POST":
form = Create_list(response.POST)
if form.is_valid():
n = form.cleaned_data["name"]
t = ToDoList(name=n)
t.save()
response.user.todolist.add(t)
return HttpResponseRedirect(f"/{t.id}")
else:
pass
form = Create_list()
dic = {"form": form}
return render(response, "main/create.html", dic)
def view(response):
return render(response, "main/view.html", {})
# def id(response, id):
# lst = ToDoList.objects.get(id=id)
# return HttpResponse(f"<h1>{lst.name}</h1>")
# def name(response, name):
# lst = ToDoList.objects.get(name=name)
# item = lst.item_set.get(id=1)
# return render(response, "main/base.html", {})
| 30.071429 | 74 | 0.56152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 593 | 0.28171 |
bd17b046c9a2e0dbd7f153a5a1f41fd0257f99eb
| 5,610 |
py
|
Python
|
src/Commands.py
|
rkpop/kokobot
|
d19d68e12a7e6c0a25373ae5404e46632d59c40f
|
[
"MIT"
] | 3 |
2018-07-25T23:55:58.000Z
|
2018-10-17T05:50:18.000Z
|
src/Commands.py
|
rkpop/kokobot
|
d19d68e12a7e6c0a25373ae5404e46632d59c40f
|
[
"MIT"
] | null | null | null |
src/Commands.py
|
rkpop/kokobot
|
d19d68e12a7e6c0a25373ae5404e46632d59c40f
|
[
"MIT"
] | 1 |
2018-12-01T05:18:48.000Z
|
2018-12-01T05:18:48.000Z
|
import asyncio
from discord.ext import commands
from src.BaseCog import BaseCog
from src.DB import DB
from src.Reasons import Reasons
class Commands(BaseCog):
def __init__(self, bot, config):
super().__init__(bot, config)
self.reasons = Reasons()
HELP_MESSAGE = """
Command: `/kkb <action> [args]`
All messages sent by the bot will contain a "reddit_id" field.
Use that ID for all of the below commands.
Comments will be marked with a White color.
Posts will be marked with a Blue color.
Approve/Remove Comment:
`approvec [comment_id,]`
e.g. `/kkb approvec 7abc351`
e.g. `/kkb approvec 7asb472,7bashf2`
`removec [comment_id,]`
Approve Posts:
`approve [post_id,]`
Remove Posts:
`remove [post_id,]`
OR
`remove [post_id,] reasons [#]`
e.g. `/kkb remove 7bas4e reasons 2 5 19`
If the reason requires input from you, include the text after that number
e.g. `/kkb remove 7bas4e reasons 1 r/kpoppers`
You can also use the 'custom' reason for freeform response
e.g. `/kkb remove 7bas4e reasons custom "My custom reason"`
Make sure to use DOUBLE QUOTES instead of single quotes.
Get help:
`/kkb help`
"""
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
await ctx.channel.send(str(error), delete_after=15)
await asyncio.sleep(15)
await ctx.message.delete()
@commands.command()
async def help(self, ctx):
await asyncio.gather(
ctx.message.delete(),
ctx.send(self.HELP_MESSAGE, delete_after=30),
)
@commands.command()
async def approvec(self, ctx, comment_id_list):
comment_ids = comment_id_list.split(",")
if len(comment_ids) == 0:
raise ValueError("No comment IDs were given")
for comment_id in comment_ids:
await self.reddit.approve_comment(comment_id)
await asyncio.gather(
ctx.message.delete(),
self.delete_message(ctx.channel, comment_id),
)
@commands.command()
async def removec(self, ctx, comment_id_list, *reasons):
comment_ids = comment_id_list.split(",")
if len(comment_ids) == 0:
raise ValueError("No comment IDs were given")
for comment_id in comment_ids:
await self.reddit.remove_comment(comment_id)
await asyncio.gather(
ctx.message.delete(),
self.delete_message(ctx.channel, comment_id),
)
@commands.command()
async def approve(self, ctx, post_id_list):
post_ids = post_id_list.split(",")
if len(post_ids) == 0:
raise ValueError("No posts were given")
for post_id in post_ids:
is_report = False
if DB.get().is_post_resolved(post_id):
is_report = True
await self.reddit.approve_post(post_id, is_report=is_report)
await asyncio.gather(
ctx.message.delete(),
self.delete_message(ctx.channel, post_id),
)
@commands.command()
async def remove(self, ctx, post_id_list, *reasons):
post_ids = post_id_list.split(",")
if len(post_ids) == 0:
raise ValueError("No posts were given")
if len(reasons) < 2:
reasons = []
else:
if reasons[0] != "reasons":
raise ValueError('Invalid command format. Expected "reasons".')
reasons = reasons[1:]
if len(reasons) == 0:
for post_id in post_ids:
is_report = False
if DB.get().is_post_resolved(post_id):
is_report = True
await self.reddit.remove_post(post_id, is_report=is_report)
await ctx.message.delete()
return
if len(post_ids) > 1:
raise ValueError("Reasons are not supported when removing multiple posts")
post_id = post_ids[0]
reason_body = self.parse_reasons(reasons)
submission = await self.reddit.praw().submission(id=post_id)
header = self.reasons.get_header(submission.author, "post")
footer = self.reasons.get_footer()
reason_text = "{}{}{}".format(header, reason_body, footer)
is_report = False
if DB.get().is_post_resolved(post_id):
is_report = True
await asyncio.gather(
self.reddit.remove_post(post_id, reason_text, is_report=is_report),
ctx.message.delete(),
self.delete_message(ctx.channel, post_id),
)
def parse_reasons(self, reason_input):
# 1 'r/kpoppers' 2 3 6 9 'https://redd.it/7fb1r5' custom 'Custom reason!'
reason_string = ""
user_input = False
for index, reason in enumerate(reason_input):
if user_input:
user_input = False
continue
if self.reasons.needs_text(reason):
if len(reason_input) <= index + 1:
raise ValueError("Reason {} required text.".format(reason))
if reason_input[index + 1] == "custom":
raise ValueError("Reason {} required text.".format(reason))
reason_string += (
self.reasons.add_reason(reason, reason_input[index + 1]) + "\n\n"
)
user_input = True
else:
reason_string += self.reasons.add_reason(reason) + "\n\n"
return reason_string
| 31.166667 | 86 | 0.587344 | 5,473 | 0.975579 | 0 | 0 | 3,326 | 0.59287 | 3,177 | 0.56631 | 1,400 | 0.249554 |
bd184a22649fd3e0a64f5b17ec6b9f8201e73eaa
| 2,981 |
py
|
Python
|
src/lur/grade.py
|
qlurkin/lur_python
|
39564f276b3c03a073d4922627634b67c3af2052
|
[
"MIT"
] | null | null | null |
src/lur/grade.py
|
qlurkin/lur_python
|
39564f276b3c03a073d4922627634b67c3af2052
|
[
"MIT"
] | null | null | null |
src/lur/grade.py
|
qlurkin/lur_python
|
39564f276b3c03a073d4922627634b67c3af2052
|
[
"MIT"
] | null | null | null |
from cmath import nan
from sqlite3 import DatabaseError
import pandas as pd
import numpy as np
import json
def load_from_csv(path):
dt = pd.read_csv(path, sep=';', dtype={'matricule': object})
return dt.set_index('matricule')
def fix_matricule(matricule):
if matricule.startswith('195'):
return '19' + matricule[3:]
return matricule
def load_from_claco_csv(path):
df = pd.read_csv(path, delimiter=';')
df['matricule'] = df['username'].str.split('@', expand=True)[0]
df['name'] = df['firstname'] + " " + df['lastname']
df['grade'] = df['score'] / df['total_score_on']
df = df[['matricule', 'name', 'grade']]
df['matricule'] = df['matricule'].map(fix_matricule, na_action='ignore')
df = df.dropna(subset=['matricule'])
df = df.set_index('matricule')
return df
def capwords(S):
return ' '.join([w.capitalize() for w in S.split(' ')])
def save(df, path):
df.to_json(path, indent=4, force_ascii=False)
def combine(**kwargs):
res = pd.DataFrame()
for df in kwargs.values():
res = res.combine_first(df[['name']])
for name, df in kwargs.items():
res[name] = df['grade']
res[name] = res[name].fillna(0.0)
return res
def to_plus_ecam_csv(df: pd.DataFrame, activity_code, path=None):
if path is None:
path = activity_code + '.csv'
if 'status' in df:
df = pd.DataFrame(df[['grade', 'status']])
else:
df = pd.DataFrame(df[['grade']])
df['status'] = np.nan
df['stat'] = df['status'].map(to_plus_ecam_stat)
df['cote'] = df['grade']
df['ae'] = activity_code
df = pd.DataFrame(df[['ae', 'cote', 'stat']])
df.to_csv(path, sep=';', encoding='utf8', index_label='matricule')
def to_plus_ecam_stat(status):
if status == 'présent':
return None
if status == 'absent':
return 'a'
if status == 'malade':
return 'm'
return status
def from_auto_correction(path):
with open(path, encoding='utf8') as file:
students = json.load(file)['students']
if 'check' in students[0]:
grades = {student['student']['matricule']: student['check']['grade'] for student in students}
else:
grades = {student['student']['matricule']: student['grade'] for student in students}
names = {student['student']['matricule']: student['student']['name'] for student in students}
grades = pd.Series(grades)
names = pd.Series(names)
df = pd.DataFrame({'name': names, 'grade': grades})
return df
def round_to_half(grade):
return np.floor(2 * grade + 0.5)/2
def round_to_tenth(grade):
return np.floor(10 * grade + 0.5)/10
if __name__ == '__main__':
data = {
'matricule': ['12345', '23456', '34567'],
'name': ['Quentin', 'André', 'Ken'],
'grade': [12, 13, 14],
'status': ['absent', 'malade', 'présent']
}
df = pd.DataFrame(data)
df = df.set_index('matricule')
to_plus_ecam_csv(df, 'ic1t', 'uc1t.csv')
| 31.378947 | 101 | 0.606172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 629 | 0.210791 |
bd1a64b8faa4b7589ee4783d56ea7df4ceeadc17
| 201 |
py
|
Python
|
website/canvas/storage.py
|
bopopescu/canvas
|
2dfd6009eaecd8dac64ccc6125084e65305fb5d0
|
[
"BSD-3-Clause"
] | 61 |
2015-11-10T17:13:46.000Z
|
2021-08-06T17:58:30.000Z
|
website/canvas/storage.py
|
bopopescu/canvas
|
2dfd6009eaecd8dac64ccc6125084e65305fb5d0
|
[
"BSD-3-Clause"
] | 13 |
2015-11-11T07:49:41.000Z
|
2021-06-09T03:45:31.000Z
|
website/canvas/storage.py
|
bopopescu/canvas
|
2dfd6009eaecd8dac64ccc6125084e65305fb5d0
|
[
"BSD-3-Clause"
] | 18 |
2015-11-11T04:50:04.000Z
|
2021-08-20T00:57:11.000Z
|
from compressor.storage import CompressorFileStorage
class CanvasFileStorage(CompressorFileStorage):
def url(self, path):
return "//canvas-dynamic-assets.s3.amazonaws.com/static/" + path
| 28.714286 | 72 | 0.766169 | 145 | 0.721393 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.248756 |
bd1ac0aae6302168adf0c1a01d9c3dda3ee533a8
| 6,104 |
py
|
Python
|
prettytype/__init__.py
|
stuglaser/prettytype
|
af31de6017dad56b1f6ba37225f0b17d35a69bd4
|
[
"MIT"
] | 1 |
2021-01-06T17:37:40.000Z
|
2021-01-06T17:37:40.000Z
|
prettytype/__init__.py
|
stuglaser/prettytype
|
af31de6017dad56b1f6ba37225f0b17d35a69bd4
|
[
"MIT"
] | null | null | null |
prettytype/__init__.py
|
stuglaser/prettytype
|
af31de6017dad56b1f6ba37225f0b17d35a69bd4
|
[
"MIT"
] | null | null | null |
import numbers
class NoneT(object):
def __repr__(self):
return 'NoneT()'
def __str__(self):
return 'None'
def msct(self, other):
if isinstance(other, NoneT):
return self
return MaybeT(other)
class MaybeT(object):
def __init__(self, childT):
self.childT = childT
def __repr__(self):
return 'MaybeT({!r})'.format(self.childT)
def __str__(self):
return str(self.childT) + '?'
def __eq__(self, other):
return isinstance(other, MaybeT) and self.childT == other.childT
def __neq__(self, other):
return not self == other
def msct(self, other):
if isinstance(other, MaybeT):
return MaybeT(self.childT.msct(other.childT))
elif isinstance(other, NoneT):
return self
else:
return MaybeT(self.childT.msct(other))
class AnyT(object):
def parent(self):
return None
def __repr__(self):
return 'AnyT()'
def __str__(self):
return '*'
def msct(self, other):
if isinstance(other, NoneT):
return MaybeT(self)
return self
anyT = AnyT()
class SimpleType(object):
def __init__(self, name, parent=anyT, type=None):
self.name = name
self._parent = parent
self.type = type
def parent(self):
return self._parent
def ancestry(self):
anc = [self]
t = self
while True:
t = t.parent()
if t is None:
break
anc.append(t)
anc.reverse()
return anc
def msct(self, other):
if isinstance(other, SimpleType):
if self.name == other.name:
return self
# Finds a common ancestor
best = anyT
for aT, bT in zip(self.ancestry(), other.ancestry()):
if aT != bT:
break
best = aT
return best
elif isinstance(other, NoneT):
return other.msct(self)
else:
return anyT
def __repr__(self):
return 'SimpleType(%r, %r, %r)' % (self.name, self._parent, self.type)
def __str__(self):
return self.name
emptyT = SimpleType('')
noneT = NoneT()
numberT = SimpleType('number')
intT = SimpleType('int', parent=numberT, type=int)
floatT = SimpleType('float', parent=numberT, type=float)
stringT = SimpleType('str', type=str)
PRIMITIVES = [intT, floatT, stringT]
class ListT(object):
def __init__(self, eltT):
self.eltT = eltT
def __eq__(self, other):
return isinstance(other, ListT) and self.eltT == other.eltT
def __neq__(self, other):
return not self == other
def __repr__(self):
return 'ListT({!r})'.format(self.eltT)
def __str__(self):
return '[{}]'.format(self.eltT)
def msct(self, other):
if isinstance(other, ListT):
if self.eltT == other.eltT:
return self
return ListT(self.eltT.msct(other.eltT))
elif isinstance(other, NoneT):
return other.msct(self)
else:
return anyT
class DictT(object):
def __init__(self, keyT, valueT):
self.keyT = keyT
self.valueT = valueT
def __eq__(self, other):
return (
isinstance(other, DictT) and
self.keyT == other.keyT and
self.valueT == other.valueT)
def msct(self, other):
if isinstance(other, DictT):
if self.keyT == other.keyT and self.valueT == other.valueT:
return self
return DictT(
self.keyT.msct(other.keyT),
self.valueT.msct(other.valueT))
elif isinstance(other, NoneT):
return other.msct(self)
else:
return anyT
def __repr__(self):
return 'DictT({!r}, {!r})'.format(self.keyT, self.valueT)
def __str__(self):
if self.keyT == self.valueT == emptyT:
return '{:}'
return '{%s: %s}' % (self.keyT, self.valueT)
class ClassT(object):
def __init__(self, class_):
self.class_ = class_
def __eq__(self, other):
return isinstance(other, ClassT) and self.class_ == other.class_
def __ne__(self, other):
return not self == other
def parent(self):
assert self.class_ != object
# TODO: Handle multiple inheritance somehow
return ClassT(self.class_.__bases__[0])
def ancestry(self):
anc = []
t = self
while t.class_ != object:
anc.append(t)
t = t.parent()
anc.reverse()
return anc
def msct(self, other):
if self == other:
return self
elif isinstance(other, ClassT):
best = anyT
for aT, bT in zip(self.ancestry(), other.ancestry()):
if aT != bT:
break
best = aT
return best
elif isinstance(other, NoneT):
return other.msct(self)
else:
return anyT
def __repr__(self):
return 'ClassT({})'.format(self.class_.__name__)
def __str__(self):
return self.class_.__name__
# most specific common type
def msct_all(types):
if len(types) == 0:
return emptyT
best = types[0]
for t in types[1:]:
best = best.msct(t)
return best
def typeof(obj):
if obj is None:
return noneT
for prim in PRIMITIVES:
if isinstance(obj, prim.type):
return prim
if isinstance(obj, list):
return ListT(msct_all([typeof(x) for x in obj]))
elif isinstance(obj, dict):
return DictT(
msct_all([typeof(k) for k in obj.keys()]),
msct_all([typeof(v) for v in obj.values()]))
else:
return ClassT(type(obj))
def prettytype(obj):
desc = typeof(obj)
return str(desc)
if isinstance(obj, basestring):
return 'str'
elif isinstance(obj, tuple):
return 'todo'
return ANY
| 23.750973 | 78 | 0.548984 | 4,965 | 0.813401 | 0 | 0 | 0 | 0 | 0 | 0 | 265 | 0.043414 |
bd1bc728b1d732bdeadd112c3709dd6ba324fe1b
| 5,705 |
py
|
Python
|
simulate_position_covariance_data.py
|
ronniyjoseph/Hybrid-Calibration
|
7f24a8a5f67d647a47d4559566f7461cb3be57ac
|
[
"AFL-3.0"
] | null | null | null |
simulate_position_covariance_data.py
|
ronniyjoseph/Hybrid-Calibration
|
7f24a8a5f67d647a47d4559566f7461cb3be57ac
|
[
"AFL-3.0"
] | 9 |
2019-10-23T03:30:33.000Z
|
2020-02-19T05:25:27.000Z
|
simulate_position_covariance_data.py
|
ronniyjoseph/Hybrid-Calibration
|
7f24a8a5f67d647a47d4559566f7461cb3be57ac
|
[
"AFL-3.0"
] | null | null | null |
import os
import numpy
import copy
import argparse
from matplotlib import pyplot
from src.radiotelescope import RadioTelescope
from src.radiotelescope import BaselineTable
from src.skymodel import SkyRealisation
from simulate_beam_covariance_data import compute_baseline_covariance
from simulate_beam_covariance_data import create_hex_telescope
from simulate_beam_covariance_data import plot_covariance_data
import time
def position_covariance_simulation(array_size=3, create_signal=True, compute_covariance=True, plot_covariance=True,
show_plot=True):
output_path = "/data/rjoseph/Hybrid_Calibration/numerical_simulations/"
project_path = "linear_position_covariance_numerical_point_fixed/"
n_realisations = 100000
position_precision = 1e-3
if not os.path.exists(output_path + project_path + "/"):
print("Creating Project folder at output destination!")
os.makedirs(output_path + project_path)
telescope = RadioTelescope(load=False, shape=['linear', 14, 5])#create_hex_telescope(array_size)
if create_signal:
create_visibility_data(telescope, position_precision, n_realisations, output_path + project_path,
output_data=True)
if compute_covariance:
compute_baseline_covariance(telescope, output_path + project_path, n_realisations, data_type='model')
compute_baseline_covariance(telescope, output_path + project_path, n_realisations, data_type='perturbed')
compute_baseline_covariance(telescope, output_path + project_path, n_realisations, data_type='residual')
if plot_covariance:
figure, axes = pyplot.subplots(1, 3, figsize=(18, 5))
plot_covariance_data(output_path + project_path, simulation_type="Position", figure=figure, axes=axes)
if show_plot:
pyplot.show()
return
def create_visibility_data(telescope_object, position_precision, n_realisations, path, output_data=False):
print("Creating Signal Realisations")
if not os.path.exists(path + "/" + "Simulated_Visibilities") and output_data:
print("Creating realisation folder in Project path")
os.makedirs(path + "/" + "Simulated_Visibilities")
ideal_baselines = telescope_object.baseline_table
for i in range(n_realisations):
if i % int(n_realisations/100) == 0:
print(f"Realisation {i}")
# source_population = SkyRealisation(sky_type='random', flux_high=1, seed=i)
# l_coordinate = numpy.random.uniform(-1, 1, 1)
# m_coordinate = numpy.random.uniform(-1, 1, 1)
#
# source_population = SkyRealisation(sky_type="point", fluxes=numpy.array([100]), l_coordinates=l_coordinate,
# m_coordinates=m_coordinate, spectral_indices=numpy.array([0.8]))
source_population = SkyRealisation(sky_type="point", fluxes=numpy.array([100]), l_coordinates=0.3,
m_coordinates=0.0, spectral_indices=numpy.array([0.8]))
perturbed_telescope = copy.copy(telescope_object)
# Compute position perturbations
number_antennas = len(perturbed_telescope.antenna_positions.x_coordinates)
x_offsets = numpy.random.normal(0, position_precision, number_antennas)
y_offsets = numpy.random.normal(0, position_precision, number_antennas)
# print(ideal_baselines.u_coordinates)
perturbed_telescope.antenna_positions.x_coordinates += x_offsets
perturbed_telescope.antenna_positions.y_coordinates += y_offsets
# Compute uv coordinates
perturbed_telescope.baseline_table = BaselineTable(position_table=perturbed_telescope.antenna_positions)
perturbed_baselines = perturbed_telescope.baseline_table
# Compute visibilities for the ideal case and the perturbed case
model_visibilities = source_population.create_visibility_model(ideal_baselines,
frequency_channels=numpy.array([150e6]))
perturbed_visibilities = source_population.create_visibility_model(perturbed_baselines,
frequency_channels=numpy.array([150e6]))
residual_visibilities = model_visibilities - perturbed_visibilities
numpy.save(path + "/" + "Simulated_Visibilities/" + f"model_realisation_{i}", model_visibilities.flatten())
numpy.save(path + "/" + "Simulated_Visibilities/" + f"perturbed_realisation_{i}",
perturbed_visibilities.flatten())
numpy.save(path + "/" + "Simulated_Visibilities/" + f"residual_realisation_{i}",
residual_visibilities.flatten())
return
def perturbed_to_original_mapper(original_baselines, perturbed_baselines):
perturbed_to_original_mapping = numpy.zeros(perturbed_baselines.number_of_baselines)
for i in range(perturbed_baselines.number_of_baselines):
antenna1_indices = numpy.where(original_baselines.antenna_id1 == perturbed_baselines.antenna_id1[i])
antenna2_indices = numpy.where(original_baselines.antenna_id2 == perturbed_baselines.antenna_id2[i])
perturbed_to_original_mapping[i] = numpy.intersect1d(antenna1_indices, antenna2_indices)[0]
return perturbed_to_original_mapping.astype(int)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ssh", action="store_true", dest="ssh_key", default=False)
params = parser.parse_args()
import matplotlib
if params.ssh_key:
matplotlib.use("Agg")
from matplotlib import pyplot
position_covariance_simulation()
| 46.382114 | 117 | 0.713234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,137 | 0.199299 |
bd1c3ef16239b0d584d630e4d1b4df484eba30ba
| 371 |
py
|
Python
|
wspay/migrations/0004_remove_wspayrequest_transactions.py
|
pinkdroids/django-wspay
|
19b52cb19539577c812e512062ffb239f2d89190
|
[
"MIT"
] | null | null | null |
wspay/migrations/0004_remove_wspayrequest_transactions.py
|
pinkdroids/django-wspay
|
19b52cb19539577c812e512062ffb239f2d89190
|
[
"MIT"
] | null | null | null |
wspay/migrations/0004_remove_wspayrequest_transactions.py
|
pinkdroids/django-wspay
|
19b52cb19539577c812e512062ffb239f2d89190
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.1 on 2022-01-20 15:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wspay', '0003_rename_wspaytransaction_transactionhistory_and_more'),
]
operations = [
migrations.RemoveField(
model_name='wspayrequest',
name='transactions',
),
]
| 20.611111 | 78 | 0.638814 | 286 | 0.770889 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.377358 |
bd1d6496d7db8cd8d21e423c19bb1534688474e4
| 24,456 |
py
|
Python
|
anthill/event/admin.py
|
anthill-services/anthill-event
|
3c303f33e4c150ce2dfed4f3534ec40e935ecfb8
|
[
"MIT"
] | null | null | null |
anthill/event/admin.py
|
anthill-services/anthill-event
|
3c303f33e4c150ce2dfed4f3534ec40e935ecfb8
|
[
"MIT"
] | null | null | null |
anthill/event/admin.py
|
anthill-services/anthill-event
|
3c303f33e4c150ce2dfed4f3534ec40e935ecfb8
|
[
"MIT"
] | 1 |
2017-12-03T22:03:10.000Z
|
2017-12-03T22:03:10.000Z
|
from anthill.common.validate import validate
from anthill.common import admin as a, update
from . model.event import EventNotFound, CategoryNotFound, EventFlags, EventEndAction
import ujson
import collections
EVENT_END_ACTION_DESCRIPTION = """
<b>Send Message</b><br>A message with detailed information about event (including score, rank, profile)
will be sent to the participating players<br><br>
<b>Call Exec Function</b><br>A function on exec service will be called with detailed information about event (including
score, rank, profile). In that case the Server Code should be enabled, with function with name <code>event_completed</code>:
<pre><code>async function event_completed(args)
{
// args[\"event\"] would contain event info
// args[\"participants\"] would contain a list of participation objects to process
// (one object for each player/participant), like so:
{
\"account\": <account id>, // or \"group\" for group-based event
\"profile\": <participation profile>,
\"score\": <score>,
\"rank\": <rank>
}
}
event_completed.allow_call = true;
</code></pre><br>
"""
class CategoriesController(a.AdminController):
async def get(self):
categories = await self.application.events.list_categories(self.gamespace)
result = {
"categories": categories
}
return result
def render(self, data):
return [
a.breadcrumbs([
a.link("events", "Events")
], "Categories"),
a.links("Categories", [
a.link("category", category.name, "list-alt", category_id=category.category_id)
for category in data["categories"]
]),
a.links("Navigate", [
a.link("events", "Go back", icon="chevron-left"),
a.link("common", "Edit common template", icon="flask"),
a.link("new_category", "Create a new category", icon="plus"),
a.link("https://spacetelescope.github.io/understanding-json-schema/index.html", "See docs", icon="book")
])
]
def access_scopes(self):
return ["event_admin"]
class CategoryController(a.AdminController):
async def delete(self, danger, **ingored):
if danger != "confirm":
raise a.Redirect("category", category_id=self.context.get("category_id"))
category_id = self.context.get("category_id")
await self.application.events.delete_category(self.gamespace, category_id)
raise a.Redirect("categories", message="Category has been deleted")
async def get(self, category_id):
category = await self.application.events.get_category(self.gamespace, category_id)
scheme_json = category.scheme
result = {
"scheme": scheme_json,
"category_name": category.name
}
return result
def render(self, data):
return [
a.breadcrumbs([
a.link("events", "Events"),
a.link("categories", "Categories")
], data["category_name"]),
a.form("Category template", fields={
"scheme": a.field("scheme", "json", "primary"),
"category_name": a.field("Category name (ID)", "text", "primary", "non-empty")
}, methods={
"update": a.method("Update", "primary"),
}, data=data),
a.split([
a.notice(
"About templates",
"Each category template has a common template shared across categories. "
"Category template inherits a common template."
),
a.form("Danger", fields={
"danger": a.field("This cannot be undone! The events of this category will be also deleted! "
"Type 'confirm' to do this.", "text", "danger",
"non-empty")
}, methods={
"delete": a.method("Delete category", "danger"),
}, data=data),
]),
a.links("Navigate", [
a.link("events", "Go back", icon="chevron-left"),
a.link("common", "Edit common template", icon="flask"),
a.link("events", "See events of this category", category=self.context.get("category_id")),
a.link("https://spacetelescope.github.io/understanding-json-schema/index.html", "See docs", icon="book")
])
]
def access_scopes(self):
return ["event_admin"]
async def update(self, scheme, category_name):
category_id = self.context.get("category_id")
try:
scheme_data = ujson.loads(scheme)
except (KeyError, ValueError):
raise a.ActionError("Corrupted json")
await self.application.events.update_category(self.gamespace, category_id, scheme_data, category_name)
raise a.Redirect(
"category",
message="Category has been updated",
category_id=category_id)
class ChooseCategoryController(a.AdminController):
async def apply(self, category):
raise a.Redirect("new_event", category=category)
async def get(self, category=None):
categories = await self.application.events.list_categories(self.gamespace)
return {
"category": category,
"categories": {
cat.category_id: cat.name for cat in categories
}
}
def render(self, data):
return [
a.breadcrumbs([
a.link("events", "Events")
], "Choose category"),
a.form(
title="Choose event category to create event of",
fields={
"category": a.field(
"Select category", "select", "primary", values=data["categories"]
)
}, methods={
"apply": a.method("Proceed", "primary")
}, data=data
),
a.links("Navigation", links=[
a.link("events", "Go back", icon="chevron-left"),
a.link("categories", "Manage categories", "list-alt")
])
]
def access_scopes(self):
return ["event_admin"]
class CommonController(a.AdminController):
async def get(self):
scheme = await self.application.events.get_common_scheme(self.gamespace)
result = {
"scheme": scheme
}
return result
def render(self, data):
return [
a.breadcrumbs([
a.link("events", "Events"),
a.link("categories", "Categories")
], "Common template"),
a.form("Common template", fields={
"scheme": a.field("scheme", "json", "primary")
}, methods={
"update": a.method("Update", "primary"),
}, data=data),
a.links("Navigate", [
a.link("@back", "Go back", icon="chevron-left"),
a.link("https://spacetelescope.github.io/understanding-json-schema/index.html", "See docs", icon="book")
])
]
def access_scopes(self):
return ["event_admin"]
async def update(self, scheme):
try:
scheme_data = ujson.loads(scheme)
except (KeyError, ValueError):
raise a.ActionError("Corrupted json")
await self.application.events.update_common_scheme(self.gamespace, scheme_data)
raise a.Redirect("common", message="Common template has been updated")
class EventController(a.AdminController):
async def delete(self, **ignored):
event_id = self.context.get("event_id")
try:
event = await self.application.events.get_event(self.gamespace, event_id)
except EventNotFound:
raise a.ActionError("No such event")
await self.application.events.delete_event(self.gamespace, event_id)
raise a.Redirect(
"events",
message="Event has been deleted",
category=event.category_id)
async def get(self, event_id):
events = self.application.events
try:
event = await events.get_event(self.gamespace, event_id)
except EventNotFound:
raise a.ActionError("Event was not found.")
category_id = event.category_id
category_name = event.category
enabled = "true" if event.enabled else "false"
tournament = "true" if event.tournament else "false"
clustered = "true" if event.clustered else "false"
group = "true" if event.group else "false"
start_dt = str(event.time_start)
end_dt = str(event.time_end)
end_action = str(event.end_action)
common_scheme = await events.get_common_scheme(self.gamespace)
category = await events.get_category(self.gamespace, category_id)
category_scheme = category.scheme
scheme = common_scheme.copy()
update(scheme, category_scheme)
return {
"enabled": enabled,
"tournament": tournament,
"clustered": clustered,
"group": group,
"event": event,
"start_dt": start_dt,
"end_dt": end_dt,
"event_data": event.data,
"scheme": scheme,
"category": category_id,
"category_name": category_name,
"end_action": end_action
}
def render(self, data):
category = data["category"]
return [
a.breadcrumbs([
a.link("events", "Events", category=category),
], "Event"),
a.form(
title="Event editor",
fields={
"event_data": a.field(
"Event properties", "dorn", "primary",
schema=data["scheme"], order=8
),
"enabled": a.field("Is event enabled", "switch", "primary", order=3),
"tournament": a.field("Is tournament enabled (e.g. players will be ranked)",
"switch", "primary", order=4),
"clustered": a.field("Is tournament's leaderboard clustered", "switch", "primary",
readonly=True, order=5),
"group": a.field("Is event group-based", "switch", "primary",
readonly=True, order=6),
"end_action": a.field("Action Once Event Is Complete", "select", "primary", order=7, values={
EventEndAction.NONE: "Do nothing",
EventEndAction.MESSAGE: "Send Message",
EventEndAction.EXEC: "Call Exec Function"
}, description=EVENT_END_ACTION_DESCRIPTION),
"category_name": a.field("Category", "readonly", "primary"),
"start_dt": a.field("Start date", "date", "primary", order=1),
"end_dt": a.field("End date", "date", "primary", order=2)
},
methods={
"save": a.method("Save", "primary"),
"delete": a.method("Delete event", "danger")
},
data=data
),
a.links("Navigate", [
a.link("events", "Go back", icon="chevron-left", category=category),
a.link("category", "Edit category", icon="list-alt", category_id=category),
a.link("new_event", "Clone event", icon="clone",
clone=self.context.get("event_id"),
category=data.get("category"))
])
]
@validate(event_data="load_json_dict", start_dt="datetime", end_dt="datetime",
enabled="bool", tournament="bool", end_action="str")
async def save(self, event_data, start_dt, end_dt, enabled=False, tournament=False,
end_action=EventEndAction.NONE, **ignore):
event_id = self.context.get("event_id")
events = self.application.events
try:
event = await events.get_event(self.gamespace, event_id)
except EventNotFound:
raise a.ActionError("Event was not found.")
flags = event.flags
flags.set(EventFlags.TOURNAMENT, tournament)
end_action = EventEndAction(end_action)
await events.update_event(
self.gamespace, event_id, enabled, flags,
event_data, start_dt, end_dt, end_action)
raise a.Redirect(
"event",
message="Event has been updated",
event_id=event_id)
def access_scopes(self):
return ["event_admin"]
class EventsController(a.AdminController):
EVENTS_IN_PAGE = 20
async def apply(self, category=None):
if not category:
raise a.Redirect("choose_category")
raise a.Redirect("events", category=category)
@validate(category="int", page="int")
async def get(self, category=0, page=1):
categories = await self.application.events.list_categories(
self.gamespace)
events, pages = await self.application.events.list_paged_events(
self.gamespace,
EventsController.EVENTS_IN_PAGE, page,
category_id=category)
cats = {
cat.category_id: cat.name
for cat in categories
}
cats[0] = "< Select >"
return {
"events": events,
"category": category,
"categories": cats,
"pages": pages
}
def render(self, data):
tbl_rows = []
for event in data["events"]:
title = "unknown"
description = "unknown"
if "title" in event.data:
title_object = event.data["title"]
title = title_object.get("EN", title_object.get("en", "unknown"))
elif "name" in event.data:
title_object = event.data["name"]
title = title_object.get("EN", title_object.get("en", "unknown"))
if "description" in event.data:
description_object = event.data["description"]
description = description_object.get("EN", description_object.get("en", "unknown"))
tbl_tr = {
"edit": [a.link("event", event.item_id, icon="calendar", event_id=event.item_id)],
"enabled": "yes" if event.enabled else "no",
"tournament": "yes" + (" (clustered)" if event.clustered else "") if event.tournament else "no",
"name": title[:32],
"description": description[:32],
"category": event.category,
"dates": str(event.time_start) + " -<br> " + str(event.time_end),
"controls": [a.button("event", "Delete", "danger", _method="delete", event_id=event.item_id)]
}
tbl_rows.append(tbl_tr)
return [
a.breadcrumbs([], "Events"),
a.form(
title="Filters",
fields={
"category": a.field(
"Category", "select", "primary", values=data["categories"]
)
}, methods={
"apply": a.method("Apply", "primary")
}, data=data
),
a.content("Events", [
{
"id": "edit",
"title": "Edit"
}, {
"id": "name",
"title": "Name"
}, {
"id": "description",
"title": "Description"
}, {
"id": "enabled",
"title": "Enabled"
}, {
"id": "tournament",
"title": "Tournament"
}, {
"id": "category",
"title": "Category"
}, {
"id": "dates",
"title": "Dates"
}, {
"id": "controls",
"title": "Controls"
}], tbl_rows, "default"),
a.pages(data["pages"]),
a.links("Navigation", links=[
a.link("choose_category", "Create new event", "plus", category=self.context.get("category", "0")),
a.link("categories", "Manage categories", "list-alt")
])
]
def access_scopes(self):
return ["event_admin"]
class NewCategoryController(a.AdminController):
async def create(self, scheme, category_name):
try:
scheme_data = ujson.loads(scheme)
except (KeyError, ValueError):
raise a.ActionError("Corrupted json")
category_id = await self.application.events.create_category(self.gamespace, category_name, scheme_data)
raise a.Redirect(
"category",
message="Category has been created",
category_id=category_id)
def render(self, data):
return [
a.breadcrumbs([
a.link("events", "Events"),
a.link("categories", "Categories")
], "New category"),
a.form("Category template", fields={
"scheme": a.field("scheme", "json", "primary"),
"category_name": a.field("Category name (ID)", "text", "primary", "non-empty")
}, methods={
"create": a.method("Create", "primary"),
}, data={"scheme": {}}),
a.notice(
"About templates",
"Each category template has a common template shared across categories. "
"Category template inherits a common template."
),
a.links("Navigate", [
a.link("categories", "Go back", icon="chevron-left"),
a.link("common", "Edit common template", icon="flask"),
a.link("events", "See events of this category", category=self.context.get("category_id")),
a.link("https://spacetelescope.github.io/understanding-json-schema/index.html", "See docs", icon="book")
])
]
def access_scopes(self):
return ["event_admin"]
class NewEventController(a.AdminController):
@validate(event_data="load_json_dict", start_dt="datetime", end_dt="datetime", enabled="bool",
tournament="bool", clustered="bool", group="bool", end_action="str_name")
async def create(self, event_data, start_dt, end_dt,
enabled=False, tournament=False, clustered=False, group=False,
end_action=EventEndAction.NONE, **ignore):
category_id = self.context.get("category")
flags = EventFlags()
if tournament:
flags.set(EventFlags.TOURNAMENT)
if clustered:
flags.set(EventFlags.CLUSTERED)
if group:
flags.set(EventFlags.GROUP)
end_action = EventEndAction(end_action)
try:
event_id = await self.application.events.create_event(
self.gamespace, category_id, enabled, flags,
event_data, start_dt, end_dt, end_action)
except CategoryNotFound:
raise a.ActionError("Category not found")
raise a.Redirect(
"event",
message="Event has been created",
event_id=event_id)
@validate(category="int", clone="int")
async def get(self, category, clone=None):
events = self.application.events
common_scheme = await events.get_common_scheme(self.gamespace)
category = await events.get_category(self.gamespace, category)
category_name = category.name
category_scheme = category.scheme
def update(d, u):
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
scheme = common_scheme.copy()
update(scheme, category_scheme)
event_data = None
start_dt = None
end_dt = None
enabled = "true"
tournament = "false"
clustered = "false"
group = "false"
end_action = EventEndAction.NONE
if clone:
try:
event = await events.get_event(self.gamespace, clone)
except EventNotFound:
raise a.ActionError("Event was not found.")
event_data = event.data
enabled = "true" if event.enabled else "false"
tournament = "true" if event.tournament else "false"
clustered = "true" if event.clustered else "false"
group = "true" if event.group else "false"
start_dt = str(event.time_start)
end_dt = str(event.time_end)
end_action = str(event.end_action)
return {
"scheme": scheme,
"enabled": enabled,
"tournament": tournament,
"clustered": clustered,
"group": group,
"category_name": category_name,
"event_data": event_data,
"start_dt": start_dt,
"end_dt": end_dt,
"end_action": end_action
}
def render(self, data):
category = self.context.get("category")
return [
a.breadcrumbs([
a.link("events", "Events", category=category),
], "New event"),
a.form(
title="New event (of category " + data.get("category_name") + ")",
fields={
"event_data": a.field(
"Event properties", "dorn", "primary",
schema=data["scheme"], order=8
),
"enabled": a.field("Is event enabled", "switch", "primary", order=3),
"tournament": a.field("Is tournament enabled (e.g. players will be ranked)",
"switch", "primary", order=4),
"clustered": a.field("Is tournament's leaderboard clustered",
"switch", "primary", order=5,
description="Cannot be changed later"),
"group": a.field("In even group-based",
"switch", "primary", order=6,
description="Cannot be changed later"),
"end_action": a.field("Action Once Event Is Complete", "select", "primary", order=7, values={
EventEndAction.NONE: "Do nothing",
EventEndAction.MESSAGE: "Send Message",
EventEndAction.EXEC: "Call Exec Function"
}, description=EVENT_END_ACTION_DESCRIPTION),
"start_dt": a.field("Start date", "date", "primary", "non-empty", order=1),
"end_dt": a.field("End date", "date", "primary", "non-empty", order=2)
},
methods={
"create": a.method("Create", "primary")
},
data=data
),
a.links("Navigate", [
a.link("events", "Go back", icon="chevron-left", category=category),
a.link("category", "Edit category", icon="list-alt", category_id=category)
])
]
def access_scopes(self):
return ["event_admin"]
class RootAdminController(a.AdminController):
def render(self, data):
return [
a.links("Events service", [
a.link("events", "Edit events", icon="wrench")
])
]
def access_scopes(self):
return ["event_admin"]
| 36.392857 | 124 | 0.5294 | 23,006 | 0.94071 | 0 | 0 | 4,629 | 0.189279 | 8,936 | 0.365391 | 6,729 | 0.275147 |
bd1d74e5ac367e134c8e0a19a4b10cfe4ee5fb88
| 15,704 |
py
|
Python
|
main.py
|
opt12/gym-jsbsim-eee
|
fa61d0d4679fd65b5736fc562fe268714b4e08d8
|
[
"MIT"
] | 7 |
2020-11-10T07:33:40.000Z
|
2021-06-23T07:25:43.000Z
|
main.py
|
opt12/gym-jsbsim-eee
|
fa61d0d4679fd65b5736fc562fe268714b4e08d8
|
[
"MIT"
] | null | null | null |
main.py
|
opt12/gym-jsbsim-eee
|
fa61d0d4679fd65b5736fc562fe268714b4e08d8
|
[
"MIT"
] | 5 |
2020-07-12T00:10:59.000Z
|
2021-06-22T09:13:13.000Z
|
import sys, os
# sys.path.append(os.path.join(os.path.dirname(__file__)) #TODO: Is this a good idea? Dunno! It works!
# print(os.path.join(os.path.dirname(__file__)))
import argparse
import markov_pilot.environment.properties as prp
from markov_pilot.environment.environment import NoFGJsbSimEnv_multi, JsbSimEnv_multi
from markov_pilot.wrappers.episodePlotterWrapper import EpisodePlotterWrapper_multi
from markov_pilot.wrappers.varySetpointsWrapper import VarySetpointsWrapper
from markov_pilot.tasks.tasks import SingleChannel_FlightTask, SingleChannel_MinimumProps_Task
from reward_funcs import _make_base_reward_components, make_angular_integral_reward_components, make_sideslip_angle_reward_components
from markov_pilot.agents.AgentTrainer import DDPG_AgentTrainer, PID_AgentTrainer, PidParameters, MADDPG_AgentTrainer
from markov_pilot.agents.agent_container import AgentContainer, AgentSpec
from markov_pilot.agents.train import perform_training
from markov_pilot.helper.lab_journal import LabJournal
from markov_pilot.helper.load_store import restore_agent_container_from_journal, restore_env_from_journal, save_test_run
from markov_pilot.testbed.evaluate_training import evaluate_training
## define the initial setpoints
target_path_angle_gamma_deg = -6.5
target_kias = 92
target_roll_angle_phi_deg = -15
target_sideslip_angle_beta_deg = 0
def parse_args(): #used https://github.com/openai/maddpg/ as a basis
parser = argparse.ArgumentParser("Reinforcement Learning experiments for multiagent environments")
# Environment
parser.add_argument("--max-episode-len-sec", type=int, default=120, help="maximum episode length in seconds (steps = seconds*interaction frequ.)")
parser.add_argument("--num-steps", type=int, default=30000, help="number of training steps to perform")
parser.add_argument("--interaction-frequency", type=float, default=5, help="frequency of agent interactions with the environment")
# Core training parameters
parser.add_argument("--lr_actor", type=float, default=1e-4, help="learning rate for the actor training Adam optimizer")
parser.add_argument("--lr_critic", type=float, default=1e-3, help="learning rate for the critic training Adam optimizer")
parser.add_argument("--tau", type=float, default=1e-3, help="target network adaptation factor")
parser.add_argument("--gamma", type=float, default=0.99, help="discount factor")
parser.add_argument("--batch-size", type=int, default=64, help="number of episodes to optimize at the same time")
parser.add_argument("--replay-size", type=int, default=1000000, help="size of the replay buffer")
# Checkpointing
parser.add_argument("--exp-name", type=str, default='Default_Experiment', help="name of the experiment")
parser.add_argument("--save-dir", type=str, default="./tmp/policy/", help="directory in which training state and model should be saved")
parser.add_argument("--save-rate", type=int, default=1000, help="save model once every time this many episodes are completed")
parser.add_argument("--load-dir", type=str, default="", help="directory in which training state and model are loaded")
# Evaluation
parser.add_argument("--restore", nargs='+', type=int, default=False) #to restore agents and env from lab-journal lines given as list and continue training
parser.add_argument("--play", nargs='+', type=int, default=False) #to play with agents and env restored from lab-journal lines
parser.add_argument("--best", type=bool, default=False) #TODO: when given, the first line from restore or play will be used to restore the environment and the best agents for that run will be loaded
parser.add_argument("--flightgear", type=bool, default=False) #TODO: when given, together with --play [lines] the environment will be replaced with the flight-gear enabled and the player will render to FlightGear
parser.add_argument("--testing-iters", type=int, default=2000, help="number of steps before running a performance test")
parser.add_argument("--plots-dir", type=str, default="./learning_curves/", help="directory where plot data is saved")
parser.add_argument("--base-dir", type=str, default="./", help="directory the test_run date is saved")
return parser.parse_args()
def setup_env(arglist) -> NoFGJsbSimEnv_multi:
agent_interaction_freq = arglist.interaction_frequency
episode_time_s=arglist.max_episode_len_sec
## define the initial conditions
initial_path_angle_gamma_deg = target_path_angle_gamma_deg + 3
initial_roll_angle_phi_deg = target_roll_angle_phi_deg + 10
initial_sideslip_angle_beta_deg = 0
initial_fwd_speed_KAS = 80
initial_aoa_deg = 1.0
initial_altitude_ft = 6000
elevator_AT_for_PID = SingleChannel_FlightTask('elevator', prp.elevator_cmd, {prp.flight_path_deg: target_path_angle_gamma_deg},
make_base_reward_components=_make_base_reward_components, #pass this in here as otherwise, the restore form disk gets nifty
integral_limit = 100)
#integral_limit: self.Ki * dt * int <= output_limit --> int <= 1/0.2*6.5e-2 = 77
aileron_AT_for_PID = SingleChannel_FlightTask('aileron', prp.aileron_cmd, {prp.roll_deg: initial_roll_angle_phi_deg},
make_base_reward_components=_make_base_reward_components, #pass this in here as otherwise, the restore form disk gets nifty
integral_limit = 100)
#integral_limit: self.Ki * dt * int <= output_limit --> int <= 1/0.2*1e-2 = 500
rudder_AT_for_PID = SingleChannel_FlightTask('rudder', prp.rudder_cmd, {prp.sideslip_deg: 0},
max_allowed_error= 10,
make_base_reward_components=_make_base_reward_components, #pass this in here as otherwise, the restore form disk gets nifty
integral_limit = 100)
#integral_limit: self.Ki * dt * int <= output_limit --> int <= 1/0.2*1e-2 = 500
coop_flight_path_task = SingleChannel_FlightTask('flight_path_angle', prp.elevator_cmd, {prp.flight_path_deg: target_path_angle_gamma_deg},
presented_state=[prp.q_radps, prp.indicated_airspeed, prp.elevator_cmd, prp.rudder_cmd, prp.aileron_cmd],
max_allowed_error= 30,
make_base_reward_components= make_angular_integral_reward_components,
integral_limit = 0.25)
coop_banking_task = SingleChannel_FlightTask('banking_angle', prp.aileron_cmd, {prp.roll_deg: target_roll_angle_phi_deg},
presented_state=[prp.p_radps, prp.indicated_airspeed, prp.aileron_cmd, prp.elevator_cmd, prp.aileron_cmd],
max_allowed_error= 60,
make_base_reward_components= make_angular_integral_reward_components,
integral_limit = 0.25)
coop_sideslip_task = SingleChannel_FlightTask('sideslip_angle', prp.rudder_cmd, {prp.sideslip_deg: target_sideslip_angle_beta_deg},
presented_state=[prp.r_radps, prp.indicated_airspeed, prp.rudder_cmd, prp.aileron_cmd, prp.elevator_cmd,
coop_banking_task.setpoint_value_props[0], coop_banking_task.setpoint_props[0]], #TODO: this relies on defining coop_banking_task before coop_sideslip_task :-()
max_allowed_error= 30,
make_base_reward_components= make_sideslip_angle_reward_components,
integral_limit = 0.25)
task_list = [coop_flight_path_task, coop_banking_task, coop_sideslip_task]
env = NoFGJsbSimEnv_multi(task_list, agent_interaction_freq = agent_interaction_freq, episode_time_s = episode_time_s)
env = EpisodePlotterWrapper_multi(env, output_props=[prp.sideslip_deg])
env.set_initial_conditions({ prp.initial_u_fps: 1.6878099110965*initial_fwd_speed_KAS
, prp.initial_flight_path_deg: initial_path_angle_gamma_deg
, prp.initial_roll_deg: initial_roll_angle_phi_deg
, prp.initial_aoa_deg: initial_aoa_deg
, prp.initial_altitude_ft: initial_altitude_ft
}) #just an example, sane defaults are already set in env.__init()__ constructor
env.set_meta_information(experiment_name = arglist.exp_name)
return env
def setup_container(task_list, arglist):
agent_classes_dict = {
'PID': PID_AgentTrainer,
'MADDPG': MADDPG_AgentTrainer,
'DDPG': DDPG_AgentTrainer,
}
#for PID controllers we need an elaborated parameter set for each type
pid_params = {'aileron': PidParameters(3.5e-2, 1e-2, 0.0),
'elevator': PidParameters( -5e-2, -6.5e-2, -1e-3),
'rudder': PidParameters( 0, 0, 0), #TODO: This parameter set just leaves the rudder alone. No actuation at all
}
params_aileron_pid_agent = {
'pid_params': pid_params['aileron'],
'writer': None,
}
params_elevator_pid_agent = {
'pid_params': pid_params['elevator'],
'writer': None,
}
params_rudder_pid_agent = {
'pid_params': pid_params['rudder'],
'writer': None,
}
#for the learning agents, a standard parameter set will do; the details will be learned
params_DDPG_MADDPG_agent = {
**vars(arglist),
'layer1_size': 400,
'layer2_size': 300,
'writer': None,
}
#for the learning agents, a standard parameter set will do; the details will be learned
params_DDPG_MADDPG_agent_big_net = {
**vars(arglist),
'layer1_size': 1200,
'layer2_size': 900,
'writer': None,
}
agent_spec_aileron_PID = AgentSpec('aileron', 'PID', ['banking_angle'], params_aileron_pid_agent)
agent_spec_aileron_DDPG = AgentSpec('aileron', 'DDPG', ['banking_angle'], params_DDPG_MADDPG_agent)
agent_spec_aileron_MADDPG = AgentSpec('aileron', 'MADDPG', ['banking_angle'], params_DDPG_MADDPG_agent)
agent_spec_elevator_PID = AgentSpec('elevator', 'PID', ['flight_path_angle'], params_elevator_pid_agent)
agent_spec_elevator_DDPG = AgentSpec('elevator', 'DDPG', ['flight_path_angle'], params_DDPG_MADDPG_agent)
agent_spec_elevator_MADDPG = AgentSpec('elevator', 'MADDPG', ['flight_path_angle'], params_DDPG_MADDPG_agent)
agent_spec_rudder_MADDPG = AgentSpec('rudder', 'MADDPG', ['sideslip_angle'], params_DDPG_MADDPG_agent_big_net)
agent_spec_rudder_DDPG = AgentSpec('rudder', 'DDPG', ['sideslip_angle'], params_DDPG_MADDPG_agent)
agent_spec_rudder_PID = AgentSpec('rudder', 'PID', ['sideslip_angle'], params_rudder_pid_agent)
# #this is an example on how an assignment of an agent to multiple task could look like
# #it is assumed, that the glidepath task is split into two subtasks: one to control the elevator, the other to monitor the glide angle set-point
# #following this scheme e. g. combined speed control and glide path angle tasks could be defined to control elevator and thrust
# params_DDPG_MADDPG_separated_agent = {
# **vars(arglist),
# 'layer1_size': 400,
# 'layer2_size': 300,
# 'task_reward_weights': [2, 14],
# 'writer': None,
# }
# attention, the tasks are currently undefined in setup_env()
# agent_spec_glide_path_MADDPG_separated_tasks = AgentSpec('elevator', 'MADDPG', ['elevator_actuation_task', 'glide_path_task'], params_DDPG_MADDPG_separated_agent)
# the agent spec to train elevator and aileron control in one single agent (failed)
# agent_spec_elevator_aileron_DDPG = AgentSpec('elevator_aileron', 'DDPG', ['flight_path_angle', 'banking_angle'], params_DDPG_MADDPG_agent)
# the agent spec to train elevator and aileron and rudder control in one single agent (failed)
# agent_spec_elevator_aileron_rudder_MADDPG = AgentSpec('ele_ail_rud', 'DDPG', ['flight_path_angle', 'banking_angle', 'sideslip_angle'], params_DDPG_MADDPG_agent_big_net)
#Here we specify which agents shall be initiated; chose from the above defined single-specs
# agent_spec = [agent_spec_elevator_MADDPG, agent_spec_aileron_MADDPG, agent_spec_rudder_MADDPG]
# agent_spec = [agent_spec_elevator_aileron_DDPG]
# agent_spec = [agent_spec_elevator_PID, agent_spec_aileron_PID, agent_spec_rudder_DDPG]
# the best controller was yielded by training three cooperating DDPG agents
agent_spec = [agent_spec_elevator_DDPG, agent_spec_aileron_DDPG, agent_spec_rudder_DDPG]
task_list_n = task_list #we only need the task list to create the mapping. Anything else form the env is not interesting for the agent container.
agent_container = AgentContainer.init_from_specs(task_list_n, agent_spec, agent_classes_dict, **vars(arglist))
return agent_container
if __name__ == '__main__':
arglist = parse_args()
lab_journal = LabJournal(arglist.base_dir, arglist)
# # uncomment the following lines when trying to restore from disk
# restore_lines = [3463, 3488, 3489]
# testing_env = restore_env_from_journal(lab_journal, restore_lines[0])
# # if needed, change to FlightGear enabled environment
# # testing_env = restore_env_from_journal(lab_journal, restore_lines[0], target_environment='FG')
# #alternatively, use setup_env() to create a new testin_env
# # testing_env = setup_env(arglist)
# # if needed, apply VarySetpointsWrapper to see wild action:
# # testing_env = VarySetpointsWrapper(testing_env, prp.roll_deg, (-30, 30), (10, 120), (5, 30), (0.05, 0.1))
# # testing_env = VarySetpointsWrapper(testing_env, prp.flight_path_deg, (-9, -5.5), (10, 120), (5, 30), (0.05, 0.1))
# agent_container = restore_agent_container_from_journal(lab_journal, restore_lines)
# # normally, we don't save the test runs restored from disk
# # save_test_run(testing_env, agent_container, lab_journal, arglist) #use the testing_env here to have the save_path available in the evaluation
# evaluate_training(agent_container, testing_env, lab_journal=lab_journal) #run the standardized test on the test_env
# # if FligthGear rendering is desired, use this alternative
# # evaluate_training(agent_container, testing_env, lab_journal=None, render_mode = 'flightgear') #run the standardized test on the test_env
# # when restoring form disk, exit now.
# exit(0)
training_env = setup_env(arglist)
testing_env = setup_env(arglist)
#apply Varyetpoints to the training to increase the variance of training data
training_env = VarySetpointsWrapper(training_env, prp.roll_deg, (-30, 30), (10, 30), (5, 30), (0.05, 0.5))
training_env = VarySetpointsWrapper(training_env, prp.flight_path_deg, (-10, -5.5), (10, 45), (5, 30), (0.05, 0.5))
training_env = VarySetpointsWrapper(training_env, prp.sideslip_deg, (-2, 2), (10, 45), (5, 30), (0.05, 0.5))
agent_container = setup_container(training_env.task_list, arglist)
save_test_run(testing_env, agent_container, lab_journal, arglist) #use the testing_env here to have the save_path available in the evaluation
perform_training(training_env, testing_env, agent_container, lab_journal, arglist)
training_env.close()
testing_env.close()
| 59.037594 | 219 | 0.707972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,657 | 0.423905 |
bd1d8b232aa33e6da7911055afde86063303f3d6
| 19,781 |
py
|
Python
|
atm/core.py
|
HDI-Project/ATM
|
dde454a95e963a460843a61bbb44d18982984b17
|
[
"MIT"
] | 554 |
2017-12-19T06:43:11.000Z
|
2022-03-26T04:24:55.000Z
|
atm/core.py
|
BTHUNTERCN/ATM
|
dde454a95e963a460843a61bbb44d18982984b17
|
[
"MIT"
] | 128 |
2017-12-19T21:30:32.000Z
|
2021-04-19T17:03:39.000Z
|
atm/core.py
|
BTHUNTERCN/ATM
|
dde454a95e963a460843a61bbb44d18982984b17
|
[
"MIT"
] | 140 |
2017-12-20T03:47:04.000Z
|
2022-03-17T01:50:24.000Z
|
# -*- coding: utf-8 -*-
"""Core ATM module.
This module contains the ATM class, which is the one responsible for
executing and orchestrating the main ATM functionalities.
"""
import logging
import random
import time
from datetime import datetime, timedelta
from operator import attrgetter
from tqdm import tqdm
from atm.constants import TIME_FMT, PartitionStatus, RunStatus
from atm.database import Database
from atm.method import Method
from atm.worker import ClassifierError, Worker
LOGGER = logging.getLogger(__name__)
class ATM(object):
_LOOP_WAIT = 5
def __init__(
self,
# SQL Conf
dialect='sqlite',
database='atm.db',
username=None,
password=None,
host=None,
port=None,
query=None,
# AWS Conf
access_key=None,
secret_key=None,
s3_bucket=None,
s3_folder=None,
# Log Conf
models_dir='models',
metrics_dir='metrics',
verbose_metrics=False,
):
self.db = Database(dialect, database, username, host, port, query)
self.aws_access_key = access_key
self.aws_secret_key = secret_key
self.s3_bucket = s3_bucket
self.s3_folder = s3_folder
self.models_dir = models_dir
self.metrics_dir = metrics_dir
self.verbose_metrics = verbose_metrics
def add_dataset(self, train_path, test_path=None, name=None,
description=None, class_column=None):
"""Add a new dataset to the Database.
Args:
train_path (str):
Path to the training CSV file. It can be a local filesystem path,
absolute or relative, or an HTTP or HTTPS URL, or an S3 path in the
format ``s3://{bucket_name}/{key}``. Required.
test_path (str):
Path to the testing CSV file. It can be a local filesystem path,
absolute or relative, or an HTTP or HTTPS URL, or an S3 path in the
format ``s3://{bucket_name}/{key}``.
Optional. If not given, the training CSV will be split in two parts,
train and test.
name (str):
Name given to this dataset. Optional. If not given, a hash will be
generated from the training_path and used as the Dataset name.
description (str):
Human friendly description of the Dataset. Optional.
class_column (str):
Name of the column that will be used as the target variable.
Optional. Defaults to ``'class'``.
Returns:
Dataset:
The created dataset.
"""
return self.db.create_dataset(
train_path=train_path,
test_path=test_path,
name=name,
description=description,
class_column=class_column,
aws_access_key=self.aws_access_key,
aws_secret_key=self.aws_secret_key,
)
def add_datarun(self, dataset_id, budget=100, budget_type='classifier',
gridding=0, k_window=3, metric='f1', methods=['logreg', 'dt', 'knn'],
r_minimum=2, run_per_partition=False, score_target='cv', priority=1,
selector='uniform', tuner='uniform', deadline=None):
"""Register one or more Dataruns to the Database.
The methods hyperparameters will be analyzed and Hyperpartitions generated
from them.
If ``run_per_partition`` is ``True``, one Datarun will be created for each
Hyperpartition. Otherwise, a single one will be created for all of them.
Args:
dataset_id (int):
Id of the Dataset which this Datarun will belong to.
budget (int):
Budget amount. Optional. Defaults to ``100``.
budget_type (str):
Budget Type. Can be 'classifier' or 'walltime'.
Optional. Defaults to ``'classifier'``.
gridding (int):
``gridding`` setting for the Tuner. Optional. Defaults to ``0``.
k_window (int):
``k`` setting for the Selector. Optional. Defaults to ``3``.
metric (str):
Metric to use for the tuning and selection. Optional. Defaults to ``'f1'``.
methods (list):
List of methods to try. Optional. Defaults to ``['logreg', 'dt', 'knn']``.
r_minimum (int):
``r_minimum`` setting for the Tuner. Optional. Defaults to ``2``.
run_per_partition (bool):
whether to create a separated Datarun for each Hyperpartition or not.
Optional. Defaults to ``False``.
score_target (str):
Which score to use for the tuning and selection process. It can be ``'cv'`` or
``'test'``. Optional. Defaults to ``'cv'``.
priority (int):
Priority of this Datarun. The higher the better. Optional. Defaults to ``1``.
selector (str):
Type of selector to use. Optional. Defaults to ``'uniform'``.
tuner (str):
Type of tuner to use. Optional. Defaults to ``'uniform'``.
deadline (str):
Time deadline. It must be a string representing a datetime in the format
``'%Y-%m-%d %H:%M'``. If given, ``budget_type`` will be set to ``'walltime'``.
Returns:
Datarun:
The created Datarun or list of Dataruns.
"""
if deadline:
deadline = datetime.strptime(deadline, TIME_FMT)
budget_type = 'walltime'
elif budget_type == 'walltime':
deadline = datetime.now() + timedelta(minutes=budget)
run_description = '___'.join([tuner, selector])
target = score_target + '_judgment_metric'
method_parts = {}
for method in methods:
# enumerate all combinations of categorical variables for this method
method_instance = Method(method)
method_parts[method] = method_instance.get_hyperpartitions()
LOGGER.info('method {} has {} hyperpartitions'.format(
method, len(method_parts[method])))
dataruns = list()
if not run_per_partition:
datarun = self.db.create_datarun(
dataset_id=dataset_id,
description=run_description,
tuner=tuner,
selector=selector,
gridding=gridding,
priority=priority,
budget_type=budget_type,
budget=budget,
deadline=deadline,
metric=metric,
score_target=target,
k_window=k_window,
r_minimum=r_minimum
)
dataruns.append(datarun)
for method, parts in method_parts.items():
for part in parts:
# if necessary, create a new datarun for each hyperpartition.
# This setting is useful for debugging.
if run_per_partition:
datarun = self.db.create_datarun(
dataset_id=dataset_id,
description=run_description,
tuner=tuner,
selector=selector,
gridding=gridding,
priority=priority,
budget_type=budget_type,
budget=budget,
deadline=deadline,
metric=metric,
score_target=target,
k_window=k_window,
r_minimum=r_minimum
)
dataruns.append(datarun)
# create a new hyperpartition in the database
self.db.create_hyperpartition(datarun_id=datarun.id,
method=method,
tunables=part.tunables,
constants=part.constants,
categoricals=part.categoricals,
status=PartitionStatus.INCOMPLETE)
dataset = self.db.get_dataset(dataset_id)
LOGGER.info('Dataruns created. Summary:')
LOGGER.info('\tDataset ID: {}'.format(dataset.id))
LOGGER.info('\tTraining data: {}'.format(dataset.train_path))
LOGGER.info('\tTest data: {}'.format(dataset.test_path))
if run_per_partition:
LOGGER.info('\tDatarun IDs: {}'.format(
', '.join(str(datarun.id) for datarun in dataruns)))
else:
LOGGER.info('\tDatarun ID: {}'.format(dataruns[0].id))
LOGGER.info('\tHyperpartition selection strategy: {}'.format(dataruns[0].selector))
LOGGER.info('\tParameter tuning strategy: {}'.format(dataruns[0].tuner))
LOGGER.info('\tBudget: {} ({})'.format(dataruns[0].budget, dataruns[0].budget_type))
return dataruns if run_per_partition else dataruns[0]
def work(self, datarun_ids=None, save_files=True, choose_randomly=True,
cloud_mode=False, total_time=None, wait=True, verbose=False):
"""Get unfinished Dataruns from the database and work on them.
Check the ModelHub Database for unfinished Dataruns, and work on them
as they are added. This process will continue to run until it exceeds
total_time or there are no more Dataruns to process or it is killed.
Args:
datarun_ids (list):
list of IDs of Dataruns to work on. If ``None``, this will work on any
unfinished Dataruns found in the database. Optional. Defaults to ``None``.
save_files (bool):
Whether to save the fitted classifiers and their metrics or not.
Optional. Defaults to True.
choose_randomly (bool):
If ``True``, work on all the highest-priority dataruns in random order.
Otherwise, work on them in sequential order (by ID).
Optional. Defaults to ``True``.
cloud_mode (bool):
Save the models and metrics in AWS S3 instead of locally. This option
works only if S3 configuration has been provided on initialization.
Optional. Defaults to ``False``.
total_time (int):
Total time to run the work process, in seconds. If ``None``, continue to
run until interrupted or there are no more Dataruns to process.
Optional. Defaults to ``None``.
wait (bool):
If ``True``, wait for more Dataruns to be inserted into the Database
once all have been processed. Otherwise, exit the worker loop
when they run out.
Optional. Defaults to ``False``.
verbose (bool):
Whether to be verbose about the process. Optional. Defaults to ``True``.
"""
start_time = datetime.now()
# main loop
while True:
# get all pending and running dataruns, or all pending/running dataruns
# from the list we were given
dataruns = self.db.get_dataruns(include_ids=datarun_ids, ignore_complete=True)
if not dataruns:
if wait:
LOGGER.debug('No dataruns found. Sleeping %d seconds and trying again.',
self._LOOP_WAIT)
time.sleep(self._LOOP_WAIT)
continue
else:
LOGGER.info('No dataruns found. Exiting.')
break
# either choose a run randomly between priority, or take the run with the lowest ID
if choose_randomly:
run = random.choice(dataruns)
else:
run = sorted(dataruns, key=attrgetter('id'))[0]
# say we've started working on this datarun, if we haven't already
self.db.mark_datarun_running(run.id)
LOGGER.info('Computing on datarun %d' % run.id)
# actual work happens here
worker = Worker(self.db, run, save_files=save_files,
cloud_mode=cloud_mode, aws_access_key=self.aws_access_key,
aws_secret_key=self.aws_secret_key, s3_bucket=self.s3_bucket,
s3_folder=self.s3_folder, models_dir=self.models_dir,
metrics_dir=self.metrics_dir, verbose_metrics=self.verbose_metrics)
try:
if run.budget_type == 'classifier':
pbar = tqdm(
total=run.budget,
ascii=True,
initial=run.completed_classifiers,
disable=not verbose
)
while run.status != RunStatus.COMPLETE:
worker.run_classifier()
run = self.db.get_datarun(run.id)
if verbose and run.completed_classifiers > pbar.last_print_n:
pbar.update(run.completed_classifiers - pbar.last_print_n)
pbar.close()
elif run.budget_type == 'walltime':
pbar = tqdm(
disable=not verbose,
ascii=True,
initial=run.completed_classifiers,
unit=' Classifiers'
)
while run.status != RunStatus.COMPLETE:
worker.run_classifier()
run = self.db.get_datarun(run.id) # Refresh the datarun object.
if verbose and run.completed_classifiers > pbar.last_print_n:
pbar.update(run.completed_classifiers - pbar.last_print_n)
pbar.close()
except ClassifierError:
# the exception has already been handled; just wait a sec so we
# don't go out of control reporting errors
LOGGER.error('Something went wrong. Sleeping %d seconds.', self._LOOP_WAIT)
time.sleep(self._LOOP_WAIT)
elapsed_time = (datetime.now() - start_time).total_seconds()
if total_time is not None and elapsed_time >= total_time:
LOGGER.info('Total run time for worker exceeded; exiting.')
break
def run(self, train_path, test_path=None, name=None, description=None,
class_column='class', budget=100, budget_type='classifier', gridding=0, k_window=3,
metric='f1', methods=['logreg', 'dt', 'knn'], r_minimum=2, run_per_partition=False,
score_target='cv', selector='uniform', tuner='uniform', deadline=None, priority=1,
save_files=True, choose_randomly=True, cloud_mode=False, total_time=None,
verbose=True):
"""Create a Dataset and a Datarun and then work on it.
Args:
train_path (str):
Path to the training CSV file. It can be a local filesystem path,
absolute or relative, or an HTTP or HTTPS URL, or an S3 path in the
format ``s3://{bucket_name}/{key}``. Required.
test_path (str):
Path to the testing CSV file. It can be a local filesystem path,
absolute or relative, or an HTTP or HTTPS URL, or an S3 path in the
format ``s3://{bucket_name}/{key}``.
Optional. If not given, the training CSV will be split in two parts,
train and test.
name (str):
Name given to this dataset. Optional. If not given, a hash will be
generated from the training_path and used as the Dataset name.
description (str):
Human friendly description of the Dataset. Optional.
class_column (str):
Name of the column that will be used as the target variable.
Optional. Defaults to ``'class'``.
budget (int):
Budget amount. Optional. Defaults to ``100``.
budget_type (str):
Budget Type. Can be 'classifier' or 'walltime'.
Optional. Defaults to ``'classifier'``.
gridding (int):
``gridding`` setting for the Tuner. Optional. Defaults to ``0``.
k_window (int):
``k`` setting for the Selector. Optional. Defaults to ``3``.
metric (str):
Metric to use for the tuning and selection. Optional. Defaults to ``'f1'``.
methods (list):
List of methods to try. Optional. Defaults to ``['logreg', 'dt', 'knn']``.
r_minimum (int):
``r_minimum`` setting for the Tuner. Optional. Defaults to ``2``.
run_per_partition (bool):
whether to create a separated Datarun for each Hyperpartition or not.
Optional. Defaults to ``False``.
score_target (str):
Which score to use for the tuning and selection process. It can be ``'cv'`` or
``'test'``. Optional. Defaults to ``'cv'``.
priority (int):
Priority of this Datarun. The higher the better. Optional. Defaults to ``1``.
selector (str):
Type of selector to use. Optional. Defaults to ``'uniform'``.
tuner (str):
Type of tuner to use. Optional. Defaults to ``'uniform'``.
deadline (str):
Time deadline. It must be a string representing a datetime in the format
``'%Y-%m-%d %H:%M'``. If given, ``budget_type`` will be set to ``'walltime'``.
verbose (bool):
Whether to be verbose about the process. Optional. Defaults to ``True``.
Returns:
Datarun:
The created Datarun or list of Dataruns.
"""
dataset = self.add_dataset(train_path, test_path, name, description, class_column)
datarun = self.add_datarun(
dataset.id,
budget,
budget_type,
gridding,
k_window,
metric,
methods,
r_minimum,
run_per_partition,
score_target,
priority,
selector,
tuner,
deadline
)
if run_per_partition:
datarun_ids = [_datarun.id for _datarun in datarun]
else:
datarun_ids = [datarun.id]
if verbose:
print('Processing dataset {}'.format(train_path))
self.work(
datarun_ids,
save_files,
choose_randomly,
cloud_mode,
total_time,
False,
verbose=verbose
)
dataruns = self.db.get_dataruns(
include_ids=datarun_ids,
ignore_complete=False,
ignore_pending=True
)
if run_per_partition:
return dataruns
elif len(dataruns) == 1:
return dataruns[0]
def load_model(self, classifier_id):
"""Load a Model from the Database.
Args:
classifier_id (int):
Id of the Model to load.
Returns:
Model:
The loaded model instance.
"""
return self.db.get_classifier(classifier_id).load_model()
| 40.954451 | 95 | 0.547495 | 19,250 | 0.973156 | 0 | 0 | 0 | 0 | 0 | 0 | 10,091 | 0.510136 |
bd225009cbeb540acf88e600f37e2294b3fa16ce
| 742 |
py
|
Python
|
dbcollection/datasets/leeds_sports_pose/leeds_sports_pose/__init__.py
|
dbcollection/dbcollection
|
a36f57a11bc2636992e26bba4406914162773dd9
|
[
"MIT"
] | 23 |
2017-09-20T19:23:26.000Z
|
2022-01-09T16:18:11.000Z
|
dbcollection/datasets/leeds_sports_pose/leeds_sports_pose/__init__.py
|
dbcollection/dbcollection
|
a36f57a11bc2636992e26bba4406914162773dd9
|
[
"MIT"
] | 148 |
2017-07-23T14:28:28.000Z
|
2022-01-13T00:35:17.000Z
|
dbcollection/datasets/leeds_sports_pose/leeds_sports_pose/__init__.py
|
dbcollection/dbcollection
|
a36f57a11bc2636992e26bba4406914162773dd9
|
[
"MIT"
] | 6 |
2018-01-12T15:47:57.000Z
|
2021-02-09T06:32:39.000Z
|
"""
Leeds Sports Pose (LSP) Dataset download/process functions.
"""
from dbcollection.datasets import BaseDataset
from .keypoints import Keypoints, KeypointsOriginal
urls = (
'http://sam.johnson.io/research/lsp_dataset_original.zip',
{
'url': 'http://sam.johnson.io/research/lsp_dataset.zip',
'extract_dir': 'lsp_dataset',
},
)
keywords = ('image_processing', 'detection', 'human_pose', 'keypoints')
tasks = {
"keypoints": Keypoints,
"keypoints_original": KeypointsOriginal,
}
default_task = 'keypoints'
class Dataset(BaseDataset):
"""Leeds Sports Pose (LSP) Dataset preprocessing/downloading functions."""
urls = urls
keywords = keywords
tasks = tasks
default_task = default_task
| 24.733333 | 78 | 0.699461 | 196 | 0.264151 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.5 |
bd25018110a4f497d278f0c5fcc41f39296d2cf6
| 3,505 |
py
|
Python
|
flydra_analysis/flydra_analysis/a2/check_mainbrain_h5_contiguity.py
|
elhananby/flydra
|
09b86859b1863700cdea0bbcdd4758da6c83930b
|
[
"Apache-2.0",
"MIT"
] | 45 |
2017-08-25T06:46:56.000Z
|
2021-08-29T16:42:49.000Z
|
flydra_analysis/flydra_analysis/a2/check_mainbrain_h5_contiguity.py
|
elhananby/flydra
|
09b86859b1863700cdea0bbcdd4758da6c83930b
|
[
"Apache-2.0",
"MIT"
] | 7 |
2017-10-16T10:46:20.000Z
|
2020-12-03T16:42:55.000Z
|
flydra_analysis/flydra_analysis/a2/check_mainbrain_h5_contiguity.py
|
elhananby/flydra
|
09b86859b1863700cdea0bbcdd4758da6c83930b
|
[
"Apache-2.0",
"MIT"
] | 21 |
2018-04-11T09:06:40.000Z
|
2021-12-26T23:38:40.000Z
|
#!/usr/bin/env python
from __future__ import print_function
import tables
import argparse
import numpy as np
import sys
def check_mainbrain_h5_contiguity(
filename, slow_but_less_ram=False, shortcircuit=False, verbose=False
):
failed_obj_ids = []
if verbose:
print("opening %r" % filename)
with tables.open_file(filename, mode="r") as f:
table = f.root.kalman_estimates
all_obj_ids = table.cols.obj_id[:]
obj_ids = np.unique(all_obj_ids)
if verbose:
print("checking %d obj_ids" % len(obj_ids))
if not slow_but_less_ram:
# faster but more RAM
all_frames = table.cols.frame[:]
for obj_id in obj_ids:
frame = all_frames[all_obj_ids == obj_id]
diff = frame[1:] - frame[:-1]
if np.any(diff != 1):
failed_obj_ids.append(obj_id)
if verbose:
print("failed: %d" % obj_id)
if shortcircuit:
return failed_obj_ids
else:
# slower but more memory efficient
for obj_id in obj_ids:
cond = all_obj_ids == obj_id
idxs = np.nonzero(cond)[0]
frame = table.read_coordinates(idxs, field="frame")
diff = frame[1:] - frame[:-1]
if np.any(diff != 1):
failed_obj_ids.append(obj_id)
if verbose:
print("failed: %d" % obj_id)
if shortcircuit:
return failed_obj_ids
return failed_obj_ids
def main():
parser = argparse.ArgumentParser()
parser.add_argument("file", type=str, default=None, help="file to check")
parser.add_argument(
"--verbose", action="store_true", default=False, help="print stuff"
)
parser.add_argument(
"--findall",
action="store_true",
default=False,
help="continue after first hit (only make sense with verbose or output-log)",
)
parser.add_argument(
"--slow-but-less-ram", action="store_true", default=False, help="print stuff"
)
parser.add_argument(
"--no-output-log",
action="store_true",
default=False,
help="do not print a final summary",
)
options = parser.parse_args()
failed_obj_ids = check_mainbrain_h5_contiguity(
filename=options.file,
slow_but_less_ram=options.slow_but_less_ram,
shortcircuit=not options.findall,
verbose=options.verbose,
)
if len(failed_obj_ids):
if not options.no_output_log:
print("%s some objects failed: %r" % (options.file, failed_obj_ids))
sys.exit(1)
else:
if not options.no_output_log:
print("%s no objects failed" % options.file)
sys.exit(0)
def cls(root="/mnt/strawscience/data/auto_pipeline/raw_archive/by_date"):
"""Generates example command lines amenable to use, for example, with GNU parallel."""
from itertools import product
import os.path as op
for year, month in product(
(2015, 2014, 2013, 2012), ["%02d" % d for d in xrange(1, 13)]
):
print(
"find %s -iname '*.mainbrain.h5' "
"-exec flydra_analysis_check_mainbrain_h5_contiguity --findall {} \; "
"&>~/%d-%s.log" % (op.join(root, str(year), month), year, month)
)
if __name__ == "__main__":
main()
| 33.066038 | 90 | 0.575178 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 728 | 0.207703 |
bd2629883944c343ab1a2e4d82cafb22e7d45e13
| 2,304 |
py
|
Python
|
reader.py
|
Birdulon/html-mangareader
|
dbdbbaa454125896b9de2d918f2ab59a3c06adc2
|
[
"MIT"
] | 1 |
2021-05-08T14:58:17.000Z
|
2021-05-08T14:58:17.000Z
|
reader.py
|
Birdulon/html-mangareader
|
dbdbbaa454125896b9de2d918f2ab59a3c06adc2
|
[
"MIT"
] | null | null | null |
reader.py
|
Birdulon/html-mangareader
|
dbdbbaa454125896b9de2d918f2ab59a3c06adc2
|
[
"MIT"
] | null | null | null |
import sys
import traceback
import webbrowser
from argparse import ArgumentParser, Namespace
from os import path
from tkinter import Tk, messagebox, filedialog
from mangareader.mangarender import extract_render
from mangareader import templates
from time import sleep
def parse_args() -> Namespace:
parser = ArgumentParser(description='Mangareader')
parser.add_argument('path', nargs='?', help='Path to image, folder, or comic book archive')
parser.add_argument('--no-browser', action='store_true')
return parser.parse_args()
def main() -> None:
args = parse_args()
if not args.path:
imagetypes = ';'.join(f'*.{ext}' for ext in templates.DEFAULT_IMAGETYPES)
archivetypes = ';'.join(
f'*.{ext}' for ext in (*templates.ZIP_TYPES, *templates.RAR_TYPES, *templates._7Z_TYPES)
)
filetypes = (
('Supported files', ';'.join((imagetypes, archivetypes))),
('Images', imagetypes),
('Comic book archive', archivetypes),
('All files', '*'),
)
target_path = filedialog.askopenfilename(
filetypes=filetypes, title='Open Image - Mangareader',
)
if not target_path:
return
else:
target_path = args.path
working_dir = getattr(sys, '_MEIPASS', path.abspath(path.dirname(__file__)))
lib_dir = f'{working_dir}/mangareader'
with open(f'{working_dir}/version', encoding='utf-8') as version_file:
version = version_file.read().strip()
try:
boot_path = extract_render(
path=target_path,
version=version,
doc_template_path=f'{lib_dir}/doc.template.html',
page_template_path=f'{lib_dir}/img.template.html',
boot_template_path=f'{lib_dir}/boot.template.html',
asset_paths=(f'{lib_dir}/{asset}' for asset in templates.ASSETS),
img_types=templates.DEFAULT_IMAGETYPES,
)
if args.no_browser:
print(boot_path)
else:
webbrowser.open(boot_path.as_uri())
except Exception as e:
Tk().withdraw()
messagebox.showerror(
'Mangareader encountered an error: ' + type(e).__name__, ''.join(traceback.format_exc())
)
if __name__ == '__main__':
main()
| 34.909091 | 100 | 0.631076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 436 | 0.189236 |
bd29d7f8357ca28a05195118a23e7f338eea17aa
| 483 |
py
|
Python
|
Qemu/power_on_qemu.py
|
I-Rinka/Virtualization-Difference
|
7727215f5b5cdb8bf18d91ef76685ccd3489e760
|
[
"MIT"
] | null | null | null |
Qemu/power_on_qemu.py
|
I-Rinka/Virtualization-Difference
|
7727215f5b5cdb8bf18d91ef76685ccd3489e760
|
[
"MIT"
] | null | null | null |
Qemu/power_on_qemu.py
|
I-Rinka/Virtualization-Difference
|
7727215f5b5cdb8bf18d91ef76685ccd3489e760
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import socket
import os
import time
import threading
def power_on():
os.system("sudo bash ./start_vm.sh")
if __name__ == "__main__":
n=os.fork()
if n>0:
os.system("sleep 2")
os.system("sudo ip addr add 172.19.0.1/24 dev tap1")
os.system("sudo ip link set tap1 up")
os.wait()
else:
# os.execl("./1_start_vm.sh","./1_start_vm.sh")
power_on()
| 18.576923 | 64 | 0.52588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.372671 |
bd2a739ca5325c09ff24414f0ce30e0bab1eacb7
| 381 |
py
|
Python
|
tests/unit/python/execution_tree/dynamic_init.py
|
frzfrsfra4/phylanx
|
001fe7081f3a24e56157cdb21b2d126b8953ff5d
|
[
"BSL-1.0"
] | 83 |
2017-08-27T15:09:13.000Z
|
2022-01-18T17:03:41.000Z
|
tests/unit/python/execution_tree/dynamic_init.py
|
frzfrsfra4/phylanx
|
001fe7081f3a24e56157cdb21b2d126b8953ff5d
|
[
"BSL-1.0"
] | 808 |
2017-08-27T15:35:01.000Z
|
2021-12-14T17:30:50.000Z
|
tests/unit/python/execution_tree/dynamic_init.py
|
frzfrsfra4/phylanx
|
001fe7081f3a24e56157cdb21b2d126b8953ff5d
|
[
"BSL-1.0"
] | 55 |
2017-08-27T15:09:22.000Z
|
2022-03-25T12:07:34.000Z
|
# Copyright (c) 2018 R. Tohid
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from phylanx import Phylanx, PhylanxSession
@Phylanx
def foo():
a = 2
return a
def main():
assert (2 == foo())
if __name__ == "__main__":
PhylanxSession.init(1)
main()
| 17.318182 | 79 | 0.671916 | 0 | 0 | 0 | 0 | 42 | 0.110236 | 0 | 0 | 192 | 0.503937 |
bd2af34a041fa744101d9895d1374416d6964a87
| 1,073 |
py
|
Python
|
indexStackexchange.py
|
o19s/semantic-search-course
|
ebe15eaa65c5009fa2d526b4df72bf8dbfb8630f
|
[
"Apache-2.0"
] | 6 |
2016-03-07T18:41:52.000Z
|
2016-12-22T20:45:17.000Z
|
indexStackexchange.py
|
o19s/semantic-search-course
|
ebe15eaa65c5009fa2d526b4df72bf8dbfb8630f
|
[
"Apache-2.0"
] | 1 |
2016-03-07T19:09:19.000Z
|
2016-03-07T19:09:19.000Z
|
indexStackexchange.py
|
o19s/semantic-search-course
|
ebe15eaa65c5009fa2d526b4df72bf8dbfb8630f
|
[
"Apache-2.0"
] | null | null | null |
import requests
import json
def openPosts():
data = ""
try:
f = open("scifi_stackexchange.json")
data = f.read()
except IOError:
stackExchangeData ="https://storage.googleapis.com/quepid-sample-datasets/elasticsearch/scifi_stackexchange.json"
resp = requests.get(stackExchangeData)
print("GET %s Len %s" % (resp.status_code, len(resp.text)))
f = open("scifi_stackexchange.json", "w")
f.write(resp.text)
data = resp.text
f.close()
return json.loads(data)
posts = openPosts()
def bulkAdds(posts, index='stackexchange'):
print("Indexing %s Posts" % len(posts))
for post in posts:
print("indexing %s" % post['Id'])
yield {
"_id": post['Id'],
"_index": index,
'_type': 'post',
'_op_type': 'index',
'_source': post
}
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
es = Elasticsearch("http://localhost:9200")
bulk(es, bulkAdds(posts))
| 26.825 | 121 | 0.587139 | 0 | 0 | 354 | 0.329916 | 0 | 0 | 0 | 0 | 296 | 0.275862 |
bd2bf016c3e6797feee09cab8b88dd0593ba10a9
| 332 |
py
|
Python
|
hackathonbaobab2020/tests/__init__.py
|
baobabsoluciones/hackathonbaobab2020
|
ada30525cca061daad4bd420aa45dd4cfc7b790e
|
[
"MIT"
] | null | null | null |
hackathonbaobab2020/tests/__init__.py
|
baobabsoluciones/hackathonbaobab2020
|
ada30525cca061daad4bd420aa45dd4cfc7b790e
|
[
"MIT"
] | 2 |
2020-12-03T22:37:45.000Z
|
2021-01-28T19:43:42.000Z
|
hackathonbaobab2020/tests/__init__.py
|
baobabsoluciones/hackathonbaobab2020
|
ada30525cca061daad4bd420aa45dd4cfc7b790e
|
[
"MIT"
] | 5 |
2020-11-20T15:37:58.000Z
|
2021-01-29T10:22:07.000Z
|
import os
import zipfile
from ..core import Instance
def get_test_instance(zip, filename):
directory = os.path.dirname(__file__)
zip_path = os.path.join(directory, zip)
zip_obj = zipfile.ZipFile(zip_path)
data = zip_obj.read(filename)
return Instance.from_mm(path=None, content=data.decode().splitlines(True))
| 27.666667 | 78 | 0.740964 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
bd2c89f3c83b146173c4e02b15272145ff176687
| 1,634 |
py
|
Python
|
Lab01_Introduction/exercise-4.py
|
rodrigoc-silva/Python-course
|
327b20738a4b383510faddc0ec26a54be1bbd717
|
[
"MIT"
] | null | null | null |
Lab01_Introduction/exercise-4.py
|
rodrigoc-silva/Python-course
|
327b20738a4b383510faddc0ec26a54be1bbd717
|
[
"MIT"
] | null | null | null |
Lab01_Introduction/exercise-4.py
|
rodrigoc-silva/Python-course
|
327b20738a4b383510faddc0ec26a54be1bbd717
|
[
"MIT"
] | null | null | null |
#This program shows the amount of each ingredient needed for a numbers of cookies.
#constants
sugar = 1.5
butter = 1
flour = 2.75
cookies = 48
#input
numOfCookies = int(input('Enter the number of cookies:'))
#calculation
amtSugar = sugar / cookies * numOfCookies
amtButter = butter / cookies * numOfCookies
amtFlour = flour / cookies * numOfCookies
#output
print('To make', numOfCookies, 'cookies, you will nedd:')
print(format(amtSugar, ',.2f'), 'cups of sugar.')
print(format(amtButter, ',.2f'), 'cups of butter.')
print(format(amtFlour, ',.2f'), 'cups of flour.')
#ask user to quit program
input("\n\nPress any key to quit...")
##Output with 5 test cases
##
##Test Case 1.
##
# Enter the number of cookies:56
# To make 56 cookies, you will nedd:
# 1.75 cups of sugar.
# 1.17 cups of butter.
# 3.21 cups of flour.
##
##
# Press any key to quit...
##
##Test Case 2.
##
# Enter the number of cookies:96
# To make 96 cookies, you will nedd:
# 3.00 cups of sugar.
# 2.00 cups of butter.
# 5.50 cups of flour.
##
##
# Press any key to quit...
##
##Test Case 3.
##
# Enter the number of cookies:480
# To make 480 cookies, you will nedd:
# 15.00 cups of sugar.
# 10.00 cups of butter.
# 27.50 cups of flour.
##
##
# Press any key to quit...
##
##Test Case 4.
##
# Enter the number of cookies:200
# To make 200 cookies, you will nedd:
# 6.25 cups of sugar.
# 4.17 cups of butter.
# 11.46 cups of flour.
##
##
# Press any key to quit...
##
##Test Case 5.
##
# Enter the number of cookies:2
# To make 2 cookies, you will nedd:
# 0.06 cups of sugar.
# 0.04 cups of butter.
# 0.11 cups of flour.
##
##
# Press any key to quit...
| 18.155556 | 82 | 0.660343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,235 | 0.755814 |
bd2cd6efeb3ab12f89218d4758f64e6ad10fe52a
| 3,778 |
py
|
Python
|
tests/test_versions.py
|
jaleskovec/reqcheck
|
ffc13cd28127f751617cdd29f7003866341fca58
|
[
"MIT"
] | null | null | null |
tests/test_versions.py
|
jaleskovec/reqcheck
|
ffc13cd28127f751617cdd29f7003866341fca58
|
[
"MIT"
] | 2 |
2021-01-27T12:22:11.000Z
|
2021-01-31T03:32:08.000Z
|
tests/test_versions.py
|
jaleskovec/reqcheck
|
ffc13cd28127f751617cdd29f7003866341fca58
|
[
"MIT"
] | null | null | null |
import unittest
from datetime import datetime, timedelta
from pkg_resources import parse_version
import reqcheck.versions as versions
class GetBehindNumVersionsTest(unittest.TestCase):
def setUp(self):
self.versions = [
{'version': parse_version('1.0.1')},
{'version': parse_version('1.0.2')},
{'version': parse_version('1.0.3')},
]
def testWhenVersionNotFound(self):
self.assertEqual(None, versions.get_behind_num_versions(self.versions, parse_version('1.0.1'), parse_version('5.1')))
self.assertEqual(None, versions.get_behind_num_versions(self.versions, parse_version('5.1'), parse_version('1.0.1')))
class GetBehindTimeDeltaTest(unittest.TestCase):
def setUp(self):
self.versions = [
{'version': parse_version('1.0.1'), 'last_upload': datetime(2016, 10, 22, 12, 34, 56)},
{'version': parse_version('1.0.2'), 'last_upload': datetime(2016, 10, 22, 14, 46, 24)},
]
def testWhenMissingVersion(self):
self.assertEqual(None, versions.get_behind_time_delta(self.versions, parse_version('1.0.1'), None))
self.assertEqual(None, versions.get_behind_time_delta(self.versions, None, parse_version('1.0.2')))
def testWhenVersionsMatch(self):
version = parse_version('1.0.1')
self.assertEqual(timedelta(0), versions.get_behind_time_delta(self.versions, version, version))
def testResult(self):
expected = self.versions[1]['last_upload'] - self.versions[0]['last_upload']
self.assertEqual(expected, versions.get_behind_time_delta(self.versions, parse_version('1.0.1'), parse_version('1.0.2')))
class DetermineBehindTest(unittest.TestCase):
def setUp(self):
self.versions = [
{'version': parse_version('1.0.0'), 'last_upload': datetime(2014, 10, 21, 17, 14, 45)},
{'version': parse_version('1.0.1'), 'last_upload': datetime(2016, 10, 22, 14, 14, 45)},
{'version': parse_version('1.0.2'), 'last_upload': datetime(2016, 10, 22, 16, 14, 45)},
{'version': parse_version('1.0.3'), 'last_upload': datetime(2018, 11, 12, 18, 10, 15)},
{'version': parse_version('1.0.4'), 'last_upload': datetime(2021, 1, 28, 18, 15, 30)},
]
def testWhenBehind(self):
result = versions.determine_behind(self.versions, parse_version('1.0.1'), parse_version('1.0.4'))
self.assertEqual((3, timedelta(days = 1559, seconds = 14445), '-3 versions (~ 4 years 99 days)'), result)
def testWhenLatest(self):
result = versions.determine_behind(self.versions, parse_version('1.0.4'), parse_version('1.0.4'))
self.assertEqual((0, timedelta(days = 0), 'latest'), result)
class DetermineAgeTest(unittest.TestCase):
def setUp(self):
self.versions = [
{'version': parse_version('1.0.0'), 'last_upload': datetime(2014, 10, 21, 17, 14, 45)},
{'version': parse_version('1.0.1'), 'last_upload': datetime(2016, 10, 22, 14, 14, 45)},
{'version': parse_version('1.0.2'), 'last_upload': datetime(2016, 10, 22, 16, 14, 45)},
{'version': parse_version('1.0.3'), 'last_upload': datetime(2018, 11, 12, 18, 10, 15)},
{'version': parse_version('1.0.4'), 'last_upload': datetime(2021, 1, 28, 18, 15, 30)},
]
def testWhenVersionFound(self):
result = versions.determine_age(self.versions, parse_version('1.0.3'), datetime(2021, 1, 29))
self.assertEqual((timedelta(days = 808, seconds = 20985), '~ 2 years 78 days (2018-11-12)'), result)
def testWhenVersionNotFound(self):
result = versions.determine_age(self.versions, parse_version('1.20.12'), datetime(2021, 1, 29))
self.assertEqual((None, 'unknown'), result)
| 49.064935 | 129 | 0.642139 | 3,631 | 0.961091 | 0 | 0 | 0 | 0 | 0 | 0 | 607 | 0.160667 |
bd2df4074244dd69e6875bd543777ca99a678244
| 2,437 |
py
|
Python
|
seoaudit/config.py
|
Guber/seoaudit
|
e38bc453629643f0282cdf9324e4f1db81f57f7f
|
[
"Apache-2.0"
] | 7 |
2019-12-10T17:05:14.000Z
|
2020-11-10T10:10:45.000Z
|
seoaudit/config.py
|
Guber/seoaudit
|
e38bc453629643f0282cdf9324e4f1db81f57f7f
|
[
"Apache-2.0"
] | 3 |
2020-10-23T09:19:19.000Z
|
2021-12-13T20:28:03.000Z
|
seoaudit/config.py
|
Guber/seoaudit
|
e38bc453629643f0282cdf9324e4f1db81f57f7f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from seoaudit.checks.element import ElementCheck
from seoaudit.checks.page import PageCheck
from seoaudit.checks.site import SiteCheck
page_tests = [(PageCheck.TEXT_TO_CODE_RATIO, {"min_ratio": 0.1}),
(PageCheck.DOM_SIZE, {"max_size": 1500}),
[PageCheck.ELEMENTS_SIMILARITY,
{"el1_query": "/*", "el2_query": "/html/head/title", "match_most_common": 1}],
[PageCheck.ELEMENTS_SIMILARITY,
{"el1_query": "/*", "el2_query": "/html/head/meta[@name='description']/@content",
"match_most_common": 1}],
[PageCheck.ELEMENTS_SIMILARITY,
{"el1_query": "//h1", "el2_query": "/html/head/meta[@name='description']/@content",
"match_most_common": 1}],
[PageCheck.ELEMENTS_COUNT, {"query": "(//h2)", "min_count": 2}],
[PageCheck.STRUCTURED_DATA_FOUND, {"type": "json-ld", "property": "@type", "value": "Organization"}],
[SiteCheck.TITLE_REPETITION],
[SiteCheck.DESCRIPTION_REPETITION],
[SiteCheck.PAGE_IN_SITEMAP],
[SiteCheck.PAGE_CRAWLABLE]]
# Todo: add regex check for charset = utf-8
# Todo: add regex check for robots not block page
element_tests = [
("/html", 'lang'),
("(/html/head/meta[@charset])", 'charset'),
("/html/head/title", 'textContent',
[(ElementCheck.MIN_LENGTH, {"min_length": 40}),
(ElementCheck.MAX_LENGTH, {"max_length": 70})]),
("(/html/head/meta[@name='description'])", 'content',
[(ElementCheck.MIN_LENGTH, {"min_length": 50}), (ElementCheck.MAX_LENGTH, {"max_length": 160})]),
("(/html/head/meta[@name='viewport'])", 'content'),
("(//img)", 'alt'),
("(//a[@href])", 'title'),
("(/html/head/meta[@property='og:locale'])", 'content'),
("(/html/head/meta[@property='og:title'])", 'content'),
("(/html/head/meta[@property='og:description'])", 'content'),
("(/html/head/meta[@property='og:type'])", 'content'),
("(/html/head/meta[@property='og:url'])", 'content'),
("(/html/head/meta[@property='og:image'])", 'content'),
("(/html/head/meta[@name='twitter:title'])", 'content'),
("(/html/head/meta[@name='twitter:description'])", 'content'),
("(/html/head/meta[@name='twitter:image'])", 'content'),
("(/html/head/meta[@name='twitter:card'])", 'content'),
("(/html/head/link[@rel='canonical'])", 'href')
]
| 50.770833 | 115 | 0.588839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,278 | 0.524415 |
bd2e2477ed8bfb390dc1b2799ff2c5a263416a72
| 3,822 |
py
|
Python
|
utils/certificate/tools.py
|
sr-gi/paysense
|
e896191b42c97009d56a23d54518569561064c42
|
[
"BSD-3-Clause"
] | 2 |
2015-11-30T19:05:19.000Z
|
2017-02-04T08:31:59.000Z
|
utils/certificate/tools.py
|
sr-gi/paysense
|
e896191b42c97009d56a23d54518569561064c42
|
[
"BSD-3-Clause"
] | null | null | null |
utils/certificate/tools.py
|
sr-gi/paysense
|
e896191b42c97009d56a23d54518569561064c42
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) <2015> <Sergi Delgado Segura>
# Distributed under the BSD software license, see the accompanying file LICENSE
from pyasn1.codec.der import encoder, decoder
from pyasn1_modules.rfc2459 import Certificate
from Crypto.PublicKey import RSA
from M2Crypto import X509
from hashlib import sha256
from os import path
__author__ = 'sdelgado'
def certificate_hashing(cert_der, algorithm='sha256'):
""" Performs the hash of a provided X509 certificate.
:param cert_der: x509 certificate.
:type cert_der: binary DER
:param algorithm: hashing algorithm. Sha256 will be used as default.
:type algorithm: str
:return: The hash of the certificate performed using the provided hashing function.
:rtype: binary
"""
asn1_cert = decoder.decode(cert_der, asn1Spec=Certificate())[0]
tbs = asn1_cert.getComponentByName("tbsCertificate")
# Calculate the certificate hash
tbs_der = encoder.encode(tbs)
if algorithm == 'sha256':
digest = sha256()
else:
# ToDo: Check if we should include more hashing algorithms
return "Algorithm not supported"
digest.update(tbs_der)
cert_hash = digest.digest()
return cert_hash
def check_blind_hash(cert_der, blinded_hash, r, ca_cert):
""" Compute the blind hash of the provided certificate and check if it match with the provided blinded hash
:param cert_der: input certificate.
:type cert_der: binary DER
:param blinded_hash: input blinded hash to be checked.
:type blinded_hash: binary
:param r: blinding factor used to perform the blind hash.
:type r: long
:param ca_cert: CA cert. It will be used to extract the public key and perform the hash blinding.
:type ca_cert: M2Crypto.X509()
:return: True if the hashes match, False otherwise.
:rtype: bool
"""
pk = RSA.importKey(ca_cert.get_pubkey().as_der())
cert_hash = certificate_hashing(cert_der)
# Check that the provided blind signatures match with the calculated ones
if pk.blind(cert_hash, r) == blinded_hash:
response = True
else:
response = False
return response
def store_certificate(certificate, filename='paysense', extension='.crt'):
""" Stores a certificate in a human readable format.
:param certificate: certificate to be stored.
:type certificate: binary DER
:param filename: name or system path (including name) where the certificate will be stored (without extension).
:type filename: str
:param extension: file extension.
:type extension: str
:return: None
"""
x509 = X509.load_cert_der_string(certificate)
# Save the pem data into the pem file
x509.save_pem(filename + extension)
# In order to write the human readable certificate before the encoded data we should load the data just stored
# and append at the end of the file.
f = open(filename + extension, 'r')
data = f.read()
f.close()
f = open(filename + extension, 'w')
f.write(x509.as_text())
f.write(data)
f.close()
# ToDo: This function originally came from the ACA. The existence of previous certificate should be performed against a DDBB instead of looking for a certificate in the folder.
# ToDo: since old certificates could be deleted from it. If this is changed, this function should be putted back in the ACA.
def check_certificate(bitcoin_address, certs_path):
""" Checks if a certificate exists in the certificate directory.
:param bitcoin_address: name of the certificate to look for.
:type bitcoin_address: str
:param certs_path: system path where the certificate are stored.
:type certs_path: str
:return: True if the certificate exists, False otherwise.
:rtype: bool
"""
return path.exists(certs_path + bitcoin_address + '.pem')
| 34.745455 | 176 | 0.715856 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,478 | 0.648352 |
bd2ee870e5845b50e43bca14345288b03bd674b2
| 1,340 |
py
|
Python
|
zombie_infection.py
|
schana/random-hacking
|
5eeda2f05681ce9f56f1b9114255c2392e92ee9a
|
[
"Apache-2.0"
] | null | null | null |
zombie_infection.py
|
schana/random-hacking
|
5eeda2f05681ce9f56f1b9114255c2392e92ee9a
|
[
"Apache-2.0"
] | null | null | null |
zombie_infection.py
|
schana/random-hacking
|
5eeda2f05681ce9f56f1b9114255c2392e92ee9a
|
[
"Apache-2.0"
] | null | null | null |
import random
import sys
sys.setrecursionlimit(15000)
count_columns = 50
count_rows = 40
matrix = [[random.randint(0, 1) for i in range(count_columns)] for j in range(count_rows)]
matrix = [[0] * count_columns for _ in range(count_rows)]
for _ in range(10):
matrix[random.randint(0, count_rows - 1)][random.randint(0, count_columns - 1)] = 1
visited = [[False] * len(row) for row in matrix]
def print_matrix():
for row in matrix:
for value in row:
print(value if value else ' ', end=' ')
print()
# can use stack if recursion depth is too much - just push items on to be spread
# and iterate in a loop
def spread(r, c):
if r < 0 or r >= count_rows or c < 0 or c >= count_columns:
return
if matrix[r][c] == 1 and not visited[r][c]:
visited[r][c] = True
spread(r, c+1)
spread(r, c-1)
spread(r+1, c)
spread(r-1, c)
else:
matrix[r][c] = 1
visited[r][c] = True
time = 0
while not all(all(row) for row in matrix):
print_matrix()
print()
time += 1
visited = [[False] * len(row) for row in matrix]
for r, row in enumerate(matrix):
for c, value in enumerate(row):
if not visited[r][c] and value == 1:
spread(r, c)
visited[r][c] = True
print_matrix()
print(time)
| 24.363636 | 90 | 0.590299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.081343 |
bd2f0d561dd6a11311d27d509b7dfde5f4ee84a5
| 114 |
py
|
Python
|
portabletext_html/__init__.py
|
otovo/python-sanity-html
|
e445dd9d0f05f8cfe424112e568f37c36e663096
|
[
"Apache-2.0"
] | 13 |
2021-04-14T11:19:20.000Z
|
2021-11-26T11:56:39.000Z
|
portabletext_html/__init__.py
|
otovo/python-sanity-html
|
e445dd9d0f05f8cfe424112e568f37c36e663096
|
[
"Apache-2.0"
] | 25 |
2021-04-14T12:48:50.000Z
|
2021-11-29T08:11:14.000Z
|
portabletext_html/__init__.py
|
otovo/python-portabletext-html
|
31718a70db58afd143976bf0edfb42239f1800dc
|
[
"Apache-2.0"
] | null | null | null |
from portabletext_html.renderer import PortableTextRenderer, render
__all__ = ['PortableTextRenderer', 'render']
| 28.5 | 67 | 0.824561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.263158 |
bd32c31d868cb38b20ce13d60b0f31a73b04c464
| 309 |
py
|
Python
|
dad.py
|
1234borkowskip/WD151280
|
273d8a5b655e60eed1195125420462e1e11161cf
|
[
"MIT"
] | null | null | null |
dad.py
|
1234borkowskip/WD151280
|
273d8a5b655e60eed1195125420462e1e11161cf
|
[
"MIT"
] | null | null | null |
dad.py
|
1234borkowskip/WD151280
|
273d8a5b655e60eed1195125420462e1e11161cf
|
[
"MIT"
] | null | null | null |
def cg(n, a1=1, q=2):
n = n - 1
while n > 0:
a1 = a1 * q
n = n - 1
return a1
print("{} wyraz ciagu geometrycznego gdzie a1 = {}, q = {} wynosi {}".format(4, 1, 5, cg(4, 1, 5)))
print("{} wyraz ciagu geometrycznego gdzie a1 = {}, q = {} wynosi {}".format(7, 1, 2, cg(7)))
| 30.9 | 100 | 0.482201 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.407767 |
bd33d681a6be39dde7d89c1d2456e4e15b7cbc67
| 296 |
py
|
Python
|
src/code42cli/cmds/shell.py
|
maddie-vargo/code42cli
|
fde4a70d4810923b668e8ca2d8d00af75c567dd1
|
[
"MIT"
] | 14 |
2020-07-15T13:55:53.000Z
|
2022-02-24T19:09:50.000Z
|
src/code42cli/cmds/shell.py
|
maddie-vargo/code42cli
|
fde4a70d4810923b668e8ca2d8d00af75c567dd1
|
[
"MIT"
] | 213 |
2020-07-16T14:21:00.000Z
|
2022-03-29T16:08:08.000Z
|
src/code42cli/cmds/shell.py
|
maddie-vargo/code42cli
|
fde4a70d4810923b668e8ca2d8d00af75c567dd1
|
[
"MIT"
] | 11 |
2020-07-15T13:58:09.000Z
|
2022-03-29T17:33:51.000Z
|
import click
import IPython
from code42cli import BANNER
from code42cli.options import sdk_options
@click.command()
@sdk_options()
def shell(state):
"""Open an IPython shell with py42 initialized as `sdk`."""
IPython.embed(colors="Neutral", banner1=BANNER, user_ns={"sdk": state.sdk})
| 22.769231 | 79 | 0.743243 | 0 | 0 | 0 | 0 | 193 | 0.652027 | 0 | 0 | 73 | 0.246622 |
bd347bef874fe2b7fd02a07a979e78547511f381
| 216 |
py
|
Python
|
src/Main.py
|
Yee172/Memory_Revival
|
e9bf4598564546ada3b9d9bfce7bf35fad348850
|
[
"MIT"
] | null | null | null |
src/Main.py
|
Yee172/Memory_Revival
|
e9bf4598564546ada3b9d9bfce7bf35fad348850
|
[
"MIT"
] | null | null | null |
src/Main.py
|
Yee172/Memory_Revival
|
e9bf4598564546ada3b9d9bfce7bf35fad348850
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Yee_172'
__date__ = '2017/12/03'
import sys
PATH = sys.path[0][:-4]
sys.path.append(PATH)
from src.Func import *
win = MainWin()
sys.exit(app.exec_())
| 14.4 | 23 | 0.648148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.305556 |
bd34863190099e5a1deaa0f914751c6c45b7892c
| 1,191 |
py
|
Python
|
tools/protonvpn-ips/main.py
|
alessandrobasi/basi-warninglist
|
995d3cd94e1dc7afdc09eff11bc1baa352b225e9
|
[
"MIT"
] | null | null | null |
tools/protonvpn-ips/main.py
|
alessandrobasi/basi-warninglist
|
995d3cd94e1dc7afdc09eff11bc1baa352b225e9
|
[
"MIT"
] | null | null | null |
tools/protonvpn-ips/main.py
|
alessandrobasi/basi-warninglist
|
995d3cd94e1dc7afdc09eff11bc1baa352b225e9
|
[
"MIT"
] | null | null | null |
import requests, os
dir_name = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
save_path = "../../lists/"+dir_name+"/"
def main():
ips = set()
with open(save_path+"all.txt","r",encoding="UTF-8") as f:
for line in f:
ips.add(line[:-1])
url_ = 'https://api.protonmail.ch/vpn/logicals'
headers = {'user-agent': 'Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36'}
r = requests.get(url=url_, headers=headers)
json_request = r.json()
for obj in json_request["LogicalServers"]:
for server in obj["Servers"]:
ips.add(server["EntryIP"])
ips.add(server["ExitIP"])
with open(save_path+"ipv4CIDR.txt","w", encoding="UTF-8") as ipv4F, open(save_path+"ipv6CIDR.txt","w", encoding="UTF-8") as ipv6F, open(save_path+"all.txt","w", encoding="UTF-8") as allF:
for ip in ips:
allF.write(ip+"\n")
if '.' in ip:
ipv4F.write(ip+"\n")
else:
ipv6F.write(ip+"\n")
return str(len(ips))
if __name__ == "__main__":
print("ProtonVPN ips")
main()
| 31.342105 | 191 | 0.577666 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 352 | 0.29555 |
bd3567ec2bb0a247f32f1485e666f3eac6f7dc19
| 2,809 |
py
|
Python
|
dakota/sobol/sobol.py
|
arfc/dcwrapper
|
82226f601580be464668fa63df64f037962db57e
|
[
"BSD-3-Clause"
] | 1 |
2020-03-26T14:09:30.000Z
|
2020-03-26T14:09:30.000Z
|
dakota/sobol/sobol.py
|
mehmeturkmen/dcwrapper
|
82226f601580be464668fa63df64f037962db57e
|
[
"BSD-3-Clause"
] | 10 |
2019-10-08T18:46:36.000Z
|
2019-11-14T19:23:05.000Z
|
dakota/sobol/sobol.py
|
mehmeturkmen/dcwrapper
|
82226f601580be464668fa63df64f037962db57e
|
[
"BSD-3-Clause"
] | 3 |
2019-10-29T19:23:44.000Z
|
2020-09-18T13:09:49.000Z
|
# Dakota Python Driving Script
# necessary python modules
import dakota.interfacing as di
import subprocess
import sys
import os
import multiprocessing
sys.path.append('../../scripts')
import input as inp
import output as oup
import external_cym
cycdir = '../../cyclus-files/sobol/'
# ----------------------------
# Parse Dakota parameters file
# ----------------------------
params, results = di.read_parameters_file()
# -------------------------------
# Convert and send to Cyclus
# -------------------------------
# Edit Cyclus input file
cyclus_template = cycdir + 'sobol.xml.in'
scenario_name = 'fs' + str(int(params['fs'])) + 'ty' + \
str(int(params['ty'])) + 'ct' + str(int(params['ct']))
variable_dict = {'fleet_share_mox': int((params['fs'])),
'fleet_share_fr': int((100 - params['fs'])),
'transition_year': int((params['ty'])),
'cooling_time': int((params['ct'] * 12))}
output_xml = cycdir + 'sobol.xml'
inp.render_input(cyclus_template, variable_dict, output_xml)
# Run Cyclus with edited input file
output_sqlite = cycdir + scenario_name + '.sqlite'
os.system('cyclus -i ' + output_xml + ' -o ' + output_sqlite)
# ----------------------------
# Return the results to Dakota
# ----------------------------
f = open('output_name.txt', 'w+')
f.write(output_sqlite)
f.close()
p = multiprocessing.Process(target=external_cym.hlw)
p.start()
fresh = False
while fresh is False:
if os.path.exists('hlw.txt'):
if os.stat('hlw.txt').st_size > 0:
fresh = True
p.terminate()
f = open('hlw.txt', 'r')
if f.mode == 'r':
hlw = f.read()
f.close()
q = multiprocessing.Process(target=external_cym.dep_u)
q.start()
fresh = False
while fresh is False:
if os.path.exists('depu.txt'):
if os.stat('depu.txt').st_size > 0:
fresh = True
p.terminate()
f = open('depu.txt', 'r')
if f.mode == 'r':
depleted_u = f.read()
f.close()
p = multiprocessing.Process(target=external_cym.idlecapp)
p.start()
fresh = False
while fresh is False:
if os.path.exists('idlecap.txt'):
if os.stat('idlecap.txt').st_size > 0:
fresh = True
p.terminate()
f = open('idlecap.txt', 'r')
if f.mode == 'r':
idlecap = f.read()
f.close()
for i, r in enumerate(results.responses()):
if r.asv.function:
if i == 0:
r.function = hlw
if i == 1:
r.function = depleted_u
if i == 2:
r.function = idlecap
if os.path.exists('depu.txt'):
os.remove('depu.txt')
if os.path.exists('hlw.txt'):
os.remove('hlw.txt')
if os.path.exists('idlecap.txt'):
os.remove('idlecap.txt')
results.write()
| 25.770642 | 62 | 0.555714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 799 | 0.284443 |
bd36b9e0da9470a5e37a78543606857028eee2e2
| 873 |
py
|
Python
|
basic linear reg.py
|
recurshawn/Basic-Linear-Regression
|
e0ee45f58d45911d34a854f015ea196b159a2f8a
|
[
"MIT"
] | 2 |
2018-08-12T06:09:04.000Z
|
2019-05-06T07:03:26.000Z
|
basic linear reg.py
|
recurshawn/Basic-Linear-Regression
|
e0ee45f58d45911d34a854f015ea196b159a2f8a
|
[
"MIT"
] | null | null | null |
basic linear reg.py
|
recurshawn/Basic-Linear-Regression
|
e0ee45f58d45911d34a854f015ea196b159a2f8a
|
[
"MIT"
] | 2 |
2018-07-19T14:35:46.000Z
|
2018-08-12T06:13:43.000Z
|
""" BASIC LINEAR REGRESSION CODE
Consider this my first application of what I understood about ML/DL so far.
I wrote this to check my understanding of the basic concepts. It's pretty simple but I needed to get my feet wet with code.
Any suggestions for modifications are welcome!
"""
#x and y datasets
x = [3,4,5,6,7,8,9,10,11,12,13,14]
y = [6,8,10,12,14,16,18,20,22,24,26,28]
#randomly chosen initial slope and y intercept values
m = 3.0
c = 2.0
#applying gradient to modify m and c to fit the dataset
for i in xrange(100000):
for k in range(12):
m = m - 0.01*2*(m*x[k]+c - y[k])*x[k]
c = c - 0.01*2*(m*x[k]+c - y[k])
#printing the proper values of m and c
print "Slope of line:", m
print "Y-intercept of line:", c
userX = float(raw_input("Enter a x value:"))
userY = m*userX + c
print "y is ", userY
| 28.16129 | 128 | 0.634593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 536 | 0.613975 |
bd3a8db83a92cdd76c21b817a1af0e0151e6c4ab
| 5,690 |
py
|
Python
|
app/hide-and-seek/common/computils/debug.py
|
loramf/mlforhealthlabpub
|
aa5a42a4814cf69c8223f27c21324ee39d43c404
|
[
"BSD-3-Clause"
] | 171 |
2021-02-12T10:23:19.000Z
|
2022-03-29T01:58:52.000Z
|
app/hide-and-seek/common/computils/debug.py
|
loramf/mlforhealthlabpub
|
aa5a42a4814cf69c8223f27c21324ee39d43c404
|
[
"BSD-3-Clause"
] | 4 |
2021-06-01T08:18:33.000Z
|
2022-02-20T13:37:30.000Z
|
app/hide-and-seek/common/computils/debug.py
|
loramf/mlforhealthlabpub
|
aa5a42a4814cf69c8223f27c21324ee39d43c404
|
[
"BSD-3-Clause"
] | 93 |
2021-02-10T03:21:59.000Z
|
2022-03-30T19:10:37.000Z
|
"""
Debug helpers.
"""
import io
import logging
from typing import Union, Optional, Callable
import numpy as np
import pandas as pd
_printt_log_method = print
def set_log_method(log_method: Optional[Callable] = None) -> None:
global _printt_log_method # pylint: disable=global-statement
if log_method is not None:
_printt_log_method = log_method
else:
_printt_log_method = print
def _init_str(minimal: bool = False) -> str:
if minimal:
return ""
global _printt_log_method # pylint: disable=global-statement
return "\n" if _printt_log_method == print else "\n\n" # pylint: disable=comparison-with-callable
force_minimal_logging = False
def ar(
array: np.ndarray,
name: Optional[str] = None,
lim: Union[int, str, None] = None,
lw: int = 200,
minimal: bool = False,
) -> None:
"""Debug `ar`ray.
Print helper for `numpy.ndarray`, will print like so:
```
my_array [<class 'numpy.ndarray'>] [dtype=float32]:
SHAPE: (3, 3)
[[ 0.5372, 1.2580, -0.9479],
[-0.7958, -1.6064, -1.2641],
[ 1.6119, 1.3587, -0.1000]])
```
The `linewith` printoption will be set to `200` by default (`lw` argument) to allow for fewer line breaks.
Args:
array (np.ndarray): array to print.
name (Optional[str], optional): The name for the array to print. Defaults to None.
lim (Optional[int, str], optional): If `int`, will set `edgeitems` printoption to this value.
If set to `"full"` will print the entire array (can be slow). Defaults to None.
lw (int, optional): Set the `linewith` printoption to this. Defaults to 200.
minimal (bool, optional): If true, will not print the array itself. Defaults to False.
"""
global _printt_log_method # pylint: disable=global-statement
if force_minimal_logging:
minimal = True
if name is None:
name = f"Array-{id(array)}"
content = _init_str(minimal)
if not minimal:
content += f"=== <{name}> ===:\n[{type(array)}] [dtype={array.dtype}]\n"
content += f"SHAPE: {tuple(array.shape)}\n"
with np.printoptions(
threshold=np.product(array.shape) if lim == "full" else 1000, # 1000 is default.
edgeitems=lim if isinstance(lim, int) else 3, # 3 is default.
linewidth=lw,
):
content += str(array)
content += "\n" # Leave one blank line after printing.
else:
content += f"<{name}>:: {array.shape}"
_printt_log_method(content)
def ar_(*args, **kwargs):
"""
Shortcut for `ar(..., minimal=True)`.
"""
ar(*args, **kwargs, minimal=True)
def setup_logger(
name: str, level: int = logging.INFO, format_str: str = "%(name)s:%(levelname)s:\t%(message)s"
) -> logging.Logger:
"""Set up a console logger with name `name`.
Args:
name (str): Logger name.
level (int): Logging level to set. Defaults to logging.INFO.
format_str (str, optional): The format string to use for the logger formatter.
Defaults to "%(name)s:%(levelname)s:\t%(message)s".
Returns:
logging.Logger: [description]
"""
_logger = logging.getLogger(name)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt=format_str)
handler.setFormatter(formatter)
_logger.addHandler(handler)
_logger.setLevel(level)
return _logger
def _df_info_to_str(dataframe: pd.DataFrame) -> str:
buf = io.StringIO()
dataframe.info(buf=buf)
return buf.getvalue()
def df(
dataframe: Union[pd.DataFrame, pd.Series],
name: Optional[str] = None,
info: bool = False,
max_rows_before_collapse: Optional[Union[int, str]] = None,
keep_rows_if_collapsed: Optional[int] = None,
force_show_all_cols: bool = False,
minimal: bool = False,
) -> None:
"""Debug `d`ata`f`rame.
Print helper for `pd.DataFrame`.
"""
global _printt_log_method # pylint: disable=global-statement
if force_minimal_logging:
minimal = True
if name is None:
name = f"DataFrame-{id(dataframe)}"
if isinstance(dataframe, pd.DataFrame):
tp = "<class 'pd.DataFrame'>"
elif isinstance(dataframe, pd.Series):
tp = "<class 'pd.Series'>"
else:
raise ValueError(f"`df` must be a pandas DataFrame or Series, was {type(dataframe)}.")
content = _init_str(minimal)
if not minimal:
content += f"=== <{name}> ===:\n[{tp}]\n\n"
pd_option_seq = []
if max_rows_before_collapse is not None:
if max_rows_before_collapse == "full":
max_rows_before_collapse = dataframe.shape[0]
pd_option_seq.extend(["display.max_rows", max_rows_before_collapse])
if keep_rows_if_collapsed is not None:
pd_option_seq.extend(["display.min_rows", keep_rows_if_collapsed])
if force_show_all_cols:
pd_option_seq.extend(["display.max_columns", dataframe.shape[1]])
pd_option_seq.extend(["display.expand_frame_repr", True])
def _build(c):
if info:
c += _df_info_to_str(dataframe) + "\n"
c += str(dataframe) + "\n"
return c
if len(pd_option_seq) > 0:
with pd.option_context(*pd_option_seq):
content = _build(content)
else:
content = _build(content)
else:
content += f"<{name}>:: {tp}:: {dataframe.shape}"
_printt_log_method(content)
def df_(*args, **kwargs):
"""
Shortcut for `df(..., minimal=True)`.
"""
df(*args, **kwargs, minimal=True)
| 30.10582 | 110 | 0.618102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,232 | 0.392267 |
bd3b0f2c14b30cd87e31089661c02ceeb62af81c
| 3,862 |
py
|
Python
|
setup.py
|
jacklinke/django-directed
|
8ef8cd8a71e9a03a8628dce6465351f676f542ff
|
[
"Apache-2.0"
] | 2 |
2022-02-09T10:15:40.000Z
|
2022-02-22T14:11:03.000Z
|
setup.py
|
jacklinke/django-directed
|
8ef8cd8a71e9a03a8628dce6465351f676f542ff
|
[
"Apache-2.0"
] | 1 |
2022-02-20T14:49:37.000Z
|
2022-02-20T14:49:37.000Z
|
setup.py
|
jacklinke/django-directed
|
8ef8cd8a71e9a03a8628dce6465351f676f542ff
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
import sys
from collections import defaultdict
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version(*file_paths):
"""Retrieves the version from django_directed/__init__.py"""
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def get_extras_require(path, add_all=True):
# https://hanxiao.io/2019/11/07/A-Better-Practice-for-Managing-extras-require-Dependencies-in-Python/
with open(path) as fp:
extra_deps = defaultdict(set)
for k in fp:
if k.strip() and not k.startswith("#"):
tags = set()
if ":" in k:
k, v = k.split(":")
tags.update(vv.strip() for vv in v.split(","))
tags.add(re.split("[<=>]", k)[0])
for t in tags:
extra_deps[t].add(k)
# add tag `all` at the end
if add_all:
extra_deps["all"] = set(vv for v in extra_deps.values() for vv in v)
return extra_deps
readme = open("README.md").read()
changelog = open("CHANGELOG.md").read()
requirements = open("requirements/base.txt").readlines()
extras_requirements_path = "requirements/extras.txt"
version = get_version("django_directed", "__init__.py")
if sys.argv[-1] == "publish":
try:
import wheel
print("Wheel version: ", wheel.__version__)
except ImportError:
print('Wheel library missing. Please run "pip install wheel"')
sys.exit()
os.system("python setup.py sdist upload")
os.system("python setup.py bdist_wheel upload")
sys.exit()
if sys.argv[-1] == "tag":
print("Tagging the version on git:")
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries",
"Topic :: Database",
"Topic :: Utilities",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Framework :: Django :: 3.1",
"Framework :: Django :: 3.2",
]
setup(
name="django-directed",
version=version,
description="""Tools for building, querying, manipulating, and exporting directed graphs with django""",
long_description=readme + "\n\n" + changelog,
long_description_content_type="text/markdown",
author="Jack Linke",
author_email="[email protected]",
license="Apache Software License",
url="https://github.com/jacklinke/django-directed/",
project_urls={
"Documentation": "https://django-directed.readthedocs.io/en/latest/",
"Source": "https://github.com/jacklinke/django-directed/",
"Tracker": "https://github.com/jacklinke/django-directed/issues",
},
packages=[
"django_directed",
],
package_dir={"django_directed": "django_directed"},
include_package_data=True,
keywords="django-directed, graph, tree, dag, network, directed, acyclic, postgres, cte",
python_requires=">=3.7, <4",
classifiers=classifiers,
install_requires=requirements,
extras_require=get_extras_require(extras_requirements_path),
)
| 33.877193 | 108 | 0.634645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,759 | 0.455463 |
bd3b1d81b7abc114bb78bcdb8316981a6a5efeb1
| 2,050 |
py
|
Python
|
cv_utils/object_detection/dataset/utils.py
|
fadamsyah/cv_utils
|
487fc65fe4a71f05dd03df31cde21d866968c0b4
|
[
"MIT"
] | null | null | null |
cv_utils/object_detection/dataset/utils.py
|
fadamsyah/cv_utils
|
487fc65fe4a71f05dd03df31cde21d866968c0b4
|
[
"MIT"
] | 1 |
2021-11-01T06:10:29.000Z
|
2021-11-09T12:47:48.000Z
|
cv_utils/object_detection/dataset/utils.py
|
fadamsyah/cv_utils
|
487fc65fe4a71f05dd03df31cde21d866968c0b4
|
[
"MIT"
] | null | null | null |
import json
import os
import shutil
from copy import deepcopy
from pathlib import Path
def create_and_overwrite_dir(path_dir):
# Create the directory
Path(path_dir).mkdir(parents=True, exist_ok=True)
# Overwrite the directory
for path in os.listdir(path_dir):
try: os.remove(os.path.join(path_dir, path))
except IsADirectoryError: shutil.rmtree(os.path.join(path_dir, path))
def read_json(path):
"""
Read a .json file
Args:
path (string): Path of a .json file
Returns:
data (dictionary): Output dictionary
"""
f = open(path,)
data = json.load(f)
f.close()
return data
def write_json(files, path, indent=4):
"""
Write a json file from a dictionary
Args:
files (dictionary): Data
path (string): Saved json path
indent (int, optional): Number of spaces of indentation. Defaults to 4.
"""
json_object = json.dumps(files, indent = indent)
# Writing to saved_path_json
with open(path, "w") as outfile:
outfile.write(json_object)
def coco_to_img2annots(coco_annotations):
# Initialize img2annots
img2annots = {}
# Generate img2annots key
num_obj_init = {category['id']: 0 for category in coco_annotations['categories']}
for image in coco_annotations['images']:
image_id = image['id']
img2annots[image_id] = {
'description': deepcopy(image),
'annotations': [],
'num_objects': deepcopy(num_obj_init)
}
# Add every annotation to its corresponding image key
for annotation in coco_annotations['annotations']:
image_id = annotation['image_id']
category_id = annotation['category_id']
img2annots[image_id]['annotations'].append(annotation)
img2annots[image_id]['num_objects'][category_id] += 1
return img2annots
def yolo_to_img2annots(yolo_annotations, yolo_classes):
pass
# return img2annots
| 25.308642 | 85 | 0.632683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 689 | 0.336098 |
bd3b2ec0d2c9b53564b73ae83d10294a44c47243
| 269 |
py
|
Python
|
datos_modelar/clean_data.py
|
alexvaca0/EquipoGamma
|
6999cb2d24104c9cabdbfbddcb3d2746f395b0ba
|
[
"MIT"
] | null | null | null |
datos_modelar/clean_data.py
|
alexvaca0/EquipoGamma
|
6999cb2d24104c9cabdbfbddcb3d2746f395b0ba
|
[
"MIT"
] | null | null | null |
datos_modelar/clean_data.py
|
alexvaca0/EquipoGamma
|
6999cb2d24104c9cabdbfbddcb3d2746f395b0ba
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 6 23:26:38 2019
@author: avaca
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import GridSearchCV
complete_data = pd.read_csv('', encoding = 'utf-8')
vars_categ = ['HY_provincia', 'HY_tipo']
| 16.8125 | 51 | 0.69145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.423792 |
bd4090b02cae8881961d07fc8983d1d1780b62bf
| 1,326 |
py
|
Python
|
python/extracting/findPrimer.py
|
csiu/tokens
|
8a7f865d921d91aae4019e43677435ad78a8a703
|
[
"MIT"
] | null | null | null |
python/extracting/findPrimer.py
|
csiu/tokens
|
8a7f865d921d91aae4019e43677435ad78a8a703
|
[
"MIT"
] | 1 |
2015-04-20T03:28:48.000Z
|
2015-04-20T03:40:44.000Z
|
python/extracting/findPrimer.py
|
csiu/tokens
|
8a7f865d921d91aae4019e43677435ad78a8a703
|
[
"MIT"
] | null | null | null |
import re
inputfile = '/Users/csiu/project/webCrawler/extractPrimer/test.txt'
primerStart = """5'"""
primerEnd = """3'"""
nucleotides = '[ATGCN]'
linePrimerEnding = re.compile(nucleotides + '$')
linePrimerStarting = re.compile('^' + nucleotides)
with open(inputfile, 'r') as f:
for line in f:
if primerStart in line:
## when primer is within same line
if primerEnd in line:
pStartIndex = line.index(primerStart)
pEndIndex = line.index(primerEnd) + len(primerEnd)
matchingObj = line[pStartIndex : pEndIndex]
print matchingObj
continue
line = line.strip()
## when primer start is in middle of line
## and primer spans to second line
if linePrimerEnding.search(line):
## part 1
pStartIndex = line.index(primerStart)
matchingObj = line[pStartIndex:]
## part 2
line = f.next().strip()
if linePrimerStarting.match(line):
if primerEnd in line:
pEndIndex = line.index(primerEnd) + len(primerEnd)
matchingObj += line[:pEndIndex]
print matchingObj
continue
| 30.136364 | 74 | 0.534691 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 216 | 0.162896 |
bd41247422955c2bec8c75c3f52048f4b0c6d343
| 1,213 |
py
|
Python
|
app/core/admin.py
|
crocodundee/news-board-api-app
|
380e6e75f9263a068200a723c04568a9f2261ecc
|
[
"MIT"
] | null | null | null |
app/core/admin.py
|
crocodundee/news-board-api-app
|
380e6e75f9263a068200a723c04568a9f2261ecc
|
[
"MIT"
] | null | null | null |
app/core/admin.py
|
crocodundee/news-board-api-app
|
380e6e75f9263a068200a723c04568a9f2261ecc
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from django.utils.translation import gettext as _
from core import models
class UserAdmin(BaseUserAdmin):
"""Admin panel configuration for User model"""
list_display = ["username", "first_name", "last_name", "is_staff"]
fieldsets = (
(None, {"fields": ("username", "password")}),
(_("Personal info"), {"fields": ("first_name", "last_name", "email")}),
(
_("Permissions"),
{"fields": ("is_staff", "is_superuser", "is_active")},
),
(_("Important dates"), {"fields": ("last_login",)}),
)
class PostAdmin(admin.ModelAdmin):
"""Admin panel for Post model"""
list_display = ['title', 'author', 'created_at']
list_filter = ['author', 'created_at']
class CommentAdmin(admin.ModelAdmin):
"""Admin panel for Comment model"""
list_display = ['post', 'author', 'created_at']
list_filter = ['post', 'author']
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(models.Post, PostAdmin)
admin.site.register(models.Comment, CommentAdmin)
| 28.880952 | 79 | 0.659522 | 826 | 0.680956 | 0 | 0 | 0 | 0 | 0 | 0 | 417 | 0.343776 |
bd41d6fca25f541134f0afce1961c06f85b0df9b
| 1,806 |
py
|
Python
|
tests/fixtures.py
|
DNXLabs/ssm-loader
|
eae0257794126247584150eeb1b74ae05f4fcaf5
|
[
"Apache-2.0"
] | null | null | null |
tests/fixtures.py
|
DNXLabs/ssm-loader
|
eae0257794126247584150eeb1b74ae05f4fcaf5
|
[
"Apache-2.0"
] | 2 |
2020-07-31T05:32:10.000Z
|
2020-09-07T10:38:24.000Z
|
tests/fixtures.py
|
DNXLabs/ssm-loader
|
eae0257794126247584150eeb1b74ae05f4fcaf5
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import os
import json
import boto3
from click.testing import CliRunner
from moto import mock_ssm
@pytest.fixture
def runner():
return CliRunner()
@pytest.fixture(scope='function')
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ['AWS_ACCESS_KEY_ID'] = 'test'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'test'
os.environ['AWS_SECURITY_TOKEN'] = 'test'
os.environ['AWS_SESSION_TOKEN'] = 'test'
@pytest.fixture(scope='function')
def ssm(aws_credentials):
with mock_ssm():
yield boto3.client('ssm', region_name='us-east-1')
@pytest.fixture
def ssm_put_parameter(ssm):
ssm.put_parameter(
Name='/app/env/ssm_string',
Description='description',
Value='PLACEHOLDER',
Type='String'
)
ssm.put_parameter(
Name='/app/env/ssm_secure_string',
Description='description secure string',
Value='PLACEHOLDER',
Type='SecureString'
)
@pytest.fixture
def ssm_empty_parameters():
result = {
"parameters": []
}
return json.dumps(result, indent=4, sort_keys=True, default=str) + '\n'
@pytest.fixture
def load_command_parameters_output():
return '/app/env/ssm_string OK\n/app/env/ssm_secure_string OK\n'
@pytest.fixture
def ssm_parameters():
result = {
"parameters": [
{
"Name": "/app/env/ssm_string",
"Type": "String",
"Value": "PLACEHOLDER",
"Version": 1
},
{
"Name": "/app/env/ssm_secure_string",
"Type": "SecureString",
"Value": "PLACEHOLDER",
"Version": 1
}
]
}
return json.dumps(result, indent=4, sort_keys=True, default=str) + '\n'
| 22.860759 | 75 | 0.593577 | 0 | 0 | 105 | 0.05814 | 1,673 | 0.926357 | 0 | 0 | 558 | 0.30897 |
bd42f92ac6de47d16f3dec018fcdc491713b5ba6
| 5,656 |
py
|
Python
|
scripts/plotting/create_num_demos_plots.py
|
Learning-and-Intelligent-Systems/predicators
|
0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e
|
[
"MIT"
] | 24 |
2021-11-20T16:35:41.000Z
|
2022-03-30T03:49:52.000Z
|
scripts/plotting/create_num_demos_plots.py
|
Learning-and-Intelligent-Systems/predicators
|
0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e
|
[
"MIT"
] | 214 |
2021-10-12T01:17:50.000Z
|
2022-03-31T20:18:36.000Z
|
scripts/plotting/create_num_demos_plots.py
|
Learning-and-Intelligent-Systems/predicators
|
0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e
|
[
"MIT"
] | 1 |
2022-02-15T20:24:17.000Z
|
2022-02-15T20:24:17.000Z
|
"""Create plots for learning from varying numbers of demonstrations."""
import os
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from predicators.scripts.analyze_results_directory import create_dataframes, \
get_df_for_entry
pd.options.mode.chained_assignment = None # default='warn'
# plt.rcParams["font.family"] = "CMU Serif"
############################ Change below here ################################
# Details about the plt figure.
DPI = 500
FONT_SIZE = 18
# Groups over which to take mean/std.
GROUPS = [
"ENV", "APPROACH", "EXCLUDED_PREDICATES", "EXPERIMENT_ID",
"NUM_TRAIN_TASKS", "CYCLE"
]
# All column names and keys to load into the pandas tables before plotting.
COLUMN_NAMES_AND_KEYS = [
("ENV", "env"),
("APPROACH", "approach"),
("EXCLUDED_PREDICATES", "excluded_predicates"),
("EXPERIMENT_ID", "experiment_id"),
("SEED", "seed"),
("NUM_TRAIN_TASKS", "num_train_tasks"),
("CYCLE", "cycle"),
("NUM_SOLVED", "num_solved"),
("AVG_NUM_PREDS", "avg_num_preds"),
("AVG_TEST_TIME", "avg_suc_time"),
("AVG_NODES_CREATED", "avg_num_nodes_created"),
("LEARNING_TIME", "learning_time"),
("PERC_SOLVED", "perc_solved"),
]
DERIVED_KEYS = [("perc_solved",
lambda r: 100 * r["num_solved"] / r["num_test_tasks"])]
# The first element is the name of the metric that will be plotted on the
# x axis. See COLUMN_NAMES_AND_KEYS for all available metrics. The second
# element is used to label the x axis.
X_KEY_AND_LABEL = [
("NUM_TRAIN_TASKS", "Number of Training Tasks"),
# ("LEARNING_TIME", "Learning time in seconds"),
]
# Same as above, but for the y axis.
Y_KEY_AND_LABEL = [
("PERC_SOLVED", "% Evaluation Tasks Solved"),
# ("AVG_NODES_CREATED", "Averaged nodes created"),
]
# PLOT_GROUPS is a nested dict where each outer dict corresponds to one plot,
# and each inner entry corresponds to one line on the plot.
# The keys of the outer dict are plot titles.
# The keys of the inner dict are (legend label, marker, df selector).
PLOT_GROUPS = {
"Learning from Few Demonstrations": [
("PickPlace1D", "o",
lambda df: df["EXPERIMENT_ID"].apply(lambda v: "cover_main_" in v)),
("Blocks", ".",
lambda df: df["EXPERIMENT_ID"].apply(lambda v: "blocks_main_" in v)),
("Painting", "*",
lambda df: df["EXPERIMENT_ID"].apply(lambda v: "painting_main_" in v)
),
("Tools", "s",
lambda df: df["EXPERIMENT_ID"].apply(lambda v: "tools_main_" in v)),
],
"GNN Shooting LfD": [
("PickPlace1D", "o", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "cover_gnn_shooting_" in v)),
("Blocks", ".", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "blocks_gnn_shooting_" in v)),
("Painting", "*", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "painting_gnn_shooting_" in v)),
("Tools", "s", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "tools_gnn_shooting_" in v)),
],
"GNN Model-Free LfD": [
("PickPlace1D", "o", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "cover_gnn_modelfree_" in v)),
("Blocks", ".", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "blocks_gnn_modelfree_" in v)),
("Painting", "*", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "painting_gnn_modelfree_" in v)),
("Tools", "s", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "tools_gnn_modelfree_" in v)),
],
}
# If True, add (0, 0) to every plot
ADD_ZERO_POINT = True
Y_LIM = (-5, 110)
#################### Should not need to change below here #####################
def _main() -> None:
outdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"results")
os.makedirs(outdir, exist_ok=True)
matplotlib.rcParams.update({'font.size': FONT_SIZE})
grouped_means, grouped_stds, _ = create_dataframes(COLUMN_NAMES_AND_KEYS,
GROUPS, DERIVED_KEYS)
means = grouped_means.reset_index()
stds = grouped_stds.reset_index()
for x_key, x_label in X_KEY_AND_LABEL:
for y_key, y_label in Y_KEY_AND_LABEL:
for plot_title, d in PLOT_GROUPS.items():
_, ax = plt.subplots()
for label, marker, selector in d:
exp_means = get_df_for_entry(x_key, means, selector)
exp_stds = get_df_for_entry(x_key, stds, selector)
xs = exp_means[x_key].tolist()
ys = exp_means[y_key].tolist()
y_stds = exp_stds[y_key].tolist()
if ADD_ZERO_POINT:
xs = [0] + xs
ys = [0] + ys
y_stds = [0] + y_stds
ax.errorbar(xs,
ys,
yerr=y_stds,
label=label,
marker=marker)
ax.set_xticks(xs)
ax.set_title(plot_title)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_ylim(Y_LIM)
plt.legend()
plt.tight_layout()
filename = f"{plot_title}_{x_key}_{y_key}.png"
filename = filename.replace(" ", "_").lower()
outfile = os.path.join(outdir, filename)
plt.savefig(outfile, dpi=DPI)
print(f"Wrote out to {outfile}")
if __name__ == "__main__":
_main()
| 37.456954 | 79 | 0.572313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,310 | 0.408416 |
bd43a1e72c9d194feac6f21f795a8c2f2065d1a1
| 85,638 |
py
|
Python
|
pyaedt/modeler/stackup_3d.py
|
pyansys/pyaedt
|
c7b045fede6bc707fb20a8db7d5680c66d8263f6
|
[
"MIT"
] | 38 |
2021-10-01T23:15:26.000Z
|
2022-03-30T18:14:41.000Z
|
pyaedt/modeler/stackup_3d.py
|
pyansys/pyaedt
|
c7b045fede6bc707fb20a8db7d5680c66d8263f6
|
[
"MIT"
] | 362 |
2021-09-30T17:11:55.000Z
|
2022-03-31T13:36:20.000Z
|
pyaedt/modeler/stackup_3d.py
|
pyansys/pyaedt
|
c7b045fede6bc707fb20a8db7d5680c66d8263f6
|
[
"MIT"
] | 15 |
2021-09-30T20:21:02.000Z
|
2022-02-21T20:22:03.000Z
|
import os
from collections import OrderedDict
try:
import joblib
except ImportError:
pass
try:
import numpy as np
except ImportError:
pass
from pyaedt import constants
from pyaedt.generic.general_methods import generate_unique_name
from pyaedt.generic.general_methods import pyaedt_function_handler
from pyaedt.modules.MaterialLib import Material
from pyaedt.generic.general_methods import is_ironpython
LAYERS = {"s": "signal", "g": "ground", "d": "dielectric"}
def _replace_by_underscore(character, string):
if not isinstance(character, str):
raise TypeError("character must be str")
if not isinstance(character, str):
raise TypeError("string must be str")
reformat_name = list(string)
while character in reformat_name:
index = reformat_name.index(character)
reformat_name[index] = "_"
return "".join(reformat_name)
class NamedVariable(object):
"""Cast PyAEDT variable object to simplify getters and setters in Stackup3D.
Parameters
----------
application : :class:`pyaedt.hfss.Hfss
HFSS design or project where the variable is to be created.
name : str
The name of the variable. If the the name begins with an '$', the variable will be a project variable.
Otherwise, it will be a design variable.
expression : str
Expression of the value.
Examples
--------
>>> from pyaedt import Hfss
>>> from pyaedt.modeler.stackup_3d import Stackup3D
>>> hfss = Hfss()
>>> my_frequency = NamedVariable(hfss, "my_frequency", "900000Hz")
>>> wave_length_formula = "c0/" + my_frequency.name
>>> my_wave_length = NamedVariable(hfss, "my_wave_length", wave_length_formula)
>>> my_permittivity = NamedVariable(hfss, "my_permittivity", "2.2")
>>> my_wave_length.expression = my_wave_length.expression + "/" + my_permittivity.name
"""
def __init__(self, application, name, expression):
self._application = application
self._name = name
self._expression = expression
application[name] = expression
@property
def _variable(self):
return self._application.variable_manager.variables[self._name]
@property
def name(self):
"""Name of the variable as a string."""
return self._name
@property
def expression(self):
"""Expression of the variable as a string."""
return self._expression
@expression.setter
def expression(self, expression):
"""Set the expression of the variable.
Parameters
----------
expression: str
Value expression of the variable."""
if isinstance(expression, str):
self._expression = expression
self._application[self.name] = expression
else:
self._application.logger.error("Expression must be a string")
@property
def unit_system(self):
"""Unit system of the expression as a string."""
return self._variable.unit_system
@property
def units(self):
"""Units."""
return self._variable.units
@property
def value(self):
"""Value."""
return self._variable.value
@property
def numeric_value(self):
"""Numeric part of the expression as a float value."""
return self._variable.numeric_value
@property
def evaluated_value(self):
"""String that combines the numeric value and the units."""
return self._variable.evaluated_value
@pyaedt_function_handler()
def hide_variable(self, value=True):
"""Set the variable to a hidden variable.
Parameters
----------
value : bool, optional
Whether the variable is a hidden variable. The default is ``True``.
Returns
bool
"""
self._application.variable_manager[self._name].hidden = value
return True
@pyaedt_function_handler()
def read_only_variable(self, value=True):
"""Set the variable to a read-only variable.
Parameters
----------
value : bool, optional
Whether the variable is a read-only variable. The default is ``True``.
Returns
-------
bool
"""
self._application.variable_manager[self._name].read_only = value
return True
class Layer3D(object):
"""Provides a class for a management of a parametric layer in 3D Modeler."""
def __init__(
self,
stackup,
app,
name,
layer_type="S",
material="copper",
thickness=0.035,
fill_material="FR4_epoxy",
index=1,
):
self._stackup = stackup
self._index = index
self._app = app
self._name = name
layer_position = "layer_" + name + "_position"
self._position = NamedVariable(app, layer_position, "0mm")
self._thickness = None
self._layer_type = LAYERS.get(layer_type.lower())
self._obj_3d = []
obj_3d = None
self._material = self.duplicate_parametrize_material(material)
self._material_name = self._material.name
if self._layer_type != "dielectric":
self._fill_material = self.duplicate_parametrize_material(fill_material)
self._fill_material_name = self._fill_material.name
self._thickness_variable = self._name + "_thickness"
if thickness:
self._thickness = NamedVariable(self._app, self._thickness_variable, str(thickness) + "mm")
if self._layer_type == "dielectric":
obj_3d = self._app.modeler.create_box(
["dielectric_x_position", "dielectric_y_position", layer_position],
["dielectric_length", "dielectric_width", self._thickness_variable],
name=self._name,
matname=self._material_name,
)
elif self._layer_type == "ground":
if thickness:
obj_3d = self._app.modeler.create_box(
["dielectric_x_position", "dielectric_y_position", layer_position],
["dielectric_length", "dielectric_width", self._thickness_variable],
name=self._name,
matname=self._material_name,
)
else:
obj_3d = self._app.modeler.create_rectangle(
"Z",
["dielectric_x_position", "dielectric_y_position", layer_position],
["dielectric_length", "dielectric_width"],
name=self._name,
matname=self._material_name,
)
elif self._layer_type == "signal":
if thickness:
obj_3d = self._app.modeler.create_box(
["dielectric_x_position", "dielectric_y_position", layer_position],
["dielectric_length", "dielectric_width", self._thickness_variable],
name=self._name,
matname=self._fill_material,
)
else:
obj_3d = self._app.modeler.create_rectangle(
"Z",
["dielectric_x_position", "dielectric_y_position", layer_position],
["dielectric_length", "dielectric_width"],
name=self._name,
matname=self._fill_material,
)
obj_3d.group_name = "Layer_{}".format(self._name)
if obj_3d:
self._obj_3d.append(obj_3d)
else:
self._app.logger.error("Generation of the ground layer does not work.")
@property
def name(self):
"""Layer name.
Returns
-------
str
"""
return self._name
@property
def number(self):
"""Layer ID.
Returns
-------
int
"""
return self._index
@property
def material_name(self):
"""Material name.
Returns
-------
str
"""
return self._material_name
@property
def material(self):
"""Material.
Returns
-------
:class:`pyaedt.modules.Material.Material`
Material.
"""
return self._material
@property
def filling_material(self):
"""Fill material.
Returns
-------
:class:`pyaedt.modules.Material.Material`
Material.
"""
return self._fill_material
@property
def filling_material_name(self):
"""Fill material name.
Returns
-------
str
"""
return self._fill_material_name
@property
def thickness(self):
"""Thickness variable.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._thickness
@property
def thickness_value(self):
"""Thickness value.
Returns
-------
float, str
"""
return self._thickness.value
@thickness.setter
def thickness(self, value):
self._thickness.expression = value
@property
def elevation(self):
"""Layer elevation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable object.
"""
return self._position
@property
def elevation_value(self):
"""Layer elevation value.
Returns
-------
str, float
"""
return self._app.variable_manager[self._position.name].value
@pyaedt_function_handler()
def duplicate_parametrize_material(self, material_name, cloned_material_name=None, list_of_properties=None):
"""Duplicate a material and parametrize all properties.
Parameters
----------
material_name : str
Name of origin material
cloned_material_name : str, optional
Name of destination material. The default is ``None``.
list_of_properties : list, optional
Properties to parametrize. The default is ``None``.
Returns
-------
:class:`pyaedt.modules.Material.Material`
Material object.
"""
application = self._app
if isinstance(material_name, Material):
return material_name
if isinstance(cloned_material_name, Material):
return cloned_material_name
if self._app.materials.checkifmaterialexists(material_name):
if not cloned_material_name:
cloned_material_name = "cloned_" + material_name
if not self._app.materials.checkifmaterialexists(cloned_material_name):
if not list_of_properties:
cloned_material = application.materials.duplicate_material(material_name, cloned_material_name)
permittivity = cloned_material.permittivity.value
permeability = cloned_material.permeability.value
conductivity = cloned_material.conductivity.value
dielectric_loss_tan = cloned_material.dielectric_loss_tangent.value
magnetic_loss_tan = cloned_material.magnetic_loss_tangent.value
reformat_name = _replace_by_underscore(" ", cloned_material_name)
reformat_name = _replace_by_underscore("(", reformat_name)
reformat_name = _replace_by_underscore(")", reformat_name)
reformat_name = _replace_by_underscore("/", reformat_name)
reformat_name = _replace_by_underscore("-", reformat_name)
reformat_name = _replace_by_underscore(".", reformat_name)
reformat_name = _replace_by_underscore(",", reformat_name)
permittivity_variable = "$" + reformat_name + "_permittivity"
permeability_variable = "$" + reformat_name + "_permeability"
conductivity_variable = "$" + reformat_name + "_conductivity"
dielectric_loss_variable = "$" + reformat_name + "_dielectric_loss"
magnetic_loss_variable = "$" + reformat_name + "_magnetic_loss"
application[permittivity_variable] = str(permittivity)
application[permeability_variable] = str(permeability)
application[conductivity_variable] = str(conductivity)
application[dielectric_loss_variable] = str(dielectric_loss_tan)
application[magnetic_loss_variable] = str(magnetic_loss_tan)
cloned_material.permittivity = permittivity_variable
cloned_material.permeability = permeability_variable
cloned_material.conductivity = conductivity_variable
cloned_material.dielectric_loss_tangent = dielectric_loss_variable
cloned_material.magnetic_loss_tangent = magnetic_loss_variable
return cloned_material
else:
return application.materials[cloned_material_name]
else:
application.logger.error("The material name %s doesn't exist" % material_name)
return None
@pyaedt_function_handler()
def add_patch(
self,
frequency,
patch_width,
patch_length=None,
patch_position_x=0,
patch_position_y=0,
patch_name=None,
axis="X",
):
"""Create a parametric patch.
Parameters
----------
frequency : float
Frequency value for the patch calculation in Hz.
patch_width : float
Patch width.
patch_length : float, optional
Patch length. The default is ``None``.
patch_position_x : float, optional
Patch start x position.
patch_position_y : float, optional
Patch start y position. The default is ``0.``
patch_name : str, optional
Patch name. The default is ``None``.
axis : str, optional
Line orientation axis. The default is ``"X"``.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.Patch`
"""
if not patch_name:
patch_name = generate_unique_name("{}_patch".format(self._name), n=3)
lst = self._stackup._layer_name
for i in range(len(lst)):
if lst[i] == self._name:
if self._stackup.stackup_layers[lst[i - 1]]._layer_type == "dielectric":
below_layer = self._stackup.stackup_layers[lst[i - 1]]
break
else:
self._app.logger.error("The layer below the selected one must be of dielectric type")
return False
created_patch = Patch(
self._app,
frequency,
patch_width,
signal_layer=self,
dielectric_layer=below_layer,
patch_length=patch_length,
patch_position_x=patch_position_x,
patch_position_y=patch_position_y,
patch_name=patch_name,
axis=axis,
)
self._obj_3d.append(created_patch.aedt_object)
self._stackup._object_list.append(created_patch)
created_patch.aedt_object.group_name = "Layer_{}".format(self._name)
return created_patch
@pyaedt_function_handler()
def ml_patch(
self,
frequency,
patch_width,
patch_position_x=0,
patch_position_y=0,
patch_name=None,
axis="X",
):
"""Create a new parametric patch using machine learning algorithm rather than analytic formulas.
Parameters
----------
frequency : float
Frequency value for patch calculation in Hz.
patch_width : float
Patch width.
patch_length : float
Patch Length.
patch_position_x : float, optional
Patch start x position.
patch_position_y : float, optional
Patch start y position.
patch_name : str, optional
Patch name.
axis : str, optional
Line orientation axis.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.Patch`
"""
if not patch_name:
patch_name = generate_unique_name("{}_patch".format(self._name), n=3)
lst = self._stackup._layer_name
for i in range(len(lst)):
if lst[i] == self._name:
if self._stackup.stackup_layers[lst[i - 1]]._layer_type == "dielectric":
below_layer = self._stackup.stackup_layers[lst[i - 1]]
break
else:
self._app.logger.error("The layer below the selected one must be of dielectric type")
return False
created_patch = MachineLearningPatch(
self._app,
frequency,
patch_width,
signal_layer=self,
dielectric_layer=below_layer,
patch_position_x=patch_position_x,
patch_position_y=patch_position_y,
patch_name=patch_name,
axis=axis,
)
self._obj_3d.append(created_patch.aedt_object)
self._stackup._object_list.append(created_patch)
created_patch.aedt_object.group_name = "Layer_{}".format(self._name)
return created_patch
@pyaedt_function_handler()
def add_trace(
self,
line_width,
line_length,
is_electrical_length=False,
is_impedance=False,
line_position_x=0,
line_position_y=0,
line_name=None,
axis="X",
reference_system=None,
frequency=1e9,
):
"""Create a trace.
Parameters
----------
line_width : float
Line width. It can be the physical width or the line impedance.
line_length : float
Line length. It can be the physical length or the electrical length.
is_electrical_length : bool, optional
Whether the line length is an electrical length or a physical length. The default
is ``False``, which means it is a physical length.
is_impedance : bool, optional
Whether the line width is an impedance. The default is ``False``, in which case
the line width is a geometrical value.
line_position_x : float, optional
Line center start x position. The default is ``0``.
line_position_y : float, optional
Line center start y position. The default is ``0``.
line_name : str, optional
Line name. The default is ``None``.
axis : str, optional
Line orientation axis. The default is ``"X"``.
reference_system : str, optional
Line reference system. The default is ``None``, in which case a new coordinate
system is created.
frequency : float, optional
Frequency value for the line calculation in Hz. The default is ``1e9``.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.Line`
"""
if not line_name:
line_name = generate_unique_name("{0}_line".format(self._name), n=3)
dielectric_layer = None
for v in list(self._stackup._stackup.values()):
if v._index == self._index - 1:
dielectric_layer = v
break
if dielectric_layer is None:
self._app.logger.error("There is no layer under this layer.")
created_line = Trace(
self._app,
frequency,
line_width if is_impedance else None,
line_width if not is_impedance else None,
self,
dielectric_layer,
line_length=line_length if not is_electrical_length else None,
line_electrical_length=line_length if is_electrical_length else None,
line_position_x=line_position_x,
line_position_y=line_position_y,
line_name=line_name,
reference_system=reference_system,
axis=axis,
)
created_line.aedt_object.group_name = "Layer_{}".format(self._name)
self._obj_3d.append(created_line.aedt_object)
self._stackup._object_list.append(created_line)
return created_line
@pyaedt_function_handler()
def add_polygon(self, points, material="copper", is_void=False, poly_name=None):
"""Create a polygon.
Parameters
----------
points : list
Points list of [x,y] coordinates.
material : str, optional
Material name. The default is ``"copper"``.
is_void : bool, optional
Whether the polygon is a void. The default is ``False``.
On ground layers, it will act opposite of the Boolean value because the ground
is negative.
poly_name : str, optional
Polygon name. The default is ``None``.
Returns
-------
"""
if not poly_name:
poly_name = generate_unique_name("{0}_poly".format(self._name), n=3)
polygon = Polygon(
self._app,
points,
thickness=self._thickness,
signal_layer_name=self._name,
mat_name=material,
is_void=is_void,
poly_name=poly_name,
)
polygon.aedt_object.group_name = "Layer_{}".format(self._name)
if self._layer_type == "ground":
if not is_void:
if polygon.aedt_object.is3d:
self._app.modeler[self._name].subtract(polygon.aedt_object, True)
polygon.aedt_object.material_name = self.filling_material_name
else:
self._app.modeler[self._name].subtract(polygon.aedt_object, False)
return True
elif is_void:
if polygon.aedt_object.is3d:
self._app.modeler.subtract(self._obj_3d, polygon.aedt_object, True)
polygon.aedt_object.material_name = self.filling_material_name
else:
self._app.modeler[self._name].subtract(polygon.aedt_object, False)
return True
else:
self._app.modeler.subtract(self._obj_3d[0], polygon.aedt_object, True)
self._obj_3d.append(polygon.aedt_object)
self._stackup._object_list.append(polygon)
return polygon
class PadstackLayer(object):
"""Provides a data class for the definition of a padstack layer and relative pad and antipad values."""
def __init__(self, padstack, layer_name, elevation, thickness):
self._padstack = padstack
self._layer_name = layer_name
self._layer_elevation = elevation
self._layer_thickness = thickness
self._pad_radius = 1
self._antipad_radius = 2
self._units = "mm"
class Padstack(object):
"""Padstack Class member of Stackup3D."""
def __init__(self, app, stackup, name, material="copper"):
self._app = app
self._stackup = stackup
self.name = name
self._padstacks_by_layer = OrderedDict({})
self._vias_objects = []
self._num_sides = 16
self._plating_ratio = 1
v = None
k = None
for k, v in self._stackup.stackup_layers.items():
if not self._padstacks_by_layer and v._layer_type == "dielectric":
continue
self._padstacks_by_layer[k] = PadstackLayer(self, k, v.elevation, v.thickness)
if v and v._layer_type == "dielectric":
del self._padstacks_by_layer[k]
self._padstacks_material = material
@property
def plating_ratio(self):
"""Plating ratio between 0 and 1.
Returns
-------
float
"""
return self._plating_ratio
@plating_ratio.setter
def plating_ratio(self, val):
if isinstance(val, (float, int)) and val > 0 and val <= 1:
self._plating_ratio = val
elif isinstance(val, str):
self._plating_ratio = val
else:
self._app.logger.error("Plating has to be between 0 and 1")
@property
def num_sides(self):
"""Number of sides on the circle, which is 0 for a true circle.
Returns
-------
int
"""
return self._num_sides
@num_sides.setter
def num_sides(self, val):
self._num_sides = val
@pyaedt_function_handler()
def set_all_pad_value(self, value):
"""Set all pads in all layers to a specified value.
Parameters
----------
value : float
Pad radius.
Returns
-------
bool
"True`` when successful, ``False`` when failed.
"""
for v in list(self._padstacks_by_layer.values()):
v._pad_radius = value
return True
@pyaedt_function_handler()
def set_all_antipad_value(self, value):
"""Set all antipads in all layers to a specified value.
Parameters
----------
value : float
Pad radius.
Returns
-------
bool
"True`` when successful, ``False`` when failed.
"""
for v in list(self._padstacks_by_layer.values()):
v._antipad_radius = value
return True
@pyaedt_function_handler()
def set_start_layer(self, layer):
"""Set the start layer to a specified value.
Parameters
----------
layer : str
Layer name.
Returns
-------
bool
"True`` when successful, ``False`` when failed.
"""
found = False
new_stackup = OrderedDict({})
for k, v in self._stackup.stackup_layers.items():
if k == layer:
found = True
if found and layer not in self._padstacks_by_layer:
new_stackup[k] = PadstackLayer(self, k, v.elevation)
elif found:
new_stackup[k] = self._padstacks_by_layer[k]
self._padstacks_by_layer = new_stackup
return True
@pyaedt_function_handler()
def set_stop_layer(self, layer):
"""Set the stop layer to a specified value.
Parameters
----------
layer : str
Layer name.
Returns
-------
bool
"True`` when successful, ``False`` when failed.
"""
found = False
new_stackup = OrderedDict({})
for k in list(self._stackup.stackup_layers.keys()):
if k == layer:
found = True
if not found and k in list(self._padstacks_by_layer.keys()):
new_stackup[k] = self._padstacks_by_layer[k]
self._padstacks_by_layer = new_stackup
@pyaedt_function_handler()
def add_via(self, position_x=0, position_y=0, instance_name=None, reference_system=None):
"""Insert a new via on this padstack.
Parameters
----------
position_x : float, optional
Center x position. The default is ``0``.
position_y : float, optional
Center y position. The default is ``0``.
instance_name : str, optional
Via name. The default is ``None``.
reference_system : str, optional
Whether to use an existing reference system or create a new one. The default
is ``None``, in which case a new reference system is created.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
Object created.
"""
if not instance_name:
instance_name = generate_unique_name("{}_".format(self.name), n=3)
if reference_system:
self._app.modeler.set_working_coordinate_system(reference_system)
self._reference_system = reference_system
else:
self._app.modeler.create_coordinate_system(
origin=[0, 0, 0], reference_cs="Global", name=instance_name + "_CS"
)
self._app.modeler.set_working_coordinate_system(instance_name + "_CS")
self._reference_system = instance_name + "_CS"
first_el = None
cyls = []
for v in list(self._padstacks_by_layer.values()):
if not first_el:
first_el = v._layer_elevation
else:
position_x = self._app.modeler._arg_with_dim(position_x)
position_y = self._app.modeler._arg_with_dim(position_y)
cyls.append(
self._app.modeler.create_cylinder(
"Z",
[position_x, position_y, v._layer_elevation.name],
v._pad_radius,
v._layer_thickness.name,
matname=self._padstacks_material,
name=instance_name,
numSides=self._num_sides,
)
)
if self.plating_ratio < 1:
hole = self._app.modeler.create_cylinder(
"Z",
[position_x, position_y, v._layer_elevation.name],
"{}*{}".format(self._app.modeler._arg_with_dim(v._pad_radius), 1 - self.plating_ratio),
v._layer_thickness.name,
matname=self._padstacks_material,
name=instance_name,
numSides=self._num_sides,
)
cyls[-1].subtract(hole, False)
anti = self._app.modeler.create_cylinder(
"Z",
[position_x, position_y, v._layer_elevation.name],
v._antipad_radius,
v._layer_thickness.name,
matname="air",
name=instance_name + "_antipad",
)
self._app.modeler.subtract(
self._stackup._signal_list + self._stackup._ground_list + self._stackup._dielectric_list,
anti,
False,
)
first_el = v._layer_elevation
if len(cyls) > 1:
self._app.modeler.unite(cyls)
self._vias_objects.append(cyls[0])
cyls[0].group_name = "Vias"
self._stackup._vias.append(self)
return cyls[0]
class Stackup3D(object):
"""Main Stackup3D Class."""
def __init__(self, application):
self._app = application
self._layer_name = []
self._layer_position = []
self._dielectric_list = []
self._dielectric_name_list = []
self._ground_list = []
self._ground_name_list = []
self._ground_fill_material = []
self._signal_list = []
self._signal_name_list = []
self._signal_material = []
self._object_list = []
self._vias = []
self._end_of_stackup3D = NamedVariable(self._app, "StackUp_End", "0mm")
self._z_position_offset = 0
self._first_layer_position = "layer_1_position"
self._shifted_index = 0
self._stackup = OrderedDict({})
self._start_position = NamedVariable(self._app, self._first_layer_position, "0mm")
self._dielectric_x_position = NamedVariable(self._app, "dielectric_x_position", "0mm")
self._dielectric_y_position = NamedVariable(self._app, "dielectric_y_position", "0mm")
self._dielectric_width = NamedVariable(self._app, "dielectric_width", "1000mm")
self._dielectric_length = NamedVariable(self._app, "dielectric_length", "1000mm")
self._padstacks = []
@property
def padstacks(self):
"""List of padstacks created.
Returns
-------
List
"""
return self._padstacks
@property
def dielectrics(self):
"""List of dielectrics created.
Returns
-------
List
"""
return self._dielectric_list
@property
def grounds(self):
"""List of grounds created.
Returns
-------
List
"""
return self._ground_list
@property
def signals(self):
"""List of signals created.
Returns
-------
List
"""
return self._signal_list
@property
def objects(self):
"""List of obects created.
Returns
-------
List
"""
return self._object_list
@property
def objects_by_layer(self):
"""List of padstacks created.
Returns
-------
List
"""
objs = {}
for obj in self.objects:
if objs.get(obj.layer_name, None):
objs[obj.layer_name].append(obj)
else:
objs[obj.layer_name] = [obj]
return objs
@property
def start_position(self):
"""Variable containing the start position.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
"""
return self._start_position
@start_position.setter
def start_position(self, expression):
self._start_position.expression = expression
@property
def dielectric_x_position(self):
"""Stackup x origin.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable object.
"""
return self._dielectric_x_position
@dielectric_x_position.setter
def dielectric_x_position(self, expression):
self._dielectric_x_position.expression = expression
@property
def dielectric_y_position(self):
"""Stackup y origin.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable object.
"""
return self._dielectric_x_position
@dielectric_y_position.setter
def dielectric_y_position(self, expression):
self._dielectric_y_position.expression = expression
@property
def dielectric_width(self):
"""Stackup width.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable object.
"""
return self._dielectric_width
@dielectric_width.setter
def dielectric_width(self, expression):
self._dielectric_width.expression = expression
@property
def dielectric_length(self):
"""Stackup length.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable object.
"""
return self._dielectric_length
@dielectric_length.setter
def dielectric_length(self, expression):
self._dielectric_length.expression = expression
@property
def layer_names(self):
"""List of all layer names.
Returns
-------
list
"""
return self._layer_name
@property
def layer_positions(self):
"""List of all layer positions.
Returns
-------
List
"""
return self._layer_position
@property
def stackup_layers(self):
"""Dictionary of all stackup layers.
Returns
-------
dict
"""
return self._stackup
@property
def z_position_offset(self):
"""Elevation.
Returns
-------
"""
return self._z_position_offset
@pyaedt_function_handler()
def add_padstack(self, name, material="copper"):
"""Add a new padstack definition.
Parameters
----------
name : str
padstack name
material : str, optional
Padstack material. The default is ``"copper"``.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.Padstack`
"""
p = Padstack(self._app, self, name, material)
self._padstacks.append(p)
return p
@pyaedt_function_handler()
def add_layer(self, name, layer_type="S", material="copper", thickness=0.035, fill_material="FR4_epoxy"):
"""Add a new layer to the stackup.
The new layer can be a signal (S), ground (G), or dielectric (D).
The layer is entirely filled with the specified fill material. Anything will be drawn
wmaterial.
Parameters
----------
name : str
Layer name.
layer_type : str, optional
Layer type. Options are ``"S"``, ``"D"``, and ``"G"``. The default is ``"S"``.
material : str, optional
Material name. The default is ``"copper"``. The material will be parametrized.
thickness : float, optional
Thickness value. The default is ``0.035``. The thickness will be parametrized.
fill_material : str, optional
Fill material name. The default is ``"FR4_epoxy"``. The fill material will be
parametrized. This parameter is not valid for dielectrics.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.Layer3D`
Layer object.
"""
self._shifted_index += 1
if not layer_type:
raise ValueError("Layer type has to be an S, D, or G string.")
self._layer_name.append(name)
lay = Layer3D(
stackup=self,
app=self._app,
name=name,
layer_type=layer_type,
material=material,
thickness=thickness,
fill_material=fill_material,
index=self._shifted_index,
)
self._layer_position_manager(lay)
if layer_type == "D":
self._dielectric_list.extend(lay._obj_3d)
self._dielectric_name_list.append(lay._name)
lay._obj_3d[-1].transparency = "0.8"
elif layer_type == "G":
self._ground_list.extend(lay._obj_3d)
self._ground_name_list.append(lay._name)
self._ground_fill_material.append(lay._fill_material)
lay._obj_3d[-1].transparency = "0.6"
lay._obj_3d[-1].color = (255, 0, 0)
elif layer_type == "S":
self._signal_list.extend(lay._obj_3d)
self._signal_name_list.append(lay._name)
self._signal_material.append(lay._material_name)
# With the function _layer_position_manager i think this part is not needed anymore or has to be reworked
lay._obj_3d[-1].transparency = "0.8"
self._stackup[lay._name] = lay
return lay
@pyaedt_function_handler()
def add_signal_layer(self, name, material="copper", thickness=0.035, fill_material="FR4_epoxy"):
"""Add a new ground layer to the stackup.
A signal layer is positive. The layer is entirely filled with the fill material.
Anything will be drawn wmaterial.
Parameters
----------
name : str
Layer name.
material : str
Material name. Material will be parametrized.
thickness : float
Thickness value. Thickness will be parametrized.
fill_material : str
Fill Material name. Material will be parametrized.=
material : str, optional
Material name. Material will be parametrized. Default value is `"copper"`.
thickness : float, optional
Thickness value. Thickness will be parametrized. Default value is `0.035`.
fill_material : str, optional
Fill material name. Material will be parametrized. Default value is `"FR4_epoxy"`.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.Layer3D`
Layer object.
"""
return self.add_layer(
name=name, layer_type="S", material=material, thickness=thickness, fill_material=fill_material
)
@pyaedt_function_handler()
def add_dielectric_layer(
self,
name,
material="FR4_epoxy",
thickness=0.035,
):
"""Add a new dielectric layer to the stackup.
Parameters
----------
name : str
Layer name.
material : str
Material name. The default is ``"FR4_epoxy"``. The material will be parametrized.
thickness : float, optional
Thickness value. The default is ``0.035``. The thickness will be parametrized.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.Layer3D`
Layer 0bject.
"""
return self.add_layer(name=name, layer_type="D", material=material, thickness=thickness, fill_material=None)
@pyaedt_function_handler()
def add_ground_layer(self, name, material="copper", thickness=0.035, fill_material="air"):
"""Add a new ground layer to the stackup. A ground layer is negative.
The layer is entirely filled with metal. Any polygon will draw a void in it.
Parameters
----------
name : str
Layer name.
material : str
Material name. Material will be parametrized.
thickness : float
Thickness value. Thickness will be parametrized.
fill_material : str
Fill Material name. Material will be parametrized.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.Layer3D`
Layer Object.
"""
return self.add_layer(
name=name, layer_type="G", material=material, thickness=thickness, fill_material=fill_material
)
@pyaedt_function_handler()
def _layer_position_manager(self, layer):
"""
Parameters
----------
layer
Returns
-------
"""
previous_layer_end = self._end_of_stackup3D.expression
layer.elevation.expression = previous_layer_end
if layer.thickness:
self._end_of_stackup3D.expression = layer.elevation.name + " + " + layer.thickness.name
else:
self._end_of_stackup3D.expression = layer.elevation.name
# if we call this function instantiation of the Layer, the first call, previous_layer_end is "0mm", and
# layer.position.expression is also "0mm" and self._end_of_stackup becomes the first layer.position + thickness
# if it has thickness, and so the second call, previous_layer_end is the previous layer position + thickness
# so the current layer position is the previous_layer_end and the end_of_stackup is the current layer position +
# thickness, and we just need to call this function after the construction of a layer3D.
@pyaedt_function_handler()
def resize(self, percentage_offset):
"""Resize the stackup around objects created by a percentage offset.
Parameters
----------
percentage_offset : float
Offset of resize. The value must be greater than 0.
Returns
-------
bool
"""
list_of_2d_points = []
list_of_x_coordinates = []
list_of_y_coordinates = []
for obj3d in self._object_list:
points_list_by_object = obj3d.points_on_layer
list_of_2d_points = points_list_by_object + list_of_2d_points
for via in self._vias:
for v in via._vias_objects:
list_of_x_coordinates.append(v.bounding_box[0] - v.bounding_dimension[0])
list_of_x_coordinates.append(v.bounding_box[3] - v.bounding_dimension[0])
list_of_y_coordinates.append(v.bounding_box[1] - v.bounding_dimension[1])
list_of_y_coordinates.append(v.bounding_box[4] - v.bounding_dimension[1])
list_of_x_coordinates.append(v.bounding_box[0] + v.bounding_dimension[0])
list_of_x_coordinates.append(v.bounding_box[4] + v.bounding_dimension[0])
list_of_y_coordinates.append(v.bounding_box[4] + v.bounding_dimension[1])
list_of_y_coordinates.append(v.bounding_box[1] + v.bounding_dimension[1])
for point in list_of_2d_points:
list_of_x_coordinates.append(point[0])
list_of_y_coordinates.append(point[1])
maximum_x = max(list_of_x_coordinates)
minimum_x = min(list_of_x_coordinates)
maximum_y = max(list_of_y_coordinates)
minimum_y = min(list_of_y_coordinates)
variation_x = abs(maximum_x - minimum_x)
variation_y = abs(maximum_y - minimum_y)
self._app["dielectric_x_position"] = str(minimum_x - variation_x * percentage_offset / 100) + "mm"
self._app["dielectric_y_position"] = str(minimum_y - variation_y * percentage_offset / 100) + "mm"
self._app["dielectric_length"] = str(maximum_x - minimum_x + 2 * variation_x * percentage_offset / 100) + "mm"
self._app["dielectric_width"] = str(maximum_y - minimum_y + 2 * variation_y * percentage_offset / 100) + "mm"
return True
def resize_around_element(self, element, percentage_offset=0.25):
"""Resize the stackup around objects and make it parametrize.
Parameters
----------
element : :class:`pyaedt.modeler.stackup_3d.Patch
Element around which the resizing is done.
percentage_offset : float, optional
Offset of resize. Value accepted are greater than 0. O.25 by default.
Returns
-------
bool
"""
self._app["dielectric_x_position"] = (
element.position_x.name + " - " + element.length.name + " * " + str(percentage_offset)
)
self._app["dielectric_y_position"] = (
element.position_y.name + " - " + element.width.name + " * (0.5 + " + str(percentage_offset) + ")"
)
self._app["dielectric_length"] = element.length.name + " * (1 + " + str(percentage_offset) + " * 2)"
self._app["dielectric_width"] = element.width.name + " * (1 + " + str(percentage_offset) + " * 2)"
return True
class CommonObject(object):
"""CommonObject Class in Stackup3D."""
def __init__(self, application):
self._application = application
self._name = None
self._dielectric_layer = None
self._signal_layer = None
self._aedt_object = None
self._layer_name = None
self._layer_number = None
self._material_name = None
self._reference_system = None
@property
def reference_system(self):
"""Coordinate system of the object.
Returns
-------
str
"""
return self._reference_system
@property
def dielectric_layer(self):
"""Dielectric layer that the object belongs to.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.Layer3D`
"""
return self._dielectric_layer
@property
def signal_layer(self):
"""Signal layer that the object belongs to.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.Layer3D`
"""
return self._signal_layer
@property
def name(self):
"""Object name.
Returns
-------
str
"""
return self._name
@property
def application(self):
"""App object."""
return self._application
@property
def aedt_object(self):
"""PyAEDT object 3D.
Returns
-------
:class:`pyaedt.modeler.Object3d.Object3d`
"""
return self._aedt_object
@property
def layer_name(self):
"""Layer name.
Returns
-------
str
"""
return self._layer_name
@property
def layer_number(self):
"""Layer ID.
Returns
-------
int
"""
return self._layer_number
@property
def material_name(self):
"""Material name.
Returns
-------
str
"""
return self._material_name
@property
def points_on_layer(self):
"""Object bounding box.
Returns
-------
List
List of [x,y] coordinate of the bounding box.
"""
bb = self._aedt_object.bounding_box
return [[bb[0], bb[1]], [bb[0], bb[4]], [bb[3], bb[4]], [bb[3], bb[1]]]
class Patch(CommonObject, object):
"""Patch Class in Stackup3D."""
def __init__(
self,
application,
frequency,
patch_width,
signal_layer,
dielectric_layer,
patch_length=None,
patch_position_x=0,
patch_position_y=0,
patch_name="patch",
reference_system=None,
axis="X",
):
CommonObject.__init__(self, application)
self._frequency = NamedVariable(application, patch_name + "_frequency", str(frequency) + "Hz")
self._signal_layer = signal_layer
self._dielectric_layer = dielectric_layer
self._substrate_thickness = dielectric_layer.thickness
self._width = NamedVariable(application, patch_name + "_width", application.modeler._arg_with_dim(patch_width))
self._position_x = NamedVariable(
application, patch_name + "_position_x", application.modeler._arg_with_dim(patch_position_x)
)
self._position_y = NamedVariable(
application, patch_name + "_position_y", application.modeler._arg_with_dim(patch_position_y)
)
self._position_z = signal_layer.elevation
self._dielectric_layer = dielectric_layer
self._signal_layer = signal_layer
self._dielectric_material = dielectric_layer.material
self._material_name = signal_layer.material_name
self._layer_name = signal_layer.name
self._layer_number = signal_layer.number
self._name = patch_name
self._patch_thickness = signal_layer.thickness
self._application = application
self._aedt_object = None
try:
self._permittivity = NamedVariable(
application, patch_name + "_permittivity", float(self._dielectric_material.permittivity.value)
)
except ValueError:
self._permittivity = NamedVariable(
application,
patch_name + "_permittivity",
float(application.variable_manager[self._dielectric_material.permittivity.value].value),
)
if isinstance(patch_length, float) or isinstance(patch_length, int):
self._length = NamedVariable(
application, patch_name + "_length", application.modeler._arg_with_dim(patch_length)
)
self._effective_permittivity = self._effective_permittivity_calcul
self._wave_length = self._wave_length_calcul
elif patch_length is None:
self._effective_permittivity = self._effective_permittivity_calcul
self._added_length = self._added_length_calcul
self._wave_length = self._wave_length_calcul
self._length = self._length_calcul
self._impedance_l_w, self._impedance_w_l = self._impedance_calcul
if reference_system:
application.modeler.set_working_coordinate_system(reference_system)
if axis == "X":
start_point = [
"{0}_position_x".format(self._name),
"{0}_position_y-{0}_width/2".format(self._name),
0,
]
else:
start_point = [
"{0}_position_x-{0}_width/2".format(self._name),
"{}_position_y".format(self._name),
0,
]
self._reference_system = reference_system
else:
application.modeler.create_coordinate_system(
origin=[
"{0}_position_x".format(patch_name),
"{}_position_y".format(patch_name),
signal_layer.elevation.name,
],
reference_cs="Global",
name=patch_name + "_CS",
)
if axis == "X":
start_point = [0, "-{}_width/2".format(patch_name), 0]
else:
start_point = ["-{}_width/2".format(patch_name), 0, 0]
application.modeler.set_working_coordinate_system(patch_name + "_CS")
self._reference_system = patch_name + "_CS"
if signal_layer.thickness:
self._aedt_object = application.modeler.create_box(
position=start_point,
dimensions_list=[
"{}_length".format(patch_name),
"{}_width".format(patch_name),
signal_layer.thickness.name,
],
name=patch_name,
matname=signal_layer.material_name,
)
else:
self._aedt_object = application.modeler.create_rectangle(
position=start_point,
dimension_list=[self.length.name, self.width.name],
name=patch_name,
matname=signal_layer.material_name,
)
application.assign_coating(self._aedt_object.name, signal_layer.material)
application.modeler.set_working_coordinate_system("Global")
application.modeler.subtract(blank_list=[signal_layer.name], tool_list=[patch_name], keepOriginals=True)
@property
def frequency(self):
"""Model frequency.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._frequency
@property
def substrate_thickness(self):
"""Substrate thickness.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._substrate_thickness
@property
def width(self):
"""Width.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable object.
"""
return self._width
@property
def position_x(self):
"""Starting position X.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable object.
"""
return self._position_x
@property
def position_y(self):
"""Starting position Y.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable object.
"""
return self._position_y
@property
def permittivity(self):
"""Permittivity.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable object.
"""
return self._permittivity
@property
def _permittivity_calcul(self):
"""Permittivity calculation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable object.
"""
self._permittivity = self.application.materials[self._dielectric_material].permittivity
return self._permittivity
@property
def effective_permittivity(self):
"""Effective permittivity.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable object.
"""
return self._effective_permittivity
@property
def _effective_permittivity_calcul(self):
# "(substrat_permittivity + 1)/2 + (substrat_permittivity -
# 1)/(2 * sqrt(1 + 10 * substrate_thickness/patch_width))"
er = self._permittivity.name
h = self._substrate_thickness.name
w = self._width.name
patch_eff_permittivity_formula = "(" + er + "+ 1)/2 + (" + er + "- 1)/(2 * sqrt(1 + 10 * " + h + "/" + w + "))"
self._effective_permittivity = NamedVariable(
self.application, self._name + "_eff_permittivity", patch_eff_permittivity_formula
)
return self._effective_permittivity
@property
def added_length(self):
"""Added length calculation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable object.
"""
return self._added_length
@property
def _added_length_calcul(self):
"""Added length calculation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable object.
"""
# "0.412 * substrate_thickness * (patch_eff_permittivity + 0.3) * (patch_width/substrate_thickness + 0.264)"
# " / ((patch_eff_permittivity - 0.258) * (patch_width/substrate_thickness + 0.813)) "
er_e = self._effective_permittivity.name
h = self._substrate_thickness.name
w = self._width.name
patch_added_length_formula = (
"0.412 * " + h + " * (" + er_e + " + 0.3) * (" + w + "/" + h + " + 0.264)/"
"((" + er_e + " - 0.258) * (" + w + "/" + h + " + 0.813))"
)
self._added_length = NamedVariable(self.application, self._name + "_added_length", patch_added_length_formula)
return self._added_length
@property
def wave_length(self):
"""Wave length.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._wave_length
@property
def _wave_length_calcul(self):
"""Wave Length Calutation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
# "c0 * 1000/(patch_frequency * sqrt(patch_eff_permittivity))"
f = self._frequency.name
er_e = self._effective_permittivity.name
patch_wave_length_formula = "(c0 * 1000/(" + f + "* sqrt(" + er_e + ")))mm"
self._wave_length = NamedVariable(
self.application,
self._name + "_wave_length",
self.application.modeler._arg_with_dim(patch_wave_length_formula),
)
return self._wave_length
@property
def length(self):
"""Length.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._length
@property
def _length_calcul(self):
"""Length Calutation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
# "patch_wave_length / 2 - 2 * patch_added_length"
d_l = self._added_length.name
lbd = self._wave_length.name
patch_length_formula = lbd + "/2" + " - 2 * " + d_l
self._length = NamedVariable(self.application, self._name + "_length", patch_length_formula)
return self._length
@property
def impedance(self):
"""Impedance.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._impedance_l_w, self._impedance_w_l
@property
def _impedance_calcul(self):
"""Impedance Calculation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
# "45 * (patch_wave_length/patch_width * sqrt(patch_eff_permittivity)) ** 2"
# "60 * patch_wave_length/patch_width * sqrt(patch_eff_permittivity)"
er_e = self._effective_permittivity.name
lbd = self._wave_length.name
w = self._width.name
patch_impedance_formula_l_w = "45 * (" + lbd + "/" + w + "* sqrt(" + er_e + ")) ** 2"
patch_impedance_formula_w_l = "60 * " + lbd + "/" + w + "* sqrt(" + er_e + ")"
self._impedance_l_w = NamedVariable(
self.application, self._name + "_impedance_l_w", patch_impedance_formula_l_w
)
self._impedance_w_l = NamedVariable(
self.application, self._name + "_impedance_w_l", patch_impedance_formula_w_l
)
self.application.logger.warning(
"The closer the ratio between wave length and the width is to 1,"
" the less correct the impedance calculation is"
)
return self._impedance_l_w, self._impedance_w_l
def create_lumped_port(self, reference_layer, opposite_side=False, port_name=None, axisdir=None):
"""Create a parametrized lumped port.
Parameters
----------
reference_layer : class:`pyaedt.modeler.stackup_3d.Layer3D
The reference layer, in most cases the ground layer.
opposite_side : bool, optional
Change the side where the port is created.
port_name : str, optional
Name of the lumped port.
axisdir : int or :class:`pyaedt.application.Analysis.Analysis.AxisDir`, optional
Position of the port. It should be one of the values for ``Application.AxisDir``,
which are: ``XNeg``, ``YNeg``, ``ZNeg``, ``XPos``, ``YPos``, and ``ZPos``.
The default is ``Application.AxisDir.XNeg``.
Returns
-------
bool
"""
string_position_x = self.position_x.name
if opposite_side:
string_position_x = self.position_x.name + " + " + self.length.name
string_position_y = self.position_y.name + " - " + self.width.name + "/2"
string_position_z = reference_layer.elevation.name
string_width = self.width.name
string_length = (
self._signal_layer.elevation.name
+ " + "
+ self._signal_layer.thickness.name
+ " - "
+ reference_layer.elevation.name
)
port = self.application.modeler.create_rectangle(
csPlane=constants.PLANE.YZ,
position=[string_position_x, string_position_y, string_position_z],
dimension_list=[string_width, string_length],
name=self.name + "_port",
matname=None,
)
if self.application.solution_type == "Modal":
if axisdir is None:
axisdir = self.application.AxisDir.ZPos
port = self.application.create_lumped_port_to_sheet(port.name, portname=port_name, axisdir=axisdir)
elif self.application.solution_type == "Terminal":
port = self.application.create_lumped_port_to_sheet(
port.name, portname=port_name, reference_object_list=[reference_layer.name]
)
return port
class Trace(CommonObject, object):
"""Provides a class to create a trace in stackup."""
def __init__(
self,
application,
frequency,
line_impedance,
line_width,
signal_layer,
dielectric_layer,
line_length=None,
line_electrical_length=90,
line_position_x=0,
line_position_y=0,
line_name="line",
reference_system=None,
axis="X",
):
CommonObject.__init__(self, application)
self._frequency = NamedVariable(application, line_name + "_frequency", str(frequency) + "Hz")
self._signal_layer = signal_layer
self._dielectric_layer = dielectric_layer
self._substrate_thickness = dielectric_layer.thickness
self._position_x = NamedVariable(
application, line_name + "_position_x", application.modeler._arg_with_dim(line_position_x)
)
self._position_y = NamedVariable(
application, line_name + "_position_y", application.modeler._arg_with_dim(line_position_y)
)
self._position_z = signal_layer.elevation
self._dielectric_material = dielectric_layer.material
self._material_name = signal_layer.material_name
self._layer_name = signal_layer.name
self._layer_number = signal_layer.number
self._name = line_name
self._line_thickness = signal_layer.thickness
self._width = None
self._width_h_w = None
self._axis = axis
try:
self._permittivity = NamedVariable(
application, line_name + "_permittivity", float(self._dielectric_material.permittivity.value)
)
except ValueError:
self._permittivity = NamedVariable(
application,
line_name + "_permittivity",
float(application.variable_manager[self._dielectric_material.permittivity.value].value),
)
if isinstance(line_width, float) or isinstance(line_width, int):
self._width = NamedVariable(
application, line_name + "_width", application.modeler._arg_with_dim(line_width)
)
self._effective_permittivity = self._effective_permittivity_calcul
self._wave_length = self._wave_length_calcul
self._added_length = self._added_length_calcul
if isinstance(line_electrical_length, float) or isinstance(line_electrical_length, int):
self._electrical_length = NamedVariable(
application, line_name + "_elec_length", str(line_electrical_length)
)
self._length = self._length_calcul
elif isinstance(line_length, float) or isinstance(line_length, int):
self._length = NamedVariable(
application, line_name + "_length", application.modeler._arg_with_dim(line_length)
)
self._electrical_length = self._electrical_length_calcul
else:
application.logger.error("line_length must be a float.")
self._charac_impedance_w_h, self._charac_impedance_h_w = self._charac_impedance_calcul
elif line_width is None:
self._charac_impedance = NamedVariable(
self.application, line_name + "_charac_impedance_h_w", str(line_impedance)
)
self._width, self._width_h_w = self._width_calcul
self._effective_permittivity = self._effective_permittivity_calcul
self._wave_length = self._wave_length_calcul
self._added_length = self._added_length_calcul
if isinstance(line_electrical_length, float) or isinstance(line_electrical_length, int):
self._electrical_length = NamedVariable(
application, line_name + "_elec_length", str(line_electrical_length)
)
self._length = self._length_calcul
elif isinstance(line_length, float) or isinstance(line_length, int):
self._length = NamedVariable(
application, line_name + "_length", application.modeler._arg_with_dim(line_length)
)
self._electrical_length = self._electrical_length_calcul
else:
application.logger.error("line_length must be a float.")
if reference_system:
application.modeler.set_working_coordinate_system(reference_system)
if axis == "X":
start_point = [
"{0}_position_x".format(self._name),
"{0}_position_y-{0}_width/2".format(self._name),
0,
]
else:
start_point = [
"{0}_position_x-{0}_width/2".format(self._name),
"{}_position_y".format(self._name),
0,
]
self._reference_system = reference_system
else:
application.modeler.create_coordinate_system(
origin=[
"{}_position_x".format(self._name),
"{}_position_y".format(self._name),
signal_layer.elevation.name,
],
reference_cs="Global",
name=line_name + "_CS",
)
application.modeler.set_working_coordinate_system(line_name + "_CS")
if axis == "X":
start_point = [0, "-{0}_width/2".format(self._name), 0]
else:
start_point = ["-{0}_width/2".format(self._name), 0, 0]
self._reference_system = line_name + "_CS"
if signal_layer.thickness:
self._aedt_object = application.modeler.create_box(
position=start_point,
dimensions_list=[
"{}_length".format(self._name),
"{}_width".format(self._name),
signal_layer.thickness.name,
],
name=line_name,
matname=signal_layer.material_name,
)
else:
self._aedt_object = application.modeler.create_rectangle(
position=start_point,
dimension_list=["{}_length".format(self._name), "{}_width".format(self._name)],
name=line_name,
matname=signal_layer.material_name,
)
application.modeler.set_working_coordinate_system("Global")
application.modeler.subtract(blank_list=[signal_layer.name], tool_list=[line_name], keepOriginals=True)
@property
def frequency(self):
"""Frequency.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._frequency
@property
def substrate_thickness(self):
"""Substrate Thickness.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._substrate_thickness
@property
def width(self):
"""Width.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._width
@property
def width_h_w(self):
"""Width H W.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
if self._width_h_w is not None:
return self._width_h_w
@property
def _width_calcul(self):
"""Width calculation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
# if w/h < 2 :
# a = z * sqrt((er + 1) / 2) / 60 + (0.23 + 0.11 / er) * (er - 1) / (er + 1)
# w/h = 8 * exp(a) / (exp(2 * a) - 2)
# else w/h > 2 :
# b = 377 * pi / (2 * z * sqrt(er))
# w/h = 2 * (b - 1 - log(2 * b - 1) * (er - 1) * (log(b - 1) + 0.39 - 0.61 / er) / (2 * er)) / pi
h = self._substrate_thickness.name
z = self._charac_impedance.name
er = self._permittivity.name
a_formula = (
"("
+ z
+ " * sqrt(("
+ er
+ " + 1)/2)/60 + (0.23 + 0.11/"
+ er
+ ")"
+ " * ("
+ er
+ "- 1)/("
+ er
+ "+ 1))"
)
w_div_by_h_inf_2 = "(8 * exp(" + a_formula + ")/(exp(2 * " + a_formula + ") - 2))"
b_formula = "(377 * pi/(2 * " + z + " * " + "sqrt(" + er + ")))"
w_div_by_h_sup_2 = (
"(2 * ("
+ b_formula
+ " - 1 - log(2 * "
+ b_formula
+ " - 1) * ("
+ er
+ " - 1) * (log("
+ b_formula
+ " - 1) + 0.39 - 0.61/"
+ er
+ ")/(2 * "
+ er
+ "))/pi)"
)
w_formula_inf = w_div_by_h_inf_2 + " * " + h
w_formula_sup = w_div_by_h_sup_2 + " * " + h
self._width_h_w = NamedVariable(self.application, self._name + "_width_h_w", w_formula_inf)
self._width = NamedVariable(self.application, self._name + "_width", w_formula_sup)
return self._width, self._width_h_w
@property
def position_x(self):
"""Starting Position X.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._position_x
@property
def position_y(self):
"""Starting Position Y.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._position_y
@property
def permittivity(self):
"""Permittivity.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._permittivity
@property
def _permittivity_calcul(self):
"""Permittivity Calutation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
self._permittivity = self.application.materials[self._dielectric_material].permittivity
return self._permittivity
@property
def added_length(self):
"""Added Length Calutation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._added_length
@property
def _added_length_calcul(self):
"""Added Length Calutation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
# "0.412 * substrate_thickness * (patch_eff_permittivity + 0.3) * (patch_width/substrate_thickness + 0.264)"
# " / ((patch_eff_permittivity - 0.258) * (patch_width/substrate_thickness + 0.813)) "
er_e = self._effective_permittivity.name
h = self._substrate_thickness.name
w = self._width.name
patch_added_length_formula = (
"0.412 * " + h + " * (" + er_e + " + 0.3) * (" + w + "/" + h + " + 0.264)/"
"((" + er_e + " - 0.258) * (" + w + "/" + h + " + 0.813))"
)
self._added_length = NamedVariable(self.application, self._name + "_added_length", patch_added_length_formula)
return self._added_length
@property
def length(self):
"""Length.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._length
@property
def _length_calcul(self):
"""Length Calutation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
# "patch_wave_length / 2 - 2 * patch_added_length"
d_l = self._added_length.name
lbd = self._wave_length.name
e_l = self._electrical_length.name
line_length_formula = lbd + "* (" + e_l + "/360)" + " - 2 * " + d_l
self._length = NamedVariable(self.application, self._name + "_length", line_length_formula)
return self._length
@property
def charac_impedance(self):
"""Characteristic Impedance.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._charac_impedance
@property
def _charac_impedance_calcul(self):
"""Characteristic Impedance Calutation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
# if w / h > 1: 60 * log(8 * h / w + w / (4 * h)) / sqrt(er_e)
# if w / h < 1: 120 * pi / (sqrt(er_e) * (w / h + 1.393 + 0.667 * log(w / h + 1.444)))
w = self._width.name
h = self._dielectric_layer.thickness.name
er_e = self.effective_permittivity.name
charac_impedance_formula_w_h = (
"60 * log(8 * " + h + "/" + w + " + " + w + "/(4 * " + h + "))/sqrt(" + er_e + ")"
)
charac_impedance_formula_h_w = (
"120 * pi / (sqrt(" + er_e + ") * (" + w + "/" + h + "+ 1.393 + 0.667 * log(" + w + "/" + h + " + 1.444)))"
)
self._charac_impedance_w_h = NamedVariable(
self.application, self._name + "_charac_impedance_w_h", charac_impedance_formula_w_h
)
self._charac_impedance_h_w = NamedVariable(
self.application, self._name + "_charac_impedance_h_w", charac_impedance_formula_h_w
)
return self._charac_impedance_w_h, self._charac_impedance_h_w
@property
def effective_permittivity(self):
"""Effective Permittivity.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._effective_permittivity
@property
def _effective_permittivity_calcul(self):
"""Effective Permittivity Calutation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
# "(substrat_permittivity + 1)/2 +
# (substrat_permittivity - 1)/(2 * sqrt(1 + 10 * substrate_thickness/patch_width))"
er = self._permittivity.name
h = self._substrate_thickness.name
w = self._width.name
patch_eff_permittivity_formula = (
"(" + er + " + 1)/2 + (" + er + " - 1)/(2 * sqrt(1 + 10 * " + h + "/" + w + "))"
)
self._effective_permittivity = NamedVariable(
self.application, self._name + "_eff_permittivity", patch_eff_permittivity_formula
)
return self._effective_permittivity
@property
def wave_length(self):
"""Wave Length.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._wave_length
@property
def _wave_length_calcul(self):
"""Wave Length Calutation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
# "c0 * 1000/(patch_frequency * sqrt(patch_eff_permittivity))"
# TODO it is currently only available for mm
f = self._frequency.name
er_e = self._effective_permittivity.name
patch_wave_length_formula = "(c0 * 1000/(" + f + "* sqrt(" + er_e + ")))mm"
self._wave_length = NamedVariable(
self.application,
self._name + "_wave_length",
self.application.modeler._arg_with_dim(patch_wave_length_formula),
)
return self._wave_length
@property
def electrical_length(self):
"""Electrical Length.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
return self._electrical_length
@property
def _electrical_length_calcul(self):
"""Electrical Length calculation.
Returns
-------
:class:`pyaedt.modeler.stackup_3d.NamedVariable`
Variable Object.
"""
lbd = self._wave_length.name
length = self._length.name
d_l = self._added_length.name
elec_length_formula = "360 * (" + length + " + 2 * " + d_l + ")/" + lbd
self._electrical_length = NamedVariable(self.application, self._name + "_elec_length", elec_length_formula)
return self._electrical_length
@pyaedt_function_handler()
def create_lumped_port(self, reference_layer_name, change_side=False):
"""Create a lumped port on the specified line.
Parameters
----------
reference_layer_name : str
Name of the layer on which attach the reference.
change_side : bool, optional
Either if apply the port on one direction or the opposite. Default it is on Positive side.
Returns
-------
:class:`pyaedt.modules.Boundary.BoundaryObject`
Boundary object.
"""
if self._axis == "X":
if change_side:
axisdir = self.application.AxisDir.XNeg
else:
axisdir = self.application.AxisDir.XPos
else:
if change_side:
axisdir = self.application.AxisDir.YNeg
else:
axisdir = self.application.AxisDir.YPos
p1 = self.application.create_lumped_port_between_objects(
reference_layer_name, self.aedt_object.name, axisdir=axisdir
)
z_elev = ""
start_count = False
for k, v in self._signal_layer._stackup.stackup_layers.items():
if k == reference_layer_name or k == self._signal_layer.name:
if not start_count:
start_count = True
else:
start_count = False
elif start_count:
z_elev += "-" + v.thickness.name
self.application.modeler.oeditor.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:Geometry3DCmdTab",
["NAME:PropServers", self._name + ":Move:1"],
["NAME:ChangedProps", ["NAME:Move Vector", "X:=", "0mm", "Y:=", "0mm", "Z:=", z_elev]],
],
]
)
return p1
class Polygon(CommonObject, object):
"""Polygon Class in Stackup3D."""
def __init__(
self,
application,
point_list,
thickness,
signal_layer_name,
poly_name="poly",
mat_name="copper",
is_void=False,
reference_system=None,
):
CommonObject.__init__(self, application)
self._is_void = is_void
self._layer_name = signal_layer_name
self._app = application
pts = []
for el in point_list:
pts.append(
[
application.modeler._arg_with_dim(el[0]),
application.modeler._arg_with_dim(el[1]),
"layer_" + str(signal_layer_name) + "_position",
]
)
if reference_system:
application.modeler.set_working_coordinate_system(reference_system)
self._reference_system = reference_system
else:
application.modeler.create_coordinate_system(
origin=[0, 0, 0], reference_cs="Global", name=poly_name + "_CS"
)
application.modeler.set_working_coordinate_system(poly_name + "_CS")
self._reference_system = poly_name + "_CS"
self._aedt_object = application.modeler.create_polyline(
position_list=pts, name=poly_name, matname=mat_name, cover_surface=True
)
if thickness:
if isinstance(thickness, (float, int)):
application.modeler.sweep_along_vector(self._aedt_object, [0, 0, thickness], draft_type="Natural")
else:
application.modeler.sweep_along_vector(self._aedt_object, [0, 0, thickness.name], draft_type="Natural")
application.modeler.set_working_coordinate_system("Global")
@property
def points_on_layer(self):
"""Object Bounding Box.
Returns
-------
List
List of [x,y] coordinate of bounding box.
"""
bb = self._aedt_object.bounding_box
return [[bb[0], bb[1]], [bb[0], bb[4]], [bb[3], bb[4]], [bb[3], bb[1]]]
class MachineLearningPatch(Patch, object):
"""MachineLearningPatch Class in Stackup3D."""
def __init__(
self,
application,
frequency,
patch_width,
signal_layer,
dielectric_layer,
patch_position_x=0,
patch_position_y=0,
patch_name="patch",
reference_system=None,
axis="X",
):
Patch.__init__(
self,
application,
frequency,
patch_width,
signal_layer,
dielectric_layer,
patch_length=None,
patch_position_x=patch_position_x,
patch_position_y=patch_position_y,
patch_name=patch_name,
reference_system=reference_system,
axis=axis,
)
if not is_ironpython:
try:
joblib
except NameError: # pragma: no cover
raise ImportError("joblib package is needed to run ML.")
path_file = os.path.dirname(__file__)
path_folder = os.path.split(path_file)[0]
training_file = os.path.join(path_folder, "misc", "patch_svr_model_100MHz_1GHz.joblib")
model = joblib.load(training_file)
list_for_array = [
[
self.frequency.numeric_value,
self.width.numeric_value,
self._permittivity.numeric_value,
self.dielectric_layer.thickness.numeric_value,
]
]
array_for_prediction = np.array(list_for_array, dtype=np.float32)
length = model.predict(array_for_prediction)[0]
self.length.expression = application.modeler._arg_with_dim(length)
else: # pragma: no cover
self.application.logger.warning("Machine learning algorithm aren't covered in IronPython.")
| 33.91604 | 119 | 0.574768 | 84,717 | 0.989245 | 0 | 0 | 57,409 | 0.670368 | 0 | 0 | 29,088 | 0.339662 |
bd463c23420373bb21cbb94033d3aa58f68f33b2
| 1,119 |
py
|
Python
|
lib/retainn/curl.py
|
cellularmitosis/retainn
|
9e59024f3b35d4bb0bdf675b9f29369569e2080b
|
[
"MIT"
] | 1 |
2021-06-05T08:40:44.000Z
|
2021-06-05T08:40:44.000Z
|
lib/retainn/curl.py
|
cellularmitosis/retainn
|
9e59024f3b35d4bb0bdf675b9f29369569e2080b
|
[
"MIT"
] | null | null | null |
lib/retainn/curl.py
|
cellularmitosis/retainn
|
9e59024f3b35d4bb0bdf675b9f29369569e2080b
|
[
"MIT"
] | 1 |
2021-10-14T12:28:32.000Z
|
2021-10-14T12:28:32.000Z
|
"""Retainn web-fetching functions."""
try:
# Python 3
from urllib.request import Request, urlopen
except ImportError:
# Python 2
from urllib2 import Request, urlopen
def http_get_deck_and_etag(gist_url):
"""Download the markdown and etag of a deck URL."""
response = urlopen(gist_url + "/raw")
markdown = response.read()
etag = response.headers['etag']
return (markdown, etag)
def http_head_deck_etag(gist_url):
"""Perform a HEAD against gist_url and return the etag."""
class HeadRequest(Request):
def get_method(self):
return 'HEAD'
head_request = HeadRequest(gist_url + '/raw')
response = urlopen(head_request)
headers = response.headers
etag = headers['etag']
return etag
def http_get_deck_if_needed(gist_url, previous_etag):
"""Fetch deck markdown + etag if etag is out of date."""
current_etag = http_head_deck_etag(gist_url)
if current_etag == previous_etag:
return (None, None)
else:
(markdown, current_etag) = http_get_deck_and_etag(gist_url)
return (markdown, current_etag)
| 28.692308 | 67 | 0.680071 | 83 | 0.074173 | 0 | 0 | 0 | 0 | 0 | 0 | 252 | 0.225201 |
bd47557928bc51ca7d2e89e0a88949b5b7b0aaa5
| 1,511 |
py
|
Python
|
data/train/python/bd47557928bc51ca7d2e89e0a88949b5b7b0aaa5urls.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84 |
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/bd47557928bc51ca7d2e89e0a88949b5b7b0aaa5urls.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5 |
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/bd47557928bc51ca7d2e89e0a88949b5b7b0aaa5urls.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24 |
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
from django.conf.urls.defaults import *
urlpatterns = patterns('clwmail.admin.views',
(r'user/manage/page/(?P<page_num>\d{1,})/$' ,'usermanage'),
(r'user/manage/page/$' ,'usermanage'),
(r'user/add/$' ,'useradd'),
(r'user/(?P<userid>.*)/domain/(?P<domain>.*)/edit/$' ,'useredit'),
(r'user/(?P<userid>.*)/domain/(?P<domain>.*)/hide/$' ,'userhide'),
(r'user/(?P<userid>.*)/domain/(?P<domain>.*)/unhide/$' ,'userunhide'),
(r'group/manage/$' ,'groupmanage'),
(r'group/manage/page/(?P<page_num>\d{1,})/$' ,'groupmanage'),
(r'group/(?P<alias>.*)/domain/(?P<domain>.*)/edit/$' ,'groupedit'),
(r'group/(?P<alias>.*)/domain/(?P<domain>.*)/delete/$' ,'groupdelete'),
(r'group/add/$' ,'groupadd'),
(r'domain/(?P<domain_name>.*)/userget/$' ,'getaliasusers'),
(r'domain/manage/$' ,'domainmanage'),
(r'domain/manage/page/(?P<page_num>\d{1,})/$' ,'domainmanage'),
(r'domain/(?P<domain_name>.*)/edit/$' ,'domainedit'),
(r'domain/(?P<domain_name>.*)/delete/$' ,'domaindelete'),
(r'domain/add/$' ,'domainadd'),
(r'genpass/$' ,'genpass'),
(r'' ,'usermanage'),
)
| 65.695652 | 77 | 0.420913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 861 | 0.569821 |
bd481775933800ff7697945131e21bcfb7859fdc
| 670 |
py
|
Python
|
tutorials2/data_loading_tutorial.py
|
xuanyuyt/pytorch-tutorial
|
92076ac56d42da98ea61ce06708bb8c537a49af0
|
[
"MIT"
] | null | null | null |
tutorials2/data_loading_tutorial.py
|
xuanyuyt/pytorch-tutorial
|
92076ac56d42da98ea61ce06708bb8c537a49af0
|
[
"MIT"
] | null | null | null |
tutorials2/data_loading_tutorial.py
|
xuanyuyt/pytorch-tutorial
|
92076ac56d42da98ea61ce06708bb8c537a49af0
|
[
"MIT"
] | null | null | null |
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
# Ignore waring
import warnings
warnings.filterwarnings('ignore')
plt.ion()
landmarks_frame = pd.read_csv('G:/Other_Datasets/faces/face_landmarks.csv')
n = 65
img_name = landmarks_frame.iloc[n, 0]
landmarks = landmarks_frame.iloc[n, 1:].as_matrix()
landmarks = landmarks.astype('float').reshape(-1, 2)
print('Image name: {}'.format(img_name))
print('Landmarks shape: {}'.format(landmarks.shape))
print('First 4 Landmarks: {}'.format(landmarks[:4]))
| 29.130435 | 75 | 0.770149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.2 |
bd48e2bd368062575f8e3c68c1ab5aaaace702be
| 6,020 |
py
|
Python
|
run_validation.py
|
sr9000/stepik_code_task_baking
|
60a5197f659db1734132eeb9d82624f1b7aaeb3f
|
[
"MIT"
] | null | null | null |
run_validation.py
|
sr9000/stepik_code_task_baking
|
60a5197f659db1734132eeb9d82624f1b7aaeb3f
|
[
"MIT"
] | null | null | null |
run_validation.py
|
sr9000/stepik_code_task_baking
|
60a5197f659db1734132eeb9d82624f1b7aaeb3f
|
[
"MIT"
] | null | null | null |
import logging as log
import sys
from extra.introspection import collect_datasets, collect_wrong_solutions
from implementation.checker import output_reader, checker as check
from implementation.solver import input_reader, solver as solve, hinter as get_hint
from pre_definition.solve_caller import call_with_args
from pre_definition.stdio import stdio
class ValidationException(Exception):
pass
class AbsolutelyWrongException(ValidationException):
pass
class PartiallyCorrectException(ValidationException):
pass
def main():
log.info(f'--- Started: {sys.argv[0]} ---')
log.info('collect_datasets')
datasets = collect_datasets()
# datasets is readable
readed_ds = list(reading_datasets(datasets))
log.info('datasets is readable')
# datasets is solvable
solved_ds = list(solving_datasets(readed_ds))
log.info('datasets is solvable')
# solutions is readable
readed_sl = list(reading_solutions(solved_ds))
log.info('solutions is readable')
readed_stripped_sl = list(reading_solutions(solved_ds, stripping=True))
log.info('stripped solutions is readable')
# solutions passing check
assert_solutions(readed_ds, readed_sl, readed_stripped_sl)
log.info('solutions passing check')
log.info('collect wrong solutions')
wrsols = collect_wrong_solutions()
# datasets is wrong_solvable
wrsolved_ds = list(wrong_solving_datasets(readed_ds, wrsols))
log.info('datasets is wrong_solvable')
# wrong solutions is readable
readed_wrsl = list(wrong_reading_solutions(wrsolved_ds))
log.info('wrong solutions is readable')
# wrong solutions fail at least one check (but also should pass at least one)
assert_wrong_solutions(readed_ds, readed_sl, readed_wrsl)
log.info('wrong solutions give representative feedback')
def assert_wrong_solutions(readed_ds, readed_sl, readed_wrsl):
for wrname, wrans in readed_wrsl:
try:
assert_solutions(readed_ds, readed_sl, wrans)
except PartiallyCorrectException:
# expect exception from checking
pass
except AbsolutelyWrongException:
# shit, this wrong solution is absolute garbage
raise ValidationException(f'Wrong solution "{wrname}" did not pass any test, but should.')
except Exception as e:
# something goes wrong
raise ValidationException(f'Wrong solution "{wrname}" goes bad with check.') from e
else:
# shit, we didnt catch wrong solution
raise ValidationException(f'Wrong solution "{wrname}" passed all tests, but should fail at least one.')
def wrong_reading_solutions(wrsolved_ds):
for wrname, wrsols in wrsolved_ds:
try:
yield wrname, list(reading_solutions(wrsols))
except Exception as e:
raise ValidationException(f'Failed to read wrong solution "{wrname}"') from e
def wrong_solving_datasets(readed_ds, wrsols):
for wrname, wrcall in wrsols:
try:
yield wrname, list(solving_datasets(readed_ds, wrcall))
except Exception as e:
raise ValidationException(f'Failed to run wrong solution "{wrname}"') from e
def assert_solutions(readed_ds, readed_sl, answered_sl=None):
if answered_sl is None:
answered_sl = readed_sl
correct = 0
wrong = 0
first_error = None
first_full_name = ''
for (full_name1, indata, _), (full_name2, outdata), (full_name3, ansdata) in zip(readed_ds, readed_sl, answered_sl):
assert full_name1 == full_name2, f'Checking solutions on different names'
assert full_name1 == full_name3, f'Checking solutions on different names'
try:
check(indata, outdata, ansdata)
correct += 1
except Exception as e:
wrong += 1
if first_error is None:
first_error = e
first_full_name = full_name1
if not correct:
raise AbsolutelyWrongException(f'Failed to check solution {first_full_name}') from first_error
elif wrong:
raise PartiallyCorrectException(f'Failed to check solution {first_full_name}') from first_error
def reading_solutions(solved_ds, stripping=False):
for full_name, sl, hint in solved_ds:
try:
sl = sl.strip(' ') if stripping else sl
with stdio(input=sl):
output_data = call_with_args(output_reader, hint)
yield full_name, output_data
except Exception as e:
raise ValidationException(f'Failed to read solution {full_name}') from e
def solving_datasets(readed_ds, solve_func=solve):
for full_name, ds_data, hint in readed_ds:
try:
with stdio(output=True) as cm:
call_with_args(solve_func, ds_data)
r = cm.output_get()
assert r == r.strip(' '), f'Solution of {full_name} did not pass stripping test'
yield full_name, cm.output_get(), hint
except Exception as e:
raise ValidationException(f'Failed to solve dataset {full_name}') from e
def reading_datasets(datasets):
for name, dsgen in datasets:
for dsno, ds in enumerate(dsgen(), start=1):
full_name = f'"{name}" #{dsno}'
try:
with stdio(input=ds):
input_data = input_reader()
hint = call_with_args(get_hint, input_data)
yield full_name, input_data, hint
except Exception as e:
raise ValidationException(f'Failed to read dataset {full_name}') from e
if __name__ == '__main__':
log.basicConfig(level=log.INFO, format='%(asctime)s | %(levelname)s | %(message)s')
try:
main()
except (ValidationException, AssertionError) as e:
log.error(f'Failed to pass validation', exc_info=False)
cause = e
while cause:
log.error(str(cause), exc_info=False)
cause = cause.__cause__
quit(-1)
| 34.797688 | 120 | 0.670598 | 169 | 0.028073 | 1,961 | 0.325748 | 0 | 0 | 0 | 0 | 1,385 | 0.230066 |
bd498e34fef15d05b6f453bd45dc8caf7869dd9c
| 139 |
py
|
Python
|
accounts/schemas/validators/__init__.py
|
aobcvr/rpg_quest_accounts
|
960772a159618194ea26e81d6d874e5f69fbbec7
|
[
"MIT"
] | 2 |
2021-08-15T14:27:37.000Z
|
2021-09-14T10:55:38.000Z
|
accounts/schemas/validators/__init__.py
|
aobcvr/rpg_quest_accounts
|
960772a159618194ea26e81d6d874e5f69fbbec7
|
[
"MIT"
] | 1 |
2021-09-12T12:30:11.000Z
|
2021-09-12T12:30:11.000Z
|
accounts/schemas/validators/__init__.py
|
aobcvr/rpg_quest_accounts
|
960772a159618194ea26e81d6d874e5f69fbbec7
|
[
"MIT"
] | null | null | null |
from .locale import LocaleValidator
from .timezone import TimezoneValidator
__all__ = (
'LocaleValidator',
'TimezoneValidator',
)
| 17.375 | 39 | 0.755396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.258993 |
bd49a1d92154f5da9b36b624b1f7c5c860a48554
| 346 |
py
|
Python
|
remove_duplicates_from_sorted_array.py
|
lutianming/leetcode
|
848c7470ff5fd23608cc954be23732f60488ed8a
|
[
"MIT"
] | null | null | null |
remove_duplicates_from_sorted_array.py
|
lutianming/leetcode
|
848c7470ff5fd23608cc954be23732f60488ed8a
|
[
"MIT"
] | null | null | null |
remove_duplicates_from_sorted_array.py
|
lutianming/leetcode
|
848c7470ff5fd23608cc954be23732f60488ed8a
|
[
"MIT"
] | null | null | null |
class Solution:
# @param a list of integers
# @return an integer
def removeDuplicates(self, A):
length = len(A)
if length <= 1:
return length
index = 1
for i in range(1, length):
if A[i] != A[i-1]:
A[index] = A[i]
index += 1
return index
| 24.714286 | 34 | 0.459538 | 345 | 0.99711 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.135838 |
bd49b8155e5a08f5ca5bd991e7606c828ebf1c3d
| 421 |
py
|
Python
|
apps/algorithms/migrations/0003_auto_20190201_0136.py
|
ScorpioDoctor/StudyAiD1
|
fd37a400a61dc1ae7a3ef6b1273afc3f0daea3e4
|
[
"Apache-2.0"
] | null | null | null |
apps/algorithms/migrations/0003_auto_20190201_0136.py
|
ScorpioDoctor/StudyAiD1
|
fd37a400a61dc1ae7a3ef6b1273afc3f0daea3e4
|
[
"Apache-2.0"
] | null | null | null |
apps/algorithms/migrations/0003_auto_20190201_0136.py
|
ScorpioDoctor/StudyAiD1
|
fd37a400a61dc1ae7a3ef6b1273afc3f0daea3e4
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.0.10 on 2019-02-01 01:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('algorithms', '0002_auto_20190201_0013'),
]
operations = [
migrations.RemoveField(
model_name='algorithmimage',
name='algorithm',
),
migrations.DeleteModel(
name='AlgorithmImage',
),
]
| 20.047619 | 50 | 0.589074 | 335 | 0.795724 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.304038 |
bd49d7f152ceeb7bc9bb00c813b8cb8af0d1c6dc
| 3,704 |
py
|
Python
|
visan/plot/datasetattributespanel.py
|
ercumentaksoy/visan
|
57c9257d80622fc0ab03591db48cc2155bd12f1b
|
[
"MIT",
"BSD-3-Clause"
] | 7 |
2020-04-09T05:21:03.000Z
|
2022-01-23T18:39:02.000Z
|
visan/plot/datasetattributespanel.py
|
ercumentaksoy/visan
|
57c9257d80622fc0ab03591db48cc2155bd12f1b
|
[
"MIT",
"BSD-3-Clause"
] | 7 |
2020-01-05T19:19:20.000Z
|
2020-05-27T09:41:49.000Z
|
visan/plot/datasetattributespanel.py
|
ercumentaksoy/visan
|
57c9257d80622fc0ab03591db48cc2155bd12f1b
|
[
"MIT",
"BSD-3-Clause"
] | 4 |
2020-04-18T14:11:22.000Z
|
2021-11-10T02:27:49.000Z
|
# Copyright (C) 2002-2021 S[&]T, The Netherlands.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import wx
def _isList(obj):
try:
iter(obj)
except Exception:
return False
else:
try:
import numpy
if isinstance(obj, numpy.ndarray):
return True
except Exception:
pass
try:
obj + ''
except Exception:
return True
else:
return False
class DataSetAttributesPanel(wx.Panel):
def __init__(self, parent):
panelstyle = wx.TAB_TRAVERSAL
if wx.Platform == '__WXGTK__':
panelstyle |= wx.SUNKEN_BORDER
wx.Panel.__init__(self, parent, -1, style=panelstyle)
# Create and configure all widgets
self.CreateControls()
self.CreateLayout()
def CreateControls(self):
# Create the two column list for showing attributes
self.attributeList = wx.ListCtrl(self, -1, style=(wx.LC_REPORT | wx.LC_NO_HEADER | wx.LC_VRULES),
size=(100, -1))
self.attributeList.InsertColumn(0, "attribute")
self.attributeList.InsertColumn(1, "value")
def CreateLayout(self):
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.attributeList, 1, wx.EXPAND)
self.SetSizer(sizer)
def UpdateAttributes(self, attributes, keyframe):
self.attributeList.DeleteAllItems()
keys = sorted(attributes.keys())
for key in keys:
value = attributes[key]
if _isList(value):
# try to see if we can use a keyframe index for the value
try:
value = value[keyframe]
except IndexError:
# if the keyframe is out of range, just use the final value
value = value[-1]
except Exception:
pass
self.attributeList.Append([key, value])
self.attributeList.SetColumnWidth(0, wx.LIST_AUTOSIZE)
if wx.Platform == '__WXMSW__':
self.attributeList.SetColumnWidth(0, self.attributeList.GetColumnWidth(0) + 5)
self.attributeList.SetColumnWidth(1, wx.LIST_AUTOSIZE)
| 38.583333 | 105 | 0.660907 | 1,762 | 0.475702 | 0 | 0 | 0 | 0 | 0 | 0 | 1,764 | 0.476242 |
bd49f05f95bdcec75ece665e2dc35ecf557cf5b9
| 3,473 |
py
|
Python
|
iscc_registry/observe.py
|
titusz/iscc-registry
|
def03f420e671ec470070bb09b6a78099f7827da
|
[
"MIT"
] | 3 |
2020-07-06T16:01:54.000Z
|
2020-08-06T11:03:25.000Z
|
iscc_registry/observe.py
|
titusz/iscc-registry
|
def03f420e671ec470070bb09b6a78099f7827da
|
[
"MIT"
] | null | null | null |
iscc_registry/observe.py
|
titusz/iscc-registry
|
def03f420e671ec470070bb09b6a78099f7827da
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Watching for registration events"""
import time
from dataclasses import dataclass, asdict
import iscc_registry
from loguru import logger as log
import iscc
from iscc_registry.conn import db_client
from iscc_registry.publish import get_live_contract
from iscc_registry import tools
from iscc_registry.tools import build_iscc_id
@dataclass
class RegEntry:
iscc: str
actor: str
cid: str = ""
tx_hash: str = ""
block_hash: str = ""
block_num: int = 0
def parse_event(evt):
# encode ISCC
iscc_codes = []
for code_type in ("mc", "cc", "dc", "ic"):
iscc_codes.append(iscc.encode(getattr(evt.args, code_type)))
# encode CIDv0
cid = tools.sha256_to_cid(evt.args.cid)
return RegEntry(
iscc="-".join(iscc_codes),
actor=evt.args.actor,
cid=cid,
tx_hash=evt.transactionHash.hex(),
block_hash=evt.blockHash.hex(),
block_num=evt.blockNumber,
)
def observe(from_block=None, rebuild=False):
"""Watch ISCC-Registry contract events and index new registartion events."""
meta_index = db_client()
if rebuild:
meta_index.clear()
from_block = 0
if from_block is None:
if "height_eth" not in meta_index:
meta_index["height_eth"] = 0
from_block = meta_index["height_eth"]
log.info(f"start observing from block {from_block}")
co = get_live_contract()
event_filter = co.events.Registration.createFilter(fromBlock=from_block)
reg_entry = None
log.info("observe historic registration events")
for event in event_filter.get_all_entries():
reg_entry = parse_event(event)
log.info(f"observing historic {reg_entry}")
index(reg_entry)
if reg_entry:
meta_index["height_eth"] = reg_entry.block_num
log.info("start watching new registration events")
while True:
for event in event_filter.get_new_entries():
reg_entry = parse_event(event)
log.info(f"observing {reg_entry}")
index(reg_entry)
if reg_entry:
meta_index["height_eth"] = reg_entry.block_num
time.sleep(2)
def index(reg_entry: RegEntry) -> str:
meta_index = db_client()
counter = 0
iscc_id = build_iscc_id(iscc_registry.LEDGER_ID_ETH, reg_entry.iscc, counter)
while iscc_id in meta_index:
if meta_index[iscc_id]["actor"] == reg_entry.actor:
log.info(f"updateing {iscc_id} -> {reg_entry}")
meta_index[iscc_id] = asdict(reg_entry)
break
counter += 1
log.info(f"counting up {iscc_id}")
iscc_id = build_iscc_id(iscc_registry.LEDGER_ID_ETH, reg_entry.iscc, counter)
meta_index[iscc_id] = asdict(reg_entry)
log.info(f"indexed {iscc_id} -> {reg_entry}")
return iscc_id
def find_next(reg_entry: RegEntry) -> str:
meta_index = db_client()
counter = 0
iscc_id = build_iscc_id(iscc_registry.LEDGER_ID_ETH, reg_entry.iscc, counter)
while iscc_id in meta_index:
if meta_index[iscc_id]["actor"] == reg_entry.actor:
log.info(
f"Previously registered by same actor. This will be an update: {iscc_id} -> {reg_entry}"
)
return iscc_id
counter += 1
log.info(f"counting up {iscc_id}")
iscc_id = build_iscc_id(iscc_registry.LEDGER_ID_ETH, reg_entry.iscc, counter)
return iscc_id
if __name__ == "__main__":
observe()
| 30.734513 | 104 | 0.657357 | 132 | 0.038007 | 0 | 0 | 143 | 0.041175 | 0 | 0 | 658 | 0.189462 |
bd4bc561fda1c42a4d4d120ce2ef72e50322b064
| 2,670 |
py
|
Python
|
examples/active_replication/flux_egress_operator.py
|
yujialuo/erdos
|
7a631b55895f1a473b0f4d38a0d6053851e65b5d
|
[
"Apache-2.0"
] | null | null | null |
examples/active_replication/flux_egress_operator.py
|
yujialuo/erdos
|
7a631b55895f1a473b0f4d38a0d6053851e65b5d
|
[
"Apache-2.0"
] | null | null | null |
examples/active_replication/flux_egress_operator.py
|
yujialuo/erdos
|
7a631b55895f1a473b0f4d38a0d6053851e65b5d
|
[
"Apache-2.0"
] | null | null | null |
from erdos.data_stream import DataStream
from erdos.message import Message
from erdos.op import Op
from erdos.timestamp import Timestamp
from erdos.utils import setup_logging
import flux_utils
from flux_utils import is_control_stream, is_not_control_stream
from flux_buffer import Buffer
import threading
class FluxEgressOperator(Op):
def __init__(self,
name,
output_stream_name,
ack_stream_name,
num_replicas=2,
log_file_name=None):
super(FluxEgressOperator, self).__init__(name)
self._logger = setup_logging(self.name, log_file_name)
self._output_stream_name = output_stream_name
self._ack_stream_name = ack_stream_name
self._num_replicas = num_replicas
self._buffer = Buffer(num_replicas)
self._lock = threading.Lock()
@staticmethod
def setup_streams(input_streams,
output_stream_name,
ack_stream_name):
input_streams.filter(is_not_control_stream).add_callback(
FluxEgressOperator.on_msg)
input_streams.filter(is_control_stream).add_callback(
FluxEgressOperator.on_control_msg)
return [DataStream(name=output_stream_name,
labels={'back_pressure': 'true'}),
DataStream(name=ack_stream_name,
labels={'ack_stream': 'true'})]
def on_msg(self, msg):
self._lock.acquire()
msg_seq_num = msg.data[0]
# Send ACK message to replica if we have one.
self.get_output_stream(self._ack_stream_name).send(
Message(msg_seq_num, msg.timestamp))
msg.data = msg.data[1] # Remove the output sequence number
# Forward output
self.get_output_stream(self._output_stream_name).send(msg)
# TODO(yika): optionally buffer data until sink sends ACK
self._lock.release()
def on_control_msg(self, msg):
self._lock.acquire()
(control_num, replica_num) = msg.data
if control_num == flux_utils.FluxControllerCommand.FAIL:
# Send REVERSE msg to secondary
msg.data = flux_utils.SpecialCommand.REVERSE
self.get_output_stream(self._ack_stream_name).send(msg)
self._logger.info("Sent REVERSE message to perform takeover.")
elif control_num == flux_utils.FluxControllerCommand.RECOVER:
# TODO(yika): implement catch-up
pass
else:
self._logger.fatal('Unexpected control message {}'.format(msg))
self._lock.release()
def execute(self):
self.spin()
| 37.083333 | 75 | 0.648689 | 2,362 | 0.884644 | 0 | 0 | 571 | 0.213858 | 0 | 0 | 329 | 0.123221 |
bd4c2d2d1aecd9d7ef7769f96a47de90c8225163
| 6,400 |
py
|
Python
|
src/CNN_models/train_model.py
|
ChrisPedder/Medieval_Manuscripts
|
40bfcf9c273385cfd8aa66e63b2fb80078fef33b
|
[
"MIT"
] | null | null | null |
src/CNN_models/train_model.py
|
ChrisPedder/Medieval_Manuscripts
|
40bfcf9c273385cfd8aa66e63b2fb80078fef33b
|
[
"MIT"
] | 5 |
2020-12-28T15:28:35.000Z
|
2022-02-10T03:26:44.000Z
|
src/CNN_models/train_model.py
|
ChrisPedder/Medieval_Manuscripts
|
40bfcf9c273385cfd8aa66e63b2fb80078fef33b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 10 11:07:05 2018
@author: chrispedder
To train the model, run from the top-level dir as:
python3 -m src.CNN_models.train_model --args ...
"""
import numpy as np
import os
import argparse
import json
import tensorflow as tf
from abc import ABC, abstractmethod
from datetime import datetime
from .TFRecordsReader import TFRecordsReader
from ..data.Predictors import (
predictors_options, VGG16Predictor, embedding_sizes)
# Helper function for writing to JSON
def jsonify(obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, np.float32) or isinstance(obj, np.float64):
return float(obj)
elif isinstance(obj, np.int32) or isinstance(obj, np.int64):
return int(obj)
return obj
class ModelTrainer(object):
def __init__(self, args):
self.args = args
self.model = self.build_model()
self.datasets = self.get_train_test_datasets()
self.predictor = predictors_options[args.embedding_model]
@abstractmethod
def build_model(self):
pass
@abstractmethod
def create_model_training_folder(self):
pass
def safe_folder_create(self, folder):
if not os.path.isdir(folder):
os.mkdir(folder)
@abstractmethod
def get_train_test_datasets(self):
pass
@abstractmethod
def write_config_to_json(self):
pass
@abstractmethod
def train(self):
pass
@abstractmethod
def predict(self, data):
pass
class DeterministicModel(ModelTrainer):
def __init__(self, args):
self.epochs = args.epochs
self.batch_size = args.batch_size
self.dropout = args.dropout
self.log_dir = args.log_dir
self.embed_size = embedding_sizes[args.embedding_model]
self.hidden_size = args.hidden_size
super().__init__(args)
def create_model_training_folder(self):
# Check that top level log dir exists, if not, create it
self.safe_folder_create(self.log_dir)
# Next-level log dir based on date, if not already present, create it
now = datetime.now()
date = now.strftime("%d_%m_%Y")
date_dir = os.path.join(self.log_dir, date)
self.safe_folder_create(date_dir)
# Lowest-level log dir based on numbering, if date_dir not empty,
# check that the previous highest index was, and increment by one.
last_index = 0
if len(os.listdir(date_dir)) != 0:
subfolder_list = [x[0] for x in os.walk(date_dir) if os.path.isdir(x[0])]
last_index = max([int(x.split('_')[-1]) for x in subfolder_list[1:]])
model_dir = os.path.join(date_dir, 'model_' + str(last_index + 1))
self.safe_folder_create(model_dir)
return model_dir
def get_train_test_datasets(self):
reader = TFRecordsReader(self.args)
return reader.datasets
def build_model(self):
model = tf.keras.Sequential()
model.add(tf.keras.Input(shape=(self.embed_size,)))
model.add(tf.keras.layers.Dense(self.hidden_size, activation='relu'))
model.add(tf.keras.layers.Dropout(self.dropout))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
return model
def write_config_to_json(self):
args_dict = vars(self.args)
for key, value in args_dict.items():
args_dict[key] = jsonify(value)
json_path = os.path.join(self.logs_folder, 'config.json')
with open(json_path, 'w') as f:
json.dump(args_dict, f)
print(f'Config file written to {json_path}')
def train(self):
self.logs_folder = self.create_model_training_folder()
self.model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
checkpointer = tf.keras.callbacks.ModelCheckpoint(
os.path.join(self.logs_folder, 'checkpoints'),
monitor='val_accuracy',
verbose=1, save_best_only=True,
save_weights_only=True)
# tensorboard = tf.keras.callbacks.TensorBoard(
# log_dir = os.path.join(self.logs_folder, 'tensorboard'),
# histogram_freq = 1,
# write_graph = True,
# write_images = True)
self.model.fit(
x=self.datasets['train'],
epochs=self.epochs,
batch_size=self.batch_size,
validation_data=self.datasets['test'],
callbacks = [checkpointer])
# callbacks = [checkpointer, tensorboard])
self.write_config_to_json()
def predict(self, data):
args_copy = self.args
args_copy.batch_size = 1
pred = self.predictor(args_copy)
outputs = []
for entry in data:
embedding = pred.predict(entry)
out = self.model.predict(embedding)
outputs.append(out)
return outputs
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', help='The size of the batches to use '
'when training the models', type=int,
default=32)
parser.add_argument('--embedding_model', help='which embeddings to '
'use when training the model', type=str,
default='vgg16')
parser.add_argument('--data_dir', help='Path to the data',
type=str, required=True)
parser.add_argument('--epochs', help='How many epochs to train the model '
'for.', type=int, default=50)
parser.add_argument('--dropout', help='How much dropout to apply to model ',
type=float, default=0.5)
parser.add_argument('--log_dir', help='Where to save model weights and '
'config.', type=str, required=True)
parser.add_argument('--hidden_size', help='What hidden sizes to use in '
'model.', type=int, default=256)
parser.add_argument('--learning_rate', help='What learning rate to use in '
'training the model.', type=float, default=0.0001)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
model = DeterministicModel(args)
model.train()
| 33.333333 | 85 | 0.621563 | 4,207 | 0.657344 | 0 | 0 | 364 | 0.056875 | 0 | 0 | 1,369 | 0.213906 |
bd4e4bb56c05d5afc00c0ccb424743f1c99a0f0b
| 8,063 |
py
|
Python
|
pfb_exporter/transform/sqla.py
|
znatty22/pfb-edu
|
24e606895c192b92493c0808d00a10fdf6f5ffa4
|
[
"Apache-2.0"
] | null | null | null |
pfb_exporter/transform/sqla.py
|
znatty22/pfb-edu
|
24e606895c192b92493c0808d00a10fdf6f5ffa4
|
[
"Apache-2.0"
] | null | null | null |
pfb_exporter/transform/sqla.py
|
znatty22/pfb-edu
|
24e606895c192b92493c0808d00a10fdf6f5ffa4
|
[
"Apache-2.0"
] | null | null | null |
"""
Transform SQLAlchemy Models to PFB Schema
"""
import os
import logging
import inspect
import subprocess
from collections import defaultdict
import timeit
from pprint import pformat
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.inspection import inspect as sqla_inspect
from sqlalchemy.orm.properties import ColumnProperty
from sqlalchemy.ext.declarative.api import DeclarativeMeta
from sqlalchemy.exc import NoInspectionAvailable
from pfb_exporter.utils import import_module_from_file, seconds_to_hms
from pfb_exporter.transform.base import Transformer
SQLA_AVRO_TYPE_MAP = {
'primitive': {
'Text': 'string',
'Boolean': 'boolean',
'Float': 'float',
'Integer': 'int',
'String': 'string',
'UUID': 'string',
'DateTime': 'string',
},
'logical': {
'UUID': 'uuid',
'DateTime': None
}
}
class SqlaTransformer(Transformer):
def __init__(self, models_filepath, output_dir, db_conn_url=None):
"""
Constructor
:param models_filepath: path to where the SQLAlchemy models are stored
or will be written if they are generated
:type models_filepath: str
:param output_dir: path where PFB Schema will be written
:type output_dir: str
:param db_conn_url: Connection URL for database. Format depends on
database. See SQLAlchemy documentation for supported databases
"""
super().__init__(models_filepath, output_dir)
self.logger = logging.getLogger(type(self).__name__)
self.db_conn_url = db_conn_url
self.data_dict = {}
self.model_dict = {}
def _transform(self):
"""
Entry point for PFB schema generation.
Called by pfb_exporter.transform.base.Transformer
1. (Optional) Generate SQLAlchemy models from database
2. Import model classes from dir or file
2. Transform SQLAlchemy models to PFB Schema
"""
self.logger.info('Build PFB Schema from SqlAlchemy models')
if self.db_conn_url:
self._generate_models()
self._import_models()
if not (self.db_conn_url or self.model_dict):
raise RuntimeError(
'There are 0 models to generate the PFB file. You must '
'provide a DB connection URL that can be used to '
'connect to a database to generate the models or '
'provide a dir or file path to where the models reside'
)
return self._create_pfb_schema()
def _generate_models(self):
"""
Generate SQLAlchemy models from database
Uses sqlacodegen CLI to generate models
See https://github.com/agronholm/sqlacodegen
"""
# sqlacodegen requires the models to be written to a file
if os.path.isdir(self.models_filepath):
self.models_filepath = os.path.join(
self.models_filepath, 'models.py'
)
# Generate SQLAlchemy models
cmd_str = (
f'sqlacodegen {self.db_conn_url} --outfile {self.models_filepath}'
)
self.logger.debug(f'Building SQLAlchemy models:\n{cmd_str}')
start_time = timeit.default_timer()
output = subprocess.run(
cmd_str, shell=True, stdout=subprocess.PIPE
)
total_time = timeit.default_timer() - start_time
output.check_returncode()
self.logger.debug(f'Time elapsed: {seconds_to_hms(total_time)}')
def _import_models(self):
"""
Import the SQLAlchemy model classes from the Python modules
in models_filepath
"""
self.logger.debug(
f'Importing SQLAlchemy models from {self.models_filepath}'
)
def _import_model_classes_from_file(filepath):
"""
Import the SQLAlchemy models from the Python module at `filepath`
"""
imported_model_classes = []
mod = import_module_from_file(filepath)
# NOTE - We cannot use
# pfb_exporter.utils.import_subclass_from_module here because
# we are unable to use issubclass to test if the SQLAlchemy model
# class is a subclass of its parent
# (sqlalchemy.ext.declarative.api.Base)
# The best we can do is make sure the class is a SQLAlchemy object
# and check that the object is a DeclarativeMeta type
for cls_name, cls_path in inspect.getmembers(mod, inspect.isclass):
cls = getattr(mod, cls_name)
try:
sqla_inspect(cls)
except NoInspectionAvailable:
# Not a SQLAlchemy object
pass
else:
if type(cls) == DeclarativeMeta:
imported_model_classes.append(cls)
return imported_model_classes
if (os.path.isfile(self.models_filepath) and
os.path.splitext(self.models_filepath)[-1] == '.py'):
filepaths = [self.models_filepath]
else:
filepaths = [
os.path.join(root, fn)
for root, dirs, files in os.walk(self.models_filepath)
for fn in files
if os.path.splitext(fn)[-1] == '.py'
]
self.logger.debug(
f'Found {len(filepaths)} Python modules:\n{pformat(filepaths)}'
)
# Add the imported modules to a dict
for fp in filepaths:
classes = _import_model_classes_from_file(fp)
for cls in classes:
self.model_dict[cls.__name__] = cls
self.logger.info(
f'Imported {len(self.model_dict)} SQLAlchemy models:'
f'\n{pformat(list(self.model_dict.keys()))}'
)
def _create_pfb_schema(self):
"""
Transform SQLAlchemy models into PFB schema
"""
self.logger.info('Creating PFB schema from SQLAlchemy models ...')
relational_model = {}
for model_name, model_cls in self.model_dict.items():
self.logger.info(
f'Building schema for {model_name} ...'
)
# Inspect model columns and types
for p in sqla_inspect(model_cls).iterate_properties:
model_schema = defaultdict(list)
if not isinstance(p, ColumnProperty):
continue
if not hasattr(p, 'columns'):
continue
column_obj = p.columns[0]
# Check if foreign key
if column_obj.foreign_keys:
fkname = column_obj.foreign_keys.pop().target_fullname
model_schema['foreign_keys'].append(
{'table': fkname.split('.')[0], 'name': p.key}
)
# Convert SQLAlchemy column type to avro type
stype = type(column_obj.type).__name__
# Get avro primitive type
ptype = SQLA_AVRO_TYPE_MAP['primitive'].get(stype)
if not ptype:
self.logger.warn(
f'⚠️ Could not find avro type for {p}, '
f'SQLAlchemy type: {stype}'
)
attr_dict = {'name': p.key, 'type': ptype}
# Get avro logical type if applicable
ltype = SQLA_AVRO_TYPE_MAP['logical'].get(stype)
if ltype:
attr_dict.update({'logicalType': ltype})
# Get default value for attr
# if column_obj.default:
# attr_dict.update({'default': column_obj.default})
# if column_obj.nullable:
# attr_dict.update({'nullable': column_obj.nullable})
model_schema['attributes'].append(attr_dict)
relational_model[model_cls.__tablename__] = model_schema
return relational_model
| 34.60515 | 79 | 0.583902 | 7,171 | 0.88893 | 0 | 0 | 0 | 0 | 0 | 0 | 3,101 | 0.384406 |
bd4f4238a7747d65be7c026c2c3ecfe16032b5fb
| 104 |
py
|
Python
|
diofant/printing/pretty/__init__.py
|
project-kotinos/diofant___diofant
|
882549ac3a4dac238695aa620c02fce6ca33f9d3
|
[
"BSD-3-Clause"
] | 1 |
2021-08-22T09:34:15.000Z
|
2021-08-22T09:34:15.000Z
|
diofant/printing/pretty/__init__.py
|
project-kotinos/diofant___diofant
|
882549ac3a4dac238695aa620c02fce6ca33f9d3
|
[
"BSD-3-Clause"
] | null | null | null |
diofant/printing/pretty/__init__.py
|
project-kotinos/diofant___diofant
|
882549ac3a4dac238695aa620c02fce6ca33f9d3
|
[
"BSD-3-Clause"
] | null | null | null |
"""ASCII-ART 2D pretty-printer"""
from .pretty import pprint, pprint_use_unicode, pretty, pretty_print
| 26 | 68 | 0.778846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.317308 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.