max_stars_repo_path (string, 3-269) | max_stars_repo_name (string, 4-119) | max_stars_count (int64, 0-191k) | id (string, 1-7) | content (string, 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
tests/test_bip69_bip49.py | hkrugersa/nowallet | 63 | 12790651 | <reponame>hkrugersa/nowallet<filename>tests/test_bip69_bip49.py
# pylint: disable=W0621
import random
import pytest
from pycoin.serialize import b2h
from pycoin.tx.TxOut import TxOut
from pycoin.tx.Spendable import Spendable
from connectrum.svr_info import ServerInfo
from nowallet import bip49
@pytest.fixture
def server():
return ServerInfo("onion",
hostname="fdkhv2bb7hqel2e7.onion",
ports=12345)
def test_serverinfo_class(server):
assert isinstance(server, ServerInfo)
assert server.get_port("t") == ("fdkhv2bb7hqel2e7.onion", 12345, False)
@pytest.fixture
def txout_small_coin_small_script():
return TxOut(0, b"\x00")
@pytest.fixture
def txout_large_coin_small_script():
return TxOut(10, b"\x00")
@pytest.fixture
def txout_small_coin_large_script():
return TxOut(0, b"\xFF")
@pytest.fixture
def txout_large_coin_large_script():
return TxOut(10, b"\xFF")
def test_txout_ordering(txout_small_coin_small_script,
txout_large_coin_small_script,
txout_small_coin_large_script,
txout_large_coin_large_script):
a, b = txout_large_coin_large_script, TxOut(10, b"\xFF")
assert (a.coin_value, b2h(a.script)) == (b.coin_value, b2h(b.script))
txout_list = [txout_small_coin_small_script,
txout_large_coin_small_script,
txout_small_coin_large_script,
txout_large_coin_large_script]
random.shuffle(txout_list)
txout_list.sort(key=lambda txo: (txo.coin_value, b2h(txo.script)))
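    # (editorial note, not in the original test) this sort key mirrors BIP-69's
    # deterministic ordering rule for outputs: sort lexicographically by
    # (amount, scriptPubKey), which is what the asserts below verify.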
assert txout_list[0] == txout_small_coin_small_script
assert txout_list[1] == txout_small_coin_large_script
assert txout_list[2] == txout_large_coin_small_script
assert txout_list[3] == txout_large_coin_large_script
def test_txout(txout_small_coin_small_script):
assert isinstance(txout_small_coin_small_script, TxOut)
assert txout_small_coin_small_script.coin_value == 0
assert txout_small_coin_small_script.script == b"\x00"
@pytest.fixture
def spendable_small_hex_small_vout(txout_small_coin_small_script):
return Spendable.from_tx_out(txout_small_coin_small_script, b"\x00", 0)
@pytest.fixture
def spendable_large_hex_small_vout(txout_large_coin_small_script):
return Spendable.from_tx_out(txout_large_coin_small_script, b"\xFF", 0)
@pytest.fixture
def spendable_small_hex_large_vout(txout_small_coin_large_script):
return Spendable.from_tx_out(txout_small_coin_large_script, b"\x00", 10)
@pytest.fixture
def spendable_large_hex_large_vout(txout_large_coin_large_script):
return Spendable.from_tx_out(txout_large_coin_large_script, b"\xFF", 10)
def test_spendable_ordering(txout_large_coin_large_script,
spendable_small_hex_small_vout,
spendable_large_hex_small_vout,
spendable_small_hex_large_vout,
spendable_large_hex_large_vout):
spendable_list = [spendable_small_hex_small_vout,
spendable_large_hex_small_vout,
spendable_small_hex_large_vout,
spendable_large_hex_large_vout]
random.shuffle(spendable_list)
spendable_list.sort(key=lambda utxo: (utxo.as_dict()["tx_hash_hex"],
utxo.as_dict()["tx_out_index"]))
assert spendable_list[0] == spendable_small_hex_small_vout
assert spendable_list[1] == spendable_small_hex_large_vout
assert spendable_list[2] == spendable_large_hex_small_vout
assert spendable_list[3] == spendable_large_hex_large_vout
def test_spendable(spendable_small_hex_small_vout):
spendable = Spendable.from_tx_out(TxOut(0, b"\x00"), b"\x00", 0)
assert isinstance(spendable, Spendable)
assert spendable.tx_hash == b"\x00"
assert spendable.tx_out_index == 0
@pytest.fixture
def segwitbip32node_from_chbs():
secret = "CORRECT HORSE BATTERY STAPLE".encode("utf-8")
return bip49.SegwitBIP32Node.from_master_secret(secret)
def test_segwitkey_script(segwitbip32node_from_chbs):
script = segwitbip32node_from_chbs.p2wpkh_script()
assert isinstance(script, bytes)
assert script == (b"\x00\x14\xe5\xba\xc1f\xbd[\x9fb\x04" + \
b"\xb1\xb4?\xb3\xc6!\x99qd\xc7\xfe")
def test_segwitkey_script_hash(segwitbip32node_from_chbs):
script_hash = segwitbip32node_from_chbs.p2wpkh_script_hash()
assert isinstance(script_hash, bytes)
assert script_hash == (b"H\x12\xe21\x90\x00:\xc2\xd2\xd7" + \
b"\xe3\x15\x99<\x96\x08\xaea\xac%")
def test_segwitkey_electrumx_spkhash(segwitbip32node_from_chbs):
script_hash = segwitbip32node_from_chbs.electrumx_script_hash()
assert isinstance(script_hash, str)
assert script_hash == ("41d8dc340e750287f1ef920956e1f9ae" + \
"8a724efa9bb3772352118fe26372be97")
def test_segwitkey_address(segwitbip32node_from_chbs):
address = segwitbip32node_from_chbs.p2sh_p2wpkh_address()
assert isinstance(address, str)
assert address == "38G7CQfoej3fZQbHHey7Z1XPUGpVpJv4em"
def test_bech32_segwitkey_address(segwitbip32node_from_chbs):
address = segwitbip32node_from_chbs.bech32_p2wpkh_address()
assert isinstance(address, str)
assert address == "bc1pqq2wtwkpv674h8mzqjcmg0anccsejutycllqmc65qs"
| 1.984375 | 2 |
tests/conftest.py | jeremyjordan/flower-classifier | 6 | 12790652 | import os
import pytest
import torch
import torchvision
from flower_classifier.datasets.csv import CSVDataset
from flower_classifier.datasets.oxford_flowers import OxfordFlowers102Dataset, OxfordFlowersDataModule, split_dataset
from flower_classifier.datasets.random import RandomDataModule
from tests.datasets import TEST_CACHE_DIR
@pytest.fixture(scope="module")
def oxford_dataset() -> torch.utils.data.Dataset:
transforms = [
torchvision.transforms.RandomResizedCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
dataset = OxfordFlowers102Dataset(root_dir=TEST_CACHE_DIR, download=True, transforms=transforms)
return dataset
@pytest.fixture(scope="module")
def oxford_dataloader(oxford_dataset):
dataloader = torch.utils.data.DataLoader(oxford_dataset, batch_size=8, shuffle=False)
return dataloader
@pytest.fixture(scope="module")
def oxford_datamodule():
transforms = [
torchvision.transforms.RandomResizedCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
data_module = OxfordFlowersDataModule(data_dir=TEST_CACHE_DIR, batch_size=32, train_transforms=transforms)
return data_module
@pytest.fixture(scope="module")
def oxford_csv_dataset() -> torch.utils.data.Dataset:
split_dataset(root_dir=TEST_CACHE_DIR, target_dir=TEST_CACHE_DIR)
train_filename = os.path.join(TEST_CACHE_DIR, "train_split.csv")
transforms = [
torchvision.transforms.RandomResizedCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
dataset = CSVDataset(filename=train_filename, transforms=transforms)
return dataset
@pytest.fixture(scope="module")
def oxford_csv_dataloader(oxford_csv_dataset):
dataloader = torch.utils.data.DataLoader(oxford_csv_dataset, batch_size=8, shuffle=False)
return dataloader
@pytest.fixture(scope="module")
def random_datamodule():
data_module = RandomDataModule(batch_size=32)
return data_module
| 2.359375 | 2 |
Pzzzzz/plugins/rss/__init__.py | Pzzzzz5142/animal-forest-QQ-group-bot | 5 | 12790653 | <reponame>Pzzzzz5142/animal-forest-QQ-group-bot
from nonebot import on_command, CommandSession, on_startup
from nonebot.plugin import perm
from nonebot.command import Command, call_command
from nonebot.message import unescape, escape
import asyncio
import asyncpg
import nonebot
from aiocqhttp.exceptions import Error as CQHttpError
from nonebot.argparse import ArgumentParser
import sys
from nonebot.log import logger
from db import db
import cq
from utils import doc
import feedparser as fp
import re
from .utils import sendrss, getrss, handlerss, AutoReply
from .bcr import bcr
from .mrfz import mrfz
from .loli import loli
from .pork_price import pprice
from .bh3 import bh3
from .hpoi import hpoi
from .xlOfficial import xl
from .pixiv import pixiv
import time
__plugin_name__ = "rss 订阅"
NOUPDATE = ["loli", "hpoi"]
NOBROADCAST = ["gcores"]
FULLTEXT = ["pprice"]
BROADCASTGROUP = [
145029700,
]
@nonebot.scheduler.scheduled_job("cron", hour="5", minute="0")
async def _():
bot = nonebot.get_bot()
async with db.pool.acquire() as conn:
values = await conn.fetch("select gid from mg where morningcall = true")
for item in values:
item = item["gid"]
try:
await bot.send_group_msg(
group_id=int(item), message=f"Ciallo~(∠・ω< )⌒★,早上好。"
)
except CQHttpError:
pass
@nonebot.scheduler.scheduled_job("cron", hour="0,6,12,18", minute="0")
async def bk():
bot = nonebot.get_bot()
async with db.pool.acquire() as conn:
ls = await bot.get_group_member_list(
group_id=bot.config.QGROUP, self_id=3418961367
)
await conn.execute("""delete from backup""")
for item in ls:
await conn.execute(
f"""insert into backup values({item['user_id']},'{item['card']}','{item['role']}')"""
)
@nonebot.scheduler.scheduled_job("interval", minutes=20)
async def __():
bot = nonebot.get_bot()
loop = asyncio.get_event_loop()
async with db.pool.acquire() as conn:
values = await conn.fetch("select gid from mg where rss = true")
values = [int(item["gid"]) for item in values]
for key in doc:
if key in NOUPDATE or "pixiv" in key:
continue
asyncio.run_coroutine_threadsafe(
handlerss(
bot,
key,
gtfun(key),
key not in NOBROADCAST,
key in FULLTEXT,
values,
),
loop,
)
@on_command("rss", only_to_me=False)
async def rss(session: CommandSession):
if "subs" in session.state:
async with db.pool.acquire() as conn:
for _, item in session.state["ls"]:
try:
await conn.execute(
"""insert into subs values ({},'{}','{}')""".format(
session.event.user_id, "No Information", item
)
)
await session.send(f"「{doc[item]}」的资讯已添加订阅了!有新资讯发布时,会私信你哦!")
except asyncpg.exceptions.ForeignKeyViolationError:
await session.send(f"貌似系统并没有支持该订阅源的订阅!")
logger.error("no", exc_info=True)
except asyncpg.exceptions.UniqueViolationError:
await session.send(f"你已经添加过「{doc[item]}」的资讯订阅啦!")
except:
await session.send(
f"发生未知错误!错误详细信息已记录了在log中!\n定位 message id 为:{session.event.message_id}"
)
logger.error("some rss issue", exc_info=True)
elif "route" in session.state:
for rt in session.state["ls"]:
resp = await sendrss(
session.event.user_id,
session.bot,
"自定义路由",
None,
getrss,
(1, 1),
route=rt,
)
if resp and session.event.detail_type != "private":
await session.send(
unescape(cq.at(session.event.user_id) + f"「{rt}」的资讯已私信,请查收。")
)
elif "del" in session.state:
async with db.pool.acquire() as conn:
fail = []
success = []
for _, dl in session.state["ls"]:
resp = await conn.execute(
"delete from subs where qid = {} and rss = '{}'".format(
session.event.user_id, dl
)
)
if resp[len("delete ") :] == "0":
fail.append(doc[dl])
else:
success.append(doc[dl])
if len(fail) > 0:
await session.send(
cq.at(session.event.user_id)
+ f"这{'个' if len(fail)==1 else '些'}源「{'、'.join(fail)}」不在你的订阅列表里面哦~"
)
if len(success) > 0:
await session.send(
cq.at(session.event.user_id)
+ f" 取消订阅「{'、'.join(success)}」成功!可喜可贺,可喜可贺!"
)
elif session.state["list"]:
async with db.pool.acquire() as conn:
values = await conn.fetch(
"select * from subs where qid = {}".format(session.event.user_id)
)
if len(values) == 0:
session.finish("貌似你没有订阅任何 rss 源")
await session.send(
cq.at(session.event.user_id)
+ "以下是你已订阅的源:\n{}".format(
"\n".join([doc[i["rss"]] + " - " + i["rss"] for i in values])
)
)
else:
loop = asyncio.get_event_loop()
for item, nm in session.state["ls"]:
asyncio.run_coroutine_threadsafe(
sendrss(
session.event.user_id,
session.bot,
nm,
None,
item,
feedBack=session.event.group_id
if session.event.detail_type != "private"
else False,
),
loop,
)
@rss.args_parser
async def _(session: CommandSession):
if session.is_first_run:
parser = ArgumentParser(session=session)
subparser = parser.add_mutually_exclusive_group()
subparser.add_argument("-s", "--subs", nargs="+", help="订阅指定的 rss 源")
subparser.add_argument("-r", "--route", nargs="+", help="获取自定路由的 rss 源的资讯")
subparser.add_argument("-d", "--delete", nargs="+", help="删除 rss 订阅")
subparser.add_argument(
"-l", "--list", action="store_true", default=False, help="列出已订阅的源"
)
subparser.add_argument("-a", "--add", help="开通rss源")
parser.add_argument("rss", nargs="*", help="获取已存在的 rss 源资讯")
argv = parser.parse_args(session.current_arg_text.strip().split(" "))
session.state["ls"] = []
session.state["list"] = argv.list
if argv.list:
return
if argv.subs != None:
session.state["subs"] = argv.subs
ls = argv.subs
if argv.delete != None:
session.state["del"] = argv.delete
ls = argv.delete
if argv.rss != []:
session.state["rss"] = argv.rss
ls = argv.rss
if argv.route != None:
session.state["route"] = argv.route
session.state["ls"] = argv.route
if len(session.state["ls"]) == 0:
session.finish("查询路由地址不能为空哦!")
return
if argv.add != None:
await session.send(str(session.event.user_id))
result = await add_rss(argv.add.strip(), str(session.event.user_id))
session.finish(result)
ls = list(set(ls))
if session.event.detail_type == "group":
async with db.pool.acquire() as conn:
values = await conn.fetch(
"select safe from mg where gid = {}".format(session.event.group_id)
)
if len(values) > 0 and values[0]["safe"]:
ls = [i for i in ls if "r18" not in i]
for key in doc:
if key in ls[:]:
session.state["ls"].append((gtfun(key), key))
ls.remove(key)
if len(ls) > 0 and " ".join(ls).strip() != "":
await session.send(
unescape(
"没有添加「{}」的订阅源!请联系".format(" ".join(ls)) + cq.at(545870222) + "添加订阅!"
)
)
if len(session.state["ls"]) == 0:
await session.send(
"本次资讯{}为空哦!".format("查看" if session.state["rss"] != [] else "订阅")
)
session.finish(
AutoReply(
"Rss 指令帮助菜单",
"以下是 rss 指令支持的源",
[(i, j) for i, j in doc.items() if "r18" not in i],
)
)
@on_command("订阅", only_to_me=False, shell_like=True)
async def subs(session: CommandSession):
ls = session.current_arg_text.strip(" ")
flg = await call_command(
session.bot,
session.event,
"rss",
current_arg="-s " + ls,
disable_interaction=True,
)
if flg == False:
session.finish("订阅失败")
@on_command("取消订阅", only_to_me=False, shell_like=True)
async def unsubs(session: CommandSession):
ls = session.current_arg_text.strip(" ")
flg = await call_command(
session.bot,
session.event,
"rss",
current_arg="-d " + ls,
disable_interaction=True,
)
if flg == False:
session.finish("取消订阅失败")
@on_command("up", only_to_me=False, shell_like=True, permission=perm.SUPERUSER)
async def up(x):
print(f"started at {time.strftime('%X')}")
bot = nonebot.get_bot()
loop = asyncio.get_event_loop()
async with db.pool.acquire() as conn:
values = await conn.fetch("select gid from mg where rss = true")
values = [int(item["gid"]) for item in values]
for key in doc:
if key in NOUPDATE or "pixiv" in key:
continue
asyncio.run_coroutine_threadsafe(
handlerss(
bot,
key,
gtfun(key),
key not in NOBROADCAST,
key in FULLTEXT,
values,
),
loop,
)
print(f"finished at {time.strftime('%X')}")
@on_command("带礼包", only_to_me=False, shell_like=True, permission=perm.SUPERUSER)
async def _(session: CommandSession):
event = {
"user_id": session.event.user_id,
"message": session.event.message,
"post_type": "message",
"message_type": "private",
"raw_message": session.event.raw_message,
"sub_type": "friend",
}
if session.event.detail_type != "private":
event["message_type"] = "group"
event["sub_type"] = None
event["group_id"] = session.event.group_id
await call_command(
session.bot,
session.event,
"rss",
current_arg="pixiv_day_r18 pixiv_week_r18 pixiv_day_male_r18",
)
def gtfun(name: str):
if "pixiv" in name:
name = "pixiv"
return getattr(sys.modules[__name__], name)
| 2.109375 | 2 |
day 5/1-poisson.py | JackRab/10-days-of-statistics | 0 | 12790654 | """
Objective
In this challenge, we learn about Poisson distributions.
Task
A random variable, X, follows Poisson distribution with mean of 2.5.
Find the probability with which the random variable X is equal to 5.
"""
from math import exp, factorial
def poisson(lam=2.5, k=5):
"""
Return the probability of X=k with possion distribution with mean lam
"""
return lam**k*exp(-lam)/factorial(k)
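# Worked check (editorial note, not part of the original solution):
# P(X = 5) = 2.5**5 * exp(-2.5) / 5! ≈ 97.65625 * 0.0821 / 120 ≈ 0.0668,
# which rounds to 0.067.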
print(round(poisson(), 3)) | 3.890625 | 4 |
tasks.py | bixind/JohnReboot | 2 | 12790655 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import weather.weather as weather
import audio.audio as audio
import aliases.aliases as aliases
import instrate.instrate as instrate
import history.history as history
import pollanalyse.pollanalyse as pollanalyse
import imageprocessing.imageprocessing as images
from time import *
import logging
def defaultModule(com, vk):
return {'message' : 'Nemo omnia potest scire'}
class Dispenser:
def __init__(self, vk):
self.modules = {'погода' : weather.makeWeatherReport,
'аудио' : audio.getAudio,
'иначе' : aliases.setAlias,
'рейтинг' : instrate.getRating,
'история' : history.getHistory,
'опрос' : pollanalyse.getPollInfo,
'шакализировать' : images.getShakalized}
self.vk = vk
def dispense(self, com):
try:
command = com.args[0]
if command in self.modules:
return self.modules[com.args[0]](com, self.vk)
newargs = aliases.getAlias(com.id, command)
if newargs is None:
return defaultModule(com, self.vk)
args = (newargs + com.args[1:])
com = com._replace(args=args)
command = com.args[0]
if command in self.modules:
return self.modules[com.args[0]](com, self.vk)
return defaultModule(com, self.vk)
except Exception as e:
logging.exception(e)
return 'Error'
| 2.21875 | 2 |
backend/apps/common/utils.py | sebastien-prudhomme/models-web-app | 0 | 12790656 | """Common utils for parsing and handling InferenceServices."""
import os
from kubeflow.kubeflow.crud_backend import api, helpers, logging
log = logging.getLogger(__name__)
KNATIVE_REVISION_LABEL = "serving.knative.dev/revision"
FILE_ABS_PATH = os.path.abspath(os.path.dirname(__file__))
INFERENCESERVICE_TEMPLATE_YAML = os.path.join(
FILE_ABS_PATH, "yaml", "inference_service_template.yaml")
def load_inference_service_template(**kwargs):
"""
Return an InferenceService dict, with defaults from the local yaml.
Reads the yaml for the web app's custom resource, replaces the variables
and returns it as a python dict.
kwargs: the parameters to be replaced in the yaml
"""
return helpers.load_param_yaml(INFERENCESERVICE_TEMPLATE_YAML, **kwargs)
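# Hypothetical usage sketch (editorial note; the parameter names are
# illustrative, the real ones are whatever placeholders the yaml template
# defines), e.g.:
#   load_inference_service_template(name="my-isvc", namespace="team-a")
# substitutes the kwargs into the yaml before returning it as a python dict.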
# helper functions for accessing the logs of an InferenceService
def get_inference_service_pods(svc, components=[]):
"""
Return the Pod names for the different isvc components.
Return a dictionary with (endpoint, component) keys,
i.e. ("default", "predictor") and a list of pod names as values
"""
namespace = svc["metadata"]["namespace"]
# dictionary{revisionName: (endpoint, component)}
revisions_dict = get_components_revisions_dict(components, svc)
if len(revisions_dict.keys()) == 0:
return {}
pods = api.list_pods(namespace, auth=False).items
component_pods_dict = {}
for pod in pods:
for revision in revisions_dict:
if KNATIVE_REVISION_LABEL not in pod.metadata.labels:
continue
if pod.metadata.labels[KNATIVE_REVISION_LABEL] != revision:
continue
component = revisions_dict[revision]
curr_pod_names = component_pods_dict.get(component, [])
curr_pod_names.append(pod.metadata.name)
component_pods_dict[component] = curr_pod_names
if len(component_pods_dict.keys()) == 0:
log.info("No pods are found for inference service: %s",
svc["metadata"]["name"])
return component_pods_dict
# FIXME(elikatsis,kimwnasptd): Change the logic of this function according to
# https://github.com/arrikto/dev/issues/867
def get_components_revisions_dict(components, svc):
"""Return a dictionary{revisionId: component}."""
status = svc["status"]
revisions_dict = {}
for component in components:
if "components" not in status:
log.info("Component '%s' not in inference service '%s'",
component, svc["metadata"]["name"])
continue
if component not in status["components"]:
log.info("Component '%s' not in inference service '%s'",
component, svc["metadata"]["name"])
continue
if "latestReadyRevision" in status["components"][component]:
revision = status["components"][component]["latestReadyRevision"]
revisions_dict[revision] = component
if len(revisions_dict.keys()) == 0:
log.info(
"No revisions found for the inference service's components: %s",
svc["metadata"]["name"],
)
return revisions_dict
| 2.265625 | 2 |
apps/trade/migrations/0042_auto_20190326_1424.py | lianxiaopang/camel-store-api | 12 | 12790657 | <reponame>lianxiaopang/camel-store-api<filename>apps/trade/migrations/0042_auto_20190326_1424.py
# Generated by Django 2.1.7 on 2019-03-26 06:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trade', '0041_auto_20190313_1418'),
]
operations = [
migrations.AddField(
model_name='orders',
name='machine_code',
field=models.CharField(blank=True, max_length=20, null=True, verbose_name='支付终端号'),
),
migrations.AlterField(
model_name='orders',
name='model_type',
field=models.CharField(choices=[('ord', '普通订单'), ('sub', '订阅订单'), ('repl', '积分换购订单'), ('qrpay', '线下支付订单')], default='ord', max_length=128, verbose_name='订单类型'),
),
]
| 1.539063 | 2 |
main.py | wur3/music-theor | 1 | 12790658 | from note import Note
from majorScale import MajorScale
from minorScale import MinorScale
print("Hi welcome to my app.\n")
note = None
scale = None
while(True):
# if no scale is chosen
if scale is None:
# choose a note
if note is None:
note = input("Choose a note: ")
menu = ("1. {0} major scale\n"
"2. {0} minor scale\n"
"3. Choose another note\n"
"4. Exit").format(note)
print(menu)
# choose major or minor
optn = input("\nChoose an option: ")
if optn == "1":
scale = MajorScale(note)
elif optn == "2":
scale = MinorScale(note)
elif optn == "3":
note = None
elif optn == "4":
break
else:
print("Invalid option. Try again.\n")
# major scale
if isinstance(scale, MajorScale):
print(scale)
menu = ("1. Get parallel minor\n"
"2. Get relative minor\n"
"3. Choose another note\n"
"4. Exit").format(note)
print(menu)
optn = input("\nChoose an option: ")
if optn == "1":
scale = MinorScale(scale,1)
elif optn == "2":
scale = MinorScale(scale,2)
elif optn == "3":
note = None
scale = None
elif optn == "4":
break
else:
print("Invalid option. Try again.\n")
# minor scale
if isinstance(scale, MinorScale):
print(scale)
menu = ("1. Get parallel major\n"
"2. Get relative major\n"
"3. Choose another note\n"
"4. Exit").format(note)
print(menu)
optn = input("\nChoose an option: ")
if optn == "1":
scale = MajorScale(scale,1)
elif optn == "2":
scale = MajorScale(scale,2)
elif optn == "3":
note = None
scale = None
elif optn == "4":
break
else:
print("Invalid option. Try again.\n")
print("Bye!")
| 3.6875 | 4 |
display.py | majidaldo/boweb | 2 | 12790659 | #this could be in a repo on its own should have used a
#obj oriented approach
"""manages GTK3 broadwayd displays
.. and to minimize bash scripting ugggh
usage:
>displynum, port =display.add()
>display.app('gedit',displaynum) #where gedit is a gtk3 app
you may want to set the limits after import
>import display
>display.DisplayLimit=10
"""
import signal
import os
import atexit
import subprocess
from collections import defaultdict
from time import sleep
import socket
import psutil # optionally used
port2display={}
display2port={}
class LimitError(Exception): val=None; pass
class DisplayLimit(LimitError):
"""a limit to the number of displays"""
val=10;
pass
class ApplicationLimit(LimitError):
"""a limit to the number of applications per display"""
val=10
pass
#should program onappstart onappclose
#todo capture stdio on procs
def get_openport():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('',0))
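    # (editorial note) binding to port 0 asks the OS to assign a free
    # ephemeral port; getsockname() below reports which port was chosen.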
return s.getsockname()[1]
class sequenceg(): #should have used a generator but cool to...
#..hack classes
dc=0
@staticmethod
def getdisplay(self):
self.dc+=1 ;
return self.dc
@staticmethod
def __call__(self):
return self.getdisplay(self)
sequence=lambda p: sequenceg.__call__(sequenceg)
def friendly_display(port,begin=8000):
"""for wehn you want some 'web' ports"""
ret= port-begin
if ret < 0 or port<0:
raise ValueError('neg values')
return ret
def display_is_port(port):
display=port
return display
#functions need to be one to one mappings bw out and in
#port2display_function
p2df=sequence
port2display_function=p2df #don't use the port2dispaly_func ...
#... in the code
#display_is_port#friendly_display#
# class keydefaultdict(defaultdict):
# def __missing__(self, key):
# if self.default_factory is None:
# raise KeyError( key )
# else:
# ret = self[key] = self.default_factory(key)
# return ret
class displaydict(defaultdict):
    #adding issues are covered by add()
def removemapping(self,display):
port2display.pop(display2port.pop(display))
def __delitem__(self, display):
super(displaydict, self).__delitem__(display)
self.removemapping(display)
def pop(self, display):
super(displaydict, self).pop(display)
self.removemapping(display)
#procs assoc with each display
running_displays=displaydict(list)
#lesson learned:
#def add(port,block=True) not a good idea to specify a port
def add(portgetter=get_openport
,block=True):#don't see a reason to not block
    """runs the html5 part of the app returning the display number
    blocks until the display server is up by default"""
    remove_zombie_apps(); kill_zombie_displays()
    if len(running_displays)==DisplayLimit.val:
        raise DisplayLimit(DisplayLimit.val)
    port=portgetter() #not safe. need to reserve port
display=p2df(port)
if display in running_displays:
raise KeyError('display server already running')
else:
if isport_openable(port) is True:
raise ValueError("can't get port "+str(port))
try:
p=subprocess.Popen(['./start_display.sh'
,str(display),str(port)]
#,preexec_fn=os.setsid
)
except: #todo: problem: broadwayd does not exit if it
#cant get the port. it gives back:
#"Can't listen: Error binding to address: Address already in use"
#dont' p.wait
raise Exception("couldn't start display")
#block until 'app' is ready on the port
if block==True:#todo if port given not openable
tries=0
while ( (isport_openable(port) is not True) ):
tries+=1 ; #sometimes it gets stuck here if
#rapid requests
if tries>10: return add(portgetter,block) #not nice
sleep(.1); continue
#registrations
running_displays[display].append(p) #the only reason it's a...
#...default dict.. do i really need defaultdict?
port2display[port]=display;
display2port[display]=port
# port->display should be 1 to 1 mapping
if len(display2port) != len(port2display):
raise Exception('display and port numbers are not 1-to-1')
return display, port
#what happens when the app spawns a window or another proc?
#on multiple gedits only the first one is alive
def app(cmd,display,**kwargs):
"""runs a gtk3 prog on display. """
if (display) not in running_displays:
raise ValueError('display does not exist')
remove_zombie_apps()
if (len(running_displays[display])-1)==ApplicationLimit.val:
raise ApplicationLimit(ApplicationLimit.val)
#kwargs['preexec_fn']=os.setpgid
sp=subprocess.Popen(['./display.sh',cmd,str(display)]
,**kwargs)
running_displays[display].append(sp)
return sp
def isport_openable(port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('127.0.0.1',port)) #if can bind then not busy
s.close()
return False
except: return True
# cr=s.connect_ex(('127.0.0.1', port))
# if cr==0: return True
# else: return cr
def stop(display,signal=signal.SIGKILL):#signal.SIGINT):
# when using this with the server.. can't rely on being nice
# so just kill it
"""stops display and everything running on it"""
if display not in running_displays:
raise KeyError('no display #'+str(display)+' to kill')
#os.killpg(p.pid, signal.SIGTERM)
proclist= running_displays[display]
for p in reversed(proclist):
p.send_signal(signal);
#p.kill()
p.wait()
running_displays.pop(display)
remove_zombie_apps()
def remove_zombie_apps():
#the not immediate
delthese=[]
for adisplay in running_displays:
for an,aproc in enumerate(running_displays[adisplay]):
if an==0:continue #skip the broadway proc
if aproc.poll() is None: continue# running
else: delthese.append( (adisplay,an) )
for adisplay,an in delthese:
#in case empty list
try: running_displays[adisplay].pop(an) #the process...
# ..will be removed by the garbage collector eventually
except: pass
def kill_zombie_displays(really=True):#seems to add robustness...
#stop it if it become a problem
if really is not True: return
for ap in psutil.process_iter():
try: cmdline = ap.cmdline[0]
except: continue
if cmdline == 'broadwayd':
# index 2 is the port
if int(ap.cmdline[2]) not in port2display: ap.kill()
def kill_all():
"""kills all display apps on the server forcefully
...that it knows about that is."""
for ad in running_displays.keys():
stop(ad,signal=signal.SIGKILL)
atexit.register(kill_all) | 2.4375 | 2 |
trace_river/graph.py | andrew-houghton/river-flow-3 | 0 | 12790660 | from collections import UserDict
class Graph(UserDict):
"""Graph for holding which points are merged and connected
Keys are tuple of tuples eg ((1,2), (2,2))
Values are sets of keys eg {((1,2), (2,2)), ((4,2), (1,4))}
Relationships should be maintained in both directions
"""
def __delitem__(self, key):
for neighbour in self[key]:
old_neighbour_value = self[neighbour]
old_neighbour_value.remove(key)
super().__delitem__(key)
def __setitem__(self, key, value):
assert key not in value
for neighbour in value:
old_neighbour_value = self.get(neighbour, set())
old_neighbour_value.add(key)
super().__setitem__(neighbour, old_neighbour_value)
super().__setitem__(key, value)
def __repr__(self):
return f"{type(self).__name__}{self.data}"
| 3.515625 | 4 |
sorts/kth_smallest_ele.py | ami1708/Python | 0 | 12790661 | # //using pivot element and partition and merge sort basically
import random
def swap(A, i, j):
A[i], A[j] = A[j], A[i]
def partition(A, lo, hi):
pivot = A[lo]
i = lo + 1
j = hi
while True:
while A[i] < pivot:
i += 1
if i == hi:
break
while A[j] > pivot:
j -= 1
if j == lo:
break
if j <= i:
break
swap(A, i, j)
swap(A, lo, j)
print(A)
return j
def k_smallest(A, k):
lo = 0
hi = len(A) - 1
k = k - 1
random.shuffle(A)
while hi > lo:
j = partition(A, lo, hi)
if j == k:
return A[k]
elif j > k:
hi = j - 1
else:
lo = j + 1
return A[k]
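# Illustrative check (editorial addition, not in the original script):
# k_smallest([7, 10, 4, 3, 20, 15], 3) returns 7, the 3rd smallest element,
# since the sorted order is [3, 4, 7, 10, 15, 20]. The initial shuffle keeps
# the expected running time linear by avoiding adversarial pivot orderings.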
if __name__ == '__main__':
test_case = int(input())
for _ in range(test_case):
number_of_elements = int(input())
A = [int(x) for x in input().strip().split(' ')]
k = int(input())
print(k_smallest(A, k)) | 3.78125 | 4 |
test/end-to-end/Exceptions/Python/main.py | HighSchoolHacking/GLS-Draft | 30 | 12790662 | try:
raise Exception("Oh no!")
except Exception as error:
print("Found an error.")
finally:
# ...
| 2.375 | 2 |
util/api/core.py | IRIS-Team/tchecker | 17 | 12790663 | <gh_stars>10-100
import cfscrape, requests, json, os, sys, threading
from bs4 import BeautifulSoup as bs
from requests.adapters import HTTPAdapter
from fake_headers import Headers
from urllib3.util.ssl_ import create_urllib3_context
from util.core import *
from util.emails.core import *
from util.scraper.core import *
def banner():
if os.name == 'nt':
_ = os.system('cls')
else:
_ = os.system('clear')
print (f''' {colours.text}{colours.darktext} {colours.main}z
{colours.darktext} {colours.main}z {colours.darktext} _____ ___ _ _
{colours.main} {colours.darktext} |_ _/ __| |_ ___ __| |___ _
{colours.main} z{colours.darktext} | || (__| ' \/ -_) _| / / '_|
{colours.main} ᓚᘏᗢ {colours.darktext} |_| \___|_||_\___\__|_\_\_| {colours.sencondary}0.1{colours.text}
{colours.darktext}▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬{colours.darktext}
''')
def menu():
print(f'''{returnColor("[1]")} Target User
{returnColor("[2]")} Target Email
{returnColor("[3]")} Verified User Scraper
{returnColor("[4]")} Gif Avatar Scraper
''')
choice = int(input(f'{returnColor(">")} '))
if choice == 1:
target = input(f'Username: {returnColor("@")}')
check = checkUsername(target)
if check == False: exit('Invalid Handle')
else: print(f'Registed: {returnColor(check)}')
twitterRequest = scraper(target)
if choice == 2:
target = input(f'Email Address: ')
if checkEmail(target) == True: print(f'Email has been {returnColor("Taken")}')
else: print(f'No account with this email')
if choice == 3:
verifyScraper()
if choice == 4:
gifScraper()
def brutedomain(email, chars) -> str:
guesses = []
domain_file = open('files/emails.txt', 'r').readlines()
provider = email.split('@')[1]
for domain in domain_file:
domain = domain.rstrip()
if provider[0] == domain[0]:
if len(provider.split('.')[0]) == len(domain.split('.')[0]):
guesses.append(email.split('@')[0]+"@"+domain)
print(f'Possible Domain - {email.split("@")[0]}@{domain}')
return guesses[-1]
def scraper(target: str) -> str:
url = "https://api.twitter.com/graphql/P8ph10GzBbdMqWZxulqCfA/UserByScreenName?variables=%7B%22screen_name%22%3A%22" + target + "%22%2C%22withHighlightedLabel%22%3Atrue%7D"
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9,bn;q=0.8",
'authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA',
"content-type": "application/json",
"dnt": "1",
'origin': 'https://twitter.com',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-site',
'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Mobile Safari/537.36',
'x-twitter-active-user': 'yes',
'x-twitter-client-language': 'en'
}
resp = json.loads(requests.get(url, headers=headers).text)
try:
if resp["data"]["user"]["id"] in resp:
pass
except:
try:
err = resp["errors"][0]["message"]
if "Not found" == err:
print(f'{colours.error}•{colours.text} Username Not Found On Twitter')
else:
print(err)
except:
print(f'{colours.error}•{colours.text} Username Not Found On Twitter')
bio = resp["data"]["user"]["legacy"]["description"]
followers = resp["data"]["user"]["legacy"]["followers_count"]
location = resp["data"]["user"]["legacy"]["location"]
name = resp["data"]["user"]["legacy"]["name"]
Id = resp["data"]["user"]["id"]
created = resp["data"]["user"]["legacy"]["created_at"]
if location == '':
location = 'Unknown'
if bio == '':
bio = 'Unknown'
class CustomAdapter(HTTPAdapter):
def init_poolmanager(self, *args, **kwargs):
ctx = create_urllib3_context()
super(CustomAdapter, self).init_poolmanager(
*args, ssl_context=ctx, **kwargs
)
try:
url = 'https://twitter.com/account/begin_password_reset'
header = Headers(browser='chrome', os='win', headers=True)
scraper = cfscrape.create_scraper()
scraper.mount('https://', CustomAdapter())
req = scraper.get(url, headers=header.generate())
soup = bs(req.text, 'html.parser')
authenticity_token = soup.input.get('value')
data = {'authenticity_token': authenticity_token, 'account_identifier': target}
cookies = req.cookies
response = scraper.post(url, cookies=cookies, data=data, headers=header.generate())
soup2 = bs(response.text, 'html.parser')
try:
if (
soup2.find('div', attrs={'class': 'is-errored'}).text
== 'Please try again later.'
):
exit(f'{colours.error}Rate Limit{colours.text}')
except:
pass
try:
info = soup2.find('ul', attrs={'class': 'Form-radioList'}).findAll('strong')
except:
exit(f'{colours.error}Rate Limit{colours.text}')
try:
phone = int(info[0].text)
email = str(info[1].text)
except:
email = str(info[0].text)
phone = 'None'
except Exception as e:
exit(f'{colours.error}{e}{colours.text}')
email = brutedomain(email, None)
return [name, email, phone]
def checkUsername(username) -> str:
url = f"https://api.twitter.com/graphql/P8ph10GzBbdMqWZxulqCfA/UserByScreenName?variables=%7B%22screen_name%22%3A%22{username}%22%2C%22withHighlightedLabel%22%3Atrue%7D"
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9,bn;q=0.8",
'authorization': '<KEY>',
"content-type": "application/json",
"dnt": "1",
'origin': 'https://twitter.com',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-site',
'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Mobile Safari/537.36',
'x-twitter-active-user': 'yes',
'x-twitter-client-language': 'en'
}
resp = json.loads(requests.get(url, headers=headers).text)
try:
return resp["data"]["user"]["legacy"]["created_at"]
except:
try:
if "Not found" == resp["errors"][0]["message"]:
return False
else:
return False
except:
return False
| 2.546875 | 3 |
gs_energy-prediction/scripts/reoptimize_ucc.py | cgyurik/qml_for_qchem | 0 | 12790664 | <reponame>cgyurik/qml_for_qchem<gh_stars>0
import os
import sys
import json
# pylint: disable=wrong-import-position
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
sys.path.append(ROOT_DIR)
from convoQC.scripts.optimize_ucc import optimize_ucc
from convoQC.utils import load_ucc_data, DATA_DIR, encode_complex_and_array
# pylint: enable=wrong-import-position
UCC_REOPT_DIR = DATA_DIR + 'ucc_reoptimized/'
usage = ('Usage: python {} <target_filename> <init_params_filename>'
.format(sys.argv[0]))
if len(sys.argv) != 3:
    raise Exception(usage)
if not (isinstance(sys.argv[1], str) and isinstance(sys.argv[2], str)):
    raise TypeError('The filename arguments must be strings.\n' + usage)
target_filename = sys.argv[1]
source_filename = sys.argv[2]
existing_ucc_reoptimized_files = os.listdir(UCC_REOPT_DIR)
if (target_filename + '.json' in existing_ucc_reoptimized_files):
    print('The file data/ucc_reoptimized/{}.json exists already. Exiting.'.format(target_filename))
exit()
source_ucc_dict = load_ucc_data(source_filename)
init_params = source_ucc_dict['params']
target_ucc_dict = optimize_ucc(target_filename, init_params)
print('saving data to file.')
with open(UCC_REOPT_DIR + target_filename + '.json', 'wt') as f:
json.dump(target_ucc_dict, f, default=encode_complex_and_array)
print(*((k, v) for k, v in target_ucc_dict.items()), sep='\n')
| 2.25 | 2 |
pywinutiltools/__init__.py | riag/pywinutiltools | 0 | 12790665 | <filename>pywinutiltools/__init__.py
__version__ = '0.1.0'
from pywinutiltools import powershell
| 1.109375 | 1 |
aiosqlite/__version__.py | PikalaxALT/aiosqlite | 7 | 12790666 | __version__ = "0.16.1"
| 1.039063 | 1 |
ProLib.py | marromlam/quick-memos | 0 | 12790667 | <reponame>marromlam/quick-memos
# -*- coding: UTF-8 -*-
##########################################################################################
# Importing packages. ####################################################################
import numpy as np
import csv
import re
import os
import ast
import math
import sympy as sp
import scipy as sc
from scipy import stats
import sys
import time
from termcolor import colored, cprint
import subprocess
from subprocess import DEVNULL, STDOUT, check_call
from sympy.parsing.sympy_parser import (parse_expr,standard_transformations,
implicit_multiplication)
transformations = standard_transformations + (implicit_multiplication,)
separ = '--------------------------------------------'
sectspa = ' '
##########################################################################################
##########################################################################################
# Variable extractor class and function. #################################################
class IdentifierExtractor(ast.NodeVisitor):
def __init__(self):
self.ids = set()
def visit_Name(self, node):
self.ids.add(node.id)
def VariableExtractor(FUN):
extractor = IdentifierExtractor()
extractor.visit(ast.parse(FUN))
extractor.ids = extractor.ids - set(vars(math))
return extractor.ids
##########################################################################################
##########################################################################################
# Round uncertainty functions. ###########################################################
def NumPow(X):
Y = np.around(np.log10(abs(X)));
Y = Y - (10 ** Y > abs(X));
return Y
def UncRound(x, ux):
if type(x) is float:
x = np.array([[x]])
ux = np.array([[ux]])
elif type(x) is int:
x = np.array([[x]])
ux = np.array([[ux]])
elif type(x) is np.ndarray:
try:
x.shape[1]
except:
x = x[:, np.newaxis]
ux = ux[:, np.newaxis]
n = NumPow(ux)
Y = np.concatenate((x / (10 ** n), ux / (10 ** n)), axis=1)
Y = np.concatenate((n, np.around(10 * Y) / 10), axis=1)
# Correction if exact decimal in round.
f, c = Y.shape
for l in range(0, f):
if Y[l][2] == 10:
naux = n[l] + 1; xaux = x[l][0]; uxaux = ux[l][0]
yaux = np.array([xaux, uxaux])
Y[l] = np.concatenate((naux, np.around(10*yaux)/10), axis=0)
return Y
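# Editorial example for UncRound above (not in the original file):
# UncRound(9.81, 0.037) scales both values by 10**2 (the uncertainty's
# leading digit sits at 10^-2) and returns [-2, 981.0, 3.7], i.e.
# (981.0 +/- 3.7) x 10^-2, which UncPrint (below) renders as
# "981.0(3.7) x 10[-2.0]".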
def UncPrint(x, ux):
try:
aux1 = UncRound(x, ux)
print(' ' + str(aux1[1]) + '(' + str(aux1[2]) + ') x 10[' + str(aux1[0]) + ']')
except:
aux1 = UncRound(x, ux); aux1 = aux1[0]
print(' ' + str(aux1[1]) + '(' + str(aux1[2]) + ') x 10[' + str(aux1[0]) + ']')
##########################################################################################
##########################################################################################
# Export table to LaTeX document. ########################################################
def TableToTeX(MAT, CAP, SYM, UNI, ppath):
f, c = MAT.shape
C = np.zeros([f, int((3 / 2) * (c))], )
for l in range(1, c, 2):
B = UncRound(MAT[:, [l - 1]], MAT[:, [l]])
C[:, [int((3 / 2) * (l + 1) - 3)]] = B[:, [0]]
C[:, [int((3 / 2) * (l + 1) - 2)]] = B[:, [1]]
C[:, [int((3 / 2) * (l + 1) - 1)]] = B[:, [2]]
with open(ppath + 'export_TeX' + '.txt', 'a') as aux:
aux.write('\\begin{table}[H] \n\\centering\n')
aux.write('\\begin{tabular}{|')
for m in range(0, int((1 - 1 / 2) * c)):
aux.write('c|')
aux.write('} \\hline\n')
# Headings.
for n in range(0, int(c / 2)):
if n == c / 2 - 1:
aux.write('$ ' + SYM[n] + ' \\ \\mathrm{(' + UNI[n] + ')} $')
aux.write('\\\\ \\hline \\hline\n')
else:
aux.write('$ ' + SYM[n] + ' \\ \\mathrm{(' + UNI[n] + ')} $ & ')
# All rows and cols iterative.
for o in range(0, f):
for p in range(0, int(3 * c / 2 - 2), 3):
if p == int(3 * c / 2 - 3):
if C[o, [int(p)]] == -np.inf:
aux.write('$ ( ' + str(float(C[o, [int(p + 1)]])) + ' \\pm ' \
+ str(float(C[o, [int(p + 2)]])) + \
' ) \\times 10^{\\infty} $ ')
else:
aux.write('$ ( ' + str(float(C[o, [int(p + 1)]])) + ' \\pm ' \
+ str(float(C[o, [int(p + 2)]])) + \
' ) \\times 10^{' + str(int(C[o, [int(p)]])) + '} $ ')
else:
if C[o, [int(p)]] == -np.inf:
aux.write('$ ( ' + str(float(C[o, [int(p + 1)]])) + ' \\pm ' \
+ str(float(C[o, [int(p + 2)]])) + \
' ) \\times 10^{\\infty} $ & ')
else:
aux.write('$ ( ' + str(float(C[o, [int(p + 1)]])) + ' \\pm ' \
+ str(float(C[o, [int(p + 2)]])) + \
' ) \\times 10^{' + str(int(C[o, [int(p)]])) + '} $ & ')
aux.write('\\\\ \\hline\n')
        # Final activities: caption and ending environment.
aux.write('\\end{tabular}\n\\caption{' + CAP + '}\n\\end{table}\n\n\n\n')
##########################################################################################
##########################################################################################
# Preview LaTeX table. ###################################################################
def PreviewTableTeX(MAT, CAP, SYM, UNI, ppath):
f, c = MAT.shape
C = np.zeros([f, int((3 / 2) * (c))], )
for l in range(1, c, 2):
B = UncRound(MAT[:, [l - 1]], MAT[:, [l]])
C[:, [int((3 / 2) * (l + 1) - 3)]] = B[:, [0]]
C[:, [int((3 / 2) * (l + 1) - 2)]] = B[:, [1]]
C[:, [int((3 / 2) * (l + 1) - 1)]] = B[:, [2]]
with open(ppath + 'preview_TeX' + '.tex', 'w') as aux:
aux.write('\\documentclass[varwidth=true,border=10pt,convert={size=640x}]{standalone}\n')
aux.write('\\usepackage{graphicx,float}\n')
aux.write('\\usepackage[utf8]{inputenc}')
aux.write('\\usepackage[T1]{fontenc}\n')
aux.write('\\begin{document}\n')
aux.write('\\begin{table}[H] \n\\centering\n')
aux.write('\\resizebox{12cm}{!}{\\begin{tabular}{|')
for m in range(0, int((1 - 1 / 2) * c)):
aux.write('c|')
aux.write('} \\hline\n')
# Headings.
for n in range(0, int(c / 2)):
if n == c / 2 - 1:
aux.write('$ ' + SYM[n] + ' \\ \\mathrm{(' + UNI[n] + ')} $')
aux.write('\\\\ \\hline \\hline\n')
else:
aux.write('$ ' + SYM[n] + ' \\ \\mathrm{(' + UNI[n] + ')} $ & ')
# All rows and cols iterative.
for o in range(0, f):
for p in range(0, int(3 * c / 2 - 2), 3):
if p == int(3 * c / 2 - 3):
if C[o, [int(p)]] == -np.inf:
aux.write('$ ( ' + str(float(C[o, [int(p + 1)]])) + ' \\pm ' + str(float(C[o, [int(p + 2)]])) + ' ) \\times 10^{\\infty} $ ')
else:
aux.write('$ ( ' + str(float(C[o, [int(p + 1)]])) + ' \\pm ' + str(float(C[o, [int(p + 2)]])) + ' ) \\times 10^{' + str(int(C[o, [int(p)]])) + '} $ ')
else:
if C[o, [int(p)]] == -np.inf:
aux.write('$ ( ' + str(float(C[o, [int(p + 1)]])) + ' \\pm ' + str(float(C[o, [int(p + 2)]])) + ' ) \\times 10^{\\infty} $ & ')
else:
aux.write('$ ( ' + str(float(C[o, [int(p + 1)]])) + ' \\pm ' + str(float(C[o, [int(p + 2)]])) + ' ) \\times 10^{' + str(int(C[o, [int(p)]])) + '} $ & ')
aux.write('\\\\ \\hline\n')
        # Final activities: caption and ending environment.
aux.write('\\end{tabular}}\n\\caption{'+CAP+'}\n\\end{table}\n\\end{document}\n')
    # Compiling
check_call(['/usr/local/texlive/2017/bin/x86_64-darwin/pdflatex', ppath + 'preview_TeX.tex'], stdout=DEVNULL, stderr=STDOUT)
os.system('rm preview_TeX.log')
os.system('rm preview_TeX.aux')
os.system('rm ' + ppath + 'preview_TeX.tex')
#os.system('open -a Preview.app ' + '/Users/marcos/Documents/Python/ProjectMaker/' + 'preview_TeX.pdf')
os.system('open preview_TeX.pdf')
os.system('rm preview_TeX.pdf')
##########################################################################################
##########################################################################################
# Wolfram. ###############################################################################
def WolframEx(MAT, CAP, SYM, UNI, ppath):
f, c = MAT.shape
if c==4:
with open(ppath + 'export_WMT' + '.txt', 'a') as aux:
aux.write(separ + 'x' + separ + '\n\n$PlotTheme = "Classic";\nNeeds["ErrorBarPlots`"];\n\n')
# Points.
aux.write('data={\n'); p = 0
for l in range(0, f):
if l == int(f - 1):
aux.write('{' + str(float(MAT[l, [int(p + 0)]])) + ',' + str(float(MAT[l, [int(p + 2)]])) +'}')
else:
aux.write('{' + str(float(MAT[l, [int(p + 0)]])) + ',' + str(float(MAT[l, [int(p + 2)]])) + '},\n')
aux.write('};\n\n')
# Errorbars.
aux.write('EL=ErrorListPlot[{\n'); p = 0
for o in range(0, f):
if o == int(f-1):
aux.write('{{' + str(float(MAT[o, [int(p + 0)]])) + ',' + str(float(MAT[o, [int(p + 2)]])) + '},ErrorBar[' + str(float(MAT[o, [int(p + 1)]])) + ',' + str(float(MAT[o, [int(p + 3)]])) + ']}')
else:
aux.write('{{' + str(float(MAT[o, [int(p + 0)]])) + ',' + str(float(MAT[o, [int(p + 2)]])) + '},ErrorBar[' + str(float(MAT[o, [int(p + 1)]])) + ',' + str(float(MAT[o, [int(p + 3)]])) + ']},\n')
aux.write('}];\n\n')
            # Final activities: caption and ending environment.
aux.write('LP = ListPlot[data];\n')
aux.write('LL = ListLinePlot[data];\n')
aux.write('Show[LP, LL, Frame -> True, FrameLabel -> {"' + SYM[0] + '(' + UNI[0] + ')", "' + SYM[1] + '(' + UNI[1] + ')"},AspectRatio -> 1 / GoldenRatio]\n\n')
else:
print(' Exporter can not export the selected data to Wolfram: bad selection.')
##########################################################################################
##########################################################################################
# Load ALL variables in a database. ######################################################
def LoadVar(ppath, database):
file = ppath + str(database) + '.csv';
Data = {}
if os.path.isfile(file) is True:
with open(file, newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
q = list(spamreader)
print('# Loading ' + str(file) + '.')
aux = np.zeros(len(q[0]) - 2, )
for l in range(0, len(q)):
row = q[l]; nam = q[0]
if l == 0:
for m in range(0,int(len(row)/2)):
Data[str(row[2*m])] = {}
elif l == 1:
for m in range(0,int(len(row)/2)):
Data[str(nam[2*m])]['sym'] = row[2*m+0]
Data[str(nam[2*m])]['uni'] = row[2*m+1]
Data[str(nam[2*m])]['dat'] = np.zeros(len(q)-2,)
Data[str(nam[2*m])]['unc'] = np.zeros(len(q)-2,)
else:
for m in range(0,int(len(row)/2)):
if row[2*m+0] != '':
Data[str(nam[2*m])]['dat'][l-2] = float(row[2*m+0])
else:
Data[str(nam[2*m])]['dat'] = \
np.delete(Data[str(nam[2*m])]['dat'], \
l - 2 - aux[m - 1])
if row[2*m+1] != '':
Data[str(nam[2*m])]['unc'][l - 2] = \
float(row[2*m+1])
else:
Data[str(nam[2*m])]['unc'] = \
np.delete(Data[str(nam[2*m])]['unc'], \
l - 2 - aux[m - 1])
aux[m - 1] = aux[m - 1] + 1
print(' Load file success.')
if len(Data) is 0:
print(' Nothing to load.')
Data = {}
else:
print(' Database does not exist!')
Data = {}
return Data
##########################################################################################
##########################################################################################
# Store ONE variable in a database. ######################################################
def StoreVar(vardata, varname, ppath, database):
Data = LoadVar(ppath,database)
print(' Creating label in database dictionary.')
Data[str(varname)] = vardata
# Searching the biggest vector in Data
aux1 = 0
for l in range(0, len(Data.keys())):
aux1 = max(len(Data[list(Data.keys())[l]]['dat']), aux1)
# Headers of data table
for l in range(0, len(Data.keys())):
if len(Data.keys()) == 1:
aux = list(Data.keys())[l]
rowNAME = str(aux) + ',' + ' ' + '\n'
rowSYUN = str(Data[list(Data.keys())[l]]['sym']) + ',' + \
str(Data[list(Data.keys())[l]]['uni']) + '\n'
print(' Variable name(s) are stored.')
else:
if l == 0:
aux = list(Data.keys())[l]
rowNAME = str(aux) + ',' + ' '
rowSYUN = str(Data[list(Data.keys())[l]]['sym']) + ',' + \
str(Data[list(Data.keys())[l]]['uni'])
print(' Variable symbols and units are stored.')
elif l == len(Data.keys()) - 1:
aux = list(Data.keys())[l]
rowNAME = rowNAME + ',' + str(aux) + ',' + ' ' + '\n'
rowSYUN = rowSYUN + ',' + \
str(Data[list(Data.keys())[l]]['sym']) + ',' + \
str(Data[list(Data.keys())[l]]['uni']) + '\n'
else:
aux = list(Data.keys())[l]
rowNAME = rowNAME + ',' + str(aux) + ',' + ' '
rowSYUN = rowSYUN + ',' + Data[list(Data.keys())[l]]['sym'] + \
',' + Data[list(Data.keys())[l]]['uni']
with open(ppath + database + '.csv', 'w') as aux:
aux.write(rowNAME)
aux.write(rowSYUN)
# Writing data and uncertainty row by row
for l in range(0, max(aux1, len(vardata['dat']))):
for m in range(0, len(Data.keys())):
if len(Data.keys()) == 1: # 1 var
rowDAUN = str(float(Data[list(Data.keys())[m]]['dat'][l])) + \
',' + str(float(Data[list(Data.keys())[m]]['unc'][l])) + '\n'
aux.write(rowDAUN)
else:
if m == 0:
if l <= len((Data[list(Data.keys())[m]]['dat'])) - 1:
rowDAUN = \
str(float(Data[list(Data.keys())[m]]['dat'][l])) + ',' + \
str(float(Data[list(Data.keys())[m]]['unc'][l]))
else:
rowDAUN = \
'' + ',' + \
''
elif m == len(Data.keys()) - 1:
if l <= len((Data[list(Data.keys())[m]]['dat'])) - 1:
rowDAUN = rowDAUN + ',' + \
str(float(Data[list(Data.keys())[m]]['dat'][l])) + ',' + \
str(float(Data[list(Data.keys())[m]]['unc'][l])) + '\n'
else:
rowDAUN = rowDAUN + ',' + \
'' + ',' + \
'' + '\n'
aux.write(rowDAUN)
else:
if l <= len((Data[list(Data.keys())[m]]['dat'])) - 1:
rowDAUN = rowDAUN + ',' + \
str(float(Data[list(Data.keys())[m]]['dat'][l])) + ',' + \
str(float(Data[list(Data.keys())[m]]['unc'][l]))
else:
rowDAUN = rowDAUN + ',' + \
'' + ',' + \
''
print(' Variable data and uncertainty are stored.')
##########################################################################################
##########################################################################################
# Disp a ProjectMaker variable. ##########################################################
def dispu(var):
try:
aux1 = ''; aux2 = ''; aux3 = ''
for l in range(0,len(var)):
aux1 = aux1 + 'Var[' + str(l+1) + '] - Symbol: ' + var[l]["sym"] + '\n'
aux1 = aux1 + 'Var[' + str(l+1) + '] - Units : ' + var[l]["uni"] + '\n'
aux2 = aux2 + 'd({})'.format(var[l]["sym"]).rjust(12)
aux2 = aux2 + 'u({})'.format(var[l]["sym"]).rjust(12)
aux3 = aux3 + '{}'.format('-').rjust(12,'-')
aux3 = aux3 + '{}'.format('-').rjust(12,'-')
try:
len(var[l]['dat']) == len(var[0]['dat'])
except:
                print('Variables do not have the same length. So no printing.')
print('Variables to print metadata.')
print('The number of events is ' + str(len(var[0]['dat'])))
print(aux1); print(aux3); print(aux2); print(aux3)
for m in range(0, len(var[0]['dat'])):
aux4 = ''
for l in range(0,len(var)):
aux4 = aux4 + '{}'.format(str(var[l]["dat"][m])).rjust(12)
aux4 = aux4 + '{}'.format(str(var[l]["unc"][m])).rjust(12)
print(aux4)
print(aux3 + '\n')
except:
print('Status Failure.')
def disp(var):
try:
aux1 = ''; aux2 = ''; aux3 = ''
for l in range(0,len(var)):
aux1 = aux1 + 'Var[' + str(l+1) + '] - Symbol: ' + var[l]["sym"] + '\n'
aux1 = aux1 + 'Var[' + str(l+1) + '] - Units : ' + var[l]["uni"] + '\n'
aux2 = aux2 + 'd({})'.format(var[l]["sym"]).rjust(15)
aux3 = aux3 + '{}'.format('-').rjust(15,'-')
try:
len(var[l]['dat']) == len(var[0]['dat'])
except:
                print('Variables do not have the same length. So no printing.')
print('Variables to print metadata.')
print('The number of events is ' + str(len(var[0]['dat'])))
print(aux1); print(aux3); print(aux2); print(aux3)
for m in range(0, len(var[0]['dat'])):
aux4 = ''
for l in range(0,len(var)):
aux4 = aux4 + '{}'.format(str(var[l]["dat"][m])).rjust(15)
print(aux4)
print(aux3 + '\n')
except:
print('Status Failure.')
##########################################################################################
def SetUp():
    print('Setting Current Path is needed. Set it by writing:')
    print('CURRENTPATH = "your/path"\n')
    aux1 = 'Each time your database is modified you must refresh the Data variable.'
    aux2 = 'The very first time, importing the database is compulsory. This can be done as:'
    aux3 = 'Data = LoadVar(ppath,"Data")'
    print(aux1 + '\n' + aux2 + '\n' + aux3 + '\n')
def write_to_clipboard(output):
process = subprocess.Popen(
'pbcopy', env={'LANG': 'en_US.UTF-8'}, stdin=subprocess.PIPE)
process.communicate(output.encode('utf-8'))
def read_from_clipboard():
return subprocess.check_output(
'pbpaste', env={'LANG': 'en_US.UTF-8'}).decode('utf-8') | 2.015625 | 2 |
app/services/vaccine_availability_timeslot.py | rayraykay/VaxFinder-backend | 31 | 12790668 | <gh_stars>10-100
from typing import List, Optional, Type, Union
from uuid import UUID
from loguru import logger
from app.schemas.vaccine_availability import (
VaccineAvailabilityTimeslotCreateRequest,
VaccineAvailabilityTimeslotCreateSprocParams,
VaccineAvailabilityTimeslotResponse,
VaccineAvailabilityTimeslotUpdateRequest,
)
from app.services.base import BaseService
from app.services.exceptions import (
InternalDatabaseError,
InvalidAuthenticationKeyForRequest,
)
class VaccineAvailabilityTimeslotService(
BaseService[
VaccineAvailabilityTimeslotResponse,
VaccineAvailabilityTimeslotCreateSprocParams,
VaccineAvailabilityTimeslotUpdateRequest,
]
):
read_procedure_name = "vaccine_availability_timeslots_Read"
read_procedure_id_parameter = "id"
create_procedure_name = "vaccine_availability_timeslots_Create"
update_procedure_name = "vaccine_availability_timeslots_Update"
update_procedure_id_parameter = "id"
delete_procedure_name = "vaccine_availability_timeslots_Delete"
delete_procedure_id_parameter = "id"
@property
def table(self) -> str:
return "vaccine_availability_timeslot"
@property
def db_response_schema(self) -> Type[VaccineAvailabilityTimeslotResponse]:
return VaccineAvailabilityTimeslotResponse
@property
def create_response_schema(
self,
) -> Type[VaccineAvailabilityTimeslotCreateSprocParams]:
return VaccineAvailabilityTimeslotCreateSprocParams
@property
def update_response_schema(
self,
) -> Type[VaccineAvailabilityTimeslotUpdateRequest]:
return VaccineAvailabilityTimeslotUpdateRequest
async def get_multi(
self,
) -> List[VaccineAvailabilityTimeslotResponse]:
raise NotImplementedError("Get multi is not available for timeslots")
async def get_by_vaccine_availability_id(
self, vaccine_availability_id: UUID, auth_key: Optional[UUID] = None
) -> Optional[List[VaccineAvailabilityTimeslotResponse]]:
procedure_name = "vaccine_availability_timeslots_ReadByParent"
ret_value, db_rows = await self._db.sproc_fetch_all(
procname=procedure_name,
parameters={"parentID": vaccine_availability_id},
auth_key=auth_key,
)
if db_rows is None:
# We are assuming that any error on the stored procedure is due
# to the fact that the object doesn't exist.
return []
if ret_value == -1:
raise InternalDatabaseError(f"Failed to execute {procedure_name}")
return [VaccineAvailabilityTimeslotResponse(**o) for o in db_rows]
| 2.125 | 2 |
30os.py | Ulyssesss/Learn-Python | 1 | 12790669 | import os
print(os.name)
print(os.uname())
print(os.environ)
print(os.environ.get('PATH'))
p = os.path.join('.', 'test_dir')
print(p)
os.mkdir(p)
os.rmdir(p)
| 2.421875 | 2 |
azext_concierge/concierge/common/shell.py | egineering-llc/azure-cli-concierge-extension | 1 | 12790670 | import os, subprocess
def execute_shell_process(message, command):
print(message)
env_copy = os.environ.copy()
output = subprocess.run(command, env=env_copy, shell=True)
if output.returncode == 0:
print("Success!")
else:
print("Oops! Please try again.") | 2.75 | 3 |
tpot/models/df_caso_3_3_tpot_pipeline.py | nachovazquez98/COVID-19_Paper | 0 | 12790671 | <reponame>nachovazquez98/COVID-19_Paper
import numpy as np
import pandas as pd
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from sklearn.svm import LinearSVC
from tpot.builtins import StackingEstimator
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1)
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['target'], random_state=None)
# Average CV score on the training set was: 0.7222949874965342
exported_pipeline = make_pipeline(
StackingEstimator(estimator=SGDClassifier(alpha=0.0, eta0=0.1, fit_intercept=True, l1_ratio=0.0, learning_rate="constant", loss="hinge", penalty="elasticnet", power_t=10.0)),
LinearSVC(C=0.1, dual=False, loss="squared_hinge", penalty="l2", tol=1e-05)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
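# Optional follow-up, not part of the TPOT export: report hold-out accuracy for
# the predictions computed above.
from sklearn.metrics import accuracy_score
print(accuracy_score(testing_target, results))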
| 2.765625 | 3 |
rates/updater/updater.py | luisparravicini/backtesting-learning | 0 | 12790672 | <reponame>luisparravicini/backtesting-learning
from datetime import datetime
import ccxt
import time
import re
from .db import Database
from .exchange import build_exchange, timestamps_to_seconds, secs_to_millis
class Updater:
def __init__(self, exchange_name, symbol, db_base_path):
self.exchange = build_exchange(exchange_name)
self.symbol = symbol
self.db = Database(exchange_name, self.symbol, db_base_path)
self.sleep_time = 45
def fetch_ohlcv(self):
since = self.db.newest_timestamp()
# some exchanges use since as an exclusive limit
# we ask again for the last candle in case the last time we
# got that data, it was not closed yet and contained partial data
if since is not None:
since -= 1
# we store timestamps in seconds, ccxt uses millis
since = secs_to_millis(since)
try:
data = self.exchange.fetch_ohlcv(self.symbol, timeframe='1m', since=since)
timestamps_to_seconds(data)
except ccxt.NetworkError as err:
print(f'ERROR: {err}')
data = []
return data
def fetch_and_save(self):
data = self.fetch_ohlcv()
if len(data) > 0:
self.db.add(data)
for datum in data:
date = datetime.fromtimestamp(datum[0]).strftime('%Y-%m-%d %H:%M:%S')
print(date, datum)
print()
def run(self):
print(f'sleep: {self.sleep_time}')
while True:
self.fetch_and_save()
time.sleep(self.sleep_time)
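if __name__ == '__main__':
    # Hypothetical entry point, not part of the original module; the exchange
    # name, trading pair and database path below are placeholder values.
    updater = Updater('binance', 'BTC/USDT', './rates-db')
    updater.run()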
| 2.625 | 3 |
rlpython/embed.py | ukleinek/rlpython | 4 | 12790673 | <reponame>ukleinek/rlpython<gh_stars>1-10
import inspect
import logging
import os
def embed(single_threaded=False, bind='', permissions='600',
multi_session=False, started_from_cmd_line=False, print=print,
debug=False, **repl_kwargs):
from rlpython.frontend import start_frontend
from rlpython.repl_server import ReplServer
from rlpython.repl import Repl
# debug mode
if debug:
logging.basicConfig(level=logging.DEBUG)
os.environ['RLPYTHON_DEBUG'] = 'True'
# use namespace of caller instead of own if nothing is set
if 'globals' not in repl_kwargs and 'locals' not in repl_kwargs:
stack = inspect.stack()
frame_info = stack[1]
repl_kwargs['globals'] = frame_info.frame.f_globals
repl_kwargs['locals'] = frame_info.frame.f_locals
# setup warnings
if 'warnings' not in repl_kwargs:
repl_kwargs['warnings'] = []
if not started_from_cmd_line:
repl_kwargs['warnings'].append('running single threaded: cancellation using CTRL-C will not work') # NOQA
if single_threaded and not bind and not started_from_cmd_line:
repl_kwargs['warnings'].append('running single threaded: Use "!" to cancel multi line statements') # NOQA
# network embed
if bind:
single_threaded = True # FIXME
# single threaded
if single_threaded:
repl_server = ReplServer(
url=bind,
permissions=permissions,
repl_domain=Repl.DOMAIN.NETWORK,
print=print,
**repl_kwargs,
)
if multi_session:
return repl_server
try:
repl_server.setup()
repl_server.print_bind_informations()
repl_server.run_single_session(**repl_kwargs)
except OSError as exception:
exit('rlpython: ERROR: {}'.format(exception.args[1]))
finally:
repl_server.shutdown()
# multi threaded
else:
raise NotImplementedError
# local embed
else:
# single threaded
if single_threaded:
repl = Repl(**repl_kwargs)
try:
repl.interact()
finally:
repl.shutdown()
# multi threaded
else:
repl_server = ReplServer(
url='localhost:0',
permissions=permissions,
repl_domain=Repl.DOMAIN.LOCAL_NETWORK,
)
repl_server.setup()
port = repl_server.get_port()
start_frontend(port)
repl_server.run_single_session(**repl_kwargs)
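if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: open a local,
    # single-threaded REPL that can inspect this namespace.
    greeting = 'hello from rlpython'
    embed(single_threaded=True)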
| 2.109375 | 2 |
jsonconf/tests/commandLineTests.py | bponsler/jsonconf | 0 | 12790674 | from unittest import TestCase
from jsonconf import CommandLineParser
class ConfigTests(TestCase):
def setUp(self):
pass
def test_constructor(self):
parser = CommandLineParser()
self.assertTrue(parser is not None)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), [])
self.assertEqual(parser.getProgram(), None)
def test_emptyArgs(self):
args = []
parser = CommandLineParser()
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), [])
self.assertEqual(parser.getProgram(), None)
def test_singleArg(self):
args = ["/usr/bin/whatever"]
parser = CommandLineParser()
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), [])
self.assertEqual(parser.getProgram(), "/usr/bin/whatever")
def test_extraArgs(self):
extraArgs = ["one", "two", "-d", "--ignore"]
args = ["/usr/bin/whatever"]
args.extend(extraArgs)
parser = CommandLineParser()
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), extraArgs)
self.assertEqual(parser.getProgram(), "/usr/bin/whatever")
def test_keyArgs(self):
kwargs = {
"one": '1',
"two": "2",
"-d": "hello",
"--ignore": '5',
}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), kwargs)
self.assertEqual(parser.getExtraArguments(), extraArgs)
self.assertEqual(parser.getProgram(), "/usr/bin/whatever")
def test_complexKey(self):
kwargs = {
"one.two.three": '1',
}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), kwargs)
self.assertEqual(parser.getExtraArguments(), extraArgs)
self.assertEqual(parser.getProgram(), "/usr/bin/whatever")
def test_both(self):
kwargs = {
"one": '1',
"two.three": '1',
}
extraArgs = ["--test", "-v"]
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), kwargs)
self.assertEqual(parser.getExtraArguments(), extraArgs)
self.assertEqual(parser.getProgram(), "/usr/bin/whatever")
def test_requiredTest(self):
kwargs = {}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.requireKey("verbose")
self.assertRaises(Exception, parser.parse, args)
def test_requiredTest2(self):
kwargs = {"--verbose": 1}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.requireKey("--verbose")
parser.parse(args)
def test_invalidConverter(self):
kwargs = {"--verbose": "hello"}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
# Cannot parse string to int
parser.requireKey("--verbose", int)
self.assertRaises(Exception, parser.parse, args)
    def test_validConverter(self):
kwargs = {"--verbose": "1"}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.requireKey("--verbose", int)
parser.parse(args)
def test_renameKeywordArguments(self):
kwargs = {"--verbose": "1"}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.get("verbose"), "1")
self.assertEqual(parser.getExtraArguments(), [])
kwargs = {"-v": "1"}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.get("verbose"), "1")
self.assertEqual(parser.getExtraArguments(), [])
kwargs = {"verbose": "1"}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.get("verbose"), "1")
self.assertEqual(parser.getExtraArguments(), [])
kwargs = {"verb": "1"}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.get("verbose"), "1")
self.assertEqual(parser.getExtraArguments(), [])
kwargs = {"verbose": "1", "--verbose": "1", "-v": "1", "verb": "1"}
extraArgs = []
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.get("verbose"), "1")
self.assertEqual(parser.getExtraArguments(), [])
def test_renameExtraArguments(self):
kwargs = {}
extraArgs = ["-v"]
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), ["verbose"])
extraArgs = ["--verbose"]
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), ["verbose"])
extraArgs = ["verbose"]
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), ["verbose"])
extraArgs = ["verb"]
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), ["verbose"])
extraArgs = ["-v", "--verbose", "verb", "verbose"]
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {})
self.assertEqual(parser.getExtraArguments(), ["verbose"])
def test_renameOtherArgs(self):
kwargs = {"test": "255"}
extraArgs = ["--verbose", "otherArg"]
args = ["/usr/bin/whatever"]
args.extend(map(lambda i: "%s=%s" % (i[0], i[1]), kwargs.items()))
args.extend(extraArgs)
parser = CommandLineParser()
parser.renameKeys("verbose", ["-v", "--verbose", "verbose", "verb"])
parser.parse(args)
self.assertEqual(parser.getKeywordArguments(), {"test": "255"})
self.assertEqual(parser.getExtraArguments(), ["verbose", "otherArg"])
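if __name__ == '__main__':
    # Not part of the original test module: allows running the file directly
    # with `python commandLineTests.py` in addition to using a test runner.
    import unittest
    unittest.main()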
| 3.125 | 3 |
tests/test_sfm2cldf.py | clld/pydictionaria | 1 | 12790675 | import unittest
import pydictionaria.sfm2cldf as s
import clldutils.sfm as sfm
class SplitMarkersWithSeparators(unittest.TestCase):
def test_lump_everything_together_if_seperator_isnt_found(self):
sep = 'sep'
input_markers = [
('marker1', 'value1'),
('marker2', 'value2')]
expected = [
[('marker1', 'value1'), ('marker2', 'value2')]]
self.assertEqual(
list(s.group_by_separator(sep, input_markers)),
expected)
def test_split_groups_on_separator(self):
sep = 'sep'
input_markers = [
('marker1', 'value1'),
('sep', 'value'),
('marker2', 'value2')]
expected = [
[('marker1', 'value1')],
[('sep', 'value'), ('marker2', 'value2')]]
self.assertEqual(
list(s.group_by_separator(sep, input_markers)),
expected)
class SplitListByPredicate(unittest.TestCase):
def test_no_element_matches_pred(self):
def iseven(x):
return x % 2 == 0
elements = [1, 3, 5]
even, odd = s.split_by_pred(iseven, elements)
self.assertEqual(even, [])
self.assertEqual(odd, [1, 3, 5])
def test_all_elements_match_pred(self):
def iseven(x):
return x % 2 == 0
elements = [2, 4, 6]
even, odd = s.split_by_pred(iseven, elements)
self.assertEqual(even, [2, 4, 6])
self.assertEqual(odd, [])
def test_some_elements_match_pred(self):
def iseven(x):
return x % 2 == 0
elements = [1, 2, 3, 4]
even, odd = s.split_by_pred(iseven, elements)
self.assertEqual(even, [2, 4])
self.assertEqual(odd, [1, 3])
class GenerateSequentialIDs(unittest.TestCase):
def test_sequence_starts_with_one(self):
gen = s.IDGenerator()
first_id = gen.next_id()
self.assertEqual(first_id, '000001')
def test_sequence_counts_up(self):
gen = s.IDGenerator()
first_id = gen.next_id()
second_id = gen.next_id()
self.assertEqual(first_id, '000001')
self.assertEqual(second_id, '000002')
def test_adding_prefix(self):
gen = s.IDGenerator('PRE')
first_id = gen.next_id()
second_id = gen.next_id()
self.assertEqual(first_id, 'PRE000001')
self.assertEqual(second_id, 'PRE000002')
class LinkProcessing(unittest.TestCase):
def setUp(self):
id_index = {
'OLDID1': 'NEWID1',
'OLDID2': 'NEWID2',
'OLDID3': 'NEWID3'}
label_index = {
'NEWID1': 'label 1',
'NEWID2': 'label 2',
'NEWID3': 'label 3'}
link_markers = {'linkmarker1', 'linkmarker2'}
link_regex = r'\bOLDID\d+\b'
self.link_processor = s.LinkProcessor(
id_index, label_index, link_markers, link_regex)
def test_entries_without_links_dont_change(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'no link'),
('othermarker', 'no link')])
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'no link'),
('othermarker', 'no link')])
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
def test_single_link_is_replaced(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: OLDID1'),
('othermarker', 'no link')])
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: [label 1](NEWID1)'),
('othermarker', 'no link')])
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
def test_links_in_different_markers_are_replaced(self):
original_entry = sfm.Entry([
('linkmarker1', 'link: OLDID2'),
('linkmarker2', 'link: OLDID1'),
('othermarker', 'no link')])
expected = sfm.Entry([
('linkmarker1', 'link: [label 2](NEWID2)'),
('linkmarker2', 'link: [label 1](NEWID1)'),
('othermarker', 'no link')])
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
def test_links_in_same_marker_are_replaced(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link 1: OLDID1; link 2: OLDID2'),
('othermarker', 'no link')])
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link 1: [label 1](NEWID1); link 2: [label 2](NEWID2)'),
('othermarker', 'no link')])
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
def test_same_link_twice_in_the_same_marker(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link 1: OLDID1; link 2: OLDID1'),
('othermarker', 'no link')])
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link 1: [label 1](NEWID1); link 2: [label 1](NEWID1)'),
('othermarker', 'no link')])
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
def test_only_process_links_in_specified_markers(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: OLDID1'),
('othermarker', 'link: OLDID2')])
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: [label 1](NEWID1)'),
('othermarker', 'link: OLDID2')])
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
def test_ignore_regex_matches_that_are_not_in_the_index(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: OLDID1000'),
('othermarker', 'no link')])
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: OLDID1000'),
('othermarker', 'no link')])
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
def test_dont_mutate_original_entry(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: OLDID1'),
('othermarker', 'no link')])
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: OLDID1'),
('othermarker', 'no link')])
_ = self.link_processor(original_entry)
self.assertEqual(original_entry, expected)
def test_carry_over_attributes(self):
original_entry = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: OLDID1'),
('othermarker', 'no link')])
original_entry.id = 'I have an ID, too!'
expected = sfm.Entry([
('linkmarker1', 'no link'),
('linkmarker2', 'link: [label 1](NEWID1)'),
('othermarker', 'no link')])
expected.id = 'I have an ID, too!'
new_entry = self.link_processor(original_entry)
self.assertEqual(new_entry, expected)
class MediaCaptionExtraction(unittest.TestCase):
def test_find_caption(self):
entry = sfm.Entry([
('marker1', 'val1'),
('pc', 'image-name'),
('cap', 'caption'),
('marker2', 'val2')])
caption_finder = s.CaptionFinder(['pc'], 'cap')
_ = caption_finder(entry)
expected = {'image-name': 'caption'}
self.assertEqual(caption_finder.captions, expected)
def test_find_multiple_captions(self):
entry = sfm.Entry([
('marker1', 'val1'),
('pc', 'image1-name'),
('cap', 'caption1'),
('marker2', 'val2'),
('pc', 'image2-name'),
('cap', 'caption2'),
('marker3', 'val3')])
caption_finder = s.CaptionFinder(['pc'], 'cap')
_ = caption_finder(entry)
expected = {
'image1-name': 'caption1',
'image2-name': 'caption2'}
self.assertEqual(caption_finder.captions, expected)
def test_captions_need_to_be_adjacent(self):
entry = sfm.Entry([
('marker1', 'val1'),
('pc', 'image-name'),
('marker2', 'val2'),
('cap', 'caption'),
('marker3', 'val3')])
caption_finder = s.CaptionFinder(['pc'], 'cap')
_ = caption_finder(entry)
expected = {}
self.assertEqual(caption_finder.captions, expected)
class MapSfmToCldf(unittest.TestCase):
def setUp(self):
self.mapping = {'marker1': 'Column1', 'marker2': 'Column2'}
def test_map_id(self):
sfm_entry = sfm.Entry()
sfm_entry.id = 'id1'
cldf_row = s.sfm_entry_to_cldf_row(None, self.mapping, {}, set(), sfm_entry)
self.assertEqual(cldf_row, {'ID': 'id1'})
def test_map_columns(self):
sfm_entry = sfm.Entry([('marker1', 'value1'), ('marker2', 'value2')])
sfm_entry.id = 'id1'
cldf_row = s.sfm_entry_to_cldf_row(None, self.mapping, {}, set(), sfm_entry)
self.assertEqual(
cldf_row,
{'ID': 'id1', 'Column1': 'value1', 'Column2': 'value2'})
def test_ignore_unexpected_sfm_markers(self):
sfm_entry = sfm.Entry([('marker1', 'value1'), ('unknown', 'value2')])
sfm_entry.id = 'id1'
cldf_row = s.sfm_entry_to_cldf_row(None, self.mapping, {}, set(), sfm_entry)
self.assertEqual(
cldf_row,
{'ID': 'id1', 'Column1': 'value1'})
def test_map_entry_id(self):
sfm_entry = sfm.Entry([('marker1', 'value1')])
sfm_entry.id = 'id1'
sfm_entry.entry_id = 'entry1'
cldf_row = s.sfm_entry_to_cldf_row(None, self.mapping, {}, set(), sfm_entry)
self.assertEqual(
cldf_row,
{'ID': 'id1', 'Column1': 'value1', 'Entry_ID': 'entry1'})
def test_map_sense_ids(self):
sfm_entry = sfm.Entry([('marker1', 'value1')])
sfm_entry.id = 'id1'
sfm_entry.sense_ids = ['sense1', 'sense2']
cldf_row = s.sfm_entry_to_cldf_row(None, self.mapping, {}, set(), sfm_entry)
self.assertEqual(
cldf_row,
{'ID': 'id1', 'Column1': 'value1', 'Sense_IDs': ['sense1', 'sense2']})
def test_map_language_id(self):
sfm_entry = sfm.Entry([('marker1', 'value1')])
sfm_entry.id = 'id1'
sfm_entry.sense_ids = ['sense1', 'sense2']
cldf_row = s.sfm_entry_to_cldf_row(None, self.mapping, {}, set(), sfm_entry, 'lang1')
self.assertEqual(
cldf_row,
{'ID': 'id1', 'Column1': 'value1', 'Sense_IDs': ['sense1', 'sense2'], 'Language_ID': 'lang1'})
def test_map_media_ids(self):
sfm_entry = sfm.Entry([('marker1', 'value1')])
sfm_entry.id = 'id1'
sfm_entry.media_ids = ['file1', 'file2']
cldf_row = s.sfm_entry_to_cldf_row(None, self.mapping, {}, set(), sfm_entry)
self.assertEqual(
cldf_row,
{'ID': 'id1', 'Column1': 'value1', 'Media_IDs': ['file1', 'file2']})
def test_gloss():
sfm_entry = sfm.Entry([('ge', 'abc\tdef')])
cldf_row = s.sfm_entry_to_cldf_row(None, {'ge': 'Gloss'}, {}, set(), sfm_entry)
assert cldf_row['Gloss'] == 'abc\tdef'
cldf_row = s.sfm_entry_to_cldf_row('ExampleTable', {'ge': 'Gloss'}, {}, set(), sfm_entry)
assert cldf_row['Gloss'] == ['abc', 'def']
def test_cf():
sfm_entry = sfm.Entry([('cf', 'val1'), ('cf', 'val2;val3')])
cldf_row = s.sfm_entry_to_cldf_row('EntryTable', {'cf': 'Entry_IDs'}, {}, {'Entry_IDs'}, sfm_entry)
assert cldf_row['Entry_IDs'] == ['val1', 'val2', 'val3']
def test_multimarkers():
sfm_entry = sfm.Entry([('cf', 'val1'), ('cf', 'val2')])
cldf_row = s.sfm_entry_to_cldf_row(None, {'cf': 'See_Also'}, {}, set(), sfm_entry)
assert cldf_row['See_Also'] == 'val1 ; val2'
| 2.921875 | 3 |
ANLY-501-INTRO/LAB5-cluster/00-TWO-VARIABLE-NORMAL-DIST.py | rexarski/ggtown-ds | 0 | 12790676 |
##-------------------------------------------
## 2 VARIABLE NORMAL DISTIBUTION
##-------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
#USER INPUTS
FUNC=2
FS=18 #FONT SIZE
CMAP='hsv' #'RdYlBu'
#normal distribution param
ux=0.5; uy=0.0
sx=2.0; sy=1.0 #STD-DEV
rho=0.5; #[0,1) RHO=PEARSON CORRELATION
u=np.array([[ux],[uy]]) #MEAN VECTOR u=[ux,uy]
s=np.array([[sx**2.0,rho*sy*sx],[rho*sy*sx,sy**2.0]]) #COVARIANCE METRIC
#GENERATE POINTS SAMPLED FROM DISTRIBUTION
xp, yp = np.random.multivariate_normal(u.reshape(2), s, 1000).T
# DEFINE FUNCTION
def N(x, y):
out=1.0/(2*3.1415*sx*sy*(1-rho**2.0)**0.5)
out=out*np.exp(-(((x-ux)/sx)**2.0-2*rho*((x-ux)/sx)*((y-uy)/sy)+((y-uy)/sy)**2.0)/(2*(1-rho**2)))
return out
#MESH-1 (SMALLER)
L=3*max(sx,sy)
xmin=-L; xmax=L; ymin=-L; ymax=L
x,y = np.meshgrid(np.linspace(xmin,xmax,20),np.linspace(ymin,ymax,20))
#MESH-2 (DENSER)
X, Y = np.meshgrid(np.linspace(xmin, xmax, 40), np.linspace(ymin, ymax, 40))
#SURFACE PLOT
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
ax.set_xlabel('x', fontsize=FS); ax.set_ylabel('y', fontsize=FS); ax.set_zlabel('p(x,y)', fontsize=FS)
surf=ax.plot_surface(X, Y, N(X, Y), cmap=CMAP)
ax.scatter(xp, yp, 1.1*np.max(N(X, Y)) , '.')
plt.show();
#SCATTER PLOT
plt.plot(xp, yp,'.')
#CONTOUR PLOT
# plt.axis('equal')
plt.contour(X, Y, N(X, Y), 20, cmap=CMAP);
plt.show();
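# Quick numerical sanity check, not in the original script: the sample
# covariance of the generated points should be close to the matrix s above.
print(np.cov(xp, yp))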
| 2.875 | 3 |
monhorn/console.py | EntySec/Monhorn | 0 | 12790677 | <reponame>EntySec/Monhorn<gh_stars>0
"""
MIT License
Copyright (c) 2020-2022 EntySec
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .plugins import Plugins
from hatsploit.core.cli.badges import Badges
from hatsploit.lib.runtime import Runtime
from hatsploit.lib.session import Session
from hatsploit.lib.commands import Commands
from pex.fs import FS
class Console(Plugins, Badges, Runtime, Commands, FS):
""" Subclass of monhorn module.
This subclass of monhorn module is intended for providing
Monhorn main console.
"""
prompt = '%linemonhorn%end > '
core_commands = [
('exit', 'Terminate Monhorn session.'),
('help', 'Show available commands.'),
('quit', 'Stop interaction.')
]
commands = {}
def check_session(self, session: Session) -> bool:
""" Check is session alive.
:param Session session: session to check
:return bool: True if session is alive
"""
if session.channel.terminated:
self.print_warning("Connection terminated.")
session.close()
return False
return True
def start_monhorn(self, session: Session) -> None:
""" Start Monhorn.
:param Session session: session to start Monhorn for
:return None: None
"""
commands = session.monhorn + 'commands/' + session.details['Platform'].lower()
exists, is_dir = self.exists(commands)
if exists and not is_dir:
self.commands.update(
self.load_commands(commands)
)
commands = session.monhorn + 'commands/generic'
exists, is_dir = self.exists(commands)
if exists and not is_dir:
self.commands.update(
self.load_commands(commands)
)
for command in self.commands:
self.commands[command].session = session
def monhorn_console(self, session: Session) -> None:
""" Start Monhorn console.
:param Session session: session to start Monhorn console for
:return None: None
"""
self.start_monhorn(session)
if self.check_session(session):
while True:
result = self.catch(self.monhorn_shell, [session])
if result is not Exception and result:
break
def monhorn_shell(self, session: Session) -> bool:
""" Start Monhorn shell.
:param Session session: session to start Monhorn shell for
:return bool: True if Monhorn shell completed
"""
command = self.input_empty(self.prompt)
if command:
if command[0] == 'quit':
return True
elif command[0] == 'help':
self.print_table("Core Commands", ('Command', 'Description'),
*self.core_commands)
self.show_commands(self.commands)
elif command[0] == 'exit':
session.send_command("exit")
session.channel.terminated = True
return True
else:
self.check_session(session)
self.execute_custom_command(command, self.commands)
return False
| 1.875 | 2 |
irida_uploader_cl/parsers/miniseq/__init__.py | duanjunhyq/irida_uploader_cl | 0 | 12790678 | <reponame>duanjunhyq/irida_uploader_cl
from irida_uploader_cl.parsers.miniseq.parser import Parser
| 1.039063 | 1 |
ETM/ETM.py | BlackmanWhite/Easy-Text-Menu-ETM | 2 | 12790679 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
__author__ = ('<NAME> (<NAME>)')
__version__ = ('3.5')
__license__ = ('GNU General Public License v3.0')
opt = []
index = 1
class Menu():
def __init__(self, title, character, char_length):
self.title = title
self.character = character
self.char_length = int(char_length)
def change_title(self, new_title):
self.title = new_title
def print_menu(self):
return print('{0}{1}{0}'.format(self.character*self.char_length, self.title))
def print_options(self):
option_n = 1
for Option in opt:
print('[{}] {}'.format(option_n, Option.option))
option_n += 1
def print_all(self):
self.print_menu()
self.print_options()
    def add_option(self, option, command):
        # Option.__init__ already appends the new option to the global opt
        # list, so appending it again here would register the option twice.
        Option(option, command)
    def remove_option(self, _option):
        # Iterate over a copy so that removing an entry does not skip elements.
        for existing in opt[:]:
            if existing.option == _option:
                opt.remove(existing)
def clear_option(self):
opt.clear()
def get_input(self):
usr = input('> ')
found = False
for Option in opt:
if usr == str(Option.index):
found = True
Option.command()
if found == True:
pass
else:
print("Invalid command")
def about(self):
return print('Made by {}\nVersion: {}\nLicense: {}'.format(__author__,__version__,__license__))
class Option():
def __init__(self, option, command):
global index
self.option = option
self.command = command
self.index = index
opt.append(self)
index += 1
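if __name__ == '__main__':
    # Small usage sketch, not part of the original module.
    def say_hello():
        print('Hello!')
    menu = Menu('Main Menu', '=', 10)
    menu.add_option('Say hello', say_hello)
    menu.add_option('About', menu.about)
    menu.print_all()
    menu.get_input()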
| 3.546875 | 4 |
tests/test_engine.py | adithyavis/pywarm | 194 | 12790680 | <reponame>adithyavis/pywarm<filename>tests/test_engine.py
# 08-31-2019;
"""
Test cases for warm.engine.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
from pathlib import Path
import sys
sys.path.append(str(Path(__file__).parent.parent))
from warm import engine
def test_set_get_default_parent():
a = nn.Identity()
b = nn.Identity()
engine.set_default_parent(a)
assert engine.get_default_parent() is a, 'get_default_parent result mismatchs set_default_parent.'
engine.set_default_parent(b)
assert engine.get_default_parent() is b, 'get_default_parent result mismatchs set_default_parent.'
def test_auto_name():
a = nn.Identity()
for i in range(10):
assert engine._auto_name('test', a) == f'test_{i+1}', 'new calls to _auto_name failed to increment name count.'
a(None) # test if forward pre hook is triggered to reset names
assert engine._auto_name('test', a) == 'test_1', 'forward_pre_hook did not work.'
def test_initialize():
a = nn.Parameter(torch.zeros(3, 4))
b = nn.Parameter(torch.zeros(3, 4))
c = nn.Parameter(torch.zeros(3, 4))
torch.manual_seed(1)
engine.initialize_(a, 'normal_')
torch.manual_seed(1)
nn.init.normal_(b)
assert torch.equal(a, b), 'initialize_ with str spec did not work correctly.'
assert not torch.equal(a, c), 'initialize_ with str spec did not work.'
torch.manual_seed(1)
engine.initialize_(c, nn.init.normal_)
assert torch.equal(a, c), 'initialize_ with function spec did not work correctly.'
def test_activate():
a = torch.randn(3, 4)
b = copy.deepcopy(a)
a = engine.activate(a, 'hardshrink')
b = F.hardshrink(b)
assert torch.equal(a, b), 'activate with str spec did not work correctly.'
a = engine.activate(a, 'relu')
b = F.relu(b)
assert torch.equal(a, b), 'activate with str spec did not work correctly.'
def test_permute():
x = torch.randn(1, 2, 3)
y = engine.permute(x, 'BCD', 'DCB')
assert list(y.shape) == [3, 2, 1], 'permute 3d tensor with str in_shape and str out_shape did not work correctly.'
y = engine.permute(x, 'BCD', None)
assert list(y.shape) == [1, 2, 3], 'permute tensor with None out_shape did not work corretly.'
y = engine.permute(x, 'BCD', [1, 0, 2])
assert list(y.shape) == [2, 1, 3], 'permute tensor with list out_shape did not work corretly.'
x = torch.randn(1, 2, 3, 4)
y = engine.permute(x, 'BCD', 'DCB')
assert list(y.shape) == [3, 4, 2, 1], 'permute 4d tensor with str in_shape and str out_shape did not work correctly.'
y = engine.permute(x, 'DBC', 'CDB')
assert list(y.shape) == [4, 1, 2, 3], 'permute 4d tensor with str in_shape and str out_shape did not work correctly.'
x = torch.randn(1, 2, 3, 4, 5)
y = engine.permute(x, 'BDC', 'BCD')
assert list(y.shape) == [1, 5, 2, 3, 4], 'permute 5d tensor with str in_shape and str out_shape did not work correctly.'
x = torch.randn(1, 2)
y = engine.permute(x, 'BDC', 'BCD')
assert list(y.shape) == [1, 2], 'permute 2d tensor with str in_shape and str out_shape did not work correctly.'
y = engine.permute(x, 'CBD', 'DBC')
assert list(y.shape) == [2, 1], 'permute 2d tensor with str in_shape and str out_shape did not work correctly.'
def test_unused_kwargs():
kw = {'unused1':0, 'unused2':0, 'base_class':0}
unused = engine.unused_kwargs(kw)
assert 'base_class' not in unused, 'unused_kwargs leaks used.'
assert set(unused.keys()) == {'unused1', 'unused2'}, 'unused_kwargs did not filter kw correctly.'
def test_prepare_model_is_ready():
class TestModel(nn.Module):
def forward(self, x):
x = engine.forward(x, nn.Linear, 'linear',
base_arg=(x.shape[-1], 4, False), # in_features, out_features, bias
in_shape=None, out_shape=None, base_shape=None,
initialization={'weight':'ones_'}, activation=(F.dropout, {'p':1.0}), )
return x
x = torch.randn(1, 2, 3)
m = TestModel()
assert not engine.is_ready(m), 'is_ready did not work correctly.'
engine.prepare_model_(m, x)
assert engine.is_ready(m), 'prepare_model_ did not work correctly.'
assert m.linear_1.bias is None, 'linear_1 should not have bias.'
assert torch.allclose(m.linear_1.weight, torch.Tensor([1.0])), 'linear_1.weight should be initialized to all 1s.'
y = m(x)
assert torch.allclose(y, torch.Tensor([0.0])), 'y should be all 0s because we dropout everything.'
assert list(y.shape) == [1, 2, 4], 'y should have shape [1, 2, 4] after linear projection.'
def test_forward():
x = torch.randn(1, 2, 3)
m = nn.Module()
engine.set_default_parent(m)
class TripleOut(nn.Module): # to test tuple_out
def forward(self, x, b=1, c='2'):
return x+b, x, c
y = engine.forward(x, base_class=TripleOut, base_name='tri', tuple_out=False)
assert isinstance(y, torch.Tensor), 'tuple_out did not work correctly.'
y = engine.forward(x, base_class=TripleOut, base_name='tri', tuple_out=True)
assert isinstance(y, tuple) and len(y) == 3 and y[-1] == '2', 'tuple_out did not work correctly.'
y = engine.forward(x, base_class=TripleOut, base_name='tri', forward_kw={'c':3}, tuple_out=True)
assert y[-1] == 3, 'forward_kw did not work correctly.'
y = engine.forward(x, base_class=TripleOut, base_name='tri', forward_arg=(2.0,))
assert torch.allclose(y-x, torch.Tensor([2.0])), 'forward_arg did not work correctly.'
y = engine.forward(x, base_class=TripleOut, activation=(F.dropout, {'p':1.0}))
assert torch.allclose(y, torch.Tensor([0.0])), 'activation did not work correctly.'
y = engine.forward(
x, base_class=nn.Linear, base_kw={'out_features':4}, infer_kw={'in_features':'C'}, base_shape='BDC')
assert y.shape[1] == 4, 'base_kw, infer_kw did not work correctly.'
def test_namespace():
m = nn.Module()
engine.set_default_parent(m)
@engine.namespace
def f1(name=''):
return ';'.join([f2(name=name) for i in range(2)])
@engine.namespace
def f2(name=''):
return name
s0, s1, s2 = [f1() for i in range(3)]
assert s0 == 'f1_1-f2_1;f1_1-f2_2'
assert s1 == 'f1_2-f2_1;f1_2-f2_2'
assert s2 == 'f1_3-f2_1;f1_3-f2_2'
| 2.140625 | 2 |
reviewboard/extensions/tests/test_filediffacl_hook.py | pombredanne/reviewboard | 0 | 12790681 | """Unit tests for reviewboard.extensions.hooks.FileDiffACLHook."""
import kgb
from djblets.features.testing import override_feature_check
from reviewboard.extensions.hooks import FileDiffACLHook
from reviewboard.extensions.tests.testcases import BaseExtensionHookTestCase
from reviewboard.reviews.features import DiffACLsFeature
class FileDiffACLHookTests(kgb.SpyAgency, BaseExtensionHookTestCase):
"""Tests for the FileDiffACLHook."""
fixtures = ['test_scmtools', 'test_users']
def setUp(self):
super(FileDiffACLHookTests, self).setUp()
self.user = self.create_user()
self.review_request = self.create_review_request(
create_repository=True)
self.review_request.target_people.add(self.review_request.submitter)
self.create_diffset(review_request=self.review_request, draft=True)
self.review_request.publish(user=self.review_request.submitter)
def test_single_aclhook_true(self):
"""Testing FileDiffACLHook basic approval with True result"""
self._test_hook_approval_sequence([True], True)
def test_single_aclhook_none(self):
"""Testing FileDiffACLHook basic approval with None result"""
self._test_hook_approval_sequence([None], True)
def test_single_aclhook_false(self):
"""Testing FileDiffACLHook basic approval with False result"""
self._test_hook_approval_sequence([False], False)
def test_multiple_aclhooks_1(self):
"""Testing FileDiffACLHook multiple with True and False"""
self._test_hook_approval_sequence([True, False], False)
def test_multiple_aclhooks_2(self):
"""Testing FileDiffACLHook multiple with True and None"""
self._test_hook_approval_sequence([True, None], True)
def test_multiple_aclhooks_3(self):
"""Testing FileDiffACLHook multiple with False and None"""
self._test_hook_approval_sequence([False, None], False)
def _test_hook_approval_sequence(self, accessible_values, result):
"""Test a sequence of FileDiffACLHook approval results.
Args:
accessible_values (list of bool):
A list of the values to return from FileDiffACLHook
implementations.
result (bool):
A resulting approval value to check.
"""
with override_feature_check(DiffACLsFeature.feature_id,
enabled=True):
for value in accessible_values:
hook = FileDiffACLHook(extension=self.extension)
self.spy_on(hook.is_accessible, op=kgb.SpyOpReturn(value))
self.assertEqual(self.review_request.is_accessible_by(self.user),
result)
| 2.203125 | 2 |
vega/trainer/callbacks/hccl.py | This-50m/vega | 724 | 12790682 | <filename>vega/trainer/callbacks/hccl.py<gh_stars>100-1000
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Data parallel callback."""
import logging
import vega
from .callback import Callback
from vega.common import ClassFactory, ClassType
from vega.common.general import General
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.CALLBACK)
class Hccl(Callback):
"""Callback that saves the evaluated Performance."""
def __init__(self):
"""Initialize ModelCheckpoint callback."""
super(Hccl, self).__init__()
self.priority = 260
def init_trainer(self, logs=None):
"""Set trainer object for current callback."""
if not self.trainer.hccl:
return
if vega.is_torch_backend():
self._init_pytorch_trainer()
if vega.is_ms_backend():
self._init_ms_trainer()
def _init_pytorch_trainer(self):
import torch
import torch.distributed as dist
logger.info("init HCCL")
model = self.trainer.model
dist.init_process_group(
backend='hccl',
init_method=f"tcp://{General.cluster.hccl_server_ip}:{General.cluster.hccl_port}",
world_size=self.trainer.num_workers,
rank=self.trainer.rank_id)
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.trainer.device_id],
broadcast_buffers=General.cluster.enable_broadcast_buffers)
self.trainer.model = model
def _init_ms_trainer(self):
from mindspore import context
from mindspore.context import ParallelMode
from mindspore.communication.management import init
logger.info("init HCCL")
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
init()
def before_epoch(self, epoch, logs=None):
"""Be called before each epoach."""
if not vega.is_torch_backend() or not self.trainer.hccl:
return
if self.trainer.sampler is not None:
self.trainer.sampler.set_epoch(epoch)
def after_train(self, logs=None):
"""Stop session."""
if self.trainer.hccl and vega.is_tf_backend():
self.trainer.sess.run(self.trainer.npu_shutdown)
self.trainer.sess.close()
| 1.953125 | 2 |
sentencia for.py | slack333/Python | 0 | 12790683 | numeros = [1,2,3,4,5,6,7,8,9,10]
indice = 0
while(indice < len(numeros)):
print(numeros[indice])
indice += 1
for numero in numeros:
print(numero)
for numero in numeros:
numero *= 10
numeros = [1,2,3,4,5,6,7,8,9,10]
indice = 0
for numero in numeros:
numeros[indice] *= 10
indice += 1
print (numeros)
x = [1,2,3,4,5,6,7,8,9,10]
for indice, numero in enumerate(x):
x[indice] *= 10
print(x)
cadena = "Hola amigos"
for caracter in cadena:
print(caracter)
cadena2 = ""
for caracter in cadena:
cadena2 += caracter * 2
print (cadena2)
for i in range(10):
print(i)
| 3.96875 | 4 |
main.py | HisenZhang/WRPI-auto | 1 | 12790684 | <gh_stars>1-10
import headless
import argparse
import threading
import sys
import logging
import py_cui
from modules.tui import TUI
from modules.util import configManager
def runSchedule(station):
logging.info("Scheduler started.")
while station.mixer.get_init():
station.scheduleRun()
def runTUI(mainWindow):
logging.info("TUI starting...")
mainWindow.start()
logging.info("TUI exited.")
def main():
parser = argparse.ArgumentParser(
description='{station} Broadcast Automation System'.format(station=configManager.cfg.station.name))
parser.add_argument('--headless', action='store_true',
help='run without TUI')
args = parser.parse_args()
if args.headless:
headless.routine()
else:
try:
frame = None
mainWindow = py_cui.PyCUI(4, 3, exit_key=1)
mainWindow.set_refresh_timeout(1)
mainWindow.set_title(
'{} Broadcast Automation System'.format(configManager.cfg.station.name))
frame = TUI(mainWindow)
daemonThread = threading.Thread(
name='Daemon', target=runSchedule, args=(frame.station,), daemon=True)
daemonThread.start()
TUIThread = threading.Thread(
name='TUI', target=runTUI, args=(mainWindow,))
TUIThread.start()
TUIThread.join()
except KeyboardInterrupt:
logging.warning("KeyboardInterrupt detected.")
except Exception as e:
logging.critical("TUI: "+str(e))
finally:
if frame:
frame.station.signOff()
sys.exit(0)
# TODO turn this into a package at first major released
# packaging: https://uoftcoders.github.io/studyGroup/lessons/python/packages/lesson/
# with CLI: https://medium.com/nerd-for-tech/how-to-build-and-distribute-a-cli-tool-with-python-537ae41d9d78
if __name__ == "__main__":
main()
| 2.375 | 2 |
botcmd.py | JuEeHa/oonbotti2 | 0 | 12790685 | import eliza
import threading
import random
import re
import time
concmd=['/q', '/lt', '/st', '/lg', '/lm', '/sm']
blacklist = []
zwsp = '\xe2\x80\x8b'  # zero-width space used to prefix bot output; isauthorized() relies on this module-level definition
doctor = eliza.eliza()
# channel: [user1, user2, ..., userN]
trusted = {}
trustedlock = threading.Lock()
gods = {}
godslock = threading.Lock()
# receiver: [(sender1, origin1, message1), (sender2, origin2, message2), ..., (senderN, origin2, messageN)]
msgs = {}
msgslock = threading.Lock()
# (ID, nick, account)
accountcheck = []
accountcheckid = 0
accountchecklock = threading.Lock()
die_expr=re.compile("#[0-9]*d([0-9]+|%)([+-][0-9]+)?$")
class Cron(threading.Thread):
def __init__(self):
self.timedjobs = []
self.timedjobslock = threading.Lock()
self.cronctrl = []
self.cronctrllock = threading.Lock()
threading.Thread.__init__(self)
def queuejob(self, time, fn):
self.timedjobslock.acquire()
self.timedjobs.append((time, fn))
self.timedjobslock.release()
def ctrl(self, cmd):
self.cronctrllock.acquire()
self.cronctrl.append(cmd)
self.cronctrllock.release()
def run(self):
run = True
while run:
time.sleep(1) # Accuracy doesn't need to be high
self.cronctrllock.acquire()
for cmd in self.cronctrl:
if cmd == 'QUIT':
run = False
self.cronctrl=[]
self.cronctrllock.release()
self.timedjobslock.acquire()
self.timedjobs = map((lambda (time, fn): (time-1, fn)), self.timedjobs)
torun = map((lambda (time, fn): fn), filter((lambda (time, fn): time<=0), self.timedjobs))
self.timedjobs = filter((lambda (time, fn): time>0), self.timedjobs)
self.timedjobslock.release()
for fn in torun:
fn()
def loadmessages():
global msgs, msgslock
with msgslock:
msgs = {}
f = open('msgs.txt', 'r')
for line in f:
while len(line) > 0 and line[-1] == '\n':
line = line[:-1]
if len(line.split('\t')) == 4:
receiver, sender, origin, msg = line.split('\t')
if receiver not in msgs:
msgs[receiver] = []
msgs[receiver].append((sender, origin, msg))
f.close()
def savemessages():
global msgs, msgslock
with msgslock:
f=open('msgs.txt', 'w')
for receiver in msgs:
for sender, origin, msg in msgs[receiver]:
f.write('%s\t%s\t%s\t%s\n' % (receiver, sender, origin, msg))
f.close()
loadmessages()
def addtrusted(chan, account):
global trusted, trustedlock
trustedlock.acquire()
if chan not in trusted:
trusted[chan] = []
if account not in trusted[chan]:
trusted[chan].append(account)
trustedlock.release()
def rmtrusted(chan, account):
global trusted, trustedlock
trustedlock.acquire()
if chan in trusted and account in trusted[chan]:
trusted[chan].remove(account)
trustedlock.release()
def loadtrusted():
global trusted, trustedlock
trustedlock.acquire()
trusted = {}
trustedlock.release()
f=open('trusted.txt', 'r')
for line in f:
while len(line) > 0 and line[-1] == '\n':
line = line[:-1]
if len(line) > 0:
chan, account = line.split()
addtrusted(chan, account)
f.close()
def loadgods():
global gods, godslock
godslock.acquire()
gods = {}
f=open('gods.txt', 'r')
for line in f:
while len(line) > 0 and line[-1] == '\n':
line = line[:-1]
if len(line) > 0:
chan, account = line.split()
if chan not in gods:
gods[chan] = []
gods[chan].append(account)
addtrusted(chan, account)
f.close()
godslock.release()
def savetrusted():
global trusted, trustedlock
trustedlock.acquire()
f=open('trusted.txt', 'w')
for chan in trusted:
for account in trusted[chan]:
f.write('%s %s\n' % (chan, account))
	f.close()
trustedlock.release()
def init():
global cron
cron = Cron()
cron.start()
loadtrusted()
loadgods()
def chmode(irc, chan, nick, mode, args):
set_unset = mode[0]
mode = mode[1:]
if isauthorized(irc, chan, nick):
if args == ['']:
irc.send('MODE %s %s %s' % (chan, set_unset+mode, nick))
else:
nicks = []
for nick in args:
nicks.append(nick)
if len(nicks) == 4:
irc.send('MODE %s %s %s' % (chan, set_unset+mode*4, ' '.join(nicks)))
nicks = []
if nicks:
irc.send('MODE %s %s %s' % (chan, set_unset+mode*len(nicks), ' '.join(nicks)))
def istrusted(chan, account):
trustedlock.acquire()
if chan in trusted and account in trusted[chan]:
trustedlock.release()
return True
else:
trustedlock.release()
return False
def initaccountcheck(nick):
global accountcheck, accountcheckid, accountchecklock
accountchecklock.acquire()
id = accountcheckid
accountcheck.append((id, nick, None))
accountcheckid += 1
accountchecklock.release()
return id
# Warning: this does no locking, should only be used internally
# The index returned cannot be guaranteed valid if lock is released between call to getindexbyaccountcheckid and use!
def getindexbyaccountcheckid(id):
global accountcheck
for index in range(len(accountcheck)):
ckid, cknick, ckaccount = accountcheck[index]
if ckid == id:
return index
return None
def setaccountcheckvalue(id, value):
global accountcheck, accountchecklock
accountchecklock.acquire()
index = getindexbyaccountcheckid(id)
if index is not None:
ckid, nick, ckvalue = accountcheck[index]
accountcheck[index] = (id, nick, value)
accountchecklock.release()
def getaccountcheckvalue(id):
global accountcheck, accountchecklock
accountchecklock.acquire()
index = getindexbyaccountcheckid(id)
if index is not None:
ckid, cknick, value = accountcheck[index]
accountchecklock.release()
return value
def removeaccountcheck(id):
global accountcheck, accountchecklock
accountchecklock.acquire()
index = getindexbyaccountcheckid(id)
if index is not None:
del accountcheck[index]
accountchecklock.release()
def getaccountcheckidbynick(nick):
global accountcheck, accountchecklock
accountchecklock.acquire()
getid = lambda (id, nick, account): id
filterbynick = lambda (id, cknick, account): cknick == nick
ids = map(getid, filter(filterbynick, accountcheck))
accountchecklock.release()
return ids
def getaccount(irc, nick):
id = initaccountcheck(nick)
irc.send('WHOIS ' + nick)
cron.queuejob(5, (lambda : setaccountcheckvalue(id, '')))
account = None
while account == None:
account = getaccountcheckvalue(id)
time.sleep(0.1)
removeaccountcheck(id)
if account == '': # '' Signifies failure
return None
else:
return account
def isauthorized(irc, chan, nick):
account = getaccount(irc, nick)
if account:
return istrusted(chan, account)
else:
irc.msg(nick, zwsp + 'Identify with NickServ')
class ArgsfmtError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
		return 'Error with argument format: ' + self.msg
ARG_STD = 0
ARG_OPT = 1
ARG_UNL = 2
def parseargsfmt(args):
# parses the argument format used by matchcmd and parsecmd
# e.g. parseargsfmt("foo [bar] {baz} ) -> [ARG_STD, ARG_OPT, ARG_UNL]
args = args.split(' ')
out = []
for arg in args:
if len(arg) >= 2 and arg[0] == '[' and arg[-1] == ']': # Optional (0-1) argument: [bar]
out.append(ARG_OPT)
elif len(arg) >= 2 and arg[0] == '{' and arg[-1] == '}': # Unlimited (0-) number of arguments: {baz}
out.append(ARG_UNL)
else: # Normal argument: foo
out.append(ARG_STD)
return out
def getargnums(argtypes):
min = 0
max = 0 # max = None if number of arguments is unlimited
for argtype in argtypes:
if argtype == ARG_STD:
min += 1
if max != None: # Don't try to increment if max is unlimited
max += 1
elif argtype == ARG_OPT:
if max != None: # Don't try to increment if max is unlimited
max += 1
elif argtype == ARG_UNL:
max = None
return min, max
def matchcmd(line, cmd, args=None):
# matchcmd(line, cmd) matched if the command cmd is used, matchcmd(line, cmd, args) checks whether the args match too
if len(line) == 0:
return False
if line[0] != cmd:
return False
if not args:
return True
min, max = getargnums(parseargsfmt(args))
if max and len(line)-1 >= min and len(line)-1 <= max:
return True
elif not max and len(line)-1 >= min:
return True
else:
return False
def parsecmd(line, args):
	# Returns a list of the arguments (or a single string if there is only one). An optional argument that didn't get a value will be assigned ''
argtypes = parseargsfmt(args)
if len(argtypes) >= 1 and ARG_UNL in argtypes[:-1]: # Disallow non-final unlimited arguments
raise ArgsfmtError('Non-final unlimited argument')
if len(filter((lambda type: type == ARG_OPT or type == ARG_UNL), argtypes)) > 1: # Disallow more than one optional or unlimited argument per argument string
raise ArgsfmtError('Ambiguous argument format')
# Remove the command
if len(line) == 0:
raise ArgsfmtError('No command given')
line = line[1:]
min, max = getargnums(argtypes)
if len(line) == min:
# Only standard arguments given
out = []
for type in argtypes:
if type == ARG_STD:
out.append(line[0])
line = line[1:]
else:
out.append('')
elif max and len(line) == max:
# Optional argument given
out = []
for type in argtypes:
if type == ARG_STD or type == ARG_OPT:
out.append(line[0])
line = line[1:]
else:
out.append('')
elif not max and len(line) > min:
# Unlimited argument given
out = []
for type in argtypes:
if type == ARG_STD or type == ARG_OPT:
out.append(line[0])
line = line[1:]
elif type == ARG_UNL:
out.append(' '.join(line))
line = []
else:
raise ArgsfmtError('Number of given arguments not possible for given format string')
if len(out) == 1:
return out[0]
else:
return out
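# Illustrative example of the two helpers above, not part of the bot logic:
# a line such as "#kick malory being rude", split on spaces, matches and
# parses as follows.
#
#   cmdline = ['#kick', 'malory', 'being', 'rude']
#   matchcmd(cmdline, '#kick', 'nick {reason}')  # -> True
#   parsecmd(cmdline, 'nick {reason}')           # -> ['malory', 'being rude']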
def parse((line, irc)):
global blacklist
global msgs, msgslock
global trusted, trustedlock, gods, godslock
global doctor, die_expr
line = line.split(' ')
nick = line[0].split('!')[0][1:]
chan = line[2] if line[2][0] == '#' else nick
zwsp = '\xe2\x80\x8b'
if nick in blacklist:
return
elif len(line) >= 4 and len(line[3]) >= len(zwsp)+1 and line[3][:len(zwsp)+1] == ':'+zwsp: # If line begins with ZWSP
return
if line[1]=='PRIVMSG' and line[3][:2] != ': ':
reply = chan
cmdline = [line[3][1:]] + line[4:]
while '' in cmdline:
cmdline.remove('')
# #chan: channel override prefix
# Don't allow this in private messages for more transparent bot usage
if matchcmd(cmdline, '#chan') and chan != nick:
if matchcmd(cmdline, '#chan', 'channel {command}'):
newchan, newcmdline = parsecmd(cmdline, 'channel {command}')
newcmdline = newcmdline.split(' ')
if isauthorized(irc, newchan, nick):
chan = newchan
cmdline = newcmdline
else:
irc.msg(chan, zwsp + 'Usage #chan channel command')
if matchcmd(cmdline, '#echo'):
text = parsecmd(cmdline, '{text}')
irc.msg(reply, zwsp+text)
elif matchcmd(cmdline, '#op'):
args = parsecmd(cmdline, '{args}')
chmode(irc, chan, nick, '+o', args.split(' '))
elif matchcmd(cmdline, '#deop'):
args = parsecmd(cmdline, '{args}')
chmode(irc, chan, nick, '-o', args.split(' '))
elif matchcmd(cmdline, '#voice'):
args = parsecmd(cmdline, '{args}')
chmode(irc, chan, nick, '+v', args.split(' '))
elif matchcmd(cmdline, '#quiet'):
arg = parsecmd(cmdline, 'nick')
chmode(irc, chan, nick, '+q', [arg + '!*@*'])
elif matchcmd(cmdline, '#dequiet'):
arg = parsecmd(cmdline, 'nick')
chmode(irc, chan, nick, '-q', [arg + '!*@*'])
elif matchcmd(cmdline, '#devoice'):
args = parsecmd(cmdline, '{args}')
chmode(irc, chan, nick, '-v', args.split(' '))
elif matchcmd(cmdline, '#kick'):
if matchcmd(cmdline, '#kick', 'nick {reason}'):
kicknick, kickreason = parsecmd(cmdline, 'nick {reason}')
if kicknick.lower() == irc.nick:
irc.send('KICK %s %s :Fuck you' % (chan, nick))
else:
if isauthorized(irc, chan, nick):
irc.send('KICK %s %s :%s'%(chan, kicknick, kickreason))
else:
irc.msg(reply, zwsp + 'Usage #kick nick reason')
elif matchcmd(cmdline, '#src'):
irc.msg(reply, zwsp + 'https://github.com/JuEeHa/oonbotti2')
elif matchcmd(cmdline, '#prefix') and chan == '#osdev-offtopic':
irc.msg(reply, zwsp + 'gopher://ayu.smar.fi:7070/0/hash-prefix')
elif matchcmd(cmdline, '#msg'):
if matchcmd(cmdline, '#msg', 'nick {message}'):
msgnick, message = parsecmd(cmdline, 'nick {message}')
if chan == nick: # In a query:
origin = "[query]"
else: # In a channel
origin = chan
with msgslock:
if msgnick not in msgs:
msgs[msgnick] = []
msgs[msgnick].append((nick, origin, message))
savemessages()
else:
irc.msg(reply, zwsp + 'Usage: #msg nick message')
elif matchcmd(cmdline, '#trusted?'):
if matchcmd(cmdline, '#trusted?', '[nick]'):
trustnick = parsecmd(cmdline, '[nick]')
if trustnick == '':
trustnick = nick
account = getaccount(irc, trustnick)
if account:
if istrusted(chan, account):
irc.msg(reply, zwsp + '%s is trusted' % trustnick)
else:
irc.msg(reply, zwsp + '%s is not trusted' % trustnick)
else:
irc.msg(reply, zwsp + 'Failed to get account for %s' % trustnick)
else:
irc.msg(reply, zwsp + 'Usage: #trusted? [nick]')
elif matchcmd(cmdline, '#trust'):
if matchcmd(cmdline, '#trust', 'nick'):
trustnick = parsecmd(cmdline, 'nick')
if isauthorized(irc, chan, nick):
account = getaccount(irc, trustnick)
if account:
addtrusted(chan, account)
savetrusted()
else:
irc.msg(reply, zwsp + 'Failed to get account for %s' % trustnick)
else:
irc.msg(reply, zwsp + 'Usage #trust nick')
elif matchcmd(cmdline, '#untrust'):
if matchcmd(cmdline, '#untrust', 'nick'):
untrustnick = parsecmd(cmdline, 'nick')
if isauthorized(irc, chan, nick):
account = getaccount(irc, untrustnick)
# If account can't be found (e.g. it has been deleted, use the parameter as-is
if not account:
if istrusted(chan, untrustnick):
account = untrustnick
if account:
godslock.acquire()
if chan not in gods or account not in gods[chan]:
rmtrusted(chan, untrustnick)
godslock.release()
savetrusted()
else:
irc.msg(reply, zwsp + 'Failed to get account for %s' % untrustnick)
else:
irc.msg(reply, zwsp + 'Usage #untrust nick')
elif matchcmd(cmdline, '#ls-trusted'):
trustedlock.acquire()
if chan in trusted:
lines = []
line = ''
for account in trusted[chan]:
if line == '':
line = account
elif len(line + ', ' + account) <= 255: # Playing it safe not to get truncated
line += ', ' + account
else:
lines.append(line)
line = account
if line != '':
lines.append(line)
for line in lines:
irc.msg(nick, zwsp + '%s: %s' % (chan, line))
trustedlock.release()
elif matchcmd(cmdline, '#invite'):
irc.msg(chan, zwsp + '%s: #invite has been removed. Use manual invite' % nick)
elif matchcmd(cmdline, '#help'):
if matchcmd(cmdline, '#help', '[command]'):
command = parsecmd(cmdline, '[command]')
helptext = help(command)
if helptext:
irc.msg(reply, zwsp+helptext)
elif matchcmd(cmdline, '#esoteric') and chan == '#esoteric':
irc.msg(reply, zwsp + 'Nothing here')
elif cmdline[0] in [irc.nick, irc.nick+',', irc.nick+':']:
question = parsecmd(cmdline, '{question}')
if len(question) < 2 or question[:2] != ':D': # Mandated by #osdev-offtopic law
irc.msg(reply, zwsp + '%s: %s' % (nick, doctor.respond(question)))
elif die_expr.match(cmdline[0]):
die = cmdline[0][1:].split('d')
times = int(die[0]) if die[0] else 1
if '+' in die[1]:
split = die[1].index('+')
plus = int(die[1][split + 1:])
die[1] = die[1][:split]
elif '-' in die[1]:
split = die[1].index('-')
plus = -int(die[1][split + 1:])
die[1] = die[1][:split]
else:
plus = 0
die = '%' if die[1] == '%' else int(die[1])
if die == '%':
if times != 1:
irc.msg(reply, zwsp + 'Not supported')
else:
irc.msg(reply, zwsp + '%s%s' % (random.randint(0,9), random.randint(0,9)))
elif die < 1:
irc.msg(reply, zwsp + 'This die is not available in your space-time region.')
elif times < 1:
irc.msg(reply, zwsp + 'What exactly do you want me to do?')
elif times > 128:
irc.msg(reply, zwsp + 'Sorry, I don\'t have that many. Can I borrow yours?')
else:
rolls = [random.randint(1, die) for i in xrange(times)]
result = reduce((lambda x, y: x + y), rolls)
if times > 1:
text = '%s (%s)' % (str(result), ', '.join([str(i) for i in rolls]))
else:
text = str(result)
if plus > 0:
text = '%i (%s + %i)' % (result + plus, text, plus)
elif plus < 0:
text = '%i (%s - %i)' % (result + plus, text, -plus)
irc.msg(reply, zwsp + text)
elif line[1] == '330': # WHOIS: is logged in as
whoisnick = line[3]
account = line[4]
for id in getaccountcheckidbynick(whoisnick):
setaccountcheckvalue(id, account)
elif line[1] == '318': # WHOIS: End of /WHOIS list.
whoisnick = line[3]
for id in getaccountcheckidbynick(whoisnick):
            if getaccountcheckvalue(id) is None:
setaccountcheckvalue(id, '') # Mark as failed, '' is used because None is already reserved
elif line[1] == 'INVITE' and line[2] == irc.nick and line[3][1:] in irc.chan.split(' '):
if isauthorized(irc, line[3][1:], nick):
irc.send('JOIN ' + line[3])
elif line[1] == '482':
irc.msg(line[3], zwsp + 'Not op')
msgs_changed = False
with msgslock:
if (line[1] == 'PRIVMSG' or line[1] == 'JOIN') and nick in msgs:
for sender, origin, msg in msgs.pop(nick):
irc.msg(nick, zwsp + '%s <%s> %s' % (origin, sender, msg))
msgs_changed = True
if msgs_changed:
savemessages()
def execcmd(cmdline):
if cmdline[0] == '/q':
cron.ctrl('QUIT')
elif cmdline[0] == '/lt':
loadtrusted()
elif cmdline[0] == '/st':
savetrusted()
elif cmdline[0] == '/lg':
loadgods()
elif cmdline[0] == '/lm':
loadmessages()
elif cmdline[0] == '/sm':
savemessages()
def usage(cmd, message = True):
usage = {'#echo': 'text',
'#op': '[nick]',
'#deop': '[nick]',
'#voice': '[nick]',
'#devoice': '[nick]',
'#quiet': 'nick',
'#dequiet': 'nick',
'#kick': 'nick [reason]',
'#src': '',
'#msg': 'nick message',
'#trusted?': '[nick]',
'#trust': 'nick',
'#untrust': 'nick',
'#ls-trusted': '',
'#chan': 'channel command',
'#help': '[command]'}
if cmd in usage:
if message:
return 'Usage: %s %s' % (cmd, usage[cmd])
else:
return usage[cmd]
else:
return None
def help(cmd):
helptext = {'#echo': '#echo text back',
'#op': 'give nick or yourself op rights in case you are trusted by oonbotti2 and identified with NickServ',
'#deop': 'remove your/nick\'s op rights',
'#voice': 'give nick or yourself voice in case you are trusted by oonbotti2 and identified with NickServ',
'#devoice': 'remove your or nick\'s voice in case you are trusted by oonbotti2 and identified with NickServ',
'#quiet': 'give +q to nick!*@*',
'#dequiet': 'remove +q from nick!*@*',
'#kick': 'kicks nick with specified reason',
'#src': 'paste a link to oonbotti2\'s git repo',
'#msg': 'send a message to nick',
'#trusted?': 'tell you if nick or yourself is trusted by oonbotti2',
'#trust': 'add nick to trusted list',
'#untrust': 'remove nick from trusted list',
'#ls-trusted': 'list nicks that are trusted. use only in a query',
'#chan': 'Runs the command as if it was sent on the specified channel. Requires user to be trusted',
'#help': 'give short info of command or list commands'}
if cmd=='':
return '#echo #op #deop #voice #devoice #quiet #dequiet #kick #src #msg #trusted? #trust #untrust #ls-trusted #chan #help'
elif cmd=='me':
return 'I shall.'
elif cmd in helptext:
if helptext[cmd]:
return '%s %s %s' % (cmd, usage(cmd, False), helptext[cmd])
else:
return '%s %s' % (cmd, usage(cmd, False))
else:
return None
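# --- Illustrative sketch (added for clarity, not part of the original bot) ---
# The die_expr branch above rolls dice commands of the form "#NdM+K" / "#NdM-K"
# (plus the special "d%" percentile case and the 1..128 limits, which this
# sketch omits). The standalone helper below restates that parsing and rolling
# logic so it can be exercised outside the IRC handler; it is not called by the bot.
def roll_dice_spec(spec):
    """Parse a spec such as "2d6+1" and return (total, individual rolls, modifier)."""
    count_part, _, rest = spec.partition('d')
    times = int(count_part) if count_part else 1
    plus = 0
    if '+' in rest:
        rest, _, mod = rest.partition('+')
        plus = int(mod)
    elif '-' in rest:
        rest, _, mod = rest.partition('-')
        plus = -int(mod)
    sides = int(rest)
    rolls = [random.randint(1, sides) for _ in range(times)]
    return sum(rolls) + plus, rolls, plus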
| 2.265625 | 2 |
models.py | Geson-anko/VoiceBand | 0 | 12790686 | import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from torchsummaryX import summary
from torch.nn.utils import weight_norm, remove_weight_norm
from utils import get_padding, get_conv1d_outlen, init_weights, get_padding_down, get_padding_up,walk_ratent_space
from typing import Tuple
from torchsummaryX import summary
import numpy as np
import random
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
LRELU_SLOPE = 0.1
class ResBlock(nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super().__init__()
assert len(dilation) == 3
self.convs1 = nn.ModuleList([
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class Encoder(nn.Module):
def __init__(self, h):
super().__init__()
self.h = h
rks = h.resblock_kernel_sizes
rds = h.resblock_dilation_sizes
drs = h.downsample_rates
drks = h.downsample_kernel_sizes
dci = h.downsample_initial_channel
self.num_kernels = len(rks)
self.num_downsamples = len(drs)
self.conv_pre = weight_norm(nn.Conv1d(1, dci, 7,1,3))
        # get expected input lengths and output lengths
init_len = h.n_fft
self.L_ins = [init_len]
self.L_outs = []
for r in drs:
lo = int(init_len/r)
self.L_outs.append(lo)
self.L_ins.append(lo)
init_len = lo
self.L_outs.append(1)
# get downsampling paddings
self.pads = []
for i,r in enumerate(drs):
pad = get_padding_down(self.L_ins[i],self.L_outs[i],drks[i],r)
self.pads.append(pad)
# get downsampling channels
self.channels = []
for i in range(len(drs)+1):
self.channels.append(dci*(2**i))
self.dns = nn.ModuleList()
for i, (u, k) in enumerate(zip(drs, drks)):
self.dns.append(weight_norm(
nn.Conv1d(self.channels[i], self.channels[i+1],k,u,self.pads[i])
))
self.resblocks = nn.ModuleList()
for i in range(len(self.dns)):
ch = self.channels[i+1]
for j,(k,d) in enumerate(zip(rks,rds)):
self.resblocks.append(ResBlock(ch,k,d))
self.conv_post = weight_norm(nn.Conv1d(self.channels[-1],h.ratent_dim,self.L_ins[-1]))
self.conv_post_var = weight_norm(nn.Conv1d(self.channels[-1],h.ratent_dim,self.L_ins[-1]))
self.dns.apply(init_weights)
self.conv_post.apply(init_weights)
self.conv_post_var.apply(init_weights)
def forward(self, x:torch.Tensor) -> torch.Tensor:
x = self.conv_pre(x)
for i in range(self.num_downsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.dns[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i*self.num_kernels+j](x)
else:
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
mean = self.conv_post(x)
var = F.softplus(self.conv_post_var(x)) + 1e-8
return mean,var
def dual_flow(self, x1:torch.Tensor, x2:torch.Tensor,with_random:bool=True) -> torch.Tensor:
mean1,var1 = self.forward(x1)
mean2,var2 = self.forward(x2)
if with_random:
out1 = self.random_sample(mean1,var1)
out2 = self.random_sample(mean2,var2)
else:
out1,out2 = mean1,mean2
out = torch.cat([out1, out2], dim=1) #.tanh() # notanh
return out
@staticmethod
def random_sample(mean:torch.Tensor, var:torch.Tensor):
return mean + torch.randn_like(mean)*torch.sqrt(var)
def summary(self):
dummy = torch.randn(1,1,self.h.n_fft)
summary(self, dummy)
def remove_weight_norm(self):
print("Removing weight norm...")
for l in self.dns:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
class Decoder(nn.Module):
def __init__(self, h) -> None:
super().__init__()
self.h = h
rks = h.resblock_kernel_sizes
rds = h.resblock_dilation_sizes
uik = h.upsample_initial_kernel
urs = h.upsample_rates
urks = h.upsample_kernel_sizes
uic = h.upsample_initial_channel
self.out_len = h.n_fft +h.hop_len
self.num_kernels = len(rks)
self.num_upsamples = len(urs)
self.conv_pre = weight_norm(nn.ConvTranspose1d(h.ratent_dim*2, uic,uik))
        # get expected input lengths and output lengths
init_len = uik
self.L_ins = [init_len]
self.L_outs = []
for r in urs:
lo = init_len * r
self.L_ins.append(lo)
self.L_outs.append(lo)
init_len = lo
# get upsampling paddings
self.pads = []
for i,r in enumerate(urs):
pad = get_padding_up(self.L_ins[i],self.L_outs[i],urks[i],r)
self.pads.append(pad)
# get upsampling channels
self.channels = [uic]
ch = uic
for i in range(len(urs)):
self.channels.append(int(ch/(2**i)))
self.ups = nn.ModuleList()
for i, (u,k) in enumerate(zip(urs,urks)):
self.ups.append(weight_norm(
nn.ConvTranspose1d(self.channels[i], self.channels[i+1],k,u,self.pads[i])
))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = self.channels[i+1]
for j,(k,d) in enumerate(zip(rks,rds)):
self.resblocks.append(ResBlock(ch,k,d))
self.conv_post = weight_norm(nn.Conv1d(self.channels[-1],1,7,1,3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x:torch.Tensor) -> torch.Tensor:
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.ups[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i*self.num_kernels+j](x)
else:
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
l = x.size(-1)
start = int((l - self.out_len)/2)
x = x[:,:,start:start+self.out_len]
#x = x.tanh() # grad explosion ?
return x
def summary(self):
dummy = torch.randn(1,self.h.ratent_dim*2,1)
summary(self,dummy)
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
class VoiceBand(pl.LightningModule):
def __init__(self, h,dtype:torch.dtype=torch.float,device:torch.device='cpu') -> None:
super().__init__()
self.h = h
self.reset_seed()
self.encoder = Encoder(h).type(dtype).to(self.device)
self.decoder = Decoder(h).type(dtype).to(self.device)
self.n_fft = h.n_fft
self.ratent_dim = h.ratent_dim
self.walking_steps = int(h.breath_len / h.hop_len) + 1
self.walking_resolution = h.walking_resolution
self.out_len = self.decoder.out_len
self.view_interval = 10
self.kl_lambda = h.kl_lambda
# training settings
self.MSE = nn.MSELoss()
self.MAE = nn.L1Loss()
self.actions = walk_ratent_space(self.ratent_dim, self.walking_steps,self.walking_resolution,device=device,dtype=dtype)
def forward(self, x1:torch.Tensor,x2:torch.Tensor) -> torch.Tensor:
"""
x1: (-1, 1, n_fft)
x2: (-1, 1, n_fft)
"""
mean1,var1 = self.encoder.forward(x1)
mean2,var2 = self.encoder.forward(x2)
mean,var = torch.cat([mean1,mean2],dim=1),torch.cat([var1,var2],dim=1)
out = self.encoder.random_sample(mean,var)#.tanh()# notanh
out = self.decoder(out)
return out,mean,var
def on_fit_start(self) -> None:
self.logger.log_hyperparams(self.h)
def training_step(self, batch:Tuple[torch.Tensor], batch_idx) -> torch.Tensor:
"""
batch : (-1, ch, n_fft+hop_len)
"""
sound, = batch
sound = sound.type(self.dtype)
if self.h.random_gain:
sound= self.random_gain(sound)
x1,x2,ans = sound[:,:,:self.h.n_fft], sound[:,:,-self.h.n_fft:], sound
out_,mean,var = self.forward(x1,x2)
        out = out_.tanh() # atanh grad explosion
mse = self.MSE(ans, out)
mae = self.MAE(ans,out)
KL = 0.5*torch.sum(
torch.pow(mean,2) +
var -
torch.log(var) -1
).sum() / out.size(0)
#marginal_likelihood = self.BCEwithLogits(torch.atanh(out),0.5*ans+1)
#print(True in torch.isnan(out))
marginal_likelihood= F.binary_cross_entropy_with_logits(out,0.5*ans+1,reduction="sum") / out.size(0)
loss = marginal_likelihood + KL * self.kl_lambda
#loss = self.kl_lambda * KL + mse
self.log("loss",loss)
self.log("mse",mse)
self.log("mae",mae)
self.log("KL div",KL)
self.log("Marginal likelihood",marginal_likelihood)
return loss
@torch.no_grad()
def on_epoch_end(self) -> None:
"""
walk through the ratent space and log audio wave.
"""
if self.current_epoch%self.view_interval !=0:
return
self.actions = walk_ratent_space(self.ratent_dim, self.walking_steps,self.walking_resolution,
device=self.device,dtype=self.dtype)
wave = None
for act in self.actions.unsqueeze(1):
wave= self.predict_one_step(act,wave)
wave = wave.squeeze(0).T.detach().cpu().numpy()
# tensorboard logging
tb:SummaryWriter = self.logger.experiment
tb.add_audio("Ratent space audio",wave, self.current_epoch,self.h.frame_rate)
fig = plt.figure()
ax = fig.add_subplot()
ax.plot(wave)
tb.add_figure("Walked wave",fig, self.current_epoch)
return
def random_gain(self, sound:torch.Tensor) -> torch.Tensor:
n,c,l = sound.shape
maxes= sound.view(n,c*l).abs().max(dim=1,keepdim=True).values.unsqueeze(-1)
maxes[maxes==0.0] = 1.0
gains = torch.rand_like(maxes)
sound = (sound/maxes) * gains
return sound
def configure_optimizers(self):
optim = torch.optim.AdamW(self.parameters(), self.h.lr,[self.h.adam_b1,self.h.adam_b2])
scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=self.h.lr_decay)
scheduler.last_epoch=self.trainer.max_epochs
return [optim],[scheduler]
silence = None
def set_silence(self):
self.silence = torch.zeros(1,self.h.sample_ch,self.n_fft,device=self.device,dtype=self.dtype)
def set_view_interval(self, interval:int=None):
if interval:
self.view_interval= interval
def predict_one_step(self, action:torch.Tensor,previous_wave:torch.Tensor=None) -> torch.Tensor:
"""
action : (-1, ratent_dim, 1)
previous_wave : (-1,ch, l)
"""
if previous_wave is None:
if self.silence is None:
self.set_silence()
previous_wave = self.silence
assert len(action.shape) == 3
assert len(previous_wave.shape) == 3
if previous_wave.size(-1) < self.n_fft :
pad_len = self.n_fft - previous_wave.size(-1)
n,c,l = previous_wave.shape
pad = torch.zeros(n,c,pad_len,dtype=previous_wave.dtype,device=previous_wave.device)
previous_wave = torch.cat([pad,previous_wave],dim=-1)
enc_in = previous_wave[:,:,-self.n_fft:].to(self.dtype).to(self.device)
encoded = self.encoder.forward(enc_in)[0]#.tanh()# notanh
dec_in = torch.cat([encoded,action],dim=1)
d_out = self.decoder.forward(dec_in)[:,:,self.n_fft:].type_as(previous_wave)
d_out = d_out.tanh() # grad explosion ?
wave = torch.cat([previous_wave,d_out],dim=-1)
return wave
def reset_seed(self):
seed = self.h.seed
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
def summary(self,tensorboard:bool = True):
dummy = torch.randn(1,1,self.n_fft)
summary(self, dummy,dummy)
if tensorboard:
writer = SummaryWriter()
writer.add_graph(self, [dummy,dummy])
def remove_weight_norm(self):
self.encoder.remove_weight_norm()
self.decoder.remove_weight_norm()
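# --- Illustrative sketch (added for clarity, not part of the original model) ---
# training_step uses the closed-form KL divergence between the encoder's
# diagonal Gaussian N(mean, var) and a standard normal prior:
#     KL = 0.5 * sum(mean**2 + var - log(var) - 1)
# The helper below checks that expression against torch.distributions on random
# tensors; it is only a sanity sketch and is not used during training.
def _kl_closed_form_check():
    import torch.distributions as dist
    mean = torch.randn(4, 8)
    var = torch.rand(4, 8) + 1e-3
    closed_form = 0.5 * torch.sum(mean ** 2 + var - torch.log(var) - 1)
    q = dist.Normal(mean, var.sqrt())
    p = dist.Normal(torch.zeros_like(mean), torch.ones_like(var))
    reference = dist.kl_divergence(q, p).sum()
    assert torch.allclose(closed_form, reference, atol=1e-4)
    return closed_form.item()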
if __name__ == '__main__':
from utils import load_config
config = load_config("hparams/origin.json")
model = VoiceBand(config)
model.summary()
model.remove_weight_norm() | 2.15625 | 2 |
perceptron_pacman.py | Tskken/classification | 0 | 12790687 | """Perceptron implementation for apprenticeship learning in pacman.
Author: <NAME>, <NAME>, and <NAME>
Class: CSI-480-01
Assignment: PA 5 -- Supervised Learning
Due Date: Nov 30, 2018 11:59 PM
Certification of Authenticity:
I certify that this is entirely my own work, except where I have given
fully-documented references to the work of others. I understand the definition
and consequences of plagiarism and acknowledge that the assessor of this
assignment may, for the purpose of assessing this assignment:
- Reproduce this assignment and provide a copy to another member of academic
- staff; and/or Communicate a copy of this assignment to a plagiarism checking
- service (which may then retain a copy of this assignment on its database for
- the purpose of future plagiarism checking)
Champlain College CSI-480, Fall 2018
The following code was adapted by <NAME> (<EMAIL>)
from the UC Berkeley Pacman Projects (see license and attribution below).
----------------------
Licensing Information: You are free to use or extend these projects for
educational purposes provided that (1) you do not distribute or publish
solutions, (2) you retain this notice, and (3) you provide clear
attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
Attribution Information: The Pacman AI projects were developed at UC Berkeley.
The core projects and autograders were primarily created by <NAME>
(<EMAIL>) and <NAME> (<EMAIL>).
Student side autograding was added by <NAME>, <NAME>, and
<NAME> (<EMAIL>).
"""
import util
from perceptron import PerceptronClassifier
PRINT = True
class PerceptronClassifierPacman(PerceptronClassifier):
"""A PerceptronClassifier for apprenticeeship learning in pacman."""
def __init__(self, legal_labels, max_iterations):
"""Initialize the perceptron.
Args:
legal_labels: list of legal_labels
max_iterations: the max number of iterations to train for
"""
super().__init__(legal_labels, max_iterations)
self.weights = util.Counter()
def classify(self, data):
"""Classify the data points.
Data contains a list of (datum, legal moves)
Datum is a Counter representing the features of each GameState.
legal_moves is a list of legal moves for that GameState.
"""
guesses = []
for datum, legal_moves in data:
vectors = util.Counter()
for l in legal_moves:
vectors[l] = self.weights * datum[l]
guesses.append(vectors.arg_max())
return guesses
def train(self, training_data, training_labels, validation_data,
validation_labels):
"""Train the perceptron."""
# could be useful later
self.features = list(training_data[0][0]['Stop'].keys())
# DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR
# THE AUTOGRADER WILL LIKELY DEDUCT POINTS.
for iteration in range(self.max_iterations):
print("Starting iteration ", iteration, "...")
for (datum, legal_moves), label in zip(training_data,
training_labels):
# *** YOUR CODE HERE ***
# Gets the guess action, then updates the weights
guess = self.classify([(datum, legal_moves)])[0]
if guess != label:
self.weights += datum[label]
self.weights -= datum[guess]
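# --- Illustrative sketch (added for clarity, not part of the assignment solution) ---
# The update above is the standard multiclass perceptron rule: when the guessed
# action differs from the labelled action, add the correct action's feature
# vector to the weights and subtract the guessed action's. The toy helper below
# restates that rule on plain dictionaries instead of util.Counter; any feature
# names a caller uses here are purely hypothetical.
def _toy_perceptron_update(weights, features_by_action, label, guess):
    """Apply one perceptron update in place and return the weights."""
    if guess != label:
        for name, value in features_by_action[label].items():
            weights[name] = weights.get(name, 0.0) + value
        for name, value in features_by_action[guess].items():
            weights[name] = weights.get(name, 0.0) - value
    return weights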
| 2.53125 | 3 |
generated-libraries/python/netapp/storage_array/__init__.py | radekg/netapp-ontap-lib-get | 2 | 12790688 | from netapp.connection import NaConnection
from storage_array_config_summary import StorageArrayConfigSummary # 8 properties
from storage_array_port_stats import StorageArrayPortStats # 13 properties
from storage_array_stats_info import StorageArrayStatsInfo # 6 properties
from arrayfailovertype import Arrayfailovertype # 0 properties
from arrayerrortype import Arrayerrortype # 0 properties
from lunownershipfiltertype import Lunownershipfiltertype # 0 properties
from storage_array_profile import StorageArrayProfile # 16 properties
from storage_array_port_stats_info import StorageArrayPortStatsInfo # 19 properties
from storage_array_port import StorageArrayPort # 9 properties
from connectiontype import Connectiontype # 0 properties
from storage_array_stats_error_info import StorageArrayStatsErrorInfo # 3 properties
from storage_array_error_info import StorageArrayErrorInfo # 3 properties
class StorageArrayConnection(NaConnection):
def storage_array_modify(self, array_name, max_queue_depth=None, vendor=None, is_upgrade_pending=None, prefix=None, lun_queue_depth=None, model=None, options=None):
"""
Update an array profile with new or changed information.
:param array_name: The name of the array profile to update. (28 chars max)
:param max_queue_depth: The target port queue depth for all target ports on this array.
:param vendor: The name of the array's manufacturer. (8 chars max)
:param is_upgrade_pending: Used to indicate that the specified array will under go an
upgrade in the near future.
:param prefix: A unique user supplied 4 character code used to refer to this
array and used in naming the array's LUNs.
:param lun_queue_depth: The queue depth assigned to array LUNs from this array.
:param model: The model of the array. (16 chars max)
:param options: Array profile specific options. (comma separated list of
name/value pairs) (127 chars max)
"""
return self.request( "storage-array-modify", {
'max_queue_depth': [ max_queue_depth, 'max-queue-depth', [ int, 'None' ], False ],
'vendor': [ vendor, 'vendor', [ basestring, 'None' ], False ],
'is_upgrade_pending': [ is_upgrade_pending, 'is-upgrade-pending', [ bool, 'None' ], False ],
'prefix': [ prefix, 'prefix', [ basestring, 'None' ], False ],
'lun_queue_depth': [ lun_queue_depth, 'lun-queue-depth', [ int, 'None' ], False ],
'model': [ model, 'model', [ basestring, 'None' ], False ],
'array_name': [ array_name, 'array-name', [ basestring, 'None' ], False ],
'options': [ options, 'options', [ basestring, 'None' ], False ],
}, {
} )
def storage_array_profile_change_notification(self, is_modify, array_id):
"""
Signal the changes made in array profile RDB to the D-Blade
:param is_modify: A boolean value which indicates if it's an rdb_modify operation.
True - operation is rdb_modify
False - operation is rdb_create
:param array_id: Primary key (system defined) for the array record.
"""
return self.request( "storage-array-profile-change-notification", {
'is_modify': [ is_modify, 'is-modify', [ bool, 'None' ], False ],
'array_id': [ array_id, 'array-id', [ int, 'None' ], False ],
}, {
} )
def storage_array_rename(self, array_name, new_name):
"""
Rename an array profile
:param array_name: The name of the array profile to update. (28 chars max)
:param new_name: The new name to assign to this array profile. (28 chars max)
"""
return self.request( "storage-array-rename", {
'array_name': [ array_name, 'array-name', [ basestring, 'None' ], False ],
'new_name': [ new_name, 'new-name', [ basestring, 'None' ], False ],
}, {
} )
def storage_array_port_modify(self, wwpn, wwnn, array_name, max_queue_depth=None):
"""
Update an array port with new or changed information.
:param wwpn: World wide port name of array's target port (64 chars).
:param wwnn: World wide node name of array's target port (64 chars).
:param array_name: The name of the array profile to update. (28 chars max)
:param max_queue_depth: The target port queue depth for this target port.
"""
return self.request( "storage-array-port-modify", {
'wwpn': [ wwpn, 'wwpn', [ basestring, 'None' ], False ],
'wwnn': [ wwnn, 'wwnn', [ basestring, 'None' ], False ],
'array_name': [ array_name, 'array-name', [ basestring, 'None' ], False ],
'max_queue_depth': [ max_queue_depth, 'max-queue-depth', [ int, 'None' ], False ],
}, {
} )
def storage_array_port_remove(self, wwpn, wwnn, array_id):
"""
Remove one port from an array profile record
:param wwpn: The WWPN of the array port to remove.
:param wwnn: The WWNN of the array port to remove.
:param array_id: Primary key (system defined) for the array record.
"""
return self.request( "storage-array-port-remove", {
'wwpn': [ wwpn, 'wwpn', [ basestring, 'None' ], False ],
'wwnn': [ wwnn, 'wwnn', [ basestring, 'None' ], False ],
'array_id': [ array_id, 'array-id', [ int, 'None' ], False ],
}, {
} )
def storage_array_port_change_notification(self, wwpn, wwnn, is_modify, array_id):
"""
Signal the changes made in array port table to the D-Blade
:param wwpn: The WWPN of the array port whose attributes changed
:param wwnn: The WWNN of the array port whose attributes changed
:param is_modify: A boolean value which indicates if it's an rdb_modify operation.
True - operation is rdb_modify
False - operation is rdb_create
:param array_id: Primary key (system defined) for the array record.
"""
return self.request( "storage-array-port-change-notification", {
'wwpn': [ wwpn, 'wwpn', [ basestring, 'None' ], False ],
'wwnn': [ wwnn, 'wwnn', [ basestring, 'None' ], False ],
'is_modify': [ is_modify, 'is-modify', [ bool, 'None' ], False ],
'array_id': [ array_id, 'array-id', [ int, 'None' ], False ],
}, {
} )
def storage_array_stats_list_info(self):
"""
Used to get dynamic information about backend arrays.
"""
return self.request( "storage-array-stats-list-info", {
}, {
'array-stat-info': [ StorageArrayStatsInfo, True ],
} )
def storage_array_profile_sync(self):
"""
Purge a node's array profile database, thereby
synchronizing it with the RDB.
"""
return self.request( "storage-array-profile-sync", {
}, {
} )
def storage_array_port_stats_list_info(self):
"""
return stats for array ports
"""
return self.request( "storage-array-port-stats-list-info", {
}, {
'port-stat-info': [ StorageArrayPortStatsInfo, True ],
} )
def storage_array_update(self, array_name, vendor=None, network_address=None, firmware=None, prefix=None, new_array_name=None, model=None, options=None):
"""
Update an array profile with new or changed information.
Arguments passed in will be used to update the profile. Arguments not
passed will keep their existing values.
:param array_name: 28 character string, no spaces
The name of the array profile to update.
:param vendor: The name of the array's manufacturer. (8 chars)
:param network_address: The I/O address of the array's SNMP management service in dotted-decimal format (for example, "192.168.11.12").
:param firmware: The firmware revision of the array being entered. (64 chars)
:param prefix: A unique 5 character user defined code used to refer to this array.
:param new_array_name: 28 character string, no spaces
The new name to assign to this array profile.
:param model: The model number of the array. (16 chars)
:param options: Array profile specific options. (comma separated list of name/value pairs) (128 chars)
"""
return self.request( "storage-array-update", {
'vendor': [ vendor, 'vendor', [ basestring, 'None' ], False ],
'network_address': [ network_address, 'network-address', [ basestring, 'None' ], False ],
'firmware': [ firmware, 'firmware', [ basestring, 'None' ], False ],
'prefix': [ prefix, 'prefix', [ basestring, 'None' ], False ],
'new_array_name': [ new_array_name, 'new-array-name', [ basestring, 'None' ], False ],
'model': [ model, 'model', [ basestring, 'None' ], False ],
'array_name': [ array_name, 'array-name', [ basestring, 'None' ], False ],
'options': [ options, 'options', [ basestring, 'None' ], False ],
}, {
'array-profile': [ StorageArrayProfile, False ],
} )
def storage_array_get_config_summary(self, node=None, ownership_type=None):
"""
Generates a high level summary of array LUN pathing (connectivity)
information.
:param node: Obtain array LUN pathing information for a specified node.
:param ownership_type: Option that allows the user to select which array LUNs are displayed.
Valid values for ownership-type are 'assigned', 'unassigned' and 'all'.
If ownership-type is set to 'assigned' only assigned array LUNs are displayed.
If ownership-type is set to 'unassigned' only unassigned array LUNs are
displayed. If ownership-type is set to 'all', all array LUNs are
displayed. Default: 'all'.
"""
return self.request( "storage-array-get-config-summary", {
'node': [ node, 'node', [ basestring, 'None' ], False ],
'ownership_type': [ ownership_type, 'ownership-type', [ basestring, 'None' ], False ],
}, {
'config-summary': [ StorageArrayConfigSummary, True ],
} )
def storage_array_ports_list_info(self, array_name=None):
"""
generate a list of online array ports and their associated arrays
:param array_name: When supplied, only port records for the named array are returned. (28 chars)
"""
return self.request( "storage-array-ports-list-info", {
'array_name': [ array_name, 'array-name', [ basestring, 'None' ], False ],
}, {
'array-ports': [ StorageArrayPort, True ],
} )
def storage_array_luns_list_info(self, array_name, ownership_type=None):
"""
Generate a list of array LUNs associated with the named array.
:param array_name: The name of the array profile to list array LUN information for. (28 chars)
:param ownership_type: Option that allows the user to select which array LUNs are displayed.
Valid values for ownership-type are 'assigned', 'unassigned' and 'all'.
If ownership-type is set to 'assigned' only assigned array LUNs are displayed.
If ownership-type is set to 'unassigned' only unassigned array LUNs are
displayed. If ownership-type is set to 'all', all array LUNs are
displayed. Default: 'all'.
"""
return self.request( "storage-array-luns-list-info", {
'array_name': [ array_name, 'array-name', [ basestring, 'None' ], False ],
'ownership_type': [ ownership_type, 'ownership-type', [ basestring, 'None' ], False ],
}, {
'array-luns': [ DiskDetailInfo, True ],
} )
def storage_array_list_info(self, array_name=None):
"""
Retrieves a list of all array profiles known to the controller.
:param array_name: When specified, only the named array profile record will be returned. (28 chars)
"""
return self.request( "storage-array-list-info", {
'array_name': [ array_name, 'array-name', [ basestring, 'None' ], False ],
}, {
'array-profiles': [ StorageArrayProfile, True ],
} )
| 2.015625 | 2 |
LSTM_test.py | xingshulicc/xingshulicc | 10 | 12790689 | # -*- coding: utf-8 -*-
from __future__ import print_function
"""
Created on Tue Oct 6 16:23:04 2020
@author: Admin
"""
import numpy as np
import pandas as pd
import math
import os
from keras.layers import Dense
from keras.layers import LSTM
from keras.optimizers import Adam
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
import matplotlib.pyplot as plt
#load data
filename = 'international-airline-passengers.csv'
filepath = os.path.join(os.getcwd(), filename)
dataframe = pd.read_csv(filepath,
usecols = [1],
engine = 'python')
dataset = dataframe.values
#convert dataframe to numpy array
dataset = dataset.astype('float32')
#the shape of dataset: num_samples, features
#normalise the dataset
feature_range = (0, 1)
scaler = MinMaxScaler(feature_range = feature_range)
dataset = scaler.fit_transform(dataset)
#split the dataset into training and test set
i_split = 0.8
train_size = int(len(dataset) * i_split)
#print(train_size)
test_size = len(dataset) - train_size
#print(test_size)
train_set = dataset[0:train_size, :]
test_set = dataset[train_size:, :]
#convert an array of values into a dataset matrix for LSTM
def create_dataset(dataset, look_back):
dataX = []
dataY = []
for i in range(len(dataset) - look_back - 1):
a = dataset[i:(i+look_back), 0]
b = dataset[i+look_back, 0]
dataX.append(a)
dataY.append(b)
dataX = np.array(dataX)
dataY = np.array(dataY)
return dataX, dataY
look_back = 1
#look_back = time_steps: the number of previous time steps
trainX, trainY = create_dataset(train_set, look_back)
testX, testY = create_dataset(test_set, look_back)
#reshape input to be [samples, time_steps, features]
time_steps = look_back
features = dataset.shape[1]
trainX = np.reshape(trainX, (trainX.shape[0], time_steps, features))
testX = np.reshape(testX, (testX.shape[0], time_steps, features))
#create and fit the LSTM
input_shape = (time_steps, features)
lstm_neurons = 4
#lstm_neurons is a hyper-parameter
dense_neurons = 1
#dense_neurons is equal to the shape of trainY (= 1)
batch_size = 1
epochs = 100
lr = 0.001
optimizer = Adam(lr = lr, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8, decay = 0.0, amsgrad = True)
model = Sequential()
model.add(LSTM(lstm_neurons, input_shape = input_shape, return_sequences = False))
model.add(Dense(dense_neurons, activation = 'linear'))
model.compile(loss = 'mean_squared_error', optimizer = optimizer)
model.fit(trainX,
trainY,
batch_size = batch_size,
epochs = epochs,
verbose = 1,
shuffle = True)
#make predictions
trainPredict = model.predict(trainX, batch_size = batch_size)
testPredict = model.predict(testX, batch_size = batch_size)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
# shift train predictions for plotting
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
# shift test predictions for plotting
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
'''
the most important hyper-parameters are look_back and batch_size;
researchers should try a few values of each to determine the best ones
'''
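# --- Illustrative sketch (appended for clarity, not part of the original script) ---
# create_dataset() builds a sliding window: with look_back = 3, sample i is
# [x_i, x_{i+1}, x_{i+2}] and its target is x_{i+3}, so the LSTM input ends up
# shaped (samples, time_steps, features). The toy check below makes that
# explicit on a small synthetic series; it does not touch the airline data.
def _show_look_back_effect(window=3):
    toy = np.arange(10, dtype='float32').reshape(-1, 1)
    toy_x, toy_y = create_dataset(toy, window)
    toy_x = np.reshape(toy_x, (toy_x.shape[0], window, 1))
    print('toy_x shape:', toy_x.shape, 'toy_y shape:', toy_y.shape)
    print('first window:', toy_x[0].ravel(), '-> target:', toy_y[0])

_show_look_back_effect()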
| 3.265625 | 3 |
mmseg/models/segmentors/__init__.py | yoyoyoohh/SAR_build_extract_v2 | 0 | 12790690 | '''
Author: <NAME>
Created Date: 2021-08-08
Last Modified: 2021-08-28
content:
'''
from .base import BaseSegmentor
from .cascade_encoder_decoder import CascadeEncoderDecoder
from .encoder_decoder import EncoderDecoder
from .semi import Semi
from .semi_v2 import SemiV2
from .reco import ReCo
# __all__ = ['BaseSegmentor', 'EncoderDecoder', 'CascadeEncoderDecoder']
| 1.1875 | 1 |
simplemooc/forum/admin.py | KelsonMaciel/simplemooc- | 0 | 12790691 | from django.contrib import admin
from .models import Thread, Reply
class ThreadAdmin(admin.ModelAdmin):
list_display = ['title', 'author', 'created', 'modifield']
search_fields = ['title', 'author__email', 'body']
prepopulated_fields = {'slug':('title',)}
class ReplyAdmin(admin.ModelAdmin):
list_display = ['thread', 'author', 'created', 'modifield']
search_fields = ['thread__title', 'author__email', 'reply']
admin.site.register(Thread,ThreadAdmin)
admin.site.register(Reply,ReplyAdmin)
| 1.914063 | 2 |
bin/points_to_features.py | digital-land/dataset | 1 | 12790692 | #!/usr/bin/env python3
import sys
import json
# add parent directory
sys.path.append(".")
from utils import get
from digital_land_frontend.render import wkt_to_json_geometry
sample_file = "docs/brownfield-land/organisation/local-authority-eng/HAG/sites.json"
def create_feature_collection(features):
return {"type": "FeatureCollection", "features": features}
def create_feature(row):
feature = {"type": "Feature"}
feature["properties"] = row
if row["point"] is not None:
feature["geometry"] = wkt_to_json_geometry(row["point"])
return feature
def convert_json_to_geojson(data):
features = []
for row in data:
features.append(create_feature(row))
return create_feature_collection(features)
def test_convert(fn):
# if file local
with open(fn) as file:
data = json.load(file)
gjson = convert_json_to_geojson(data)
with open(
f"docs/brownfield-land/organisation/local-authority-eng/HAG/sites.geojson", "w"
) as file:
file.write(json.dumps(gjson))
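# --- Illustrative sketch (added for clarity, not part of the original script) ---
# Each input row carries a WKT "point" plus arbitrary properties, and
# convert_json_to_geojson() wraps the rows into a GeoJSON FeatureCollection.
# The toy example below builds one feature by hand (bypassing
# wkt_to_json_geometry, whose implementation lives in digital_land_frontend)
# just to show the output structure; the coordinates and properties are made up.
def _toy_feature_collection():
    toy_geometry = {"type": "Point", "coordinates": [-0.1276, 51.5072]}
    toy_feature = {
        "type": "Feature",
        "properties": {"site": "example", "point": "POINT (-0.1276 51.5072)"},
        "geometry": toy_geometry,
    }
    return create_feature_collection([toy_feature])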
| 2.578125 | 3 |
bsbetl/alltable_calcs/_2StVols_SlowDailyVols.py | t0rus1/bsbetl | 0 | 12790693 | from bsbetl import ov_helpers
import logging
import math
from datetime import date, datetime
from numpy.core.numeric import NaN
from pandas.core.indexes.base import Index
import pandas as pd
from bsbetl.alltable_calcs import Calculation
from bsbetl.alltable_calcs.at_params import at_calc_params
from bsbetl.calc_helpers import between_dates_condition, get_row_index_from_daily_df, last_trading_row_index, single_day_condition
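# --- Illustrative sketch (added for clarity, not part of the production calculation) ---
# Every SDVxx column computed by the class below follows the same asymmetric
# smoothing rule: when today's daily volume DV exceeds yesterday's smoothed
# value S, S rises by Y_up * (DV - S) ** e_up; otherwise it falls by
# Y_dn * (S - DV) ** e_dn. The generic helper below restates that one-day
# update with neutral parameter names; it is not called by the class.
def _smoothed_volume_step(prev_s, dv, y_up, e_up, y_dn, e_dn):
    """One day's update of a slow daily volume series."""
    if dv > prev_s:
        return prev_s + y_up * (dv - prev_s) ** e_up
    return prev_s - y_dn * (prev_s - dv) ** e_dn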
class _2StVols_SlowDailyVols(Calculation.Calculation):
def __init__(self):
super().__init__('SlowDailyVols')
self.description = 'Modified Daily Volume calculation'
self.dependents = ['DV'] # this column we assume exists
self.at_computeds = ['DaysDVup', 'SDVBsl', 'SDVBm', 'SDVBf',
'SDVCsl', 'SDVCm', 'SDVCf1', 'SDVCf2',
'DVFDf', 'DVFf1', 'DVFf2', 'DVFm', 'DVFsl'
]
self.ov_computeds = []
def day_calculate(self, df: pd.DataFrame, share_num: str, idx: Index, prior: Index, top_up: bool, stage: int):
''' Implementation per Gunther's 210209 Calc Daily Vol Initial Stage.odt
Daily Vol 1. Make Slow Daily Vols:
Calculates the 'computeds' of single (daily) row of the df
'''
assert stage == 2, f'{self.name} calculation should only run at stage 2'
# df is assumed daily since stage 2 is asserted
# print(f'prior_idx={prior},idx={idx}')
curday_ordinal = df.index.tolist().index(idx[0])
#print(f'_2StVols_SlowDailyVols:day_calculate: curday_ordinal={curday_ordinal}')
# 1a) Slow Daily Vol Basic slow "SDVBsl":
#print(f"{idx[0]} DV= {df.at[idx[0], 'DV']}")
if (prior is None):
# first row
df.at[idx[0], 'DaysDVup'] = 0
# compute starting SlowVols figures by using average of 1st 5 days Volume
DV_avg = df.iloc[:5]['ODV'].mean(0)
df.at[idx[0],'SDVBsl'] = DV_avg
df.at[idx[0],'SDVBm'] = DV_avg
df.at[idx[0],'SDVBf'] = DV_avg
df.at[idx[0],'SDVCsl'] = DV_avg
df.at[idx[0],'SDVCm'] = DV_avg
df.at[idx[0],'SDVCf1'] = DV_avg
df.at[idx[0],'SDVCf2'] = DV_avg
elif df.at[idx[0], 'DV'] > df.at[prior[0], 'SDVBsl']:
# Case 1: DV D > SDVBsl D-1
# we're not on the very first row
# "DaysDVupD" is the number of days in a row the Slow Daily Vol Basic slow "SDVBsl D" increased.
up_till = between_dates_condition(df, df.index[0], prior[0])
up_tillDF = df[up_till]
#print(f"up_tillDF rows={up_tillDF.shape[0]} {df.index[0]} -> {prior[0]}")
if up_tillDF['SDVBsl'].is_monotonic_increasing:
# been increasing till this row, write the count in DaysDVup
#print(f'up_tilDF rows={up_tillDF.shape[0]}')
daysDVup = min(up_tillDF.shape[0], 50) # not more than 50
daysDVup = max(1, daysDVup) # not less than 1
df.at[idx[0], 'DaysDVup'] = daysDVup
else:
daysDVup = 1
df.at[idx[0], 'DaysDVup'] = daysDVup
# SDVB sl D = SDVBsl D-1 + YDVBsl u / DaysDVupD * ( DVD - SDVB sl D-1)eSDVBsl u
df.at[idx[0], 'SDVBsl'] = df.at[prior[0], 'SDVBsl'] + (at_calc_params['atp_YDVBslu']['setting']/daysDVup) * (
df.at[idx[0], 'DV'] - df.at[prior[0], 'SDVBsl']) ** at_calc_params['atp_eSDVBslu']['setting']
elif df.at[idx[0], 'DV'] <= df.at[prior[0], 'SDVBsl']:
# Case 2: DVD < SDVBsl D-1
# SDVBsl D = SDVBsl D-1 - YDVBsl d * (SDVB sl D-1 - DVD)eSDVBsl d
df.at[idx[0], 'SDVBsl'] = df.at[prior[0], 'SDVBsl'] - at_calc_params['atp_YDVBsld']['setting'] * (
df.at[prior[0], 'SDVBsl']-df.at[idx[0], 'DV']) ** at_calc_params['atp_eSDVBsld']['setting']
# 1b) Slow Daily Vol Basic medium "SDVB m D":
if (prior is None):
# first row
pass
elif df.at[idx[0], 'DV'] > df.at[prior[0], 'SDVBm']:
# Case 1: DVD > SDVBm D-1
# SDVBm D = SDVBm D-1 + YDVBm u * ( DVD - SDVBm D-1)eSDVBm u
df.at[idx[0], 'SDVBm'] = df.at[prior[0], 'SDVBm'] + at_calc_params['atp_YDVBmu']['setting'] * (
df.at[idx[0], 'DV'] - df.at[prior[0], 'SDVBm']) ** at_calc_params['atp_eSDVBmu']['setting']
elif df.at[idx[0], 'DV'] <= df.at[prior[0], 'SDVBm']:
# Case 2: DVD < SDVBm D-1
# SDVBm D = SDVBm D-1 - YDVB m d * (SDVBm D-1 - DVD)eSDVBm d
df.at[idx[0], 'SDVBm'] = df.at[prior[0], 'SDVBm'] - at_calc_params['atp_YDVBmd']['setting'] * (
df.at[prior[0], 'SDVBm']-df.at[idx[0], 'DV']) ** at_calc_params['atp_eSDVBmd']['setting']
# 1c) Slow Daily Vol Basic fast "SDVB bf D":
if (prior is None):
# first row
pass
elif df.at[idx[0], 'DV'] > df.at[prior[0], 'SDVBf']:
# Case 1: DVD > SDVBf D-1
# SDVBf D = SDVBf D-1 + YDVBf u * ( DVD - SDVBf D-1)eSDVBf u
df.at[idx[0], 'SDVBf'] = df.at[prior[0], 'SDVBf'] + at_calc_params['atp_YDVBfu']['setting'] * (
df.at[idx[0], 'DV'] - df.at[prior[0], 'SDVBf']) ** at_calc_params['atp_eSDVBfu']['setting']
elif df.at[idx[0], 'DV'] <= df.at[prior[0], 'SDVBf']:
# Case 2: DVD < SDVBf D-1
# SDVBf D = SDVBf D-1 - YDVB f d * (SDVBf D-1 - DVD)eSDVBf d
df.at[idx[0], 'SDVBf'] = df.at[prior[0], 'SDVBf'] - at_calc_params['atp_YDVBfd']['setting'] * (
df.at[prior[0], 'SDVBf']-df.at[idx[0], 'DV']) ** at_calc_params['atp_eSDVBfd']['setting']
# 1d) Slow Daily Vol Compare slow "SDVCsl D":
if (prior is None):
# first row
pass
elif df.at[idx[0], 'DV'] > df.at[prior[0], 'SDVCsl']:
# Case 1: DVD > SDVCsl D-1
# SDVCsl D = SDVCsl D-1 + YDVCsl u * ( DVD - SDVCsl D-1)eSDVCsl u
df.at[idx[0], 'SDVCsl'] = df.at[prior[0], 'SDVCsl'] + at_calc_params['atp_YDVCslu']['setting'] * (
df.at[idx[0], 'DV'] - df.at[prior[0], 'SDVCsl']) ** at_calc_params['atp_eSDVCslu']['setting']
elif df.at[idx[0], 'DV'] <= df.at[prior[0], 'SDVCsl']:
# Case 2: DVD < SDVCsl D-1
# SDVCsl D = SDVCsl D-1 - YDVC sl d * (SDVCsl D-1 - DVD)eSDVCsl d
df.at[idx[0], 'SDVCsl'] = df.at[prior[0], 'SDVCsl'] - at_calc_params['atp_YDVCsld']['setting'] * (
df.at[prior[0], 'SDVCsl']-df.at[idx[0], 'DV']) ** at_calc_params['atp_eSDVCsld']['setting']
# 1e) Slow Daily Vol Compare medium "SDVCm D":
if (prior is None):
# first row
pass
elif df.at[idx[0], 'DV'] > df.at[prior[0], 'SDVCm']:
# Case 1: DVD > SDVCm D-1
# SDVCm D = SDVCm D-1 + YDVCm u * ( DVD - SDVCm D-1)eSDVCm u
df.at[idx[0], 'SDVCm'] = df.at[prior[0], 'SDVCm'] + at_calc_params['atp_YDVCmu']['setting'] * (
df.at[idx[0], 'DV'] - df.at[prior[0], 'SDVCm']) ** at_calc_params['atp_eSDVCmu']['setting']
elif df.at[idx[0], 'DV'] <= df.at[prior[0], 'SDVCm']:
# Case 2: DVD < SDVCm D-1
# SDVCm D = SDVCm D-1 - YDVC m d * (SDVCm D-1 - DVD)eSDVCm d
df.at[idx[0], 'SDVCm'] = df.at[prior[0], 'SDVCm'] - at_calc_params['atp_YDVCmd']['setting'] * (
df.at[prior[0], 'SDVCm']-df.at[idx[0], 'DV']) ** at_calc_params['atp_eSDVCmd']['setting']
# 1f) Slow Daily Vol Compare fast1 "SDVCf1 D":
if (prior is None):
# first row
pass
elif df.at[idx[0], 'DV'] > df.at[prior[0], 'SDVCf1']:
# Case 1: DVD > SDVCf1 D-1
# SDVCm D = SDVCf1 D-1 + YDVCf1 u * ( DVD - SDVCf1 D-1)eSDVCf1 u
df.at[idx[0], 'SDVCf1'] = df.at[prior[0], 'SDVCf1'] + at_calc_params['atp_YDVCf1u']['setting'] * (
df.at[idx[0], 'DV'] - df.at[prior[0], 'SDVCf1']) ** at_calc_params['atp_eSDVCf1u']['setting']
elif df.at[idx[0], 'DV'] <= df.at[prior[0], 'SDVCf1']:
# Case 2: DVD < SDVCf1 D-1
# SDVCm D = SDVCf1 D-1 - YDVC f1 d * (SDVCf1 D-1 - DVD)eSDVCf1 d
df.at[idx[0], 'SDVCf1'] = df.at[prior[0], 'SDVCf1'] - at_calc_params['atp_YDVCf1d']['setting'] * (
df.at[prior[0], 'SDVCf1']-df.at[idx[0], 'DV']) ** at_calc_params['atp_eSDVCf1d']['setting']
# 1g) Slow Daily Vol Compare fast1 "SDVCf2 D":
if (prior is None):
# first row
pass
elif df.at[idx[0], 'DV'] > df.at[prior[0], 'SDVCf2']:
# Case 1: DVD > SDVCf2 D-1
# SDVCf2 D = SDVCf2 D-1 + YDVCf2 u * ( DVD - SDVCf2 D-1)eSDVCf2 u
df.at[idx[0], 'SDVCf2'] = df.at[prior[0], 'SDVCf2'] + at_calc_params['atp_YDVCf2u']['setting'] * (
df.at[idx[0], 'DV'] - df.at[prior[0], 'SDVCf2']) ** at_calc_params['atp_eSDVCf2u']['setting']
elif df.at[idx[0], 'DV'] <= df.at[prior[0], 'SDVCf2']:
# Case 2: DVD < SDVCf2 D-1
# SDVCf2 D = SDVCf2 D-1 - YDVC f2 d * (SDVCf2 D-1 - DVD)eSDVCf2 d
df.at[idx[0], 'SDVCf2'] = df.at[prior[0], 'SDVCf2'] - at_calc_params['atp_YDVCf2d']['setting'] * (
df.at[prior[0], 'SDVCf2']-df.at[idx[0], 'DV']) ** at_calc_params['atp_eSDVCf2d']['setting']
# 1h) As in the old ShW, we need figures to show a volumes constellation, the Daily Vols Figure, "DVFxx"
if not prior is None:
# 'DVFDf' ???
# df.at[idx[0], 'DVFDf'] = df.at[idx[0], 'DV'] / df.at[idx[0], 'SDVBf']
# 'DVFf3'
if curday_ordinal >= 1:
_1_back=df.index[curday_ordinal-1]
df.at[idx[0], 'DVFf3'] = df.at[idx[0], 'DV'] / df.at[_1_back, 'SDVBf']
# 'DVFf2'
if curday_ordinal >= 2:
_2_back=df.index[curday_ordinal-2]
df.at[idx[0], 'DVFf2'] = df.at[idx[0], 'SDVCf2'] / df.at[_2_back, 'SDVBf']
# 'DVFf1'
if curday_ordinal >= 3:
_3_back=df.index[curday_ordinal-3]
df.at[idx[0], 'DVFf1'] = df.at[idx[0], 'SDVCf1'] / df.at[_3_back, 'SDVBf']
# 'DVFm'
df.at[idx[0], 'DVFm'] = df.at[idx[0], 'SDVCm'] / df.at[idx[0], 'SDVBm']
# 'DVFsl'
df.at[idx[0], 'DVFsl'] = df.at[idx[0], 'SDVCsl'] / df.at[idx[0], 'SDVBsl']
''' additional calcs performed AFTER day by day operations '''
def wrap_up(self, df, share_num, calc_dates_in_df, top_up, stage):
assert stage == 2, f'{self.name} wrap_up calculation should only run at stage 2'
# assign into Ov SDVBf.D-1, and SDVBf.D-2
try:
ov_helpers.global_ov_update(share_num, 'SDVBf.D-1', df.loc[df.index[-2],'SDVBf'])
ov_helpers.global_ov_update(share_num, 'SDVBf.D-2', df.loc[df.index[-3],'SDVBf'])
except IndexError as exc:
logging.error(f'_2StVols_SlowDailyVols wrap_up exception {exc}')
return | 2.171875 | 2 |
yeast_mrcnn/model.py | Hekstra-Lab/yeast-mrcnn | 0 | 12790694 | __all__ = [
"make_mrcnn",
"mrcnn",
]
import torch
from torchvision.models.detection import MaskRCNN, maskrcnn_resnet50_fpn
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models.detection.transform import GeneralizedRCNNTransform
def make_mrcnn():
model = maskrcnn_resnet50_fpn(
num_classes=2, pretrained_backbone=True, trainable_backbone_layers=5
)
transform = GeneralizedRCNNTransform(
min_size=800, max_size=1333, image_mean=[0], image_std=[1]
)
model.transform = transform
model.backbone.body.conv1 = torch.nn.Conv2d(
1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
)
return model
def mrcnn():
# Get a resnet50 fpn backbone and change the first layer for grayscale
backbone = resnet_fpn_backbone("resnet50", pretrained=True, trainable_layers=5)
backbone.body.conv1 = torch.nn.Conv2d(
1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
)
# Make anchor generator with 3 sizes per feature map and 5 aspect ratios
sizes = tuple(2.0 ** x for x in range(5, 12))
aspects = tuple(0.5 * x for x in range(1, 5))
n_feature_maps = 5 # true for resnet50 with FPN
ag_sizes = tuple(tuple(sizes[i : i + 3]) for i in range(n_feature_maps))
ag_aspects = n_feature_maps * (aspects,)
anchor_generator = AnchorGenerator(sizes=ag_sizes, aspect_ratios=ag_aspects)
# Assemble into MaskRCNN
mrcnn = MaskRCNN(
backbone,
2,
image_mean=[0],
image_std=[1],
rpn_anchor_generator=anchor_generator,
)
return mrcnn
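# --- Illustrative sketch (added for clarity, not part of the library) ---
# Both factories return a torchvision Mask R-CNN whose first convolution and
# normalisation statistics expect single-channel (grayscale) images. In eval
# mode the model takes a list of CxHxW tensors and returns one dict per image
# with "boxes", "labels", "scores" and "masks". The snippet below shows that
# calling convention on a random 512x512 image (an arbitrary size); note that
# the pretrained backbone weights are downloaded on first use.
def _demo_forward():
    model = make_mrcnn()
    model.eval()
    with torch.no_grad():
        outputs = model([torch.rand(1, 512, 512)])
    return sorted(outputs[0].keys())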
| 2.125 | 2 |
get_article.py | Astruj/Regional-English-Wiki | 0 | 12790695 | <filename>get_article.py
import kdap
knol = kdap.knol()
knol.get_wiki_article('Citizenship (Amendment) Act, 2019','/home/akash/Desktop/pro')
a=kdap.analysis.knol()
| 1.5 | 2 |
SQLFileManager.py | whde/Movie | 1 | 12790696 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import threading
mu = threading.Lock()
def create_sql_file():
    open('sql.txt', 'w+', encoding='utf-8').close()  # create/truncate the file and release the handle
def lock_test(sql):
if mu.acquire(True):
write_to_file(sql)
mu.release()
def write_to_file(sql):
fp = open('sql.txt', 'a+')
print('write start!')
try:
fp.write(sql)
finally:
fp.close()
print('write finish!')
def read_sql_file():
fp = open('sql.txt', 'r+')
return fp.read()
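# --- Illustrative sketch (added for clarity, not part of the original module) ---
# lock_test() serialises concurrent appends through the module-level lock, so
# statements written by different threads do not interleave inside sql.txt.
# The demo below uses hypothetical SQL text and overwrites sql.txt, so run it
# only when that is acceptable.
def _demo_concurrent_writes():
    create_sql_file()
    threads = [
        threading.Thread(target=lock_test, args=("INSERT INTO movie VALUES (%d);\n" % i,))
        for i in range(3)
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return read_sql_file()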
| 3.109375 | 3 |
Beecrowd/Python/2152 Pepe, I Already Took the Candle!.py | nazmul629/OJ-Problem_Solution | 0 | 12790697 | for i in range(int(input())):
H,M,O =map(int,input().split())
if H <10:
H='0'+str(H)
if M <10:
M='0'+str(M)
if O<1:
print(f"{H}:{M} - A porta fechou!")
else:
print(f"{H}:{M} - A porta abriu!")
| 3.21875 | 3 |
pyramid_sms/outgoing.py | timgates42/pyramid_sms | 6 | 12790698 | """Outgoing SMS API."""
import logging
import pkg_resources
from pyramid.renderers import render
from pyramid.settings import asbool
from pyramid_sms.utils import get_sms_backend
try:
pkg_resources.get_distribution('websauna')
from websauna.system.http import Request
from websauna.system.task.tasks import task
from websauna.system.task.tasks import ScheduleOnCommitTask
    HAS_WEBSAUNA = True
except pkg_resources.DistributionNotFound:
from pyramid.request import Request
HAS_WEBSAUNA = False
from .interfaces import SMSConfigurationError
from .events import SMSSent
logger = logging.getLogger(__name__)
def _send_sms(request, receiver, text_body, sender, log_failure):
"""Perform actual SMS outbound operation through a configured service."""
service = get_sms_backend(request)
service.send_sms(receiver, text_body, sender, log_failure)
if HAS_WEBSAUNA:
# TODO: Factor this to a separate configurable module
@task(base=ScheduleOnCommitTask, bind=True)
def _send_sms_async(self, receiver, from_, text_body, log_failure):
"""Celery task to send the SMS synchronously outside HTTP request proccesing."""
request = self.request.request
_send_sms(request, receiver, from_, text_body, log_failure)
def send_sms(request: Request, receiver: str, text_body: str, sender: str=None, log_failure: bool=True, _async: bool=None, user_dialog: bool=False):
"""Send outgoing SMS message using the default configured SMS service.
Example:
.. code-block:: python
def test_sms_view(request):
'''Dummy view to simulate outgoing SMS.'''
send_sms(request, "+15551231234", "Test message")
:param receiver: Receiver's phone number as international format. You should normalize this number from all user input before passing in. See :py:mod:`pyramid_sms.utils` for examples.
:param text_body: Outbound SMS body. Usually up to 1600 characters.
:param sender: Envelope from number. Needs to be configured in the service. If none use default configured "sms.default_from".
:param log_failure: If there is an exception from the SMS backend then log this using Python logging system. Otherwise raise the error as an exception.
    :param _async: Force asynchronous operation through task subsystem. If ``None`` respect ``sms.async`` settings. If the operation is asynchronous, this function returns instantly and does not block the HTTP request due to slow API calls to a third party service.
:param user_dialog: This SMS is part of a dialog with a known user. Use this flag to log messages with the user in your conversation dashboard. Set ``False`` to two-factor auth tokens and such.
:raise SMSConfigurationError: If configuration settings are missing
"""
if _async is None:
_async = request.registry.settings.get("sms.async")
if _async is None:
raise SMSConfigurationError("sms.async setting not defined")
_async = asbool(_async)
if sender is None:
sender = request.registry.settings.get("sms.default_sender")
if not sender:
raise SMSConfigurationError("sms.default_sender not configured")
# https://www.twilio.com/help/faq/sms/does-twilio-support-concatenated-sms-messages-or-messages-over-160-characters
if len(text_body) >= 1600:
logger.warn("Too long SMS: %s", text_body)
logger.info("Queuing sending SMS to: %s, body: %s", receiver, text_body)
# Put the actual Twilio operation async queue
if _async:
if not HAS_WEBSAUNA:
raise SMSConfigurationError("Async operations are only supported with Websauna framework")
_send_sms_async.apply_async(args=(receiver, text_body, sender, log_failure,))
else:
_send_sms(request, receiver, text_body, sender, log_failure)
request.registry.notify(SMSSent(request, receiver, text_body, sender, user_dialog))
def send_templated_sms(request: Request, template: str, context: dict, receiver: str, sender: str=None, log_failure: bool=True, _async: bool=None, user_dialog: bool=False):
"""Send out a SMS that is constructed using a page template.
Same as :py:meth:`pyramid_sms.outgoing.send_sms`, but uses templates instead of hardcoded messages.
:param request: HTTP request
:param template: Template name. Like ``welcome_sms.txt.jinja``.
:param context: Dictionary passed to template rendering engine
"""
text_body = render(template, context, request=request)
send_sms(request, receiver, text_body, sender, log_failure, _async, user_dialog)
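# --- Illustrative sketch (added for clarity, not part of the package) ---
# send_templated_sms() simply renders a page template into the SMS body before
# delegating to send_sms(). The dummy view below mirrors the send_sms()
# docstring example; the template name, context keys and phone number are
# placeholders an application would supply.
def templated_sms_view(request):
    """Dummy view to simulate an outgoing templated SMS."""
    context = {"first_name": "Alice", "code": "123456"}
    send_templated_sms(request, "welcome_sms.txt.jinja", context, "+15551231234")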
| 2.5 | 2 |
labelprop/lightning_model.py | nathandecaux/labelprop | 0 | 12790699 | import torch
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl
import kornia
from .voxelmorph2d import VxmDense,NCC,Grad,Dice
from monai.losses import BendingEnergyLoss,GlobalMutualInformationLoss,DiceLoss,LocalNormalizedCrossCorrelationLoss
from kornia.filters import sobel, gaussian_blur2d,canny,spatial_gradient
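# --- Illustrative sketch (added for clarity, not part of the original model) ---
# In training_step below, a "chunk" is a pair of annotated slice indices
# (i, j), j > i, such that both slices carry a ground-truth mask for the
# current label; propagation is then trained across every hop in between.
# The helper below reproduces just that bookkeeping on a plain list of
# booleans (True = slice has a mask); it is not called by the class.
def _annotated_chunks(has_mask):
    """Return consecutive pairs of annotated slice indices, e.g. [[2, 7], [7, 11]]."""
    chunks, chunk = [], []
    for i, annotated in enumerate(has_mask):
        if annotated:
            chunk.append(i)
            if len(chunk) == 2:
                chunks.append(chunk)
                chunk = [i]
    return chunks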
class LabelProp(pl.LightningModule):
@property
def automatic_optimization(self):
return False
def norm(self, x):
if len(x.shape)==4:
x = kornia.enhance.normalize_min_max(x)
elif len(x.shape)==3:
x= kornia.enhance.normalize_min_max(x[:, None, ...])[:,0, ...]
else:
x = kornia.enhance.normalize_min_max(x[None, None, ...])[0, 0, ...]
return x
def __init__(self,n_channels=1,n_classes=2,learning_rate=5e-3,weight_decay=1e-8,way='up',shape=256,selected_slices=None,losses={},by_composition=False):
super().__init__()
self.n_classes = n_classes
self.learning_rate=learning_rate
self.weight_decay=weight_decay
self.selected_slices=selected_slices #Used in validation step
if isinstance(shape,int):shape=[shape,shape]
self.registrator= VxmDense(shape,bidir=False,int_downsize=1,int_steps=7)
self.way=way #If up, learning only "forward" transitions (phi_i->j with j>i). Other choices : "down", "both". Bet you understood ;)
self.by_composition=by_composition
self.loss_model = MTL_loss(['sim','seg','comp','smooth'])
self.losses=losses
if self.by_composition: print('Using composition for training')
print('Losses',losses)
self.save_hyperparameters()
def apply_deform(self,x,field):
"""Apply deformation to x from flow field
Args:
x (Tensor): Image or mask to deform (BxCxHxW)
field (Tensor): Deformation field (Bx2xHxW)
Returns:
Tensor: Transformed image
"""
return self.registrator.transformer(x,field)
def compose_list(self,flows):
flows=list(flows)
compo=flows[-1]
for flow in reversed(flows[:-1]):
compo=self.compose_deformation(flow,compo)
return compo
def compose_deformation(self,flow_i_k,flow_k_j):
""" Returns flow_k_j(flow_i_k(.)) flow
Args:
flow_i_k
flow_k_j
Returns:
[Tensor]: Flow field flow_i_j = flow_k_j(flow_i_k(.))
"""
flow_i_j= flow_k_j+self.apply_deform(flow_i_k,flow_k_j)
return flow_i_j
def forward(self, moving,target,registration=True):
"""
Args:
moving (Tensor): Moving image (BxCxHxW)
            target (Tensor): Fixed image (BxCxHxW)
            registration (bool, optional): If False, also return the non-integrated inverse flow field. Else return the integrated one. Defaults to True.
Returns:
moved (Tensor): Moved image
field (Tensor): Deformation field from moving to target
"""
return self.registrator.forward(moving,target,registration=registration)
# def multi_level_training(self,moving,target,level=3):
# """
# Args:
# moving (Tensor): Moving image (BxCxHxW)
# target ([type]): Fixed image (BxCxHxW)
# registration (bool, optional): If False, also return non-integrated inverse flow field. Else return the integrated one. Defaults to False.
# Returns:
# moved (Tensor): Moved image
# field (Tensor): Deformation field from moving to target
# """
# stack_moved=[]
# stack_field=[]
# stack_preint=[]
# resampling=torch.nn.Upsample(size=self.shape,mode='bilinear',align_corners=True)
# for i in range(level):
# downsampling=nn.Upsample(scale_factor=1/(i+1), mode='bilinear',align_corners=True)
# downsampled_moving=downsampling(moving)
# downsampled_target=downsampling(target)
# moved,field,preint_field=self.forward(downsampled_moving,downsampled_target)
# self.compute_loss(moved,target,field=field)
# stack_moved.append(moved)
# stack_field.append(field)
# stack_preint.append(preint_field)
# return torch.stack(stack_moved,0).mean(0),torch.stack(stack_field,0).mean(0),torch.stack(stack_preint,0).mean(0)
def compute_loss(self,moved=None,target=None,moved_mask=None,target_mask=None,field=None):
"""
Args:
moved : Transformed anatomical image
target : Target anatomical image
moved_mask : Transformed mask
target_mask : Target mask
field : Velocity field (=non integrated)
"""
losses={}
if moved!=None:
# max_peak=F.conv2d(target,target).sum()
# loss_ncc=-F.conv2d(moved,target).sum()/max_peak#+NCC().loss(moved,target)
# loss_ncc=NCC().loss(moved,target)
loss_ncc=GlobalMutualInformationLoss()(moved,target)*0.8 #MONAI
# loss_ncc=LocalNormalizedCrossCorrelationLoss(spatial_dims=2, kernel_size=99)(moved,target) #MONAI
# loss_ncc=nn.MSELoss()(moved,target)
losses['sim']=loss_ncc
if moved_mask!=None:
# loss_seg= Dice().loss(moved_mask,target_mask)
loss_seg=DiceLoss(include_background=False)(moved_mask,target_mask)-1
losses['seg']=loss_seg
if field!=None:
# loss_trans=BendingEnergyLoss()(field) #MONAI
loss_trans=Grad().loss(field,field)
losses['smooth']=loss_trans
#Return dict of losses
return losses#{'sim': loss_ncc,'seg':loss_seg,'smooth':loss_trans}
def compute_contour_loss(self,img,moved_mask):
#Compute contour loss
mag,mask_contour=canny(moved_mask[:,1:2])
# edges,mag=canny(img)
return BendingEnergyLoss()(mag)
def weighting_loss(self,losses):
"""
Args:
losses (dict): Dictionary of losses
Returns:
loss (Tensor): Weighted loss
"""
def blend(self,x,y):
#For visualization
x=self.norm(x)
blended=torch.stack([y,x,x])
return blended
def training_step(self, batch, batch_nb):
X,Y=batch # X : Full scan (1x1xLxHxW) | Y : Ground truth (1xCxLxHxW)
y_opt=self.optimizers()
dices_prop=[]
Y_multi_lab=torch.clone(Y)
for lab in list(range(Y_multi_lab.shape[1]))[1:]:
chunks=[]
chunk=[]
#Binarize ground truth according to the label
Y=torch.stack([1-Y_multi_lab[:,lab],Y_multi_lab[:,lab]],dim=1)
#Identifying chunks (i->j)
for i in range(X.shape[2]):
y=Y[:,:,i,...]
if len(torch.unique(torch.argmax(y,1)))>1:
chunk.append(i)
if len(chunk)==2:
chunks.append(chunk)
chunk=[i]
if self.current_epoch==0:
print(lab,chunks)
for chunk in chunks:
y_opt.zero_grad()
#Sequences of flow fields (field_up=forward, field_down=backward)
fields_up=[]
fields_down=[]
loss_up_sim=[]
loss_up_smooth=[]
loss_down_sim=[]
loss_down_smooth=[]
loss=0
losses={'sim':None,'seg':None,'comp':None,'smooth':None}
for i in range(chunk[0],chunk[1]):
#Computing flow fields and loss for each hop from chunk[0] to chunk[1]
x1=X[:,:,i,...]
x2=X[:,:,i+1,...]
if not self.way=='down':
moved_x1,field_up,preint_field=self.forward(x1,x2,registration=False)
cur_loss=self.compute_loss(moved_x1,x2,field=preint_field)
loss_up_sim.append(cur_loss['sim'])
loss_up_smooth.append(cur_loss['smooth'])
# field_down=self.registrator.integrate(-preint_field)
# moved_x2=self.registrator.transformer(x2,field_down)
# loss_up_sim.append(self.compute_loss(moved_x2,x1)['sim'])
fields_up.append(field_up)
# if len(fields_up)>0:
# field_up_2=self.compose_deformation(fields_up[-1],field_up)
# loss_up.append(self.compute_loss(self.apply_deform(X[:,:,i-1],field_up_2),x2))
if not self.way=='up':
moved_x2,field_down,preint_field=self.forward(x2,x1,registration=False)#
fields_down.append(field_down)
moved_x2=self.registrator.transformer(x2,field_down)
cur_loss=self.compute_loss(moved_x2,x1,field=preint_field)
loss_down_sim.append(cur_loss['sim'])
loss_down_smooth.append(cur_loss['smooth'])
# field_up=self.registrator.integrate(-preint_field)
# moved_x1=self.registrator.transformer(x1,field_up)
# loss_down_sim.append(self.compute_loss(moved_x1,x2)['sim'])
# if len(fields_down)>0:
# field_down_2=self.compose_deformation(fields_down[-1],field_down)
# loss_down.append(self.compute_loss(self.apply_deform(X[:,:,i+1],field_down_2),x1))
#Better with mean
            if self.way=='up':
                #NOTE: the original referenced undefined names loss_up/loss_down; the similarity-loss lists defined above are used instead
                loss=torch.stack(loss_up_sim).mean()
            elif self.way=='down':
                loss=torch.stack(loss_down_sim).mean()
else:
losses['sim']=torch.stack(loss_up_sim).mean()+torch.stack(loss_down_sim).mean()
losses['smooth']=torch.stack(loss_up_smooth).mean()+torch.stack(loss_down_smooth).mean()
# loss=(loss_up+loss_down)
# Computing registration from the sequence of flow fields
if not self.way=='down':
prop_x_up=X[:,:,chunk[0],...]
prop_y_up=Y[:,:,chunk[0],...]
composed_fields_up=self.compose_list(fields_up)
if self.by_composition:
prop_x_up=self.apply_deform(prop_x_up,composed_fields_up)
prop_y_up=self.apply_deform(prop_y_up,composed_fields_up)
else:
for i,field_up in enumerate(fields_up):
prop_x_up=self.apply_deform(prop_x_up,field_up)
prop_y_up=self.apply_deform(prop_y_up,field_up)
losses['contours']=self.compute_contour_loss(X[:,:,chunk[0]+i+1],prop_y_up)
if self.losses['compo-reg-up']:
losses['comp']=self.compute_loss(prop_x_up,X[:,:,chunk[1],...])['sim']
if self.losses['compo-dice-up']:
dice_loss=self.compute_loss(moved_mask=prop_y_up,target_mask=Y[:,:,chunk[1],...])['seg']
losses['seg']=dice_loss
dices_prop.append(dice_loss)
if not self.way=='up':
prop_x_down=X[:,:,chunk[1],...]
prop_y_down=Y[:,:,chunk[1],...]
composed_fields_down=self.compose_list(fields_down[::-1])
if self.by_composition:
prop_x_down=self.apply_deform(prop_x_down,composed_fields_down)
prop_y_down=self.apply_deform(prop_y_down,composed_fields_down)
else:
i=1
for field_down in reversed(fields_down):
prop_x_down=self.apply_deform(prop_x_down,field_down)
prop_y_down=self.apply_deform(prop_y_down,field_down)
losses['contours']+=self.compute_contour_loss(X[:,:,chunk[1]-i],prop_y_down)
i+=1
if self.losses['compo-reg-down']:
losses['comp']+=self.compute_loss(prop_x_down,X[:,:,chunk[0],...])['sim']
if self.losses['compo-dice-down']:
dice_loss=self.compute_loss(moved_mask=prop_y_down,target_mask=Y[:,:,chunk[0],...])['seg']
losses['seg']+=dice_loss
dices_prop.append(dice_loss)
            #Additional loss to ensure sequences (images and masks) generated from "positive" and "negative" flows are equal
# if self.way=='both':
# #This helps
# if self.losses['bidir-cons-dice']:
# loss+=self.compute_loss(moved_mask=prop_y_down,target_mask=prop_y_up)
# #This breaks stuff
# if self.losses['bidir-cons-reg']:
# loss+=self.compute_loss(prop_x_up,prop_x_down)
# loss+=nn.L1Loss()(self.apply_deform(X[:,:,chunk[0],...], self.compose_deformation(composed_fields_up,composed_fields_down)),X[:,:,chunk[0],...])
# loss+=nn.L1Loss()(self.apply_deform(X[:,:,chunk[1],...], self.compose_deformation(composed_fields_down,composed_fields_up)),X[:,:,chunk[1],...])
loss=losses['seg']+losses['sim']+losses['contours']#+losses['smooth']#torch.stack([v for v in losses.values()]).mean()
# loss=self.loss_model(losses)
self.log_dict({'loss':loss},prog_bar=True)
self.manual_backward(loss)
y_opt.step()
# self.logger.experiment.add_image('x_true',X[0,:,chunk[0],...])
# self.logger.experiment.add_image('prop_x_down',prop_x_down[0,:,0,...])
# self.logger.experiment.add_image('x_true_f',X[0,:,chunk[1],...])
# self.logger.experiment.add_image('prop_x_up',prop_x_up[0,:,-1,...])
if len(dices_prop)>0:
dices_prop=-torch.stack(dices_prop).mean()
self.log('val_accuracy',dices_prop)
print(dices_prop)
else:
self.log('val_accuracy',self.current_epoch)
return loss
def register_images(self,moving,target,moving_mask):
moved,field=self.forward(moving,target,registration=True)
return moved,self.apply_deform(moving_mask,field),field
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay,amsgrad=True)
def hardmax(self,Y,dim):
return torch.moveaxis(F.one_hot(torch.argmax(Y,dim),self.n_classes), -1, dim)
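# The MTL_loss module below scales each task loss as 0.5*L_k/sigma_k**2 + log(sigma_k)
# with learnable sigma_k, i.e. an uncertainty-based multi-task weighting
# (interpretation assumed; the author does not name the scheme).
# Hypothetical usage sketch:
#   mtl = MTL_loss({'sim': 1., 'seg': 1., 'smooth': 1.})
#   total = mtl({'sim': loss_sim, 'seg': loss_seg, 'smooth': loss_smooth})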
class MTL_loss(torch.nn.Module):
def __init__(self, losses):
super().__init__()
start=1.
self.lw={}
self.sigmas = nn.ParameterDict()
        for k in losses:
            self.lw[k]= start
        self.set_dict(self.lw)
def set_dict(self, dic):
self.lw = dic
for k in dic.keys():
if dic[k] > 0:
self.sigmas[k] = nn.Parameter(torch.ones(1) * dic[k])
def forward(self, loss_dict):
loss = 0
with torch.set_grad_enabled(True):
for k in loss_dict.keys():
if k in self.lw.keys():
loss +=0.5 * loss_dict[k] / (self.sigmas[k])**2 + torch.log(self.sigmas[k])
return loss | 2.140625 | 2 |
MapHeightEval.py | hal-lab-u-tokyo/GenMap | 0 | 12790700 | # This file is part of GenMap and released under the MIT License, see LICENSE.
# Author: <NAME>
from EvalBase import EvalBase
import networkx as nx
import os
import signal
import math
main_pid = os.getpid()
class MapHeightEval(EvalBase):
def __init__(self):
pass
@staticmethod
def eval(CGRA, app, sim_params, individual, **info):
"""Return mapping height.
Args:
CGRA (PEArrayModel): A model of the CGRA
app (Application): An application to be optimized
sim_params (SimParameters): parameters for some simulations
individual (Individual): An individual to be evaluated
Returns:
int: mapping height
"""
y_coords = []
SEs = [v for v in individual.routed_graph.nodes() if CGRA.isSE(v)]
ALUs = [v for v in individual.routed_graph.nodes() if CGRA.isALU(v)]
width, height = CGRA.getSize()
for node in SEs + ALUs:
for x in range(width):
for y in range(height):
rsc = CGRA.get_PE_resources((x, y))
if node in [v for se_set in rsc["SE"].values() for v in se_set ] or \
node == rsc["ALU"]:
y_coords.append(y)
break
map_height = max(y_coords) + 1
if "quit_minheight" in info.keys():
if info["quit_minheight"] is True:
input_count = len(set(nx.get_node_attributes(\
app.getInputSubGraph(), "input").keys()))
output_count = len(set(nx.get_node_attributes(\
app.getOutputSubGraph(), "output").keys()))
minh_op = math.ceil(len(app.getCompSubGraph().nodes()) \
/ width)
if CGRA.isIOShared():
min_maph = max(math.ceil((input_count + output_count) / 2),\
minh_op)
else:
min_maph = max(input_count, output_count, minh_op)
if min_maph == map_height and individual.isValid():
os.kill(main_pid, signal.SIGUSR1)
return map_height
@staticmethod
def isMinimize():
return True
@staticmethod
def name():
return "Mapping_Height" | 2.171875 | 2 |
sdk/python/pulumi_gcp/iot/outputs.py | dimpu47/pulumi-gcp | 0 | 12790701 | <reponame>dimpu47/pulumi-gcp
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'DeviceConfig',
'DeviceCredential',
'DeviceCredentialPublicKey',
'DeviceGatewayConfig',
'DeviceLastErrorStatus',
'DeviceState',
'RegistryCredential',
'RegistryCredentialPublicKeyCertificate',
'RegistryEventNotificationConfigItem',
'RegistryHttpConfig',
'RegistryMqttConfig',
'RegistryStateNotificationConfig',
]
@pulumi.output_type
class DeviceConfig(dict):
def __init__(__self__, *,
binary_data: Optional[str] = None,
cloud_update_time: Optional[str] = None,
device_ack_time: Optional[str] = None,
version: Optional[str] = None):
if binary_data is not None:
pulumi.set(__self__, "binary_data", binary_data)
if cloud_update_time is not None:
pulumi.set(__self__, "cloud_update_time", cloud_update_time)
if device_ack_time is not None:
pulumi.set(__self__, "device_ack_time", device_ack_time)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="binaryData")
def binary_data(self) -> Optional[str]:
return pulumi.get(self, "binary_data")
@property
@pulumi.getter(name="cloudUpdateTime")
def cloud_update_time(self) -> Optional[str]:
return pulumi.get(self, "cloud_update_time")
@property
@pulumi.getter(name="deviceAckTime")
def device_ack_time(self) -> Optional[str]:
return pulumi.get(self, "device_ack_time")
@property
@pulumi.getter
def version(self) -> Optional[str]:
return pulumi.get(self, "version")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DeviceCredential(dict):
def __init__(__self__, *,
public_key: 'outputs.DeviceCredentialPublicKey',
expiration_time: Optional[str] = None):
"""
:param 'DeviceCredentialPublicKeyArgs' public_key: A public key used to verify the signature of JSON Web Tokens (JWTs).
Structure is documented below.
:param str expiration_time: The time at which this credential becomes invalid.
"""
pulumi.set(__self__, "public_key", public_key)
if expiration_time is not None:
pulumi.set(__self__, "expiration_time", expiration_time)
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> 'outputs.DeviceCredentialPublicKey':
"""
A public key used to verify the signature of JSON Web Tokens (JWTs).
Structure is documented below.
"""
return pulumi.get(self, "public_key")
@property
@pulumi.getter(name="expirationTime")
def expiration_time(self) -> Optional[str]:
"""
The time at which this credential becomes invalid.
"""
return pulumi.get(self, "expiration_time")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DeviceCredentialPublicKey(dict):
def __init__(__self__, *,
format: str,
key: str):
"""
:param str format: The format of the key.
Possible values are `RSA_PEM`, `RSA_X509_PEM`, `ES256_PEM`, and `ES256_X509_PEM`.
:param str key: The key data.
"""
pulumi.set(__self__, "format", format)
pulumi.set(__self__, "key", key)
@property
@pulumi.getter
def format(self) -> str:
"""
The format of the key.
Possible values are `RSA_PEM`, `RSA_X509_PEM`, `ES256_PEM`, and `ES256_X509_PEM`.
"""
return pulumi.get(self, "format")
@property
@pulumi.getter
def key(self) -> str:
"""
The key data.
"""
return pulumi.get(self, "key")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DeviceGatewayConfig(dict):
def __init__(__self__, *,
gateway_auth_method: Optional[str] = None,
gateway_type: Optional[str] = None,
last_accessed_gateway_id: Optional[str] = None,
last_accessed_gateway_time: Optional[str] = None):
"""
:param str gateway_auth_method: Indicates whether the device is a gateway.
Possible values are `ASSOCIATION_ONLY`, `DEVICE_AUTH_TOKEN_ONLY`, and `ASSOCIATION_AND_DEVICE_AUTH_TOKEN`.
:param str gateway_type: Indicates whether the device is a gateway.
Default value is `NON_GATEWAY`.
Possible values are `GATEWAY` and `NON_GATEWAY`.
:param str last_accessed_gateway_id: -
The ID of the gateway the device accessed most recently.
:param str last_accessed_gateway_time: -
The most recent time at which the device accessed the gateway specified in last_accessed_gateway.
"""
if gateway_auth_method is not None:
pulumi.set(__self__, "gateway_auth_method", gateway_auth_method)
if gateway_type is not None:
pulumi.set(__self__, "gateway_type", gateway_type)
if last_accessed_gateway_id is not None:
pulumi.set(__self__, "last_accessed_gateway_id", last_accessed_gateway_id)
if last_accessed_gateway_time is not None:
pulumi.set(__self__, "last_accessed_gateway_time", last_accessed_gateway_time)
@property
@pulumi.getter(name="gatewayAuthMethod")
def gateway_auth_method(self) -> Optional[str]:
"""
Indicates whether the device is a gateway.
Possible values are `ASSOCIATION_ONLY`, `DEVICE_AUTH_TOKEN_ONLY`, and `ASSOCIATION_AND_DEVICE_AUTH_TOKEN`.
"""
return pulumi.get(self, "gateway_auth_method")
@property
@pulumi.getter(name="gatewayType")
def gateway_type(self) -> Optional[str]:
"""
Indicates whether the device is a gateway.
Default value is `NON_GATEWAY`.
Possible values are `GATEWAY` and `NON_GATEWAY`.
"""
return pulumi.get(self, "gateway_type")
@property
@pulumi.getter(name="lastAccessedGatewayId")
def last_accessed_gateway_id(self) -> Optional[str]:
"""
-
The ID of the gateway the device accessed most recently.
"""
return pulumi.get(self, "last_accessed_gateway_id")
@property
@pulumi.getter(name="lastAccessedGatewayTime")
def last_accessed_gateway_time(self) -> Optional[str]:
"""
-
The most recent time at which the device accessed the gateway specified in last_accessed_gateway.
"""
return pulumi.get(self, "last_accessed_gateway_time")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DeviceLastErrorStatus(dict):
def __init__(__self__, *,
details: Optional[List[Mapping[str, Any]]] = None,
message: Optional[str] = None,
number: Optional[float] = None):
if details is not None:
pulumi.set(__self__, "details", details)
if message is not None:
pulumi.set(__self__, "message", message)
if number is not None:
pulumi.set(__self__, "number", number)
@property
@pulumi.getter
def details(self) -> Optional[List[Mapping[str, Any]]]:
return pulumi.get(self, "details")
@property
@pulumi.getter
def message(self) -> Optional[str]:
return pulumi.get(self, "message")
@property
@pulumi.getter
def number(self) -> Optional[float]:
return pulumi.get(self, "number")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DeviceState(dict):
def __init__(__self__, *,
binary_data: Optional[str] = None,
update_time: Optional[str] = None):
if binary_data is not None:
pulumi.set(__self__, "binary_data", binary_data)
if update_time is not None:
pulumi.set(__self__, "update_time", update_time)
@property
@pulumi.getter(name="binaryData")
def binary_data(self) -> Optional[str]:
return pulumi.get(self, "binary_data")
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> Optional[str]:
return pulumi.get(self, "update_time")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RegistryCredential(dict):
def __init__(__self__, *,
public_key_certificate: 'outputs.RegistryCredentialPublicKeyCertificate'):
"""
:param 'RegistryCredentialPublicKeyCertificateArgs' public_key_certificate: A public key certificate format and data.
"""
pulumi.set(__self__, "public_key_certificate", public_key_certificate)
@property
@pulumi.getter(name="publicKeyCertificate")
def public_key_certificate(self) -> 'outputs.RegistryCredentialPublicKeyCertificate':
"""
A public key certificate format and data.
"""
return pulumi.get(self, "public_key_certificate")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RegistryCredentialPublicKeyCertificate(dict):
def __init__(__self__, *,
certificate: str,
format: str):
"""
:param str certificate: The certificate data.
:param str format: The field allows only `X509_CERTIFICATE_PEM`.
"""
pulumi.set(__self__, "certificate", certificate)
pulumi.set(__self__, "format", format)
@property
@pulumi.getter
def certificate(self) -> str:
"""
The certificate data.
"""
return pulumi.get(self, "certificate")
@property
@pulumi.getter
def format(self) -> str:
"""
The field allows only `X509_CERTIFICATE_PEM`.
"""
return pulumi.get(self, "format")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RegistryEventNotificationConfigItem(dict):
def __init__(__self__, *,
pubsub_topic_name: str,
subfolder_matches: Optional[str] = None):
"""
:param str pubsub_topic_name: PubSub topic name to publish device events.
:param str subfolder_matches: If the subfolder name matches this string exactly, this
configuration will be used. The string must not include the
leading '/' character. If empty, all strings are matched. Empty
value can only be used for the last `event_notification_configs`
item.
"""
pulumi.set(__self__, "pubsub_topic_name", pubsub_topic_name)
if subfolder_matches is not None:
pulumi.set(__self__, "subfolder_matches", subfolder_matches)
@property
@pulumi.getter(name="pubsubTopicName")
def pubsub_topic_name(self) -> str:
"""
PubSub topic name to publish device events.
"""
return pulumi.get(self, "pubsub_topic_name")
@property
@pulumi.getter(name="subfolderMatches")
def subfolder_matches(self) -> Optional[str]:
"""
If the subfolder name matches this string exactly, this
configuration will be used. The string must not include the
leading '/' character. If empty, all strings are matched. Empty
value can only be used for the last `event_notification_configs`
item.
"""
return pulumi.get(self, "subfolder_matches")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RegistryHttpConfig(dict):
def __init__(__self__, *,
http_enabled_state: str):
"""
:param str http_enabled_state: The field allows `HTTP_ENABLED` or `HTTP_DISABLED`.
"""
pulumi.set(__self__, "http_enabled_state", http_enabled_state)
@property
@pulumi.getter(name="httpEnabledState")
def http_enabled_state(self) -> str:
"""
The field allows `HTTP_ENABLED` or `HTTP_DISABLED`.
"""
return pulumi.get(self, "http_enabled_state")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RegistryMqttConfig(dict):
def __init__(__self__, *,
mqtt_enabled_state: str):
"""
:param str mqtt_enabled_state: The field allows `MQTT_ENABLED` or `MQTT_DISABLED`.
"""
pulumi.set(__self__, "mqtt_enabled_state", mqtt_enabled_state)
@property
@pulumi.getter(name="mqttEnabledState")
def mqtt_enabled_state(self) -> str:
"""
The field allows `MQTT_ENABLED` or `MQTT_DISABLED`.
"""
return pulumi.get(self, "mqtt_enabled_state")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RegistryStateNotificationConfig(dict):
def __init__(__self__, *,
pubsub_topic_name: str):
"""
:param str pubsub_topic_name: PubSub topic name to publish device events.
"""
pulumi.set(__self__, "pubsub_topic_name", pubsub_topic_name)
@property
@pulumi.getter(name="pubsubTopicName")
def pubsub_topic_name(self) -> str:
"""
PubSub topic name to publish device events.
"""
return pulumi.get(self, "pubsub_topic_name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| 1.585938 | 2 |
LeetCode/Weekly Contests/Biweekly Contest 36/Find Servers That Handled Most Number of Requests.py | UtkarshPathrabe/Competitive-Coding | 13 | 12790702 | <filename>LeetCode/Weekly Contests/Biweekly Contest 36/Find Servers That Handled Most Number of Requests.py
from collections import Counter
from heapq import heappop, heappush
from typing import List
from sortedcontainers import SortedList
class Solution:
def busiestServers(self, k: int, arrival: List[int], load: List[int]) -> List[int]:
requestCounter, priorityQueue, serverList, maxRequestHandled = Counter(), [], SortedList(range(k)), 0
for requestNumber, (requestArrivalTime, requestLoad) in enumerate(zip(arrival, load)):
while priorityQueue and priorityQueue[0][0] <= requestArrivalTime: # Free up the server if request is completed
serverList.add(heappop(priorityQueue)[1])
if not serverList: # Drop the request as no server is busy
continue
freeServerIndex = serverList.bisect_left(requestNumber % k)
if freeServerIndex >= len(serverList):
freeServerIndex = 0
serverName = serverList.pop(freeServerIndex)
heappush(priorityQueue, (requestArrivalTime + requestLoad, serverName))
requestCounter[serverName] += 1
maxRequestHandled = max(maxRequestHandled, requestCounter[serverName])
return [serverName for serverName, count in requestCounter.items() if count == maxRequestHandled] | 3.421875 | 3 |
benchmarks/vision/cifar_10.py | tongh18/df-dn-paper | 0 | 12790703 | """
Coauthors: <NAME>
<NAME>
"""
from toolbox import *
import argparse
import random
from sklearn.ensemble import RandomForestClassifier
import torchvision.models as models
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from sklearn.model_selection import ParameterSampler
from scipy.stats.distributions import expon
import json
def run_naive_rf():
naive_rf_kappa = []
naive_rf_ece = []
naive_rf_train_time = []
naive_rf_test_time = []
for classes in classes_space:
# cohen_kappa vs num training samples (naive_rf)
for samples in samples_space:
RF = RandomForestClassifier(n_estimators=100, n_jobs=-1)
cohen_kappa, ece, train_time, test_time = run_rf_image_set(
RF,
cifar_train_images,
cifar_train_labels,
cifar_test_images,
cifar_test_labels,
samples,
classes,
)
naive_rf_kappa.append(cohen_kappa)
naive_rf_ece.append(ece)
naive_rf_train_time.append(train_time)
naive_rf_test_time.append(test_time)
print("naive_rf finished")
write_result(prefix + "naive_rf_kappa.txt", naive_rf_kappa)
write_result(prefix + "naive_rf_ece.txt", naive_rf_ece)
write_result(prefix + "naive_rf_train_time.txt", naive_rf_train_time)
write_result(prefix + "naive_rf_test_time.txt", naive_rf_test_time)
def run_cnn32():
cnn32_kappa = []
cnn32_ece = []
cnn32_train_time = []
cnn32_test_time = []
rng = np.random.RandomState(0)
param_grid = {'lr':[0.0001,0.001,0.0125,0.025],
'mo': [0.01,0.05,0.1,0.2,],
'bs': [32,64,128,256],
'wd': [0.00005,0.0001,0.0005,0.001,0.005]
}
param_list = list(ParameterSampler(param_grid, n_iter=20,
random_state=rng))
rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items()) for d in param_list]
outputlist=[]
total_train_time=0
for samples in samples_space:
totalaccuracy=[]
# cohen_kappa vs num training samples (cnn32)
for i in range(len(rounded_list)):
average_accuracy=0
for classes in classes_space:
# train data
cifar_trainset = datasets.CIFAR10(
root="./", train=True, download=True, transform=data_transforms
)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(
root="./", train=False, download=True, transform=data_transforms
)
cifar_test_labels = np.array(cifar_testset.targets)
cnn32 = SimpleCNN32Filter(len(classes))
total_train_time=0
maxaccuracy=0
param=rounded_list[i]
lr=param['lr']
momentum=param['mo']
wd=param['wd']
batch=param['bs']
train_loader, valid_loader, test_loader = create_loaders_es(
cifar_train_labels,
cifar_test_labels,
classes,
cifar_trainset,
cifar_testset,
samples,
batch,
)
cohen_kappa, ece, train_time, test_time,accuracy = test_dn_image_es_multiple(
cnn32,
train_loader,
valid_loader,
valid_loader,
lr,
momentum,
wd,
)
total_train_time+=train_time
average_accuracy+=accuracy
average_accuracy=average_accuracy/len(classes_space)
totalaccuracy.append(average_accuracy)
yy=np.asarray(totalaccuracy)
z=np.argmax(yy)
classifier='CNN32'
num_classes=int(n_classes)
sample_size=int(samples)
outputdic=rounded_list[z].copy()
outputdic['classifier']=classifier
outputdic['number of classes']=num_classes
outputdic['sample size']=sample_size
outputlist.append(outputdic)
outputdic={}
with open("parameters.json", "w") as outfile:
for j in range(len(outputlist)):
json.dump(outputlist[j], outfile)
outfile.write("\n")
print("cnn32 finished")
write_result(prefix + "cnn32_kappa.txt", cnn32_kappa)
write_result(prefix + "cnn32_ece.txt", cnn32_ece)
write_result(prefix + "cnn32_train_time.txt", cnn32_train_time)
write_result(prefix + "cnn32_test_time.txt", cnn32_test_time)
def run_cnn32_2l():
cnn32_2l_kappa = []
cnn32_2l_ece = []
cnn32_2l_train_time = []
cnn32_2l_test_time = []
for classes in classes_space:
# cohen_kappa vs num training samples (cnn32_2l)
for samples in samples_space:
# train data
cifar_trainset = datasets.CIFAR10(
root="./", train=True, download=True, transform=data_transforms
)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(
root="./", train=False, download=True, transform=data_transforms
)
cifar_test_labels = np.array(cifar_testset.targets)
cnn32_2l = SimpleCNN32Filter2Layers(len(classes))
train_loader, valid_loader, test_loader = create_loaders_es(
cifar_train_labels,
cifar_test_labels,
classes,
cifar_trainset,
cifar_testset,
samples,
)
cohen_kappa, ece, train_time, test_time = run_dn_image_es(
cnn32_2l,
train_loader,
valid_loader,
test_loader,
)
cnn32_2l_kappa.append(cohen_kappa)
cnn32_2l_ece.append(ece)
cnn32_2l_train_time.append(train_time)
cnn32_2l_test_time.append(test_time)
print("cnn32_2l finished")
write_result(prefix + "cnn32_2l_kappa.txt", cnn32_2l_kappa)
write_result(prefix + "cnn32_2l_ece.txt", cnn32_2l_ece)
write_result(prefix + "cnn32_2l_train_time.txt", cnn32_2l_train_time)
write_result(prefix + "cnn32_2l_test_time.txt", cnn32_2l_test_time)
def run_cnn32_5l():
cnn32_5l_kappa = []
cnn32_5l_ece = []
cnn32_5l_train_time = []
cnn32_5l_test_time = []
for classes in classes_space:
# cohen_kappa vs num training samples (cnn32_5l)
for samples in samples_space:
# train data
cifar_trainset = datasets.CIFAR10(
root="./", train=True, download=True, transform=data_transforms
)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(
root="./", train=False, download=True, transform=data_transforms
)
cifar_test_labels = np.array(cifar_testset.targets)
cnn32_5l = SimpleCNN32Filter5Layers(len(classes))
train_loader, valid_loader, test_loader = create_loaders_es(
cifar_train_labels,
cifar_test_labels,
classes,
cifar_trainset,
cifar_testset,
samples,
)
cohen_kappa, ece, train_time, test_time = run_dn_image_es(
cnn32_5l,
train_loader,
valid_loader,
test_loader,
)
cnn32_5l_kappa.append(cohen_kappa)
cnn32_5l_ece.append(ece)
cnn32_5l_train_time.append(train_time)
cnn32_5l_test_time.append(test_time)
print("cnn32_5l finished")
write_result(prefix + "cnn32_5l_kappa.txt", cnn32_5l_kappa)
write_result(prefix + "cnn32_5l_ece.txt", cnn32_5l_ece)
write_result(prefix + "cnn32_5l_train_time.txt", cnn32_5l_train_time)
write_result(prefix + "cnn32_5l_test_time.txt", cnn32_5l_test_time)
def run_resnet18():
resnet18_kappa = []
resnet18_ece = []
resnet18_train_time = []
resnet18_test_time = []
for classes in classes_space:
# cohen_kappa vs num training samples (resnet18)
for samples in samples_space:
# train data
cifar_trainset = datasets.CIFAR10(
root="./", train=True, download=True, transform=data_transforms
)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(
root="./", train=False, download=True, transform=data_transforms
)
cifar_test_labels = np.array(cifar_testset.targets)
res = models.resnet18(pretrained=True)
num_ftrs = res.fc.in_features
res.fc = nn.Linear(num_ftrs, len(classes))
train_loader, valid_loader, test_loader = create_loaders_es(
cifar_train_labels,
cifar_test_labels,
classes,
cifar_trainset,
cifar_testset,
samples,
)
cohen_kappa, ece, train_time, test_time = run_dn_image_es(
res,
train_loader,
valid_loader,
test_loader,
)
resnet18_kappa.append(cohen_kappa)
resnet18_ece.append(ece)
resnet18_train_time.append(train_time)
resnet18_test_time.append(test_time)
print("resnet18 finished")
write_result(prefix + "resnet18_kappa.txt", resnet18_kappa)
write_result(prefix + "resnet18_ece.txt", resnet18_ece)
write_result(prefix + "resnet18_train_time.txt", resnet18_train_time)
write_result(prefix + "resnet18_test_time.txt", resnet18_test_time)
if __name__ == "__main__":
torch.multiprocessing.freeze_support()
# Example usage: python cifar_10.py -m 3
parser = argparse.ArgumentParser()
parser.add_argument("-m", help="class number")
args = parser.parse_args()
n_classes = int(args.m)
prefix = args.m + "_class/"
samples_space = np.geomspace(10, 10000, num=8, dtype=int)
nums = list(range(10))
random.shuffle(nums)
classes_space = list(combinations_45(nums, n_classes))
# normalize
scale = np.mean(np.arange(0, 256))
normalize = lambda x: (x - scale) / scale
# train data
cifar_trainset = datasets.CIFAR10(
root="./", train=True, download=True, transform=None
)
cifar_train_images = normalize(cifar_trainset.data)
cifar_train_labels = np.array(cifar_trainset.targets)
# test data
cifar_testset = datasets.CIFAR10(
root="./", train=False, download=True, transform=None
)
cifar_test_images = normalize(cifar_testset.data)
cifar_test_labels = np.array(cifar_testset.targets)
cifar_train_images = cifar_train_images.reshape(-1, 32 * 32 * 3)
cifar_test_images = cifar_test_images.reshape(-1, 32 * 32 * 3)
#run_naive_rf()
data_transforms = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
run_cnn32()
run_cnn32_2l()
run_cnn32_5l()
data_transforms = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
run_resnet18()
| 2.25 | 2 |
COVID-Tracker.py | Lal-Jr/COVID-Tracker | 0 | 12790704 | <reponame>Lal-Jr/COVID-Tracker<filename>COVID-Tracker.py
from covid import Covid
import matplotlib.pyplot as pyplot
country = input("Enter Country Name : ")
covid = Covid()
data = covid.get_status_by_country_name(country)
cadr = {
key: data[key]
for key in data.keys() & {"confirmed","active","deaths","recovered"}
}
n = list(cadr.keys())
v = list(cadr.values())
print(cadr)
pyplot.title(country)
pyplot.bar(range(len(cadr)), v, tick_label=n)
pyplot.show()
| 3.59375 | 4 |
tests/cache/test_cache.py | limber-project/limberframework | 3 | 12790705 | <filename>tests/cache/test_cache.py
from datetime import datetime
from unittest.mock import AsyncMock, Mock
from pytest import fixture, mark, raises
from limberframework.cache.cache import Cache
from limberframework.cache.exceptions import CacheLockError
@fixture
def mock_store():
return AsyncMock()
@mark.asyncio
async def test_load(mock_store):
data = {"data": "test", "expires_at": datetime.now()}
mock_store.get.return_value = data
cache = Cache(mock_store)
await cache.load("test_cache")
assert cache._key == "test_cache"
assert cache.value == data["data"]
assert cache.expires_at == data["expires_at"]
@mark.parametrize(
"value,expires_at,updated",
[
("test", datetime.now(), True),
(None, None, False),
(None, datetime.now(), False),
("test", None, False),
],
)
@mark.asyncio
async def test_update(value, expires_at, updated, mock_store):
mock_store.put.return_value = True
key = "test_key"
cache = Cache(mock_store)
cache._key = key
cache.value = value
cache.expires_at = expires_at
response = await cache.update()
assert response == updated
@mark.asyncio
async def test_lock():
key = "test"
mock_locker = Mock()
mock_locker.lock = AsyncMock()
cache = Cache(Mock(), mock_locker)
cache._key = key
await cache.lock()
mock_locker.lock.assert_called_once_with(key)
@mark.asyncio
async def test_lock_key_not_set():
cache = Cache(Mock(), Mock())
with raises(CacheLockError, match="Cannot set lock for key None."):
await cache.lock()
@mark.asyncio
async def test_lock_locker_not_set():
cache = Cache(Mock())
with raises(CacheLockError, match="Cannot set lock with locker None."):
await cache.lock()
@mark.asyncio
async def test_unlock():
key = "test"
mock_locker = Mock()
mock_locker.unlock = AsyncMock()
cache = Cache(Mock(), mock_locker)
cache._key = key
await cache.unlock()
mock_locker.unlock.assert_called_once_with(key)
@mark.asyncio
async def test_unlock_key_not_set():
mock_locker = Mock()
mock_locker.unlock = AsyncMock()
cache = Cache(Mock(), mock_locker)
with raises(CacheLockError, match="Cannot unset lock for key None."):
await cache.unlock()
@mark.asyncio
async def test_unlock_locker_not_set():
cache = Cache(Mock())
with raises(CacheLockError, match="Cannot unset lock with locker None."):
await cache.unlock()
@mark.asyncio
async def test_secure():
mock_lock = AsyncMock()
mock_unlock = AsyncMock()
cache = Cache(Mock())
cache.lock = mock_lock
cache.unlock = mock_unlock
async with cache.secure():
pass
mock_lock.assert_called_once()
mock_unlock.assert_called_once()
| 2.40625 | 2 |
packages/watchmen-auth/src/watchmen_auth/__init__.py | Indexical-Metrics-Measure-Advisory/watchmen | 0 | 12790706 | from .auth_helper import authorize, authorize_token
from .authentication import AuthenticationDetails, AuthenticationManager, AuthenticationProvider, AuthenticationScheme
from .authorization import AuthFailOn401, AuthFailOn403, Authorization
from .principal_service import PrincipalService
| 1.070313 | 1 |
vjemmie/db/__init__.py | PederHA/vjemmie | 1 | 12790707 | from typing import Dict
from pathlib import Path
from discord.ext import commands
from ..config import MAIN_DB
from .db import DatabaseConnection
# Maybe this is a little clumsy?
_CONNECTIONS: Dict[str, DatabaseConnection] = {}
def add_db(path: str, bot: commands.Bot) -> DatabaseConnection:
if path not in _CONNECTIONS:
_CONNECTIONS[path] = DatabaseConnection(path, bot)
return _CONNECTIONS[path]
def get_db(path: str=MAIN_DB) -> DatabaseConnection:
    return _CONNECTIONS[path]
def init_db(path: str, bot: commands.Bot):
p = Path(MAIN_DB)
# Create db if it doesn't exist
if not p.exists():
# NOTE: assumes the database file resides in a subdirectory
# within the project root
#
# TODO: Actually make this not completely explode if the db file resides in
# the root directory.
p.parent.mkdir(parents=True, exist_ok=True)
p.touch()
# Connect to DB
db = add_db(path, bot)
# Add tables (if not already exists)
with open("db/vjemmie.db.sql", "r") as f:
script = f.read()
db.cursor.executescript(script)
| 3.046875 | 3 |
get_stats.py | kdrag0n/gci-stats | 1 | 12790708 | #!/usr/bin/env python3
import sys
import graphyte
import requests
def get_orgs():
with requests.get("https://codein.withgoogle.com/api/program/current/organization/") as resp:
if resp.status_code != 200:
print(f"Received status code {resp.status_code}: {resp.text}")
exit(1)
return resp.json()["results"]
def report_graphite(orgs):
for org in orgs:
name_base = f"gci.{org['program_year']}.{org['slug']}"
count = org["completed_task_instance_count"]
graphyte.send(f"{name_base}.tasks_completed", count)
def report_console(orgs):
counts = ((org["name"], org["completed_task_instance_count"]) for org in orgs)
# Sort and print by descending order of tasks completed
counts = sorted(counts, key=lambda x: x[1], reverse=True)
for org, count in counts:
print(f"{org}: {count}")
def main():
orgs = get_orgs()
report_console(orgs)
if len(sys.argv) > 1:
graphyte.init(sys.argv[1])
report_graphite(orgs)
if __name__ == '__main__':
main()
| 2.765625 | 3 |
aiomisc/iterator_wrapper.py | vasiliykovalev/aiomisc | 0 | 12790709 | <gh_stars>0
import asyncio
import inspect
import threading
import typing as t
from collections import deque
from concurrent.futures import Executor
from typing import Awaitable
from weakref import finalize
from aiomisc.counters import Statistic
T = t.TypeVar("T")
R = t.TypeVar("R")
GenType = t.Generator[T, R, None]
FuncType = t.Callable[[], GenType]
class IteratorWrapperStatistic(Statistic):
started: int
queue_size: int
queue_length: int
yielded: int
enqueued: int
class IteratorWrapper(t.AsyncIterator):
__slots__ = (
"__close_event", "__closed", "__gen_func", "__gen_task", "__queue",
"__queue_maxsize", "__read_event", "__write_event", "executor", "loop",
"_statistic",
)
def __init__(
self, gen_func: FuncType, loop: asyncio.AbstractEventLoop = None,
max_size: int = 0, executor: Executor = None,
statistic_name: t.Optional[str] = None,
):
current_loop = loop or asyncio.get_event_loop()
self.loop: asyncio.AbstractEventLoop = current_loop
self.executor = executor
self.__closed = threading.Event()
self.__close_event = asyncio.Event()
self.__queue: t.Deque[t.Any] = deque()
self.__queue_maxsize = max_size
self.__gen_task: t.Optional[asyncio.Task] = None
self.__gen_func: t.Callable = gen_func
self.__write_event = threading.Event()
self.__read_event = asyncio.Event()
self._statistic = IteratorWrapperStatistic(statistic_name)
self._statistic.queue_size = max_size
@property
def closed(self) -> bool:
return self.__closed.is_set()
@staticmethod
def __throw(_: t.Any) -> t.NoReturn:
pass
def _set_read_event(self) -> None:
def setter() -> None:
if self.__read_event.is_set():
return
self.__read_event.set()
self.loop.call_soon_threadsafe(setter)
def _in_thread(self) -> None:
self._statistic.started += 1
try:
gen = iter(self.__gen_func())
throw = self.__throw
if inspect.isgenerator(gen):
throw = gen.throw # type: ignore
while not self.closed:
item = next(gen)
while len(self.__queue) > self.__queue_maxsize:
self.__write_event.wait(0.1)
if self.closed:
throw(asyncio.CancelledError())
return
self.__queue.append((item, False))
del item
self._statistic.enqueued += 1
self._set_read_event()
if self.__write_event.is_set():
self.__write_event.clear()
except StopIteration as e:
if self.closed:
return
self.__queue.append((e, None))
self._set_read_event()
except Exception as e:
if self.closed:
return
self.__queue.append((e, True))
self.loop.call_soon_threadsafe(self.__read_event.set)
finally:
self._statistic.started -= 1
self._set_read_event()
self.loop.call_soon_threadsafe(self.__close_event.set)
async def _run(self) -> t.Any:
return await self.loop.run_in_executor(self.executor, self._in_thread)
async def close(self) -> None:
self.__closed.set()
self.__queue.clear()
if self.__gen_task is None:
return
if not self.__gen_task.done():
self.__gen_task.cancel()
await self.__close_event.wait()
await asyncio.gather(
self.__gen_task, loop=self.loop, return_exceptions=True,
)
def __aiter__(self) -> t.AsyncIterator[t.Any]:
if self.__gen_task is not None:
return self
self.__gen_task = self.loop.create_task(self._run())
return IteratorProxy(self, self.__finalizer)
def __finalizer(self) -> None:
self.__closed.set()
self.loop.create_task(self.close())
async def __anext__(self) -> t.Awaitable[T]:
while len(self.__queue) == 0:
await self.__read_event.wait()
item, is_exc = self.__queue.popleft()
self.__write_event.set()
if len(self.__queue) == 0:
self.__read_event.clear()
if is_exc is None:
await self.close()
raise StopAsyncIteration(*item.args) from item
elif is_exc:
await self.close()
raise item from item
self._statistic.yielded += 1
return item
async def __aenter__(self) -> "IteratorWrapper":
return self
async def __aexit__(
self, exc_type: t.Any, exc_val: t.Any,
exc_tb: t.Any,
) -> None:
if self.closed:
return
await self.close()
class IteratorProxy(t.AsyncIterator):
def __init__(
self, iterator: t.AsyncIterator,
finalizer: t.Callable[[], None],
):
self.__iterator = iterator
finalize(self, finalizer)
def __anext__(self) -> Awaitable[t.Any]:
return self.__iterator.__anext__()
| 2.34375 | 2 |
Chapter 11/Organizing clusters as a hierarchical tree/program1.py | fagaiera/python-machine-learning-book-3rd-edition-examples | 0 | 12790710 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram
from sklearn.cluster import AgglomerativeClustering
# # Organizing clusters as a hierarchical tree
# ## Grouping clusters in bottom-up fashion
np.random.seed(123)
variables = ['X', 'Y', 'Z']
labels = ['ID_0', 'ID_1', 'ID_2', 'ID_3', 'ID_4']
X = np.random.random_sample([5, 3])*10
df = pd.DataFrame(X, columns=variables, index=labels)
print(df)
# ## Performing hierarchical clustering on a distance matrix
row_dist = pd.DataFrame(squareform(pdist(df, metric='euclidean')),
columns=labels,
index=labels)
print(row_dist)
# We can either pass a condensed distance matrix (upper triangular) from the `pdist` function, or we can pass the "original" data array and define the `metric='euclidean'` argument in `linkage`. However, we should not pass the squareform distance matrix, which would yield different distance values although the overall clustering could be the same.
# 1. incorrect approach: Squareform distance matrix
#row_clusters = linkage(row_dist, method='complete', metric='euclidean')
#pd.DataFrame(row_clusters,
# columns=['row label 1', 'row label 2',
# 'distance', 'no. of items in clust.'],
# index=['cluster %d' % (i + 1)
# for i in range(row_clusters.shape[0])])
# 2. correct approach: Condensed distance matrix
row_clusters = linkage(pdist(df, metric='euclidean'), method='complete')
pd.DataFrame(row_clusters,
columns=['row label 1', 'row label 2',
'distance', 'no. of items in clust.'],
index=['cluster %d' % (i + 1)
for i in range(row_clusters.shape[0])])
# 3. correct approach: Input matrix
row_clusters = linkage(df.values, method='complete', metric='euclidean')
pd.DataFrame(row_clusters,
columns=['row label 1', 'row label 2',
'distance', 'no. of items in clust.'],
index=['cluster %d' % (i + 1)
for i in range(row_clusters.shape[0])])
# make dendrogram
row_dendr = dendrogram(row_clusters,
labels=labels,
color_threshold=np.inf
)
plt.tight_layout()
plt.ylabel('Euclidean distance')
plt.show()
# ## Attaching dendrograms to a heat map
# plot row dendrogram
fig = plt.figure(figsize=(8, 8), facecolor='white')
axd = fig.add_axes([0.09, 0.1, 0.2, 0.6])
# note: for matplotlib < v1.5.1, please use orientation='right'
row_dendr = dendrogram(row_clusters, orientation='left')
# reorder data with respect to clustering
df_rowclust = df.iloc[row_dendr['leaves'][::-1]]
axd.set_xticks([])
axd.set_yticks([])
# remove axes spines from dendrogram
for i in axd.spines.values():
i.set_visible(False)
# plot heatmap
axm = fig.add_axes([0.23, 0.1, 0.6, 0.6]) # x-pos, y-pos, width, height
cax = axm.matshow(df_rowclust, interpolation='nearest', cmap='hot_r')
fig.colorbar(cax)
axm.set_xticklabels([''] + list(df_rowclust.columns))
axm.set_yticklabels([''] + list(df_rowclust.index))
plt.show()
# ## Applying agglomerative clustering via scikit-learn
ac = AgglomerativeClustering(n_clusters=3,
affinity='euclidean',
linkage='complete')
labels = ac.fit_predict(X)
print('Cluster labels: %s' % labels)
ac = AgglomerativeClustering(n_clusters=2,
affinity='euclidean',
linkage='complete')
labels = ac.fit_predict(X)
print('Cluster labels: %s' % labels) | 3.328125 | 3 |
CodeChallange.py/codeone.py | jvillalp/Leetcodepractice | 0 | 12790711 | <reponame>jvillalp/Leetcodepractice<filename>CodeChallange.py/codeone.py
from collections import Counter #dict subclass for counting hashable objects {(Key, value)}
#It is a collection where elements are stored as dictionary keys and
# their counts are stored as dictionary values.
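#e.g. Counter("aab") -> Counter({'a': 2, 'b': 1})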
#we want to return the first time that a non repeating charactter is in the string (s)
#if none, return string with underscore
def first_not_repeating_character(s):
    counts = Counter(s) # counts of each character in string s, e.g. Counter("aabcc") -> Counter({'a': 2, 'c': 2, 'b': 1})
for character in s: #iterate through string
        if counts[character] == 1: #if this character appears exactly once
            return character #return the first non-repeating character
    return "_" #else return an underscore
"""
T O(n) - where n is the length of the string:
building the Counter is one O(n) pass and
the for loop scans each character once more,
so the whole function is O(n).
S O(k) -
storing counts in the Counter, where k is the number of unique characters.
Could we find an O(1) / single-pass solution? Even though it stays O(n), going through the list only once is not possible here.
"""
"""
- I knew there was a library from collections called Counter
- Counter stores elements as keys and their corresponding counts as values (key, value)
- Created a Counter that kept track of the # of times each character appeared
- key = character, value = # of times it appears
- iterated through the given string
- checked if a character appeared once, and if it did, returned that first character
- otherwise "_" if there is no instance of a non-repeating character
"""
| 3.6875 | 4 |
examples/helloworld/helloworld.py | pyrelease/PyRelease | 1 | 12790712 | <reponame>pyrelease/PyRelease
from __future__ import print_function
import sys
__version__ = "0.1.1" # This will set the version
__license__ = "MIT" # This sets the license
# The package description comes from the docstring of the
# first function in __all__
__all__ = ["main"]
def say_hello(name):
print("Hello " + name + "!")
def main():
"""My hello world application"""
args = sys.argv
if len(args) > 1:
name = args[1]
else:
name = "world"
say_hello(name)
| 2.328125 | 2 |
pyxrd/generic/gtk_tools/dummy_gtk.py | PyXRD/pyxrd | 27 | 12790713 | # coding=UTF-8
# ex:ts=4:sw=4:et
# Copyright (c) 2013, <NAME>
# All rights reserved.
# Complete license can be found in the LICENSE file.
from mvc.support.utils import get_new_uuid
__all__ = [
"gobject",
"GtkTreeIter",
"GenericTreeModel"
"TREE_MODEL_LIST_ONLY"
]
TREE_MODEL_LIST_ONLY = 0x00
TREE_MODEL_ITERS_PERSIST = 0x00
events_pending = lambda: False
class GtkTreeIter():
def __init__(self, user_data, path=None):
self.user_data = user_data
self.path = path
pass # end of class
class GenericTreeModel(object):
__connected_signals__ = None
def __init__(self):
self.__connected_signals__ = {}
def connect(self, signal_name, handler, *args):
handlers = self.__connected_signals__.get(signal_name, {})
handler_id = get_new_uuid()
handlers[handler_id] = (handler, args)
self.__connected_signals__[signal_name] = handlers
return handler_id
def disconnect(self, signal_name, handler_id):
try:
handlers = self.__connected_signals__.get(signal_name, {})
del handlers[handler_id]
except KeyError:
pass
return
def emit(self, signal_name, args=()):
handlers = self.__connected_signals__.get(signal_name, {})
for id, (handler, user_args) in handlers.items(): # @ReservedAssignment
handler(self, *((args,) + user_args))
pass
def set_property(self, *args, **kwargs):
pass
def create_tree_iter(self, user_data):
return GtkTreeIter(user_data)
def get_path(self, itr):
return self.on_get_path(itr.user_data)
def get_iter(self, path):
return self.create_tree_iter(self.on_get_iter(path))
def row_inserted(self, path, itr):
self.emit("row-inserted", (path, itr))
def row_deleted(self, indeces):
self.emit("row-deleted", (indeces,))
def invalidate_iters(self):
pass # TOD0!
def iter_is_valid(self, itr):
return True # TODO!
def __len__(self):
return len(self._model_data)
pass # end of class
| 1.835938 | 2 |
Month 03/Week 01/Day 06/d.py | KevinKnott/Coding-Review | 0 | 12790714 | # Random Pick with Weight: https://leetcode.com/problems/random-pick-with-weight/
# You are given an array of positive integers w where w[i] describes the weight of ith index (0-indexed).
# We need to call the function pickIndex() which randomly returns an integer in the range [0, w.length - 1]. pickIndex() should return the integer proportional to its weight in the w array. For example, for w = [1, 3], the probability of picking the index 0 is 1 / (1 + 3) = 0.25 (i.e 25%) while the probability of picking the index 1 is 3 / (1 + 3) = 0.75 (i.e 75%).
# More formally, the probability of picking index i is w[i] / sum(w).
# This problem is actually quite easy: we keep a rolling total (prefix sums), then scan
# from left to right until the running sum exceeds a random value and return that index
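# Worked example: w = [1, 3] -> prefix sums [1, 4]; a uniform draw in [0, 4)
# falls below 1 with probability 1/4 (return index 0) and in [1, 4) with
# probability 3/4 (return index 1), i.e. w[i] / sum(w) as required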
from random import random
class Solution:
def __init__(self, w):
self.curSum = 0
self.values = []
for weight in w:
self.curSum += weight
self.values.append(self.curSum)
def pickIndex(self) -> int:
if len(self.values) <= 1:
return 0
weightedPick = random() * self.curSum
for i in range(len(self.values)):
if self.values[i] > weightedPick:
return i
    # The above pickIndex runs in O(n) per call, but we can make each pick O(log n) as it is really easy
    # to binary search through sorted numbers like the weighted sum (the cumulative sums are increasing)
def pickIndex(self) -> int:
if len(self.values) <= 1:
return 0
        # Create a random number in [0, curSum): use random() for a value in [0, 1) and multiply it by curSum
ourPick = random() * self.curSum
lo, hi = 0, len(self.values) - 1
while lo < hi:
mid = lo + (hi - lo) // 2
if ourPick > self.values[mid]:
lo = mid + 1
else:
hi = mid
return lo
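    # Note: the loop above is equivalent to bisect.bisect_left(self.values, ourPick)
    # from the standard library; it is written out explicitly to show the search.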
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex()
# Score Card
# Did I need hints? N
# Did you finish within 30 min? 15
# Was the solution optimal? This is optimal
# Were there any bugs? No
# 5 5 5 5 = 5
| 3.921875 | 4 |
author/forms.py | vindeolal/pari | 0 | 12790715 | from django import forms
from wagtail.wagtailimages.widgets import AdminImageChooser
from .models import Author
class AuthorAdminForm(forms.ModelForm):
class Meta:
model = Author
# TODO: Ability to add author image
exclude = ['image', 'slug', 'bio']
| 1.828125 | 2 |
lambda/automations/update_tag.py | hyperglance/aws-rule-automations | 0 | 12790716 | <filename>lambda/automations/update_tag.py
"""Tags an EC2 Resource
This automation attempts to fix a tag for an EC2 resource, identified as above or below the configured threshold
by Hyperglance Rule(s)
This automation will operate across accounts, where the appropriate IAM Role exists.
"""
import processing.automation_utils as utils
def hyperglance_automation(boto_session, resource: dict, automation_params=''):
""" Attempts to Tag an EC2 Resource
Parameters
----------
boto_session : object
The boto session to use to invoke the automation
resource: dict
Dict of Resource attributes touse in the automation
automation_params : str
Automation parameters passed from the Hyperglance UI
"""
new_key = automation_params.get('New Key')
matched_tag_attrs = [attr for attr in resource['matchedAttributes'].items() if attr[0] in resource['tags']]
if (len(matched_tag_attrs) == 0):
res_id = resource['id']
raise RuntimeError(f'No tags to update on {res_id} because none of its tags matched the search criteria.')
for old_key, value in matched_tag_attrs:
# tag might already be 'good'
if old_key == new_key:
continue
## Create the new tag and retain existing value
utils.add_tag(boto_session, new_key, value, resource)
## Remove the old offending tag (we make sure to do the destructive action 2nd!)
utils.remove_tag(boto_session, old_key, resource)
def info() -> dict:
INFO = {
"displayName": "Update Tag",
"description": "Replaces a tags key but keeps its value",
"resourceTypes": [
"Security Group",
"EC2 Instance",
"AMI",
"Internet Gateway",
"Network ACL",
"Network Interface",
"Placement Group",
"Route Table",
"EC2 Snapshot",
"Subnet",
"EBS Snapshot",
"EBS Volume",
"VPC",
"SNS Topic",
"SQS Queue"
],
"params": [
{
"name": "New Key",
"type": "string",
"default": ""
}
],
"permissions": [
"ec2:CreateTags",
"sns:TagResource",
"sqs:TagQueue",
"ec2:DeleteTags",
"sns:UntagResource",
"sqs:UntagQueue"
]
}
return INFO
| 2.546875 | 3 |
dossiers/forms.py | adrienlachaize/dezede | 0 | 12790717 | from django.forms import BooleanField, ModelForm
from tree.forms import TreeChoiceField
from .models import DossierDEvenements
class DossierDEvenementsForm(ModelForm):
statique = BooleanField(required=False)
class Meta(object):
model = DossierDEvenements
exclude = ()
field_classes = {
'parent': TreeChoiceField,
}
class Media(object):
css = {
'all': ('css/custom_admin.css',),
}
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance')
if instance is not None:
initial = kwargs.get('initial', {})
initial['statique'] = instance.evenements.exists()
kwargs['initial'] = initial
super(DossierDEvenementsForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(DossierDEvenementsForm, self).clean()
if cleaned_data['categorie'] is not None \
and cleaned_data['parent'] is not None:
msg = 'Ne pas saisir de catégorie si le dossier a un parent.'
self.add_error('categorie', msg)
self.add_error('parent', msg)
evenements = cleaned_data.get('evenements')
if cleaned_data['statique']:
if not evenements:
cleaned_data['evenements'] = \
self.instance.get_queryset(dynamic=True)
self.instance.evenements.add(*evenements)
else:
cleaned_data['evenements'] = []
if self.instance.pk is not None:
self.instance.evenements.clear()
return cleaned_data
| 2.125 | 2 |
sbin/db_mgmt_cwe.py | AlexFaraino/cve-search | 377 | 12790718 | <gh_stars>100-1000
#!/usr/bin/env python3
#
# Import script of NIST CWE Common Weakness Enumeration.
#
# Until now, the import is only import Weakness description.
#
# The format is the following:
#
# { "_id" : ObjectId("52b70521b261026f36818515"), "weaknessabs" : "Variant",
# "name" : "ASP.NET Misconfiguration: Missing Custom Error Page",
# "description_summary" : "An ASP .NET application must enable custom error
# pages in order to prevent attackers from mining information from the
# framework's built-in responses.An ASP .NET application must enable custom
# error pages in order to prevent attackers from mining information from the
# framework's built-in responses.", "status" : "Draft", "id" : "12" }
#
# Software is free software released under the "Modified BSD license"
#
# Copyright (c) 2013-2014 <NAME> - <EMAIL>
# Copyright (c) 2015-2016 <NAME> - <EMAIL>
# Imports
import os
import sys
runPath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(runPath, ".."))
from dateutil.parser import parse as parse_datetime
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import argparse
import zipfile
import tempfile
from lib.ProgressBar import progressbar
from lib.Config import Configuration
import lib.DatabaseLayer as db
argparser = argparse.ArgumentParser(description='populate/update NIST CWE Common Weakness Enumeration database')
argparser.add_argument('-v', action='store_true', help='verbose output')
args = argparser.parse_args()
class CWEHandler(ContentHandler):
def __init__(self):
self.cwe = []
self.description_summary_tag = False
self.weakness_tag = False
def startElement(self, name, attrs):
if name == 'Weakness':
self.weakness_tag = True
self.statement = ""
self.weaknessabs = attrs.get('Weakness_Abstraction')
self.name = attrs.get('Name')
self.idname = attrs.get('ID')
self.status = attrs.get('Status')
self.cwe.append({'name': self.name, 'id': self.idname, 'status': self.status, 'weaknessabs': self.weaknessabs})
elif name == 'Description_Summary' and self.weakness_tag:
self.description_summary_tag = True
self.description_summary = ""
def characters(self, ch):
if self.description_summary_tag:
self.description_summary += ch.replace(" ", "")
def endElement(self, name):
if name == 'Description_Summary' and self.weakness_tag:
self.description_summary_tag = False
self.description_summary = self.description_summary + self.description_summary
self.cwe[-1]['description_summary'] = self.description_summary.replace("\n", "")
elif name == 'Weakness':
self.weakness_tag = False
# make parser
parser = make_parser()
ch = CWEHandler()
parser.setContentHandler(ch)
# check modification date
try:
(f, r) = Configuration.getFeedData('cwe')
except Exception as e:
print(e)
sys.exit("Cannot open url %s. Bad URL or not connected to the internet?"%(Configuration.getFeedURL("cwe")))
lastmodified = parse_datetime(r.headers['last-modified'], ignoretz=True)
i = db.getLastModified('cwe')
if i is not None:
if lastmodified == i:
print("Not modified")
sys.exit(0)
# parse xml and store in database
parser.parse(f)
cweList=[]
for cwe in progressbar(ch.cwe):
cwe['description_summary']=cwe['description_summary'].replace("\t\t\t\t\t", " ")
if args.v:
print (cwe)
cweList.append(cwe)
db.bulkUpdate('cwe', cweList)
#update database info after successful program-run
db.setColUpdate('cwe', lastmodified)
| 1.84375 | 2 |
relex/modules/offset_embedders/offset_embedder.py | DFKI-NLP/RelEx | 16 | 12790719 | <reponame>DFKI-NLP/RelEx
import torch
from allennlp.common import Registrable
class OffsetEmbedder(torch.nn.Module, Registrable):
"""
"""
default_implementation = "relative"
def get_output_dim(self) -> int:
"""
Returns the final output dimension that this ``OffsetEmbedder`` uses to represent each
offset. This is `not` the shape of the returned tensor, but the last element of that shape.
"""
raise NotImplementedError
def is_additive(self) -> bool:
raise NotImplementedError
| 2.640625 | 3 |
history/admin.py | fahimfarhan/cancer-web-app | 0 | 12790720 | <gh_stars>0
from django.contrib import admin
# Register your models here.
from history.models import HistoryModelFile
admin.site.register(HistoryModelFile)
| 1.289063 | 1 |
example_code/selection_problem.py | dbwebb-se/python-slides | 0 | 12790721 | import random
import time
n = 10000000
random_list = random.sample(range(n * 10), n)
start = time.time()
median = sorted(random_list, reverse=True)[n // 2]
end = time.time()
print(median)
print(end - start)
| 3.296875 | 3 |
CATS/cleaning_empty_rows.py | janetvanbilsen/Twitter-Mining-Raspberry_Pi | 0 | 12790722 | #!/usr/bin/env python3
import pandas as pd
dataset = pd.read_csv('Dataset.csv')
dataset.to_csv('Dataset.csv', index=False)
| 2.390625 | 2 |
crawl_data/crawl_data/spiders/companies_spider.py | huynhnhathao/job_recommender | 0 | 12790723 | import scrapy
class CompaniesSpider(scrapy.Spider):
"""This spider wil crawl all the company link available in itviec and save it
to a json line file.
"""
name = "companies"
start_urls = [
'https://itviec.com/companies',
]
def parse(self, response):
all_companies = response.xpath("//div[@class='first-group companies']/a[@class='featured-company']/@href").getall()
for company_link in all_companies:
relative_link = '/'.join(company_link.split('/') [:-1])
company_name = company_link.split('/') [-2]
absolute_link = response.urljoin(relative_link)
yield {'company_name': company_name, 'url': absolute_link }
next_page = response.xpath("//a[@class='more-jobs-link more-company']/@href").get()
# next_page now has the form of '/companies?page=2' or None
if next_page is not None:
# makes absolute url
next_page = response.urljoin(next_page)
yield scrapy.Request(next_page, callback = self.parse)
| 3.25 | 3 |
PFM_CP/matcreate.py | DylanAgius/PFM-CP | 0 | 12790724 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 15:19:55 2020
@author: mi19356
"""
import numpy as np
import os
import pandas as pd
from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree
import random
import math
from scrape import vtk_scrap
from dataconversions import data_reconstruct, reso_change, data_reconstruct_dream,sphericaldiam,printtofiletext
#scrape data
#orien,vtkdata,vtkdataPoints,const=vtk_scrap('PF_00189000','graindata')
dream=0
if dream==1:
orien,vtkdata,vtkdataPoints,const=vtk_scrap('PF_00130000','graindata',dream)
grainids=data_reconstruct(vtkdata, vtkdataPoints,1,orien)
else:
orien,vtkdata,const=vtk_scrap('vtkupdate','graindata',dream)
grainids,diameter=data_reconstruct_dream(vtkdata,orien)
#construct a vtk file
#vtkdatareso=reso_change(vtkdata)
"""
Create orientation matrix
"""
def rotation_info(orien,grainids):
#Defining local variables
vec1=[0,0,1]
vec2=[0,1,0]
#modify the orientations
orien=orien[1:,1:]
#check to see if there are missing orientations
if len(orien)<len(grainids):
totaldif=len(grainids)-len(orien)
for i in range(0,int(totaldif)):
orien=np.append(orien,[random.uniform(0,2*math.pi),random.uniform(0,2*math.pi),random.uniform(0,2*math.pi)])
orien=orien.reshape(int(len(orien)/3),3)
    #construct rotation matrix
zrot=np.array([[np.cos((orien[:,0])),np.sin((orien[:,0])),np.zeros(len(orien))],[-np.sin((orien[:,0])),np.cos((orien[:,0])),np.zeros(len(orien))],[np.zeros(len(orien)),np.zeros(len(orien)),np.ones(len(orien))]])
xrot=np.array([[np.ones(len(orien)),np.zeros(len(orien)),np.zeros(len(orien))],[np.zeros(len(orien)),np.cos((orien[:,1])),np.sin((orien[:,1]))],[np.zeros(len(orien)),-np.sin((orien[:,1])),np.cos((orien[:,1]))]])
zrot2=np.array([[np.cos((orien[:,2])),np.sin((orien[:,2])),np.zeros(len(orien))],[-np.sin((orien[:,2])),np.cos((orien[:,2])),np.zeros(len(orien))],[np.zeros(len(orien)),np.zeros(len(orien)),np.ones(len(orien))]])
total_rot=[[]*len(orien)]*len(orien)
samp1=[[]*len(orien)]*len(orien)
samp2=[[]*len(orien)]*len(orien)
for i in range(0,len(orien)):
total_rot[i]=np.transpose(np.dot(np.dot(zrot2[:,:,i],xrot[:,:,i]),zrot[:,:,i]))
samp1[i]=np.dot(total_rot[i],vec1)
samp2[i]=np.dot(total_rot[i],vec2)
return vec1, vec2, samp1, samp2, total_rot, orien
"""
create material file for AMITEX
"""
def mat_create(orien,const, diameter,statev):
#rotating vectors using grain orientations
vec1,vec2,samp1,samp2,total_rot, orien=rotation_info(orien,grainids)
#use the diameter to create a variable parameter for \tau
#diameter currnetly in microns, convert to mm
#need to add 17.9 and 10 to excel const file.
diameter=(2*diameter)/1000
#writing diameters to file
printtofiletext(diameter,'diameters')
#writing orientations to file
orienprint=list(orien)
printtofiletext(orienprint,'orientations')
taud=220 + (17.9/((diameter)**0.5))
    #check to make sure there are no values that are too large
#checkgreater=np.where(taud>350)[0]
#replace these values
#taud[checkgreater]=340.0
Materials = Element('Materials')
comment = Comment('REFERENCE MATERIAL')
Materials.append(comment)
child = SubElement(Materials, 'Reference_Material',Lambda0= '2.0431e+5', Mu0='0.8756e+5' )
comment = Comment('MATERIAL 1')
Materials.append(comment)
"orientation files required if material zone technique is used in AMITEX"
fsamp1 = open('fsam1.txt', 'w')
fsamp2 = open('fsam2.txt', 'w')
fsamp3 = open('fsam3.txt', 'w')
fsamp21 = open('fsam21.txt', 'w')
fsamp22 = open('fsam22.txt', 'w')
fsamp23 = open('fsam23.txt', 'w')
orien1 = open('orien1.txt', 'w')
orien2 = open('orien2.txt', 'w')
orien3 = open('orien3.txt', 'w')
tau01 = open('tau1.txt', 'w')
tau02 = open('tau2.txt', 'w')
for numMat in range(1,len(orien)+1):
for i in range(0,(len(const))):
if i==59:
const[i,0]=samp1[numMat-1][0]
fsamp1.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==60:
const[i,0]=samp1[numMat-1][1]
fsamp2.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==61:
const[i,0]=samp1[numMat-1][2]
fsamp3.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==67:
const[i,0]=samp2[numMat-1][0]
fsamp21.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==68:
const[i,0]=samp2[numMat-1][1]
fsamp22.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==69:
const[i,0]=samp2[numMat-1][2]
fsamp23.write(str("{:.16f}".format(const[i,0]))+'\n')
#adjust const array to include grain dependent info
#grain orientations
#update the value for tau0
elif i==98:
const[i,0]=taud[numMat-1]
tau01.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==114:
const[i,0]=taud[numMat-1]
tau02.write(str("{:.16f}".format(const[i,0]))+'\n')
elif i==168:
const[i,0]=(orien[numMat-1,0])
orien1.write(str(const[i,0])+'\n')
elif i==169:
const[i,0]=(orien[numMat-1,1])
orien2.write(str(const[i,0])+'\n')
elif i==170:
const[i,0]=(orien[numMat-1,2])
orien3.write(str(const[i,0])+'\n')
fsamp1.close()
fsamp2.close()
fsamp3.close()
fsamp21.close()
fsamp22.close()
fsamp23.close()
orien1.close()
orien2.close()
orien3.close()
child_grain=SubElement(Materials, 'Material', numM="1",Lib='/mnt/storage/home/mi19356/amitex_fftp-v8.17.1/Grainsize/UMAT/libUmatAmitex.so', Law='UMATBCCGDGS')
"This stores all the parameters required for the material"
"Coeff is the element of the grain material, and the atrributes are the parameter values"
"iterate across the different material constants to create subelelements for each constant2"
for i in range(0,(len(const))):
if i==59:
const[i,0]=samp1[numMat-1][0]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam1.txt")
elif i==60:
const[i,0]=samp1[numMat-1][1]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam2.txt")
elif i==61:
const[i,0]=samp1[numMat-1][2]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam3.txt")
elif i==67:
const[i,0]=samp2[numMat-1][0]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam21.txt")
elif i==68:
const[i,0]=samp2[numMat-1][1]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam22.txt")
elif i==69:
const[i,0]=samp2[numMat-1][2]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/fsam23.txt")
elif i==98:
const[i,0]=taud[numMat-1]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/tau1.txt")
elif i==114:
const[i,0]=taud[numMat-1]
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/tau2.txt")
elif i==168:
const[i,0]=(orien[numMat-1,0])
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/orien1.txt")
elif i==169:
const[i,0]=(orien[numMat-1,1])
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/orien2.txt")
elif i==170:
const[i,0]=(orien[numMat-1,2])
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant_Zone', File="MAT/Coeff/orien3.txt")
else:
child_grain_tail = SubElement(child_grain, 'Coeff',Index=str(i+1), Type='Constant',Value=str(const[i,0]))
        #iterate across the required number of state variables needed
for i in range(0,statev):
child_grain_tail = SubElement(child_grain, 'IntVar',Index=str(i+1), Type='Constant',Value='0.')
tree = ElementTree(Materials)
tree.write("fatemptzone2.xml")
mat_create(orien,const,diameter,900)
| 2.15625 | 2 |
src/PhoneBook.py | Seniru/at.py | 0 | 12790725 | <filename>src/PhoneBook.py
"""
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from enum import Enum
from .message import Message
class NumberingSchemes(Enum):
"""Numbering schemes implemented in phonebooks
"""
NATIONAL = 0
INTERNATIONAL = 1
class PhoneBookEntry:
"""A Phonebook entry
"""
def __init__(self, idx: int, number: str, scheme: NumberingSchemes, contact_name: str):
"""Constructor not recommended to called directly
Args:
idx (int): Index of the entry in the phonebook
number (str): The phonebook entry number
scheme (NumberingSchemes): Numbering scheme used (National or International)
contact_name (str): The name the entry has been saved as
"""
self.index = idx
self.number = number
self.scheme = scheme
self.contact_name = contact_name
def __repr__(self):
"""`__repr__` magicmethod to be used in repr().
Returns:
str: Representation of the object.
"""
return str(self)
def __str__(self):
"""`__str__` magicmethod to be used in repr().
Returns:
str: A stringified version of the object.
"""
return f"<PhoneBookEntry<index={self.index}, number='{self.number}', scheme={self.scheme}, contactname='{self.contact_name}'>"
@staticmethod
def from_payload(payload: str):
"""Creates a phonebook entry from a payload.
Args:
payload (str): Raw payload that was resulted upon requesting phonebook entries
Returns:
PhoneBookEntry: A phonebook entry
"""
params = Message.from_payload(payload).parameters
return PhoneBookEntry(
params[0],
params[1],
NumberingSchemes.NATIONAL if params[2] == 129 else NumberingSchemes.INTERNATIONAL,
params[3]
)
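
    # --- Illustrative usage (not part of the original module) ---
    # A hedged sketch; the exact payload format is defined by Message.from_payload
    # and may differ from the +CPBR-style line assumed below:
    #
    #   entry = PhoneBookEntry.from_payload('+CPBR: 1,"+15551234567",145,"Alice"')
    #   entry.scheme        # NumberingSchemes.INTERNATIONAL (type code 145, not 129)
    #   entry.contact_name  # 'Alice'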
| 2.890625 | 3 |
challenger_bot/car_controllers.py | twobackfromtheend/challenger | 0 | 12790726 | <reponame>twobackfromtheend/challenger<filename>challenger_bot/car_controllers.py
from enum import Enum
class CarController(Enum):
AGENT = 0
DS4 = 1
| 1.796875 | 2 |
scripts/bulkLoadGeojson2elasticsearch.py | wkhchow/opendrr-api | 0 | 12790727 | <filename>scripts/bulkLoadGeojson2elasticsearch.py
# =================================================================
#
# Authors: <NAME> <<EMAIL>>
#
# =================================================================
import os
for eqScenario in ['sim6p8_cr2022_rlz_1']:
for retrofitPrefix in ['b0']: #,'r1','r2']:
for view in ['casualties_agg_view',
'damage_state_agg_view',
'economic_loss_agg_view',
'full_retrofit_agg_view',
'functional_state_agg_view',
'partial_retrofit_agg_view',
'recovery_agg_view',
'scenario_hazard_agg_view',
'scenario_hazard_threat_agg_view',
'scenario_rupture_agg_view',
'social_disruption_agg_view']:
print('loading: '+'dsra_{eqScenario}_{retrofitPrefix}_{view}.json'.format(**{'eqScenario':eqScenario, 'retrofitPrefix':retrofitPrefix, 'view':view}))
os.system('python load_es_data.py dsra_{eqScenario}_{retrofitPrefix}_{view}.json "Sauid"'.format(**{'eqScenario':eqScenario, 'retrofitPrefix':retrofitPrefix, 'view':view})) | 2.171875 | 2 |
release/src-rt-6.x.4708/router/samba3/source4/scripting/devel/speedtest.py | zaion520/ATtomato | 2 | 12790728 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Unix SMB/CIFS implementation.
# This speed test aims to show difference in execution time for bulk
# creation of user objects. This will help us compare
# Samba4 vs MS Active Directory performance.
# Copyright (C) <NAME> <<EMAIL>> 2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import optparse
import sys
import time
import base64
from decimal import Decimal
sys.path.insert(0, "bin/python")
import samba
samba.ensure_external_module("testtools", "testtools")
samba.ensure_external_module("subunit", "subunit/python")
import samba.getopt as options
from ldb import (
SCOPE_BASE, SCOPE_SUBTREE, LdbError, ERR_NO_SUCH_OBJECT,
ERR_UNWILLING_TO_PERFORM, ERR_INSUFFICIENT_ACCESS_RIGHTS)
from samba.ndr import ndr_pack, ndr_unpack
from samba.dcerpc import security
from samba.auth import system_session
from samba import gensec, sd_utils
from samba.samdb import SamDB
from samba.credentials import Credentials
import samba.tests
from samba.tests import delete_force
from subunit.run import SubunitTestRunner
import unittest
parser = optparse.OptionParser("speedtest.py [options] <host>")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
opts, args = parser.parse_args()
if len(args) < 1:
parser.print_usage()
sys.exit(1)
host = args[0]
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)
#
# Tests start here
#
class SpeedTest(samba.tests.TestCase):
def find_domain_sid(self, ldb):
res = ldb.search(base=self.base_dn, expression="(objectClass=*)", scope=SCOPE_BASE)
return ndr_unpack(security.dom_sid,res[0]["objectSid"][0])
def setUp(self):
super(SpeedTest, self).setUp()
self.ldb_admin = ldb
self.base_dn = ldb.domain_dn()
self.domain_sid = security.dom_sid(ldb.get_domain_sid())
self.user_pass = "<PASSWORD>@"
print "baseDN: %s" % self.base_dn
def create_user(self, user_dn):
ldif = """
dn: """ + user_dn + """
sAMAccountName: """ + user_dn.split(",")[0][3:] + """
objectClass: user
unicodePwd:: """ + base64.b64encode(("\"%s\"" % self.user_pass).encode('utf-16-le')) + """
url: www.example.com
"""
self.ldb_admin.add_ldif(ldif)
def create_group(self, group_dn, desc=None):
ldif = """
dn: """ + group_dn + """
objectClass: group
sAMAccountName: """ + group_dn.split(",")[0][3:] + """
groupType: 4
url: www.example.com
"""
self.ldb_admin.add_ldif(ldif)
def create_bundle(self, count):
for i in range(count):
self.create_user("cn=speedtestuser%d,cn=Users,%s" % (i+1, self.base_dn))
def remove_bundle(self, count):
for i in range(count):
delete_force(self.ldb_admin, "cn=speedtestuser%d,cn=Users,%s" % (i+1, self.base_dn))
def remove_test_users(self):
res = ldb.search(base="cn=Users,%s" % self.base_dn, expression="(objectClass=user)", scope=SCOPE_SUBTREE)
dn_list = [item.dn for item in res if "speedtestuser" in str(item.dn)]
for dn in dn_list:
delete_force(self.ldb_admin, dn)
class SpeedTestAddDel(SpeedTest):
def setUp(self):
super(SpeedTestAddDel, self).setUp()
def run_bundle(self, num):
print "\n=== Test ADD/DEL %s user objects ===\n" % num
avg_add = Decimal("0.0")
avg_del = Decimal("0.0")
for x in [1, 2, 3]:
start = time.time()
self.create_bundle(num)
res_add = Decimal( str(time.time() - start) )
avg_add += res_add
print " Attempt %s ADD: %.3fs" % ( x, float(res_add) )
#
start = time.time()
self.remove_bundle(num)
res_del = Decimal( str(time.time() - start) )
avg_del += res_del
print " Attempt %s DEL: %.3fs" % ( x, float(res_del) )
print "Average ADD: %.3fs" % float( Decimal(avg_add) / Decimal("3.0") )
print "Average DEL: %.3fs" % float( Decimal(avg_del) / Decimal("3.0") )
print ""
def test_00000(self):
""" Remove possibly undeleted test users from previous test
"""
self.remove_test_users()
def test_00010(self):
self.run_bundle(10)
def test_00100(self):
self.run_bundle(100)
def test_01000(self):
self.run_bundle(1000)
def _test_10000(self):
""" This test should be enabled preferably against MS Active Directory.
It takes quite the time against Samba4 (1-2 days).
"""
self.run_bundle(10000)
class AclSearchSpeedTest(SpeedTest):
def setUp(self):
super(AclSearchSpeedTest, self).setUp()
self.ldb_admin.newuser("acltestuser", "samba123@")
self.sd_utils = sd_utils.SDUtils(self.ldb_admin)
self.ldb_user = self.get_ldb_connection("acltestuser", "samba123@")
self.user_sid = self.sd_utils.get_object_sid(self.get_user_dn("acltestuser"))
def tearDown(self):
super(AclSearchSpeedTest, self).tearDown()
delete_force(self.ldb_admin, self.get_user_dn("acltestuser"))
def run_search_bundle(self, num, _ldb):
print "\n=== Creating %s user objects ===\n" % num
self.create_bundle(num)
mod = "(A;;LC;;;%s)(D;;RP;;;%s)" % (str(self.user_sid), str(self.user_sid))
for i in range(num):
self.sd_utils.dacl_add_ace("cn=speedtestuser%d,cn=Users,%s" %
(i+1, self.base_dn), mod)
print "\n=== %s user objects created ===\n" % num
print "\n=== Test search on %s user objects ===\n" % num
avg_search = Decimal("0.0")
for x in [1, 2, 3]:
start = time.time()
res = _ldb.search(base=self.base_dn, expression="(objectClass=*)", scope=SCOPE_SUBTREE)
res_search = Decimal( str(time.time() - start) )
avg_search += res_search
print " Attempt %s SEARCH: %.3fs" % ( x, float(res_search) )
print "Average Search: %.3fs" % float( Decimal(avg_search) / Decimal("3.0") )
self.remove_bundle(num)
def get_user_dn(self, name):
return "CN=%s,CN=Users,%s" % (name, self.base_dn)
def get_ldb_connection(self, target_username, target_password):
creds_tmp = Credentials()
creds_tmp.set_username(target_username)
creds_tmp.set_password(<PASSWORD>)
creds_tmp.set_domain(creds.get_domain())
creds_tmp.set_realm(creds.get_realm())
creds_tmp.set_workstation(creds.get_workstation())
creds_tmp.set_gensec_features(creds_tmp.get_gensec_features()
| gensec.FEATURE_SEAL)
ldb_target = SamDB(url=host, credentials=creds_tmp, lp=lp)
return ldb_target
def test_search_01000(self):
self.run_search_bundle(1000, self.ldb_admin)
def test_search2_01000(self):
# allow the user to see objects but not attributes, all attributes will be filtered out
mod = "(A;;LC;;;%s)(D;;RP;;;%s)" % (str(self.user_sid), str(self.user_sid))
self.sd_utils.dacl_add_ace("CN=Users,%s" % self.base_dn, mod)
self.run_search_bundle(1000, self.ldb_user)
# Important unit running information
if not "://" in host:
host = "ldap://%s" % host
ldb_options = ["modules:paged_searches"]
ldb = SamDB(host, credentials=creds, session_info=system_session(), lp=lp, options=ldb_options)
runner = SubunitTestRunner()
rc = 0
if not runner.run(unittest.makeSuite(SpeedTestAddDel)).wasSuccessful():
rc = 1
if not runner.run(unittest.makeSuite(AclSearchSpeedTest)).wasSuccessful():
rc = 1
sys.exit(rc)
| 1.921875 | 2 |
Abstract_Factory/furniture_factory.py | edgardeng/design-patterns-in-python | 0 | 12790729 | <reponame>edgardeng/design-patterns-in-python<gh_stars>0
"""
Abstract Factory Code Demo
A furniture factory example
"""
from __future__ import annotations
from abc import ABC, abstractmethod
class Chair(ABC):
"""
product interface 1: Chair
"""
@abstractmethod
def sit_on(self) -> str:
pass
class Sofa(ABC):
"""
product interface 2: Sofa
"""
@abstractmethod
def lie_on(self) -> str:
pass
class ModernChair(Chair):
"""
product implement Chair: ModernChair
"""
def sit_on(self) -> str:
return 'I sit on a Modern Chair'
class ClassicChair(Chair):
"""
product implement Chair: ClassicChair
"""
def sit_on(self) -> str:
return 'I sit on a Classic Chair'
class ModernSofa(Sofa):
"""
product implement Sofa: ModernSofa
"""
def lie_on(self) -> str:
return 'I sit on a Modern Sofa'
class ClassicSofa(Sofa):
"""
product implement Sofa: ClassicSofa
"""
def lie_on(self) -> str:
return 'I sit on a Classic Sofa'
class FurnitureFactory(ABC):
"""
    The Abstract Factory interface declares a set of methods that return different abstract products.
    The Furniture Factory produces Chairs and Sofas.
"""
@abstractmethod
def produce_chair(self) -> Chair:
pass
@abstractmethod
def produce_sofa(self) -> Sofa:
pass
class ModernFurnitureFactory(FurnitureFactory):
"""
    A concrete factory class that implements FurnitureFactory to produce concrete products.
"""
def produce_chair(self) -> Chair:
print('ModernFurnitureFactory produce chair ...')
return ModernChair()
def produce_sofa(self) -> Sofa:
print('ModernFurnitureFactory produce sofa ...')
return ModernSofa()
class ClassicFurnitureFactory(FurnitureFactory):
"""
    A concrete factory class that implements FurnitureFactory to produce concrete products.
"""
def produce_chair(self) -> Chair:
print('ClassicFurnitureFactory produce chair ...')
return ClassicChair()
def produce_sofa(self) -> Sofa:
print('ClassicFurnitureFactory produce sofa ...')
return ClassicSofa()
def client_code(factory: FurnitureFactory):
chair = factory.produce_chair()
print(chair.sit_on())
sofa = factory.produce_sofa()
print(sofa.lie_on())
if __name__ == '__main__':
print('\r\n--- I want some Modern Furniture ---\r\n')
client_code(ModernFurnitureFactory())
print('\r\n--- I want some Classic Furniture ---\r\n')
client_code(ClassicFurnitureFactory())
| 4.25 | 4 |
HomeLab/homepage/views.py | amalik18/HomeLab | 0 | 12790730 | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.template import loader
from django.shortcuts import render
from django.http import Http404
# Create your views here.
def index(request):
return render(request=request, template_name='homepage.html')
| 1.734375 | 2 |
SHS4py/setup.py | YukiMitsuta/shs4py | 2 | 12790731 | <gh_stars>1-10
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright 2019.12.19 <NAME>
# Distributed under terms of the MIT license.
from distutils.core import setup
from Cython.Build import cythonize
import numpy
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Compiler import Options
print("numpy.get_include() = %s"%numpy.get_include())
setup(
name = 'calcgau',
ext_modules = cythonize('calcgau.pyx'),
#include_path = [numpy.get_include()]
include_dirs = [numpy.get_include()]
)
setup(
name = 'calcVES',
ext_modules = cythonize('calcVES.pyx'),
#include_path = [numpy.get_include()]
include_dirs = [numpy.get_include()]
)
ext_modules = [Extension("calcRCMC", ["calcRCMC.pyx"], language="c++")]#,
#Extension("module2", ["module2.pyx"], language="c++")]
setup(cmdclass={'build_ext': build_ext}, ext_modules=ext_modules, include_dirs = [numpy.get_include()])
#setup(
#name = 'calcRCMC',
#ext_modules = cythonize('calcRCMC.pyx'),
##include_path = [numpy.get_include()]
#include_dirs = [numpy.get_include()]
##)
| 1.882813 | 2 |
tests/test_acdh_geonames_utils.py | acdh-oeaw/acdh_geonames_utils | 0 | 12790732 | #!/usr/bin/env python
"""Tests for `acdh_geonames_utils` package."""
import os
import unittest
from click.testing import CliRunner
from acdh_geonames_utils import acdh_geonames_utils as gn
from acdh_geonames_utils import cli
good_country_code = 'YU'
bad_country_code = 'BAAAD'
good_ft_code = "en"
bad_ft_code = "de"
TEST_GN_FILE = os.path.join(
"./fixtures",
"AL.txt"
)
class TestAcdh_geonames_utils(unittest.TestCase):
"""Tests for `acdh_geonames_utils` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_001_download(self):
"""Test download of zip."""
good = gn.download_country_zip(good_country_code)
bad = gn.download_country_zip(bad_country_code)
self.assertTrue(good.endswith(f"{good_country_code}.zip"))
self.assertEqual(bad, "")
def test_002_download_and_unzip(self):
"""Test download and unzip."""
good = gn.download_and_unzip_country_zip(good_country_code)
bad = gn.download_and_unzip_country_zip(bad_country_code)
self.assertTrue(good.endswith(f"{good_country_code}.txt"))
self.assertEqual(bad, "")
def test_003_unzip(self):
"""Test unzipping of zip."""
bad = gn.unzip_country_zip("")
self.assertEqual(bad, "")
def test_004_file_to_df(self):
"""Test loading file into pandas.DataFrame"""
df = gn.countries_as_df(TEST_GN_FILE)
self.assertEqual(len(df), 9356)
def test_005_dl_to_df(self):
"""Test loading download into pandas.DataFrame"""
good_df = gn.download_to_df('YU')
bad_df = gn.download_to_df('YUUU')
self.assertEqual(len(good_df), 1)
self.assertFalse(bad_df)
def test_006_dl_ft(self):
good = gn.dl_feature_codes(good_ft_code)
bad = gn.dl_feature_codes(bad_ft_code)
self.assertTrue(good != "")
self.assertTrue(bad == "")
def test_007_dl_ft_as_df(self):
good = gn.feature_codes_df(good_ft_code)
bad = gn.feature_codes_df(bad_ft_code)
self.assertIsNotNone(good)
self.assertIsNone(bad)
def test_command_line_interface(self):
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'acdh_geonames_utils.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
| 2.703125 | 3 |
pptag.py | dsm1212/ppTag | 0 | 12790733 | <filename>pptag.py
#!/usr/bin/env python
## python 3
# pip install watchdog
import sys
import getopt
import logging
import urllib
import time
import os
import threading
from datetime import datetime, date, timedelta
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from exif.exifread.tags import DEFAULT_STOP_TAG, FIELD_TYPES
from exif.exifread import process_file, exif_log, __version__
from plexUsers import plexUsers
from lightroomTags import parse_xmp_for_lightroom_tags
from photoElement import PhotoElement
from config import ppTagConfig
logger = exif_log.get_logger()
doUpdate = []
lock = None
firstRun = ppTagConfig.FORCE_RUN_AT_START
# plex
p = None
# timer
t = None
# last incoming event time
lastTS = datetime.now()
def updateMetadata(item, tags, rating):
# update rating
for user in p.users:
data = p.fetchPlexApi("/:/rate?key=%s&identifier=com.plexapp.plugins.library&rating=%i" %(item, rating),"PUT", False, user.token)
# write the metadata
# prepare the tags
tagQuery = "?"
i = 0
for tag in tags:
tagQuery = tagQuery + "tag[%s].tag.tag=%s&" %(i, urllib.parse.quote(tag.encode('utf-8')))
i = i + 1
#logging.debug(" updateMetaData: tagQuery is '%s'" % tagQuery)
data = p.fetchPlexApi("/library/metadata/%s%s" %(item, tagQuery), "PUT")
def getdata(filename):
detailed = True
stop_tag = DEFAULT_STOP_TAG
debug = False
strict = False
color = False
#exif_log.setup_logger(debug, color)
try:
filename = ppTagConfig.PHOTOS_LIBRARY_PATH + filename
img_file = open(str(filename), 'rb')
data = process_file(img_file, stop_tag=stop_tag, details=detailed, strict=strict, debug=debug)
img_file.close()
if not data:
logging.info("No EXIF information for '%s'" % filename)
return None
if 'JPEGThumbnail' in data:
del data['JPEGThumbnail']
if 'TIFFThumbnail' in data:
del data['TIFFThumbnail']
except IOError:
logging.debug("'%s' is unreadable" % filename)
return None
except:
logging.error("Exif process_file error: '%s'" % filename)
return None
return data
def getXMP(data):
XMP = None
if 'Image ApplicationNotes' in data:
try:
xml = data['Image ApplicationNotes'].printable
XMP = parse_xmp_for_lightroom_tags(xml)
except:
logging.error("Unable to parse XMP")
return XMP
def updateTagsAndRating(key, filename):
data = getdata(filename)
if not data:
return
parsedXMP = getXMP(data)
if parsedXMP:
logging.info("Processing '%s'" % filename)
updateMetadata(key, parsedXMP['tags'], int(parsedXMP['rating'])*2)
else:
logging.info("No XMP data for '%s'" % filename)
def parseExifAndTags(filename):
data = getdata(filename)
if not data:
return None
parsedXMP = getXMP(data)
if not parsedXMP:
parsedXMP = {}
parsedXMP['rating'] = 0
parsedXMP['tags'] = []
try:
date = datetime.fromtimestamp(datetime.strptime(data['EXIF DateTimeOriginal'].printable+data['EXIF Tag 0x9011'].printable, '%Y:%m:%d %H:%M:%S%z').timestamp()).date()
except:
try:
date = datetime.strptime(data['EXIF DateTimeOriginal'].printable, '%Y:%m:%d %H:%M:%S').date()
except:
# fallback to the modify date on the file
datetimeModified = datetime.fromtimestamp(os.path.getmtime(filename))
date = datetimeModified.date()
return PhotoElement(filename, date, parsedXMP['tags'], parsedXMP['rating'])
def triggerProcess():
global t
global lastTS
lastTS = datetime.now()
if t is None or not t.is_alive() :
logging.info("Starting timer")
t = threading.Timer(120,fetchPhotosAndProcess)
t.start()
def uniqify(seq):
return list(dict.fromkeys(seq)) # order preserving
def fetchPhotosAndProcess():
global firstRun
global lastTS
if firstRun: # complete update on startup requested
loopThroughAllPhotos()
else: # must be in the timer thread so process backlog
# keep processing until there is nothing more to do so we don't have to worry about missed triggers
while len(doUpdate) > 0:
# wait for 120 seconds of idle time so that plex can process any creates first
while datetime.now()-lastTS < timedelta(seconds=120):
time.sleep(120-(datetime.now()-lastTS).total_seconds()+1)
# Try to find all photos based on date
if fetchAndProcessByDate():
# failed so loop through all photoa to find the rest
loopThroughAllPhotos()
def fetchAndProcessByDate():
global doUpdate
global lock
dateSearchFailed = []
while len(doUpdate) > 0:
lock.acquire()
doUpdateTemp = uniqify(doUpdate)
doUpdate = []
lock.release()
photoGroups = {}
# first group all photos by date
for filepath in doUpdateTemp[:] :
photoElement = parseExifAndTags(filepath)
if photoElement:
# this has exif data
date = photoElement.date()
if date in photoGroups.keys():
photoGroups[date].append(photoElement)
else:
photoGroups[date] = [photoElement]
else: # missing or not a photo
doUpdateTemp.remove(filepath)
for date in photoGroups.keys():
fromTimecode = int(datetime.strptime(date.isoformat(), '%Y-%m-%d').timestamp())
toTimecode = int((datetime.strptime(date.isoformat(), '%Y-%m-%d') + timedelta(days=1)).timestamp())-1
toDo = True
start = 0
size = 1000
# Make a key list of all pics in the date range
plexData = {}
if p.photoSection:
while toDo:
url = "/library/sections/" + str(p.photoSection) + "/all?originallyAvailableAt%3E=" + str(fromTimecode) + "&originallyAvailableAt%3C=" + str(toTimecode) + "&X-Plex-Container-Start=%i&X-Plex-Container-Size=%i" % (start, size)
#logging.info("URL: %s", url)
metadata = p.fetchPlexApi(url)
container = metadata["MediaContainer"]
if 'Metadata' not in container:
# no photos in this time range (probably wrong section)
break
elements = container["Metadata"]
totalSize = container["totalSize"]
offset = container["offset"]
size = container["size"]
start = start + size
if totalSize-offset-size == 0:
toDo = False
# loop through all elements
for photo in elements:
mediaType = photo["type"]
if mediaType != "photo":
continue
key = photo["ratingKey"]
src = photo["Media"][0]["Part"][0]["file"].replace(ppTagConfig.PHOTOS_LIBRARY_PATH_PLEX,"", 1)
#logging.info(" Map: %s -> %s", src, key)
plexData[src] = key
# Update the pics that changed in the date range
for photo in photoGroups[date]:
path = photo.path()
# make sure path seperator is equal in plex and ppTag
if "/" in ppTagConfig.PHOTOS_LIBRARY_PATH_PLEX:
path = path.replace("\\","/")
if path in plexData.keys():
logging.info("Processing by date '%s'" % path)
updateMetadata(plexData[path], photo.tags(), photo.rating()*2)
doUpdateTemp.remove(path)
# if we failed to process something then defer those to a full scan
if len(doUpdateTemp):
dateSearchFailed = [*dateSearchFailed, *doUpdateTemp]
# if we failed to process something then trigger a full scan
if len(dateSearchFailed) > 0:
logging.warning("Some updated files were not found by date range.")
lock.acquire()
doUpdate = [*dateSearchFailed, *doUpdate]
lock.release()
return True
return False
def loopThroughAllPhotos():
global doUpdate
global firstRun
doUpdateTemp = uniqify(doUpdate)
doUpdate = []
toDo = True
start = 0
size = 1000
#print('loop through all, started %i' % int(time.time()))
if p.photoSection:
while toDo:
url = "/library/sections/" + str(p.photoSection) + "/all?clusterZoomLevel=1&X-Plex-Container-Start=%i&X-Plex-Container-Size=%i" % (start, size)
metadata = p.fetchPlexApi(url)
container = metadata["MediaContainer"]
elements = container["Metadata"]
totalSize = container["totalSize"]
offset = container["offset"]
size = container["size"]
start = start + size
if totalSize-offset-size == 0:
toDo = False
# loop through all elements
for photo in elements:
mediaType = photo["type"]
if mediaType != "photo":
continue
key = photo["ratingKey"]
src = photo["Media"][0]["Part"][0]["file"].replace(ppTagConfig.PHOTOS_LIBRARY_PATH_PLEX,"", 1)
# make sure path seperator is equal in plex and ppTag
if "\\" in ppTagConfig.PHOTOS_LIBRARY_PATH:
src = src.replace("/","\\")
if src in doUpdateTemp or firstRun:
# update tags and rating
# print(key)
# print(src)
updateTagsAndRating(key, src)
try:
doUpdateTemp.remove(src)
except:
pass # ok if missing, probably firstRun
if not firstRun and len(doUpdateTemp) == 0:
toDo = False
break
if not firstRun:
for src in doUpdateTemp:
logging.info("Skipped file not found in this section '%s'" % src)
firstRun = False
class PhotoHandler(PatternMatchingEventHandler):
patterns=["*"]
ignore_patterns=["*thumb*"]
def process(self, event):
"""
event.event_type
'modified' | 'created' | 'moved' | 'deleted'
event.is_directory
True | False
event.src_path
path/to/observed/file
"""
if not event.is_directory:
if (event.event_type == 'modified' or event.event_type == 'created' or event.event_type == 'moved'):
# check if file belongs to monitored section
for folder in p.photoLocations:
if event.src_path.startswith(folder):
# put file into forced update list
pptag_path=event.src_path.replace(ppTagConfig.PHOTOS_LIBRARY_PATH,"", 1)
if pptag_path not in doUpdate:
logging.info("Queued '%s'", event.src_path)
lock.acquire()
doUpdate.append(pptag_path)
lock.release()
triggerProcess()
return
logging.debug("Ignored file in wrong location: '%s'" % event.src_path)
else:
logging.debug("Ignored event '%s' for file '%s'" % (event.event_type,event.src_path))
def on_modified(self, event):
self.process(event)
def on_created(self, event):
self.process(event)
if __name__ == '__main__':
if ppTagConfig.LOG_LEVEL is None or ppTagConfig.LOG_LEVEL == '':
ppTagConfig.LOG_LEVEL = 'CRITICAL'
logging.basicConfig(level=getattr(logging,ppTagConfig.LOG_LEVEL), format='%(asctime)s %(levelname)s - %(message)s')
if ppTagConfig.TIMEZONE is not None :
os.environ['TZ'] = ppTagConfig.TIMEZONE
lock = threading.Lock()
# setup observer
observer = Observer()
observer.schedule(PhotoHandler(), path=ppTagConfig.PHOTOS_LIBRARY_PATH, recursive=True)
p = plexUsers()
# run at startup
fetchPhotosAndProcess()
# now start the observer
observer.start()
try:
while True:
time.sleep(5)
except KeyboardInterrupt:
observer.stop()
observer.join()
| 2.296875 | 2 |
data/livecode/fill_ts.py | james94/driverlessai-recipes | 0 | 12790734 | """Add any missing Group by Date records and fill with a default value - additional columns will be null for the default values"""
# Column names in our dataset
ts_column = "Date"
group_by_columns = ["Store", "Dept"]
target_column = "Weekly_Sales"
default_missing_value = 0
# check the datatype of user-defined input variables
if not isinstance(ts_column, str):
raise ValueError("Variable: 'ts_column' should be <str>")
if not isinstance(group_by_columns, list):
raise ValueError("Column: 'group_by_columns' should be <list>")
if not isinstance(target_column, str):
raise ValueError("Column: 'target_column' should be <str>")
# don't check datatype of default_missing_value because it depends on the column
# check if user-defined inputs exist in the dataset
features = list(X.names)
if ts_column not in features:
raise ValueError("Column: '" + ts_column + "' is not present in the data set")
for _ in group_by_columns:
if _ not in features:
raise ValueError("Group by Column: '" + str(_) + "' is not present in the dataset")
if target_column not in features:
raise ValueError("Column: '" + target_column + "' is not present in the data set")
# convert to pandas
df = X.to_pandas()
# order by group(s) and time
df = df.sort_values(group_by_columns + [ts_column])
# cross join of dates and groups
unique_dates = pd.DataFrame(df[ts_column].unique(), columns=[ts_column])
unique_dates['key'] = 0
unique_groups = df[group_by_columns].drop_duplicates()
unique_groups['key'] = 0
all_vals = pd.merge(unique_dates, unique_groups, how="outer").drop("key", axis=1)
# join back to the original dataset
df_filled = pd.merge(df, all_vals, how="outer")
# fill all nulls with default value - this is appropriate for TS experiments, even if there were existing nulls
df_filled[target_column] = df_filled[target_column].fillna(default_missing_value)
return df_filled
| 3.515625 | 4 |
anondolok_library/admin_panel/migrations/0017_wish_list_is_wished.py | Horraira/anondolokLibrary | 0 | 12790735 | # Generated by Django 3.2.1 on 2021-08-08 07:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('admin_panel', '0016_wish_list'),
]
operations = [
migrations.AddField(
model_name='wish_list',
name='is_wished',
field=models.BooleanField(default=False),
),
]
| 1.554688 | 2 |
src/postprocessing/put_softmax_in_subject_space.py | LucasFidon/feta-inference | 0 | 12790736 | """
Use this script to post-process the predicted softmax segmentation.
This script performs rigid registration of the softmax prediction to the subject space.
@author: <NAME> (<EMAIL>)
"""
import os
from argparse import ArgumentParser
import numpy as np
import nibabel as nib
parser = ArgumentParser()
parser.add_argument('--softmax', required=True,
help='path to the softmax prediction in the template space.')
parser.add_argument('--aff', required=True,
help='path to the Affine transformation that was used'
'to go from subject space to template space.')
parser.add_argument('--input_img', required=True,
help='Path to the SRR to preprocess')
parser.add_argument('--output_folder', required=True)
def invert_affine(aff_path, output_dir):
if not os.path.exists(output_dir):
os.mkdir(output_dir)
aff_name = os.path.split(aff_path)[1].replace('.txt', '')
save_inv_aff_path = os.path.join(
output_dir,
'%s_inv.txt' % aff_name,
)
cmd = 'reg_transform -invAff %s %s' % (aff_path, save_inv_aff_path)
os.system(cmd)
return save_inv_aff_path
def warp_softmax(softmax_path, ref_img_path, save_path, aff_path):
# Warp the softmax
cmd = 'reg_resample -ref %s -flo %s -trans %s -res %s -inter 1 -pad 0 -voff' % \
(ref_img_path, softmax_path, aff_path, save_path)
os.system(cmd)
# Fix border effects due to padding with 0 AND change order of channels
softmax_nii = nib.load(save_path)
softmax = softmax_nii.get_fdata().astype(np.float32)
sum_proba = np.sum(softmax, axis=-1)
softmax[:, :, :, 0] += 1. - sum_proba
post_softmax_nii = nib.Nifti1Image(softmax, softmax_nii.affine)
nib.save(post_softmax_nii, save_path)
def main(args):
if not os.path.exists(args.output_folder):
os.mkdir(args.output_folder)
# Compute the inverse affine transform
print('Invert %s' % args.aff)
inv_aff_path = invert_affine(aff_path=args.aff, output_dir=args.output_folder)
print(inv_aff_path)
# Warp the softmax
save_path = os.path.join(args.output_folder, 'softmax.nii.gz')
print('warp %s' % args.softmax)
warp_softmax(
softmax_path=args.softmax,
ref_img_path=args.input_img,
save_path=save_path,
aff_path=inv_aff_path,
)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
| 2.671875 | 3 |
PiBlynk-py/oled96/__init__.py | BLavery/PyBlynk | 12 | 12790737 | <reponame>BLavery/PyBlynk<filename>PiBlynk-py/oled96/__init__.py
#!/usr/bin/env python
# USAGE:
# from oled96 import oled
# or
# import oled96
# oled = oled96.OLED(0x3c) or 3d
from PIL import Image, ImageDraw, ImageFont
from smbus import SMBus
font1 = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSans.ttf', 12)
font2 = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSans.ttf', 19)
font3 = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSans.ttf', 36)
#font4 = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf', 12)
font5 = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf', 19)
#font6 = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf', 36)
class OLED():
def __init__(self, address=0x3C):
try:
self.bus = SMBus(1)
except:
try:
self.bus = SMBus(0)
except:
exit(7)
self.cmd_mode = 0x00
self.data_mode = 0x40
self.addr = address
self.width = 128
self.height = 64
self.pages = int(self.height / 8)
self.image = Image.new('1', (self.width, self.height))
self.canvas = ImageDraw.Draw(self.image) # this is a "draw" object for preparing display contents
self.jnl4=["","Jnl:","",""]
self._command(
const.DISPLAYOFF,
const.SETDISPLAYCLOCKDIV, 0x80,
const.SETMULTIPLEX, 0x3F,
const.SETDISPLAYOFFSET, 0x00,
const.SETSTARTLINE,
const.CHARGEPUMP, 0x14,
const.MEMORYMODE, 0x00,
const.SEGREMAP,
const.COMSCANDEC,
const.SETCOMPINS, 0x12,
const.SETCONTRAST, 0xCF,
const.SETPRECHARGE, 0xF1,
const.SETVCOMDETECT, 0x40,
const.DISPLAYALLON_RESUME,
const.NORMALDISPLAY,
const.DISPLAYON)
def _command(self, *cmd):
assert(len(cmd) <= 31)
self.bus.write_i2c_block_data(self.addr, self.cmd_mode, list(cmd))
def _data(self, data):
# In our library, only data operation used is 128x64 long, ie whole canvas.
for i in range(0, len(data), 31):
self.bus.write_i2c_block_data(self.addr, self.data_mode, list(data[i:i+31]))
def display(self):
"""
The image on the "canvas" is flushed through to the hardware display.
Takes the 1-bit image and dumps it to the SSD1306 OLED display.
"""
self._command(
const.COLUMNADDR, 0x00, self.width-1, # Column start/end address
const.PAGEADDR, 0x00, self.pages-1) # Page start/end address
pix = list(self.image.getdata())
step = self.width * 8
buf = []
for y in range(0, self.pages * step, step):
i = y + self.width-1
while i >= y:
byte = 0
for n in range(0, step, self.width):
byte |= (pix[i + n] & 0x01) << 8
byte >>= 1
buf.append(byte)
i -= 1
self._data(buf) # push out the whole lot
def cls(self):
self.blank()
self.display()
def blank(self):
self.canvas.rectangle((0, 0, self.width-1, self.height-1), outline=0, fill=0)
def onoff(self, onoff):
if onoff == 0:
self._command(const.DISPLAYOFF)
else:
self._command(const.DISPLAYON)
# ABOVE are raw oled functions
# BELOW are some pre-formatted layouts
def msgBox(self,hdr="", str1="", str2="", str3=""): # header autocentred
oled.blank()
self.canvas.rectangle((0, 19, oled.width-1, oled.height-1), outline=1, fill=0)
        self.canvas.text((2+(11-len(hdr))/2*124/11, 2), hdr, font=font5, fill=1)
self.canvas.text((4,23), str1, font=font1, fill=1)
self.canvas.text((4,36), str2, font=font1, fill=1)
self.canvas.text((4,49), str3, font=font1, fill=1)
oled.display()
def yell2(self,str1="", str2=""): # 11 char max x 2 lines
oled.blank()
self.canvas.text((2, 10), str1, font=font2, fill=1)
self.canvas.text((2,40), str2, font=font2, fill=1)
oled.display()
def yell(self,str1="", str2=""): # 5 char max, 1 line
oled.blank()
self.canvas.text((2, 20), str1, font=font3, fill=1)
oled.display()
def bar(self,str1,val,dispval=None): # val = 0 to 100 for graph, dispval if different from val. Autocentre.
oled.blank()
if dispval == None:
dispval = val
dispval = str(int(dispval))
#print(2+(11-len(str1))/2*124/11)
self.canvas.text((2+(11-len(str1))/2*124/11, 2), str1, font=font5, fill=1)
self.canvas.rectangle((0, 31, oled.width-1, 40), outline=1, fill=1)
self.canvas.rectangle((int((val*126)/100), 32, oled.width-2, 39), outline=1, fill=0)
self.canvas.text((2+(11-len(dispval))/2*124/11,43), dispval, font=font2, fill=1)
oled.display()
def jnl(self,str1):
oled.blank()
self.jnl4.pop(0)
self.jnl4.append(str1)
self.canvas.rectangle((0, 0, oled.width-1, oled.height-1), outline=1, fill=0)
self.canvas.text((4, 3), self.jnl4[0], font=font1, fill=1)
self.canvas.text((4,18), self.jnl4[1], font=font1, fill=1)
self.canvas.text((4,33), self.jnl4[2], font=font1, fill=1)
self.canvas.text((4,48), self.jnl4[3], font=font1, fill=1)
oled.display()
class const:
CHARGEPUMP = 0x8D
COLUMNADDR = 0x21
COMSCANDEC = 0xC8
COMSCANINC = 0xC0
DISPLAYALLON = 0xA5
DISPLAYALLON_RESUME = 0xA4
DISPLAYOFF = 0xAE
DISPLAYON = 0xAF
EXTERNALVCC = 0x1
INVERTDISPLAY = 0xA7
MEMORYMODE = 0x20
NORMALDISPLAY = 0xA6
PAGEADDR = 0x22
SEGREMAP = 0xA0
SETCOMPINS = 0xDA
SETCONTRAST = 0x81
SETDISPLAYCLOCKDIV = 0xD5
SETDISPLAYOFFSET = 0xD3
SETHIGHCOLUMN = 0x10
SETLOWCOLUMN = 0x00
SETMULTIPLEX = 0xA8
SETPRECHARGE = 0xD9
SETSEGMENTREMAP = 0xA1
SETSTARTLINE = 0x40
SETVCOMDETECT = 0xDB
SWITCHCAPVCC = 0x2
oled = OLED()
import sys
if __name__ == '__main__':
print (sys.argv[0], 'is an importable module:')
exit()
| 2.1875 | 2 |
Python/Courses/Python-Tutorials.Telusko/02.Miscellaneous/24.zip.py | shihab4t/Books-Code | 0 | 12790738 | <gh_stars>0
names = ["Navin", "Kiran", "Harsh", "Navin"]
comps = ["Dell", "Apple", "MS", "Dell"]
zipped = zip(names, comps)
for a, b in zipped:
print(a, b)
| 3.1875 | 3 |
LeetCode/python/211-240/225-implement-stack-using-queues/solution.py | shootsoft/practice | 0 | 12790739 | class Stack:
# initialize your data structure here.
def __init__(self):
self.q1 = []
self.q2 =[]
self.switch = 1
self.length = 0
# @param x, an integer
# @return nothing
def push(self, x):
if self.switch == 1:
self.q1.append(x)
while(len(self.q2)>0):
self.q1.append(self.q2.pop(0))
self.switch = 2
else:
self.q2.append(x)
while(len(self.q1)>0):
self.q2.append(self.q1.pop(0))
self.switch = 1
self.length += 1
# @return nothing
def pop(self):
if self.length ==0:
return None
self.length -= 1
if self.switch == 1:
return self.q2.pop(0)
else:
return self.q1.pop(0)
# @return an integer
def top(self):
if self.length ==0:
return None
if self.switch == 1:
return self.q2[0]
else:
return self.q1[0]
# @return an boolean
def empty(self):
return self.length == 0
s = Stack()
s.push(1)
s.push(2)
s.push(3)
print s.top()
print s.pop()
print s.pop()
print s.pop() | 3.90625 | 4 |
chapter14_OOP/2019/Number.py | motazsaad/WDMM1405 | 4 | 12790740 | <reponame>motazsaad/WDMM1405<filename>chapter14_OOP/2019/Number.py
class Number:
def __init__(self, num):
self.number = num
def __str__(self):
return str(self.number)
def __add__(self, other):
self.number += other
return Number(self.number)
# class
n = Number(10)
print(n)
n + 10
print(n)
n += 10
print(n)
print(type(n))
# primitive type
x = 10
print(x)
x += 10
print(x)
print(type(x))
| 3.953125 | 4 |
pseudo/middlewares/standard_middleware.py | mifieldxu/pseudo-lang | 661 | 12790741 | from pseudo.middlewares.middleware import Middleware
from pseudo.pseudo_tree import Node
class StandardMiddleware(Middleware):
'''
changes standard_iterable_call in return to a special type
used by go
'''
@classmethod
def process(cls, tree):
return cls().transform(tree)
def transform_r(self, node, in_block=False, assignment=None):
if node.value.type == 'standard_iterable_call':
node.value.type = 'standard_iterable_call_return'
return node.value
else:
return node
transform_explicit_return = transform_implicit_return = transform_r
| 2.65625 | 3 |
sample.py | tecdan/NCE_Alias_Sampling | 0 | 12790742 | """A minimal sample script for illustration of basic usage of NCE module"""
import torch
from nce import IndexLinear
class_freq = [0, 2, 2, 3, 4, 5, 6]  # unigram class frequencies (counts)
freq_count = torch.FloatTensor(class_freq)
print("total counts for all tokens:", freq_count.sum())
noise = freq_count / freq_count.sum()
# IndexLinear inherits from the NCELoss class
nce_linear = IndexLinear(
embedding_dim=100, # input dim
num_classes=300000, # output dim
noise=noise,
)
# here, `input` stands in for features that have already passed through the embedding layer
input = torch.Tensor(200, 100) # [batch, emb_dim]
# target is all ones here; in a real task it should be the ids of the correct tokens
target = torch.ones(200, 1).long() # [batch, 1]
# training mode
loss = nce_linear(target, input).mean()
print(loss.item())
# evaluation mode for fast probability computation
nce_linear.eval()
prob = nce_linear(target, input).mean()
print(prob.item())
| 2.875 | 3 |
aybtools/minitest.py | abc1035/aiyubin-task1 | 0 | 12790743 | """This file generates a small dataset for testing."""
import json
f = open("/home/ayb/UVM_Datasets/voc_test3.json", "r")
line = f.readline()
f.close()
dic = json.loads(line)
images = dic['images']
new_images=[]
for image in images:
if "ten" in image['file_name']:
continue
else:
new_images.append(image)
image_id = []
annotations = dic['annotations']
new_annotations = []
for image in new_images:
# print(image)
image_id.append(image['id'])
for annotation in annotations:
if annotation['image_id'] in image_id:
new_annotations.append(annotation)
dic["images"] = new_images
dic["annotations"] = new_annotations
f1 = open("/home/ayb/UVM_Datasets/voc_test_not_ten.json", "w")
dic_json = json.dumps(dic)
f1.write(str(dic_json))
f1.close() | 2.8125 | 3 |
ex011.py | mateusguida/ExerciciosPython | 0 | 12790744 | <reponame>mateusguida/ExerciciosPython
larg = float(input("Largura da parede: "))
alt = float(input("Altura da parede: "))
area = larg * alt
print(f'Sua parede tem a dimensão de {larg:.2f}x{alt:.2f} e sua área é de {area:.3f}m².')
print(f'Para pintar essa parede, você precisará de {area/2} litros de tinta') | 3.8125 | 4 |
movielist_app/tests.py | rhedwan/BuildingDjangoAPI | 0 | 12790745 | <reponame>rhedwan/BuildingDjangoAPI
from django.contrib.auth.models import User
from django.http import response
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from rest_framework.authtoken.models import Token
from movielist_app.api import serializers
from movielist_app import models
class StreamPlatformTestCase(APITestCase):
def setUp(self):
self.user = User.objects.create_user(username="example", password="<PASSWORD>")
self.token = Token.objects.get(user__username = self.user)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
self.stream = models.StreamPlatform.objects.create(
name = "Netflix",
about = "#1 Streaming Platform",
website = "https://netflix.com"
)
def test_streamplatform_create(self):
data = {
"name" : "Netflix",
"about" : "#1 Streaming Platform",
"website" : "https://netflix.com"
}
response = self.client.post(reverse('streamplatform-list'), data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_streamplatform_list(self):
response = self.client.get(reverse('streamplatform-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_streamplatform_ind(self):
response = self.client.get(reverse('streamplatform-detail' ,args= (self.stream.id,)))
self.assertEqual(response.status_code, status.HTTP_200_OK)
class WatchListTestCase(APITestCase):
def setUp(self):
self.user = User.objects.create_user(username="example", password="<PASSWORD>")
self.token = Token.objects.get(user__username = self.user)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
self.stream = models.StreamPlatform.objects.create(
name = "Netflix",
about = "#1 Streaming Platform",
website = "https://netflix.com"
)
self.watchlist = models.WatchList.objects.create(
title = "Example Movie" ,
storyline = "Example Story",
platform = self.stream ,
active = True
)
def test_watchlist_create(self):
data = {
"title": "Example Movie" ,
"storyline": "Example Story",
"platform" : self.stream ,
"active" : True
}
response = self.client.post(reverse('movie-list') , data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_watchlist_list(self):
response = self.client.get(reverse('movie-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_watchlist_ind(self):
response = self.client.get(reverse('movie-detail' ,args= (self.watchlist.id,)))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(models.WatchList.objects.count(), 1)
self.assertEqual(models.WatchList.objects.get().title, 'Example Movie')
class ReviewTestCase(APITestCase):
def setUp(self):
self.user = User.objects.create_user(username="example", password="<PASSWORD>")
self.token = Token.objects.get(user__username = self.user)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
self.stream = models.StreamPlatform.objects.create(
name = "Netflix",
about = "#1 Streaming Platform",
website = "https://netflix.com"
)
self.watchlist = models.WatchList.objects.create(
title = "Example Movie" ,
storyline = "Example Story",
platform = self.stream ,
active = True
)
self.watchlist2 = models.WatchList.objects.create(
title = "Example Movie" ,
storyline = "Example Story",
platform = self.stream ,
active = True
)
self.review = models.Review.objects.create(
review_user =self.user ,
rating = 5,
description = "Great Movie!!!",
watchlist = self.watchlist2,
active = True
)
def test_review_create(self):
data = {
"review_user": self.user ,
"rating " : 5,
"description" : "Great Movie!!!",
"watchlist" : self.watchlist,
"active" : True
}
response = self.client.post(reverse('review-create', args=(self.watchlist.id,)), data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Added more checks to the tests, i.e. for the content and
        # the number of reviews that can be created on a movie.
self.assertEqual(models.Review.objects.count(), 2)
response = self.client.post(reverse('review-create', args=(self.watchlist.id,)), data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_review_create_unauth(self):
data = {
"review_user": self.user ,
"rating " : 5,
"description" : "Great Movie!!!",
"watchlist" : self.watchlist,
"active" : True
}
self.client.force_authenticate(user=None)
response = self.client.post(reverse('review-create', args=(self.watchlist.id,)), data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_review_create_update(self):
data = {
"review_user": self.user ,
"rating " : 4,
"description" : "Great Movie!!!-(Updated)",
"watchlist" : self.watchlist,
"active" : False
}
response = self.client.put(reverse('review-detail', args=(self.review.id,)), data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_review_list(self):
response = self.client.get(reverse('review-list', args=(self.watchlist.id,)))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_review_ind(self):
response = self.client.get(reverse('review-detail', args=(self.review.id,)))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_review_delete(self):
response = self.client.delete(reverse('review-detail', args=(self.review.id,)))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_review_user(self):
        response = self.client.get('/watch/reviews/?username=' + self.user.username)
self.assertEqual(response.status_code, status.HTTP_200_OK)
"""
IMPORTANT: Here we are using the 'user', which isn't the 'admin'. Hence it is
going to return "HTTP_403_FORBIDDEN", which is expected.
NOTE: Once a request is sent without authorization, it returns
"HTTP_401_UNAUTHORIZED".
The "setUp" method is taking care of that.
NOTE: Once a request is sent without "admin" credentials, it returns
"HTTP_403_FORBIDDEN".
NOTE: "self.stream" is used for creating the "StreamPlatform" manually.
NOTE: The "test_streamplatform_ind" method is for getting the individual object
using "self.stream".
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< WatchListTestCase >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
1. We are using the "setUp" method again to create a "StreamPlatform" for the
tests we are currently writing. Hence, each test class is independent.
2. "self.watchlist" is an attribute in "setUp" for creating the "watchlist"
object manually.
----------------------- Forcing authentication -------------------------
LINKS: https://www.django-rest-framework.org/api-guide/testing/#forcing-authentication
3. We are "forcing authentication" to log in as an anonymous user.
4. Created a "self.watchlist" attribute to get an id for the review to be updated.
NOTE: Two watchlists ('self.watchlist' and 'self.watchlist2') were created because
we aren't allowed to send multiple reviews for a single watchlist.
The first is used to test "test_review_create" and "test_review_create_unauth",
while the second is used for the update ("PUT").
""" | 2.53125 | 3 |
Flask/15.py | Pendragon21/FlaskExercises | 0 | 12790746 | import os
from flask import Flask, g, render_template
import flask_sijax
path = os.path.join('.', os.path.dirname(__file__), 'static/js/sijax/')
app = Flask(__name__)
app.config['SIJAX_STATIC_PATH'] = path
app.config['SIJAX_JSON_URI'] = '/static/js/sijax/json2.js'
flask_sijax.Sijax(app)
@app.route('/')
def index():
return 'Index'
@flask_sijax.route(app, '/hello')
def hello():
def say_hi(obj_response):
obj_response.alert('Hi there!')
if g.sijax.is_sijax_request:
g.sijax.register_callback('say_hi', say_hi)
return g.sijax.process_request()
    return render_template('sijaxexample.html')
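
# A hedged sketch of the client side (assumed, not part of this file): the
# 'sijaxexample.html' template would load the Sijax JavaScript assets and
# trigger the 'say_hi' handler registered above with something like:
#     <button onclick="Sijax.request('say_hi');">Say hi</button>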
if __name__ == '__main__':
app.run(debug = True) | 2.390625 | 2 |
wtforms_dynamic_fields/wtforms_dynamic_fields.py | Timusan/wtforms-dynamic-fields | 20 | 12790747 | import re
import sys
from wtforms.form import FormMeta
class WTFormsDynamicFields():
""" Add dynamic (set) fields to a WTForm.
Instantiating this class will merely create a configuration
dictionary on which you can add fields and validators using
the designated methods "add_field" and "add_validator".
Calling the "process" method will take care of
actually applying the build configuration to the WTForm form.
This method will take a WTForm form object and attach new
fields to it according to a match between what is in the POST
and what is defined in the build configuration dictionary.
It has the added ability to process sets of fields that
are suffixed with the convention of '_X' where X is a number.
For ease of configuration, these set names will be traced back
to their canonical name so that each of these fields only have
to be defined once in the configuration.
Inside the configuration there is the ability to reference
other fields within the validator arguments with the convention
of surrounding it with % signs. Fields that belong to a set
will be automatically suffixed with their set number (_X)
when they are bound to the validator.
The latter brings the power to reference set fields with their
canonical name without needing to care about the set number that
will be used later on when injecting them in the DOM.
"""
def __init__(self, flask_wtf=False):
""" Class init.
:param flask_wtf: Is this form a Flask WTF or a plain WTF instance?
"""
self._dyn_fields = {}
self.flask_wtf=flask_wtf
def add_field(self, name, label, field_type, *args, **kwargs):
""" Add the field to the internal configuration dictionary. """
if name in self._dyn_fields:
raise AttributeError('Field already added to the form.')
else:
self._dyn_fields[name] = {'label': label, 'type': field_type,
'args': args, 'kwargs': kwargs}
def add_validator(self, name, validator, *args, **kwargs):
""" Add the validator to the internal configuration dictionary.
:param name:
The field machine name to apply the validator on
:param validator:
The WTForms validator object
The rest are optional arguments and keyword arguments that
belong to the validator. We let them simply pass through
to be checked and bound later.
"""
if name in self._dyn_fields:
if 'validators' in self._dyn_fields[name]:
self._dyn_fields[name]['validators'].append(validator)
self._dyn_fields[name][validator.__name__] = {}
if args:
self._dyn_fields[name][validator.__name__]['args'] = args
if kwargs:
self._dyn_fields[name][validator.__name__]['kwargs'] = kwargs
else:
self._dyn_fields[name]['validators'] = []
self.add_validator(name, validator, *args, **kwargs)
else:
raise AttributeError('Field "{0}" does not exist. '
'Did you forget to add it?'.format(name))
@staticmethod
def iteritems(dict):
""" Refusing to use a possible memory hugging
Python2 .items() method. So for providing
both Python2 and 3 support, setting up iteritems()
as either items() in 3 or iteritems() in 2.
"""
if sys.version_info[0] >= 3:
return dict.items()
else:
return dict.iteritems()
def process(self, form, post):
""" Process the given WTForm Form object.
        Iterate over the POST values and check each field
against the configuration that was made.
For each field that is valid, check all the validator
parameters for possible %field% replacement, then bind
these parameters to their validator.
Finally, add the field together with their validators
to the form.
:param form:
A valid WTForm Form object
:param post:
A MultiDict with the POST variables
"""
if not isinstance(form, FormMeta):
raise TypeError('Given form is not a valid WTForm.')
re_field_name = re.compile(r'\%([a-zA-Z0-9_]*)\%')
class F(form):
pass
        for field, data in self.iteritems(post):  # py2/3-safe items iteration
if field in F():
# Skip it if the POST field is one of the standard form fields.
continue
else:
if field in self._dyn_fields:
# If we can find the field name directly, it means the field
# is not a set so just set the canonical name and go on.
field_cname = field
# Since we are not in a set, (re)set the current set.
current_set_number = None
elif (field.split('_')[-1].isdigit()
and field[:-(len(field.split('_')[-1]))-1] in self._dyn_fields.keys()):
# If the field can be split on underscore characters,
                    # the last part contains only digits, and
                    # everything *but* the last part is found in the
                    # field configuration, we are good to go.
# (Cowardly refusing to use regex here).
field_cname = field[:-(len(field.split('_')[-1]))-1]
# Since we apparently are in a set, remember the
# the set number we are at.
current_set_number = str(field.split('_')[-1])
else:
# The field did not match to a canonical name
# from the fields dictionary or the name
# was malformed, throw it out.
continue
# Since the field seems to be a valid one, let us
# prepare the validator arguments and, if we are in a set
# replace the %field_name% convention where we find it.
validators = []
if 'validators' in self._dyn_fields[field_cname]:
for validator in self._dyn_fields[field_cname]['validators']:
args = []
kwargs = {}
if 'args' in self._dyn_fields[field_cname]\
[validator.__name__]:
if not current_set_number:
args = self._dyn_fields[field_cname]\
[validator.__name__]['args']
else:
# If we are currently in a set, append the set number
# to all the words that are decorated with %'s within
# the arguments.
for arg in self._dyn_fields[field_cname]\
[validator.__name__]['args']:
try:
arg = re_field_name.sub(r'\1'+'_'+current_set_number,
arg)
except:
# The argument does not seem to be regex-able
# Probably not a string, thus we can skip it.
pass
args.append(arg)
if 'kwargs' in self._dyn_fields[field_cname]\
[validator.__name__]:
if not current_set_number:
kwargs = self._dyn_fields[field_cname]\
[validator.__name__]['kwargs']
else:
# If we are currently in a set, append the set number
# to all the words that are decorated with %'s within
# the arguments.
for key, arg in self.iteritems(self._dyn_fields[field_cname]\
[validator.__name__]['kwargs']):
try:
arg = re_field_name.sub(r'\1'+'_'+current_set_number,
arg)
except:
# The argument does not seem to be regex-able
# Probably not a string, thus we can skip it.
pass
kwargs[key] = arg
# Finally, bind arguments to the validator
# and add it to the list
validators.append(validator(*args, **kwargs))
# The field is setup, it is time to add it to the form.
field_type = self._dyn_fields[field_cname]['type']
field_label = self._dyn_fields[field_cname]['label']
field_args = self._dyn_fields[field_cname]['args']
field_kwargs = self._dyn_fields[field_cname]['kwargs']
setattr(F, field, field_type(field_label,
validators=validators,
*field_args,
**field_kwargs))
# Create an instance of the form with the newly
# created fields and give it back to the caller.
if self.flask_wtf:
# Flask WTF overrides the form initialization
# and already injects the POST variables.
form = F()
else:
form = F(post)
return form
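

# A minimal usage sketch (not part of the original module). The form, the
# field names, the validator message and the POST data below are illustrative
# assumptions; werkzeug's MultiDict stands in for the framework's POST object.
if __name__ == '__main__':
    from wtforms import Form, StringField, validators
    from werkzeug.datastructures import MultiDict

    class PersonalFile(Form):
        first_name = StringField('First name')

    dynamic = WTFormsDynamicFields()
    dynamic.add_field('email', 'Email address', StringField)
    dynamic.add_validator('email', validators.InputRequired,
                          message='%email% is required.')

    # POST fields suffixed with _1, _2, ... are traced back to the canonical
    # 'email' definition above; '%email%' in the message becomes 'email_1'.
    post = MultiDict([('first_name', 'John'), ('email_1', 'john@example.com')])
    form = dynamic.process(PersonalFile, post)
    print(form.validate(), [f.name for f in form])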
| 3.0625 | 3 |
conf.py | evhub/coconut | 3,624 | 12790748 | <reponame>evhub/coconut<gh_stars>1000+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------------------------------------------------
# INFO:
# -----------------------------------------------------------------------------------------------------------------------
"""
Author: <NAME>
License: Apache 2.0
Description: Sphinx configuration file for the Coconut Programming Language.
"""
# -----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
# -----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys
import os.path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from coconut.root import * # NOQA
from coconut.constants import (
version_str_tag,
without_toc,
with_toc,
)
from coconut.util import univ_open
import pydata_sphinx_theme # NOQA
import myst_parser # NOQA
# -----------------------------------------------------------------------------------------------------------------------
# README:
# -----------------------------------------------------------------------------------------------------------------------
with univ_open("README.rst", "r") as readme_file:
readme = readme_file.read()
with univ_open("index.rst", "w") as index_file:
index_file.write(readme.replace(without_toc, with_toc))
# -----------------------------------------------------------------------------------------------------------------------
# DEFINITIONS:
# -----------------------------------------------------------------------------------------------------------------------
from coconut.constants import ( # NOQA
project,
copyright,
author,
highlight_language,
)
version = VERSION
release = version_str_tag
html_theme = "pydata_sphinx_theme"
html_theme_options = {
}
master_doc = "index"
exclude_patterns = ["README.*"]
source_suffix = [".rst", ".md"]
default_role = "code"
extensions = ["myst_parser"]
myst_enable_extensions = [
"smartquotes",
]
myst_heading_anchors = 4
html_sidebars = {
"**": [
"localtoc.html",
],
}
| 1.445313 | 1 |
Clase01/hipoteca.py | qagustina/python-exercises | 0 | 12790749 | # 1.7: David's mortgage
# 1.9: Extra payment calculator
# 1.10: Tables
# 1.7
saldo = 500000.0
tasa = 0.05
pago_mensual = 2684.11
total_pagado = 0.0
mes = 0
# 1.9
pago_extra_mes_comienzo = 61
pago_extra_mes_fin = 108
pago_extra = 1000
mes_adelantado = 0
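
# Amortization loop: interest accrues monthly at tasa/12, and an extra payment
# of pago_extra is applied between months pago_extra_mes_comienzo and
# pago_extra_mes_fin (inclusive); the running totals are printed each month.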
while saldo > 0:
saldo = saldo * (1+tasa/12)
mes = mes + 1
if (pago_extra_mes_comienzo <= mes <= pago_extra_mes_fin):
saldo = saldo - pago_mensual - pago_extra
total_pagado = total_pagado + pago_mensual + pago_extra
mes_adelantado = mes_adelantado + 1
else:
saldo = saldo - pago_mensual
total_pagado = total_pagado + pago_mensual
print(mes, round(total_pagado, 2), round(saldo, 2)) # 1.10
print('Total pagado: ', round(total_pagado, ndigits=2))
print('Meses: ', mes)
print ('Meses adelantados: ', mes_adelantado) | 3.5625 | 4 |
Python/easy/0977_squares_of_a_sorted_array.py | CalmScout/LeetCode | 0 | 12790750 | <filename>Python/easy/0977_squares_of_a_sorted_array.py
"""
Given an array of integers A sorted in non-decreasing order,
return an array of the squares of each number, also in sorted non-decreasing order.
Example 1:
Input: [-4,-1,0,3,10]
Output: [0,1,9,16,100]
Example 2:
Input: [-7,-3,2,3,11]
Output: [4,9,9,49,121]
Note:
1 <= A.length <= 10000
-10000 <= A[i] <= 10000
A is sorted in non-decreasing order.
"""
from typing import List
class Solution:
def sortedSquares(self, A: List[int]) -> List[int]:
return sorted([el * el for el in A])
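

# An alternative linear-time approach (a sketch, not part of the original
# solution): fill the result from the back with two pointers, squaring the
# larger absolute value from either end of the sorted input at each step.
class SolutionTwoPointers:
    def sortedSquares(self, A: List[int]) -> List[int]:
        result = [0] * len(A)
        left, right = 0, len(A) - 1
        for i in range(len(A) - 1, -1, -1):
            if abs(A[left]) > abs(A[right]):
                result[i] = A[left] * A[left]
                left += 1
            else:
                result[i] = A[right] * A[right]
                right -= 1
        return result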
if __name__ == "__main__":
assert Solution().sortedSquares([-4,-1,0,3,10]) == [0,1,9,16,100]
assert Solution().sortedSquares([-7,-3,2,3,11]) == [4,9,9,49,121]
| 4.21875 | 4 |